Diffstat (limited to 'drivers')
-rw-r--r--drivers/Kconfig4
-rw-r--r--drivers/Makefile2
-rw-r--r--drivers/acpi/Kconfig37
-rw-r--r--drivers/acpi/Makefile9
-rw-r--r--drivers/acpi/ac.c7
-rw-r--r--drivers/acpi/acpi_memhotplug.c12
-rw-r--r--drivers/acpi/asus_acpi.c227
-rw-r--r--drivers/acpi/battery.c5
-rw-r--r--drivers/acpi/bay.c411
-rw-r--r--drivers/acpi/blacklist.c401
-rw-r--r--drivers/acpi/bus.c89
-rw-r--r--drivers/acpi/button.c12
-rw-r--r--drivers/acpi/cm_sbs.c15
-rw-r--r--drivers/acpi/container.c5
-rw-r--r--drivers/acpi/debug.c17
-rw-r--r--drivers/acpi/dispatcher/dsmethod.c3
-rw-r--r--drivers/acpi/dispatcher/dsmthdat.c83
-rw-r--r--drivers/acpi/dispatcher/dsobject.c62
-rw-r--r--drivers/acpi/dispatcher/dsopcode.c2
-rw-r--r--drivers/acpi/dispatcher/dswexec.c12
-rw-r--r--drivers/acpi/dock.c388
-rw-r--r--drivers/acpi/ec.c442
-rw-r--r--drivers/acpi/events/evgpe.c2
-rw-r--r--drivers/acpi/events/evxfevnt.c41
-rw-r--r--drivers/acpi/executer/exconfig.c123
-rw-r--r--drivers/acpi/executer/exconvrt.c32
-rw-r--r--drivers/acpi/executer/exdump.c87
-rw-r--r--drivers/acpi/executer/exmisc.c14
-rw-r--r--drivers/acpi/executer/exoparg1.c26
-rw-r--r--drivers/acpi/executer/exoparg2.c4
-rw-r--r--drivers/acpi/executer/exresnte.c16
-rw-r--r--drivers/acpi/executer/exresolv.c60
-rw-r--r--drivers/acpi/executer/exresop.c50
-rw-r--r--drivers/acpi/executer/exstore.c61
-rw-r--r--drivers/acpi/executer/exstoren.c3
-rw-r--r--drivers/acpi/fan.c7
-rw-r--r--drivers/acpi/glue.c40
-rw-r--r--drivers/acpi/hardware/hwsleep.c44
-rw-r--r--drivers/acpi/namespace/Makefile2
-rw-r--r--drivers/acpi/namespace/nsdump.c5
-rw-r--r--drivers/acpi/namespace/nseval.c73
-rw-r--r--drivers/acpi/namespace/nsnames.c7
-rw-r--r--drivers/acpi/namespace/nspredef.c900
-rw-r--r--drivers/acpi/namespace/nssearch.c2
-rw-r--r--drivers/acpi/namespace/nsxfeval.c78
-rw-r--r--drivers/acpi/namespace/nsxfname.c5
-rw-r--r--drivers/acpi/numa.c2
-rw-r--r--drivers/acpi/osl.c159
-rw-r--r--drivers/acpi/parser/psloop.c2
-rw-r--r--drivers/acpi/parser/psparse.c32
-rw-r--r--drivers/acpi/pci_irq.c56
-rw-r--r--drivers/acpi/pci_link.c17
-rw-r--r--drivers/acpi/pci_root.c10
-rw-r--r--drivers/acpi/pci_slot.c12
-rw-r--r--drivers/acpi/power.c83
-rw-r--r--drivers/acpi/processor_core.c99
-rw-r--r--drivers/acpi/processor_idle.c2
-rw-r--r--drivers/acpi/processor_perflib.c37
-rw-r--r--drivers/acpi/processor_thermal.c1
-rw-r--r--drivers/acpi/processor_throttling.c14
-rw-r--r--drivers/acpi/resources/rscalc.c5
-rw-r--r--drivers/acpi/resources/rscreate.c10
-rw-r--r--drivers/acpi/sbs.c2
-rw-r--r--drivers/acpi/sbshc.c6
-rw-r--r--drivers/acpi/scan.c120
-rw-r--r--drivers/acpi/sleep/main.c106
-rw-r--r--drivers/acpi/sleep/proc.c2
-rw-r--r--drivers/acpi/sleep/wakeup.c8
-rw-r--r--drivers/acpi/system.c52
-rw-r--r--drivers/acpi/tables/tbfadt.c34
-rw-r--r--drivers/acpi/tables/tbinstal.c61
-rw-r--r--drivers/acpi/thermal.c66
-rw-r--r--drivers/acpi/toshiba_acpi.c58
-rw-r--r--drivers/acpi/utilities/utalloc.c53
-rw-r--r--drivers/acpi/utilities/utcopy.c29
-rw-r--r--drivers/acpi/utilities/utdelete.c12
-rw-r--r--drivers/acpi/utilities/utglobal.c52
-rw-r--r--drivers/acpi/utilities/utmisc.c9
-rw-r--r--drivers/acpi/utilities/utobject.c15
-rw-r--r--drivers/acpi/utilities/utxface.c7
-rw-r--r--drivers/acpi/utils.c20
-rw-r--r--drivers/acpi/video.c102
-rw-r--r--drivers/acpi/video_detect.c267
-rw-r--r--drivers/acpi/wmi.c51
-rw-r--r--drivers/ata/Kconfig44
-rw-r--r--drivers/ata/ahci.c41
-rw-r--r--drivers/ata/ata_generic.c2
-rw-r--r--drivers/ata/ata_piix.c25
-rw-r--r--drivers/ata/libata-acpi.c135
-rw-r--r--drivers/ata/libata-core.c130
-rw-r--r--drivers/ata/libata-eh.c61
-rw-r--r--drivers/ata/libata-scsi.c7
-rw-r--r--drivers/ata/libata-sff.c24
-rw-r--r--drivers/ata/pata_acpi.c2
-rw-r--r--drivers/ata/pata_ali.c1
-rw-r--r--drivers/ata/pata_amd.c1
-rw-r--r--drivers/ata/pata_artop.c2
-rw-r--r--drivers/ata/pata_atiixp.c1
-rw-r--r--drivers/ata/pata_cmd640.c1
-rw-r--r--drivers/ata/pata_cmd64x.c2
-rw-r--r--drivers/ata/pata_cs5530.c1
-rw-r--r--drivers/ata/pata_cs5535.c3
-rw-r--r--drivers/ata/pata_cs5536.c1
-rw-r--r--drivers/ata/pata_cypress.c2
-rw-r--r--drivers/ata/pata_efar.c2
-rw-r--r--drivers/ata/pata_hpt366.c16
-rw-r--r--drivers/ata/pata_isapnp.c2
-rw-r--r--drivers/ata/pata_it821x.c7
-rw-r--r--drivers/ata/pata_jmicron.c2
-rw-r--r--drivers/ata/pata_legacy.c2
-rw-r--r--drivers/ata/pata_marvell.c2
-rw-r--r--drivers/ata/pata_mpiix.c2
-rw-r--r--drivers/ata/pata_netcell.c2
-rw-r--r--drivers/ata/pata_ninja32.c51
-rw-r--r--drivers/ata/pata_ns87410.c1
-rw-r--r--drivers/ata/pata_ns87415.c2
-rw-r--r--drivers/ata/pata_oldpiix.c2
-rw-r--r--drivers/ata/pata_opti.c1
-rw-r--r--drivers/ata/pata_optidma.c1
-rw-r--r--drivers/ata/pata_pcmcia.c3
-rw-r--r--drivers/ata/pata_pdc202xx_old.c2
-rw-r--r--drivers/ata/pata_platform.c2
-rw-r--r--drivers/ata/pata_qdi.c2
-rw-r--r--drivers/ata/pata_radisys.c2
-rw-r--r--drivers/ata/pata_rb532_cf.c15
-rw-r--r--drivers/ata/pata_sc1200.c2
-rw-r--r--drivers/ata/pata_scc.c2
-rw-r--r--drivers/ata/pata_sch.c2
-rw-r--r--drivers/ata/pata_serverworks.c1
-rw-r--r--drivers/ata/pata_sil680.c1
-rw-r--r--drivers/ata/pata_sis.c3
-rw-r--r--drivers/ata/pata_sl82c105.c1
-rw-r--r--drivers/ata/pata_triflex.c2
-rw-r--r--drivers/ata/pata_via.c1
-rw-r--r--drivers/ata/pata_winbond.c2
-rw-r--r--drivers/ata/sata_nv.c53
-rw-r--r--drivers/ata/sata_promise.c20
-rw-r--r--drivers/ata/sata_sil24.c5
-rw-r--r--drivers/ata/sata_via.c188
-rw-r--r--drivers/atm/horizon.c2
-rw-r--r--drivers/base/sys.c10
-rw-r--r--drivers/block/DAC960.c6
-rw-r--r--drivers/block/Kconfig29
-rw-r--r--drivers/block/amiflop.c46
-rw-r--r--drivers/block/aoe/aoeblk.c12
-rw-r--r--drivers/block/ataflop.c37
-rw-r--r--drivers/block/brd.c5
-rw-r--r--drivers/block/cciss.c113
-rw-r--r--drivers/block/cpqarray.c35
-rw-r--r--drivers/block/floppy.c56
-rw-r--r--drivers/block/loop.c64
-rw-r--r--drivers/block/nbd.c28
-rw-r--r--drivers/block/paride/pcd.c21
-rw-r--r--drivers/block/paride/pd.c14
-rw-r--r--drivers/block/paride/pf.c22
-rw-r--r--drivers/block/paride/pt.c2
-rw-r--r--drivers/block/pktcdvd.c58
-rw-r--r--drivers/block/swim3.c32
-rw-r--r--drivers/block/ub.c42
-rw-r--r--drivers/block/viodasd.c10
-rw-r--r--drivers/block/virtio_blk.c8
-rw-r--r--drivers/block/xd.c4
-rw-r--r--drivers/block/xd.h2
-rw-r--r--drivers/block/xen-blkfront.c23
-rw-r--r--drivers/block/xsysace.c34
-rw-r--r--drivers/block/z2ram.c7
-rw-r--r--drivers/bluetooth/bluecard_cs.c2
-rw-r--r--drivers/bluetooth/bpa10x.c4
-rw-r--r--drivers/bluetooth/bt3c_cs.c6
-rw-r--r--drivers/bluetooth/btsdio.c2
-rw-r--r--drivers/bluetooth/btuart_cs.c6
-rw-r--r--drivers/bluetooth/dtl1_cs.c7
-rw-r--r--drivers/cdrom/cdrom.c39
-rw-r--r--drivers/cdrom/gdrom.c15
-rw-r--r--drivers/cdrom/viocd.c21
-rw-r--r--drivers/char/Kconfig46
-rw-r--r--drivers/char/Makefile2
-rw-r--r--drivers/char/agp/uninorth-agp.c2
-rw-r--r--drivers/char/amiserial.c6
-rw-r--r--drivers/char/cp437.uni12
-rw-r--r--drivers/char/ds1286.c585
-rw-r--r--drivers/char/hpet.c3
-rw-r--r--drivers/char/hvc_console.c86
-rw-r--r--drivers/char/hvc_console.h12
-rw-r--r--drivers/char/hvc_irq.c5
-rw-r--r--drivers/char/hvc_iseries.c1
-rw-r--r--drivers/char/hvc_vio.c1
-rw-r--r--drivers/char/hvc_xen.c1
-rw-r--r--drivers/char/hw_random/amd-rng.c2
-rw-r--r--drivers/char/hw_random/geode-rng.c2
-rw-r--r--drivers/char/hw_random/intel-rng.c2
-rw-r--r--drivers/char/hw_random/via-rng.c2
-rw-r--r--drivers/char/ip27-rtc.c329
-rw-r--r--drivers/char/ipmi/ipmi_devintf.c3
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c20
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c16
-rw-r--r--drivers/char/ipmi/ipmi_watchdog.c1
-rw-r--r--drivers/char/isicom.c6
-rw-r--r--drivers/char/istallion.c4
-rw-r--r--drivers/char/mxser.c3
-rw-r--r--drivers/char/nvram.c6
-rw-r--r--drivers/char/pcmcia/synclink_cs.c2
-rw-r--r--drivers/char/random.c7
-rw-r--r--drivers/char/raw.c8
-rw-r--r--drivers/char/rtc.c2
-rw-r--r--drivers/char/serial167.c2
-rw-r--r--drivers/char/sonypi.c5
-rw-r--r--drivers/char/specialix.c2
-rw-r--r--drivers/char/tty_io.c15
-rw-r--r--drivers/char/tty_port.c2
-rw-r--r--drivers/char/virtio_console.c1
-rw-r--r--drivers/char/vt.c12
-rw-r--r--drivers/char/xilinx_hwicap/buffer_icap.c3
-rw-r--r--drivers/char/xilinx_hwicap/buffer_icap.h3
-rw-r--r--drivers/char/xilinx_hwicap/fifo_icap.c3
-rw-r--r--drivers/char/xilinx_hwicap/fifo_icap.h3
-rw-r--r--drivers/char/xilinx_hwicap/xilinx_hwicap.c12
-rw-r--r--drivers/char/xilinx_hwicap/xilinx_hwicap.h3
-rw-r--r--drivers/cpuidle/cpuidle.c18
-rw-r--r--drivers/crypto/talitos.c4
-rw-r--r--drivers/dca/dca-core.c2
-rw-r--r--drivers/dma/dmaengine.c7
-rw-r--r--drivers/dma/dmatest.c22
-rw-r--r--drivers/dma/ioat_dma.c22
-rw-r--r--drivers/dma/iop-adma.c27
-rw-r--r--drivers/dma/iovlock.c17
-rw-r--r--drivers/dma/mv_xor.c15
-rw-r--r--drivers/edac/Kconfig7
-rw-r--r--drivers/edac/Makefile1
-rw-r--r--drivers/edac/cell_edac.c3
-rw-r--r--drivers/edac/i5000_edac.c4
-rw-r--r--drivers/edac/i82875p_edac.c14
-rw-r--r--drivers/edac/x38_edac.c524
-rw-r--r--drivers/firewire/fw-device.c14
-rw-r--r--drivers/firewire/fw-ohci.c63
-rw-r--r--drivers/firewire/fw-sbp2.c45
-rw-r--r--drivers/firewire/fw-topology.c6
-rw-r--r--drivers/firewire/fw-transaction.c3
-rw-r--r--drivers/firewire/fw-transaction.h4
-rw-r--r--drivers/firmware/dmi_scan.c17
-rw-r--r--drivers/gpio/Kconfig15
-rw-r--r--drivers/gpio/Makefile2
-rw-r--r--drivers/gpio/gpiolib.c2
-rw-r--r--drivers/gpio/twl4030-gpio.c521
-rw-r--r--drivers/gpio/xilinx_gpio.c235
-rw-r--r--drivers/gpu/drm/drm_drawable.c15
-rw-r--r--drivers/gpu/drm/drm_drv.c12
-rw-r--r--drivers/gpu/drm/drm_fops.c2
-rw-r--r--drivers/gpu/drm/drm_ioc32.c34
-rw-r--r--drivers/gpu/drm/drm_irq.c89
-rw-r--r--drivers/gpu/drm/drm_lock.c11
-rw-r--r--drivers/gpu/drm/drm_stub.c1
-rw-r--r--drivers/gpu/drm/i915/Makefile3
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c38
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h59
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c908
-rw-r--r--drivers/gpu/drm/i915/i915_gem_proc.c20
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c7
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c693
-rw-r--r--drivers/gpu/drm/i915/i915_opregion.c18
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h4
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c15
-rw-r--r--drivers/gpu/drm/mga/mga_dma.c8
-rw-r--r--drivers/gpu/drm/mga/mga_irq.c5
-rw-r--r--drivers/gpu/drm/r128/r128_drv.c6
-rw-r--r--drivers/gpu/drm/r128/r128_drv.h1
-rw-r--r--drivers/gpu/drm/r128/r128_irq.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_cp.c36
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.h15
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq.c13
-rw-r--r--drivers/gpu/drm/via/via_irq.c1
-rw-r--r--drivers/gpu/drm/via/via_map.c11
-rw-r--r--drivers/hid/Kconfig2
-rw-r--r--drivers/hid/hid-apple.c80
-rw-r--r--drivers/hid/hid-core.c63
-rw-r--r--drivers/hid/hid-dell.c1
-rw-r--r--drivers/hid/hid-gyration.c4
-rw-r--r--drivers/hid/hid-ids.h15
-rw-r--r--drivers/hid/hid-lg.c5
-rw-r--r--drivers/hid/hid-pl.c2
-rw-r--r--drivers/hid/hid-sony.c44
-rw-r--r--drivers/hid/hidraw.c33
-rw-r--r--drivers/hid/usbhid/hid-core.c72
-rw-r--r--drivers/hid/usbhid/hiddev.c5
-rw-r--r--drivers/hid/usbhid/usbhid.h1
-rw-r--r--drivers/hwmon/Kconfig29
-rw-r--r--drivers/hwmon/Makefile2
-rw-r--r--drivers/hwmon/abituguru3.c30
-rw-r--r--drivers/hwmon/adt7462.c2002
-rw-r--r--drivers/hwmon/adt7470.c75
-rw-r--r--drivers/hwmon/adt7473.c118
-rw-r--r--drivers/hwmon/applesmc.c56
-rw-r--r--drivers/hwmon/hwmon-vid.c1
-rw-r--r--drivers/hwmon/ibmaem.c18
-rw-r--r--drivers/hwmon/lis3lv02d.c581
-rw-r--r--drivers/hwmon/lis3lv02d.h149
-rw-r--r--drivers/hwmon/lm85.c52
-rw-r--r--drivers/hwmon/lm90.c52
-rw-r--r--drivers/hwmon/w83781d.c4
-rw-r--r--drivers/i2c/algos/i2c-algo-pcf.c21
-rw-r--r--drivers/i2c/busses/Kconfig1
-rw-r--r--drivers/i2c/busses/i2c-cpm.c2
-rw-r--r--drivers/i2c/busses/i2c-elektor.c3
-rw-r--r--drivers/i2c/busses/i2c-highlander.c4
-rw-r--r--drivers/i2c/busses/i2c-hydra.c2
-rw-r--r--drivers/i2c/busses/i2c-i801.c3
-rw-r--r--drivers/i2c/busses/i2c-parport.c4
-rw-r--r--drivers/i2c/busses/i2c-pmcmsp.c2
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c18
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c3
-rw-r--r--drivers/i2c/busses/scx200_i2c.c1
-rw-r--r--drivers/i2c/chips/Kconfig2
-rw-r--r--drivers/i2c/chips/Makefile3
-rw-r--r--drivers/i2c/chips/isp1301_omap.c65
-rw-r--r--drivers/i2c/i2c-core.c5
-rw-r--r--drivers/ide/Kconfig24
-rw-r--r--drivers/ide/Makefile86
-rw-r--r--drivers/ide/aec62xx.c (renamed from drivers/ide/pci/aec62xx.c)0
-rw-r--r--drivers/ide/ali14xx.c (renamed from drivers/ide/legacy/ali14xx.c)0
-rw-r--r--drivers/ide/alim15x3.c (renamed from drivers/ide/pci/alim15x3.c)4
-rw-r--r--drivers/ide/amd74xx.c (renamed from drivers/ide/pci/amd74xx.c)11
-rw-r--r--drivers/ide/arm/Makefile10
-rw-r--r--drivers/ide/atiixp.c (renamed from drivers/ide/pci/atiixp.c)0
-rw-r--r--drivers/ide/au1xxx-ide.c (renamed from drivers/ide/mips/au1xxx-ide.c)0
-rw-r--r--drivers/ide/buddha.c (renamed from drivers/ide/legacy/buddha.c)0
-rw-r--r--drivers/ide/cmd640.c (renamed from drivers/ide/pci/cmd640.c)0
-rw-r--r--drivers/ide/cmd64x.c (renamed from drivers/ide/pci/cmd64x.c)0
-rw-r--r--drivers/ide/cs5520.c (renamed from drivers/ide/pci/cs5520.c)0
-rw-r--r--drivers/ide/cs5530.c (renamed from drivers/ide/pci/cs5530.c)0
-rw-r--r--drivers/ide/cs5535.c (renamed from drivers/ide/pci/cs5535.c)0
-rw-r--r--drivers/ide/cy82c693.c (renamed from drivers/ide/pci/cy82c693.c)0
-rw-r--r--drivers/ide/delkin_cb.c (renamed from drivers/ide/pci/delkin_cb.c)0
-rw-r--r--drivers/ide/dtc2278.c (renamed from drivers/ide/legacy/dtc2278.c)0
-rw-r--r--drivers/ide/falconide.c (renamed from drivers/ide/legacy/falconide.c)0
-rw-r--r--drivers/ide/gayle.c (renamed from drivers/ide/legacy/gayle.c)0
-rw-r--r--drivers/ide/h8300/Makefile2
-rw-r--r--drivers/ide/hpt366.c (renamed from drivers/ide/pci/hpt366.c)2
-rw-r--r--drivers/ide/ht6560b.c (renamed from drivers/ide/legacy/ht6560b.c)0
-rw-r--r--drivers/ide/icside.c (renamed from drivers/ide/arm/icside.c)8
-rw-r--r--drivers/ide/ide-4drives.c (renamed from drivers/ide/legacy/ide-4drives.c)0
-rw-r--r--drivers/ide/ide-cd.c26
-rw-r--r--drivers/ide/ide-cs.c (renamed from drivers/ide/legacy/ide-cs.c)1
-rw-r--r--drivers/ide/ide-disk.c10
-rw-r--r--drivers/ide/ide-disk.h2
-rw-r--r--drivers/ide/ide-disk_ioctl.c5
-rw-r--r--drivers/ide/ide-floppy.h4
-rw-r--r--drivers/ide/ide-floppy_ioctl.c17
-rw-r--r--drivers/ide/ide-gd.c27
-rw-r--r--drivers/ide/ide-h8300.c (renamed from drivers/ide/h8300/ide-h8300.c)0
-rw-r--r--drivers/ide/ide-io.c24
-rw-r--r--drivers/ide/ide-ioctls.c3
-rw-r--r--drivers/ide/ide-iops.c11
-rw-r--r--drivers/ide/ide-pci-generic.c (renamed from drivers/ide/pci/generic.c)2
-rw-r--r--drivers/ide/ide-probe.c3
-rw-r--r--drivers/ide/ide-proc.c2
-rw-r--r--drivers/ide/ide-tape.c17
-rw-r--r--drivers/ide/ide_arm.c (renamed from drivers/ide/arm/ide_arm.c)0
-rw-r--r--drivers/ide/ide_platform.c (renamed from drivers/ide/legacy/ide_platform.c)0
-rw-r--r--drivers/ide/it8213.c (renamed from drivers/ide/pci/it8213.c)0
-rw-r--r--drivers/ide/it821x.c (renamed from drivers/ide/pci/it821x.c)2
-rw-r--r--drivers/ide/jmicron.c (renamed from drivers/ide/pci/jmicron.c)2
-rw-r--r--drivers/ide/legacy/Makefile25
-rw-r--r--drivers/ide/macide.c (renamed from drivers/ide/legacy/macide.c)0
-rw-r--r--drivers/ide/mips/Makefile3
-rw-r--r--drivers/ide/ns87415.c (renamed from drivers/ide/pci/ns87415.c)0
-rw-r--r--drivers/ide/opti621.c (renamed from drivers/ide/pci/opti621.c)0
-rw-r--r--drivers/ide/palm_bk3710.c (renamed from drivers/ide/arm/palm_bk3710.c)0
-rw-r--r--drivers/ide/pci/Makefile43
-rw-r--r--drivers/ide/pdc202xx_new.c (renamed from drivers/ide/pci/pdc202xx_new.c)0
-rw-r--r--drivers/ide/pdc202xx_old.c (renamed from drivers/ide/pci/pdc202xx_old.c)0
-rw-r--r--drivers/ide/piix.c (renamed from drivers/ide/pci/piix.c)2
-rw-r--r--drivers/ide/pmac.c (renamed from drivers/ide/ppc/pmac.c)30
-rw-r--r--drivers/ide/ppc/Makefile2
-rw-r--r--drivers/ide/q40ide.c (renamed from drivers/ide/legacy/q40ide.c)0
-rw-r--r--drivers/ide/qd65xx.c (renamed from drivers/ide/legacy/qd65xx.c)0
-rw-r--r--drivers/ide/qd65xx.h (renamed from drivers/ide/legacy/qd65xx.h)0
-rw-r--r--drivers/ide/rapide.c (renamed from drivers/ide/arm/rapide.c)4
-rw-r--r--drivers/ide/rz1000.c (renamed from drivers/ide/pci/rz1000.c)0
-rw-r--r--drivers/ide/sc1200.c (renamed from drivers/ide/pci/sc1200.c)0
-rw-r--r--drivers/ide/scc_pata.c (renamed from drivers/ide/pci/scc_pata.c)12
-rw-r--r--drivers/ide/serverworks.c (renamed from drivers/ide/pci/serverworks.c)0
-rw-r--r--drivers/ide/sgiioc4.c (renamed from drivers/ide/pci/sgiioc4.c)30
-rw-r--r--drivers/ide/siimage.c (renamed from drivers/ide/pci/siimage.c)4
-rw-r--r--drivers/ide/sis5513.c (renamed from drivers/ide/pci/sis5513.c)0
-rw-r--r--drivers/ide/sl82c105.c (renamed from drivers/ide/pci/sl82c105.c)0
-rw-r--r--drivers/ide/slc90e66.c (renamed from drivers/ide/pci/slc90e66.c)0
-rw-r--r--drivers/ide/tc86c001.c (renamed from drivers/ide/pci/tc86c001.c)0
-rw-r--r--drivers/ide/triflex.c (renamed from drivers/ide/pci/triflex.c)0
-rw-r--r--drivers/ide/trm290.c (renamed from drivers/ide/pci/trm290.c)0
-rw-r--r--drivers/ide/tx4938ide.c323
-rw-r--r--drivers/ide/tx4939ide.c754
-rw-r--r--drivers/ide/umc8672.c (renamed from drivers/ide/legacy/umc8672.c)0
-rw-r--r--drivers/ide/via82cxxx.c (renamed from drivers/ide/pci/via82cxxx.c)0
-rw-r--r--drivers/idle/Kconfig18
-rw-r--r--drivers/idle/Makefile2
-rw-r--r--drivers/idle/i7300_idle.c609
-rw-r--r--drivers/ieee1394/dv1394.c13
-rw-r--r--drivers/ieee1394/highlevel.c25
-rw-r--r--drivers/ieee1394/hosts.c4
-rw-r--r--drivers/ieee1394/hosts.h4
-rw-r--r--drivers/ieee1394/nodemgr.c22
-rw-r--r--drivers/ieee1394/raw1394.c9
-rw-r--r--drivers/ieee1394/sbp2.c14
-rw-r--r--drivers/infiniband/core/mad.c14
-rw-r--r--drivers/infiniband/core/ucma.c4
-rw-r--r--drivers/infiniband/core/uverbs_main.c2
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c1
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c6
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_qp.c1
-rw-r--r--drivers/infiniband/hw/ehca/ehca_classes.h6
-rw-r--r--drivers/infiniband/hw/ehca/ehca_cq.c4
-rw-r--r--drivers/infiniband/hw/ehca/ehca_irq.c44
-rw-r--r--drivers/infiniband/hw/ehca/ehca_main.c82
-rw-r--r--drivers/infiniband/hw/ehca/ehca_qp.c41
-rw-r--r--drivers/infiniband/hw/ehca/ehca_reqs.c51
-rw-r--r--drivers/infiniband/hw/ipath/ipath_ruc.c10
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c5
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c6
-rw-r--r--drivers/infiniband/hw/mlx4/main.c11
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h1
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c1
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c21
-rw-r--r--drivers/infiniband/hw/nes/nes.c16
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.h1
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c64
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h1
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ethtool.c9
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c7
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c73
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_vlan.c4
-rw-r--r--drivers/input/evdev.c1
-rw-r--r--drivers/input/ff-memless.c5
-rw-r--r--drivers/input/joydev.c1
-rw-r--r--drivers/input/keyboard/atkbd.c27
-rw-r--r--drivers/input/misc/cm109.c37
-rw-r--r--drivers/input/misc/hp_sdc_rtc.c13
-rw-r--r--drivers/input/misc/sgi_btns.c1
-rw-r--r--drivers/input/mouse/Kconfig25
-rw-r--r--drivers/input/mouse/Makefile1
-rw-r--r--drivers/input/mouse/elantech.c674
-rw-r--r--drivers/input/mouse/elantech.h124
-rw-r--r--drivers/input/mouse/hgpk.c2
-rw-r--r--drivers/input/mouse/psmouse-base.c23
-rw-r--r--drivers/input/mouse/psmouse.h1
-rw-r--r--drivers/input/mousedev.c1
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h22
-rw-r--r--drivers/input/serio/serio_raw.c1
-rw-r--r--drivers/input/tablet/wacom.h13
-rw-r--r--drivers/input/tablet/wacom_sys.c228
-rw-r--r--drivers/input/tablet/wacom_wac.c160
-rw-r--r--drivers/input/tablet/wacom_wac.h4
-rw-r--r--drivers/input/touchscreen/Kconfig6
-rw-r--r--drivers/input/touchscreen/elo.c2
-rw-r--r--drivers/input/xen-kbdfront.c6
-rw-r--r--drivers/isdn/hardware/avm/b1isa.c6
-rw-r--r--drivers/isdn/hisax/config.c16
-rw-r--r--drivers/isdn/hysdn/hysdn_net.c4
-rw-r--r--drivers/isdn/i4l/isdn_net.c6
-rw-r--r--drivers/leds/Kconfig25
-rw-r--r--drivers/leds/Makefile4
-rw-r--r--drivers/leds/led-class.c12
-rw-r--r--drivers/leds/leds-ams-delta.c20
-rw-r--r--drivers/leds/leds-cm-x270.c124
-rw-r--r--drivers/leds/leds-da903x.c176
-rw-r--r--drivers/leds/leds-hp-disk.c155
-rw-r--r--drivers/leds/leds-pca955x.c2
-rw-r--r--drivers/leds/leds-wrap.c5
-rw-r--r--drivers/leds/ledtrig-backlight.c110
-rw-r--r--drivers/leds/ledtrig-timer.c8
-rw-r--r--drivers/macintosh/rack-meter.c10
-rw-r--r--drivers/md/Makefile2
-rw-r--r--drivers/md/bitmap.c22
-rw-r--r--drivers/md/dm-crypt.c56
-rw-r--r--drivers/md/dm-delay.c3
-rw-r--r--drivers/md/dm-exception-store.c108
-rw-r--r--drivers/md/dm-io.c2
-rw-r--r--drivers/md/dm-ioctl.c4
-rw-r--r--drivers/md/dm-kcopyd.c14
-rw-r--r--drivers/md/dm-linear.c15
-rw-r--r--drivers/md/dm-log.c2
-rw-r--r--drivers/md/dm-mpath.c25
-rw-r--r--drivers/md/dm-path-selector.c3
-rw-r--r--drivers/md/dm-raid1.c795
-rw-r--r--drivers/md/dm-region-hash.c704
-rw-r--r--drivers/md/dm-round-robin.c3
-rw-r--r--drivers/md/dm-snap.c43
-rw-r--r--drivers/md/dm-snap.h7
-rw-r--r--drivers/md/dm-stripe.c10
-rw-r--r--drivers/md/dm-table.c39
-rw-r--r--drivers/md/dm-zero.c2
-rw-r--r--drivers/md/dm.c110
-rw-r--r--drivers/md/dm.h9
-rw-r--r--drivers/md/linear.c2
-rw-r--r--drivers/md/md.c77
-rw-r--r--drivers/md/raid10.c2
-rw-r--r--drivers/media/common/saa7146_fops.c4
-rw-r--r--drivers/media/common/saa7146_video.c12
-rw-r--r--drivers/media/dvb/dm1105/dm1105.c2
-rw-r--r--drivers/media/dvb/dvb-core/dvb_frontend.c7
-rw-r--r--drivers/media/dvb/dvb-usb/Kconfig3
-rw-r--r--drivers/media/dvb/dvb-usb/af9015.c22
-rw-r--r--drivers/media/dvb/dvb-usb/af9015.h1
-rw-r--r--drivers/media/dvb/dvb-usb/anysee.c4
-rw-r--r--drivers/media/dvb/dvb-usb/dib0700.h5
-rw-r--r--drivers/media/dvb/dvb-usb/dib0700_core.c16
-rw-r--r--drivers/media/dvb/dvb-usb/dib0700_devices.c139
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb-urb.c6
-rw-r--r--drivers/media/dvb/dvb-usb/usb-urb.c19
-rw-r--r--drivers/media/dvb/frontends/af9013.c6
-rw-r--r--drivers/media/dvb/frontends/s5h1411.c84
-rw-r--r--drivers/media/dvb/frontends/s5h1411.h2
-rw-r--r--drivers/media/dvb/siano/sms-cards.c2
-rw-r--r--drivers/media/dvb/ttusb-budget/dvb-ttusb-budget.c15
-rw-r--r--drivers/media/dvb/ttusb-dec/ttusb_dec.c7
-rw-r--r--drivers/media/radio/Kconfig14
-rw-r--r--drivers/media/radio/dsbr100.c62
-rw-r--r--drivers/media/radio/radio-mr800.c5
-rw-r--r--drivers/media/radio/radio-si470x.c19
-rw-r--r--drivers/media/video/arv.c2
-rw-r--r--drivers/media/video/bt8xx/bttv-driver.c6
-rw-r--r--drivers/media/video/c-qcam.c2
-rw-r--r--drivers/media/video/cafe_ccic.c10
-rw-r--r--drivers/media/video/compat_ioctl32.c3
-rw-r--r--drivers/media/video/cpia.c6
-rw-r--r--drivers/media/video/cpia2/cpia2_v4l.c2
-rw-r--r--drivers/media/video/cx18/cx18-driver.c28
-rw-r--r--drivers/media/video/cx18/cx18-driver.h20
-rw-r--r--drivers/media/video/cx18/cx18-dvb.c23
-rw-r--r--drivers/media/video/cx18/cx18-dvb.h1
-rw-r--r--drivers/media/video/cx18/cx18-io.c17
-rw-r--r--drivers/media/video/cx18/cx18-io.h21
-rw-r--r--drivers/media/video/cx18/cx18-irq.c96
-rw-r--r--drivers/media/video/cx18/cx18-irq.h4
-rw-r--r--drivers/media/video/cx18/cx18-mailbox.c6
-rw-r--r--drivers/media/video/cx18/cx18-queue.c14
-rw-r--r--drivers/media/video/cx18/cx18-scb.h40
-rw-r--r--drivers/media/video/cx18/cx18-streams.c36
-rw-r--r--drivers/media/video/cx23885/cx23885-417.c2
-rw-r--r--drivers/media/video/cx23885/cx23885-video.c2
-rw-r--r--drivers/media/video/cx88/cx88-blackbird.c14
-rw-r--r--drivers/media/video/cx88/cx88-cards.c4
-rw-r--r--drivers/media/video/cx88/cx88-dvb.c16
-rw-r--r--drivers/media/video/cx88/cx88-i2c.c2
-rw-r--r--drivers/media/video/cx88/cx88-mpeg.c33
-rw-r--r--drivers/media/video/cx88/cx88-video.c16
-rw-r--r--drivers/media/video/cx88/cx88.h1
-rw-r--r--drivers/media/video/em28xx/em28xx-audio.c33
-rw-r--r--drivers/media/video/em28xx/em28xx-core.c58
-rw-r--r--drivers/media/video/em28xx/em28xx-i2c.c10
-rw-r--r--drivers/media/video/em28xx/em28xx-video.c140
-rw-r--r--drivers/media/video/em28xx/em28xx.h6
-rw-r--r--drivers/media/video/et61x251/et61x251_core.c24
-rw-r--r--drivers/media/video/gspca/Kconfig144
-rw-r--r--drivers/media/video/gspca/conex.c3
-rw-r--r--drivers/media/video/gspca/finepix.c8
-rw-r--r--drivers/media/video/gspca/gspca.c56
-rw-r--r--drivers/media/video/gspca/gspca.h6
-rw-r--r--drivers/media/video/gspca/pac7311.c3
-rw-r--r--drivers/media/video/gspca/spca501.c3
-rw-r--r--drivers/media/video/gspca/spca505.c4
-rw-r--r--drivers/media/video/gspca/spca561.c3
-rw-r--r--drivers/media/video/gspca/vc032x.c3
-rw-r--r--drivers/media/video/gspca/zc3xx.c15
-rw-r--r--drivers/media/video/ivtv/Kconfig5
-rw-r--r--drivers/media/video/ivtv/ivtv-driver.c38
-rw-r--r--drivers/media/video/ivtv/ivtv-i2c.c1
-rw-r--r--drivers/media/video/ivtv/ivtv-ioctl.c13
-rw-r--r--drivers/media/video/ivtv/ivtv-ioctl.h3
-rw-r--r--drivers/media/video/ivtv/ivtv-streams.c4
-rw-r--r--drivers/media/video/ivtv/ivtvfb.c6
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-encoder.c4
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-hdw.c6
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-v4l2.c17
-rw-r--r--drivers/media/video/pwc/pwc-if.c2
-rw-r--r--drivers/media/video/s2255drv.c2
-rw-r--r--drivers/media/video/saa7110.c4
-rw-r--r--drivers/media/video/saa7134/saa7134-core.c9
-rw-r--r--drivers/media/video/saa7134/saa7134-empress.c2
-rw-r--r--drivers/media/video/se401.c2
-rw-r--r--drivers/media/video/sn9c102/sn9c102_core.c24
-rw-r--r--drivers/media/video/stk-webcam.c4
-rw-r--r--drivers/media/video/stv680.c3
-rw-r--r--drivers/media/video/tvaudio.c231
-rw-r--r--drivers/media/video/usbvideo/ibmcam.c12
-rw-r--r--drivers/media/video/usbvideo/usbvideo.c2
-rw-r--r--drivers/media/video/usbvideo/vicam.c3
-rw-r--r--drivers/media/video/usbvision/usbvision-i2c.c2
-rw-r--r--drivers/media/video/usbvision/usbvision-video.c12
-rw-r--r--drivers/media/video/uvc/uvc_v4l2.c12
-rw-r--r--drivers/media/video/v4l1-compat.c221
-rw-r--r--drivers/media/video/v4l2-int-device.c5
-rw-r--r--drivers/media/video/v4l2-ioctl.c19
-rw-r--r--drivers/media/video/videobuf-dvb.c52
-rw-r--r--drivers/media/video/vivi.c6
-rw-r--r--drivers/media/video/w9968cf.c16
-rw-r--r--drivers/media/video/zc0301/zc0301_core.c24
-rw-r--r--drivers/media/video/zr364xx.c2
-rw-r--r--drivers/memstick/core/mspro_block.c9
-rw-r--r--drivers/message/fusion/mptctl.c7
-rw-r--r--drivers/message/fusion/mptlan.c108
-rw-r--r--drivers/message/fusion/mptscsih.c6
-rw-r--r--drivers/message/i2o/i2o_block.c27
-rw-r--r--drivers/message/i2o/i2o_config.c21
-rw-r--r--drivers/message/i2o/iop.c1
-rw-r--r--drivers/mfd/Kconfig16
-rw-r--r--drivers/mfd/Makefile4
-rw-r--r--drivers/mfd/da903x.c2
-rw-r--r--drivers/mfd/sm501.c25
-rw-r--r--drivers/mfd/twl4030-core.c421
-rw-r--r--drivers/mfd/twl4030-irq.c743
-rw-r--r--drivers/mfd/wm8350-core.c5
-rw-r--r--drivers/mfd/wm8350-i2c.c15
-rw-r--r--drivers/misc/Kconfig28
-rw-r--r--drivers/misc/Makefile3
-rw-r--r--drivers/misc/acer-wmi.c231
-rw-r--r--drivers/misc/asus-laptop.c27
-rw-r--r--drivers/misc/c2port/Kconfig35
-rw-r--r--drivers/misc/c2port/Makefile3
-rw-r--r--drivers/misc/c2port/c2port-duramar2150.c158
-rw-r--r--drivers/misc/c2port/core.c1003
-rw-r--r--drivers/misc/compal-laptop.c12
-rw-r--r--drivers/misc/eeepc-laptop.c244
-rw-r--r--drivers/misc/fujitsu-laptop.c178
-rw-r--r--drivers/misc/hdpuftrs/hdpu_nexus.c1
-rw-r--r--drivers/misc/ics932s401.c515
-rw-r--r--drivers/misc/intel_menlow.c41
-rw-r--r--drivers/misc/msi-laptop.c16
-rw-r--r--drivers/misc/panasonic-laptop.c766
-rw-r--r--drivers/misc/sgi-gru/Makefile4
-rw-r--r--drivers/misc/sgi-gru/grufault.c4
-rw-r--r--drivers/misc/sgi-gru/grufile.c2
-rw-r--r--drivers/misc/sgi-gru/gruprocfs.c1
-rw-r--r--drivers/misc/sgi-xp/Makefile4
-rw-r--r--drivers/misc/sgi-xp/xp.h11
-rw-r--r--drivers/misc/sgi-xp/xp_main.c7
-rw-r--r--drivers/misc/sgi-xp/xp_sn2.c34
-rw-r--r--drivers/misc/sgi-xp/xp_uv.c70
-rw-r--r--drivers/misc/sgi-xp/xpc.h12
-rw-r--r--drivers/misc/sgi-xp/xpc_main.c4
-rw-r--r--drivers/misc/sgi-xp/xpc_sn2.c15
-rw-r--r--drivers/misc/sgi-xp/xpc_uv.c290
-rw-r--r--drivers/misc/sony-laptop.c15
-rw-r--r--drivers/misc/thinkpad_acpi.c131
-rw-r--r--drivers/mmc/card/block.c13
-rw-r--r--drivers/mmc/core/bus.c3
-rw-r--r--drivers/mmc/core/core.c6
-rw-r--r--drivers/mmc/core/host.c5
-rw-r--r--drivers/mmc/core/sdio_bus.c3
-rw-r--r--drivers/mmc/host/mmc_spi.c2
-rw-r--r--drivers/mmc/host/mmci.c5
-rw-r--r--drivers/mmc/host/sdhci.c2
-rw-r--r--drivers/mmc/host/tifm_sd.c16
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c13
-rw-r--r--drivers/mtd/chips/jedec_probe.c10
-rw-r--r--drivers/mtd/devices/block2mtd.c4
-rw-r--r--drivers/mtd/devices/m25p80.c28
-rw-r--r--drivers/mtd/maps/cdb89712.c13
-rw-r--r--drivers/mtd/maps/h720x-flash.c6
-rw-r--r--drivers/mtd/maps/physmap.c26
-rw-r--r--drivers/mtd/mtd_blkdevs.c24
-rw-r--r--drivers/mtd/mtdchar.c10
-rw-r--r--drivers/mtd/nand/fsl_upm.c8
-rw-r--r--drivers/mtd/nand/pasemi_nand.c1
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c1
-rw-r--r--drivers/mtd/onenand/omap2.c18
-rw-r--r--drivers/mtd/ubi/eba.c2
-rw-r--r--drivers/mtd/ubi/scan.c2
-rw-r--r--drivers/mtd/ubi/wl.c3
-rw-r--r--drivers/net/3c509.c6
-rw-r--r--drivers/net/8139cp.c5
-rw-r--r--drivers/net/8139too.c5
-rw-r--r--drivers/net/Kconfig43
-rw-r--r--drivers/net/Makefile4
-rw-r--r--drivers/net/amd8111e.c23
-rw-r--r--drivers/net/arm/at91_ether.c6
-rw-r--r--drivers/net/atl1e/atl1e.h1
-rw-r--r--drivers/net/atl1e/atl1e_hw.c4
-rw-r--r--drivers/net/atlx/atl1.c24
-rw-r--r--drivers/net/atlx/atl1.h2
-rw-r--r--drivers/net/atlx/atl2.c8
-rw-r--r--drivers/net/ax88796.c6
-rw-r--r--drivers/net/bnx2.c50
-rw-r--r--drivers/net/bnx2.h6
-rw-r--r--drivers/net/bnx2x_init.h9
-rw-r--r--drivers/net/bnx2x_main.c10
-rw-r--r--drivers/net/bonding/bond_alb.c13
-rw-r--r--drivers/net/bonding/bond_main.c68
-rw-r--r--drivers/net/chelsio/sge.c4
-rw-r--r--drivers/net/cris/eth_v10.c4
-rw-r--r--drivers/net/cxgb3/cxgb3_main.c4
-rw-r--r--drivers/net/cxgb3/l2t.c1
-rw-r--r--drivers/net/cxgb3/t3_hw.c8
-rw-r--r--drivers/net/dm9000.c9
-rw-r--r--drivers/net/e100.c20
-rw-r--r--drivers/net/e1000/e1000_ethtool.c8
-rw-r--r--drivers/net/e1000/e1000_main.c1
-rw-r--r--drivers/net/e1000e/e1000.h5
-rw-r--r--drivers/net/e1000e/ethtool.c8
-rw-r--r--drivers/net/e1000e/ich8lan.c9
-rw-r--r--drivers/net/e1000e/netdev.c26
-rw-r--r--drivers/net/e1000e/param.c25
-rw-r--r--drivers/net/ehea/ehea.h2
-rw-r--r--drivers/net/ehea/ehea_main.c25
-rw-r--r--drivers/net/ehea/ehea_qmr.c176
-rw-r--r--drivers/net/ehea/ehea_qmr.h5
-rw-r--r--drivers/net/enc28j60.c18
-rw-r--r--drivers/net/fec_mpc52xx.c18
-rw-r--r--drivers/net/fec_mpc52xx_phy.c55
-rw-r--r--drivers/net/fs_enet/fs_enet-main.c6
-rw-r--r--drivers/net/gianfar.c55
-rw-r--r--drivers/net/gianfar_mii.c21
-rw-r--r--drivers/net/gianfar_mii.h3
-rw-r--r--drivers/net/hp-plus.c2
-rw-r--r--drivers/net/ibm_newemac/core.c10
-rw-r--r--drivers/net/ibm_newemac/mal.c15
-rw-r--r--drivers/net/igb/igb_ethtool.c8
-rw-r--r--drivers/net/igb/igb_main.c64
-rw-r--r--drivers/net/ipg.c8
-rw-r--r--drivers/net/irda/ks959-sir.c1
-rw-r--r--drivers/net/irda/ksdazzle-sir.c1
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c68
-rw-r--r--drivers/net/jme.c21
-rw-r--r--drivers/net/jme.h2
-rw-r--r--drivers/net/loopback.c9
-rw-r--r--drivers/net/macvlan.c3
-rw-r--r--drivers/net/mlx4/Makefile7
-rw-r--r--drivers/net/mlx4/alloc.c97
-rw-r--r--drivers/net/mlx4/cq.c2
-rw-r--r--drivers/net/mlx4/en_cq.c146
-rw-r--r--drivers/net/mlx4/en_main.c253
-rw-r--r--drivers/net/mlx4/en_netdev.c1088
-rw-r--r--drivers/net/mlx4/en_params.c482
-rw-r--r--drivers/net/mlx4/en_port.c261
-rw-r--r--drivers/net/mlx4/en_port.h570
-rw-r--r--drivers/net/mlx4/en_resources.c96
-rw-r--r--drivers/net/mlx4/en_rx.c1080
-rw-r--r--drivers/net/mlx4/en_tx.c820
-rw-r--r--drivers/net/mlx4/eq.c2
-rw-r--r--drivers/net/mlx4/fw.c20
-rw-r--r--drivers/net/mlx4/fw.h7
-rw-r--r--drivers/net/mlx4/main.c295
-rw-r--r--drivers/net/mlx4/mcg.c4
-rw-r--r--drivers/net/mlx4/mlx4.h55
-rw-r--r--drivers/net/mlx4/mlx4_en.h561
-rw-r--r--drivers/net/mlx4/mr.c2
-rw-r--r--drivers/net/mlx4/pd.c4
-rw-r--r--drivers/net/mlx4/port.c319
-rw-r--r--drivers/net/mlx4/qp.c81
-rw-r--r--drivers/net/mlx4/srq.c2
-rw-r--r--drivers/net/mv643xx_eth.c14
-rw-r--r--drivers/net/myri10ge/myri10ge.c10
-rw-r--r--drivers/net/netx-eth.c2
-rw-r--r--drivers/net/niu.c299
-rw-r--r--drivers/net/niu.h13
-rw-r--r--drivers/net/pcmcia/axnet_cs.c2
-rw-r--r--drivers/net/pcmcia/ibmtr_cs.c2
-rw-r--r--drivers/net/pcmcia/pcnet_cs.c1
-rw-r--r--drivers/net/phy/marvell.c66
-rw-r--r--drivers/net/phy/mdio_bus.c7
-rw-r--r--drivers/net/phy/phy_device.c47
-rw-r--r--drivers/net/phy/vitesse.c64
-rw-r--r--drivers/net/pppoe.c6
-rw-r--r--drivers/net/pppol2tp.c1
-rw-r--r--drivers/net/qla3xxx.c19
-rw-r--r--drivers/net/qlge/qlge.h5
-rw-r--r--drivers/net/qlge/qlge_main.c89
-rw-r--r--drivers/net/r8169.c74
-rw-r--r--drivers/net/sfc/ethtool.c4
-rw-r--r--drivers/net/sh_eth.c4
-rw-r--r--drivers/net/sis190.c1
-rw-r--r--drivers/net/sis900.c1
-rw-r--r--drivers/net/smc911x.c56
-rw-r--r--drivers/net/smc911x.h6
-rw-r--r--drivers/net/smc91x.c14
-rw-r--r--drivers/net/spider_net.c4
-rw-r--r--drivers/net/starfire.c5
-rw-r--r--drivers/net/sungem.c144
-rw-r--r--drivers/net/tlan.c23
-rw-r--r--drivers/net/tulip/dmfe.c12
-rw-r--r--drivers/net/tun.c2
-rw-r--r--drivers/net/ucc_geth_ethtool.c7
-rw-r--r--drivers/net/usb/asix.c8
-rw-r--r--drivers/net/usb/dm9601.c15
-rw-r--r--drivers/net/usb/hso.c12
-rw-r--r--drivers/net/via-velocity.c13
-rw-r--r--drivers/net/wan/syncppp.c5
-rw-r--r--drivers/net/wan/z85230.c1
-rw-r--r--drivers/net/wireless/ath5k/base.c95
-rw-r--r--drivers/net/wireless/ath5k/base.h4
-rw-r--r--drivers/net/wireless/ath5k/debug.c12
-rw-r--r--drivers/net/wireless/ath5k/desc.c16
-rw-r--r--drivers/net/wireless/ath5k/initvals.c2
-rw-r--r--drivers/net/wireless/ath5k/reset.c22
-rw-r--r--drivers/net/wireless/ath9k/beacon.c10
-rw-r--r--drivers/net/wireless/ath9k/recv.c19
-rw-r--r--drivers/net/wireless/hostap/hostap_wlan.h5
-rw-r--r--drivers/net/wireless/ipw2200.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c19
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rx.c26
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scan.c7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.c24
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c14
-rw-r--r--drivers/net/wireless/libertas/cmd.c4
-rw-r--r--drivers/net/wireless/libertas/rx.c2
-rw-r--r--drivers/net/wireless/libertas/scan.c4
-rw-r--r--drivers/net/wireless/libertas_tf/if_usb.c2
-rw-r--r--drivers/net/wireless/orinoco.c42
-rw-r--r--drivers/net/wireless/p54/p54common.c31
-rw-r--r--drivers/net/wireless/p54/p54pci.c132
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig2
-rw-r--r--drivers/net/wireless/rtl8187_dev.c3
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c2
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c2
-rw-r--r--drivers/net/xen-netfront.c6
-rw-r--r--drivers/net/xtsonic.c319
-rw-r--r--drivers/of/device.c11
-rw-r--r--drivers/of/of_i2c.c2
-rw-r--r--drivers/of/of_spi.c2
-rw-r--r--drivers/oprofile/buffer_sync.c41
-rw-r--r--drivers/oprofile/buffer_sync.h4
-rw-r--r--drivers/oprofile/cpu_buffer.c106
-rw-r--r--drivers/oprofile/cpu_buffer.h12
-rw-r--r--drivers/oprofile/event_buffer.c40
-rw-r--r--drivers/oprofile/event_buffer.h17
-rw-r--r--drivers/oprofile/oprof.c26
-rw-r--r--drivers/oprofile/oprof.h12
-rw-r--r--drivers/oprofile/oprofile_files.c36
-rw-r--r--drivers/oprofile/oprofile_stats.c24
-rw-r--r--drivers/oprofile/oprofile_stats.h10
-rw-r--r--drivers/oprofile/oprofilefs.c78
-rw-r--r--drivers/oprofile/timer_int.c4
-rw-r--r--drivers/parisc/eisa_eeprom.c2
-rw-r--r--drivers/parport/Kconfig2
-rw-r--r--drivers/parport/parport_cs.c2
-rw-r--r--drivers/parport/parport_pc.c20
-rw-r--r--drivers/parport/parport_serial.c2
-rw-r--r--drivers/pci/Makefile3
-rw-r--r--drivers/pci/dmar.c119
-rw-r--r--drivers/pci/hotplug/acpiphp.h11
-rw-r--r--drivers/pci/hotplug/acpiphp_core.c32
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c24
-rw-r--r--drivers/pci/hotplug/acpiphp_ibm.c4
-rw-r--r--drivers/pci/hotplug/cpci_hotplug.h6
-rw-r--r--drivers/pci/hotplug/cpci_hotplug_core.c75
-rw-r--r--drivers/pci/hotplug/cpci_hotplug_pci.c4
-rw-r--r--drivers/pci/hotplug/cpqphp.h13
-rw-r--r--drivers/pci/hotplug/cpqphp_core.c45
-rw-r--r--drivers/pci/hotplug/cpqphp_ctrl.c2
-rw-r--r--drivers/pci/hotplug/fakephp.c26
-rw-r--r--drivers/pci/hotplug/ibmphp.h5
-rw-r--r--drivers/pci/hotplug/ibmphp_core.c5
-rw-r--r--drivers/pci/hotplug/ibmphp_ebda.c19
-rw-r--r--drivers/pci/hotplug/pci_hotplug_core.c64
-rw-r--r--drivers/pci/hotplug/pciehp.h12
-rw-r--r--drivers/pci/hotplug/pciehp_core.c110
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c108
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c103
-rw-r--r--drivers/pci/hotplug/pciehp_pci.c19
-rw-r--r--drivers/pci/hotplug/rpaphp_slot.c10
-rw-r--r--drivers/pci/hotplug/sgi_hotplug.c22
-rw-r--r--drivers/pci/hotplug/shpchp.h31
-rw-r--r--drivers/pci/hotplug/shpchp_core.c80
-rw-r--r--drivers/pci/hotplug/shpchp_ctrl.c158
-rw-r--r--drivers/pci/hotplug/shpchp_hpc.c113
-rw-r--r--drivers/pci/hotplug/shpchp_pci.c36
-rw-r--r--drivers/pci/intel-iommu.c252
-rw-r--r--drivers/pci/irq.c60
-rw-r--r--drivers/pci/msi.c21
-rw-r--r--drivers/pci/pci-acpi.c103
-rw-r--r--drivers/pci/pci-sysfs.c2
-rw-r--r--drivers/pci/pci.c124
-rw-r--r--drivers/pci/pci.h2
-rw-r--r--drivers/pci/pcie/aer/aerdrv_core.c2
-rw-r--r--drivers/pci/pcie/aspm.c29
-rw-r--r--drivers/pci/probe.c15
-rw-r--r--drivers/pci/quirks.c223
-rw-r--r--drivers/pci/rom.c6
-rw-r--r--drivers/pci/search.c9
-rw-r--r--drivers/pci/slot.c161
-rw-r--r--drivers/pcmcia/Kconfig4
-rw-r--r--drivers/pcmcia/Makefile3
-rw-r--r--drivers/pcmcia/bfin_cf_pcmcia.c2
-rw-r--r--drivers/pcmcia/cistpl.c3
-rw-r--r--drivers/pcmcia/cs.c14
-rw-r--r--drivers/pcmcia/ds.c11
-rw-r--r--drivers/pcmcia/hd64465_ss.c939
-rw-r--r--drivers/pcmcia/pcmcia_resource.c6
-rw-r--r--drivers/pcmcia/rsrc_nonstatic.c6
-rw-r--r--drivers/pnp/Kconfig20
-rw-r--r--drivers/pnp/Makefile4
-rw-r--r--drivers/pnp/base.h10
-rw-r--r--drivers/pnp/core.c29
-rw-r--r--drivers/pnp/driver.c4
-rw-r--r--drivers/pnp/interface.c1
-rw-r--r--drivers/pnp/isapnp/Makefile4
-rw-r--r--drivers/pnp/isapnp/core.c12
-rw-r--r--drivers/pnp/manager.c34
-rw-r--r--drivers/pnp/pnpacpi/Makefile4
-rw-r--r--drivers/pnp/pnpacpi/core.c16
-rw-r--r--drivers/pnp/pnpacpi/rsparser.c43
-rw-r--r--drivers/pnp/pnpbios/Makefile4
-rw-r--r--drivers/pnp/pnpbios/core.c4
-rw-r--r--drivers/pnp/pnpbios/rsparser.c18
-rw-r--r--drivers/pnp/quirks.c5
-rw-r--r--drivers/pnp/resource.c12
-rw-r--r--drivers/pnp/support.c14
-rw-r--r--drivers/ps3/ps3-lpm.c1
-rw-r--r--drivers/rapidio/rio-scan.c4
-rw-r--r--drivers/rapidio/rio.c2
-rw-r--r--drivers/regulator/Kconfig15
-rw-r--r--drivers/regulator/da903x.c29
-rw-r--r--drivers/rtc/Kconfig43
-rw-r--r--drivers/rtc/Makefile4
-rw-r--r--drivers/rtc/interface.c2
-rw-r--r--drivers/rtc/rtc-cmos.c2
-rw-r--r--drivers/rtc/rtc-dev.c3
-rw-r--r--drivers/rtc/rtc-ds1390.c220
-rw-r--r--drivers/rtc/rtc-ds1672.c6
-rw-r--r--drivers/rtc/rtc-ds3234.c4
-rw-r--r--drivers/rtc/rtc-m48t59.c34
-rw-r--r--drivers/rtc/rtc-max6900.c6
-rw-r--r--drivers/rtc/rtc-rx8581.c281
-rw-r--r--drivers/rtc/rtc-s3c.c8
-rw-r--r--drivers/rtc/rtc-starfire.c66
-rw-r--r--drivers/rtc/rtc-sun4v.c69
-rw-r--r--drivers/rtc/rtc-twl4030.c564
-rw-r--r--drivers/rtc/rtc-wm8350.c514
-rw-r--r--drivers/s390/block/dasd.c16
-rw-r--r--drivers/s390/block/dasd_genhd.c4
-rw-r--r--drivers/s390/block/dasd_int.h3
-rw-r--r--drivers/s390/block/dasd_ioctl.c15
-rw-r--r--drivers/s390/block/dcssblk.c17
-rw-r--r--drivers/s390/char/sclp_cmd.c3
-rw-r--r--drivers/s390/char/tape_block.c31
-rw-r--r--drivers/s390/char/tape_core.c8
-rw-r--r--drivers/s390/cio/device.c4
-rw-r--r--drivers/s390/cio/qdio_debug.c19
-rw-r--r--drivers/s390/cio/qdio_main.c1
-rw-r--r--drivers/s390/crypto/ap_bus.c6
-rw-r--r--drivers/s390/kvm/kvm_virtio.c4
-rw-r--r--drivers/s390/net/qeth_core_main.c3
-rw-r--r--drivers/s390/net/qeth_l2_main.c27
-rw-r--r--drivers/s390/net/qeth_l3_main.c13
-rw-r--r--drivers/s390/net/qeth_l3_sys.c7
-rw-r--r--drivers/s390/scsi/zfcp_aux.c3
-rw-r--r--drivers/s390/scsi/zfcp_ccw.c4
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c42
-rw-r--r--drivers/s390/scsi/zfcp_dbf.h8
-rw-r--r--drivers/s390/scsi/zfcp_erp.c8
-rw-r--r--drivers/s390/scsi/zfcp_fc.c7
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c43
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c14
-rw-r--r--drivers/sbus/char/jsflash.c1
-rw-r--r--drivers/scsi/3w-9xxx.c3
-rw-r--r--drivers/scsi/3w-xxxx.c7
-rw-r--r--drivers/scsi/aacraid/linit.c8
-rw-r--r--drivers/scsi/advansys.c4
-rw-r--r--drivers/scsi/aic7xxx/aic79xx.reg185
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_core.c15
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_pci.c12
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_reg.h_shipped567
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped1723
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx.reg124
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_core.c7
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_reg.h_shipped875
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped1165
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm_gram.y10
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm_scan.l1
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c3
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h3
-rw-r--r--drivers/scsi/device_handler/scsi_dh_hp_sw.c12
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c3
-rw-r--r--drivers/scsi/dpt_i2o.c4
-rw-r--r--drivers/scsi/fdomain.c2
-rw-r--r--drivers/scsi/gdth.c12
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c2
-rw-r--r--drivers/scsi/ibmvscsi/ibmvstgt.c16
-rw-r--r--drivers/scsi/ide-scsi.c22
-rw-r--r--drivers/scsi/ipr.c16
-rw-r--r--drivers/scsi/libiscsi.c11
-rw-r--r--drivers/scsi/megaraid.c11
-rw-r--r--drivers/scsi/megaraid.h2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.c15
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h1
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c26
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c1
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c19
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/scsi_error.c8
-rw-r--r--drivers/scsi/scsi_ioctl.c6
-rw-r--r--drivers/scsi/scsi_lib.c104
-rw-r--r--drivers/scsi/scsi_netlink.c9
-rw-r--r--drivers/scsi/sd.c56
-rw-r--r--drivers/scsi/sg.c3
-rw-r--r--drivers/scsi/sr.c40
-rw-r--r--drivers/scsi/st.c7
-rw-r--r--drivers/scsi/stex.c2
-rw-r--r--drivers/scsi/sun3x_esp.c4
-rw-r--r--drivers/serial/8250_pci.c231
-rw-r--r--drivers/serial/Kconfig6
-rw-r--r--drivers/serial/atmel_serial.c17
-rw-r--r--drivers/serial/crisv10.c4
-rw-r--r--drivers/serial/crisv10.h2
-rw-r--r--drivers/serial/ioc3_serial.c6
-rw-r--r--drivers/serial/mpc52xx_uart.c4
-rw-r--r--drivers/serial/netx-serial.c4
-rw-r--r--drivers/serial/s3c2440.c2
-rw-r--r--drivers/serial/sh-sci.c62
-rw-r--r--drivers/serial/sh-sci.h112
-rw-r--r--drivers/serial/uartlite.c4
-rw-r--r--drivers/sh/maple/maple.c2
-rw-r--r--drivers/spi/atmel_spi.c3
-rw-r--r--drivers/spi/au1550_spi.c26
-rw-r--r--drivers/spi/mpc52xx_psc_spi.c5
-rw-r--r--drivers/spi/pxa2xx_spi.c24
-rw-r--r--drivers/spi/spi_imx.c70
-rw-r--r--drivers/spi/spi_s3c24xx_gpio.c3
-rw-r--r--drivers/spi/spidev.c4
-rw-r--r--drivers/ssb/Kconfig5
-rw-r--r--drivers/staging/Kconfig21
-rw-r--r--drivers/staging/Makefile1
-rw-r--r--drivers/staging/at76_usb/at76_usb.c4
-rw-r--r--drivers/staging/echo/bit_operations.h205
-rw-r--r--drivers/staging/echo/echo.c836
-rw-r--r--drivers/staging/echo/echo.h58
-rw-r--r--drivers/staging/echo/fir.h376
-rw-r--r--drivers/staging/echo/mmx.h29
-rw-r--r--drivers/staging/echo/oslec.h86
-rw-r--r--drivers/staging/et131x/et1310_phy.c2
-rw-r--r--drivers/staging/et131x/et131x_debug.c1
-rw-r--r--drivers/staging/et131x/et131x_initpci.c1
-rw-r--r--drivers/staging/go7007/go7007-driver.c1
-rw-r--r--drivers/staging/go7007/go7007-fw.c1
-rw-r--r--drivers/staging/go7007/go7007-i2c.c1
-rw-r--r--drivers/staging/go7007/go7007-usb.c1
-rw-r--r--drivers/staging/go7007/snd-go7007.c1
-rw-r--r--drivers/staging/go7007/wis-ov7640.c1
-rw-r--r--drivers/staging/go7007/wis-saa7113.c1
-rw-r--r--drivers/staging/go7007/wis-saa7115.c1
-rw-r--r--drivers/staging/go7007/wis-sony-tuner.c1
-rw-r--r--drivers/staging/go7007/wis-tw2804.c1
-rw-r--r--drivers/staging/go7007/wis-tw9903.c1
-rw-r--r--drivers/staging/go7007/wis-uda1342.c1
-rw-r--r--drivers/staging/me4000/me4000.c910
-rw-r--r--drivers/staging/me4000/me4000.h194
-rw-r--r--drivers/staging/poch/Kconfig6
-rw-r--r--drivers/staging/poch/Makefile1
-rw-r--r--drivers/staging/poch/README7
-rw-r--r--drivers/staging/poch/poch.c1425
-rw-r--r--drivers/staging/poch/poch.h29
-rw-r--r--drivers/staging/slicoss/slicoss.c18
-rw-r--r--drivers/staging/sxg/README1
-rw-r--r--drivers/staging/sxg/sxg.c1379
-rw-r--r--drivers/staging/sxg/sxg_os.h41
-rw-r--r--drivers/staging/sxg/sxgdbg.h2
-rw-r--r--drivers/staging/sxg/sxghif.h410
-rw-r--r--drivers/staging/sxg/sxghw.h404
-rw-r--r--drivers/staging/sxg/sxgphycode.h12
-rw-r--r--drivers/staging/usbip/Kconfig2
-rw-r--r--drivers/staging/usbip/usbip_common.c2
-rw-r--r--drivers/staging/usbip/vhci_rx.c2
-rw-r--r--drivers/staging/winbond/Kconfig2
-rw-r--r--drivers/staging/winbond/README1
-rw-r--r--drivers/staging/winbond/bss_f.h6
-rw-r--r--drivers/staging/winbond/ds_tkip.h6
-rw-r--r--drivers/staging/winbond/linux/common.h17
-rw-r--r--drivers/staging/winbond/linux/wb35reg.c63
-rw-r--r--drivers/staging/winbond/linux/wb35reg_f.h12
-rw-r--r--drivers/staging/winbond/linux/wb35reg_s.h4
-rw-r--r--drivers/staging/winbond/linux/wb35rx.c175
-rw-r--r--drivers/staging/winbond/linux/wb35rx_s.h2
-rw-r--r--drivers/staging/winbond/linux/wb35tx.c138
-rw-r--r--drivers/staging/winbond/linux/wb35tx_f.h2
-rw-r--r--drivers/staging/winbond/linux/wbusb.c259
-rw-r--r--drivers/staging/winbond/mds.c30
-rw-r--r--drivers/staging/winbond/mds_f.h6
-rw-r--r--drivers/staging/winbond/mds_s.h8
-rw-r--r--drivers/staging/winbond/mlme_s.h4
-rw-r--r--drivers/staging/winbond/mlmetxrx.c4
-rw-r--r--drivers/staging/winbond/mlmetxrx_f.h4
-rw-r--r--drivers/staging/winbond/reg.c24
-rw-r--r--drivers/staging/winbond/sme_api.c1
-rw-r--r--drivers/staging/winbond/sme_api.h2
-rw-r--r--drivers/staging/winbond/wbhal.c32
-rw-r--r--drivers/staging/winbond/wbhal_f.h28
-rw-r--r--drivers/staging/winbond/wbhal_s.h4
-rw-r--r--drivers/staging/winbond/wblinux.c208
-rw-r--r--drivers/staging/winbond/wblinux_s.h4
-rw-r--r--drivers/staging/wlan-ng/Kconfig2
-rw-r--r--drivers/staging/wlan-ng/hfa384x.h2
-rw-r--r--drivers/staging/wlan-ng/p80211wep.c1
-rw-r--r--drivers/staging/wlan-ng/prism2mib.c2
-rw-r--r--drivers/staging/wlan-ng/wlan_compat.h8
-rw-r--r--drivers/telephony/ixj.c1
-rw-r--r--drivers/telephony/phonedev.c2
-rw-r--r--drivers/uio/uio.c3
-rw-r--r--drivers/usb/Kconfig5
-rw-r--r--drivers/usb/Makefile3
-rw-r--r--drivers/usb/atm/speedtch.c12
-rw-r--r--drivers/usb/class/cdc-acm.c12
-rw-r--r--drivers/usb/class/cdc-wdm.c2
-rw-r--r--drivers/usb/class/usbtmc.c3
-rw-r--r--drivers/usb/core/driver.c7
-rw-r--r--drivers/usb/core/hcd.c35
-rw-r--r--drivers/usb/core/hcd.h1
-rw-r--r--drivers/usb/core/hub.c6
-rw-r--r--drivers/usb/core/message.c1
-rw-r--r--drivers/usb/core/sysfs.c2
-rw-r--r--drivers/usb/core/urb.c26
-rw-r--r--drivers/usb/gadget/config.c2
-rw-r--r--drivers/usb/gadget/f_acm.c4
-rw-r--r--drivers/usb/gadget/f_rndis.c7
-rw-r--r--drivers/usb/gadget/fsl_qe_udc.c3
-rw-r--r--drivers/usb/gadget/fsl_usb2_udc.c3
-rw-r--r--drivers/usb/gadget/inode.c1
-rw-r--r--drivers/usb/gadget/pxa25x_udc.c14
-rw-r--r--drivers/usb/gadget/pxa27x_udc.c2
-rw-r--r--drivers/usb/gadget/s3c2410_udc.c7
-rw-r--r--drivers/usb/host/Kconfig52
-rw-r--r--drivers/usb/host/Makefile3
-rw-r--r--drivers/usb/host/ehci-hcd.c25
-rw-r--r--drivers/usb/host/ehci-pci.c24
-rw-r--r--drivers/usb/host/ehci-ps3.c1
-rw-r--r--drivers/usb/host/ehci-sched.c4
-rw-r--r--drivers/usb/host/ehci.h12
-rw-r--r--drivers/usb/host/hwa-hc.c925
-rw-r--r--drivers/usb/host/isp1760-if.c22
-rw-r--r--drivers/usb/host/ohci-hcd.c21
-rw-r--r--drivers/usb/host/ohci-ps3.c3
-rw-r--r--drivers/usb/host/ohci-tmio.c376
-rw-r--r--drivers/usb/host/r8a66597-hcd.c5
-rw-r--r--drivers/usb/host/whci/Kbuild11
-rw-r--r--drivers/usb/host/whci/asl.c367
-rw-r--r--drivers/usb/host/whci/hcd.c339
-rw-r--r--drivers/usb/host/whci/hw.c87
-rw-r--r--drivers/usb/host/whci/init.c188
-rw-r--r--drivers/usb/host/whci/int.c95
-rw-r--r--drivers/usb/host/whci/pzl.c398
-rw-r--r--drivers/usb/host/whci/qset.c567
-rw-r--r--drivers/usb/host/whci/whcd.h197
-rw-r--r--drivers/usb/host/whci/whci-hc.h416
-rw-r--r--drivers/usb/host/whci/wusb.c241
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb.c1
-rw-r--r--drivers/usb/misc/usbtest.c3
-rw-r--r--drivers/usb/misc/vstusb.c2
-rw-r--r--drivers/usb/mon/mon_bin.c5
-rw-r--r--drivers/usb/musb/musb_core.c6
-rw-r--r--drivers/usb/musb/musb_debug.h4
-rw-r--r--drivers/usb/musb/musb_host.c159
-rw-r--r--drivers/usb/musb/musb_host.h1
-rw-r--r--drivers/usb/musb/omap2430.c2
-rw-r--r--drivers/usb/musb/tusb6010.c2
-rw-r--r--drivers/usb/serial/console.c1
-rw-r--r--drivers/usb/serial/cp2101.c3
-rw-r--r--drivers/usb/serial/ftdi_sio.c4
-rw-r--r--drivers/usb/serial/ftdi_sio.h6
-rw-r--r--drivers/usb/serial/ir-usb.c2
-rw-r--r--drivers/usb/serial/option.c137
-rw-r--r--drivers/usb/serial/pl2303.c2
-rw-r--r--drivers/usb/serial/pl2303.h8
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c55
-rw-r--r--drivers/usb/serial/usb-serial.c1
-rw-r--r--drivers/usb/storage/Kconfig4
-rw-r--r--drivers/usb/storage/initializers.c3
-rw-r--r--drivers/usb/storage/unusual_devs.h364
-rw-r--r--drivers/usb/wusbcore/Kconfig41
-rw-r--r--drivers/usb/wusbcore/Makefile26
-rw-r--r--drivers/usb/wusbcore/cbaf.c673
-rw-r--r--drivers/usb/wusbcore/crypto.c538
-rw-r--r--drivers/usb/wusbcore/dev-sysfs.c143
-rw-r--r--drivers/usb/wusbcore/devconnect.c1297
-rw-r--r--drivers/usb/wusbcore/mmc.c321
-rw-r--r--drivers/usb/wusbcore/pal.c42
-rw-r--r--drivers/usb/wusbcore/reservation.c115
-rw-r--r--drivers/usb/wusbcore/rh.c477
-rw-r--r--drivers/usb/wusbcore/security.c642
-rw-r--r--drivers/usb/wusbcore/wa-hc.c95
-rw-r--r--drivers/usb/wusbcore/wa-hc.h417
-rw-r--r--drivers/usb/wusbcore/wa-nep.c310
-rw-r--r--drivers/usb/wusbcore/wa-rpipe.c562
-rw-r--r--drivers/usb/wusbcore/wa-xfer.c1709
-rw-r--r--drivers/usb/wusbcore/wusbhc.c418
-rw-r--r--drivers/usb/wusbcore/wusbhc.h495
-rw-r--r--drivers/uwb/Kconfig90
-rw-r--r--drivers/uwb/Makefile29
-rw-r--r--drivers/uwb/address.c374
-rw-r--r--drivers/uwb/beacon.c642
-rw-r--r--drivers/uwb/driver.c144
-rw-r--r--drivers/uwb/drp-avail.c288
-rw-r--r--drivers/uwb/drp-ie.c232
-rw-r--r--drivers/uwb/drp.c461
-rw-r--r--drivers/uwb/est.c477
-rw-r--r--drivers/uwb/hwa-rc.c926
-rw-r--r--drivers/uwb/i1480/Makefile2
-rw-r--r--drivers/uwb/i1480/dfu/Makefile9
-rw-r--r--drivers/uwb/i1480/dfu/dfu.c217
-rw-r--r--drivers/uwb/i1480/dfu/i1480-dfu.h260
-rw-r--r--drivers/uwb/i1480/dfu/mac.c527
-rw-r--r--drivers/uwb/i1480/dfu/phy.c203
-rw-r--r--drivers/uwb/i1480/dfu/usb.c500
-rw-r--r--drivers/uwb/i1480/i1480-est.c99
-rw-r--r--drivers/uwb/i1480/i1480-wlp.h200
-rw-r--r--drivers/uwb/i1480/i1480u-wlp/Makefile8
-rw-r--r--drivers/uwb/i1480/i1480u-wlp/i1480u-wlp.h284
-rw-r--r--drivers/uwb/i1480/i1480u-wlp/lc.c421
-rw-r--r--drivers/uwb/i1480/i1480u-wlp/netdev.c368
-rw-r--r--drivers/uwb/i1480/i1480u-wlp/rx.c486
-rw-r--r--drivers/uwb/i1480/i1480u-wlp/sysfs.c408
-rw-r--r--drivers/uwb/i1480/i1480u-wlp/tx.c632
-rw-r--r--drivers/uwb/ie.c541
-rw-r--r--drivers/uwb/lc-dev.c492
-rw-r--r--drivers/uwb/lc-rc.c495
-rw-r--r--drivers/uwb/neh.c616
-rw-r--r--drivers/uwb/pal.c91
-rw-r--r--drivers/uwb/reset.c362
-rw-r--r--drivers/uwb/rsv.c680
-rw-r--r--drivers/uwb/scan.c133
-rw-r--r--drivers/uwb/umc-bus.c218
-rw-r--r--drivers/uwb/umc-dev.c104
-rw-r--r--drivers/uwb/umc-drv.c31
-rw-r--r--drivers/uwb/uwb-debug.c367
-rw-r--r--drivers/uwb/uwb-internal.h305
-rw-r--r--drivers/uwb/uwbd.c410
-rw-r--r--drivers/uwb/whc-rc.c520
-rw-r--r--drivers/uwb/whci.c269
-rw-r--r--drivers/uwb/wlp/Makefile10
-rw-r--r--drivers/uwb/wlp/driver.c43
-rw-r--r--drivers/uwb/wlp/eda.c449
-rw-r--r--drivers/uwb/wlp/messages.c1946
-rw-r--r--drivers/uwb/wlp/sysfs.c709
-rw-r--r--drivers/uwb/wlp/txrx.c374
-rw-r--r--drivers/uwb/wlp/wlp-internal.h228
-rw-r--r--drivers/uwb/wlp/wlp-lc.c585
-rw-r--r--drivers/uwb/wlp/wss-lc.c1055
-rw-r--r--drivers/video/Kconfig32
-rw-r--r--drivers/video/Makefile1
-rw-r--r--drivers/video/atmel_lcdfb.c2
-rw-r--r--drivers/video/aty/radeon_accel.c291
-rw-r--r--drivers/video/aty/radeon_backlight.c2
-rw-r--r--drivers/video/aty/radeon_base.c22
-rw-r--r--drivers/video/aty/radeon_pm.c6
-rw-r--r--drivers/video/aty/radeonfb.h38
-rw-r--r--drivers/video/backlight/Kconfig37
-rw-r--r--drivers/video/backlight/Makefile4
-rw-r--r--drivers/video/backlight/corgi_lcd.c4
-rw-r--r--drivers/video/backlight/da903x.c203
-rw-r--r--drivers/video/backlight/kb3886_bl.c204
-rw-r--r--drivers/video/backlight/lcd.c11
-rw-r--r--drivers/video/backlight/tosa_bl.c198
-rw-r--r--drivers/video/backlight/tosa_lcd.c280
-rw-r--r--drivers/video/cirrusfb.c5
-rw-r--r--drivers/video/console/fbcon.c17
-rw-r--r--drivers/video/fbmem.c63
-rw-r--r--drivers/video/macfb.c74
-rw-r--r--drivers/video/mb862xx/Makefile5
-rw-r--r--drivers/video/mb862xx/mb862xx_reg.h138
-rw-r--r--drivers/video/mb862xx/mb862xxfb.c1061
-rw-r--r--drivers/video/mb862xx/mb862xxfb.h83
-rw-r--r--drivers/video/omap/Makefile1
-rw-r--r--drivers/video/omap/lcd_sx1.c327
-rw-r--r--drivers/video/omap/omapfb_main.c2
-rw-r--r--drivers/video/pxafb.c5
-rw-r--r--drivers/video/tmiofb.c10
-rw-r--r--drivers/video/via/global.h3
-rw-r--r--drivers/video/via/viafbdev.c17
-rw-r--r--drivers/video/xen-fbfront.c6
-rw-r--r--drivers/video/xilinxfb.c5
-rw-r--r--drivers/w1/masters/Kconfig9
-rw-r--r--drivers/w1/masters/Makefile1
-rw-r--r--drivers/w1/masters/omap_hdq.c725
-rw-r--r--drivers/w1/slaves/Kconfig7
-rw-r--r--drivers/w1/slaves/Makefile2
-rw-r--r--drivers/w1/slaves/w1_bq27000.c123
-rw-r--r--drivers/w1/w1.h1
-rw-r--r--drivers/w1/w1_io.c3
-rw-r--r--drivers/watchdog/Kconfig8
-rw-r--r--drivers/watchdog/acquirewdt.c6
-rw-r--r--drivers/watchdog/advantechwdt.c6
-rw-r--r--drivers/watchdog/at91sam9_wdt.c2
-rw-r--r--drivers/watchdog/bfin_wdt.c2
-rw-r--r--drivers/watchdog/booke_wdt.c5
-rw-r--r--drivers/watchdog/eurotechwdt.c4
-rw-r--r--drivers/watchdog/hpwdt.c5
-rw-r--r--drivers/watchdog/i6300esb.c3
-rw-r--r--drivers/watchdog/iTCO_vendor_support.c31
-rw-r--r--drivers/watchdog/iTCO_wdt.c164
-rw-r--r--drivers/watchdog/ib700wdt.c6
-rw-r--r--drivers/watchdog/indydog.c2
-rw-r--r--drivers/watchdog/mpcore_wdt.c4
-rw-r--r--drivers/watchdog/mtx-1_wdt.c4
-rw-r--r--drivers/watchdog/omap_wdt.c2
-rw-r--r--drivers/watchdog/pcwd_pci.c2
-rw-r--r--drivers/watchdog/pcwd_usb.c2
-rw-r--r--drivers/watchdog/rc32434_wdt.c3
-rw-r--r--drivers/watchdog/s3c2410_wdt.c2
-rw-r--r--drivers/watchdog/sa1100_wdt.c2
-rw-r--r--drivers/watchdog/sb_wdog.c4
-rw-r--r--drivers/watchdog/sbc8360.c6
-rw-r--r--drivers/watchdog/sbc_epx_c3.c2
-rw-r--r--drivers/watchdog/smsc37b787_wdt.c2
-rw-r--r--drivers/watchdog/softdog.c3
-rw-r--r--drivers/watchdog/w83627hf_wdt.c6
-rw-r--r--drivers/watchdog/w83697hf_wdt.c4
-rw-r--r--drivers/watchdog/w83697ug_wdt.c2
-rw-r--r--drivers/watchdog/wafer5823wdt.c4
-rw-r--r--drivers/watchdog/wdt.c4
-rw-r--r--drivers/watchdog/wdt285.c3
-rw-r--r--drivers/watchdog/wdt_pci.c4
-rw-r--r--drivers/xen/balloon.c14
-rw-r--r--drivers/xen/cpu_hotplug.c2
-rw-r--r--drivers/xen/events.c2
-rw-r--r--drivers/xen/features.c6
-rw-r--r--drivers/xen/grant-table.c1
-rw-r--r--drivers/xen/manage.c2
-rw-r--r--drivers/xen/xencomm.c23
1318 files changed, 75390 insertions, 20889 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index d19b6f5a1106..2f557f570ade 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -68,6 +68,8 @@ source "drivers/ssb/Kconfig"
 
 source "drivers/mfd/Kconfig"
 
+source "drivers/regulator/Kconfig"
+
 source "drivers/media/Kconfig"
 
 source "drivers/video/Kconfig"
@@ -78,6 +80,8 @@ source "drivers/hid/Kconfig"
 
 source "drivers/usb/Kconfig"
 
+source "drivers/uwb/Kconfig"
+
 source "drivers/mmc/Kconfig"
 
 source "drivers/memstick/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 46c8681a07f4..fceb71a741c3 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -56,6 +56,7 @@ obj-$(CONFIG_MAC) += macintosh/
 obj-$(CONFIG_ATA_OVER_ETH) += block/aoe/
 obj-$(CONFIG_PARIDE) += block/paride/
 obj-$(CONFIG_TC) += tc/
+obj-$(CONFIG_UWB) += uwb/
 obj-$(CONFIG_USB) += usb/
 obj-$(CONFIG_USB_MUSB_HDRC) += usb/musb/
 obj-$(CONFIG_PCI) += usb/
@@ -82,6 +83,7 @@ obj-$(CONFIG_EISA) += eisa/
 obj-y += lguest/
 obj-$(CONFIG_CPU_FREQ) += cpufreq/
 obj-$(CONFIG_CPU_IDLE) += cpuidle/
+obj-y += idle/
 obj-$(CONFIG_MMC) += mmc/
 obj-$(CONFIG_MEMSTICK) += memstick/
 obj-$(CONFIG_NEW_LEDS) += leds/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index da49b006bcc5..b0243fd55ac0 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -42,7 +42,7 @@ if ACPI
42 42
43config ACPI_SLEEP 43config ACPI_SLEEP
44 bool 44 bool
45 depends on PM_SLEEP 45 depends on SUSPEND || HIBERNATION
46 default y 46 default y
47 47
48config ACPI_PROCFS 48config ACPI_PROCFS
@@ -157,18 +157,11 @@ config ACPI_FAN
157 applications to perform basic fan control (on, off, status). 157 applications to perform basic fan control (on, off, status).
158 158
159config ACPI_DOCK 159config ACPI_DOCK
160 tristate "Dock" 160 bool "Dock"
161 depends on EXPERIMENTAL 161 depends on EXPERIMENTAL
162 help 162 help
163 This driver adds support for ACPI controlled docking stations 163 This driver adds support for ACPI controlled docking stations and removable
164 164 drive bays such as the IBM ultrabay or the Dell Module Bay.
165config ACPI_BAY
166 tristate "Removable Drive Bay (EXPERIMENTAL)"
167 depends on EXPERIMENTAL
168 depends on ACPI_DOCK
169 help
170 This driver adds support for ACPI controlled removable drive
171 bays such as the IBM ultrabay or the Dell Module Bay.
172 165
173config ACPI_PROCESSOR 166config ACPI_PROCESSOR
174 tristate "Processor" 167 tristate "Processor"
@@ -319,9 +312,13 @@ config ACPI_DEBUG
319 bool "Debug Statements" 312 bool "Debug Statements"
320 default n 313 default n
321 help 314 help
322 The ACPI driver can optionally report errors with a great deal 315 The ACPI subsystem can produce debug output. Saying Y enables this
323 of verbosity. Saying Y enables these statements. This will increase 316 output and increases the kernel size by around 50K.
324 your kernel size by around 50K. 317
318 Use the acpi.debug_layer and acpi.debug_level kernel command-line
319 parameters documented in Documentation/acpi/debug.txt and
320 Documentation/kernel-parameters.txt to control the type and
321 amount of debug output.
325 322
326config ACPI_DEBUG_FUNC_TRACE 323config ACPI_DEBUG_FUNC_TRACE
327 bool "Additionally enable ACPI function tracing" 324 bool "Additionally enable ACPI function tracing"
@@ -331,14 +328,6 @@ config ACPI_DEBUG_FUNC_TRACE
331 ACPI Debug Statements slow down ACPI processing. Function trace 328 ACPI Debug Statements slow down ACPI processing. Function trace
332 is about half of the penalty and is rarely useful. 329 is about half of the penalty and is rarely useful.
333 330
334config ACPI_EC
335 bool
336 default y
337 help
338 This driver is required on some systems for the proper operation of
339 the battery and thermal drivers. If you are compiling for a
340 mobile system, say Y.
341
342config ACPI_PCI_SLOT 331config ACPI_PCI_SLOT
343 tristate "PCI slot detection driver" 332 tristate "PCI slot detection driver"
344 default n 333 default n
@@ -348,10 +337,6 @@ config ACPI_PCI_SLOT
348 help you correlate PCI bus addresses with the physical geography 337 help you correlate PCI bus addresses with the physical geography
349 of your slots. If you are unsure, say N. 338 of your slots. If you are unsure, say N.
350 339
351config ACPI_POWER
352 bool
353 default y
354
355config ACPI_SYSTEM 340config ACPI_SYSTEM
356 bool 341 bool
357 default y 342 default y
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 52a4cd4b81d0..3c0c93300f12 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -39,20 +39,23 @@ obj-y += sleep/
39obj-y += bus.o glue.o 39obj-y += bus.o glue.o
40obj-y += scan.o 40obj-y += scan.o
41# Keep EC driver first. Initialization of others depend on it. 41# Keep EC driver first. Initialization of others depend on it.
42obj-$(CONFIG_ACPI_EC) += ec.o 42obj-y += ec.o
43obj-$(CONFIG_ACPI_AC) += ac.o 43obj-$(CONFIG_ACPI_AC) += ac.o
44obj-$(CONFIG_ACPI_BATTERY) += battery.o 44obj-$(CONFIG_ACPI_BATTERY) += battery.o
45obj-$(CONFIG_ACPI_BUTTON) += button.o 45obj-$(CONFIG_ACPI_BUTTON) += button.o
46obj-$(CONFIG_ACPI_FAN) += fan.o 46obj-$(CONFIG_ACPI_FAN) += fan.o
47obj-$(CONFIG_ACPI_DOCK) += dock.o 47obj-$(CONFIG_ACPI_DOCK) += dock.o
48obj-$(CONFIG_ACPI_BAY) += bay.o
49obj-$(CONFIG_ACPI_VIDEO) += video.o 48obj-$(CONFIG_ACPI_VIDEO) += video.o
49ifdef CONFIG_ACPI_VIDEO
50obj-y += video_detect.o
51endif
52
50obj-y += pci_root.o pci_link.o pci_irq.o pci_bind.o 53obj-y += pci_root.o pci_link.o pci_irq.o pci_bind.o
51obj-$(CONFIG_ACPI_PCI_SLOT) += pci_slot.o 54obj-$(CONFIG_ACPI_PCI_SLOT) += pci_slot.o
52obj-$(CONFIG_ACPI_POWER) += power.o
53obj-$(CONFIG_ACPI_PROCESSOR) += processor.o 55obj-$(CONFIG_ACPI_PROCESSOR) += processor.o
54obj-$(CONFIG_ACPI_CONTAINER) += container.o 56obj-$(CONFIG_ACPI_CONTAINER) += container.o
55obj-$(CONFIG_ACPI_THERMAL) += thermal.o 57obj-$(CONFIG_ACPI_THERMAL) += thermal.o
58obj-y += power.o
56obj-$(CONFIG_ACPI_SYSTEM) += system.o event.o 59obj-$(CONFIG_ACPI_SYSTEM) += system.o event.o
57obj-$(CONFIG_ACPI_DEBUG) += debug.o 60obj-$(CONFIG_ACPI_DEBUG) += debug.o
58obj-$(CONFIG_ACPI_NUMA) += numa.o 61obj-$(CONFIG_ACPI_NUMA) += numa.o
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 831883b7d6c9..9b917dac7732 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -37,7 +37,6 @@
37#include <acpi/acpi_bus.h> 37#include <acpi/acpi_bus.h>
38#include <acpi/acpi_drivers.h> 38#include <acpi/acpi_drivers.h>
39 39
40#define ACPI_AC_COMPONENT 0x00020000
41#define ACPI_AC_CLASS "ac_adapter" 40#define ACPI_AC_CLASS "ac_adapter"
42#define ACPI_AC_DEVICE_NAME "AC Adapter" 41#define ACPI_AC_DEVICE_NAME "AC Adapter"
43#define ACPI_AC_FILE_STATE "state" 42#define ACPI_AC_FILE_STATE "state"
@@ -85,7 +84,7 @@ struct acpi_ac {
85 struct power_supply charger; 84 struct power_supply charger;
86#endif 85#endif
87 struct acpi_device * device; 86 struct acpi_device * device;
88 unsigned long state; 87 unsigned long long state;
89}; 88};
90 89
91#define to_acpi_ac(x) container_of(x, struct acpi_ac, charger); 90#define to_acpi_ac(x) container_of(x, struct acpi_ac, charger);
@@ -242,7 +241,7 @@ static void acpi_ac_notify(acpi_handle handle, u32 event, void *data)
242 acpi_ac_get_state(ac); 241 acpi_ac_get_state(ac);
243 acpi_bus_generate_proc_event(device, event, (u32) ac->state); 242 acpi_bus_generate_proc_event(device, event, (u32) ac->state);
244 acpi_bus_generate_netlink_event(device->pnp.device_class, 243 acpi_bus_generate_netlink_event(device->pnp.device_class,
245 device->dev.bus_id, event, 244 dev_name(&device->dev), event,
246 (u32) ac->state); 245 (u32) ac->state);
247#ifdef CONFIG_ACPI_SYSFS_POWER 246#ifdef CONFIG_ACPI_SYSFS_POWER
248 kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE); 247 kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE);
@@ -269,7 +268,7 @@ static int acpi_ac_add(struct acpi_device *device)
269 ac->device = device; 268 ac->device = device;
270 strcpy(acpi_device_name(device), ACPI_AC_DEVICE_NAME); 269 strcpy(acpi_device_name(device), ACPI_AC_DEVICE_NAME);
271 strcpy(acpi_device_class(device), ACPI_AC_CLASS); 270 strcpy(acpi_device_class(device), ACPI_AC_CLASS);
272 acpi_driver_data(device) = ac; 271 device->driver_data = ac;
273 272
274 result = acpi_ac_get_state(ac); 273 result = acpi_ac_get_state(ac);
275 if (result) 274 if (result)
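
The ac.c hunks widen the cached state to unsigned long long (matching the new acpi_evaluate_integer() prototype) and replace the removed dev.bus_id field with dev_name(). A minimal sketch of the resulting call pattern; the _PSR method and the 0x80 event value are used here only for illustration:

        #include <linux/device.h>
        #include <linux/errno.h>
        #include <acpi/acpi_bus.h>

        static int example_get_state(struct acpi_device *device,
                                     unsigned long long *state)
        {
                acpi_status status;

                /* acpi_evaluate_integer() now fills an unsigned long long */
                status = acpi_evaluate_integer(device->handle, "_PSR", NULL, state);
                if (ACPI_FAILURE(status))
                        return -ENODEV;

                /* dev_name() replaces the old device->dev.bus_id string */
                acpi_bus_generate_netlink_event(device->pnp.device_class,
                                                dev_name(&device->dev),
                                                0x80, (u32)*state);
                return 0;
        }
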
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index 5f1127ad5a95..63a17b55b39b 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -32,7 +32,6 @@
32#include <linux/memory_hotplug.h> 32#include <linux/memory_hotplug.h>
33#include <acpi/acpi_drivers.h> 33#include <acpi/acpi_drivers.h>
34 34
35#define ACPI_MEMORY_DEVICE_COMPONENT 0x08000000UL
36#define ACPI_MEMORY_DEVICE_CLASS "memory" 35#define ACPI_MEMORY_DEVICE_CLASS "memory"
37#define ACPI_MEMORY_DEVICE_HID "PNP0C80" 36#define ACPI_MEMORY_DEVICE_HID "PNP0C80"
38#define ACPI_MEMORY_DEVICE_NAME "Hotplug Mem Device" 37#define ACPI_MEMORY_DEVICE_NAME "Hotplug Mem Device"
@@ -194,8 +193,7 @@ acpi_memory_get_device(acpi_handle handle,
194 193
195static int acpi_memory_check_device(struct acpi_memory_device *mem_device) 194static int acpi_memory_check_device(struct acpi_memory_device *mem_device)
196{ 195{
197 unsigned long current_status; 196 unsigned long long current_status;
198
199 197
200 /* Get device present/absent information from the _STA */ 198 /* Get device present/absent information from the _STA */
201 if (ACPI_FAILURE(acpi_evaluate_integer(mem_device->device->handle, "_STA", 199 if (ACPI_FAILURE(acpi_evaluate_integer(mem_device->device->handle, "_STA",
@@ -264,7 +262,7 @@ static int acpi_memory_powerdown_device(struct acpi_memory_device *mem_device)
264 acpi_status status; 262 acpi_status status;
265 struct acpi_object_list arg_list; 263 struct acpi_object_list arg_list;
266 union acpi_object arg; 264 union acpi_object arg;
267 unsigned long current_status; 265 unsigned long long current_status;
268 266
269 267
270 /* Issue the _EJ0 command */ 268 /* Issue the _EJ0 command */
@@ -403,7 +401,7 @@ static int acpi_memory_device_add(struct acpi_device *device)
403 mem_device->device = device; 401 mem_device->device = device;
404 sprintf(acpi_device_name(device), "%s", ACPI_MEMORY_DEVICE_NAME); 402 sprintf(acpi_device_name(device), "%s", ACPI_MEMORY_DEVICE_NAME);
405 sprintf(acpi_device_class(device), "%s", ACPI_MEMORY_DEVICE_CLASS); 403 sprintf(acpi_device_class(device), "%s", ACPI_MEMORY_DEVICE_CLASS);
406 acpi_driver_data(device) = mem_device; 404 device->driver_data = mem_device;
407 405
408 /* Get the range from the _CRS */ 406 /* Get the range from the _CRS */
409 result = acpi_memory_get_device_resources(mem_device); 407 result = acpi_memory_get_device_resources(mem_device);
@@ -454,8 +452,8 @@ static int acpi_memory_device_start (struct acpi_device *device)
454 /* call add_memory func */ 452 /* call add_memory func */
455 result = acpi_memory_enable_device(mem_device); 453 result = acpi_memory_enable_device(mem_device);
456 if (result) 454 if (result)
457 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, 455 printk(KERN_ERR PREFIX
458 "Error in acpi_memory_enable_device\n")); 456 "Error in acpi_memory_enable_device\n");
459 } 457 }
460 return result; 458 return result;
461} 459}
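
acpi_memhotplug.c gets the same unsigned long long treatment for its _STA reads. A hedged sketch of how such a presence check typically looks; the bit masks are the standard ACPI _STA bits, not something introduced by this patch:

        #include <acpi/acpi_bus.h>

        #define EXAMPLE_STA_PRESENT     0x01    /* _STA bit 0: device present */
        #define EXAMPLE_STA_ENABLED     0x02    /* _STA bit 1: device enabled */

        static int example_device_present(acpi_handle handle)
        {
                unsigned long long sta;
                acpi_status status;

                status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
                if (ACPI_FAILURE(status))
                        return 0;

                return (sta & EXAMPLE_STA_PRESENT) && (sta & EXAMPLE_STA_ENABLED);
        }
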
diff --git a/drivers/acpi/asus_acpi.c b/drivers/acpi/asus_acpi.c
index d3d0886d637f..1e74988c7b2d 100644
--- a/drivers/acpi/asus_acpi.c
+++ b/drivers/acpi/asus_acpi.c
@@ -42,7 +42,7 @@
42 42
43#define ASUS_ACPI_VERSION "0.30" 43#define ASUS_ACPI_VERSION "0.30"
44 44
45#define PROC_ASUS "asus" //the directory 45#define PROC_ASUS "asus" /* The directory */
46#define PROC_MLED "mled" 46#define PROC_MLED "mled"
47#define PROC_WLED "wled" 47#define PROC_WLED "wled"
48#define PROC_TLED "tled" 48#define PROC_TLED "tled"
@@ -66,10 +66,10 @@
66/* 66/*
67 * Flags for hotk status 67 * Flags for hotk status
68 */ 68 */
69#define MLED_ON 0x01 //mail LED 69#define MLED_ON 0x01 /* Mail LED */
70#define WLED_ON 0x02 //wireless LED 70#define WLED_ON 0x02 /* Wireless LED */
71#define TLED_ON 0x04 //touchpad LED 71#define TLED_ON 0x04 /* Touchpad LED */
72#define BT_ON 0x08 //internal Bluetooth 72#define BT_ON 0x08 /* Internal Bluetooth */
73 73
74MODULE_AUTHOR("Julien Lerouge, Karol Kozimor"); 74MODULE_AUTHOR("Julien Lerouge, Karol Kozimor");
75MODULE_DESCRIPTION(ACPI_HOTK_NAME); 75MODULE_DESCRIPTION(ACPI_HOTK_NAME);
@@ -82,28 +82,28 @@ MODULE_PARM_DESC(asus_uid, "UID for entries in /proc/acpi/asus");
82module_param(asus_gid, uint, 0); 82module_param(asus_gid, uint, 0);
83MODULE_PARM_DESC(asus_gid, "GID for entries in /proc/acpi/asus"); 83MODULE_PARM_DESC(asus_gid, "GID for entries in /proc/acpi/asus");
84 84
85/* For each model, all features implemented, 85/* For each model, all features implemented,
86 * those marked with R are relative to HOTK, A for absolute */ 86 * those marked with R are relative to HOTK, A for absolute */
87struct model_data { 87struct model_data {
88 char *name; //name of the laptop________________A 88 char *name; /* name of the laptop________________A */
89 char *mt_mled; //method to handle mled_____________R 89 char *mt_mled; /* method to handle mled_____________R */
90 char *mled_status; //node to handle mled reading_______A 90 char *mled_status; /* node to handle mled reading_______A */
91 char *mt_wled; //method to handle wled_____________R 91 char *mt_wled; /* method to handle wled_____________R */
92 char *wled_status; //node to handle wled reading_______A 92 char *wled_status; /* node to handle wled reading_______A */
93 char *mt_tled; //method to handle tled_____________R 93 char *mt_tled; /* method to handle tled_____________R */
94 char *tled_status; //node to handle tled reading_______A 94 char *tled_status; /* node to handle tled reading_______A */
95 char *mt_ledd; //method to handle LED display______R 95 char *mt_ledd; /* method to handle LED display______R */
96 char *mt_bt_switch; //method to switch Bluetooth on/off_R 96 char *mt_bt_switch; /* method to switch Bluetooth on/off_R */
97 char *bt_status; //no model currently supports this__? 97 char *bt_status; /* no model currently supports this__? */
98 char *mt_lcd_switch; //method to turn LCD on/off_________A 98 char *mt_lcd_switch; /* method to turn LCD on/off_________A */
99 char *lcd_status; //node to read LCD panel state______A 99 char *lcd_status; /* node to read LCD panel state______A */
100 char *brightness_up; //method to set brightness up_______A 100 char *brightness_up; /* method to set brightness up_______A */
101 char *brightness_down; //guess what ?______________________A 101 char *brightness_down; /* method to set brightness down ____A */
102 char *brightness_set; //method to set absolute brightness_R 102 char *brightness_set; /* method to set absolute brightness_R */
103 char *brightness_get; //method to get absolute brightness_R 103 char *brightness_get; /* method to get absolute brightness_R */
104 char *brightness_status; //node to get brightness____________A 104 char *brightness_status;/* node to get brightness____________A */
105 char *display_set; //method to set video output________R 105 char *display_set; /* method to set video output________R */
106 char *display_get; //method to get video output________R 106 char *display_get; /* method to get video output________R */
107}; 107};
108 108
109/* 109/*
@@ -111,41 +111,41 @@ struct model_data {
111 * about the hotk device 111 * about the hotk device
112 */ 112 */
113struct asus_hotk { 113struct asus_hotk {
114 struct acpi_device *device; //the device we are in 114 struct acpi_device *device; /* the device we are in */
115 acpi_handle handle; //the handle of the hotk device 115 acpi_handle handle; /* the handle of the hotk device */
116 char status; //status of the hotk, for LEDs, ... 116 char status; /* status of the hotk, for LEDs */
117 u32 ledd_status; //status of the LED display 117 u32 ledd_status; /* status of the LED display */
118 struct model_data *methods; //methods available on the laptop 118 struct model_data *methods; /* methods available on the laptop */
119 u8 brightness; //brightness level 119 u8 brightness; /* brightness level */
120 enum { 120 enum {
121 A1x = 0, //A1340D, A1300F 121 A1x = 0, /* A1340D, A1300F */
122 A2x, //A2500H 122 A2x, /* A2500H */
123 A4G, //A4700G 123 A4G, /* A4700G */
124 D1x, //D1 124 D1x, /* D1 */
125 L2D, //L2000D 125 L2D, /* L2000D */
126 L3C, //L3800C 126 L3C, /* L3800C */
127 L3D, //L3400D 127 L3D, /* L3400D */
128 L3H, //L3H, L2000E, L5D 128 L3H, /* L3H, L2000E, L5D */
129 L4R, //L4500R 129 L4R, /* L4500R */
130 L5x, //L5800C 130 L5x, /* L5800C */
131 L8L, //L8400L 131 L8L, /* L8400L */
132 M1A, //M1300A 132 M1A, /* M1300A */
133 M2E, //M2400E, L4400L 133 M2E, /* M2400E, L4400L */
134 M6N, //M6800N, W3400N 134 M6N, /* M6800N, W3400N */
135 M6R, //M6700R, A3000G 135 M6R, /* M6700R, A3000G */
136 P30, //Samsung P30 136 P30, /* Samsung P30 */
137 S1x, //S1300A, but also L1400B and M2400A (L84F) 137 S1x, /* S1300A, but also L1400B and M2400A (L84F) */
138 S2x, //S200 (J1 reported), Victor MP-XP7210 138 S2x, /* S200 (J1 reported), Victor MP-XP7210 */
139 W1N, //W1000N 139 W1N, /* W1000N */
140 W5A, //W5A 140 W5A, /* W5A */
141 W3V, //W3030V 141 W3V, /* W3030V */
142 xxN, //M2400N, M3700N, M5200N, M6800N, S1300N, S5200N 142 xxN, /* M2400N, M3700N, M5200N, M6800N,
143 A4S, //Z81sp 143 S1300N, S5200N*/
144 //(Centrino) 144 A4S, /* Z81sp */
145 F3Sa, 145 F3Sa, /* (Centrino) */
146 END_MODEL 146 END_MODEL
147 } model; //Models currently supported 147 } model; /* Models currently supported */
148 u16 event_count[128]; //count for each event TODO make this better 148 u16 event_count[128]; /* Count for each event TODO make this better */
149}; 149};
150 150
151/* Here we go */ 151/* Here we go */
@@ -459,18 +459,18 @@ static struct acpi_driver asus_hotk_driver = {
459 }, 459 },
460}; 460};
461 461
462/* 462/*
463 * This function evaluates an ACPI method, given an int as parameter, the 463 * This function evaluates an ACPI method, given an int as parameter, the
464 * method is searched within the scope of the handle, can be NULL. The output 464 * method is searched within the scope of the handle, can be NULL. The output
465 * of the method is written is output, which can also be NULL 465 * of the method is written is output, which can also be NULL
466 * 466 *
467 * returns 1 if write is successful, 0 else. 467 * returns 1 if write is successful, 0 else.
468 */ 468 */
469static int write_acpi_int(acpi_handle handle, const char *method, int val, 469static int write_acpi_int(acpi_handle handle, const char *method, int val,
470 struct acpi_buffer *output) 470 struct acpi_buffer *output)
471{ 471{
472 struct acpi_object_list params; //list of input parameters (an int here) 472 struct acpi_object_list params; /* list of input parameters (int) */
473 union acpi_object in_obj; //the only param we use 473 union acpi_object in_obj; /* the only param we use */
474 acpi_status status; 474 acpi_status status;
475 475
476 params.count = 1; 476 params.count = 1;
@@ -507,18 +507,18 @@ proc_read_info(char *page, char **start, off_t off, int count, int *eof,
507{ 507{
508 int len = 0; 508 int len = 0;
509 int temp; 509 int temp;
510 char buf[16]; //enough for all info 510 char buf[16]; /* enough for all info */
511 /* 511 /*
512 * We use the easy way, we don't care of off and count, so we don't set eof 512 * We use the easy way, we don't care of off and count,
513 * to 1 513 * so we don't set eof to 1
514 */ 514 */
515 515
516 len += sprintf(page, ACPI_HOTK_NAME " " ASUS_ACPI_VERSION "\n"); 516 len += sprintf(page, ACPI_HOTK_NAME " " ASUS_ACPI_VERSION "\n");
517 len += sprintf(page + len, "Model reference : %s\n", 517 len += sprintf(page + len, "Model reference : %s\n",
518 hotk->methods->name); 518 hotk->methods->name);
519 /* 519 /*
520 * The SFUN method probably allows the original driver to get the list 520 * The SFUN method probably allows the original driver to get the list
521 * of features supported by a given model. For now, 0x0100 or 0x0800 521 * of features supported by a given model. For now, 0x0100 or 0x0800
522 * bit signifies that the laptop is equipped with a Wi-Fi MiniPCI card. 522 * bit signifies that the laptop is equipped with a Wi-Fi MiniPCI card.
523 * The significance of others is yet to be found. 523 * The significance of others is yet to be found.
524 */ 524 */
@@ -528,7 +528,7 @@ proc_read_info(char *page, char **start, off_t off, int count, int *eof,
528 /* 528 /*
529 * Another value for userspace: the ASYM method returns 0x02 for 529 * Another value for userspace: the ASYM method returns 0x02 for
530 * battery low and 0x04 for battery critical, its readings tend to be 530 * battery low and 0x04 for battery critical, its readings tend to be
531 * more accurate than those provided by _BST. 531 * more accurate than those provided by _BST.
532 * Note: since not all the laptops provide this method, errors are 532 * Note: since not all the laptops provide this method, errors are
533 * silently ignored. 533 * silently ignored.
534 */ 534 */
@@ -579,7 +579,7 @@ static int read_led(const char *ledname, int ledmask)
579 return (hotk->status & ledmask) ? 1 : 0; 579 return (hotk->status & ledmask) ? 1 : 0;
580} 580}
581 581
582static int parse_arg(const char __user * buf, unsigned long count, int *val) 582static int parse_arg(const char __user *buf, unsigned long count, int *val)
583{ 583{
584 char s[32]; 584 char s[32];
585 if (!count) 585 if (!count)
@@ -596,7 +596,7 @@ static int parse_arg(const char __user * buf, unsigned long count, int *val)
596 596
597/* FIXME: kill extraneous args so it can be called independently */ 597/* FIXME: kill extraneous args so it can be called independently */
598static int 598static int
599write_led(const char __user * buffer, unsigned long count, 599write_led(const char __user *buffer, unsigned long count,
600 char *ledname, int ledmask, int invert) 600 char *ledname, int ledmask, int invert)
601{ 601{
602 int rv, value; 602 int rv, value;
@@ -631,7 +631,7 @@ proc_read_mled(char *page, char **start, off_t off, int count, int *eof,
631} 631}
632 632
633static int 633static int
634proc_write_mled(struct file *file, const char __user * buffer, 634proc_write_mled(struct file *file, const char __user *buffer,
635 unsigned long count, void *data) 635 unsigned long count, void *data)
636{ 636{
637 return write_led(buffer, count, hotk->methods->mt_mled, MLED_ON, 1); 637 return write_led(buffer, count, hotk->methods->mt_mled, MLED_ON, 1);
@@ -648,7 +648,7 @@ proc_read_ledd(char *page, char **start, off_t off, int count, int *eof,
648} 648}
649 649
650static int 650static int
651proc_write_ledd(struct file *file, const char __user * buffer, 651proc_write_ledd(struct file *file, const char __user *buffer,
652 unsigned long count, void *data) 652 unsigned long count, void *data)
653{ 653{
654 int rv, value; 654 int rv, value;
@@ -677,7 +677,7 @@ proc_read_wled(char *page, char **start, off_t off, int count, int *eof,
677} 677}
678 678
679static int 679static int
680proc_write_wled(struct file *file, const char __user * buffer, 680proc_write_wled(struct file *file, const char __user *buffer,
681 unsigned long count, void *data) 681 unsigned long count, void *data)
682{ 682{
683 return write_led(buffer, count, hotk->methods->mt_wled, WLED_ON, 0); 683 return write_led(buffer, count, hotk->methods->mt_wled, WLED_ON, 0);
@@ -694,10 +694,10 @@ proc_read_bluetooth(char *page, char **start, off_t off, int count, int *eof,
694} 694}
695 695
696static int 696static int
697proc_write_bluetooth(struct file *file, const char __user * buffer, 697proc_write_bluetooth(struct file *file, const char __user *buffer,
698 unsigned long count, void *data) 698 unsigned long count, void *data)
699{ 699{
700 /* Note: mt_bt_switch controls both internal Bluetooth adapter's 700 /* Note: mt_bt_switch controls both internal Bluetooth adapter's
701 presence and its LED */ 701 presence and its LED */
702 return write_led(buffer, count, hotk->methods->mt_bt_switch, BT_ON, 0); 702 return write_led(buffer, count, hotk->methods->mt_bt_switch, BT_ON, 0);
703} 703}
@@ -714,7 +714,7 @@ proc_read_tled(char *page, char **start, off_t off, int count, int *eof,
714} 714}
715 715
716static int 716static int
717proc_write_tled(struct file *file, const char __user * buffer, 717proc_write_tled(struct file *file, const char __user *buffer,
718 unsigned long count, void *data) 718 unsigned long count, void *data)
719{ 719{
720 return write_led(buffer, count, hotk->methods->mt_tled, TLED_ON, 0); 720 return write_led(buffer, count, hotk->methods->mt_tled, TLED_ON, 0);
@@ -734,7 +734,7 @@ static int get_lcd_state(void)
734 734
735 input.count = 2; 735 input.count = 2;
736 input.pointer = mt_params; 736 input.pointer = mt_params;
737 /* Note: the following values are partly guessed up, but 737 /* Note: the following values are partly guessed up, but
738 otherwise they seem to work */ 738 otherwise they seem to work */
739 mt_params[0].type = ACPI_TYPE_INTEGER; 739 mt_params[0].type = ACPI_TYPE_INTEGER;
740 mt_params[0].integer.value = 0x02; 740 mt_params[0].integer.value = 0x02;
@@ -753,7 +753,7 @@ static int get_lcd_state(void)
753 /* That's what the AML code does */ 753 /* That's what the AML code does */
754 lcd = out_obj.integer.value >> 8; 754 lcd = out_obj.integer.value >> 8;
755 } else if (hotk->model == F3Sa) { 755 } else if (hotk->model == F3Sa) {
756 unsigned long tmp; 756 unsigned long long tmp;
757 union acpi_object param; 757 union acpi_object param;
758 struct acpi_object_list input; 758 struct acpi_object_list input;
759 acpi_status status; 759 acpi_status status;
@@ -796,12 +796,13 @@ static int set_lcd_state(int value)
796 acpi_evaluate_object(NULL, 796 acpi_evaluate_object(NULL,
797 hotk->methods->mt_lcd_switch, 797 hotk->methods->mt_lcd_switch,
798 NULL, NULL); 798 NULL, NULL);
799 } else { /* L3H and the like have to be handled differently */ 799 } else {
800 /* L3H and the like must be handled differently */
800 if (!write_acpi_int 801 if (!write_acpi_int
801 (hotk->handle, hotk->methods->mt_lcd_switch, 0x07, 802 (hotk->handle, hotk->methods->mt_lcd_switch, 0x07,
802 NULL)) 803 NULL))
803 status = AE_ERROR; 804 status = AE_ERROR;
804 /* L3H's AML executes EHK (0x07) upon Fn+F7 keypress, 805 /* L3H's AML executes EHK (0x07) upon Fn+F7 keypress,
805 the exact behaviour is simulated here */ 806 the exact behaviour is simulated here */
806 } 807 }
807 if (ACPI_FAILURE(status)) 808 if (ACPI_FAILURE(status))
@@ -819,7 +820,7 @@ proc_read_lcd(char *page, char **start, off_t off, int count, int *eof,
819} 820}
820 821
821static int 822static int
822proc_write_lcd(struct file *file, const char __user * buffer, 823proc_write_lcd(struct file *file, const char __user *buffer,
823 unsigned long count, void *data) 824 unsigned long count, void *data)
824{ 825{
825 int rv, value; 826 int rv, value;
@@ -897,7 +898,7 @@ proc_read_brn(char *page, char **start, off_t off, int count, int *eof,
897} 898}
898 899
899static int 900static int
900proc_write_brn(struct file *file, const char __user * buffer, 901proc_write_brn(struct file *file, const char __user *buffer,
901 unsigned long count, void *data) 902 unsigned long count, void *data)
902{ 903{
903 int rv, value; 904 int rv, value;
@@ -921,7 +922,7 @@ static void set_display(int value)
921} 922}
922 923
923/* 924/*
924 * Now, *this* one could be more user-friendly, but so far, no-one has 925 * Now, *this* one could be more user-friendly, but so far, no-one has
925 * complained. The significance of bits is the same as in proc_write_disp() 926 * complained. The significance of bits is the same as in proc_write_disp()
926 */ 927 */
927static int 928static int
@@ -933,18 +934,18 @@ proc_read_disp(char *page, char **start, off_t off, int count, int *eof,
933 if (!read_acpi_int(hotk->handle, hotk->methods->display_get, &value)) 934 if (!read_acpi_int(hotk->handle, hotk->methods->display_get, &value))
934 printk(KERN_WARNING 935 printk(KERN_WARNING
935 "Asus ACPI: Error reading display status\n"); 936 "Asus ACPI: Error reading display status\n");
936 value &= 0x07; /* needed for some models, shouldn't hurt others */ 937 value &= 0x07; /* needed for some models, shouldn't hurt others */
937 return sprintf(page, "%d\n", value); 938 return sprintf(page, "%d\n", value);
938} 939}
939 940
940/* 941/*
941 * Experimental support for display switching. As of now: 1 should activate 942 * Experimental support for display switching. As of now: 1 should activate
942 * the LCD output, 2 should do for CRT, and 4 for TV-Out. Any combination 943 * the LCD output, 2 should do for CRT, and 4 for TV-Out. Any combination
943 * (bitwise) of these will suffice. I never actually tested 3 displays hooked up 944 * (bitwise) of these will suffice. I never actually tested 3 displays hooked
944 * simultaneously, so be warned. See the acpi4asus README for more info. 945 * up simultaneously, so be warned. See the acpi4asus README for more info.
945 */ 946 */
946static int 947static int
947proc_write_disp(struct file *file, const char __user * buffer, 948proc_write_disp(struct file *file, const char __user *buffer,
948 unsigned long count, void *data) 949 unsigned long count, void *data)
949{ 950{
950 int rv, value; 951 int rv, value;
@@ -957,12 +958,12 @@ proc_write_disp(struct file *file, const char __user * buffer,
957 958
958typedef int (proc_readfunc) (char *page, char **start, off_t off, int count, 959typedef int (proc_readfunc) (char *page, char **start, off_t off, int count,
959 int *eof, void *data); 960 int *eof, void *data);
960typedef int (proc_writefunc) (struct file * file, const char __user * buffer, 961typedef int (proc_writefunc) (struct file *file, const char __user *buffer,
961 unsigned long count, void *data); 962 unsigned long count, void *data);
962 963
963static int 964static int
964asus_proc_add(char *name, proc_writefunc * writefunc, 965asus_proc_add(char *name, proc_writefunc *writefunc,
965 proc_readfunc * readfunc, mode_t mode, 966 proc_readfunc *readfunc, mode_t mode,
966 struct acpi_device *device) 967 struct acpi_device *device)
967{ 968{
968 struct proc_dir_entry *proc = 969 struct proc_dir_entry *proc =
@@ -1040,9 +1041,9 @@ static int asus_hotk_add_fs(struct acpi_device *device)
1040 &proc_read_bluetooth, mode, device); 1041 &proc_read_bluetooth, mode, device);
1041 } 1042 }
1042 1043
1043 /* 1044 /*
1044 * We need both read node and write method as LCD switch is also accessible 1045 * We need both read node and write method as LCD switch is also
1045 * from keyboard 1046 * accessible from the keyboard
1046 */ 1047 */
1047 if (hotk->methods->mt_lcd_switch && hotk->methods->lcd_status) { 1048 if (hotk->methods->mt_lcd_switch && hotk->methods->lcd_status) {
1048 asus_proc_add(PROC_LCD, &proc_write_lcd, &proc_read_lcd, mode, 1049 asus_proc_add(PROC_LCD, &proc_write_lcd, &proc_read_lcd, mode,
@@ -1096,11 +1097,10 @@ static void asus_hotk_notify(acpi_handle handle, u32 event, void *data)
1096 if (!hotk) 1097 if (!hotk)
1097 return; 1098 return;
1098 1099
1099 if ((event & ~((u32) BR_UP)) < 16) { 1100 if ((event & ~((u32) BR_UP)) < 16)
1100 hotk->brightness = (event & ~((u32) BR_UP)); 1101 hotk->brightness = (event & ~((u32) BR_UP));
1101 } else if ((event & ~((u32) BR_DOWN)) < 16) { 1102 else if ((event & ~((u32) BR_DOWN)) < 16)
1102 hotk->brightness = (event & ~((u32) BR_DOWN)); 1103 hotk->brightness = (event & ~((u32) BR_DOWN));
1103 }
1104 1104
1105 acpi_bus_generate_proc_event(hotk->device, event, 1105 acpi_bus_generate_proc_event(hotk->device, event,
1106 hotk->event_count[event % 128]++); 1106 hotk->event_count[event % 128]++);
@@ -1186,8 +1186,8 @@ static int asus_hotk_get_info(void)
1186 acpi_status status; 1186 acpi_status status;
1187 1187
1188 /* 1188 /*
1189 * Get DSDT headers early enough to allow for differentiating between 1189 * Get DSDT headers early enough to allow for differentiating between
1190 * models, but late enough to allow acpi_bus_register_driver() to fail 1190 * models, but late enough to allow acpi_bus_register_driver() to fail
1191 * before doing anything ACPI-specific. Should we encounter a machine, 1191 * before doing anything ACPI-specific. Should we encounter a machine,
1192 * which needs special handling (i.e. its hotkey device has a different 1192 * which needs special handling (i.e. its hotkey device has a different
1193 * HID), this bit will be moved. A global variable asus_info contains 1193 * HID), this bit will be moved. A global variable asus_info contains
@@ -1212,8 +1212,8 @@ static int asus_hotk_get_info(void)
1212 1212
1213 /* 1213 /*
1214 * Try to match the object returned by INIT to the specific model. 1214 * Try to match the object returned by INIT to the specific model.
1215 * Handle every possible object (or the lack of thereof) the DSDT 1215 * Handle every possible object (or the lack of thereof) the DSDT
1216 * writers might throw at us. When in trouble, we pass NULL to 1216 * writers might throw at us. When in trouble, we pass NULL to
1217 * asus_model_match() and try something completely different. 1217 * asus_model_match() and try something completely different.
1218 */ 1218 */
1219 if (buffer.pointer) { 1219 if (buffer.pointer) {
@@ -1244,6 +1244,8 @@ static int asus_hotk_get_info(void)
1244 "default values\n", string); 1244 "default values\n", string);
1245 printk(KERN_NOTICE 1245 printk(KERN_NOTICE
1246 " send /proc/acpi/dsdt to the developers\n"); 1246 " send /proc/acpi/dsdt to the developers\n");
1247 kfree(model);
1248 return -ENODEV;
1247 } 1249 }
1248 hotk->methods = &model_conf[hotk->model]; 1250 hotk->methods = &model_conf[hotk->model];
1249 return AE_OK; 1251 return AE_OK;
@@ -1254,7 +1256,7 @@ static int asus_hotk_get_info(void)
1254 /* Sort of per-model blacklist */ 1256 /* Sort of per-model blacklist */
1255 if (strncmp(string, "L2B", 3) == 0) 1257 if (strncmp(string, "L2B", 3) == 0)
1256 hotk->methods->lcd_status = NULL; 1258 hotk->methods->lcd_status = NULL;
1257 /* L2B is similar enough to L3C to use its settings, with this only 1259 /* L2B is similar enough to L3C to use its settings, with this only
1258 exception */ 1260 exception */
1259 else if (strncmp(string, "A3G", 3) == 0) 1261 else if (strncmp(string, "A3G", 3) == 0)
1260 hotk->methods->lcd_status = "\\BLFG"; 1262 hotk->methods->lcd_status = "\\BLFG";
@@ -1321,7 +1323,7 @@ static int asus_hotk_add(struct acpi_device *device)
1321 hotk->handle = device->handle; 1323 hotk->handle = device->handle;
1322 strcpy(acpi_device_name(device), ACPI_HOTK_DEVICE_NAME); 1324 strcpy(acpi_device_name(device), ACPI_HOTK_DEVICE_NAME);
1323 strcpy(acpi_device_class(device), ACPI_HOTK_CLASS); 1325 strcpy(acpi_device_class(device), ACPI_HOTK_CLASS);
1324 acpi_driver_data(device) = hotk; 1326 device->driver_data = hotk;
1325 hotk->device = device; 1327 hotk->device = device;
1326 1328
1327 result = asus_hotk_check(); 1329 result = asus_hotk_check();
@@ -1366,10 +1368,9 @@ static int asus_hotk_add(struct acpi_device *device)
1366 /* LED display is off by default */ 1368 /* LED display is off by default */
1367 hotk->ledd_status = 0xFFF; 1369 hotk->ledd_status = 0xFFF;
1368 1370
1369 end: 1371end:
1370 if (result) { 1372 if (result)
1371 kfree(hotk); 1373 kfree(hotk);
1372 }
1373 1374
1374 return result; 1375 return result;
1375} 1376}
@@ -1394,8 +1395,8 @@ static int asus_hotk_remove(struct acpi_device *device, int type)
1394} 1395}
1395 1396
1396static struct backlight_ops asus_backlight_data = { 1397static struct backlight_ops asus_backlight_data = {
1397 .get_brightness = read_brightness, 1398 .get_brightness = read_brightness,
1398 .update_status = set_brightness_status, 1399 .update_status = set_brightness_status,
1399}; 1400};
1400 1401
1401static void asus_acpi_exit(void) 1402static void asus_acpi_exit(void)
@@ -1442,15 +1443,15 @@ static int __init asus_acpi_init(void)
1442 return -ENODEV; 1443 return -ENODEV;
1443 } 1444 }
1444 1445
1445 asus_backlight_device = backlight_device_register("asus",NULL,NULL, 1446 asus_backlight_device = backlight_device_register("asus", NULL, NULL,
1446 &asus_backlight_data); 1447 &asus_backlight_data);
1447 if (IS_ERR(asus_backlight_device)) { 1448 if (IS_ERR(asus_backlight_device)) {
1448 printk(KERN_ERR "Could not register asus backlight device\n"); 1449 printk(KERN_ERR "Could not register asus backlight device\n");
1449 asus_backlight_device = NULL; 1450 asus_backlight_device = NULL;
1450 asus_acpi_exit(); 1451 asus_acpi_exit();
1451 return -ENODEV; 1452 return -ENODEV;
1452 } 1453 }
1453 asus_backlight_device->props.max_brightness = 15; 1454 asus_backlight_device->props.max_brightness = 15;
1454 1455
1455 return 0; 1456 return 0;
1456} 1457}
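
Several asus_acpi.c hunks revolve around write_acpi_int(), which evaluates a control method with a single integer argument. A stripped-down sketch of that pattern, based on the code shown above (the method name and value are whatever the caller passes):

        #include <acpi/acpi_bus.h>

        static int example_write_acpi_int(acpi_handle handle, const char *method,
                                          int val)
        {
                struct acpi_object_list params; /* list of input parameters */
                union acpi_object in_obj;       /* the single integer argument */
                acpi_status status;

                params.count = 1;
                params.pointer = &in_obj;
                in_obj.type = ACPI_TYPE_INTEGER;
                in_obj.integer.value = val;

                status = acpi_evaluate_object(handle, (char *)method, &params, NULL);
                return ACPI_SUCCESS(status) ? 1 : 0;
        }

The backlight hook-up at the end of the file keeps the existing four-argument backlight_device_register("asus", NULL, NULL, &asus_backlight_data) call; only whitespace changes there.
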
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 70f7f60929ca..1423b0c0cd2e 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -46,7 +46,6 @@
46 46
47#define ACPI_BATTERY_VALUE_UNKNOWN 0xFFFFFFFF 47#define ACPI_BATTERY_VALUE_UNKNOWN 0xFFFFFFFF
48 48
49#define ACPI_BATTERY_COMPONENT 0x00040000
50#define ACPI_BATTERY_CLASS "battery" 49#define ACPI_BATTERY_CLASS "battery"
51#define ACPI_BATTERY_DEVICE_NAME "Battery" 50#define ACPI_BATTERY_DEVICE_NAME "Battery"
52#define ACPI_BATTERY_NOTIFY_STATUS 0x80 51#define ACPI_BATTERY_NOTIFY_STATUS 0x80
@@ -782,7 +781,7 @@ static void acpi_battery_notify(acpi_handle handle, u32 event, void *data)
782 acpi_bus_generate_proc_event(device, event, 781 acpi_bus_generate_proc_event(device, event,
783 acpi_battery_present(battery)); 782 acpi_battery_present(battery));
784 acpi_bus_generate_netlink_event(device->pnp.device_class, 783 acpi_bus_generate_netlink_event(device->pnp.device_class,
785 device->dev.bus_id, event, 784 dev_name(&device->dev), event,
786 acpi_battery_present(battery)); 785 acpi_battery_present(battery));
787#ifdef CONFIG_ACPI_SYSFS_POWER 786#ifdef CONFIG_ACPI_SYSFS_POWER
788 /* acpi_batter_update could remove power_supply object */ 787 /* acpi_batter_update could remove power_supply object */
@@ -804,7 +803,7 @@ static int acpi_battery_add(struct acpi_device *device)
804 battery->device = device; 803 battery->device = device;
805 strcpy(acpi_device_name(device), ACPI_BATTERY_DEVICE_NAME); 804 strcpy(acpi_device_name(device), ACPI_BATTERY_DEVICE_NAME);
806 strcpy(acpi_device_class(device), ACPI_BATTERY_CLASS); 805 strcpy(acpi_device_class(device), ACPI_BATTERY_CLASS);
807 acpi_driver_data(device) = battery; 806 device->driver_data = battery;
808 mutex_init(&battery->lock); 807 mutex_init(&battery->lock);
809 acpi_battery_update(battery); 808 acpi_battery_update(battery);
810#ifdef CONFIG_ACPI_PROCFS_POWER 809#ifdef CONFIG_ACPI_PROCFS_POWER
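
Both ac.c and battery.c switch from assigning through acpi_driver_data() to writing device->driver_data directly; acpi_driver_data() stays in use as the read-side accessor. A small sketch of the resulting store/retrieve pairing, with illustrative struct and function names:

        #include <linux/errno.h>
        #include <linux/slab.h>
        #include <acpi/acpi_bus.h>

        struct example_battery {
                struct acpi_device *device;
                unsigned long long state;
        };

        static int example_add(struct acpi_device *device)
        {
                struct example_battery *battery;

                battery = kzalloc(sizeof(*battery), GFP_KERNEL);
                if (!battery)
                        return -ENOMEM;

                battery->device = device;
                /* was: acpi_driver_data(device) = battery; */
                device->driver_data = battery;
                return 0;
        }

        static int example_remove(struct acpi_device *device, int type)
        {
                struct example_battery *battery = acpi_driver_data(device);

                kfree(battery);
                return 0;
        }
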
diff --git a/drivers/acpi/bay.c b/drivers/acpi/bay.c
deleted file mode 100644
index 61b6c5beb2d3..000000000000
--- a/drivers/acpi/bay.c
+++ /dev/null
@@ -1,411 +0,0 @@
1/*
2 * bay.c - ACPI removable drive bay driver
3 *
4 * Copyright (C) 2006 Kristen Carlson Accardi <kristen.c.accardi@intel.com>
5 *
6 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or (at
11 * your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
21 *
22 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
23 */
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/init.h>
27#include <linux/types.h>
28#include <linux/notifier.h>
29#include <acpi/acpi_bus.h>
30#include <acpi/acpi_drivers.h>
31#include <linux/seq_file.h>
32#include <asm/uaccess.h>
33#include <linux/platform_device.h>
34
35ACPI_MODULE_NAME("bay");
36MODULE_AUTHOR("Kristen Carlson Accardi");
37MODULE_DESCRIPTION("ACPI Removable Drive Bay Driver");
38MODULE_LICENSE("GPL");
39#define ACPI_BAY_CLASS "bay"
40#define ACPI_BAY_COMPONENT 0x10000000
41#define _COMPONENT ACPI_BAY_COMPONENT
42#define bay_dprintk(h,s) {\
43 char prefix[80] = {'\0'};\
44 struct acpi_buffer buffer = {sizeof(prefix), prefix};\
45 acpi_get_name(h, ACPI_FULL_PATHNAME, &buffer);\
46 printk(KERN_DEBUG PREFIX "%s: %s\n", prefix, s); }
47static void bay_notify(acpi_handle handle, u32 event, void *data);
48
49static const struct acpi_device_id bay_device_ids[] = {
50 {"LNXIOBAY", 0},
51 {"", 0},
52};
53MODULE_DEVICE_TABLE(acpi, bay_device_ids);
54
55struct bay {
56 acpi_handle handle;
57 char *name;
58 struct list_head list;
59 struct platform_device *pdev;
60};
61
62static LIST_HEAD(drive_bays);
63
64
65/*****************************************************************************
66 * Drive Bay functions *
67 *****************************************************************************/
68/**
69 * is_ejectable - see if a device is ejectable
70 * @handle: acpi handle of the device
71 *
72 * If an acpi object has a _EJ0 method, then it is ejectable
73 */
74static int is_ejectable(acpi_handle handle)
75{
76 acpi_status status;
77 acpi_handle tmp;
78
79 status = acpi_get_handle(handle, "_EJ0", &tmp);
80 if (ACPI_FAILURE(status))
81 return 0;
82 return 1;
83}
84
85/**
86 * bay_present - see if the bay device is present
87 * @bay: the drive bay
88 *
89 * execute the _STA method.
90 */
91static int bay_present(struct bay *bay)
92{
93 unsigned long sta;
94 acpi_status status;
95
96 if (bay) {
97 status = acpi_evaluate_integer(bay->handle, "_STA", NULL, &sta);
98 if (ACPI_SUCCESS(status) && sta)
99 return 1;
100 }
101 return 0;
102}
103
104/**
105 * eject_device - respond to an eject request
106 * @handle - the device to eject
107 *
108 * Call this devices _EJ0 method.
109 */
110static void eject_device(acpi_handle handle)
111{
112 struct acpi_object_list arg_list;
113 union acpi_object arg;
114
115 bay_dprintk(handle, "Ejecting device");
116
117 arg_list.count = 1;
118 arg_list.pointer = &arg;
119 arg.type = ACPI_TYPE_INTEGER;
120 arg.integer.value = 1;
121
122 if (ACPI_FAILURE(acpi_evaluate_object(handle, "_EJ0",
123 &arg_list, NULL)))
124 pr_debug("Failed to evaluate _EJ0!\n");
125}
126
127/*
128 * show_present - read method for "present" file in sysfs
129 */
130static ssize_t show_present(struct device *dev,
131 struct device_attribute *attr, char *buf)
132{
133 struct bay *bay = dev_get_drvdata(dev);
134 return snprintf(buf, PAGE_SIZE, "%d\n", bay_present(bay));
135
136}
137static DEVICE_ATTR(present, S_IRUGO, show_present, NULL);
138
139/*
140 * write_eject - write method for "eject" file in sysfs
141 */
142static ssize_t write_eject(struct device *dev, struct device_attribute *attr,
143 const char *buf, size_t count)
144{
145 struct bay *bay = dev_get_drvdata(dev);
146
147 if (!count)
148 return -EINVAL;
149
150 eject_device(bay->handle);
151 return count;
152}
153static DEVICE_ATTR(eject, S_IWUSR, NULL, write_eject);
154
155/**
156 * is_ata - see if a device is an ata device
157 * @handle: acpi handle of the device
158 *
159 * If an acpi object has one of 4 ATA ACPI methods defined,
160 * then it is an ATA device
161 */
162static int is_ata(acpi_handle handle)
163{
164 acpi_handle tmp;
165
166 if ((ACPI_SUCCESS(acpi_get_handle(handle, "_GTF", &tmp))) ||
167 (ACPI_SUCCESS(acpi_get_handle(handle, "_GTM", &tmp))) ||
168 (ACPI_SUCCESS(acpi_get_handle(handle, "_STM", &tmp))) ||
169 (ACPI_SUCCESS(acpi_get_handle(handle, "_SDD", &tmp))))
170 return 1;
171
172 return 0;
173}
174
175/**
176 * parent_is_ata(acpi_handle handle)
177 *
178 */
179static int parent_is_ata(acpi_handle handle)
180{
181 acpi_handle phandle;
182
183 if (acpi_get_parent(handle, &phandle))
184 return 0;
185
186 return is_ata(phandle);
187}
188
189/**
190 * is_ejectable_bay - see if a device is an ejectable drive bay
191 * @handle: acpi handle of the device
192 *
193 * If an acpi object is ejectable and has one of the ACPI ATA
194 * methods defined, then we can safely call it an ejectable
195 * drive bay
196 */
197static int is_ejectable_bay(acpi_handle handle)
198{
199 if ((is_ata(handle) || parent_is_ata(handle)) && is_ejectable(handle))
200 return 1;
201 return 0;
202}
203
204#if 0
205/**
206 * eject_removable_drive - try to eject this drive
207 * @dev : the device structure of the drive
208 *
209 * If a device is a removable drive that requires an _EJ0 method
210 * to be executed in order to safely remove from the system, do
211 * it. ATM - always returns success
212 */
213int eject_removable_drive(struct device *dev)
214{
215 acpi_handle handle = DEVICE_ACPI_HANDLE(dev);
216
217 if (handle) {
218 bay_dprintk(handle, "Got device handle");
219 if (is_ejectable_bay(handle))
220 eject_device(handle);
221 } else {
222 printk("No acpi handle for device\n");
223 }
224
225 /* should I return an error code? */
226 return 0;
227}
228EXPORT_SYMBOL_GPL(eject_removable_drive);
229#endif /* 0 */
230
231static int acpi_bay_add_fs(struct bay *bay)
232{
233 int ret;
234 struct device *dev = &bay->pdev->dev;
235
236 ret = device_create_file(dev, &dev_attr_present);
237 if (ret)
238 goto add_fs_err;
239 ret = device_create_file(dev, &dev_attr_eject);
240 if (ret) {
241 device_remove_file(dev, &dev_attr_present);
242 goto add_fs_err;
243 }
244 return 0;
245
246 add_fs_err:
247 bay_dprintk(bay->handle, "Error adding sysfs files\n");
248 return ret;
249}
250
251static void acpi_bay_remove_fs(struct bay *bay)
252{
253 struct device *dev = &bay->pdev->dev;
254
255 /* cleanup sysfs */
256 device_remove_file(dev, &dev_attr_present);
257 device_remove_file(dev, &dev_attr_eject);
258}
259
260static int bay_is_dock_device(acpi_handle handle)
261{
262 acpi_handle parent;
263
264 acpi_get_parent(handle, &parent);
265
266 /* if the device or it's parent is dependent on the
267 * dock, then we are a dock device
268 */
269 return (is_dock_device(handle) || is_dock_device(parent));
270}
271
272static int bay_add(acpi_handle handle, int id)
273{
274 acpi_status status;
275 struct bay *new_bay;
276 struct platform_device *pdev;
277 struct acpi_buffer nbuffer = {ACPI_ALLOCATE_BUFFER, NULL};
278 acpi_get_name(handle, ACPI_FULL_PATHNAME, &nbuffer);
279
280 bay_dprintk(handle, "Adding notify handler");
281
282 /*
283 * Initialize bay device structure
284 */
285 new_bay = kzalloc(sizeof(*new_bay), GFP_ATOMIC);
286 INIT_LIST_HEAD(&new_bay->list);
287 new_bay->handle = handle;
288 new_bay->name = (char *)nbuffer.pointer;
289
290 /* initialize platform device stuff */
291 pdev = platform_device_register_simple(ACPI_BAY_CLASS, id, NULL, 0);
292 if (IS_ERR(pdev)) {
293 printk(KERN_ERR PREFIX "Error registering bay device\n");
294 goto bay_add_err;
295 }
296 new_bay->pdev = pdev;
297 platform_set_drvdata(pdev, new_bay);
298
299 /*
300 * we want the bay driver to be able to send uevents
301 */
302 pdev->dev.uevent_suppress = 0;
303
304 /* register for events on this device */
305 status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
306 bay_notify, new_bay);
307 if (ACPI_FAILURE(status)) {
308 printk(KERN_INFO PREFIX "Error installing bay notify handler\n");
309 platform_device_unregister(new_bay->pdev);
310 goto bay_add_err;
311 }
312
313 if (acpi_bay_add_fs(new_bay)) {
314 acpi_remove_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
315 bay_notify);
316 platform_device_unregister(new_bay->pdev);
317 goto bay_add_err;
318 }
319
320 /* if we are on a dock station, we should register for dock
321 * notifications.
322 */
323 if (bay_is_dock_device(handle)) {
324 bay_dprintk(handle, "Is dependent on dock\n");
325 register_hotplug_dock_device(handle, bay_notify, new_bay);
326 }
327 list_add(&new_bay->list, &drive_bays);
328 printk(KERN_INFO PREFIX "Bay [%s] Added\n", new_bay->name);
329 return 0;
330
331bay_add_err:
332 kfree(new_bay->name);
333 kfree(new_bay);
334 return -ENODEV;
335}
336
337/**
338 * bay_notify - act upon an acpi bay notification
339 * @handle: the bay handle
340 * @event: the acpi event
341 * @data: our driver data struct
342 *
343 */
344static void bay_notify(acpi_handle handle, u32 event, void *data)
345{
346 struct bay *bay_dev = (struct bay *)data;
347 struct device *dev = &bay_dev->pdev->dev;
348 char event_string[12];
349 char *envp[] = { event_string, NULL };
350
351 bay_dprintk(handle, "Bay event");
352 sprintf(event_string, "BAY_EVENT=%d", event);
353 kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
354}
355
356static acpi_status
357find_bay(acpi_handle handle, u32 lvl, void *context, void **rv)
358{
359 int *count = (int *)context;
360
361 /*
362 * there could be more than one ejectable bay.
363 * so, just return AE_OK always so that every object
364 * will be checked.
365 */
366 if (is_ejectable_bay(handle)) {
367 bay_dprintk(handle, "found ejectable bay");
368 if (!bay_add(handle, *count))
369 (*count)++;
370 }
371 return AE_OK;
372}
373
374static int __init bay_init(void)
375{
376 int bays = 0;
377
378 INIT_LIST_HEAD(&drive_bays);
379
380 if (acpi_disabled)
381 return -ENODEV;
382
383 /* look for dockable drive bays */
384 acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
385 ACPI_UINT32_MAX, find_bay, &bays, NULL);
386
387 if (!bays)
388 return -ENODEV;
389
390 return 0;
391}
392
393static void __exit bay_exit(void)
394{
395 struct bay *bay, *tmp;
396
397 list_for_each_entry_safe(bay, tmp, &drive_bays, list) {
398 if (is_dock_device(bay->handle))
399 unregister_hotplug_dock_device(bay->handle);
400 acpi_bay_remove_fs(bay);
401 acpi_remove_notify_handler(bay->handle, ACPI_SYSTEM_NOTIFY,
402 bay_notify);
403 platform_device_unregister(bay->pdev);
404 kfree(bay->name);
405 kfree(bay);
406 }
407}
408
409postcore_initcall(bay_init);
410module_exit(bay_exit);
411
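
With bay.c deleted (its role folded into the dock driver, per the ACPI_DOCK Kconfig hunk above), the test it used to classify an ejectable drive bay — an _EJ0 method plus at least one of the ACPI ATA methods — is worth keeping in view. A condensed sketch based on the removed code; the original additionally accepted the case where the parent handle, rather than the object itself, carries the ATA methods:

        #include <acpi/acpi_bus.h>

        static int example_is_ejectable_bay(acpi_handle handle)
        {
                acpi_handle tmp;

                /* ejectable: the object provides an _EJ0 method */
                if (ACPI_FAILURE(acpi_get_handle(handle, "_EJ0", &tmp)))
                        return 0;

                /* ATA device: any one of the four ACPI ATA methods is enough */
                if (ACPI_SUCCESS(acpi_get_handle(handle, "_GTF", &tmp)) ||
                    ACPI_SUCCESS(acpi_get_handle(handle, "_GTM", &tmp)) ||
                    ACPI_SUCCESS(acpi_get_handle(handle, "_STM", &tmp)) ||
                    ACPI_SUCCESS(acpi_get_handle(handle, "_SDD", &tmp)))
                        return 1;

                return 0;
        }
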
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index ea92bac42c53..09c69806c1fc 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -176,16 +176,6 @@ static int __init dmi_enable_osi_linux(const struct dmi_system_id *d)
176 acpi_dmi_osi_linux(1, d); /* enable */ 176 acpi_dmi_osi_linux(1, d); /* enable */
177 return 0; 177 return 0;
178} 178}
179static int __init dmi_disable_osi_linux(const struct dmi_system_id *d)
180{
181 acpi_dmi_osi_linux(0, d); /* disable */
182 return 0;
183}
184static int __init dmi_unknown_osi_linux(const struct dmi_system_id *d)
185{
186 acpi_dmi_osi_linux(-1, d); /* unknown */
187 return 0;
188}
189static int __init dmi_disable_osi_vista(const struct dmi_system_id *d) 179static int __init dmi_disable_osi_vista(const struct dmi_system_id *d)
190{ 180{
191 printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident); 181 printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
@@ -193,295 +183,21 @@ static int __init dmi_disable_osi_vista(const struct dmi_system_id *d)
193 return 0; 183 return 0;
194} 184}
195 185
196/*
197 * Most BIOS that invoke OSI(Linux) do nothing with it.
198 * But some cause Linux to break.
199 * Only a couple use it to make Linux run better.
200 *
201 * Thus, Linux should continue to disable OSI(Linux) by default,
202 * should continue to discourage BIOS writers from using it, and
203 * should whitelist the few existing systems that require it.
204 *
205 * If it appears clear a vendor isn't using OSI(Linux)
206 * for anything constructive, blacklist them by name to disable
207 * unnecessary dmesg warnings on all of their products.
208 */
209
210static struct dmi_system_id acpi_osi_dmi_table[] __initdata = { 186static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
211 /*
212 * Disable OSI(Linux) warnings on all "Acer, inc."
213 *
214 * _OSI(Linux) disables the latest Windows BIOS code:
215 * DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 3100"),
216 * DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5050"),
217 * DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5100"),
218 * DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5580"),
219 * DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 3010"),
220 * _OSI(Linux) effect unknown:
221 * DMI_MATCH(DMI_PRODUCT_NAME, "Ferrari 5000"),
222 */
223 /*
224 * note that dmi_check_system() uses strstr()
225 * to match sub-strings rather than !strcmp(),
226 * so "Acer" below matches "Acer, inc." above.
227 */
228 /*
229 * Disable OSI(Linux) warnings on all "Acer"
230 *
231 * _OSI(Linux) effect unknown:
232 * DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5610"),
233 * DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7720Z"),
234 * DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 5520"),
235 * DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 6460"),
236 * DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 7510"),
237 *
238 * _OSI(Linux) is a NOP:
239 * DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5315"),
240 * DMI_MATCH(DMI_PRODUCT_NAME, "Extensa 5220"),
241 */
242 {
243 .callback = dmi_disable_osi_linux,
244 .ident = "Acer",
245 .matches = {
246 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
247 },
248 },
249 /*
250 * Disable OSI(Linux) warnings on all "Apple Computer, Inc."
251 * Disable OSI(Linux) warnings on all "Apple Inc."
252 *
253 * _OSI(Linux) confirmed to be a NOP:
254 * DMI_MATCH(DMI_PRODUCT_NAME, "MacBook1,1"),
255 * DMI_MATCH(DMI_PRODUCT_NAME, "MacBook2,1"),
256 * DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro2,2"),
257 * DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro3,1"),
258 * _OSI(Linux) effect unknown:
259 * DMI_MATCH(DMI_PRODUCT_NAME, "MacPro2,1"),
260 * DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro1,1"),
261 */
262 {
263 .callback = dmi_disable_osi_linux,
264 .ident = "Apple",
265 .matches = {
266 DMI_MATCH(DMI_SYS_VENDOR, "Apple"),
267 },
268 },
269 /*
270 * Disable OSI(Linux) warnings on all "BenQ"
271 *
272 * _OSI(Linux) confirmed to be a NOP:
273 * DMI_MATCH(DMI_PRODUCT_NAME, "Joybook S31"),
274 */
275 {
276 .callback = dmi_disable_osi_linux,
277 .ident = "BenQ",
278 .matches = {
279 DMI_MATCH(DMI_SYS_VENDOR, "BenQ"),
280 },
281 },
282 /*
283 * Disable OSI(Linux) warnings on all "Clevo Co."
284 *
285 * _OSI(Linux) confirmed to be a NOP:
286 * DMI_MATCH(DMI_PRODUCT_NAME, "M570RU"),
287 */
288 {
289 .callback = dmi_disable_osi_linux,
290 .ident = "Clevo",
291 .matches = {
292 DMI_MATCH(DMI_SYS_VENDOR, "Clevo Co."),
293 },
294 },
295 /*
296 * Disable OSI(Linux) warnings on all "COMPAL"
297 *
298 * _OSI(Linux) confirmed to be a NOP:
299 * DMI_MATCH(DMI_BOARD_NAME, "HEL8X"),
300 * _OSI(Linux) unknown effect:
301 * DMI_MATCH(DMI_BOARD_NAME, "IFL91"),
302 */
303 {
304 .callback = dmi_disable_osi_linux,
305 .ident = "Compal",
306 .matches = {
307 DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
308 },
309 },
310 { /* OSI(Linux) touches USB, unknown side-effect */
311 .callback = dmi_disable_osi_linux,
312 .ident = "Dell Dimension 5150",
313 .matches = {
314 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
315 DMI_MATCH(DMI_PRODUCT_NAME, "Dell DM051"),
316 },
317 },
318 { /* OSI(Linux) is a NOP */
319 .callback = dmi_disable_osi_linux,
320 .ident = "Dell i1501",
321 .matches = {
322 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
323 DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1501"),
324 },
325 },
326 { /* OSI(Linux) effect unknown */
327 .callback = dmi_unknown_osi_linux,
328 .ident = "Dell Latitude D830",
329 .matches = {
330 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
331 DMI_MATCH(DMI_PRODUCT_NAME, "Latitude D830"),
332 },
333 },
334 { /* OSI(Linux) effect unknown */
335 .callback = dmi_unknown_osi_linux,
336 .ident = "Dell OptiPlex GX620",
337 .matches = {
338 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
339 DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex GX620"),
340 },
341 },
342 { /* OSI(Linux) causes some USB initialization to not run */
343 .callback = dmi_unknown_osi_linux,
344 .ident = "Dell OptiPlex 755",
345 .matches = {
346 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
347 DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 755"),
348 },
349 },
350 { /* OSI(Linux) effect unknown */
351 .callback = dmi_unknown_osi_linux,
352 .ident = "Dell PE 1900",
353 .matches = {
354 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
355 DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 1900"),
356 },
357 },
358 { /* OSI(Linux) is a NOP */
359 .callback = dmi_unknown_osi_linux,
360 .ident = "Dell PE 1950",
361 .matches = {
362 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
363 DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 1950"),
364 },
365 },
366 { /* OSI(Linux) is a NOP */
367 .callback = dmi_disable_osi_linux,
368 .ident = "Dell PE R200",
369 .matches = {
370 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
371 DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R200"),
372 },
373 },
374 { /* OSI(Linux) touches USB */
375 .callback = dmi_disable_osi_linux,
376 .ident = "Dell PR 390",
377 .matches = {
378 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
379 DMI_MATCH(DMI_PRODUCT_NAME, "Precision WorkStation 390"),
380 },
381 },
382 { /* OSI(Linux) touches USB */
383 .callback = dmi_unknown_osi_linux,
384 .ident = "Dell PR 390",
385 .matches = {
386 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
387 DMI_MATCH(DMI_PRODUCT_NAME, "Precision WorkStation 690"),
388 },
389 },
390 { /* OSI(Linux) unknown - ASL looks benign, but may effect dock/SMM */
391 .callback = dmi_unknown_osi_linux,
392 .ident = "Dell PR M4300",
393 .matches = {
394 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
395 DMI_MATCH(DMI_PRODUCT_NAME, "Precision M4300"),
396 },
397 },
398 { /* OSI(Linux) is a NOP */
399 .callback = dmi_disable_osi_linux,
400 .ident = "Dell Vostro 1000",
401 .matches = {
402 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
403 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1000"),
404 },
405 },
406 { /* OSI(Linux) effect unknown */
407 .callback = dmi_unknown_osi_linux,
408 .ident = "Dell PE SC440",
409 .matches = {
410 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
411 DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge SC440"),
412 },
413 },
414 { /* OSI(Linux) effect unknown */
415 .callback = dmi_unknown_osi_linux,
416 .ident = "Dialogue Flybook V5",
417 .matches = {
418 DMI_MATCH(DMI_SYS_VENDOR, "Dialogue Technology Corporation"),
419 DMI_MATCH(DMI_PRODUCT_NAME, "Flybook V5"),
420 },
421 },
422 /*
423 * Disable OSI(Linux) warnings on all "FUJITSU SIEMENS"
424 *
425 * _OSI(Linux) disables latest Windows BIOS code:
426 * DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pa 2510"),
427 * _OSI(Linux) confirmed to be a NOP:
428 * DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pi 1536"),
429 * DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pi 1556"),
430 * DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Xi 1546"),
431 * DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Mobile V5505"),
432 * _OSI(Linux) unknown effect:
433 * DMI_MATCH(DMI_PRODUCT_NAME, "Amilo M1425"),
434 * DMI_MATCH(DMI_PRODUCT_NAME, "Amilo Si 1520"),
435 */
436 {
437 .callback = dmi_disable_osi_linux,
438 .ident = "Fujitsu Siemens",
439 .matches = {
440 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
441 },
442 },
443 { 187 {
444 .callback = dmi_disable_osi_vista, 188 .callback = dmi_disable_osi_vista,
445 .ident = "Fujitsu Siemens", 189 .ident = "Fujitsu Siemens",
446 .matches = { 190 .matches = {
447 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), 191 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
448 DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Mobile V5505"), 192 DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Mobile V5505"),
449 }, 193 },
450 }, 194 },
195
451 /* 196 /*
452 * Disable OSI(Linux) warnings on all "Hewlett-Packard" 197 * BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
453 * 198 * Linux ignores it, except for the machines enumerated below.
454 * _OSI(Linux) confirmed to be a NOP:
455 * .ident = "HP Pavilion tx 1000"
456 * DMI_MATCH(DMI_BOARD_NAME, "30BF"),
457 * .ident = "HP Pavilion dv2000"
458 * DMI_MATCH(DMI_BOARD_NAME, "30B5"),
459 * .ident = "HP Pavilion dv5000",
460 * DMI_MATCH(DMI_BOARD_NAME, "30A7"),
461 * .ident = "HP Pavilion dv6300 30BC",
462 * DMI_MATCH(DMI_BOARD_NAME, "30BC"),
463 * .ident = "HP Pavilion dv6000",
464 * DMI_MATCH(DMI_BOARD_NAME, "30B7"),
465 * DMI_MATCH(DMI_BOARD_NAME, "30B8"),
466 * .ident = "HP Pavilion dv9000",
467 * DMI_MATCH(DMI_BOARD_NAME, "30B9"),
468 * .ident = "HP Pavilion dv9500",
469 * DMI_MATCH(DMI_BOARD_NAME, "30CB"),
470 * .ident = "HP/Compaq Presario C500",
471 * DMI_MATCH(DMI_BOARD_NAME, "30C6"),
472 * .ident = "HP/Compaq Presario F500",
473 * DMI_MATCH(DMI_BOARD_NAME, "30D3"),
474 * _OSI(Linux) unknown effect:
475 * .ident = "HP Pavilion dv6500",
476 * DMI_MATCH(DMI_BOARD_NAME, "30D0"),
477 */ 199 */
478 { 200
479 .callback = dmi_disable_osi_linux,
480 .ident = "Hewlett-Packard",
481 .matches = {
482 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
483 },
484 },
485 /* 201 /*
486 * Lenovo has a mix of systems OSI(Linux) situations 202 * Lenovo has a mix of systems OSI(Linux) situations
487 * and thus we can not wildcard the vendor. 203 * and thus we can not wildcard the vendor.
@@ -519,113 +235,6 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
519 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X61"), 235 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X61"),
520 }, 236 },
521 }, 237 },
522 {
523 .callback = dmi_disable_osi_linux,
524 .ident = "Lenovo 3000 V100",
525 .matches = {
526 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
527 DMI_MATCH(DMI_PRODUCT_VERSION, "LENOVO3000 V100"),
528 },
529 },
530 {
531 .callback = dmi_disable_osi_linux,
532 .ident = "Lenovo 3000 N100",
533 .matches = {
534 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
535 DMI_MATCH(DMI_PRODUCT_VERSION, "3000 N100"),
536 },
537 },
538 /*
539 * Disable OSI(Linux) warnings on all "LG Electronics"
540 *
541 * _OSI(Linux) confirmed to be a NOP:
542 * DMI_MATCH(DMI_PRODUCT_NAME, "P1-J150B"),
543 * with DMI_MATCH(DMI_BOARD_NAME, "ROCKY"),
544 *
545 * unknown:
546 * DMI_MATCH(DMI_PRODUCT_NAME, "S1-MDGDG"),
547 * with DMI_MATCH(DMI_BOARD_NAME, "ROCKY"),
548 */
549 {
550 .callback = dmi_disable_osi_linux,
551 .ident = "LG",
552 .matches = {
553 DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
554 },
555 },
556 /* NEC - OSI(Linux) effect unknown */
557 {
558 .callback = dmi_unknown_osi_linux,
559 .ident = "NEC VERSA M360",
560 .matches = {
561 DMI_MATCH(DMI_SYS_VENDOR, "NEC Computers SAS"),
562 DMI_MATCH(DMI_PRODUCT_NAME, "NEC VERSA M360"),
563 },
564 },
565 /* Panasonic */
566 {
567 .callback = dmi_unknown_osi_linux,
568 .ident = "Panasonic",
569 .matches = {
570 DMI_MATCH(DMI_SYS_VENDOR, "Matsushita"),
571 /* Toughbook CF-52 */
572 DMI_MATCH(DMI_PRODUCT_NAME, "CF-52CCABVBG"),
573 },
574 },
575 /*
576 * Disable OSI(Linux) warnings on all "Samsung Electronics"
577 *
578 * OSI(Linux) disables PNP0C32 and other BIOS code for Windows:
579 * DMI_MATCH(DMI_PRODUCT_NAME, "R40P/R41P"),
580 * DMI_MATCH(DMI_PRODUCT_NAME, "R59P/R60P/R61P"),
581 */
582 {
583 .callback = dmi_disable_osi_linux,
584 .ident = "Samsung",
585 .matches = {
586 DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
587 },
588 },
589 /*
590 * Disable OSI(Linux) warnings on all "Sony Corporation"
591 *
592 * _OSI(Linux) is a NOP:
593 * DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NR11S_S"),
594 * DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SZ38GP_C"),
595 * DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SZ650N"),
596 * DMI_MATCH(DMI_PRODUCT_NAME, "VGN-TZ21MN_N"),
597 * _OSI(Linux) unknown effect:
598 * DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FZ11M"),
599 */
600 {
601 .callback = dmi_disable_osi_linux,
602 .ident = "Sony",
603 .matches = {
604 DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
605 },
606 },
607 /*
608 * Disable OSI(Linux) warnings on all "TOSHIBA"
609 *
610 * _OSI(Linux) breaks sound (bugzilla 7787):
611 * DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P100"),
612 * DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P105"),
613 * _OSI(Linux) is a NOP:
614 * DMI_MATCH(DMI_PRODUCT_NAME, "Satellite A100"),
615 * DMI_MATCH(DMI_PRODUCT_NAME, "Satellite A210"),
616 * _OSI(Linux) unknown effect:
617 * DMI_MATCH(DMI_PRODUCT_NAME, "Satellite A135"),
618 * DMI_MATCH(DMI_PRODUCT_NAME, "Satellite A200"),
619 * DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P205"),
620 * DMI_MATCH(DMI_PRODUCT_NAME, "Satellite U305"),
621 */
622 {
623 .callback = dmi_disable_osi_linux,
624 .ident = "Toshiba",
625 .matches = {
626 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
627 },
628 },
629 {} 238 {}
630}; 239};
631 240
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index ccae305ee55d..7edf6d913c13 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -48,6 +48,23 @@ EXPORT_SYMBOL(acpi_root_dir);
48 48
49#define STRUCT_TO_INT(s) (*((int*)&s)) 49#define STRUCT_TO_INT(s) (*((int*)&s))
50 50
51static int set_power_nocheck(const struct dmi_system_id *id)
52{
53 printk(KERN_NOTICE PREFIX "%s detected - "
54 "disable power check in power transistion\n", id->ident);
55 acpi_power_nocheck = 1;
56 return 0;
57}
58static struct dmi_system_id __cpuinitdata power_nocheck_dmi_table[] = {
59 {
60 set_power_nocheck, "HP Pavilion 05", {
61 DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
62 DMI_MATCH(DMI_SYS_VENDOR, "HP Pavilion 05"),
63 DMI_MATCH(DMI_PRODUCT_VERSION, "2001211RE101GLEND") }, NULL},
64 {},
65};
66
67
51/* -------------------------------------------------------------------------- 68/* --------------------------------------------------------------------------
52 Device Management 69 Device Management
53 -------------------------------------------------------------------------- */ 70 -------------------------------------------------------------------------- */
@@ -77,7 +94,7 @@ EXPORT_SYMBOL(acpi_bus_get_device);
77int acpi_bus_get_status(struct acpi_device *device) 94int acpi_bus_get_status(struct acpi_device *device)
78{ 95{
79 acpi_status status = AE_OK; 96 acpi_status status = AE_OK;
80 unsigned long sta = 0; 97 unsigned long long sta = 0;
81 98
82 99
83 if (!device) 100 if (!device)
@@ -95,21 +112,21 @@ int acpi_bus_get_status(struct acpi_device *device)
95 } 112 }
96 113
97 /* 114 /*
98 * Otherwise we assume the status of our parent (unless we don't 115 * According to ACPI spec some device can be present and functional
 99 * have one, in which case status is implied). 116 * even if the parent is not present or not functional.
117 * In such conditions the child device should not inherit the status
118 * from the parent.
100 */ 119 */
101 else if (device->parent)
102 device->status = device->parent->status;
103 else 120 else
104 STRUCT_TO_INT(device->status) = 121 STRUCT_TO_INT(device->status) =
105 ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_ENABLED | 122 ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_ENABLED |
106 ACPI_STA_DEVICE_UI | ACPI_STA_DEVICE_FUNCTIONING; 123 ACPI_STA_DEVICE_UI | ACPI_STA_DEVICE_FUNCTIONING;
107 124
108 if (device->status.functional && !device->status.present) { 125 if (device->status.functional && !device->status.present) {
109 printk(KERN_WARNING PREFIX "Device [%s] status [%08x]: " 126 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] status [%08x]: "
110 "functional but not present; setting present\n", 127 "functional but not present;\n",
111 device->pnp.bus_id, (u32) STRUCT_TO_INT(device->status)); 128 device->pnp.bus_id,
112 device->status.present = 1; 129 (u32) STRUCT_TO_INT(device->status)));
113 } 130 }
114 131
115 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] status [%08x]\n", 132 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] status [%08x]\n",
@@ -155,7 +172,7 @@ int acpi_bus_get_power(acpi_handle handle, int *state)
155 int result = 0; 172 int result = 0;
156 acpi_status status = 0; 173 acpi_status status = 0;
157 struct acpi_device *device = NULL; 174 struct acpi_device *device = NULL;
158 unsigned long psc = 0; 175 unsigned long long psc = 0;
159 176
160 177
161 result = acpi_bus_get_device(handle, &device); 178 result = acpi_bus_get_device(handle, &device);
@@ -223,7 +240,19 @@ int acpi_bus_set_power(acpi_handle handle, int state)
223 /* 240 /*
224 * Get device's current power state 241 * Get device's current power state
225 */ 242 */
226 acpi_bus_get_power(device->handle, &device->power.state); 243 if (!acpi_power_nocheck) {
244 /*
 245	 * Maybe the incorrect power state is returned on the bogus
 246	 * BIOS, which is different from the real power state.
 247	 * For example: the BIOS returns D0 state and the real power
 248	 * state is D3. OS expects to set the device to D0 state. In
 249	 * such a case, if OS uses the power state returned by the BIOS,
 250	 * the device can't be transitioned to the correct power state.
 251	 * So if acpi_power_nocheck is set, it is unnecessary to
 252	 * get the power state by calling acpi_bus_get_power.
253 */
254 acpi_bus_get_power(device->handle, &device->power.state);
255 }
227 if ((state == device->power.state) && !device->flags.force_power_state) { 256 if ((state == device->power.state) && !device->flags.force_power_state) {
228 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at D%d\n", 257 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at D%d\n",
229 state)); 258 state));
@@ -496,6 +525,19 @@ static int acpi_bus_check_scope(struct acpi_device *device)
496 return 0; 525 return 0;
497} 526}
498 527
528static BLOCKING_NOTIFIER_HEAD(acpi_bus_notify_list);
529int register_acpi_bus_notifier(struct notifier_block *nb)
530{
531 return blocking_notifier_chain_register(&acpi_bus_notify_list, nb);
532}
533EXPORT_SYMBOL_GPL(register_acpi_bus_notifier);
534
535void unregister_acpi_bus_notifier(struct notifier_block *nb)
536{
537 blocking_notifier_chain_unregister(&acpi_bus_notify_list, nb);
538}
539EXPORT_SYMBOL_GPL(unregister_acpi_bus_notifier);
540
499/** 541/**
500 * acpi_bus_notify 542 * acpi_bus_notify
501 * --------------- 543 * ---------------
@@ -506,6 +548,8 @@ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
506 int result = 0; 548 int result = 0;
507 struct acpi_device *device = NULL; 549 struct acpi_device *device = NULL;
508 550
551 blocking_notifier_call_chain(&acpi_bus_notify_list,
552 type, (void *)handle);
509 553
510 if (acpi_bus_get_device(handle, &device)) 554 if (acpi_bus_get_device(handle, &device))
511 return; 555 return;
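
A sketch of how a client would consume the notifier chain exported above; only register_acpi_bus_notifier()/unregister_acpi_bus_notifier() and the (event, handle) calling convention come from this patch, the callback name and body are illustrative.

	#include <linux/kernel.h>
	#include <linux/notifier.h>
	#include <acpi/acpi_bus.h>

	/* Hypothetical consumer of the ACPI bus notifier chain. */
	static int example_bus_notify(struct notifier_block *nb,
				      unsigned long event, void *data)
	{
		acpi_handle handle = data;	/* handle of the notifying object */

		if (event == ACPI_NOTIFY_BUS_CHECK ||
		    event == ACPI_NOTIFY_DEVICE_CHECK)
			pr_debug("bus/device check on handle %p\n", handle);

		return NOTIFY_OK;		/* let the chain continue */
	}

	static struct notifier_block example_nb = {
		.notifier_call = example_bus_notify,
	};

	/* register_acpi_bus_notifier(&example_nb) at init time,
	 * unregister_acpi_bus_notifier(&example_nb) on exit. */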
@@ -644,6 +688,14 @@ void __init acpi_early_init(void)
644 if (acpi_disabled) 688 if (acpi_disabled)
645 return; 689 return;
646 690
691 /*
692 * ACPI CA initializes acpi_dbg_level to non-zero, which means
693 * we get debug output merely by turning on CONFIG_ACPI_DEBUG.
694 * Turn it off so we don't get output unless the user specifies
695 * acpi.debug_level.
696 */
697 acpi_dbg_level = 0;
698
647 printk(KERN_INFO PREFIX "Core revision %08x\n", ACPI_CA_VERSION); 699 printk(KERN_INFO PREFIX "Core revision %08x\n", ACPI_CA_VERSION);
648 700
649 /* enable workarounds, unless strict ACPI spec. compliance */ 701 /* enable workarounds, unless strict ACPI spec. compliance */
@@ -730,7 +782,7 @@ static int __init acpi_bus_init(void)
730 "Unable to initialize ACPI OS objects\n"); 782 "Unable to initialize ACPI OS objects\n");
731 goto error1; 783 goto error1;
732 } 784 }
733#ifdef CONFIG_ACPI_EC 785
734 /* 786 /*
735 * ACPI 2.0 requires the EC driver to be loaded and work before 787 * ACPI 2.0 requires the EC driver to be loaded and work before
736 * the EC device is found in the namespace (i.e. before acpi_initialize_objects() 788 * the EC device is found in the namespace (i.e. before acpi_initialize_objects()
@@ -741,7 +793,6 @@ static int __init acpi_bus_init(void)
741 */ 793 */
742 status = acpi_ec_ecdt_probe(); 794 status = acpi_ec_ecdt_probe();
743 /* Ignore result. Not having an ECDT is not fatal. */ 795 /* Ignore result. Not having an ECDT is not fatal. */
744#endif
745 796
746 status = acpi_initialize_objects(ACPI_FULL_INITIALIZATION); 797 status = acpi_initialize_objects(ACPI_FULL_INITIALIZATION);
747 if (ACPI_FAILURE(status)) { 798 if (ACPI_FAILURE(status)) {
@@ -749,6 +800,12 @@ static int __init acpi_bus_init(void)
749 goto error1; 800 goto error1;
750 } 801 }
751 802
803 /*
 804	 * The EC region may be needed by bus_scan/acpi_get_devices, so it
 805	 * is necessary to enable it as early as possible.
806 */
807 acpi_boot_ec_enable();
808
752 printk(KERN_INFO PREFIX "Interpreter enabled\n"); 809 printk(KERN_INFO PREFIX "Interpreter enabled\n");
753 810
754 /* Initialize sleep structures */ 811 /* Initialize sleep structures */
@@ -818,7 +875,11 @@ static int __init acpi_init(void)
818 } 875 }
819 } else 876 } else
820 disable_acpi(); 877 disable_acpi();
821 878 /*
 879	 * If the laptop matches the DMI check table, the power state check
 880	 * will be disabled in the course of device power transition.
881 */
882 dmi_check_system(power_nocheck_dmi_table);
822 return result; 883 return result;
823} 884}
824 885
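
The acpi_init() change above relies on the standard DMI-quirk pattern: a table of matches plus dmi_check_system(). A minimal self-contained sketch of that pattern follows; the vendor/product strings and the callback are placeholders, not hardware known to need the power-check workaround.

	#include <linux/kernel.h>
	#include <linux/init.h>
	#include <linux/dmi.h>

	static int example_quirk(const struct dmi_system_id *id)
	{
		printk(KERN_NOTICE "%s detected - applying example quirk\n",
		       id->ident);
		return 0;
	}

	static struct dmi_system_id example_dmi_table[] __initdata = {
		{
			.callback = example_quirk,
			.ident = "Example vendor laptop",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
				DMI_MATCH(DMI_PRODUCT_NAME, "Example Model"),
			},
		},
		{}	/* terminating entry */
	};

	/* dmi_check_system(example_dmi_table) runs the callback on a match,
	 * just as acpi_init() now does for power_nocheck_dmi_table. */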
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 1dfec413588c..171fd914f435 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -33,7 +33,6 @@
33#include <acpi/acpi_bus.h> 33#include <acpi/acpi_bus.h>
34#include <acpi/acpi_drivers.h> 34#include <acpi/acpi_drivers.h>
35 35
36#define ACPI_BUTTON_COMPONENT 0x00080000
37#define ACPI_BUTTON_CLASS "button" 36#define ACPI_BUTTON_CLASS "button"
38#define ACPI_BUTTON_FILE_INFO "info" 37#define ACPI_BUTTON_FILE_INFO "info"
39#define ACPI_BUTTON_FILE_STATE "state" 38#define ACPI_BUTTON_FILE_STATE "state"
@@ -145,7 +144,7 @@ static int acpi_button_state_seq_show(struct seq_file *seq, void *offset)
145{ 144{
146 struct acpi_button *button = seq->private; 145 struct acpi_button *button = seq->private;
147 acpi_status status; 146 acpi_status status;
148 unsigned long state; 147 unsigned long long state;
149 148
150 if (!button || !button->device) 149 if (!button || !button->device)
151 return 0; 150 return 0;
@@ -253,7 +252,7 @@ static int acpi_button_remove_fs(struct acpi_device *device)
253 -------------------------------------------------------------------------- */ 252 -------------------------------------------------------------------------- */
254static int acpi_lid_send_state(struct acpi_button *button) 253static int acpi_lid_send_state(struct acpi_button *button)
255{ 254{
256 unsigned long state; 255 unsigned long long state;
257 acpi_status status; 256 acpi_status status;
258 257
259 status = acpi_evaluate_integer(button->device->handle, "_LID", NULL, 258 status = acpi_evaluate_integer(button->device->handle, "_LID", NULL,
@@ -262,6 +261,7 @@ static int acpi_lid_send_state(struct acpi_button *button)
262 return -ENODEV; 261 return -ENODEV;
263 /* input layer checks if event is redundant */ 262 /* input layer checks if event is redundant */
264 input_report_switch(button->input, SW_LID, !state); 263 input_report_switch(button->input, SW_LID, !state);
264 input_sync(button->input);
265 return 0; 265 return 0;
266} 266}
267 267
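
The unsigned long to unsigned long long changes throughout this series follow the ACPI integer type widening to 64 bits. A sketch of the calling pattern used by the lid handler above; the helper name is made up, the ACPI and input calls are the real ones.

	#include <linux/errno.h>
	#include <linux/input.h>
	#include <acpi/acpi_bus.h>

	/* Read _LID and forward it as a switch event (_LID returns nonzero
	 * when the lid is open; SW_LID is reported active when closed). */
	static int example_report_lid(acpi_handle handle, struct input_dev *input)
	{
		unsigned long long state;	/* 64-bit ACPI integer */
		acpi_status status;

		status = acpi_evaluate_integer(handle, "_LID", NULL, &state);
		if (ACPI_FAILURE(status))
			return -ENODEV;

		input_report_switch(input, SW_LID, !state);
		input_sync(input);		/* flush the event to user space */
		return 0;
	}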
@@ -285,8 +285,8 @@ static void acpi_button_notify(acpi_handle handle, u32 event, void *data)
285 input_report_key(input, keycode, 1); 285 input_report_key(input, keycode, 1);
286 input_sync(input); 286 input_sync(input);
287 input_report_key(input, keycode, 0); 287 input_report_key(input, keycode, 0);
288 input_sync(input);
288 } 289 }
289 input_sync(input);
290 290
291 acpi_bus_generate_proc_event(button->device, event, 291 acpi_bus_generate_proc_event(button->device, event,
292 ++button->pushed); 292 ++button->pushed);
@@ -384,7 +384,7 @@ static int acpi_button_add(struct acpi_device *device)
384 return -ENOMEM; 384 return -ENOMEM;
385 385
386 button->device = device; 386 button->device = device;
387 acpi_driver_data(device) = button; 387 device->driver_data = button;
388 388
389 button->input = input = input_allocate_device(); 389 button->input = input = input_allocate_device();
390 if (!input) { 390 if (!input) {
@@ -478,7 +478,7 @@ static int acpi_button_add(struct acpi_device *device)
478 device->wakeup.gpe_number, 478 device->wakeup.gpe_number,
479 ACPI_GPE_TYPE_WAKE_RUN); 479 ACPI_GPE_TYPE_WAKE_RUN);
480 acpi_enable_gpe(device->wakeup.gpe_device, 480 acpi_enable_gpe(device->wakeup.gpe_device,
481 device->wakeup.gpe_number, ACPI_NOT_ISR); 481 device->wakeup.gpe_number);
482 device->wakeup.state.enabled = 1; 482 device->wakeup.state.enabled = 1;
483 } 483 }
484 484
diff --git a/drivers/acpi/cm_sbs.c b/drivers/acpi/cm_sbs.c
index f9db4f444bd0..307963bd1043 100644
--- a/drivers/acpi/cm_sbs.c
+++ b/drivers/acpi/cm_sbs.c
@@ -34,7 +34,6 @@
34ACPI_MODULE_NAME("cm_sbs"); 34ACPI_MODULE_NAME("cm_sbs");
35#define ACPI_AC_CLASS "ac_adapter" 35#define ACPI_AC_CLASS "ac_adapter"
36#define ACPI_BATTERY_CLASS "battery" 36#define ACPI_BATTERY_CLASS "battery"
37#define ACPI_SBS_COMPONENT 0x00080000
38#define _COMPONENT ACPI_SBS_COMPONENT 37#define _COMPONENT ACPI_SBS_COMPONENT
39static struct proc_dir_entry *acpi_ac_dir; 38static struct proc_dir_entry *acpi_ac_dir;
40static struct proc_dir_entry *acpi_battery_dir; 39static struct proc_dir_entry *acpi_battery_dir;
@@ -52,8 +51,8 @@ struct proc_dir_entry *acpi_lock_ac_dir(void)
52 if (acpi_ac_dir) { 51 if (acpi_ac_dir) {
53 lock_ac_dir_cnt++; 52 lock_ac_dir_cnt++;
54 } else { 53 } else {
55 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, 54 printk(KERN_ERR PREFIX
56 "Cannot create %s\n", ACPI_AC_CLASS)); 55 "Cannot create %s\n", ACPI_AC_CLASS);
57 } 56 }
58 mutex_unlock(&cm_sbs_mutex); 57 mutex_unlock(&cm_sbs_mutex);
59 return acpi_ac_dir; 58 return acpi_ac_dir;
@@ -83,8 +82,8 @@ struct proc_dir_entry *acpi_lock_battery_dir(void)
83 if (acpi_battery_dir) { 82 if (acpi_battery_dir) {
84 lock_battery_dir_cnt++; 83 lock_battery_dir_cnt++;
85 } else { 84 } else {
86 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, 85 printk(KERN_ERR PREFIX
87 "Cannot create %s\n", ACPI_BATTERY_CLASS)); 86 "Cannot create %s\n", ACPI_BATTERY_CLASS);
88 } 87 }
89 mutex_unlock(&cm_sbs_mutex); 88 mutex_unlock(&cm_sbs_mutex);
90 return acpi_battery_dir; 89 return acpi_battery_dir;
@@ -105,9 +104,3 @@ void acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir_param)
105 return; 104 return;
106} 105}
107EXPORT_SYMBOL(acpi_unlock_battery_dir); 106EXPORT_SYMBOL(acpi_unlock_battery_dir);
108
109static int __init acpi_cm_sbs_init(void)
110{
111 return 0;
112}
113subsys_initcall(acpi_cm_sbs_init);
diff --git a/drivers/acpi/container.c b/drivers/acpi/container.c
index 3c25ec7a1871..17020c12623c 100644
--- a/drivers/acpi/container.c
+++ b/drivers/acpi/container.c
@@ -41,7 +41,6 @@
41#define INSTALL_NOTIFY_HANDLER 1 41#define INSTALL_NOTIFY_HANDLER 1
42#define UNINSTALL_NOTIFY_HANDLER 2 42#define UNINSTALL_NOTIFY_HANDLER 2
43 43
44#define ACPI_CONTAINER_COMPONENT 0x01000000
45#define _COMPONENT ACPI_CONTAINER_COMPONENT 44#define _COMPONENT ACPI_CONTAINER_COMPONENT
46ACPI_MODULE_NAME("container"); 45ACPI_MODULE_NAME("container");
47 46
@@ -76,7 +75,7 @@ static int is_device_present(acpi_handle handle)
76{ 75{
77 acpi_handle temp; 76 acpi_handle temp;
78 acpi_status status; 77 acpi_status status;
79 unsigned long sta; 78 unsigned long long sta;
80 79
81 80
82 status = acpi_get_handle(handle, "_STA", &temp); 81 status = acpi_get_handle(handle, "_STA", &temp);
@@ -108,7 +107,7 @@ static int acpi_container_add(struct acpi_device *device)
108 container->handle = device->handle; 107 container->handle = device->handle;
109 strcpy(acpi_device_name(device), ACPI_CONTAINER_DEVICE_NAME); 108 strcpy(acpi_device_name(device), ACPI_CONTAINER_DEVICE_NAME);
110 strcpy(acpi_device_class(device), ACPI_CONTAINER_CLASS); 109 strcpy(acpi_device_class(device), ACPI_CONTAINER_CLASS);
111 acpi_driver_data(device) = container; 110 device->driver_data = container;
112 111
113 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device <%s> bid <%s>\n", 112 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device <%s> bid <%s>\n",
114 acpi_device_name(device), acpi_device_bid(device))); 113 acpi_device_name(device), acpi_device_bid(device)));
diff --git a/drivers/acpi/debug.c b/drivers/acpi/debug.c
index 6df564f4ca6e..c48396892008 100644
--- a/drivers/acpi/debug.c
+++ b/drivers/acpi/debug.c
@@ -44,11 +44,24 @@ static const struct acpi_dlayer acpi_debug_layers[] = {
44 ACPI_DEBUG_INIT(ACPI_CA_DISASSEMBLER), 44 ACPI_DEBUG_INIT(ACPI_CA_DISASSEMBLER),
45 ACPI_DEBUG_INIT(ACPI_COMPILER), 45 ACPI_DEBUG_INIT(ACPI_COMPILER),
46 ACPI_DEBUG_INIT(ACPI_TOOLS), 46 ACPI_DEBUG_INIT(ACPI_TOOLS),
47
48 ACPI_DEBUG_INIT(ACPI_BUS_COMPONENT),
49 ACPI_DEBUG_INIT(ACPI_AC_COMPONENT),
50 ACPI_DEBUG_INIT(ACPI_BATTERY_COMPONENT),
51 ACPI_DEBUG_INIT(ACPI_BUTTON_COMPONENT),
52 ACPI_DEBUG_INIT(ACPI_SBS_COMPONENT),
53 ACPI_DEBUG_INIT(ACPI_FAN_COMPONENT),
54 ACPI_DEBUG_INIT(ACPI_PCI_COMPONENT),
55 ACPI_DEBUG_INIT(ACPI_POWER_COMPONENT),
56 ACPI_DEBUG_INIT(ACPI_CONTAINER_COMPONENT),
57 ACPI_DEBUG_INIT(ACPI_SYSTEM_COMPONENT),
58 ACPI_DEBUG_INIT(ACPI_THERMAL_COMPONENT),
59 ACPI_DEBUG_INIT(ACPI_MEMORY_DEVICE_COMPONENT),
60 ACPI_DEBUG_INIT(ACPI_VIDEO_COMPONENT),
61 ACPI_DEBUG_INIT(ACPI_PROCESSOR_COMPONENT),
47}; 62};
48 63
49static const struct acpi_dlevel acpi_debug_levels[] = { 64static const struct acpi_dlevel acpi_debug_levels[] = {
50 ACPI_DEBUG_INIT(ACPI_LV_ERROR),
51 ACPI_DEBUG_INIT(ACPI_LV_WARN),
52 ACPI_DEBUG_INIT(ACPI_LV_INIT), 65 ACPI_DEBUG_INIT(ACPI_LV_INIT),
53 ACPI_DEBUG_INIT(ACPI_LV_DEBUG_OBJECT), 66 ACPI_DEBUG_INIT(ACPI_LV_DEBUG_OBJECT),
54 ACPI_DEBUG_INIT(ACPI_LV_INFO), 67 ACPI_DEBUG_INIT(ACPI_LV_INFO),
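
The component entries added above make the per-driver debug layers selectable through acpi.debug_layer; inside a driver the component is chosen the usual ACPICA way. A sketch, assuming the component macro is now provided by a shared ACPI header (this patch removes the per-driver copies).

	#include <acpi/acpi_bus.h>
	#include <acpi/acpi_drivers.h>

	#define _COMPONENT		ACPI_BUTTON_COMPONENT
	ACPI_MODULE_NAME("example");

	static void example_debug_use(struct acpi_device *device)
	{
		/* Emitted only when acpi.debug_layer includes
		 * ACPI_BUTTON_COMPONENT and acpi.debug_level includes
		 * ACPI_LV_INFO. */
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] handled\n",
				  acpi_device_bid(device)));
	}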
diff --git a/drivers/acpi/dispatcher/dsmethod.c b/drivers/acpi/dispatcher/dsmethod.c
index 4613b9ca5792..279a5a60a0dd 100644
--- a/drivers/acpi/dispatcher/dsmethod.c
+++ b/drivers/acpi/dispatcher/dsmethod.c
@@ -103,6 +103,9 @@ acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state)
103 NULL); 103 NULL);
104 acpi_ex_enter_interpreter(); 104 acpi_ex_enter_interpreter();
105 } 105 }
106
107 acpi_ds_clear_implicit_return(walk_state);
108
106#ifdef ACPI_DISASSEMBLER 109#ifdef ACPI_DISASSEMBLER
107 if (ACPI_FAILURE(status)) { 110 if (ACPI_FAILURE(status)) {
108 111
diff --git a/drivers/acpi/dispatcher/dsmthdat.c b/drivers/acpi/dispatcher/dsmthdat.c
index 13c43eac35db..d03f81bd1bcb 100644
--- a/drivers/acpi/dispatcher/dsmthdat.c
+++ b/drivers/acpi/dispatcher/dsmthdat.c
@@ -43,7 +43,6 @@
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acdispat.h> 45#include <acpi/acdispat.h>
46#include <acpi/amlcode.h>
47#include <acpi/acnamesp.h> 46#include <acpi/acnamesp.h>
48#include <acpi/acinterp.h> 47#include <acpi/acinterp.h>
49 48
@@ -52,11 +51,11 @@ ACPI_MODULE_NAME("dsmthdat")
52 51
53/* Local prototypes */ 52/* Local prototypes */
54static void 53static void
55acpi_ds_method_data_delete_value(u16 opcode, 54acpi_ds_method_data_delete_value(u8 type,
56 u32 index, struct acpi_walk_state *walk_state); 55 u32 index, struct acpi_walk_state *walk_state);
57 56
58static acpi_status 57static acpi_status
59acpi_ds_method_data_set_value(u16 opcode, 58acpi_ds_method_data_set_value(u8 type,
60 u32 index, 59 u32 index,
61 union acpi_operand_object *object, 60 union acpi_operand_object *object,
62 struct acpi_walk_state *walk_state); 61 struct acpi_walk_state *walk_state);
@@ -216,7 +215,7 @@ acpi_ds_method_data_init_args(union acpi_operand_object **params,
216 * Store the argument in the method/walk descriptor. 215 * Store the argument in the method/walk descriptor.
217 * Do not copy the arg in order to implement call by reference 216 * Do not copy the arg in order to implement call by reference
218 */ 217 */
219 status = acpi_ds_method_data_set_value(AML_ARG_OP, index, 218 status = acpi_ds_method_data_set_value(ACPI_REFCLASS_ARG, index,
220 params[index], 219 params[index],
221 walk_state); 220 walk_state);
222 if (ACPI_FAILURE(status)) { 221 if (ACPI_FAILURE(status)) {
@@ -234,7 +233,8 @@ acpi_ds_method_data_init_args(union acpi_operand_object **params,
234 * 233 *
235 * FUNCTION: acpi_ds_method_data_get_node 234 * FUNCTION: acpi_ds_method_data_get_node
236 * 235 *
237 * PARAMETERS: Opcode - Either AML_LOCAL_OP or AML_ARG_OP 236 * PARAMETERS: Type - Either ACPI_REFCLASS_LOCAL or
237 * ACPI_REFCLASS_ARG
238 * Index - Which Local or Arg whose type to get 238 * Index - Which Local or Arg whose type to get
239 * walk_state - Current walk state object 239 * walk_state - Current walk state object
240 * Node - Where the node is returned. 240 * Node - Where the node is returned.
@@ -246,7 +246,7 @@ acpi_ds_method_data_init_args(union acpi_operand_object **params,
246 ******************************************************************************/ 246 ******************************************************************************/
247 247
248acpi_status 248acpi_status
249acpi_ds_method_data_get_node(u16 opcode, 249acpi_ds_method_data_get_node(u8 type,
250 u32 index, 250 u32 index,
251 struct acpi_walk_state *walk_state, 251 struct acpi_walk_state *walk_state,
252 struct acpi_namespace_node **node) 252 struct acpi_namespace_node **node)
@@ -256,8 +256,8 @@ acpi_ds_method_data_get_node(u16 opcode,
256 /* 256 /*
257 * Method Locals and Arguments are supported 257 * Method Locals and Arguments are supported
258 */ 258 */
259 switch (opcode) { 259 switch (type) {
260 case AML_LOCAL_OP: 260 case ACPI_REFCLASS_LOCAL:
261 261
262 if (index > ACPI_METHOD_MAX_LOCAL) { 262 if (index > ACPI_METHOD_MAX_LOCAL) {
263 ACPI_ERROR((AE_INFO, 263 ACPI_ERROR((AE_INFO,
@@ -271,7 +271,7 @@ acpi_ds_method_data_get_node(u16 opcode,
271 *node = &walk_state->local_variables[index]; 271 *node = &walk_state->local_variables[index];
272 break; 272 break;
273 273
274 case AML_ARG_OP: 274 case ACPI_REFCLASS_ARG:
275 275
276 if (index > ACPI_METHOD_MAX_ARG) { 276 if (index > ACPI_METHOD_MAX_ARG) {
277 ACPI_ERROR((AE_INFO, 277 ACPI_ERROR((AE_INFO,
@@ -286,8 +286,8 @@ acpi_ds_method_data_get_node(u16 opcode,
286 break; 286 break;
287 287
288 default: 288 default:
289 ACPI_ERROR((AE_INFO, "Opcode %d is invalid", opcode)); 289 ACPI_ERROR((AE_INFO, "Type %d is invalid", type));
290 return_ACPI_STATUS(AE_AML_BAD_OPCODE); 290 return_ACPI_STATUS(AE_TYPE);
291 } 291 }
292 292
293 return_ACPI_STATUS(AE_OK); 293 return_ACPI_STATUS(AE_OK);
@@ -297,7 +297,8 @@ acpi_ds_method_data_get_node(u16 opcode,
297 * 297 *
298 * FUNCTION: acpi_ds_method_data_set_value 298 * FUNCTION: acpi_ds_method_data_set_value
299 * 299 *
300 * PARAMETERS: Opcode - Either AML_LOCAL_OP or AML_ARG_OP 300 * PARAMETERS: Type - Either ACPI_REFCLASS_LOCAL or
301 * ACPI_REFCLASS_ARG
301 * Index - Which Local or Arg to get 302 * Index - Which Local or Arg to get
302 * Object - Object to be inserted into the stack entry 303 * Object - Object to be inserted into the stack entry
303 * walk_state - Current walk state object 304 * walk_state - Current walk state object
@@ -310,7 +311,7 @@ acpi_ds_method_data_get_node(u16 opcode,
310 ******************************************************************************/ 311 ******************************************************************************/
311 312
312static acpi_status 313static acpi_status
313acpi_ds_method_data_set_value(u16 opcode, 314acpi_ds_method_data_set_value(u8 type,
314 u32 index, 315 u32 index,
315 union acpi_operand_object *object, 316 union acpi_operand_object *object,
316 struct acpi_walk_state *walk_state) 317 struct acpi_walk_state *walk_state)
@@ -321,13 +322,13 @@ acpi_ds_method_data_set_value(u16 opcode,
321 ACPI_FUNCTION_TRACE(ds_method_data_set_value); 322 ACPI_FUNCTION_TRACE(ds_method_data_set_value);
322 323
323 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, 324 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
324 "NewObj %p Opcode %X, Refs=%d [%s]\n", object, 325 "NewObj %p Type %2.2X, Refs=%d [%s]\n", object,
325 opcode, object->common.reference_count, 326 type, object->common.reference_count,
326 acpi_ut_get_type_name(object->common.type))); 327 acpi_ut_get_type_name(object->common.type)));
327 328
328 /* Get the namespace node for the arg/local */ 329 /* Get the namespace node for the arg/local */
329 330
330 status = acpi_ds_method_data_get_node(opcode, index, walk_state, &node); 331 status = acpi_ds_method_data_get_node(type, index, walk_state, &node);
331 if (ACPI_FAILURE(status)) { 332 if (ACPI_FAILURE(status)) {
332 return_ACPI_STATUS(status); 333 return_ACPI_STATUS(status);
333 } 334 }
@@ -350,7 +351,8 @@ acpi_ds_method_data_set_value(u16 opcode,
350 * 351 *
351 * FUNCTION: acpi_ds_method_data_get_value 352 * FUNCTION: acpi_ds_method_data_get_value
352 * 353 *
353 * PARAMETERS: Opcode - Either AML_LOCAL_OP or AML_ARG_OP 354 * PARAMETERS: Type - Either ACPI_REFCLASS_LOCAL or
355 * ACPI_REFCLASS_ARG
354 * Index - Which local_var or argument to get 356 * Index - Which local_var or argument to get
355 * walk_state - Current walk state object 357 * walk_state - Current walk state object
356 * dest_desc - Where Arg or Local value is returned 358 * dest_desc - Where Arg or Local value is returned
@@ -363,7 +365,7 @@ acpi_ds_method_data_set_value(u16 opcode,
363 ******************************************************************************/ 365 ******************************************************************************/
364 366
365acpi_status 367acpi_status
366acpi_ds_method_data_get_value(u16 opcode, 368acpi_ds_method_data_get_value(u8 type,
367 u32 index, 369 u32 index,
368 struct acpi_walk_state *walk_state, 370 struct acpi_walk_state *walk_state,
369 union acpi_operand_object **dest_desc) 371 union acpi_operand_object **dest_desc)
@@ -383,7 +385,7 @@ acpi_ds_method_data_get_value(u16 opcode,
383 385
384 /* Get the namespace node for the arg/local */ 386 /* Get the namespace node for the arg/local */
385 387
386 status = acpi_ds_method_data_get_node(opcode, index, walk_state, &node); 388 status = acpi_ds_method_data_get_node(type, index, walk_state, &node);
387 if (ACPI_FAILURE(status)) { 389 if (ACPI_FAILURE(status)) {
388 return_ACPI_STATUS(status); 390 return_ACPI_STATUS(status);
389 } 391 }
@@ -419,8 +421,8 @@ acpi_ds_method_data_get_value(u16 opcode,
419 /* Otherwise, return the error */ 421 /* Otherwise, return the error */
420 422
421 else 423 else
422 switch (opcode) { 424 switch (type) {
423 case AML_ARG_OP: 425 case ACPI_REFCLASS_ARG:
424 426
425 ACPI_ERROR((AE_INFO, 427 ACPI_ERROR((AE_INFO,
426 "Uninitialized Arg[%d] at node %p", 428 "Uninitialized Arg[%d] at node %p",
@@ -428,7 +430,7 @@ acpi_ds_method_data_get_value(u16 opcode,
428 430
429 return_ACPI_STATUS(AE_AML_UNINITIALIZED_ARG); 431 return_ACPI_STATUS(AE_AML_UNINITIALIZED_ARG);
430 432
431 case AML_LOCAL_OP: 433 case ACPI_REFCLASS_LOCAL:
432 434
433 ACPI_ERROR((AE_INFO, 435 ACPI_ERROR((AE_INFO,
434 "Uninitialized Local[%d] at node %p", 436 "Uninitialized Local[%d] at node %p",
@@ -437,9 +439,10 @@ acpi_ds_method_data_get_value(u16 opcode,
437 return_ACPI_STATUS(AE_AML_UNINITIALIZED_LOCAL); 439 return_ACPI_STATUS(AE_AML_UNINITIALIZED_LOCAL);
438 440
439 default: 441 default:
442
440 ACPI_ERROR((AE_INFO, 443 ACPI_ERROR((AE_INFO,
441 "Not a Arg/Local opcode: %X", 444 "Not a Arg/Local opcode: %X",
442 opcode)); 445 type));
443 return_ACPI_STATUS(AE_AML_INTERNAL); 446 return_ACPI_STATUS(AE_AML_INTERNAL);
444 } 447 }
445 } 448 }
@@ -458,7 +461,8 @@ acpi_ds_method_data_get_value(u16 opcode,
458 * 461 *
459 * FUNCTION: acpi_ds_method_data_delete_value 462 * FUNCTION: acpi_ds_method_data_delete_value
460 * 463 *
461 * PARAMETERS: Opcode - Either AML_LOCAL_OP or AML_ARG_OP 464 * PARAMETERS: Type - Either ACPI_REFCLASS_LOCAL or
465 * ACPI_REFCLASS_ARG
462 * Index - Which local_var or argument to delete 466 * Index - Which local_var or argument to delete
463 * walk_state - Current walk state object 467 * walk_state - Current walk state object
464 * 468 *
@@ -470,7 +474,7 @@ acpi_ds_method_data_get_value(u16 opcode,
470 ******************************************************************************/ 474 ******************************************************************************/
471 475
472static void 476static void
473acpi_ds_method_data_delete_value(u16 opcode, 477acpi_ds_method_data_delete_value(u8 type,
474 u32 index, struct acpi_walk_state *walk_state) 478 u32 index, struct acpi_walk_state *walk_state)
475{ 479{
476 acpi_status status; 480 acpi_status status;
@@ -481,7 +485,7 @@ acpi_ds_method_data_delete_value(u16 opcode,
481 485
482 /* Get the namespace node for the arg/local */ 486 /* Get the namespace node for the arg/local */
483 487
484 status = acpi_ds_method_data_get_node(opcode, index, walk_state, &node); 488 status = acpi_ds_method_data_get_node(type, index, walk_state, &node);
485 if (ACPI_FAILURE(status)) { 489 if (ACPI_FAILURE(status)) {
486 return_VOID; 490 return_VOID;
487 } 491 }
@@ -514,7 +518,8 @@ acpi_ds_method_data_delete_value(u16 opcode,
514 * 518 *
515 * FUNCTION: acpi_ds_store_object_to_local 519 * FUNCTION: acpi_ds_store_object_to_local
516 * 520 *
517 * PARAMETERS: Opcode - Either AML_LOCAL_OP or AML_ARG_OP 521 * PARAMETERS: Type - Either ACPI_REFCLASS_LOCAL or
522 * ACPI_REFCLASS_ARG
518 * Index - Which Local or Arg to set 523 * Index - Which Local or Arg to set
519 * obj_desc - Value to be stored 524 * obj_desc - Value to be stored
520 * walk_state - Current walk state 525 * walk_state - Current walk state
@@ -528,7 +533,7 @@ acpi_ds_method_data_delete_value(u16 opcode,
528 ******************************************************************************/ 533 ******************************************************************************/
529 534
530acpi_status 535acpi_status
531acpi_ds_store_object_to_local(u16 opcode, 536acpi_ds_store_object_to_local(u8 type,
532 u32 index, 537 u32 index,
533 union acpi_operand_object *obj_desc, 538 union acpi_operand_object *obj_desc,
534 struct acpi_walk_state *walk_state) 539 struct acpi_walk_state *walk_state)
@@ -539,8 +544,8 @@ acpi_ds_store_object_to_local(u16 opcode,
539 union acpi_operand_object *new_obj_desc; 544 union acpi_operand_object *new_obj_desc;
540 545
541 ACPI_FUNCTION_TRACE(ds_store_object_to_local); 546 ACPI_FUNCTION_TRACE(ds_store_object_to_local);
542 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Opcode=%X Index=%d Obj=%p\n", 547 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Type=%2.2X Index=%d Obj=%p\n",
543 opcode, index, obj_desc)); 548 type, index, obj_desc));
544 549
545 /* Parameter validation */ 550 /* Parameter validation */
546 551
@@ -550,7 +555,7 @@ acpi_ds_store_object_to_local(u16 opcode,
550 555
551 /* Get the namespace node for the arg/local */ 556 /* Get the namespace node for the arg/local */
552 557
553 status = acpi_ds_method_data_get_node(opcode, index, walk_state, &node); 558 status = acpi_ds_method_data_get_node(type, index, walk_state, &node);
554 if (ACPI_FAILURE(status)) { 559 if (ACPI_FAILURE(status)) {
555 return_ACPI_STATUS(status); 560 return_ACPI_STATUS(status);
556 } 561 }
@@ -602,7 +607,7 @@ acpi_ds_store_object_to_local(u16 opcode,
602 * 607 *
603 * Weird, but true. 608 * Weird, but true.
604 */ 609 */
605 if (opcode == AML_ARG_OP) { 610 if (type == ACPI_REFCLASS_ARG) {
606 /* 611 /*
607 * If we have a valid reference object that came from ref_of(), 612 * If we have a valid reference object that came from ref_of(),
608 * do the indirect store 613 * do the indirect store
@@ -611,8 +616,8 @@ acpi_ds_store_object_to_local(u16 opcode,
611 ACPI_DESC_TYPE_OPERAND) 616 ACPI_DESC_TYPE_OPERAND)
612 && (current_obj_desc->common.type == 617 && (current_obj_desc->common.type ==
613 ACPI_TYPE_LOCAL_REFERENCE) 618 ACPI_TYPE_LOCAL_REFERENCE)
614 && (current_obj_desc->reference.opcode == 619 && (current_obj_desc->reference.class ==
615 AML_REF_OF_OP)) { 620 ACPI_REFCLASS_REFOF)) {
616 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, 621 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
617 "Arg (%p) is an ObjRef(Node), storing in node %p\n", 622 "Arg (%p) is an ObjRef(Node), storing in node %p\n",
618 new_obj_desc, 623 new_obj_desc,
@@ -640,11 +645,9 @@ acpi_ds_store_object_to_local(u16 opcode,
640 } 645 }
641 } 646 }
642 647
643 /* 648 /* Delete the existing object before storing the new one */
644 * Delete the existing object 649
645 * before storing the new one 650 acpi_ds_method_data_delete_value(type, index, walk_state);
646 */
647 acpi_ds_method_data_delete_value(opcode, index, walk_state);
648 } 651 }
649 652
650 /* 653 /*
@@ -653,7 +656,7 @@ acpi_ds_store_object_to_local(u16 opcode,
653 * (increments the object reference count by one) 656 * (increments the object reference count by one)
654 */ 657 */
655 status = 658 status =
656 acpi_ds_method_data_set_value(opcode, index, new_obj_desc, 659 acpi_ds_method_data_set_value(type, index, new_obj_desc,
657 walk_state); 660 walk_state);
658 661
659 /* Remove local reference if we copied the object above */ 662 /* Remove local reference if we copied the object above */
diff --git a/drivers/acpi/dispatcher/dsobject.c b/drivers/acpi/dispatcher/dsobject.c
index 0f2805899210..4f08e599d07e 100644
--- a/drivers/acpi/dispatcher/dsobject.c
+++ b/drivers/acpi/dispatcher/dsobject.c
@@ -731,54 +731,70 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
731 switch (op_info->type) { 731 switch (op_info->type) {
732 case AML_TYPE_LOCAL_VARIABLE: 732 case AML_TYPE_LOCAL_VARIABLE:
733 733
734 /* Split the opcode into a base opcode + offset */ 734 /* Local ID (0-7) is (AML opcode - base AML_LOCAL_OP) */
735 735
736 obj_desc->reference.opcode = AML_LOCAL_OP; 736 obj_desc->reference.value = opcode - AML_LOCAL_OP;
737 obj_desc->reference.offset = opcode - AML_LOCAL_OP; 737 obj_desc->reference.class = ACPI_REFCLASS_LOCAL;
738 738
739#ifndef ACPI_NO_METHOD_EXECUTION 739#ifndef ACPI_NO_METHOD_EXECUTION
740 status = acpi_ds_method_data_get_node(AML_LOCAL_OP, 740 status =
741 obj_desc-> 741 acpi_ds_method_data_get_node(ACPI_REFCLASS_LOCAL,
742 reference.offset, 742 obj_desc->reference.
743 walk_state, 743 value, walk_state,
744 (struct 744 ACPI_CAST_INDIRECT_PTR
745 acpi_namespace_node 745 (struct
746 **)&obj_desc-> 746 acpi_namespace_node,
747 reference.object); 747 &obj_desc->reference.
748 object));
748#endif 749#endif
749 break; 750 break;
750 751
751 case AML_TYPE_METHOD_ARGUMENT: 752 case AML_TYPE_METHOD_ARGUMENT:
752 753
753 /* Split the opcode into a base opcode + offset */ 754 /* Arg ID (0-6) is (AML opcode - base AML_ARG_OP) */
754 755
755 obj_desc->reference.opcode = AML_ARG_OP; 756 obj_desc->reference.value = opcode - AML_ARG_OP;
756 obj_desc->reference.offset = opcode - AML_ARG_OP; 757 obj_desc->reference.class = ACPI_REFCLASS_ARG;
757 758
758#ifndef ACPI_NO_METHOD_EXECUTION 759#ifndef ACPI_NO_METHOD_EXECUTION
759 status = acpi_ds_method_data_get_node(AML_ARG_OP, 760 status = acpi_ds_method_data_get_node(ACPI_REFCLASS_ARG,
760 obj_desc-> 761 obj_desc->
761 reference.offset, 762 reference.value,
762 walk_state, 763 walk_state,
764 ACPI_CAST_INDIRECT_PTR
763 (struct 765 (struct
764 acpi_namespace_node 766 acpi_namespace_node,
765 **)&obj_desc-> 767 &obj_desc->
766 reference.object); 768 reference.
769 object));
767#endif 770#endif
768 break; 771 break;
769 772
770 default: /* Other literals, etc.. */ 773 default: /* Object name or Debug object */
771 774
772 if (op->common.aml_opcode == AML_INT_NAMEPATH_OP) { 775 switch (op->common.aml_opcode) {
776 case AML_INT_NAMEPATH_OP:
773 777
774 /* Node was saved in Op */ 778 /* Node was saved in Op */
775 779
776 obj_desc->reference.node = op->common.node; 780 obj_desc->reference.node = op->common.node;
777 obj_desc->reference.object = 781 obj_desc->reference.object =
778 op->common.node->object; 782 op->common.node->object;
779 } 783 obj_desc->reference.class = ACPI_REFCLASS_NAME;
784 break;
785
786 case AML_DEBUG_OP:
780 787
781 obj_desc->reference.opcode = opcode; 788 obj_desc->reference.class = ACPI_REFCLASS_DEBUG;
789 break;
790
791 default:
792
793 ACPI_ERROR((AE_INFO,
794 "Unimplemented reference type for AML opcode: %4.4X",
795 opcode));
796 return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
797 }
782 break; 798 break;
783 } 799 }
784 break; 800 break;
diff --git a/drivers/acpi/dispatcher/dsopcode.c b/drivers/acpi/dispatcher/dsopcode.c
index 6a81c4400edf..69fae5905bb8 100644
--- a/drivers/acpi/dispatcher/dsopcode.c
+++ b/drivers/acpi/dispatcher/dsopcode.c
@@ -1330,7 +1330,7 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state,
1330 (walk_state->results->results.obj_desc[0]) == 1330 (walk_state->results->results.obj_desc[0]) ==
1331 ACPI_TYPE_LOCAL_REFERENCE) 1331 ACPI_TYPE_LOCAL_REFERENCE)
1332 && ((walk_state->results->results.obj_desc[0])-> 1332 && ((walk_state->results->results.obj_desc[0])->
1333 reference.opcode != AML_INDEX_OP)) { 1333 reference.class != ACPI_REFCLASS_INDEX)) {
1334 status = 1334 status =
1335 acpi_ex_resolve_to_value(&walk_state-> 1335 acpi_ex_resolve_to_value(&walk_state->
1336 results->results. 1336 results->results.
diff --git a/drivers/acpi/dispatcher/dswexec.c b/drivers/acpi/dispatcher/dswexec.c
index b5072fa9c920..396fe12078cd 100644
--- a/drivers/acpi/dispatcher/dswexec.c
+++ b/drivers/acpi/dispatcher/dswexec.c
@@ -166,6 +166,10 @@ acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state,
166 status = AE_CTRL_FALSE; 166 status = AE_CTRL_FALSE;
167 } 167 }
168 168
169 /* Predicate can be used for an implicit return value */
170
171 (void)acpi_ds_do_implicit_return(local_obj_desc, walk_state, TRUE);
172
169 cleanup: 173 cleanup:
170 174
171 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Completed a predicate eval=%X Op=%p\n", 175 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Completed a predicate eval=%X Op=%p\n",
@@ -429,10 +433,10 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
429 ACPI_TYPE_LOCAL_REFERENCE) 433 ACPI_TYPE_LOCAL_REFERENCE)
430 && (walk_state->operands[1]->common.type == 434 && (walk_state->operands[1]->common.type ==
431 ACPI_TYPE_LOCAL_REFERENCE) 435 ACPI_TYPE_LOCAL_REFERENCE)
432 && (walk_state->operands[0]->reference.opcode == 436 && (walk_state->operands[0]->reference.class ==
433 walk_state->operands[1]->reference.opcode) 437 walk_state->operands[1]->reference.class)
434 && (walk_state->operands[0]->reference.offset == 438 && (walk_state->operands[0]->reference.value ==
435 walk_state->operands[1]->reference.offset)) { 439 walk_state->operands[1]->reference.value)) {
436 status = AE_OK; 440 status = AE_OK;
437 } else { 441 } else {
438 ACPI_EXCEPTION((AE_INFO, status, 442 ACPI_EXCEPTION((AE_INFO, status,
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index 7d2edf143f16..5b30b8d91d71 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -48,7 +48,6 @@ MODULE_PARM_DESC(immediate_undock, "1 (default) will cause the driver to "
48 " before undocking"); 48 " before undocking");
49 49
50static struct atomic_notifier_head dock_notifier_list; 50static struct atomic_notifier_head dock_notifier_list;
51static struct platform_device *dock_device;
52static char dock_device_name[] = "dock"; 51static char dock_device_name[] = "dock";
53 52
54static const struct acpi_device_id dock_device_ids[] = { 53static const struct acpi_device_id dock_device_ids[] = {
@@ -65,23 +64,29 @@ struct dock_station {
65 struct mutex hp_lock; 64 struct mutex hp_lock;
66 struct list_head dependent_devices; 65 struct list_head dependent_devices;
67 struct list_head hotplug_devices; 66 struct list_head hotplug_devices;
67
68 struct list_head sibiling;
69 struct platform_device *dock_device;
68}; 70};
71static LIST_HEAD(dock_stations);
72static int dock_station_count;
69 73
70struct dock_dependent_device { 74struct dock_dependent_device {
71 struct list_head list; 75 struct list_head list;
72 struct list_head hotplug_list; 76 struct list_head hotplug_list;
73 acpi_handle handle; 77 acpi_handle handle;
74 acpi_notify_handler handler; 78 struct acpi_dock_ops *ops;
75 void *context; 79 void *context;
76}; 80};
77 81
78#define DOCK_DOCKING 0x00000001 82#define DOCK_DOCKING 0x00000001
79#define DOCK_UNDOCKING 0x00000002 83#define DOCK_UNDOCKING 0x00000002
84#define DOCK_IS_DOCK 0x00000010
85#define DOCK_IS_ATA 0x00000020
86#define DOCK_IS_BAT 0x00000040
80#define DOCK_EVENT 3 87#define DOCK_EVENT 3
81#define UNDOCK_EVENT 2 88#define UNDOCK_EVENT 2
82 89
83static struct dock_station *dock_station;
84
85/***************************************************************************** 90/*****************************************************************************
86 * Dock Dependent device functions * 91 * Dock Dependent device functions *
87 *****************************************************************************/ 92 *****************************************************************************/
@@ -199,6 +204,60 @@ static int is_dock(acpi_handle handle)
199 return 1; 204 return 1;
200} 205}
201 206
207static int is_ejectable(acpi_handle handle)
208{
209 acpi_status status;
210 acpi_handle tmp;
211
212 status = acpi_get_handle(handle, "_EJ0", &tmp);
213 if (ACPI_FAILURE(status))
214 return 0;
215 return 1;
216}
217
218static int is_ata(acpi_handle handle)
219{
220 acpi_handle tmp;
221
222 if ((ACPI_SUCCESS(acpi_get_handle(handle, "_GTF", &tmp))) ||
223 (ACPI_SUCCESS(acpi_get_handle(handle, "_GTM", &tmp))) ||
224 (ACPI_SUCCESS(acpi_get_handle(handle, "_STM", &tmp))) ||
225 (ACPI_SUCCESS(acpi_get_handle(handle, "_SDD", &tmp))))
226 return 1;
227
228 return 0;
229}
230
231static int is_battery(acpi_handle handle)
232{
233 struct acpi_device_info *info;
234 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
235 int ret = 1;
236
237 if (!ACPI_SUCCESS(acpi_get_object_info(handle, &buffer)))
238 return 0;
239 info = buffer.pointer;
240 if (!(info->valid & ACPI_VALID_HID))
241 ret = 0;
242 else
243 ret = !strcmp("PNP0C0A", info->hardware_id.value);
244
245 kfree(buffer.pointer);
246 return ret;
247}
248
249static int is_ejectable_bay(acpi_handle handle)
250{
251 acpi_handle phandle;
252 if (!is_ejectable(handle))
253 return 0;
254 if (is_battery(handle) || is_ata(handle))
255 return 1;
256 if (!acpi_get_parent(handle, &phandle) && is_ata(phandle))
257 return 1;
258 return 0;
259}
260
202/** 261/**
203 * is_dock_device - see if a device is on a dock station 262 * is_dock_device - see if a device is on a dock station
204 * @handle: acpi handle of the device 263 * @handle: acpi handle of the device
@@ -209,11 +268,17 @@ static int is_dock(acpi_handle handle)
209 */ 268 */
210int is_dock_device(acpi_handle handle) 269int is_dock_device(acpi_handle handle)
211{ 270{
212 if (!dock_station) 271 struct dock_station *dock_station;
272
273 if (!dock_station_count)
213 return 0; 274 return 0;
214 275
215 if (is_dock(handle) || find_dock_dependent_device(dock_station, handle)) 276 if (is_dock(handle))
216 return 1; 277 return 1;
278 list_for_each_entry(dock_station, &dock_stations, sibiling) {
279 if (find_dock_dependent_device(dock_station, handle))
280 return 1;
281 }
217 282
218 return 0; 283 return 0;
219} 284}
@@ -229,7 +294,7 @@ EXPORT_SYMBOL_GPL(is_dock_device);
229 */ 294 */
230static int dock_present(struct dock_station *ds) 295static int dock_present(struct dock_station *ds)
231{ 296{
232 unsigned long sta; 297 unsigned long long sta;
233 acpi_status status; 298 acpi_status status;
234 299
235 if (ds) { 300 if (ds) {
@@ -320,8 +385,8 @@ static void hotplug_dock_devices(struct dock_station *ds, u32 event)
320 * First call driver specific hotplug functions 385 * First call driver specific hotplug functions
321 */ 386 */
322 list_for_each_entry(dd, &ds->hotplug_devices, hotplug_list) { 387 list_for_each_entry(dd, &ds->hotplug_devices, hotplug_list) {
323 if (dd->handler) 388 if (dd->ops && dd->ops->handler)
324 dd->handler(dd->handle, event, dd->context); 389 dd->ops->handler(dd->handle, event, dd->context);
325 } 390 }
326 391
327 /* 392 /*
@@ -341,9 +406,10 @@ static void hotplug_dock_devices(struct dock_station *ds, u32 event)
341 406
342static void dock_event(struct dock_station *ds, u32 event, int num) 407static void dock_event(struct dock_station *ds, u32 event, int num)
343{ 408{
344 struct device *dev = &dock_device->dev; 409 struct device *dev = &ds->dock_device->dev;
345 char event_string[13]; 410 char event_string[13];
346 char *envp[] = { event_string, NULL }; 411 char *envp[] = { event_string, NULL };
412 struct dock_dependent_device *dd;
347 413
348 if (num == UNDOCK_EVENT) 414 if (num == UNDOCK_EVENT)
349 sprintf(event_string, "EVENT=undock"); 415 sprintf(event_string, "EVENT=undock");
@@ -354,7 +420,14 @@ static void dock_event(struct dock_station *ds, u32 event, int num)
354 * Indicate that the status of the dock station has 420 * Indicate that the status of the dock station has
355 * changed. 421 * changed.
356 */ 422 */
357 kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp); 423 if (num == DOCK_EVENT)
424 kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
425
426 list_for_each_entry(dd, &ds->hotplug_devices, hotplug_list)
427 if (dd->ops && dd->ops->uevent)
428 dd->ops->uevent(dd->handle, event, dd->context);
429 if (num != DOCK_EVENT)
430 kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
358} 431}
359 432
360/** 433/**
@@ -414,9 +487,10 @@ static void handle_dock(struct dock_station *ds, int dock)
414 arg.type = ACPI_TYPE_INTEGER; 487 arg.type = ACPI_TYPE_INTEGER;
415 arg.integer.value = dock; 488 arg.integer.value = dock;
416 status = acpi_evaluate_object(ds->handle, "_DCK", &arg_list, &buffer); 489 status = acpi_evaluate_object(ds->handle, "_DCK", &arg_list, &buffer);
417 if (ACPI_FAILURE(status)) 490 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND)
418 printk(KERN_ERR PREFIX "%s - failed to execute _DCK\n", 491 ACPI_EXCEPTION((AE_INFO, status, "%s - failed to execute"
419 (char *)name_buffer.pointer); 492 " _DCK\n", (char *)name_buffer.pointer));
493
420 kfree(buffer.pointer); 494 kfree(buffer.pointer);
421 kfree(name_buffer.pointer); 495 kfree(name_buffer.pointer);
422} 496}
@@ -452,6 +526,25 @@ static inline void complete_undock(struct dock_station *ds)
452 ds->flags &= ~(DOCK_UNDOCKING); 526 ds->flags &= ~(DOCK_UNDOCKING);
453} 527}
454 528
529static void dock_lock(struct dock_station *ds, int lock)
530{
531 struct acpi_object_list arg_list;
532 union acpi_object arg;
533 acpi_status status;
534
535 arg_list.count = 1;
536 arg_list.pointer = &arg;
537 arg.type = ACPI_TYPE_INTEGER;
538 arg.integer.value = !!lock;
539 status = acpi_evaluate_object(ds->handle, "_LCK", &arg_list, NULL);
540 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
541 if (lock)
542 printk(KERN_WARNING PREFIX "Locking device failed\n");
543 else
544 printk(KERN_WARNING PREFIX "Unlocking device failed\n");
545 }
546}
547
455/** 548/**
456 * dock_in_progress - see if we are in the middle of handling a dock event 549 * dock_in_progress - see if we are in the middle of handling a dock event
457 * @ds: the dock station 550 * @ds: the dock station
@@ -479,7 +572,7 @@ static int dock_in_progress(struct dock_station *ds)
479 */ 572 */
480int register_dock_notifier(struct notifier_block *nb) 573int register_dock_notifier(struct notifier_block *nb)
481{ 574{
482 if (!dock_station) 575 if (!dock_station_count)
483 return -ENODEV; 576 return -ENODEV;
484 577
485 return atomic_notifier_chain_register(&dock_notifier_list, nb); 578 return atomic_notifier_chain_register(&dock_notifier_list, nb);
@@ -493,7 +586,7 @@ EXPORT_SYMBOL_GPL(register_dock_notifier);
493 */ 586 */
494void unregister_dock_notifier(struct notifier_block *nb) 587void unregister_dock_notifier(struct notifier_block *nb)
495{ 588{
496 if (!dock_station) 589 if (!dock_station_count)
497 return; 590 return;
498 591
499 atomic_notifier_chain_unregister(&dock_notifier_list, nb); 592 atomic_notifier_chain_unregister(&dock_notifier_list, nb);
@@ -504,7 +597,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifier);
504/** 597/**
505 * register_hotplug_dock_device - register a hotplug function 598 * register_hotplug_dock_device - register a hotplug function
506 * @handle: the handle of the device 599 * @handle: the handle of the device
507 * @handler: the acpi_notifier_handler to call after docking 600 * @ops: handlers to call after docking
508 * @context: device specific data 601 * @context: device specific data
509 * 602 *
510 * If a driver would like to perform a hotplug operation after a dock 603 * If a driver would like to perform a hotplug operation after a dock
@@ -512,27 +605,36 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifier);
512 * the dock driver after _DCK is executed. 605 * the dock driver after _DCK is executed.
513 */ 606 */
514int 607int
515register_hotplug_dock_device(acpi_handle handle, acpi_notify_handler handler, 608register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
516 void *context) 609 void *context)
517{ 610{
518 struct dock_dependent_device *dd; 611 struct dock_dependent_device *dd;
612 struct dock_station *dock_station;
613 int ret = -EINVAL;
519 614
520 if (!dock_station) 615 if (!dock_station_count)
521 return -ENODEV; 616 return -ENODEV;
522 617
523 /* 618 /*
524 * make sure this handle is for a device dependent on the dock, 619 * make sure this handle is for a device dependent on the dock,
525 * this would include the dock station itself 620 * this would include the dock station itself
526 */ 621 */
527 dd = find_dock_dependent_device(dock_station, handle); 622 list_for_each_entry(dock_station, &dock_stations, sibiling) {
528 if (dd) { 623 /*
529 dd->handler = handler; 624 * An ATA bay can be in a dock and itself can be ejected
530 dd->context = context; 625 * seperately, so there are two 'dock stations' which need the
531 dock_add_hotplug_device(dock_station, dd); 626 * ops
532 return 0; 627 */
628 dd = find_dock_dependent_device(dock_station, handle);
629 if (dd) {
630 dd->ops = ops;
631 dd->context = context;
632 dock_add_hotplug_device(dock_station, dd);
633 ret = 0;
634 }
533 } 635 }
534 636
535 return -EINVAL; 637 return ret;
536} 638}
537 639
538EXPORT_SYMBOL_GPL(register_hotplug_dock_device); 640EXPORT_SYMBOL_GPL(register_hotplug_dock_device);
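
A sketch of what a client of the reworked registration looks like after this change: it supplies a struct acpi_dock_ops carrying the hotplug handler and the new uevent callback instead of a bare acpi_notify_handler. The function and variable names are illustrative; the ops fields and the registration call are the ones introduced here.

	#include <acpi/acpi_bus.h>
	#include <acpi/acpi_drivers.h>

	static void example_dock_handler(acpi_handle handle, u32 event, void *data)
	{
		/* re-enumerate or eject the dependent device here */
	}

	static void example_dock_uevent(acpi_handle handle, u32 event, void *data)
	{
		/* emit a driver-specific uevent for user space */
	}

	static struct acpi_dock_ops example_dock_ops = {
		.handler = example_dock_handler,
		.uevent  = example_dock_uevent,
	};

	/* register_hotplug_dock_device(handle, &example_dock_ops, context);
	 * ...
	 * unregister_hotplug_dock_device(handle); */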
@@ -544,13 +646,16 @@ EXPORT_SYMBOL_GPL(register_hotplug_dock_device);
544void unregister_hotplug_dock_device(acpi_handle handle) 646void unregister_hotplug_dock_device(acpi_handle handle)
545{ 647{
546 struct dock_dependent_device *dd; 648 struct dock_dependent_device *dd;
649 struct dock_station *dock_station;
547 650
548 if (!dock_station) 651 if (!dock_station_count)
549 return; 652 return;
550 653
551 dd = find_dock_dependent_device(dock_station, handle); 654 list_for_each_entry(dock_station, &dock_stations, sibiling) {
552 if (dd) 655 dd = find_dock_dependent_device(dock_station, handle);
553 dock_del_hotplug_device(dock_station, dd); 656 if (dd)
657 dock_del_hotplug_device(dock_station, dd);
658 }
554} 659}
555 660
556EXPORT_SYMBOL_GPL(unregister_hotplug_dock_device); 661EXPORT_SYMBOL_GPL(unregister_hotplug_dock_device);
@@ -575,13 +680,9 @@ static int handle_eject_request(struct dock_station *ds, u32 event)
575 */ 680 */
576 dock_event(ds, event, UNDOCK_EVENT); 681 dock_event(ds, event, UNDOCK_EVENT);
577 682
578 if (!dock_present(ds)) {
579 complete_undock(ds);
580 return -ENODEV;
581 }
582
583 hotplug_dock_devices(ds, ACPI_NOTIFY_EJECT_REQUEST); 683 hotplug_dock_devices(ds, ACPI_NOTIFY_EJECT_REQUEST);
584 undock(ds); 684 undock(ds);
685 dock_lock(ds, 0);
585 eject_dock(ds); 686 eject_dock(ds);
586 if (dock_present(ds)) { 687 if (dock_present(ds)) {
587 printk(KERN_ERR PREFIX "Unable to undock!\n"); 688 printk(KERN_ERR PREFIX "Unable to undock!\n");
@@ -604,14 +705,36 @@ static int handle_eject_request(struct dock_station *ds, u32 event)
604static void dock_notify(acpi_handle handle, u32 event, void *data) 705static void dock_notify(acpi_handle handle, u32 event, void *data)
605{ 706{
606 struct dock_station *ds = data; 707 struct dock_station *ds = data;
708 struct acpi_device *tmp;
709 int surprise_removal = 0;
710
711 /*
712 * According to acpi spec 3.0a, if a DEVICE_CHECK notification
713 * is sent and _DCK is present, it is assumed to mean an undock
714 * request.
715 */
716 if ((ds->flags & DOCK_IS_DOCK) && event == ACPI_NOTIFY_DEVICE_CHECK)
717 event = ACPI_NOTIFY_EJECT_REQUEST;
607 718
719 /*
720 * dock station: BUS_CHECK - docked or surprise removal
721 * DEVICE_CHECK - undocked
722 * other device: BUS_CHECK/DEVICE_CHECK - added or surprise removal
723 *
724 * To simplify event handling, dock dependent device handlers always
725 * get ACPI_NOTIFY_BUS_CHECK/ACPI_NOTIFY_DEVICE_CHECK for add and
726 * ACPI_NOTIFY_EJECT_REQUEST for removal
727 */
608 switch (event) { 728 switch (event) {
609 case ACPI_NOTIFY_BUS_CHECK: 729 case ACPI_NOTIFY_BUS_CHECK:
610 if (!dock_in_progress(ds) && dock_present(ds)) { 730 case ACPI_NOTIFY_DEVICE_CHECK:
731 if (!dock_in_progress(ds) && acpi_bus_get_device(ds->handle,
732 &tmp)) {
611 begin_dock(ds); 733 begin_dock(ds);
612 dock(ds); 734 dock(ds);
613 if (!dock_present(ds)) { 735 if (!dock_present(ds)) {
614 printk(KERN_ERR PREFIX "Unable to dock!\n"); 736 printk(KERN_ERR PREFIX "Unable to dock!\n");
737 complete_dock(ds);
615 break; 738 break;
616 } 739 }
617 atomic_notifier_call_chain(&dock_notifier_list, 740 atomic_notifier_call_chain(&dock_notifier_list,
@@ -619,20 +742,19 @@ static void dock_notify(acpi_handle handle, u32 event, void *data)
619 hotplug_dock_devices(ds, event); 742 hotplug_dock_devices(ds, event);
620 complete_dock(ds); 743 complete_dock(ds);
621 dock_event(ds, event, DOCK_EVENT); 744 dock_event(ds, event, DOCK_EVENT);
745 dock_lock(ds, 1);
746 break;
622 } 747 }
623 break; 748 if (dock_present(ds) || dock_in_progress(ds))
624 case ACPI_NOTIFY_DEVICE_CHECK: 749 break;
625 /* 750 /* This is a surprise removal */
626 * According to acpi spec 3.0a, if a DEVICE_CHECK notification 751 surprise_removal = 1;
627 * is sent and _DCK is present, it is assumed to mean an 752 event = ACPI_NOTIFY_EJECT_REQUEST;
628 * undock request. This notify routine will only be called 753 /* Fall through */
629 * for objects defining _DCK, so we will fall through to eject
630 * request here. However, we will pass an eject request through
631 * to the driver who wish to hotplug.
632 */
633 case ACPI_NOTIFY_EJECT_REQUEST: 754 case ACPI_NOTIFY_EJECT_REQUEST:
634 begin_undock(ds); 755 begin_undock(ds);
635 if (immediate_undock) 756 if ((immediate_undock && !(ds->flags & DOCK_IS_ATA))
757 || surprise_removal)
636 handle_eject_request(ds, event); 758 handle_eject_request(ds, event);
637 else 759 else
638 dock_event(ds, event, UNDOCK_EVENT); 760 dock_event(ds, event, UNDOCK_EVENT);
@@ -642,6 +764,51 @@ static void dock_notify(acpi_handle handle, u32 event, void *data)
642 } 764 }
643} 765}
644 766
767struct dock_data {
768 acpi_handle handle;
769 unsigned long event;
770 struct dock_station *ds;
771};
772
773static void acpi_dock_deferred_cb(void *context)
774{
775 struct dock_data *data = (struct dock_data *)context;
776
777 dock_notify(data->handle, data->event, data->ds);
778 kfree(data);
779}
780
781static int acpi_dock_notifier_call(struct notifier_block *this,
782 unsigned long event, void *data)
783{
784 struct dock_station *dock_station;
785 acpi_handle handle = (acpi_handle)data;
786
787 if (event != ACPI_NOTIFY_BUS_CHECK && event != ACPI_NOTIFY_DEVICE_CHECK
788 && event != ACPI_NOTIFY_EJECT_REQUEST)
789 return 0;
790 list_for_each_entry(dock_station, &dock_stations, sibiling) {
791 if (dock_station->handle == handle) {
792 struct dock_data *dock_data;
793
794 dock_data = kmalloc(sizeof(*dock_data), GFP_KERNEL);
795 if (!dock_data)
796 return 0;
797 dock_data->handle = handle;
798 dock_data->event = event;
799 dock_data->ds = dock_station;
800 acpi_os_hotplug_execute(acpi_dock_deferred_cb,
801 dock_data);
802 return 0;
803 }
804 }
805 return 0;
806}
807
808static struct notifier_block dock_acpi_notifier = {
809 .notifier_call = acpi_dock_notifier_call,
810};
811
645/** 812/**
646 * find_dock_devices - find devices on the dock station 813 * find_dock_devices - find devices on the dock station
647 * @handle: the handle of the device we are examining 814 * @handle: the handle of the device we are examining
@@ -688,6 +855,8 @@ fdd_out:
688static ssize_t show_docked(struct device *dev, 855static ssize_t show_docked(struct device *dev,
689 struct device_attribute *attr, char *buf) 856 struct device_attribute *attr, char *buf)
690{ 857{
858 struct dock_station *dock_station = *((struct dock_station **)
859 dev->platform_data);
691 return snprintf(buf, PAGE_SIZE, "%d\n", dock_present(dock_station)); 860 return snprintf(buf, PAGE_SIZE, "%d\n", dock_present(dock_station));
692 861
693} 862}
@@ -699,6 +868,8 @@ static DEVICE_ATTR(docked, S_IRUGO, show_docked, NULL);
699static ssize_t show_flags(struct device *dev, 868static ssize_t show_flags(struct device *dev,
700 struct device_attribute *attr, char *buf) 869 struct device_attribute *attr, char *buf)
701{ 870{
871 struct dock_station *dock_station = *((struct dock_station **)
872 dev->platform_data);
702 return snprintf(buf, PAGE_SIZE, "%d\n", dock_station->flags); 873 return snprintf(buf, PAGE_SIZE, "%d\n", dock_station->flags);
703 874
704} 875}
@@ -711,6 +882,8 @@ static ssize_t write_undock(struct device *dev, struct device_attribute *attr,
711 const char *buf, size_t count) 882 const char *buf, size_t count)
712{ 883{
713 int ret; 884 int ret;
885 struct dock_station *dock_station = *((struct dock_station **)
886 dev->platform_data);
714 887
715 if (!count) 888 if (!count)
716 return -EINVAL; 889 return -EINVAL;
@@ -727,16 +900,38 @@ static DEVICE_ATTR(undock, S_IWUSR, NULL, write_undock);
727static ssize_t show_dock_uid(struct device *dev, 900static ssize_t show_dock_uid(struct device *dev,
728 struct device_attribute *attr, char *buf) 901 struct device_attribute *attr, char *buf)
729{ 902{
730 unsigned long lbuf; 903 unsigned long long lbuf;
904 struct dock_station *dock_station = *((struct dock_station **)
905 dev->platform_data);
731 acpi_status status = acpi_evaluate_integer(dock_station->handle, 906 acpi_status status = acpi_evaluate_integer(dock_station->handle,
732 "_UID", NULL, &lbuf); 907 "_UID", NULL, &lbuf);
733 if (ACPI_FAILURE(status)) 908 if (ACPI_FAILURE(status))
734 return 0; 909 return 0;
735 910
736 return snprintf(buf, PAGE_SIZE, "%lx\n", lbuf); 911 return snprintf(buf, PAGE_SIZE, "%llx\n", lbuf);
737} 912}
738static DEVICE_ATTR(uid, S_IRUGO, show_dock_uid, NULL); 913static DEVICE_ATTR(uid, S_IRUGO, show_dock_uid, NULL);
739 914
915static ssize_t show_dock_type(struct device *dev,
916 struct device_attribute *attr, char *buf)
917{
918 struct dock_station *dock_station = *((struct dock_station **)
919 dev->platform_data);
920 char *type;
921
922 if (dock_station->flags & DOCK_IS_DOCK)
923 type = "dock_station";
924 else if (dock_station->flags & DOCK_IS_ATA)
925 type = "ata_bay";
926 else if (dock_station->flags & DOCK_IS_BAT)
927 type = "battery_bay";
928 else
929 type = "unknown";
930
931 return snprintf(buf, PAGE_SIZE, "%s\n", type);
932}
933static DEVICE_ATTR(type, S_IRUGO, show_dock_type, NULL);
934
740/** 935/**
741 * dock_add - add a new dock station 936 * dock_add - add a new dock station
742 * @handle: the dock station handle 937 * @handle: the dock station handle
@@ -747,8 +942,9 @@ static DEVICE_ATTR(uid, S_IRUGO, show_dock_uid, NULL);
747static int dock_add(acpi_handle handle) 942static int dock_add(acpi_handle handle)
748{ 943{
749 int ret; 944 int ret;
750 acpi_status status;
751 struct dock_dependent_device *dd; 945 struct dock_dependent_device *dd;
946 struct dock_station *dock_station;
947 struct platform_device *dock_device;
752 948
753 /* allocate & initialize the dock_station private data */ 949 /* allocate & initialize the dock_station private data */
754 dock_station = kzalloc(sizeof(*dock_station), GFP_KERNEL); 950 dock_station = kzalloc(sizeof(*dock_station), GFP_KERNEL);
@@ -758,22 +954,34 @@ static int dock_add(acpi_handle handle)
758 dock_station->last_dock_time = jiffies - HZ; 954 dock_station->last_dock_time = jiffies - HZ;
759 INIT_LIST_HEAD(&dock_station->dependent_devices); 955 INIT_LIST_HEAD(&dock_station->dependent_devices);
760 INIT_LIST_HEAD(&dock_station->hotplug_devices); 956 INIT_LIST_HEAD(&dock_station->hotplug_devices);
957 INIT_LIST_HEAD(&dock_station->sibiling);
761 spin_lock_init(&dock_station->dd_lock); 958 spin_lock_init(&dock_station->dd_lock);
762 mutex_init(&dock_station->hp_lock); 959 mutex_init(&dock_station->hp_lock);
763 ATOMIC_INIT_NOTIFIER_HEAD(&dock_notifier_list); 960 ATOMIC_INIT_NOTIFIER_HEAD(&dock_notifier_list);
764 961
765 /* initialize platform device stuff */ 962 /* initialize platform device stuff */
766 dock_device = 963 dock_station->dock_device =
767 platform_device_register_simple(dock_device_name, 0, NULL, 0); 964 platform_device_register_simple(dock_device_name,
965 dock_station_count, NULL, 0);
966 dock_device = dock_station->dock_device;
768 if (IS_ERR(dock_device)) { 967 if (IS_ERR(dock_device)) {
769 kfree(dock_station); 968 kfree(dock_station);
770 dock_station = NULL; 969 dock_station = NULL;
771 return PTR_ERR(dock_device); 970 return PTR_ERR(dock_device);
772 } 971 }
972 platform_device_add_data(dock_device, &dock_station,
973 sizeof(struct dock_station *));
773 974
774 /* we want the dock device to send uevents */ 975 /* we want the dock device to send uevents */
775 dock_device->dev.uevent_suppress = 0; 976 dock_device->dev.uevent_suppress = 0;
776 977
978 if (is_dock(handle))
979 dock_station->flags |= DOCK_IS_DOCK;
980 if (is_ata(handle))
981 dock_station->flags |= DOCK_IS_ATA;
982 if (is_battery(handle))
983 dock_station->flags |= DOCK_IS_BAT;
984
777 ret = device_create_file(&dock_device->dev, &dev_attr_docked); 985 ret = device_create_file(&dock_device->dev, &dev_attr_docked);
778 if (ret) { 986 if (ret) {
779 printk("Error %d adding sysfs file\n", ret); 987 printk("Error %d adding sysfs file\n", ret);
@@ -812,6 +1020,9 @@ static int dock_add(acpi_handle handle)
812 dock_station = NULL; 1020 dock_station = NULL;
813 return ret; 1021 return ret;
814 } 1022 }
1023 ret = device_create_file(&dock_device->dev, &dev_attr_type);
1024 if (ret)
1025 printk(KERN_ERR "Error %d adding sysfs file\n", ret);
815 1026
816 /* Find dependent devices */ 1027 /* Find dependent devices */
817 acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, 1028 acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
@@ -828,24 +1039,12 @@ static int dock_add(acpi_handle handle)
828 } 1039 }
829 add_dock_dependent_device(dock_station, dd); 1040 add_dock_dependent_device(dock_station, dd);
830 1041
831 /* register for dock events */ 1042 dock_station_count++;
832 status = acpi_install_notify_handler(dock_station->handle, 1043 list_add(&dock_station->sibiling, &dock_stations);
833 ACPI_SYSTEM_NOTIFY,
834 dock_notify, dock_station);
835
836 if (ACPI_FAILURE(status)) {
837 printk(KERN_ERR PREFIX "Error installing notify handler\n");
838 ret = -ENODEV;
839 goto dock_add_err;
840 }
841
842 printk(KERN_INFO PREFIX "%s\n", ACPI_DOCK_DRIVER_DESCRIPTION);
843
844 return 0; 1044 return 0;
845 1045
846dock_add_err:
847 kfree(dd);
848dock_add_err_unregister: 1046dock_add_err_unregister:
1047 device_remove_file(&dock_device->dev, &dev_attr_type);
849 device_remove_file(&dock_device->dev, &dev_attr_docked); 1048 device_remove_file(&dock_device->dev, &dev_attr_docked);
850 device_remove_file(&dock_device->dev, &dev_attr_undock); 1049 device_remove_file(&dock_device->dev, &dev_attr_undock);
851 device_remove_file(&dock_device->dev, &dev_attr_uid); 1050 device_remove_file(&dock_device->dev, &dev_attr_uid);
@@ -859,12 +1058,12 @@ dock_add_err_unregister:
859/** 1058/**
860 * dock_remove - free up resources related to the dock station 1059 * dock_remove - free up resources related to the dock station
861 */ 1060 */
862static int dock_remove(void) 1061static int dock_remove(struct dock_station *dock_station)
863{ 1062{
864 struct dock_dependent_device *dd, *tmp; 1063 struct dock_dependent_device *dd, *tmp;
865 acpi_status status; 1064 struct platform_device *dock_device = dock_station->dock_device;
866 1065
867 if (!dock_station) 1066 if (!dock_station_count)
868 return 0; 1067 return 0;
869 1068
870 /* remove dependent devices */ 1069 /* remove dependent devices */
@@ -872,14 +1071,8 @@ static int dock_remove(void)
872 list) 1071 list)
873 kfree(dd); 1072 kfree(dd);
874 1073
875 /* remove dock notify handler */
876 status = acpi_remove_notify_handler(dock_station->handle,
877 ACPI_SYSTEM_NOTIFY,
878 dock_notify);
879 if (ACPI_FAILURE(status))
880 printk(KERN_ERR "Error removing notify handler\n");
881
882 /* cleanup sysfs */ 1074 /* cleanup sysfs */
1075 device_remove_file(&dock_device->dev, &dev_attr_type);
883 device_remove_file(&dock_device->dev, &dev_attr_docked); 1076 device_remove_file(&dock_device->dev, &dev_attr_docked);
884 device_remove_file(&dock_device->dev, &dev_attr_undock); 1077 device_remove_file(&dock_device->dev, &dev_attr_undock);
885 device_remove_file(&dock_device->dev, &dev_attr_uid); 1078 device_remove_file(&dock_device->dev, &dev_attr_uid);
@@ -904,41 +1097,60 @@ static int dock_remove(void)
904static acpi_status 1097static acpi_status
905find_dock(acpi_handle handle, u32 lvl, void *context, void **rv) 1098find_dock(acpi_handle handle, u32 lvl, void *context, void **rv)
906{ 1099{
907 int *count = context;
908 acpi_status status = AE_OK; 1100 acpi_status status = AE_OK;
909 1101
910 if (is_dock(handle)) { 1102 if (is_dock(handle)) {
911 if (dock_add(handle) >= 0) { 1103 if (dock_add(handle) >= 0) {
912 (*count)++;
913 status = AE_CTRL_TERMINATE; 1104 status = AE_CTRL_TERMINATE;
914 } 1105 }
915 } 1106 }
916 return status; 1107 return status;
917} 1108}
918 1109
919static int __init dock_init(void) 1110static acpi_status
1111find_bay(acpi_handle handle, u32 lvl, void *context, void **rv)
920{ 1112{
921 int num = 0; 1113 /* If bay is a dock, it's already handled */
922 1114 if (is_ejectable_bay(handle) && !is_dock(handle))
923 dock_station = NULL; 1115 dock_add(handle);
1116 return AE_OK;
1117}
924 1118
1119static int __init dock_init(void)
1120{
925 if (acpi_disabled) 1121 if (acpi_disabled)
926 return 0; 1122 return 0;
927 1123
928 /* look for a dock station */ 1124 /* look for a dock station */
929 acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, 1125 acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
930 ACPI_UINT32_MAX, find_dock, &num, NULL); 1126 ACPI_UINT32_MAX, find_dock, NULL, NULL);
931 1127
932 if (!num) 1128 /* look for bay */
933 printk(KERN_INFO "No dock devices found.\n"); 1129 acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
1130 ACPI_UINT32_MAX, find_bay, NULL, NULL);
1131 if (!dock_station_count) {
1132 printk(KERN_INFO PREFIX "No dock devices found.\n");
1133 return 0;
1134 }
934 1135
1136 register_acpi_bus_notifier(&dock_acpi_notifier);
1137 printk(KERN_INFO PREFIX "%s: %d docks/bays found\n",
1138 ACPI_DOCK_DRIVER_DESCRIPTION, dock_station_count);
935 return 0; 1139 return 0;
936} 1140}
937 1141
938static void __exit dock_exit(void) 1142static void __exit dock_exit(void)
939{ 1143{
940 dock_remove(); 1144 struct dock_station *dock_station;
1145
1146 unregister_acpi_bus_notifier(&dock_acpi_notifier);
1147 list_for_each_entry(dock_station, &dock_stations, sibiling)
1148 dock_remove(dock_station);
941} 1149}
942 1150
943postcore_initcall(dock_init); 1151/*
1152 * Must run before drivers of devices in the dock; otherwise we can't know
1153 * which devices are in a dock.
1154 */
1155subsys_initcall(dock_init);
944module_exit(dock_exit); 1156module_exit(dock_exit);
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 13593f9f2197..30f3ef236ecb 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * ec.c - ACPI Embedded Controller Driver (v2.0) 2 * ec.c - ACPI Embedded Controller Driver (v2.1)
3 * 3 *
4 * Copyright (C) 2006, 2007 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com> 4 * Copyright (C) 2006-2008 Alexey Starikovskiy <astarikovskiy@suse.de>
5 * Copyright (C) 2006 Denis Sadykov <denis.m.sadykov@intel.com> 5 * Copyright (C) 2006 Denis Sadykov <denis.m.sadykov@intel.com>
6 * Copyright (C) 2004 Luming Yu <luming.yu@intel.com> 6 * Copyright (C) 2004 Luming Yu <luming.yu@intel.com>
7 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> 7 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
@@ -26,7 +26,7 @@
26 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 26 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
27 */ 27 */
28 28
29/* Uncomment next line to get verbose print outs*/ 29/* Uncomment next line to get verbose printout */
30/* #define DEBUG */ 30/* #define DEBUG */
31 31
32#include <linux/kernel.h> 32#include <linux/kernel.h>
@@ -38,6 +38,7 @@
38#include <linux/seq_file.h> 38#include <linux/seq_file.h>
39#include <linux/interrupt.h> 39#include <linux/interrupt.h>
40#include <linux/list.h> 40#include <linux/list.h>
41#include <linux/spinlock.h>
41#include <asm/io.h> 42#include <asm/io.h>
42#include <acpi/acpi_bus.h> 43#include <acpi/acpi_bus.h>
43#include <acpi/acpi_drivers.h> 44#include <acpi/acpi_drivers.h>
@@ -65,22 +66,21 @@ enum ec_command {
65 ACPI_EC_COMMAND_QUERY = 0x84, 66 ACPI_EC_COMMAND_QUERY = 0x84,
66}; 67};
67 68
68/* EC events */
69enum ec_event {
70 ACPI_EC_EVENT_OBF_1 = 1, /* Output buffer full */
71 ACPI_EC_EVENT_IBF_0, /* Input buffer empty */
72};
73
74#define ACPI_EC_DELAY 500 /* Wait 500ms max. during EC ops */ 69#define ACPI_EC_DELAY 500 /* Wait 500ms max. during EC ops */
75#define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */ 70#define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */
76#define ACPI_EC_UDELAY 100 /* Wait 100us before polling EC again */ 71#define ACPI_EC_UDELAY 100 /* Wait 100us before polling EC again */
77 72
73#define ACPI_EC_STORM_THRESHOLD 8 /* number of false interrupts
74 per one transaction */
75
78enum { 76enum {
79 EC_FLAGS_WAIT_GPE = 0, /* Don't check status until GPE arrives */
80 EC_FLAGS_QUERY_PENDING, /* Query is pending */ 77 EC_FLAGS_QUERY_PENDING, /* Query is pending */
81 EC_FLAGS_GPE_MODE, /* Expect GPE to be sent for status change */ 78 EC_FLAGS_GPE_MODE, /* Expect GPE to be sent
79 * for status change */
82 EC_FLAGS_NO_GPE, /* Don't use GPE mode */ 80 EC_FLAGS_NO_GPE, /* Don't use GPE mode */
83 EC_FLAGS_RESCHEDULE_POLL /* Re-schedule poll */ 81 EC_FLAGS_GPE_STORM, /* GPE storm detected */
82 EC_FLAGS_HANDLERS_INSTALLED /* Handlers for GPE and
83 * OpReg are installed */
84}; 84};
85 85
86/* If we find an EC via the ECDT, we need to keep a ptr to its context */ 86/* If we find an EC via the ECDT, we need to keep a ptr to its context */
@@ -95,6 +95,18 @@ struct acpi_ec_query_handler {
95 u8 query_bit; 95 u8 query_bit;
96}; 96};
97 97
98struct transaction {
99 const u8 *wdata;
100 u8 *rdata;
101 unsigned short irq_count;
102 u8 command;
103 u8 wi;
104 u8 ri;
105 u8 wlen;
106 u8 rlen;
107 bool done;
108};
109
98static struct acpi_ec { 110static struct acpi_ec {
99 acpi_handle handle; 111 acpi_handle handle;
100 unsigned long gpe; 112 unsigned long gpe;
@@ -105,9 +117,8 @@ static struct acpi_ec {
105 struct mutex lock; 117 struct mutex lock;
106 wait_queue_head_t wait; 118 wait_queue_head_t wait;
107 struct list_head list; 119 struct list_head list;
108 struct delayed_work work; 120 struct transaction *curr;
109 atomic_t irq_count; 121 spinlock_t curr_lock;
110 u8 handlers_installed;
111} *boot_ec, *first_ec; 122} *boot_ec, *first_ec;
112 123
113/* 124/*
@@ -150,7 +161,7 @@ static inline u8 acpi_ec_read_data(struct acpi_ec *ec)
150{ 161{
151 u8 x = inb(ec->data_addr); 162 u8 x = inb(ec->data_addr);
152 pr_debug(PREFIX "---> data = 0x%2.2x\n", x); 163 pr_debug(PREFIX "---> data = 0x%2.2x\n", x);
153 return inb(ec->data_addr); 164 return x;
154} 165}
155 166
156static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command) 167static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command)
@@ -165,158 +176,193 @@ static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data)
165 outb(data, ec->data_addr); 176 outb(data, ec->data_addr);
166} 177}
167 178
168static inline int acpi_ec_check_status(struct acpi_ec *ec, enum ec_event event) 179static int ec_transaction_done(struct acpi_ec *ec)
169{ 180{
170 if (test_bit(EC_FLAGS_WAIT_GPE, &ec->flags)) 181 unsigned long flags;
171 return 0; 182 int ret = 0;
172 if (event == ACPI_EC_EVENT_OBF_1) { 183 spin_lock_irqsave(&ec->curr_lock, flags);
173 if (acpi_ec_read_status(ec) & ACPI_EC_FLAG_OBF) 184 if (!ec->curr || ec->curr->done)
174 return 1; 185 ret = 1;
175 } else if (event == ACPI_EC_EVENT_IBF_0) { 186 spin_unlock_irqrestore(&ec->curr_lock, flags);
176 if (!(acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF)) 187 return ret;
177 return 1; 188}
178 }
179 189
180 return 0; 190static void start_transaction(struct acpi_ec *ec)
191{
192 ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0;
193 ec->curr->done = false;
194 acpi_ec_write_cmd(ec, ec->curr->command);
181} 195}
182 196
183static void ec_schedule_ec_poll(struct acpi_ec *ec) 197static void gpe_transaction(struct acpi_ec *ec, u8 status)
184{ 198{
185 if (test_bit(EC_FLAGS_RESCHEDULE_POLL, &ec->flags)) 199 unsigned long flags;
186 schedule_delayed_work(&ec->work, 200 spin_lock_irqsave(&ec->curr_lock, flags);
187 msecs_to_jiffies(ACPI_EC_DELAY)); 201 if (!ec->curr)
202 goto unlock;
203 if (ec->curr->wlen > ec->curr->wi) {
204 if ((status & ACPI_EC_FLAG_IBF) == 0)
205 acpi_ec_write_data(ec,
206 ec->curr->wdata[ec->curr->wi++]);
207 else
208 goto err;
209 } else if (ec->curr->rlen > ec->curr->ri) {
210 if ((status & ACPI_EC_FLAG_OBF) == 1) {
211 ec->curr->rdata[ec->curr->ri++] = acpi_ec_read_data(ec);
212 if (ec->curr->rlen == ec->curr->ri)
213 ec->curr->done = true;
214 } else
215 goto err;
216 } else if (ec->curr->wlen == ec->curr->wi &&
217 (status & ACPI_EC_FLAG_IBF) == 0)
218 ec->curr->done = true;
219 goto unlock;
220err:
221 /* false interrupt, state didn't change */
222 if (in_interrupt())
223 ++ec->curr->irq_count;
224unlock:
225 spin_unlock_irqrestore(&ec->curr_lock, flags);
188} 226}
189 227
190static void ec_switch_to_poll_mode(struct acpi_ec *ec) 228static int acpi_ec_wait(struct acpi_ec *ec)
191{ 229{
230 if (wait_event_timeout(ec->wait, ec_transaction_done(ec),
231 msecs_to_jiffies(ACPI_EC_DELAY)))
232 return 0;
233 /* try restart command if we get any false interrupts */
234 if (ec->curr->irq_count &&
235 (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF) == 0) {
236 pr_debug(PREFIX "controller reset, restart transaction\n");
237 start_transaction(ec);
238 if (wait_event_timeout(ec->wait, ec_transaction_done(ec),
239 msecs_to_jiffies(ACPI_EC_DELAY)))
240 return 0;
241 }
242 /* missing GPEs, switch back to poll mode */
243 if (printk_ratelimit())
244 pr_info(PREFIX "missing confirmations, "
245 "switch off interrupt mode.\n");
192 set_bit(EC_FLAGS_NO_GPE, &ec->flags); 246 set_bit(EC_FLAGS_NO_GPE, &ec->flags);
193 clear_bit(EC_FLAGS_GPE_MODE, &ec->flags); 247 clear_bit(EC_FLAGS_GPE_MODE, &ec->flags);
194 acpi_disable_gpe(NULL, ec->gpe, ACPI_NOT_ISR); 248 return 1;
195 set_bit(EC_FLAGS_RESCHEDULE_POLL, &ec->flags);
196} 249}
197 250
198static int acpi_ec_wait(struct acpi_ec *ec, enum ec_event event, int force_poll) 251static void acpi_ec_gpe_query(void *ec_cxt);
252
253static int ec_check_sci(struct acpi_ec *ec, u8 state)
199{ 254{
200 atomic_set(&ec->irq_count, 0); 255 if (state & ACPI_EC_FLAG_SCI) {
201 if (likely(test_bit(EC_FLAGS_GPE_MODE, &ec->flags)) && 256 if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
202 likely(!force_poll)) { 257 return acpi_os_execute(OSL_EC_BURST_HANDLER,
203 if (wait_event_timeout(ec->wait, acpi_ec_check_status(ec, event), 258 acpi_ec_gpe_query, ec);
204 msecs_to_jiffies(ACPI_EC_DELAY))) 259 }
205 return 0; 260 return 0;
206 clear_bit(EC_FLAGS_WAIT_GPE, &ec->flags); 261}
207 if (acpi_ec_check_status(ec, event)) { 262
208 /* missing GPEs, switch back to poll mode */ 263static int ec_poll(struct acpi_ec *ec)
209 if (printk_ratelimit()) 264{
210 pr_info(PREFIX "missing confirmations, " 265 unsigned long delay = jiffies + msecs_to_jiffies(ACPI_EC_DELAY);
211 "switch off interrupt mode.\n"); 266 udelay(ACPI_EC_UDELAY);
212 ec_switch_to_poll_mode(ec); 267 while (time_before(jiffies, delay)) {
213 ec_schedule_ec_poll(ec); 268 gpe_transaction(ec, acpi_ec_read_status(ec));
214 return 0; 269 udelay(ACPI_EC_UDELAY);
215 } 270 if (ec_transaction_done(ec))
216 } else {
217 unsigned long delay = jiffies + msecs_to_jiffies(ACPI_EC_DELAY);
218 clear_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
219 while (time_before(jiffies, delay)) {
220 if (acpi_ec_check_status(ec, event))
221 return 0;
222 msleep(1);
223 }
224 if (acpi_ec_check_status(ec,event))
225 return 0; 271 return 0;
226 } 272 }
227 pr_err(PREFIX "acpi_ec_wait timeout, status = 0x%2.2x, event = %s\n",
228 acpi_ec_read_status(ec),
229 (event == ACPI_EC_EVENT_OBF_1) ? "\"b0=1\"" : "\"b1=0\"");
230 return -ETIME; 273 return -ETIME;
231} 274}
232 275
233static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, u8 command, 276static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
234 const u8 * wdata, unsigned wdata_len, 277 struct transaction *t,
235 u8 * rdata, unsigned rdata_len,
236 int force_poll) 278 int force_poll)
237{ 279{
238 int result = 0; 280 unsigned long tmp;
239 set_bit(EC_FLAGS_WAIT_GPE, &ec->flags); 281 int ret = 0;
240 pr_debug(PREFIX "transaction start\n"); 282 pr_debug(PREFIX "transaction start\n");
241 acpi_ec_write_cmd(ec, command); 283 /* disable GPE during transaction if storm is detected */
242 for (; wdata_len > 0; --wdata_len) { 284 if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
243 result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, force_poll); 285 clear_bit(EC_FLAGS_GPE_MODE, &ec->flags);
244 if (result) { 286 acpi_disable_gpe(NULL, ec->gpe);
245 pr_err(PREFIX
246 "write_cmd timeout, command = %d\n", command);
247 goto end;
248 }
249 set_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
250 acpi_ec_write_data(ec, *(wdata++));
251 } 287 }
252 288 /* start transaction */
253 if (!rdata_len) { 289 spin_lock_irqsave(&ec->curr_lock, tmp);
254 result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, force_poll); 290 /* following two actions should be kept atomic */
255 if (result) { 291 ec->curr = t;
256 pr_err(PREFIX 292 start_transaction(ec);
257 "finish-write timeout, command = %d\n", command); 293 if (ec->curr->command == ACPI_EC_COMMAND_QUERY)
258 goto end;
259 }
260 } else if (command == ACPI_EC_COMMAND_QUERY)
261 clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags); 294 clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
262 295 spin_unlock_irqrestore(&ec->curr_lock, tmp);
263 for (; rdata_len > 0; --rdata_len) { 296 /* if we selected poll mode or failed in GPE-mode do a poll loop */
264 result = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF_1, force_poll); 297 if (force_poll ||
265 if (result) { 298 !test_bit(EC_FLAGS_GPE_MODE, &ec->flags) ||
266 pr_err(PREFIX "read timeout, command = %d\n", command); 299 acpi_ec_wait(ec))
267 goto end; 300 ret = ec_poll(ec);
268 }
269 /* Don't expect GPE after last read */
270 if (rdata_len > 1)
271 set_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
272 *(rdata++) = acpi_ec_read_data(ec);
273 }
274 end:
275 pr_debug(PREFIX "transaction end\n"); 301 pr_debug(PREFIX "transaction end\n");
276 return result; 302 spin_lock_irqsave(&ec->curr_lock, tmp);
303 ec->curr = NULL;
304 spin_unlock_irqrestore(&ec->curr_lock, tmp);
305 if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
306 /* check if we received SCI during transaction */
307 ec_check_sci(ec, acpi_ec_read_status(ec));
308 /* it is safe to enable GPE outside of transaction */
309 acpi_enable_gpe(NULL, ec->gpe);
310 } else if (test_bit(EC_FLAGS_GPE_MODE, &ec->flags) &&
311 t->irq_count > ACPI_EC_STORM_THRESHOLD) {
312 pr_info(PREFIX "GPE storm detected, "
313 "transactions will use polling mode\n");
314 set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
315 }
316 return ret;
317}
318
319static int ec_check_ibf0(struct acpi_ec *ec)
320{
321 u8 status = acpi_ec_read_status(ec);
322 return (status & ACPI_EC_FLAG_IBF) == 0;
323}
324
325static int ec_wait_ibf0(struct acpi_ec *ec)
326{
327 unsigned long delay = jiffies + msecs_to_jiffies(ACPI_EC_DELAY);
328 /* use a short 1 ms timeout (manual polling) if GPE mode is not active */
329 unsigned long timeout = test_bit(EC_FLAGS_GPE_MODE, &ec->flags) ?
330 msecs_to_jiffies(ACPI_EC_DELAY) : msecs_to_jiffies(1);
331 while (time_before(jiffies, delay))
332 if (wait_event_timeout(ec->wait, ec_check_ibf0(ec), timeout))
333 return 0;
334 return -ETIME;
277} 335}
278 336
279static int acpi_ec_transaction(struct acpi_ec *ec, u8 command, 337static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t,
280 const u8 * wdata, unsigned wdata_len,
281 u8 * rdata, unsigned rdata_len,
282 int force_poll) 338 int force_poll)
283{ 339{
284 int status; 340 int status;
285 u32 glk; 341 u32 glk;
286 342 if (!ec || (!t) || (t->wlen && !t->wdata) || (t->rlen && !t->rdata))
287 if (!ec || (wdata_len && !wdata) || (rdata_len && !rdata))
288 return -EINVAL; 343 return -EINVAL;
289 344 if (t->rdata)
290 if (rdata) 345 memset(t->rdata, 0, t->rlen);
291 memset(rdata, 0, rdata_len);
292
293 mutex_lock(&ec->lock); 346 mutex_lock(&ec->lock);
294 if (ec->global_lock) { 347 if (ec->global_lock) {
295 status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk); 348 status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk);
296 if (ACPI_FAILURE(status)) { 349 if (ACPI_FAILURE(status)) {
297 mutex_unlock(&ec->lock); 350 status = -ENODEV;
298 return -ENODEV; 351 goto unlock;
299 } 352 }
300 } 353 }
301 354 if (ec_wait_ibf0(ec)) {
302 status = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, 0);
303 if (status) {
304 pr_err(PREFIX "input buffer is not empty, " 355 pr_err(PREFIX "input buffer is not empty, "
305 "aborting transaction\n"); 356 "aborting transaction\n");
357 status = -ETIME;
306 goto end; 358 goto end;
307 } 359 }
308 360 status = acpi_ec_transaction_unlocked(ec, t, force_poll);
309 status = acpi_ec_transaction_unlocked(ec, command, 361end:
310 wdata, wdata_len,
311 rdata, rdata_len,
312 force_poll);
313
314 end:
315
316 if (ec->global_lock) 362 if (ec->global_lock)
317 acpi_release_global_lock(glk); 363 acpi_release_global_lock(glk);
364unlock:
318 mutex_unlock(&ec->lock); 365 mutex_unlock(&ec->lock);
319
320 return status; 366 return status;
321} 367}
322 368
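Editor's note (not part of the patch): for a plain register write (ACPI_EC_COMMAND_WRITE, wlen == 2, rlen == 0) the new per-byte state machine behaves roughly as sketched below; the GPE numbering is illustrative only.

	/*
	 * start_transaction()      writes the command byte, wi = ri = 0
	 * GPE/poll #1, IBF clear   writes wdata[0] (address), wi = 1
	 * GPE/poll #2, IBF clear   writes wdata[1] (data),     wi = 2
	 * GPE/poll #3, IBF clear   wlen == wi, so curr->done = true
	 *
	 * A GPE that fires while IBF is still set does not advance the
	 * state; gpe_transaction() only bumps irq_count.  Once irq_count
	 * exceeds ACPI_EC_STORM_THRESHOLD, EC_FLAGS_GPE_STORM is set and
	 * later transactions run in polling mode with the GPE disabled
	 * for their duration.
	 */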
@@ -327,21 +373,32 @@ static int acpi_ec_transaction(struct acpi_ec *ec, u8 command,
327int acpi_ec_burst_enable(struct acpi_ec *ec) 373int acpi_ec_burst_enable(struct acpi_ec *ec)
328{ 374{
329 u8 d; 375 u8 d;
330 return acpi_ec_transaction(ec, ACPI_EC_BURST_ENABLE, NULL, 0, &d, 1, 0); 376 struct transaction t = {.command = ACPI_EC_BURST_ENABLE,
377 .wdata = NULL, .rdata = &d,
378 .wlen = 0, .rlen = 1};
379
380 return acpi_ec_transaction(ec, &t, 0);
331} 381}
332 382
333int acpi_ec_burst_disable(struct acpi_ec *ec) 383int acpi_ec_burst_disable(struct acpi_ec *ec)
334{ 384{
335 return acpi_ec_transaction(ec, ACPI_EC_BURST_DISABLE, NULL, 0, NULL, 0, 0); 385 struct transaction t = {.command = ACPI_EC_BURST_DISABLE,
386 .wdata = NULL, .rdata = NULL,
387 .wlen = 0, .rlen = 0};
388
389 return (acpi_ec_read_status(ec) & ACPI_EC_FLAG_BURST) ?
390 acpi_ec_transaction(ec, &t, 0) : 0;
336} 391}
337 392
338static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 * data) 393static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 * data)
339{ 394{
340 int result; 395 int result;
341 u8 d; 396 u8 d;
397 struct transaction t = {.command = ACPI_EC_COMMAND_READ,
398 .wdata = &address, .rdata = &d,
399 .wlen = 1, .rlen = 1};
342 400
343 result = acpi_ec_transaction(ec, ACPI_EC_COMMAND_READ, 401 result = acpi_ec_transaction(ec, &t, 0);
344 &address, 1, &d, 1, 0);
345 *data = d; 402 *data = d;
346 return result; 403 return result;
347} 404}
@@ -349,8 +406,11 @@ static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 * data)
349static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data) 406static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data)
350{ 407{
351 u8 wdata[2] = { address, data }; 408 u8 wdata[2] = { address, data };
352 return acpi_ec_transaction(ec, ACPI_EC_COMMAND_WRITE, 409 struct transaction t = {.command = ACPI_EC_COMMAND_WRITE,
353 wdata, 2, NULL, 0, 0); 410 .wdata = wdata, .rdata = NULL,
411 .wlen = 2, .rlen = 0};
412
413 return acpi_ec_transaction(ec, &t, 0);
354} 414}
355 415
356/* 416/*
@@ -412,12 +472,13 @@ int ec_transaction(u8 command,
412 u8 * rdata, unsigned rdata_len, 472 u8 * rdata, unsigned rdata_len,
413 int force_poll) 473 int force_poll)
414{ 474{
475 struct transaction t = {.command = command,
476 .wdata = wdata, .rdata = rdata,
477 .wlen = wdata_len, .rlen = rdata_len};
415 if (!first_ec) 478 if (!first_ec)
416 return -ENODEV; 479 return -ENODEV;
417 480
418 return acpi_ec_transaction(first_ec, command, wdata, 481 return acpi_ec_transaction(first_ec, &t, force_poll);
419 wdata_len, rdata, rdata_len,
420 force_poll);
421} 482}
422 483
423EXPORT_SYMBOL(ec_transaction); 484EXPORT_SYMBOL(ec_transaction);
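Editor's sketch (not part of the patch): the exported ec_transaction() keeps its prototype, so out-of-file callers need no changes; it now merely wraps its arguments in a struct transaction. A hedged example of reading a single EC register follows; 0x80 is the EC read command (ACPI_EC_COMMAND_READ is local to this file, so external modules typically carry their own constant) and the 0xA0 offset is made up.

	u8 addr = 0xA0, val = 0;	/* register offset is a made-up example */
	int err = ec_transaction(0x80 /* EC read */, &addr, 1, &val, 1, 0);

	if (!err)
		pr_debug("EC reg 0x%02x = 0x%02x\n", addr, val);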
@@ -426,7 +487,9 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 * data)
426{ 487{
427 int result; 488 int result;
428 u8 d; 489 u8 d;
429 490 struct transaction t = {.command = ACPI_EC_COMMAND_QUERY,
491 .wdata = NULL, .rdata = &d,
492 .wlen = 0, .rlen = 1};
430 if (!ec || !data) 493 if (!ec || !data)
431 return -EINVAL; 494 return -EINVAL;
432 495
@@ -436,7 +499,7 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 * data)
436 * bit to be cleared (and thus clearing the interrupt source). 499 * bit to be cleared (and thus clearing the interrupt source).
437 */ 500 */
438 501
439 result = acpi_ec_transaction(ec, ACPI_EC_COMMAND_QUERY, NULL, 0, &d, 1, 0); 502 result = acpi_ec_transaction(ec, &t, 0);
440 if (result) 503 if (result)
441 return result; 504 return result;
442 505
@@ -513,46 +576,35 @@ static void acpi_ec_gpe_query(void *ec_cxt)
513 576
514static u32 acpi_ec_gpe_handler(void *data) 577static u32 acpi_ec_gpe_handler(void *data)
515{ 578{
516 acpi_status status = AE_OK;
517 struct acpi_ec *ec = data; 579 struct acpi_ec *ec = data;
518 u8 state = acpi_ec_read_status(ec); 580 u8 status;
519 581
520 pr_debug(PREFIX "~~~> interrupt\n"); 582 pr_debug(PREFIX "~~~> interrupt\n");
521 atomic_inc(&ec->irq_count); 583 status = acpi_ec_read_status(ec);
522 if (atomic_read(&ec->irq_count) > 5) { 584
523 pr_err(PREFIX "GPE storm detected, disabling EC GPE\n"); 585 if (test_bit(EC_FLAGS_GPE_MODE, &ec->flags)) {
524 ec_switch_to_poll_mode(ec); 586 gpe_transaction(ec, status);
525 goto end; 587 if (ec_transaction_done(ec) &&
588 (status & ACPI_EC_FLAG_IBF) == 0)
589 wake_up(&ec->wait);
526 } 590 }
527 clear_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
528 if (test_bit(EC_FLAGS_GPE_MODE, &ec->flags))
529 wake_up(&ec->wait);
530 591
531 if (state & ACPI_EC_FLAG_SCI) { 592 ec_check_sci(ec, status);
532 if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) 593 if (!test_bit(EC_FLAGS_GPE_MODE, &ec->flags) &&
533 status = acpi_os_execute(OSL_EC_BURST_HANDLER, 594 !test_bit(EC_FLAGS_NO_GPE, &ec->flags)) {
534 acpi_ec_gpe_query, ec);
535 } else if (!test_bit(EC_FLAGS_GPE_MODE, &ec->flags) &&
536 !test_bit(EC_FLAGS_NO_GPE, &ec->flags) &&
537 in_interrupt()) {
538 /* this is non-query, must be confirmation */ 595 /* this is non-query, must be confirmation */
539 if (printk_ratelimit()) 596 if (!test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
540 pr_info(PREFIX "non-query interrupt received," 597 if (printk_ratelimit())
598 pr_info(PREFIX "non-query interrupt received,"
599 " switching to interrupt mode\n");
600 } else {
601 /* hush, STORM switches the mode every transaction */
602 pr_debug(PREFIX "non-query interrupt received,"
541 " switching to interrupt mode\n"); 603 " switching to interrupt mode\n");
604 }
542 set_bit(EC_FLAGS_GPE_MODE, &ec->flags); 605 set_bit(EC_FLAGS_GPE_MODE, &ec->flags);
543 clear_bit(EC_FLAGS_RESCHEDULE_POLL, &ec->flags);
544 } 606 }
545end: 607 return ACPI_INTERRUPT_HANDLED;
546 ec_schedule_ec_poll(ec);
547 return ACPI_SUCCESS(status) ?
548 ACPI_INTERRUPT_HANDLED : ACPI_INTERRUPT_NOT_HANDLED;
549}
550
551static void do_ec_poll(struct work_struct *work)
552{
553 struct acpi_ec *ec = container_of(work, struct acpi_ec, work.work);
554 atomic_set(&ec->irq_count, 0);
555 (void)acpi_ec_gpe_handler(ec);
556} 608}
557 609
558/* -------------------------------------------------------------------------- 610/* --------------------------------------------------------------------------
@@ -696,8 +748,7 @@ static struct acpi_ec *make_acpi_ec(void)
696 mutex_init(&ec->lock); 748 mutex_init(&ec->lock);
697 init_waitqueue_head(&ec->wait); 749 init_waitqueue_head(&ec->wait);
698 INIT_LIST_HEAD(&ec->list); 750 INIT_LIST_HEAD(&ec->list);
699 INIT_DELAYED_WORK_DEFERRABLE(&ec->work, do_ec_poll); 751 spin_lock_init(&ec->curr_lock);
700 atomic_set(&ec->irq_count, 0);
701 return ec; 752 return ec;
702} 753}
703 754
@@ -718,6 +769,7 @@ static acpi_status
718ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval) 769ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval)
719{ 770{
720 acpi_status status; 771 acpi_status status;
772 unsigned long long tmp = 0;
721 773
722 struct acpi_ec *ec = context; 774 struct acpi_ec *ec = context;
723 status = acpi_walk_resources(handle, METHOD_NAME__CRS, 775 status = acpi_walk_resources(handle, METHOD_NAME__CRS,
@@ -727,31 +779,27 @@ ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval)
727 779
728 /* Get GPE bit assignment (EC events). */ 780 /* Get GPE bit assignment (EC events). */
729 /* TODO: Add support for _GPE returning a package */ 781 /* TODO: Add support for _GPE returning a package */
730 status = acpi_evaluate_integer(handle, "_GPE", NULL, &ec->gpe); 782 status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp);
731 if (ACPI_FAILURE(status)) 783 if (ACPI_FAILURE(status))
732 return status; 784 return status;
785 ec->gpe = tmp;
733 /* Use the global lock for all EC transactions? */ 786 /* Use the global lock for all EC transactions? */
734 acpi_evaluate_integer(handle, "_GLK", NULL, &ec->global_lock); 787 tmp = 0;
788 acpi_evaluate_integer(handle, "_GLK", NULL, &tmp);
789 ec->global_lock = tmp;
735 ec->handle = handle; 790 ec->handle = handle;
736 return AE_CTRL_TERMINATE; 791 return AE_CTRL_TERMINATE;
737} 792}
738 793
739static void ec_poll_stop(struct acpi_ec *ec)
740{
741 clear_bit(EC_FLAGS_RESCHEDULE_POLL, &ec->flags);
742 cancel_delayed_work(&ec->work);
743}
744
745static void ec_remove_handlers(struct acpi_ec *ec) 794static void ec_remove_handlers(struct acpi_ec *ec)
746{ 795{
747 ec_poll_stop(ec);
748 if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle, 796 if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
749 ACPI_ADR_SPACE_EC, &acpi_ec_space_handler))) 797 ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
750 pr_err(PREFIX "failed to remove space handler\n"); 798 pr_err(PREFIX "failed to remove space handler\n");
751 if (ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe, 799 if (ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe,
752 &acpi_ec_gpe_handler))) 800 &acpi_ec_gpe_handler)))
753 pr_err(PREFIX "failed to remove gpe handler\n"); 801 pr_err(PREFIX "failed to remove gpe handler\n");
754 ec->handlers_installed = 0; 802 clear_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags);
755} 803}
756 804
757static int acpi_ec_add(struct acpi_device *device) 805static int acpi_ec_add(struct acpi_device *device)
@@ -788,7 +836,7 @@ static int acpi_ec_add(struct acpi_device *device)
788 836
789 if (!first_ec) 837 if (!first_ec)
790 first_ec = ec; 838 first_ec = ec;
791 acpi_driver_data(device) = ec; 839 device->driver_data = ec;
792 acpi_ec_add_fs(device); 840 acpi_ec_add_fs(device);
793 pr_info(PREFIX "GPE = 0x%lx, I/O: command/status = 0x%lx, data = 0x%lx\n", 841 pr_info(PREFIX "GPE = 0x%lx, I/O: command/status = 0x%lx, data = 0x%lx\n",
794 ec->gpe, ec->command_addr, ec->data_addr); 842 ec->gpe, ec->command_addr, ec->data_addr);
@@ -813,7 +861,7 @@ static int acpi_ec_remove(struct acpi_device *device, int type)
813 } 861 }
814 mutex_unlock(&ec->lock); 862 mutex_unlock(&ec->lock);
815 acpi_ec_remove_fs(device); 863 acpi_ec_remove_fs(device);
816 acpi_driver_data(device) = NULL; 864 device->driver_data = NULL;
817 if (ec == first_ec) 865 if (ec == first_ec)
818 first_ec = NULL; 866 first_ec = NULL;
819 kfree(ec); 867 kfree(ec);
@@ -846,27 +894,36 @@ ec_parse_io_ports(struct acpi_resource *resource, void *context)
846static int ec_install_handlers(struct acpi_ec *ec) 894static int ec_install_handlers(struct acpi_ec *ec)
847{ 895{
848 acpi_status status; 896 acpi_status status;
849 if (ec->handlers_installed) 897 if (test_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags))
850 return 0; 898 return 0;
851 status = acpi_install_gpe_handler(NULL, ec->gpe, 899 status = acpi_install_gpe_handler(NULL, ec->gpe,
852 ACPI_GPE_EDGE_TRIGGERED, 900 ACPI_GPE_EDGE_TRIGGERED,
853 &acpi_ec_gpe_handler, ec); 901 &acpi_ec_gpe_handler, ec);
854 if (ACPI_FAILURE(status)) 902 if (ACPI_FAILURE(status))
855 return -ENODEV; 903 return -ENODEV;
856
857 acpi_set_gpe_type(NULL, ec->gpe, ACPI_GPE_TYPE_RUNTIME); 904 acpi_set_gpe_type(NULL, ec->gpe, ACPI_GPE_TYPE_RUNTIME);
858 acpi_enable_gpe(NULL, ec->gpe, ACPI_NOT_ISR); 905 acpi_enable_gpe(NULL, ec->gpe);
859
860 status = acpi_install_address_space_handler(ec->handle, 906 status = acpi_install_address_space_handler(ec->handle,
861 ACPI_ADR_SPACE_EC, 907 ACPI_ADR_SPACE_EC,
862 &acpi_ec_space_handler, 908 &acpi_ec_space_handler,
863 NULL, ec); 909 NULL, ec);
864 if (ACPI_FAILURE(status)) { 910 if (ACPI_FAILURE(status)) {
865 acpi_remove_gpe_handler(NULL, ec->gpe, &acpi_ec_gpe_handler); 911 if (status == AE_NOT_FOUND) {
866 return -ENODEV; 912 /*
913 * The OS may fail to evaluate the _REG object.
914 * Ignore the AE_NOT_FOUND error and continue
915 * to initialize the EC.
916 */
917 printk(KERN_ERR "Failed to evaluate the _REG object"
918 " of the EC device. A broken BIOS is suspected.\n");
919 } else {
920 acpi_remove_gpe_handler(NULL, ec->gpe,
921 &acpi_ec_gpe_handler);
922 return -ENODEV;
923 }
867 } 924 }
868 925
869 ec->handlers_installed = 1; 926 set_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags);
870 return 0; 927 return 0;
871} 928}
872 929
@@ -887,7 +944,6 @@ static int acpi_ec_start(struct acpi_device *device)
887 944
888 /* EC is fully operational, allow queries */ 945 /* EC is fully operational, allow queries */
889 clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags); 946 clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
890 ec_schedule_ec_poll(ec);
891 return ret; 947 return ret;
892} 948}
893 949
@@ -906,7 +962,7 @@ static int acpi_ec_stop(struct acpi_device *device, int type)
906 962
907int __init acpi_boot_ec_enable(void) 963int __init acpi_boot_ec_enable(void)
908{ 964{
909 if (!boot_ec || boot_ec->handlers_installed) 965 if (!boot_ec || test_bit(EC_FLAGS_HANDLERS_INSTALLED, &boot_ec->flags))
910 return 0; 966 return 0;
911 if (!ec_install_handlers(boot_ec)) { 967 if (!ec_install_handlers(boot_ec)) {
912 first_ec = boot_ec; 968 first_ec = boot_ec;
@@ -985,7 +1041,7 @@ static int acpi_ec_suspend(struct acpi_device *device, pm_message_t state)
985 /* Stop using GPE */ 1041 /* Stop using GPE */
986 set_bit(EC_FLAGS_NO_GPE, &ec->flags); 1042 set_bit(EC_FLAGS_NO_GPE, &ec->flags);
987 clear_bit(EC_FLAGS_GPE_MODE, &ec->flags); 1043 clear_bit(EC_FLAGS_GPE_MODE, &ec->flags);
988 acpi_disable_gpe(NULL, ec->gpe, ACPI_NOT_ISR); 1044 acpi_disable_gpe(NULL, ec->gpe);
989 return 0; 1045 return 0;
990} 1046}
991 1047
@@ -994,7 +1050,7 @@ static int acpi_ec_resume(struct acpi_device *device)
994 struct acpi_ec *ec = acpi_driver_data(device); 1050 struct acpi_ec *ec = acpi_driver_data(device);
995 /* Enable use of GPE back */ 1051 /* Enable use of GPE back */
996 clear_bit(EC_FLAGS_NO_GPE, &ec->flags); 1052 clear_bit(EC_FLAGS_NO_GPE, &ec->flags);
997 acpi_enable_gpe(NULL, ec->gpe, ACPI_NOT_ISR); 1053 acpi_enable_gpe(NULL, ec->gpe);
998 return 0; 1054 return 0;
999} 1055}
1000 1056
diff --git a/drivers/acpi/events/evgpe.c b/drivers/acpi/events/evgpe.c
index c5e53aae86f7..f45c74fe745e 100644
--- a/drivers/acpi/events/evgpe.c
+++ b/drivers/acpi/events/evgpe.c
@@ -289,8 +289,6 @@ acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info)
289 */ 289 */
290 status = acpi_hw_low_disable_gpe(gpe_event_info); 290 status = acpi_hw_low_disable_gpe(gpe_event_info);
291 return_ACPI_STATUS(status); 291 return_ACPI_STATUS(status);
292
293 return_ACPI_STATUS(AE_OK);
294} 292}
295 293
296/******************************************************************************* 294/*******************************************************************************
diff --git a/drivers/acpi/events/evxfevnt.c b/drivers/acpi/events/evxfevnt.c
index 73bfd6bf962f..41554f736b68 100644
--- a/drivers/acpi/events/evxfevnt.c
+++ b/drivers/acpi/events/evxfevnt.c
@@ -248,21 +248,15 @@ ACPI_EXPORT_SYMBOL(acpi_set_gpe_type)
248 * DESCRIPTION: Enable an ACPI event (general purpose) 248 * DESCRIPTION: Enable an ACPI event (general purpose)
249 * 249 *
250 ******************************************************************************/ 250 ******************************************************************************/
251acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags) 251acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number)
252{ 252{
253 acpi_status status = AE_OK; 253 acpi_status status = AE_OK;
254 acpi_cpu_flags flags;
254 struct acpi_gpe_event_info *gpe_event_info; 255 struct acpi_gpe_event_info *gpe_event_info;
255 256
256 ACPI_FUNCTION_TRACE(acpi_enable_gpe); 257 ACPI_FUNCTION_TRACE(acpi_enable_gpe);
257 258
258 /* Use semaphore lock if not executing at interrupt level */ 259 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
259
260 if (flags & ACPI_NOT_ISR) {
261 status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
262 if (ACPI_FAILURE(status)) {
263 return_ACPI_STATUS(status);
264 }
265 }
266 260
267 /* Ensure that we have a valid GPE number */ 261 /* Ensure that we have a valid GPE number */
268 262
@@ -277,9 +271,7 @@ acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags)
277 status = acpi_ev_enable_gpe(gpe_event_info, TRUE); 271 status = acpi_ev_enable_gpe(gpe_event_info, TRUE);
278 272
279 unlock_and_exit: 273 unlock_and_exit:
280 if (flags & ACPI_NOT_ISR) { 274 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
281 (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
282 }
283 return_ACPI_STATUS(status); 275 return_ACPI_STATUS(status);
284} 276}
285 277
@@ -299,22 +291,15 @@ ACPI_EXPORT_SYMBOL(acpi_enable_gpe)
299 * DESCRIPTION: Disable an ACPI event (general purpose) 291 * DESCRIPTION: Disable an ACPI event (general purpose)
300 * 292 *
301 ******************************************************************************/ 293 ******************************************************************************/
302acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags) 294acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number)
303{ 295{
304 acpi_status status = AE_OK; 296 acpi_status status = AE_OK;
297 acpi_cpu_flags flags;
305 struct acpi_gpe_event_info *gpe_event_info; 298 struct acpi_gpe_event_info *gpe_event_info;
306 299
307 ACPI_FUNCTION_TRACE(acpi_disable_gpe); 300 ACPI_FUNCTION_TRACE(acpi_disable_gpe);
308 301
309 /* Use semaphore lock if not executing at interrupt level */ 302 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
310
311 if (flags & ACPI_NOT_ISR) {
312 status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
313 if (ACPI_FAILURE(status)) {
314 return_ACPI_STATUS(status);
315 }
316 }
317
318 /* Ensure that we have a valid GPE number */ 303 /* Ensure that we have a valid GPE number */
319 304
320 gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); 305 gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
@@ -325,10 +310,8 @@ acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags)
325 310
326 status = acpi_ev_disable_gpe(gpe_event_info); 311 status = acpi_ev_disable_gpe(gpe_event_info);
327 312
328 unlock_and_exit: 313unlock_and_exit:
329 if (flags & ACPI_NOT_ISR) { 314 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
330 (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
331 }
332 return_ACPI_STATUS(status); 315 return_ACPI_STATUS(status);
333} 316}
334 317
@@ -521,6 +504,9 @@ acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status)
521 if (value) 504 if (value)
522 *event_status |= ACPI_EVENT_FLAG_SET; 505 *event_status |= ACPI_EVENT_FLAG_SET;
523 506
507 if (acpi_gbl_fixed_event_handlers[event].handler)
508 *event_status |= ACPI_EVENT_FLAG_HANDLE;
509
524 return_ACPI_STATUS(status); 510 return_ACPI_STATUS(status);
525} 511}
526 512
@@ -571,6 +557,9 @@ acpi_get_gpe_status(acpi_handle gpe_device,
571 557
572 status = acpi_hw_get_gpe_status(gpe_event_info, event_status); 558 status = acpi_hw_get_gpe_status(gpe_event_info, event_status);
573 559
560 if (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)
561 *event_status |= ACPI_EVENT_FLAG_HANDLE;
562
574 unlock_and_exit: 563 unlock_and_exit:
575 if (flags & ACPI_NOT_ISR) { 564 if (flags & ACPI_NOT_ISR) {
576 (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); 565 (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
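Editor's sketch (not part of the patch): the new ACPI_EVENT_FLAG_HANDLE bit lets callers ask whether a GPE (or fixed event) already has a handler attached. A hedged example, assuming the acpi_get_gpe_status() prototype in this tree still takes a flags argument and that gpe_number names some GPE of interest:

	acpi_event_status gpe_status = 0;

	if (ACPI_SUCCESS(acpi_get_gpe_status(NULL, gpe_number, ACPI_NOT_ISR,
					     &gpe_status)) &&
	    (gpe_status & ACPI_EVENT_FLAG_HANDLE))
		printk(KERN_DEBUG "GPE 0x%x already has a handler\n", gpe_number);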
diff --git a/drivers/acpi/executer/exconfig.c b/drivers/acpi/executer/exconfig.c
index 8892b9824fae..74da6fa52ef1 100644
--- a/drivers/acpi/executer/exconfig.c
+++ b/drivers/acpi/executer/exconfig.c
@@ -43,7 +43,6 @@
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acinterp.h> 45#include <acpi/acinterp.h>
46#include <acpi/amlcode.h>
47#include <acpi/acnamesp.h> 46#include <acpi/acnamesp.h>
48#include <acpi/actables.h> 47#include <acpi/actables.h>
49#include <acpi/acdispat.h> 48#include <acpi/acdispat.h>
@@ -91,13 +90,12 @@ acpi_ex_add_table(u32 table_index,
91 90
92 /* Init the table handle */ 91 /* Init the table handle */
93 92
94 obj_desc->reference.opcode = AML_LOAD_OP; 93 obj_desc->reference.class = ACPI_REFCLASS_TABLE;
95 *ddb_handle = obj_desc; 94 *ddb_handle = obj_desc;
96 95
97 /* Install the new table into the local data structures */ 96 /* Install the new table into the local data structures */
98 97
99 obj_desc->reference.object = ACPI_CAST_PTR(void, 98 obj_desc->reference.value = table_index;
100 (unsigned long)table_index);
101 99
102 /* Add the table to the namespace */ 100 /* Add the table to the namespace */
103 101
@@ -280,6 +278,7 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
280 struct acpi_walk_state *walk_state) 278 struct acpi_walk_state *walk_state)
281{ 279{
282 union acpi_operand_object *ddb_handle; 280 union acpi_operand_object *ddb_handle;
281 struct acpi_table_header *table;
283 struct acpi_table_desc table_desc; 282 struct acpi_table_desc table_desc;
284 u32 table_index; 283 u32 table_index;
285 acpi_status status; 284 acpi_status status;
@@ -294,9 +293,8 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
294 switch (ACPI_GET_OBJECT_TYPE(obj_desc)) { 293 switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
295 case ACPI_TYPE_REGION: 294 case ACPI_TYPE_REGION:
296 295
297 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Load from Region %p %s\n", 296 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
298 obj_desc, 297 "Load table from Region %p\n", obj_desc));
299 acpi_ut_get_object_type_name(obj_desc)));
300 298
301 /* Region must be system_memory (from ACPI spec) */ 299 /* Region must be system_memory (from ACPI spec) */
302 300
@@ -316,61 +314,112 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
316 } 314 }
317 315
318 /* 316 /*
319 * We will simply map the memory region for the table. However, the 317 * Map the table header and get the actual table length. The region
320 * memory region is technically not guaranteed to remain stable and 318 * length is not guaranteed to be the same as the table length.
321 * we may eventually have to copy the table to a local buffer. 319 */
320 table = acpi_os_map_memory(obj_desc->region.address,
321 sizeof(struct acpi_table_header));
322 if (!table) {
323 return_ACPI_STATUS(AE_NO_MEMORY);
324 }
325
326 length = table->length;
327 acpi_os_unmap_memory(table, sizeof(struct acpi_table_header));
328
329 /* Must have at least an ACPI table header */
330
331 if (length < sizeof(struct acpi_table_header)) {
332 return_ACPI_STATUS(AE_INVALID_TABLE_LENGTH);
333 }
334
335 /*
336 * The memory region is not guaranteed to remain stable and we must
337 * copy the table to a local buffer. For example, the memory region
338 * is corrupted after suspend on some machines. Dynamically loaded
339 * tables are usually small, so this overhead is minimal.
322 */ 340 */
341
342 /* Allocate a buffer for the table */
343
344 table_desc.pointer = ACPI_ALLOCATE(length);
345 if (!table_desc.pointer) {
346 return_ACPI_STATUS(AE_NO_MEMORY);
347 }
348
349 /* Map the entire table and copy it */
350
351 table = acpi_os_map_memory(obj_desc->region.address, length);
352 if (!table) {
353 ACPI_FREE(table_desc.pointer);
354 return_ACPI_STATUS(AE_NO_MEMORY);
355 }
356
357 ACPI_MEMCPY(table_desc.pointer, table, length);
358 acpi_os_unmap_memory(table, length);
359
323 table_desc.address = obj_desc->region.address; 360 table_desc.address = obj_desc->region.address;
324 table_desc.length = obj_desc->region.length;
325 table_desc.flags = ACPI_TABLE_ORIGIN_MAPPED;
326 break; 361 break;
327 362
328 case ACPI_TYPE_BUFFER: /* Buffer or resolved region_field */ 363 case ACPI_TYPE_BUFFER: /* Buffer or resolved region_field */
329 364
330 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, 365 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
331 "Load from Buffer or Field %p %s\n", obj_desc, 366 "Load table from Buffer or Field %p\n",
332 acpi_ut_get_object_type_name(obj_desc))); 367 obj_desc));
333
334 length = obj_desc->buffer.length;
335 368
336 /* Must have at least an ACPI table header */ 369 /* Must have at least an ACPI table header */
337 370
338 if (length < sizeof(struct acpi_table_header)) { 371 if (obj_desc->buffer.length < sizeof(struct acpi_table_header)) {
339 return_ACPI_STATUS(AE_INVALID_TABLE_LENGTH); 372 return_ACPI_STATUS(AE_INVALID_TABLE_LENGTH);
340 } 373 }
341 374
342 /* Validate checksum here. It won't get validated in tb_add_table */ 375 /* Get the actual table length from the table header */
343 376
344 status = 377 table =
345 acpi_tb_verify_checksum(ACPI_CAST_PTR 378 ACPI_CAST_PTR(struct acpi_table_header,
346 (struct acpi_table_header, 379 obj_desc->buffer.pointer);
347 obj_desc->buffer.pointer), length); 380 length = table->length;
348 if (ACPI_FAILURE(status)) { 381
349 return_ACPI_STATUS(status); 382 /* Table cannot extend beyond the buffer */
383
384 if (length > obj_desc->buffer.length) {
385 return_ACPI_STATUS(AE_AML_BUFFER_LIMIT);
386 }
387 if (length < sizeof(struct acpi_table_header)) {
388 return_ACPI_STATUS(AE_INVALID_TABLE_LENGTH);
350 } 389 }
351 390
352 /* 391 /*
353 * We need to copy the buffer since the original buffer could be 392 * Copy the table from the buffer because the buffer could be modified
354 * changed or deleted in the future 393 * or even deleted in the future
355 */ 394 */
356 table_desc.pointer = ACPI_ALLOCATE(length); 395 table_desc.pointer = ACPI_ALLOCATE(length);
357 if (!table_desc.pointer) { 396 if (!table_desc.pointer) {
358 return_ACPI_STATUS(AE_NO_MEMORY); 397 return_ACPI_STATUS(AE_NO_MEMORY);
359 } 398 }
360 399
361 ACPI_MEMCPY(table_desc.pointer, obj_desc->buffer.pointer, 400 ACPI_MEMCPY(table_desc.pointer, table, length);
362 length); 401 table_desc.address = ACPI_TO_INTEGER(table_desc.pointer);
363 table_desc.length = length;
364 table_desc.flags = ACPI_TABLE_ORIGIN_ALLOCATED;
365 break; 402 break;
366 403
367 default: 404 default:
368 return_ACPI_STATUS(AE_AML_OPERAND_TYPE); 405 return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
369 } 406 }
370 407
371 /* 408 /* Validate table checksum (will not get validated in tb_add_table) */
372 * Install the new table into the local data structures 409
373 */ 410 status = acpi_tb_verify_checksum(table_desc.pointer, length);
411 if (ACPI_FAILURE(status)) {
412 ACPI_FREE(table_desc.pointer);
413 return_ACPI_STATUS(status);
414 }
415
416 /* Complete the table descriptor */
417
418 table_desc.length = length;
419 table_desc.flags = ACPI_TABLE_ORIGIN_ALLOCATED;
420
421 /* Install the new table into the local data structures */
422
374 status = acpi_tb_add_table(&table_desc, &table_index); 423 status = acpi_tb_add_table(&table_desc, &table_index);
375 if (ACPI_FAILURE(status)) { 424 if (ACPI_FAILURE(status)) {
376 goto cleanup; 425 goto cleanup;
@@ -379,7 +428,7 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
379 /* 428 /*
380 * Add the table to the namespace. 429 * Add the table to the namespace.
381 * 430 *
382 * Note: We load the table objects relative to the root of the namespace. 431 * Note: Load the table objects relative to the root of the namespace.
383 * This appears to go against the ACPI specification, but we do it for 432 * This appears to go against the ACPI specification, but we do it for
384 * compatibility with other ACPI implementations. 433 * compatibility with other ACPI implementations.
385 */ 434 */
@@ -415,7 +464,7 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
415 cleanup: 464 cleanup:
416 if (ACPI_FAILURE(status)) { 465 if (ACPI_FAILURE(status)) {
417 466
418 /* Delete allocated buffer or mapping */ 467 /* Delete allocated table buffer */
419 468
420 acpi_tb_delete_table(&table_desc); 469 acpi_tb_delete_table(&table_desc);
421 } 470 }
@@ -455,9 +504,9 @@ acpi_status acpi_ex_unload_table(union acpi_operand_object *ddb_handle)
455 return_ACPI_STATUS(AE_BAD_PARAMETER); 504 return_ACPI_STATUS(AE_BAD_PARAMETER);
456 } 505 }
457 506
458 /* Get the table index from the ddb_handle (acpi_size for 64-bit case) */ 507 /* Get the table index from the ddb_handle */
459 508
460 table_index = (u32) (acpi_size) table_desc->reference.object; 509 table_index = table_desc->reference.value;
461 510
462 /* Invoke table handler if present */ 511 /* Invoke table handler if present */
463 512
diff --git a/drivers/acpi/executer/exconvrt.c b/drivers/acpi/executer/exconvrt.c
index 261d97516d9b..1d1f35adddde 100644
--- a/drivers/acpi/executer/exconvrt.c
+++ b/drivers/acpi/executer/exconvrt.c
@@ -57,7 +57,7 @@ acpi_ex_convert_to_ascii(acpi_integer integer,
57 * 57 *
58 * FUNCTION: acpi_ex_convert_to_integer 58 * FUNCTION: acpi_ex_convert_to_integer
59 * 59 *
60 * PARAMETERS: obj_desc - Object to be converted. Must be an 60 * PARAMETERS: obj_desc - Object to be converted. Must be an
61 * Integer, Buffer, or String 61 * Integer, Buffer, or String
62 * result_desc - Where the new Integer object is returned 62 * result_desc - Where the new Integer object is returned
63 * Flags - Used for string conversion 63 * Flags - Used for string conversion
@@ -103,7 +103,7 @@ acpi_ex_convert_to_integer(union acpi_operand_object *obj_desc,
103 } 103 }
104 104
105 /* 105 /*
106 * Convert the buffer/string to an integer. Note that both buffers and 106 * Convert the buffer/string to an integer. Note that both buffers and
107 * strings are treated as raw data - we don't convert ascii to hex for 107 * strings are treated as raw data - we don't convert ascii to hex for
108 * strings. 108 * strings.
109 * 109 *
@@ -120,7 +120,7 @@ acpi_ex_convert_to_integer(union acpi_operand_object *obj_desc,
120 120
121 /* 121 /*
122 * Convert string to an integer - for most cases, the string must be 122 * Convert string to an integer - for most cases, the string must be
123 * hexadecimal as per the ACPI specification. The only exception (as 123 * hexadecimal as per the ACPI specification. The only exception (as
124 * of ACPI 3.0) is that the to_integer() operator allows both decimal 124 * of ACPI 3.0) is that the to_integer() operator allows both decimal
125 * and hexadecimal strings (hex prefixed with "0x"). 125 * and hexadecimal strings (hex prefixed with "0x").
126 */ 126 */
@@ -159,6 +159,7 @@ acpi_ex_convert_to_integer(union acpi_operand_object *obj_desc,
159 break; 159 break;
160 160
161 default: 161 default:
162
162 /* No other types can get here */ 163 /* No other types can get here */
163 break; 164 break;
164 } 165 }
@@ -185,7 +186,7 @@ acpi_ex_convert_to_integer(union acpi_operand_object *obj_desc,
185 * 186 *
186 * FUNCTION: acpi_ex_convert_to_buffer 187 * FUNCTION: acpi_ex_convert_to_buffer
187 * 188 *
188 * PARAMETERS: obj_desc - Object to be converted. Must be an 189 * PARAMETERS: obj_desc - Object to be converted. Must be an
189 * Integer, Buffer, or String 190 * Integer, Buffer, or String
190 * result_desc - Where the new buffer object is returned 191 * result_desc - Where the new buffer object is returned
191 * 192 *
@@ -365,7 +366,7 @@ acpi_ex_convert_to_ascii(acpi_integer integer,
365 } 366 }
366 367
367 /* 368 /*
368 * Since leading zeros are supressed, we must check for the case where 369 * Since leading zeros are suppressed, we must check for the case where
369 * the integer equals 0 370 * the integer equals 0
370 * 371 *
371 * Finally, null terminate the string and return the length 372 * Finally, null terminate the string and return the length
@@ -383,7 +384,7 @@ acpi_ex_convert_to_ascii(acpi_integer integer,
383 * 384 *
384 * FUNCTION: acpi_ex_convert_to_string 385 * FUNCTION: acpi_ex_convert_to_string
385 * 386 *
386 * PARAMETERS: obj_desc - Object to be converted. Must be an 387 * PARAMETERS: obj_desc - Object to be converted. Must be an
387 * Integer, Buffer, or String 388 * Integer, Buffer, or String
388 * result_desc - Where the string object is returned 389 * result_desc - Where the string object is returned
389 * Type - String flags (base and conversion type) 390 * Type - String flags (base and conversion type)
@@ -472,7 +473,7 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc,
472 base = 10; 473 base = 10;
473 474
474 /* 475 /*
475 * Calculate the final string length. Individual string values 476 * Calculate the final string length. Individual string values
476 * are variable length (include separator for each) 477 * are variable length (include separator for each)
477 */ 478 */
478 for (i = 0; i < obj_desc->buffer.length; i++) { 479 for (i = 0; i < obj_desc->buffer.length; i++) {
@@ -511,9 +512,14 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc,
511 /* 512 /*
512 * Create a new string object and string buffer 513 * Create a new string object and string buffer
513 * (-1 because of extra separator included in string_length from above) 514 * (-1 because of extra separator included in string_length from above)
515 * Allow creation of zero-length strings from zero-length buffers.
514 */ 516 */
517 if (string_length) {
518 string_length--;
519 }
520
515 return_desc = acpi_ut_create_string_object((acpi_size) 521 return_desc = acpi_ut_create_string_object((acpi_size)
516 (string_length - 1)); 522 string_length);
517 if (!return_desc) { 523 if (!return_desc) {
518 return_ACPI_STATUS(AE_NO_MEMORY); 524 return_ACPI_STATUS(AE_NO_MEMORY);
519 } 525 }
@@ -536,7 +542,9 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc,
536 * Null terminate the string 542 * Null terminate the string
537 * (overwrites final comma/space from above) 543 * (overwrites final comma/space from above)
538 */ 544 */
539 new_buf--; 545 if (obj_desc->buffer.length) {
546 new_buf--;
547 }
540 *new_buf = 0; 548 *new_buf = 0;
541 break; 549 break;
542 550
@@ -617,7 +625,7 @@ acpi_ex_convert_to_target_type(acpi_object_type destination_type,
617 case ACPI_TYPE_LOCAL_BANK_FIELD: 625 case ACPI_TYPE_LOCAL_BANK_FIELD:
618 case ACPI_TYPE_LOCAL_INDEX_FIELD: 626 case ACPI_TYPE_LOCAL_INDEX_FIELD:
619 /* 627 /*
620 * These types require an Integer operand. We can convert 628 * These types require an Integer operand. We can convert
621 * a Buffer or a String to an Integer if necessary. 629 * a Buffer or a String to an Integer if necessary.
622 */ 630 */
623 status = 631 status =
@@ -627,7 +635,7 @@ acpi_ex_convert_to_target_type(acpi_object_type destination_type,
627 635
628 case ACPI_TYPE_STRING: 636 case ACPI_TYPE_STRING:
629 /* 637 /*
630 * The operand must be a String. We can convert an 638 * The operand must be a String. We can convert an
631 * Integer or Buffer if necessary 639 * Integer or Buffer if necessary
632 */ 640 */
633 status = 641 status =
@@ -637,7 +645,7 @@ acpi_ex_convert_to_target_type(acpi_object_type destination_type,
637 645
638 case ACPI_TYPE_BUFFER: 646 case ACPI_TYPE_BUFFER:
639 /* 647 /*
640 * The operand must be a Buffer. We can convert an 648 * The operand must be a Buffer. We can convert an
641 * Integer or String if necessary 649 * Integer or String if necessary
642 */ 650 */
643 status = 651 status =
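Editorial note: the exconvrt hunks above guard the "remove trailing separator" step so that converting a zero-length Buffer yields a valid zero-length String instead of computing a length of -1 and stepping before the start of the output. A standalone sketch of the same decimal-list conversion, assuming comma separators; the function name is illustrative, not an ACPICA API.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* Convert a byte buffer to "1,2,3"-style decimal text; returns a malloc'd string. */
static char *buffer_to_decimal_string(const uint8_t *buf, size_t len)
{
        size_t string_length = 0;
        size_t i;
        char tmp[8];
        char *str, *p;

        /* Elements are variable length; count one separator per element */
        for (i = 0; i < len; i++)
                string_length += snprintf(tmp, sizeof(tmp), "%u", (unsigned)buf[i]) + 1;

        /*
         * Drop the separator that would follow the last element, but let a
         * zero-length buffer become a zero-length string -- the guard the
         * hunk above adds before creating the string object.
         */
        if (string_length)
                string_length--;

        /* One spare byte: sprintf below briefly stores a NUL after the final
         * separator before we back up and overwrite that separator. */
        str = malloc(string_length + 2);
        if (!str)
                return NULL;

        p = str;
        for (i = 0; i < len; i++)
                p += sprintf(p, "%u,", (unsigned)buf[i]);

        /* Null terminate, overwriting the final separator (if any was written) */
        if (len)
                p--;
        *p = '\0';
        return str;
}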
diff --git a/drivers/acpi/executer/exdump.c b/drivers/acpi/executer/exdump.c
index 2be2e2bf95bf..d087a7d28aa5 100644
--- a/drivers/acpi/executer/exdump.c
+++ b/drivers/acpi/executer/exdump.c
@@ -45,7 +45,6 @@
45#include <acpi/acinterp.h> 45#include <acpi/acinterp.h>
46#include <acpi/amlcode.h> 46#include <acpi/amlcode.h>
47#include <acpi/acnamesp.h> 47#include <acpi/acnamesp.h>
48#include <acpi/acparser.h>
49 48
50#define _COMPONENT ACPI_EXECUTER 49#define _COMPONENT ACPI_EXECUTER
51ACPI_MODULE_NAME("exdump") 50ACPI_MODULE_NAME("exdump")
@@ -214,10 +213,11 @@ static struct acpi_exdump_info acpi_ex_dump_index_field[5] = {
214 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(index_field.data_obj), "Data Object"} 213 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(index_field.data_obj), "Data Object"}
215}; 214};
216 215
217static struct acpi_exdump_info acpi_ex_dump_reference[7] = { 216static struct acpi_exdump_info acpi_ex_dump_reference[8] = {
218 {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_reference), NULL}, 217 {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_reference), NULL},
218 {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(reference.class), "Class"},
219 {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(reference.target_type), "Target Type"}, 219 {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(reference.target_type), "Target Type"},
220 {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(reference.offset), "Offset"}, 220 {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(reference.value), "Value"},
221 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(reference.object), "Object Desc"}, 221 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(reference.object), "Object Desc"},
222 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(reference.node), "Node"}, 222 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(reference.node), "Node"},
223 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(reference.where), "Where"}, 223 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(reference.where), "Where"},
@@ -413,10 +413,10 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc,
413 413
414 case ACPI_EXD_REFERENCE: 414 case ACPI_EXD_REFERENCE:
415 415
416 acpi_ex_out_string("Opcode", 416 acpi_ex_out_string("Class Name",
417 (acpi_ps_get_opcode_info 417 (char *)
418 (obj_desc->reference.opcode))-> 418 acpi_ut_get_reference_name
419 name); 419 (obj_desc));
420 acpi_ex_dump_reference_obj(obj_desc); 420 acpi_ex_dump_reference_obj(obj_desc);
421 break; 421 break;
422 422
@@ -494,40 +494,41 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
494 switch (ACPI_GET_OBJECT_TYPE(obj_desc)) { 494 switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
495 case ACPI_TYPE_LOCAL_REFERENCE: 495 case ACPI_TYPE_LOCAL_REFERENCE:
496 496
497 switch (obj_desc->reference.opcode) { 497 acpi_os_printf("Reference: [%s] ",
498 case AML_DEBUG_OP: 498 acpi_ut_get_reference_name(obj_desc));
499
500 switch (obj_desc->reference.class) {
501 case ACPI_REFCLASS_DEBUG:
499 502
500 acpi_os_printf("Reference: Debug\n"); 503 acpi_os_printf("\n");
501 break; 504 break;
502 505
503 case AML_INDEX_OP: 506 case ACPI_REFCLASS_INDEX:
504 507
505 acpi_os_printf("Reference: Index %p\n", 508 acpi_os_printf("%p\n", obj_desc->reference.object);
506 obj_desc->reference.object);
507 break; 509 break;
508 510
509 case AML_LOAD_OP: 511 case ACPI_REFCLASS_TABLE:
510 512
511 acpi_os_printf("Reference: [DdbHandle] TableIndex %p\n", 513 acpi_os_printf("Table Index %X\n",
512 obj_desc->reference.object); 514 obj_desc->reference.value);
513 break; 515 break;
514 516
515 case AML_REF_OF_OP: 517 case ACPI_REFCLASS_REFOF:
516 518
517 acpi_os_printf("Reference: (RefOf) %p [%s]\n", 519 acpi_os_printf("%p [%s]\n", obj_desc->reference.object,
518 obj_desc->reference.object,
519 acpi_ut_get_type_name(((union 520 acpi_ut_get_type_name(((union
520 acpi_operand_object 521 acpi_operand_object
521 *)obj_desc-> 522 *)
523 obj_desc->
522 reference. 524 reference.
523 object)->common. 525 object)->common.
524 type)); 526 type));
525 break; 527 break;
526 528
527 case AML_ARG_OP: 529 case ACPI_REFCLASS_ARG:
528 530
529 acpi_os_printf("Reference: Arg%d", 531 acpi_os_printf("%X", obj_desc->reference.value);
530 obj_desc->reference.offset);
531 532
532 if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_INTEGER) { 533 if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_INTEGER) {
533 534
@@ -542,10 +543,9 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
542 acpi_os_printf("\n"); 543 acpi_os_printf("\n");
543 break; 544 break;
544 545
545 case AML_LOCAL_OP: 546 case ACPI_REFCLASS_LOCAL:
546 547
547 acpi_os_printf("Reference: Local%d", 548 acpi_os_printf("%X", obj_desc->reference.value);
548 obj_desc->reference.offset);
549 549
550 if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_INTEGER) { 550 if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_INTEGER) {
551 551
@@ -560,21 +560,16 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
560 acpi_os_printf("\n"); 560 acpi_os_printf("\n");
561 break; 561 break;
562 562
563 case AML_INT_NAMEPATH_OP: 563 case ACPI_REFCLASS_NAME:
564 564
565 acpi_os_printf("Reference: Namepath %X [%4.4s]\n", 565 acpi_os_printf("- [%4.4s]\n",
566 obj_desc->reference.node->name.integer,
567 obj_desc->reference.node->name.ascii); 566 obj_desc->reference.node->name.ascii);
568 break; 567 break;
569 568
570 default: 569 default: /* Unknown reference class */
571
572 /* Unknown opcode */
573 570
574 acpi_os_printf("Unknown Reference opcode=%X\n", 571 acpi_os_printf("%2.2X\n", obj_desc->reference.class);
575 obj_desc->reference.opcode);
576 break; 572 break;
577
578 } 573 }
579 break; 574 break;
580 575
@@ -865,8 +860,8 @@ static void acpi_ex_dump_reference_obj(union acpi_operand_object *obj_desc)
865 860
866 ret_buf.length = ACPI_ALLOCATE_LOCAL_BUFFER; 861 ret_buf.length = ACPI_ALLOCATE_LOCAL_BUFFER;
867 862
868 if (obj_desc->reference.opcode == AML_INT_NAMEPATH_OP) { 863 if (obj_desc->reference.class == ACPI_REFCLASS_NAME) {
869 acpi_os_printf(" Named Object %p ", obj_desc->reference.node); 864 acpi_os_printf(" %p ", obj_desc->reference.node);
870 865
871 status = 866 status =
872 acpi_ns_handle_to_pathname(obj_desc->reference.node, 867 acpi_ns_handle_to_pathname(obj_desc->reference.node,
@@ -882,14 +877,12 @@ static void acpi_ex_dump_reference_obj(union acpi_operand_object *obj_desc)
882 ACPI_DESC_TYPE_OPERAND) { 877 ACPI_DESC_TYPE_OPERAND) {
883 acpi_os_printf(" Target: %p", 878 acpi_os_printf(" Target: %p",
884 obj_desc->reference.object); 879 obj_desc->reference.object);
885 if (obj_desc->reference.opcode == AML_LOAD_OP) { 880 if (obj_desc->reference.class == ACPI_REFCLASS_TABLE) {
886 /* 881 acpi_os_printf(" Table Index: %X\n",
887 * For DDBHandle reference, 882 obj_desc->reference.value);
888 * obj_desc->Reference.Object is the table index
889 */
890 acpi_os_printf(" [DDBHandle]\n");
891 } else { 883 } else {
892 acpi_os_printf(" [%s]\n", 884 acpi_os_printf(" Target: %p [%s]\n",
885 obj_desc->reference.object,
893 acpi_ut_get_type_name(((union 886 acpi_ut_get_type_name(((union
894 acpi_operand_object 887 acpi_operand_object
895 *) 888 *)
@@ -988,9 +981,9 @@ acpi_ex_dump_package_obj(union acpi_operand_object *obj_desc,
988 981
989 case ACPI_TYPE_LOCAL_REFERENCE: 982 case ACPI_TYPE_LOCAL_REFERENCE:
990 983
991 acpi_os_printf("[Object Reference] %s", 984 acpi_os_printf("[Object Reference] Type [%s] %2.2X",
992 (acpi_ps_get_opcode_info 985 acpi_ut_get_reference_name(obj_desc),
993 (obj_desc->reference.opcode))->name); 986 obj_desc->reference.class);
994 acpi_ex_dump_reference_obj(obj_desc); 987 acpi_ex_dump_reference_obj(obj_desc);
995 break; 988 break;
996 989
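Editorial note: the exdump changes are one piece of a wider cleanup that runs through the rest of this patch -- reference objects stop carrying a raw AML opcode plus an overloaded "offset" field and instead carry a small reference class and a generic value, with one helper producing a printable class name. A standalone sketch of the idea; the enum values and the lookup helper are illustrative, not the ACPICA definitions.

#include <stdio.h>
#include <stdint.h>

/* Reference classes, analogous in spirit to ACPI_REFCLASS_* */
enum ref_class {
        REFCLASS_LOCAL,    /* method Local0-7; value = local number    */
        REFCLASS_ARG,      /* method Arg0-6;   value = argument number */
        REFCLASS_INDEX,    /* Index() result;  value = element index   */
        REFCLASS_TABLE,    /* DdbHandle;       value = table index     */
        REFCLASS_REFOF,    /* RefOf() result;  object points at target */
        REFCLASS_NAME,     /* named reference; node names the target   */
        REFCLASS_DEBUG,    /* the Debug object                         */
        REFCLASS_MAX
};

struct reference {
        enum ref_class class;  /* replaces the old raw AML opcode field */
        uint32_t       value;  /* replaces the old overloaded "offset"  */
        void          *object; /* target object, where applicable       */
};

/* Roughly the service acpi_ut_get_reference_name() provides: a printable name */
static const char *ref_class_name(const struct reference *ref)
{
        static const char *names[REFCLASS_MAX] = {
                "Local", "Arg", "Index", "Table", "RefOf", "Name", "Debug"
        };

        if (ref->class >= REFCLASS_MAX)
                return "Unknown";
        return names[ref->class];
}

int main(void)
{
        struct reference ref = { REFCLASS_INDEX, 3, NULL };

        /* One decode path instead of switch statements keyed on AML opcodes */
        printf("Reference: [%s] value %u\n", ref_class_name(&ref),
               (unsigned)ref.value);
        return 0;
}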
diff --git a/drivers/acpi/executer/exmisc.c b/drivers/acpi/executer/exmisc.c
index 731414a581a6..efb191340059 100644
--- a/drivers/acpi/executer/exmisc.c
+++ b/drivers/acpi/executer/exmisc.c
@@ -86,10 +86,10 @@ acpi_ex_get_object_reference(union acpi_operand_object *obj_desc,
86 /* 86 /*
87 * Must be a reference to a Local or Arg 87 * Must be a reference to a Local or Arg
88 */ 88 */
89 switch (obj_desc->reference.opcode) { 89 switch (obj_desc->reference.class) {
90 case AML_LOCAL_OP: 90 case ACPI_REFCLASS_LOCAL:
91 case AML_ARG_OP: 91 case ACPI_REFCLASS_ARG:
92 case AML_DEBUG_OP: 92 case ACPI_REFCLASS_DEBUG:
93 93
94 /* The referenced object is the pseudo-node for the local/arg */ 94 /* The referenced object is the pseudo-node for the local/arg */
95 95
@@ -98,8 +98,8 @@ acpi_ex_get_object_reference(union acpi_operand_object *obj_desc,
98 98
99 default: 99 default:
100 100
101 ACPI_ERROR((AE_INFO, "Unknown Reference opcode %X", 101 ACPI_ERROR((AE_INFO, "Unknown Reference Class %2.2X",
102 obj_desc->reference.opcode)); 102 obj_desc->reference.class));
103 return_ACPI_STATUS(AE_AML_INTERNAL); 103 return_ACPI_STATUS(AE_AML_INTERNAL);
104 } 104 }
105 break; 105 break;
@@ -127,7 +127,7 @@ acpi_ex_get_object_reference(union acpi_operand_object *obj_desc,
127 return_ACPI_STATUS(AE_NO_MEMORY); 127 return_ACPI_STATUS(AE_NO_MEMORY);
128 } 128 }
129 129
130 reference_obj->reference.opcode = AML_REF_OF_OP; 130 reference_obj->reference.class = ACPI_REFCLASS_REFOF;
131 reference_obj->reference.object = referenced_obj; 131 reference_obj->reference.object = referenced_obj;
132 *return_desc = reference_obj; 132 *return_desc = reference_obj;
133 133
diff --git a/drivers/acpi/executer/exoparg1.c b/drivers/acpi/executer/exoparg1.c
index 7c3bea575e02..f622f9eac8a1 100644
--- a/drivers/acpi/executer/exoparg1.c
+++ b/drivers/acpi/executer/exoparg1.c
@@ -825,16 +825,16 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
825 * 825 *
826 * Must resolve/dereference the local/arg reference first 826 * Must resolve/dereference the local/arg reference first
827 */ 827 */
828 switch (operand[0]->reference.opcode) { 828 switch (operand[0]->reference.class) {
829 case AML_LOCAL_OP: 829 case ACPI_REFCLASS_LOCAL:
830 case AML_ARG_OP: 830 case ACPI_REFCLASS_ARG:
831 831
832 /* Set Operand[0] to the value of the local/arg */ 832 /* Set Operand[0] to the value of the local/arg */
833 833
834 status = 834 status =
835 acpi_ds_method_data_get_value 835 acpi_ds_method_data_get_value
836 (operand[0]->reference.opcode, 836 (operand[0]->reference.class,
837 operand[0]->reference.offset, 837 operand[0]->reference.value,
838 walk_state, &temp_desc); 838 walk_state, &temp_desc);
839 if (ACPI_FAILURE(status)) { 839 if (ACPI_FAILURE(status)) {
840 goto cleanup; 840 goto cleanup;
@@ -848,7 +848,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
848 operand[0] = temp_desc; 848 operand[0] = temp_desc;
849 break; 849 break;
850 850
851 case AML_REF_OF_OP: 851 case ACPI_REFCLASS_REFOF:
852 852
853 /* Get the object to which the reference refers */ 853 /* Get the object to which the reference refers */
854 854
@@ -928,8 +928,8 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
928 * This must be a reference object produced by either the 928 * This must be a reference object produced by either the
929 * Index() or ref_of() operator 929 * Index() or ref_of() operator
930 */ 930 */
931 switch (operand[0]->reference.opcode) { 931 switch (operand[0]->reference.class) {
932 case AML_INDEX_OP: 932 case ACPI_REFCLASS_INDEX:
933 933
934 /* 934 /*
935 * The target type for the Index operator must be 935 * The target type for the Index operator must be
@@ -965,7 +965,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
965 return_desc->integer.value = 965 return_desc->integer.value =
966 temp_desc->buffer. 966 temp_desc->buffer.
967 pointer[operand[0]->reference. 967 pointer[operand[0]->reference.
968 offset]; 968 value];
969 break; 969 break;
970 970
971 case ACPI_TYPE_PACKAGE: 971 case ACPI_TYPE_PACKAGE:
@@ -985,7 +985,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
985 default: 985 default:
986 986
987 ACPI_ERROR((AE_INFO, 987 ACPI_ERROR((AE_INFO,
988 "Unknown Index TargetType %X in obj %p", 988 "Unknown Index TargetType %X in reference object %p",
989 operand[0]->reference. 989 operand[0]->reference.
990 target_type, operand[0])); 990 target_type, operand[0]));
991 status = AE_AML_OPERAND_TYPE; 991 status = AE_AML_OPERAND_TYPE;
@@ -993,7 +993,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
993 } 993 }
994 break; 994 break;
995 995
996 case AML_REF_OF_OP: 996 case ACPI_REFCLASS_REFOF:
997 997
998 return_desc = operand[0]->reference.object; 998 return_desc = operand[0]->reference.object;
999 999
@@ -1013,9 +1013,9 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
1013 1013
1014 default: 1014 default:
1015 ACPI_ERROR((AE_INFO, 1015 ACPI_ERROR((AE_INFO,
1016 "Unknown opcode in reference(%p) - %X", 1016 "Unknown class in reference(%p) - %2.2X",
1017 operand[0], 1017 operand[0],
1018 operand[0]->reference.opcode)); 1018 operand[0]->reference.class));
1019 1019
1020 status = AE_TYPE; 1020 status = AE_TYPE;
1021 goto cleanup; 1021 goto cleanup;
diff --git a/drivers/acpi/executer/exoparg2.c b/drivers/acpi/executer/exoparg2.c
index 8e8bbb6ccebd..368def5dffce 100644
--- a/drivers/acpi/executer/exoparg2.c
+++ b/drivers/acpi/executer/exoparg2.c
@@ -391,8 +391,8 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
391 /* Initialize the Index reference object */ 391 /* Initialize the Index reference object */
392 392
393 index = operand[1]->integer.value; 393 index = operand[1]->integer.value;
394 return_desc->reference.offset = (u32) index; 394 return_desc->reference.value = (u32) index;
395 return_desc->reference.opcode = AML_INDEX_OP; 395 return_desc->reference.class = ACPI_REFCLASS_INDEX;
396 396
397 /* 397 /*
398 * At this point, the Source operand is a String, Buffer, or Package. 398 * At this point, the Source operand is a String, Buffer, or Package.
diff --git a/drivers/acpi/executer/exresnte.c b/drivers/acpi/executer/exresnte.c
index 5596f42c9676..423ad3635f3d 100644
--- a/drivers/acpi/executer/exresnte.c
+++ b/drivers/acpi/executer/exresnte.c
@@ -46,8 +46,6 @@
46#include <acpi/acdispat.h> 46#include <acpi/acdispat.h>
47#include <acpi/acinterp.h> 47#include <acpi/acinterp.h>
48#include <acpi/acnamesp.h> 48#include <acpi/acnamesp.h>
49#include <acpi/acparser.h>
50#include <acpi/amlcode.h>
51 49
52#define _COMPONENT ACPI_EXECUTER 50#define _COMPONENT ACPI_EXECUTER
53ACPI_MODULE_NAME("exresnte") 51ACPI_MODULE_NAME("exresnte")
@@ -238,10 +236,10 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
238 236
239 case ACPI_TYPE_LOCAL_REFERENCE: 237 case ACPI_TYPE_LOCAL_REFERENCE:
240 238
241 switch (source_desc->reference.opcode) { 239 switch (source_desc->reference.class) {
242 case AML_LOAD_OP: /* This is a ddb_handle */ 240 case ACPI_REFCLASS_TABLE: /* This is a ddb_handle */
243 case AML_REF_OF_OP: 241 case ACPI_REFCLASS_REFOF:
244 case AML_INDEX_OP: 242 case ACPI_REFCLASS_INDEX:
245 243
246 /* Return an additional reference to the object */ 244 /* Return an additional reference to the object */
247 245
@@ -253,10 +251,8 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
253 /* No named references are allowed here */ 251 /* No named references are allowed here */
254 252
255 ACPI_ERROR((AE_INFO, 253 ACPI_ERROR((AE_INFO,
256 "Unsupported Reference opcode %X (%s)", 254 "Unsupported Reference type %X",
257 source_desc->reference.opcode, 255 source_desc->reference.class));
258 acpi_ps_get_opcode_name(source_desc->
259 reference.opcode)));
260 256
261 return_ACPI_STATUS(AE_AML_OPERAND_TYPE); 257 return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
262 } 258 }
diff --git a/drivers/acpi/executer/exresolv.c b/drivers/acpi/executer/exresolv.c
index b35f7c817acf..89571b92a522 100644
--- a/drivers/acpi/executer/exresolv.c
+++ b/drivers/acpi/executer/exresolv.c
@@ -47,7 +47,6 @@
47#include <acpi/acdispat.h> 47#include <acpi/acdispat.h>
48#include <acpi/acinterp.h> 48#include <acpi/acinterp.h>
49#include <acpi/acnamesp.h> 49#include <acpi/acnamesp.h>
50#include <acpi/acparser.h>
51 50
52#define _COMPONENT ACPI_EXECUTER 51#define _COMPONENT ACPI_EXECUTER
53ACPI_MODULE_NAME("exresolv") 52ACPI_MODULE_NAME("exresolv")
@@ -141,7 +140,7 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
141 acpi_status status = AE_OK; 140 acpi_status status = AE_OK;
142 union acpi_operand_object *stack_desc; 141 union acpi_operand_object *stack_desc;
143 union acpi_operand_object *obj_desc = NULL; 142 union acpi_operand_object *obj_desc = NULL;
144 u16 opcode; 143 u8 ref_type;
145 144
146 ACPI_FUNCTION_TRACE(ex_resolve_object_to_value); 145 ACPI_FUNCTION_TRACE(ex_resolve_object_to_value);
147 146
@@ -152,19 +151,19 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
152 switch (ACPI_GET_OBJECT_TYPE(stack_desc)) { 151 switch (ACPI_GET_OBJECT_TYPE(stack_desc)) {
153 case ACPI_TYPE_LOCAL_REFERENCE: 152 case ACPI_TYPE_LOCAL_REFERENCE:
154 153
155 opcode = stack_desc->reference.opcode; 154 ref_type = stack_desc->reference.class;
156 155
157 switch (opcode) { 156 switch (ref_type) {
158 case AML_LOCAL_OP: 157 case ACPI_REFCLASS_LOCAL:
159 case AML_ARG_OP: 158 case ACPI_REFCLASS_ARG:
160 159
161 /* 160 /*
162 * Get the local from the method's state info 161 * Get the local from the method's state info
163 * Note: this increments the local's object reference count 162 * Note: this increments the local's object reference count
164 */ 163 */
165 status = acpi_ds_method_data_get_value(opcode, 164 status = acpi_ds_method_data_get_value(ref_type,
166 stack_desc-> 165 stack_desc->
167 reference.offset, 166 reference.value,
168 walk_state, 167 walk_state,
169 &obj_desc); 168 &obj_desc);
170 if (ACPI_FAILURE(status)) { 169 if (ACPI_FAILURE(status)) {
@@ -173,7 +172,7 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
173 172
174 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, 173 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
175 "[Arg/Local %X] ValueObj is %p\n", 174 "[Arg/Local %X] ValueObj is %p\n",
176 stack_desc->reference.offset, 175 stack_desc->reference.value,
177 obj_desc)); 176 obj_desc));
178 177
179 /* 178 /*
@@ -184,7 +183,7 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
184 *stack_ptr = obj_desc; 183 *stack_ptr = obj_desc;
185 break; 184 break;
186 185
187 case AML_INDEX_OP: 186 case ACPI_REFCLASS_INDEX:
188 187
189 switch (stack_desc->reference.target_type) { 188 switch (stack_desc->reference.target_type) {
190 case ACPI_TYPE_BUFFER_FIELD: 189 case ACPI_TYPE_BUFFER_FIELD:
@@ -239,15 +238,15 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
239 } 238 }
240 break; 239 break;
241 240
242 case AML_REF_OF_OP: 241 case ACPI_REFCLASS_REFOF:
243 case AML_DEBUG_OP: 242 case ACPI_REFCLASS_DEBUG:
244 case AML_LOAD_OP: 243 case ACPI_REFCLASS_TABLE:
245 244
246 /* Just leave the object as-is, do not dereference */ 245 /* Just leave the object as-is, do not dereference */
247 246
248 break; 247 break;
249 248
250 case AML_INT_NAMEPATH_OP: /* Reference to a named object */ 249 case ACPI_REFCLASS_NAME: /* Reference to a named object */
251 250
252 /* Dereference the name */ 251 /* Dereference the name */
253 252
@@ -273,8 +272,7 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
273 default: 272 default:
274 273
275 ACPI_ERROR((AE_INFO, 274 ACPI_ERROR((AE_INFO,
276 "Unknown Reference opcode %X (%s) in %p", 275 "Unknown Reference type %X in %p", ref_type,
277 opcode, acpi_ps_get_opcode_name(opcode),
278 stack_desc)); 276 stack_desc));
279 status = AE_AML_INTERNAL; 277 status = AE_AML_INTERNAL;
280 break; 278 break;
@@ -388,13 +386,13 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
388 * traversing the list of possibly many nested references. 386 * traversing the list of possibly many nested references.
389 */ 387 */
390 while (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_LOCAL_REFERENCE) { 388 while (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_LOCAL_REFERENCE) {
391 switch (obj_desc->reference.opcode) { 389 switch (obj_desc->reference.class) {
392 case AML_REF_OF_OP: 390 case ACPI_REFCLASS_REFOF:
393 case AML_INT_NAMEPATH_OP: 391 case ACPI_REFCLASS_NAME:
394 392
395 /* Dereference the reference pointer */ 393 /* Dereference the reference pointer */
396 394
397 if (obj_desc->reference.opcode == AML_REF_OF_OP) { 395 if (obj_desc->reference.class == ACPI_REFCLASS_REFOF) {
398 node = obj_desc->reference.object; 396 node = obj_desc->reference.object;
399 } else { /* AML_INT_NAMEPATH_OP */ 397 } else { /* AML_INT_NAMEPATH_OP */
400 398
@@ -429,7 +427,7 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
429 } 427 }
430 break; 428 break;
431 429
432 case AML_INDEX_OP: 430 case ACPI_REFCLASS_INDEX:
433 431
434 /* Get the type of this reference (index into another object) */ 432 /* Get the type of this reference (index into another object) */
435 433
@@ -455,22 +453,22 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
455 } 453 }
456 break; 454 break;
457 455
458 case AML_LOAD_OP: 456 case ACPI_REFCLASS_TABLE:
459 457
460 type = ACPI_TYPE_DDB_HANDLE; 458 type = ACPI_TYPE_DDB_HANDLE;
461 goto exit; 459 goto exit;
462 460
463 case AML_LOCAL_OP: 461 case ACPI_REFCLASS_LOCAL:
464 case AML_ARG_OP: 462 case ACPI_REFCLASS_ARG:
465 463
466 if (return_desc) { 464 if (return_desc) {
467 status = 465 status =
468 acpi_ds_method_data_get_value(obj_desc-> 466 acpi_ds_method_data_get_value(obj_desc->
469 reference. 467 reference.
470 opcode, 468 class,
471 obj_desc-> 469 obj_desc->
472 reference. 470 reference.
473 offset, 471 value,
474 walk_state, 472 walk_state,
475 &obj_desc); 473 &obj_desc);
476 if (ACPI_FAILURE(status)) { 474 if (ACPI_FAILURE(status)) {
@@ -481,10 +479,10 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
481 status = 479 status =
482 acpi_ds_method_data_get_node(obj_desc-> 480 acpi_ds_method_data_get_node(obj_desc->
483 reference. 481 reference.
484 opcode, 482 class,
485 obj_desc-> 483 obj_desc->
486 reference. 484 reference.
487 offset, 485 value,
488 walk_state, 486 walk_state,
489 &node); 487 &node);
490 if (ACPI_FAILURE(status)) { 488 if (ACPI_FAILURE(status)) {
@@ -499,7 +497,7 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
499 } 497 }
500 break; 498 break;
501 499
502 case AML_DEBUG_OP: 500 case ACPI_REFCLASS_DEBUG:
503 501
504 /* The Debug Object is of type "DebugObject" */ 502 /* The Debug Object is of type "DebugObject" */
505 503
@@ -509,8 +507,8 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
509 default: 507 default:
510 508
511 ACPI_ERROR((AE_INFO, 509 ACPI_ERROR((AE_INFO,
512 "Unknown Reference subtype %X", 510 "Unknown Reference Class %2.2X",
513 obj_desc->reference.opcode)); 511 obj_desc->reference.class));
514 return_ACPI_STATUS(AE_AML_INTERNAL); 512 return_ACPI_STATUS(AE_AML_INTERNAL);
515 } 513 }
516 } 514 }
diff --git a/drivers/acpi/executer/exresop.c b/drivers/acpi/executer/exresop.c
index 54085f16ec28..0bb82593da72 100644
--- a/drivers/acpi/executer/exresop.c
+++ b/drivers/acpi/executer/exresop.c
@@ -225,41 +225,36 @@ acpi_ex_resolve_operands(u16 opcode,
225 225
226 if (object_type == (u8) ACPI_TYPE_LOCAL_REFERENCE) { 226 if (object_type == (u8) ACPI_TYPE_LOCAL_REFERENCE) {
227 227
228 /* Decode the Reference */ 228 /* Validate the Reference */
229 229
230 op_info = acpi_ps_get_opcode_info(opcode); 230 switch (obj_desc->reference.class) {
231 if (op_info->class == AML_CLASS_UNKNOWN) { 231 case ACPI_REFCLASS_DEBUG:
232 return_ACPI_STATUS(AE_AML_BAD_OPCODE);
233 }
234 232
235 switch (obj_desc->reference.opcode) {
236 case AML_DEBUG_OP:
237 target_op = AML_DEBUG_OP; 233 target_op = AML_DEBUG_OP;
238 234
239 /*lint -fallthrough */ 235 /*lint -fallthrough */
240 236
241 case AML_INDEX_OP: 237 case ACPI_REFCLASS_ARG:
242 case AML_REF_OF_OP: 238 case ACPI_REFCLASS_LOCAL:
243 case AML_ARG_OP: 239 case ACPI_REFCLASS_INDEX:
244 case AML_LOCAL_OP: 240 case ACPI_REFCLASS_REFOF:
245 case AML_LOAD_OP: /* ddb_handle from LOAD_OP or LOAD_TABLE_OP */ 241 case ACPI_REFCLASS_TABLE: /* ddb_handle from LOAD_OP or LOAD_TABLE_OP */
246 case AML_INT_NAMEPATH_OP: /* Reference to a named object */ 242 case ACPI_REFCLASS_NAME: /* Reference to a named object */
247 243
248 ACPI_DEBUG_ONLY_MEMBERS(ACPI_DEBUG_PRINT 244 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
249 ((ACPI_DB_EXEC, 245 "Operand is a Reference, Class [%s] %2.2X\n",
250 "Operand is a Reference, RefOpcode [%s]\n", 246 acpi_ut_get_reference_name
251 (acpi_ps_get_opcode_info 247 (obj_desc),
252 (obj_desc-> 248 obj_desc->reference.
253 reference. 249 class));
254 opcode))->
255 name)));
256 break; 250 break;
257 251
258 default: 252 default:
253
259 ACPI_ERROR((AE_INFO, 254 ACPI_ERROR((AE_INFO,
260 "Operand is a Reference, Unknown Reference Opcode: %X", 255 "Unknown Reference Class %2.2X in %p",
261 obj_desc->reference. 256 obj_desc->reference.class,
262 opcode)); 257 obj_desc));
263 258
264 return_ACPI_STATUS(AE_AML_OPERAND_TYPE); 259 return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
265 } 260 }
@@ -270,8 +265,7 @@ acpi_ex_resolve_operands(u16 opcode,
270 265
271 /* Invalid descriptor */ 266 /* Invalid descriptor */
272 267
273 ACPI_ERROR((AE_INFO, 268 ACPI_ERROR((AE_INFO, "Invalid descriptor %p [%s]",
274 "Invalid descriptor %p [%s]",
275 obj_desc, 269 obj_desc,
276 acpi_ut_get_descriptor_name(obj_desc))); 270 acpi_ut_get_descriptor_name(obj_desc)));
277 271
@@ -343,7 +337,7 @@ acpi_ex_resolve_operands(u16 opcode,
343 if ((opcode == AML_STORE_OP) && 337 if ((opcode == AML_STORE_OP) &&
344 (ACPI_GET_OBJECT_TYPE(*stack_ptr) == 338 (ACPI_GET_OBJECT_TYPE(*stack_ptr) ==
345 ACPI_TYPE_LOCAL_REFERENCE) 339 ACPI_TYPE_LOCAL_REFERENCE)
346 && ((*stack_ptr)->reference.opcode == AML_INDEX_OP)) { 340 && ((*stack_ptr)->reference.class == ACPI_REFCLASS_INDEX)) {
347 goto next_operand; 341 goto next_operand;
348 } 342 }
349 break; 343 break;
diff --git a/drivers/acpi/executer/exstore.c b/drivers/acpi/executer/exstore.c
index 38b55e352495..3318df4cbd98 100644
--- a/drivers/acpi/executer/exstore.c
+++ b/drivers/acpi/executer/exstore.c
@@ -47,7 +47,6 @@
47#include <acpi/acinterp.h> 47#include <acpi/acinterp.h>
48#include <acpi/amlcode.h> 48#include <acpi/amlcode.h>
49#include <acpi/acnamesp.h> 49#include <acpi/acnamesp.h>
50#include <acpi/acparser.h>
51 50
52#define _COMPONENT ACPI_EXECUTER 51#define _COMPONENT ACPI_EXECUTER
53ACPI_MODULE_NAME("exstore") 52ACPI_MODULE_NAME("exstore")
@@ -179,22 +178,26 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
179 178
180 case ACPI_TYPE_LOCAL_REFERENCE: 179 case ACPI_TYPE_LOCAL_REFERENCE:
181 180
182 if (source_desc->reference.opcode == AML_INDEX_OP) { 181 ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[%s] ",
183 ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, 182 acpi_ut_get_reference_name(source_desc)));
184 "[%s, 0x%X]\n", 183
185 acpi_ps_get_opcode_name 184 /* Decode the reference */
186 (source_desc->reference.opcode), 185
187 source_desc->reference.offset)); 186 switch (source_desc->reference.class) {
188 } else { 187 case ACPI_REFCLASS_INDEX:
189 ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[%s]", 188
190 acpi_ps_get_opcode_name 189 ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "0x%X\n",
191 (source_desc->reference.opcode))); 190 source_desc->reference.value));
192 } 191 break;
192
193 case ACPI_REFCLASS_TABLE:
193 194
194 if (source_desc->reference.opcode == AML_LOAD_OP) { /* Load and load_table */
195 ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, 195 ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
196 " Table OwnerId %p\n", 196 "Table Index 0x%X\n",
197 source_desc->reference.object)); 197 source_desc->reference.value));
198 break;
199
200 default:
198 break; 201 break;
199 } 202 }
200 203
@@ -347,15 +350,15 @@ acpi_ex_store(union acpi_operand_object *source_desc,
347 } 350 }
348 351
349 /* 352 /*
350 * Examine the Reference opcode. These cases are handled: 353 * Examine the Reference class. These cases are handled:
351 * 354 *
352 * 1) Store to Name (Change the object associated with a name) 355 * 1) Store to Name (Change the object associated with a name)
353 * 2) Store to an indexed area of a Buffer or Package 356 * 2) Store to an indexed area of a Buffer or Package
354 * 3) Store to a Method Local or Arg 357 * 3) Store to a Method Local or Arg
355 * 4) Store to the debug object 358 * 4) Store to the debug object
356 */ 359 */
357 switch (ref_desc->reference.opcode) { 360 switch (ref_desc->reference.class) {
358 case AML_REF_OF_OP: 361 case ACPI_REFCLASS_REFOF:
359 362
360 /* Storing an object into a Name "container" */ 363 /* Storing an object into a Name "container" */
361 364
@@ -365,7 +368,7 @@ acpi_ex_store(union acpi_operand_object *source_desc,
365 ACPI_IMPLICIT_CONVERSION); 368 ACPI_IMPLICIT_CONVERSION);
366 break; 369 break;
367 370
368 case AML_INDEX_OP: 371 case ACPI_REFCLASS_INDEX:
369 372
370 /* Storing to an Index (pointer into a packager or buffer) */ 373 /* Storing to an Index (pointer into a packager or buffer) */
371 374
@@ -374,18 +377,18 @@ acpi_ex_store(union acpi_operand_object *source_desc,
374 walk_state); 377 walk_state);
375 break; 378 break;
376 379
377 case AML_LOCAL_OP: 380 case ACPI_REFCLASS_LOCAL:
378 case AML_ARG_OP: 381 case ACPI_REFCLASS_ARG:
379 382
380 /* Store to a method local/arg */ 383 /* Store to a method local/arg */
381 384
382 status = 385 status =
383 acpi_ds_store_object_to_local(ref_desc->reference.opcode, 386 acpi_ds_store_object_to_local(ref_desc->reference.class,
384 ref_desc->reference.offset, 387 ref_desc->reference.value,
385 source_desc, walk_state); 388 source_desc, walk_state);
386 break; 389 break;
387 390
388 case AML_DEBUG_OP: 391 case ACPI_REFCLASS_DEBUG:
389 392
390 /* 393 /*
391 * Storing to the Debug object causes the value stored to be 394 * Storing to the Debug object causes the value stored to be
@@ -401,9 +404,9 @@ acpi_ex_store(union acpi_operand_object *source_desc,
401 404
402 default: 405 default:
403 406
404 ACPI_ERROR((AE_INFO, "Unknown Reference opcode %X", 407 ACPI_ERROR((AE_INFO, "Unknown Reference Class %2.2X",
405 ref_desc->reference.opcode)); 408 ref_desc->reference.class));
406 ACPI_DUMP_ENTRY(ref_desc, ACPI_LV_ERROR); 409 ACPI_DUMP_ENTRY(ref_desc, ACPI_LV_INFO);
407 410
408 status = AE_AML_INTERNAL; 411 status = AE_AML_INTERNAL;
409 break; 412 break;
@@ -458,7 +461,7 @@ acpi_ex_store_object_to_index(union acpi_operand_object *source_desc,
458 461
459 if (ACPI_GET_OBJECT_TYPE(source_desc) == 462 if (ACPI_GET_OBJECT_TYPE(source_desc) ==
460 ACPI_TYPE_LOCAL_REFERENCE 463 ACPI_TYPE_LOCAL_REFERENCE
461 && source_desc->reference.opcode == AML_LOAD_OP) { 464 && source_desc->reference.class == ACPI_REFCLASS_TABLE) {
462 465
463 /* This is a DDBHandle, just add a reference to it */ 466 /* This is a DDBHandle, just add a reference to it */
464 467
@@ -553,7 +556,7 @@ acpi_ex_store_object_to_index(union acpi_operand_object *source_desc,
553 556
554 /* Store the source value into the target buffer byte */ 557 /* Store the source value into the target buffer byte */
555 558
556 obj_desc->buffer.pointer[index_desc->reference.offset] = value; 559 obj_desc->buffer.pointer[index_desc->reference.value] = value;
557 break; 560 break;
558 561
559 default: 562 default:
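Editorial note: acpi_ex_store() above now dispatches on the reference class to decide where a stored value lands -- a named object, an indexed element of a Buffer or Package, a method Local/Arg, or the Debug object. A compact, self-contained sketch of that dispatch shape; the enum values and print statements are placeholders, not ACPICA behavior.

#include <stdio.h>
#include <stdint.h>

enum ref_class { REFCLASS_REFOF, REFCLASS_INDEX, REFCLASS_LOCAL,
                 REFCLASS_ARG, REFCLASS_DEBUG };

struct reference { enum ref_class class; uint32_t value; };

/* Sketch of the store dispatch; the actions here are placeholders */
static int store_to_reference(const struct reference *ref, uint64_t value)
{
        switch (ref->class) {
        case REFCLASS_REFOF:            /* 1) store to a named "container"   */
                printf("store %llu to named object\n", (unsigned long long)value);
                return 0;
        case REFCLASS_INDEX:            /* 2) store into a Buffer/Package    */
                printf("store %llu at element %u\n",
                       (unsigned long long)value, (unsigned)ref->value);
                return 0;
        case REFCLASS_LOCAL:            /* 3) store to a method Local or Arg */
        case REFCLASS_ARG:
                printf("store %llu to local/arg %u\n",
                       (unsigned long long)value, (unsigned)ref->value);
                return 0;
        case REFCLASS_DEBUG:            /* 4) store to the Debug object      */
                printf("Debug: %llu\n", (unsigned long long)value);
                return 0;
        default:                        /* unknown class is an internal error */
                return -1;
        }
}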
diff --git a/drivers/acpi/executer/exstoren.c b/drivers/acpi/executer/exstoren.c
index a6d2168b81f9..eef61a00803e 100644
--- a/drivers/acpi/executer/exstoren.c
+++ b/drivers/acpi/executer/exstoren.c
@@ -121,7 +121,8 @@ acpi_ex_resolve_object(union acpi_operand_object **source_desc_ptr,
121 (ACPI_GET_OBJECT_TYPE(source_desc) != ACPI_TYPE_STRING) && 121 (ACPI_GET_OBJECT_TYPE(source_desc) != ACPI_TYPE_STRING) &&
122 !((ACPI_GET_OBJECT_TYPE(source_desc) == 122 !((ACPI_GET_OBJECT_TYPE(source_desc) ==
123 ACPI_TYPE_LOCAL_REFERENCE) 123 ACPI_TYPE_LOCAL_REFERENCE)
124 && (source_desc->reference.opcode == AML_LOAD_OP))) { 124 && (source_desc->reference.class ==
125 ACPI_REFCLASS_TABLE))) {
125 126
126 /* Conversion successful but still not a valid type */ 127 /* Conversion successful but still not a valid type */
127 128
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 2655bc1b4eeb..eaaee1660bdf 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -34,7 +34,6 @@
34#include <acpi/acpi_bus.h> 34#include <acpi/acpi_bus.h>
35#include <acpi/acpi_drivers.h> 35#include <acpi/acpi_drivers.h>
36 36
37#define ACPI_FAN_COMPONENT 0x00200000
38#define ACPI_FAN_CLASS "fan" 37#define ACPI_FAN_CLASS "fan"
39#define ACPI_FAN_FILE_STATE "state" 38#define ACPI_FAN_FILE_STATE "state"
40 39
@@ -265,7 +264,7 @@ static int acpi_fan_add(struct acpi_device *device)
265 264
266 dev_info(&device->dev, "registered as cooling_device%d\n", cdev->id); 265 dev_info(&device->dev, "registered as cooling_device%d\n", cdev->id);
267 266
268 acpi_driver_data(device) = cdev; 267 device->driver_data = cdev;
269 result = sysfs_create_link(&device->dev.kobj, 268 result = sysfs_create_link(&device->dev.kobj,
270 &cdev->device.kobj, 269 &cdev->device.kobj,
271 "thermal_cooling"); 270 "thermal_cooling");
@@ -327,8 +326,8 @@ static int acpi_fan_resume(struct acpi_device *device)
327 326
328 result = acpi_bus_get_power(device->handle, &power_state); 327 result = acpi_bus_get_power(device->handle, &power_state);
329 if (result) { 328 if (result) {
330 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, 329 printk(KERN_ERR PREFIX
331 "Error reading fan power state\n")); 330 "Error reading fan power state\n");
332 return result; 331 return result;
333 } 332 }
334 333
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 24649ada08df..adec3d15810a 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -140,6 +140,46 @@ struct device *acpi_get_physical_device(acpi_handle handle)
140 140
141EXPORT_SYMBOL(acpi_get_physical_device); 141EXPORT_SYMBOL(acpi_get_physical_device);
142 142
 143/* ToDo: When a PCI bridge is found, return the PCI device behind the bridge.
 144 * This should work in general, but did not on a Lenovo T61 for the
 145 * graphics card. It must be fixed once the PCI device is bound and the
 146 * kernel device struct is attached to the acpi device.
 147 * Note: A successful call increases the reference count by one;
 148 * the caller must then call put_device(dev) on the returned device.
 149 */
150struct device *acpi_get_physical_pci_device(acpi_handle handle)
151{
152 struct device *dev;
153 long long device_id;
154 acpi_status status;
155
156 status =
157 acpi_evaluate_integer(handle, "_ADR", NULL, &device_id);
158
159 if (ACPI_FAILURE(status))
160 return NULL;
161
162 /* We need to attempt to determine whether the _ADR refers to a
163 PCI device or not. There's no terribly good way to do this,
164 so the best we can hope for is to assume that there'll never
165 be a device in the host bridge */
166 if (device_id >= 0x10000) {
167 /* It looks like a PCI device. Does it exist? */
168 dev = acpi_get_physical_device(handle);
169 } else {
170 /* It doesn't look like a PCI device. Does its parent
171 exist? */
172 acpi_handle phandle;
173 if (acpi_get_parent(handle, &phandle))
174 return NULL;
175 dev = acpi_get_physical_device(phandle);
176 }
177 if (!dev)
178 return NULL;
179 return dev;
180}
181EXPORT_SYMBOL(acpi_get_physical_pci_device);
182
143static int acpi_bind_one(struct device *dev, acpi_handle handle) 183static int acpi_bind_one(struct device *dev, acpi_handle handle)
144{ 184{
145 struct acpi_device *acpi_dev; 185 struct acpi_device *acpi_dev;
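Editorial note: the new acpi_get_physical_pci_device() above leans on the PCI _ADR encoding -- the high 16 bits hold the device number and the low 16 bits the function number -- so any value of 0x10000 or more has a nonzero device number and is treated as a PCI device, while smaller values fall back to looking up the parent handle. A small standalone decode of that encoding; the helper name is illustrative.

#include <stdio.h>
#include <stdint.h>

/* PCI _ADR layout per the ACPI spec: device in bits 31:16, function in 15:0 */
static void decode_pci_adr(uint64_t adr)
{
        unsigned int device   = (unsigned int)((adr >> 16) & 0xFFFF);
        unsigned int function = (unsigned int)(adr & 0xFFFF);

        printf("_ADR 0x%llx -> device %u, function %u%s\n",
               (unsigned long long)adr, device, function,
               adr >= 0x10000 ? "" : " (ambiguous: device 0, maybe not PCI)");
}

int main(void)
{
        decode_pci_adr(0x00000000);   /* device 0, function 0: ambiguous      */
        decode_pci_adr(0x001C0003);   /* device 0x1C, function 3: clearly PCI */
        return 0;
}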
diff --git a/drivers/acpi/hardware/hwsleep.c b/drivers/acpi/hardware/hwsleep.c
index dba3cfbe8cba..25dccdf179b9 100644
--- a/drivers/acpi/hardware/hwsleep.c
+++ b/drivers/acpi/hardware/hwsleep.c
@@ -78,19 +78,17 @@ acpi_set_firmware_waking_vector(acpi_physical_address physical_address)
78 return_ACPI_STATUS(status); 78 return_ACPI_STATUS(status);
79 } 79 }
80 80
81 /* Set the vector */ 81 /*
82 * According to the ACPI specification 2.0c and later, the 64-bit
83 * waking vector should be cleared and the 32-bit waking vector should
84 * be used, unless we want the wake-up code to be called by the BIOS in
85 * Protected Mode. Some systems (for example HP dv5-1004nr) are known
86 * to fail to resume if the 64-bit vector is used.
87 */
88 if (facs->version >= 1)
89 facs->xfirmware_waking_vector = 0;
82 90
83 if ((facs->length < 32) || (!(facs->xfirmware_waking_vector))) { 91 facs->firmware_waking_vector = (u32)physical_address;
84 /*
85 * ACPI 1.0 FACS or short table or optional X_ field is zero
86 */
87 facs->firmware_waking_vector = (u32) physical_address;
88 } else {
89 /*
90 * ACPI 2.0 FACS with valid X_ field
91 */
92 facs->xfirmware_waking_vector = physical_address;
93 }
94 92
95 return_ACPI_STATUS(AE_OK); 93 return_ACPI_STATUS(AE_OK);
96} 94}
@@ -134,20 +132,7 @@ acpi_get_firmware_waking_vector(acpi_physical_address * physical_address)
134 } 132 }
135 133
136 /* Get the vector */ 134 /* Get the vector */
137 135 *physical_address = (acpi_physical_address)facs->firmware_waking_vector;
138 if ((facs->length < 32) || (!(facs->xfirmware_waking_vector))) {
139 /*
140 * ACPI 1.0 FACS or short table or optional X_ field is zero
141 */
142 *physical_address =
143 (acpi_physical_address) facs->firmware_waking_vector;
144 } else {
145 /*
146 * ACPI 2.0 FACS with valid X_ field
147 */
148 *physical_address =
149 (acpi_physical_address) facs->xfirmware_waking_vector;
150 }
151 136
152 return_ACPI_STATUS(AE_OK); 137 return_ACPI_STATUS(AE_OK);
153} 138}
@@ -627,6 +612,13 @@ acpi_status acpi_leave_sleep_state(u8 sleep_state)
627 } 612 }
628 /* TBD: _WAK "sometimes" returns stuff - do we want to look at it? */ 613 /* TBD: _WAK "sometimes" returns stuff - do we want to look at it? */
629 614
615 /*
616 * Some BIOSes assume that WAK_STS will be cleared on resume and use
617 * it to determine whether the system is rebooting or resuming. Clear
618 * it for compatibility.
619 */
620 acpi_set_register(ACPI_BITREG_WAKE_STATUS, 1);
621
630 acpi_gbl_system_awake_and_running = TRUE; 622 acpi_gbl_system_awake_and_running = TRUE;
631 623
632 /* Enable power button */ 624 /* Enable power button */
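Editorial note: the hwsleep change prefers the legacy 32-bit FACS waking vector and explicitly zeroes the 64-bit X_ field (when the FACS is version 1 or later), because boards such as the HP dv5-1004nr fail to resume when the 64-bit vector is used. A minimal sketch of that write order, using a simplified FACS layout rather than the real acpi_table_facs definition:

#include <stdint.h>

struct facs {                          /* simplified stand-in for the FACS */
        uint8_t  version;              /* >= 1 means the X_ field exists   */
        uint32_t firmware_waking_vector;   /* 32-bit, real-mode entry      */
        uint64_t xfirmware_waking_vector;  /* 64-bit, protected-mode entry */
};

static void set_waking_vector(struct facs *facs, uint64_t physical_address)
{
        /*
         * Clear the 64-bit vector first so firmware falls back to the
         * 32-bit one; some BIOSes fail to resume via the 64-bit vector.
         */
        if (facs->version >= 1)
                facs->xfirmware_waking_vector = 0;

        facs->firmware_waking_vector = (uint32_t)physical_address;
}

The same file also clears WAK_STS after _WAK runs, since some BIOSes read that bit to tell a resume apart from a reboot.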
diff --git a/drivers/acpi/namespace/Makefile b/drivers/acpi/namespace/Makefile
index 3f63d3640696..371a2daf837f 100644
--- a/drivers/acpi/namespace/Makefile
+++ b/drivers/acpi/namespace/Makefile
@@ -5,7 +5,7 @@
5obj-y := nsaccess.o nsload.o nssearch.o nsxfeval.o \ 5obj-y := nsaccess.o nsload.o nssearch.o nsxfeval.o \
6 nsalloc.o nseval.o nsnames.o nsutils.o nsxfname.o \ 6 nsalloc.o nseval.o nsnames.o nsutils.o nsxfname.o \
7 nsdump.o nsinit.o nsobject.o nswalk.o nsxfobj.o \ 7 nsdump.o nsinit.o nsobject.o nswalk.o nsxfobj.o \
8 nsparse.o 8 nsparse.o nspredef.o
9 9
10obj-$(ACPI_FUTURE_USAGE) += nsdumpdv.o 10obj-$(ACPI_FUTURE_USAGE) += nsdumpdv.o
11 11
diff --git a/drivers/acpi/namespace/nsdump.c b/drivers/acpi/namespace/nsdump.c
index 0ab22004728a..cc0ae39440e4 100644
--- a/drivers/acpi/namespace/nsdump.c
+++ b/drivers/acpi/namespace/nsdump.c
@@ -43,7 +43,6 @@
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acnamesp.h> 45#include <acpi/acnamesp.h>
46#include <acpi/acparser.h>
47 46
48#define _COMPONENT ACPI_NAMESPACE 47#define _COMPONENT ACPI_NAMESPACE
49ACPI_MODULE_NAME("nsdump") 48ACPI_MODULE_NAME("nsdump")
@@ -334,9 +333,7 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
334 case ACPI_TYPE_LOCAL_REFERENCE: 333 case ACPI_TYPE_LOCAL_REFERENCE:
335 334
336 acpi_os_printf("[%s]\n", 335 acpi_os_printf("[%s]\n",
337 acpi_ps_get_opcode_name(obj_desc-> 336 acpi_ut_get_reference_name(obj_desc));
338 reference.
339 opcode));
340 break; 337 break;
341 338
342 case ACPI_TYPE_BUFFER_FIELD: 339 case ACPI_TYPE_BUFFER_FIELD:
diff --git a/drivers/acpi/namespace/nseval.c b/drivers/acpi/namespace/nseval.c
index d369164e00b0..4cdf03ac2b46 100644
--- a/drivers/acpi/namespace/nseval.c
+++ b/drivers/acpi/namespace/nseval.c
@@ -78,6 +78,7 @@ ACPI_MODULE_NAME("nseval")
78acpi_status acpi_ns_evaluate(struct acpi_evaluate_info * info) 78acpi_status acpi_ns_evaluate(struct acpi_evaluate_info * info)
79{ 79{
80 acpi_status status; 80 acpi_status status;
81 struct acpi_namespace_node *node;
81 82
82 ACPI_FUNCTION_TRACE(ns_evaluate); 83 ACPI_FUNCTION_TRACE(ns_evaluate);
83 84
@@ -117,6 +118,8 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info * info)
117 info->resolved_node, 118 info->resolved_node,
118 acpi_ns_get_attached_object(info->resolved_node))); 119 acpi_ns_get_attached_object(info->resolved_node)));
119 120
121 node = info->resolved_node;
122
120 /* 123 /*
121 * Two major cases here: 124 * Two major cases here:
122 * 125 *
@@ -148,21 +151,22 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info * info)
148 info->param_count++; 151 info->param_count++;
149 } 152 }
150 153
151 /* Error if too few arguments were passed in */ 154 /*
155 * Warning if too few or too many arguments have been passed by the
156 * caller. We don't want to abort here with an error because an
157 * incorrect number of arguments may not cause the method to fail.
158 * However, the method will fail if there are too few arguments passed
159 * and the method attempts to use one of the missing ones.
160 */
152 161
153 if (info->param_count < info->obj_desc->method.param_count) { 162 if (info->param_count < info->obj_desc->method.param_count) {
154 ACPI_ERROR((AE_INFO, 163 ACPI_WARNING((AE_INFO,
155 "Insufficient arguments - " 164 "Insufficient arguments - "
156 "method [%4.4s] needs %d, found %d", 165 "method [%4.4s] needs %d, found %d",
157 acpi_ut_get_node_name(info->resolved_node), 166 acpi_ut_get_node_name(info->resolved_node),
158 info->obj_desc->method.param_count, 167 info->obj_desc->method.param_count,
159 info->param_count)); 168 info->param_count));
160 return_ACPI_STATUS(AE_MISSING_ARGUMENTS); 169 } else if (info->param_count >
161 }
162
163 /* Just a warning if too many arguments */
164
165 else if (info->param_count >
166 info->obj_desc->method.param_count) { 170 info->obj_desc->method.param_count) {
167 ACPI_WARNING((AE_INFO, 171 ACPI_WARNING((AE_INFO,
168 "Excess arguments - " 172 "Excess arguments - "
@@ -195,7 +199,28 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info * info)
195 } else { 199 } else {
196 /* 200 /*
197 * 2) Object is not a method, return its current value 201 * 2) Object is not a method, return its current value
202 *
203 * Disallow certain object types. For these, "evaluation" is undefined.
198 */ 204 */
205 switch (info->resolved_node->type) {
206 case ACPI_TYPE_DEVICE:
207 case ACPI_TYPE_EVENT:
208 case ACPI_TYPE_MUTEX:
209 case ACPI_TYPE_REGION:
210 case ACPI_TYPE_THERMAL:
211 case ACPI_TYPE_LOCAL_SCOPE:
212
213 ACPI_ERROR((AE_INFO,
214 "[%4.4s] Evaluation of object type [%s] is not supported",
215 info->resolved_node->name.ascii,
216 acpi_ut_get_type_name(info->resolved_node->
217 type)));
218
219 return_ACPI_STATUS(AE_TYPE);
220
221 default:
222 break;
223 }
199 224
200 /* 225 /*
201 * Objects require additional resolution steps (e.g., the Node may be 226 * Objects require additional resolution steps (e.g., the Node may be
@@ -239,9 +264,35 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info * info)
239 } 264 }
240 } 265 }
241 266
242 /* 267 /* Validation of return values for ACPI-predefined methods and objects */
243 * Check if there is a return value that must be dealt with 268
244 */ 269 if ((status == AE_OK) || (status == AE_CTRL_RETURN_VALUE)) {
270 /*
271 * If this is the first evaluation, check the return value. This
272 * ensures that any warnings will only be emitted during the very
273 * first evaluation of the object.
274 */
275 if (!(node->flags & ANOBJ_EVALUATED)) {
276 /*
277 * Check for a predefined ACPI name. If found, validate the
278 * returned object.
279 *
280 * Note: Ignore return status for now, emit warnings if there are
281 * problems with the returned object. May change later to abort
282 * the method on invalid return object.
283 */
284 (void)acpi_ns_check_predefined_names(node,
285 info->
286 return_object);
287 }
288
289 /* Mark the node as having been evaluated */
290
291 node->flags |= ANOBJ_EVALUATED;
292 }
293
294 /* Check if there is a return value that must be dealt with */
295
245 if (status == AE_CTRL_RETURN_VALUE) { 296 if (status == AE_CTRL_RETURN_VALUE) {
246 297
247 /* If caller does not want the return value, delete it */ 298 /* If caller does not want the return value, delete it */
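Editorial note: acpi_ns_evaluate() above is relaxed so an argument-count mismatch only emits a warning (the method may never touch the missing argument), and a one-time validation of the returned object is hooked in, keyed on an "already evaluated" flag so warnings fire only on the first evaluation. A standalone sketch of that control flow; the names and checks are illustrative, not the ACPICA routines.

#include <stdio.h>
#include <stdbool.h>

struct method_node {
        const char *name;
        unsigned    expected_args;
        bool        evaluated;      /* plays the role of ANOBJ_EVALUATED */
};

static void check_argument_count(const struct method_node *m, unsigned passed)
{
        /* Mismatches are warnings, not fatal: the method may not use them */
        if (passed < m->expected_args)
                fprintf(stderr, "warning: %s needs %u args, got %u\n",
                        m->name, m->expected_args, passed);
        else if (passed > m->expected_args)
                fprintf(stderr, "warning: %s takes %u args, %u are ignored\n",
                        m->name, m->expected_args, passed);
}

static void check_return_object_once(struct method_node *m, const void *ret)
{
        if (!m->evaluated) {
                /* validate 'ret' against the expected type(s) here ... */
                if (!ret)
                        fprintf(stderr, "warning: %s returned nothing\n", m->name);
        }
        m->evaluated = true;        /* later evaluations stay quiet */
}

int main(void)
{
        struct method_node bif = { "_BIF", 0, false };

        check_argument_count(&bif, 1);          /* excess argument: warn     */
        check_return_object_once(&bif, NULL);   /* missing return: warn once */
        check_return_object_once(&bif, NULL);   /* second call: no warning   */
        return 0;
}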
diff --git a/drivers/acpi/namespace/nsnames.c b/drivers/acpi/namespace/nsnames.c
index bd5773878009..42a39a7c96e9 100644
--- a/drivers/acpi/namespace/nsnames.c
+++ b/drivers/acpi/namespace/nsnames.c
@@ -115,7 +115,6 @@ acpi_ns_build_external_path(struct acpi_namespace_node *node,
115 return (AE_OK); 115 return (AE_OK);
116} 116}
117 117
118#ifdef ACPI_DEBUG_OUTPUT
119/******************************************************************************* 118/*******************************************************************************
120 * 119 *
121 * FUNCTION: acpi_ns_get_external_pathname 120 * FUNCTION: acpi_ns_get_external_pathname
@@ -142,7 +141,7 @@ char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node)
142 141
143 size = acpi_ns_get_pathname_length(node); 142 size = acpi_ns_get_pathname_length(node);
144 if (!size) { 143 if (!size) {
145 return (NULL); 144 return_PTR(NULL);
146 } 145 }
147 146
148 /* Allocate a buffer to be returned to caller */ 147 /* Allocate a buffer to be returned to caller */
@@ -157,12 +156,12 @@ char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node)
157 156
158 status = acpi_ns_build_external_path(node, size, name_buffer); 157 status = acpi_ns_build_external_path(node, size, name_buffer);
159 if (ACPI_FAILURE(status)) { 158 if (ACPI_FAILURE(status)) {
160 return (NULL); 159 ACPI_FREE(name_buffer);
160 return_PTR(NULL);
161 } 161 }
162 162
163 return_PTR(name_buffer); 163 return_PTR(name_buffer);
164} 164}
165#endif
166 165
167/******************************************************************************* 166/*******************************************************************************
168 * 167 *
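Editorial note: the nsnames fix closes a small leak -- when building the external path into the freshly allocated buffer fails, the buffer is now released before returning NULL (and the function is no longer compiled only for debug builds, since nspredef.c needs it). The allocate-fill-or-free pattern in standalone form; the function names and the example path are placeholders.

#include <stdlib.h>
#include <string.h>

/* Placeholder for acpi_ns_build_external_path(); returns 0 on success */
static int build_path(char *dst, size_t size)
{
        const char *path = "\\_SB.PCI0.LPCB";   /* illustrative path only */

        if (strlen(path) + 1 > size)
                return -1;              /* would not fit: caller must clean up */
        memcpy(dst, path, strlen(path) + 1);
        return 0;
}

static char *get_external_pathname(size_t size)
{
        char *name_buffer = calloc(1, size);

        if (!name_buffer)
                return NULL;

        if (build_path(name_buffer, size) != 0) {
                free(name_buffer);      /* the fix: do not leak on failure */
                return NULL;
        }
        return name_buffer;
}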
diff --git a/drivers/acpi/namespace/nspredef.c b/drivers/acpi/namespace/nspredef.c
new file mode 100644
index 000000000000..0f17cf0898c9
--- /dev/null
+++ b/drivers/acpi/namespace/nspredef.c
@@ -0,0 +1,900 @@
1/******************************************************************************
2 *
3 * Module Name: nspredef - Validation of ACPI predefined methods and objects
4 * $Revision: 1.1 $
5 *
6 *****************************************************************************/
7
8/*
9 * Copyright (C) 2000 - 2008, Intel Corp.
10 * All rights reserved.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions, and the following disclaimer,
17 * without modification.
18 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
19 * substantially similar to the "NO WARRANTY" disclaimer below
20 * ("Disclaimer") and any redistribution must be conditioned upon
21 * including a substantially similar Disclaimer requirement for further
22 * binary redistribution.
23 * 3. Neither the names of the above-listed copyright holders nor the names
24 * of any contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * Alternatively, this software may be distributed under the terms of the
28 * GNU General Public License ("GPL") version 2 as published by the Free
29 * Software Foundation.
30 *
31 * NO WARRANTY
32 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
33 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
34 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
35 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
36 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
37 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
38 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
39 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
40 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
41 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
42 * POSSIBILITY OF SUCH DAMAGES.
43 */
44
45#include <acpi/acpi.h>
46#include <acpi/acnamesp.h>
47#include <acpi/acpredef.h>
48
49#define _COMPONENT ACPI_NAMESPACE
50ACPI_MODULE_NAME("nspredef")
51
52/*******************************************************************************
53 *
54 * This module validates predefined ACPI objects that appear in the namespace,
55 * at the time they are evaluated (via acpi_evaluate_object). The purpose of this
56 * validation is to detect problems with BIOS-exposed predefined ACPI objects
57 * before the results are returned to the ACPI-related drivers.
58 *
59 * There are several areas that are validated:
60 *
61 * 1) The number of input arguments as defined by the method/object in the
62 * ASL is validated against the ACPI specification.
63 * 2) The type of the return object (if any) is validated against the ACPI
64 * specification.
65 * 3) For returned package objects, the count of package elements is
66 * validated, as well as the type of each package element. Nested
67 * packages are supported.
68 *
69 * For any problems found, a warning message is issued.
70 *
71 ******************************************************************************/
72/* Local prototypes */
73static acpi_status
74acpi_ns_check_package(char *pathname,
75 union acpi_operand_object *return_object,
76 const union acpi_predefined_info *predefined);
77
78static acpi_status
79acpi_ns_check_package_elements(char *pathname,
80 union acpi_operand_object **elements,
81 u8 type1, u32 count1, u8 type2, u32 count2);
82
83static acpi_status
84acpi_ns_check_object_type(char *pathname,
85 union acpi_operand_object *return_object,
86 u32 expected_btypes, u32 package_index);
87
88static acpi_status
89acpi_ns_check_reference(char *pathname,
90 union acpi_operand_object *return_object);
91
92/*
93 * Names for the types that can be returned by the predefined objects.
94 * Used for warning messages. Must be in the same order as the ACPI_RTYPEs
95 */
96static const char *acpi_rtype_names[] = {
97 "/Integer",
98 "/String",
99 "/Buffer",
100 "/Package",
101 "/Reference",
102};
103
104#define ACPI_NOT_PACKAGE ACPI_UINT32_MAX
105
106/*******************************************************************************
107 *
108 * FUNCTION: acpi_ns_check_predefined_names
109 *
110 * PARAMETERS: Node - Namespace node for the method/object
111 * return_object - Object returned from the evaluation of this
112 * method/object
113 *
114 * RETURN: Status
115 *
116 * DESCRIPTION: Check an ACPI name for a match in the predefined name list.
117 *
118 ******************************************************************************/
119
120acpi_status
121acpi_ns_check_predefined_names(struct acpi_namespace_node *node,
122 union acpi_operand_object *return_object)
123{
124 acpi_status status = AE_OK;
125 const union acpi_predefined_info *predefined;
126 char *pathname;
127
128 /* Match the name for this method/object against the predefined list */
129
130 predefined = acpi_ns_check_for_predefined_name(node);
131 if (!predefined) {
132
133 /* Name was not one of the predefined names */
134
135 return (AE_OK);
136 }
137
138 /* Get the full pathname to the object, for use in error messages */
139
140 pathname = acpi_ns_get_external_pathname(node);
141 if (!pathname) {
142 pathname = ACPI_CAST_PTR(char, predefined->info.name);
143 }
144
145 /*
146 * Check that the parameter count for this method is in accordance
147 * with the ACPI specification.
148 */
149 acpi_ns_check_parameter_count(pathname, node, predefined);
150
151 /*
152 * If there is no return value, check if we require a return value for
153 * this predefined name. Either one return value is expected, or none,
154 * for both methods and other objects.
155 *
156 * Exit now if there is no return object. Warning if one was expected.
157 */
158 if (!return_object) {
159 if ((predefined->info.expected_btypes) &&
160 (!(predefined->info.expected_btypes & ACPI_RTYPE_NONE))) {
161 ACPI_ERROR((AE_INFO,
162 "%s: Missing expected return value",
163 pathname));
164
165 status = AE_AML_NO_RETURN_VALUE;
166 }
167 goto exit;
168 }
169
170 /*
171 * We have a return value, but if one wasn't expected, just exit, this is
172 * not a problem
173 *
174 * For example, if "Implicit return value" is enabled, methods will
175 * always return a value
176 */
177 if (!predefined->info.expected_btypes) {
178 goto exit;
179 }
180
181 /*
182 * Check that the type of the return object is what is expected for
183 * this predefined name
184 */
185 status = acpi_ns_check_object_type(pathname, return_object,
186 predefined->info.expected_btypes,
187 ACPI_NOT_PACKAGE);
188 if (ACPI_FAILURE(status)) {
189 goto exit;
190 }
191
192 /* For returned Package objects, check the type of all sub-objects */
193
194 if (ACPI_GET_OBJECT_TYPE(return_object) == ACPI_TYPE_PACKAGE) {
195 status =
196 acpi_ns_check_package(pathname, return_object, predefined);
197 }
198
199 exit:
200 if (pathname) {
201 ACPI_FREE(pathname);
202 }
203
204 return (status);
205}
206
207/*******************************************************************************
208 *
209 * FUNCTION: acpi_ns_check_parameter_count
210 *
211 * PARAMETERS: Pathname - Full pathname to the node (for error msgs)
212 * Node - Namespace node for the method/object
213 * Predefined - Pointer to entry in predefined name table
214 *
215 * RETURN: None
216 *
217 * DESCRIPTION: Check that the declared (in ASL/AML) parameter count for a
218 * predefined name is what is expected (i.e., what is defined in
219 * the ACPI specification for this predefined name.)
220 *
221 ******************************************************************************/
222
223void
224acpi_ns_check_parameter_count(char *pathname,
225 struct acpi_namespace_node *node,
226 const union acpi_predefined_info *predefined)
227{
228 u32 param_count;
229 u32 required_params_current;
230 u32 required_params_old;
231
232 /*
233 * Check that the ASL-defined parameter count is what is expected for
234 * this predefined name.
235 *
236 * Methods have 0-7 parameters. All other types have zero.
237 */
238 param_count = 0;
239 if (node->type == ACPI_TYPE_METHOD) {
240 param_count = node->object->method.param_count;
241 }
242
243 /* Validate parameter count - allow two different legal counts (_SCP) */
244
245 required_params_current = predefined->info.param_count & 0x0F;
246 required_params_old = predefined->info.param_count >> 4;
247
248 if ((param_count != required_params_current) &&
249 (param_count != required_params_old)) {
250 ACPI_WARNING((AE_INFO,
251 "%s: Parameter count mismatch - ASL declared %d, expected %d",
252 pathname, param_count, required_params_current));
253 }
254}
255
256/*******************************************************************************
257 *
258 * FUNCTION: acpi_ns_check_for_predefined_name
259 *
260 * PARAMETERS: Node - Namespace node for the method/object
261 *
262 * RETURN: Pointer to entry in predefined table. NULL indicates not found.
263 *
264 * DESCRIPTION: Check an object name against the predefined object list.
265 *
266 ******************************************************************************/
267
268const union acpi_predefined_info *acpi_ns_check_for_predefined_name(struct
269 acpi_namespace_node
270 *node)
271{
272 const union acpi_predefined_info *this_name;
273
274 /* Quick check for a predefined name, first character must be underscore */
275
276 if (node->name.ascii[0] != '_') {
277 return (NULL);
278 }
279
280 /* Search info table for a predefined method/object name */
281
282 this_name = predefined_names;
283 while (this_name->info.name[0]) {
284 if (ACPI_COMPARE_NAME(node->name.ascii, this_name->info.name)) {
285
286 /* Return pointer to this table entry */
287
288 return (this_name);
289 }
290
291 /*
292 * Skip next entry in the table if this name returns a Package
293 * (next entry contains the package info)
294 */
295 if (this_name->info.expected_btypes & ACPI_RTYPE_PACKAGE) {
296 this_name++;
297 }
298
299 this_name++;
300 }
301
302 return (NULL);
303}
304
305/*******************************************************************************
306 *
307 * FUNCTION: acpi_ns_check_package
308 *
309 * PARAMETERS: Pathname - Full pathname to the node (for error msgs)
310 * return_object - Object returned from the evaluation of a
311 * method or object
312 * Predefined - Pointer to entry in predefined name table
313 *
314 * RETURN: Status
315 *
316 * DESCRIPTION: Check a returned package object for the correct count and
317 * correct type of all sub-objects.
318 *
319 ******************************************************************************/
320
321static acpi_status
322acpi_ns_check_package(char *pathname,
323 union acpi_operand_object *return_object,
324 const union acpi_predefined_info *predefined)
325{
326 const union acpi_predefined_info *package;
327 union acpi_operand_object *sub_package;
328 union acpi_operand_object **elements;
329 union acpi_operand_object **sub_elements;
330 acpi_status status;
331 u32 expected_count;
332 u32 count;
333 u32 i;
334 u32 j;
335
336 ACPI_FUNCTION_NAME(ns_check_package);
337
338 /* The package info for this name is in the next table entry */
339
340 package = predefined + 1;
341
342 ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
343 "%s Validating return Package of Type %X, Count %X\n",
344 pathname, package->ret_info.type,
345 return_object->package.count));
346
347 /* Extract package count and elements array */
348
349 elements = return_object->package.elements;
350 count = return_object->package.count;
351
352 /* The package must have at least one element, else invalid */
353
354 if (!count) {
355 ACPI_WARNING((AE_INFO,
356 "%s: Return Package has no elements (empty)",
357 pathname));
358
359 return (AE_AML_OPERAND_VALUE);
360 }
361
362 /*
363 * Decode the type of the expected package contents
364 *
365 * PTYPE1 packages contain no subpackages
366 * PTYPE2 packages contain sub-packages
367 */
368 switch (package->ret_info.type) {
369 case ACPI_PTYPE1_FIXED:
370
371 /*
372 * The package count is fixed and there are no sub-packages
373 *
374 * If package is too small, exit.
375 * If package is larger than expected, issue warning but continue
376 */
377 expected_count =
378 package->ret_info.count1 + package->ret_info.count2;
379 if (count < expected_count) {
380 goto package_too_small;
381 } else if (count > expected_count) {
382 ACPI_WARNING((AE_INFO,
383 "%s: Return Package is larger than needed - "
384 "found %u, expected %u", pathname, count,
385 expected_count));
386 }
387
388 /* Validate all elements of the returned package */
389
390 status = acpi_ns_check_package_elements(pathname, elements,
391 package->ret_info.
392 object_type1,
393 package->ret_info.
394 count1,
395 package->ret_info.
396 object_type2,
397 package->ret_info.
398 count2);
399 if (ACPI_FAILURE(status)) {
400 return (status);
401 }
402 break;
403
404 case ACPI_PTYPE1_VAR:
405
406 /*
407 * The package count is variable, there are no sub-packages, and all
408 * elements must be of the same type
409 */
410 for (i = 0; i < count; i++) {
411 status = acpi_ns_check_object_type(pathname, *elements,
412 package->ret_info.
413 object_type1, i);
414 if (ACPI_FAILURE(status)) {
415 return (status);
416 }
417 elements++;
418 }
419 break;
420
421 case ACPI_PTYPE1_OPTION:
422
423 /*
424 * The package count is variable, there are no sub-packages. There are
425 * a fixed number of required elements, and a variable number of
426 * optional elements.
427 *
428 * Check if package is at least as large as the minimum required
429 */
430 expected_count = package->ret_info3.count;
431 if (count < expected_count) {
432 goto package_too_small;
433 }
434
435 /* Variable number of sub-objects */
436
437 for (i = 0; i < count; i++) {
438 if (i < package->ret_info3.count) {
439
440 /* These are the required package elements (0, 1, or 2) */
441
442 status =
443 acpi_ns_check_object_type(pathname,
444 *elements,
445 package->
446 ret_info3.
447 object_type[i],
448 i);
449 if (ACPI_FAILURE(status)) {
450 return (status);
451 }
452 } else {
453 /* These are the optional package elements */
454
455 status =
456 acpi_ns_check_object_type(pathname,
457 *elements,
458 package->
459 ret_info3.
460 tail_object_type,
461 i);
462 if (ACPI_FAILURE(status)) {
463 return (status);
464 }
465 }
466 elements++;
467 }
468 break;
469
470 case ACPI_PTYPE2_PKG_COUNT:
471
472 /* First element is the (Integer) count of sub-packages to follow */
473
474 status = acpi_ns_check_object_type(pathname, *elements,
475 ACPI_RTYPE_INTEGER, 0);
476 if (ACPI_FAILURE(status)) {
477 return (status);
478 }
479
480 /*
481 * Count cannot be larger than the parent package length, but allow it
482 * to be smaller. The >= accounts for the Integer above.
483 */
484 expected_count = (u32) (*elements)->integer.value;
485 if (expected_count >= count) {
486 goto package_too_small;
487 }
488
489 count = expected_count;
490 elements++;
491
492 /* Now we can walk the sub-packages */
493
494 /*lint -fallthrough */
495
496 case ACPI_PTYPE2:
497 case ACPI_PTYPE2_FIXED:
498 case ACPI_PTYPE2_MIN:
499 case ACPI_PTYPE2_COUNT:
500
501 /*
502 * These types all return a single package that consists of a variable
503 * number of sub-packages
504 */
505 for (i = 0; i < count; i++) {
506 sub_package = *elements;
507 sub_elements = sub_package->package.elements;
508
509 /* Each sub-object must be of type Package */
510
511 status =
512 acpi_ns_check_object_type(pathname, sub_package,
513 ACPI_RTYPE_PACKAGE, i);
514 if (ACPI_FAILURE(status)) {
515 return (status);
516 }
517
518 /* Examine the different types of sub-packages */
519
520 switch (package->ret_info.type) {
521 case ACPI_PTYPE2:
522 case ACPI_PTYPE2_PKG_COUNT:
523
524 /* Each subpackage has a fixed number of elements */
525
526 expected_count =
527 package->ret_info.count1 +
528 package->ret_info.count2;
529 if (sub_package->package.count !=
530 expected_count) {
531 count = sub_package->package.count;
532 goto package_too_small;
533 }
534
535 status =
536 acpi_ns_check_package_elements(pathname,
537 sub_elements,
538 package->
539 ret_info.
540 object_type1,
541 package->
542 ret_info.
543 count1,
544 package->
545 ret_info.
546 object_type2,
547 package->
548 ret_info.
549 count2);
550 if (ACPI_FAILURE(status)) {
551 return (status);
552 }
553 break;
554
555 case ACPI_PTYPE2_FIXED:
556
557 /* Each sub-package has a fixed length */
558
559 expected_count = package->ret_info2.count;
560 if (sub_package->package.count < expected_count) {
561 count = sub_package->package.count;
562 goto package_too_small;
563 }
564
565 /* Check the type of each sub-package element */
566
567 for (j = 0; j < expected_count; j++) {
568 status =
569 acpi_ns_check_object_type(pathname,
570 sub_elements
571 [j],
572 package->
573 ret_info2.
574 object_type
575 [j], j);
576 if (ACPI_FAILURE(status)) {
577 return (status);
578 }
579 }
580 break;
581
582 case ACPI_PTYPE2_MIN:
583
584 /* Each sub-package has a variable but minimum length */
585
586 expected_count = package->ret_info.count1;
587 if (sub_package->package.count < expected_count) {
588 count = sub_package->package.count;
589 goto package_too_small;
590 }
591
592 /* Check the type of each sub-package element */
593
594 status =
595 acpi_ns_check_package_elements(pathname,
596 sub_elements,
597 package->
598 ret_info.
599 object_type1,
600 sub_package->
601 package.
602 count, 0, 0);
603 if (ACPI_FAILURE(status)) {
604 return (status);
605 }
606 break;
607
608 case ACPI_PTYPE2_COUNT:
609
610 /* First element is the (Integer) count of elements to follow */
611
612 status =
613 acpi_ns_check_object_type(pathname,
614 *sub_elements,
615 ACPI_RTYPE_INTEGER,
616 0);
617 if (ACPI_FAILURE(status)) {
618 return (status);
619 }
620
621 /* Make sure package is large enough for the Count */
622
623 expected_count =
624 (u32) (*sub_elements)->integer.value;
625 if (sub_package->package.count < expected_count) {
626 count = sub_package->package.count;
627 goto package_too_small;
628 }
629
630 /* Check the type of each sub-package element */
631
632 status =
633 acpi_ns_check_package_elements(pathname,
634 (sub_elements
635 + 1),
636 package->
637 ret_info.
638 object_type1,
639 (expected_count
640 - 1), 0, 0);
641 if (ACPI_FAILURE(status)) {
642 return (status);
643 }
644 break;
645
646 default:
647 break;
648 }
649
650 elements++;
651 }
652 break;
653
654 default:
655
656 /* Should not get here if predefined info table is correct */
657
658 ACPI_WARNING((AE_INFO,
659 "%s: Invalid internal return type in table entry: %X",
660 pathname, package->ret_info.type));
661
662 return (AE_AML_INTERNAL);
663 }
664
665 return (AE_OK);
666
667 package_too_small:
668
669 /* Error exit for the case with an incorrect package count */
670
671 ACPI_WARNING((AE_INFO, "%s: Return Package is too small - "
672 "found %u, expected %u", pathname, count,
673 expected_count));
674
675 return (AE_AML_OPERAND_VALUE);
676}
677
678/*******************************************************************************
679 *
680 * FUNCTION: acpi_ns_check_package_elements
681 *
682 * PARAMETERS: Pathname - Full pathname to the node (for error msgs)
683 * Elements - Pointer to the package elements array
684 * Type1 - Object type for first group
685 * Count1 - Count for first group
686 * Type2 - Object type for second group
687 * Count2 - Count for second group
688 *
689 * RETURN: Status
690 *
691 * DESCRIPTION: Check that all elements of a package are of the correct object
692 * type. Supports up to two groups of different object types.
693 *
694 ******************************************************************************/
695
696static acpi_status
697acpi_ns_check_package_elements(char *pathname,
698 union acpi_operand_object **elements,
699 u8 type1, u32 count1, u8 type2, u32 count2)
700{
701 union acpi_operand_object **this_element = elements;
702 acpi_status status;
703 u32 i;
704
705 /*
706 * Up to two groups of package elements are supported by the data
707 * structure. All elements in each group must be of the same type.
708 * The second group can have a count of zero.
709 */
710 for (i = 0; i < count1; i++) {
711 status = acpi_ns_check_object_type(pathname, *this_element,
712 type1, i);
713 if (ACPI_FAILURE(status)) {
714 return (status);
715 }
716 this_element++;
717 }
718
719 for (i = 0; i < count2; i++) {
720 status = acpi_ns_check_object_type(pathname, *this_element,
721 type2, (i + count1));
722 if (ACPI_FAILURE(status)) {
723 return (status);
724 }
725 this_element++;
726 }
727
728 return (AE_OK);
729}
730
731/*******************************************************************************
732 *
733 * FUNCTION: acpi_ns_check_object_type
734 *
735 * PARAMETERS: Pathname - Full pathname to the node (for error msgs)
736 * return_object - Object return from the execution of this
737 * method/object
738 * expected_btypes - Bitmap of expected return type(s)
739 * package_index - Index of object within parent package (if
740 * applicable - ACPI_NOT_PACKAGE otherwise)
741 *
742 * RETURN: Status
743 *
744 * DESCRIPTION: Check the type of the return object against the expected object
745 * type(s). Use of Btype allows multiple expected object types.
746 *
747 ******************************************************************************/
748
749static acpi_status
750acpi_ns_check_object_type(char *pathname,
751 union acpi_operand_object *return_object,
752 u32 expected_btypes, u32 package_index)
753{
754 acpi_status status = AE_OK;
755 u32 return_btype;
756 char type_buffer[48]; /* Room for 5 types */
757 u32 this_rtype;
758 u32 i;
759 u32 j;
760
761 /*
762 * If we get a NULL return_object here, it is a NULL package element,
763 * and this is always an error.
764 */
765 if (!return_object) {
766 goto type_error_exit;
767 }
768
769 /* A Namespace node should not get here, but make sure */
770
771 if (ACPI_GET_DESCRIPTOR_TYPE(return_object) == ACPI_DESC_TYPE_NAMED) {
772 ACPI_WARNING((AE_INFO,
773 "%s: Invalid return type - Found a Namespace node [%4.4s] type %s",
774 pathname, return_object->node.name.ascii,
775 acpi_ut_get_type_name(return_object->node.type)));
776 return (AE_AML_OPERAND_TYPE);
777 }
778
779 /*
780 * Convert the object type (ACPI_TYPE_xxx) to a bitmapped object type.
781 * The bitmapped type allows multiple possible return types.
782 *
783 * Note, the cases below must handle all of the possible types returned
784 * from all of the predefined names (including elements of returned
785 * packages)
786 */
787 switch (ACPI_GET_OBJECT_TYPE(return_object)) {
788 case ACPI_TYPE_INTEGER:
789 return_btype = ACPI_RTYPE_INTEGER;
790 break;
791
792 case ACPI_TYPE_BUFFER:
793 return_btype = ACPI_RTYPE_BUFFER;
794 break;
795
796 case ACPI_TYPE_STRING:
797 return_btype = ACPI_RTYPE_STRING;
798 break;
799
800 case ACPI_TYPE_PACKAGE:
801 return_btype = ACPI_RTYPE_PACKAGE;
802 break;
803
804 case ACPI_TYPE_LOCAL_REFERENCE:
805 return_btype = ACPI_RTYPE_REFERENCE;
806 break;
807
808 default:
809 /* Not one of the supported objects, must be incorrect */
810
811 goto type_error_exit;
812 }
813
814 /* Is the object one of the expected types? */
815
816 if (!(return_btype & expected_btypes)) {
817 goto type_error_exit;
818 }
819
820 /* For reference objects, check that the reference type is correct */
821
822 if (ACPI_GET_OBJECT_TYPE(return_object) == ACPI_TYPE_LOCAL_REFERENCE) {
823 status = acpi_ns_check_reference(pathname, return_object);
824 }
825
826 return (status);
827
828 type_error_exit:
829
830 /* Create a string with all expected types for this predefined object */
831
832 j = 1;
833 type_buffer[0] = 0;
834 this_rtype = ACPI_RTYPE_INTEGER;
835
836 for (i = 0; i < ACPI_NUM_RTYPES; i++) {
837
838 /* If one of the expected types, concatenate the name of this type */
839
840 if (expected_btypes & this_rtype) {
841 ACPI_STRCAT(type_buffer, &acpi_rtype_names[i][j]);
842 j = 0; /* Use name separator from now on */
843 }
844 this_rtype <<= 1; /* Next Rtype */
845 }
846
847 if (package_index == ACPI_NOT_PACKAGE) {
848 ACPI_WARNING((AE_INFO,
849 "%s: Return type mismatch - found %s, expected %s",
850 pathname,
851 acpi_ut_get_object_type_name(return_object),
852 type_buffer));
853 } else {
854 ACPI_WARNING((AE_INFO,
855 "%s: Return Package type mismatch at index %u - "
856 "found %s, expected %s", pathname, package_index,
857 acpi_ut_get_object_type_name(return_object),
858 type_buffer));
859 }
860
861 return (AE_AML_OPERAND_TYPE);
862}
863
864/*******************************************************************************
865 *
866 * FUNCTION: acpi_ns_check_reference
867 *
868 * PARAMETERS: Pathname - Full pathname to the node (for error msgs)
869 * return_object - Object returned from the evaluation of a
870 * method or object
871 *
872 * RETURN: Status
873 *
874 * DESCRIPTION: Check a returned reference object for the correct reference
875 * type. The only reference type that can be returned from a
876 * predefined method is a named reference. All others are invalid.
877 *
878 ******************************************************************************/
879
880static acpi_status
881acpi_ns_check_reference(char *pathname,
882 union acpi_operand_object *return_object)
883{
884
885 /*
886 * Check the reference object for the correct reference type (opcode).
 887 * The only type of reference that can be converted to a union acpi_object is
888 * a reference to a named object (reference class: NAME)
889 */
890 if (return_object->reference.class == ACPI_REFCLASS_NAME) {
891 return (AE_OK);
892 }
893
894 ACPI_WARNING((AE_INFO,
895 "%s: Return type mismatch - unexpected reference object type [%s] %2.2X",
896 pathname, acpi_ut_get_reference_name(return_object),
897 return_object->reference.class));
898
899 return (AE_AML_OPERAND_TYPE);
900}
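
One detail of the new nspredef.c worth calling out: acpi_ns_check_parameter_count() accepts two legal argument counts per predefined name by packing both into a single byte of the predefined-info table, the current count in the low nibble and the older (still accepted) count in the high nibble -- names such as _SCP changed their argument count between ACPI revisions. The following is a small standalone sketch of that decode, outside ACPICA and with an invented packed value, purely to illustrate the encoding:

#include <stdio.h>

/*
 * Sketch only: 'packed' mimics the param_count byte in the predefined
 * table. The low nibble is the current required argument count, the
 * high nibble the older (still accepted) count. The value 0x13 below
 * is hypothetical, not taken from the real table.
 */
static void check_param_count(unsigned int declared, unsigned char packed)
{
	unsigned int required_current = packed & 0x0F;
	unsigned int required_old = packed >> 4;

	if (declared != required_current && declared != required_old)
		printf("Parameter count mismatch - ASL declared %u, expected %u\n",
		       declared, required_current);
	else
		printf("Parameter count %u is acceptable\n", declared);
}

int main(void)
{
	check_param_count(3, 0x13);	/* matches the current count */
	check_param_count(1, 0x13);	/* matches the older count */
	check_param_count(2, 0x13);	/* mismatch -> warning */
	return 0;
}
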
diff --git a/drivers/acpi/namespace/nssearch.c b/drivers/acpi/namespace/nssearch.c
index 8399276cba1e..a9a80bf811b3 100644
--- a/drivers/acpi/namespace/nssearch.c
+++ b/drivers/acpi/namespace/nssearch.c
@@ -331,7 +331,7 @@ acpi_ns_search_and_enter(u32 target_name,
331 "Found bad character(s) in name, repaired: [%4.4s]\n", 331 "Found bad character(s) in name, repaired: [%4.4s]\n",
332 ACPI_CAST_PTR(char, &target_name))); 332 ACPI_CAST_PTR(char, &target_name)));
333 } else { 333 } else {
334 ACPI_DEBUG_PRINT((ACPI_DB_WARN, 334 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
335 "Found bad character(s) in name, repaired: [%4.4s]\n", 335 "Found bad character(s) in name, repaired: [%4.4s]\n",
336 ACPI_CAST_PTR(char, &target_name))); 336 ACPI_CAST_PTR(char, &target_name)));
337 } 337 }
diff --git a/drivers/acpi/namespace/nsxfeval.c b/drivers/acpi/namespace/nsxfeval.c
index 38be5865d95d..a085cc39c055 100644
--- a/drivers/acpi/namespace/nsxfeval.c
+++ b/drivers/acpi/namespace/nsxfeval.c
@@ -48,6 +48,10 @@
48 48
49#define _COMPONENT ACPI_NAMESPACE 49#define _COMPONENT ACPI_NAMESPACE
50ACPI_MODULE_NAME("nsxfeval") 50ACPI_MODULE_NAME("nsxfeval")
51
52/* Local prototypes */
53static void acpi_ns_resolve_references(struct acpi_evaluate_info *info);
54
51#ifdef ACPI_FUTURE_USAGE 55#ifdef ACPI_FUTURE_USAGE
52/******************************************************************************* 56/*******************************************************************************
53 * 57 *
@@ -69,6 +73,7 @@ ACPI_MODULE_NAME("nsxfeval")
69 * be valid (non-null) 73 * be valid (non-null)
70 * 74 *
71 ******************************************************************************/ 75 ******************************************************************************/
76
72acpi_status 77acpi_status
73acpi_evaluate_object_typed(acpi_handle handle, 78acpi_evaluate_object_typed(acpi_handle handle,
74 acpi_string pathname, 79 acpi_string pathname,
@@ -283,6 +288,10 @@ acpi_evaluate_object(acpi_handle handle,
283 288
284 if (ACPI_SUCCESS(status)) { 289 if (ACPI_SUCCESS(status)) {
285 290
291 /* Dereference Index and ref_of references */
292
293 acpi_ns_resolve_references(info);
294
286 /* Get the size of the returned object */ 295 /* Get the size of the returned object */
287 296
288 status = 297 status =
@@ -352,6 +361,74 @@ ACPI_EXPORT_SYMBOL(acpi_evaluate_object)
352 361
353/******************************************************************************* 362/*******************************************************************************
354 * 363 *
364 * FUNCTION: acpi_ns_resolve_references
365 *
366 * PARAMETERS: Info - Evaluation info block
367 *
368 * RETURN: Info->return_object is replaced with the dereferenced object
369 *
370 * DESCRIPTION: Dereference certain reference objects. Called before an
371 * internal return object is converted to an external union acpi_object.
372 *
373 * Performs an automatic dereference of Index and ref_of reference objects.
374 * These reference objects are not supported by the union acpi_object, so this is a
375 * last resort effort to return something useful. Also, provides compatibility
376 * with other ACPI implementations.
377 *
378 * NOTE: does not handle references within returned package objects or nested
379 * references, but this support could be added later if found to be necessary.
380 *
381 ******************************************************************************/
382static void acpi_ns_resolve_references(struct acpi_evaluate_info *info)
383{
384 union acpi_operand_object *obj_desc = NULL;
385 struct acpi_namespace_node *node;
386
387 /* We are interested in reference objects only */
388
389 if (ACPI_GET_OBJECT_TYPE(info->return_object) !=
390 ACPI_TYPE_LOCAL_REFERENCE) {
391 return;
392 }
393
394 /*
395 * Two types of references are supported - those created by Index and
396 * ref_of operators. A name reference (AML_NAMEPATH_OP) can be converted
 397 * to a union acpi_object, so it is not dereferenced here. A ddb_handle
398 * (AML_LOAD_OP) cannot be dereferenced, nor can it be converted to
 399 * a union acpi_object.
400 */
401 switch (info->return_object->reference.class) {
402 case ACPI_REFCLASS_INDEX:
403
404 obj_desc = *(info->return_object->reference.where);
405 break;
406
407 case ACPI_REFCLASS_REFOF:
408
409 node = info->return_object->reference.object;
410 if (node) {
411 obj_desc = node->object;
412 }
413 break;
414
415 default:
416 return;
417 }
418
419 /* Replace the existing reference object */
420
421 if (obj_desc) {
422 acpi_ut_add_reference(obj_desc);
423 acpi_ut_remove_reference(info->return_object);
424 info->return_object = obj_desc;
425 }
426
427 return;
428}
429
430/*******************************************************************************
431 *
355 * FUNCTION: acpi_walk_namespace 432 * FUNCTION: acpi_walk_namespace
356 * 433 *
357 * PARAMETERS: Type - acpi_object_type to search for 434 * PARAMETERS: Type - acpi_object_type to search for
@@ -379,6 +456,7 @@ ACPI_EXPORT_SYMBOL(acpi_evaluate_object)
379 * function, etc. 456 * function, etc.
380 * 457 *
381 ******************************************************************************/ 458 ******************************************************************************/
459
382acpi_status 460acpi_status
383acpi_walk_namespace(acpi_object_type type, 461acpi_walk_namespace(acpi_object_type type,
384 acpi_handle start_object, 462 acpi_handle start_object,
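
The acpi_ns_resolve_references() addition above swaps an Index or ref_of reference for the object it points at before the result is handed back to the caller, taking a reference on the target before dropping the reference object so the target cannot be freed in between. A rough standalone model of that swap, using invented structures rather than ACPICA's operand objects, just to show the ordering:

#include <stdio.h>

/* Invented stand-ins for ACPICA operand objects and reference counting */
struct object {
	int refcount;
	int value;
};

static void add_reference(struct object *obj)    { obj->refcount++; }
static void remove_reference(struct object *obj) { obj->refcount--; }

int main(void)
{
	struct object target = { .refcount = 1, .value = 42 };
	struct object ref = { .refcount = 1, .value = 0 };
	struct object *where = &target;	/* what the Index reference points at */
	struct object *return_object = &ref;

	/* Take a reference on the target first, then release the reference object */
	add_reference(where);
	remove_reference(return_object);
	return_object = where;

	printf("resolved value=%d, target refcount=%d, ref refcount=%d\n",
	       return_object->value, target.refcount, ref.refcount);
	return 0;
}
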
diff --git a/drivers/acpi/namespace/nsxfname.c b/drivers/acpi/namespace/nsxfname.c
index a287ed550f54..5efa4e7ddb0b 100644
--- a/drivers/acpi/namespace/nsxfname.c
+++ b/drivers/acpi/namespace/nsxfname.c
@@ -253,6 +253,7 @@ acpi_get_object_info(acpi_handle handle, struct acpi_buffer * buffer)
253 node = acpi_ns_map_handle_to_node(handle); 253 node = acpi_ns_map_handle_to_node(handle);
254 if (!node) { 254 if (!node) {
255 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); 255 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
256 status = AE_BAD_PARAMETER;
256 goto cleanup; 257 goto cleanup;
257 } 258 }
258 259
@@ -264,6 +265,10 @@ acpi_get_object_info(acpi_handle handle, struct acpi_buffer * buffer)
264 info->name = node->name.integer; 265 info->name = node->name.integer;
265 info->valid = 0; 266 info->valid = 0;
266 267
268 if (node->type == ACPI_TYPE_METHOD) {
269 info->param_count = node->object->method.param_count;
270 }
271
267 status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); 272 status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
268 if (ACPI_FAILURE(status)) { 273 if (ACPI_FAILURE(status)) {
269 goto cleanup; 274 goto cleanup;
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index cb9864e39bae..25ceae9191ef 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -258,7 +258,7 @@ int __init acpi_numa_init(void)
258 258
259int acpi_get_pxm(acpi_handle h) 259int acpi_get_pxm(acpi_handle h)
260{ 260{
261 unsigned long pxm; 261 unsigned long long pxm;
262 acpi_status status; 262 acpi_status status;
263 acpi_handle handle; 263 acpi_handle handle;
264 acpi_handle phandle = h; 264 acpi_handle phandle = h;
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 235a1386888a..c8111424dcb8 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -35,7 +35,6 @@
35#include <linux/interrupt.h> 35#include <linux/interrupt.h>
36#include <linux/kmod.h> 36#include <linux/kmod.h>
37#include <linux/delay.h> 37#include <linux/delay.h>
38#include <linux/dmi.h>
39#include <linux/workqueue.h> 38#include <linux/workqueue.h>
40#include <linux/nmi.h> 39#include <linux/nmi.h>
41#include <linux/acpi.h> 40#include <linux/acpi.h>
@@ -97,54 +96,44 @@ static DEFINE_SPINLOCK(acpi_res_lock);
97static char osi_additional_string[OSI_STRING_LENGTH_MAX]; 96static char osi_additional_string[OSI_STRING_LENGTH_MAX];
98 97
99/* 98/*
100 * "Ode to _OSI(Linux)" 99 * The story of _OSI(Linux)
101 * 100 *
102 * osi_linux -- Control response to BIOS _OSI(Linux) query. 101 * From pre-history through Linux-2.6.22,
102 * Linux responded TRUE upon a BIOS OSI(Linux) query.
103 * 103 *
104 * As Linux evolves, the features that it supports change. 104 * Unfortunately, reference BIOS writers got wind of this
105 * So an OSI string such as "Linux" is not specific enough 105 * and put OSI(Linux) in their example code, quickly exposing
106 * to be useful across multiple versions of Linux. It 106 * this string as ill-conceived and opening the door to
107 * doesn't identify any particular feature, interface, 107 * an un-bounded number of BIOS incompatibilities.
108 * or even any particular version of Linux...
109 * 108 *
110 * Unfortunately, Linux-2.6.22 and earlier responded "yes" 109 * For example, OSI(Linux) was used on resume to re-POST a
111 * to a BIOS _OSI(Linux) query. When 110 * video card on one system, because Linux at that time
112 * a reference mobile BIOS started using it, its use 111 * could not do a speedy restore in its native driver.
113 * started to spread to many vendor platforms. 112 * But then upon gaining quick native restore capability,
114 * As it is not supportable, we need to halt that spread. 113 * Linux has no way to tell the BIOS to skip the time-consuming
114 * POST -- putting Linux at a permanent performance disadvantage.
115 * On another system, the BIOS writer used OSI(Linux)
116 * to infer native OS support for IPMI! On other systems,
117 * OSI(Linux) simply got in the way of Linux claiming to
118 * be compatible with other operating systems, exposing
119 * BIOS issues such as skipped device initialization.
115 * 120 *
 116 * Today, most BIOS references to _OSI(Linux) are noise --         121 * So "Linux" turned out to be a really poor choice of
117 * they have no functional effect and are just dead code 122 * OSI string, and from Linux-2.6.23 onward we respond FALSE.
118 * carried over from the reference BIOS.
119 *
120 * The next most common case is that _OSI(Linux) harms Linux,
121 * usually by causing the BIOS to follow paths that are
122 * not tested during Windows validation.
123 *
124 * Finally, there is a short list of platforms
125 * where OSI(Linux) benefits Linux.
126 *
127 * In Linux-2.6.23, OSI(Linux) is first disabled by default.
128 * DMI is used to disable the dmesg warning about OSI(Linux)
129 * on platforms where it is known to have no effect.
130 * But a dmesg warning remains for systems where
131 * we do not know if OSI(Linux) is good or bad for the system.
132 * DMI is also used to enable OSI(Linux) for the machines
133 * that are known to need it.
134 * 123 *
135 * BIOS writers should NOT query _OSI(Linux) on future systems. 124 * BIOS writers should NOT query _OSI(Linux) on future systems.
136 * It will be ignored by default, and to get Linux to 125 * Linux will complain on the console when it sees it, and return FALSE.
137 * not ignore it will require a kernel source update to 126 * To get Linux to return TRUE for your system will require
138 * add a DMI entry, or a boot-time "acpi_osi=Linux" invocation. 127 * a kernel source update to add a DMI entry,
128 * or boot with "acpi_osi=Linux"
139 */ 129 */
140#define OSI_LINUX_ENABLE 0
141 130
142static struct osi_linux { 131static struct osi_linux {
143 unsigned int enable:1; 132 unsigned int enable:1;
144 unsigned int dmi:1; 133 unsigned int dmi:1;
145 unsigned int cmdline:1; 134 unsigned int cmdline:1;
146 unsigned int known:1; 135 unsigned int known:1;
147} osi_linux = { OSI_LINUX_ENABLE, 0, 0, 0}; 136} osi_linux = { 0, 0, 0, 0};
148 137
149static void __init acpi_request_region (struct acpi_generic_address *addr, 138static void __init acpi_request_region (struct acpi_generic_address *addr,
150 unsigned int length, char *desc) 139 unsigned int length, char *desc)
@@ -608,7 +597,7 @@ static void acpi_os_derive_pci_id_2(acpi_handle rhandle, /* upper bound */
608 acpi_handle handle; 597 acpi_handle handle;
609 struct acpi_pci_id *pci_id = *id; 598 struct acpi_pci_id *pci_id = *id;
610 acpi_status status; 599 acpi_status status;
611 unsigned long temp; 600 unsigned long long temp;
612 acpi_object_type type; 601 acpi_object_type type;
613 602
614 acpi_get_parent(chandle, &handle); 603 acpi_get_parent(chandle, &handle);
@@ -620,8 +609,7 @@ static void acpi_os_derive_pci_id_2(acpi_handle rhandle, /* upper bound */
620 if ((ACPI_FAILURE(status)) || (type != ACPI_TYPE_DEVICE)) 609 if ((ACPI_FAILURE(status)) || (type != ACPI_TYPE_DEVICE))
621 return; 610 return;
622 611
623 status = 612 status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL,
624 acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL,
625 &temp); 613 &temp);
626 if (ACPI_SUCCESS(status)) { 614 if (ACPI_SUCCESS(status)) {
627 u32 val; 615 u32 val;
@@ -682,6 +670,22 @@ static void acpi_os_execute_deferred(struct work_struct *work)
682 return; 670 return;
683} 671}
684 672
673static void acpi_os_execute_hp_deferred(struct work_struct *work)
674{
675 struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
676 if (!dpc) {
677 printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
678 return;
679 }
680
681 acpi_os_wait_events_complete(NULL);
682
683 dpc->function(dpc->context);
684 kfree(dpc);
685
686 return;
687}
688
685/******************************************************************************* 689/*******************************************************************************
686 * 690 *
687 * FUNCTION: acpi_os_execute 691 * FUNCTION: acpi_os_execute
@@ -697,12 +701,13 @@ static void acpi_os_execute_deferred(struct work_struct *work)
697 * 701 *
698 ******************************************************************************/ 702 ******************************************************************************/
699 703
700acpi_status acpi_os_execute(acpi_execute_type type, 704static acpi_status __acpi_os_execute(acpi_execute_type type,
701 acpi_osd_exec_callback function, void *context) 705 acpi_osd_exec_callback function, void *context, int hp)
702{ 706{
703 acpi_status status = AE_OK; 707 acpi_status status = AE_OK;
704 struct acpi_os_dpc *dpc; 708 struct acpi_os_dpc *dpc;
705 struct workqueue_struct *queue; 709 struct workqueue_struct *queue;
710 int ret;
706 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, 711 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
707 "Scheduling function [%p(%p)] for deferred execution.\n", 712 "Scheduling function [%p(%p)] for deferred execution.\n",
708 function, context)); 713 function, context));
@@ -726,19 +731,38 @@ acpi_status acpi_os_execute(acpi_execute_type type,
726 dpc->function = function; 731 dpc->function = function;
727 dpc->context = context; 732 dpc->context = context;
728 733
729 INIT_WORK(&dpc->work, acpi_os_execute_deferred); 734 if (!hp) {
730 queue = (type == OSL_NOTIFY_HANDLER) ? kacpi_notify_wq : kacpid_wq; 735 INIT_WORK(&dpc->work, acpi_os_execute_deferred);
731 if (!queue_work(queue, &dpc->work)) { 736 queue = (type == OSL_NOTIFY_HANDLER) ?
732 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, 737 kacpi_notify_wq : kacpid_wq;
733 "Call to queue_work() failed.\n")); 738 ret = queue_work(queue, &dpc->work);
739 } else {
740 INIT_WORK(&dpc->work, acpi_os_execute_hp_deferred);
741 ret = schedule_work(&dpc->work);
742 }
743
744 if (!ret) {
745 printk(KERN_ERR PREFIX
746 "Call to queue_work() failed.\n");
734 status = AE_ERROR; 747 status = AE_ERROR;
735 kfree(dpc); 748 kfree(dpc);
736 } 749 }
737 return_ACPI_STATUS(status); 750 return_ACPI_STATUS(status);
738} 751}
739 752
753acpi_status acpi_os_execute(acpi_execute_type type,
754 acpi_osd_exec_callback function, void *context)
755{
756 return __acpi_os_execute(type, function, context, 0);
757}
740EXPORT_SYMBOL(acpi_os_execute); 758EXPORT_SYMBOL(acpi_os_execute);
741 759
760acpi_status acpi_os_hotplug_execute(acpi_osd_exec_callback function,
761 void *context)
762{
763 return __acpi_os_execute(0, function, context, 1);
764}
765
742void acpi_os_wait_events_complete(void *context) 766void acpi_os_wait_events_complete(void *context)
743{ 767{
744 flush_workqueue(kacpid_wq); 768 flush_workqueue(kacpid_wq);
@@ -1261,34 +1285,6 @@ acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
1261 return (AE_OK); 1285 return (AE_OK);
1262} 1286}
1263 1287
1264/**
1265 * acpi_dmi_dump - dump DMI slots needed for blacklist entry
1266 *
1267 * Returns 0 on success
1268 */
1269static int acpi_dmi_dump(void)
1270{
1271
1272 if (!dmi_available)
1273 return -1;
1274
1275 printk(KERN_NOTICE PREFIX "DMI System Vendor: %s\n",
1276 dmi_get_system_info(DMI_SYS_VENDOR));
1277 printk(KERN_NOTICE PREFIX "DMI Product Name: %s\n",
1278 dmi_get_system_info(DMI_PRODUCT_NAME));
1279 printk(KERN_NOTICE PREFIX "DMI Product Version: %s\n",
1280 dmi_get_system_info(DMI_PRODUCT_VERSION));
1281 printk(KERN_NOTICE PREFIX "DMI Board Name: %s\n",
1282 dmi_get_system_info(DMI_BOARD_NAME));
1283 printk(KERN_NOTICE PREFIX "DMI BIOS Vendor: %s\n",
1284 dmi_get_system_info(DMI_BIOS_VENDOR));
1285 printk(KERN_NOTICE PREFIX "DMI BIOS Date: %s\n",
1286 dmi_get_system_info(DMI_BIOS_DATE));
1287
1288 return 0;
1289}
1290
1291
1292/****************************************************************************** 1288/******************************************************************************
1293 * 1289 *
1294 * FUNCTION: acpi_os_validate_interface 1290 * FUNCTION: acpi_os_validate_interface
@@ -1315,21 +1311,6 @@ acpi_os_validate_interface (char *interface)
1315 osi_linux.cmdline ? " via cmdline" : 1311 osi_linux.cmdline ? " via cmdline" :
1316 osi_linux.dmi ? " via DMI" : ""); 1312 osi_linux.dmi ? " via DMI" : "");
1317 1313
1318 if (!osi_linux.dmi) {
1319 if (acpi_dmi_dump())
1320 printk(KERN_NOTICE PREFIX
1321 "[please extract dmidecode output]\n");
1322 printk(KERN_NOTICE PREFIX
1323 "Please send DMI info above to "
1324 "linux-acpi@vger.kernel.org\n");
1325 }
1326 if (!osi_linux.known && !osi_linux.cmdline) {
1327 printk(KERN_NOTICE PREFIX
1328 "If \"acpi_osi=%sLinux\" works better, "
1329 "please notify linux-acpi@vger.kernel.org\n",
1330 osi_linux.enable ? "!" : "");
1331 }
1332
1333 if (osi_linux.enable) 1314 if (osi_linux.enable)
1334 return AE_OK; 1315 return AE_OK;
1335 } 1316 }
diff --git a/drivers/acpi/parser/psloop.c b/drivers/acpi/parser/psloop.c
index c06238e55d98..4647039a0d8a 100644
--- a/drivers/acpi/parser/psloop.c
+++ b/drivers/acpi/parser/psloop.c
@@ -719,6 +719,8 @@ acpi_ps_complete_op(struct acpi_walk_state *walk_state,
719 *op = NULL; 719 *op = NULL;
720 } 720 }
721 721
722 ACPI_PREEMPTION_POINT();
723
722 return_ACPI_STATUS(AE_OK); 724 return_ACPI_STATUS(AE_OK);
723} 725}
724 726
diff --git a/drivers/acpi/parser/psparse.c b/drivers/acpi/parser/psparse.c
index 15e1702e48d6..68e932f215ea 100644
--- a/drivers/acpi/parser/psparse.c
+++ b/drivers/acpi/parser/psparse.c
@@ -137,6 +137,7 @@ acpi_ps_complete_this_op(struct acpi_walk_state * walk_state,
137 union acpi_parse_object *next; 137 union acpi_parse_object *next;
138 const struct acpi_opcode_info *parent_info; 138 const struct acpi_opcode_info *parent_info;
139 union acpi_parse_object *replacement_op = NULL; 139 union acpi_parse_object *replacement_op = NULL;
140 acpi_status status = AE_OK;
140 141
141 ACPI_FUNCTION_TRACE_PTR(ps_complete_this_op, op); 142 ACPI_FUNCTION_TRACE_PTR(ps_complete_this_op, op);
142 143
@@ -186,7 +187,7 @@ acpi_ps_complete_this_op(struct acpi_walk_state * walk_state,
186 replacement_op = 187 replacement_op =
187 acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP); 188 acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP);
188 if (!replacement_op) { 189 if (!replacement_op) {
189 goto allocate_error; 190 status = AE_NO_MEMORY;
190 } 191 }
191 break; 192 break;
192 193
@@ -211,7 +212,7 @@ acpi_ps_complete_this_op(struct acpi_walk_state * walk_state,
211 replacement_op = 212 replacement_op =
212 acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP); 213 acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP);
213 if (!replacement_op) { 214 if (!replacement_op) {
214 goto allocate_error; 215 status = AE_NO_MEMORY;
215 } 216 }
216 } else 217 } else
217 if ((op->common.parent->common.aml_opcode == 218 if ((op->common.parent->common.aml_opcode ==
@@ -226,13 +227,13 @@ acpi_ps_complete_this_op(struct acpi_walk_state * walk_state,
226 acpi_ps_alloc_op(op->common. 227 acpi_ps_alloc_op(op->common.
227 aml_opcode); 228 aml_opcode);
228 if (!replacement_op) { 229 if (!replacement_op) {
229 goto allocate_error; 230 status = AE_NO_MEMORY;
231 } else {
232 replacement_op->named.data =
233 op->named.data;
234 replacement_op->named.length =
235 op->named.length;
230 } 236 }
231
232 replacement_op->named.data =
233 op->named.data;
234 replacement_op->named.length =
235 op->named.length;
236 } 237 }
237 } 238 }
238 break; 239 break;
@@ -242,7 +243,7 @@ acpi_ps_complete_this_op(struct acpi_walk_state * walk_state,
242 replacement_op = 243 replacement_op =
243 acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP); 244 acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP);
244 if (!replacement_op) { 245 if (!replacement_op) {
245 goto allocate_error; 246 status = AE_NO_MEMORY;
246 } 247 }
247 } 248 }
248 249
@@ -302,14 +303,7 @@ acpi_ps_complete_this_op(struct acpi_walk_state * walk_state,
302 /* Now we can actually delete the subtree rooted at Op */ 303 /* Now we can actually delete the subtree rooted at Op */
303 304
304 acpi_ps_delete_parse_tree(op); 305 acpi_ps_delete_parse_tree(op);
305 return_ACPI_STATUS(AE_OK); 306 return_ACPI_STATUS(status);
306
307 allocate_error:
308
309 /* Always delete the subtree, even on error */
310
311 acpi_ps_delete_parse_tree(op);
312 return_ACPI_STATUS(AE_NO_MEMORY);
313} 307}
314 308
315/******************************************************************************* 309/*******************************************************************************
@@ -641,10 +635,12 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state)
641 ACPI_WALK_METHOD_RESTART; 635 ACPI_WALK_METHOD_RESTART;
642 } 636 }
643 } else { 637 } else {
644 /* On error, delete any return object */ 638 /* On error, delete any return object or implicit return */
645 639
646 acpi_ut_remove_reference(previous_walk_state-> 640 acpi_ut_remove_reference(previous_walk_state->
647 return_desc); 641 return_desc);
642 acpi_ds_clear_implicit_return
643 (previous_walk_state);
648 } 644 }
649 } 645 }
650 646
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 11acaee14d66..bf79d83bdfbb 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -384,6 +384,27 @@ acpi_pci_free_irq(struct acpi_prt_entry *entry,
384 return irq; 384 return irq;
385} 385}
386 386
387#ifdef CONFIG_X86_IO_APIC
388extern int noioapicquirk;
389
390static int bridge_has_boot_interrupt_variant(struct pci_bus *bus)
391{
392 struct pci_bus *bus_it;
393
394 for (bus_it = bus ; bus_it ; bus_it = bus_it->parent) {
395 if (!bus_it->self)
396 return 0;
397
398 printk(KERN_INFO "vendor=%04x device=%04x\n", bus_it->self->vendor,
399 bus_it->self->device);
400
401 if (bus_it->self->irq_reroute_variant)
402 return bus_it->self->irq_reroute_variant;
403 }
404 return 0;
405}
406#endif /* CONFIG_X86_IO_APIC */
407
387/* 408/*
388 * acpi_pci_irq_lookup 409 * acpi_pci_irq_lookup
389 * success: return IRQ >= 0 410 * success: return IRQ >= 0
@@ -413,6 +434,41 @@ acpi_pci_irq_lookup(struct pci_bus *bus,
413 } 434 }
414 435
415 ret = func(entry, triggering, polarity, link); 436 ret = func(entry, triggering, polarity, link);
437
438#ifdef CONFIG_X86_IO_APIC
439 /*
440 * Some chipsets (e.g. intel 6700PXH) generate a legacy INTx when the
441 * IRQ entry in the chipset's IO-APIC is masked (as, e.g. the RT kernel
442 * does during interrupt handling). When this INTx generation cannot be
443 * disabled, we reroute these interrupts to their legacy equivalent to
444 * get rid of spurious interrupts.
445 */
446 if (!noioapicquirk) {
447 switch (bridge_has_boot_interrupt_variant(bus)) {
448 case 0:
449 /* no rerouting necessary */
450 break;
451
452 case INTEL_IRQ_REROUTE_VARIANT:
453 /*
454 * Remap according to INTx routing table in 6700PXH
455 * specs, intel order number 302628-002, section
456 * 2.15.2. Other chipsets (80332, ...) have the same
457 * mapping and are handled here as well.
458 */
459 printk(KERN_INFO "pci irq %d -> rerouted to legacy "
460 "irq %d\n", ret, (ret % 4) + 16);
461 ret = (ret % 4) + 16;
462 break;
463
464 default:
465 printk(KERN_INFO "not rerouting irq %d to legacy irq: "
466 "unknown mapping\n", ret);
467 break;
468 }
469 }
470#endif /* CONFIG_X86_IO_APIC */
471
416 return ret; 472 return ret;
417} 473}
418 474
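
For the boot-interrupt rerouting added to acpi_pci_irq_lookup() above, the remap follows the 6700PXH INTx routing referenced in the comment: the four INTx lines wrap modulo 4 onto the legacy IRQs 16-19. A quick standalone check of that arithmetic (the sample IRQ values are hypothetical, not taken from any real routing table):

#include <stdio.h>

/* Same expression as the quirk: (irq % 4) + 16 maps any INTx IRQ onto 16-19 */
static int reroute_to_legacy(int irq)
{
	return (irq % 4) + 16;
}

int main(void)
{
	int sample[] = { 16, 17, 22, 35 };	/* hypothetical IO-APIC IRQs */
	unsigned int i;

	for (i = 0; i < sizeof(sample) / sizeof(sample[0]); i++)
		printf("pci irq %d -> rerouted to legacy irq %d\n",
		       sample[i], reroute_to_legacy(sample[i]));
	return 0;
}
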
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index cf47805a7448..e52ad91ce2dc 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -531,7 +531,7 @@ int __init acpi_irq_penalty_init(void)
531 return 0; 531 return 0;
532} 532}
533 533
534static int acpi_irq_balance; /* 0: static, 1: balance */ 534static int acpi_irq_balance = -1; /* 0: static, 1: balance */
535 535
536static int acpi_pci_link_allocate(struct acpi_pci_link *link) 536static int acpi_pci_link_allocate(struct acpi_pci_link *link)
537{ 537{
@@ -709,7 +709,7 @@ int acpi_pci_link_free_irq(acpi_handle handle)
709 acpi_device_bid(link->device))); 709 acpi_device_bid(link->device)));
710 710
711 if (link->refcnt == 0) { 711 if (link->refcnt == 0) {
712 acpi_ut_evaluate_object(link->device->handle, "_DIS", 0, NULL); 712 acpi_evaluate_object(link->device->handle, "_DIS", NULL, NULL);
713 } 713 }
714 mutex_unlock(&acpi_link_lock); 714 mutex_unlock(&acpi_link_lock);
715 return (link->irq.active); 715 return (link->irq.active);
@@ -737,7 +737,7 @@ static int acpi_pci_link_add(struct acpi_device *device)
737 link->device = device; 737 link->device = device;
738 strcpy(acpi_device_name(device), ACPI_PCI_LINK_DEVICE_NAME); 738 strcpy(acpi_device_name(device), ACPI_PCI_LINK_DEVICE_NAME);
739 strcpy(acpi_device_class(device), ACPI_PCI_LINK_CLASS); 739 strcpy(acpi_device_class(device), ACPI_PCI_LINK_CLASS);
740 acpi_driver_data(device) = link; 740 device->driver_data = link;
741 741
742 mutex_lock(&acpi_link_lock); 742 mutex_lock(&acpi_link_lock);
743 result = acpi_pci_link_get_possible(link); 743 result = acpi_pci_link_get_possible(link);
@@ -773,7 +773,7 @@ static int acpi_pci_link_add(struct acpi_device *device)
773 773
774 end: 774 end:
775 /* disable all links -- to be activated on use */ 775 /* disable all links -- to be activated on use */
776 acpi_ut_evaluate_object(device->handle, "_DIS", 0, NULL); 776 acpi_evaluate_object(device->handle, "_DIS", NULL, NULL);
777 mutex_unlock(&acpi_link_lock); 777 mutex_unlock(&acpi_link_lock);
778 778
779 if (result) 779 if (result)
@@ -950,10 +950,17 @@ device_initcall(irqrouter_init_sysfs);
950 950
951static int __init acpi_pci_link_init(void) 951static int __init acpi_pci_link_init(void)
952{ 952{
953
954 if (acpi_noirq) 953 if (acpi_noirq)
955 return 0; 954 return 0;
956 955
956 if (acpi_irq_balance == -1) {
957 /* no command line switch: enable balancing in IOAPIC mode */
958 if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC)
959 acpi_irq_balance = 1;
960 else
961 acpi_irq_balance = 0;
962 }
963
957 acpi_link.count = 0; 964 acpi_link.count = 0;
958 INIT_LIST_HEAD(&acpi_link.entries); 965 INIT_LIST_HEAD(&acpi_link.entries);
959 966
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index c3fed31166b5..642554b1b60c 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -190,7 +190,7 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
190 struct acpi_pci_root *root = NULL; 190 struct acpi_pci_root *root = NULL;
191 struct acpi_pci_root *tmp; 191 struct acpi_pci_root *tmp;
192 acpi_status status = AE_OK; 192 acpi_status status = AE_OK;
193 unsigned long value = 0; 193 unsigned long long value = 0;
194 acpi_handle handle = NULL; 194 acpi_handle handle = NULL;
195 struct acpi_device *child; 195 struct acpi_device *child;
196 196
@@ -206,7 +206,7 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
206 root->device = device; 206 root->device = device;
207 strcpy(acpi_device_name(device), ACPI_PCI_ROOT_DEVICE_NAME); 207 strcpy(acpi_device_name(device), ACPI_PCI_ROOT_DEVICE_NAME);
208 strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS); 208 strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS);
209 acpi_driver_data(device) = root; 209 device->driver_data = root;
210 210
211 device->ops.bind = acpi_pci_bind; 211 device->ops.bind = acpi_pci_bind;
212 212
@@ -376,15 +376,9 @@ static int acpi_pci_root_remove(struct acpi_device *device, int type)
376 376
377static int __init acpi_pci_root_init(void) 377static int __init acpi_pci_root_init(void)
378{ 378{
379
380 if (acpi_pci_disabled) 379 if (acpi_pci_disabled)
381 return 0; 380 return 0;
382 381
383 /* DEBUG:
384 acpi_dbg_layer = ACPI_PCI_COMPONENT;
385 acpi_dbg_level = 0xFFFFFFFF;
386 */
387
388 if (acpi_bus_register_driver(&acpi_pci_root_driver) < 0) 382 if (acpi_bus_register_driver(&acpi_pci_root_driver) < 0)
389 return -ENODEV; 383 return -ENODEV;
390 384
diff --git a/drivers/acpi/pci_slot.c b/drivers/acpi/pci_slot.c
index d5b4ef898879..cd1f4467be7b 100644
--- a/drivers/acpi/pci_slot.c
+++ b/drivers/acpi/pci_slot.c
@@ -76,10 +76,10 @@ static struct acpi_pci_driver acpi_pci_slot_driver = {
76}; 76};
77 77
78static int 78static int
79check_slot(acpi_handle handle, unsigned long *sun) 79check_slot(acpi_handle handle, unsigned long long *sun)
80{ 80{
81 int device = -1; 81 int device = -1;
82 unsigned long adr, sta; 82 unsigned long long adr, sta;
83 acpi_status status; 83 acpi_status status;
84 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 84 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
85 85
@@ -132,7 +132,7 @@ static acpi_status
132register_slot(acpi_handle handle, u32 lvl, void *context, void **rv) 132register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
133{ 133{
134 int device; 134 int device;
135 unsigned long sun; 135 unsigned long long sun;
136 char name[SLOT_NAME_SIZE]; 136 char name[SLOT_NAME_SIZE];
137 struct acpi_pci_slot *slot; 137 struct acpi_pci_slot *slot;
138 struct pci_slot *pci_slot; 138 struct pci_slot *pci_slot;
@@ -150,7 +150,7 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
150 } 150 }
151 151
152 snprintf(name, sizeof(name), "%u", (u32)sun); 152 snprintf(name, sizeof(name), "%u", (u32)sun);
153 pci_slot = pci_create_slot(pci_bus, device, name); 153 pci_slot = pci_create_slot(pci_bus, device, name, NULL);
154 if (IS_ERR(pci_slot)) { 154 if (IS_ERR(pci_slot)) {
155 err("pci_create_slot returned %ld\n", PTR_ERR(pci_slot)); 155 err("pci_create_slot returned %ld\n", PTR_ERR(pci_slot));
156 kfree(slot); 156 kfree(slot);
@@ -182,7 +182,7 @@ static acpi_status
182walk_p2p_bridge(acpi_handle handle, u32 lvl, void *context, void **rv) 182walk_p2p_bridge(acpi_handle handle, u32 lvl, void *context, void **rv)
183{ 183{
184 int device, function; 184 int device, function;
185 unsigned long adr; 185 unsigned long long adr;
186 acpi_status status; 186 acpi_status status;
187 acpi_handle dummy_handle; 187 acpi_handle dummy_handle;
188 acpi_walk_callback user_function; 188 acpi_walk_callback user_function;
@@ -239,7 +239,7 @@ static int
239walk_root_bridge(acpi_handle handle, acpi_walk_callback user_function) 239walk_root_bridge(acpi_handle handle, acpi_walk_callback user_function)
240{ 240{
241 int seg, bus; 241 int seg, bus;
242 unsigned long tmp; 242 unsigned long long tmp;
243 acpi_status status; 243 acpi_status status;
244 acpi_handle dummy_handle; 244 acpi_handle dummy_handle;
245 struct pci_bus *pci_bus; 245 struct pci_bus *pci_bus;
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 4ab21cb1c8c7..bb7d50dd2818 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -44,9 +44,8 @@
44#include <acpi/acpi_bus.h> 44#include <acpi/acpi_bus.h>
45#include <acpi/acpi_drivers.h> 45#include <acpi/acpi_drivers.h>
46 46
47#define _COMPONENT ACPI_POWER_COMPONENT 47#define _COMPONENT ACPI_POWER_COMPONENT
48ACPI_MODULE_NAME("power"); 48ACPI_MODULE_NAME("power");
49#define ACPI_POWER_COMPONENT 0x00800000
50#define ACPI_POWER_CLASS "power_resource" 49#define ACPI_POWER_CLASS "power_resource"
51#define ACPI_POWER_DEVICE_NAME "Power Resource" 50#define ACPI_POWER_DEVICE_NAME "Power Resource"
52#define ACPI_POWER_FILE_INFO "info" 51#define ACPI_POWER_FILE_INFO "info"
@@ -54,6 +53,14 @@ ACPI_MODULE_NAME("power");
54#define ACPI_POWER_RESOURCE_STATE_OFF 0x00 53#define ACPI_POWER_RESOURCE_STATE_OFF 0x00
55#define ACPI_POWER_RESOURCE_STATE_ON 0x01 54#define ACPI_POWER_RESOURCE_STATE_ON 0x01
56#define ACPI_POWER_RESOURCE_STATE_UNKNOWN 0xFF 55#define ACPI_POWER_RESOURCE_STATE_UNKNOWN 0xFF
56
57#ifdef MODULE_PARAM_PREFIX
58#undef MODULE_PARAM_PREFIX
59#endif
60#define MODULE_PARAM_PREFIX "acpi."
61int acpi_power_nocheck;
62module_param_named(power_nocheck, acpi_power_nocheck, bool, 000);
63
57static int acpi_power_add(struct acpi_device *device); 64static int acpi_power_add(struct acpi_device *device);
58static int acpi_power_remove(struct acpi_device *device, int type); 65static int acpi_power_remove(struct acpi_device *device, int type);
59static int acpi_power_resume(struct acpi_device *device); 66static int acpi_power_resume(struct acpi_device *device);
@@ -128,16 +135,16 @@ acpi_power_get_context(acpi_handle handle,
128 return 0; 135 return 0;
129} 136}
130 137
131static int acpi_power_get_state(struct acpi_power_resource *resource, int *state) 138static int acpi_power_get_state(acpi_handle handle, int *state)
132{ 139{
133 acpi_status status = AE_OK; 140 acpi_status status = AE_OK;
134 unsigned long sta = 0; 141 unsigned long long sta = 0;
135 142
136 143
137 if (!resource || !state) 144 if (!handle || !state)
138 return -EINVAL; 145 return -EINVAL;
139 146
140 status = acpi_evaluate_integer(resource->device->handle, "_STA", NULL, &sta); 147 status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
141 if (ACPI_FAILURE(status)) 148 if (ACPI_FAILURE(status))
142 return -ENODEV; 149 return -ENODEV;
143 150
@@ -145,7 +152,8 @@ static int acpi_power_get_state(struct acpi_power_resource *resource, int *state
145 ACPI_POWER_RESOURCE_STATE_OFF; 152 ACPI_POWER_RESOURCE_STATE_OFF;
146 153
147 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Resource [%s] is %s\n", 154 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Resource [%s] is %s\n",
148 resource->name, state ? "on" : "off")); 155 acpi_ut_get_node_name(handle),
156 *state ? "on" : "off"));
149 157
150 return 0; 158 return 0;
151} 159}
@@ -153,7 +161,6 @@ static int acpi_power_get_state(struct acpi_power_resource *resource, int *state
153static int acpi_power_get_list_state(struct acpi_handle_list *list, int *state) 161static int acpi_power_get_list_state(struct acpi_handle_list *list, int *state)
154{ 162{
155 int result = 0, state1; 163 int result = 0, state1;
156 struct acpi_power_resource *resource = NULL;
157 u32 i = 0; 164 u32 i = 0;
158 165
159 166
@@ -161,12 +168,15 @@ static int acpi_power_get_list_state(struct acpi_handle_list *list, int *state)
161 return -EINVAL; 168 return -EINVAL;
162 169
163 /* The state of the list is 'on' IFF all resources are 'on'. */ 170 /* The state of the list is 'on' IFF all resources are 'on'. */
171 /* */
164 172
165 for (i = 0; i < list->count; i++) { 173 for (i = 0; i < list->count; i++) {
166 result = acpi_power_get_context(list->handles[i], &resource); 174 /*
167 if (result) 175 * The state of the power resource can be obtained by
 168 	 return result;					 176 		 * using the ACPI handle. In that case it is unnecessary to
169 result = acpi_power_get_state(resource, &state1); 177 * get the Power resource first and then get its state again.
178 */
179 result = acpi_power_get_state(list->handles[i], &state1);
170 if (result) 180 if (result)
171 return result; 181 return result;
172 182
@@ -226,12 +236,18 @@ static int acpi_power_on(acpi_handle handle, struct acpi_device *dev)
226 if (ACPI_FAILURE(status)) 236 if (ACPI_FAILURE(status))
227 return -ENODEV; 237 return -ENODEV;
228 238
229 result = acpi_power_get_state(resource, &state); 239 if (!acpi_power_nocheck) {
230 if (result) 240 /*
231 return result; 241 * If acpi_power_nocheck is set, it is unnecessary to check
232 if (state != ACPI_POWER_RESOURCE_STATE_ON) 242 * the power state after power transition.
233 return -ENOEXEC; 243 */
234 244 result = acpi_power_get_state(resource->device->handle,
245 &state);
246 if (result)
247 return result;
248 if (state != ACPI_POWER_RESOURCE_STATE_ON)
249 return -ENOEXEC;
250 }
235 /* Update the power resource's _device_ power state */ 251 /* Update the power resource's _device_ power state */
236 resource->device->power.state = ACPI_STATE_D0; 252 resource->device->power.state = ACPI_STATE_D0;
237 253
@@ -277,11 +293,17 @@ static int acpi_power_off_device(acpi_handle handle, struct acpi_device *dev)
277 if (ACPI_FAILURE(status)) 293 if (ACPI_FAILURE(status))
278 return -ENODEV; 294 return -ENODEV;
279 295
280 result = acpi_power_get_state(resource, &state); 296 if (!acpi_power_nocheck) {
281 if (result) 297 /*
282 return result; 298 * If acpi_power_nocheck is set, it is unnecessary to check
283 if (state != ACPI_POWER_RESOURCE_STATE_OFF) 299 * the power state after power transition.
284 return -ENOEXEC; 300 */
301 result = acpi_power_get_state(handle, &state);
302 if (result)
303 return result;
304 if (state != ACPI_POWER_RESOURCE_STATE_OFF)
305 return -ENOEXEC;
306 }
285 307
286 /* Update the power resource's _device_ power state */ 308 /* Update the power resource's _device_ power state */
287 resource->device->power.state = ACPI_STATE_D3; 309 resource->device->power.state = ACPI_STATE_D3;
@@ -494,11 +516,6 @@ int acpi_power_transition(struct acpi_device *device, int state)
494 cl = &device->power.states[device->power.state].resources; 516 cl = &device->power.states[device->power.state].resources;
495 tl = &device->power.states[state].resources; 517 tl = &device->power.states[state].resources;
496 518
497 if (!cl->count && !tl->count) {
498 result = -ENODEV;
499 goto end;
500 }
501
502 /* TBD: Resources must be ordered. */ 519 /* TBD: Resources must be ordered. */
503 520
504 /* 521 /*
@@ -555,7 +572,7 @@ static int acpi_power_seq_show(struct seq_file *seq, void *offset)
555 if (!resource) 572 if (!resource)
556 goto end; 573 goto end;
557 574
558 result = acpi_power_get_state(resource, &state); 575 result = acpi_power_get_state(resource->device->handle, &state);
559 if (result) 576 if (result)
560 goto end; 577 goto end;
561 578
@@ -657,7 +674,7 @@ static int acpi_power_add(struct acpi_device *device)
657 strcpy(resource->name, device->pnp.bus_id); 674 strcpy(resource->name, device->pnp.bus_id);
658 strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME); 675 strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME);
659 strcpy(acpi_device_class(device), ACPI_POWER_CLASS); 676 strcpy(acpi_device_class(device), ACPI_POWER_CLASS);
660 acpi_driver_data(device) = resource; 677 device->driver_data = resource;
661 678
 662 	/* Evalute the object to get the system level and resource order. */	 679 	/* Evalute the object to get the system level and resource order. */
663 status = acpi_evaluate_object(device->handle, NULL, NULL, &buffer); 680 status = acpi_evaluate_object(device->handle, NULL, NULL, &buffer);
@@ -668,7 +685,7 @@ static int acpi_power_add(struct acpi_device *device)
668 resource->system_level = acpi_object.power_resource.system_level; 685 resource->system_level = acpi_object.power_resource.system_level;
669 resource->order = acpi_object.power_resource.resource_order; 686 resource->order = acpi_object.power_resource.resource_order;
670 687
671 result = acpi_power_get_state(resource, &state); 688 result = acpi_power_get_state(device->handle, &state);
672 if (result) 689 if (result)
673 goto end; 690 goto end;
674 691
@@ -733,9 +750,9 @@ static int acpi_power_resume(struct acpi_device *device)
733 if (!device || !acpi_driver_data(device)) 750 if (!device || !acpi_driver_data(device))
734 return -EINVAL; 751 return -EINVAL;
735 752
736 resource = (struct acpi_power_resource *)acpi_driver_data(device); 753 resource = acpi_driver_data(device);
737 754
738 result = acpi_power_get_state(resource, &state); 755 result = acpi_power_get_state(device->handle, &state);
739 if (result) 756 if (result)
740 return result; 757 return result;
741 758
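
Two things change in power.c: acpi_power_get_state() is now keyed by an ACPI handle and evaluates _STA directly, and a new acpi.power_nocheck parameter (e.g. booting with acpi.power_nocheck=1) skips the _STA verification after a power-resource transition. A condensed sketch of what the reworked state lookup amounts to; the bit-0 mask is assumed from the _STA definition rather than visible in the hunk, and the helper name is illustrative:

static int example_power_state(acpi_handle handle, int *state)
{
	unsigned long long sta = 0;
	acpi_status status;

	if (!handle || !state)
		return -EINVAL;

	/* _STA on a PowerResource reports whether the resource is on */
	status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	*state = (sta & 0x01) ? ACPI_POWER_RESOURCE_STATE_ON
			      : ACPI_POWER_RESOURCE_STATE_OFF;
	return 0;
}
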
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index ee68ac54c0d4..34948362f41d 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -59,7 +59,6 @@
59#include <acpi/acpi_drivers.h> 59#include <acpi/acpi_drivers.h>
60#include <acpi/processor.h> 60#include <acpi/processor.h>
61 61
62#define ACPI_PROCESSOR_COMPONENT 0x01000000
63#define ACPI_PROCESSOR_CLASS "processor" 62#define ACPI_PROCESSOR_CLASS "processor"
64#define ACPI_PROCESSOR_DEVICE_NAME "Processor" 63#define ACPI_PROCESSOR_DEVICE_NAME "Processor"
65#define ACPI_PROCESSOR_FILE_INFO "info" 64#define ACPI_PROCESSOR_FILE_INFO "info"
@@ -89,6 +88,7 @@ static int acpi_processor_handle_eject(struct acpi_processor *pr);
89 88
90 89
91static const struct acpi_device_id processor_device_ids[] = { 90static const struct acpi_device_id processor_device_ids[] = {
91 {ACPI_PROCESSOR_OBJECT_HID, 0},
92 {ACPI_PROCESSOR_HID, 0}, 92 {ACPI_PROCESSOR_HID, 0},
93 {"", 0}, 93 {"", 0},
94}; 94};
@@ -409,7 +409,7 @@ static int acpi_processor_remove_fs(struct acpi_device *device)
409/* Use the acpiid in MADT to map cpus in case of SMP */ 409/* Use the acpiid in MADT to map cpus in case of SMP */
410 410
411#ifndef CONFIG_SMP 411#ifndef CONFIG_SMP
412static int get_cpu_id(acpi_handle handle, u32 acpi_id) {return -1;} 412static int get_cpu_id(acpi_handle handle, int type, u32 acpi_id) { return -1; }
413#else 413#else
414 414
415static struct acpi_table_madt *madt; 415static struct acpi_table_madt *madt;
@@ -428,27 +428,35 @@ static int map_lapic_id(struct acpi_subtable_header *entry,
428} 428}
429 429
430static int map_lsapic_id(struct acpi_subtable_header *entry, 430static int map_lsapic_id(struct acpi_subtable_header *entry,
431 u32 acpi_id, int *apic_id) 431 int device_declaration, u32 acpi_id, int *apic_id)
432{ 432{
433 struct acpi_madt_local_sapic *lsapic = 433 struct acpi_madt_local_sapic *lsapic =
434 (struct acpi_madt_local_sapic *)entry; 434 (struct acpi_madt_local_sapic *)entry;
435 u32 tmp = (lsapic->id << 8) | lsapic->eid;
436
435 /* Only check enabled APICs*/ 437 /* Only check enabled APICs*/
436 if (lsapic->lapic_flags & ACPI_MADT_ENABLED) { 438 if (!(lsapic->lapic_flags & ACPI_MADT_ENABLED))
437 /* First check against id */ 439 return 0;
438 if (lsapic->processor_id == acpi_id) { 440
439 *apic_id = (lsapic->id << 8) | lsapic->eid; 441 /* Device statement declaration type */
440 return 1; 442 if (device_declaration) {
441 /* Check against optional uid */ 443 if (entry->length < 16)
442 } else if (entry->length >= 16 && 444 printk(KERN_ERR PREFIX
443 lsapic->uid == acpi_id) { 445 "Invalid LSAPIC with Device type processor (SAPIC ID %#x)\n",
444 *apic_id = lsapic->uid; 446 tmp);
445 return 1; 447 else if (lsapic->uid == acpi_id)
446 } 448 goto found;
447 } 449 /* Processor statement declaration type */
450 } else if (lsapic->processor_id == acpi_id)
451 goto found;
452
448 return 0; 453 return 0;
454found:
455 *apic_id = tmp;
456 return 1;
449} 457}
450 458
451static int map_madt_entry(u32 acpi_id) 459static int map_madt_entry(int type, u32 acpi_id)
452{ 460{
453 unsigned long madt_end, entry; 461 unsigned long madt_end, entry;
454 int apic_id = -1; 462 int apic_id = -1;
@@ -469,7 +477,7 @@ static int map_madt_entry(u32 acpi_id)
469 if (map_lapic_id(header, acpi_id, &apic_id)) 477 if (map_lapic_id(header, acpi_id, &apic_id))
470 break; 478 break;
471 } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) { 479 } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
472 if (map_lsapic_id(header, acpi_id, &apic_id)) 480 if (map_lsapic_id(header, type, acpi_id, &apic_id))
473 break; 481 break;
474 } 482 }
475 entry += header->length; 483 entry += header->length;
@@ -477,7 +485,7 @@ static int map_madt_entry(u32 acpi_id)
477 return apic_id; 485 return apic_id;
478} 486}
479 487
480static int map_mat_entry(acpi_handle handle, u32 acpi_id) 488static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
481{ 489{
482 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 490 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
483 union acpi_object *obj; 491 union acpi_object *obj;
@@ -500,7 +508,7 @@ static int map_mat_entry(acpi_handle handle, u32 acpi_id)
500 if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) { 508 if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
501 map_lapic_id(header, acpi_id, &apic_id); 509 map_lapic_id(header, acpi_id, &apic_id);
502 } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) { 510 } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
503 map_lsapic_id(header, acpi_id, &apic_id); 511 map_lsapic_id(header, type, acpi_id, &apic_id);
504 } 512 }
505 513
506exit: 514exit:
@@ -509,14 +517,14 @@ exit:
509 return apic_id; 517 return apic_id;
510} 518}
511 519
512static int get_cpu_id(acpi_handle handle, u32 acpi_id) 520static int get_cpu_id(acpi_handle handle, int type, u32 acpi_id)
513{ 521{
514 int i; 522 int i;
515 int apic_id = -1; 523 int apic_id = -1;
516 524
517 apic_id = map_mat_entry(handle, acpi_id); 525 apic_id = map_mat_entry(handle, type, acpi_id);
518 if (apic_id == -1) 526 if (apic_id == -1)
519 apic_id = map_madt_entry(acpi_id); 527 apic_id = map_madt_entry(type, acpi_id);
520 if (apic_id == -1) 528 if (apic_id == -1)
521 return apic_id; 529 return apic_id;
522 530
@@ -532,15 +540,16 @@ static int get_cpu_id(acpi_handle handle, u32 acpi_id)
532 Driver Interface 540 Driver Interface
533 -------------------------------------------------------------------------- */ 541 -------------------------------------------------------------------------- */
534 542
535static int acpi_processor_get_info(struct acpi_processor *pr, unsigned has_uid) 543static int acpi_processor_get_info(struct acpi_device *device)
536{ 544{
537 acpi_status status = 0; 545 acpi_status status = 0;
538 union acpi_object object = { 0 }; 546 union acpi_object object = { 0 };
539 struct acpi_buffer buffer = { sizeof(union acpi_object), &object }; 547 struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
540 int cpu_index; 548 struct acpi_processor *pr;
549 int cpu_index, device_declaration = 0;
541 static int cpu0_initialized; 550 static int cpu0_initialized;
542 551
543 552 pr = acpi_driver_data(device);
544 if (!pr) 553 if (!pr)
545 return -EINVAL; 554 return -EINVAL;
546 555
@@ -561,22 +570,23 @@ static int acpi_processor_get_info(struct acpi_processor *pr, unsigned has_uid)
561 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 570 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
562 "No bus mastering arbitration control\n")); 571 "No bus mastering arbitration control\n"));
563 572
564 /* Check if it is a Device with HID and UID */ 573 if (!strcmp(acpi_device_hid(device), ACPI_PROCESSOR_HID)) {
565 if (has_uid) { 574 /*
566 unsigned long value; 575 * Declared with "Device" statement; match _UID.
576 * Note that we don't handle string _UIDs yet.
577 */
578 unsigned long long value;
567 status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID, 579 status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID,
568 NULL, &value); 580 NULL, &value);
569 if (ACPI_FAILURE(status)) { 581 if (ACPI_FAILURE(status)) {
570 printk(KERN_ERR PREFIX "Evaluating processor _UID\n"); 582 printk(KERN_ERR PREFIX
583 "Evaluating processor _UID [%#x]\n", status);
571 return -ENODEV; 584 return -ENODEV;
572 } 585 }
586 device_declaration = 1;
573 pr->acpi_id = value; 587 pr->acpi_id = value;
574 } else { 588 } else {
575 /* 589 /* Declared with "Processor" statement; match ProcessorID */
576 * Evalute the processor object. Note that it is common on SMP to
577 * have the first (boot) processor with a valid PBLK address while
578 * all others have a NULL address.
579 */
580 status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer); 590 status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer);
581 if (ACPI_FAILURE(status)) { 591 if (ACPI_FAILURE(status)) {
582 printk(KERN_ERR PREFIX "Evaluating processor object\n"); 592 printk(KERN_ERR PREFIX "Evaluating processor object\n");
@@ -584,12 +594,13 @@ static int acpi_processor_get_info(struct acpi_processor *pr, unsigned has_uid)
584 } 594 }
585 595
586 /* 596 /*
587 * TBD: Synch processor ID (via LAPIC/LSAPIC structures) on SMP. 597 * TBD: Synch processor ID (via LAPIC/LSAPIC structures) on SMP.
588 * >>> 'acpi_get_processor_id(acpi_id, &id)' in arch/xxx/acpi.c 598 * >>> 'acpi_get_processor_id(acpi_id, &id)' in
589 */ 599 * arch/xxx/acpi.c
600 */
590 pr->acpi_id = object.processor.proc_id; 601 pr->acpi_id = object.processor.proc_id;
591 } 602 }
592 cpu_index = get_cpu_id(pr->handle, pr->acpi_id); 603 cpu_index = get_cpu_id(pr->handle, device_declaration, pr->acpi_id);
593 604
594 /* Handle UP system running SMP kernel, with no LAPIC in MADT */ 605 /* Handle UP system running SMP kernel, with no LAPIC in MADT */
595 if (!cpu0_initialized && (cpu_index == -1) && 606 if (!cpu0_initialized && (cpu_index == -1) &&
@@ -661,7 +672,7 @@ static int __cpuinit acpi_processor_start(struct acpi_device *device)
661 672
662 pr = acpi_driver_data(device); 673 pr = acpi_driver_data(device);
663 674
664 result = acpi_processor_get_info(pr, device->flags.unique_id); 675 result = acpi_processor_get_info(device);
665 if (result) { 676 if (result) {
666 /* Processor is physically not present */ 677 /* Processor is physically not present */
667 return 0; 678 return 0;
@@ -761,20 +772,20 @@ static void acpi_processor_notify(acpi_handle handle, u32 event, void *data)
761 acpi_bus_generate_proc_event(device, event, 772 acpi_bus_generate_proc_event(device, event,
762 pr->performance_platform_limit); 773 pr->performance_platform_limit);
763 acpi_bus_generate_netlink_event(device->pnp.device_class, 774 acpi_bus_generate_netlink_event(device->pnp.device_class,
764 device->dev.bus_id, event, 775 dev_name(&device->dev), event,
765 pr->performance_platform_limit); 776 pr->performance_platform_limit);
766 break; 777 break;
767 case ACPI_PROCESSOR_NOTIFY_POWER: 778 case ACPI_PROCESSOR_NOTIFY_POWER:
768 acpi_processor_cst_has_changed(pr); 779 acpi_processor_cst_has_changed(pr);
769 acpi_bus_generate_proc_event(device, event, 0); 780 acpi_bus_generate_proc_event(device, event, 0);
770 acpi_bus_generate_netlink_event(device->pnp.device_class, 781 acpi_bus_generate_netlink_event(device->pnp.device_class,
771 device->dev.bus_id, event, 0); 782 dev_name(&device->dev), event, 0);
772 break; 783 break;
773 case ACPI_PROCESSOR_NOTIFY_THROTTLING: 784 case ACPI_PROCESSOR_NOTIFY_THROTTLING:
774 acpi_processor_tstate_has_changed(pr); 785 acpi_processor_tstate_has_changed(pr);
775 acpi_bus_generate_proc_event(device, event, 0); 786 acpi_bus_generate_proc_event(device, event, 0);
776 acpi_bus_generate_netlink_event(device->pnp.device_class, 787 acpi_bus_generate_netlink_event(device->pnp.device_class,
777 device->dev.bus_id, event, 0); 788 dev_name(&device->dev), event, 0);
778 default: 789 default:
779 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 790 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
780 "Unsupported event [0x%x]\n", event)); 791 "Unsupported event [0x%x]\n", event));
@@ -818,7 +829,7 @@ static int acpi_processor_add(struct acpi_device *device)
818 pr->handle = device->handle; 829 pr->handle = device->handle;
819 strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME); 830 strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
820 strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS); 831 strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
821 acpi_driver_data(device) = pr; 832 device->driver_data = pr;
822 833
823 return 0; 834 return 0;
824} 835}
@@ -875,7 +886,7 @@ static int acpi_processor_remove(struct acpi_device *device, int type)
875static int is_processor_present(acpi_handle handle) 886static int is_processor_present(acpi_handle handle)
876{ 887{
877 acpi_status status; 888 acpi_status status;
878 unsigned long sta = 0; 889 unsigned long long sta = 0;
879 890
880 891
881 status = acpi_evaluate_integer(handle, "_STA", NULL, &sta); 892 status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
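
The processor driver now distinguishes processors declared with a Device statement (matched via _UID) from those declared with a Processor statement (matched via the ProcessorID field) when mapping an ACPI ID to an (x)APIC ID. A hypothetical condensation of the new matching rule in map_lsapic_id(), leaving out the length check and error reporting:

static int example_lsapic_match(struct acpi_madt_local_sapic *lsapic,
				int device_declaration, u32 acpi_id)
{
	/* disabled MADT entries never match */
	if (!(lsapic->lapic_flags & ACPI_MADT_ENABLED))
		return 0;

	if (device_declaration)
		return lsapic->uid == acpi_id;		/* Device: match _UID */

	return lsapic->processor_id == acpi_id;		/* Processor statement */
}
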
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index cf5b1b7b684f..5f8d746a9b81 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -59,7 +59,6 @@
59#include <acpi/processor.h> 59#include <acpi/processor.h>
60#include <asm/processor.h> 60#include <asm/processor.h>
61 61
62#define ACPI_PROCESSOR_COMPONENT 0x01000000
63#define ACPI_PROCESSOR_CLASS "processor" 62#define ACPI_PROCESSOR_CLASS "processor"
64#define _COMPONENT ACPI_PROCESSOR_COMPONENT 63#define _COMPONENT ACPI_PROCESSOR_COMPONENT
65ACPI_MODULE_NAME("processor_idle"); 64ACPI_MODULE_NAME("processor_idle");
@@ -1587,6 +1586,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
1587 1586
1588 if (acpi_idle_bm_check()) { 1587 if (acpi_idle_bm_check()) {
1589 if (dev->safe_state) { 1588 if (dev->safe_state) {
1589 dev->last_state = dev->safe_state;
1590 return dev->safe_state->enter(dev, dev->safe_state); 1590 return dev->safe_state->enter(dev, dev->safe_state);
1591 } else { 1591 } else {
1592 local_irq_disable(); 1592 local_irq_disable();
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 80c251ec6d2a..0d7b772bef50 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -39,10 +39,14 @@
39#include <asm/uaccess.h> 39#include <asm/uaccess.h>
40#endif 40#endif
41 41
42#ifdef CONFIG_X86
43#include <asm/cpufeature.h>
44#endif
45
42#include <acpi/acpi_bus.h> 46#include <acpi/acpi_bus.h>
47#include <acpi/acpi_drivers.h>
43#include <acpi/processor.h> 48#include <acpi/processor.h>
44 49
45#define ACPI_PROCESSOR_COMPONENT 0x01000000
46#define ACPI_PROCESSOR_CLASS "processor" 50#define ACPI_PROCESSOR_CLASS "processor"
47#define ACPI_PROCESSOR_FILE_PERFORMANCE "performance" 51#define ACPI_PROCESSOR_FILE_PERFORMANCE "performance"
48#define _COMPONENT ACPI_PROCESSOR_COMPONENT 52#define _COMPONENT ACPI_PROCESSOR_COMPONENT
@@ -126,7 +130,7 @@ static struct notifier_block acpi_ppc_notifier_block = {
126static int acpi_processor_get_platform_limit(struct acpi_processor *pr) 130static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
127{ 131{
128 acpi_status status = 0; 132 acpi_status status = 0;
129 unsigned long ppc = 0; 133 unsigned long long ppc = 0;
130 134
131 135
132 if (!pr) 136 if (!pr)
@@ -334,7 +338,6 @@ static int acpi_processor_get_performance_info(struct acpi_processor *pr)
334 acpi_status status = AE_OK; 338 acpi_status status = AE_OK;
335 acpi_handle handle = NULL; 339 acpi_handle handle = NULL;
336 340
337
338 if (!pr || !pr->performance || !pr->handle) 341 if (!pr || !pr->performance || !pr->handle)
339 return -EINVAL; 342 return -EINVAL;
340 343
@@ -347,13 +350,27 @@ static int acpi_processor_get_performance_info(struct acpi_processor *pr)
347 350
348 result = acpi_processor_get_performance_control(pr); 351 result = acpi_processor_get_performance_control(pr);
349 if (result) 352 if (result)
350 return result; 353 goto update_bios;
351 354
352 result = acpi_processor_get_performance_states(pr); 355 result = acpi_processor_get_performance_states(pr);
353 if (result) 356 if (result)
354 return result; 357 goto update_bios;
355 358
356 return 0; 359 return 0;
360
361 /*
362 * Having _PPC but missing frequencies (_PSS, _PCT) is a very good hint that
363 * the BIOS is older than the CPU and does not know its frequencies
364 */
365 update_bios:
366#ifdef CONFIG_X86
367 if (ACPI_SUCCESS(acpi_get_handle(pr->handle, "_PPC", &handle))){
368 if(boot_cpu_has(X86_FEATURE_EST))
369 printk(KERN_WARNING FW_BUG "BIOS needs update for CPU "
370 "frequency support\n");
371 }
372#endif
373 return result;
357} 374}
358 375
359int acpi_processor_notify_smm(struct module *calling_module) 376int acpi_processor_notify_smm(struct module *calling_module)
@@ -524,13 +541,13 @@ static int acpi_processor_get_psd(struct acpi_processor *pr)
524 541
525 psd = buffer.pointer; 542 psd = buffer.pointer;
526 if (!psd || (psd->type != ACPI_TYPE_PACKAGE)) { 543 if (!psd || (psd->type != ACPI_TYPE_PACKAGE)) {
527 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSD data\n")); 544 printk(KERN_ERR PREFIX "Invalid _PSD data\n");
528 result = -EFAULT; 545 result = -EFAULT;
529 goto end; 546 goto end;
530 } 547 }
531 548
532 if (psd->package.count != 1) { 549 if (psd->package.count != 1) {
533 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSD data\n")); 550 printk(KERN_ERR PREFIX "Invalid _PSD data\n");
534 result = -EFAULT; 551 result = -EFAULT;
535 goto end; 552 goto end;
536 } 553 }
@@ -543,19 +560,19 @@ static int acpi_processor_get_psd(struct acpi_processor *pr)
543 status = acpi_extract_package(&(psd->package.elements[0]), 560 status = acpi_extract_package(&(psd->package.elements[0]),
544 &format, &state); 561 &format, &state);
545 if (ACPI_FAILURE(status)) { 562 if (ACPI_FAILURE(status)) {
546 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSD data\n")); 563 printk(KERN_ERR PREFIX "Invalid _PSD data\n");
547 result = -EFAULT; 564 result = -EFAULT;
548 goto end; 565 goto end;
549 } 566 }
550 567
551 if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) { 568 if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
552 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown _PSD:num_entries\n")); 569 printk(KERN_ERR PREFIX "Unknown _PSD:num_entries\n");
553 result = -EFAULT; 570 result = -EFAULT;
554 goto end; 571 goto end;
555 } 572 }
556 573
557 if (pdomain->revision != ACPI_PSD_REV0_REVISION) { 574 if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
558 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown _PSD:revision\n")); 575 printk(KERN_ERR PREFIX "Unknown _PSD:revision\n");
559 result = -EFAULT; 576 result = -EFAULT;
560 goto end; 577 goto end;
561 } 578 }
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index ef34b18f95ca..b1eb376fae45 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -40,7 +40,6 @@
40#include <acpi/processor.h> 40#include <acpi/processor.h>
41#include <acpi/acpi_drivers.h> 41#include <acpi/acpi_drivers.h>
42 42
43#define ACPI_PROCESSOR_COMPONENT 0x01000000
44#define ACPI_PROCESSOR_CLASS "processor" 43#define ACPI_PROCESSOR_CLASS "processor"
45#define _COMPONENT ACPI_PROCESSOR_COMPONENT 44#define _COMPONENT ACPI_PROCESSOR_COMPONENT
46ACPI_MODULE_NAME("processor_thermal"); 45ACPI_MODULE_NAME("processor_thermal");
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index a56fc6c4394b..a0c38c94a8a0 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -38,9 +38,9 @@
38#include <asm/uaccess.h> 38#include <asm/uaccess.h>
39 39
40#include <acpi/acpi_bus.h> 40#include <acpi/acpi_bus.h>
41#include <acpi/acpi_drivers.h>
41#include <acpi/processor.h> 42#include <acpi/processor.h>
42 43
43#define ACPI_PROCESSOR_COMPONENT 0x01000000
44#define ACPI_PROCESSOR_CLASS "processor" 44#define ACPI_PROCESSOR_CLASS "processor"
45#define _COMPONENT ACPI_PROCESSOR_COMPONENT 45#define _COMPONENT ACPI_PROCESSOR_COMPONENT
46ACPI_MODULE_NAME("processor_throttling"); 46ACPI_MODULE_NAME("processor_throttling");
@@ -274,7 +274,7 @@ static int acpi_processor_throttling_notifier(unsigned long event, void *data)
274static int acpi_processor_get_platform_limit(struct acpi_processor *pr) 274static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
275{ 275{
276 acpi_status status = 0; 276 acpi_status status = 0;
277 unsigned long tpc = 0; 277 unsigned long long tpc = 0;
278 278
279 if (!pr) 279 if (!pr)
280 return -EINVAL; 280 return -EINVAL;
@@ -528,13 +528,13 @@ static int acpi_processor_get_tsd(struct acpi_processor *pr)
528 528
529 tsd = buffer.pointer; 529 tsd = buffer.pointer;
530 if (!tsd || (tsd->type != ACPI_TYPE_PACKAGE)) { 530 if (!tsd || (tsd->type != ACPI_TYPE_PACKAGE)) {
531 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _TSD data\n")); 531 printk(KERN_ERR PREFIX "Invalid _TSD data\n");
532 result = -EFAULT; 532 result = -EFAULT;
533 goto end; 533 goto end;
534 } 534 }
535 535
536 if (tsd->package.count != 1) { 536 if (tsd->package.count != 1) {
537 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _TSD data\n")); 537 printk(KERN_ERR PREFIX "Invalid _TSD data\n");
538 result = -EFAULT; 538 result = -EFAULT;
539 goto end; 539 goto end;
540 } 540 }
@@ -547,19 +547,19 @@ static int acpi_processor_get_tsd(struct acpi_processor *pr)
547 status = acpi_extract_package(&(tsd->package.elements[0]), 547 status = acpi_extract_package(&(tsd->package.elements[0]),
548 &format, &state); 548 &format, &state);
549 if (ACPI_FAILURE(status)) { 549 if (ACPI_FAILURE(status)) {
550 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _TSD data\n")); 550 printk(KERN_ERR PREFIX "Invalid _TSD data\n");
551 result = -EFAULT; 551 result = -EFAULT;
552 goto end; 552 goto end;
553 } 553 }
554 554
555 if (pdomain->num_entries != ACPI_TSD_REV0_ENTRIES) { 555 if (pdomain->num_entries != ACPI_TSD_REV0_ENTRIES) {
556 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown _TSD:num_entries\n")); 556 printk(KERN_ERR PREFIX "Unknown _TSD:num_entries\n");
557 result = -EFAULT; 557 result = -EFAULT;
558 goto end; 558 goto end;
559 } 559 }
560 560
561 if (pdomain->revision != ACPI_TSD_REV0_REVISION) { 561 if (pdomain->revision != ACPI_TSD_REV0_REVISION) {
562 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown _TSD:revision\n")); 562 printk(KERN_ERR PREFIX "Unknown _TSD:revision\n");
563 result = -EFAULT; 563 result = -EFAULT;
564 goto end; 564 goto end;
565 } 565 }
diff --git a/drivers/acpi/resources/rscalc.c b/drivers/acpi/resources/rscalc.c
index d9063ea414e3..8eaaecf92009 100644
--- a/drivers/acpi/resources/rscalc.c
+++ b/drivers/acpi/resources/rscalc.c
@@ -43,7 +43,6 @@
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acresrc.h> 45#include <acpi/acresrc.h>
46#include <acpi/amlcode.h>
47#include <acpi/acnamesp.h> 46#include <acpi/acnamesp.h>
48 47
49#define _COMPONENT ACPI_RESOURCES 48#define _COMPONENT ACPI_RESOURCES
@@ -560,8 +559,8 @@ acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object,
560 ACPI_GET_OBJECT_TYPE(*sub_object_list)) || 559 ACPI_GET_OBJECT_TYPE(*sub_object_list)) ||
561 ((ACPI_TYPE_LOCAL_REFERENCE == 560 ((ACPI_TYPE_LOCAL_REFERENCE ==
562 ACPI_GET_OBJECT_TYPE(*sub_object_list)) && 561 ACPI_GET_OBJECT_TYPE(*sub_object_list)) &&
563 ((*sub_object_list)->reference.opcode == 562 ((*sub_object_list)->reference.class ==
564 AML_INT_NAMEPATH_OP)))) { 563 ACPI_REFCLASS_NAME)))) {
565 name_found = TRUE; 564 name_found = TRUE;
566 } else { 565 } else {
567 /* Look at the next element */ 566 /* Look at the next element */
diff --git a/drivers/acpi/resources/rscreate.c b/drivers/acpi/resources/rscreate.c
index 7804a8c40e7a..c0bbfa2c4193 100644
--- a/drivers/acpi/resources/rscreate.c
+++ b/drivers/acpi/resources/rscreate.c
@@ -43,7 +43,6 @@
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acresrc.h> 45#include <acpi/acresrc.h>
46#include <acpi/amlcode.h>
47#include <acpi/acnamesp.h> 46#include <acpi/acnamesp.h>
48 47
49#define _COMPONENT ACPI_RESOURCES 48#define _COMPONENT ACPI_RESOURCES
@@ -310,13 +309,12 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
310 switch (ACPI_GET_OBJECT_TYPE(obj_desc)) { 309 switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
311 case ACPI_TYPE_LOCAL_REFERENCE: 310 case ACPI_TYPE_LOCAL_REFERENCE:
312 311
313 if (obj_desc->reference.opcode != 312 if (obj_desc->reference.class !=
314 AML_INT_NAMEPATH_OP) { 313 ACPI_REFCLASS_NAME) {
315 ACPI_ERROR((AE_INFO, 314 ACPI_ERROR((AE_INFO,
316 "(PRT[%X].Source) Need name, found reference op %X", 315 "(PRT[%X].Source) Need name, found Reference Class %X",
317 index, 316 index,
318 obj_desc->reference. 317 obj_desc->reference.class));
319 opcode));
320 return_ACPI_STATUS(AE_BAD_DATA); 318 return_ACPI_STATUS(AE_BAD_DATA);
321 } 319 }
322 320
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index 7b011e7e29fe..6050ce481873 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -931,7 +931,7 @@ static int acpi_sbs_add(struct acpi_device *device)
931 sbs->device = device; 931 sbs->device = device;
932 strcpy(acpi_device_name(device), ACPI_SBS_DEVICE_NAME); 932 strcpy(acpi_device_name(device), ACPI_SBS_DEVICE_NAME);
933 strcpy(acpi_device_class(device), ACPI_SBS_CLASS); 933 strcpy(acpi_device_class(device), ACPI_SBS_CLASS);
934 acpi_driver_data(device) = sbs; 934 device->driver_data = sbs;
935 935
936 result = acpi_charger_add(sbs); 936 result = acpi_charger_add(sbs);
937 if (result) 937 if (result)
diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
index a4e3767b8c64..e53e590252c0 100644
--- a/drivers/acpi/sbshc.c
+++ b/drivers/acpi/sbshc.c
@@ -258,7 +258,7 @@ extern int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
258static int acpi_smbus_hc_add(struct acpi_device *device) 258static int acpi_smbus_hc_add(struct acpi_device *device)
259{ 259{
260 int status; 260 int status;
261 unsigned long val; 261 unsigned long long val;
262 struct acpi_smb_hc *hc; 262 struct acpi_smb_hc *hc;
263 263
264 if (!device) 264 if (!device)
@@ -282,7 +282,7 @@ static int acpi_smbus_hc_add(struct acpi_device *device)
282 hc->ec = acpi_driver_data(device->parent); 282 hc->ec = acpi_driver_data(device->parent);
283 hc->offset = (val >> 8) & 0xff; 283 hc->offset = (val >> 8) & 0xff;
284 hc->query_bit = val & 0xff; 284 hc->query_bit = val & 0xff;
285 acpi_driver_data(device) = hc; 285 device->driver_data = hc;
286 286
287 acpi_ec_add_query_handler(hc->ec, hc->query_bit, NULL, smbus_alarm, hc); 287 acpi_ec_add_query_handler(hc->ec, hc->query_bit, NULL, smbus_alarm, hc);
288 printk(KERN_INFO PREFIX "SBS HC: EC = 0x%p, offset = 0x%0x, query_bit = 0x%0x\n", 288 printk(KERN_INFO PREFIX "SBS HC: EC = 0x%p, offset = 0x%0x, query_bit = 0x%0x\n",
@@ -303,7 +303,7 @@ static int acpi_smbus_hc_remove(struct acpi_device *device, int type)
303 hc = acpi_driver_data(device); 303 hc = acpi_driver_data(device);
304 acpi_ec_remove_query_handler(hc->ec, hc->query_bit); 304 acpi_ec_remove_query_handler(hc->ec, hc->query_bit);
305 kfree(hc); 305 kfree(hc);
306 acpi_driver_data(device) = NULL; 306 device->driver_data = NULL;
307 return 0; 307 return 0;
308} 308}
309 309
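
The acpi_driver_data(device) = x conversions here and in the other drivers of this series assume that acpi_driver_data() has become a plain read accessor, with stores going through device->driver_data directly. A minimal sketch of the resulting idiom, with made-up names:

struct example_ctx {
	int dummy;
};

static int example_add(struct acpi_device *device, struct example_ctx *ctx)
{
	device->driver_data = ctx;		/* store: direct member access */
	return 0;
}

static struct example_ctx *example_get(struct acpi_device *device)
{
	return acpi_driver_data(device);	/* read: accessor still used */
}
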
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index f6f52c1a2aba..39b7233c3485 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -109,20 +109,19 @@ static int acpi_bus_hot_remove_device(void *context)
109 return 0; 109 return 0;
110 110
111 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 111 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
112 "Hot-removing device %s...\n", device->dev.bus_id)); 112 "Hot-removing device %s...\n", dev_name(&device->dev)));
113
114 113
115 if (acpi_bus_trim(device, 1)) { 114 if (acpi_bus_trim(device, 1)) {
116 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, 115 printk(KERN_ERR PREFIX
117 "Removing device failed\n")); 116 "Removing device failed\n");
118 return -1; 117 return -1;
119 } 118 }
120 119
121 /* power off device */ 120 /* power off device */
122 status = acpi_evaluate_object(handle, "_PS3", NULL, NULL); 121 status = acpi_evaluate_object(handle, "_PS3", NULL, NULL);
123 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) 122 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND)
124 ACPI_DEBUG_PRINT((ACPI_DB_WARN, 123 printk(KERN_WARNING PREFIX
125 "Power-off device failed\n")); 124 "Power-off device failed\n");
126 125
127 if (device->flags.lockable) { 126 if (device->flags.lockable) {
128 arg_list.count = 1; 127 arg_list.count = 1;
@@ -276,6 +275,13 @@ int acpi_match_device_ids(struct acpi_device *device,
276{ 275{
277 const struct acpi_device_id *id; 276 const struct acpi_device_id *id;
278 277
278 /*
 279 	/*	 279 	 * If the device is not present, it is unnecessary to load a device
280 * driver for it.
281 */
282 if (!device->status.present)
283 return -ENODEV;
284
279 if (device->flags.hardware_id) { 285 if (device->flags.hardware_id) {
280 for (id = ids; id->id[0]; id++) { 286 for (id = ids; id->id[0]; id++) {
281 if (!strcmp((char*)id->id, device->pnp.hardware_id)) 287 if (!strcmp((char*)id->id, device->pnp.hardware_id))
@@ -384,7 +390,7 @@ static int acpi_device_remove(struct device * dev)
384 acpi_drv->ops.remove(acpi_dev, acpi_dev->removal_type); 390 acpi_drv->ops.remove(acpi_dev, acpi_dev->removal_type);
385 } 391 }
386 acpi_dev->driver = NULL; 392 acpi_dev->driver = NULL;
387 acpi_driver_data(dev) = NULL; 393 acpi_dev->driver_data = NULL;
388 394
389 put_device(dev); 395 put_device(dev);
390 return 0; 396 return 0;
@@ -453,7 +459,7 @@ static int acpi_device_register(struct acpi_device *device,
453 acpi_device_bus_id->instance_no = 0; 459 acpi_device_bus_id->instance_no = 0;
454 list_add_tail(&acpi_device_bus_id->node, &acpi_bus_id_list); 460 list_add_tail(&acpi_device_bus_id->node, &acpi_bus_id_list);
455 } 461 }
456 sprintf(device->dev.bus_id, "%s:%02x", acpi_device_bus_id->bus_id, acpi_device_bus_id->instance_no); 462 dev_set_name(&device->dev, "%s:%02x", acpi_device_bus_id->bus_id, acpi_device_bus_id->instance_no);
457 463
458 if (device->parent) { 464 if (device->parent) {
459 list_add_tail(&device->node, &device->parent->children); 465 list_add_tail(&device->node, &device->parent->children);
@@ -477,7 +483,8 @@ static int acpi_device_register(struct acpi_device *device,
477 483
478 result = acpi_device_setup_files(device); 484 result = acpi_device_setup_files(device);
479 if(result) 485 if(result)
480 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error creating sysfs interface for device %s\n", device->dev.bus_id)); 486 printk(KERN_ERR PREFIX "Error creating sysfs interface for device %s\n",
487 dev_name(&device->dev));
481 488
482 device->removal_type = ACPI_BUS_REMOVAL_NORMAL; 489 device->removal_type = ACPI_BUS_REMOVAL_NORMAL;
483 return 0; 490 return 0;
@@ -537,7 +544,7 @@ acpi_bus_driver_init(struct acpi_device *device, struct acpi_driver *driver)
537 result = driver->ops.add(device); 544 result = driver->ops.add(device);
538 if (result) { 545 if (result) {
539 device->driver = NULL; 546 device->driver = NULL;
540 acpi_driver_data(device) = NULL; 547 device->driver_data = NULL;
541 return result; 548 return result;
542 } 549 }
543 550
@@ -807,6 +814,7 @@ static int acpi_bus_get_power_flags(struct acpi_device *device)
807 /* TBD: System wake support and resource requirements. */ 814 /* TBD: System wake support and resource requirements. */
808 815
809 device->power.state = ACPI_STATE_UNKNOWN; 816 device->power.state = ACPI_STATE_UNKNOWN;
817 acpi_bus_get_power(device->handle, &(device->power.state));
810 818
811 return 0; 819 return 0;
812} 820}
@@ -901,36 +909,6 @@ static void acpi_device_get_busid(struct acpi_device *device,
901 } 909 }
902} 910}
903 911
904static int
905acpi_video_bus_match(struct acpi_device *device)
906{
907 acpi_handle h_dummy;
908
909 if (!device)
910 return -EINVAL;
911
912 /* Since there is no HID, CID for ACPI Video drivers, we have
913 * to check well known required nodes for each feature we support.
914 */
915
916 /* Does this device able to support video switching ? */
917 if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOD", &h_dummy)) &&
918 ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOS", &h_dummy)))
919 return 0;
920
921 /* Does this device able to retrieve a video ROM ? */
922 if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_ROM", &h_dummy)))
923 return 0;
924
925 /* Does this device able to configure which video head to be POSTed ? */
926 if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_VPO", &h_dummy)) &&
927 ACPI_SUCCESS(acpi_get_handle(device->handle, "_GPD", &h_dummy)) &&
928 ACPI_SUCCESS(acpi_get_handle(device->handle, "_SPD", &h_dummy)))
929 return 0;
930
931 return -ENODEV;
932}
933
934/* 912/*
935 * acpi_bay_match - see if a device is an ejectable driver bay 913 * acpi_bay_match - see if a device is an ejectable driver bay
936 * 914 *
@@ -1013,7 +991,7 @@ static void acpi_device_set_id(struct acpi_device *device,
1013 will get autoloaded and the device might still match 991 will get autoloaded and the device might still match
1014 against another driver. 992 against another driver.
1015 */ 993 */
1016 if (ACPI_SUCCESS(acpi_video_bus_match(device))) 994 if (acpi_is_video_device(device))
1017 cid_add = ACPI_VIDEO_HID; 995 cid_add = ACPI_VIDEO_HID;
1018 else if (ACPI_SUCCESS(acpi_bay_match(device))) 996 else if (ACPI_SUCCESS(acpi_bay_match(device)))
1019 cid_add = ACPI_BAY_HID; 997 cid_add = ACPI_BAY_HID;
@@ -1025,7 +1003,7 @@ static void acpi_device_set_id(struct acpi_device *device,
1025 hid = ACPI_POWER_HID; 1003 hid = ACPI_POWER_HID;
1026 break; 1004 break;
1027 case ACPI_BUS_TYPE_PROCESSOR: 1005 case ACPI_BUS_TYPE_PROCESSOR:
1028 hid = ACPI_PROCESSOR_HID; 1006 hid = ACPI_PROCESSOR_OBJECT_HID;
1029 break; 1007 break;
1030 case ACPI_BUS_TYPE_SYSTEM: 1008 case ACPI_BUS_TYPE_SYSTEM:
1031 hid = ACPI_SYSTEM_HID; 1009 hid = ACPI_SYSTEM_HID;
@@ -1153,20 +1131,6 @@ static int acpi_bus_remove(struct acpi_device *dev, int rmdevice)
1153} 1131}
1154 1132
1155static int 1133static int
1156acpi_is_child_device(struct acpi_device *device,
1157 int (*matcher)(struct acpi_device *))
1158{
1159 int result = -ENODEV;
1160
1161 do {
1162 if (ACPI_SUCCESS(matcher(device)))
1163 return AE_OK;
1164 } while ((device = device->parent));
1165
1166 return result;
1167}
1168
1169static int
1170acpi_add_single_object(struct acpi_device **child, 1134acpi_add_single_object(struct acpi_device **child,
1171 struct acpi_device *parent, acpi_handle handle, int type, 1135 struct acpi_device *parent, acpi_handle handle, int type,
1172 struct acpi_bus_ops *ops) 1136 struct acpi_bus_ops *ops)
@@ -1221,15 +1185,18 @@ acpi_add_single_object(struct acpi_device **child,
1221 result = -ENODEV; 1185 result = -ENODEV;
1222 goto end; 1186 goto end;
1223 } 1187 }
1224 if (!device->status.present) { 1188 /*
1225 /* Bay and dock should be handled even if absent */ 1189 * When the device is neither present nor functional, the
1226 if (!ACPI_SUCCESS( 1190 * device should not be added to Linux ACPI device tree.
1227 		    acpi_is_child_device(device, acpi_bay_match)) &&	1191 		 * When the status of the device is not present but functional,
1228 		    !ACPI_SUCCESS(						1192 		 * it should be added to the Linux ACPI tree, for example a bay
1229 		    acpi_is_child_device(device, acpi_dock_match))) {		1193 		 * device or a dock device.
1230 			result = -ENODEV;					1194 		 * In such conditions it is unnecessary to check whether it is
1231 goto end; 1195 * bay device or dock device.
1232 } 1196 */
1197 if (!device->status.present && !device->status.functional) {
1198 result = -ENODEV;
1199 goto end;
1233 } 1200 }
1234 break; 1201 break;
1235 default: 1202 default:
@@ -1252,6 +1219,16 @@ acpi_add_single_object(struct acpi_device **child,
1252 acpi_device_set_id(device, parent, handle, type); 1219 acpi_device_set_id(device, parent, handle, type);
1253 1220
1254 /* 1221 /*
1222 * The ACPI device is attached to acpi handle before getting
1223 	 * the power/wakeup/performance flags. Otherwise the OS can't get
1224 * the corresponding ACPI device by the acpi handle in the course
1225 * of getting the power/wakeup/performance flags.
1226 */
1227 result = acpi_device_set_context(device, type);
1228 if (result)
1229 goto end;
1230
1231 /*
1255 * Power Management 1232 * Power Management
1256 * ---------------- 1233 * ----------------
1257 */ 1234 */
@@ -1281,8 +1258,6 @@ acpi_add_single_object(struct acpi_device **child,
1281 goto end; 1258 goto end;
1282 } 1259 }
1283 1260
1284 if ((result = acpi_device_set_context(device, type)))
1285 goto end;
1286 1261
1287 result = acpi_device_register(device, parent); 1262 result = acpi_device_register(device, parent);
1288 1263
@@ -1402,7 +1377,12 @@ static int acpi_bus_scan(struct acpi_device *start, struct acpi_bus_ops *ops)
1402 * TBD: Need notifications and other detection mechanisms 1377 * TBD: Need notifications and other detection mechanisms
1403 * in place before we can fully implement this. 1378 * in place before we can fully implement this.
1404 */ 1379 */
1405 if (child->status.present) { 1380 /*
1381 * When the device is not present but functional, it is also
1382 * necessary to scan the children of this device.
1383 */
1384 if (child->status.present || (!child->status.present &&
1385 child->status.functional)) {
1406 status = acpi_get_next_object(ACPI_TYPE_ANY, chandle, 1386 status = acpi_get_next_object(ACPI_TYPE_ANY, chandle,
1407 NULL, NULL); 1387 NULL, NULL);
1408 if (ACPI_SUCCESS(status)) { 1388 if (ACPI_SUCCESS(status)) {
@@ -1545,7 +1525,6 @@ static int acpi_bus_scan_fixed(struct acpi_device *root)
1545 return result; 1525 return result;
1546} 1526}
1547 1527
1548int __init acpi_boot_ec_enable(void);
1549 1528
1550static int __init acpi_scan_init(void) 1529static int __init acpi_scan_init(void)
1551{ 1530{
@@ -1579,9 +1558,6 @@ static int __init acpi_scan_init(void)
1579 */ 1558 */
1580 result = acpi_bus_scan_fixed(acpi_root); 1559 result = acpi_bus_scan_fixed(acpi_root);
1581 1560
1582 /* EC region might be needed at bus_scan, so enable it now */
1583 acpi_boot_ec_enable();
1584
1585 if (!result) 1561 if (!result)
1586 result = acpi_bus_scan(acpi_root, &ops); 1562 result = acpi_bus_scan(acpi_root, &ops);
1587 1563
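
The scan-path changes replace the bay/dock special cases with a status-based rule: a device stays in the ACPI device tree if it is present, or if it is absent but still functional (an empty dock station or drive bay, for instance), and its children are scanned under the same condition. An illustrative helper, not part of the patch:

static bool example_keep_device(struct acpi_device *device)
{
	/* present, or absent-but-functional (dock/bay style devices) */
	return device->status.present || device->status.functional;
}
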
diff --git a/drivers/acpi/sleep/main.c b/drivers/acpi/sleep/main.c
index d13194a031bf..28a691cc625e 100644
--- a/drivers/acpi/sleep/main.c
+++ b/drivers/acpi/sleep/main.c
@@ -15,6 +15,7 @@
15#include <linux/dmi.h> 15#include <linux/dmi.h>
16#include <linux/device.h> 16#include <linux/device.h>
17#include <linux/suspend.h> 17#include <linux/suspend.h>
18#include <linux/reboot.h>
18 19
19#include <asm/io.h> 20#include <asm/io.h>
20 21
@@ -24,6 +25,36 @@
24 25
25u8 sleep_states[ACPI_S_STATE_COUNT]; 26u8 sleep_states[ACPI_S_STATE_COUNT];
26 27
28static void acpi_sleep_tts_switch(u32 acpi_state)
29{
30 union acpi_object in_arg = { ACPI_TYPE_INTEGER };
31 struct acpi_object_list arg_list = { 1, &in_arg };
32 acpi_status status = AE_OK;
33
34 in_arg.integer.value = acpi_state;
35 status = acpi_evaluate_object(NULL, "\\_TTS", &arg_list, NULL);
36 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
37 /*
 38 		/*
 39 		 * The OS can't evaluate the _TTS object correctly. A warning
 40 		 * message will be printed, but it won't break anything.
40 */
41 printk(KERN_NOTICE "Failure in evaluating _TTS object\n");
42 }
43}
44
45static int tts_notify_reboot(struct notifier_block *this,
46 unsigned long code, void *x)
47{
48 acpi_sleep_tts_switch(ACPI_STATE_S5);
49 return NOTIFY_DONE;
50}
51
52static struct notifier_block tts_notifier = {
53 .notifier_call = tts_notify_reboot,
54 .next = NULL,
55 .priority = 0,
56};
57
27static int acpi_sleep_prepare(u32 acpi_state) 58static int acpi_sleep_prepare(u32 acpi_state)
28{ 59{
29#ifdef CONFIG_ACPI_SLEEP 60#ifdef CONFIG_ACPI_SLEEP
@@ -45,9 +76,8 @@ static int acpi_sleep_prepare(u32 acpi_state)
45 return 0; 76 return 0;
46} 77}
47 78
48#ifdef CONFIG_PM_SLEEP 79#ifdef CONFIG_ACPI_SLEEP
49static u32 acpi_target_sleep_state = ACPI_STATE_S0; 80static u32 acpi_target_sleep_state = ACPI_STATE_S0;
50
51/* 81/*
52 * ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the 82 * ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the
53 * user to request that behavior by using the 'acpi_old_suspend_ordering' 83 * user to request that behavior by using the 'acpi_old_suspend_ordering'
@@ -60,6 +90,18 @@ void __init acpi_old_suspend_ordering(void)
60 old_suspend_ordering = true; 90 old_suspend_ordering = true;
61} 91}
62 92
93/*
94 * According to the ACPI specification the BIOS should make sure that ACPI is
95 * enabled and SCI_EN bit is set on wake-up from S1 - S3 sleep states. Still,
96 * some BIOSes don't do that and therefore we use acpi_enable() to enable ACPI
97 * on such systems during resume. Unfortunately that doesn't help in
98 * particularly pathological cases in which SCI_EN has to be set directly on
99 * resume, although the specification states very clearly that this flag is
100 * owned by the hardware. The set_sci_en_on_resume variable will be set in such
101 * cases.
102 */
103static bool set_sci_en_on_resume;
104
63/** 105/**
64 * acpi_pm_disable_gpes - Disable the GPEs. 106 * acpi_pm_disable_gpes - Disable the GPEs.
65 */ 107 */
@@ -131,8 +173,11 @@ static void acpi_pm_end(void)
131 * failing transition to a sleep state. 173 * failing transition to a sleep state.
132 */ 174 */
133 acpi_target_sleep_state = ACPI_STATE_S0; 175 acpi_target_sleep_state = ACPI_STATE_S0;
176 acpi_sleep_tts_switch(acpi_target_sleep_state);
134} 177}
135#endif /* CONFIG_PM_SLEEP */ 178#else /* !CONFIG_ACPI_SLEEP */
179#define acpi_target_sleep_state ACPI_STATE_S0
180#endif /* CONFIG_ACPI_SLEEP */
136 181
137#ifdef CONFIG_SUSPEND 182#ifdef CONFIG_SUSPEND
138extern void do_suspend_lowlevel(void); 183extern void do_suspend_lowlevel(void);
@@ -155,6 +200,7 @@ static int acpi_suspend_begin(suspend_state_t pm_state)
155 200
156 if (sleep_states[acpi_state]) { 201 if (sleep_states[acpi_state]) {
157 acpi_target_sleep_state = acpi_state; 202 acpi_target_sleep_state = acpi_state;
203 acpi_sleep_tts_switch(acpi_target_sleep_state);
158 } else { 204 } else {
159 printk(KERN_ERR "ACPI does not support this state: %d\n", 205 printk(KERN_ERR "ACPI does not support this state: %d\n",
160 pm_state); 206 pm_state);
@@ -200,6 +246,12 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
200 break; 246 break;
201 } 247 }
202 248
249 /* If ACPI is not enabled by the BIOS, we need to enable it here. */
250 if (set_sci_en_on_resume)
251 acpi_set_register(ACPI_BITREG_SCI_ENABLE, 1);
252 else
253 acpi_enable();
254
203 /* Reprogram control registers and execute _BFS */ 255 /* Reprogram control registers and execute _BFS */
204 acpi_leave_sleep_state_prep(acpi_state); 256 acpi_leave_sleep_state_prep(acpi_state);
205 257
@@ -287,6 +339,12 @@ static int __init init_old_suspend_ordering(const struct dmi_system_id *d)
287 return 0; 339 return 0;
288} 340}
289 341
342static int __init init_set_sci_en_on_resume(const struct dmi_system_id *d)
343{
344 set_sci_en_on_resume = true;
345 return 0;
346}
347
290static struct dmi_system_id __initdata acpisleep_dmi_table[] = { 348static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
291 { 349 {
292 .callback = init_old_suspend_ordering, 350 .callback = init_old_suspend_ordering,
@@ -296,6 +354,30 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
296 DMI_MATCH(DMI_BOARD_NAME, "KN9 Series(NF-CK804)"), 354 DMI_MATCH(DMI_BOARD_NAME, "KN9 Series(NF-CK804)"),
297 }, 355 },
298 }, 356 },
357 {
358 .callback = init_old_suspend_ordering,
359 .ident = "HP xw4600 Workstation",
360 .matches = {
361 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
362 DMI_MATCH(DMI_PRODUCT_NAME, "HP xw4600 Workstation"),
363 },
364 },
365 {
366 .callback = init_set_sci_en_on_resume,
367 .ident = "Apple MacBook 1,1",
368 .matches = {
369 DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."),
370 DMI_MATCH(DMI_PRODUCT_NAME, "MacBook1,1"),
371 },
372 },
373 {
374 .callback = init_set_sci_en_on_resume,
375 .ident = "Apple MacMini 1,1",
376 .matches = {
377 DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."),
378 DMI_MATCH(DMI_PRODUCT_NAME, "Macmini1,1"),
379 },
380 },
299 {}, 381 {},
300}; 382};
301#endif /* CONFIG_SUSPEND */ 383#endif /* CONFIG_SUSPEND */
@@ -313,6 +395,7 @@ void __init acpi_no_s4_hw_signature(void)
313static int acpi_hibernation_begin(void) 395static int acpi_hibernation_begin(void)
314{ 396{
315 acpi_target_sleep_state = ACPI_STATE_S4; 397 acpi_target_sleep_state = ACPI_STATE_S4;
398 acpi_sleep_tts_switch(acpi_target_sleep_state);
316 return 0; 399 return 0;
317} 400}
318 401
@@ -376,7 +459,15 @@ static struct platform_hibernation_ops acpi_hibernation_ops = {
376 */ 459 */
377static int acpi_hibernation_begin_old(void) 460static int acpi_hibernation_begin_old(void)
378{ 461{
379 int error = acpi_sleep_prepare(ACPI_STATE_S4); 462 int error;
463 /*
464 * The _TTS object should always be evaluated before the _PTS object.
465 * When the old_suspended_ordering is true, the _PTS object is
466 * evaluated in the acpi_sleep_prepare.
467 */
468 acpi_sleep_tts_switch(ACPI_STATE_S4);
469
470 error = acpi_sleep_prepare(ACPI_STATE_S4);
380 471
381 if (!error) 472 if (!error)
382 acpi_target_sleep_state = ACPI_STATE_S4; 473 acpi_target_sleep_state = ACPI_STATE_S4;
@@ -444,7 +535,7 @@ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p)
444 acpi_handle handle = DEVICE_ACPI_HANDLE(dev); 535 acpi_handle handle = DEVICE_ACPI_HANDLE(dev);
445 struct acpi_device *adev; 536 struct acpi_device *adev;
446 char acpi_method[] = "_SxD"; 537 char acpi_method[] = "_SxD";
447 unsigned long d_min, d_max; 538 unsigned long long d_min, d_max;
448 539
449 if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) { 540 if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) {
450 printk(KERN_DEBUG "ACPI handle has no context!\n"); 541 printk(KERN_DEBUG "ACPI handle has no context!\n");
@@ -596,5 +687,10 @@ int __init acpi_sleep_init(void)
596 pm_power_off = acpi_power_off; 687 pm_power_off = acpi_power_off;
597 } 688 }
598 printk(")\n"); 689 printk(")\n");
690 /*
691 * Register the tts_notifier to reboot notifier list so that the _TTS
692 * object can also be evaluated when the system enters S5.
693 */
694 register_reboot_notifier(&tts_notifier);
599 return 0; 695 return 0;
600} 696}
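
Beyond the _TTS hooks shown above, the sleep code grows its DMI quirk list: old_suspend_ordering machines, plus boxes whose firmware leaves SCI_EN clear on resume (handled via set_sci_en_on_resume). A hypothetical extra entry, showing how another affected machine would be added to acpisleep_dmi_table[] (vendor and model strings are made up):

	{
	 .callback = init_set_sci_en_on_resume,
	 .ident = "Example Laptop 1,1",
	 .matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
		DMI_MATCH(DMI_PRODUCT_NAME, "ExampleModel1,1"),
		},
	 },
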
diff --git a/drivers/acpi/sleep/proc.c b/drivers/acpi/sleep/proc.c
index 631ee2ee2ca0..4dbc2271acf5 100644
--- a/drivers/acpi/sleep/proc.c
+++ b/drivers/acpi/sleep/proc.c
@@ -367,7 +367,7 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
367 if (ldev) 367 if (ldev)
368 seq_printf(seq, "%s:%s", 368 seq_printf(seq, "%s:%s",
369 ldev->bus ? ldev->bus->name : "no-bus", 369 ldev->bus ? ldev->bus->name : "no-bus",
370 ldev->bus_id); 370 dev_name(ldev));
371 seq_printf(seq, "\n"); 371 seq_printf(seq, "\n");
372 put_device(ldev); 372 put_device(ldev);
373 373
diff --git a/drivers/acpi/sleep/wakeup.c b/drivers/acpi/sleep/wakeup.c
index 38655eb132dc..dea4c23df764 100644
--- a/drivers/acpi/sleep/wakeup.c
+++ b/drivers/acpi/sleep/wakeup.c
@@ -88,7 +88,7 @@ void acpi_enable_wakeup_device(u8 sleep_state)
88 spin_unlock(&acpi_device_lock); 88 spin_unlock(&acpi_device_lock);
89 if (!dev->wakeup.flags.run_wake) 89 if (!dev->wakeup.flags.run_wake)
90 acpi_enable_gpe(dev->wakeup.gpe_device, 90 acpi_enable_gpe(dev->wakeup.gpe_device,
91 dev->wakeup.gpe_number, ACPI_ISR); 91 dev->wakeup.gpe_number);
92 spin_lock(&acpi_device_lock); 92 spin_lock(&acpi_device_lock);
93 } 93 }
94 spin_unlock(&acpi_device_lock); 94 spin_unlock(&acpi_device_lock);
@@ -122,7 +122,7 @@ void acpi_disable_wakeup_device(u8 sleep_state)
122 ACPI_GPE_TYPE_WAKE_RUN); 122 ACPI_GPE_TYPE_WAKE_RUN);
123 /* Re-enable it, since set_gpe_type will disable it */ 123 /* Re-enable it, since set_gpe_type will disable it */
124 acpi_enable_gpe(dev->wakeup.gpe_device, 124 acpi_enable_gpe(dev->wakeup.gpe_device,
125 dev->wakeup.gpe_number, ACPI_NOT_ISR); 125 dev->wakeup.gpe_number);
126 spin_lock(&acpi_device_lock); 126 spin_lock(&acpi_device_lock);
127 } 127 }
128 continue; 128 continue;
@@ -133,7 +133,7 @@ void acpi_disable_wakeup_device(u8 sleep_state)
133 /* Never disable run-wake GPE */ 133 /* Never disable run-wake GPE */
134 if (!dev->wakeup.flags.run_wake) { 134 if (!dev->wakeup.flags.run_wake) {
135 acpi_disable_gpe(dev->wakeup.gpe_device, 135 acpi_disable_gpe(dev->wakeup.gpe_device,
136 dev->wakeup.gpe_number, ACPI_NOT_ISR); 136 dev->wakeup.gpe_number);
137 acpi_clear_gpe(dev->wakeup.gpe_device, 137 acpi_clear_gpe(dev->wakeup.gpe_device,
138 dev->wakeup.gpe_number, ACPI_NOT_ISR); 138 dev->wakeup.gpe_number, ACPI_NOT_ISR);
139 } 139 }
@@ -162,7 +162,7 @@ static int __init acpi_wakeup_device_init(void)
162 dev->wakeup.gpe_number, 162 dev->wakeup.gpe_number,
163 ACPI_GPE_TYPE_WAKE_RUN); 163 ACPI_GPE_TYPE_WAKE_RUN);
164 acpi_enable_gpe(dev->wakeup.gpe_device, 164 acpi_enable_gpe(dev->wakeup.gpe_device,
165 dev->wakeup.gpe_number, ACPI_NOT_ISR); 165 dev->wakeup.gpe_number);
166 dev->wakeup.state.enabled = 1; 166 dev->wakeup.state.enabled = 1;
167 spin_lock(&acpi_device_lock); 167 spin_lock(&acpi_device_lock);
168 } 168 }
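
The wakeup changes track a GPE API cleanup: acpi_enable_gpe() and acpi_disable_gpe() no longer take the ACPI_ISR/ACPI_NOT_ISR argument, while acpi_clear_gpe() still does at this point. A caller after the change looks roughly like this (the function name is illustrative):

static void example_arm_wake_gpe(struct acpi_device *dev)
{
	/* run-wake GPEs stay permanently enabled and are skipped here */
	if (!dev->wakeup.flags.run_wake)
		acpi_enable_gpe(dev->wakeup.gpe_device,
				dev->wakeup.gpe_number);
}
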
diff --git a/drivers/acpi/system.c b/drivers/acpi/system.c
index 24e80fd927e2..6e4107f82403 100644
--- a/drivers/acpi/system.c
+++ b/drivers/acpi/system.c
@@ -78,9 +78,15 @@ static ssize_t acpi_table_show(struct kobject *kobj,
78 container_of(bin_attr, struct acpi_table_attr, attr); 78 container_of(bin_attr, struct acpi_table_attr, attr);
79 struct acpi_table_header *table_header = NULL; 79 struct acpi_table_header *table_header = NULL;
80 acpi_status status; 80 acpi_status status;
81 char name[ACPI_NAME_SIZE];
82
83 if (strncmp(table_attr->name, "NULL", 4))
84 memcpy(name, table_attr->name, ACPI_NAME_SIZE);
85 else
86 memcpy(name, "\0\0\0\0", 4);
81 87
82 status = 88 status =
83 acpi_get_table(table_attr->name, table_attr->instance, 89 acpi_get_table(name, table_attr->instance,
84 &table_header); 90 &table_header);
85 if (ACPI_FAILURE(status)) 91 if (ACPI_FAILURE(status))
86 return -ENODEV; 92 return -ENODEV;
@@ -95,21 +101,24 @@ static void acpi_table_attr_init(struct acpi_table_attr *table_attr,
95 struct acpi_table_header *header = NULL; 101 struct acpi_table_header *header = NULL;
96 struct acpi_table_attr *attr = NULL; 102 struct acpi_table_attr *attr = NULL;
97 103
98 memcpy(table_attr->name, table_header->signature, ACPI_NAME_SIZE); 104 if (table_header->signature[0] != '\0')
105 memcpy(table_attr->name, table_header->signature,
106 ACPI_NAME_SIZE);
107 else
108 memcpy(table_attr->name, "NULL", 4);
99 109
100 list_for_each_entry(attr, &acpi_table_attr_list, node) { 110 list_for_each_entry(attr, &acpi_table_attr_list, node) {
101 if (!memcmp(table_header->signature, attr->name, 111 if (!memcmp(table_attr->name, attr->name, ACPI_NAME_SIZE))
102 ACPI_NAME_SIZE))
103 if (table_attr->instance < attr->instance) 112 if (table_attr->instance < attr->instance)
104 table_attr->instance = attr->instance; 113 table_attr->instance = attr->instance;
105 } 114 }
106 table_attr->instance++; 115 table_attr->instance++;
107 116
108 if (table_attr->instance > 1 || (table_attr->instance == 1 && 117 if (table_attr->instance > 1 || (table_attr->instance == 1 &&
109 !acpi_get_table(table_header-> 118 !acpi_get_table
110 signature, 2, 119 (table_header->signature, 2, &header)))
111 &header))) 120 sprintf(table_attr->name + ACPI_NAME_SIZE, "%d",
112 sprintf(table_attr->name + 4, "%d", table_attr->instance); 121 table_attr->instance);
113 122
114 table_attr->attr.size = 0; 123 table_attr->attr.size = 0;
115 table_attr->attr.read = acpi_table_show; 124 table_attr->attr.read = acpi_table_show;
@@ -167,7 +176,6 @@ static int acpi_system_sysfs_init(void)
167#define COUNT_ERROR 2 /* other */ 176#define COUNT_ERROR 2 /* other */
168#define NUM_COUNTERS_EXTRA 3 177#define NUM_COUNTERS_EXTRA 3
169 178
170#define ACPI_EVENT_VALID 0x01
171struct event_counter { 179struct event_counter {
172 u32 count; 180 u32 count;
173 u32 flags; 181 u32 flags;
@@ -312,12 +320,6 @@ static int get_status(u32 index, acpi_event_status *status, acpi_handle *handle)
312 } else if (index < (num_gpes + ACPI_NUM_FIXED_EVENTS)) 320 } else if (index < (num_gpes + ACPI_NUM_FIXED_EVENTS))
313 result = acpi_get_event_status(index - num_gpes, status); 321 result = acpi_get_event_status(index - num_gpes, status);
314 322
315 /*
316 * sleep/power button GPE/Fixed Event is enabled after acpi_system_init,
317 * check the status at runtime and mark it as valid once it's enabled
318 */
319 if (!result && (*status & ACPI_EVENT_FLAG_ENABLED))
320 all_counters[index].flags |= ACPI_EVENT_VALID;
321end: 323end:
322 return result; 324 return result;
323} 325}
@@ -346,12 +348,14 @@ static ssize_t counter_show(struct kobject *kobj,
346 if (result) 348 if (result)
347 goto end; 349 goto end;
348 350
349 if (!(all_counters[index].flags & ACPI_EVENT_VALID)) 351 if (!(status & ACPI_EVENT_FLAG_HANDLE))
350 size += sprintf(buf + size, " invalid"); 352 size += sprintf(buf + size, " invalid");
351 else if (status & ACPI_EVENT_FLAG_ENABLED) 353 else if (status & ACPI_EVENT_FLAG_ENABLED)
352 size += sprintf(buf + size, " enable"); 354 size += sprintf(buf + size, " enabled");
355 else if (status & ACPI_EVENT_FLAG_WAKE_ENABLED)
356 size += sprintf(buf + size, " wake_enabled");
353 else 357 else
354 size += sprintf(buf + size, " disable"); 358 size += sprintf(buf + size, " disabled");
355 359
356end: 360end:
357 size += sprintf(buf + size, "\n"); 361 size += sprintf(buf + size, "\n");
@@ -385,19 +389,19 @@ static ssize_t counter_set(struct kobject *kobj,
385 if (result) 389 if (result)
386 goto end; 390 goto end;
387 391
388 if (!(all_counters[index].flags & ACPI_EVENT_VALID)) { 392 if (!(status & ACPI_EVENT_FLAG_HANDLE)) {
389 ACPI_DEBUG_PRINT((ACPI_DB_WARN, 393 printk(KERN_WARNING PREFIX
390 "Can not change Invalid GPE/Fixed Event status\n")); 394 "Can not change Invalid GPE/Fixed Event status\n");
391 return -EINVAL; 395 return -EINVAL;
392 } 396 }
393 397
394 if (index < num_gpes) { 398 if (index < num_gpes) {
395 if (!strcmp(buf, "disable\n") && 399 if (!strcmp(buf, "disable\n") &&
396 (status & ACPI_EVENT_FLAG_ENABLED)) 400 (status & ACPI_EVENT_FLAG_ENABLED))
397 result = acpi_disable_gpe(handle, index, ACPI_NOT_ISR); 401 result = acpi_disable_gpe(handle, index);
398 else if (!strcmp(buf, "enable\n") && 402 else if (!strcmp(buf, "enable\n") &&
399 !(status & ACPI_EVENT_FLAG_ENABLED)) 403 !(status & ACPI_EVENT_FLAG_ENABLED))
400 result = acpi_enable_gpe(handle, index, ACPI_NOT_ISR); 404 result = acpi_enable_gpe(handle, index);
401 else if (!strcmp(buf, "clear\n") && 405 else if (!strcmp(buf, "clear\n") &&
402 (status & ACPI_EVENT_FLAG_SET)) 406 (status & ACPI_EVENT_FLAG_SET))
403 result = acpi_clear_gpe(handle, index, ACPI_NOT_ISR); 407 result = acpi_clear_gpe(handle, index, ACPI_NOT_ISR);
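
The counter_show() change above stops tracking validity in a private ACPI_EVENT_VALID flag and instead decodes the status word returned by acpi_get_event_status(): no handler means "invalid", otherwise the enabled/wake-enabled bits are reported directly. A minimal stand-alone sketch of that decoding; the flag values here are invented stand-ins, not the ACPI_EVENT_FLAG_* definitions from the ACPICA headers.

    /* Illustrative only: flag values are invented, not the ACPICA definitions. */
    #include <stdio.h>

    #define EVENT_FLAG_ENABLED      (1u << 1)
    #define EVENT_FLAG_WAKE_ENABLED (1u << 2)
    #define EVENT_FLAG_HANDLE       (1u << 3)

    static const char *event_state(unsigned int status)
    {
        if (!(status & EVENT_FLAG_HANDLE))
            return "invalid";            /* no handler installed for this GPE/event */
        if (status & EVENT_FLAG_ENABLED)
            return "enabled";
        if (status & EVENT_FLAG_WAKE_ENABLED)
            return "wake_enabled";
        return "disabled";
    }

    int main(void)
    {
        printf("%s\n", event_state(EVENT_FLAG_HANDLE | EVENT_FLAG_ENABLED));  /* enabled */
        printf("%s\n", event_state(EVENT_FLAG_HANDLE));                       /* disabled */
        printf("%s\n", event_state(0));                                       /* invalid */
        return 0;
    }
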
diff --git a/drivers/acpi/tables/tbfadt.c b/drivers/acpi/tables/tbfadt.c
index a4a41ba2484b..2817158fb6a1 100644
--- a/drivers/acpi/tables/tbfadt.c
+++ b/drivers/acpi/tables/tbfadt.c
@@ -50,7 +50,7 @@ ACPI_MODULE_NAME("tbfadt")
50/* Local prototypes */ 50/* Local prototypes */
51static void inline 51static void inline
52acpi_tb_init_generic_address(struct acpi_generic_address *generic_address, 52acpi_tb_init_generic_address(struct acpi_generic_address *generic_address,
53 u8 bit_width, u64 address); 53 u8 byte_width, u64 address);
54 54
55static void acpi_tb_convert_fadt(void); 55static void acpi_tb_convert_fadt(void);
56 56
@@ -111,7 +111,7 @@ static struct acpi_fadt_info fadt_info_table[] = {
111 * FUNCTION: acpi_tb_init_generic_address 111 * FUNCTION: acpi_tb_init_generic_address
112 * 112 *
113 * PARAMETERS: generic_address - GAS struct to be initialized 113 * PARAMETERS: generic_address - GAS struct to be initialized
114 * bit_width - Width of this register 114 * byte_width - Width of this register
115 * Address - Address of the register 115 * Address - Address of the register
116 * 116 *
117 * RETURN: None 117 * RETURN: None
@@ -124,7 +124,7 @@ static struct acpi_fadt_info fadt_info_table[] = {
124 124
125static void inline 125static void inline
126acpi_tb_init_generic_address(struct acpi_generic_address *generic_address, 126acpi_tb_init_generic_address(struct acpi_generic_address *generic_address,
127 u8 bit_width, u64 address) 127 u8 byte_width, u64 address)
128{ 128{
129 129
130 /* 130 /*
@@ -136,7 +136,7 @@ acpi_tb_init_generic_address(struct acpi_generic_address *generic_address,
136 /* All other fields are byte-wide */ 136 /* All other fields are byte-wide */
137 137
138 generic_address->space_id = ACPI_ADR_SPACE_SYSTEM_IO; 138 generic_address->space_id = ACPI_ADR_SPACE_SYSTEM_IO;
139 generic_address->bit_width = bit_width; 139 generic_address->bit_width = byte_width << 3;
140 generic_address->bit_offset = 0; 140 generic_address->bit_offset = 0;
141 generic_address->access_width = 0; 141 generic_address->access_width = 0;
142} 142}
@@ -304,7 +304,7 @@ static void acpi_tb_convert_fadt(void)
304 * The ACPI 1.0 reserved fields that will be zeroed are the bytes located at 304 * The ACPI 1.0 reserved fields that will be zeroed are the bytes located at
305 * offset 45, 55, 95, and the word located at offset 109, 110. 305 * offset 45, 55, 95, and the word located at offset 109, 110.
306 */ 306 */
307 if (acpi_gbl_FADT.header.revision < 3) { 307 if (acpi_gbl_FADT.header.revision < FADT2_REVISION_ID) {
308 acpi_gbl_FADT.preferred_profile = 0; 308 acpi_gbl_FADT.preferred_profile = 0;
309 acpi_gbl_FADT.pstate_control = 0; 309 acpi_gbl_FADT.pstate_control = 0;
310 acpi_gbl_FADT.cst_control = 0; 310 acpi_gbl_FADT.cst_control = 0;
@@ -342,9 +342,20 @@ static void acpi_tb_convert_fadt(void)
342 * useful to calculate them once, here. 342 * useful to calculate them once, here.
343 * 343 *
344 * The PM event blocks are split into two register blocks, first is the 344 * The PM event blocks are split into two register blocks, first is the
345 * PM Status Register block, followed immediately by the PM Enable Register 345 * PM Status Register block, followed immediately by the PM Enable
346 * block. Each is of length (pm1_event_length/2) 346 * Register block. Each is of length (xpm1x_event_block.bit_width/2).
347 *
348 * On various systems the v2 fields (and particularly the bit widths)
349 * cannot be relied upon, though. Hence resort to using the v1 length
350 * here (and warn about the inconsistency).
347 */ 351 */
352 if (acpi_gbl_FADT.xpm1a_event_block.bit_width
353 != acpi_gbl_FADT.pm1_event_length * 8)
354 printk(KERN_WARNING "FADT: "
355 "X_PM1a_EVT_BLK.bit_width (%u) does not match"
356 " PM1_EVT_LEN (%u)\n",
357 acpi_gbl_FADT.xpm1a_event_block.bit_width,
358 acpi_gbl_FADT.pm1_event_length);
348 pm1_register_length = (u8) ACPI_DIV_2(acpi_gbl_FADT.pm1_event_length); 359 pm1_register_length = (u8) ACPI_DIV_2(acpi_gbl_FADT.pm1_event_length);
349 360
350 /* The PM1A register block is required */ 361 /* The PM1A register block is required */
@@ -360,13 +371,20 @@ static void acpi_tb_convert_fadt(void)
360 /* The PM1B register block is optional, ignore if not present */ 371 /* The PM1B register block is optional, ignore if not present */
361 372
362 if (acpi_gbl_FADT.xpm1b_event_block.address) { 373 if (acpi_gbl_FADT.xpm1b_event_block.address) {
374 if (acpi_gbl_FADT.xpm1b_event_block.bit_width
375 != acpi_gbl_FADT.pm1_event_length * 8)
376 printk(KERN_WARNING "FADT: "
377 "X_PM1b_EVT_BLK.bit_width (%u) does not match"
378 " PM1_EVT_LEN (%u)\n",
379 acpi_gbl_FADT.xpm1b_event_block.bit_width,
380 acpi_gbl_FADT.pm1_event_length);
363 acpi_tb_init_generic_address(&acpi_gbl_xpm1b_enable, 381 acpi_tb_init_generic_address(&acpi_gbl_xpm1b_enable,
364 pm1_register_length, 382 pm1_register_length,
365 (acpi_gbl_FADT.xpm1b_event_block. 383 (acpi_gbl_FADT.xpm1b_event_block.
366 address + pm1_register_length)); 384 address + pm1_register_length));
367 /* Don't forget to copy space_id of the GAS */ 385 /* Don't forget to copy space_id of the GAS */
368 acpi_gbl_xpm1b_enable.space_id = 386 acpi_gbl_xpm1b_enable.space_id =
369 acpi_gbl_FADT.xpm1a_event_block.space_id; 387 acpi_gbl_FADT.xpm1b_event_block.space_id;
370 388
371 } 389 }
372} 390}
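
Two things happen in the tbfadt.c hunks: acpi_tb_init_generic_address() now takes a byte width and converts it to bits internally (byte_width << 3), and the derived PM1 enable block keeps using the v1 PM1_EVT_LEN while warning when the 64-bit X_PM1x_EVT_BLK bit width disagrees. A stand-alone sketch of that address/width arithmetic; the struct and field names are illustrative, not the ACPICA GAS layout.

    /* Stand-alone model of the PM1 enable-block derivation; names are illustrative. */
    #include <stdio.h>
    #include <stdint.h>

    struct gas {                 /* simplified Generic Address Structure */
        uint64_t address;
        uint8_t  bit_width;
    };

    static void init_gas(struct gas *g, uint8_t byte_width, uint64_t address)
    {
        g->address   = address;
        g->bit_width = byte_width << 3;   /* register width is stored in bits */
    }

    int main(void)
    {
        /* FADT v1: the PM1a event block is 4 bytes, split as 2 status + 2 enable. */
        uint64_t pm1a_evt_blk  = 0x1000;
        uint8_t  pm1_event_len = 4;
        uint8_t  half          = pm1_event_len / 2;
        uint8_t  x_bit_width   = 16;      /* what a buggy v2 X_PM1a_EVT_BLK might claim */

        if (x_bit_width != pm1_event_len * 8)
            printf("warning: X_PM1a_EVT_BLK.bit_width (%u) != PM1_EVT_LEN (%u)\n",
                   x_bit_width, pm1_event_len);

        struct gas pm1a_enable;
        init_gas(&pm1a_enable, half, pm1a_evt_blk + half); /* enable block follows status */
        printf("PM1a enable at 0x%llx, %u bits wide\n",
               (unsigned long long)pm1a_enable.address, pm1a_enable.bit_width);
        return 0;
    }
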
diff --git a/drivers/acpi/tables/tbinstal.c b/drivers/acpi/tables/tbinstal.c
index b22185f55a16..18747ce8dd2f 100644
--- a/drivers/acpi/tables/tbinstal.c
+++ b/drivers/acpi/tables/tbinstal.c
@@ -110,7 +110,6 @@ acpi_status
110acpi_tb_add_table(struct acpi_table_desc *table_desc, u32 *table_index) 110acpi_tb_add_table(struct acpi_table_desc *table_desc, u32 *table_index)
111{ 111{
112 u32 i; 112 u32 i;
113 u32 length;
114 acpi_status status = AE_OK; 113 acpi_status status = AE_OK;
115 114
116 ACPI_FUNCTION_TRACE(tb_add_table); 115 ACPI_FUNCTION_TRACE(tb_add_table);
@@ -145,25 +144,64 @@ acpi_tb_add_table(struct acpi_table_desc *table_desc, u32 *table_index)
145 } 144 }
146 } 145 }
147 146
148 length = ACPI_MIN(table_desc->length, 147 /*
149 acpi_gbl_root_table_list.tables[i].length); 148 * Check for a table match on the entire table length,
149 * not just the header.
150 */
151 if (table_desc->length !=
152 acpi_gbl_root_table_list.tables[i].length) {
153 continue;
154 }
155
150 if (ACPI_MEMCMP(table_desc->pointer, 156 if (ACPI_MEMCMP(table_desc->pointer,
151 acpi_gbl_root_table_list.tables[i].pointer, 157 acpi_gbl_root_table_list.tables[i].pointer,
152 length)) { 158 acpi_gbl_root_table_list.tables[i].length)) {
153 continue; 159 continue;
154 } 160 }
155 161
156 /* Table is already registered */ 162 /*
157 163 * Note: the current mechanism does not unregister a table if it is
164 * dynamically unloaded. The related namespace entries are deleted,
165 * but the table remains in the root table list.
166 *
167 * The assumption here is that the number of different tables that
168 * will be loaded is actually small, and there is minimal overhead
169 * in just keeping the table in case it is needed again.
170 *
171 * If this assumption changes in the future (perhaps on large
172 * machines with many table load/unload operations), tables will
173 * need to be unregistered when they are unloaded, and slots in the
174 * root table list should be reused when empty.
175 */
176
177 /*
178 * Table is already registered.
179 * We can delete the table that was passed as a parameter.
180 */
158 acpi_tb_delete_table(table_desc); 181 acpi_tb_delete_table(table_desc);
159 *table_index = i; 182 *table_index = i;
160 status = AE_ALREADY_EXISTS; 183
161 goto release; 184 if (acpi_gbl_root_table_list.tables[i].
185 flags & ACPI_TABLE_IS_LOADED) {
186
187 /* Table is still loaded, this is an error */
188
189 status = AE_ALREADY_EXISTS;
190 goto release;
191 } else {
192 /* Table was unloaded, allow it to be reloaded */
193
194 table_desc->pointer =
195 acpi_gbl_root_table_list.tables[i].pointer;
196 table_desc->address =
197 acpi_gbl_root_table_list.tables[i].address;
198 status = AE_OK;
199 goto print_header;
200 }
162 } 201 }
163 202
164 /* 203 /* Add the table to the global root table list */
165 * Add the table to the global table list 204
166 */
167 status = acpi_tb_store_table(table_desc->address, table_desc->pointer, 205 status = acpi_tb_store_table(table_desc->address, table_desc->pointer,
168 table_desc->length, table_desc->flags, 206 table_desc->length, table_desc->flags,
169 table_index); 207 table_index);
@@ -171,6 +209,7 @@ acpi_tb_add_table(struct acpi_table_desc *table_desc, u32 *table_index)
171 goto release; 209 goto release;
172 } 210 }
173 211
212 print_header:
174 acpi_tb_print_table_header(table_desc->address, table_desc->pointer); 213 acpi_tb_print_table_header(table_desc->address, table_desc->pointer);
175 214
176 release: 215 release:
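
The tbinstal.c rework above changes duplicate detection from "compare the shorter of the two lengths" to "lengths must match exactly, then compare the whole body", and lets a previously unloaded table be reused instead of failing with AE_ALREADY_EXISTS. A small stand-alone model of that matching rule; the types here are stand-ins, not the ACPICA table descriptors.

    /* Stand-alone model of the exact-length duplicate check; types are illustrative. */
    #include <stdio.h>
    #include <string.h>

    struct table {
        const unsigned char *data;
        size_t length;
        int loaded;               /* still loaded into the namespace? */
    };

    /* Returns: 0 = no match, 1 = duplicate and still loaded (error),
     *          2 = duplicate but unloaded (safe to reload). */
    static int match_table(const struct table *newt, const struct table *existing)
    {
        if (newt->length != existing->length)          /* full length must match */
            return 0;
        if (memcmp(newt->data, existing->data, newt->length) != 0)
            return 0;
        return existing->loaded ? 1 : 2;
    }

    int main(void)
    {
        unsigned char body[] = { 'S', 'S', 'D', 'T', 1, 2, 3, 4 };
        struct table loaded   = { body, sizeof(body), 1 };
        struct table unloaded = { body, sizeof(body), 0 };
        struct table incoming = { body, sizeof(body), 0 };

        printf("vs loaded:   %d\n", match_table(&incoming, &loaded));    /* 1: already exists */
        printf("vs unloaded: %d\n", match_table(&incoming, &unloaded));  /* 2: reload allowed */
        return 0;
    }
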
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 912703691d36..073ff09218a9 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -47,7 +47,6 @@
47#include <acpi/acpi_bus.h> 47#include <acpi/acpi_bus.h>
48#include <acpi/acpi_drivers.h> 48#include <acpi/acpi_drivers.h>
49 49
50#define ACPI_THERMAL_COMPONENT 0x04000000
51#define ACPI_THERMAL_CLASS "thermal_zone" 50#define ACPI_THERMAL_CLASS "thermal_zone"
52#define ACPI_THERMAL_DEVICE_NAME "Thermal Zone" 51#define ACPI_THERMAL_DEVICE_NAME "Thermal Zone"
53#define ACPI_THERMAL_FILE_STATE "state" 52#define ACPI_THERMAL_FILE_STATE "state"
@@ -246,18 +245,18 @@ static const struct file_operations acpi_thermal_polling_fops = {
246static int acpi_thermal_get_temperature(struct acpi_thermal *tz) 245static int acpi_thermal_get_temperature(struct acpi_thermal *tz)
247{ 246{
248 acpi_status status = AE_OK; 247 acpi_status status = AE_OK;
249 248 unsigned long long tmp;
250 249
251 if (!tz) 250 if (!tz)
252 return -EINVAL; 251 return -EINVAL;
253 252
254 tz->last_temperature = tz->temperature; 253 tz->last_temperature = tz->temperature;
255 254
256 status = 255 status = acpi_evaluate_integer(tz->device->handle, "_TMP", NULL, &tmp);
257 acpi_evaluate_integer(tz->device->handle, "_TMP", NULL, &tz->temperature);
258 if (ACPI_FAILURE(status)) 256 if (ACPI_FAILURE(status))
259 return -ENODEV; 257 return -ENODEV;
260 258
259 tz->temperature = tmp;
261 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Temperature is %lu dK\n", 260 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Temperature is %lu dK\n",
262 tz->temperature)); 261 tz->temperature));
263 262
@@ -267,17 +266,16 @@ static int acpi_thermal_get_temperature(struct acpi_thermal *tz)
267static int acpi_thermal_get_polling_frequency(struct acpi_thermal *tz) 266static int acpi_thermal_get_polling_frequency(struct acpi_thermal *tz)
268{ 267{
269 acpi_status status = AE_OK; 268 acpi_status status = AE_OK;
270 269 unsigned long long tmp;
271 270
272 if (!tz) 271 if (!tz)
273 return -EINVAL; 272 return -EINVAL;
274 273
275 status = 274 status = acpi_evaluate_integer(tz->device->handle, "_TZP", NULL, &tmp);
276 acpi_evaluate_integer(tz->device->handle, "_TZP", NULL,
277 &tz->polling_frequency);
278 if (ACPI_FAILURE(status)) 275 if (ACPI_FAILURE(status))
279 return -ENODEV; 276 return -ENODEV;
280 277
278 tz->polling_frequency = tmp;
281 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Polling frequency is %lu dS\n", 279 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Polling frequency is %lu dS\n",
282 tz->polling_frequency)); 280 tz->polling_frequency));
283 281
@@ -356,6 +354,7 @@ do { \
356static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag) 354static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
357{ 355{
358 acpi_status status = AE_OK; 356 acpi_status status = AE_OK;
357 unsigned long long tmp;
359 struct acpi_handle_list devices; 358 struct acpi_handle_list devices;
360 int valid = 0; 359 int valid = 0;
361 int i; 360 int i;
@@ -363,7 +362,8 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
363 /* Critical Shutdown (required) */ 362 /* Critical Shutdown (required) */
364 if (flag & ACPI_TRIPS_CRITICAL) { 363 if (flag & ACPI_TRIPS_CRITICAL) {
365 status = acpi_evaluate_integer(tz->device->handle, 364 status = acpi_evaluate_integer(tz->device->handle,
366 "_CRT", NULL, &tz->trips.critical.temperature); 365 "_CRT", NULL, &tmp);
366 tz->trips.critical.temperature = tmp;
367 /* 367 /*
368 * Treat freezing temperatures as invalid as well; some 368 * Treat freezing temperatures as invalid as well; some
369 * BIOSes return really low values and cause reboots at startup. 369 * BIOSes return really low values and cause reboots at startup.
@@ -388,10 +388,12 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
388 } else if (crt > 0) { 388 } else if (crt > 0) {
389 unsigned long crt_k = CELSIUS_TO_KELVIN(crt); 389 unsigned long crt_k = CELSIUS_TO_KELVIN(crt);
390 /* 390 /*
391 * Allow override to lower critical threshold 391 * Allow override critical threshold
392 */ 392 */
393 if (crt_k < tz->trips.critical.temperature) 393 if (crt_k > tz->trips.critical.temperature)
394 tz->trips.critical.temperature = crt_k; 394 printk(KERN_WARNING PREFIX
395 "Critical threshold %d C\n", crt);
396 tz->trips.critical.temperature = crt_k;
395 } 397 }
396 } 398 }
397 } 399 }
@@ -399,12 +401,13 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
399 /* Critical Sleep (optional) */ 401 /* Critical Sleep (optional) */
400 if (flag & ACPI_TRIPS_HOT) { 402 if (flag & ACPI_TRIPS_HOT) {
401 status = acpi_evaluate_integer(tz->device->handle, 403 status = acpi_evaluate_integer(tz->device->handle,
402 "_HOT", NULL, &tz->trips.hot.temperature); 404 "_HOT", NULL, &tmp);
403 if (ACPI_FAILURE(status)) { 405 if (ACPI_FAILURE(status)) {
404 tz->trips.hot.flags.valid = 0; 406 tz->trips.hot.flags.valid = 0;
405 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 407 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
406 "No hot threshold\n")); 408 "No hot threshold\n"));
407 } else { 409 } else {
410 tz->trips.hot.temperature = tmp;
408 tz->trips.hot.flags.valid = 1; 411 tz->trips.hot.flags.valid = 1;
409 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 412 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
410 "Found hot threshold [%lu]\n", 413 "Found hot threshold [%lu]\n",
@@ -418,33 +421,40 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
418 if (psv == -1) { 421 if (psv == -1) {
419 status = AE_SUPPORT; 422 status = AE_SUPPORT;
420 } else if (psv > 0) { 423 } else if (psv > 0) {
421 tz->trips.passive.temperature = CELSIUS_TO_KELVIN(psv); 424 tmp = CELSIUS_TO_KELVIN(psv);
422 status = AE_OK; 425 status = AE_OK;
423 } else { 426 } else {
424 status = acpi_evaluate_integer(tz->device->handle, 427 status = acpi_evaluate_integer(tz->device->handle,
425 "_PSV", NULL, &tz->trips.passive.temperature); 428 "_PSV", NULL, &tmp);
426 } 429 }
427 430
428 if (ACPI_FAILURE(status)) 431 if (ACPI_FAILURE(status))
429 tz->trips.passive.flags.valid = 0; 432 tz->trips.passive.flags.valid = 0;
430 else { 433 else {
434 tz->trips.passive.temperature = tmp;
431 tz->trips.passive.flags.valid = 1; 435 tz->trips.passive.flags.valid = 1;
432 if (flag == ACPI_TRIPS_INIT) { 436 if (flag == ACPI_TRIPS_INIT) {
433 status = acpi_evaluate_integer( 437 status = acpi_evaluate_integer(
434 tz->device->handle, "_TC1", 438 tz->device->handle, "_TC1",
435 NULL, &tz->trips.passive.tc1); 439 NULL, &tmp);
436 if (ACPI_FAILURE(status)) 440 if (ACPI_FAILURE(status))
437 tz->trips.passive.flags.valid = 0; 441 tz->trips.passive.flags.valid = 0;
442 else
443 tz->trips.passive.tc1 = tmp;
438 status = acpi_evaluate_integer( 444 status = acpi_evaluate_integer(
439 tz->device->handle, "_TC2", 445 tz->device->handle, "_TC2",
440 NULL, &tz->trips.passive.tc2); 446 NULL, &tmp);
441 if (ACPI_FAILURE(status)) 447 if (ACPI_FAILURE(status))
442 tz->trips.passive.flags.valid = 0; 448 tz->trips.passive.flags.valid = 0;
449 else
450 tz->trips.passive.tc2 = tmp;
443 status = acpi_evaluate_integer( 451 status = acpi_evaluate_integer(
444 tz->device->handle, "_TSP", 452 tz->device->handle, "_TSP",
445 NULL, &tz->trips.passive.tsp); 453 NULL, &tmp);
446 if (ACPI_FAILURE(status)) 454 if (ACPI_FAILURE(status))
447 tz->trips.passive.flags.valid = 0; 455 tz->trips.passive.flags.valid = 0;
456 else
457 tz->trips.passive.tsp = tmp;
448 } 458 }
449 } 459 }
450 } 460 }
@@ -479,7 +489,7 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
479 489
480 if (flag & ACPI_TRIPS_ACTIVE) { 490 if (flag & ACPI_TRIPS_ACTIVE) {
481 status = acpi_evaluate_integer(tz->device->handle, 491 status = acpi_evaluate_integer(tz->device->handle,
482 name, NULL, &tz->trips.active[i].temperature); 492 name, NULL, &tmp);
483 if (ACPI_FAILURE(status)) { 493 if (ACPI_FAILURE(status)) {
484 tz->trips.active[i].flags.valid = 0; 494 tz->trips.active[i].flags.valid = 0;
485 if (i == 0) 495 if (i == 0)
@@ -500,8 +510,10 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
500 tz->trips.active[i - 2].temperature : 510 tz->trips.active[i - 2].temperature :
501 CELSIUS_TO_KELVIN(act)); 511 CELSIUS_TO_KELVIN(act));
502 break; 512 break;
503 } else 513 } else {
514 tz->trips.active[i].temperature = tmp;
504 tz->trips.active[i].flags.valid = 1; 515 tz->trips.active[i].flags.valid = 1;
516 }
505 } 517 }
506 518
507 name[2] = 'L'; 519 name[2] = 'L';
@@ -563,7 +575,7 @@ static int acpi_thermal_critical(struct acpi_thermal *tz)
563 acpi_bus_generate_proc_event(tz->device, ACPI_THERMAL_NOTIFY_CRITICAL, 575 acpi_bus_generate_proc_event(tz->device, ACPI_THERMAL_NOTIFY_CRITICAL,
564 tz->trips.critical.flags.enabled); 576 tz->trips.critical.flags.enabled);
565 acpi_bus_generate_netlink_event(tz->device->pnp.device_class, 577 acpi_bus_generate_netlink_event(tz->device->pnp.device_class,
566 tz->device->dev.bus_id, 578 dev_name(&tz->device->dev),
567 ACPI_THERMAL_NOTIFY_CRITICAL, 579 ACPI_THERMAL_NOTIFY_CRITICAL,
568 tz->trips.critical.flags.enabled); 580 tz->trips.critical.flags.enabled);
569 581
@@ -592,7 +604,7 @@ static int acpi_thermal_hot(struct acpi_thermal *tz)
592 acpi_bus_generate_proc_event(tz->device, ACPI_THERMAL_NOTIFY_HOT, 604 acpi_bus_generate_proc_event(tz->device, ACPI_THERMAL_NOTIFY_HOT,
593 tz->trips.hot.flags.enabled); 605 tz->trips.hot.flags.enabled);
594 acpi_bus_generate_netlink_event(tz->device->pnp.device_class, 606 acpi_bus_generate_netlink_event(tz->device->pnp.device_class,
595 tz->device->dev.bus_id, 607 dev_name(&tz->device->dev),
596 ACPI_THERMAL_NOTIFY_HOT, 608 ACPI_THERMAL_NOTIFY_HOT,
597 tz->trips.hot.flags.enabled); 609 tz->trips.hot.flags.enabled);
598 610
@@ -1213,8 +1225,8 @@ static int acpi_thermal_register_thermal_zone(struct acpi_thermal *tz)
1213 acpi_bus_private_data_handler, 1225 acpi_bus_private_data_handler,
1214 tz->thermal_zone); 1226 tz->thermal_zone);
1215 if (ACPI_FAILURE(status)) { 1227 if (ACPI_FAILURE(status)) {
1216 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, 1228 printk(KERN_ERR PREFIX
1217 "Error attaching device data\n")); 1229 "Error attaching device data\n");
1218 return -ENODEV; 1230 return -ENODEV;
1219 } 1231 }
1220 1232
@@ -1579,14 +1591,14 @@ static void acpi_thermal_notify(acpi_handle handle, u32 event, void *data)
1579 acpi_thermal_check(tz); 1591 acpi_thermal_check(tz);
1580 acpi_bus_generate_proc_event(device, event, 0); 1592 acpi_bus_generate_proc_event(device, event, 0);
1581 acpi_bus_generate_netlink_event(device->pnp.device_class, 1593 acpi_bus_generate_netlink_event(device->pnp.device_class,
1582 device->dev.bus_id, event, 0); 1594 dev_name(&device->dev), event, 0);
1583 break; 1595 break;
1584 case ACPI_THERMAL_NOTIFY_DEVICES: 1596 case ACPI_THERMAL_NOTIFY_DEVICES:
1585 acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_DEVICES); 1597 acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_DEVICES);
1586 acpi_thermal_check(tz); 1598 acpi_thermal_check(tz);
1587 acpi_bus_generate_proc_event(device, event, 0); 1599 acpi_bus_generate_proc_event(device, event, 0);
1588 acpi_bus_generate_netlink_event(device->pnp.device_class, 1600 acpi_bus_generate_netlink_event(device->pnp.device_class,
1589 device->dev.bus_id, event, 0); 1601 dev_name(&device->dev), event, 0);
1590 break; 1602 break;
1591 default: 1603 default:
1592 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 1604 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
@@ -1647,7 +1659,7 @@ static int acpi_thermal_add(struct acpi_device *device)
1647 strcpy(tz->name, device->pnp.bus_id); 1659 strcpy(tz->name, device->pnp.bus_id);
1648 strcpy(acpi_device_name(device), ACPI_THERMAL_DEVICE_NAME); 1660 strcpy(acpi_device_name(device), ACPI_THERMAL_DEVICE_NAME);
1649 strcpy(acpi_device_class(device), ACPI_THERMAL_CLASS); 1661 strcpy(acpi_device_class(device), ACPI_THERMAL_CLASS);
1650 acpi_driver_data(device) = tz; 1662 device->driver_data = tz;
1651 mutex_init(&tz->lock); 1663 mutex_init(&tz->lock);
1652 1664
1653 1665
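
The recurring pattern in the thermal.c hunks is evaluating into a local unsigned long long and copying it into the (narrower) structure field only on success, since acpi_evaluate_integer() now writes a 64-bit result (see the utils.c change further down). A stand-alone model of that caller-side pattern; eval_integer() is a stub standing in for the evaluator, and the field names are illustrative.

    /* Stand-alone model of the "evaluate into a wide temporary" pattern. */
    #include <stdio.h>

    /* Stub: pretend the firmware returned 3032 (303.2 K in deci-Kelvin). */
    static int eval_integer(const char *method, unsigned long long *out)
    {
        (void)method;
        *out = 3032ULL;
        return 0;
    }

    struct thermal_zone {
        unsigned long temperature;     /* narrower than the evaluator's output */
    };

    static int get_temperature(struct thermal_zone *tz)
    {
        unsigned long long tmp;        /* wide temporary, matches the evaluator */

        if (eval_integer("_TMP", &tmp) != 0)
            return -1;

        tz->temperature = tmp;         /* assign only after a successful call */
        return 0;
    }

    int main(void)
    {
        struct thermal_zone tz = { 0 };
        if (get_temperature(&tz) == 0)
            printf("temperature: %lu dK\n", tz.temperature);
        return 0;
    }
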
diff --git a/drivers/acpi/toshiba_acpi.c b/drivers/acpi/toshiba_acpi.c
index 8a649f40d162..40e60fc2e596 100644
--- a/drivers/acpi/toshiba_acpi.c
+++ b/drivers/acpi/toshiba_acpi.c
@@ -371,6 +371,7 @@ static void bt_poll_rfkill(struct input_polled_dev *poll_dev)
371 RFKILL_STATE_HARD_BLOCKED); 371 RFKILL_STATE_HARD_BLOCKED);
372 input_report_switch(poll_dev->input, SW_RFKILL_ALL, 372 input_report_switch(poll_dev->input, SW_RFKILL_ALL,
373 new_rfk_state); 373 new_rfk_state);
374 input_sync(poll_dev->input);
374 } 375 }
375} 376}
376 377
@@ -548,7 +549,7 @@ static unsigned long write_video(const char *buffer, unsigned long count)
548 549
549 hci_read1(HCI_VIDEO_OUT, &video_out, &hci_result); 550 hci_read1(HCI_VIDEO_OUT, &video_out, &hci_result);
550 if (hci_result == HCI_SUCCESS) { 551 if (hci_result == HCI_SUCCESS) {
551 int new_video_out = video_out; 552 unsigned int new_video_out = video_out;
552 if (lcd_out != -1) 553 if (lcd_out != -1)
553 _set_bit(&new_video_out, HCI_VIDEO_OUT_LCD, lcd_out); 554 _set_bit(&new_video_out, HCI_VIDEO_OUT_LCD, lcd_out);
554 if (crt_out != -1) 555 if (crt_out != -1)
@@ -823,33 +824,36 @@ static int __init toshiba_acpi_init(void)
823 toshiba_acpi_exit(); 824 toshiba_acpi_exit();
824 return -ENOMEM; 825 return -ENOMEM;
825 } 826 }
826 }
827 827
828 /* Register input device for kill switch */ 828 /* Register input device for kill switch */
829 toshiba_acpi.poll_dev = input_allocate_polled_device(); 829 toshiba_acpi.poll_dev = input_allocate_polled_device();
830 if (!toshiba_acpi.poll_dev) { 830 if (!toshiba_acpi.poll_dev) {
831 printk(MY_ERR "unable to allocate kill-switch input device\n"); 831 printk(MY_ERR
832 toshiba_acpi_exit(); 832 "unable to allocate kill-switch input device\n");
833 return -ENOMEM; 833 toshiba_acpi_exit();
834 } 834 return -ENOMEM;
835 toshiba_acpi.poll_dev->private = &toshiba_acpi; 835 }
836 toshiba_acpi.poll_dev->poll = bt_poll_rfkill; 836 toshiba_acpi.poll_dev->private = &toshiba_acpi;
837 toshiba_acpi.poll_dev->poll_interval = 1000; /* msecs */ 837 toshiba_acpi.poll_dev->poll = bt_poll_rfkill;
838 838 toshiba_acpi.poll_dev->poll_interval = 1000; /* msecs */
839 toshiba_acpi.poll_dev->input->name = toshiba_acpi.rfk_name; 839
840 toshiba_acpi.poll_dev->input->id.bustype = BUS_HOST; 840 toshiba_acpi.poll_dev->input->name = toshiba_acpi.rfk_name;
841 toshiba_acpi.poll_dev->input->id.vendor = 0x0930; /* Toshiba USB ID */ 841 toshiba_acpi.poll_dev->input->id.bustype = BUS_HOST;
842 set_bit(EV_SW, toshiba_acpi.poll_dev->input->evbit); 842 /* Toshiba USB ID */
843 set_bit(SW_RFKILL_ALL, toshiba_acpi.poll_dev->input->swbit); 843 toshiba_acpi.poll_dev->input->id.vendor = 0x0930;
844 input_report_switch(toshiba_acpi.poll_dev->input, SW_RFKILL_ALL, TRUE); 844 set_bit(EV_SW, toshiba_acpi.poll_dev->input->evbit);
845 845 set_bit(SW_RFKILL_ALL, toshiba_acpi.poll_dev->input->swbit);
846 ret = input_register_polled_device(toshiba_acpi.poll_dev); 846 input_report_switch(toshiba_acpi.poll_dev->input,
847 if (ret) { 847 SW_RFKILL_ALL, TRUE);
848 printk(MY_ERR "unable to register kill-switch input device\n"); 848 input_sync(toshiba_acpi.poll_dev->input);
849 rfkill_free(toshiba_acpi.rfk_dev); 849
850 toshiba_acpi.rfk_dev = NULL; 850 ret = input_register_polled_device(toshiba_acpi.poll_dev);
851 toshiba_acpi_exit(); 851 if (ret) {
852 return ret; 852 printk(MY_ERR
853 "unable to register kill-switch input device\n");
854 toshiba_acpi_exit();
855 return ret;
856 }
853 } 857 }
854 858
855 return 0; 859 return 0;
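
The toshiba_acpi hunks add input_sync() after each input_report_switch() so the switch state actually reaches evdev readers, and fold the kill-switch setup under an existing conditional. A hedged fragment of the report-then-sync pairing, assuming the standard <linux/input.h> polled-device callback shape rather than quoting the driver:

    /*
     * Sketch only (not the driver's code): the reported state is delivered
     * to readers once input_sync() marks the end of the event batch.
     */
    #include <linux/input-polldev.h>

    static void rfkill_poll(struct input_polled_dev *poll_dev)
    {
        bool blocked = false;   /* in the real driver: read the HCI wireless switch */

        input_report_switch(poll_dev->input, SW_RFKILL_ALL, !blocked);
        input_sync(poll_dev->input);    /* flush the switch event to readers */
    }
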
diff --git a/drivers/acpi/utilities/utalloc.c b/drivers/acpi/utilities/utalloc.c
index 7dcb67e0b215..241c535c1753 100644
--- a/drivers/acpi/utilities/utalloc.c
+++ b/drivers/acpi/utilities/utalloc.c
@@ -232,7 +232,7 @@ acpi_status acpi_ut_validate_buffer(struct acpi_buffer * buffer)
232 * RETURN: Status 232 * RETURN: Status
233 * 233 *
234 * DESCRIPTION: Validate that the buffer is of the required length or 234 * DESCRIPTION: Validate that the buffer is of the required length or
235 * allocate a new buffer. Returned buffer is always zeroed. 235 * allocate a new buffer. Returned buffer is always zeroed.
236 * 236 *
237 ******************************************************************************/ 237 ******************************************************************************/
238 238
@@ -240,7 +240,7 @@ acpi_status
240acpi_ut_initialize_buffer(struct acpi_buffer * buffer, 240acpi_ut_initialize_buffer(struct acpi_buffer * buffer,
241 acpi_size required_length) 241 acpi_size required_length)
242{ 242{
243 acpi_status status = AE_OK; 243 acpi_size input_buffer_length;
244 244
245 /* Parameter validation */ 245 /* Parameter validation */
246 246
@@ -248,55 +248,58 @@ acpi_ut_initialize_buffer(struct acpi_buffer * buffer,
248 return (AE_BAD_PARAMETER); 248 return (AE_BAD_PARAMETER);
249 } 249 }
250 250
251 switch (buffer->length) { 251 /*
252 * Buffer->Length is used as both an input and output parameter. Get the
253 * input actual length and set the output required buffer length.
254 */
255 input_buffer_length = buffer->length;
256 buffer->length = required_length;
257
258 /*
259 * The input buffer length contains the actual buffer length, or the type
260 * of buffer to be allocated by this routine.
261 */
262 switch (input_buffer_length) {
252 case ACPI_NO_BUFFER: 263 case ACPI_NO_BUFFER:
253 264
254 /* Set the exception and returned the required length */ 265 /* Return the exception (and the required buffer length) */
255 266
256 status = AE_BUFFER_OVERFLOW; 267 return (AE_BUFFER_OVERFLOW);
257 break;
258 268
259 case ACPI_ALLOCATE_BUFFER: 269 case ACPI_ALLOCATE_BUFFER:
260 270
261 /* Allocate a new buffer */ 271 /* Allocate a new buffer */
262 272
263 buffer->pointer = acpi_os_allocate(required_length); 273 buffer->pointer = acpi_os_allocate(required_length);
264 if (!buffer->pointer) {
265 return (AE_NO_MEMORY);
266 }
267
268 /* Clear the buffer */
269
270 ACPI_MEMSET(buffer->pointer, 0, required_length);
271 break; 274 break;
272 275
273 case ACPI_ALLOCATE_LOCAL_BUFFER: 276 case ACPI_ALLOCATE_LOCAL_BUFFER:
274 277
275 /* Allocate a new buffer with local interface to allow tracking */ 278 /* Allocate a new buffer with local interface to allow tracking */
276 279
277 buffer->pointer = ACPI_ALLOCATE_ZEROED(required_length); 280 buffer->pointer = ACPI_ALLOCATE(required_length);
278 if (!buffer->pointer) {
279 return (AE_NO_MEMORY);
280 }
281 break; 281 break;
282 282
283 default: 283 default:
284 284
285 /* Existing buffer: Validate the size of the buffer */ 285 /* Existing buffer: Validate the size of the buffer */
286 286
287 if (buffer->length < required_length) { 287 if (input_buffer_length < required_length) {
288 status = AE_BUFFER_OVERFLOW; 288 return (AE_BUFFER_OVERFLOW);
289 break;
290 } 289 }
290 break;
291 }
291 292
292 /* Clear the buffer */ 293 /* Validate allocation from above or input buffer pointer */
293 294
294 ACPI_MEMSET(buffer->pointer, 0, required_length); 295 if (!buffer->pointer) {
295 break; 296 return (AE_NO_MEMORY);
296 } 297 }
297 298
298 buffer->length = required_length; 299 /* Have a valid buffer, clear it */
299 return (status); 300
301 ACPI_MEMSET(buffer->pointer, 0, required_length);
302 return (AE_OK);
300} 303}
301 304
302#ifdef NOT_USED_BY_LINUX 305#ifdef NOT_USED_BY_LINUX
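
After the utalloc.c rewrite, Buffer->Length is explicitly in/out: the caller's value selects the allocation strategy (or supplies an existing size), and the routine always writes back the required length, even when it returns AE_BUFFER_OVERFLOW. A stand-alone model of that in/out contract; the constants and struct are stand-ins, not the ACPICA ones.

    /* Stand-alone model of the in/out buffer-length contract; names are stand-ins. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define NO_BUFFER        ((size_t)0)     /* caller only wants the required size */
    #define ALLOCATE_BUFFER  ((size_t)-1)    /* routine should allocate for the caller */

    struct buffer {
        size_t length;       /* in: strategy or existing size; out: required length */
        void  *pointer;
    };

    static int initialize_buffer(struct buffer *buf, size_t required)
    {
        size_t input_length = buf->length;
        buf->length = required;                       /* always report what is needed */

        if (input_length == NO_BUFFER)
            return -1;                                /* "overflow": size reported only */

        if (input_length == ALLOCATE_BUFFER)
            buf->pointer = malloc(required);
        else if (input_length < required)
            return -1;                                /* caller's buffer is too small */

        if (!buf->pointer)
            return -2;                                /* allocation failed */

        memset(buf->pointer, 0, required);            /* returned buffer is always zeroed */
        return 0;
    }

    int main(void)
    {
        struct buffer b = { .length = NO_BUFFER, .pointer = NULL };
        initialize_buffer(&b, 128);
        printf("required length: %zu\n", b.length);   /* 128, no allocation made */

        b.length = ALLOCATE_BUFFER;
        if (initialize_buffer(&b, 128) == 0)
            free(b.pointer);
        return 0;
    }
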
diff --git a/drivers/acpi/utilities/utcopy.c b/drivers/acpi/utilities/utcopy.c
index 53499ac90988..5b2f7c27b705 100644
--- a/drivers/acpi/utilities/utcopy.c
+++ b/drivers/acpi/utilities/utcopy.c
@@ -42,7 +42,6 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/amlcode.h>
46#include <acpi/acnamesp.h> 45#include <acpi/acnamesp.h>
47 46
48 47
@@ -176,20 +175,24 @@ acpi_ut_copy_isimple_to_esimple(union acpi_operand_object *internal_object,
176 175
177 /* This is an object reference. */ 176 /* This is an object reference. */
178 177
179 switch (internal_object->reference.opcode) { 178 switch (internal_object->reference.class) {
180 case AML_INT_NAMEPATH_OP: 179 case ACPI_REFCLASS_NAME:
181
182 /* For namepath, return the object handle ("reference") */
183
184 default:
185
186 /* We are referring to the namespace node */
187 180
181 /*
182 * For namepath, return the object handle ("reference")
183 * We are referring to the namespace node
184 */
188 external_object->reference.handle = 185 external_object->reference.handle =
189 internal_object->reference.node; 186 internal_object->reference.node;
190 external_object->reference.actual_type = 187 external_object->reference.actual_type =
191 acpi_ns_get_type(internal_object->reference.node); 188 acpi_ns_get_type(internal_object->reference.node);
192 break; 189 break;
190
191 default:
192
193 /* All other reference types are unsupported */
194
195 return_ACPI_STATUS(AE_TYPE);
193 } 196 }
194 break; 197 break;
195 198
@@ -533,7 +536,7 @@ acpi_ut_copy_esimple_to_isimple(union acpi_object *external_object,
533 536
534 /* TBD: should validate incoming handle */ 537 /* TBD: should validate incoming handle */
535 538
536 internal_object->reference.opcode = AML_INT_NAMEPATH_OP; 539 internal_object->reference.class = ACPI_REFCLASS_NAME;
537 internal_object->reference.node = 540 internal_object->reference.node =
538 external_object->reference.handle; 541 external_object->reference.handle;
539 break; 542 break;
@@ -743,11 +746,11 @@ acpi_ut_copy_simple_object(union acpi_operand_object *source_desc,
743 * We copied the reference object, so we now must add a reference 746 * We copied the reference object, so we now must add a reference
744 * to the object pointed to by the reference 747 * to the object pointed to by the reference
745 * 748 *
746 * DDBHandle reference (from Load/load_table is a special reference, 749 * DDBHandle reference (from Load/load_table) is a special reference,
747 * it's Reference.Object is the table index, so does not need to 750 * it does not have a Reference.Object, so does not need to
748 * increase the reference count 751 * increase the reference count
749 */ 752 */
750 if (source_desc->reference.opcode == AML_LOAD_OP) { 753 if (source_desc->reference.class == ACPI_REFCLASS_TABLE) {
751 break; 754 break;
752 } 755 }
753 756
diff --git a/drivers/acpi/utilities/utdelete.c b/drivers/acpi/utilities/utdelete.c
index 42609d3a8aa9..d197c6b29e17 100644
--- a/drivers/acpi/utilities/utdelete.c
+++ b/drivers/acpi/utilities/utdelete.c
@@ -45,7 +45,6 @@
45#include <acpi/acinterp.h> 45#include <acpi/acinterp.h>
46#include <acpi/acnamesp.h> 46#include <acpi/acnamesp.h>
47#include <acpi/acevents.h> 47#include <acpi/acevents.h>
48#include <acpi/amlcode.h>
49 48
50#define _COMPONENT ACPI_UTILITIES 49#define _COMPONENT ACPI_UTILITIES
51ACPI_MODULE_NAME("utdelete") 50ACPI_MODULE_NAME("utdelete")
@@ -548,8 +547,8 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
548 * reference must track changes to the ref count of the index or 547 * reference must track changes to the ref count of the index or
549 * target object. 548 * target object.
550 */ 549 */
551 if ((object->reference.opcode == AML_INDEX_OP) || 550 if ((object->reference.class == ACPI_REFCLASS_INDEX) ||
552 (object->reference.opcode == AML_INT_NAMEPATH_OP)) { 551 (object->reference.class == ACPI_REFCLASS_NAME)) {
553 next_object = object->reference.object; 552 next_object = object->reference.object;
554 } 553 }
555 break; 554 break;
@@ -586,6 +585,13 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
586 ACPI_EXCEPTION((AE_INFO, status, 585 ACPI_EXCEPTION((AE_INFO, status,
587 "Could not update object reference count")); 586 "Could not update object reference count"));
588 587
588 /* Free any stacked Update State objects */
589
590 while (state_list) {
591 state = acpi_ut_pop_generic_state(&state_list);
592 acpi_ut_delete_generic_state(state);
593 }
594
589 return_ACPI_STATUS(status); 595 return_ACPI_STATUS(status);
590} 596}
591 597
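
The utdelete.c hunk (and the matching one in utmisc.c further down) plugs a leak on the error path: any update-state objects still on the local stack are popped and freed before the error is returned. A minimal stand-alone illustration of that unwind-on-error idiom using a plain linked-list stack:

    /* Stand-alone sketch of draining a work stack on the error path. */
    #include <stdio.h>
    #include <stdlib.h>

    struct state {
        int value;
        struct state *next;
    };

    static void push(struct state **top, int value)
    {
        struct state *s = malloc(sizeof(*s));
        if (!s)
            return;
        s->value = value;
        s->next = *top;
        *top = s;
    }

    static struct state *pop(struct state **top)
    {
        struct state *s = *top;
        if (s)
            *top = s->next;
        return s;
    }

    int main(void)
    {
        struct state *stack = NULL;
        push(&stack, 1);
        push(&stack, 2);

        /* ... an allocation fails mid-walk; before returning the error,
         * drain whatever is still stacked so nothing leaks ... */
        while (stack) {
            struct state *s = pop(&stack);
            printf("freeing stacked state %d\n", s->value);
            free(s);
        }
        return 0;
    }
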
diff --git a/drivers/acpi/utilities/utglobal.c b/drivers/acpi/utilities/utglobal.c
index a6e71b801d2d..670551b95e56 100644
--- a/drivers/acpi/utilities/utglobal.c
+++ b/drivers/acpi/utilities/utglobal.c
@@ -281,7 +281,6 @@ struct acpi_bit_register_info acpi_gbl_bit_register_info[ACPI_NUM_BITREG] = {
281 /* ACPI_BITREG_RT_CLOCK_ENABLE */ {ACPI_REGISTER_PM1_ENABLE, 281 /* ACPI_BITREG_RT_CLOCK_ENABLE */ {ACPI_REGISTER_PM1_ENABLE,
282 ACPI_BITPOSITION_RT_CLOCK_ENABLE, 282 ACPI_BITPOSITION_RT_CLOCK_ENABLE,
283 ACPI_BITMASK_RT_CLOCK_ENABLE}, 283 ACPI_BITMASK_RT_CLOCK_ENABLE},
284 /* ACPI_BITREG_WAKE_ENABLE */ {ACPI_REGISTER_PM1_ENABLE, 0, 0},
285 /* ACPI_BITREG_PCIEXP_WAKE_DISABLE */ {ACPI_REGISTER_PM1_ENABLE, 284 /* ACPI_BITREG_PCIEXP_WAKE_DISABLE */ {ACPI_REGISTER_PM1_ENABLE,
286 ACPI_BITPOSITION_PCIEXP_WAKE_DISABLE, 285 ACPI_BITPOSITION_PCIEXP_WAKE_DISABLE,
287 ACPI_BITMASK_PCIEXP_WAKE_DISABLE}, 286 ACPI_BITMASK_PCIEXP_WAKE_DISABLE},
@@ -575,6 +574,47 @@ char *acpi_ut_get_descriptor_name(void *object)
575 574
576} 575}
577 576
577/*******************************************************************************
578 *
579 * FUNCTION: acpi_ut_get_reference_name
580 *
581 * PARAMETERS: Object - An ACPI reference object
582 *
583 * RETURN: Pointer to a string
584 *
585 * DESCRIPTION: Decode a reference object sub-type to a string.
586 *
587 ******************************************************************************/
588
589/* Printable names of reference object sub-types */
590
591static const char *acpi_gbl_ref_class_names[] = {
592 /* 00 */ "Local",
593 /* 01 */ "Argument",
594 /* 02 */ "RefOf",
595 /* 03 */ "Index",
596 /* 04 */ "DdbHandle",
597 /* 05 */ "Named Object",
598 /* 06 */ "Debug"
599};
600
601const char *acpi_ut_get_reference_name(union acpi_operand_object *object)
602{
603 if (!object)
604 return "NULL Object";
605
606 if (ACPI_GET_DESCRIPTOR_TYPE(object) != ACPI_DESC_TYPE_OPERAND)
607 return "Not an Operand object";
608
609 if (object->common.type != ACPI_TYPE_LOCAL_REFERENCE)
610 return "Not a Reference object";
611
612 if (object->reference.class > ACPI_REFCLASS_MAX)
613 return "Unknown Reference class";
614
615 return acpi_gbl_ref_class_names[object->reference.class];
616}
617
578#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER) 618#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
579/* 619/*
580 * Strings and procedures used for debug only 620 * Strings and procedures used for debug only
@@ -677,14 +717,14 @@ u8 acpi_ut_valid_object_type(acpi_object_type type)
677 * 717 *
678 * PARAMETERS: None 718 * PARAMETERS: None
679 * 719 *
680 * RETURN: None 720 * RETURN: Status
681 * 721 *
682 * DESCRIPTION: Init library globals. All globals that require specific 722 * DESCRIPTION: Init library globals. All globals that require specific
683 * initialization should be initialized here! 723 * initialization should be initialized here!
684 * 724 *
685 ******************************************************************************/ 725 ******************************************************************************/
686 726
687void acpi_ut_init_globals(void) 727acpi_status acpi_ut_init_globals(void)
688{ 728{
689 acpi_status status; 729 acpi_status status;
690 u32 i; 730 u32 i;
@@ -695,7 +735,7 @@ void acpi_ut_init_globals(void)
695 735
696 status = acpi_ut_create_caches(); 736 status = acpi_ut_create_caches();
697 if (ACPI_FAILURE(status)) { 737 if (ACPI_FAILURE(status)) {
698 return; 738 return_ACPI_STATUS(status);
699 } 739 }
700 740
701 /* Mutex locked flags */ 741 /* Mutex locked flags */
@@ -772,8 +812,8 @@ void acpi_ut_init_globals(void)
772 acpi_gbl_display_final_mem_stats = FALSE; 812 acpi_gbl_display_final_mem_stats = FALSE;
773#endif 813#endif
774 814
775 return_VOID; 815 return_ACPI_STATUS(AE_OK);
776} 816}
777 817
778ACPI_EXPORT_SYMBOL(acpi_dbg_level) 818ACPI_EXPORT_SYMBOL(acpi_dbg_level)
779 ACPI_EXPORT_SYMBOL(acpi_dbg_layer) 819ACPI_EXPORT_SYMBOL(acpi_dbg_layer)
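
acpi_ut_get_reference_name() added above is a bounds-checked lookup into a per-class name table, used later by utobject.c to print readable reference classes in error messages. The same pattern in stand-alone form; the enum values and strings here are illustrative, not the ACPI_REFCLASS_* definitions.

    /* Stand-alone model of a bounds-checked class-name lookup; values are illustrative. */
    #include <stdio.h>

    enum ref_class {
        REFCLASS_LOCAL,
        REFCLASS_ARG,
        REFCLASS_REFOF,
        REFCLASS_INDEX,
        REFCLASS_TABLE,
        REFCLASS_NAME,
        REFCLASS_DEBUG,
        REFCLASS_MAX = REFCLASS_DEBUG
    };

    static const char *ref_class_names[] = {
        "Local", "Argument", "RefOf", "Index", "DdbHandle", "Named Object", "Debug"
    };

    static const char *get_reference_name(int cls)
    {
        if (cls < 0 || cls > REFCLASS_MAX)
            return "Unknown Reference class";   /* never index past the table */
        return ref_class_names[cls];
    }

    int main(void)
    {
        printf("%s\n", get_reference_name(REFCLASS_NAME));   /* Named Object */
        printf("%s\n", get_reference_name(42));              /* Unknown Reference class */
        return 0;
    }
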
diff --git a/drivers/acpi/utilities/utmisc.c b/drivers/acpi/utilities/utmisc.c
index f34be6773556..9089a158a874 100644
--- a/drivers/acpi/utilities/utmisc.c
+++ b/drivers/acpi/utilities/utmisc.c
@@ -995,6 +995,15 @@ acpi_ut_walk_package_tree(union acpi_operand_object * source_object,
995 state->pkg. 995 state->pkg.
996 this_target_obj, 0); 996 this_target_obj, 0);
997 if (!state) { 997 if (!state) {
998
999 /* Free any stacked Update State objects */
1000
1001 while (state_list) {
1002 state =
1003 acpi_ut_pop_generic_state
1004 (&state_list);
1005 acpi_ut_delete_generic_state(state);
1006 }
998 return_ACPI_STATUS(AE_NO_MEMORY); 1007 return_ACPI_STATUS(AE_NO_MEMORY);
999 } 1008 }
1000 } 1009 }
diff --git a/drivers/acpi/utilities/utobject.c b/drivers/acpi/utilities/utobject.c
index 916eff399eb3..c354e7a42bcd 100644
--- a/drivers/acpi/utilities/utobject.c
+++ b/drivers/acpi/utilities/utobject.c
@@ -43,7 +43,6 @@
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acnamesp.h> 45#include <acpi/acnamesp.h>
46#include <acpi/amlcode.h>
47 46
48#define _COMPONENT ACPI_UTILITIES 47#define _COMPONENT ACPI_UTILITIES
49ACPI_MODULE_NAME("utobject") 48ACPI_MODULE_NAME("utobject")
@@ -478,8 +477,8 @@ acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object,
478 477
479 case ACPI_TYPE_LOCAL_REFERENCE: 478 case ACPI_TYPE_LOCAL_REFERENCE:
480 479
481 switch (internal_object->reference.opcode) { 480 switch (internal_object->reference.class) {
482 case AML_INT_NAMEPATH_OP: 481 case ACPI_REFCLASS_NAME:
483 482
484 /* 483 /*
485 * Get the actual length of the full pathname to this object. 484 * Get the actual length of the full pathname to this object.
@@ -503,8 +502,10 @@ acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object,
503 * required eventually. 502 * required eventually.
504 */ 503 */
505 ACPI_ERROR((AE_INFO, 504 ACPI_ERROR((AE_INFO,
506 "Unsupported Reference opcode=%X in object %p", 505 "Cannot convert to external object - "
507 internal_object->reference.opcode, 506 "unsupported Reference Class [%s] %X in object %p",
507 acpi_ut_get_reference_name(internal_object),
508 internal_object->reference.class,
508 internal_object)); 509 internal_object));
509 status = AE_TYPE; 510 status = AE_TYPE;
510 break; 511 break;
@@ -513,7 +514,9 @@ acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object,
513 514
514 default: 515 default:
515 516
516 ACPI_ERROR((AE_INFO, "Unsupported type=%X in object %p", 517 ACPI_ERROR((AE_INFO, "Cannot convert to external object - "
518 "unsupported type [%s] %X in object %p",
519 acpi_ut_get_object_type_name(internal_object),
517 ACPI_GET_OBJECT_TYPE(internal_object), 520 ACPI_GET_OBJECT_TYPE(internal_object),
518 internal_object)); 521 internal_object));
519 status = AE_TYPE; 522 status = AE_TYPE;
diff --git a/drivers/acpi/utilities/utxface.c b/drivers/acpi/utilities/utxface.c
index f8bdadf3c32f..c198a4d40583 100644
--- a/drivers/acpi/utilities/utxface.c
+++ b/drivers/acpi/utilities/utxface.c
@@ -81,7 +81,12 @@ acpi_status __init acpi_initialize_subsystem(void)
81 81
82 /* Initialize all globals used by the subsystem */ 82 /* Initialize all globals used by the subsystem */
83 83
84 acpi_ut_init_globals(); 84 status = acpi_ut_init_globals();
85 if (ACPI_FAILURE(status)) {
86 ACPI_EXCEPTION((AE_INFO, status,
87 "During initialization of globals"));
88 return_ACPI_STATUS(status);
89 }
85 90
86 /* Create the default mutex objects */ 91 /* Create the default mutex objects */
87 92
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 100926143818..f844941089bb 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -256,39 +256,31 @@ EXPORT_SYMBOL(acpi_extract_package);
256acpi_status 256acpi_status
257acpi_evaluate_integer(acpi_handle handle, 257acpi_evaluate_integer(acpi_handle handle,
258 acpi_string pathname, 258 acpi_string pathname,
259 struct acpi_object_list *arguments, unsigned long *data) 259 struct acpi_object_list *arguments, unsigned long long *data)
260{ 260{
261 acpi_status status = AE_OK; 261 acpi_status status = AE_OK;
262 union acpi_object *element; 262 union acpi_object element;
263 struct acpi_buffer buffer = { 0, NULL }; 263 struct acpi_buffer buffer = { 0, NULL };
264 264
265
266 if (!data) 265 if (!data)
267 return AE_BAD_PARAMETER; 266 return AE_BAD_PARAMETER;
268 267
269 element = kzalloc(sizeof(union acpi_object), irqs_disabled() ? GFP_ATOMIC: GFP_KERNEL);
270 if (!element)
271 return AE_NO_MEMORY;
272
273 buffer.length = sizeof(union acpi_object); 268 buffer.length = sizeof(union acpi_object);
274 buffer.pointer = element; 269 buffer.pointer = &element;
275 status = acpi_evaluate_object(handle, pathname, arguments, &buffer); 270 status = acpi_evaluate_object(handle, pathname, arguments, &buffer);
276 if (ACPI_FAILURE(status)) { 271 if (ACPI_FAILURE(status)) {
277 acpi_util_eval_error(handle, pathname, status); 272 acpi_util_eval_error(handle, pathname, status);
278 kfree(element);
279 return status; 273 return status;
280 } 274 }
281 275
282 if (element->type != ACPI_TYPE_INTEGER) { 276 if (element.type != ACPI_TYPE_INTEGER) {
283 acpi_util_eval_error(handle, pathname, AE_BAD_DATA); 277 acpi_util_eval_error(handle, pathname, AE_BAD_DATA);
284 kfree(element);
285 return AE_BAD_DATA; 278 return AE_BAD_DATA;
286 } 279 }
287 280
288 *data = element->integer.value; 281 *data = element.integer.value;
289 kfree(element);
290 282
291 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Return value [%lu]\n", *data)); 283 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Return value [%llu]\n", *data));
292 284
293 return AE_OK; 285 return AE_OK;
294} 286}
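
The utils.c hunk is the root of the unsigned long long churn elsewhere: acpi_evaluate_integer() now returns a 64-bit value and evaluates into a stack-allocated union acpi_object instead of a kzalloc'd one, so the GFP_ATOMIC/GFP_KERNEL choice and the kfree error paths disappear. A stand-alone model of the "fixed-size result object on the stack" shape; the types and the evaluator below are stand-ins, not the ACPI interfaces.

    /* Stand-alone model of evaluating into a stack-allocated result object. */
    #include <stdio.h>

    #define TYPE_INTEGER 1

    union object {                       /* stand-in for the evaluator's result union */
        int type;
        struct { int type; unsigned long long value; } integer;
    };

    struct out_buffer {
        unsigned long size;
        void *pointer;
    };

    /* Stub evaluator: fills the caller-supplied buffer with an integer object. */
    static int evaluate(struct out_buffer *buf)
    {
        union object *obj = buf->pointer;
        obj->integer.type = TYPE_INTEGER;
        obj->integer.value = 0xdeadbeefULL;
        return 0;
    }

    static int evaluate_integer(unsigned long long *data)
    {
        union object element;                          /* no heap allocation needed */
        struct out_buffer buffer = { sizeof(element), &element };

        if (evaluate(&buffer) != 0)
            return -1;
        if (element.type != TYPE_INTEGER)
            return -1;                                 /* nothing to free on error */

        *data = element.integer.value;
        return 0;
    }

    int main(void)
    {
        unsigned long long v;
        if (evaluate_integer(&v) == 0)
            printf("value: %llx\n", v);
        return 0;
    }
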
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index e8a51a1700f7..baa441929720 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -41,7 +41,6 @@
41#include <acpi/acpi_bus.h> 41#include <acpi/acpi_bus.h>
42#include <acpi/acpi_drivers.h> 42#include <acpi/acpi_drivers.h>
43 43
44#define ACPI_VIDEO_COMPONENT 0x08000000
45#define ACPI_VIDEO_CLASS "video" 44#define ACPI_VIDEO_CLASS "video"
46#define ACPI_VIDEO_BUS_NAME "Video Bus" 45#define ACPI_VIDEO_BUS_NAME "Video Bus"
47#define ACPI_VIDEO_DEVICE_NAME "Video Device" 46#define ACPI_VIDEO_DEVICE_NAME "Video Device"
@@ -291,20 +290,20 @@ static int acpi_video_device_lcd_set_level(struct acpi_video_device *device,
291 int level); 290 int level);
292static int acpi_video_device_lcd_get_level_current( 291static int acpi_video_device_lcd_get_level_current(
293 struct acpi_video_device *device, 292 struct acpi_video_device *device,
294 unsigned long *level); 293 unsigned long long *level);
295static int acpi_video_get_next_level(struct acpi_video_device *device, 294static int acpi_video_get_next_level(struct acpi_video_device *device,
296 u32 level_current, u32 event); 295 u32 level_current, u32 event);
297static void acpi_video_switch_brightness(struct acpi_video_device *device, 296static void acpi_video_switch_brightness(struct acpi_video_device *device,
298 int event); 297 int event);
299static int acpi_video_device_get_state(struct acpi_video_device *device, 298static int acpi_video_device_get_state(struct acpi_video_device *device,
300 unsigned long *state); 299 unsigned long long *state);
301static int acpi_video_output_get(struct output_device *od); 300static int acpi_video_output_get(struct output_device *od);
302static int acpi_video_device_set_state(struct acpi_video_device *device, int state); 301static int acpi_video_device_set_state(struct acpi_video_device *device, int state);
303 302
304/*backlight device sysfs support*/ 303/*backlight device sysfs support*/
305static int acpi_video_get_brightness(struct backlight_device *bd) 304static int acpi_video_get_brightness(struct backlight_device *bd)
306{ 305{
307 unsigned long cur_level; 306 unsigned long long cur_level;
308 int i; 307 int i;
309 struct acpi_video_device *vd = 308 struct acpi_video_device *vd =
310 (struct acpi_video_device *)bl_get_data(bd); 309 (struct acpi_video_device *)bl_get_data(bd);
@@ -336,7 +335,7 @@ static struct backlight_ops acpi_backlight_ops = {
336/*video output device sysfs support*/ 335/*video output device sysfs support*/
337static int acpi_video_output_get(struct output_device *od) 336static int acpi_video_output_get(struct output_device *od)
338{ 337{
339 unsigned long state; 338 unsigned long long state;
340 struct acpi_video_device *vd = 339 struct acpi_video_device *vd =
341 (struct acpi_video_device *)dev_get_drvdata(&od->dev); 340 (struct acpi_video_device *)dev_get_drvdata(&od->dev);
342 acpi_video_device_get_state(vd, &state); 341 acpi_video_device_get_state(vd, &state);
@@ -370,7 +369,7 @@ static int video_get_cur_state(struct thermal_cooling_device *cdev, char *buf)
370{ 369{
371 struct acpi_device *device = cdev->devdata; 370 struct acpi_device *device = cdev->devdata;
372 struct acpi_video_device *video = acpi_driver_data(device); 371 struct acpi_video_device *video = acpi_driver_data(device);
373 unsigned long level; 372 unsigned long long level;
374 int state; 373 int state;
375 374
376 acpi_video_device_lcd_get_level_current(video, &level); 375 acpi_video_device_lcd_get_level_current(video, &level);
@@ -410,7 +409,7 @@ static struct thermal_cooling_device_ops video_cooling_ops = {
410/* device */ 409/* device */
411 410
412static int 411static int
413acpi_video_device_query(struct acpi_video_device *device, unsigned long *state) 412acpi_video_device_query(struct acpi_video_device *device, unsigned long long *state)
414{ 413{
415 int status; 414 int status;
416 415
@@ -421,7 +420,7 @@ acpi_video_device_query(struct acpi_video_device *device, unsigned long *state)
421 420
422static int 421static int
423acpi_video_device_get_state(struct acpi_video_device *device, 422acpi_video_device_get_state(struct acpi_video_device *device,
424 unsigned long *state) 423 unsigned long long *state)
425{ 424{
426 int status; 425 int status;
427 426
@@ -436,7 +435,7 @@ acpi_video_device_set_state(struct acpi_video_device *device, int state)
436 int status; 435 int status;
437 union acpi_object arg0 = { ACPI_TYPE_INTEGER }; 436 union acpi_object arg0 = { ACPI_TYPE_INTEGER };
438 struct acpi_object_list args = { 1, &arg0 }; 437 struct acpi_object_list args = { 1, &arg0 };
439 unsigned long ret; 438 unsigned long long ret;
440 439
441 440
442 arg0.integer.value = state; 441 arg0.integer.value = state;
@@ -495,7 +494,7 @@ acpi_video_device_lcd_set_level(struct acpi_video_device *device, int level)
495 494
496static int 495static int
497acpi_video_device_lcd_get_level_current(struct acpi_video_device *device, 496acpi_video_device_lcd_get_level_current(struct acpi_video_device *device,
498 unsigned long *level) 497 unsigned long long *level)
499{ 498{
500 if (device->cap._BQC) 499 if (device->cap._BQC)
501 return acpi_evaluate_integer(device->dev->handle, "_BQC", NULL, 500 return acpi_evaluate_integer(device->dev->handle, "_BQC", NULL,
@@ -549,7 +548,7 @@ static int
549acpi_video_bus_set_POST(struct acpi_video_bus *video, unsigned long option) 548acpi_video_bus_set_POST(struct acpi_video_bus *video, unsigned long option)
550{ 549{
551 int status; 550 int status;
552 unsigned long tmp; 551 unsigned long long tmp;
553 union acpi_object arg0 = { ACPI_TYPE_INTEGER }; 552 union acpi_object arg0 = { ACPI_TYPE_INTEGER };
554 struct acpi_object_list args = { 1, &arg0 }; 553 struct acpi_object_list args = { 1, &arg0 };
555 554
@@ -564,7 +563,7 @@ acpi_video_bus_set_POST(struct acpi_video_bus *video, unsigned long option)
564} 563}
565 564
566static int 565static int
567acpi_video_bus_get_POST(struct acpi_video_bus *video, unsigned long *id) 566acpi_video_bus_get_POST(struct acpi_video_bus *video, unsigned long long *id)
568{ 567{
569 int status; 568 int status;
570 569
@@ -575,7 +574,7 @@ acpi_video_bus_get_POST(struct acpi_video_bus *video, unsigned long *id)
575 574
576static int 575static int
577acpi_video_bus_POST_options(struct acpi_video_bus *video, 576acpi_video_bus_POST_options(struct acpi_video_bus *video,
578 unsigned long *options) 577 unsigned long long *options)
579{ 578{
580 int status; 579 int status;
581 580
@@ -739,7 +738,8 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
739 device->cap._DSS = 1; 738 device->cap._DSS = 1;
740 } 739 }
741 740
742 max_level = acpi_video_init_brightness(device); 741 if (acpi_video_backlight_support())
742 max_level = acpi_video_init_brightness(device);
743 743
744 if (device->cap._BCL && device->cap._BCM && max_level > 0) { 744 if (device->cap._BCL && device->cap._BCM && max_level > 0) {
745 int result; 745 int result;
@@ -785,18 +785,21 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
785 printk(KERN_ERR PREFIX "Create sysfs link\n"); 785 printk(KERN_ERR PREFIX "Create sysfs link\n");
786 786
787 } 787 }
788 if (device->cap._DCS && device->cap._DSS){ 788
789 static int count = 0; 789 if (acpi_video_display_switch_support()) {
790 char *name; 790
791 name = kzalloc(MAX_NAME_LEN, GFP_KERNEL); 791 if (device->cap._DCS && device->cap._DSS) {
792 if (!name) 792 static int count;
793 return; 793 char *name;
794 sprintf(name, "acpi_video%d", count++); 794 name = kzalloc(MAX_NAME_LEN, GFP_KERNEL);
795 device->output_dev = video_output_register(name, 795 if (!name)
796 NULL, device, &acpi_output_properties); 796 return;
797 kfree(name); 797 sprintf(name, "acpi_video%d", count++);
798 device->output_dev = video_output_register(name,
799 NULL, device, &acpi_output_properties);
800 kfree(name);
801 }
798 } 802 }
799 return;
800} 803}
801 804
802/* 805/*
@@ -842,11 +845,16 @@ static void acpi_video_bus_find_cap(struct acpi_video_bus *video)
842static int acpi_video_bus_check(struct acpi_video_bus *video) 845static int acpi_video_bus_check(struct acpi_video_bus *video)
843{ 846{
844 acpi_status status = -ENOENT; 847 acpi_status status = -ENOENT;
845 848 struct device *dev;
846 849
847 if (!video) 850 if (!video)
848 return -EINVAL; 851 return -EINVAL;
849 852
853 dev = acpi_get_physical_pci_device(video->device->handle);
854 if (!dev)
855 return -ENODEV;
856 put_device(dev);
857
850 /* Since there is no HID, CID and so on for VGA driver, we have 858 /* Since there is no HID, CID and so on for VGA driver, we have
851 * to check well known required nodes. 859 * to check well known required nodes.
852 */ 860 */
@@ -918,7 +926,7 @@ static int acpi_video_device_state_seq_show(struct seq_file *seq, void *offset)
918{ 926{
919 int status; 927 int status;
920 struct acpi_video_device *dev = seq->private; 928 struct acpi_video_device *dev = seq->private;
921 unsigned long state; 929 unsigned long long state;
922 930
923 931
924 if (!dev) 932 if (!dev)
@@ -927,14 +935,14 @@ static int acpi_video_device_state_seq_show(struct seq_file *seq, void *offset)
927 status = acpi_video_device_get_state(dev, &state); 935 status = acpi_video_device_get_state(dev, &state);
928 seq_printf(seq, "state: "); 936 seq_printf(seq, "state: ");
929 if (ACPI_SUCCESS(status)) 937 if (ACPI_SUCCESS(status))
930 seq_printf(seq, "0x%02lx\n", state); 938 seq_printf(seq, "0x%02llx\n", state);
931 else 939 else
932 seq_printf(seq, "<not supported>\n"); 940 seq_printf(seq, "<not supported>\n");
933 941
934 status = acpi_video_device_query(dev, &state); 942 status = acpi_video_device_query(dev, &state);
935 seq_printf(seq, "query: "); 943 seq_printf(seq, "query: ");
936 if (ACPI_SUCCESS(status)) 944 if (ACPI_SUCCESS(status))
937 seq_printf(seq, "0x%02lx\n", state); 945 seq_printf(seq, "0x%02llx\n", state);
938 else 946 else
939 seq_printf(seq, "<not supported>\n"); 947 seq_printf(seq, "<not supported>\n");
940 948
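
With the state variables widened to unsigned long long, the video.c format strings switch from %lx to %llx; mixing the two is a real bug on 32-bit builds where unsigned long is only 32 bits. A two-line stand-alone reminder of the matching specifiers:

    /* Matching format specifiers for the widened types used above. */
    #include <stdio.h>

    int main(void)
    {
        unsigned long      narrow = 0x2UL;
        unsigned long long wide   = 0x2ULL;

        printf("0x%02lx\n", narrow);    /* %lx pairs with unsigned long */
        printf("0x%02llx\n", wide);     /* %llx pairs with unsigned long long */
        return 0;
    }
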
@@ -1217,7 +1225,7 @@ static int acpi_video_bus_ROM_open_fs(struct inode *inode, struct file *file)
1217static int acpi_video_bus_POST_info_seq_show(struct seq_file *seq, void *offset) 1225static int acpi_video_bus_POST_info_seq_show(struct seq_file *seq, void *offset)
1218{ 1226{
1219 struct acpi_video_bus *video = seq->private; 1227 struct acpi_video_bus *video = seq->private;
1220 unsigned long options; 1228 unsigned long long options;
1221 int status; 1229 int status;
1222 1230
1223 1231
@@ -1232,7 +1240,7 @@ static int acpi_video_bus_POST_info_seq_show(struct seq_file *seq, void *offset)
1232 printk(KERN_WARNING PREFIX 1240 printk(KERN_WARNING PREFIX
1233 "This indicates a BIOS bug. Please contact the manufacturer.\n"); 1241 "This indicates a BIOS bug. Please contact the manufacturer.\n");
1234 } 1242 }
1235 printk("%lx\n", options); 1243 printk("%llx\n", options);
1236 seq_printf(seq, "can POST: <integrated video>"); 1244 seq_printf(seq, "can POST: <integrated video>");
1237 if (options & 2) 1245 if (options & 2)
1238 seq_printf(seq, " <PCI video>"); 1246 seq_printf(seq, " <PCI video>");
@@ -1256,7 +1264,7 @@ static int acpi_video_bus_POST_seq_show(struct seq_file *seq, void *offset)
1256{ 1264{
1257 struct acpi_video_bus *video = seq->private; 1265 struct acpi_video_bus *video = seq->private;
1258 int status; 1266 int status;
1259 unsigned long id; 1267 unsigned long long id;
1260 1268
1261 1269
1262 if (!video) 1270 if (!video)
@@ -1303,7 +1311,7 @@ acpi_video_bus_write_POST(struct file *file,
1303 struct seq_file *m = file->private_data; 1311 struct seq_file *m = file->private_data;
1304 struct acpi_video_bus *video = m->private; 1312 struct acpi_video_bus *video = m->private;
1305 char str[12] = { 0 }; 1313 char str[12] = { 0 };
1306 unsigned long opt, options; 1314 unsigned long long opt, options;
1307 1315
1308 1316
1309 if (!video || count + 1 > sizeof str) 1317 if (!video || count + 1 > sizeof str)
@@ -1473,7 +1481,7 @@ static int
1473acpi_video_bus_get_one_device(struct acpi_device *device, 1481acpi_video_bus_get_one_device(struct acpi_device *device,
1474 struct acpi_video_bus *video) 1482 struct acpi_video_bus *video)
1475{ 1483{
1476 unsigned long device_id; 1484 unsigned long long device_id;
1477 int status; 1485 int status;
1478 struct acpi_video_device *data; 1486 struct acpi_video_device *data;
1479 struct acpi_video_device_attrib* attribute; 1487 struct acpi_video_device_attrib* attribute;
@@ -1491,7 +1499,7 @@ acpi_video_bus_get_one_device(struct acpi_device *device,
1491 1499
1492 strcpy(acpi_device_name(device), ACPI_VIDEO_DEVICE_NAME); 1500 strcpy(acpi_device_name(device), ACPI_VIDEO_DEVICE_NAME);
1493 strcpy(acpi_device_class(device), ACPI_VIDEO_CLASS); 1501 strcpy(acpi_device_class(device), ACPI_VIDEO_CLASS);
1494 acpi_driver_data(device) = data; 1502 device->driver_data = data;
1495 1503
1496 data->device_id = device_id; 1504 data->device_id = device_id;
1497 data->video = video; 1505 data->video = video;
@@ -1530,8 +1538,8 @@ acpi_video_bus_get_one_device(struct acpi_device *device,
1530 acpi_video_device_notify, 1538 acpi_video_device_notify,
1531 data); 1539 data);
1532 if (ACPI_FAILURE(status)) { 1540 if (ACPI_FAILURE(status)) {
1533 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, 1541 printk(KERN_ERR PREFIX
1534 "Error installing notify handler\n")); 1542 "Error installing notify handler\n");
1535 if(data->brightness) 1543 if(data->brightness)
1536 kfree(data->brightness->levels); 1544 kfree(data->brightness->levels);
1537 kfree(data->brightness); 1545 kfree(data->brightness);
@@ -1724,7 +1732,7 @@ acpi_video_get_next_level(struct acpi_video_device *device,
1724static void 1732static void
1725acpi_video_switch_brightness(struct acpi_video_device *device, int event) 1733acpi_video_switch_brightness(struct acpi_video_device *device, int event)
1726{ 1734{
1727 unsigned long level_current, level_next; 1735 unsigned long long level_current, level_next;
1728 if (!device->brightness) 1736 if (!device->brightness)
1729 return; 1737 return;
1730 acpi_video_device_lcd_get_level_current(device, &level_current); 1738 acpi_video_device_lcd_get_level_current(device, &level_current);
@@ -1745,8 +1753,8 @@ acpi_video_bus_get_devices(struct acpi_video_bus *video,
1745 1753
1746 status = acpi_video_bus_get_one_device(dev, video); 1754 status = acpi_video_bus_get_one_device(dev, video);
1747 if (ACPI_FAILURE(status)) { 1755 if (ACPI_FAILURE(status)) {
1748 ACPI_DEBUG_PRINT((ACPI_DB_WARN, 1756 printk(KERN_WARNING PREFIX
1749 "Cant attach device")); 1757 "Cant attach device");
1750 continue; 1758 continue;
1751 } 1759 }
1752 } 1760 }
@@ -1982,7 +1990,7 @@ static int acpi_video_bus_add(struct acpi_device *device)
1982 video->device = device; 1990 video->device = device;
1983 strcpy(acpi_device_name(device), ACPI_VIDEO_BUS_NAME); 1991 strcpy(acpi_device_name(device), ACPI_VIDEO_BUS_NAME);
1984 strcpy(acpi_device_class(device), ACPI_VIDEO_CLASS); 1992 strcpy(acpi_device_class(device), ACPI_VIDEO_CLASS);
1985 acpi_driver_data(device) = video; 1993 device->driver_data = video;
1986 1994
1987 acpi_video_bus_find_cap(video); 1995 acpi_video_bus_find_cap(video);
1988 error = acpi_video_bus_check(video); 1996 error = acpi_video_bus_check(video);
@@ -2003,8 +2011,8 @@ static int acpi_video_bus_add(struct acpi_device *device)
2003 ACPI_DEVICE_NOTIFY, 2011 ACPI_DEVICE_NOTIFY,
2004 acpi_video_bus_notify, video); 2012 acpi_video_bus_notify, video);
2005 if (ACPI_FAILURE(status)) { 2013 if (ACPI_FAILURE(status)) {
2006 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, 2014 printk(KERN_ERR PREFIX
2007 "Error installing notify handler\n")); 2015 "Error installing notify handler\n");
2008 error = -ENODEV; 2016 error = -ENODEV;
2009 goto err_stop_video; 2017 goto err_stop_video;
2010 } 2018 }
@@ -2058,7 +2066,7 @@ static int acpi_video_bus_add(struct acpi_device *device)
2058 acpi_video_bus_remove_fs(device); 2066 acpi_video_bus_remove_fs(device);
2059 err_free_video: 2067 err_free_video:
2060 kfree(video); 2068 kfree(video);
2061 acpi_driver_data(device) = NULL; 2069 device->driver_data = NULL;
2062 2070
2063 return error; 2071 return error;
2064} 2072}
@@ -2094,12 +2102,6 @@ static int __init acpi_video_init(void)
2094{ 2102{
2095 int result = 0; 2103 int result = 0;
2096 2104
2097
2098 /*
2099 acpi_dbg_level = 0xFFFFFFFF;
2100 acpi_dbg_layer = 0x08000000;
2101 */
2102
2103 acpi_video_dir = proc_mkdir(ACPI_VIDEO_CLASS, acpi_root_dir); 2105 acpi_video_dir = proc_mkdir(ACPI_VIDEO_CLASS, acpi_root_dir);
2104 if (!acpi_video_dir) 2106 if (!acpi_video_dir)
2105 return -ENODEV; 2107 return -ENODEV;
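The unsigned long -> unsigned long long conversions and the matching %lx -> %llx format changes in the hunks above follow ACPICA's move to a fixed 64-bit acpi_integer: acpi_evaluate_integer() and the acpi_video_device_* helpers built on it now fill in an unsigned long long. A minimal sketch of the calling convention after this change (the _DCS method and the function name are illustrative only, not code from this patch):

#include <linux/acpi.h>
#include <linux/errno.h>

static int example_read_dcs(acpi_handle handle)
{
	acpi_status status;
	unsigned long long state;	/* was "unsigned long" before this series */

	/* _DCS returns the status of an ACPI video output device */
	status = acpi_evaluate_integer(handle, "_DCS", NULL, &state);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	printk(KERN_DEBUG "device state: 0x%02llx\n", state);
	return 0;
}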
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
new file mode 100644
index 000000000000..f022eb6f5637
--- /dev/null
+++ b/drivers/acpi/video_detect.c
@@ -0,0 +1,267 @@
1/*
2 * Copyright (C) 2008 SuSE Linux Products GmbH
3 * Thomas Renninger <trenn@suse.de>
4 *
5 * May be copied or modified under the terms of the GNU General Public License
6 *
7 * video_detect.c:
 8 * Provides acpi_is_video_device() for early scanning of ACPI devices in scan.c,
 9 * where a Linux specific CID (the spec does not provide a HID for video devices)
 10 * is assigned
11 *
12 * After PCI devices are glued with ACPI devices
13 * acpi_get_physical_pci_device() can be called to identify ACPI graphics
14 * devices for which a real graphics card is plugged in
15 *
 16 * Now acpi_video_get_capabilities() can be called to check which capabilities
 17 * the plugged-in graphics cards support. The check for general video
 18 * capabilities is triggered by the first caller of
 19 * acpi_video_get_capabilities(NULL), which happens when the first driver
 20 * supporting backlight (or display output) switching calls:
21 * acpi_video_backlight_support();
22 *
23 * Depending on whether ACPI graphics extensions (cmp. ACPI spec Appendix B)
24 * are available, video.ko should be used to handle the device.
25 *
26 * Otherwise vendor specific drivers like thinkpad_acpi, asus_acpi,
 27 * sony_acpi,... can take care of backlight brightness and display output
 28 * switching.
29 *
 30 * If CONFIG_ACPI_VIDEO is neither set as "compiled in" (y) nor as a module (m),
 31 * this file will not be compiled; acpi_video_get_capabilities() and
 32 * acpi_video_backlight_support() will then always return 0 and vendor specific
 33 * drivers can always handle the backlight.
34 *
35 */
36
37#include <linux/acpi.h>
38#include <linux/dmi.h>
39
40ACPI_MODULE_NAME("video");
41#define _COMPONENT ACPI_VIDEO_COMPONENT
42
43static long acpi_video_support;
44static bool acpi_video_caps_checked;
45
46static acpi_status
47acpi_backlight_cap_match(acpi_handle handle, u32 level, void *context,
 48 void **return_value)
49{
50 long *cap = context;
51 acpi_handle h_dummy;
52
53 if (ACPI_SUCCESS(acpi_get_handle(handle, "_BCM", &h_dummy)) &&
54 ACPI_SUCCESS(acpi_get_handle(handle, "_BCL", &h_dummy))) {
55 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found generic backlight "
56 "support\n"));
57 *cap |= ACPI_VIDEO_BACKLIGHT;
58 /* We have backlight support, no need to scan further */
59 return AE_CTRL_TERMINATE;
60 }
61 return 0;
62}
63
64/* Returns true if the device is a video device which can be handled by
65 * video.ko.
66 * The device will get a Linux specific CID added in scan.c to
67 * identify the device as an ACPI graphics device
 68 * Be aware that the graphics device may not be physically present.
 69 * Use acpi_video_get_capabilities() to detect the general ACPI video
 70 * capabilities of present cards.
71 */
72long acpi_is_video_device(struct acpi_device *device)
73{
74 acpi_handle h_dummy;
75 long video_caps = 0;
76
77 if (!device)
78 return 0;
79
 80 /* Is this device able to support video switching? */
81 if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOD", &h_dummy)) &&
82 ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOS", &h_dummy)))
83 video_caps |= ACPI_VIDEO_OUTPUT_SWITCHING;
84
 85 /* Is this device able to retrieve a video ROM? */
86 if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_ROM", &h_dummy)))
87 video_caps |= ACPI_VIDEO_ROM_AVAILABLE;
88
 89 /* Is this device able to configure which video head is to be POSTed? */
90 if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_VPO", &h_dummy)) &&
91 ACPI_SUCCESS(acpi_get_handle(device->handle, "_GPD", &h_dummy)) &&
92 ACPI_SUCCESS(acpi_get_handle(device->handle, "_SPD", &h_dummy)))
93 video_caps |= ACPI_VIDEO_DEVICE_POSTING;
94
95 /* Only check for backlight functionality if one of the above hit. */
96 if (video_caps)
97 acpi_walk_namespace(ACPI_TYPE_DEVICE, device->handle,
98 ACPI_UINT32_MAX, acpi_backlight_cap_match,
99 &video_caps, NULL);
100
101 return video_caps;
102}
103EXPORT_SYMBOL(acpi_is_video_device);
104
105static acpi_status
106find_video(acpi_handle handle, u32 lvl, void *context, void **rv)
107{
108 long *cap = context;
109 struct device *dev;
110 struct acpi_device *acpi_dev;
111
112 const struct acpi_device_id video_ids[] = {
113 {ACPI_VIDEO_HID, 0},
114 {"", 0},
115 };
116 if (acpi_bus_get_device(handle, &acpi_dev))
117 return AE_OK;
118
119 if (!acpi_match_device_ids(acpi_dev, video_ids)) {
120 dev = acpi_get_physical_pci_device(handle);
121 if (!dev)
122 return AE_OK;
123 put_device(dev);
124 *cap |= acpi_is_video_device(acpi_dev);
125 }
126 return AE_OK;
127}
128
129/*
130 * Returns the video capabilities of a specific ACPI graphics device
131 *
 132 * If NULL is passed as the argument, all ACPI devices are enumerated and
 133 * the graphics capabilities of all physically present devices are
 134 * summarized and returned. This is cached and done only once.
135 */
136long acpi_video_get_capabilities(acpi_handle graphics_handle)
137{
138 long caps = 0;
139 struct acpi_device *tmp_dev;
140 acpi_status status;
141
142 if (acpi_video_caps_checked && graphics_handle == NULL)
143 return acpi_video_support;
144
145 if (!graphics_handle) {
146 /* Only do the global walk through all graphics devices once */
147 acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
148 ACPI_UINT32_MAX, find_video,
149 &caps, NULL);
150 /* There might be boot param flags set already... */
151 acpi_video_support |= caps;
152 acpi_video_caps_checked = 1;
153 /* Add blacklists here. Be careful to use the right *DMI* bits
154 * to still be able to override logic via boot params, e.g.:
155 *
156 * if (dmi_name_in_vendors("XY")) {
157 * acpi_video_support |=
158 * ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VENDOR;
159 * acpi_video_support |=
160 * ACPI_VIDEO_BACKLIGHT_DMI_VENDOR;
161 *}
162 */
163 } else {
164 status = acpi_bus_get_device(graphics_handle, &tmp_dev);
165 if (ACPI_FAILURE(status)) {
166 ACPI_EXCEPTION((AE_INFO, status, "Invalid device"));
167 return 0;
168 }
169 acpi_walk_namespace(ACPI_TYPE_DEVICE, graphics_handle,
170 ACPI_UINT32_MAX, find_video,
171 &caps, NULL);
172 }
173 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "We have 0x%lX video support %s %s\n",
174 graphics_handle ? caps : acpi_video_support,
175 graphics_handle ? "on device " : "in general",
176 graphics_handle ? acpi_device_bid(tmp_dev) : ""));
177 return caps;
178}
179EXPORT_SYMBOL(acpi_video_get_capabilities);
180
181/* Returns true if video.ko can do backlight switching */
182int acpi_video_backlight_support(void)
183{
184 /*
185 * We must check whether the ACPI graphics device is physically plugged
186 * in. Therefore this must be called after binding PCI and ACPI devices
187 */
188 if (!acpi_video_caps_checked)
189 acpi_video_get_capabilities(NULL);
190
191 /* First check for boot param -> highest prio */
192 if (acpi_video_support & ACPI_VIDEO_BACKLIGHT_FORCE_VENDOR)
193 return 0;
194 else if (acpi_video_support & ACPI_VIDEO_BACKLIGHT_FORCE_VIDEO)
195 return 1;
196
197 /* Then check for DMI blacklist -> second highest prio */
198 if (acpi_video_support & ACPI_VIDEO_BACKLIGHT_DMI_VENDOR)
199 return 0;
200 else if (acpi_video_support & ACPI_VIDEO_BACKLIGHT_DMI_VIDEO)
201 return 1;
202
203 /* Then go the default way */
204 return acpi_video_support & ACPI_VIDEO_BACKLIGHT;
205}
206EXPORT_SYMBOL(acpi_video_backlight_support);
207
208/*
209 * Returns true if video.ko can do display output switching.
210 * This does not work well/at all with binary graphics drivers
211 * which disable system io ranges and do it on their own.
212 */
213int acpi_video_display_switch_support(void)
214{
215 if (!acpi_video_caps_checked)
216 acpi_video_get_capabilities(NULL);
217
218 if (acpi_video_support & ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VENDOR)
219 return 0;
220 else if (acpi_video_support & ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VIDEO)
221 return 1;
222
223 if (acpi_video_support & ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VENDOR)
224 return 0;
225 else if (acpi_video_support & ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VIDEO)
226 return 1;
227
228 return acpi_video_support & ACPI_VIDEO_OUTPUT_SWITCHING;
229}
230EXPORT_SYMBOL(acpi_video_display_switch_support);
231
232/*
233 * Use acpi_display_output=vendor/video or acpi_backlight=vendor/video
 234 * to force backlight or display output switching to be handled by vendor
 235 * specific ACPI drivers or by the video.ko driver.
236 */
237int __init acpi_backlight(char *str)
238{
239 if (str == NULL || *str == '\0')
240 return 1;
241 else {
242 if (!strcmp("vendor", str))
243 acpi_video_support |=
244 ACPI_VIDEO_BACKLIGHT_FORCE_VENDOR;
245 if (!strcmp("video", str))
246 acpi_video_support |=
 247 ACPI_VIDEO_BACKLIGHT_FORCE_VIDEO;
248 }
249 return 1;
250}
251__setup("acpi_backlight=", acpi_backlight);
252
253int __init acpi_display_output(char *str)
254{
255 if (str == NULL || *str == '\0')
256 return 1;
257 else {
258 if (!strcmp("vendor", str))
259 acpi_video_support |=
260 ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VENDOR;
261 if (!strcmp("video", str))
262 acpi_video_support |=
263 ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VIDEO;
264 }
265 return 1;
266}
267__setup("acpi_display_output=", acpi_display_output);
diff --git a/drivers/acpi/wmi.c b/drivers/acpi/wmi.c
index cfe2c833474d..8a8b377712c9 100644
--- a/drivers/acpi/wmi.c
+++ b/drivers/acpi/wmi.c
@@ -217,6 +217,35 @@ static bool find_guid(const char *guid_string, struct wmi_block **out)
217 return 0; 217 return 0;
218} 218}
219 219
220static acpi_status wmi_method_enable(struct wmi_block *wblock, int enable)
221{
222 struct guid_block *block = NULL;
223 char method[5];
224 struct acpi_object_list input;
225 union acpi_object params[1];
226 acpi_status status;
227 acpi_handle handle;
228
229 block = &wblock->gblock;
230 handle = wblock->handle;
231
232 if (!block)
233 return AE_NOT_EXIST;
234
235 input.count = 1;
236 input.pointer = params;
237 params[0].type = ACPI_TYPE_INTEGER;
238 params[0].integer.value = enable;
239
240 snprintf(method, 5, "WE%02X", block->notify_id);
241 status = acpi_evaluate_object(handle, method, &input, NULL);
242
243 if (status != AE_OK && status != AE_NOT_FOUND)
244 return status;
245 else
246 return AE_OK;
247}
248
220/* 249/*
221 * Exported WMI functions 250 * Exported WMI functions
222 */ 251 */
@@ -242,7 +271,7 @@ u32 method_id, const struct acpi_buffer *in, struct acpi_buffer *out)
242 char method[4] = "WM"; 271 char method[4] = "WM";
243 272
244 if (!find_guid(guid_string, &wblock)) 273 if (!find_guid(guid_string, &wblock))
245 return AE_BAD_ADDRESS; 274 return AE_ERROR;
246 275
247 block = &wblock->gblock; 276 block = &wblock->gblock;
248 handle = wblock->handle; 277 handle = wblock->handle;
@@ -304,7 +333,7 @@ struct acpi_buffer *out)
304 return AE_BAD_PARAMETER; 333 return AE_BAD_PARAMETER;
305 334
306 if (!find_guid(guid_string, &wblock)) 335 if (!find_guid(guid_string, &wblock))
307 return AE_BAD_ADDRESS; 336 return AE_ERROR;
308 337
309 block = &wblock->gblock; 338 block = &wblock->gblock;
310 handle = wblock->handle; 339 handle = wblock->handle;
@@ -314,7 +343,7 @@ struct acpi_buffer *out)
314 343
315 /* Check GUID is a data block */ 344 /* Check GUID is a data block */
316 if (block->flags & (ACPI_WMI_EVENT | ACPI_WMI_METHOD)) 345 if (block->flags & (ACPI_WMI_EVENT | ACPI_WMI_METHOD))
317 return AE_BAD_ADDRESS; 346 return AE_ERROR;
318 347
319 input.count = 1; 348 input.count = 1;
320 input.pointer = wq_params; 349 input.pointer = wq_params;
@@ -385,7 +414,7 @@ const struct acpi_buffer *in)
385 return AE_BAD_DATA; 414 return AE_BAD_DATA;
386 415
387 if (!find_guid(guid_string, &wblock)) 416 if (!find_guid(guid_string, &wblock))
388 return AE_BAD_ADDRESS; 417 return AE_ERROR;
389 418
390 block = &wblock->gblock; 419 block = &wblock->gblock;
391 handle = wblock->handle; 420 handle = wblock->handle;
@@ -395,7 +424,7 @@ const struct acpi_buffer *in)
395 424
396 /* Check GUID is a data block */ 425 /* Check GUID is a data block */
397 if (block->flags & (ACPI_WMI_EVENT | ACPI_WMI_METHOD)) 426 if (block->flags & (ACPI_WMI_EVENT | ACPI_WMI_METHOD))
398 return AE_BAD_ADDRESS; 427 return AE_ERROR;
399 428
400 input.count = 2; 429 input.count = 2;
401 input.pointer = params; 430 input.pointer = params;
@@ -427,6 +456,7 @@ acpi_status wmi_install_notify_handler(const char *guid,
427wmi_notify_handler handler, void *data) 456wmi_notify_handler handler, void *data)
428{ 457{
429 struct wmi_block *block; 458 struct wmi_block *block;
459 acpi_status status;
430 460
431 if (!guid || !handler) 461 if (!guid || !handler)
432 return AE_BAD_PARAMETER; 462 return AE_BAD_PARAMETER;
@@ -441,7 +471,9 @@ wmi_notify_handler handler, void *data)
441 block->handler = handler; 471 block->handler = handler;
442 block->handler_data = data; 472 block->handler_data = data;
443 473
444 return AE_OK; 474 status = wmi_method_enable(block, 1);
475
476 return status;
445} 477}
446EXPORT_SYMBOL_GPL(wmi_install_notify_handler); 478EXPORT_SYMBOL_GPL(wmi_install_notify_handler);
447 479
@@ -453,6 +485,7 @@ EXPORT_SYMBOL_GPL(wmi_install_notify_handler);
453acpi_status wmi_remove_notify_handler(const char *guid) 485acpi_status wmi_remove_notify_handler(const char *guid)
454{ 486{
455 struct wmi_block *block; 487 struct wmi_block *block;
488 acpi_status status;
456 489
457 if (!guid) 490 if (!guid)
458 return AE_BAD_PARAMETER; 491 return AE_BAD_PARAMETER;
@@ -464,10 +497,12 @@ acpi_status wmi_remove_notify_handler(const char *guid)
464 if (!block->handler) 497 if (!block->handler)
465 return AE_NULL_ENTRY; 498 return AE_NULL_ENTRY;
466 499
500 status = wmi_method_enable(block, 0);
501
467 block->handler = NULL; 502 block->handler = NULL;
468 block->handler_data = NULL; 503 block->handler_data = NULL;
469 504
470 return AE_OK; 505 return status;
471} 506}
472EXPORT_SYMBOL_GPL(wmi_remove_notify_handler); 507EXPORT_SYMBOL_GPL(wmi_remove_notify_handler);
473 508
@@ -625,7 +660,7 @@ static void acpi_wmi_notify(acpi_handle handle, u32 event, void *data)
625 wblock->handler(event, wblock->handler_data); 660 wblock->handler(event, wblock->handler_data);
626 661
627 acpi_bus_generate_netlink_event( 662 acpi_bus_generate_netlink_event(
628 device->pnp.device_class, device->dev.bus_id, 663 device->pnp.device_class, dev_name(&device->dev),
629 event, 0); 664 event, 0);
630 break; 665 break;
631 } 666 }
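The new wmi_method_enable() helper means the firmware is only asked to generate a WMI event while somebody is listening: installing a notify handler evaluates the event's WExx control method with 1, removing it evaluates the method with 0, and a missing WExx method is tolerated. The consumer-facing API is unchanged; a minimal sketch of a driver using it, with a placeholder GUID rather than one of the blocks touched here:

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/acpi.h>

/* Placeholder GUID for illustration only. */
#define EXAMPLE_EVENT_GUID "12345678-1234-1234-1234-123456789012"

static void example_wmi_notify(u32 value, void *context)
{
	printk(KERN_INFO "wmi example: event 0x%x\n", value);
}

static int __init example_wmi_init(void)
{
	acpi_status status;

	/* This now also arms the event via the WExx control method. */
	status = wmi_install_notify_handler(EXAMPLE_EVENT_GUID,
					    example_wmi_notify, NULL);
	return ACPI_SUCCESS(status) ? 0 : -ENODEV;
}

static void __exit example_wmi_exit(void)
{
	/* And this disarms it again. */
	wmi_remove_notify_handler(EXAMPLE_EVENT_GUID);
}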
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 78fbec8ceda0..421b7c71e72d 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -153,7 +153,7 @@ config SATA_PROMISE
153 If unsure, say N. 153 If unsure, say N.
154 154
155config SATA_SX4 155config SATA_SX4
156 tristate "Promise SATA SX4 support" 156 tristate "Promise SATA SX4 support (Experimental)"
157 depends on PCI && EXPERIMENTAL 157 depends on PCI && EXPERIMENTAL
158 help 158 help
159 This option enables support for Promise Serial ATA SX4. 159 This option enables support for Promise Serial ATA SX4.
@@ -219,8 +219,8 @@ config PATA_ACPI
219 otherwise unsupported hardware. 219 otherwise unsupported hardware.
220 220
221config PATA_ALI 221config PATA_ALI
222 tristate "ALi PATA support (Experimental)" 222 tristate "ALi PATA support"
223 depends on PCI && EXPERIMENTAL 223 depends on PCI
224 help 224 help
225 This option enables support for the ALi ATA interfaces 225 This option enables support for the ALi ATA interfaces
226 found on the many ALi chipsets. 226 found on the many ALi chipsets.
@@ -263,7 +263,7 @@ config PATA_ATIIXP
263 If unsure, say N. 263 If unsure, say N.
264 264
265config PATA_CMD640_PCI 265config PATA_CMD640_PCI
266 tristate "CMD640 PCI PATA support (Very Experimental)" 266 tristate "CMD640 PCI PATA support (Experimental)"
267 depends on PCI && EXPERIMENTAL 267 depends on PCI && EXPERIMENTAL
268 help 268 help
269 This option enables support for the CMD640 PCI IDE 269 This option enables support for the CMD640 PCI IDE
@@ -291,8 +291,8 @@ config PATA_CS5520
291 If unsure, say N. 291 If unsure, say N.
292 292
293config PATA_CS5530 293config PATA_CS5530
294 tristate "CS5530 PATA support (Experimental)" 294 tristate "CS5530 PATA support"
295 depends on PCI && EXPERIMENTAL 295 depends on PCI
296 help 296 help
297 This option enables support for the Cyrix/NatSemi/AMD CS5530 297 This option enables support for the Cyrix/NatSemi/AMD CS5530
298 companion chip used with the MediaGX/Geode processor family. 298 companion chip used with the MediaGX/Geode processor family.
@@ -309,8 +309,8 @@ config PATA_CS5535
309 If unsure, say N. 309 If unsure, say N.
310 310
311config PATA_CS5536 311config PATA_CS5536
312 tristate "CS5536 PATA support (Experimental)" 312 tristate "CS5536 PATA support"
313 depends on PCI && X86 && !X86_64 && EXPERIMENTAL 313 depends on PCI && X86 && !X86_64
314 help 314 help
315 This option enables support for the AMD CS5536 315 This option enables support for the AMD CS5536
316 companion chip used with the Geode LX processor family. 316 companion chip used with the Geode LX processor family.
@@ -363,7 +363,7 @@ config PATA_HPT37X
363 If unsure, say N. 363 If unsure, say N.
364 364
365config PATA_HPT3X2N 365config PATA_HPT3X2N
366 tristate "HPT 372N/302N PATA support (Very Experimental)" 366 tristate "HPT 372N/302N PATA support (Experimental)"
367 depends on PCI && EXPERIMENTAL 367 depends on PCI && EXPERIMENTAL
368 help 368 help
369 This option enables support for the N variant HPT PATA 369 This option enables support for the N variant HPT PATA
@@ -389,8 +389,8 @@ config PATA_HPT3X3_DMA
389 problems with DMA on this chipset. 389 problems with DMA on this chipset.
390 390
391config PATA_ISAPNP 391config PATA_ISAPNP
392 tristate "ISA Plug and Play PATA support (Experimental)" 392 tristate "ISA Plug and Play PATA support"
393 depends on EXPERIMENTAL && ISAPNP 393 depends on ISAPNP
394 help 394 help
395 This option enables support for ISA plug & play ATA 395 This option enables support for ISA plug & play ATA
396 controllers such as those found on old soundcards. 396 controllers such as those found on old soundcards.
@@ -498,8 +498,8 @@ config PATA_NINJA32
498 If unsure, say N. 498 If unsure, say N.
499 499
500config PATA_NS87410 500config PATA_NS87410
501 tristate "Nat Semi NS87410 PATA support (Experimental)" 501 tristate "Nat Semi NS87410 PATA support"
502 depends on PCI && EXPERIMENTAL 502 depends on PCI
503 help 503 help
504 This option enables support for the National Semiconductor 504 This option enables support for the National Semiconductor
505 NS87410 PCI-IDE controller. 505 NS87410 PCI-IDE controller.
@@ -507,8 +507,8 @@ config PATA_NS87410
507 If unsure, say N. 507 If unsure, say N.
508 508
509config PATA_NS87415 509config PATA_NS87415
510 tristate "Nat Semi NS87415 PATA support (Experimental)" 510 tristate "Nat Semi NS87415 PATA support"
511 depends on PCI && EXPERIMENTAL 511 depends on PCI
512 help 512 help
513 This option enables support for the National Semiconductor 513 This option enables support for the National Semiconductor
514 NS87415 PCI-IDE controller. 514 NS87415 PCI-IDE controller.
@@ -544,8 +544,8 @@ config PATA_PCMCIA
544 If unsure, say N. 544 If unsure, say N.
545 545
546config PATA_PDC_OLD 546config PATA_PDC_OLD
547 tristate "Older Promise PATA controller support (Experimental)" 547 tristate "Older Promise PATA controller support"
548 depends on PCI && EXPERIMENTAL 548 depends on PCI
549 help 549 help
550 This option enables support for the Promise 20246, 20262, 20263, 550 This option enables support for the Promise 20246, 20262, 20263,
551 20265 and 20267 adapters. 551 20265 and 20267 adapters.
@@ -559,7 +559,7 @@ config PATA_QDI
559 Support for QDI 6500 and 6580 PATA controllers on VESA local bus. 559 Support for QDI 6500 and 6580 PATA controllers on VESA local bus.
560 560
561config PATA_RADISYS 561config PATA_RADISYS
562 tristate "RADISYS 82600 PATA support (Very Experimental)" 562 tristate "RADISYS 82600 PATA support (Experimental)"
563 depends on PCI && EXPERIMENTAL 563 depends on PCI && EXPERIMENTAL
564 help 564 help
565 This option enables support for the RADISYS 82600 565 This option enables support for the RADISYS 82600
@@ -586,8 +586,8 @@ config PATA_RZ1000
586 If unsure, say N. 586 If unsure, say N.
587 587
588config PATA_SC1200 588config PATA_SC1200
589 tristate "SC1200 PATA support (Very Experimental)" 589 tristate "SC1200 PATA support"
590 depends on PCI && EXPERIMENTAL 590 depends on PCI
591 help 591 help
592 This option enables support for the NatSemi/AMD SC1200 SoC 592 This option enables support for the NatSemi/AMD SC1200 SoC
593 companion chip used with the Geode processor family. 593 companion chip used with the Geode processor family.
@@ -620,8 +620,8 @@ config PATA_SIL680
620 If unsure, say N. 620 If unsure, say N.
621 621
622config PATA_SIS 622config PATA_SIS
623 tristate "SiS PATA support (Experimental)" 623 tristate "SiS PATA support"
624 depends on PCI && EXPERIMENTAL 624 depends on PCI
625 help 625 help
626 This option enables support for SiS PATA controllers 626 This option enables support for SiS PATA controllers
627 627
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index aeadd00411a1..a67b8e7c712d 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -49,6 +49,17 @@
49#define DRV_NAME "ahci" 49#define DRV_NAME "ahci"
50#define DRV_VERSION "3.0" 50#define DRV_VERSION "3.0"
51 51
52/* Enclosure Management Control */
53#define EM_CTRL_MSG_TYPE 0x000f0000
54
55/* Enclosure Management LED Message Type */
56#define EM_MSG_LED_HBA_PORT 0x0000000f
57#define EM_MSG_LED_PMP_SLOT 0x0000ff00
58#define EM_MSG_LED_VALUE 0xffff0000
59#define EM_MSG_LED_VALUE_ACTIVITY 0x00070000
60#define EM_MSG_LED_VALUE_OFF 0xfff80000
61#define EM_MSG_LED_VALUE_ON 0x00010000
62
52static int ahci_skip_host_reset; 63static int ahci_skip_host_reset;
53module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444); 64module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444);
54MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)"); 65MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)");
@@ -588,6 +599,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
588 { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */ 599 { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
589 { PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */ 600 { PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */
590 601
602 /* Promise */
603 { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
604
591 /* Generic, PCI class code for AHCI */ 605 /* Generic, PCI class code for AHCI */
592 { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 606 { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
593 PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci }, 607 PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
@@ -1220,18 +1234,20 @@ static void ahci_sw_activity_blink(unsigned long arg)
1220 struct ahci_em_priv *emp = &pp->em_priv[link->pmp]; 1234 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1221 unsigned long led_message = emp->led_state; 1235 unsigned long led_message = emp->led_state;
1222 u32 activity_led_state; 1236 u32 activity_led_state;
1237 unsigned long flags;
1223 1238
1224 led_message &= 0xffff0000; 1239 led_message &= EM_MSG_LED_VALUE;
1225 led_message |= ap->port_no | (link->pmp << 8); 1240 led_message |= ap->port_no | (link->pmp << 8);
1226 1241
1227 /* check to see if we've had activity. If so, 1242 /* check to see if we've had activity. If so,
1228 * toggle state of LED and reset timer. If not, 1243 * toggle state of LED and reset timer. If not,
1229 * turn LED to desired idle state. 1244 * turn LED to desired idle state.
1230 */ 1245 */
1246 spin_lock_irqsave(ap->lock, flags);
1231 if (emp->saved_activity != emp->activity) { 1247 if (emp->saved_activity != emp->activity) {
1232 emp->saved_activity = emp->activity; 1248 emp->saved_activity = emp->activity;
1233 /* get the current LED state */ 1249 /* get the current LED state */
1234 activity_led_state = led_message & 0x00010000; 1250 activity_led_state = led_message & EM_MSG_LED_VALUE_ON;
1235 1251
1236 if (activity_led_state) 1252 if (activity_led_state)
1237 activity_led_state = 0; 1253 activity_led_state = 0;
@@ -1239,17 +1255,18 @@ static void ahci_sw_activity_blink(unsigned long arg)
1239 activity_led_state = 1; 1255 activity_led_state = 1;
1240 1256
1241 /* clear old state */ 1257 /* clear old state */
1242 led_message &= 0xfff8ffff; 1258 led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
1243 1259
1244 /* toggle state */ 1260 /* toggle state */
1245 led_message |= (activity_led_state << 16); 1261 led_message |= (activity_led_state << 16);
1246 mod_timer(&emp->timer, jiffies + msecs_to_jiffies(100)); 1262 mod_timer(&emp->timer, jiffies + msecs_to_jiffies(100));
1247 } else { 1263 } else {
1248 /* switch to idle */ 1264 /* switch to idle */
1249 led_message &= 0xfff8ffff; 1265 led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
1250 if (emp->blink_policy == BLINK_OFF) 1266 if (emp->blink_policy == BLINK_OFF)
1251 led_message |= (1 << 16); 1267 led_message |= (1 << 16);
1252 } 1268 }
1269 spin_unlock_irqrestore(ap->lock, flags);
1253 ahci_transmit_led_message(ap, led_message, 4); 1270 ahci_transmit_led_message(ap, led_message, 4);
1254} 1271}
1255 1272
@@ -1294,7 +1311,7 @@ static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
1294 struct ahci_em_priv *emp; 1311 struct ahci_em_priv *emp;
1295 1312
1296 /* get the slot number from the message */ 1313 /* get the slot number from the message */
1297 pmp = (state & 0x0000ff00) >> 8; 1314 pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
1298 if (pmp < MAX_SLOTS) 1315 if (pmp < MAX_SLOTS)
1299 emp = &pp->em_priv[pmp]; 1316 emp = &pp->em_priv[pmp];
1300 else 1317 else
@@ -1319,7 +1336,7 @@ static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
1319 message[0] |= (4 << 8); 1336 message[0] |= (4 << 8);
1320 1337
1321 /* ignore 0:4 of byte zero, fill in port info yourself */ 1338 /* ignore 0:4 of byte zero, fill in port info yourself */
1322 message[1] = ((state & 0xfffffff0) | ap->port_no); 1339 message[1] = ((state & ~EM_MSG_LED_HBA_PORT) | ap->port_no);
1323 1340
1324 /* write message to EM_LOC */ 1341 /* write message to EM_LOC */
1325 writel(message[0], mmio + hpriv->em_loc); 1342 writel(message[0], mmio + hpriv->em_loc);
@@ -1362,7 +1379,7 @@ static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
1362 state = simple_strtoul(buf, NULL, 0); 1379 state = simple_strtoul(buf, NULL, 0);
1363 1380
1364 /* get the slot number from the message */ 1381 /* get the slot number from the message */
1365 pmp = (state & 0x0000ff00) >> 8; 1382 pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
1366 if (pmp < MAX_SLOTS) 1383 if (pmp < MAX_SLOTS)
1367 emp = &pp->em_priv[pmp]; 1384 emp = &pp->em_priv[pmp];
1368 else 1385 else
@@ -1373,7 +1390,7 @@ static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
1373 * activity led through em_message 1390 * activity led through em_message
1374 */ 1391 */
1375 if (emp->blink_policy) 1392 if (emp->blink_policy)
1376 state &= 0xfff8ffff; 1393 state &= ~EM_MSG_LED_VALUE_ACTIVITY;
1377 1394
1378 return ahci_transmit_led_message(ap, state, size); 1395 return ahci_transmit_led_message(ap, state, size);
1379} 1396}
@@ -1392,16 +1409,16 @@ static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val)
1392 link->flags &= ~(ATA_LFLAG_SW_ACTIVITY); 1409 link->flags &= ~(ATA_LFLAG_SW_ACTIVITY);
1393 1410
1394 /* set the LED to OFF */ 1411 /* set the LED to OFF */
1395 port_led_state &= 0xfff80000; 1412 port_led_state &= EM_MSG_LED_VALUE_OFF;
1396 port_led_state |= (ap->port_no | (link->pmp << 8)); 1413 port_led_state |= (ap->port_no | (link->pmp << 8));
1397 ahci_transmit_led_message(ap, port_led_state, 4); 1414 ahci_transmit_led_message(ap, port_led_state, 4);
1398 } else { 1415 } else {
1399 link->flags |= ATA_LFLAG_SW_ACTIVITY; 1416 link->flags |= ATA_LFLAG_SW_ACTIVITY;
1400 if (val == BLINK_OFF) { 1417 if (val == BLINK_OFF) {
1401 /* set LED to ON for idle */ 1418 /* set LED to ON for idle */
1402 port_led_state &= 0xfff80000; 1419 port_led_state &= EM_MSG_LED_VALUE_OFF;
1403 port_led_state |= (ap->port_no | (link->pmp << 8)); 1420 port_led_state |= (ap->port_no | (link->pmp << 8));
1404 port_led_state |= 0x00010000; /* check this */ 1421 port_led_state |= EM_MSG_LED_VALUE_ON; /* check this */
1405 ahci_transmit_led_message(ap, port_led_state, 4); 1422 ahci_transmit_led_message(ap, port_led_state, 4);
1406 } 1423 }
1407 } 1424 }
@@ -2612,7 +2629,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2612 u32 em_loc = readl(mmio + HOST_EM_LOC); 2629 u32 em_loc = readl(mmio + HOST_EM_LOC);
2613 u32 em_ctl = readl(mmio + HOST_EM_CTL); 2630 u32 em_ctl = readl(mmio + HOST_EM_CTL);
2614 2631
2615 messages = (em_ctl & 0x000f0000) >> 16; 2632 messages = (em_ctl & EM_CTRL_MSG_TYPE) >> 16;
2616 2633
2617 /* we only support LED message type right now */ 2634 /* we only support LED message type right now */
2618 if ((messages & 0x01) && (ahci_em_messages == 1)) { 2635 if ((messages & 0x01) && (ahci_em_messages == 1)) {
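The new EM_MSG_LED_* constants name the layout of the 32-bit enclosure-management LED message that was previously built from bare hex masks: the HBA port number sits in bits 0-3, the port-multiplier slot in bits 8-15 and the LED state in the upper 16 bits (bit 16 = LED on, bits 16-18 = activity). A small illustrative helper, not part of the patch, composing such a message from its fields:

#include <linux/types.h>

#define EM_MSG_LED_HBA_PORT	0x0000000f
#define EM_MSG_LED_PMP_SLOT	0x0000ff00
#define EM_MSG_LED_VALUE	0xffff0000

static u32 example_em_led_message(unsigned int port, unsigned int pmp,
				  u16 led_state)
{
	u32 msg = 0;

	msg |= port & EM_MSG_LED_HBA_PORT;			/* bits 0-3   */
	msg |= (pmp << 8) & EM_MSG_LED_PMP_SLOT;		/* bits 8-15  */
	msg |= ((u32)led_state << 16) & EM_MSG_LED_VALUE;	/* bits 16-31 */

	return msg;
}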
diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
index 75a406f5e694..5c33767e66de 100644
--- a/drivers/ata/ata_generic.c
+++ b/drivers/ata/ata_generic.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * ata_generic.c - Generic PATA/SATA controller driver. 2 * ata_generic.c - Generic PATA/SATA controller driver.
3 * Copyright 2005 Red Hat Inc <alan@redhat.com>, all rights reserved. 3 * Copyright 2005 Red Hat Inc, all rights reserved.
4 * 4 *
5 * Elements from ide/pci/generic.c 5 * Elements from ide/pci/generic.c
6 * Copyright (C) 2001-2002 Andre Hedrick <andre@linux-ide.org> 6 * Copyright (C) 2001-2002 Andre Hedrick <andre@linux-ide.org>
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index e9e32ed6b1a3..c11936e13dd3 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -14,7 +14,7 @@
14 * 14 *
15 * Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer 15 * Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer
16 * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org> 16 * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
17 * Copyright (C) 2003 Red Hat Inc <alan@redhat.com> 17 * Copyright (C) 2003 Red Hat Inc
18 * 18 *
19 * 19 *
20 * This program is free software; you can redistribute it and/or modify 20 * This program is free software; you can redistribute it and/or modify
@@ -738,7 +738,6 @@ static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev)
738 * do_pata_set_dmamode - Initialize host controller PATA PIO timings 738 * do_pata_set_dmamode - Initialize host controller PATA PIO timings
739 * @ap: Port whose timings we are configuring 739 * @ap: Port whose timings we are configuring
740 * @adev: Drive in question 740 * @adev: Drive in question
741 * @udma: udma mode, 0 - 6
742 * @isich: set if the chip is an ICH device 741 * @isich: set if the chip is an ICH device
743 * 742 *
744 * Set UDMA mode for device, in host controller PCI config space. 743 * Set UDMA mode for device, in host controller PCI config space.
@@ -1067,6 +1066,28 @@ static int piix_broken_suspend(void)
1067 if (dmi_find_device(DMI_DEV_TYPE_OEM_STRING, oemstrs[i], NULL)) 1066 if (dmi_find_device(DMI_DEV_TYPE_OEM_STRING, oemstrs[i], NULL))
1068 return 1; 1067 return 1;
1069 1068
 1069 /* TECRA M4 sometimes forgets its identity and reports bogus
1070 * DMI information. As the bogus information is a bit
1071 * generic, match as many entries as possible. This manual
1072 * matching is necessary because dmi_system_id.matches is
1073 * limited to four entries.
1074 */
1075 if (dmi_get_system_info(DMI_SYS_VENDOR) &&
1076 dmi_get_system_info(DMI_PRODUCT_NAME) &&
1077 dmi_get_system_info(DMI_PRODUCT_VERSION) &&
1078 dmi_get_system_info(DMI_PRODUCT_SERIAL) &&
1079 dmi_get_system_info(DMI_BOARD_VENDOR) &&
1080 dmi_get_system_info(DMI_BOARD_NAME) &&
1081 dmi_get_system_info(DMI_BOARD_VERSION) &&
1082 !strcmp(dmi_get_system_info(DMI_SYS_VENDOR), "TOSHIBA") &&
1083 !strcmp(dmi_get_system_info(DMI_PRODUCT_NAME), "000000") &&
1084 !strcmp(dmi_get_system_info(DMI_PRODUCT_VERSION), "000000") &&
1085 !strcmp(dmi_get_system_info(DMI_PRODUCT_SERIAL), "000000") &&
1086 !strcmp(dmi_get_system_info(DMI_BOARD_VENDOR), "TOSHIBA") &&
1087 !strcmp(dmi_get_system_info(DMI_BOARD_NAME), "Portable PC") &&
1088 !strcmp(dmi_get_system_info(DMI_BOARD_VERSION), "Version A0"))
1089 return 1;
1090
1070 return 0; 1091 return 0;
1071} 1092}
1072 1093
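The open-coded string comparisons above are needed because a dmi_system_id entry carries at most four match conditions, while the bogus TECRA M4 strings only become a safe signature when seven fields are compared. For comparison, a hypothetical table-based version (too loose with only four matches, which is why the patch does not use it) would look roughly like this:

#include <linux/dmi.h>

static const struct dmi_system_id example_tecra_m4_table[] = {
	{
		.ident = "Toshiba TECRA M4 (bogus DMI strings)",
		.matches = {	/* struct dmi_system_id allows only four */
			DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
			DMI_MATCH(DMI_PRODUCT_NAME, "000000"),
			DMI_MATCH(DMI_BOARD_NAME, "Portable PC"),
			DMI_MATCH(DMI_BOARD_VERSION, "Version A0"),
		},
	},
	{ }	/* terminator */
};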
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index 9330b7922f62..c012307d0ba6 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -120,21 +120,6 @@ static void ata_acpi_associate_ide_port(struct ata_port *ap)
120 ap->pflags |= ATA_PFLAG_INIT_GTM_VALID; 120 ap->pflags |= ATA_PFLAG_INIT_GTM_VALID;
121} 121}
122 122
123static void ata_acpi_eject_device(acpi_handle handle)
124{
125 struct acpi_object_list arg_list;
126 union acpi_object arg;
127
128 arg_list.count = 1;
129 arg_list.pointer = &arg;
130 arg.type = ACPI_TYPE_INTEGER;
131 arg.integer.value = 1;
132
133 if (ACPI_FAILURE(acpi_evaluate_object(handle, "_EJ0",
134 &arg_list, NULL)))
135 printk(KERN_ERR "Failed to evaluate _EJ0!\n");
136}
137
138/* @ap and @dev are the same as ata_acpi_handle_hotplug() */ 123/* @ap and @dev are the same as ata_acpi_handle_hotplug() */
139static void ata_acpi_detach_device(struct ata_port *ap, struct ata_device *dev) 124static void ata_acpi_detach_device(struct ata_port *ap, struct ata_device *dev)
140{ 125{
@@ -157,7 +142,6 @@ static void ata_acpi_detach_device(struct ata_port *ap, struct ata_device *dev)
157 * @ap: ATA port ACPI event occurred 142 * @ap: ATA port ACPI event occurred
158 * @dev: ATA device ACPI event occurred (can be NULL) 143 * @dev: ATA device ACPI event occurred (can be NULL)
159 * @event: ACPI event which occurred 144 * @event: ACPI event which occurred
160 * @is_dock_event: boolean indicating whether the event was a dock one
161 * 145 *
162 * All ACPI bay / device realted events end up in this function. If 146 * All ACPI bay / device realted events end up in this function. If
163 * the event is port-wide @dev is NULL. If the event is specific to a 147 * the event is port-wide @dev is NULL. If the event is specific to a
@@ -171,117 +155,100 @@ static void ata_acpi_detach_device(struct ata_port *ap, struct ata_device *dev)
171 * ACPI notify handler context. May sleep. 155 * ACPI notify handler context. May sleep.
172 */ 156 */
173static void ata_acpi_handle_hotplug(struct ata_port *ap, struct ata_device *dev, 157static void ata_acpi_handle_hotplug(struct ata_port *ap, struct ata_device *dev,
174 u32 event, int is_dock_event) 158 u32 event)
175{ 159{
176 char event_string[12];
177 char *envp[] = { event_string, NULL };
178 struct ata_eh_info *ehi = &ap->link.eh_info; 160 struct ata_eh_info *ehi = &ap->link.eh_info;
179 struct kobject *kobj = NULL;
180 int wait = 0; 161 int wait = 0;
181 unsigned long flags; 162 unsigned long flags;
182 acpi_handle handle, tmphandle; 163 acpi_handle handle;
183 unsigned long sta;
184 acpi_status status;
185 164
186 if (dev) { 165 if (dev)
187 if (dev->sdev)
188 kobj = &dev->sdev->sdev_gendev.kobj;
189 handle = dev->acpi_handle; 166 handle = dev->acpi_handle;
190 } else { 167 else
191 kobj = &ap->dev->kobj;
192 handle = ap->acpi_handle; 168 handle = ap->acpi_handle;
193 }
194
195 status = acpi_get_handle(handle, "_EJ0", &tmphandle);
196 if (ACPI_FAILURE(status))
197 /* This device does not support hotplug */
198 return;
199
200 if (event == ACPI_NOTIFY_BUS_CHECK ||
201 event == ACPI_NOTIFY_DEVICE_CHECK)
202 status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
203 169
204 spin_lock_irqsave(ap->lock, flags); 170 spin_lock_irqsave(ap->lock, flags);
205 171 /*
 172 * When the dock driver calls into this routine, it will always use
173 * ACPI_NOTIFY_BUS_CHECK/ACPI_NOTIFY_DEVICE_CHECK for add and
174 * ACPI_NOTIFY_EJECT_REQUEST for remove
175 */
206 switch (event) { 176 switch (event) {
207 case ACPI_NOTIFY_BUS_CHECK: 177 case ACPI_NOTIFY_BUS_CHECK:
208 case ACPI_NOTIFY_DEVICE_CHECK: 178 case ACPI_NOTIFY_DEVICE_CHECK:
209 ata_ehi_push_desc(ehi, "ACPI event"); 179 ata_ehi_push_desc(ehi, "ACPI event");
210 180
211 if (ACPI_FAILURE(status)) { 181 ata_ehi_hotplugged(ehi);
212 ata_port_printk(ap, KERN_ERR, 182 ata_port_freeze(ap);
213 "acpi: failed to determine bay status (0x%x)\n",
214 status);
215 break;
216 }
217
218 if (sta) {
219 ata_ehi_hotplugged(ehi);
220 ata_port_freeze(ap);
221 } else {
222 /* The device has gone - unplug it */
223 ata_acpi_detach_device(ap, dev);
224 wait = 1;
225 }
226 break; 183 break;
227 case ACPI_NOTIFY_EJECT_REQUEST: 184 case ACPI_NOTIFY_EJECT_REQUEST:
228 ata_ehi_push_desc(ehi, "ACPI event"); 185 ata_ehi_push_desc(ehi, "ACPI event");
229 186
230 if (!is_dock_event)
231 break;
232
233 /* undock event - immediate unplug */
234 ata_acpi_detach_device(ap, dev); 187 ata_acpi_detach_device(ap, dev);
235 wait = 1; 188 wait = 1;
236 break; 189 break;
237 } 190 }
238 191
239 /* make sure kobj doesn't go away while ap->lock is released */
240 kobject_get(kobj);
241
242 spin_unlock_irqrestore(ap->lock, flags); 192 spin_unlock_irqrestore(ap->lock, flags);
243 193
244 if (wait) { 194 if (wait)
245 ata_port_wait_eh(ap); 195 ata_port_wait_eh(ap);
246 ata_acpi_eject_device(handle);
247 }
248
249 if (kobj && !is_dock_event) {
250 sprintf(event_string, "BAY_EVENT=%d", event);
251 kobject_uevent_env(kobj, KOBJ_CHANGE, envp);
252 }
253
254 kobject_put(kobj);
255} 196}
256 197
257static void ata_acpi_dev_notify_dock(acpi_handle handle, u32 event, void *data) 198static void ata_acpi_dev_notify_dock(acpi_handle handle, u32 event, void *data)
258{ 199{
259 struct ata_device *dev = data; 200 struct ata_device *dev = data;
260 201
261 ata_acpi_handle_hotplug(dev->link->ap, dev, event, 1); 202 ata_acpi_handle_hotplug(dev->link->ap, dev, event);
262} 203}
263 204
264static void ata_acpi_ap_notify_dock(acpi_handle handle, u32 event, void *data) 205static void ata_acpi_ap_notify_dock(acpi_handle handle, u32 event, void *data)
265{ 206{
266 struct ata_port *ap = data; 207 struct ata_port *ap = data;
267 208
268 ata_acpi_handle_hotplug(ap, NULL, event, 1); 209 ata_acpi_handle_hotplug(ap, NULL, event);
269} 210}
270 211
271static void ata_acpi_dev_notify(acpi_handle handle, u32 event, void *data) 212static void ata_acpi_uevent(struct ata_port *ap, struct ata_device *dev,
213 u32 event)
272{ 214{
273 struct ata_device *dev = data; 215 struct kobject *kobj = NULL;
216 char event_string[20];
217 char *envp[] = { event_string, NULL };
218
219 if (dev) {
220 if (dev->sdev)
221 kobj = &dev->sdev->sdev_gendev.kobj;
222 } else
223 kobj = &ap->dev->kobj;
274 224
275 ata_acpi_handle_hotplug(dev->link->ap, dev, event, 0); 225 if (kobj) {
226 snprintf(event_string, 20, "BAY_EVENT=%d", event);
227 kobject_uevent_env(kobj, KOBJ_CHANGE, envp);
228 }
276} 229}
277 230
278static void ata_acpi_ap_notify(acpi_handle handle, u32 event, void *data) 231static void ata_acpi_ap_uevent(acpi_handle handle, u32 event, void *data)
279{ 232{
280 struct ata_port *ap = data; 233 ata_acpi_uevent(data, NULL, event);
234}
281 235
282 ata_acpi_handle_hotplug(ap, NULL, event, 0); 236static void ata_acpi_dev_uevent(acpi_handle handle, u32 event, void *data)
237{
238 struct ata_device *dev = data;
239 ata_acpi_uevent(dev->link->ap, dev, event);
283} 240}
284 241
242static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
243 .handler = ata_acpi_dev_notify_dock,
244 .uevent = ata_acpi_dev_uevent,
245};
246
247static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
248 .handler = ata_acpi_ap_notify_dock,
249 .uevent = ata_acpi_ap_uevent,
250};
251
285/** 252/**
286 * ata_acpi_associate - associate ATA host with ACPI objects 253 * ata_acpi_associate - associate ATA host with ACPI objects
287 * @host: target ATA host 254 * @host: target ATA host
@@ -315,24 +282,18 @@ void ata_acpi_associate(struct ata_host *host)
315 ata_acpi_associate_ide_port(ap); 282 ata_acpi_associate_ide_port(ap);
316 283
317 if (ap->acpi_handle) { 284 if (ap->acpi_handle) {
318 acpi_install_notify_handler(ap->acpi_handle,
319 ACPI_SYSTEM_NOTIFY,
320 ata_acpi_ap_notify, ap);
321 /* we might be on a docking station */ 285 /* we might be on a docking station */
322 register_hotplug_dock_device(ap->acpi_handle, 286 register_hotplug_dock_device(ap->acpi_handle,
323 ata_acpi_ap_notify_dock, ap); 287 &ata_acpi_ap_dock_ops, ap);
324 } 288 }
325 289
326 for (j = 0; j < ata_link_max_devices(&ap->link); j++) { 290 for (j = 0; j < ata_link_max_devices(&ap->link); j++) {
327 struct ata_device *dev = &ap->link.device[j]; 291 struct ata_device *dev = &ap->link.device[j];
328 292
329 if (dev->acpi_handle) { 293 if (dev->acpi_handle) {
330 acpi_install_notify_handler(dev->acpi_handle,
331 ACPI_SYSTEM_NOTIFY,
332 ata_acpi_dev_notify, dev);
333 /* we might be on a docking station */ 294 /* we might be on a docking station */
334 register_hotplug_dock_device(dev->acpi_handle, 295 register_hotplug_dock_device(dev->acpi_handle,
335 ata_acpi_dev_notify_dock, dev); 296 &ata_acpi_dev_dock_ops, dev);
336 } 297 }
337 } 298 }
338 } 299 }
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 1ee9499bd343..bc6695e3c848 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -612,7 +612,7 @@ u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
612 if (tf->flags & ATA_TFLAG_LBA48) { 612 if (tf->flags & ATA_TFLAG_LBA48) {
613 block |= (u64)tf->hob_lbah << 40; 613 block |= (u64)tf->hob_lbah << 40;
614 block |= (u64)tf->hob_lbam << 32; 614 block |= (u64)tf->hob_lbam << 32;
615 block |= tf->hob_lbal << 24; 615 block |= (u64)tf->hob_lbal << 24;
616 } else 616 } else
617 block |= (tf->device & 0xf) << 24; 617 block |= (tf->device & 0xf) << 24;
618 618
@@ -1268,7 +1268,7 @@ u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1268 1268
1269 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40; 1269 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1270 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32; 1270 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1271 sectors |= (tf->hob_lbal & 0xff) << 24; 1271 sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
1272 sectors |= (tf->lbah & 0xff) << 16; 1272 sectors |= (tf->lbah & 0xff) << 16;
1273 sectors |= (tf->lbam & 0xff) << 8; 1273 sectors |= (tf->lbam & 0xff) << 8;
1274 sectors |= (tf->lbal & 0xff); 1274 sectors |= (tf->lbal & 0xff);
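The two (u64) casts added above close an integer-promotion hole: hob_lbal is a u8, so (tf->hob_lbal & 0xff) << 24 is evaluated as a 32-bit int, and any value with bit 7 set lands in bit 31; when that now-negative int is OR-ed into the 64-bit accumulator it is sign-extended, smearing ones across bits 32-63 and corrupting the block address or sector count. A standalone illustration of the effect (ordinary C, not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t hob_lbal = 0x80;
	uint64_t bad = 0, good = 0;

	/* int-sized shift: bit 7 ends up in the sign bit, then sign-extends */
	bad |= (hob_lbal & 0xff) << 24;
	/* widened first, as in the fixed code: no sign extension */
	good |= ((uint64_t)(hob_lbal & 0xff)) << 24;

	printf("bad  = 0x%016llx\n", (unsigned long long)bad);
	printf("good = 0x%016llx\n", (unsigned long long)good);
	return 0;
}

On a typical two's-complement system this prints 0xffffffff80000000 for the unfixed expression and 0x0000000080000000 for the fixed one.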
@@ -1602,7 +1602,6 @@ unsigned long ata_id_xfermask(const u16 *id)
1602/** 1602/**
1603 * ata_pio_queue_task - Queue port_task 1603 * ata_pio_queue_task - Queue port_task
1604 * @ap: The ata_port to queue port_task for 1604 * @ap: The ata_port to queue port_task for
1605 * @fn: workqueue function to be scheduled
1606 * @data: data for @fn to use 1605 * @data: data for @fn to use
1607 * @delay: delay time in msecs for workqueue function 1606 * @delay: delay time in msecs for workqueue function
1608 * 1607 *
@@ -2161,6 +2160,10 @@ retry:
2161static inline u8 ata_dev_knobble(struct ata_device *dev) 2160static inline u8 ata_dev_knobble(struct ata_device *dev)
2162{ 2161{
2163 struct ata_port *ap = dev->link->ap; 2162 struct ata_port *ap = dev->link->ap;
2163
2164 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
2165 return 0;
2166
2164 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id))); 2167 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2165} 2168}
2166 2169
@@ -2489,6 +2492,13 @@ int ata_dev_configure(struct ata_device *dev)
2489 } 2492 }
2490 } 2493 }
2491 2494
2495 if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
2496 ata_dev_printk(dev, KERN_WARNING, "WARNING: device requires "
2497 "firmware update to be fully functional.\n");
2498 ata_dev_printk(dev, KERN_WARNING, " contact the vendor "
2499 "or visit http://ata.wiki.kernel.org.\n");
2500 }
2501
2492 return 0; 2502 return 0;
2493 2503
2494err_out_nosup: 2504err_out_nosup:
@@ -4023,6 +4033,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4023 4033
4024 /* Weird ATAPI devices */ 4034 /* Weird ATAPI devices */
4025 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 }, 4035 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
4036 { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA },
4026 4037
4027 /* Devices we expect to fail diagnostics */ 4038 /* Devices we expect to fail diagnostics */
4028 4039
@@ -4038,6 +4049,73 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4038 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ }, 4049 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ },
4039 { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ }, 4050 { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ },
4040 4051
4052 /* Seagate NCQ + FLUSH CACHE firmware bug */
4053 { "ST31500341AS", "SD15", ATA_HORKAGE_NONCQ |
4054 ATA_HORKAGE_FIRMWARE_WARN },
4055 { "ST31500341AS", "SD16", ATA_HORKAGE_NONCQ |
4056 ATA_HORKAGE_FIRMWARE_WARN },
4057 { "ST31500341AS", "SD17", ATA_HORKAGE_NONCQ |
4058 ATA_HORKAGE_FIRMWARE_WARN },
4059 { "ST31500341AS", "SD18", ATA_HORKAGE_NONCQ |
4060 ATA_HORKAGE_FIRMWARE_WARN },
4061 { "ST31500341AS", "SD19", ATA_HORKAGE_NONCQ |
4062 ATA_HORKAGE_FIRMWARE_WARN },
4063
4064 { "ST31000333AS", "SD15", ATA_HORKAGE_NONCQ |
4065 ATA_HORKAGE_FIRMWARE_WARN },
4066 { "ST31000333AS", "SD16", ATA_HORKAGE_NONCQ |
4067 ATA_HORKAGE_FIRMWARE_WARN },
4068 { "ST31000333AS", "SD17", ATA_HORKAGE_NONCQ |
4069 ATA_HORKAGE_FIRMWARE_WARN },
4070 { "ST31000333AS", "SD18", ATA_HORKAGE_NONCQ |
4071 ATA_HORKAGE_FIRMWARE_WARN },
4072 { "ST31000333AS", "SD19", ATA_HORKAGE_NONCQ |
4073 ATA_HORKAGE_FIRMWARE_WARN },
4074
4075 { "ST3640623AS", "SD15", ATA_HORKAGE_NONCQ |
4076 ATA_HORKAGE_FIRMWARE_WARN },
4077 { "ST3640623AS", "SD16", ATA_HORKAGE_NONCQ |
4078 ATA_HORKAGE_FIRMWARE_WARN },
4079 { "ST3640623AS", "SD17", ATA_HORKAGE_NONCQ |
4080 ATA_HORKAGE_FIRMWARE_WARN },
4081 { "ST3640623AS", "SD18", ATA_HORKAGE_NONCQ |
4082 ATA_HORKAGE_FIRMWARE_WARN },
4083 { "ST3640623AS", "SD19", ATA_HORKAGE_NONCQ |
4084 ATA_HORKAGE_FIRMWARE_WARN },
4085
4086 { "ST3640323AS", "SD15", ATA_HORKAGE_NONCQ |
4087 ATA_HORKAGE_FIRMWARE_WARN },
4088 { "ST3640323AS", "SD16", ATA_HORKAGE_NONCQ |
4089 ATA_HORKAGE_FIRMWARE_WARN },
4090 { "ST3640323AS", "SD17", ATA_HORKAGE_NONCQ |
4091 ATA_HORKAGE_FIRMWARE_WARN },
4092 { "ST3640323AS", "SD18", ATA_HORKAGE_NONCQ |
4093 ATA_HORKAGE_FIRMWARE_WARN },
4094 { "ST3640323AS", "SD19", ATA_HORKAGE_NONCQ |
4095 ATA_HORKAGE_FIRMWARE_WARN },
4096
4097 { "ST3320813AS", "SD15", ATA_HORKAGE_NONCQ |
4098 ATA_HORKAGE_FIRMWARE_WARN },
4099 { "ST3320813AS", "SD16", ATA_HORKAGE_NONCQ |
4100 ATA_HORKAGE_FIRMWARE_WARN },
4101 { "ST3320813AS", "SD17", ATA_HORKAGE_NONCQ |
4102 ATA_HORKAGE_FIRMWARE_WARN },
4103 { "ST3320813AS", "SD18", ATA_HORKAGE_NONCQ |
4104 ATA_HORKAGE_FIRMWARE_WARN },
4105 { "ST3320813AS", "SD19", ATA_HORKAGE_NONCQ |
4106 ATA_HORKAGE_FIRMWARE_WARN },
4107
4108 { "ST3320613AS", "SD15", ATA_HORKAGE_NONCQ |
4109 ATA_HORKAGE_FIRMWARE_WARN },
4110 { "ST3320613AS", "SD16", ATA_HORKAGE_NONCQ |
4111 ATA_HORKAGE_FIRMWARE_WARN },
4112 { "ST3320613AS", "SD17", ATA_HORKAGE_NONCQ |
4113 ATA_HORKAGE_FIRMWARE_WARN },
4114 { "ST3320613AS", "SD18", ATA_HORKAGE_NONCQ |
4115 ATA_HORKAGE_FIRMWARE_WARN },
4116 { "ST3320613AS", "SD19", ATA_HORKAGE_NONCQ |
4117 ATA_HORKAGE_FIRMWARE_WARN },
4118
4041 /* Blacklist entries taken from Silicon Image 3124/3132 4119 /* Blacklist entries taken from Silicon Image 3124/3132
4042 Windows driver .inf file - also several Linux problem reports */ 4120 Windows driver .inf file - also several Linux problem reports */
4043 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, }, 4121 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
@@ -4065,6 +4143,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4065 { "TSSTcorp CDDVDW SH-S202N", "SB00", ATA_HORKAGE_IVB, }, 4143 { "TSSTcorp CDDVDW SH-S202N", "SB00", ATA_HORKAGE_IVB, },
4066 { "TSSTcorp CDDVDW SH-S202N", "SB01", ATA_HORKAGE_IVB, }, 4144 { "TSSTcorp CDDVDW SH-S202N", "SB01", ATA_HORKAGE_IVB, },
4067 4145
4146 /* Devices that do not need bridging limits applied */
4147 { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK, },
4148
4068 /* End Marker */ 4149 /* End Marker */
4069 { } 4150 { }
4070}; 4151};
@@ -4158,29 +4239,33 @@ static int cable_is_40wire(struct ata_port *ap)
4158 struct ata_link *link; 4239 struct ata_link *link;
4159 struct ata_device *dev; 4240 struct ata_device *dev;
4160 4241
4161 /* If the controller thinks we are 40 wire, we are */ 4242 /* If the controller thinks we are 40 wire, we are. */
4162 if (ap->cbl == ATA_CBL_PATA40) 4243 if (ap->cbl == ATA_CBL_PATA40)
4163 return 1; 4244 return 1;
4164 /* If the controller thinks we are 80 wire, we are */ 4245
4246 /* If the controller thinks we are 80 wire, we are. */
4165 if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA) 4247 if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4166 return 0; 4248 return 0;
4167 /* If the system is known to be 40 wire short cable (eg laptop), 4249
4168 then we allow 80 wire modes even if the drive isn't sure */ 4250 /* If the system is known to be 40 wire short cable (eg
4251 * laptop), then we allow 80 wire modes even if the drive
4252 * isn't sure.
4253 */
4169 if (ap->cbl == ATA_CBL_PATA40_SHORT) 4254 if (ap->cbl == ATA_CBL_PATA40_SHORT)
4170 return 0; 4255 return 0;
4171 /* If the controller doesn't know we scan 4256
4172 4257 /* If the controller doesn't know, we scan.
4173 - Note: We look for all 40 wire detects at this point. 4258 *
4174 Any 80 wire detect is taken to be 80 wire cable 4259 * Note: We look for all 40 wire detects at this point. Any
4175 because 4260 * 80 wire detect is taken to be 80 wire cable because
4176 - In many setups only the one drive (slave if present) 4261 * - in many setups only the one drive (slave if present) will
4177 will give a valid detect 4262 * give a valid detect
4178 - If you have a non detect capable drive you don't 4263 * - if you have a non detect capable drive you don't want it
4179 want it to colour the choice 4264 * to colour the choice
4180 */ 4265 */
4181 ata_port_for_each_link(link, ap) { 4266 ata_port_for_each_link(link, ap) {
4182 ata_link_for_each_dev(dev, link) { 4267 ata_link_for_each_dev(dev, link) {
4183 if (!ata_is_40wire(dev)) 4268 if (ata_dev_enabled(dev) && !ata_is_40wire(dev))
4184 return 0; 4269 return 0;
4185 } 4270 }
4186 } 4271 }
@@ -4436,7 +4521,8 @@ int atapi_check_dma(struct ata_queued_cmd *qc)
4436 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a 4521 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a
4437 * few ATAPI devices choke on such DMA requests. 4522 * few ATAPI devices choke on such DMA requests.
4438 */ 4523 */
4439 if (unlikely(qc->nbytes & 15)) 4524 if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4525 unlikely(qc->nbytes & 15))
4440 return 1; 4526 return 1;
4441 4527
4442 if (ap->ops->check_atapi_dma) 4528 if (ap->ops->check_atapi_dma)
@@ -4586,6 +4672,7 @@ static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4586/** 4672/**
4587 * ata_qc_new_init - Request an available ATA command, and initialize it 4673 * ata_qc_new_init - Request an available ATA command, and initialize it
4588 * @dev: Device from whom we request an available command structure 4674 * @dev: Device from whom we request an available command structure
4675 * @tag: command tag
4589 * 4676 *
4590 * LOCKING: 4677 * LOCKING:
4591 * None. 4678 * None.
@@ -4697,7 +4784,6 @@ static void ata_verify_xfer(struct ata_queued_cmd *qc)
4697/** 4784/**
4698 * ata_qc_complete - Complete an active ATA command 4785 * ata_qc_complete - Complete an active ATA command
4699 * @qc: Command to complete 4786 * @qc: Command to complete
4700 * @err_mask: ATA Status register contents
4701 * 4787 *
4702 * Indicate to the mid and upper layers that an ATA 4788 * Indicate to the mid and upper layers that an ATA
4703 * command has completed, with either an ok or not-ok status. 4789 * command has completed, with either an ok or not-ok status.
@@ -5373,6 +5459,8 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
5373 5459
5374#ifdef CONFIG_ATA_SFF 5460#ifdef CONFIG_ATA_SFF
5375 INIT_DELAYED_WORK(&ap->port_task, ata_pio_task); 5461 INIT_DELAYED_WORK(&ap->port_task, ata_pio_task);
5462#else
5463 INIT_DELAYED_WORK(&ap->port_task, NULL);
5376#endif 5464#endif
5377 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug); 5465 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5378 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan); 5466 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
@@ -5976,7 +6064,7 @@ static void ata_port_detach(struct ata_port *ap)
5976 * to us. Restore SControl and disable all existing devices. 6064 * to us. Restore SControl and disable all existing devices.
5977 */ 6065 */
5978 __ata_port_for_each_link(link, ap) { 6066 __ata_port_for_each_link(link, ap) {
5979 sata_scr_write(link, SCR_CONTROL, link->saved_scontrol); 6067 sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
5980 ata_link_for_each_dev(dev, link) 6068 ata_link_for_each_dev(dev, link)
5981 ata_dev_disable(dev); 6069 ata_dev_disable(dev);
5982 } 6070 }
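
Masking the saved SControl value with 0xff0 before writing it back keeps the SPD and IPM fields (bits 4-11) while forcing the DET field (bits 0-3) to zero, so the restore does not re-issue whatever reset or interface-disable request happened to be latched in the saved value. A quick sketch of the field split, assuming the standard SATA SControl layout:

#include <stdio.h>

int main(void)
{
	unsigned int saved_scontrol = 0x123;	/* example: DET=3, SPD=2, IPM=1 */
	unsigned int to_write = saved_scontrol & 0xff0;

	printf("DET=%x SPD=%x IPM=%x\n",
	       to_write & 0xf,		/* 0: no reset/disable action requested */
	       (to_write >> 4) & 0xf,	/* 2: speed limit preserved */
	       (to_write >> 8) & 0xf);	/* 1: power-management policy preserved */
	return 0;
}
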
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index a93247cc395a..32da9a93ce44 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -603,13 +603,13 @@ void ata_scsi_error(struct Scsi_Host *host)
603 ata_link_for_each_dev(dev, link) { 603 ata_link_for_each_dev(dev, link) {
604 int devno = dev->devno; 604 int devno = dev->devno;
605 605
606 if (!ata_dev_enabled(dev))
607 continue;
608
606 ehc->saved_xfer_mode[devno] = dev->xfer_mode; 609 ehc->saved_xfer_mode[devno] = dev->xfer_mode;
607 if (ata_ncq_enabled(dev)) 610 if (ata_ncq_enabled(dev))
608 ehc->saved_ncq_enabled |= 1 << devno; 611 ehc->saved_ncq_enabled |= 1 << devno;
609 } 612 }
610
611 /* set last reset timestamp to some time in the past */
612 ehc->last_reset = jiffies - 60 * HZ;
613 } 613 }
614 614
615 ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS; 615 ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
@@ -1161,6 +1161,7 @@ void ata_eh_detach_dev(struct ata_device *dev)
1161{ 1161{
1162 struct ata_link *link = dev->link; 1162 struct ata_link *link = dev->link;
1163 struct ata_port *ap = link->ap; 1163 struct ata_port *ap = link->ap;
1164 struct ata_eh_context *ehc = &link->eh_context;
1164 unsigned long flags; 1165 unsigned long flags;
1165 1166
1166 ata_dev_disable(dev); 1167 ata_dev_disable(dev);
@@ -1174,9 +1175,11 @@ void ata_eh_detach_dev(struct ata_device *dev)
1174 ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; 1175 ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
1175 } 1176 }
1176 1177
1177 /* clear per-dev EH actions */ 1178 /* clear per-dev EH info */
1178 ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK); 1179 ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK);
1179 ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK); 1180 ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK);
1181 ehc->saved_xfer_mode[dev->devno] = 0;
1182 ehc->saved_ncq_enabled &= ~(1 << dev->devno);
1180 1183
1181 spin_unlock_irqrestore(ap->lock, flags); 1184 spin_unlock_irqrestore(ap->lock, flags);
1182} 1185}
@@ -1206,7 +1209,10 @@ void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
1206 1209
1207 ata_eh_clear_action(link, dev, ehi, action); 1210 ata_eh_clear_action(link, dev, ehi, action);
1208 1211
1209 if (!(ehc->i.flags & ATA_EHI_QUIET)) 1212 /* About to take EH action, set RECOVERED. Ignore actions on
1213 * slave links as master will do them again.
1214 */
1215 if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link)
1210 ap->pflags |= ATA_PFLAG_RECOVERED; 1216 ap->pflags |= ATA_PFLAG_RECOVERED;
1211 1217
1212 spin_unlock_irqrestore(ap->lock, flags); 1218 spin_unlock_irqrestore(ap->lock, flags);
@@ -2010,8 +2016,13 @@ void ata_eh_autopsy(struct ata_port *ap)
2010 struct ata_eh_context *mehc = &ap->link.eh_context; 2016 struct ata_eh_context *mehc = &ap->link.eh_context;
2011 struct ata_eh_context *sehc = &ap->slave_link->eh_context; 2017 struct ata_eh_context *sehc = &ap->slave_link->eh_context;
2012 2018
2019 /* transfer control flags from master to slave */
2020 sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK;
2021
2022 /* perform autopsy on the slave link */
2013 ata_eh_link_autopsy(ap->slave_link); 2023 ata_eh_link_autopsy(ap->slave_link);
2014 2024
2025 /* transfer actions from slave to master and clear slave */
2015 ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS); 2026 ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
2016 mehc->i.action |= sehc->i.action; 2027 mehc->i.action |= sehc->i.action;
2017 mehc->i.dev_action[1] |= sehc->i.dev_action[1]; 2028 mehc->i.dev_action[1] |= sehc->i.dev_action[1];
@@ -2267,17 +2278,21 @@ int ata_eh_reset(struct ata_link *link, int classify,
2267 if (link->flags & ATA_LFLAG_NO_SRST) 2278 if (link->flags & ATA_LFLAG_NO_SRST)
2268 softreset = NULL; 2279 softreset = NULL;
2269 2280
2270 now = jiffies; 2281 /* make sure each reset attempt is at least COOL_DOWN apart */

2271 deadline = ata_deadline(ehc->last_reset, ATA_EH_RESET_COOL_DOWN); 2282 if (ehc->i.flags & ATA_EHI_DID_RESET) {
2272 if (time_before(now, deadline)) 2283 now = jiffies;
2273 schedule_timeout_uninterruptible(deadline - now); 2284 WARN_ON(time_after(ehc->last_reset, now));
2285 deadline = ata_deadline(ehc->last_reset,
2286 ATA_EH_RESET_COOL_DOWN);
2287 if (time_before(now, deadline))
2288 schedule_timeout_uninterruptible(deadline - now);
2289 }
2274 2290
2275 spin_lock_irqsave(ap->lock, flags); 2291 spin_lock_irqsave(ap->lock, flags);
2276 ap->pflags |= ATA_PFLAG_RESETTING; 2292 ap->pflags |= ATA_PFLAG_RESETTING;
2277 spin_unlock_irqrestore(ap->lock, flags); 2293 spin_unlock_irqrestore(ap->lock, flags);
2278 2294
2279 ata_eh_about_to_do(link, NULL, ATA_EH_RESET); 2295 ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
2280 ehc->last_reset = jiffies;
2281 2296
2282 ata_link_for_each_dev(dev, link) { 2297 ata_link_for_each_dev(dev, link) {
2283 /* If we issue an SRST then an ATA drive (not ATAPI) 2298 /* If we issue an SRST then an ATA drive (not ATAPI)
@@ -2365,7 +2380,6 @@ int ata_eh_reset(struct ata_link *link, int classify,
2365 /* 2380 /*
2366 * Perform reset 2381 * Perform reset
2367 */ 2382 */
2368 ehc->last_reset = jiffies;
2369 if (ata_is_host_link(link)) 2383 if (ata_is_host_link(link))
2370 ata_eh_freeze_port(ap); 2384 ata_eh_freeze_port(ap);
2371 2385
@@ -2377,6 +2391,7 @@ int ata_eh_reset(struct ata_link *link, int classify,
2377 reset == softreset ? "soft" : "hard"); 2391 reset == softreset ? "soft" : "hard");
2378 2392
2379 /* mark that this EH session started with reset */ 2393 /* mark that this EH session started with reset */
2394 ehc->last_reset = jiffies;
2380 if (reset == hardreset) 2395 if (reset == hardreset)
2381 ehc->i.flags |= ATA_EHI_DID_HARDRESET; 2396 ehc->i.flags |= ATA_EHI_DID_HARDRESET;
2382 else 2397 else
@@ -2447,14 +2462,14 @@ int ata_eh_reset(struct ata_link *link, int classify,
2447 dev->pio_mode = XFER_PIO_0; 2462 dev->pio_mode = XFER_PIO_0;
2448 dev->flags &= ~ATA_DFLAG_SLEEPING; 2463 dev->flags &= ~ATA_DFLAG_SLEEPING;
2449 2464
2450 if (ata_phys_link_offline(ata_dev_phys_link(dev))) 2465 if (!ata_phys_link_offline(ata_dev_phys_link(dev))) {
2451 continue; 2466 /* apply class override */
2452 2467 if (lflags & ATA_LFLAG_ASSUME_ATA)
2453 /* apply class override */ 2468 classes[dev->devno] = ATA_DEV_ATA;
2454 if (lflags & ATA_LFLAG_ASSUME_ATA) 2469 else if (lflags & ATA_LFLAG_ASSUME_SEMB)
2455 classes[dev->devno] = ATA_DEV_ATA; 2470 classes[dev->devno] = ATA_DEV_SEMB_UNSUP;
2456 else if (lflags & ATA_LFLAG_ASSUME_SEMB) 2471 } else
2457 classes[dev->devno] = ATA_DEV_SEMB_UNSUP; /* not yet */ 2472 classes[dev->devno] = ATA_DEV_NONE;
2458 } 2473 }
2459 2474
2460 /* record current link speed */ 2475 /* record current link speed */
@@ -2521,7 +2536,7 @@ int ata_eh_reset(struct ata_link *link, int classify,
2521 ata_eh_done(link, NULL, ATA_EH_RESET); 2536 ata_eh_done(link, NULL, ATA_EH_RESET);
2522 if (slave) 2537 if (slave)
2523 ata_eh_done(slave, NULL, ATA_EH_RESET); 2538 ata_eh_done(slave, NULL, ATA_EH_RESET);
2524 ehc->last_reset = jiffies; 2539 ehc->last_reset = jiffies; /* update to completion time */
2525 ehc->i.action |= ATA_EH_REVALIDATE; 2540 ehc->i.action |= ATA_EH_REVALIDATE;
2526 2541
2527 rc = 0; 2542 rc = 0;
@@ -2779,6 +2794,9 @@ int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
2779 2794
2780 /* if data transfer is verified, clear DUBIOUS_XFER on ering top */ 2795 /* if data transfer is verified, clear DUBIOUS_XFER on ering top */
2781 ata_link_for_each_dev(dev, link) { 2796 ata_link_for_each_dev(dev, link) {
2797 if (!ata_dev_enabled(dev))
2798 continue;
2799
2782 if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) { 2800 if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) {
2783 struct ata_ering_entry *ent; 2801 struct ata_ering_entry *ent;
2784 2802
@@ -2800,6 +2818,9 @@ int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
2800 u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno]; 2818 u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno];
2801 u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno)); 2819 u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno));
2802 2820
2821 if (!ata_dev_enabled(dev))
2822 continue;
2823
2803 if (dev->xfer_mode != saved_xfer_mode || 2824 if (dev->xfer_mode != saved_xfer_mode ||
2804 ata_ncq_enabled(dev) != saved_ncq) 2825 ata_ncq_enabled(dev) != saved_ncq)
2805 dev->flags |= ATA_DFLAG_DUBIOUS_XFER; 2826 dev->flags |= ATA_DFLAG_DUBIOUS_XFER;
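
The reset cool-down added in the libata-eh.c hunk above leans on the wrap-safe jiffies comparisons: time_before()/time_after() compare via a signed subtraction, so the deadline test keeps working even when the jiffies counter wraps. A small standalone model of the pattern (the macros are re-derived here for illustration; the real ones live in <linux/jiffies.h>, and the tick count is invented):

#include <stdio.h>

/* Wrap-safe comparisons, modelled on the kernel's jiffies macros. */
#define time_after(a, b)  ((long)((b) - (a)) < 0)
#define time_before(a, b) time_after(b, a)

#define RESET_COOL_DOWN 50UL	/* illustrative tick count */

int main(void)
{
	unsigned long last_reset = (unsigned long)-10;	/* just before a counter wrap */
	unsigned long now = 20;				/* just after the wrap */
	unsigned long deadline = last_reset + RESET_COOL_DOWN;	/* wraps to 40 */

	if (time_before(now, deadline))
		printf("sleep %lu ticks before resetting again\n", deadline - now);
	else
		printf("cool-down already satisfied\n");
	return 0;
}
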
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 5d312dc9be9f..47c7afcb36f2 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -190,7 +190,7 @@ static ssize_t ata_scsi_park_show(struct device *device,
190 struct ata_port *ap; 190 struct ata_port *ap;
191 struct ata_link *link; 191 struct ata_link *link;
192 struct ata_device *dev; 192 struct ata_device *dev;
193 unsigned long flags; 193 unsigned long flags, now;
194 unsigned int uninitialized_var(msecs); 194 unsigned int uninitialized_var(msecs);
195 int rc = 0; 195 int rc = 0;
196 196
@@ -208,10 +208,11 @@ static ssize_t ata_scsi_park_show(struct device *device,
208 } 208 }
209 209
210 link = dev->link; 210 link = dev->link;
211 now = jiffies;
211 if (ap->pflags & ATA_PFLAG_EH_IN_PROGRESS && 212 if (ap->pflags & ATA_PFLAG_EH_IN_PROGRESS &&
212 link->eh_context.unloaded_mask & (1 << dev->devno) && 213 link->eh_context.unloaded_mask & (1 << dev->devno) &&
213 time_after(dev->unpark_deadline, jiffies)) 214 time_after(dev->unpark_deadline, now))
214 msecs = jiffies_to_msecs(dev->unpark_deadline - jiffies); 215 msecs = jiffies_to_msecs(dev->unpark_deadline - now);
215 else 216 else
216 msecs = 0; 217 msecs = 0;
217 218
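
Reading jiffies once into the new `now` local keeps the deadline test and the milliseconds calculation consistent: with two separate reads, the deadline can pass between them and the unsigned subtraction wraps to a huge value. A trimmed-down model of that race (userspace C; the fake clock and helper names are invented, and `>` stands in for the real time_after() test):

#include <stdio.h>

static unsigned long fake_jiffies = 100;

static unsigned long read_clock(void)
{
	unsigned long v = fake_jiffies;
	fake_jiffies += 2;		/* the clock keeps ticking between reads */
	return v;
}

int main(void)
{
	unsigned long unpark_deadline = 101;
	unsigned long msecs;

	/* Racy version: the deadline test and the subtraction read the clock twice. */
	if (unpark_deadline > read_clock())		/* reads 100: deadline still ahead */
		msecs = unpark_deadline - read_clock();	/* reads 102: unsigned wrap-around */
	else
		msecs = 0;
	printf("two reads:    %lu\n", msecs);

	/* Fixed version: snapshot the clock once, as the patch does with `now`. */
	fake_jiffies = 100;
	unsigned long now = read_clock();
	msecs = unpark_deadline > now ? unpark_deadline - now : 0;
	printf("one snapshot: %lu\n", msecs);
	return 0;
}
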
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 2a4c516894f0..9033d164c4ec 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -1227,10 +1227,19 @@ fsm_start:
1227 /* ATA PIO protocol */ 1227 /* ATA PIO protocol */
1228 if (unlikely((status & ATA_DRQ) == 0)) { 1228 if (unlikely((status & ATA_DRQ) == 0)) {
1229 /* handle BSY=0, DRQ=0 as error */ 1229 /* handle BSY=0, DRQ=0 as error */
1230 if (likely(status & (ATA_ERR | ATA_DF))) 1230 if (likely(status & (ATA_ERR | ATA_DF))) {
1231 /* device stops HSM for abort/error */ 1231 /* device stops HSM for abort/error */
1232 qc->err_mask |= AC_ERR_DEV; 1232 qc->err_mask |= AC_ERR_DEV;
1233 else { 1233
1234 /* If diagnostic failed and this is
1235 * IDENTIFY, it's likely a phantom
1236 * device. Mark hint.
1237 */
1238 if (qc->dev->horkage &
1239 ATA_HORKAGE_DIAGNOSTIC)
1240 qc->err_mask |=
1241 AC_ERR_NODEV_HINT;
1242 } else {
1234 /* HSM violation. Let EH handle this. 1243 /* HSM violation. Let EH handle this.
1235 * Phantom devices also trigger this 1244 * Phantom devices also trigger this
1236 * condition. Mark hint. 1245 * condition. Mark hint.
@@ -2153,8 +2162,17 @@ void ata_sff_error_handler(struct ata_port *ap)
2153 */ 2162 */
2154void ata_sff_post_internal_cmd(struct ata_queued_cmd *qc) 2163void ata_sff_post_internal_cmd(struct ata_queued_cmd *qc)
2155{ 2164{
2156 if (qc->ap->ioaddr.bmdma_addr) 2165 struct ata_port *ap = qc->ap;
2166 unsigned long flags;
2167
2168 spin_lock_irqsave(ap->lock, flags);
2169
2170 ap->hsm_task_state = HSM_ST_IDLE;
2171
2172 if (ap->ioaddr.bmdma_addr)
2157 ata_bmdma_stop(qc); 2173 ata_bmdma_stop(qc);
2174
2175 spin_unlock_irqrestore(ap->lock, flags);
2158} 2176}
2159 2177
2160/** 2178/**
diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
index eb919c16a03e..e2e332d8ff95 100644
--- a/drivers/ata/pata_acpi.c
+++ b/drivers/ata/pata_acpi.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * ACPI PATA driver 2 * ACPI PATA driver
3 * 3 *
4 * (c) 2007 Red Hat <alan@redhat.com> 4 * (c) 2007 Red Hat
5 */ 5 */
6 6
7#include <linux/kernel.h> 7#include <linux/kernel.h>
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
index 5ca70fa1f587..73c466e452ca 100644
--- a/drivers/ata/pata_ali.c
+++ b/drivers/ata/pata_ali.c
@@ -1,7 +1,6 @@
1/* 1/*
2 * pata_ali.c - ALI 15x3 PATA for new ATA layer 2 * pata_ali.c - ALI 15x3 PATA for new ATA layer
3 * (C) 2005 Red Hat Inc 3 * (C) 2005 Red Hat Inc
4 * Alan Cox <alan@redhat.com>
5 * 4 *
6 * based in part upon 5 * based in part upon
7 * linux/drivers/ide/pci/alim15x3.c Version 0.17 2003/01/02 6 * linux/drivers/ide/pci/alim15x3.c Version 0.17 2003/01/02
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
index 57dd00f463d3..0ec9c7d9fe9d 100644
--- a/drivers/ata/pata_amd.c
+++ b/drivers/ata/pata_amd.c
@@ -1,7 +1,6 @@
1/* 1/*
2 * pata_amd.c - AMD PATA for new ATA layer 2 * pata_amd.c - AMD PATA for new ATA layer
3 * (C) 2005-2006 Red Hat Inc 3 * (C) 2005-2006 Red Hat Inc
4 * Alan Cox <alan@redhat.com>
5 * 4 *
6 * Based on pata-sil680. Errata information is taken from data sheets 5 * Based on pata-sil680. Errata information is taken from data sheets
7 * and the amd74xx.c driver by Vojtech Pavlik. Nvidia SATA devices are 6 * and the amd74xx.c driver by Vojtech Pavlik. Nvidia SATA devices are
diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
index 0f513bc11193..6b3092c75ffe 100644
--- a/drivers/ata/pata_artop.c
+++ b/drivers/ata/pata_artop.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * pata_artop.c - ARTOP ATA controller driver 2 * pata_artop.c - ARTOP ATA controller driver
3 * 3 *
4 * (C) 2006 Red Hat <alan@redhat.com> 4 * (C) 2006 Red Hat
5 * (C) 2007 Bartlomiej Zolnierkiewicz 5 * (C) 2007 Bartlomiej Zolnierkiewicz
6 * 6 *
7 * Based in part on drivers/ide/pci/aec62xx.c 7 * Based in part on drivers/ide/pci/aec62xx.c
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index e8a0d99d7356..0e2cde8f9973 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -1,7 +1,6 @@
1/* 1/*
2 * pata_atiixp.c - ATI PATA for new ATA layer 2 * pata_atiixp.c - ATI PATA for new ATA layer
3 * (C) 2005 Red Hat Inc 3 * (C) 2005 Red Hat Inc
4 * Alan Cox <alan@redhat.com>
5 * 4 *
6 * Based on 5 * Based on
7 * 6 *
diff --git a/drivers/ata/pata_cmd640.c b/drivers/ata/pata_cmd640.c
index 2de30b990278..34a394264c3d 100644
--- a/drivers/ata/pata_cmd640.c
+++ b/drivers/ata/pata_cmd640.c
@@ -1,7 +1,6 @@
1/* 1/*
2 * pata_cmd640.c - CMD640 PCI PATA for new ATA layer 2 * pata_cmd640.c - CMD640 PCI PATA for new ATA layer
3 * (C) 2007 Red Hat Inc 3 * (C) 2007 Red Hat Inc
4 * Alan Cox <alan@redhat.com>
5 * 4 *
6 * Based upon 5 * Based upon
7 * linux/drivers/ide/pci/cmd640.c Version 1.02 Sep 01, 1996 6 * linux/drivers/ide/pci/cmd640.c Version 1.02 Sep 01, 1996
diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
index ddd09b7d98c9..3167d8fed2f2 100644
--- a/drivers/ata/pata_cmd64x.c
+++ b/drivers/ata/pata_cmd64x.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * pata_cmd64x.c - CMD64x PATA for new ATA layer 2 * pata_cmd64x.c - CMD64x PATA for new ATA layer
3 * (C) 2005 Red Hat Inc 3 * (C) 2005 Red Hat Inc
4 * Alan Cox <alan@redhat.com> 4 * Alan Cox <alan@lxorguk.ukuu.org.uk>
5 * 5 *
6 * Based upon 6 * Based upon
7 * linux/drivers/ide/pci/cmd64x.c Version 1.30 Sept 10, 2002 7 * linux/drivers/ide/pci/cmd64x.c Version 1.30 Sept 10, 2002
diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c
index 0c4b271a9d5a..bba453381f44 100644
--- a/drivers/ata/pata_cs5530.c
+++ b/drivers/ata/pata_cs5530.c
@@ -1,7 +1,6 @@
1/* 1/*
2 * pata-cs5530.c - CS5530 PATA for new ATA layer 2 * pata-cs5530.c - CS5530 PATA for new ATA layer
3 * (C) 2005 Red Hat Inc 3 * (C) 2005 Red Hat Inc
4 * Alan Cox <alan@redhat.com>
5 * 4 *
6 * based upon cs5530.c by Mark Lord. 5 * based upon cs5530.c by Mark Lord.
7 * 6 *
diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c
index f1b6556f0483..8b236af84c2e 100644
--- a/drivers/ata/pata_cs5535.c
+++ b/drivers/ata/pata_cs5535.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * pata-cs5535.c - CS5535 PATA for new ATA layer 2 * pata-cs5535.c - CS5535 PATA for new ATA layer
3 * (C) 2005-2006 Red Hat Inc 3 * (C) 2005-2006 Red Hat Inc
4 * Alan Cox <alan@redhat.com> 4 * Alan Cox <alan@lxorguk.ukuu.org.uk>
5 * 5 *
6 * based upon cs5535.c from AMD <Jens.Altmann@amd.com> as cleaned up and 6 * based upon cs5535.c from AMD <Jens.Altmann@amd.com> as cleaned up and
7 * made readable and Linux style by Wolfgang Zuleger <wolfgang.zuleger@gmx.de 7 * made readable and Linux style by Wolfgang Zuleger <wolfgang.zuleger@gmx.de
@@ -72,7 +72,6 @@
72/** 72/**
73 * cs5535_cable_detect - detect cable type 73 * cs5535_cable_detect - detect cable type
74 * @ap: Port to detect on 74 * @ap: Port to detect on
75 * @deadline: deadline jiffies for the operation
76 * 75 *
77 * Perform cable detection for ATA66 capable cable. Return a libata 76 * Perform cable detection for ATA66 capable cable. Return a libata
78 * cable type. 77 * cable type.
diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
index 73f8332cb679..afed92976198 100644
--- a/drivers/ata/pata_cs5536.c
+++ b/drivers/ata/pata_cs5536.c
@@ -110,7 +110,6 @@ static inline int cs5536_write(struct pci_dev *pdev, int reg, int val)
110/** 110/**
111 * cs5536_cable_detect - detect cable type 111 * cs5536_cable_detect - detect cable type
112 * @ap: Port to detect on 112 * @ap: Port to detect on
113 * @deadline: deadline jiffies for the operation
114 * 113 *
115 * Perform cable detection for ATA66 capable cable. Return a libata 114 * Perform cable detection for ATA66 capable cable. Return a libata
116 * cable type. 115 * cable type.
diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c
index 2ff62608ae37..d546425cd380 100644
--- a/drivers/ata/pata_cypress.c
+++ b/drivers/ata/pata_cypress.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * pata_cypress.c - Cypress PATA for new ATA layer 2 * pata_cypress.c - Cypress PATA for new ATA layer
3 * (C) 2006 Red Hat Inc 3 * (C) 2006 Red Hat Inc
4 * Alan Cox <alan@redhat.com> 4 * Alan Cox
5 * 5 *
6 * Based heavily on 6 * Based heavily on
7 * linux/drivers/ide/pci/cy82c693.c Version 0.40 Sep. 10, 2002 7 * linux/drivers/ide/pci/cy82c693.c Version 0.40 Sep. 10, 2002
diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
index 9fba82976ba6..ac6392ea35b0 100644
--- a/drivers/ata/pata_efar.c
+++ b/drivers/ata/pata_efar.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * pata_efar.c - EFAR PIIX clone controller driver 2 * pata_efar.c - EFAR PIIX clone controller driver
3 * 3 *
4 * (C) 2005 Red Hat <alan@redhat.com> 4 * (C) 2005 Red Hat
5 * 5 *
6 * Some parts based on ata_piix.c by Jeff Garzik and others. 6 * Some parts based on ata_piix.c by Jeff Garzik and others.
7 * 7 *
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index f2b83eabc7c7..e0c4f05d7d57 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -183,7 +183,9 @@ static unsigned long hpt366_filter(struct ata_device *adev, unsigned long mask)
183 mask &= ~(0xF8 << ATA_SHIFT_UDMA); 183 mask &= ~(0xF8 << ATA_SHIFT_UDMA);
184 if (hpt_dma_blacklisted(adev, "UDMA4", bad_ata66_4)) 184 if (hpt_dma_blacklisted(adev, "UDMA4", bad_ata66_4))
185 mask &= ~(0xF0 << ATA_SHIFT_UDMA); 185 mask &= ~(0xF0 << ATA_SHIFT_UDMA);
186 } 186 } else if (adev->class == ATA_DEV_ATAPI)
187 mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
188
187 return ata_bmdma_mode_filter(adev, mask); 189 return ata_bmdma_mode_filter(adev, mask);
188} 190}
189 191
@@ -211,11 +213,15 @@ static u32 hpt36x_find_mode(struct ata_port *ap, int speed)
211 213
212static int hpt36x_cable_detect(struct ata_port *ap) 214static int hpt36x_cable_detect(struct ata_port *ap)
213{ 215{
214 u8 ata66;
215 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 216 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
217 u8 ata66;
216 218
219 /*
220 * Each channel of pata_hpt366 occupies a separate PCI function
221 * as the primary channel, and bit 1 indicates the cable type.
222 */
217 pci_read_config_byte(pdev, 0x5A, &ata66); 223 pci_read_config_byte(pdev, 0x5A, &ata66);
218 if (ata66 & (1 << ap->port_no)) 224 if (ata66 & 2)
219 return ATA_CBL_PATA40; 225 return ATA_CBL_PATA40;
220 return ATA_CBL_PATA80; 226 return ATA_CBL_PATA80;
221} 227}
@@ -382,10 +388,10 @@ static int hpt36x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
382 /* PCI clocking determines the ATA timing values to use */ 388 /* PCI clocking determines the ATA timing values to use */
383 /* info_hpt366 is safe against re-entry so we can scribble on it */ 389 /* info_hpt366 is safe against re-entry so we can scribble on it */
384 switch((reg1 & 0x700) >> 8) { 390 switch((reg1 & 0x700) >> 8) {
385 case 5: 391 case 9:
386 hpriv = &hpt366_40; 392 hpriv = &hpt366_40;
387 break; 393 break;
388 case 9: 394 case 5:
389 hpriv = &hpt366_25; 395 hpriv = &hpt366_25;
390 break; 396 break;
391 default: 397 default:
diff --git a/drivers/ata/pata_isapnp.c b/drivers/ata/pata_isapnp.c
index 6a111baab523..15cdb9148aab 100644
--- a/drivers/ata/pata_isapnp.c
+++ b/drivers/ata/pata_isapnp.c
@@ -1,7 +1,7 @@
1 1
2/* 2/*
3 * pata-isapnp.c - ISA PnP PATA controller driver. 3 * pata-isapnp.c - ISA PnP PATA controller driver.
4 * Copyright 2005/2006 Red Hat Inc <alan@redhat.com>, all rights reserved. 4 * Copyright 2005/2006 Red Hat Inc, all rights reserved.
5 * 5 *
6 * Based in part on ide-pnp.c by Andrey Panin <pazke@donpac.ru> 6 * Based in part on ide-pnp.c by Andrey Panin <pazke@donpac.ru>
7 */ 7 */
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
index 0221c9a46769..860ede526282 100644
--- a/drivers/ata/pata_it821x.c
+++ b/drivers/ata/pata_it821x.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * pata_it821x.c - IT821x PATA for new ATA layer 2 * pata_it821x.c - IT821x PATA for new ATA layer
3 * (C) 2005 Red Hat Inc 3 * (C) 2005 Red Hat Inc
4 * Alan Cox <alan@redhat.com> 4 * Alan Cox <alan@lxorguk.ukuu.org.uk>
5 * (C) 2007 Bartlomiej Zolnierkiewicz 5 * (C) 2007 Bartlomiej Zolnierkiewicz
6 * 6 *
7 * based upon 7 * based upon
@@ -10,7 +10,7 @@
10 * 10 *
11 * linux/drivers/ide/pci/it821x.c Version 0.09 December 2004 11 * linux/drivers/ide/pci/it821x.c Version 0.09 December 2004
12 * 12 *
13 * Copyright (C) 2004 Red Hat <alan@redhat.com> 13 * Copyright (C) 2004 Red Hat
14 * 14 *
15 * May be copied or modified under the terms of the GNU General Public License 15 * May be copied or modified under the terms of the GNU General Public License
16 * Based in part on the ITE vendor provided SCSI driver. 16 * Based in part on the ITE vendor provided SCSI driver.
@@ -557,9 +557,8 @@ static unsigned int it821x_read_id(struct ata_device *adev,
557 if (strstr(model_num, "Integrated Technology Express")) { 557 if (strstr(model_num, "Integrated Technology Express")) {
558 /* Set feature bits the firmware neglects */ 558 /* Set feature bits the firmware neglects */
559 id[49] |= 0x0300; /* LBA, DMA */ 559 id[49] |= 0x0300; /* LBA, DMA */
560 id[82] |= 0x0400; /* LBA48 */
561 id[83] &= 0x7FFF; 560 id[83] &= 0x7FFF;
562 id[83] |= 0x4000; /* Word 83 is valid */ 561 id[83] |= 0x4400; /* Word 83 is valid and LBA48 */
563 id[86] |= 0x0400; /* LBA48 on */ 562 id[86] |= 0x0400; /* LBA48 on */
564 id[ATA_ID_MAJOR_VER] |= 0x1F; 563 id[ATA_ID_MAJOR_VER] |= 0x1F;
565 } 564 }
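
The replacement value 0x4400 folds the old two-step update into one write to IDENTIFY word 83: bit 14 marks the word as valid and bit 10 advertises the 48-bit LBA feature set, so clearing bit 15 and OR-ing 0x4400 leaves a well-formed word. A quick check of the bit arithmetic (standalone C, example raw value invented):

#include <stdio.h>

int main(void)
{
	unsigned short word83 = 0x8001;		/* example raw value from the device */

	word83 &= 0x7FFF;			/* bit 15 must be 0 for the word to be valid */
	word83 |= (1 << 14) | (1 << 10);	/* bit 14: word valid, bit 10: LBA48 supported */

	printf("word83=0x%04X  OR mask=0x%04X\n", word83, (1 << 14) | (1 << 10));
	return 0;				/* prints word83=0x4401  OR mask=0x4400 */
}
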
diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
index 73b7596816b4..38cf1ab2d289 100644
--- a/drivers/ata/pata_jmicron.c
+++ b/drivers/ata/pata_jmicron.c
@@ -4,7 +4,7 @@
4 * driven by AHCI in the usual configuration although 4 * driven by AHCI in the usual configuration although
5 * this driver can handle other setups if we need it. 5 * this driver can handle other setups if we need it.
6 * 6 *
7 * (c) 2006 Red Hat <alan@redhat.com> 7 * (c) 2006 Red Hat
8 */ 8 */
9 9
10#include <linux/kernel.h> 10#include <linux/kernel.h>
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
index bc037ffce200..930c2208640b 100644
--- a/drivers/ata/pata_legacy.c
+++ b/drivers/ata/pata_legacy.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * pata-legacy.c - Legacy port PATA/SATA controller driver. 2 * pata-legacy.c - Legacy port PATA/SATA controller driver.
3 * Copyright 2005/2006 Red Hat <alan@redhat.com>, all rights reserved. 3 * Copyright 2005/2006 Red Hat, all rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
index 0d87eec84966..76e399bf8c1b 100644
--- a/drivers/ata/pata_marvell.c
+++ b/drivers/ata/pata_marvell.c
@@ -5,7 +5,7 @@
5 * isn't making full use of the device functionality but it is 5 * isn't making full use of the device functionality but it is
6 * easy to get working. 6 * easy to get working.
7 * 7 *
8 * (c) 2006 Red Hat <alan@redhat.com> 8 * (c) 2006 Red Hat
9 */ 9 */
10 10
11#include <linux/kernel.h> 11#include <linux/kernel.h>
diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c
index 7d7e3fdab71f..7c8faa48b5f3 100644
--- a/drivers/ata/pata_mpiix.c
+++ b/drivers/ata/pata_mpiix.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * pata_mpiix.c - Intel MPIIX PATA for new ATA layer 2 * pata_mpiix.c - Intel MPIIX PATA for new ATA layer
3 * (C) 2005-2006 Red Hat Inc 3 * (C) 2005-2006 Red Hat Inc
4 * Alan Cox <alan@redhat.com> 4 * Alan Cox <alan@lxorguk.ukuu.org.uk>
5 * 5 *
6 * The MPIIX is different enough to the PIIX4 and friends that we give it 6 * The MPIIX is different enough to the PIIX4 and friends that we give it
7 * a separate driver. The old ide/pci code handles this by just not tuning 7 * a separate driver. The old ide/pci code handles this by just not tuning
diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c
index d9719c8b9dbe..9dc05e1656a8 100644
--- a/drivers/ata/pata_netcell.c
+++ b/drivers/ata/pata_netcell.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * pata_netcell.c - Netcell PATA driver 2 * pata_netcell.c - Netcell PATA driver
3 * 3 *
4 * (c) 2006 Red Hat <alan@redhat.com> 4 * (c) 2006 Red Hat
5 */ 5 */
6 6
7#include <linux/kernel.h> 7#include <linux/kernel.h>
diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c
index 565e67cd13fa..4dd9a3b031e4 100644
--- a/drivers/ata/pata_ninja32.c
+++ b/drivers/ata/pata_ninja32.c
@@ -1,7 +1,6 @@
1/* 1/*
2 * pata_ninja32.c - Ninja32 PATA for new ATA layer 2 * pata_ninja32.c - Ninja32 PATA for new ATA layer
3 * (C) 2007 Red Hat Inc 3 * (C) 2007 Red Hat Inc
4 * Alan Cox <alan@redhat.com>
5 * 4 *
6 * Note: The controller like many controllers has shared timings for 5 * Note: The controller like many controllers has shared timings for
7 * PIO and DMA. We thus flip to the DMA timings in dma_start and flip back 6 * PIO and DMA. We thus flip to the DMA timings in dma_start and flip back
@@ -45,7 +44,7 @@
45#include <linux/libata.h> 44#include <linux/libata.h>
46 45
47#define DRV_NAME "pata_ninja32" 46#define DRV_NAME "pata_ninja32"
48#define DRV_VERSION "0.0.1" 47#define DRV_VERSION "0.1.3"
49 48
50 49
51/** 50/**
@@ -89,6 +88,17 @@ static struct ata_port_operations ninja32_port_ops = {
89 .set_piomode = ninja32_set_piomode, 88 .set_piomode = ninja32_set_piomode,
90}; 89};
91 90
91static void ninja32_program(void __iomem *base)
92{
93 iowrite8(0x05, base + 0x01); /* Enable interrupt lines */
94 iowrite8(0xBE, base + 0x02); /* Burst, ?? setup */
95 iowrite8(0x01, base + 0x03); /* Unknown */
96 iowrite8(0x20, base + 0x04); /* WAIT0 */
97 iowrite8(0x8f, base + 0x05); /* Unknown */
98 iowrite8(0xa4, base + 0x1c); /* Unknown */
99 iowrite8(0x83, base + 0x1d); /* BMDMA control: WAIT0 */
100}
101
92static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id) 102static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id)
93{ 103{
94 struct ata_host *host; 104 struct ata_host *host;
@@ -120,7 +130,8 @@ static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id)
120 return rc; 130 return rc;
121 pci_set_master(dev); 131 pci_set_master(dev);
122 132
123 /* Set up the register mappings */ 133 /* Set up the register mappings. We use the I/O mapping as only the
134 older chips also have MMIO on BAR 1 */
124 base = host->iomap[0]; 135 base = host->iomap[0];
125 if (!base) 136 if (!base)
126 return -ENOMEM; 137 return -ENOMEM;
@@ -134,21 +145,35 @@ static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id)
134 ap->ioaddr.bmdma_addr = base; 145 ap->ioaddr.bmdma_addr = base;
135 ata_sff_std_ports(&ap->ioaddr); 146 ata_sff_std_ports(&ap->ioaddr);
136 147
137 iowrite8(0x05, base + 0x01); /* Enable interrupt lines */ 148 ninja32_program(base);
138 iowrite8(0xBE, base + 0x02); /* Burst, ?? setup */
139 iowrite8(0x01, base + 0x03); /* Unknown */
140 iowrite8(0x20, base + 0x04); /* WAIT0 */
141 iowrite8(0x8f, base + 0x05); /* Unknown */
142 iowrite8(0xa4, base + 0x1c); /* Unknown */
143 iowrite8(0x83, base + 0x1d); /* BMDMA control: WAIT0 */
144 /* FIXME: Should we disable them at remove ? */ 149 /* FIXME: Should we disable them at remove ? */
145 return ata_host_activate(host, dev->irq, ata_sff_interrupt, 150 return ata_host_activate(host, dev->irq, ata_sff_interrupt,
146 IRQF_SHARED, &ninja32_sht); 151 IRQF_SHARED, &ninja32_sht);
147} 152}
148 153
154#ifdef CONFIG_PM
155
156static int ninja32_reinit_one(struct pci_dev *pdev)
157{
158 struct ata_host *host = dev_get_drvdata(&pdev->dev);
159 int rc;
160
161 rc = ata_pci_device_do_resume(pdev);
162 if (rc)
163 return rc;
164 ninja32_program(host->iomap[0]);
165 ata_host_resume(host);
166 return 0;
167}
168#endif
169
149static const struct pci_device_id ninja32[] = { 170static const struct pci_device_id ninja32[] = {
171 { 0x10FC, 0x0003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
172 { 0x1145, 0x8008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
173 { 0x1145, 0xf008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
150 { 0x1145, 0xf021, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 174 { 0x1145, 0xf021, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
151 { 0x1145, 0xf024, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 175 { 0x1145, 0xf024, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
176 { 0x1145, 0xf02C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
152 { }, 177 { },
153}; 178};
154 179
@@ -156,7 +181,11 @@ static struct pci_driver ninja32_pci_driver = {
156 .name = DRV_NAME, 181 .name = DRV_NAME,
157 .id_table = ninja32, 182 .id_table = ninja32,
158 .probe = ninja32_init_one, 183 .probe = ninja32_init_one,
159 .remove = ata_pci_remove_one 184 .remove = ata_pci_remove_one,
185#ifdef CONFIG_PM
186 .suspend = ata_pci_device_suspend,
187 .resume = ninja32_reinit_one,
188#endif
160}; 189};
161 190
162static int __init ninja32_init(void) 191static int __init ninja32_init(void)
diff --git a/drivers/ata/pata_ns87410.c b/drivers/ata/pata_ns87410.c
index be756b7ef07e..40d411c460de 100644
--- a/drivers/ata/pata_ns87410.c
+++ b/drivers/ata/pata_ns87410.c
@@ -1,7 +1,6 @@
1/* 1/*
2 * pata_ns87410.c - National Semiconductor 87410 PATA for new ATA layer 2 * pata_ns87410.c - National Semiconductor 87410 PATA for new ATA layer
3 * (C) 2006 Red Hat Inc 3 * (C) 2006 Red Hat Inc
4 * Alan Cox <alan@redhat.com>
5 * 4 *
6 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c
index e0aa7eaaee0a..89bf5f865d6a 100644
--- a/drivers/ata/pata_ns87415.c
+++ b/drivers/ata/pata_ns87415.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * pata_ns87415.c - NS87415 (non PARISC) PATA 2 * pata_ns87415.c - NS87415 (non PARISC) PATA
3 * 3 *
4 * (C) 2005 Red Hat <alan@redhat.com> 4 * (C) 2005 Red Hat <alan@lxorguk.ukuu.org.uk>
5 * 5 *
6 * This is a fairly generic MWDMA controller. It has some limitations 6 * This is a fairly generic MWDMA controller. It has some limitations
7 * as it requires timing reloads on PIO/DMA transitions but it is otherwise 7 * as it requires timing reloads on PIO/DMA transitions but it is otherwise
diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
index df64f2443001..c0dbc46a348e 100644
--- a/drivers/ata/pata_oldpiix.c
+++ b/drivers/ata/pata_oldpiix.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * pata_oldpiix.c - Intel PATA/SATA controllers 2 * pata_oldpiix.c - Intel PATA/SATA controllers
3 * 3 *
4 * (C) 2005 Red Hat <alan@redhat.com> 4 * (C) 2005 Red Hat
5 * 5 *
6 * Some parts based on ata_piix.c by Jeff Garzik and others. 6 * Some parts based on ata_piix.c by Jeff Garzik and others.
7 * 7 *
diff --git a/drivers/ata/pata_opti.c b/drivers/ata/pata_opti.c
index fb2cf661b0e8..e4fa4d565e96 100644
--- a/drivers/ata/pata_opti.c
+++ b/drivers/ata/pata_opti.c
@@ -1,7 +1,6 @@
1/* 1/*
2 * pata_opti.c - ATI PATA for new ATA layer 2 * pata_opti.c - ATI PATA for new ATA layer
3 * (C) 2005 Red Hat Inc 3 * (C) 2005 Red Hat Inc
4 * Alan Cox <alan@redhat.com>
5 * 4 *
6 * Based on 5 * Based on
7 * linux/drivers/ide/pci/opti621.c Version 0.7 Sept 10, 2002 6 * linux/drivers/ide/pci/opti621.c Version 0.7 Sept 10, 2002
diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c
index 4cd744456313..93bb6e91973f 100644
--- a/drivers/ata/pata_optidma.c
+++ b/drivers/ata/pata_optidma.c
@@ -1,7 +1,6 @@
1/* 1/*
2 * pata_optidma.c - Opti DMA PATA for new ATA layer 2 * pata_optidma.c - Opti DMA PATA for new ATA layer
3 * (C) 2006 Red Hat Inc 3 * (C) 2006 Red Hat Inc
4 * Alan Cox <alan@redhat.com>
5 * 4 *
6 * The Opti DMA controllers are related to the older PIO PCI controllers 5 * The Opti DMA controllers are related to the older PIO PCI controllers
7 * and indeed the VLB ones. The main differences are that the timing 6 * and indeed the VLB ones. The main differences are that the timing
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
index 02b596b9cf6a..64b2e2281ee7 100644
--- a/drivers/ata/pata_pcmcia.c
+++ b/drivers/ata/pata_pcmcia.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * pata_pcmcia.c - PCMCIA PATA controller driver. 2 * pata_pcmcia.c - PCMCIA PATA controller driver.
3 * Copyright 2005-2006 Red Hat Inc <alan@redhat.com>, all rights reserved. 3 * Copyright 2005-2006 Red Hat Inc, all rights reserved.
4 * PCMCIA ident update Copyright 2006 Marcin Juszkiewicz 4 * PCMCIA ident update Copyright 2006 Marcin Juszkiewicz
5 * <openembedded@hrw.one.pl> 5 * <openembedded@hrw.one.pl>
6 * 6 *
@@ -416,6 +416,7 @@ static struct pcmcia_device_id pcmcia_devices[] = {
416 PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209), 416 PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209),
417 PCMCIA_DEVICE_PROD_ID12("STI", "Flash 5.0", 0xbf2df18d, 0x8cb57a0e), 417 PCMCIA_DEVICE_PROD_ID12("STI", "Flash 5.0", 0xbf2df18d, 0x8cb57a0e),
418 PCMCIA_MFC_DEVICE_PROD_ID12(1, "SanDisk", "ConnectPlus", 0x7a954bd9, 0x74be00c6), 418 PCMCIA_MFC_DEVICE_PROD_ID12(1, "SanDisk", "ConnectPlus", 0x7a954bd9, 0x74be00c6),
419 PCMCIA_DEVICE_PROD_ID2("Flash Card", 0x5a362506),
419 PCMCIA_DEVICE_NULL, 420 PCMCIA_DEVICE_NULL,
420}; 421};
421 422
diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
index d2673060bc8d..799a6a098712 100644
--- a/drivers/ata/pata_pdc202xx_old.c
+++ b/drivers/ata/pata_pdc202xx_old.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * pata_pdc202xx_old.c - Promise PDC202xx PATA for new ATA layer 2 * pata_pdc202xx_old.c - Promise PDC202xx PATA for new ATA layer
3 * (C) 2005 Red Hat Inc 3 * (C) 2005 Red Hat Inc
4 * Alan Cox <alan@redhat.com> 4 * Alan Cox <alan@lxorguk.ukuu.org.uk>
5 * (C) 2007 Bartlomiej Zolnierkiewicz 5 * (C) 2007 Bartlomiej Zolnierkiewicz
6 * 6 *
7 * Based in part on linux/drivers/ide/pci/pdc202xx_old.c 7 * Based in part on linux/drivers/ide/pci/pdc202xx_old.c
diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
index 8f65ad61b8af..77e4e3b17f54 100644
--- a/drivers/ata/pata_platform.c
+++ b/drivers/ata/pata_platform.c
@@ -5,7 +5,7 @@
5 * 5 *
6 * Based on pata_pcmcia: 6 * Based on pata_pcmcia:
7 * 7 *
8 * Copyright 2005-2006 Red Hat Inc <alan@redhat.com>, all rights reserved. 8 * Copyright 2005-2006 Red Hat Inc, all rights reserved.
9 * 9 *
10 * This file is subject to the terms and conditions of the GNU General Public 10 * This file is subject to the terms and conditions of the GNU General Public
11 * License. See the file "COPYING" in the main directory of this archive 11 * License. See the file "COPYING" in the main directory of this archive
diff --git a/drivers/ata/pata_qdi.c b/drivers/ata/pata_qdi.c
index 63b7a1c165a5..3080f371222c 100644
--- a/drivers/ata/pata_qdi.c
+++ b/drivers/ata/pata_qdi.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * pata_qdi.c - QDI VLB ATA controllers 2 * pata_qdi.c - QDI VLB ATA controllers
3 * (C) 2006 Red Hat <alan@redhat.com> 3 * (C) 2006 Red Hat
4 * 4 *
5 * This driver mostly exists as a proof of concept for non PCI devices under 5 * This driver mostly exists as a proof of concept for non PCI devices under
6 * libata. While the QDI6580 was 'neat' in 1993 it is no longer terribly 6 * libata. While the QDI6580 was 'neat' in 1993 it is no longer terribly
diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
index 1c0d9fa7ee54..0b0aa452de14 100644
--- a/drivers/ata/pata_radisys.c
+++ b/drivers/ata/pata_radisys.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * pata_radisys.c - Intel PATA/SATA controllers 2 * pata_radisys.c - Intel PATA/SATA controllers
3 * 3 *
4 * (C) 2006 Red Hat <alan@redhat.com> 4 * (C) 2006 Red Hat <alan@lxorguk.ukuu.org.uk>
5 * 5 *
6 * Some parts based on ata_piix.c by Jeff Garzik and others. 6 * Some parts based on ata_piix.c by Jeff Garzik and others.
7 * 7 *
diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
index f8b3ffc8ae9e..c2e6fb9f2ef9 100644
--- a/drivers/ata/pata_rb532_cf.c
+++ b/drivers/ata/pata_rb532_cf.c
@@ -39,9 +39,11 @@
39#define RB500_CF_MAXPORTS 1 39#define RB500_CF_MAXPORTS 1
40#define RB500_CF_IO_DELAY 400 40#define RB500_CF_IO_DELAY 400
41 41
42#define RB500_CF_REG_CMD 0x0800 42#define RB500_CF_REG_BASE 0x0800
43#define RB500_CF_REG_ERR 0x080D
43#define RB500_CF_REG_CTRL 0x080E 44#define RB500_CF_REG_CTRL 0x080E
44#define RB500_CF_REG_DATA 0x0C00 45/* 32bit buffered data register offset */
46#define RB500_CF_REG_DBUF32 0x0C00
45 47
46struct rb532_cf_info { 48struct rb532_cf_info {
47 void __iomem *iobase; 49 void __iomem *iobase;
@@ -72,11 +74,12 @@ static void rb532_pata_exec_command(struct ata_port *ap,
72 rb532_pata_finish_io(ap); 74 rb532_pata_finish_io(ap);
73} 75}
74 76
75static void rb532_pata_data_xfer(struct ata_device *adev, unsigned char *buf, 77static unsigned int rb532_pata_data_xfer(struct ata_device *adev, unsigned char *buf,
76 unsigned int buflen, int write_data) 78 unsigned int buflen, int write_data)
77{ 79{
78 struct ata_port *ap = adev->link->ap; 80 struct ata_port *ap = adev->link->ap;
79 void __iomem *ioaddr = ap->ioaddr.data_addr; 81 void __iomem *ioaddr = ap->ioaddr.data_addr;
82 int retlen = buflen;
80 83
81 if (write_data) { 84 if (write_data) {
82 for (; buflen > 0; buflen--, buf++) 85 for (; buflen > 0; buflen--, buf++)
@@ -87,6 +90,7 @@ static void rb532_pata_data_xfer(struct ata_device *adev, unsigned char *buf,
87 } 90 }
88 91
89 rb532_pata_finish_io(adev->link->ap); 92 rb532_pata_finish_io(adev->link->ap);
93 return retlen;
90} 94}
91 95
92static void rb532_pata_freeze(struct ata_port *ap) 96static void rb532_pata_freeze(struct ata_port *ap)
@@ -146,13 +150,14 @@ static void rb532_pata_setup_ports(struct ata_host *ah)
146 ap->pio_mask = 0x1f; /* PIO4 */ 150 ap->pio_mask = 0x1f; /* PIO4 */
147 ap->flags = ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO; 151 ap->flags = ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO;
148 152
149 ap->ioaddr.cmd_addr = info->iobase + RB500_CF_REG_CMD; 153 ap->ioaddr.cmd_addr = info->iobase + RB500_CF_REG_BASE;
150 ap->ioaddr.ctl_addr = info->iobase + RB500_CF_REG_CTRL; 154 ap->ioaddr.ctl_addr = info->iobase + RB500_CF_REG_CTRL;
151 ap->ioaddr.altstatus_addr = info->iobase + RB500_CF_REG_CTRL; 155 ap->ioaddr.altstatus_addr = info->iobase + RB500_CF_REG_CTRL;
152 156
153 ata_sff_std_ports(&ap->ioaddr); 157 ata_sff_std_ports(&ap->ioaddr);
154 158
155 ap->ioaddr.data_addr = info->iobase + RB500_CF_REG_DATA; 159 ap->ioaddr.data_addr = info->iobase + RB500_CF_REG_DBUF32;
160 ap->ioaddr.error_addr = info->iobase + RB500_CF_REG_ERR;
156} 161}
157 162
158static __devinit int rb532_pata_driver_probe(struct platform_device *pdev) 163static __devinit int rb532_pata_driver_probe(struct platform_device *pdev)
diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c
index 0278fd2b8fb1..9a4bdca54616 100644
--- a/drivers/ata/pata_sc1200.c
+++ b/drivers/ata/pata_sc1200.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * New ATA layer SC1200 driver Alan Cox <alan@redhat.com> 2 * New ATA layer SC1200 driver Alan Cox <alan@lxorguk.ukuu.org.uk>
3 * 3 *
4 * TODO: Mode selection filtering 4 * TODO: Mode selection filtering
5 * TODO: Can't enable second channel until ATA core has serialize 5 * TODO: Can't enable second channel until ATA core has serialize
diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
index 16673d168573..cf3707e516a2 100644
--- a/drivers/ata/pata_scc.c
+++ b/drivers/ata/pata_scc.c
@@ -8,7 +8,7 @@
8 * Copyright 2003-2005 Jeff Garzik 8 * Copyright 2003-2005 Jeff Garzik
9 * Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer 9 * Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer
10 * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org> 10 * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
11 * Copyright (C) 2003 Red Hat Inc <alan@redhat.com> 11 * Copyright (C) 2003 Red Hat Inc
12 * 12 *
13 * and drivers/ata/ahci.c: 13 * and drivers/ata/ahci.c:
14 * Copyright 2004-2005 Red Hat, Inc. 14 * Copyright 2004-2005 Red Hat, Inc.
diff --git a/drivers/ata/pata_sch.c b/drivers/ata/pata_sch.c
index c8cc027789fe..6aeeeeb34124 100644
--- a/drivers/ata/pata_sch.c
+++ b/drivers/ata/pata_sch.c
@@ -83,7 +83,7 @@ static struct ata_port_operations sch_pata_ops = {
83}; 83};
84 84
85static struct ata_port_info sch_port_info = { 85static struct ata_port_info sch_port_info = {
86 .flags = 0, 86 .flags = ATA_FLAG_SLAVE_POSS,
87 .pio_mask = ATA_PIO4, /* pio0-4 */ 87 .pio_mask = ATA_PIO4, /* pio0-4 */
88 .mwdma_mask = ATA_MWDMA2, /* mwdma0-2 */ 88 .mwdma_mask = ATA_MWDMA2, /* mwdma0-2 */
89 .udma_mask = ATA_UDMA5, /* udma0-5 */ 89 .udma_mask = ATA_UDMA5, /* udma0-5 */
diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
index ffd26d0dc50d..72e41c9f969b 100644
--- a/drivers/ata/pata_serverworks.c
+++ b/drivers/ata/pata_serverworks.c
@@ -1,7 +1,6 @@
1/* 1/*
2 * pata_serverworks.c - Serverworks PATA for new ATA layer 2 * pata_serverworks.c - Serverworks PATA for new ATA layer
3 * (C) 2005 Red Hat Inc 3 * (C) 2005 Red Hat Inc
4 * Alan Cox <alan@redhat.com>
5 * 4 *
6 * based upon 5 * based upon
7 * 6 *
diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
index a598bb36aafc..83580a59db58 100644
--- a/drivers/ata/pata_sil680.c
+++ b/drivers/ata/pata_sil680.c
@@ -1,7 +1,6 @@
1/* 1/*
2 * pata_sil680.c - SIL680 PATA for new ATA layer 2 * pata_sil680.c - SIL680 PATA for new ATA layer
3 * (C) 2005 Red Hat Inc 3 * (C) 2005 Red Hat Inc
4 * Alan Cox <alan@redhat.com>
5 * 4 *
6 * based upon 5 * based upon
7 * 6 *
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
index 26345d7b531c..e4be55e047f6 100644
--- a/drivers/ata/pata_sis.c
+++ b/drivers/ata/pata_sis.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * pata_sis.c - SiS ATA driver 2 * pata_sis.c - SiS ATA driver
3 * 3 *
4 * (C) 2005 Red Hat <alan@redhat.com> 4 * (C) 2005 Red Hat
5 * (C) 2007 Bartlomiej Zolnierkiewicz 5 * (C) 2007 Bartlomiej Zolnierkiewicz
6 * 6 *
7 * Based upon linux/drivers/ide/pci/sis5513.c 7 * Based upon linux/drivers/ide/pci/sis5513.c
@@ -56,7 +56,6 @@ static const struct sis_laptop sis_laptop[] = {
56 { 0x5513, 0x1043, 0x1107 }, /* ASUS A6K */ 56 { 0x5513, 0x1043, 0x1107 }, /* ASUS A6K */
57 { 0x5513, 0x1734, 0x105F }, /* FSC Amilo A1630 */ 57 { 0x5513, 0x1734, 0x105F }, /* FSC Amilo A1630 */
58 { 0x5513, 0x1071, 0x8640 }, /* EasyNote K5305 */ 58 { 0x5513, 0x1071, 0x8640 }, /* EasyNote K5305 */
59 { 0x5513, 0x1039, 0x5513 }, /* Targa Visionary 1000 */
60 /* end marker */ 59 /* end marker */
61 { 0, } 60 { 0, }
62}; 61};
diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
index 69877bd81815..1b0e7b6d8ef5 100644
--- a/drivers/ata/pata_sl82c105.c
+++ b/drivers/ata/pata_sl82c105.c
@@ -1,7 +1,6 @@
1/* 1/*
2 * pata_sl82c105.c - SL82C105 PATA for new ATA layer 2 * pata_sl82c105.c - SL82C105 PATA for new ATA layer
3 * (C) 2005 Red Hat Inc 3 * (C) 2005 Red Hat Inc
4 * Alan Cox <alan@redhat.com>
5 * 4 *
6 * Based in part on linux/drivers/ide/pci/sl82c105.c 5 * Based in part on linux/drivers/ide/pci/sl82c105.c
7 * SL82C105/Winbond 553 IDE driver 6 * SL82C105/Winbond 553 IDE driver
diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c
index b181261f2743..ef9597517cdd 100644
--- a/drivers/ata/pata_triflex.c
+++ b/drivers/ata/pata_triflex.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * pata_triflex.c - Compaq PATA for new ATA layer 2 * pata_triflex.c - Compaq PATA for new ATA layer
3 * (C) 2005 Red Hat Inc 3 * (C) 2005 Red Hat Inc
4 * Alan Cox <alan@redhat.com> 4 * Alan Cox <alan@lxorguk.ukuu.org.uk>
5 * 5 *
6 * based upon 6 * based upon
7 * 7 *
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index 8fdb2ce73210..681169c9c640 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -1,7 +1,6 @@
1/* 1/*
2 * pata_via.c - VIA PATA for new ATA layer 2 * pata_via.c - VIA PATA for new ATA layer
3 * (C) 2005-2006 Red Hat Inc 3 * (C) 2005-2006 Red Hat Inc
4 * Alan Cox <alan@redhat.com>
5 * 4 *
6 * Documentation 5 * Documentation
7 * Most chipset documentation available under NDA only 6 * Most chipset documentation available under NDA only
diff --git a/drivers/ata/pata_winbond.c b/drivers/ata/pata_winbond.c
index a7606b044a61..319e164a3d74 100644
--- a/drivers/ata/pata_winbond.c
+++ b/drivers/ata/pata_winbond.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * pata_winbond.c - Winbond VLB ATA controllers 2 * pata_winbond.c - Winbond VLB ATA controllers
3 * (C) 2006 Red Hat <alan@redhat.com> 3 * (C) 2006 Red Hat
4 * 4 *
5 * Support for the Winbond 83759A when operating in advanced mode. 5 * Support for the Winbond 83759A when operating in advanced mode.
6 * Multichip mode is not currently supported. 6 * Multichip mode is not currently supported.
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index fae3841de0d8..6f1460614325 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -307,10 +307,10 @@ static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
307 307
308static void nv_nf2_freeze(struct ata_port *ap); 308static void nv_nf2_freeze(struct ata_port *ap);
309static void nv_nf2_thaw(struct ata_port *ap); 309static void nv_nf2_thaw(struct ata_port *ap);
310static int nv_nf2_hardreset(struct ata_link *link, unsigned int *class,
311 unsigned long deadline);
310static void nv_ck804_freeze(struct ata_port *ap); 312static void nv_ck804_freeze(struct ata_port *ap);
311static void nv_ck804_thaw(struct ata_port *ap); 313static void nv_ck804_thaw(struct ata_port *ap);
312static int nv_hardreset(struct ata_link *link, unsigned int *class,
313 unsigned long deadline);
314static int nv_adma_slave_config(struct scsi_device *sdev); 314static int nv_adma_slave_config(struct scsi_device *sdev);
315static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc); 315static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
316static void nv_adma_qc_prep(struct ata_queued_cmd *qc); 316static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
@@ -405,17 +405,8 @@ static struct scsi_host_template nv_swncq_sht = {
405 .slave_configure = nv_swncq_slave_config, 405 .slave_configure = nv_swncq_slave_config,
406}; 406};
407 407
408/* OSDL bz3352 reports that some nv controllers can't determine device
409 * signature reliably and nv_hardreset is implemented to work around
410 * the problem. This was reported on nf3 and it's unclear whether any
411 * other controllers are affected. However, the workaround has been
412 * applied to all variants and there isn't much to gain by trying to
413 * find out exactly which ones are affected at this point especially
414 * because NV has moved over to ahci for newer controllers.
415 */
416static struct ata_port_operations nv_common_ops = { 408static struct ata_port_operations nv_common_ops = {
417 .inherits = &ata_bmdma_port_ops, 409 .inherits = &ata_bmdma_port_ops,
418 .hardreset = nv_hardreset,
419 .scr_read = nv_scr_read, 410 .scr_read = nv_scr_read,
420 .scr_write = nv_scr_write, 411 .scr_write = nv_scr_write,
421}; 412};
@@ -429,12 +420,22 @@ static struct ata_port_operations nv_generic_ops = {
429 .hardreset = ATA_OP_NULL, 420 .hardreset = ATA_OP_NULL,
430}; 421};
431 422
423/* OSDL bz3352 reports that nf2/3 controllers can't determine device
424 * signature reliably. Also, the following thread reports detection
425 * failure on cold boot with the standard debouncing timing.
426 *
427 * http://thread.gmane.org/gmane.linux.ide/34098
428 *
429 * Debounce with hotplug timing and request follow-up SRST.
430 */
432static struct ata_port_operations nv_nf2_ops = { 431static struct ata_port_operations nv_nf2_ops = {
433 .inherits = &nv_common_ops, 432 .inherits = &nv_common_ops,
434 .freeze = nv_nf2_freeze, 433 .freeze = nv_nf2_freeze,
435 .thaw = nv_nf2_thaw, 434 .thaw = nv_nf2_thaw,
435 .hardreset = nv_nf2_hardreset,
436}; 436};
437 437
438/* CK804 finally gets hardreset right */
438static struct ata_port_operations nv_ck804_ops = { 439static struct ata_port_operations nv_ck804_ops = {
439 .inherits = &nv_common_ops, 440 .inherits = &nv_common_ops,
440 .freeze = nv_ck804_freeze, 441 .freeze = nv_ck804_freeze,
@@ -443,7 +444,7 @@ static struct ata_port_operations nv_ck804_ops = {
443}; 444};
444 445
445static struct ata_port_operations nv_adma_ops = { 446static struct ata_port_operations nv_adma_ops = {
446 .inherits = &nv_common_ops, 447 .inherits = &nv_ck804_ops,
447 448
448 .check_atapi_dma = nv_adma_check_atapi_dma, 449 .check_atapi_dma = nv_adma_check_atapi_dma,
449 .sff_tf_read = nv_adma_tf_read, 450 .sff_tf_read = nv_adma_tf_read,
@@ -467,7 +468,7 @@ static struct ata_port_operations nv_adma_ops = {
467}; 468};
468 469
469static struct ata_port_operations nv_swncq_ops = { 470static struct ata_port_operations nv_swncq_ops = {
470 .inherits = &nv_common_ops, 471 .inherits = &nv_generic_ops,
471 472
472 .qc_defer = ata_std_qc_defer, 473 .qc_defer = ata_std_qc_defer,
473 .qc_prep = nv_swncq_qc_prep, 474 .qc_prep = nv_swncq_qc_prep,
@@ -1553,6 +1554,17 @@ static void nv_nf2_thaw(struct ata_port *ap)
1553 iowrite8(mask, scr_addr + NV_INT_ENABLE); 1554 iowrite8(mask, scr_addr + NV_INT_ENABLE);
1554} 1555}
1555 1556
1557static int nv_nf2_hardreset(struct ata_link *link, unsigned int *class,
1558 unsigned long deadline)
1559{
1560 bool online;
1561 int rc;
1562
1563 rc = sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
1564 &online, NULL);
1565 return online ? -EAGAIN : rc;
1566}
1567
1556static void nv_ck804_freeze(struct ata_port *ap) 1568static void nv_ck804_freeze(struct ata_port *ap)
1557{ 1569{
1558 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR]; 1570 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
@@ -1605,21 +1617,6 @@ static void nv_mcp55_thaw(struct ata_port *ap)
1605 ata_sff_thaw(ap); 1617 ata_sff_thaw(ap);
1606} 1618}
1607 1619
1608static int nv_hardreset(struct ata_link *link, unsigned int *class,
1609 unsigned long deadline)
1610{
1611 int rc;
1612
1613 /* SATA hardreset fails to retrieve proper device signature on
1614 * some controllers. Request follow up SRST. For more info,
1615 * see http://bugzilla.kernel.org/show_bug.cgi?id=3352
1616 */
1617 rc = sata_sff_hardreset(link, class, deadline);
1618 if (rc)
1619 return rc;
1620 return -EAGAIN;
1621}
1622
1623static void nv_adma_error_handler(struct ata_port *ap) 1620static void nv_adma_error_handler(struct ata_port *ap)
1624{ 1621{
1625 struct nv_adma_port_priv *pp = ap->private_data; 1622 struct nv_adma_port_priv *pp = ap->private_data;
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index 750d8cdc00cd..ba9a2570a742 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -153,6 +153,10 @@ static void pdc_freeze(struct ata_port *ap);
153static void pdc_sata_freeze(struct ata_port *ap); 153static void pdc_sata_freeze(struct ata_port *ap);
154static void pdc_thaw(struct ata_port *ap); 154static void pdc_thaw(struct ata_port *ap);
155static void pdc_sata_thaw(struct ata_port *ap); 155static void pdc_sata_thaw(struct ata_port *ap);
156static int pdc_pata_softreset(struct ata_link *link, unsigned int *class,
157 unsigned long deadline);
158static int pdc_sata_hardreset(struct ata_link *link, unsigned int *class,
159 unsigned long deadline);
156static void pdc_error_handler(struct ata_port *ap); 160static void pdc_error_handler(struct ata_port *ap);
157static void pdc_post_internal_cmd(struct ata_queued_cmd *qc); 161static void pdc_post_internal_cmd(struct ata_queued_cmd *qc);
158static int pdc_pata_cable_detect(struct ata_port *ap); 162static int pdc_pata_cable_detect(struct ata_port *ap);
@@ -186,6 +190,7 @@ static struct ata_port_operations pdc_sata_ops = {
186 .scr_read = pdc_sata_scr_read, 190 .scr_read = pdc_sata_scr_read,
187 .scr_write = pdc_sata_scr_write, 191 .scr_write = pdc_sata_scr_write,
188 .port_start = pdc_sata_port_start, 192 .port_start = pdc_sata_port_start,
193 .hardreset = pdc_sata_hardreset,
189}; 194};
190 195
191/* First-generation chips need a more restrictive ->check_atapi_dma op */ 196/* First-generation chips need a more restrictive ->check_atapi_dma op */
@@ -200,6 +205,7 @@ static struct ata_port_operations pdc_pata_ops = {
200 .freeze = pdc_freeze, 205 .freeze = pdc_freeze,
201 .thaw = pdc_thaw, 206 .thaw = pdc_thaw,
202 .port_start = pdc_common_port_start, 207 .port_start = pdc_common_port_start,
208 .softreset = pdc_pata_softreset,
203}; 209};
204 210
205static const struct ata_port_info pdc_port_info[] = { 211static const struct ata_port_info pdc_port_info[] = {
@@ -693,6 +699,20 @@ static void pdc_sata_thaw(struct ata_port *ap)
693 readl(host_mmio + hotplug_offset); /* flush */ 699 readl(host_mmio + hotplug_offset); /* flush */
694} 700}
695 701
702static int pdc_pata_softreset(struct ata_link *link, unsigned int *class,
703 unsigned long deadline)
704{
705 pdc_reset_port(link->ap);
706 return ata_sff_softreset(link, class, deadline);
707}
708
709static int pdc_sata_hardreset(struct ata_link *link, unsigned int *class,
710 unsigned long deadline)
711{
712 pdc_reset_port(link->ap);
713 return sata_sff_hardreset(link, class, deadline);
714}
715
696static void pdc_error_handler(struct ata_port *ap) 716static void pdc_error_handler(struct ata_port *ap)
697{ 717{
698 if (!(ap->pflags & ATA_PFLAG_FROZEN)) 718 if (!(ap->pflags & ATA_PFLAG_FROZEN))
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index 4621807a1a6a..ccee930f1e12 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -1329,6 +1329,11 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1329 } 1329 }
1330 } 1330 }
1331 1331
1332 /* Set max read request size to 4096. This slightly increases
1333 * write throughput for pci-e variants.
1334 */
1335 pcie_set_readrq(pdev, 4096);
1336
1332 sil24_init_controller(host); 1337 sil24_init_controller(host);
1333 1338
1334 pci_set_master(pdev); 1339 pci_set_master(pdev);
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index 1cfa74535d91..c18935f0bda2 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -44,11 +44,16 @@
44#include <linux/libata.h> 44#include <linux/libata.h>
45 45
46#define DRV_NAME "sata_via" 46#define DRV_NAME "sata_via"
47#define DRV_VERSION "2.3" 47#define DRV_VERSION "2.4"
48 48
49/*
50 * vt8251 is different from other sata controllers of VIA. It has two
51 * channels, each channel has both Master and Slave slot.
52 */
49enum board_ids_enum { 53enum board_ids_enum {
50 vt6420, 54 vt6420,
51 vt6421, 55 vt6421,
56 vt8251,
52}; 57};
53 58
54enum { 59enum {
@@ -70,6 +75,9 @@ enum {
70static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); 75static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
71static int svia_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val); 76static int svia_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
72static int svia_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val); 77static int svia_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
78static int vt8251_scr_read(struct ata_link *link, unsigned int scr, u32 *val);
79static int vt8251_scr_write(struct ata_link *link, unsigned int scr, u32 val);
80static void svia_tf_load(struct ata_port *ap, const struct ata_taskfile *tf);
73static void svia_noop_freeze(struct ata_port *ap); 81static void svia_noop_freeze(struct ata_port *ap);
74static int vt6420_prereset(struct ata_link *link, unsigned long deadline); 82static int vt6420_prereset(struct ata_link *link, unsigned long deadline);
75static int vt6421_pata_cable_detect(struct ata_port *ap); 83static int vt6421_pata_cable_detect(struct ata_port *ap);
@@ -78,12 +86,12 @@ static void vt6421_set_dma_mode(struct ata_port *ap, struct ata_device *adev);
78 86
79static const struct pci_device_id svia_pci_tbl[] = { 87static const struct pci_device_id svia_pci_tbl[] = {
80 { PCI_VDEVICE(VIA, 0x5337), vt6420 }, 88 { PCI_VDEVICE(VIA, 0x5337), vt6420 },
81 { PCI_VDEVICE(VIA, 0x0591), vt6420 }, 89 { PCI_VDEVICE(VIA, 0x0591), vt6420 }, /* 2 sata chnls (Master) */
82 { PCI_VDEVICE(VIA, 0x3149), vt6420 }, 90 { PCI_VDEVICE(VIA, 0x3149), vt6420 }, /* 2 sata chnls (Master) */
83 { PCI_VDEVICE(VIA, 0x3249), vt6421 }, 91 { PCI_VDEVICE(VIA, 0x3249), vt6421 }, /* 2 sata chnls, 1 pata chnl */
84 { PCI_VDEVICE(VIA, 0x5287), vt6420 },
85 { PCI_VDEVICE(VIA, 0x5372), vt6420 }, 92 { PCI_VDEVICE(VIA, 0x5372), vt6420 },
86 { PCI_VDEVICE(VIA, 0x7372), vt6420 }, 93 { PCI_VDEVICE(VIA, 0x7372), vt6420 },
94 { PCI_VDEVICE(VIA, 0x5287), vt8251 }, /* 2 sata chnls (Master/Slave) */
87 95
88 { } /* terminate list */ 96 { } /* terminate list */
89}; 97};
@@ -103,25 +111,37 @@ static struct scsi_host_template svia_sht = {
103 ATA_BMDMA_SHT(DRV_NAME), 111 ATA_BMDMA_SHT(DRV_NAME),
104}; 112};
105 113
106static struct ata_port_operations vt6420_sata_ops = { 114static struct ata_port_operations svia_base_ops = {
107 .inherits = &ata_bmdma_port_ops, 115 .inherits = &ata_bmdma_port_ops,
116 .sff_tf_load = svia_tf_load,
117};
118
119static struct ata_port_operations vt6420_sata_ops = {
120 .inherits = &svia_base_ops,
108 .freeze = svia_noop_freeze, 121 .freeze = svia_noop_freeze,
109 .prereset = vt6420_prereset, 122 .prereset = vt6420_prereset,
110}; 123};
111 124
112static struct ata_port_operations vt6421_pata_ops = { 125static struct ata_port_operations vt6421_pata_ops = {
113 .inherits = &ata_bmdma_port_ops, 126 .inherits = &svia_base_ops,
114 .cable_detect = vt6421_pata_cable_detect, 127 .cable_detect = vt6421_pata_cable_detect,
115 .set_piomode = vt6421_set_pio_mode, 128 .set_piomode = vt6421_set_pio_mode,
116 .set_dmamode = vt6421_set_dma_mode, 129 .set_dmamode = vt6421_set_dma_mode,
117}; 130};
118 131
119static struct ata_port_operations vt6421_sata_ops = { 132static struct ata_port_operations vt6421_sata_ops = {
120 .inherits = &ata_bmdma_port_ops, 133 .inherits = &svia_base_ops,
121 .scr_read = svia_scr_read, 134 .scr_read = svia_scr_read,
122 .scr_write = svia_scr_write, 135 .scr_write = svia_scr_write,
123}; 136};
124 137
138static struct ata_port_operations vt8251_ops = {
139 .inherits = &svia_base_ops,
140 .hardreset = sata_std_hardreset,
141 .scr_read = vt8251_scr_read,
142 .scr_write = vt8251_scr_write,
143};
144
125static const struct ata_port_info vt6420_port_info = { 145static const struct ata_port_info vt6420_port_info = {
126 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, 146 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
127 .pio_mask = 0x1f, 147 .pio_mask = 0x1f,
@@ -146,6 +166,15 @@ static struct ata_port_info vt6421_pport_info = {
146 .port_ops = &vt6421_pata_ops, 166 .port_ops = &vt6421_pata_ops,
147}; 167};
148 168
169static struct ata_port_info vt8251_port_info = {
170 .flags = ATA_FLAG_SATA | ATA_FLAG_SLAVE_POSS |
171 ATA_FLAG_NO_LEGACY,
172 .pio_mask = 0x1f,
173 .mwdma_mask = 0x07,
174 .udma_mask = ATA_UDMA6,
175 .port_ops = &vt8251_ops,
176};
177
149MODULE_AUTHOR("Jeff Garzik"); 178MODULE_AUTHOR("Jeff Garzik");
150MODULE_DESCRIPTION("SCSI low-level driver for VIA SATA controllers"); 179MODULE_DESCRIPTION("SCSI low-level driver for VIA SATA controllers");
151MODULE_LICENSE("GPL"); 180MODULE_LICENSE("GPL");
@@ -168,6 +197,106 @@ static int svia_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
168 return 0; 197 return 0;
169} 198}
170 199
200static int vt8251_scr_read(struct ata_link *link, unsigned int scr, u32 *val)
201{
202 static const u8 ipm_tbl[] = { 1, 2, 6, 0 };
203 struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
204 int slot = 2 * link->ap->port_no + link->pmp;
205 u32 v = 0;
206 u8 raw;
207
208 switch (scr) {
209 case SCR_STATUS:
210 pci_read_config_byte(pdev, 0xA0 + slot, &raw);
211
212 /* read the DET field, bit0 and 1 of the config byte */
213 v |= raw & 0x03;
214
215 /* read the SPD field, bit4 of the configure byte */
216 if (raw & (1 << 4))
217 v |= 0x02 << 4;
218 else
219 v |= 0x01 << 4;
220
221 /* read the IPM field, bit2 and 3 of the config byte */
222 v |= ipm_tbl[(raw >> 2) & 0x3];
223 break;
224
225 case SCR_ERROR:
226 /* devices other than 5287 uses 0xA8 as base */
227 WARN_ON(pdev->device != 0x5287);
228 pci_read_config_dword(pdev, 0xB0 + slot * 4, &v);
229 break;
230
231 case SCR_CONTROL:
232 pci_read_config_byte(pdev, 0xA4 + slot, &raw);
233
234 /* read the DET field, bit0 and bit1 */
235 v |= ((raw & 0x02) << 1) | (raw & 0x01);
236
237 /* read the IPM field, bit2 and bit3 */
238 v |= ((raw >> 2) & 0x03) << 8;
239 break;
240
241 default:
242 return -EINVAL;
243 }
244
245 *val = v;
246 return 0;
247}
248
249static int vt8251_scr_write(struct ata_link *link, unsigned int scr, u32 val)
250{
251 struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
252 int slot = 2 * link->ap->port_no + link->pmp;
253 u32 v = 0;
254
255 switch (scr) {
256 case SCR_ERROR:
257 /* devices other than 5287 uses 0xA8 as base */
258 WARN_ON(pdev->device != 0x5287);
259 pci_write_config_dword(pdev, 0xB0 + slot * 4, val);
260 return 0;
261
262 case SCR_CONTROL:
263 /* set the DET field */
264 v |= ((val & 0x4) >> 1) | (val & 0x1);
265
266 /* set the IPM field */
267 v |= ((val >> 8) & 0x3) << 2;
268
269 pci_write_config_byte(pdev, 0xA4 + slot, v);
270 return 0;
271
272 default:
273 return -EINVAL;
274 }
275}
276
277/**
278 * svia_tf_load - send taskfile registers to host controller
279 * @ap: Port to which output is sent
280 * @tf: ATA taskfile register set
281 *
282 * Outputs ATA taskfile to standard ATA host controller.
283 *
284 * This is to fix the internal bug of via chipsets, which will
285 * reset the device register after changing the IEN bit on ctl
286 * register.
287 */
288static void svia_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
289{
290 struct ata_taskfile ttf;
291
292 if (tf->ctl != ap->last_ctl) {
293 ttf = *tf;
294 ttf.flags |= ATA_TFLAG_DEVICE;
295 tf = &ttf;
296 }
297 ata_sff_tf_load(ap, tf);
298}
299
171static void svia_noop_freeze(struct ata_port *ap) 300static void svia_noop_freeze(struct ata_port *ap)
172{ 301{
173 /* Some VIA controllers choke if ATA_NIEN is manipulated in 302 /* Some VIA controllers choke if ATA_NIEN is manipulated in
@@ -367,6 +496,30 @@ static int vt6421_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
367 return 0; 496 return 0;
368} 497}
369 498
499static int vt8251_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
500{
501 const struct ata_port_info *ppi[] = { &vt8251_port_info, NULL };
502 struct ata_host *host;
503 int i, rc;
504
505 rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
506 if (rc)
507 return rc;
508 *r_host = host;
509
510 rc = pcim_iomap_regions(pdev, 1 << 5, DRV_NAME);
511 if (rc) {
512 dev_printk(KERN_ERR, &pdev->dev, "failed to iomap PCI BAR 5\n");
513 return rc;
514 }
515
516 /* 8251 hosts four sata ports as M/S of the two channels */
517 for (i = 0; i < host->n_ports; i++)
518 ata_slave_link_init(host->ports[i]);
519
520 return 0;
521}
522
370static void svia_configure(struct pci_dev *pdev) 523static void svia_configure(struct pci_dev *pdev)
371{ 524{
372 u8 tmp8; 525 u8 tmp8;
@@ -422,10 +575,10 @@ static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
422 if (rc) 575 if (rc)
423 return rc; 576 return rc;
424 577
425 if (board_id == vt6420) 578 if (board_id == vt6421)
426 bar_sizes = &svia_bar_sizes[0];
427 else
428 bar_sizes = &vt6421_bar_sizes[0]; 579 bar_sizes = &vt6421_bar_sizes[0];
580 else
581 bar_sizes = &svia_bar_sizes[0];
429 582
430 for (i = 0; i < ARRAY_SIZE(svia_bar_sizes); i++) 583 for (i = 0; i < ARRAY_SIZE(svia_bar_sizes); i++)
431 if ((pci_resource_start(pdev, i) == 0) || 584 if ((pci_resource_start(pdev, i) == 0) ||
@@ -438,10 +591,19 @@ static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
438 return -ENODEV; 591 return -ENODEV;
439 } 592 }
440 593
441 if (board_id == vt6420) 594 switch (board_id) {
595 case vt6420:
442 rc = vt6420_prepare_host(pdev, &host); 596 rc = vt6420_prepare_host(pdev, &host);
443 else 597 break;
598 case vt6421:
444 rc = vt6421_prepare_host(pdev, &host); 599 rc = vt6421_prepare_host(pdev, &host);
600 break;
601 case vt8251:
602 rc = vt8251_prepare_host(pdev, &host);
603 break;
604 default:
605 rc = -EINVAL;
606 }
445 if (rc) 607 if (rc)
446 return rc; 608 return rc;
447 609
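For the vt8251, per-device link status lives in PCI configuration space rather than in memory-mapped SCR registers: one byte per device starting at offset 0xA0, indexed by slot = 2 * port_no + pmp as computed in vt8251_scr_read()/vt8251_scr_write() above. Assuming the slave links created by ata_slave_link_init() carry pmp == 1 (master links use 0), the four drives of the two channels map out as in this standalone illustration (hypothetical demo code, not driver code):

    #include <stdio.h>

    int main(void)
    {
            /* Two channels (ports 0 and 1), each with a master and a slave device. */
            for (int port_no = 0; port_no < 2; port_no++)
                    for (int pmp = 0; pmp < 2; pmp++)
                            printf("port %d %s -> slot %d, status byte at config offset 0x%02X\n",
                                   port_no, pmp ? "slave " : "master",
                                   2 * port_no + pmp, 0xA0 + 2 * port_no + pmp);
            return 0;
    }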
diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
index 615412364e99..6b969f8c684f 100644
--- a/drivers/atm/horizon.c
+++ b/drivers/atm/horizon.c
@@ -2705,7 +2705,7 @@ static int __devinit hrz_probe(struct pci_dev *pci_dev, const struct pci_device_
2705 2705
2706 /* XXX DEV_LABEL is a guess */ 2706 /* XXX DEV_LABEL is a guess */
2707 if (!request_region(iobase, HRZ_IO_EXTENT, DEV_LABEL)) { 2707 if (!request_region(iobase, HRZ_IO_EXTENT, DEV_LABEL)) {
2708 return -EINVAL; 2708 err = -EINVAL;
2709 goto out_disable; 2709 goto out_disable;
2710 } 2710 }
2711 2711
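The one-line horizon.c fix matters because the probe has already enabled the PCI device at this point: failing with a bare return skips the out_disable unwind and leaves the device enabled. A hedged sketch of the goto-unwind shape the fix restores; example_probe() and the BAR/length values are hypothetical:

    #include <linux/pci.h>
    #include <linux/ioport.h>

    static int example_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
    {
            int err;

            err = pci_enable_device(pci_dev);
            if (err)
                    return err;             /* nothing to undo yet */

            if (!request_region(pci_resource_start(pci_dev, 0), 8, "example")) {
                    err = -EINVAL;
                    goto out_disable;       /* undo pci_enable_device() */
            }

            return 0;

    out_disable:
            pci_disable_device(pci_dev);
            return err;
    }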
diff --git a/drivers/base/sys.c b/drivers/base/sys.c
index 75dd6e22faff..c98c31ec2f75 100644
--- a/drivers/base/sys.c
+++ b/drivers/base/sys.c
@@ -355,7 +355,7 @@ static void __sysdev_resume(struct sys_device *dev)
355 * sysdev_suspend - Suspend all system devices. 355 * sysdev_suspend - Suspend all system devices.
356 * @state: Power state to enter. 356 * @state: Power state to enter.
357 * 357 *
358 * We perform an almost identical operation as sys_device_shutdown() 358 * We perform an almost identical operation as sysdev_shutdown()
359 * above, though calling ->suspend() instead. Interrupts are disabled 359 * above, though calling ->suspend() instead. Interrupts are disabled
360 * when this called. Devices are responsible for both saving state and 360 * when this called. Devices are responsible for both saving state and
361 * quiescing or powering down the device. 361 * quiescing or powering down the device.
@@ -437,7 +437,7 @@ aux_driver:
437/** 437/**
438 * sysdev_resume - Bring system devices back to life. 438 * sysdev_resume - Bring system devices back to life.
439 * 439 *
440 * Similar to sys_device_suspend(), but we iterate the list forwards 440 * Similar to sysdev_suspend(), but we iterate the list forwards
441 * to guarantee that parent devices are resumed before their children. 441 * to guarantee that parent devices are resumed before their children.
442 * 442 *
443 * Note: Interrupts are disabled when called. 443 * Note: Interrupts are disabled when called.
@@ -488,7 +488,8 @@ ssize_t sysdev_store_ulong(struct sys_device *sysdev,
488 if (end == buf) 488 if (end == buf)
489 return -EINVAL; 489 return -EINVAL;
490 *(unsigned long *)(ea->var) = new; 490 *(unsigned long *)(ea->var) = new;
491 return end - buf; 491 /* Always return full write size even if we didn't consume all */
492 return size;
492} 493}
493EXPORT_SYMBOL_GPL(sysdev_store_ulong); 494EXPORT_SYMBOL_GPL(sysdev_store_ulong);
494 495
@@ -511,7 +512,8 @@ ssize_t sysdev_store_int(struct sys_device *sysdev,
511 if (end == buf || new > INT_MAX || new < INT_MIN) 512 if (end == buf || new > INT_MAX || new < INT_MIN)
512 return -EINVAL; 513 return -EINVAL;
513 *(int *)(ea->var) = new; 514 *(int *)(ea->var) = new;
514 return end - buf; 515 /* Always return full write size even if we didn't consume all */
516 return size;
515} 517}
516EXPORT_SYMBOL_GPL(sysdev_store_int); 518EXPORT_SYMBOL_GPL(sysdev_store_int);
517 519
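The sysdev_store_ulong()/sysdev_store_int() change reflects the usual sysfs store convention: if a store method returns fewer bytes than it was handed, the VFS re-submits the unconsumed tail (typically just a trailing newline), so attribute stores report the whole buffer as consumed once a value has been parsed. A minimal sketch of that convention; the parameter list is assumed to mirror the helpers above, and example_val is a hypothetical backing variable:

    #include <linux/kernel.h>
    #include <linux/sysdev.h>

    static unsigned long example_val;       /* hypothetical */

    static ssize_t example_store(struct sys_device *sysdev,
                                 struct sysdev_attribute *attr,
                                 const char *buf, size_t size)
    {
            char *end;
            unsigned long new = simple_strtoul(buf, &end, 0);

            if (end == buf)
                    return -EINVAL;         /* nothing parsed at all */
            example_val = new;
            return size;                    /* claim the full write, including any tail */
    }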
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index a002a381df92..f6a337c34ac4 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -72,9 +72,9 @@ static long disk_size(DAC960_Controller_T *p, int drive_nr)
72 } 72 }
73} 73}
74 74
75static int DAC960_open(struct inode *inode, struct file *file) 75static int DAC960_open(struct block_device *bdev, fmode_t mode)
76{ 76{
77 struct gendisk *disk = inode->i_bdev->bd_disk; 77 struct gendisk *disk = bdev->bd_disk;
78 DAC960_Controller_T *p = disk->queue->queuedata; 78 DAC960_Controller_T *p = disk->queue->queuedata;
79 int drive_nr = (long)disk->private_data; 79 int drive_nr = (long)disk->private_data;
80 80
@@ -89,7 +89,7 @@ static int DAC960_open(struct inode *inode, struct file *file)
89 return -ENXIO; 89 return -ENXIO;
90 } 90 }
91 91
92 check_disk_change(inode->i_bdev); 92 check_disk_change(bdev);
93 93
94 if (!get_capacity(p->disks[drive_nr])) 94 if (!get_capacity(p->disks[drive_nr]))
95 return -ENXIO; 95 return -ENXIO;
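The DAC960 hunk is the first of many in this series converting block driver methods from the old (struct inode *, struct file *) prototypes to (struct block_device * / struct gendisk *, fmode_t), with the BKL-protected entry renamed .locked_ioctl. A consolidated skeleton of the post-conversion shape, under hypothetical names not taken from any of the drivers below:

    #include <linux/fs.h>
    #include <linux/blkdev.h>
    #include <linux/genhd.h>
    #include <linux/module.h>

    struct example_dev {
            int read_only;                          /* hypothetical per-device state */
    };

    static int example_open(struct block_device *bdev, fmode_t mode)
    {
            struct example_dev *d = bdev->bd_disk->private_data;

            if ((mode & FMODE_WRITE) && d->read_only)
                    return -EROFS;
            check_disk_change(bdev);                /* was check_disk_change(inode->i_bdev) */
            return 0;
    }

    static int example_release(struct gendisk *disk, fmode_t mode)
    {
            struct example_dev *d = disk->private_data;

            (void)d;                                /* flush / drop references here */
            return 0;
    }

    static int example_ioctl(struct block_device *bdev, fmode_t mode,
                             unsigned int cmd, unsigned long arg)
    {
            return -ENOTTY;                         /* no driver-private ioctls */
    }

    static struct block_device_operations example_fops = {
            .owner          = THIS_MODULE,
            .open           = example_open,
            .release        = example_release,
            .locked_ioctl   = example_ioctl,        /* still called under the BKL */
    };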
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 61ad8d639ba3..0344a8a8321d 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -21,7 +21,8 @@ config BLK_DEV_FD
21 ---help--- 21 ---help---
22 If you want to use the floppy disk drive(s) of your PC under Linux, 22 If you want to use the floppy disk drive(s) of your PC under Linux,
23 say Y. Information about this driver, especially important for IBM 23 say Y. Information about this driver, especially important for IBM
24 Thinkpad users, is contained in <file:Documentation/floppy.txt>. 24 Thinkpad users, is contained in
25 <file:Documentation/blockdev/floppy.txt>.
25 That file also contains the location of the Floppy driver FAQ as 26 That file also contains the location of the Floppy driver FAQ as
26 well as location of the fdutils package used to configure additional 27 well as location of the fdutils package used to configure additional
27 parameters of the driver at run time. 28 parameters of the driver at run time.
@@ -76,7 +77,7 @@ config PARIDE
76 your computer's parallel port. Most of them are actually IDE devices 77 your computer's parallel port. Most of them are actually IDE devices
77 using a parallel port IDE adapter. This option enables the PARIDE 78 using a parallel port IDE adapter. This option enables the PARIDE
78 subsystem which contains drivers for many of these external drives. 79 subsystem which contains drivers for many of these external drives.
79 Read <file:Documentation/paride.txt> for more information. 80 Read <file:Documentation/blockdev/paride.txt> for more information.
80 81
81 If you have said Y to the "Parallel-port support" configuration 82 If you have said Y to the "Parallel-port support" configuration
82 option, you may share a single port between your printer and other 83 option, you may share a single port between your printer and other
@@ -114,9 +115,9 @@ config BLK_CPQ_DA
114 help 115 help
115 This is the driver for Compaq Smart Array controllers. Everyone 116 This is the driver for Compaq Smart Array controllers. Everyone
116 using these boards should say Y here. See the file 117 using these boards should say Y here. See the file
117 <file:Documentation/cpqarray.txt> for the current list of boards 118 <file:Documentation/blockdev/cpqarray.txt> for the current list of
118 supported by this driver, and for further information on the use of 119 boards supported by this driver, and for further information on the
119 this driver. 120 use of this driver.
120 121
121config BLK_CPQ_CISS_DA 122config BLK_CPQ_CISS_DA
122 tristate "Compaq Smart Array 5xxx support" 123 tristate "Compaq Smart Array 5xxx support"
@@ -124,7 +125,7 @@ config BLK_CPQ_CISS_DA
124 help 125 help
125 This is the driver for Compaq Smart Array 5xxx controllers. 126 This is the driver for Compaq Smart Array 5xxx controllers.
126 Everyone using these boards should say Y here. 127 Everyone using these boards should say Y here.
127 See <file:Documentation/cciss.txt> for the current list of 128 See <file:Documentation/blockdev/cciss.txt> for the current list of
128 boards supported by this driver, and for further information 129 boards supported by this driver, and for further information
129 on the use of this driver. 130 on the use of this driver.
130 131
@@ -135,7 +136,7 @@ config CISS_SCSI_TAPE
135 help 136 help
136 When enabled (Y), this option allows SCSI tape drives and SCSI medium 137 When enabled (Y), this option allows SCSI tape drives and SCSI medium
137 changers (tape robots) to be accessed via a Compaq 5xxx array 138 changers (tape robots) to be accessed via a Compaq 5xxx array
138 controller. (See <file:Documentation/cciss.txt> for more details.) 139 controller. (See <file:Documentation/blockdev/cciss.txt> for more details.)
139 140
140 "SCSI support" and "SCSI tape support" must also be enabled for this 141 "SCSI support" and "SCSI tape support" must also be enabled for this
141 option to work. 142 option to work.
@@ -149,8 +150,8 @@ config BLK_DEV_DAC960
149 help 150 help
150 This driver adds support for the Mylex DAC960, AcceleRAID, and 151 This driver adds support for the Mylex DAC960, AcceleRAID, and
151 eXtremeRAID PCI RAID controllers. See the file 152 eXtremeRAID PCI RAID controllers. See the file
152 <file:Documentation/README.DAC960> for further information about 153 <file:Documentation/blockdev/README.DAC960> for further information
153 this driver. 154 about this driver.
154 155
155 To compile this driver as a module, choose M here: the 156 To compile this driver as a module, choose M here: the
156 module will be called DAC960. 157 module will be called DAC960.
@@ -278,9 +279,9 @@ config BLK_DEV_NBD
278 userland (making server and client physically the same computer, 279 userland (making server and client physically the same computer,
279 communicating using the loopback network device). 280 communicating using the loopback network device).
280 281
281 Read <file:Documentation/nbd.txt> for more information, especially 282 Read <file:Documentation/blockdev/nbd.txt> for more information,
282 about where to find the server code, which runs in user space and 283 especially about where to find the server code, which runs in user
283 does not need special kernel support. 284 space and does not need special kernel support.
284 285
285 Note that this has nothing to do with the network file systems NFS 286 Note that this has nothing to do with the network file systems NFS
286 or Coda; you can say N here even if you intend to use NFS or Coda. 287 or Coda; you can say N here even if you intend to use NFS or Coda.
@@ -321,8 +322,8 @@ config BLK_DEV_RAM
321 store a copy of a minimal root file system off of a floppy into RAM 322 store a copy of a minimal root file system off of a floppy into RAM
322 during the initial install of Linux. 323 during the initial install of Linux.
323 324
324 Note that the kernel command line option "ramdisk=XX" is now 325 Note that the kernel command line option "ramdisk=XX" is now obsolete.
325 obsolete. For details, read <file:Documentation/ramdisk.txt>. 326 For details, read <file:Documentation/blockdev/ramdisk.txt>.
326 327
327 To compile this driver as a module, choose M here: the 328 To compile this driver as a module, choose M here: the
328 module will be called rd. 329 module will be called rd.
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 7516baff3bb9..4b1d4ac960f1 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1437,10 +1437,11 @@ static int fd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
1437 return 0; 1437 return 0;
1438} 1438}
1439 1439
1440static int fd_ioctl(struct inode *inode, struct file *filp, 1440static int fd_ioctl(struct block_device *bdev, fmode_t mode,
1441 unsigned int cmd, unsigned long param) 1441 unsigned int cmd, unsigned long param)
1442{ 1442{
1443 int drive = iminor(inode) & 3; 1443 struct amiga_floppy_struct *p = bdev->bd_disk->private_data;
1444 int drive = p - unit;
1444 static struct floppy_struct getprm; 1445 static struct floppy_struct getprm;
1445 void __user *argp = (void __user *)param; 1446 void __user *argp = (void __user *)param;
1446 1447
@@ -1451,7 +1452,7 @@ static int fd_ioctl(struct inode *inode, struct file *filp,
1451 rel_fdc(); 1452 rel_fdc();
1452 return -EBUSY; 1453 return -EBUSY;
1453 } 1454 }
1454 fsync_bdev(inode->i_bdev); 1455 fsync_bdev(bdev);
1455 if (fd_motor_on(drive) == 0) { 1456 if (fd_motor_on(drive) == 0) {
1456 rel_fdc(); 1457 rel_fdc();
1457 return -ENODEV; 1458 return -ENODEV;
@@ -1464,12 +1465,12 @@ static int fd_ioctl(struct inode *inode, struct file *filp,
1464 rel_fdc(); 1465 rel_fdc();
1465 break; 1466 break;
1466 case FDFMTTRK: 1467 case FDFMTTRK:
1467 if (param < unit[drive].type->tracks * unit[drive].type->heads) 1468 if (param < p->type->tracks * p->type->heads)
1468 { 1469 {
1469 get_fdc(drive); 1470 get_fdc(drive);
1470 if (fd_seek(drive,param) != 0){ 1471 if (fd_seek(drive,param) != 0){
1471 memset(unit[drive].trackbuf, FD_FILL_BYTE, 1472 memset(p->trackbuf, FD_FILL_BYTE,
1472 unit[drive].dtype->sects * unit[drive].type->sect_mult * 512); 1473 p->dtype->sects * p->type->sect_mult * 512);
1473 non_int_flush_track(drive); 1474 non_int_flush_track(drive);
1474 } 1475 }
1475 floppy_off(drive); 1476 floppy_off(drive);
@@ -1480,14 +1481,14 @@ static int fd_ioctl(struct inode *inode, struct file *filp,
1480 break; 1481 break;
1481 case FDFMTEND: 1482 case FDFMTEND:
1482 floppy_off(drive); 1483 floppy_off(drive);
1483 invalidate_bdev(inode->i_bdev); 1484 invalidate_bdev(bdev);
1484 break; 1485 break;
1485 case FDGETPRM: 1486 case FDGETPRM:
1486 memset((void *)&getprm, 0, sizeof (getprm)); 1487 memset((void *)&getprm, 0, sizeof (getprm));
1487 getprm.track=unit[drive].type->tracks; 1488 getprm.track=p->type->tracks;
1488 getprm.head=unit[drive].type->heads; 1489 getprm.head=p->type->heads;
1489 getprm.sect=unit[drive].dtype->sects * unit[drive].type->sect_mult; 1490 getprm.sect=p->dtype->sects * p->type->sect_mult;
1490 getprm.size=unit[drive].blocks; 1491 getprm.size=p->blocks;
1491 if (copy_to_user(argp, &getprm, sizeof(struct floppy_struct))) 1492 if (copy_to_user(argp, &getprm, sizeof(struct floppy_struct)))
1492 return -EFAULT; 1493 return -EFAULT;
1493 break; 1494 break;
@@ -1500,10 +1501,10 @@ static int fd_ioctl(struct inode *inode, struct file *filp,
1500 break; 1501 break;
1501#ifdef RAW_IOCTL 1502#ifdef RAW_IOCTL
1502 case IOCTL_RAW_TRACK: 1503 case IOCTL_RAW_TRACK:
1503 if (copy_to_user(argp, raw_buf, unit[drive].type->read_size)) 1504 if (copy_to_user(argp, raw_buf, p->type->read_size))
1504 return -EFAULT; 1505 return -EFAULT;
1505 else 1506 else
1506 return unit[drive].type->read_size; 1507 return p->type->read_size;
1507#endif 1508#endif
1508 default: 1509 default:
1509 printk(KERN_DEBUG "fd_ioctl: unknown cmd %d for drive %d.", 1510 printk(KERN_DEBUG "fd_ioctl: unknown cmd %d for drive %d.",
@@ -1548,10 +1549,10 @@ static void fd_probe(int dev)
1548 * /dev/PS0 etc), and disallows simultaneous access to the same 1549 * /dev/PS0 etc), and disallows simultaneous access to the same
1549 * drive with different device numbers. 1550 * drive with different device numbers.
1550 */ 1551 */
1551static int floppy_open(struct inode *inode, struct file *filp) 1552static int floppy_open(struct block_device *bdev, fmode_t mode)
1552{ 1553{
1553 int drive = iminor(inode) & 3; 1554 int drive = MINOR(bdev->bd_dev) & 3;
1554 int system = (iminor(inode) & 4) >> 2; 1555 int system = (MINOR(bdev->bd_dev) & 4) >> 2;
1555 int old_dev; 1556 int old_dev;
1556 unsigned long flags; 1557 unsigned long flags;
1557 1558
@@ -1560,9 +1561,9 @@ static int floppy_open(struct inode *inode, struct file *filp)
1560 if (fd_ref[drive] && old_dev != system) 1561 if (fd_ref[drive] && old_dev != system)
1561 return -EBUSY; 1562 return -EBUSY;
1562 1563
1563 if (filp && filp->f_mode & 3) { 1564 if (mode & (FMODE_READ|FMODE_WRITE)) {
1564 check_disk_change(inode->i_bdev); 1565 check_disk_change(bdev);
1565 if (filp->f_mode & 2 ) { 1566 if (mode & FMODE_WRITE) {
1566 int wrprot; 1567 int wrprot;
1567 1568
1568 get_fdc(drive); 1569 get_fdc(drive);
@@ -1592,9 +1593,10 @@ static int floppy_open(struct inode *inode, struct file *filp)
1592 return 0; 1593 return 0;
1593} 1594}
1594 1595
1595static int floppy_release(struct inode * inode, struct file * filp) 1596static int floppy_release(struct gendisk *disk, fmode_t mode)
1596{ 1597{
1597 int drive = iminor(inode) & 3; 1598 struct amiga_floppy_struct *p = disk->private_data;
1599 int drive = p - unit;
1598 1600
1599 if (unit[drive].dirty == 1) { 1601 if (unit[drive].dirty == 1) {
1600 del_timer (flush_track_timer + drive); 1602 del_timer (flush_track_timer + drive);
@@ -1650,7 +1652,7 @@ static struct block_device_operations floppy_fops = {
1650 .owner = THIS_MODULE, 1652 .owner = THIS_MODULE,
1651 .open = floppy_open, 1653 .open = floppy_open,
1652 .release = floppy_release, 1654 .release = floppy_release,
1653 .ioctl = fd_ioctl, 1655 .locked_ioctl = fd_ioctl,
1654 .getgeo = fd_getgeo, 1656 .getgeo = fd_getgeo,
1655 .media_changed = amiga_floppy_change, 1657 .media_changed = amiga_floppy_change,
1656}; 1658};
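Several of these conversions (amiflop above, ataflop and floppy below) recover the drive number from the disk's private_data by pointer arithmetic against the static unit[] array, as in "int drive = p - unit;". A tiny illustration of why that works, with hypothetical types:

    struct example_unit { int dummy; };
    static struct example_unit units[4];

    static int unit_index(const struct example_unit *p)
    {
            return p - units;       /* 0..3, valid while p points into units[] */
    }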
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index d876ad861237..1747dd272cd4 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -118,13 +118,11 @@ aoedisk_rm_sysfs(struct aoedev *d)
118} 118}
119 119
120static int 120static int
121aoeblk_open(struct inode *inode, struct file *filp) 121aoeblk_open(struct block_device *bdev, fmode_t mode)
122{ 122{
123 struct aoedev *d; 123 struct aoedev *d = bdev->bd_disk->private_data;
124 ulong flags; 124 ulong flags;
125 125
126 d = inode->i_bdev->bd_disk->private_data;
127
128 spin_lock_irqsave(&d->lock, flags); 126 spin_lock_irqsave(&d->lock, flags);
129 if (d->flags & DEVFL_UP) { 127 if (d->flags & DEVFL_UP) {
130 d->nopen++; 128 d->nopen++;
@@ -136,13 +134,11 @@ aoeblk_open(struct inode *inode, struct file *filp)
136} 134}
137 135
138static int 136static int
139aoeblk_release(struct inode *inode, struct file *filp) 137aoeblk_release(struct gendisk *disk, fmode_t mode)
140{ 138{
141 struct aoedev *d; 139 struct aoedev *d = disk->private_data;
142 ulong flags; 140 ulong flags;
143 141
144 d = inode->i_bdev->bd_disk->private_data;
145
146 spin_lock_irqsave(&d->lock, flags); 142 spin_lock_irqsave(&d->lock, flags);
147 143
148 if (--d->nopen == 0) { 144 if (--d->nopen == 0) {
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 432cf4018291..69e1df7dfa14 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -361,13 +361,13 @@ static void finish_fdc( void );
361static void finish_fdc_done( int dummy ); 361static void finish_fdc_done( int dummy );
362static void setup_req_params( int drive ); 362static void setup_req_params( int drive );
363static void redo_fd_request( void); 363static void redo_fd_request( void);
364static int fd_ioctl( struct inode *inode, struct file *filp, unsigned int 364static int fd_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
365 cmd, unsigned long param); 365 cmd, unsigned long param);
366static void fd_probe( int drive ); 366static void fd_probe( int drive );
367static int fd_test_drive_present( int drive ); 367static int fd_test_drive_present( int drive );
368static void config_types( void ); 368static void config_types( void );
369static int floppy_open( struct inode *inode, struct file *filp ); 369static int floppy_open(struct block_device *bdev, fmode_t mode);
370static int floppy_release( struct inode * inode, struct file * filp ); 370static int floppy_release(struct gendisk *disk, fmode_t mode);
371 371
372/************************* End of Prototypes **************************/ 372/************************* End of Prototypes **************************/
373 373
@@ -1483,10 +1483,10 @@ void do_fd_request(struct request_queue * q)
1483 atari_enable_irq( IRQ_MFP_FDC ); 1483 atari_enable_irq( IRQ_MFP_FDC );
1484} 1484}
1485 1485
1486static int fd_ioctl(struct inode *inode, struct file *filp, 1486static int fd_ioctl(struct block_device *bdev, fmode_t mode,
1487 unsigned int cmd, unsigned long param) 1487 unsigned int cmd, unsigned long param)
1488{ 1488{
1489 struct gendisk *disk = inode->i_bdev->bd_disk; 1489 struct gendisk *disk = bdev->bd_disk;
1490 struct atari_floppy_struct *floppy = disk->private_data; 1490 struct atari_floppy_struct *floppy = disk->private_data;
1491 int drive = floppy - unit; 1491 int drive = floppy - unit;
1492 int type = floppy->type; 1492 int type = floppy->type;
@@ -1661,7 +1661,7 @@ static int fd_ioctl(struct inode *inode, struct file *filp,
1661 /* invalidate the buffer track to force a reread */ 1661 /* invalidate the buffer track to force a reread */
1662 BufferDrive = -1; 1662 BufferDrive = -1;
1663 set_bit(drive, &fake_change); 1663 set_bit(drive, &fake_change);
1664 check_disk_change(inode->i_bdev); 1664 check_disk_change(bdev);
1665 return 0; 1665 return 0;
1666 default: 1666 default:
1667 return -EINVAL; 1667 return -EINVAL;
@@ -1804,37 +1804,36 @@ static void __init config_types( void )
1804 * drive with different device numbers. 1804 * drive with different device numbers.
1805 */ 1805 */
1806 1806
1807static int floppy_open( struct inode *inode, struct file *filp ) 1807static int floppy_open(struct block_device *bdev, fmode_t mode)
1808{ 1808{
1809 struct atari_floppy_struct *p = inode->i_bdev->bd_disk->private_data; 1809 struct atari_floppy_struct *p = bdev->bd_disk->private_data;
1810 int type = iminor(inode) >> 2; 1810 int type = MINOR(bdev->bd_dev) >> 2;
1811 1811
1812 DPRINT(("fd_open: type=%d\n",type)); 1812 DPRINT(("fd_open: type=%d\n",type));
1813 if (p->ref && p->type != type) 1813 if (p->ref && p->type != type)
1814 return -EBUSY; 1814 return -EBUSY;
1815 1815
1816 if (p->ref == -1 || (p->ref && filp->f_flags & O_EXCL)) 1816 if (p->ref == -1 || (p->ref && mode & FMODE_EXCL))
1817 return -EBUSY; 1817 return -EBUSY;
1818 1818
1819 if (filp->f_flags & O_EXCL) 1819 if (mode & FMODE_EXCL)
1820 p->ref = -1; 1820 p->ref = -1;
1821 else 1821 else
1822 p->ref++; 1822 p->ref++;
1823 1823
1824 p->type = type; 1824 p->type = type;
1825 1825
1826 if (filp->f_flags & O_NDELAY) 1826 if (mode & FMODE_NDELAY)
1827 return 0; 1827 return 0;
1828 1828
1829 if (filp->f_mode & 3) { 1829 if (mode & (FMODE_READ|FMODE_WRITE)) {
1830 check_disk_change(inode->i_bdev); 1830 check_disk_change(bdev);
1831 if (filp->f_mode & 2) { 1831 if (mode & FMODE_WRITE) {
1832 if (p->wpstat) { 1832 if (p->wpstat) {
1833 if (p->ref < 0) 1833 if (p->ref < 0)
1834 p->ref = 0; 1834 p->ref = 0;
1835 else 1835 else
1836 p->ref--; 1836 p->ref--;
1837 floppy_release(inode, filp);
1838 return -EROFS; 1837 return -EROFS;
1839 } 1838 }
1840 } 1839 }
@@ -1843,9 +1842,9 @@ static int floppy_open( struct inode *inode, struct file *filp )
1843} 1842}
1844 1843
1845 1844
1846static int floppy_release( struct inode * inode, struct file * filp ) 1845static int floppy_release(struct gendisk *disk, fmode_t mode)
1847{ 1846{
1848 struct atari_floppy_struct *p = inode->i_bdev->bd_disk->private_data; 1847 struct atari_floppy_struct *p = disk->private_data;
1849 if (p->ref < 0) 1848 if (p->ref < 0)
1850 p->ref = 0; 1849 p->ref = 0;
1851 else if (!p->ref--) { 1850 else if (!p->ref--) {
@@ -1859,7 +1858,7 @@ static struct block_device_operations floppy_fops = {
1859 .owner = THIS_MODULE, 1858 .owner = THIS_MODULE,
1860 .open = floppy_open, 1859 .open = floppy_open,
1861 .release = floppy_release, 1860 .release = floppy_release,
1862 .ioctl = fd_ioctl, 1861 .locked_ioctl = fd_ioctl,
1863 .media_changed = check_floppy_change, 1862 .media_changed = check_floppy_change,
1864 .revalidate_disk= floppy_revalidate, 1863 .revalidate_disk= floppy_revalidate,
1865}; 1864};
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index d070d492e385..bdd4f5f45575 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -340,11 +340,10 @@ static int brd_direct_access (struct block_device *bdev, sector_t sector,
340} 340}
341#endif 341#endif
342 342
343static int brd_ioctl(struct inode *inode, struct file *file, 343static int brd_ioctl(struct block_device *bdev, fmode_t mode,
344 unsigned int cmd, unsigned long arg) 344 unsigned int cmd, unsigned long arg)
345{ 345{
346 int error; 346 int error;
347 struct block_device *bdev = inode->i_bdev;
348 struct brd_device *brd = bdev->bd_disk->private_data; 347 struct brd_device *brd = bdev->bd_disk->private_data;
349 348
350 if (cmd != BLKFLSBUF) 349 if (cmd != BLKFLSBUF)
@@ -376,7 +375,7 @@ static int brd_ioctl(struct inode *inode, struct file *file,
376 375
377static struct block_device_operations brd_fops = { 376static struct block_device_operations brd_fops = {
378 .owner = THIS_MODULE, 377 .owner = THIS_MODULE,
379 .ioctl = brd_ioctl, 378 .locked_ioctl = brd_ioctl,
380#ifdef CONFIG_BLK_DEV_XIP 379#ifdef CONFIG_BLK_DEV_XIP
381 .direct_access = brd_direct_access, 380 .direct_access = brd_direct_access,
382#endif 381#endif
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 1e1f9153000c..9f7c543cc04b 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -96,6 +96,8 @@ static const struct pci_device_id cciss_pci_device_id[] = {
96 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245}, 96 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
97 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247}, 97 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
98 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249}, 98 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
99 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
100 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
99 {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 101 {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
100 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0}, 102 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
101 {0,} 103 {0,}
@@ -133,6 +135,8 @@ static struct board_type products[] = {
133 {0x3245103C, "Smart Array P410i", &SA5_access}, 135 {0x3245103C, "Smart Array P410i", &SA5_access},
134 {0x3247103C, "Smart Array P411", &SA5_access}, 136 {0x3247103C, "Smart Array P411", &SA5_access},
135 {0x3249103C, "Smart Array P812", &SA5_access}, 137 {0x3249103C, "Smart Array P812", &SA5_access},
138 {0x324A103C, "Smart Array P712m", &SA5_access},
139 {0x324B103C, "Smart Array P711m", &SA5_access},
136 {0xFFFF103C, "Unknown Smart Array", &SA5_access}, 140 {0xFFFF103C, "Unknown Smart Array", &SA5_access},
137}; 141};
138 142
@@ -152,9 +156,9 @@ static ctlr_info_t *hba[MAX_CTLR];
152 156
153static void do_cciss_request(struct request_queue *q); 157static void do_cciss_request(struct request_queue *q);
154static irqreturn_t do_cciss_intr(int irq, void *dev_id); 158static irqreturn_t do_cciss_intr(int irq, void *dev_id);
155static int cciss_open(struct inode *inode, struct file *filep); 159static int cciss_open(struct block_device *bdev, fmode_t mode);
156static int cciss_release(struct inode *inode, struct file *filep); 160static int cciss_release(struct gendisk *disk, fmode_t mode);
157static int cciss_ioctl(struct inode *inode, struct file *filep, 161static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
158 unsigned int cmd, unsigned long arg); 162 unsigned int cmd, unsigned long arg);
159static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo); 163static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
160 164
@@ -192,14 +196,15 @@ static void cciss_procinit(int i)
192#endif /* CONFIG_PROC_FS */ 196#endif /* CONFIG_PROC_FS */
193 197
194#ifdef CONFIG_COMPAT 198#ifdef CONFIG_COMPAT
195static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg); 199static int cciss_compat_ioctl(struct block_device *, fmode_t,
200 unsigned, unsigned long);
196#endif 201#endif
197 202
198static struct block_device_operations cciss_fops = { 203static struct block_device_operations cciss_fops = {
199 .owner = THIS_MODULE, 204 .owner = THIS_MODULE,
200 .open = cciss_open, 205 .open = cciss_open,
201 .release = cciss_release, 206 .release = cciss_release,
202 .ioctl = cciss_ioctl, 207 .locked_ioctl = cciss_ioctl,
203 .getgeo = cciss_getgeo, 208 .getgeo = cciss_getgeo,
204#ifdef CONFIG_COMPAT 209#ifdef CONFIG_COMPAT
205 .compat_ioctl = cciss_compat_ioctl, 210 .compat_ioctl = cciss_compat_ioctl,
@@ -547,13 +552,13 @@ static inline drive_info_struct *get_drv(struct gendisk *disk)
547/* 552/*
548 * Open. Make sure the device is really there. 553 * Open. Make sure the device is really there.
549 */ 554 */
550static int cciss_open(struct inode *inode, struct file *filep) 555static int cciss_open(struct block_device *bdev, fmode_t mode)
551{ 556{
552 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk); 557 ctlr_info_t *host = get_host(bdev->bd_disk);
553 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk); 558 drive_info_struct *drv = get_drv(bdev->bd_disk);
554 559
555#ifdef CCISS_DEBUG 560#ifdef CCISS_DEBUG
556 printk(KERN_DEBUG "cciss_open %s\n", inode->i_bdev->bd_disk->disk_name); 561 printk(KERN_DEBUG "cciss_open %s\n", bdev->bd_disk->disk_name);
557#endif /* CCISS_DEBUG */ 562#endif /* CCISS_DEBUG */
558 563
559 if (host->busy_initializing || drv->busy_configuring) 564 if (host->busy_initializing || drv->busy_configuring)
@@ -567,9 +572,9 @@ static int cciss_open(struct inode *inode, struct file *filep)
567 * for "raw controller". 572 * for "raw controller".
568 */ 573 */
569 if (drv->heads == 0) { 574 if (drv->heads == 0) {
570 if (iminor(inode) != 0) { /* not node 0? */ 575 if (MINOR(bdev->bd_dev) != 0) { /* not node 0? */
571 /* if not node 0 make sure it is a partition = 0 */ 576 /* if not node 0 make sure it is a partition = 0 */
572 if (iminor(inode) & 0x0f) { 577 if (MINOR(bdev->bd_dev) & 0x0f) {
573 return -ENXIO; 578 return -ENXIO;
574 /* if it is, make sure we have a LUN ID */ 579 /* if it is, make sure we have a LUN ID */
575 } else if (drv->LunID == 0) { 580 } else if (drv->LunID == 0) {
@@ -587,14 +592,13 @@ static int cciss_open(struct inode *inode, struct file *filep)
587/* 592/*
588 * Close. Sync first. 593 * Close. Sync first.
589 */ 594 */
590static int cciss_release(struct inode *inode, struct file *filep) 595static int cciss_release(struct gendisk *disk, fmode_t mode)
591{ 596{
592 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk); 597 ctlr_info_t *host = get_host(disk);
593 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk); 598 drive_info_struct *drv = get_drv(disk);
594 599
595#ifdef CCISS_DEBUG 600#ifdef CCISS_DEBUG
596 printk(KERN_DEBUG "cciss_release %s\n", 601 printk(KERN_DEBUG "cciss_release %s\n", disk->disk_name);
597 inode->i_bdev->bd_disk->disk_name);
598#endif /* CCISS_DEBUG */ 602#endif /* CCISS_DEBUG */
599 603
600 drv->usage_count--; 604 drv->usage_count--;
@@ -604,21 +608,23 @@ static int cciss_release(struct inode *inode, struct file *filep)
604 608
605#ifdef CONFIG_COMPAT 609#ifdef CONFIG_COMPAT
606 610
607static int do_ioctl(struct file *f, unsigned cmd, unsigned long arg) 611static int do_ioctl(struct block_device *bdev, fmode_t mode,
612 unsigned cmd, unsigned long arg)
608{ 613{
609 int ret; 614 int ret;
610 lock_kernel(); 615 lock_kernel();
611 ret = cciss_ioctl(f->f_path.dentry->d_inode, f, cmd, arg); 616 ret = cciss_ioctl(bdev, mode, cmd, arg);
612 unlock_kernel(); 617 unlock_kernel();
613 return ret; 618 return ret;
614} 619}
615 620
616static int cciss_ioctl32_passthru(struct file *f, unsigned cmd, 621static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
617 unsigned long arg); 622 unsigned cmd, unsigned long arg);
618static int cciss_ioctl32_big_passthru(struct file *f, unsigned cmd, 623static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode,
619 unsigned long arg); 624 unsigned cmd, unsigned long arg);
620 625
621static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg) 626static int cciss_compat_ioctl(struct block_device *bdev, fmode_t mode,
627 unsigned cmd, unsigned long arg)
622{ 628{
623 switch (cmd) { 629 switch (cmd) {
624 case CCISS_GETPCIINFO: 630 case CCISS_GETPCIINFO:
@@ -636,20 +642,20 @@ static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg)
636 case CCISS_REGNEWD: 642 case CCISS_REGNEWD:
637 case CCISS_RESCANDISK: 643 case CCISS_RESCANDISK:
638 case CCISS_GETLUNINFO: 644 case CCISS_GETLUNINFO:
639 return do_ioctl(f, cmd, arg); 645 return do_ioctl(bdev, mode, cmd, arg);
640 646
641 case CCISS_PASSTHRU32: 647 case CCISS_PASSTHRU32:
642 return cciss_ioctl32_passthru(f, cmd, arg); 648 return cciss_ioctl32_passthru(bdev, mode, cmd, arg);
643 case CCISS_BIG_PASSTHRU32: 649 case CCISS_BIG_PASSTHRU32:
644 return cciss_ioctl32_big_passthru(f, cmd, arg); 650 return cciss_ioctl32_big_passthru(bdev, mode, cmd, arg);
645 651
646 default: 652 default:
647 return -ENOIOCTLCMD; 653 return -ENOIOCTLCMD;
648 } 654 }
649} 655}
650 656
651static int cciss_ioctl32_passthru(struct file *f, unsigned cmd, 657static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
652 unsigned long arg) 658 unsigned cmd, unsigned long arg)
653{ 659{
654 IOCTL32_Command_struct __user *arg32 = 660 IOCTL32_Command_struct __user *arg32 =
655 (IOCTL32_Command_struct __user *) arg; 661 (IOCTL32_Command_struct __user *) arg;
@@ -676,7 +682,7 @@ static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
676 if (err) 682 if (err)
677 return -EFAULT; 683 return -EFAULT;
678 684
679 err = do_ioctl(f, CCISS_PASSTHRU, (unsigned long)p); 685 err = do_ioctl(bdev, mode, CCISS_PASSTHRU, (unsigned long)p);
680 if (err) 686 if (err)
681 return err; 687 return err;
682 err |= 688 err |=
@@ -687,8 +693,8 @@ static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
687 return err; 693 return err;
688} 694}
689 695
690static int cciss_ioctl32_big_passthru(struct file *file, unsigned cmd, 696static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode,
691 unsigned long arg) 697 unsigned cmd, unsigned long arg)
692{ 698{
693 BIG_IOCTL32_Command_struct __user *arg32 = 699 BIG_IOCTL32_Command_struct __user *arg32 =
694 (BIG_IOCTL32_Command_struct __user *) arg; 700 (BIG_IOCTL32_Command_struct __user *) arg;
@@ -717,7 +723,7 @@ static int cciss_ioctl32_big_passthru(struct file *file, unsigned cmd,
717 if (err) 723 if (err)
718 return -EFAULT; 724 return -EFAULT;
719 725
720 err = do_ioctl(file, CCISS_BIG_PASSTHRU, (unsigned long)p); 726 err = do_ioctl(bdev, mode, CCISS_BIG_PASSTHRU, (unsigned long)p);
721 if (err) 727 if (err)
722 return err; 728 return err;
723 err |= 729 err |=
@@ -745,10 +751,9 @@ static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo)
745/* 751/*
746 * ioctl 752 * ioctl
747 */ 753 */
748static int cciss_ioctl(struct inode *inode, struct file *filep, 754static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
749 unsigned int cmd, unsigned long arg) 755 unsigned int cmd, unsigned long arg)
750{ 756{
751 struct block_device *bdev = inode->i_bdev;
752 struct gendisk *disk = bdev->bd_disk; 757 struct gendisk *disk = bdev->bd_disk;
753 ctlr_info_t *host = get_host(disk); 758 ctlr_info_t *host = get_host(disk);
754 drive_info_struct *drv = get_drv(disk); 759 drive_info_struct *drv = get_drv(disk);
@@ -1232,7 +1237,7 @@ static int cciss_ioctl(struct inode *inode, struct file *filep,
1232 case SG_EMULATED_HOST: 1237 case SG_EMULATED_HOST:
1233 case SG_IO: 1238 case SG_IO:
1234 case SCSI_IOCTL_SEND_COMMAND: 1239 case SCSI_IOCTL_SEND_COMMAND:
1235 return scsi_cmd_ioctl(filep, disk->queue, disk, cmd, argp); 1240 return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp);
1236 1241
1237 /* scsi_cmd_ioctl would normally handle these, below, but */ 1242 /* scsi_cmd_ioctl would normally handle these, below, but */
1238 /* they aren't a good fit for cciss, as CD-ROMs are */ 1243 /* they aren't a good fit for cciss, as CD-ROMs are */
@@ -1365,6 +1370,7 @@ static void cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
1365 disk->first_minor = drv_index << NWD_SHIFT; 1370 disk->first_minor = drv_index << NWD_SHIFT;
1366 disk->fops = &cciss_fops; 1371 disk->fops = &cciss_fops;
1367 disk->private_data = &h->drv[drv_index]; 1372 disk->private_data = &h->drv[drv_index];
1373 disk->driverfs_dev = &h->pdev->dev;
1368 1374
1369 /* Set up queue information */ 1375 /* Set up queue information */
1370 blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask); 1376 blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask);
@@ -1687,6 +1693,11 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time)
1687 for (i = 0; i <= h->highest_lun; i++) { 1693 for (i = 0; i <= h->highest_lun; i++) {
1688 int j; 1694 int j;
1689 drv_found = 0; 1695 drv_found = 0;
1696
1697 /* skip holes in the array from already deleted drives */
1698 if (h->drv[i].raid_level == -1)
1699 continue;
1700
1690 for (j = 0; j < num_luns; j++) { 1701 for (j = 0; j < num_luns; j++) {
1691 memcpy(&lunid, &ld_buff->LUN[j][0], 4); 1702 memcpy(&lunid, &ld_buff->LUN[j][0], 4);
1692 lunid = le32_to_cpu(lunid); 1703 lunid = le32_to_cpu(lunid);
@@ -2841,7 +2852,7 @@ static void do_cciss_request(struct request_queue *q)
2841 h->maxSG = seg; 2852 h->maxSG = seg;
2842 2853
2843#ifdef CCISS_DEBUG 2854#ifdef CCISS_DEBUG
2844 printk(KERN_DEBUG "cciss: Submitting %d sectors in %d segments\n", 2855 printk(KERN_DEBUG "cciss: Submitting %lu sectors in %d segments\n",
2845 creq->nr_sectors, seg); 2856 creq->nr_sectors, seg);
2846#endif /* CCISS_DEBUG */ 2857#endif /* CCISS_DEBUG */
2847 2858
@@ -3191,7 +3202,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
3191 3202
3192 c->paddr = pci_resource_start(pdev, 0); /* addressing mode bits already removed */ 3203 c->paddr = pci_resource_start(pdev, 0); /* addressing mode bits already removed */
3193#ifdef CCISS_DEBUG 3204#ifdef CCISS_DEBUG
3194 printk("address 0 = %x\n", c->paddr); 3205 printk("address 0 = %lx\n", c->paddr);
3195#endif /* CCISS_DEBUG */ 3206#endif /* CCISS_DEBUG */
3196 c->vaddr = remap_pci_mem(c->paddr, 0x250); 3207 c->vaddr = remap_pci_mem(c->paddr, 0x250);
3197 3208
@@ -3218,7 +3229,8 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
3218#endif /* CCISS_DEBUG */ 3229#endif /* CCISS_DEBUG */
3219 cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr); 3230 cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr);
3220#ifdef CCISS_DEBUG 3231#ifdef CCISS_DEBUG
3221 printk("cfg base address index = %x\n", cfg_base_addr_index); 3232 printk("cfg base address index = %llx\n",
3233 (unsigned long long)cfg_base_addr_index);
3222#endif /* CCISS_DEBUG */ 3234#endif /* CCISS_DEBUG */
3223 if (cfg_base_addr_index == -1) { 3235 if (cfg_base_addr_index == -1) {
3224 printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n"); 3236 printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n");
@@ -3228,7 +3240,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
3228 3240
3229 cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET); 3241 cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET);
3230#ifdef CCISS_DEBUG 3242#ifdef CCISS_DEBUG
3231 printk("cfg offset = %x\n", cfg_offset); 3243 printk("cfg offset = %llx\n", (unsigned long long)cfg_offset);
3232#endif /* CCISS_DEBUG */ 3244#endif /* CCISS_DEBUG */
3233 c->cfgtable = remap_pci_mem(pci_resource_start(pdev, 3245 c->cfgtable = remap_pci_mem(pci_resource_start(pdev,
3234 cfg_base_addr_index) + 3246 cfg_base_addr_index) +
@@ -3403,7 +3415,8 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
3403 int i; 3415 int i;
3404 int j = 0; 3416 int j = 0;
3405 int rc; 3417 int rc;
3406 int dac; 3418 int dac, return_code;
3419 InquiryData_struct *inq_buff = NULL;
3407 3420
3408 i = alloc_cciss_hba(); 3421 i = alloc_cciss_hba();
3409 if (i < 0) 3422 if (i < 0)
@@ -3509,6 +3522,25 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
3509 /* Turn the interrupts on so we can service requests */ 3522 /* Turn the interrupts on so we can service requests */
3510 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON); 3523 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
3511 3524
3525 /* Get the firmware version */
3526 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
3527 if (inq_buff == NULL) {
3528 printk(KERN_ERR "cciss: out of memory\n");
3529 goto clean4;
3530 }
3531
3532 return_code = sendcmd_withirq(CISS_INQUIRY, i, inq_buff,
3533 sizeof(InquiryData_struct), 0, 0 , 0, TYPE_CMD);
3534 if (return_code == IO_OK) {
3535 hba[i]->firm_ver[0] = inq_buff->data_byte[32];
3536 hba[i]->firm_ver[1] = inq_buff->data_byte[33];
3537 hba[i]->firm_ver[2] = inq_buff->data_byte[34];
3538 hba[i]->firm_ver[3] = inq_buff->data_byte[35];
3539 } else { /* send command failed */
3540 printk(KERN_WARNING "cciss: unable to determine firmware"
3541 " version of controller\n");
3542 }
3543
3512 cciss_procinit(i); 3544 cciss_procinit(i);
3513 3545
3514 hba[i]->cciss_max_sectors = 2048; 3546 hba[i]->cciss_max_sectors = 2048;
@@ -3519,6 +3551,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
3519 return 1; 3551 return 1;
3520 3552
3521clean4: 3553clean4:
3554 kfree(inq_buff);
3522#ifdef CONFIG_CISS_SCSI_TAPE 3555#ifdef CONFIG_CISS_SCSI_TAPE
3523 kfree(hba[i]->scsi_rejects.complete); 3556 kfree(hba[i]->scsi_rejects.complete);
3524#endif 3557#endif
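The cciss printk fixes follow the standard pattern for types whose width depends on the configuration (physical addresses, offsets, sector counts): cast to a fixed-width integer that matches the format specifier instead of guessing at %x. A one-function sketch of the same convention; example_report() is hypothetical:

    #include <linux/kernel.h>
    #include <linux/types.h>

    static void example_report(resource_size_t paddr, unsigned long nr_sectors)
    {
            printk(KERN_DEBUG "example: base %llx, %lu sectors\n",
                   (unsigned long long)paddr, nr_sectors);
    }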
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index 3d967525e9a9..5d39df14ed90 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -156,9 +156,9 @@ static int sendcmd(
156 unsigned int blkcnt, 156 unsigned int blkcnt,
157 unsigned int log_unit ); 157 unsigned int log_unit );
158 158
159static int ida_open(struct inode *inode, struct file *filep); 159static int ida_open(struct block_device *bdev, fmode_t mode);
160static int ida_release(struct inode *inode, struct file *filep); 160static int ida_release(struct gendisk *disk, fmode_t mode);
161static int ida_ioctl(struct inode *inode, struct file *filep, unsigned int cmd, unsigned long arg); 161static int ida_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg);
162static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo); 162static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo);
163static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io); 163static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io);
164 164
@@ -197,7 +197,7 @@ static struct block_device_operations ida_fops = {
197 .owner = THIS_MODULE, 197 .owner = THIS_MODULE,
198 .open = ida_open, 198 .open = ida_open,
199 .release = ida_release, 199 .release = ida_release,
200 .ioctl = ida_ioctl, 200 .locked_ioctl = ida_ioctl,
201 .getgeo = ida_getgeo, 201 .getgeo = ida_getgeo,
202 .revalidate_disk= ida_revalidate, 202 .revalidate_disk= ida_revalidate,
203}; 203};
@@ -567,7 +567,12 @@ static int __init cpqarray_init(void)
567 num_cntlrs_reg++; 567 num_cntlrs_reg++;
568 } 568 }
569 569
570 return(num_cntlrs_reg); 570 if (num_cntlrs_reg)
571 return 0;
572 else {
573 pci_unregister_driver(&cpqarray_pci_driver);
574 return -ENODEV;
575 }
571} 576}
572 577
573/* Function to find the first free pointer into our hba[] array */ 578/* Function to find the first free pointer into our hba[] array */
@@ -818,12 +823,12 @@ DBGINFO(
818/* 823/*
819 * Open. Make sure the device is really there. 824 * Open. Make sure the device is really there.
820 */ 825 */
821static int ida_open(struct inode *inode, struct file *filep) 826static int ida_open(struct block_device *bdev, fmode_t mode)
822{ 827{
823 drv_info_t *drv = get_drv(inode->i_bdev->bd_disk); 828 drv_info_t *drv = get_drv(bdev->bd_disk);
824 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk); 829 ctlr_info_t *host = get_host(bdev->bd_disk);
825 830
826 DBGINFO(printk("ida_open %s\n", inode->i_bdev->bd_disk->disk_name)); 831 DBGINFO(printk("ida_open %s\n", bdev->bd_disk->disk_name));
827 /* 832 /*
828 * Root is allowed to open raw volume zero even if it's not configured 833 * Root is allowed to open raw volume zero even if it's not configured
829 * so array config can still work. I don't think I really like this, 834 * so array config can still work. I don't think I really like this,
@@ -843,9 +848,9 @@ static int ida_open(struct inode *inode, struct file *filep)
843/* 848/*
844 * Close. Sync first. 849 * Close. Sync first.
845 */ 850 */
846static int ida_release(struct inode *inode, struct file *filep) 851static int ida_release(struct gendisk *disk, fmode_t mode)
847{ 852{
848 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk); 853 ctlr_info_t *host = get_host(disk);
849 host->usage_count--; 854 host->usage_count--;
850 return 0; 855 return 0;
851} 856}
@@ -1128,10 +1133,10 @@ static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo)
1128 * ida_ioctl does some miscellaneous stuff like reporting drive geometry, 1133 * ida_ioctl does some miscellaneous stuff like reporting drive geometry,
1129 * setting readahead and submitting commands from userspace to the controller. 1134 * setting readahead and submitting commands from userspace to the controller.
1130 */ 1135 */
1131static int ida_ioctl(struct inode *inode, struct file *filep, unsigned int cmd, unsigned long arg) 1136static int ida_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
1132{ 1137{
1133 drv_info_t *drv = get_drv(inode->i_bdev->bd_disk); 1138 drv_info_t *drv = get_drv(bdev->bd_disk);
1134 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk); 1139 ctlr_info_t *host = get_host(bdev->bd_disk);
1135 int error; 1140 int error;
1136 ida_ioctl_t __user *io = (ida_ioctl_t __user *)arg; 1141 ida_ioctl_t __user *io = (ida_ioctl_t __user *)arg;
1137 ida_ioctl_t *my_io; 1142 ida_ioctl_t *my_io;
@@ -1165,7 +1170,7 @@ out_passthru:
1165 put_user(host->ctlr_sig, (int __user *)arg); 1170 put_user(host->ctlr_sig, (int __user *)arg);
1166 return 0; 1171 return 0;
1167 case IDAREVALIDATEVOLS: 1172 case IDAREVALIDATEVOLS:
1168 if (iminor(inode) != 0) 1173 if (MINOR(bdev->bd_dev) != 0)
1169 return -ENXIO; 1174 return -ENXIO;
1170 return revalidate_allvol(host); 1175 return revalidate_allvol(host);
1171 case IDADRIVERVERSION: 1176 case IDADRIVERVERSION:
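The cpqarray_init() change fixes a long-standing convention violation: a module_init() routine must return 0 on success or a negative errno, not a controller count, and on failure it has to unwind whatever it registered (here, the PCI driver). A sketch of that shape with hypothetical names:

    #include <linux/init.h>
    #include <linux/pci.h>

    static struct pci_driver example_pci_driver = {
            .name = "example",                      /* hypothetical driver */
    };
    static int example_num_cntlrs;                  /* counted by the probe routine */

    static int __init example_init(void)
    {
            int rc;

            rc = pci_register_driver(&example_pci_driver);
            if (rc)
                    return rc;

            if (example_num_cntlrs)
                    return 0;

            pci_unregister_driver(&example_pci_driver);     /* undo on failure */
            return -ENODEV;
    }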
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 2cea27aba9a0..cf29cc4e6ab7 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3450,14 +3450,14 @@ static int fd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
3450 return 0; 3450 return 0;
3451} 3451}
3452 3452
3453static int fd_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, 3453static int fd_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
3454 unsigned long param) 3454 unsigned long param)
3455{ 3455{
3456#define FD_IOCTL_ALLOWED ((filp) && (filp)->private_data) 3456#define FD_IOCTL_ALLOWED (mode & (FMODE_WRITE|FMODE_WRITE_IOCTL))
3457#define OUT(c,x) case c: outparam = (const char *) (x); break 3457#define OUT(c,x) case c: outparam = (const char *) (x); break
3458#define IN(c,x,tag) case c: *(x) = inparam. tag ; return 0 3458#define IN(c,x,tag) case c: *(x) = inparam. tag ; return 0
3459 3459
3460 int drive = (long)inode->i_bdev->bd_disk->private_data; 3460 int drive = (long)bdev->bd_disk->private_data;
3461 int type = ITYPE(UDRS->fd_device); 3461 int type = ITYPE(UDRS->fd_device);
3462 int i; 3462 int i;
3463 int ret; 3463 int ret;
@@ -3516,11 +3516,11 @@ static int fd_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
3516 current_type[drive] = NULL; 3516 current_type[drive] = NULL;
3517 floppy_sizes[drive] = MAX_DISK_SIZE << 1; 3517 floppy_sizes[drive] = MAX_DISK_SIZE << 1;
3518 UDRS->keep_data = 0; 3518 UDRS->keep_data = 0;
3519 return invalidate_drive(inode->i_bdev); 3519 return invalidate_drive(bdev);
3520 case FDSETPRM: 3520 case FDSETPRM:
3521 case FDDEFPRM: 3521 case FDDEFPRM:
3522 return set_geometry(cmd, &inparam.g, 3522 return set_geometry(cmd, &inparam.g,
3523 drive, type, inode->i_bdev); 3523 drive, type, bdev);
3524 case FDGETPRM: 3524 case FDGETPRM:
3525 ECALL(get_floppy_geometry(drive, type, 3525 ECALL(get_floppy_geometry(drive, type,
3526 (struct floppy_struct **) 3526 (struct floppy_struct **)
@@ -3551,7 +3551,7 @@ static int fd_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
3551 case FDFMTEND: 3551 case FDFMTEND:
3552 case FDFLUSH: 3552 case FDFLUSH:
3553 LOCK_FDC(drive, 1); 3553 LOCK_FDC(drive, 1);
3554 return invalidate_drive(inode->i_bdev); 3554 return invalidate_drive(bdev);
3555 3555
3556 case FDSETEMSGTRESH: 3556 case FDSETEMSGTRESH:
3557 UDP->max_errors.reporting = 3557 UDP->max_errors.reporting =
@@ -3659,9 +3659,9 @@ static void __init config_types(void)
3659 printk("\n"); 3659 printk("\n");
3660} 3660}
3661 3661
3662static int floppy_release(struct inode *inode, struct file *filp) 3662static int floppy_release(struct gendisk *disk, fmode_t mode)
3663{ 3663{
3664 int drive = (long)inode->i_bdev->bd_disk->private_data; 3664 int drive = (long)disk->private_data;
3665 3665
3666 mutex_lock(&open_lock); 3666 mutex_lock(&open_lock);
3667 if (UDRS->fd_ref < 0) 3667 if (UDRS->fd_ref < 0)
@@ -3682,18 +3682,17 @@ static int floppy_release(struct inode *inode, struct file *filp)
3682 * /dev/PS0 etc), and disallows simultaneous access to the same 3682 * /dev/PS0 etc), and disallows simultaneous access to the same
3683 * drive with different device numbers. 3683 * drive with different device numbers.
3684 */ 3684 */
3685static int floppy_open(struct inode *inode, struct file *filp) 3685static int floppy_open(struct block_device *bdev, fmode_t mode)
3686{ 3686{
3687 int drive = (long)inode->i_bdev->bd_disk->private_data; 3687 int drive = (long)bdev->bd_disk->private_data;
3688 int old_dev; 3688 int old_dev, new_dev;
3689 int try; 3689 int try;
3690 int res = -EBUSY; 3690 int res = -EBUSY;
3691 char *tmp; 3691 char *tmp;
3692 3692
3693 filp->private_data = (void *)0;
3694 mutex_lock(&open_lock); 3693 mutex_lock(&open_lock);
3695 old_dev = UDRS->fd_device; 3694 old_dev = UDRS->fd_device;
3696 if (opened_bdev[drive] && opened_bdev[drive] != inode->i_bdev) 3695 if (opened_bdev[drive] && opened_bdev[drive] != bdev)
3697 goto out2; 3696 goto out2;
3698 3697
3699 if (!UDRS->fd_ref && (UDP->flags & FD_BROKEN_DCL)) { 3698 if (!UDRS->fd_ref && (UDP->flags & FD_BROKEN_DCL)) {
@@ -3701,15 +3700,15 @@ static int floppy_open(struct inode *inode, struct file *filp)
3701 USETF(FD_VERIFY); 3700 USETF(FD_VERIFY);
3702 } 3701 }
3703 3702
3704 if (UDRS->fd_ref == -1 || (UDRS->fd_ref && (filp->f_flags & O_EXCL))) 3703 if (UDRS->fd_ref == -1 || (UDRS->fd_ref && (mode & FMODE_EXCL)))
3705 goto out2; 3704 goto out2;
3706 3705
3707 if (filp->f_flags & O_EXCL) 3706 if (mode & FMODE_EXCL)
3708 UDRS->fd_ref = -1; 3707 UDRS->fd_ref = -1;
3709 else 3708 else
3710 UDRS->fd_ref++; 3709 UDRS->fd_ref++;
3711 3710
3712 opened_bdev[drive] = inode->i_bdev; 3711 opened_bdev[drive] = bdev;
3713 3712
3714 res = -ENXIO; 3713 res = -ENXIO;
3715 3714
@@ -3744,31 +3743,26 @@ static int floppy_open(struct inode *inode, struct file *filp)
3744 } 3743 }
3745 } 3744 }
3746 3745
3747 UDRS->fd_device = iminor(inode); 3746 new_dev = MINOR(bdev->bd_dev);
3748 set_capacity(disks[drive], floppy_sizes[iminor(inode)]); 3747 UDRS->fd_device = new_dev;
3749 if (old_dev != -1 && old_dev != iminor(inode)) { 3748 set_capacity(disks[drive], floppy_sizes[new_dev]);
3749 if (old_dev != -1 && old_dev != new_dev) {
3750 if (buffer_drive == drive) 3750 if (buffer_drive == drive)
3751 buffer_track = -1; 3751 buffer_track = -1;
3752 } 3752 }
3753 3753
3754 /* Allow ioctls if we have write-permissions even if read-only open.
3755 * Needed so that programs such as fdrawcmd still can work on write
3756 * protected disks */
3757 if ((filp->f_mode & FMODE_WRITE) || !file_permission(filp, MAY_WRITE))
3758 filp->private_data = (void *)8;
3759
3760 if (UFDCS->rawcmd == 1) 3754 if (UFDCS->rawcmd == 1)
3761 UFDCS->rawcmd = 2; 3755 UFDCS->rawcmd = 2;
3762 3756
3763 if (!(filp->f_flags & O_NDELAY)) { 3757 if (!(mode & FMODE_NDELAY)) {
3764 if (filp->f_mode & 3) { 3758 if (mode & (FMODE_READ|FMODE_WRITE)) {
3765 UDRS->last_checked = 0; 3759 UDRS->last_checked = 0;
3766 check_disk_change(inode->i_bdev); 3760 check_disk_change(bdev);
3767 if (UTESTF(FD_DISK_CHANGED)) 3761 if (UTESTF(FD_DISK_CHANGED))
3768 goto out; 3762 goto out;
3769 } 3763 }
3770 res = -EROFS; 3764 res = -EROFS;
3771 if ((filp->f_mode & 2) && !(UTESTF(FD_DISK_WRITABLE))) 3765 if ((mode & FMODE_WRITE) && !(UTESTF(FD_DISK_WRITABLE)))
3772 goto out; 3766 goto out;
3773 } 3767 }
3774 mutex_unlock(&open_lock); 3768 mutex_unlock(&open_lock);
@@ -3911,7 +3905,7 @@ static struct block_device_operations floppy_fops = {
3911 .owner = THIS_MODULE, 3905 .owner = THIS_MODULE,
3912 .open = floppy_open, 3906 .open = floppy_open,
3913 .release = floppy_release, 3907 .release = floppy_release,
3914 .ioctl = fd_ioctl, 3908 .locked_ioctl = fd_ioctl,
3915 .getgeo = fd_getgeo, 3909 .getgeo = fd_getgeo,
3916 .media_changed = check_floppy_change, 3910 .media_changed = check_floppy_change,
3917 .revalidate_disk = floppy_revalidate, 3911 .revalidate_disk = floppy_revalidate,
@@ -4130,7 +4124,7 @@ static int __init floppy_setup(char *str)
4130 printk("\n"); 4124 printk("\n");
4131 } else 4125 } else
4132 DPRINT("botched floppy option\n"); 4126 DPRINT("botched floppy option\n");
4133 DPRINT("Read Documentation/floppy.txt\n"); 4127 DPRINT("Read Documentation/blockdev/floppy.txt\n");
4134 return 0; 4128 return 0;
4135} 4129}
4136 4130
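Two details of the floppy conversion above are worth flagging: privileged ioctls are now gated on (mode & (FMODE_WRITE|FMODE_WRITE_IOCTL)) instead of a cookie stashed in filp->private_data at open time, and floppy_open() maps the old file flag and mode tests onto fmode_t bits. The fragment below restates that mapping for a hypothetical open routine; example_open, want_write and the -EBUSY policy are illustration only.

static int example_open(struct block_device *bdev, fmode_t mode)
{
        int want_write = (mode & FMODE_WRITE) != 0;       /* was filp->f_mode & 2 */

        if (mode & FMODE_EXCL)                            /* was filp->f_flags & O_EXCL */
                return -EBUSY;

        if (!(mode & FMODE_NDELAY) &&                     /* was filp->f_flags & O_NDELAY */
            (mode & (FMODE_READ | FMODE_WRITE)))          /* was filp->f_mode & 3 */
                check_disk_change(bdev);

        if (want_write && bdev_read_only(bdev))
                return -EROFS;
        return 0;
}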
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index d3a25b027ff9..5c4ee70d5cf3 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -40,8 +40,7 @@
40 * Heinz Mauelshagen <mge@sistina.com>, Feb 2002 40 * Heinz Mauelshagen <mge@sistina.com>, Feb 2002
41 * 41 *
42 * Support for falling back on the write file operation when the address space 42 * Support for falling back on the write file operation when the address space
43 * operations prepare_write and/or commit_write are not available on the 43 * operations write_begin is not available on the backing filesystem.
44 * backing filesystem.
45 * Anton Altaparmakov, 16 Feb 2005 44 * Anton Altaparmakov, 16 Feb 2005
46 * 45 *
47 * Still To Fix: 46 * Still To Fix:
@@ -210,7 +209,7 @@ lo_do_transfer(struct loop_device *lo, int cmd,
210 * space operations write_begin and write_end. 209 * space operations write_begin and write_end.
211 */ 210 */
212static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec, 211static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec,
213 int bsize, loff_t pos, struct page *unused) 212 loff_t pos, struct page *unused)
214{ 213{
215 struct file *file = lo->lo_backing_file; /* kudos to NFsckingS */ 214 struct file *file = lo->lo_backing_file; /* kudos to NFsckingS */
216 struct address_space *mapping = file->f_mapping; 215 struct address_space *mapping = file->f_mapping;
@@ -302,7 +301,7 @@ static int __do_lo_send_write(struct file *file,
302 * filesystems. 301 * filesystems.
303 */ 302 */
304static int do_lo_send_direct_write(struct loop_device *lo, 303static int do_lo_send_direct_write(struct loop_device *lo,
305 struct bio_vec *bvec, int bsize, loff_t pos, struct page *page) 304 struct bio_vec *bvec, loff_t pos, struct page *page)
306{ 305{
307 ssize_t bw = __do_lo_send_write(lo->lo_backing_file, 306 ssize_t bw = __do_lo_send_write(lo->lo_backing_file,
308 kmap(bvec->bv_page) + bvec->bv_offset, 307 kmap(bvec->bv_page) + bvec->bv_offset,
@@ -326,7 +325,7 @@ static int do_lo_send_direct_write(struct loop_device *lo,
326 * destination pages of the backing file. 325 * destination pages of the backing file.
327 */ 326 */
328static int do_lo_send_write(struct loop_device *lo, struct bio_vec *bvec, 327static int do_lo_send_write(struct loop_device *lo, struct bio_vec *bvec,
329 int bsize, loff_t pos, struct page *page) 328 loff_t pos, struct page *page)
330{ 329{
331 int ret = lo_do_transfer(lo, WRITE, page, 0, bvec->bv_page, 330 int ret = lo_do_transfer(lo, WRITE, page, 0, bvec->bv_page,
332 bvec->bv_offset, bvec->bv_len, pos >> 9); 331 bvec->bv_offset, bvec->bv_len, pos >> 9);
@@ -341,10 +340,9 @@ static int do_lo_send_write(struct loop_device *lo, struct bio_vec *bvec,
341 return ret; 340 return ret;
342} 341}
343 342
344static int lo_send(struct loop_device *lo, struct bio *bio, int bsize, 343static int lo_send(struct loop_device *lo, struct bio *bio, loff_t pos)
345 loff_t pos)
346{ 344{
347 int (*do_lo_send)(struct loop_device *, struct bio_vec *, int, loff_t, 345 int (*do_lo_send)(struct loop_device *, struct bio_vec *, loff_t,
348 struct page *page); 346 struct page *page);
349 struct bio_vec *bvec; 347 struct bio_vec *bvec;
350 struct page *page = NULL; 348 struct page *page = NULL;
@@ -362,7 +360,7 @@ static int lo_send(struct loop_device *lo, struct bio *bio, int bsize,
362 } 360 }
363 } 361 }
364 bio_for_each_segment(bvec, bio, i) { 362 bio_for_each_segment(bvec, bio, i) {
365 ret = do_lo_send(lo, bvec, bsize, pos, page); 363 ret = do_lo_send(lo, bvec, pos, page);
366 if (ret < 0) 364 if (ret < 0)
367 break; 365 break;
368 pos += bvec->bv_len; 366 pos += bvec->bv_len;
@@ -478,7 +476,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
478 476
479 pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset; 477 pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
480 if (bio_rw(bio) == WRITE) 478 if (bio_rw(bio) == WRITE)
481 ret = lo_send(lo, bio, lo->lo_blocksize, pos); 479 ret = lo_send(lo, bio, pos);
482 else 480 else
483 ret = lo_receive(lo, bio, lo->lo_blocksize, pos); 481 ret = lo_receive(lo, bio, lo->lo_blocksize, pos);
484 return ret; 482 return ret;
@@ -652,8 +650,8 @@ static void do_loop_switch(struct loop_device *lo, struct switch_request *p)
652 * This can only work if the loop device is used read-only, and if the 650 * This can only work if the loop device is used read-only, and if the
653 * new backing store is the same size and type as the old backing store. 651 * new backing store is the same size and type as the old backing store.
654 */ 652 */
655static int loop_change_fd(struct loop_device *lo, struct file *lo_file, 653static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
656 struct block_device *bdev, unsigned int arg) 654 unsigned int arg)
657{ 655{
658 struct file *file, *old_file; 656 struct file *file, *old_file;
659 struct inode *inode; 657 struct inode *inode;
@@ -712,7 +710,7 @@ static inline int is_loop_device(struct file *file)
712 return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR; 710 return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR;
713} 711}
714 712
715static int loop_set_fd(struct loop_device *lo, struct file *lo_file, 713static int loop_set_fd(struct loop_device *lo, fmode_t mode,
716 struct block_device *bdev, unsigned int arg) 714 struct block_device *bdev, unsigned int arg)
717{ 715{
718 struct file *file, *f; 716 struct file *file, *f;
@@ -740,7 +738,7 @@ static int loop_set_fd(struct loop_device *lo, struct file *lo_file,
740 while (is_loop_device(f)) { 738 while (is_loop_device(f)) {
741 struct loop_device *l; 739 struct loop_device *l;
742 740
743 if (f->f_mapping->host->i_rdev == lo_file->f_mapping->host->i_rdev) 741 if (f->f_mapping->host->i_bdev == bdev)
744 goto out_putf; 742 goto out_putf;
745 743
746 l = f->f_mapping->host->i_bdev->bd_disk->private_data; 744 l = f->f_mapping->host->i_bdev->bd_disk->private_data;
@@ -766,7 +764,7 @@ static int loop_set_fd(struct loop_device *lo, struct file *lo_file,
766 */ 764 */
767 if (!file->f_op->splice_read) 765 if (!file->f_op->splice_read)
768 goto out_putf; 766 goto out_putf;
769 if (aops->prepare_write || aops->write_begin) 767 if (aops->write_begin)
770 lo_flags |= LO_FLAGS_USE_AOPS; 768 lo_flags |= LO_FLAGS_USE_AOPS;
771 if (!(lo_flags & LO_FLAGS_USE_AOPS) && !file->f_op->write) 769 if (!(lo_flags & LO_FLAGS_USE_AOPS) && !file->f_op->write)
772 lo_flags |= LO_FLAGS_READ_ONLY; 770 lo_flags |= LO_FLAGS_READ_ONLY;
@@ -786,7 +784,7 @@ static int loop_set_fd(struct loop_device *lo, struct file *lo_file,
786 goto out_putf; 784 goto out_putf;
787 } 785 }
788 786
789 if (!(lo_file->f_mode & FMODE_WRITE)) 787 if (!(mode & FMODE_WRITE))
790 lo_flags |= LO_FLAGS_READ_ONLY; 788 lo_flags |= LO_FLAGS_READ_ONLY;
791 789
792 set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0); 790 set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0);
@@ -918,9 +916,11 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
918 memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE); 916 memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
919 memset(lo->lo_crypt_name, 0, LO_NAME_SIZE); 917 memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
920 memset(lo->lo_file_name, 0, LO_NAME_SIZE); 918 memset(lo->lo_file_name, 0, LO_NAME_SIZE);
921 invalidate_bdev(bdev); 919 if (bdev)
920 invalidate_bdev(bdev);
922 set_capacity(lo->lo_disk, 0); 921 set_capacity(lo->lo_disk, 0);
923 bd_set_size(bdev, 0); 922 if (bdev)
923 bd_set_size(bdev, 0);
924 mapping_set_gfp_mask(filp->f_mapping, gfp); 924 mapping_set_gfp_mask(filp->f_mapping, gfp);
925 lo->lo_state = Lo_unbound; 925 lo->lo_state = Lo_unbound;
926 fput(filp); 926 fput(filp);
@@ -1137,22 +1137,22 @@ loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) {
1137 return err; 1137 return err;
1138} 1138}
1139 1139
1140static int lo_ioctl(struct inode * inode, struct file * file, 1140static int lo_ioctl(struct block_device *bdev, fmode_t mode,
1141 unsigned int cmd, unsigned long arg) 1141 unsigned int cmd, unsigned long arg)
1142{ 1142{
1143 struct loop_device *lo = inode->i_bdev->bd_disk->private_data; 1143 struct loop_device *lo = bdev->bd_disk->private_data;
1144 int err; 1144 int err;
1145 1145
1146 mutex_lock(&lo->lo_ctl_mutex); 1146 mutex_lock(&lo->lo_ctl_mutex);
1147 switch (cmd) { 1147 switch (cmd) {
1148 case LOOP_SET_FD: 1148 case LOOP_SET_FD:
1149 err = loop_set_fd(lo, file, inode->i_bdev, arg); 1149 err = loop_set_fd(lo, mode, bdev, arg);
1150 break; 1150 break;
1151 case LOOP_CHANGE_FD: 1151 case LOOP_CHANGE_FD:
1152 err = loop_change_fd(lo, file, inode->i_bdev, arg); 1152 err = loop_change_fd(lo, bdev, arg);
1153 break; 1153 break;
1154 case LOOP_CLR_FD: 1154 case LOOP_CLR_FD:
1155 err = loop_clr_fd(lo, inode->i_bdev); 1155 err = loop_clr_fd(lo, bdev);
1156 break; 1156 break;
1157 case LOOP_SET_STATUS: 1157 case LOOP_SET_STATUS:
1158 err = loop_set_status_old(lo, (struct loop_info __user *) arg); 1158 err = loop_set_status_old(lo, (struct loop_info __user *) arg);
@@ -1292,10 +1292,10 @@ loop_get_status_compat(struct loop_device *lo,
1292 return err; 1292 return err;
1293} 1293}
1294 1294
1295static long lo_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 1295static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
1296 unsigned int cmd, unsigned long arg)
1296{ 1297{
1297 struct inode *inode = file->f_path.dentry->d_inode; 1298 struct loop_device *lo = bdev->bd_disk->private_data;
1298 struct loop_device *lo = inode->i_bdev->bd_disk->private_data;
1299 int err; 1299 int err;
1300 1300
1301 switch(cmd) { 1301 switch(cmd) {
@@ -1317,7 +1317,7 @@ static long lo_compat_ioctl(struct file *file, unsigned int cmd, unsigned long a
1317 arg = (unsigned long) compat_ptr(arg); 1317 arg = (unsigned long) compat_ptr(arg);
1318 case LOOP_SET_FD: 1318 case LOOP_SET_FD:
1319 case LOOP_CHANGE_FD: 1319 case LOOP_CHANGE_FD:
1320 err = lo_ioctl(inode, file, cmd, arg); 1320 err = lo_ioctl(bdev, mode, cmd, arg);
1321 break; 1321 break;
1322 default: 1322 default:
1323 err = -ENOIOCTLCMD; 1323 err = -ENOIOCTLCMD;
@@ -1327,9 +1327,9 @@ static long lo_compat_ioctl(struct file *file, unsigned int cmd, unsigned long a
1327} 1327}
1328#endif 1328#endif
1329 1329
1330static int lo_open(struct inode *inode, struct file *file) 1330static int lo_open(struct block_device *bdev, fmode_t mode)
1331{ 1331{
1332 struct loop_device *lo = inode->i_bdev->bd_disk->private_data; 1332 struct loop_device *lo = bdev->bd_disk->private_data;
1333 1333
1334 mutex_lock(&lo->lo_ctl_mutex); 1334 mutex_lock(&lo->lo_ctl_mutex);
1335 lo->lo_refcnt++; 1335 lo->lo_refcnt++;
@@ -1338,15 +1338,15 @@ static int lo_open(struct inode *inode, struct file *file)
1338 return 0; 1338 return 0;
1339} 1339}
1340 1340
1341static int lo_release(struct inode *inode, struct file *file) 1341static int lo_release(struct gendisk *disk, fmode_t mode)
1342{ 1342{
1343 struct loop_device *lo = inode->i_bdev->bd_disk->private_data; 1343 struct loop_device *lo = disk->private_data;
1344 1344
1345 mutex_lock(&lo->lo_ctl_mutex); 1345 mutex_lock(&lo->lo_ctl_mutex);
1346 --lo->lo_refcnt; 1346 --lo->lo_refcnt;
1347 1347
1348 if ((lo->lo_flags & LO_FLAGS_AUTOCLEAR) && !lo->lo_refcnt) 1348 if ((lo->lo_flags & LO_FLAGS_AUTOCLEAR) && !lo->lo_refcnt)
1349 loop_clr_fd(lo, inode->i_bdev); 1349 loop_clr_fd(lo, NULL);
1350 1350
1351 mutex_unlock(&lo->lo_ctl_mutex); 1351 mutex_unlock(&lo->lo_ctl_mutex);
1352 1352
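Because release handlers now see only the gendisk, lo_release() above passes NULL as the block_device when it autoclears, and loop_clr_fd() guards its invalidate_bdev()/bd_set_size() calls accordingly. Restated in isolation as a hypothetical release routine (example_release is invented; the field names follow the hunks above):

static int example_release(struct gendisk *disk, fmode_t mode)
{
        struct loop_device *lo = disk->private_data;

        mutex_lock(&lo->lo_ctl_mutex);
        lo->lo_refcnt--;
        if ((lo->lo_flags & LO_FLAGS_AUTOCLEAR) && !lo->lo_refcnt)
                loop_clr_fd(lo, NULL);  /* no bdev here; loop_clr_fd() must tolerate NULL */
        mutex_unlock(&lo->lo_ctl_mutex);
        return 0;
}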
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 9034ca585afd..d3a91cacee8c 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -557,10 +557,11 @@ static void do_nbd_request(struct request_queue * q)
557 } 557 }
558} 558}
559 559
560static int nbd_ioctl(struct inode *inode, struct file *file, 560static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
561 unsigned int cmd, unsigned long arg) 561 unsigned int cmd, unsigned long arg)
562{ 562{
563 struct nbd_device *lo = inode->i_bdev->bd_disk->private_data; 563 struct nbd_device *lo = bdev->bd_disk->private_data;
564 struct file *file;
564 int error; 565 int error;
565 struct request sreq ; 566 struct request sreq ;
566 struct task_struct *thread; 567 struct task_struct *thread;
@@ -612,8 +613,7 @@ static int nbd_ioctl(struct inode *inode, struct file *file,
612 error = -EINVAL; 613 error = -EINVAL;
613 file = fget(arg); 614 file = fget(arg);
614 if (file) { 615 if (file) {
615 struct block_device *bdev = inode->i_bdev; 616 struct inode *inode = file->f_path.dentry->d_inode;
616 inode = file->f_path.dentry->d_inode;
617 if (S_ISSOCK(inode->i_mode)) { 617 if (S_ISSOCK(inode->i_mode)) {
618 lo->file = file; 618 lo->file = file;
619 lo->sock = SOCKET_I(inode); 619 lo->sock = SOCKET_I(inode);
@@ -628,14 +628,14 @@ static int nbd_ioctl(struct inode *inode, struct file *file,
628 case NBD_SET_BLKSIZE: 628 case NBD_SET_BLKSIZE:
629 lo->blksize = arg; 629 lo->blksize = arg;
630 lo->bytesize &= ~(lo->blksize-1); 630 lo->bytesize &= ~(lo->blksize-1);
631 inode->i_bdev->bd_inode->i_size = lo->bytesize; 631 bdev->bd_inode->i_size = lo->bytesize;
632 set_blocksize(inode->i_bdev, lo->blksize); 632 set_blocksize(bdev, lo->blksize);
633 set_capacity(lo->disk, lo->bytesize >> 9); 633 set_capacity(lo->disk, lo->bytesize >> 9);
634 return 0; 634 return 0;
635 case NBD_SET_SIZE: 635 case NBD_SET_SIZE:
636 lo->bytesize = arg & ~(lo->blksize-1); 636 lo->bytesize = arg & ~(lo->blksize-1);
637 inode->i_bdev->bd_inode->i_size = lo->bytesize; 637 bdev->bd_inode->i_size = lo->bytesize;
638 set_blocksize(inode->i_bdev, lo->blksize); 638 set_blocksize(bdev, lo->blksize);
639 set_capacity(lo->disk, lo->bytesize >> 9); 639 set_capacity(lo->disk, lo->bytesize >> 9);
640 return 0; 640 return 0;
641 case NBD_SET_TIMEOUT: 641 case NBD_SET_TIMEOUT:
@@ -643,8 +643,8 @@ static int nbd_ioctl(struct inode *inode, struct file *file,
643 return 0; 643 return 0;
644 case NBD_SET_SIZE_BLOCKS: 644 case NBD_SET_SIZE_BLOCKS:
645 lo->bytesize = ((u64) arg) * lo->blksize; 645 lo->bytesize = ((u64) arg) * lo->blksize;
646 inode->i_bdev->bd_inode->i_size = lo->bytesize; 646 bdev->bd_inode->i_size = lo->bytesize;
647 set_blocksize(inode->i_bdev, lo->blksize); 647 set_blocksize(bdev, lo->blksize);
648 set_capacity(lo->disk, lo->bytesize >> 9); 648 set_capacity(lo->disk, lo->bytesize >> 9);
649 return 0; 649 return 0;
650 case NBD_DO_IT: 650 case NBD_DO_IT:
@@ -666,10 +666,10 @@ static int nbd_ioctl(struct inode *inode, struct file *file,
666 if (file) 666 if (file)
667 fput(file); 667 fput(file);
668 lo->bytesize = 0; 668 lo->bytesize = 0;
669 inode->i_bdev->bd_inode->i_size = 0; 669 bdev->bd_inode->i_size = 0;
670 set_capacity(lo->disk, 0); 670 set_capacity(lo->disk, 0);
671 if (max_part > 0) 671 if (max_part > 0)
672 ioctl_by_bdev(inode->i_bdev, BLKRRPART, 0); 672 ioctl_by_bdev(bdev, BLKRRPART, 0);
673 return lo->harderror; 673 return lo->harderror;
674 case NBD_CLEAR_QUE: 674 case NBD_CLEAR_QUE:
675 /* 675 /*
@@ -680,7 +680,7 @@ static int nbd_ioctl(struct inode *inode, struct file *file,
680 return 0; 680 return 0;
681 case NBD_PRINT_DEBUG: 681 case NBD_PRINT_DEBUG:
682 printk(KERN_INFO "%s: next = %p, prev = %p, head = %p\n", 682 printk(KERN_INFO "%s: next = %p, prev = %p, head = %p\n",
683 inode->i_bdev->bd_disk->disk_name, 683 bdev->bd_disk->disk_name,
684 lo->queue_head.next, lo->queue_head.prev, 684 lo->queue_head.next, lo->queue_head.prev,
685 &lo->queue_head); 685 &lo->queue_head);
686 return 0; 686 return 0;
@@ -691,7 +691,7 @@ static int nbd_ioctl(struct inode *inode, struct file *file,
691static struct block_device_operations nbd_fops = 691static struct block_device_operations nbd_fops =
692{ 692{
693 .owner = THIS_MODULE, 693 .owner = THIS_MODULE,
694 .ioctl = nbd_ioctl, 694 .locked_ioctl = nbd_ioctl,
695}; 695};
696 696
697/* 697/*
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index b8a994a2b013..e91d4b4b014f 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -223,23 +223,24 @@ static int pcd_warned; /* Have we logged a phase warning ? */
223 223
224/* kernel glue structures */ 224/* kernel glue structures */
225 225
226static int pcd_block_open(struct inode *inode, struct file *file) 226static int pcd_block_open(struct block_device *bdev, fmode_t mode)
227{ 227{
228 struct pcd_unit *cd = inode->i_bdev->bd_disk->private_data; 228 struct pcd_unit *cd = bdev->bd_disk->private_data;
229 return cdrom_open(&cd->info, inode, file); 229 return cdrom_open(&cd->info, bdev, mode);
230} 230}
231 231
232static int pcd_block_release(struct inode *inode, struct file *file) 232static int pcd_block_release(struct gendisk *disk, fmode_t mode)
233{ 233{
234 struct pcd_unit *cd = inode->i_bdev->bd_disk->private_data; 234 struct pcd_unit *cd = disk->private_data;
235 return cdrom_release(&cd->info, file); 235 cdrom_release(&cd->info, mode);
236 return 0;
236} 237}
237 238
238static int pcd_block_ioctl(struct inode *inode, struct file *file, 239static int pcd_block_ioctl(struct block_device *bdev, fmode_t mode,
239 unsigned cmd, unsigned long arg) 240 unsigned cmd, unsigned long arg)
240{ 241{
241 struct pcd_unit *cd = inode->i_bdev->bd_disk->private_data; 242 struct pcd_unit *cd = bdev->bd_disk->private_data;
242 return cdrom_ioctl(file, &cd->info, inode, cmd, arg); 243 return cdrom_ioctl(&cd->info, bdev, mode, cmd, arg);
243} 244}
244 245
245static int pcd_block_media_changed(struct gendisk *disk) 246static int pcd_block_media_changed(struct gendisk *disk)
@@ -252,7 +253,7 @@ static struct block_device_operations pcd_bdops = {
252 .owner = THIS_MODULE, 253 .owner = THIS_MODULE,
253 .open = pcd_block_open, 254 .open = pcd_block_open,
254 .release = pcd_block_release, 255 .release = pcd_block_release,
255 .ioctl = pcd_block_ioctl, 256 .locked_ioctl = pcd_block_ioctl,
256 .media_changed = pcd_block_media_changed, 257 .media_changed = pcd_block_media_changed,
257}; 258};
258 259
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 5fdfa7c888ce..9299455b0af6 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -728,9 +728,9 @@ static int pd_special_command(struct pd_unit *disk,
728 728
729/* kernel glue structures */ 729/* kernel glue structures */
730 730
731static int pd_open(struct inode *inode, struct file *file) 731static int pd_open(struct block_device *bdev, fmode_t mode)
732{ 732{
733 struct pd_unit *disk = inode->i_bdev->bd_disk->private_data; 733 struct pd_unit *disk = bdev->bd_disk->private_data;
734 734
735 disk->access++; 735 disk->access++;
736 736
@@ -758,10 +758,10 @@ static int pd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
758 return 0; 758 return 0;
759} 759}
760 760
761static int pd_ioctl(struct inode *inode, struct file *file, 761static int pd_ioctl(struct block_device *bdev, fmode_t mode,
762 unsigned int cmd, unsigned long arg) 762 unsigned int cmd, unsigned long arg)
763{ 763{
764 struct pd_unit *disk = inode->i_bdev->bd_disk->private_data; 764 struct pd_unit *disk = bdev->bd_disk->private_data;
765 765
766 switch (cmd) { 766 switch (cmd) {
767 case CDROMEJECT: 767 case CDROMEJECT:
@@ -773,9 +773,9 @@ static int pd_ioctl(struct inode *inode, struct file *file,
773 } 773 }
774} 774}
775 775
776static int pd_release(struct inode *inode, struct file *file) 776static int pd_release(struct gendisk *p, fmode_t mode)
777{ 777{
778 struct pd_unit *disk = inode->i_bdev->bd_disk->private_data; 778 struct pd_unit *disk = p->private_data;
779 779
780 if (!--disk->access && disk->removable) 780 if (!--disk->access && disk->removable)
781 pd_special_command(disk, pd_door_unlock); 781 pd_special_command(disk, pd_door_unlock);
@@ -809,7 +809,7 @@ static struct block_device_operations pd_fops = {
809 .owner = THIS_MODULE, 809 .owner = THIS_MODULE,
810 .open = pd_open, 810 .open = pd_open,
811 .release = pd_release, 811 .release = pd_release,
812 .ioctl = pd_ioctl, 812 .locked_ioctl = pd_ioctl,
813 .getgeo = pd_getgeo, 813 .getgeo = pd_getgeo,
814 .media_changed = pd_check_media, 814 .media_changed = pd_check_media,
815 .revalidate_disk= pd_revalidate 815 .revalidate_disk= pd_revalidate
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index e7fe6ca97dd8..bef3b997ba3e 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -201,13 +201,13 @@ module_param_array(drive3, int, NULL, 0);
201#define ATAPI_READ_10 0x28 201#define ATAPI_READ_10 0x28
202#define ATAPI_WRITE_10 0x2a 202#define ATAPI_WRITE_10 0x2a
203 203
204static int pf_open(struct inode *inode, struct file *file); 204static int pf_open(struct block_device *bdev, fmode_t mode);
205static void do_pf_request(struct request_queue * q); 205static void do_pf_request(struct request_queue * q);
206static int pf_ioctl(struct inode *inode, struct file *file, 206static int pf_ioctl(struct block_device *bdev, fmode_t mode,
207 unsigned int cmd, unsigned long arg); 207 unsigned int cmd, unsigned long arg);
208static int pf_getgeo(struct block_device *bdev, struct hd_geometry *geo); 208static int pf_getgeo(struct block_device *bdev, struct hd_geometry *geo);
209 209
210static int pf_release(struct inode *inode, struct file *file); 210static int pf_release(struct gendisk *disk, fmode_t mode);
211 211
212static int pf_detect(void); 212static int pf_detect(void);
213static void do_pf_read(void); 213static void do_pf_read(void);
@@ -266,7 +266,7 @@ static struct block_device_operations pf_fops = {
266 .owner = THIS_MODULE, 266 .owner = THIS_MODULE,
267 .open = pf_open, 267 .open = pf_open,
268 .release = pf_release, 268 .release = pf_release,
269 .ioctl = pf_ioctl, 269 .locked_ioctl = pf_ioctl,
270 .getgeo = pf_getgeo, 270 .getgeo = pf_getgeo,
271 .media_changed = pf_check_media, 271 .media_changed = pf_check_media,
272}; 272};
@@ -296,16 +296,16 @@ static void __init pf_init_units(void)
296 } 296 }
297} 297}
298 298
299static int pf_open(struct inode *inode, struct file *file) 299static int pf_open(struct block_device *bdev, fmode_t mode)
300{ 300{
301 struct pf_unit *pf = inode->i_bdev->bd_disk->private_data; 301 struct pf_unit *pf = bdev->bd_disk->private_data;
302 302
303 pf_identify(pf); 303 pf_identify(pf);
304 304
305 if (pf->media_status == PF_NM) 305 if (pf->media_status == PF_NM)
306 return -ENODEV; 306 return -ENODEV;
307 307
308 if ((pf->media_status == PF_RO) && (file->f_mode & 2)) 308 if ((pf->media_status == PF_RO) && (mode & FMODE_WRITE))
309 return -EROFS; 309 return -EROFS;
310 310
311 pf->access++; 311 pf->access++;
@@ -333,9 +333,9 @@ static int pf_getgeo(struct block_device *bdev, struct hd_geometry *geo)
333 return 0; 333 return 0;
334} 334}
335 335
336static int pf_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) 336static int pf_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
337{ 337{
338 struct pf_unit *pf = inode->i_bdev->bd_disk->private_data; 338 struct pf_unit *pf = bdev->bd_disk->private_data;
339 339
340 if (cmd != CDROMEJECT) 340 if (cmd != CDROMEJECT)
341 return -EINVAL; 341 return -EINVAL;
@@ -346,9 +346,9 @@ static int pf_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
346 return 0; 346 return 0;
347} 347}
348 348
349static int pf_release(struct inode *inode, struct file *file) 349static int pf_release(struct gendisk *disk, fmode_t mode)
350{ 350{
351 struct pf_unit *pf = inode->i_bdev->bd_disk->private_data; 351 struct pf_unit *pf = disk->private_data;
352 352
353 if (pf->access <= 0) 353 if (pf->access <= 0)
354 return -EINVAL; 354 return -EINVAL;
diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c
index 5ae229656eaa..1e4006e18f03 100644
--- a/drivers/block/paride/pt.c
+++ b/drivers/block/paride/pt.c
@@ -667,7 +667,7 @@ static int pt_open(struct inode *inode, struct file *file)
667 goto out; 667 goto out;
668 668
669 err = -EROFS; 669 err = -EROFS;
670 if ((!(tape->flags & PT_WRITE_OK)) && (file->f_mode & 2)) 670 if ((!(tape->flags & PT_WRITE_OK)) && (file->f_mode & FMODE_WRITE))
671 goto out; 671 goto out;
672 672
673 if (!(iminor(inode) & 128)) 673 if (!(iminor(inode) & 128))
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 195ca7c720f5..dc7a8c352da2 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -302,7 +302,7 @@ static struct kobj_type kobj_pkt_type_wqueue = {
302static void pkt_sysfs_dev_new(struct pktcdvd_device *pd) 302static void pkt_sysfs_dev_new(struct pktcdvd_device *pd)
303{ 303{
304 if (class_pktcdvd) { 304 if (class_pktcdvd) {
305 pd->dev = device_create(class_pktcdvd, NULL, pd->pkt_dev, NULL, 305 pd->dev = device_create(class_pktcdvd, NULL, MKDEV(0, 0), NULL,
306 "%s", pd->name); 306 "%s", pd->name);
307 if (IS_ERR(pd->dev)) 307 if (IS_ERR(pd->dev))
308 pd->dev = NULL; 308 pd->dev = NULL;
@@ -2320,7 +2320,7 @@ static int pkt_open_write(struct pktcdvd_device *pd)
2320/* 2320/*
2321 * called at open time. 2321 * called at open time.
2322 */ 2322 */
2323static int pkt_open_dev(struct pktcdvd_device *pd, int write) 2323static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
2324{ 2324{
2325 int ret; 2325 int ret;
2326 long lba; 2326 long lba;
@@ -2332,7 +2332,7 @@ static int pkt_open_dev(struct pktcdvd_device *pd, int write)
2332 * so bdget() can't fail. 2332 * so bdget() can't fail.
2333 */ 2333 */
2334 bdget(pd->bdev->bd_dev); 2334 bdget(pd->bdev->bd_dev);
2335 if ((ret = blkdev_get(pd->bdev, FMODE_READ, O_RDONLY))) 2335 if ((ret = blkdev_get(pd->bdev, FMODE_READ)))
2336 goto out; 2336 goto out;
2337 2337
2338 if ((ret = bd_claim(pd->bdev, pd))) 2338 if ((ret = bd_claim(pd->bdev, pd)))
@@ -2381,7 +2381,7 @@ static int pkt_open_dev(struct pktcdvd_device *pd, int write)
2381out_unclaim: 2381out_unclaim:
2382 bd_release(pd->bdev); 2382 bd_release(pd->bdev);
2383out_putdev: 2383out_putdev:
2384 blkdev_put(pd->bdev); 2384 blkdev_put(pd->bdev, FMODE_READ);
2385out: 2385out:
2386 return ret; 2386 return ret;
2387} 2387}
@@ -2399,7 +2399,7 @@ static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
2399 2399
2400 pkt_set_speed(pd, MAX_SPEED, MAX_SPEED); 2400 pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
2401 bd_release(pd->bdev); 2401 bd_release(pd->bdev);
2402 blkdev_put(pd->bdev); 2402 blkdev_put(pd->bdev, FMODE_READ);
2403 2403
2404 pkt_shrink_pktlist(pd); 2404 pkt_shrink_pktlist(pd);
2405} 2405}
@@ -2411,7 +2411,7 @@ static struct pktcdvd_device *pkt_find_dev_from_minor(int dev_minor)
2411 return pkt_devs[dev_minor]; 2411 return pkt_devs[dev_minor];
2412} 2412}
2413 2413
2414static int pkt_open(struct inode *inode, struct file *file) 2414static int pkt_open(struct block_device *bdev, fmode_t mode)
2415{ 2415{
2416 struct pktcdvd_device *pd = NULL; 2416 struct pktcdvd_device *pd = NULL;
2417 int ret; 2417 int ret;
@@ -2419,7 +2419,7 @@ static int pkt_open(struct inode *inode, struct file *file)
2419 VPRINTK(DRIVER_NAME": entering open\n"); 2419 VPRINTK(DRIVER_NAME": entering open\n");
2420 2420
2421 mutex_lock(&ctl_mutex); 2421 mutex_lock(&ctl_mutex);
2422 pd = pkt_find_dev_from_minor(iminor(inode)); 2422 pd = pkt_find_dev_from_minor(MINOR(bdev->bd_dev));
2423 if (!pd) { 2423 if (!pd) {
2424 ret = -ENODEV; 2424 ret = -ENODEV;
2425 goto out; 2425 goto out;
@@ -2428,20 +2428,20 @@ static int pkt_open(struct inode *inode, struct file *file)
2428 2428
2429 pd->refcnt++; 2429 pd->refcnt++;
2430 if (pd->refcnt > 1) { 2430 if (pd->refcnt > 1) {
2431 if ((file->f_mode & FMODE_WRITE) && 2431 if ((mode & FMODE_WRITE) &&
2432 !test_bit(PACKET_WRITABLE, &pd->flags)) { 2432 !test_bit(PACKET_WRITABLE, &pd->flags)) {
2433 ret = -EBUSY; 2433 ret = -EBUSY;
2434 goto out_dec; 2434 goto out_dec;
2435 } 2435 }
2436 } else { 2436 } else {
2437 ret = pkt_open_dev(pd, file->f_mode & FMODE_WRITE); 2437 ret = pkt_open_dev(pd, mode & FMODE_WRITE);
2438 if (ret) 2438 if (ret)
2439 goto out_dec; 2439 goto out_dec;
2440 /* 2440 /*
2441 * needed here as well, since ext2 (among others) may change 2441 * needed here as well, since ext2 (among others) may change
2442 * the blocksize at mount time 2442 * the blocksize at mount time
2443 */ 2443 */
2444 set_blocksize(inode->i_bdev, CD_FRAMESIZE); 2444 set_blocksize(bdev, CD_FRAMESIZE);
2445 } 2445 }
2446 2446
2447 mutex_unlock(&ctl_mutex); 2447 mutex_unlock(&ctl_mutex);
@@ -2455,9 +2455,9 @@ out:
2455 return ret; 2455 return ret;
2456} 2456}
2457 2457
2458static int pkt_close(struct inode *inode, struct file *file) 2458static int pkt_close(struct gendisk *disk, fmode_t mode)
2459{ 2459{
2460 struct pktcdvd_device *pd = inode->i_bdev->bd_disk->private_data; 2460 struct pktcdvd_device *pd = disk->private_data;
2461 int ret = 0; 2461 int ret = 0;
2462 2462
2463 mutex_lock(&ctl_mutex); 2463 mutex_lock(&ctl_mutex);
@@ -2765,7 +2765,7 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
2765 bdev = bdget(dev); 2765 bdev = bdget(dev);
2766 if (!bdev) 2766 if (!bdev)
2767 return -ENOMEM; 2767 return -ENOMEM;
2768 ret = blkdev_get(bdev, FMODE_READ, O_RDONLY | O_NONBLOCK); 2768 ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY);
2769 if (ret) 2769 if (ret)
2770 return ret; 2770 return ret;
2771 2771
@@ -2790,19 +2790,28 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
2790 return 0; 2790 return 0;
2791 2791
2792out_mem: 2792out_mem:
2793 blkdev_put(bdev); 2793 blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
2794 /* This is safe: open() is still holding a reference. */ 2794 /* This is safe: open() is still holding a reference. */
2795 module_put(THIS_MODULE); 2795 module_put(THIS_MODULE);
2796 return ret; 2796 return ret;
2797} 2797}
2798 2798
2799static int pkt_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) 2799static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
2800{ 2800{
2801 struct pktcdvd_device *pd = inode->i_bdev->bd_disk->private_data; 2801 struct pktcdvd_device *pd = bdev->bd_disk->private_data;
2802 2802
2803 VPRINTK("pkt_ioctl: cmd %x, dev %d:%d\n", cmd, imajor(inode), iminor(inode)); 2803 VPRINTK("pkt_ioctl: cmd %x, dev %d:%d\n", cmd,
2804 MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
2804 2805
2805 switch (cmd) { 2806 switch (cmd) {
2807 case CDROMEJECT:
2808 /*
2809 * The door gets locked when the device is opened, so we
2810 * have to unlock it or else the eject command fails.
2811 */
2812 if (pd->refcnt == 1)
2813 pkt_lock_door(pd, 0);
2814 /* fallthru */
2806 /* 2815 /*
2807 * forward selected CDROM ioctls to CD-ROM, for UDF 2816 * forward selected CDROM ioctls to CD-ROM, for UDF
2808 */ 2817 */
@@ -2811,16 +2820,7 @@ static int pkt_ioctl(struct inode *inode, struct file *file, unsigned int cmd, u
2811 case CDROM_LAST_WRITTEN: 2820 case CDROM_LAST_WRITTEN:
2812 case CDROM_SEND_PACKET: 2821 case CDROM_SEND_PACKET:
2813 case SCSI_IOCTL_SEND_COMMAND: 2822 case SCSI_IOCTL_SEND_COMMAND:
2814 return blkdev_ioctl(pd->bdev->bd_inode, file, cmd, arg); 2823 return __blkdev_driver_ioctl(pd->bdev, mode, cmd, arg);
2815
2816 case CDROMEJECT:
2817 /*
2818 * The door gets locked when the device is opened, so we
2819 * have to unlock it or else the eject command fails.
2820 */
2821 if (pd->refcnt == 1)
2822 pkt_lock_door(pd, 0);
2823 return blkdev_ioctl(pd->bdev->bd_inode, file, cmd, arg);
2824 2824
2825 default: 2825 default:
2826 VPRINTK(DRIVER_NAME": Unknown ioctl for %s (%x)\n", pd->name, cmd); 2826 VPRINTK(DRIVER_NAME": Unknown ioctl for %s (%x)\n", pd->name, cmd);
@@ -2849,7 +2849,7 @@ static struct block_device_operations pktcdvd_ops = {
2849 .owner = THIS_MODULE, 2849 .owner = THIS_MODULE,
2850 .open = pkt_open, 2850 .open = pkt_open,
2851 .release = pkt_close, 2851 .release = pkt_close,
2852 .ioctl = pkt_ioctl, 2852 .locked_ioctl = pkt_ioctl,
2853 .media_changed = pkt_media_changed, 2853 .media_changed = pkt_media_changed,
2854}; 2854};
2855 2855
@@ -2975,7 +2975,7 @@ static int pkt_remove_dev(dev_t pkt_dev)
2975 pkt_debugfs_dev_remove(pd); 2975 pkt_debugfs_dev_remove(pd);
2976 pkt_sysfs_dev_remove(pd); 2976 pkt_sysfs_dev_remove(pd);
2977 2977
2978 blkdev_put(pd->bdev); 2978 blkdev_put(pd->bdev, FMODE_READ | FMODE_NDELAY);
2979 2979
2980 remove_proc_entry(pd->name, pkt_proc); 2980 remove_proc_entry(pd->name, pkt_proc);
2981 DPRINTK(DRIVER_NAME": writer %s unmapped\n", pd->name); 2981 DPRINTK(DRIVER_NAME": writer %s unmapped\n", pd->name);
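blkdev_get() and blkdev_put() now take the fmode_t directly, and pktcdvd pairs them consistently: FMODE_READ for the open-time claim, FMODE_READ | FMODE_NDELAY for the probe-time one, with the matching mode passed back on every put. A hypothetical attach helper showing that pairing (example_attach and its holder argument are invented):

static int example_attach(struct block_device *bdev, void *holder)
{
        int ret;

        ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY);
        if (ret)
                return ret;

        ret = bd_claim(bdev, holder);
        if (ret)
                blkdev_put(bdev, FMODE_READ | FMODE_NDELAY); /* put with the same mode */
        return ret;
}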
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 730ccea78e45..612965307ba0 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -244,10 +244,10 @@ static int grab_drive(struct floppy_state *fs, enum swim_state state,
244 int interruptible); 244 int interruptible);
245static void release_drive(struct floppy_state *fs); 245static void release_drive(struct floppy_state *fs);
246static int fd_eject(struct floppy_state *fs); 246static int fd_eject(struct floppy_state *fs);
247static int floppy_ioctl(struct inode *inode, struct file *filp, 247static int floppy_ioctl(struct block_device *bdev, fmode_t mode,
248 unsigned int cmd, unsigned long param); 248 unsigned int cmd, unsigned long param);
249static int floppy_open(struct inode *inode, struct file *filp); 249static int floppy_open(struct block_device *bdev, fmode_t mode);
250static int floppy_release(struct inode *inode, struct file *filp); 250static int floppy_release(struct gendisk *disk, fmode_t mode);
251static int floppy_check_change(struct gendisk *disk); 251static int floppy_check_change(struct gendisk *disk);
252static int floppy_revalidate(struct gendisk *disk); 252static int floppy_revalidate(struct gendisk *disk);
253 253
@@ -839,10 +839,10 @@ static int fd_eject(struct floppy_state *fs)
839static struct floppy_struct floppy_type = 839static struct floppy_struct floppy_type =
840 { 2880,18,2,80,0,0x1B,0x00,0xCF,0x6C,NULL }; /* 7 1.44MB 3.5" */ 840 { 2880,18,2,80,0,0x1B,0x00,0xCF,0x6C,NULL }; /* 7 1.44MB 3.5" */
841 841
842static int floppy_ioctl(struct inode *inode, struct file *filp, 842static int floppy_ioctl(struct block_device *bdev, fmode_t mode,
843 unsigned int cmd, unsigned long param) 843 unsigned int cmd, unsigned long param)
844{ 844{
845 struct floppy_state *fs = inode->i_bdev->bd_disk->private_data; 845 struct floppy_state *fs = bdev->bd_disk->private_data;
846 int err; 846 int err;
847 847
848 if ((cmd & 0x80) && !capable(CAP_SYS_ADMIN)) 848 if ((cmd & 0x80) && !capable(CAP_SYS_ADMIN))
@@ -868,9 +868,9 @@ static int floppy_ioctl(struct inode *inode, struct file *filp,
868 return -ENOTTY; 868 return -ENOTTY;
869} 869}
870 870
871static int floppy_open(struct inode *inode, struct file *filp) 871static int floppy_open(struct block_device *bdev, fmode_t mode)
872{ 872{
873 struct floppy_state *fs = inode->i_bdev->bd_disk->private_data; 873 struct floppy_state *fs = bdev->bd_disk->private_data;
874 struct swim3 __iomem *sw = fs->swim3; 874 struct swim3 __iomem *sw = fs->swim3;
875 int n, err = 0; 875 int n, err = 0;
876 876
@@ -904,17 +904,17 @@ static int floppy_open(struct inode *inode, struct file *filp)
904 swim3_action(fs, SETMFM); 904 swim3_action(fs, SETMFM);
905 swim3_select(fs, RELAX); 905 swim3_select(fs, RELAX);
906 906
907 } else if (fs->ref_count == -1 || filp->f_flags & O_EXCL) 907 } else if (fs->ref_count == -1 || mode & FMODE_EXCL)
908 return -EBUSY; 908 return -EBUSY;
909 909
910 if (err == 0 && (filp->f_flags & O_NDELAY) == 0 910 if (err == 0 && (mode & FMODE_NDELAY) == 0
911 && (filp->f_mode & 3)) { 911 && (mode & (FMODE_READ|FMODE_WRITE))) {
912 check_disk_change(inode->i_bdev); 912 check_disk_change(bdev);
913 if (fs->ejected) 913 if (fs->ejected)
914 err = -ENXIO; 914 err = -ENXIO;
915 } 915 }
916 916
917 if (err == 0 && (filp->f_mode & 2)) { 917 if (err == 0 && (mode & FMODE_WRITE)) {
918 if (fs->write_prot < 0) 918 if (fs->write_prot < 0)
919 fs->write_prot = swim3_readbit(fs, WRITE_PROT); 919 fs->write_prot = swim3_readbit(fs, WRITE_PROT);
920 if (fs->write_prot) 920 if (fs->write_prot)
@@ -930,7 +930,7 @@ static int floppy_open(struct inode *inode, struct file *filp)
930 return err; 930 return err;
931 } 931 }
932 932
933 if (filp->f_flags & O_EXCL) 933 if (mode & FMODE_EXCL)
934 fs->ref_count = -1; 934 fs->ref_count = -1;
935 else 935 else
936 ++fs->ref_count; 936 ++fs->ref_count;
@@ -938,9 +938,9 @@ static int floppy_open(struct inode *inode, struct file *filp)
938 return 0; 938 return 0;
939} 939}
940 940
941static int floppy_release(struct inode *inode, struct file *filp) 941static int floppy_release(struct gendisk *disk, fmode_t mode)
942{ 942{
943 struct floppy_state *fs = inode->i_bdev->bd_disk->private_data; 943 struct floppy_state *fs = disk->private_data;
944 struct swim3 __iomem *sw = fs->swim3; 944 struct swim3 __iomem *sw = fs->swim3;
945 if (fs->ref_count > 0 && --fs->ref_count == 0) { 945 if (fs->ref_count > 0 && --fs->ref_count == 0) {
946 swim3_action(fs, MOTOR_OFF); 946 swim3_action(fs, MOTOR_OFF);
@@ -1000,7 +1000,7 @@ static int floppy_revalidate(struct gendisk *disk)
1000static struct block_device_operations floppy_fops = { 1000static struct block_device_operations floppy_fops = {
1001 .open = floppy_open, 1001 .open = floppy_open,
1002 .release = floppy_release, 1002 .release = floppy_release,
1003 .ioctl = floppy_ioctl, 1003 .locked_ioctl = floppy_ioctl,
1004 .media_changed = floppy_check_change, 1004 .media_changed = floppy_check_change,
1005 .revalidate_disk= floppy_revalidate, 1005 .revalidate_disk= floppy_revalidate,
1006}; 1006};
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index f60e41833f69..048d71d244d7 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -1546,8 +1546,6 @@ static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd)
1546 1546
1547/* 1547/*
1548 * Reset management 1548 * Reset management
1549 * XXX Move usb_reset_device to khubd. Hogging kevent is not a good thing.
1550 * XXX Make usb_sync_reset asynchronous.
1551 */ 1549 */
1552 1550
1553static void ub_reset_enter(struct ub_dev *sc, int try) 1551static void ub_reset_enter(struct ub_dev *sc, int try)
@@ -1633,6 +1631,22 @@ static void ub_reset_task(struct work_struct *work)
1633} 1631}
1634 1632
1635/* 1633/*
1634 * XXX Reset brackets are too much hassle to implement, so just stub them
1635 * in order to prevent forced unbinding (which deadlocks solid when our
1636 * ->disconnect method waits for the reset to complete and this kills keventd).
1637 *
1638 * XXX Tell Alan to move usb_unlock_device inside of usb_reset_device,
1639 * or else the post_reset is invoked, and restats I/O on a locked device.
1640 */
1641static int ub_pre_reset(struct usb_interface *iface) {
1642 return 0;
1643}
1644
1645static int ub_post_reset(struct usb_interface *iface) {
1646 return 0;
1647}
1648
1649/*
1636 * This is called from a process context. 1650 * This is called from a process context.
1637 */ 1651 */
1638static void ub_revalidate(struct ub_dev *sc, struct ub_lun *lun) 1652static void ub_revalidate(struct ub_dev *sc, struct ub_lun *lun)
@@ -1667,10 +1681,9 @@ static void ub_revalidate(struct ub_dev *sc, struct ub_lun *lun)
1667 * This is mostly needed to keep refcounting, but also to support 1681 * This is mostly needed to keep refcounting, but also to support
1668 * media checks on removable media drives. 1682 * media checks on removable media drives.
1669 */ 1683 */
1670static int ub_bd_open(struct inode *inode, struct file *filp) 1684static int ub_bd_open(struct block_device *bdev, fmode_t mode)
1671{ 1685{
1672 struct gendisk *disk = inode->i_bdev->bd_disk; 1686 struct ub_lun *lun = bdev->bd_disk->private_data;
1673 struct ub_lun *lun = disk->private_data;
1674 struct ub_dev *sc = lun->udev; 1687 struct ub_dev *sc = lun->udev;
1675 unsigned long flags; 1688 unsigned long flags;
1676 int rc; 1689 int rc;
@@ -1684,19 +1697,19 @@ static int ub_bd_open(struct inode *inode, struct file *filp)
1684 spin_unlock_irqrestore(&ub_lock, flags); 1697 spin_unlock_irqrestore(&ub_lock, flags);
1685 1698
1686 if (lun->removable || lun->readonly) 1699 if (lun->removable || lun->readonly)
1687 check_disk_change(inode->i_bdev); 1700 check_disk_change(bdev);
1688 1701
1689 /* 1702 /*
1690 * The sd.c considers ->media_present and ->changed not equivalent, 1703 * The sd.c considers ->media_present and ->changed not equivalent,
1691 * under some pretty murky conditions (a failure of READ CAPACITY). 1704 * under some pretty murky conditions (a failure of READ CAPACITY).
1692 * We may need it one day. 1705 * We may need it one day.
1693 */ 1706 */
1694 if (lun->removable && lun->changed && !(filp->f_flags & O_NDELAY)) { 1707 if (lun->removable && lun->changed && !(mode & FMODE_NDELAY)) {
1695 rc = -ENOMEDIUM; 1708 rc = -ENOMEDIUM;
1696 goto err_open; 1709 goto err_open;
1697 } 1710 }
1698 1711
1699 if (lun->readonly && (filp->f_mode & FMODE_WRITE)) { 1712 if (lun->readonly && (mode & FMODE_WRITE)) {
1700 rc = -EROFS; 1713 rc = -EROFS;
1701 goto err_open; 1714 goto err_open;
1702 } 1715 }
@@ -1710,9 +1723,8 @@ err_open:
1710 1723
1711/* 1724/*
1712 */ 1725 */
1713static int ub_bd_release(struct inode *inode, struct file *filp) 1726static int ub_bd_release(struct gendisk *disk, fmode_t mode)
1714{ 1727{
1715 struct gendisk *disk = inode->i_bdev->bd_disk;
1716 struct ub_lun *lun = disk->private_data; 1728 struct ub_lun *lun = disk->private_data;
1717 struct ub_dev *sc = lun->udev; 1729 struct ub_dev *sc = lun->udev;
1718 1730
@@ -1723,13 +1735,13 @@ static int ub_bd_release(struct inode *inode, struct file *filp)
1723/* 1735/*
1724 * The ioctl interface. 1736 * The ioctl interface.
1725 */ 1737 */
1726static int ub_bd_ioctl(struct inode *inode, struct file *filp, 1738static int ub_bd_ioctl(struct block_device *bdev, fmode_t mode,
1727 unsigned int cmd, unsigned long arg) 1739 unsigned int cmd, unsigned long arg)
1728{ 1740{
1729 struct gendisk *disk = inode->i_bdev->bd_disk; 1741 struct gendisk *disk = bdev->bd_disk;
1730 void __user *usermem = (void __user *) arg; 1742 void __user *usermem = (void __user *) arg;
1731 1743
1732 return scsi_cmd_ioctl(filp, disk->queue, disk, cmd, usermem); 1744 return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, usermem);
1733} 1745}
1734 1746
1735/* 1747/*
@@ -1793,7 +1805,7 @@ static struct block_device_operations ub_bd_fops = {
1793 .owner = THIS_MODULE, 1805 .owner = THIS_MODULE,
1794 .open = ub_bd_open, 1806 .open = ub_bd_open,
1795 .release = ub_bd_release, 1807 .release = ub_bd_release,
1796 .ioctl = ub_bd_ioctl, 1808 .locked_ioctl = ub_bd_ioctl,
1797 .media_changed = ub_bd_media_changed, 1809 .media_changed = ub_bd_media_changed,
1798 .revalidate_disk = ub_bd_revalidate, 1810 .revalidate_disk = ub_bd_revalidate,
1799}; 1811};
@@ -2448,6 +2460,8 @@ static struct usb_driver ub_driver = {
2448 .probe = ub_probe, 2460 .probe = ub_probe,
2449 .disconnect = ub_disconnect, 2461 .disconnect = ub_disconnect,
2450 .id_table = ub_usb_ids, 2462 .id_table = ub_usb_ids,
2463 .pre_reset = ub_pre_reset,
2464 .post_reset = ub_post_reset,
2451}; 2465};
2452 2466
2453static int __init ub_init(void) 2467static int __init ub_init(void)
diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c
index 1730d29e6044..ecccf65dce2f 100644
--- a/drivers/block/viodasd.c
+++ b/drivers/block/viodasd.c
@@ -130,15 +130,15 @@ struct viodasd_device {
130/* 130/*
131 * External open entry point. 131 * External open entry point.
132 */ 132 */
133static int viodasd_open(struct inode *ino, struct file *fil) 133static int viodasd_open(struct block_device *bdev, fmode_t mode)
134{ 134{
135 struct viodasd_device *d = ino->i_bdev->bd_disk->private_data; 135 struct viodasd_device *d = bdev->bd_disk->private_data;
136 HvLpEvent_Rc hvrc; 136 HvLpEvent_Rc hvrc;
137 struct viodasd_waitevent we; 137 struct viodasd_waitevent we;
138 u16 flags = 0; 138 u16 flags = 0;
139 139
140 if (d->read_only) { 140 if (d->read_only) {
141 if ((fil != NULL) && (fil->f_mode & FMODE_WRITE)) 141 if (mode & FMODE_WRITE)
142 return -EROFS; 142 return -EROFS;
143 flags = vioblockflags_ro; 143 flags = vioblockflags_ro;
144 } 144 }
@@ -179,9 +179,9 @@ static int viodasd_open(struct inode *ino, struct file *fil)
179/* 179/*
180 * External release entry point. 180 * External release entry point.
181 */ 181 */
182static int viodasd_release(struct inode *ino, struct file *fil) 182static int viodasd_release(struct gendisk *disk, fmode_t mode)
183{ 183{
184 struct viodasd_device *d = ino->i_bdev->bd_disk->private_data; 184 struct viodasd_device *d = disk->private_data;
185 HvLpEvent_Rc hvrc; 185 HvLpEvent_Rc hvrc;
186 186
187 /* Send the event to OS/400. We DON'T expect a response */ 187 /* Send the event to OS/400. We DON'T expect a response */
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 6ec5fc052786..85d79a02d487 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -146,11 +146,11 @@ static void do_virtblk_request(struct request_queue *q)
146 vblk->vq->vq_ops->kick(vblk->vq); 146 vblk->vq->vq_ops->kick(vblk->vq);
147} 147}
148 148
149static int virtblk_ioctl(struct inode *inode, struct file *filp, 149static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
150 unsigned cmd, unsigned long data) 150 unsigned cmd, unsigned long data)
151{ 151{
152 return scsi_cmd_ioctl(filp, inode->i_bdev->bd_disk->queue, 152 return scsi_cmd_ioctl(bdev->bd_disk->queue,
153 inode->i_bdev->bd_disk, cmd, 153 bdev->bd_disk, mode, cmd,
154 (void __user *)data); 154 (void __user *)data);
155} 155}
156 156
@@ -180,7 +180,7 @@ static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
180} 180}
181 181
182static struct block_device_operations virtblk_fops = { 182static struct block_device_operations virtblk_fops = {
183 .ioctl = virtblk_ioctl, 183 .locked_ioctl = virtblk_ioctl,
184 .owner = THIS_MODULE, 184 .owner = THIS_MODULE,
185 .getgeo = virtblk_getgeo, 185 .getgeo = virtblk_getgeo,
186}; 186};
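Both ub and virtio_blk reduce their ioctl handler to a SCSI passthrough, and scsi_cmd_ioctl() likewise loses its struct file argument in favour of the fmode_t. The converted shape, as a hypothetical handler (example_ioctl is invented):

static int example_ioctl(struct block_device *bdev, fmode_t mode,
                         unsigned int cmd, unsigned long arg)
{
        struct gendisk *disk = bdev->bd_disk;

        /* was scsi_cmd_ioctl(filp, disk->queue, disk, cmd, ...) */
        return scsi_cmd_ioctl(disk->queue, disk, mode, cmd,
                              (void __user *)arg);
}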
diff --git a/drivers/block/xd.c b/drivers/block/xd.c
index 624d30f7da3f..64b496fce98b 100644
--- a/drivers/block/xd.c
+++ b/drivers/block/xd.c
@@ -132,7 +132,7 @@ static int xd_getgeo(struct block_device *bdev, struct hd_geometry *geo);
132 132
133static struct block_device_operations xd_fops = { 133static struct block_device_operations xd_fops = {
134 .owner = THIS_MODULE, 134 .owner = THIS_MODULE,
135 .ioctl = xd_ioctl, 135 .locked_ioctl = xd_ioctl,
136 .getgeo = xd_getgeo, 136 .getgeo = xd_getgeo,
137}; 137};
138static DECLARE_WAIT_QUEUE_HEAD(xd_wait_int); 138static DECLARE_WAIT_QUEUE_HEAD(xd_wait_int);
@@ -343,7 +343,7 @@ static int xd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
343} 343}
344 344
345/* xd_ioctl: handle device ioctl's */ 345/* xd_ioctl: handle device ioctl's */
346static int xd_ioctl (struct inode *inode,struct file *file,u_int cmd,u_long arg) 346static int xd_ioctl(struct block_device *bdev, fmode_t mode, u_int cmd, u_long arg)
347{ 347{
348 switch (cmd) { 348 switch (cmd) {
349 case HDIO_SET_DMA: 349 case HDIO_SET_DMA:
diff --git a/drivers/block/xd.h b/drivers/block/xd.h
index cffd44a20383..37cacef16e93 100644
--- a/drivers/block/xd.h
+++ b/drivers/block/xd.h
@@ -105,7 +105,7 @@ static u_char xd_detect (u_char *controller, unsigned int *address);
105static u_char xd_initdrives (void (*init_drive)(u_char drive)); 105static u_char xd_initdrives (void (*init_drive)(u_char drive));
106 106
107static void do_xd_request (struct request_queue * q); 107static void do_xd_request (struct request_queue * q);
108static int xd_ioctl (struct inode *inode,struct file *file,unsigned int cmd,unsigned long arg); 108static int xd_ioctl (struct block_device *bdev,fmode_t mode,unsigned int cmd,unsigned long arg);
109static int xd_readwrite (u_char operation,XD_INFO *disk,char *buffer,u_int block,u_int count); 109static int xd_readwrite (u_char operation,XD_INFO *disk,char *buffer,u_int block,u_int count);
110static void xd_recalibrate (u_char drive); 110static void xd_recalibrate (u_char drive);
111 111
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 1a50ae70f716..2d19f0cc47f2 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -156,11 +156,10 @@ static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
156 return 0; 156 return 0;
157} 157}
158 158
159static int blkif_ioctl(struct inode *inode, struct file *filep, 159static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
160 unsigned command, unsigned long argument) 160 unsigned command, unsigned long argument)
161{ 161{
162 struct blkfront_info *info = 162 struct blkfront_info *info = bdev->bd_disk->private_data;
163 inode->i_bdev->bd_disk->private_data;
164 int i; 163 int i;
165 164
166 dev_dbg(&info->xbdev->dev, "command: 0x%x, argument: 0x%lx\n", 165 dev_dbg(&info->xbdev->dev, "command: 0x%x, argument: 0x%lx\n",
@@ -339,12 +338,18 @@ wait:
339static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size) 338static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
340{ 339{
341 struct request_queue *rq; 340 struct request_queue *rq;
341 elevator_t *old_e;
342 342
343 rq = blk_init_queue(do_blkif_request, &blkif_io_lock); 343 rq = blk_init_queue(do_blkif_request, &blkif_io_lock);
344 if (rq == NULL) 344 if (rq == NULL)
345 return -1; 345 return -1;
346 346
347 elevator_init(rq, "noop"); 347 old_e = rq->elevator;
348 if (IS_ERR_VALUE(elevator_init(rq, "noop")))
349 printk(KERN_WARNING
350 "blkfront: Switch elevator failed, use default\n");
351 else
352 elevator_exit(old_e);
348 353
349 /* Hard sector size and max sectors impersonate the equiv. hardware. */ 354 /* Hard sector size and max sectors impersonate the equiv. hardware. */
350 blk_queue_hardsect_size(rq, sector_size); 355 blk_queue_hardsect_size(rq, sector_size);
@@ -1014,16 +1019,16 @@ static int blkfront_is_ready(struct xenbus_device *dev)
1014 return info->is_ready; 1019 return info->is_ready;
1015} 1020}
1016 1021
1017static int blkif_open(struct inode *inode, struct file *filep) 1022static int blkif_open(struct block_device *bdev, fmode_t mode)
1018{ 1023{
1019 struct blkfront_info *info = inode->i_bdev->bd_disk->private_data; 1024 struct blkfront_info *info = bdev->bd_disk->private_data;
1020 info->users++; 1025 info->users++;
1021 return 0; 1026 return 0;
1022} 1027}
1023 1028
1024static int blkif_release(struct inode *inode, struct file *filep) 1029static int blkif_release(struct gendisk *disk, fmode_t mode)
1025{ 1030{
1026 struct blkfront_info *info = inode->i_bdev->bd_disk->private_data; 1031 struct blkfront_info *info = disk->private_data;
1027 info->users--; 1032 info->users--;
1028 if (info->users == 0) { 1033 if (info->users == 0) {
1029 /* Check whether we have been instructed to close. We will 1034 /* Check whether we have been instructed to close. We will
@@ -1044,7 +1049,7 @@ static struct block_device_operations xlvbd_block_fops =
1044 .open = blkif_open, 1049 .open = blkif_open,
1045 .release = blkif_release, 1050 .release = blkif_release,
1046 .getgeo = blkif_getgeo, 1051 .getgeo = blkif_getgeo,
1047 .ioctl = blkif_ioctl, 1052 .locked_ioctl = blkif_ioctl,
1048}; 1053};
1049 1054
1050 1055
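
In the xen-blkfront hunk above, switching the queue to the noop elevator no longer discards state on failure: the current elevator is saved first, elevator_init() is checked with IS_ERR_VALUE(), and the old instance is released only when the switch succeeded. A short sketch of the same pattern, assuming this era's elevator_init()/elevator_exit() interfaces from <linux/elevator.h>:

#include <linux/blkdev.h>
#include <linux/elevator.h>

static void mydev_use_noop(struct request_queue *rq)
{
        elevator_t *old_e = rq->elevator;

        if (IS_ERR_VALUE(elevator_init(rq, "noop")))
                printk(KERN_WARNING "mydev: elevator switch failed, keeping default\n");
        else
                elevator_exit(old_e);   /* noop attached, drop the previous scheduler */
}
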
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 4a7a059ebaf7..29e1dfafb7c6 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -194,7 +194,7 @@ struct ace_device {
194 int in_irq; 194 int in_irq;
195 195
196 /* Details of hardware device */ 196 /* Details of hardware device */
197 unsigned long physaddr; 197 resource_size_t physaddr;
198 void __iomem *baseaddr; 198 void __iomem *baseaddr;
199 int irq; 199 int irq;
200 int bus_width; /* 0 := 8 bit; 1 := 16 bit */ 200 int bus_width; /* 0 := 8 bit; 1 := 16 bit */
@@ -628,8 +628,8 @@ static void ace_fsm_dostate(struct ace_device *ace)
628 628
629 /* Okay, it's a data request, set it up for transfer */ 629 /* Okay, it's a data request, set it up for transfer */
630 dev_dbg(ace->dev, 630 dev_dbg(ace->dev,
631 "request: sec=%lx hcnt=%lx, ccnt=%x, dir=%i\n", 631 "request: sec=%llx hcnt=%lx, ccnt=%x, dir=%i\n",
632 req->sector, req->hard_nr_sectors, 632 (unsigned long long) req->sector, req->hard_nr_sectors,
633 req->current_nr_sectors, rq_data_dir(req)); 633 req->current_nr_sectors, rq_data_dir(req));
634 634
635 ace->req = req; 635 ace->req = req;
@@ -870,25 +870,24 @@ static int ace_revalidate_disk(struct gendisk *gd)
870 return ace->id_result; 870 return ace->id_result;
871} 871}
872 872
873static int ace_open(struct inode *inode, struct file *filp) 873static int ace_open(struct block_device *bdev, fmode_t mode)
874{ 874{
875 struct ace_device *ace = inode->i_bdev->bd_disk->private_data; 875 struct ace_device *ace = bdev->bd_disk->private_data;
876 unsigned long flags; 876 unsigned long flags;
877 877
878 dev_dbg(ace->dev, "ace_open() users=%i\n", ace->users + 1); 878 dev_dbg(ace->dev, "ace_open() users=%i\n", ace->users + 1);
879 879
880 filp->private_data = ace;
881 spin_lock_irqsave(&ace->lock, flags); 880 spin_lock_irqsave(&ace->lock, flags);
882 ace->users++; 881 ace->users++;
883 spin_unlock_irqrestore(&ace->lock, flags); 882 spin_unlock_irqrestore(&ace->lock, flags);
884 883
885 check_disk_change(inode->i_bdev); 884 check_disk_change(bdev);
886 return 0; 885 return 0;
887} 886}
888 887
889static int ace_release(struct inode *inode, struct file *filp) 888static int ace_release(struct gendisk *disk, fmode_t mode)
890{ 889{
891 struct ace_device *ace = inode->i_bdev->bd_disk->private_data; 890 struct ace_device *ace = disk->private_data;
892 unsigned long flags; 891 unsigned long flags;
893 u16 val; 892 u16 val;
894 893
@@ -936,7 +935,8 @@ static int __devinit ace_setup(struct ace_device *ace)
936 int rc; 935 int rc;
937 936
938 dev_dbg(ace->dev, "ace_setup(ace=0x%p)\n", ace); 937 dev_dbg(ace->dev, "ace_setup(ace=0x%p)\n", ace);
939 dev_dbg(ace->dev, "physaddr=0x%lx irq=%i\n", ace->physaddr, ace->irq); 938 dev_dbg(ace->dev, "physaddr=0x%llx irq=%i\n",
939 (unsigned long long)ace->physaddr, ace->irq);
940 940
941 spin_lock_init(&ace->lock); 941 spin_lock_init(&ace->lock);
942 init_completion(&ace->id_completion); 942 init_completion(&ace->id_completion);
@@ -1018,8 +1018,8 @@ static int __devinit ace_setup(struct ace_device *ace)
1018 /* Print the identification */ 1018 /* Print the identification */
1019 dev_info(ace->dev, "Xilinx SystemACE revision %i.%i.%i\n", 1019 dev_info(ace->dev, "Xilinx SystemACE revision %i.%i.%i\n",
1020 (version >> 12) & 0xf, (version >> 8) & 0x0f, version & 0xff); 1020 (version >> 12) & 0xf, (version >> 8) & 0x0f, version & 0xff);
1021 dev_dbg(ace->dev, "physaddr 0x%lx, mapped to 0x%p, irq=%i\n", 1021 dev_dbg(ace->dev, "physaddr 0x%llx, mapped to 0x%p, irq=%i\n",
1022 ace->physaddr, ace->baseaddr, ace->irq); 1022 (unsigned long long) ace->physaddr, ace->baseaddr, ace->irq);
1023 1023
1024 ace->media_change = 1; 1024 ace->media_change = 1;
1025 ace_revalidate_disk(ace->gd); 1025 ace_revalidate_disk(ace->gd);
@@ -1036,8 +1036,8 @@ err_alloc_disk:
1036err_blk_initq: 1036err_blk_initq:
1037 iounmap(ace->baseaddr); 1037 iounmap(ace->baseaddr);
1038err_ioremap: 1038err_ioremap:
1039 dev_info(ace->dev, "xsysace: error initializing device at 0x%lx\n", 1039 dev_info(ace->dev, "xsysace: error initializing device at 0x%llx\n",
1040 ace->physaddr); 1040 (unsigned long long) ace->physaddr);
1041 return -ENOMEM; 1041 return -ENOMEM;
1042} 1042}
1043 1043
@@ -1060,7 +1060,7 @@ static void __devexit ace_teardown(struct ace_device *ace)
1060} 1060}
1061 1061
1062static int __devinit 1062static int __devinit
1063ace_alloc(struct device *dev, int id, unsigned long physaddr, 1063ace_alloc(struct device *dev, int id, resource_size_t physaddr,
1064 int irq, int bus_width) 1064 int irq, int bus_width)
1065{ 1065{
1066 struct ace_device *ace; 1066 struct ace_device *ace;
@@ -1120,7 +1120,7 @@ static void __devexit ace_free(struct device *dev)
1120 1120
1121static int __devinit ace_probe(struct platform_device *dev) 1121static int __devinit ace_probe(struct platform_device *dev)
1122{ 1122{
1123 unsigned long physaddr = 0; 1123 resource_size_t physaddr = 0;
1124 int bus_width = ACE_BUS_WIDTH_16; /* FIXME: should not be hard coded */ 1124 int bus_width = ACE_BUS_WIDTH_16; /* FIXME: should not be hard coded */
1125 int id = dev->id; 1125 int id = dev->id;
1126 int irq = NO_IRQ; 1126 int irq = NO_IRQ;
@@ -1166,7 +1166,7 @@ static int __devinit
1166ace_of_probe(struct of_device *op, const struct of_device_id *match) 1166ace_of_probe(struct of_device *op, const struct of_device_id *match)
1167{ 1167{
1168 struct resource res; 1168 struct resource res;
1169 unsigned long physaddr; 1169 resource_size_t physaddr;
1170 const u32 *id; 1170 const u32 *id;
1171 int irq, bus_width, rc; 1171 int irq, bus_width, rc;
1172 1172
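
The xsysace conversion above replaces unsigned long with resource_size_t for the register base, since the physical address may be wider than a long on 32-bit platforms with 64-bit resources; every printk of it therefore gains an explicit cast and a %llx conversion. The portable form, in isolation:

#include <linux/kernel.h>
#include <linux/types.h>

static void report_region(resource_size_t physaddr)
{
        /* resource_size_t can be 32 or 64 bits wide; widen before printing */
        printk(KERN_INFO "mydev: registers at 0x%llx\n",
               (unsigned long long)physaddr);
}
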
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index be20a67f1fa8..80754cdd3119 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -137,8 +137,7 @@ get_chipram( void )
137 return; 137 return;
138} 138}
139 139
140static int 140static int z2_open(struct block_device *bdev, fmode_t mode)
141z2_open( struct inode *inode, struct file *filp )
142{ 141{
143 int device; 142 int device;
144 int max_z2_map = ( Z2RAM_SIZE / Z2RAM_CHUNKSIZE ) * 143 int max_z2_map = ( Z2RAM_SIZE / Z2RAM_CHUNKSIZE ) *
@@ -147,7 +146,7 @@ z2_open( struct inode *inode, struct file *filp )
147 sizeof( z2ram_map[0] ); 146 sizeof( z2ram_map[0] );
148 int rc = -ENOMEM; 147 int rc = -ENOMEM;
149 148
150 device = iminor(inode); 149 device = MINOR(bdev->bd_dev);
151 150
152 if ( current_device != -1 && current_device != device ) 151 if ( current_device != -1 && current_device != device )
153 { 152 {
@@ -299,7 +298,7 @@ err_out:
299} 298}
300 299
301static int 300static int
302z2_release( struct inode *inode, struct file *filp ) 301z2_release(struct gendisk *disk, fmode_t mode)
303{ 302{
304 if ( current_device == -1 ) 303 if ( current_device == -1 )
305 return 0; 304 return 0;
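
With no inode passed to z2_open() any more, the unit number is taken from the block device itself: bdev->bd_dev holds the dev_t, so MINOR() replaces iminor(). In isolation:

#include <linux/fs.h>
#include <linux/kdev_t.h>

static int unit_of(struct block_device *bdev)
{
        return MINOR(bdev->bd_dev);     /* equivalent of the old iminor(inode) */
}
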
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index e6ee21d99d92..b0e569ba730d 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -867,7 +867,7 @@ static int bluecard_probe(struct pcmcia_device *link)
867 867
868 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 868 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
869 link->io.NumPorts1 = 8; 869 link->io.NumPorts1 = 8;
870 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT; 870 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT;
871 link->irq.IRQInfo1 = IRQ_LEVEL_ID; 871 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
872 872
873 link->irq.Handler = bluecard_interrupt; 873 link->irq.Handler = bluecard_interrupt;
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index 32f3a8ed8d3d..b936d8ce2728 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -443,8 +443,8 @@ static void bpa10x_destruct(struct hci_dev *hdev)
443 443
444 BT_DBG("%s", hdev->name); 444 BT_DBG("%s", hdev->name);
445 445
446 kfree(data->rx_skb[0]); 446 kfree_skb(data->rx_skb[0]);
447 kfree(data->rx_skb[1]); 447 kfree_skb(data->rx_skb[1]);
448 kfree(data); 448 kfree(data);
449} 449}
450 450
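
The bpa10x fix swaps kfree() for kfree_skb(): the receive buffers are struct sk_buff objects, which are reference counted and must be released through the skb API (which also accepts NULL). A sketch of the teardown pattern, with a hypothetical helper name:

#include <linux/skbuff.h>

static void drop_rx_buffers(struct sk_buff *rx_skb[2])
{
        /* kfree_skb() drops one reference and frees the buffer at zero;
         * NULL pointers are tolerated, so no extra checks are needed. */
        kfree_skb(rx_skb[0]);
        kfree_skb(rx_skb[1]);
}
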
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index 2cbe70b66470..b3e4d07a4ac2 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -343,6 +343,7 @@ static irqreturn_t bt3c_interrupt(int irq, void *dev_inst)
343 bt3c_info_t *info = dev_inst; 343 bt3c_info_t *info = dev_inst;
344 unsigned int iobase; 344 unsigned int iobase;
345 int iir; 345 int iir;
346 irqreturn_t r = IRQ_NONE;
346 347
347 BUG_ON(!info->hdev); 348 BUG_ON(!info->hdev);
348 349
@@ -374,11 +375,12 @@ static irqreturn_t bt3c_interrupt(int irq, void *dev_inst)
374 375
375 outb(iir, iobase + CONTROL); 376 outb(iir, iobase + CONTROL);
376 } 377 }
378 r = IRQ_HANDLED;
377 } 379 }
378 380
379 spin_unlock(&(info->lock)); 381 spin_unlock(&(info->lock));
380 382
381 return IRQ_HANDLED; 383 return r;
382} 384}
383 385
384 386
@@ -657,7 +659,7 @@ static int bt3c_probe(struct pcmcia_device *link)
657 659
658 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 660 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
659 link->io.NumPorts1 = 8; 661 link->io.NumPorts1 = 8;
660 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT; 662 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT;
661 link->irq.IRQInfo1 = IRQ_LEVEL_ID; 663 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
662 664
663 link->irq.Handler = bt3c_interrupt; 665 link->irq.Handler = bt3c_interrupt;
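
Because these PCMCIA links now request IRQ_TYPE_DYNAMIC_SHARING, their interrupt handlers have to report whether the interrupt was actually theirs; bt3c (and btuart and dtl1 below) do this with a local irqreturn_t that starts as IRQ_NONE and is set to IRQ_HANDLED only when work was done. A minimal sketch of the idiom, with hypothetical mydev_* helpers standing in for the real register accesses:

#include <linux/interrupt.h>

struct mydev { void __iomem *base; };

/* Hypothetical stand-ins for real status reads and servicing. */
static int mydev_irq_pending(struct mydev *dev) { return 0; }
static void mydev_service(struct mydev *dev) { }

static irqreturn_t mydev_interrupt(int irq, void *dev_id)
{
        struct mydev *dev = dev_id;
        irqreturn_t ret = IRQ_NONE;

        while (mydev_irq_pending(dev)) {
                mydev_service(dev);
                ret = IRQ_HANDLED;
        }

        return ret;     /* IRQ_NONE lets the core spot spurious interrupts on a shared line */
}
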
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
index 58630cc1eff2..cda6c7cc944b 100644
--- a/drivers/bluetooth/btsdio.c
+++ b/drivers/bluetooth/btsdio.c
@@ -152,7 +152,7 @@ static int btsdio_rx_packet(struct btsdio_data *data)
152 152
153 err = sdio_readsb(data->func, skb->data, REG_RDAT, len - 4); 153 err = sdio_readsb(data->func, skb->data, REG_RDAT, len - 4);
154 if (err < 0) { 154 if (err < 0) {
155 kfree(skb); 155 kfree_skb(skb);
156 return err; 156 return err;
157 } 157 }
158 158
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index 8e556b7ff9f6..efd689a062eb 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -293,6 +293,7 @@ static irqreturn_t btuart_interrupt(int irq, void *dev_inst)
293 unsigned int iobase; 293 unsigned int iobase;
294 int boguscount = 0; 294 int boguscount = 0;
295 int iir, lsr; 295 int iir, lsr;
296 irqreturn_t r = IRQ_NONE;
296 297
297 BUG_ON(!info->hdev); 298 BUG_ON(!info->hdev);
298 299
@@ -302,6 +303,7 @@ static irqreturn_t btuart_interrupt(int irq, void *dev_inst)
302 303
303 iir = inb(iobase + UART_IIR) & UART_IIR_ID; 304 iir = inb(iobase + UART_IIR) & UART_IIR_ID;
304 while (iir) { 305 while (iir) {
306 r = IRQ_HANDLED;
305 307
306 /* Clear interrupt */ 308 /* Clear interrupt */
307 lsr = inb(iobase + UART_LSR); 309 lsr = inb(iobase + UART_LSR);
@@ -335,7 +337,7 @@ static irqreturn_t btuart_interrupt(int irq, void *dev_inst)
335 337
336 spin_unlock(&(info->lock)); 338 spin_unlock(&(info->lock));
337 339
338 return IRQ_HANDLED; 340 return r;
339} 341}
340 342
341 343
@@ -586,7 +588,7 @@ static int btuart_probe(struct pcmcia_device *link)
586 588
587 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 589 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
588 link->io.NumPorts1 = 8; 590 link->io.NumPorts1 = 8;
589 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT; 591 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT;
590 link->irq.IRQInfo1 = IRQ_LEVEL_ID; 592 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
591 593
592 link->irq.Handler = btuart_interrupt; 594 link->irq.Handler = btuart_interrupt;
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index e6e6b037695a..901bdd95655f 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -297,6 +297,7 @@ static irqreturn_t dtl1_interrupt(int irq, void *dev_inst)
297 unsigned char msr; 297 unsigned char msr;
298 int boguscount = 0; 298 int boguscount = 0;
299 int iir, lsr; 299 int iir, lsr;
300 irqreturn_t r = IRQ_NONE;
300 301
301 BUG_ON(!info->hdev); 302 BUG_ON(!info->hdev);
302 303
@@ -307,6 +308,7 @@ static irqreturn_t dtl1_interrupt(int irq, void *dev_inst)
307 iir = inb(iobase + UART_IIR) & UART_IIR_ID; 308 iir = inb(iobase + UART_IIR) & UART_IIR_ID;
308 while (iir) { 309 while (iir) {
309 310
311 r = IRQ_HANDLED;
310 /* Clear interrupt */ 312 /* Clear interrupt */
311 lsr = inb(iobase + UART_LSR); 313 lsr = inb(iobase + UART_LSR);
312 314
@@ -343,11 +345,12 @@ static irqreturn_t dtl1_interrupt(int irq, void *dev_inst)
343 info->ri_latch = msr & UART_MSR_RI; 345 info->ri_latch = msr & UART_MSR_RI;
344 clear_bit(XMIT_WAITING, &(info->tx_state)); 346 clear_bit(XMIT_WAITING, &(info->tx_state));
345 dtl1_write_wakeup(info); 347 dtl1_write_wakeup(info);
348 r = IRQ_HANDLED;
346 } 349 }
347 350
348 spin_unlock(&(info->lock)); 351 spin_unlock(&(info->lock));
349 352
350 return IRQ_HANDLED; 353 return r;
351} 354}
352 355
353 356
@@ -568,7 +571,7 @@ static int dtl1_probe(struct pcmcia_device *link)
568 571
569 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 572 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
570 link->io.NumPorts1 = 8; 573 link->io.NumPorts1 = 8;
571 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT; 574 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT;
572 link->irq.IRQInfo1 = IRQ_LEVEL_ID; 575 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
573 576
574 link->irq.Handler = dtl1_interrupt; 577 link->irq.Handler = dtl1_interrupt;
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index d47f2f80accd..7d2e91cccb13 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -973,7 +973,7 @@ static int cdrom_close_write(struct cdrom_device_info *cdi)
973 * is in their own interest: device control becomes a lot easier 973 * is in their own interest: device control becomes a lot easier
974 * this way. 974 * this way.
975 */ 975 */
976int cdrom_open(struct cdrom_device_info *cdi, struct inode *ip, struct file *fp) 976int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev, fmode_t mode)
977{ 977{
978 int ret; 978 int ret;
979 979
@@ -982,14 +982,14 @@ int cdrom_open(struct cdrom_device_info *cdi, struct inode *ip, struct file *fp)
982 /* if this was a O_NONBLOCK open and we should honor the flags, 982 /* if this was a O_NONBLOCK open and we should honor the flags,
983 * do a quick open without drive/disc integrity checks. */ 983 * do a quick open without drive/disc integrity checks. */
984 cdi->use_count++; 984 cdi->use_count++;
985 if ((fp->f_flags & O_NONBLOCK) && (cdi->options & CDO_USE_FFLAGS)) { 985 if ((mode & FMODE_NDELAY) && (cdi->options & CDO_USE_FFLAGS)) {
986 ret = cdi->ops->open(cdi, 1); 986 ret = cdi->ops->open(cdi, 1);
987 } else { 987 } else {
988 ret = open_for_data(cdi); 988 ret = open_for_data(cdi);
989 if (ret) 989 if (ret)
990 goto err; 990 goto err;
991 cdrom_mmc3_profile(cdi); 991 cdrom_mmc3_profile(cdi);
992 if (fp->f_mode & FMODE_WRITE) { 992 if (mode & FMODE_WRITE) {
993 ret = -EROFS; 993 ret = -EROFS;
994 if (cdrom_open_write(cdi)) 994 if (cdrom_open_write(cdi))
995 goto err_release; 995 goto err_release;
@@ -1007,7 +1007,7 @@ int cdrom_open(struct cdrom_device_info *cdi, struct inode *ip, struct file *fp)
1007 cdi->name, cdi->use_count); 1007 cdi->name, cdi->use_count);
1008 /* Do this on open. Don't wait for mount, because they might 1008 /* Do this on open. Don't wait for mount, because they might
1009 not be mounting, but opening with O_NONBLOCK */ 1009 not be mounting, but opening with O_NONBLOCK */
1010 check_disk_change(ip->i_bdev); 1010 check_disk_change(bdev);
1011 return 0; 1011 return 0;
1012err_release: 1012err_release:
1013 if (CDROM_CAN(CDC_LOCK) && cdi->options & CDO_LOCK) { 1013 if (CDROM_CAN(CDC_LOCK) && cdi->options & CDO_LOCK) {
@@ -1184,7 +1184,7 @@ static int check_for_audio_disc(struct cdrom_device_info * cdi,
1184 return 0; 1184 return 0;
1185} 1185}
1186 1186
1187int cdrom_release(struct cdrom_device_info *cdi, struct file *fp) 1187void cdrom_release(struct cdrom_device_info *cdi, fmode_t mode)
1188{ 1188{
1189 struct cdrom_device_ops *cdo = cdi->ops; 1189 struct cdrom_device_ops *cdo = cdi->ops;
1190 int opened_for_data; 1190 int opened_for_data;
@@ -1205,7 +1205,7 @@ int cdrom_release(struct cdrom_device_info *cdi, struct file *fp)
1205 } 1205 }
1206 1206
1207 opened_for_data = !(cdi->options & CDO_USE_FFLAGS) || 1207 opened_for_data = !(cdi->options & CDO_USE_FFLAGS) ||
1208 !(fp && fp->f_flags & O_NONBLOCK); 1208 !(mode & FMODE_NDELAY);
1209 1209
1210 /* 1210 /*
1211 * flush cache on last write release 1211 * flush cache on last write release
@@ -1219,7 +1219,6 @@ int cdrom_release(struct cdrom_device_info *cdi, struct file *fp)
1219 cdi->options & CDO_AUTO_EJECT && CDROM_CAN(CDC_OPEN_TRAY)) 1219 cdi->options & CDO_AUTO_EJECT && CDROM_CAN(CDC_OPEN_TRAY))
1220 cdo->tray_move(cdi, 1); 1220 cdo->tray_move(cdi, 1);
1221 } 1221 }
1222 return 0;
1223} 1222}
1224 1223
1225static int cdrom_read_mech_status(struct cdrom_device_info *cdi, 1224static int cdrom_read_mech_status(struct cdrom_device_info *cdi,
@@ -2082,10 +2081,6 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
2082 if (!q) 2081 if (!q)
2083 return -ENXIO; 2082 return -ENXIO;
2084 2083
2085 rq = blk_get_request(q, READ, GFP_KERNEL);
2086 if (!rq)
2087 return -ENOMEM;
2088
2089 cdi->last_sense = 0; 2084 cdi->last_sense = 0;
2090 2085
2091 while (nframes) { 2086 while (nframes) {
@@ -2097,9 +2092,17 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
2097 2092
2098 len = nr * CD_FRAMESIZE_RAW; 2093 len = nr * CD_FRAMESIZE_RAW;
2099 2094
2095 rq = blk_get_request(q, READ, GFP_KERNEL);
2096 if (!rq) {
2097 ret = -ENOMEM;
2098 break;
2099 }
2100
2100 ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL); 2101 ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
2101 if (ret) 2102 if (ret) {
2103 blk_put_request(rq);
2102 break; 2104 break;
2105 }
2103 2106
2104 rq->cmd[0] = GPCMD_READ_CD; 2107 rq->cmd[0] = GPCMD_READ_CD;
2105 rq->cmd[1] = 1 << 2; 2108 rq->cmd[1] = 1 << 2;
@@ -2125,6 +2128,7 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
2125 2128
2126 if (blk_rq_unmap_user(bio)) 2129 if (blk_rq_unmap_user(bio))
2127 ret = -EFAULT; 2130 ret = -EFAULT;
2131 blk_put_request(rq);
2128 2132
2129 if (ret) 2133 if (ret)
2130 break; 2134 break;
@@ -2134,7 +2138,6 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
2134 ubuf += len; 2138 ubuf += len;
2135 } 2139 }
2136 2140
2137 blk_put_request(rq);
2138 return ret; 2141 return ret;
2139} 2142}
2140 2143
@@ -2662,17 +2665,17 @@ static int cdrom_ioctl_audioctl(struct cdrom_device_info *cdi,
2662 * these days. 2665 * these days.
2663 * ATAPI / SCSI specific code now mainly resides in mmc_ioctl(). 2666 * ATAPI / SCSI specific code now mainly resides in mmc_ioctl().
2664 */ 2667 */
2665int cdrom_ioctl(struct file * file, struct cdrom_device_info *cdi, 2668int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev,
2666 struct inode *ip, unsigned int cmd, unsigned long arg) 2669 fmode_t mode, unsigned int cmd, unsigned long arg)
2667{ 2670{
2668 void __user *argp = (void __user *)arg; 2671 void __user *argp = (void __user *)arg;
2669 int ret; 2672 int ret;
2670 struct gendisk *disk = ip->i_bdev->bd_disk; 2673 struct gendisk *disk = bdev->bd_disk;
2671 2674
2672 /* 2675 /*
2673 * Try the generic SCSI command ioctl's first. 2676 * Try the generic SCSI command ioctl's first.
2674 */ 2677 */
2675 ret = scsi_cmd_ioctl(file, disk->queue, disk, cmd, argp); 2678 ret = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp);
2676 if (ret != -ENOTTY) 2679 if (ret != -ENOTTY)
2677 return ret; 2680 return ret;
2678 2681
@@ -2696,7 +2699,7 @@ int cdrom_ioctl(struct file * file, struct cdrom_device_info *cdi,
2696 case CDROM_SELECT_DISC: 2699 case CDROM_SELECT_DISC:
2697 return cdrom_ioctl_select_disc(cdi, arg); 2700 return cdrom_ioctl_select_disc(cdi, arg);
2698 case CDROMRESET: 2701 case CDROMRESET:
2699 return cdrom_ioctl_reset(cdi, ip->i_bdev); 2702 return cdrom_ioctl_reset(cdi, bdev);
2700 case CDROM_LOCKDOOR: 2703 case CDROM_LOCKDOOR:
2701 return cdrom_ioctl_lock_door(cdi, arg); 2704 return cdrom_ioctl_lock_door(cdi, arg);
2702 case CDROM_DEBUG: 2705 case CDROM_DEBUG:
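
In cdrom_read_cdda_bpc() the request is now allocated and released once per loop iteration instead of being reused across chunks: a request that has completed should not be resubmitted, so each pass takes a fresh blk_get_request() and drops it with blk_put_request() after unmapping the user buffer. Condensed from the patched loop above, with the surrounding variables (q, ubuf, len, nr, bio, nframes) as in the driver and the command setup elided:

while (nframes) {
        rq = blk_get_request(q, READ, GFP_KERNEL);      /* fresh request per chunk */
        if (!rq) {
                ret = -ENOMEM;
                break;
        }

        ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
        if (ret) {
                blk_put_request(rq);
                break;
        }

        /* ... set up rq->cmd[] and execute the READ CD command ... */

        if (blk_rq_unmap_user(bio))
                ret = -EFAULT;
        blk_put_request(rq);            /* always released before the next pass */

        if (ret)
                break;
        nframes -= nr;
        ubuf += len;
}
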
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index d6ba77a2dd7b..2eecb779437b 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -490,14 +490,15 @@ static struct cdrom_device_ops gdrom_ops = {
490 .n_minors = 1, 490 .n_minors = 1,
491}; 491};
492 492
493static int gdrom_bdops_open(struct inode *inode, struct file *file) 493static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
494{ 494{
495 return cdrom_open(gd.cd_info, inode, file); 495 return cdrom_open(gd.cd_info, bdev, mode);
496} 496}
497 497
498static int gdrom_bdops_release(struct inode *inode, struct file *file) 498static int gdrom_bdops_release(struct gendisk *disk, fmode_t mode)
499{ 499{
500 return cdrom_release(gd.cd_info, file); 500 cdrom_release(gd.cd_info, mode);
501 return 0;
501} 502}
502 503
503static int gdrom_bdops_mediachanged(struct gendisk *disk) 504static int gdrom_bdops_mediachanged(struct gendisk *disk)
@@ -505,10 +506,10 @@ static int gdrom_bdops_mediachanged(struct gendisk *disk)
505 return cdrom_media_changed(gd.cd_info); 506 return cdrom_media_changed(gd.cd_info);
506} 507}
507 508
508static int gdrom_bdops_ioctl(struct inode *inode, struct file *file, 509static int gdrom_bdops_ioctl(struct block_device *bdev, fmode_t mode,
509 unsigned cmd, unsigned long arg) 510 unsigned cmd, unsigned long arg)
510{ 511{
511 return cdrom_ioctl(file, gd.cd_info, inode, cmd, arg); 512 return cdrom_ioctl(gd.cd_info, bdev, mode, cmd, arg);
512} 513}
513 514
514static struct block_device_operations gdrom_bdops = { 515static struct block_device_operations gdrom_bdops = {
@@ -516,7 +517,7 @@ static struct block_device_operations gdrom_bdops = {
516 .open = gdrom_bdops_open, 517 .open = gdrom_bdops_open,
517 .release = gdrom_bdops_release, 518 .release = gdrom_bdops_release,
518 .media_changed = gdrom_bdops_mediachanged, 519 .media_changed = gdrom_bdops_mediachanged,
519 .ioctl = gdrom_bdops_ioctl, 520 .locked_ioctl = gdrom_bdops_ioctl,
520}; 521};
521 522
522static irqreturn_t gdrom_command_interrupt(int irq, void *dev_id) 523static irqreturn_t gdrom_command_interrupt(int irq, void *dev_id)
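
cdrom_release() now returns void and cdrom_ioctl() takes (cdi, bdev, mode, cmd, arg), so the block-layer glue in gdrom supplies its own return value and registers its ioctl handler as .locked_ioctl, since the cdrom layer still expects to run under the big kernel lock. The shape of that glue for a hypothetical driver with a single struct cdrom_device_info:

#include <linux/blkdev.h>
#include <linux/cdrom.h>
#include <linux/genhd.h>

static struct cdrom_device_info *my_cdi;        /* hypothetical, set up at probe time */

static int my_bdops_release(struct gendisk *disk, fmode_t mode)
{
        cdrom_release(my_cdi, mode);    /* void now, so the glue provides the 0 */
        return 0;
}

static int my_bdops_ioctl(struct block_device *bdev, fmode_t mode,
                          unsigned cmd, unsigned long arg)
{
        return cdrom_ioctl(my_cdi, bdev, mode, cmd, arg);  /* registered as .locked_ioctl */
}
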
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index 031e0e1a1a3b..13929356135c 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -151,23 +151,24 @@ static const struct file_operations proc_viocd_operations = {
151 .release = single_release, 151 .release = single_release,
152}; 152};
153 153
154static int viocd_blk_open(struct inode *inode, struct file *file) 154static int viocd_blk_open(struct block_device *bdev, fmode_t mode)
155{ 155{
156 struct disk_info *di = inode->i_bdev->bd_disk->private_data; 156 struct disk_info *di = bdev->bd_disk->private_data;
157 return cdrom_open(&di->viocd_info, inode, file); 157 return cdrom_open(&di->viocd_info, bdev, mode);
158} 158}
159 159
160static int viocd_blk_release(struct inode *inode, struct file *file) 160static int viocd_blk_release(struct gendisk *disk, fmode_t mode)
161{ 161{
162 struct disk_info *di = inode->i_bdev->bd_disk->private_data; 162 struct disk_info *di = disk->private_data;
163 return cdrom_release(&di->viocd_info, file); 163 cdrom_release(&di->viocd_info, mode);
164 return 0;
164} 165}
165 166
166static int viocd_blk_ioctl(struct inode *inode, struct file *file, 167static int viocd_blk_ioctl(struct block_device *bdev, fmode_t mode,
167 unsigned cmd, unsigned long arg) 168 unsigned cmd, unsigned long arg)
168{ 169{
169 struct disk_info *di = inode->i_bdev->bd_disk->private_data; 170 struct disk_info *di = bdev->bd_disk->private_data;
170 return cdrom_ioctl(file, &di->viocd_info, inode, cmd, arg); 171 return cdrom_ioctl(&di->viocd_info, bdev, mode, cmd, arg);
171} 172}
172 173
173static int viocd_blk_media_changed(struct gendisk *disk) 174static int viocd_blk_media_changed(struct gendisk *disk)
@@ -180,7 +181,7 @@ struct block_device_operations viocd_fops = {
180 .owner = THIS_MODULE, 181 .owner = THIS_MODULE,
181 .open = viocd_blk_open, 182 .open = viocd_blk_open,
182 .release = viocd_blk_release, 183 .release = viocd_blk_release,
183 .ioctl = viocd_blk_ioctl, 184 .locked_ioctl = viocd_blk_ioctl,
184 .media_changed = viocd_blk_media_changed, 185 .media_changed = viocd_blk_media_changed,
185}; 186};
186 187
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 122254155ae1..43d6ba83a191 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -124,7 +124,7 @@ config COMPUTONE
124 which give you many serial ports. You would need something like this 124 which give you many serial ports. You would need something like this
125 to connect more than two modems to your Linux box, for instance in 125 to connect more than two modems to your Linux box, for instance in
126 order to become a dial-in server. If you have a card like that, say 126 order to become a dial-in server. If you have a card like that, say
127 Y here and read <file:Documentation/computone.txt>. 127 Y here and read <file:Documentation/serial/computone.txt>.
128 128
129 To compile this driver as module, choose M here: the 129 To compile this driver as module, choose M here: the
130 module will be called ip2. 130 module will be called ip2.
@@ -136,7 +136,7 @@ config ROCKETPORT
136 This driver supports Comtrol RocketPort and RocketModem PCI boards. 136 This driver supports Comtrol RocketPort and RocketModem PCI boards.
137 These boards provide 2, 4, 8, 16, or 32 high-speed serial ports or 137 These boards provide 2, 4, 8, 16, or 32 high-speed serial ports or
138 modems. For information about the RocketPort/RocketModem boards 138 modems. For information about the RocketPort/RocketModem boards
139 and this driver read <file:Documentation/rocket.txt>. 139 and this driver read <file:Documentation/serial/rocket.txt>.
140 140
141 To compile this driver as a module, choose M here: the 141 To compile this driver as a module, choose M here: the
142 module will be called rocket. 142 module will be called rocket.
@@ -154,7 +154,7 @@ config CYCLADES
154 your Linux box, for instance in order to become a dial-in server. 154 your Linux box, for instance in order to become a dial-in server.
155 155
156 For information about the Cyclades-Z card, read 156 For information about the Cyclades-Z card, read
157 <file:Documentation/README.cycladesZ>. 157 <file:Documentation/serial/README.cycladesZ>.
158 158
159 To compile this driver as a module, choose M here: the 159 To compile this driver as a module, choose M here: the
160 module will be called cyclades. 160 module will be called cyclades.
@@ -183,7 +183,7 @@ config DIGIEPCA
183 box, for instance in order to become a dial-in server. This driver 183 box, for instance in order to become a dial-in server. This driver
184 supports the original PC (ISA) boards as well as PCI, and EISA. If 184 supports the original PC (ISA) boards as well as PCI, and EISA. If
185 you have a card like this, say Y here and read the file 185 you have a card like this, say Y here and read the file
186 <file:Documentation/digiepca.txt>. 186 <file:Documentation/serial/digiepca.txt>.
187 187
188 To compile this driver as a module, choose M here: the 188 To compile this driver as a module, choose M here: the
189 module will be called epca. 189 module will be called epca.
@@ -289,7 +289,7 @@ config RISCOM8
289 which gives you many serial ports. You would need something like 289 which gives you many serial ports. You would need something like
290 this to connect more than two modems to your Linux box, for instance 290 this to connect more than two modems to your Linux box, for instance
291 in order to become a dial-in server. If you have a card like that, 291 in order to become a dial-in server. If you have a card like that,
292 say Y here and read the file <file:Documentation/riscom8.txt>. 292 say Y here and read the file <file:Documentation/serial/riscom8.txt>.
293 293
294 Also it's possible to say M here and compile this driver as kernel 294 Also it's possible to say M here and compile this driver as kernel
295 loadable module; the module will be called riscom8. 295 loadable module; the module will be called riscom8.
@@ -304,8 +304,8 @@ config SPECIALIX
304 your Linux box, for instance in order to become a dial-in server. 304 your Linux box, for instance in order to become a dial-in server.
305 305
306 If you have a card like that, say Y here and read the file 306 If you have a card like that, say Y here and read the file
307 <file:Documentation/specialix.txt>. Also it's possible to say M here 307 <file:Documentation/serial/specialix.txt>. Also it's possible to say
308 and compile this driver as kernel loadable module which will be 308 M here and compile this driver as kernel loadable module which will be
309 called specialix. 309 called specialix.
310 310
311config SX 311config SX
@@ -313,7 +313,7 @@ config SX
313 depends on SERIAL_NONSTANDARD && (PCI || EISA || ISA) 313 depends on SERIAL_NONSTANDARD && (PCI || EISA || ISA)
314 help 314 help
315 This is a driver for the SX and SI multiport serial cards. 315 This is a driver for the SX and SI multiport serial cards.
316 Please read the file <file:Documentation/sx.txt> for details. 316 Please read the file <file:Documentation/serial/sx.txt> for details.
317 317
318 This driver can only be built as a module ( = code which can be 318 This driver can only be built as a module ( = code which can be
319 inserted in and removed from the running kernel whenever you want). 319 inserted in and removed from the running kernel whenever you want).
@@ -344,8 +344,8 @@ config STALDRV
344 like this to connect more than two modems to your Linux box, for 344 like this to connect more than two modems to your Linux box, for
345 instance in order to become a dial-in server. If you say Y here, 345 instance in order to become a dial-in server. If you say Y here,
346 you will be asked for your specific card model in the next 346 you will be asked for your specific card model in the next
347 questions. Make sure to read <file:Documentation/stallion.txt> in 347 questions. Make sure to read <file:Documentation/serial/stallion.txt>
348 this case. If you have never heard about all this, it's safe to 348 in this case. If you have never heard about all this, it's safe to
349 say N. 349 say N.
350 350
351config STALLION 351config STALLION
@@ -354,7 +354,7 @@ config STALLION
354 help 354 help
355 If you have an EasyIO or EasyConnection 8/32 multiport Stallion 355 If you have an EasyIO or EasyConnection 8/32 multiport Stallion
356 card, then this is for you; say Y. Make sure to read 356 card, then this is for you; say Y. Make sure to read
357 <file:Documentation/stallion.txt>. 357 <file:Documentation/serial/stallion.txt>.
358 358
359 To compile this driver as a module, choose M here: the 359 To compile this driver as a module, choose M here: the
360 module will be called stallion. 360 module will be called stallion.
@@ -365,7 +365,7 @@ config ISTALLION
365 help 365 help
366 If you have an EasyConnection 8/64, ONboard, Brumby or Stallion 366 If you have an EasyConnection 8/64, ONboard, Brumby or Stallion
367 serial multiport card, say Y here. Make sure to read 367 serial multiport card, say Y here. Make sure to read
368 <file:Documentation/stallion.txt>. 368 <file:Documentation/serial/stallion.txt>.
369 369
370 To compile this driver as a module, choose M here: the 370 To compile this driver as a module, choose M here: the
371 module will be called istallion. 371 module will be called istallion.
@@ -812,28 +812,6 @@ config JS_RTC
812 To compile this driver as a module, choose M here: the 812 To compile this driver as a module, choose M here: the
813 module will be called js-rtc. 813 module will be called js-rtc.
814 814
815config SGI_DS1286
816 tristate "SGI DS1286 RTC support"
817 depends on SGI_HAS_DS1286
818 help
819 If you say Y here and create a character special file /dev/rtc with
820 major number 10 and minor number 135 using mknod ("man mknod"), you
821 will get access to the real time clock built into your computer.
822 Every SGI has such a clock built in. It reports status information
823 via the file /proc/rtc and its behaviour is set by various ioctls on
824 /dev/rtc.
825
826config SGI_IP27_RTC
827 bool "SGI M48T35 RTC support"
828 depends on SGI_IP27
829 help
830 If you say Y here and create a character special file /dev/rtc with
831 major number 10 and minor number 135 using mknod ("man mknod"), you
832 will get access to the real time clock built into your computer.
833 Every SGI has such a clock built in. It reports status information
834 via the file /proc/rtc and its behaviour is set by various ioctls on
835 /dev/rtc.
836
837config GEN_RTC 815config GEN_RTC
838 tristate "Generic /dev/rtc emulation" 816 tristate "Generic /dev/rtc emulation"
839 depends on RTC!=y && !IA64 && !ARM && !M32R && !MIPS && !SPARC && !FRV && !S390 && !SUPERH && !AVR32 817 depends on RTC!=y && !IA64 && !ARM && !M32R && !MIPS && !SPARC && !FRV && !S390 && !SUPERH && !AVR32
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 1a4247dccac4..438f71317c5c 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -74,8 +74,6 @@ obj-$(CONFIG_RTC) += rtc.o
74obj-$(CONFIG_HPET) += hpet.o 74obj-$(CONFIG_HPET) += hpet.o
75obj-$(CONFIG_GEN_RTC) += genrtc.o 75obj-$(CONFIG_GEN_RTC) += genrtc.o
76obj-$(CONFIG_EFI_RTC) += efirtc.o 76obj-$(CONFIG_EFI_RTC) += efirtc.o
77obj-$(CONFIG_SGI_DS1286) += ds1286.o
78obj-$(CONFIG_SGI_IP27_RTC) += ip27-rtc.o
79obj-$(CONFIG_DS1302) += ds1302.o 77obj-$(CONFIG_DS1302) += ds1302.o
80obj-$(CONFIG_XILINX_HWICAP) += xilinx_hwicap/ 78obj-$(CONFIG_XILINX_HWICAP) += xilinx_hwicap/
81ifeq ($(CONFIG_GENERIC_NVRAM),y) 79ifeq ($(CONFIG_GENERIC_NVRAM),y)
diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c
index 0f004b65ec03..03f95ec08f59 100644
--- a/drivers/char/agp/uninorth-agp.c
+++ b/drivers/char/agp/uninorth-agp.c
@@ -27,7 +27,7 @@
27static int uninorth_rev; 27static int uninorth_rev;
28static int is_u3; 28static int is_u3;
29 29
30static char __devinitdata *aperture = NULL; 30static char *aperture = NULL;
31 31
32static int uninorth_fetch_size(void) 32static int uninorth_fetch_size(void)
33{ 33{
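
Dropping __devinitdata from the aperture string keeps it resident after initialization; the variable appears to back a module parameter, and anything that can still be read or written once init sections may have been discarded must not live in one of them. A hypothetical illustration of the safe form:

#include <linux/module.h>
#include <linux/moduleparam.h>

/* Stays resident: module parameters are accessed after init,
 * so no __initdata / __devinitdata annotation here. */
static char *aperture;
module_param(aperture, charp, 0);
MODULE_PARM_DESC(aperture, "aperture size (hypothetical description)");
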
diff --git a/drivers/char/amiserial.c b/drivers/char/amiserial.c
index 98821f97583c..b97aebd7aeb8 100644
--- a/drivers/char/amiserial.c
+++ b/drivers/char/amiserial.c
@@ -2071,12 +2071,13 @@ module_init(rs_init)
2071module_exit(rs_exit) 2071module_exit(rs_exit)
2072 2072
2073 2073
2074#if defined(CONFIG_SERIAL_CONSOLE) && !defined(MODULE)
2075
2074/* 2076/*
2075 * ------------------------------------------------------------ 2077 * ------------------------------------------------------------
2076 * Serial console driver 2078 * Serial console driver
2077 * ------------------------------------------------------------ 2079 * ------------------------------------------------------------
2078 */ 2080 */
2079#ifdef CONFIG_SERIAL_CONSOLE
2080 2081
2081static void amiga_serial_putc(char c) 2082static void amiga_serial_putc(char c)
2082{ 2083{
@@ -2130,6 +2131,7 @@ static int __init amiserial_console_init(void)
2130 return 0; 2131 return 0;
2131} 2132}
2132console_initcall(amiserial_console_init); 2133console_initcall(amiserial_console_init);
2133#endif 2134
2135#endif /* CONFIG_SERIAL_CONSOLE && !MODULE */
2134 2136
2135MODULE_LICENSE("GPL"); 2137MODULE_LICENSE("GPL");
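
The amiserial console block is now compiled only when the driver is built in: the guard moves above the whole section and gains !defined(MODULE), presumably because console_initcall() is only available to built-in code and a modular driver cannot register a boot console this way. The guard, stripped to its skeleton:

#if defined(CONFIG_SERIAL_CONSOLE) && !defined(MODULE)
/* ... amiga_serial_putc(), the console struct, amiserial_console_init() ... */
console_initcall(amiserial_console_init);
#endif /* CONFIG_SERIAL_CONSOLE && !MODULE */
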
diff --git a/drivers/char/cp437.uni b/drivers/char/cp437.uni
index 1f06889a96b9..bc6163484f62 100644
--- a/drivers/char/cp437.uni
+++ b/drivers/char/cp437.uni
@@ -27,7 +27,7 @@
270x0c U+2640 270x0c U+2640
280x0d U+266a 280x0d U+266a
290x0e U+266b 290x0e U+266b
300x0f U+263c 300x0f U+263c U+00a4
310x10 U+25b6 U+25ba 310x10 U+25b6 U+25ba
320x11 U+25c0 U+25c4 320x11 U+25c0 U+25c4
330x12 U+2195 330x12 U+2195
@@ -55,7 +55,7 @@
550x24 U+0024 550x24 U+0024
560x25 U+0025 560x25 U+0025
570x26 U+0026 570x26 U+0026
580x27 U+0027 580x27 U+0027 U+00b4
590x28 U+0028 590x28 U+0028
600x29 U+0029 600x29 U+0029
610x2a U+002a 610x2a U+002a
@@ -84,7 +84,7 @@
840x41 U+0041 U+00c0 U+00c1 U+00c2 U+00c3 840x41 U+0041 U+00c0 U+00c1 U+00c2 U+00c3
850x42 U+0042 850x42 U+0042
860x43 U+0043 U+00a9 860x43 U+0043 U+00a9
870x44 U+0044 870x44 U+0044 U+00d0
880x45 U+0045 U+00c8 U+00ca U+00cb 880x45 U+0045 U+00c8 U+00ca U+00cb
890x46 U+0046 890x46 U+0046
900x47 U+0047 900x47 U+0047
@@ -140,7 +140,7 @@
1400x79 U+0079 U+00fd 1400x79 U+0079 U+00fd
1410x7a U+007a 1410x7a U+007a
1420x7b U+007b 1420x7b U+007b
1430x7c U+007c U+00a5 1430x7c U+007c U+00a6
1440x7d U+007d 1440x7d U+007d
1450x7e U+007e 1450x7e U+007e
146# 146#
@@ -263,10 +263,10 @@
2630xe8 U+03a6 U+00d8 2630xe8 U+03a6 U+00d8
2640xe9 U+0398 2640xe9 U+0398
2650xea U+03a9 U+2126 2650xea U+03a9 U+2126
2660xeb U+03b4 2660xeb U+03b4 U+00f0
2670xec U+221e 2670xec U+221e
2680xed U+03c6 U+00f8 2680xed U+03c6 U+00f8
2690xee U+03b5 2690xee U+03b5 U+2208
2700xef U+2229 2700xef U+2229
2710xf0 U+2261 2710xf0 U+2261
2720xf1 U+00b1 2720xf1 U+00b1
diff --git a/drivers/char/ds1286.c b/drivers/char/ds1286.c
deleted file mode 100644
index 0a826d7be10e..000000000000
--- a/drivers/char/ds1286.c
+++ /dev/null
@@ -1,585 +0,0 @@
1/*
2 * DS1286 Real Time Clock interface for Linux
3 *
4 * Copyright (C) 1998, 1999, 2000 Ralf Baechle
5 *
6 * Based on code written by Paul Gortmaker.
7 *
8 * This driver allows use of the real time clock (built into nearly all
9 * computers) from user space. It exports the /dev/rtc interface supporting
10 * various ioctl() and also the /proc/rtc pseudo-file for status
11 * information.
12 *
13 * The ioctls can be used to set the interrupt behaviour and generation rate
14 * from the RTC via IRQ 8. Then the /dev/rtc interface can be used to make
15 * use of these timer interrupts, be they interval or alarm based.
16 *
17 * The /dev/rtc interface will block on reads until an interrupt has been
18 * received. If a RTC interrupt has already happened, it will output an
19 * unsigned long and then block. The output value contains the interrupt
20 * status in the low byte and the number of interrupts since the last read
21 * in the remaining high bytes. The /dev/rtc interface can also be used with
22 * the select(2) call.
23 *
24 * This program is free software; you can redistribute it and/or modify it
25 * under the terms of the GNU General Public License as published by the
26 * Free Software Foundation; either version 2 of the License, or (at your
27 * option) any later version.
28 */
29#include <linux/ds1286.h>
30#include <linux/smp_lock.h>
31#include <linux/types.h>
32#include <linux/errno.h>
33#include <linux/miscdevice.h>
34#include <linux/slab.h>
35#include <linux/ioport.h>
36#include <linux/fcntl.h>
37#include <linux/init.h>
38#include <linux/poll.h>
39#include <linux/rtc.h>
40#include <linux/spinlock.h>
41#include <linux/bcd.h>
42#include <linux/proc_fs.h>
43#include <linux/jiffies.h>
44
45#include <asm/uaccess.h>
46#include <asm/system.h>
47
48#define DS1286_VERSION "1.0"
49
50/*
51 * We sponge a minor off of the misc major. No need slurping
52 * up another valuable major dev number for this. If you add
53 * an ioctl, make sure you don't conflict with SPARC's RTC
54 * ioctls.
55 */
56
57static DECLARE_WAIT_QUEUE_HEAD(ds1286_wait);
58
59static ssize_t ds1286_read(struct file *file, char *buf,
60 size_t count, loff_t *ppos);
61
62static int ds1286_ioctl(struct inode *inode, struct file *file,
63 unsigned int cmd, unsigned long arg);
64
65static unsigned int ds1286_poll(struct file *file, poll_table *wait);
66
67static void ds1286_get_alm_time (struct rtc_time *alm_tm);
68static void ds1286_get_time(struct rtc_time *rtc_tm);
69static int ds1286_set_time(struct rtc_time *rtc_tm);
70
71static inline unsigned char ds1286_is_updating(void);
72
73static DEFINE_SPINLOCK(ds1286_lock);
74
75static int ds1286_read_proc(char *page, char **start, off_t off,
76 int count, int *eof, void *data);
77
78/*
79 * Bits in rtc_status. (7 bits of room for future expansion)
80 */
81
82#define RTC_IS_OPEN 0x01 /* means /dev/rtc is in use */
83#define RTC_TIMER_ON 0x02 /* missed irq timer active */
84
85static unsigned char ds1286_status; /* bitmapped status byte. */
86
87static unsigned char days_in_mo[] = {
88 0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
89};
90
91/*
92 * Now all the various file operations that we export.
93 */
94
95static ssize_t ds1286_read(struct file *file, char *buf,
96 size_t count, loff_t *ppos)
97{
98 return -EIO;
99}
100
101static int ds1286_ioctl(struct inode *inode, struct file *file,
102 unsigned int cmd, unsigned long arg)
103{
104 struct rtc_time wtime;
105
106 switch (cmd) {
107 case RTC_AIE_OFF: /* Mask alarm int. enab. bit */
108 {
109 unsigned long flags;
110 unsigned char val;
111
112 if (!capable(CAP_SYS_TIME))
113 return -EACCES;
114
115 spin_lock_irqsave(&ds1286_lock, flags);
116 val = rtc_read(RTC_CMD);
117 val |= RTC_TDM;
118 rtc_write(val, RTC_CMD);
119 spin_unlock_irqrestore(&ds1286_lock, flags);
120
121 return 0;
122 }
123 case RTC_AIE_ON: /* Allow alarm interrupts. */
124 {
125 unsigned long flags;
126 unsigned char val;
127
128 if (!capable(CAP_SYS_TIME))
129 return -EACCES;
130
131 spin_lock_irqsave(&ds1286_lock, flags);
132 val = rtc_read(RTC_CMD);
133 val &= ~RTC_TDM;
134 rtc_write(val, RTC_CMD);
135 spin_unlock_irqrestore(&ds1286_lock, flags);
136
137 return 0;
138 }
139 case RTC_WIE_OFF: /* Mask watchdog int. enab. bit */
140 {
141 unsigned long flags;
142 unsigned char val;
143
144 if (!capable(CAP_SYS_TIME))
145 return -EACCES;
146
147 spin_lock_irqsave(&ds1286_lock, flags);
148 val = rtc_read(RTC_CMD);
149 val |= RTC_WAM;
150 rtc_write(val, RTC_CMD);
151 spin_unlock_irqrestore(&ds1286_lock, flags);
152
153 return 0;
154 }
155 case RTC_WIE_ON: /* Allow watchdog interrupts. */
156 {
157 unsigned long flags;
158 unsigned char val;
159
160 if (!capable(CAP_SYS_TIME))
161 return -EACCES;
162
163 spin_lock_irqsave(&ds1286_lock, flags);
164 val = rtc_read(RTC_CMD);
165 val &= ~RTC_WAM;
166 rtc_write(val, RTC_CMD);
167 spin_unlock_irqrestore(&ds1286_lock, flags);
168
169 return 0;
170 }
171 case RTC_ALM_READ: /* Read the present alarm time */
172 {
173 /*
174 * This returns a struct rtc_time. Reading >= 0xc0
175 * means "don't care" or "match all". Only the tm_hour,
176 * tm_min, and tm_sec values are filled in.
177 */
178
179 memset(&wtime, 0, sizeof(wtime));
180 ds1286_get_alm_time(&wtime);
181 break;
182 }
183 case RTC_ALM_SET: /* Store a time into the alarm */
184 {
185 /*
186 * This expects a struct rtc_time. Writing 0xff means
187 * "don't care" or "match all". Only the tm_hour,
188 * tm_min and tm_sec are used.
189 */
190 unsigned char hrs, min, sec;
191 struct rtc_time alm_tm;
192
193 if (!capable(CAP_SYS_TIME))
194 return -EACCES;
195
196 if (copy_from_user(&alm_tm, (struct rtc_time*)arg,
197 sizeof(struct rtc_time)))
198 return -EFAULT;
199
200 hrs = alm_tm.tm_hour;
201 min = alm_tm.tm_min;
202 sec = alm_tm.tm_sec;
203
204 if (hrs >= 24)
205 hrs = 0xff;
206
207 if (min >= 60)
208 min = 0xff;
209
210 if (sec != 0)
211 return -EINVAL;
212
213 min = bin2bcd(min);
214 min = bin2bcd(hrs);
215
216 spin_lock(&ds1286_lock);
217 rtc_write(hrs, RTC_HOURS_ALARM);
218 rtc_write(min, RTC_MINUTES_ALARM);
219 spin_unlock(&ds1286_lock);
220
221 return 0;
222 }
223 case RTC_RD_TIME: /* Read the time/date from RTC */
224 {
225 memset(&wtime, 0, sizeof(wtime));
226 ds1286_get_time(&wtime);
227 break;
228 }
229 case RTC_SET_TIME: /* Set the RTC */
230 {
231 struct rtc_time rtc_tm;
232
233 if (!capable(CAP_SYS_TIME))
234 return -EACCES;
235
236 if (copy_from_user(&rtc_tm, (struct rtc_time*)arg,
237 sizeof(struct rtc_time)))
238 return -EFAULT;
239
240 return ds1286_set_time(&rtc_tm);
241 }
242 default:
243 return -EINVAL;
244 }
245 return copy_to_user((void *)arg, &wtime, sizeof wtime) ? -EFAULT : 0;
246}
247
248/*
249 * We enforce only one user at a time here with the open/close.
250 * Also clear the previous interrupt data on an open, and clean
251 * up things on a close.
252 */
253
254static int ds1286_open(struct inode *inode, struct file *file)
255{
256 lock_kernel();
257 spin_lock_irq(&ds1286_lock);
258
259 if (ds1286_status & RTC_IS_OPEN)
260 goto out_busy;
261
262 ds1286_status |= RTC_IS_OPEN;
263
264 spin_unlock_irq(&ds1286_lock);
265 unlock_kernel();
266 return 0;
267
268out_busy:
269 spin_lock_irq(&ds1286_lock);
270 unlock_kernel();
271 return -EBUSY;
272}
273
274static int ds1286_release(struct inode *inode, struct file *file)
275{
276 ds1286_status &= ~RTC_IS_OPEN;
277
278 return 0;
279}
280
281static unsigned int ds1286_poll(struct file *file, poll_table *wait)
282{
283 poll_wait(file, &ds1286_wait, wait);
284
285 return 0;
286}
287
288/*
289 * The various file operations we support.
290 */
291
292static const struct file_operations ds1286_fops = {
293 .llseek = no_llseek,
294 .read = ds1286_read,
295 .poll = ds1286_poll,
296 .ioctl = ds1286_ioctl,
297 .open = ds1286_open,
298 .release = ds1286_release,
299};
300
301static struct miscdevice ds1286_dev=
302{
303 .minor = RTC_MINOR,
304 .name = "rtc",
305 .fops = &ds1286_fops,
306};
307
308static int __init ds1286_init(void)
309{
310 int err;
311
312 printk(KERN_INFO "DS1286 Real Time Clock Driver v%s\n", DS1286_VERSION);
313
314 err = misc_register(&ds1286_dev);
315 if (err)
316 goto out;
317
318 if (!create_proc_read_entry("driver/rtc", 0, 0, ds1286_read_proc, NULL)) {
319 err = -ENOMEM;
320
321 goto out_deregister;
322 }
323
324 return 0;
325
326out_deregister:
327 misc_deregister(&ds1286_dev);
328
329out:
330 return err;
331}
332
333static void __exit ds1286_exit(void)
334{
335 remove_proc_entry("driver/rtc", NULL);
336 misc_deregister(&ds1286_dev);
337}
338
339static char *days[] = {
340 "***", "Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"
341};
342
343/*
344 * Info exported via "/proc/rtc".
345 */
346static int ds1286_proc_output(char *buf)
347{
348 char *p, *s;
349 struct rtc_time tm;
350 unsigned char hundredth, month, cmd, amode;
351
352 p = buf;
353
354 ds1286_get_time(&tm);
355 hundredth = rtc_read(RTC_HUNDREDTH_SECOND);
356 hundredth = bcd2bin(hundredth);
357
358 p += sprintf(p,
359 "rtc_time\t: %02d:%02d:%02d.%02d\n"
360 "rtc_date\t: %04d-%02d-%02d\n",
361 tm.tm_hour, tm.tm_min, tm.tm_sec, hundredth,
362 tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday);
363
364 /*
365 * We implicitly assume 24hr mode here. Alarm values >= 0xc0 will
366 * match any value for that particular field. Values that are
367 * greater than a valid time, but less than 0xc0 shouldn't appear.
368 */
369 ds1286_get_alm_time(&tm);
370 p += sprintf(p, "alarm\t\t: %s ", days[tm.tm_wday]);
371 if (tm.tm_hour <= 24)
372 p += sprintf(p, "%02d:", tm.tm_hour);
373 else
374 p += sprintf(p, "**:");
375
376 if (tm.tm_min <= 59)
377 p += sprintf(p, "%02d\n", tm.tm_min);
378 else
379 p += sprintf(p, "**\n");
380
381 month = rtc_read(RTC_MONTH);
382 p += sprintf(p,
383 "oscillator\t: %s\n"
384 "square_wave\t: %s\n",
385 (month & RTC_EOSC) ? "disabled" : "enabled",
386 (month & RTC_ESQW) ? "disabled" : "enabled");
387
388 amode = ((rtc_read(RTC_MINUTES_ALARM) & 0x80) >> 5) |
389 ((rtc_read(RTC_HOURS_ALARM) & 0x80) >> 6) |
390 ((rtc_read(RTC_DAY_ALARM) & 0x80) >> 7);
391 if (amode == 7) s = "each minute";
392 else if (amode == 3) s = "minutes match";
393 else if (amode == 1) s = "hours and minutes match";
394 else if (amode == 0) s = "days, hours and minutes match";
395 else s = "invalid";
396 p += sprintf(p, "alarm_mode\t: %s\n", s);
397
398 cmd = rtc_read(RTC_CMD);
399 p += sprintf(p,
400 "alarm_enable\t: %s\n"
401 "wdog_alarm\t: %s\n"
402 "alarm_mask\t: %s\n"
403 "wdog_alarm_mask\t: %s\n"
404 "interrupt_mode\t: %s\n"
405 "INTB_mode\t: %s_active\n"
406 "interrupt_pins\t: %s\n",
407 (cmd & RTC_TDF) ? "yes" : "no",
408 (cmd & RTC_WAF) ? "yes" : "no",
409 (cmd & RTC_TDM) ? "disabled" : "enabled",
410 (cmd & RTC_WAM) ? "disabled" : "enabled",
411 (cmd & RTC_PU_LVL) ? "pulse" : "level",
412 (cmd & RTC_IBH_LO) ? "low" : "high",
413 (cmd & RTC_IPSW) ? "unswapped" : "swapped");
414
415 return p - buf;
416}
417
418static int ds1286_read_proc(char *page, char **start, off_t off,
419 int count, int *eof, void *data)
420{
421 int len = ds1286_proc_output (page);
422 if (len <= off+count) *eof = 1;
423 *start = page + off;
424 len -= off;
425 if (len>count)
426 len = count;
427 if (len<0)
428 len = 0;
429
430 return len;
431}
432
433/*
434 * Returns true if a clock update is in progress
435 */
436static inline unsigned char ds1286_is_updating(void)
437{
438 return rtc_read(RTC_CMD) & RTC_TE;
439}
440
441
442static void ds1286_get_time(struct rtc_time *rtc_tm)
443{
444 unsigned char save_control;
445 unsigned long flags;
446
447 /*
448 * read RTC once any update in progress is done. The update
449 * can take just over 2ms. We wait 10 to 20ms. There is no need to
450 * to poll-wait (up to 1s - eeccch) for the falling edge of RTC_UIP.
451 * If you need to know *exactly* when a second has started, enable
452 * periodic update complete interrupts, (via ioctl) and then
453 * immediately read /dev/rtc which will block until you get the IRQ.
454 * Once the read clears, read the RTC time (again via ioctl). Easy.
455 */
456
457 if (ds1286_is_updating() != 0)
458 msleep(20);
459
460 /*
461 * Only the values that we read from the RTC are set. We leave
462 * tm_wday, tm_yday and tm_isdst untouched. Even though the
463 * RTC has RTC_DAY_OF_WEEK, we ignore it, as it is only updated
464 * by the RTC when initially set to a non-zero value.
465 */
466 spin_lock_irqsave(&ds1286_lock, flags);
467 save_control = rtc_read(RTC_CMD);
468 rtc_write((save_control|RTC_TE), RTC_CMD);
469
470 rtc_tm->tm_sec = rtc_read(RTC_SECONDS);
471 rtc_tm->tm_min = rtc_read(RTC_MINUTES);
472 rtc_tm->tm_hour = rtc_read(RTC_HOURS) & 0x3f;
473 rtc_tm->tm_mday = rtc_read(RTC_DATE);
474 rtc_tm->tm_mon = rtc_read(RTC_MONTH) & 0x1f;
475 rtc_tm->tm_year = rtc_read(RTC_YEAR);
476
477 rtc_write(save_control, RTC_CMD);
478 spin_unlock_irqrestore(&ds1286_lock, flags);
479
480 rtc_tm->tm_sec = bcd2bin(rtc_tm->tm_sec);
481 rtc_tm->tm_min = bcd2bin(rtc_tm->tm_min);
482 rtc_tm->tm_hour = bcd2bin(rtc_tm->tm_hour);
483 rtc_tm->tm_mday = bcd2bin(rtc_tm->tm_mday);
484 rtc_tm->tm_mon = bcd2bin(rtc_tm->tm_mon);
485 rtc_tm->tm_year = bcd2bin(rtc_tm->tm_year);
486
487 /*
488 * Account for differences between how the RTC uses the values
489 * and how they are defined in a struct rtc_time;
490 */
491 if (rtc_tm->tm_year < 45)
492 rtc_tm->tm_year += 30;
493 if ((rtc_tm->tm_year += 40) < 70)
494 rtc_tm->tm_year += 100;
495
496 rtc_tm->tm_mon--;
497}
498
499static int ds1286_set_time(struct rtc_time *rtc_tm)
500{
501 unsigned char mon, day, hrs, min, sec, leap_yr;
502 unsigned char save_control;
503 unsigned int yrs;
504 unsigned long flags;
505
506
507 yrs = rtc_tm->tm_year + 1900;
508 mon = rtc_tm->tm_mon + 1; /* tm_mon starts at zero */
509 day = rtc_tm->tm_mday;
510 hrs = rtc_tm->tm_hour;
511 min = rtc_tm->tm_min;
512 sec = rtc_tm->tm_sec;
513
514 if (yrs < 1970)
515 return -EINVAL;
516
517 leap_yr = ((!(yrs % 4) && (yrs % 100)) || !(yrs % 400));
518
519 if ((mon > 12) || (day == 0))
520 return -EINVAL;
521
522 if (day > (days_in_mo[mon] + ((mon == 2) && leap_yr)))
523 return -EINVAL;
524
525 if ((hrs >= 24) || (min >= 60) || (sec >= 60))
526 return -EINVAL;
527
528 if ((yrs -= 1940) > 255) /* They are unsigned */
529 return -EINVAL;
530
531 if (yrs >= 100)
532 yrs -= 100;
533
534 sec = bin2bcd(sec);
535 min = bin2bcd(min);
536 hrs = bin2bcd(hrs);
537 day = bin2bcd(day);
538 mon = bin2bcd(mon);
539 yrs = bin2bcd(yrs);
540
541 spin_lock_irqsave(&ds1286_lock, flags);
542 save_control = rtc_read(RTC_CMD);
543 rtc_write((save_control|RTC_TE), RTC_CMD);
544
545 rtc_write(yrs, RTC_YEAR);
546 rtc_write(mon, RTC_MONTH);
547 rtc_write(day, RTC_DATE);
548 rtc_write(hrs, RTC_HOURS);
549 rtc_write(min, RTC_MINUTES);
550 rtc_write(sec, RTC_SECONDS);
551 rtc_write(0, RTC_HUNDREDTH_SECOND);
552
553 rtc_write(save_control, RTC_CMD);
554 spin_unlock_irqrestore(&ds1286_lock, flags);
555
556 return 0;
557}
558
559static void ds1286_get_alm_time(struct rtc_time *alm_tm)
560{
561 unsigned char cmd;
562 unsigned long flags;
563
564 /*
565 * Only the values that we read from the RTC are set. That
566 * means only tm_wday, tm_hour, tm_min.
567 */
568 spin_lock_irqsave(&ds1286_lock, flags);
569 alm_tm->tm_min = rtc_read(RTC_MINUTES_ALARM) & 0x7f;
570 alm_tm->tm_hour = rtc_read(RTC_HOURS_ALARM) & 0x1f;
571 alm_tm->tm_wday = rtc_read(RTC_DAY_ALARM) & 0x07;
572 cmd = rtc_read(RTC_CMD);
573 spin_unlock_irqrestore(&ds1286_lock, flags);
574
575 alm_tm->tm_min = bcd2bin(alm_tm->tm_min);
576 alm_tm->tm_hour = bcd2bin(alm_tm->tm_hour);
577 alm_tm->tm_sec = 0;
578}
579
580module_init(ds1286_init);
581module_exit(ds1286_exit);
582
583MODULE_AUTHOR("Ralf Baechle");
584MODULE_LICENSE("GPL");
585MODULE_ALIAS_MISCDEV(RTC_MINOR);
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 408f5f92cb4e..53fdc7ff3870 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -427,9 +427,6 @@ static int hpet_release(struct inode *inode, struct file *file)
427 if (irq) 427 if (irq)
428 free_irq(irq, devp); 428 free_irq(irq, devp);
429 429
430 if (file->f_flags & FASYNC)
431 hpet_fasync(-1, file, 0);
432
433 file->private_data = NULL; 430 file->private_data = NULL;
434 return 0; 431 return 0;
435} 432}
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index bf70450a49cc..5b819b12675a 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -161,7 +161,7 @@ static void hvc_console_print(struct console *co, const char *b,
161 } 161 }
162 } else { 162 } else {
163 r = cons_ops[index]->put_chars(vtermnos[index], c, i); 163 r = cons_ops[index]->put_chars(vtermnos[index], c, i);
164 if (r < 0) { 164 if (r <= 0) {
165 /* throw away chars on error */ 165 /* throw away chars on error */
166 i = 0; 166 i = 0;
167 } else if (r > 0) { 167 } else if (r > 0) {
@@ -374,6 +374,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
374 if (hp->ops->notifier_del) 374 if (hp->ops->notifier_del)
375 hp->ops->notifier_del(hp, hp->data); 375 hp->ops->notifier_del(hp, hp->data);
376 376
377 /* cancel pending tty resize work */
378 cancel_work_sync(&hp->tty_resize);
379
377 /* 380 /*
378 * Chain calls chars_in_buffer() and returns immediately if 381 * Chain calls chars_in_buffer() and returns immediately if
379 * there is no buffered data otherwise sleeps on a wait queue 382 * there is no buffered data otherwise sleeps on a wait queue
@@ -399,6 +402,9 @@ static void hvc_hangup(struct tty_struct *tty)
399 if (!hp) 402 if (!hp)
400 return; 403 return;
401 404
405 /* cancel pending tty resize work */
406 cancel_work_sync(&hp->tty_resize);
407
402 spin_lock_irqsave(&hp->lock, flags); 408 spin_lock_irqsave(&hp->lock, flags);
403 409
404 /* 410 /*
@@ -418,8 +424,8 @@ static void hvc_hangup(struct tty_struct *tty)
418 424
419 spin_unlock_irqrestore(&hp->lock, flags); 425 spin_unlock_irqrestore(&hp->lock, flags);
420 426
421 if (hp->ops->notifier_del) 427 if (hp->ops->notifier_hangup)
422 hp->ops->notifier_del(hp, hp->data); 428 hp->ops->notifier_hangup(hp, hp->data);
423 429
424 while(temp_open_count) { 430 while(temp_open_count) {
425 --temp_open_count; 431 --temp_open_count;
@@ -431,7 +437,7 @@ static void hvc_hangup(struct tty_struct *tty)
431 * Push buffered characters whether they were just recently buffered or waiting 437 * Push buffered characters whether they were just recently buffered or waiting
432 * on a blocked hypervisor. Call this function with hp->lock held. 438 * on a blocked hypervisor. Call this function with hp->lock held.
433 */ 439 */
434static void hvc_push(struct hvc_struct *hp) 440static int hvc_push(struct hvc_struct *hp)
435{ 441{
436 int n; 442 int n;
437 443
@@ -439,7 +445,7 @@ static void hvc_push(struct hvc_struct *hp)
439 if (n <= 0) { 445 if (n <= 0) {
440 if (n == 0) { 446 if (n == 0) {
441 hp->do_wakeup = 1; 447 hp->do_wakeup = 1;
442 return; 448 return 0;
443 } 449 }
444 /* throw away output on error; this happens when 450 /* throw away output on error; this happens when
445 there is no session connected to the vterm. */ 451 there is no session connected to the vterm. */
@@ -450,6 +456,8 @@ static void hvc_push(struct hvc_struct *hp)
450 memmove(hp->outbuf, hp->outbuf + n, hp->n_outbuf); 456 memmove(hp->outbuf, hp->outbuf + n, hp->n_outbuf);
451 else 457 else
452 hp->do_wakeup = 1; 458 hp->do_wakeup = 1;
459
460 return n;
453} 461}
454 462
455static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count) 463static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count)
@@ -492,6 +500,39 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
492 return written; 500 return written;
493} 501}
494 502
503/**
504 * hvc_set_winsz() - Resize the hvc tty terminal window.
505 * @work: work structure.
506 *
507 * The routine shall not be called within an atomic context because it
508 * might sleep.
509 *
510 * Locking: hp->lock
511 */
512static void hvc_set_winsz(struct work_struct *work)
513{
514 struct hvc_struct *hp;
515 unsigned long hvc_flags;
516 struct tty_struct *tty;
517 struct winsize ws;
518
519 hp = container_of(work, struct hvc_struct, tty_resize);
520 if (!hp)
521 return;
522
523 spin_lock_irqsave(&hp->lock, hvc_flags);
524 if (!hp->tty) {
525 spin_unlock_irqrestore(&hp->lock, hvc_flags);
526 return;
527 }
528 ws = hp->ws;
529 tty = tty_kref_get(hp->tty);
530 spin_unlock_irqrestore(&hp->lock, hvc_flags);
531
532 tty_do_resize(tty, tty, &ws);
533 tty_kref_put(tty);
534}
535
495/* 536/*
496 * This is actually a contract between the driver and the tty layer outlining 537 * This is actually a contract between the driver and the tty layer outlining
497 * how much write room the driver can guarantee will be sent OR BUFFERED. This 538 * how much write room the driver can guarantee will be sent OR BUFFERED. This
@@ -538,16 +579,20 @@ int hvc_poll(struct hvc_struct *hp)
538 char buf[N_INBUF] __ALIGNED__; 579 char buf[N_INBUF] __ALIGNED__;
539 unsigned long flags; 580 unsigned long flags;
540 int read_total = 0; 581 int read_total = 0;
582 int written_total = 0;
541 583
542 spin_lock_irqsave(&hp->lock, flags); 584 spin_lock_irqsave(&hp->lock, flags);
543 585
544 /* Push pending writes */ 586 /* Push pending writes */
545 if (hp->n_outbuf > 0) 587 if (hp->n_outbuf > 0)
546 hvc_push(hp); 588 written_total = hvc_push(hp);
547 589
548 /* Reschedule us if still some write pending */ 590 /* Reschedule us if still some write pending */
549 if (hp->n_outbuf > 0) 591 if (hp->n_outbuf > 0) {
550 poll_mask |= HVC_POLL_WRITE; 592 poll_mask |= HVC_POLL_WRITE;
593 /* If hvc_push() was not able to write, sleep a few msecs */
594 timeout = (written_total) ? 0 : MIN_TIMEOUT;
595 }
551 596
552 /* No tty attached, just skip */ 597 /* No tty attached, just skip */
553 tty = hp->tty; 598 tty = hp->tty;
@@ -632,6 +677,24 @@ int hvc_poll(struct hvc_struct *hp)
632} 677}
633EXPORT_SYMBOL_GPL(hvc_poll); 678EXPORT_SYMBOL_GPL(hvc_poll);
634 679
680/**
681 * hvc_resize() - Update terminal window size information.
682 * @hp: HVC console pointer
683 * @ws: Terminal window size structure
684 *
685 * Stores the specified window size information in the hvc structure of @hp.
686 * The function schedules the tty resize update.
687 *
688 * Locking: none taken here; the function MUST be called holding hp->lock
689 */
690void hvc_resize(struct hvc_struct *hp, struct winsize ws)
691{
692 if ((hp->ws.ws_row != ws.ws_row) || (hp->ws.ws_col != ws.ws_col)) {
693 hp->ws = ws;
694 schedule_work(&hp->tty_resize);
695 }
696}
697
635/* 698/*
636 * This kthread is either polling or interrupt driven. This is determined by 699 * This kthread is either polling or interrupt driven. This is determined by
637 * calling hvc_poll() who determines whether a console adapter support 700 * calling hvc_poll() who determines whether a console adapter support
@@ -659,10 +722,6 @@ static int khvcd(void *unused)
659 poll_mask |= HVC_POLL_READ; 722 poll_mask |= HVC_POLL_READ;
660 if (hvc_kicked) 723 if (hvc_kicked)
661 continue; 724 continue;
662 if (poll_mask & HVC_POLL_WRITE) {
663 yield();
664 continue;
665 }
666 set_current_state(TASK_INTERRUPTIBLE); 725 set_current_state(TASK_INTERRUPTIBLE);
667 if (!hvc_kicked) { 726 if (!hvc_kicked) {
668 if (poll_mask == 0) 727 if (poll_mask == 0)
@@ -718,6 +777,7 @@ struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
718 777
719 kref_init(&hp->kref); 778 kref_init(&hp->kref);
720 779
780 INIT_WORK(&hp->tty_resize, hvc_set_winsz);
721 spin_lock_init(&hp->lock); 781 spin_lock_init(&hp->lock);
722 spin_lock(&hvc_structs_lock); 782 spin_lock(&hvc_structs_lock);
723 783
@@ -743,7 +803,7 @@ struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
743} 803}
744EXPORT_SYMBOL_GPL(hvc_alloc); 804EXPORT_SYMBOL_GPL(hvc_alloc);
745 805
746int __devexit hvc_remove(struct hvc_struct *hp) 806int hvc_remove(struct hvc_struct *hp)
747{ 807{
748 unsigned long flags; 808 unsigned long flags;
749 struct tty_struct *tty; 809 struct tty_struct *tty;
@@ -796,7 +856,7 @@ static int hvc_init(void)
796 drv->minor_start = HVC_MINOR; 856 drv->minor_start = HVC_MINOR;
797 drv->type = TTY_DRIVER_TYPE_SYSTEM; 857 drv->type = TTY_DRIVER_TYPE_SYSTEM;
798 drv->init_termios = tty_std_termios; 858 drv->init_termios = tty_std_termios;
799 drv->flags = TTY_DRIVER_REAL_RAW; 859 drv->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_RESET_TERMIOS;
800 tty_set_operations(drv, &hvc_ops); 860 tty_set_operations(drv, &hvc_ops);
801 861
802 /* Always start the kthread because there can be hotplug vty adapters 862 /* Always start the kthread because there can be hotplug vty adapters
diff --git a/drivers/char/hvc_console.h b/drivers/char/hvc_console.h
index 9790201718ae..8297dbc2e6ec 100644
--- a/drivers/char/hvc_console.h
+++ b/drivers/char/hvc_console.h
@@ -27,6 +27,7 @@
27#ifndef HVC_CONSOLE_H 27#ifndef HVC_CONSOLE_H
28#define HVC_CONSOLE_H 28#define HVC_CONSOLE_H
29#include <linux/kref.h> 29#include <linux/kref.h>
30#include <linux/tty.h>
30 31
31/* 32/*
32 * This is the max number of console adapters that can/will be found as 33 * This is the max number of console adapters that can/will be found as
@@ -56,6 +57,8 @@ struct hvc_struct {
56 struct hv_ops *ops; 57 struct hv_ops *ops;
57 int irq_requested; 58 int irq_requested;
58 int data; 59 int data;
60 struct winsize ws;
61 struct work_struct tty_resize;
59 struct list_head next; 62 struct list_head next;
60 struct kref kref; /* ref count & hvc_struct lifetime */ 63 struct kref kref; /* ref count & hvc_struct lifetime */
61}; 64};
@@ -65,9 +68,10 @@ struct hv_ops {
65 int (*get_chars)(uint32_t vtermno, char *buf, int count); 68 int (*get_chars)(uint32_t vtermno, char *buf, int count);
66 int (*put_chars)(uint32_t vtermno, const char *buf, int count); 69 int (*put_chars)(uint32_t vtermno, const char *buf, int count);
67 70
68 /* Callbacks for notification. Called in open and close */ 71 /* Callbacks for notification. Called in open, close and hangup */
69 int (*notifier_add)(struct hvc_struct *hp, int irq); 72 int (*notifier_add)(struct hvc_struct *hp, int irq);
70 void (*notifier_del)(struct hvc_struct *hp, int irq); 73 void (*notifier_del)(struct hvc_struct *hp, int irq);
74 void (*notifier_hangup)(struct hvc_struct *hp, int irq);
71}; 75};
72 76
73/* Register a vterm and a slot index for use as a console (console_init) */ 77/* Register a vterm and a slot index for use as a console (console_init) */
@@ -77,15 +81,19 @@ extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
77extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data, 81extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data,
78 struct hv_ops *ops, int outbuf_size); 82 struct hv_ops *ops, int outbuf_size);
79/* remove a vterm from hvc tty operation (module_exit or hotplug remove) */ 83/* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
80extern int __devexit hvc_remove(struct hvc_struct *hp); 84extern int hvc_remove(struct hvc_struct *hp);
81 85
82/* data available */ 86/* data available */
83int hvc_poll(struct hvc_struct *hp); 87int hvc_poll(struct hvc_struct *hp);
84void hvc_kick(void); 88void hvc_kick(void);
85 89
90/* Resize hvc tty terminal window */
91extern void hvc_resize(struct hvc_struct *hp, struct winsize ws);
92
86/* default notifier for irq based notification */ 93/* default notifier for irq based notification */
87extern int notifier_add_irq(struct hvc_struct *hp, int data); 94extern int notifier_add_irq(struct hvc_struct *hp, int data);
88extern void notifier_del_irq(struct hvc_struct *hp, int data); 95extern void notifier_del_irq(struct hvc_struct *hp, int data);
96extern void notifier_hangup_irq(struct hvc_struct *hp, int data);
89 97
90 98
91#if defined(CONFIG_XMON) && defined(CONFIG_SMP) 99#if defined(CONFIG_XMON) && defined(CONFIG_SMP)
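The two hvc hunks above introduce a small API: a backend stores a new window size with hvc_resize(), which must be called with hp->lock held and only schedules the deferred resize work, and it can point the new notifier_hangup callback at notifier_hangup_irq so the irq is released on hangup as well as on close. The deferred work matters because the actual resize may sleep, which is why hvc_set_winsz() runs from a workqueue rather than in the caller's possibly atomic context. A purely illustrative backend sketch follows; the example_* names and the size-change event are invented and not part of this patch, only hvc_resize(), the notifier_*_irq helpers and the hvc_struct/hv_ops fields come from the header above.

/*
 * Illustrative backend sketch; not part of this patch.
 */
#include <linux/spinlock.h>
#include <linux/types.h>
#include "hvc_console.h"

static int example_get_chars(uint32_t vtermno, char *buf, int count)
{
        return 0;                       /* stub: no input pending */
}

static int example_put_chars(uint32_t vtermno, const char *buf, int count)
{
        return count;                   /* stub: pretend all output was accepted */
}

static struct hv_ops example_hv_ops = {
        .get_chars       = example_get_chars,
        .put_chars       = example_put_chars,
        .notifier_add    = notifier_add_irq,
        .notifier_del    = notifier_del_irq,
        .notifier_hangup = notifier_hangup_irq, /* new: free the irq on hangup too */
};

/* Hypothetical event handler: the host reported a new terminal geometry. */
static void example_window_size_changed(struct hvc_struct *hp, u16 rows, u16 cols)
{
        struct winsize ws = { .ws_row = rows, .ws_col = cols };
        unsigned long flags;

        spin_lock_irqsave(&hp->lock, flags);    /* hvc_resize() expects hp->lock held */
        hvc_resize(hp, ws);                     /* store ws, schedule hp->tty_resize */
        spin_unlock_irqrestore(&hp->lock, flags);
}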
diff --git a/drivers/char/hvc_irq.c b/drivers/char/hvc_irq.c
index 73a59cdb8947..d09e5688d449 100644
--- a/drivers/char/hvc_irq.c
+++ b/drivers/char/hvc_irq.c
@@ -42,3 +42,8 @@ void notifier_del_irq(struct hvc_struct *hp, int irq)
42 free_irq(irq, hp); 42 free_irq(irq, hp);
43 hp->irq_requested = 0; 43 hp->irq_requested = 0;
44} 44}
45
46void notifier_hangup_irq(struct hvc_struct *hp, int irq)
47{
48 notifier_del_irq(hp, irq);
49}
diff --git a/drivers/char/hvc_iseries.c b/drivers/char/hvc_iseries.c
index b71c610fe5ae..b74a2f8ab908 100644
--- a/drivers/char/hvc_iseries.c
+++ b/drivers/char/hvc_iseries.c
@@ -202,6 +202,7 @@ static struct hv_ops hvc_get_put_ops = {
202 .put_chars = put_chars, 202 .put_chars = put_chars,
203 .notifier_add = notifier_add_irq, 203 .notifier_add = notifier_add_irq,
204 .notifier_del = notifier_del_irq, 204 .notifier_del = notifier_del_irq,
205 .notifier_hangup = notifier_hangup_irq,
205}; 206};
206 207
207static int __devinit hvc_vio_probe(struct vio_dev *vdev, 208static int __devinit hvc_vio_probe(struct vio_dev *vdev,
diff --git a/drivers/char/hvc_vio.c b/drivers/char/hvc_vio.c
index 93f3840c1682..019e0b58593d 100644
--- a/drivers/char/hvc_vio.c
+++ b/drivers/char/hvc_vio.c
@@ -82,6 +82,7 @@ static struct hv_ops hvc_get_put_ops = {
82 .put_chars = hvc_put_chars, 82 .put_chars = hvc_put_chars,
83 .notifier_add = notifier_add_irq, 83 .notifier_add = notifier_add_irq,
84 .notifier_del = notifier_del_irq, 84 .notifier_del = notifier_del_irq,
85 .notifier_hangup = notifier_hangup_irq,
85}; 86};
86 87
87static int __devinit hvc_vio_probe(struct vio_dev *vdev, 88static int __devinit hvc_vio_probe(struct vio_dev *vdev,
diff --git a/drivers/char/hvc_xen.c b/drivers/char/hvc_xen.c
index 538ceea5e7df..eba999f8598d 100644
--- a/drivers/char/hvc_xen.c
+++ b/drivers/char/hvc_xen.c
@@ -102,6 +102,7 @@ static struct hv_ops hvc_ops = {
102 .put_chars = write_console, 102 .put_chars = write_console,
103 .notifier_add = notifier_add_irq, 103 .notifier_add = notifier_add_irq,
104 .notifier_del = notifier_del_irq, 104 .notifier_del = notifier_del_irq,
105 .notifier_hangup = notifier_hangup_irq,
105}; 106};
106 107
107static int __init xen_init(void) 108static int __init xen_init(void)
diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c
index c422e870dc52..cd0ba51f7c80 100644
--- a/drivers/char/hw_random/amd-rng.c
+++ b/drivers/char/hw_random/amd-rng.c
@@ -11,7 +11,7 @@
11 * derived from 11 * derived from
12 * 12 *
13 * Hardware driver for the AMD 768 Random Number Generator (RNG) 13 * Hardware driver for the AMD 768 Random Number Generator (RNG)
14 * (c) Copyright 2001 Red Hat Inc <alan@redhat.com> 14 * (c) Copyright 2001 Red Hat Inc
15 * 15 *
16 * derived from 16 * derived from
17 * 17 *
diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c
index fed4ef5569f5..64d513f68368 100644
--- a/drivers/char/hw_random/geode-rng.c
+++ b/drivers/char/hw_random/geode-rng.c
@@ -11,7 +11,7 @@
11 * derived from 11 * derived from
12 * 12 *
13 * Hardware driver for the AMD 768 Random Number Generator (RNG) 13 * Hardware driver for the AMD 768 Random Number Generator (RNG)
14 * (c) Copyright 2001 Red Hat Inc <alan@redhat.com> 14 * (c) Copyright 2001 Red Hat Inc
15 * 15 *
16 * derived from 16 * derived from
17 * 17 *
diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c
index 8a2fce0756ec..5dcbe603eca2 100644
--- a/drivers/char/hw_random/intel-rng.c
+++ b/drivers/char/hw_random/intel-rng.c
@@ -11,7 +11,7 @@
11 * derived from 11 * derived from
12 * 12 *
13 * Hardware driver for the AMD 768 Random Number Generator (RNG) 13 * Hardware driver for the AMD 768 Random Number Generator (RNG)
14 * (c) Copyright 2001 Red Hat Inc <alan@redhat.com> 14 * (c) Copyright 2001 Red Hat Inc
15 * 15 *
16 * derived from 16 * derived from
17 * 17 *
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
index 128202e18fc9..4e9573c1d39e 100644
--- a/drivers/char/hw_random/via-rng.c
+++ b/drivers/char/hw_random/via-rng.c
@@ -11,7 +11,7 @@
11 * derived from 11 * derived from
12 * 12 *
13 * Hardware driver for the AMD 768 Random Number Generator (RNG) 13 * Hardware driver for the AMD 768 Random Number Generator (RNG)
14 * (c) Copyright 2001 Red Hat Inc <alan@redhat.com> 14 * (c) Copyright 2001 Red Hat Inc
15 * 15 *
16 * derived from 16 * derived from
17 * 17 *
diff --git a/drivers/char/ip27-rtc.c b/drivers/char/ip27-rtc.c
deleted file mode 100644
index 2abd881b4cbc..000000000000
--- a/drivers/char/ip27-rtc.c
+++ /dev/null
@@ -1,329 +0,0 @@
1/*
2 * Driver for the SGS-Thomson M48T35 Timekeeper RAM chip
3 *
4 * Real Time Clock interface for Linux
5 *
6 * TODO: Implement periodic interrupts.
7 *
8 * Copyright (C) 2000 Silicon Graphics, Inc.
9 * Written by Ulf Carlsson (ulfc@engr.sgi.com)
10 *
11 * Based on code written by Paul Gortmaker.
12 *
13 * This driver allows use of the real time clock (built into
14 * nearly all computers) from user space. It exports the /dev/rtc
15 * interface supporting various ioctl() and also the /proc/rtc
16 * pseudo-file for status information.
17 *
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation; either version
21 * 2 of the License, or (at your option) any later version.
22 *
23 */
24
25#define RTC_VERSION "1.09b"
26
27#include <linux/bcd.h>
28#include <linux/module.h>
29#include <linux/kernel.h>
30#include <linux/smp_lock.h>
31#include <linux/types.h>
32#include <linux/miscdevice.h>
33#include <linux/ioport.h>
34#include <linux/fcntl.h>
35#include <linux/rtc.h>
36#include <linux/init.h>
37#include <linux/poll.h>
38#include <linux/proc_fs.h>
39
40#include <asm/m48t35.h>
41#include <asm/sn/ioc3.h>
42#include <asm/io.h>
43#include <asm/uaccess.h>
44#include <asm/system.h>
45#include <asm/sn/klconfig.h>
46#include <asm/sn/sn0/ip27.h>
47#include <asm/sn/sn0/hub.h>
48#include <asm/sn/sn_private.h>
49
50static long rtc_ioctl(struct file *filp, unsigned int cmd,
51 unsigned long arg);
52
53static int rtc_read_proc(char *page, char **start, off_t off,
54 int count, int *eof, void *data);
55
56static void get_rtc_time(struct rtc_time *rtc_tm);
57
58/*
59 * Bits in rtc_status. (6 bits of room for future expansion)
60 */
61
62#define RTC_IS_OPEN 0x01 /* means /dev/rtc is in use */
63#define RTC_TIMER_ON 0x02 /* missed irq timer active */
64
65static unsigned char rtc_status; /* bitmapped status byte. */
66static unsigned long rtc_freq; /* Current periodic IRQ rate */
67static struct m48t35_rtc *rtc;
68
69/*
70 * If this driver ever becomes modularised, it will be really nice
71 * to make the epoch retain its value across module reload...
72 */
73
74static unsigned long epoch = 1970; /* year corresponding to 0x00 */
75
76static const unsigned char days_in_mo[] =
77{0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
78
79static long rtc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
80{
81
82 struct rtc_time wtime;
83
84 switch (cmd) {
85 case RTC_RD_TIME: /* Read the time/date from RTC */
86 {
87 get_rtc_time(&wtime);
88 break;
89 }
90 case RTC_SET_TIME: /* Set the RTC */
91 {
92 struct rtc_time rtc_tm;
93 unsigned char mon, day, hrs, min, sec, leap_yr;
94 unsigned int yrs;
95
96 if (!capable(CAP_SYS_TIME))
97 return -EACCES;
98
99 if (copy_from_user(&rtc_tm, (struct rtc_time*)arg,
100 sizeof(struct rtc_time)))
101 return -EFAULT;
102
103 yrs = rtc_tm.tm_year + 1900;
104 mon = rtc_tm.tm_mon + 1; /* tm_mon starts at zero */
105 day = rtc_tm.tm_mday;
106 hrs = rtc_tm.tm_hour;
107 min = rtc_tm.tm_min;
108 sec = rtc_tm.tm_sec;
109
110 if (yrs < 1970)
111 return -EINVAL;
112
113 leap_yr = ((!(yrs % 4) && (yrs % 100)) || !(yrs % 400));
114
115 if ((mon > 12) || (day == 0))
116 return -EINVAL;
117
118 if (day > (days_in_mo[mon] + ((mon == 2) && leap_yr)))
119 return -EINVAL;
120
121 if ((hrs >= 24) || (min >= 60) || (sec >= 60))
122 return -EINVAL;
123
124 if ((yrs -= epoch) > 255) /* They are unsigned */
125 return -EINVAL;
126
127 if (yrs > 169)
128 return -EINVAL;
129
130 if (yrs >= 100)
131 yrs -= 100;
132
133 sec = bin2bcd(sec);
134 min = bin2bcd(min);
135 hrs = bin2bcd(hrs);
136 day = bin2bcd(day);
137 mon = bin2bcd(mon);
138 yrs = bin2bcd(yrs);
139
140 spin_lock_irq(&rtc_lock);
141 rtc->control |= M48T35_RTC_SET;
142 rtc->year = yrs;
143 rtc->month = mon;
144 rtc->date = day;
145 rtc->hour = hrs;
146 rtc->min = min;
147 rtc->sec = sec;
148 rtc->control &= ~M48T35_RTC_SET;
149 spin_unlock_irq(&rtc_lock);
150
151 return 0;
152 }
153 default:
154 return -EINVAL;
155 }
156 return copy_to_user((void *)arg, &wtime, sizeof wtime) ? -EFAULT : 0;
157}
158
159/*
160 * We enforce only one user at a time here with the open/close.
161 * Also clear the previous interrupt data on an open, and clean
162 * up things on a close.
163 */
164
165static int rtc_open(struct inode *inode, struct file *file)
166{
167 lock_kernel();
168 spin_lock_irq(&rtc_lock);
169
170 if (rtc_status & RTC_IS_OPEN) {
171 spin_unlock_irq(&rtc_lock);
172 unlock_kernel();
173 return -EBUSY;
174 }
175
176 rtc_status |= RTC_IS_OPEN;
177 spin_unlock_irq(&rtc_lock);
178 unlock_kernel();
179
180 return 0;
181}
182
183static int rtc_release(struct inode *inode, struct file *file)
184{
185 /*
186 * Turn off all interrupts once the device is no longer
187 * in use, and clear the data.
188 */
189
190 spin_lock_irq(&rtc_lock);
191 rtc_status &= ~RTC_IS_OPEN;
192 spin_unlock_irq(&rtc_lock);
193
194 return 0;
195}
196
197/*
198 * The various file operations we support.
199 */
200
201static const struct file_operations rtc_fops = {
202 .owner = THIS_MODULE,
203 .unlocked_ioctl = rtc_ioctl,
204 .open = rtc_open,
205 .release = rtc_release,
206};
207
208static struct miscdevice rtc_dev=
209{
210 RTC_MINOR,
211 "rtc",
212 &rtc_fops
213};
214
215static int __init rtc_init(void)
216{
217 rtc = (struct m48t35_rtc *)
218 (KL_CONFIG_CH_CONS_INFO(master_nasid)->memory_base + IOC3_BYTEBUS_DEV0);
219
220 printk(KERN_INFO "Real Time Clock Driver v%s\n", RTC_VERSION);
221 if (misc_register(&rtc_dev)) {
222 printk(KERN_ERR "rtc: cannot register misc device.\n");
223 return -ENODEV;
224 }
225 if (!create_proc_read_entry("driver/rtc", 0, NULL, rtc_read_proc, NULL)) {
226 printk(KERN_ERR "rtc: cannot create /proc/rtc.\n");
227 misc_deregister(&rtc_dev);
228 return -ENOENT;
229 }
230
231 rtc_freq = 1024;
232
233 return 0;
234}
235
236static void __exit rtc_exit (void)
237{
238 /* interrupts and timer disabled at this point by rtc_release */
239
240 remove_proc_entry ("rtc", NULL);
241 misc_deregister(&rtc_dev);
242}
243
244module_init(rtc_init);
245module_exit(rtc_exit);
246
247/*
248 * Info exported via "/proc/rtc".
249 */
250
251static int rtc_get_status(char *buf)
252{
253 char *p;
254 struct rtc_time tm;
255
256 /*
257 * Just emulate the standard /proc/rtc
258 */
259
260 p = buf;
261
262 get_rtc_time(&tm);
263
264 /*
265 * There is no way to tell if the luser has the RTC set for local
266 * time or for Universal Standard Time (GMT). Probably local though.
267 */
268 p += sprintf(p,
269 "rtc_time\t: %02d:%02d:%02d\n"
270 "rtc_date\t: %04d-%02d-%02d\n"
271 "rtc_epoch\t: %04lu\n"
272 "24hr\t\t: yes\n",
273 tm.tm_hour, tm.tm_min, tm.tm_sec,
274 tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, epoch);
275
276 return p - buf;
277}
278
279static int rtc_read_proc(char *page, char **start, off_t off,
280 int count, int *eof, void *data)
281{
282 int len = rtc_get_status(page);
283 if (len <= off+count) *eof = 1;
284 *start = page + off;
285 len -= off;
286 if (len>count) len = count;
287 if (len<0) len = 0;
288 return len;
289}
290
291static void get_rtc_time(struct rtc_time *rtc_tm)
292{
293 /*
294 * Do we need to wait for the last update to finish?
295 */
296
297 /*
298 * Only the values that we read from the RTC are set. We leave
299 * tm_wday, tm_yday and tm_isdst untouched. Even though the
300 * RTC has RTC_DAY_OF_WEEK, we ignore it, as it is only updated
301 * by the RTC when initially set to a non-zero value.
302 */
303 spin_lock_irq(&rtc_lock);
304 rtc->control |= M48T35_RTC_READ;
305 rtc_tm->tm_sec = rtc->sec;
306 rtc_tm->tm_min = rtc->min;
307 rtc_tm->tm_hour = rtc->hour;
308 rtc_tm->tm_mday = rtc->date;
309 rtc_tm->tm_mon = rtc->month;
310 rtc_tm->tm_year = rtc->year;
311 rtc->control &= ~M48T35_RTC_READ;
312 spin_unlock_irq(&rtc_lock);
313
314 rtc_tm->tm_sec = bcd2bin(rtc_tm->tm_sec);
315 rtc_tm->tm_min = bcd2bin(rtc_tm->tm_min);
316 rtc_tm->tm_hour = bcd2bin(rtc_tm->tm_hour);
317 rtc_tm->tm_mday = bcd2bin(rtc_tm->tm_mday);
318 rtc_tm->tm_mon = bcd2bin(rtc_tm->tm_mon);
319 rtc_tm->tm_year = bcd2bin(rtc_tm->tm_year);
320
321 /*
322 * Account for differences between how the RTC uses the values
323 * and how they are defined in a struct rtc_time;
324 */
325 if ((rtc_tm->tm_year += (epoch - 1900)) <= 69)
326 rtc_tm->tm_year += 100;
327
328 rtc_tm->tm_mon--;
329}
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index 835a33c8d5f5..41fc11dc921c 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -162,8 +162,6 @@ static int ipmi_release(struct inode *inode, struct file *file)
162 if (rv) 162 if (rv)
163 return rv; 163 return rv;
164 164
165 ipmi_fasync (-1, file, 0);
166
167 /* FIXME - free the messages in the list. */ 165 /* FIXME - free the messages in the list. */
168 kfree(priv); 166 kfree(priv);
169 167
@@ -957,3 +955,4 @@ module_exit(cleanup_ipmi);
957MODULE_LICENSE("GPL"); 955MODULE_LICENSE("GPL");
958MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>"); 956MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
959MODULE_DESCRIPTION("Linux device interface for the IPMI message handler."); 957MODULE_DESCRIPTION("Linux device interface for the IPMI message handler.");
958MODULE_ALIAS("platform:ipmi_si");
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 8a59aaa21be5..7a88dfd4427b 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -422,9 +422,11 @@ struct ipmi_smi {
422/** 422/**
423 * The driver model view of the IPMI messaging driver. 423 * The driver model view of the IPMI messaging driver.
424 */ 424 */
425static struct device_driver ipmidriver = { 425static struct platform_driver ipmidriver = {
426 .name = "ipmi", 426 .driver = {
427 .bus = &platform_bus_type 427 .name = "ipmi",
428 .bus = &platform_bus_type
429 }
428}; 430};
429static DEFINE_MUTEX(ipmidriver_mutex); 431static DEFINE_MUTEX(ipmidriver_mutex);
430 432
@@ -2384,9 +2386,9 @@ static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum,
2384 * representing the interfaced BMC already 2386 * representing the interfaced BMC already
2385 */ 2387 */
2386 if (bmc->guid_set) 2388 if (bmc->guid_set)
2387 old_bmc = ipmi_find_bmc_guid(&ipmidriver, bmc->guid); 2389 old_bmc = ipmi_find_bmc_guid(&ipmidriver.driver, bmc->guid);
2388 else 2390 else
2389 old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver, 2391 old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
2390 bmc->id.product_id, 2392 bmc->id.product_id,
2391 bmc->id.device_id); 2393 bmc->id.device_id);
2392 2394
@@ -2416,7 +2418,7 @@ static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum,
2416 snprintf(name, sizeof(name), 2418 snprintf(name, sizeof(name),
2417 "ipmi_bmc.%4.4x", bmc->id.product_id); 2419 "ipmi_bmc.%4.4x", bmc->id.product_id);
2418 2420
2419 while (ipmi_find_bmc_prod_dev_id(&ipmidriver, 2421 while (ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
2420 bmc->id.product_id, 2422 bmc->id.product_id,
2421 bmc->id.device_id)) { 2423 bmc->id.device_id)) {
2422 if (!warn_printed) { 2424 if (!warn_printed) {
@@ -2446,7 +2448,7 @@ static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum,
2446 " Unable to allocate platform device\n"); 2448 " Unable to allocate platform device\n");
2447 return -ENOMEM; 2449 return -ENOMEM;
2448 } 2450 }
2449 bmc->dev->dev.driver = &ipmidriver; 2451 bmc->dev->dev.driver = &ipmidriver.driver;
2450 dev_set_drvdata(&bmc->dev->dev, bmc); 2452 dev_set_drvdata(&bmc->dev->dev, bmc);
2451 kref_init(&bmc->refcount); 2453 kref_init(&bmc->refcount);
2452 2454
@@ -4247,7 +4249,7 @@ static int ipmi_init_msghandler(void)
4247 if (initialized) 4249 if (initialized)
4248 return 0; 4250 return 0;
4249 4251
4250 rv = driver_register(&ipmidriver); 4252 rv = driver_register(&ipmidriver.driver);
4251 if (rv) { 4253 if (rv) {
4252 printk(KERN_ERR PFX "Could not register IPMI driver\n"); 4254 printk(KERN_ERR PFX "Could not register IPMI driver\n");
4253 return rv; 4255 return rv;
@@ -4308,7 +4310,7 @@ static __exit void cleanup_ipmi(void)
4308 remove_proc_entry(proc_ipmi_root->name, NULL); 4310 remove_proc_entry(proc_ipmi_root->name, NULL);
4309#endif /* CONFIG_PROC_FS */ 4311#endif /* CONFIG_PROC_FS */
4310 4312
4311 driver_unregister(&ipmidriver); 4313 driver_unregister(&ipmidriver.driver);
4312 4314
4313 initialized = 0; 4315 initialized = 0;
4314 4316
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 3123bf57ad91..3000135f2ead 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -114,9 +114,11 @@ static char *si_to_str[] = { "kcs", "smic", "bt" };
114 114
115#define DEVICE_NAME "ipmi_si" 115#define DEVICE_NAME "ipmi_si"
116 116
117static struct device_driver ipmi_driver = { 117static struct platform_driver ipmi_driver = {
118 .name = DEVICE_NAME, 118 .driver = {
119 .bus = &platform_bus_type 119 .name = DEVICE_NAME,
120 .bus = &platform_bus_type
121 }
120}; 122};
121 123
122 124
@@ -2868,7 +2870,7 @@ static int try_smi_init(struct smi_info *new_smi)
2868 goto out_err; 2870 goto out_err;
2869 } 2871 }
2870 new_smi->dev = &new_smi->pdev->dev; 2872 new_smi->dev = &new_smi->pdev->dev;
2871 new_smi->dev->driver = &ipmi_driver; 2873 new_smi->dev->driver = &ipmi_driver.driver;
2872 2874
2873 rv = platform_device_add(new_smi->pdev); 2875 rv = platform_device_add(new_smi->pdev);
2874 if (rv) { 2876 if (rv) {
@@ -2983,7 +2985,7 @@ static __devinit int init_ipmi_si(void)
2983 initialized = 1; 2985 initialized = 1;
2984 2986
2985 /* Register the device drivers. */ 2987 /* Register the device drivers. */
2986 rv = driver_register(&ipmi_driver); 2988 rv = driver_register(&ipmi_driver.driver);
2987 if (rv) { 2989 if (rv) {
2988 printk(KERN_ERR 2990 printk(KERN_ERR
2989 "init_ipmi_si: Unable to register driver: %d\n", 2991 "init_ipmi_si: Unable to register driver: %d\n",
@@ -3052,7 +3054,7 @@ static __devinit int init_ipmi_si(void)
3052#ifdef CONFIG_PPC_OF 3054#ifdef CONFIG_PPC_OF
3053 of_unregister_platform_driver(&ipmi_of_platform_driver); 3055 of_unregister_platform_driver(&ipmi_of_platform_driver);
3054#endif 3056#endif
3055 driver_unregister(&ipmi_driver); 3057 driver_unregister(&ipmi_driver.driver);
3056 printk(KERN_WARNING 3058 printk(KERN_WARNING
3057 "ipmi_si: Unable to find any System Interface(s)\n"); 3059 "ipmi_si: Unable to find any System Interface(s)\n");
3058 return -ENODEV; 3060 return -ENODEV;
@@ -3151,7 +3153,7 @@ static __exit void cleanup_ipmi_si(void)
3151 cleanup_one_si(e); 3153 cleanup_one_si(e);
3152 mutex_unlock(&smi_infos_lock); 3154 mutex_unlock(&smi_infos_lock);
3153 3155
3154 driver_unregister(&ipmi_driver); 3156 driver_unregister(&ipmi_driver.driver);
3155} 3157}
3156module_exit(cleanup_ipmi_si); 3158module_exit(cleanup_ipmi_si);
3157 3159
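Both IPMI files above apply the same mechanical conversion: the bare struct device_driver that used to sit directly on the platform bus becomes the .driver member of a struct platform_driver, and every registration, unregistration and dev->driver assignment now goes through the embedded &driver.driver. A stripped-down sketch of the pattern, with invented names:

/*
 * Minimal sketch of the embedding pattern used above (names invented).
 */
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>

static struct platform_driver example_driver = {
        .driver = {
                .name = "example",
                .bus  = &platform_bus_type,
        },
};

static int __init example_init(void)
{
        /* The embedded struct device_driver is still what the driver core sees. */
        return driver_register(&example_driver.driver);
}

static void __exit example_exit(void)
{
        driver_unregister(&example_driver.driver);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");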
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 235fab0bdf79..a4d57e31f713 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -870,7 +870,6 @@ static int ipmi_close(struct inode *ino, struct file *filep)
870 clear_bit(0, &ipmi_wdog_open); 870 clear_bit(0, &ipmi_wdog_open);
871 } 871 }
872 872
873 ipmi_fasync(-1, filep, 0);
874 expect_close = 0; 873 expect_close = 0;
875 874
876 return 0; 875 return 0;
diff --git a/drivers/char/isicom.c b/drivers/char/isicom.c
index 7d30ee1d3fca..04e4549299ba 100644
--- a/drivers/char/isicom.c
+++ b/drivers/char/isicom.c
@@ -7,12 +7,14 @@
7 * Original driver code supplied by Multi-Tech 7 * Original driver code supplied by Multi-Tech
8 * 8 *
9 * Changes 9 * Changes
10 * 1/9/98 alan@redhat.com Merge to 2.0.x kernel tree 10 * 1/9/98 alan@lxorguk.ukuu.org.uk
11 * Merge to 2.0.x kernel tree
11 * Obtain and use official major/minors 12 * Obtain and use official major/minors
12 * Loader switched to a misc device 13 * Loader switched to a misc device
13 * (fixed range check bug as a side effect) 14 * (fixed range check bug as a side effect)
14 * Printk clean up 15 * Printk clean up
15 * 9/12/98 alan@redhat.com Rough port to 2.1.x 16 * 9/12/98 alan@lxorguk.ukuu.org.uk
17 * Rough port to 2.1.x
16 * 18 *
17 * 10/6/99 sameer Merged the ISA and PCI drivers to 19 * 10/6/99 sameer Merged the ISA and PCI drivers to
18 * a new unified driver. 20 * a new unified driver.
diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
index 44e5d60f517e..4b10770fa937 100644
--- a/drivers/char/istallion.c
+++ b/drivers/char/istallion.c
@@ -3739,7 +3739,7 @@ static int stli_getbrdnr(void)
3739 * do is go probing around in the usual places hoping we can find it. 3739 * do is go probing around in the usual places hoping we can find it.
3740 */ 3740 */
3741 3741
3742static int stli_findeisabrds(void) 3742static int __init stli_findeisabrds(void)
3743{ 3743{
3744 struct stlibrd *brdp; 3744 struct stlibrd *brdp;
3745 unsigned int iobase, eid, i; 3745 unsigned int iobase, eid, i;
@@ -3935,7 +3935,7 @@ static struct stlibrd *stli_allocbrd(void)
3935 * can find. 3935 * can find.
3936 */ 3936 */
3937 3937
3938static int stli_initbrds(void) 3938static int __init stli_initbrds(void)
3939{ 3939{
3940 struct stlibrd *brdp, *nxtbrdp; 3940 struct stlibrd *brdp, *nxtbrdp;
3941 struct stlconf conf; 3941 struct stlconf conf;
diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c
index 8beef50f95a0..047766915411 100644
--- a/drivers/char/mxser.c
+++ b/drivers/char/mxser.c
@@ -14,7 +14,8 @@
14 * (at your option) any later version. 14 * (at your option) any later version.
15 * 15 *
16 * Fed through a cleanup, indent and remove of non 2.6 code by Alan Cox 16 * Fed through a cleanup, indent and remove of non 2.6 code by Alan Cox
17 * <alan@redhat.com>. The original 1.8 code is available on www.moxa.com. 17 * <alan@lxorguk.ukuu.org.uk>. The original 1.8 code is available on
18 * www.moxa.com.
18 * - Fixed x86_64 cleanness 19 * - Fixed x86_64 cleanness
19 */ 20 */
20 21
diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
index 39f6357e3b5d..8054ee839b3c 100644
--- a/drivers/char/nvram.c
+++ b/drivers/char/nvram.c
@@ -338,7 +338,7 @@ nvram_open(struct inode *inode, struct file *file)
338 338
339 if ((nvram_open_cnt && (file->f_flags & O_EXCL)) || 339 if ((nvram_open_cnt && (file->f_flags & O_EXCL)) ||
340 (nvram_open_mode & NVRAM_EXCL) || 340 (nvram_open_mode & NVRAM_EXCL) ||
341 ((file->f_mode & 2) && (nvram_open_mode & NVRAM_WRITE))) { 341 ((file->f_mode & FMODE_WRITE) && (nvram_open_mode & NVRAM_WRITE))) {
342 spin_unlock(&nvram_state_lock); 342 spin_unlock(&nvram_state_lock);
343 unlock_kernel(); 343 unlock_kernel();
344 return -EBUSY; 344 return -EBUSY;
@@ -346,7 +346,7 @@ nvram_open(struct inode *inode, struct file *file)
346 346
347 if (file->f_flags & O_EXCL) 347 if (file->f_flags & O_EXCL)
348 nvram_open_mode |= NVRAM_EXCL; 348 nvram_open_mode |= NVRAM_EXCL;
349 if (file->f_mode & 2) 349 if (file->f_mode & FMODE_WRITE)
350 nvram_open_mode |= NVRAM_WRITE; 350 nvram_open_mode |= NVRAM_WRITE;
351 nvram_open_cnt++; 351 nvram_open_cnt++;
352 352
@@ -366,7 +366,7 @@ nvram_release(struct inode *inode, struct file *file)
366 /* if only one instance is open, clear the EXCL bit */ 366 /* if only one instance is open, clear the EXCL bit */
367 if (nvram_open_mode & NVRAM_EXCL) 367 if (nvram_open_mode & NVRAM_EXCL)
368 nvram_open_mode &= ~NVRAM_EXCL; 368 nvram_open_mode &= ~NVRAM_EXCL;
369 if (file->f_mode & 2) 369 if (file->f_mode & FMODE_WRITE)
370 nvram_open_mode &= ~NVRAM_WRITE; 370 nvram_open_mode &= ~NVRAM_WRITE;
371 371
372 spin_unlock(&nvram_state_lock); 372 spin_unlock(&nvram_state_lock);
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index 9a626e50b793..4d64a02612a4 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -554,7 +554,7 @@ static int mgslpc_probe(struct pcmcia_device *link)
554 /* Initialize the struct pcmcia_device structure */ 554 /* Initialize the struct pcmcia_device structure */
555 555
556 /* Interrupt setup */ 556 /* Interrupt setup */
557 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; 557 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
558 link->irq.IRQInfo1 = IRQ_LEVEL_ID; 558 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
559 link->irq.Handler = NULL; 559 link->irq.Handler = NULL;
560 560
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 705a839f1796..675076f5fca8 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1139,18 +1139,12 @@ static int random_fasync(int fd, struct file *filp, int on)
1139 return fasync_helper(fd, filp, on, &fasync); 1139 return fasync_helper(fd, filp, on, &fasync);
1140} 1140}
1141 1141
1142static int random_release(struct inode *inode, struct file *filp)
1143{
1144 return fasync_helper(-1, filp, 0, &fasync);
1145}
1146
1147const struct file_operations random_fops = { 1142const struct file_operations random_fops = {
1148 .read = random_read, 1143 .read = random_read,
1149 .write = random_write, 1144 .write = random_write,
1150 .poll = random_poll, 1145 .poll = random_poll,
1151 .unlocked_ioctl = random_ioctl, 1146 .unlocked_ioctl = random_ioctl,
1152 .fasync = random_fasync, 1147 .fasync = random_fasync,
1153 .release = random_release,
1154}; 1148};
1155 1149
1156const struct file_operations urandom_fops = { 1150const struct file_operations urandom_fops = {
@@ -1158,7 +1152,6 @@ const struct file_operations urandom_fops = {
1158 .write = random_write, 1152 .write = random_write,
1159 .unlocked_ioctl = random_ioctl, 1153 .unlocked_ioctl = random_ioctl,
1160 .fasync = random_fasync, 1154 .fasync = random_fasync,
1161 .release = random_release,
1162}; 1155};
1163 1156
1164/*************************************************************** 1157/***************************************************************
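Several hunks in this series (hpet and the IPMI interfaces earlier, random here, rtc and sonypi further down) drop the same idiom of calling the driver's fasync handler with -1 from the release path. That only makes sense if the core now tears down FASYNC registrations itself when the file is finally closed, which appears to be the assumption behind these removals; a driver is then left with just a .fasync method. A hypothetical sketch of the resulting shape, with invented example_* names:

/*
 * Hypothetical driver shape after the change: keep .fasync for
 * registration, but do no fasync teardown in release.
 */
#include <linux/fs.h>
#include <linux/module.h>

static struct fasync_struct *example_async_queue;

static int example_fasync(int fd, struct file *filp, int on)
{
        return fasync_helper(fd, filp, on, &example_async_queue);
}

static int example_release(struct inode *inode, struct file *filp)
{
        /* no fasync_helper(-1, filp, 0, ...) here; final close handles it */
        return 0;
}

static const struct file_operations example_fops = {
        .owner   = THIS_MODULE,
        .fasync  = example_fasync,
        .release = example_release,
};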
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index e139372d0e69..96adf28a17e4 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -65,7 +65,7 @@ static int raw_open(struct inode *inode, struct file *filp)
65 if (!bdev) 65 if (!bdev)
66 goto out; 66 goto out;
67 igrab(bdev->bd_inode); 67 igrab(bdev->bd_inode);
68 err = blkdev_get(bdev, filp->f_mode, 0); 68 err = blkdev_get(bdev, filp->f_mode);
69 if (err) 69 if (err)
70 goto out; 70 goto out;
71 err = bd_claim(bdev, raw_open); 71 err = bd_claim(bdev, raw_open);
@@ -87,7 +87,7 @@ static int raw_open(struct inode *inode, struct file *filp)
87out2: 87out2:
88 bd_release(bdev); 88 bd_release(bdev);
89out1: 89out1:
90 blkdev_put(bdev); 90 blkdev_put(bdev, filp->f_mode);
91out: 91out:
92 mutex_unlock(&raw_mutex); 92 mutex_unlock(&raw_mutex);
93 return err; 93 return err;
@@ -112,7 +112,7 @@ static int raw_release(struct inode *inode, struct file *filp)
112 mutex_unlock(&raw_mutex); 112 mutex_unlock(&raw_mutex);
113 113
114 bd_release(bdev); 114 bd_release(bdev);
115 blkdev_put(bdev); 115 blkdev_put(bdev, filp->f_mode);
116 return 0; 116 return 0;
117} 117}
118 118
@@ -125,7 +125,7 @@ raw_ioctl(struct inode *inode, struct file *filp,
125{ 125{
126 struct block_device *bdev = filp->private_data; 126 struct block_device *bdev = filp->private_data;
127 127
128 return blkdev_ioctl(bdev->bd_inode, NULL, command, arg); 128 return blkdev_ioctl(bdev, 0, command, arg);
129} 129}
130 130
131static void bind_device(struct raw_config_request *rq) 131static void bind_device(struct raw_config_request *rq)
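The raw.c hunks track a block-layer interface change: blkdev_get(), blkdev_put() and blkdev_ioctl() now take an fmode_t argument instead of the old extra flags, and the mode passed at open time has to be repeated on release. A hedged sketch of the pairing; example_* names are invented and the block device is assumed to have been looked up elsewhere (for instance via bdget(), as raw.c itself does):

/*
 * Sketch of the updated open/release pairing (placeholders throughout).
 */
#include <linux/fs.h>

static struct block_device *example_bdev;       /* set up elsewhere */

static int example_open(struct inode *inode, struct file *filp)
{
        return blkdev_get(example_bdev, filp->f_mode);
}

static int example_release(struct inode *inode, struct file *filp)
{
        /* must pass the same mode that was given to blkdev_get() */
        blkdev_put(example_bdev, filp->f_mode);
        return 0;
}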
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
index 32dc89720d58..20d6efb6324e 100644
--- a/drivers/char/rtc.c
+++ b/drivers/char/rtc.c
@@ -788,8 +788,6 @@ static int rtc_release(struct inode *inode, struct file *file)
788 } 788 }
789 spin_unlock_irq(&rtc_lock); 789 spin_unlock_irq(&rtc_lock);
790 790
791 if (file->f_flags & FASYNC)
792 rtc_fasync(-1, file, 0);
793no_irq: 791no_irq:
794#endif 792#endif
795 793
diff --git a/drivers/char/serial167.c b/drivers/char/serial167.c
index 3b23270eaa65..a8f15e6be594 100644
--- a/drivers/char/serial167.c
+++ b/drivers/char/serial167.c
@@ -418,7 +418,7 @@ static irqreturn_t cd2401_rxerr_interrupt(int irq, void *dev_id)
418 TTY_OVERRUN); 418 TTY_OVERRUN);
419 /* 419 /*
420 If the flip buffer itself is 420 If the flip buffer itself is
421 overflowing, we still loose 421 overflowing, we still lose
422 the next incoming character. 422 the next incoming character.
423 */ 423 */
424 if (tty_buffer_request_room(tty, 1) != 424 if (tty_buffer_request_room(tty, 1) !=
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index 85e0eb76eeab..f4374437a033 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -523,7 +523,7 @@ static int acpi_driver_registered;
523 523
524static int sonypi_ec_write(u8 addr, u8 value) 524static int sonypi_ec_write(u8 addr, u8 value)
525{ 525{
526#ifdef CONFIG_ACPI_EC 526#ifdef CONFIG_ACPI
527 if (SONYPI_ACPI_ACTIVE) 527 if (SONYPI_ACPI_ACTIVE)
528 return ec_write(addr, value); 528 return ec_write(addr, value);
529#endif 529#endif
@@ -539,7 +539,7 @@ static int sonypi_ec_write(u8 addr, u8 value)
539 539
540static int sonypi_ec_read(u8 addr, u8 *value) 540static int sonypi_ec_read(u8 addr, u8 *value)
541{ 541{
542#ifdef CONFIG_ACPI_EC 542#ifdef CONFIG_ACPI
543 if (SONYPI_ACPI_ACTIVE) 543 if (SONYPI_ACPI_ACTIVE)
544 return ec_read(addr, value); 544 return ec_read(addr, value);
545#endif 545#endif
@@ -898,7 +898,6 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
898 898
899static int sonypi_misc_release(struct inode *inode, struct file *file) 899static int sonypi_misc_release(struct inode *inode, struct file *file)
900{ 900{
901 sonypi_misc_fasync(-1, file, 0);
902 mutex_lock(&sonypi_device.lock); 901 mutex_lock(&sonypi_device.lock);
903 sonypi_device.open_count--; 902 sonypi_device.open_count--;
904 mutex_unlock(&sonypi_device.lock); 903 mutex_unlock(&sonypi_device.lock);
diff --git a/drivers/char/specialix.c b/drivers/char/specialix.c
index 242fd46fda22..a16b94f12eb2 100644
--- a/drivers/char/specialix.c
+++ b/drivers/char/specialix.c
@@ -72,7 +72,7 @@
72/* 72/*
73 * There is a bunch of documentation about the card, jumpers, config 73 * There is a bunch of documentation about the card, jumpers, config
74 * settings, restrictions, cables, device names and numbers in 74 * settings, restrictions, cables, device names and numbers in
75 * Documentation/specialix.txt 75 * Documentation/serial/specialix.txt
76 */ 76 */
77 77
78#include <linux/module.h> 78#include <linux/module.h>
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 59f472143f08..1412a8d1e58d 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -1795,12 +1795,15 @@ retry_open:
1795 } 1795 }
1796#endif 1796#endif
1797 if (device == MKDEV(TTYAUX_MAJOR, 1)) { 1797 if (device == MKDEV(TTYAUX_MAJOR, 1)) {
1798 driver = tty_driver_kref_get(console_device(&index)); 1798 struct tty_driver *console_driver = console_device(&index);
1799 if (driver) { 1799 if (console_driver) {
1800 /* Don't let /dev/console block */ 1800 driver = tty_driver_kref_get(console_driver);
1801 filp->f_flags |= O_NONBLOCK; 1801 if (driver) {
1802 noctty = 1; 1802 /* Don't let /dev/console block */
1803 goto got_driver; 1803 filp->f_flags |= O_NONBLOCK;
1804 noctty = 1;
1805 goto got_driver;
1806 }
1804 } 1807 }
1805 mutex_unlock(&tty_mutex); 1808 mutex_unlock(&tty_mutex);
1806 return -ENODEV; 1809 return -ENODEV;
diff --git a/drivers/char/tty_port.c b/drivers/char/tty_port.c
index 553b0e9d8d17..c8f8024cb40e 100644
--- a/drivers/char/tty_port.c
+++ b/drivers/char/tty_port.c
@@ -90,7 +90,7 @@ void tty_port_tty_set(struct tty_port *port, struct tty_struct *tty)
90 spin_lock_irqsave(&port->lock, flags); 90 spin_lock_irqsave(&port->lock, flags);
91 if (port->tty) 91 if (port->tty)
92 tty_kref_put(port->tty); 92 tty_kref_put(port->tty);
93 port->tty = tty; 93 port->tty = tty_kref_get(tty);
94 spin_unlock_irqrestore(&port->lock, flags); 94 spin_unlock_irqrestore(&port->lock, flags);
95} 95}
96EXPORT_SYMBOL(tty_port_tty_set); 96EXPORT_SYMBOL(tty_port_tty_set);
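With this change the port holds its own reference to the tty it caches: the new tty is pinned with tty_kref_get() and the previous one is dropped via tty_kref_put() just above. Consumers that copy port->tty out from under the port lock should generally pin it with a reference of their own for the duration of use, the same pattern hvc_set_winsz() follows earlier in this series. An illustrative, hypothetical consumer:

/*
 * Illustrative consumer pattern (names invented).
 */
#include <linux/tty.h>

static void example_use_port_tty(struct tty_port *port)
{
        struct tty_struct *tty;
        unsigned long flags;

        spin_lock_irqsave(&port->lock, flags);
        tty = tty_kref_get(port->tty);          /* may return NULL */
        spin_unlock_irqrestore(&port->lock, flags);

        if (tty) {
                tty_wakeup(tty);                /* any safe operation on the tty */
                tty_kref_put(tty);
        }
}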
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index d0f4eb6fdb7f..3fb0d2c88ba5 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -198,6 +198,7 @@ static int __devinit virtcons_probe(struct virtio_device *dev)
198 virtio_cons.put_chars = put_chars; 198 virtio_cons.put_chars = put_chars;
199 virtio_cons.notifier_add = notifier_add_vio; 199 virtio_cons.notifier_add = notifier_add_vio;
200 virtio_cons.notifier_del = notifier_del_vio; 200 virtio_cons.notifier_del = notifier_del_vio;
201 virtio_cons.notifier_hangup = notifier_del_vio;
201 202
202 /* The first argument of hvc_alloc() is the virtual console number, so 203 /* The first argument of hvc_alloc() is the virtual console number, so
203 * we use zero. The second argument is the parameter for the 204 * we use zero. The second argument is the parameter for the
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index d8f83e26e4a4..008176edbd64 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -1644,7 +1644,10 @@ static void reset_terminal(struct vc_data *vc, int do_clear)
1644 vc->vc_tab_stop[1] = 1644 vc->vc_tab_stop[1] =
1645 vc->vc_tab_stop[2] = 1645 vc->vc_tab_stop[2] =
1646 vc->vc_tab_stop[3] = 1646 vc->vc_tab_stop[3] =
1647 vc->vc_tab_stop[4] = 0x01010101; 1647 vc->vc_tab_stop[4] =
1648 vc->vc_tab_stop[5] =
1649 vc->vc_tab_stop[6] =
1650 vc->vc_tab_stop[7] = 0x01010101;
1648 1651
1649 vc->vc_bell_pitch = DEFAULT_BELL_PITCH; 1652 vc->vc_bell_pitch = DEFAULT_BELL_PITCH;
1650 vc->vc_bell_duration = DEFAULT_BELL_DURATION; 1653 vc->vc_bell_duration = DEFAULT_BELL_DURATION;
@@ -1935,7 +1938,10 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
1935 vc->vc_tab_stop[1] = 1938 vc->vc_tab_stop[1] =
1936 vc->vc_tab_stop[2] = 1939 vc->vc_tab_stop[2] =
1937 vc->vc_tab_stop[3] = 1940 vc->vc_tab_stop[3] =
1938 vc->vc_tab_stop[4] = 0; 1941 vc->vc_tab_stop[4] =
1942 vc->vc_tab_stop[5] =
1943 vc->vc_tab_stop[6] =
1944 vc->vc_tab_stop[7] = 0;
1939 } 1945 }
1940 return; 1946 return;
1941 case 'm': 1947 case 'm':
@@ -2268,7 +2274,7 @@ rescan_last_byte:
2268 continue; /* nothing to display */ 2274 continue; /* nothing to display */
2269 } 2275 }
2270 /* Glyph not found */ 2276 /* Glyph not found */
2271 if ((!(vc->vc_utf && !vc->vc_disp_ctrl) || c < 128) && !(c & ~charmask)) { 2277 if ((!(vc->vc_utf && !vc->vc_disp_ctrl) && c < 128) && !(c & ~charmask)) {
2272 /* In legacy mode use the glyph we get by a 1:1 mapping. 2278 /* In legacy mode use the glyph we get by a 1:1 mapping.
2273 This would make absolutely no sense with Unicode in mind, 2279 This would make absolutely no sense with Unicode in mind,
2274 but do this for ASCII characters since a font may lack 2280 but do this for ASCII characters since a font may lack
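The two tab-stop hunks in vt.c grow the vc_tab_stop bitmap from five words to eight. Assuming the usual vt.c indexing, where column >> 5 selects the word and column & 31 the bit, each word covers 32 columns, so the array now handles 256 columns instead of 160, and the 0x01010101 fill keeps the default tab stop at every eighth column. A sketch of that assumed bitmap convention, with invented helper names:

/*
 * Sketch of the assumed tab-stop bitmap convention.
 */
static inline int example_is_tab_stop(const unsigned long *tab_stop, unsigned int col)
{
        return (tab_stop[col >> 5] >> (col & 31)) & 1;
}

static inline void example_set_tab_stop(unsigned long *tab_stop, unsigned int col)
{
        tab_stop[col >> 5] |= 1UL << (col & 31);
}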
diff --git a/drivers/char/xilinx_hwicap/buffer_icap.c b/drivers/char/xilinx_hwicap/buffer_icap.c
index aa7f7962a9a0..05d897764f02 100644
--- a/drivers/char/xilinx_hwicap/buffer_icap.c
+++ b/drivers/char/xilinx_hwicap/buffer_icap.c
@@ -21,9 +21,6 @@
21 * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 21 * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
22 * FOR A PARTICULAR PURPOSE. 22 * FOR A PARTICULAR PURPOSE.
23 * 23 *
24 * Xilinx products are not intended for use in life support appliances,
25 * devices, or systems. Use in such applications is expressly prohibited.
26 *
27 * (c) Copyright 2003-2008 Xilinx Inc. 24 * (c) Copyright 2003-2008 Xilinx Inc.
28 * All rights reserved. 25 * All rights reserved.
29 * 26 *
diff --git a/drivers/char/xilinx_hwicap/buffer_icap.h b/drivers/char/xilinx_hwicap/buffer_icap.h
index 8b0252bf06e2..d4f419ee87ab 100644
--- a/drivers/char/xilinx_hwicap/buffer_icap.h
+++ b/drivers/char/xilinx_hwicap/buffer_icap.h
@@ -21,9 +21,6 @@
21 * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 21 * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
22 * FOR A PARTICULAR PURPOSE. 22 * FOR A PARTICULAR PURPOSE.
23 * 23 *
24 * Xilinx products are not intended for use in life support appliances,
25 * devices, or systems. Use in such applications is expressly prohibited.
26 *
27 * (c) Copyright 2003-2008 Xilinx Inc. 24 * (c) Copyright 2003-2008 Xilinx Inc.
28 * All rights reserved. 25 * All rights reserved.
29 * 26 *
diff --git a/drivers/char/xilinx_hwicap/fifo_icap.c b/drivers/char/xilinx_hwicap/fifo_icap.c
index 776b50528478..02225eb19cf6 100644
--- a/drivers/char/xilinx_hwicap/fifo_icap.c
+++ b/drivers/char/xilinx_hwicap/fifo_icap.c
@@ -21,9 +21,6 @@
21 * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 21 * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
22 * FOR A PARTICULAR PURPOSE. 22 * FOR A PARTICULAR PURPOSE.
23 * 23 *
24 * Xilinx products are not intended for use in life support appliances,
25 * devices, or systems. Use in such applications is expressly prohibited.
26 *
27 * (c) Copyright 2007-2008 Xilinx Inc. 24 * (c) Copyright 2007-2008 Xilinx Inc.
28 * All rights reserved. 25 * All rights reserved.
29 * 26 *
diff --git a/drivers/char/xilinx_hwicap/fifo_icap.h b/drivers/char/xilinx_hwicap/fifo_icap.h
index 62bda453c90b..4c9dd9a3b62a 100644
--- a/drivers/char/xilinx_hwicap/fifo_icap.h
+++ b/drivers/char/xilinx_hwicap/fifo_icap.h
@@ -21,9 +21,6 @@
21 * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 21 * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
22 * FOR A PARTICULAR PURPOSE. 22 * FOR A PARTICULAR PURPOSE.
23 * 23 *
24 * Xilinx products are not intended for use in life support appliances,
25 * devices, or systems. Use in such applications is expressly prohibited.
26 *
27 * (c) Copyright 2007-2008 Xilinx Inc. 24 * (c) Copyright 2007-2008 Xilinx Inc.
28 * All rights reserved. 25 * All rights reserved.
29 * 26 *
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
index ed132fe55d3d..f40ab699860f 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -21,9 +21,6 @@
21 * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 21 * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
22 * FOR A PARTICULAR PURPOSE. 22 * FOR A PARTICULAR PURPOSE.
23 * 23 *
24 * Xilinx products are not intended for use in life support appliances,
25 * devices, or systems. Use in such applications is expressly prohibited.
26 *
27 * (c) Copyright 2002 Xilinx Inc., Systems Engineering Group 24 * (c) Copyright 2002 Xilinx Inc., Systems Engineering Group
28 * (c) Copyright 2004 Xilinx Inc., Systems Engineering Group 25 * (c) Copyright 2004 Xilinx Inc., Systems Engineering Group
29 * (c) Copyright 2007-2008 Xilinx Inc. 26 * (c) Copyright 2007-2008 Xilinx Inc.
@@ -626,7 +623,7 @@ static int __devinit hwicap_setup(struct device *dev, int id,
626 if (!request_mem_region(drvdata->mem_start, 623 if (!request_mem_region(drvdata->mem_start,
627 drvdata->mem_size, DRIVER_NAME)) { 624 drvdata->mem_size, DRIVER_NAME)) {
628 dev_err(dev, "Couldn't lock memory region at %Lx\n", 625 dev_err(dev, "Couldn't lock memory region at %Lx\n",
629 regs_res->start); 626 (unsigned long long) regs_res->start);
630 retval = -EBUSY; 627 retval = -EBUSY;
631 goto failed1; 628 goto failed1;
632 } 629 }
@@ -645,9 +642,10 @@ static int __devinit hwicap_setup(struct device *dev, int id,
645 mutex_init(&drvdata->sem); 642 mutex_init(&drvdata->sem);
646 drvdata->is_open = 0; 643 drvdata->is_open = 0;
647 644
648 dev_info(dev, "ioremap %lx to %p with size %Lx\n", 645 dev_info(dev, "ioremap %llx to %p with size %llx\n",
649 (unsigned long int)drvdata->mem_start, 646 (unsigned long long) drvdata->mem_start,
650 drvdata->base_address, drvdata->mem_size); 647 drvdata->base_address,
648 (unsigned long long) drvdata->mem_size);
651 649
652 cdev_init(&drvdata->cdev, &hwicap_fops); 650 cdev_init(&drvdata->cdev, &hwicap_fops);
653 drvdata->cdev.owner = THIS_MODULE; 651 drvdata->cdev.owner = THIS_MODULE;
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.h b/drivers/char/xilinx_hwicap/xilinx_hwicap.h
index 24d0d9b938fb..8cca11981c5f 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.h
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.h
@@ -21,9 +21,6 @@
21 * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 21 * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
22 * FOR A PARTICULAR PURPOSE. 22 * FOR A PARTICULAR PURPOSE.
23 * 23 *
24 * Xilinx products are not intended for use in life support appliances,
25 * devices, or systems. Use in such applications is expressly prohibited.
26 *
27 * (c) Copyright 2003-2007 Xilinx Inc. 24 * (c) Copyright 2003-2007 Xilinx Inc.
28 * All rights reserved. 25 * All rights reserved.
29 * 26 *
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 5ce07b517c58..8504a2108557 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -16,6 +16,7 @@
16#include <linux/cpu.h> 16#include <linux/cpu.h>
17#include <linux/cpuidle.h> 17#include <linux/cpuidle.h>
18#include <linux/ktime.h> 18#include <linux/ktime.h>
19#include <linux/hrtimer.h>
19 20
20#include "cpuidle.h" 21#include "cpuidle.h"
21 22
@@ -56,10 +57,22 @@ static void cpuidle_idle_call(void)
56 if (pm_idle_old) 57 if (pm_idle_old)
57 pm_idle_old(); 58 pm_idle_old();
58 else 59 else
60#if defined(CONFIG_ARCH_HAS_DEFAULT_IDLE)
61 default_idle();
62#else
59 local_irq_enable(); 63 local_irq_enable();
64#endif
60 return; 65 return;
61 } 66 }
62 67
68#if 0
69 /* shows regressions, re-enable for 2.6.29 */
70 /*
71 * run any timers that can be run now, at this point
72 * before calculating the idle duration etc.
73 */
74 hrtimer_peek_ahead_timers();
75#endif
63 /* ask the governor for the next state */ 76 /* ask the governor for the next state */
64 next_state = cpuidle_curr_governor->select(dev); 77 next_state = cpuidle_curr_governor->select(dev);
65 if (need_resched()) 78 if (need_resched())
@@ -67,8 +80,11 @@ static void cpuidle_idle_call(void)
67 target_state = &dev->states[next_state]; 80 target_state = &dev->states[next_state];
68 81
69 /* enter the state and update stats */ 82 /* enter the state and update stats */
70 dev->last_residency = target_state->enter(dev, target_state);
71 dev->last_state = target_state; 83 dev->last_state = target_state;
84 dev->last_residency = target_state->enter(dev, target_state);
85 if (dev->last_state)
86 target_state = dev->last_state;
87
72 target_state->time += (unsigned long long)dev->last_residency; 88 target_state->time += (unsigned long long)dev->last_residency;
73 target_state->usage++; 89 target_state->usage++;
74 90
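The reordering in cpuidle_idle_call() is the interesting part of this hunk: dev->last_state is written before ->enter() runs and read back afterwards, and the usage/time accounting that follows charges whatever dev->last_state then points at. That lets an enter handler demote to a shallower state and still have the statistics attributed to the state that actually ran. A hypothetical handler illustrating the contract; all example_* helpers are stand-ins, not real kernel functions:

/*
 * Hypothetical ->enter() handler, not from this patch.
 */
#include <linux/cpuidle.h>

static int example_must_demote(void)
{
        return 0;                               /* stub condition */
}

static int example_enter_shallow(struct cpuidle_device *dev)
{
        return 0;                               /* stub: residency in microseconds */
}

static int example_do_deep_idle(struct cpuidle_state *state)
{
        return 0;                               /* stub: residency in microseconds */
}

static int example_enter_deep(struct cpuidle_device *dev,
                              struct cpuidle_state *state)
{
        if (example_must_demote()) {
                dev->last_state = &dev->states[0];      /* report the shallow state */
                return example_enter_shallow(dev);
        }
        return example_do_deep_idle(state);
}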
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index b6ad3ac5916e..24607669a52b 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -1357,7 +1357,7 @@ static int hw_supports(struct device *dev, __be32 desc_hdr_template)
1357 return ret; 1357 return ret;
1358} 1358}
1359 1359
1360static int __devexit talitos_remove(struct of_device *ofdev) 1360static int talitos_remove(struct of_device *ofdev)
1361{ 1361{
1362 struct device *dev = &ofdev->dev; 1362 struct device *dev = &ofdev->dev;
1363 struct talitos_private *priv = dev_get_drvdata(dev); 1363 struct talitos_private *priv = dev_get_drvdata(dev);
@@ -1622,7 +1622,7 @@ static struct of_platform_driver talitos_driver = {
1622 .name = "talitos", 1622 .name = "talitos",
1623 .match_table = talitos_match, 1623 .match_table = talitos_match,
1624 .probe = talitos_probe, 1624 .probe = talitos_probe,
1625 .remove = __devexit_p(talitos_remove), 1625 .remove = talitos_remove,
1626}; 1626};
1627 1627
1628static int __init talitos_init(void) 1628static int __init talitos_init(void)
diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c
index ec249d2db633..d883e1b8bb8c 100644
--- a/drivers/dca/dca-core.c
+++ b/drivers/dca/dca-core.c
@@ -270,6 +270,6 @@ static void __exit dca_exit(void)
270 dca_sysfs_exit(); 270 dca_sysfs_exit();
271} 271}
272 272
273module_init(dca_init); 273subsys_initcall(dca_init);
274module_exit(dca_exit); 274module_exit(dca_exit);
275 275
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index dc003a3a787d..657996517374 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -388,7 +388,10 @@ int dma_async_device_register(struct dma_device *device)
388 388
389 init_completion(&device->done); 389 init_completion(&device->done);
390 kref_init(&device->refcount); 390 kref_init(&device->refcount);
391
392 mutex_lock(&dma_list_mutex);
391 device->dev_id = id++; 393 device->dev_id = id++;
394 mutex_unlock(&dma_list_mutex);
392 395
393 /* represent channels in sysfs. Probably want devs too */ 396 /* represent channels in sysfs. Probably want devs too */
394 list_for_each_entry(chan, &device->channels, device_node) { 397 list_for_each_entry(chan, &device->channels, device_node) {
@@ -399,8 +402,8 @@ int dma_async_device_register(struct dma_device *device)
399 chan->chan_id = chancnt++; 402 chan->chan_id = chancnt++;
400 chan->dev.class = &dma_devclass; 403 chan->dev.class = &dma_devclass;
401 chan->dev.parent = device->dev; 404 chan->dev.parent = device->dev;
402 snprintf(chan->dev.bus_id, BUS_ID_SIZE, "dma%dchan%d", 405 dev_set_name(&chan->dev, "dma%dchan%d",
403 device->dev_id, chan->chan_id); 406 device->dev_id, chan->chan_id);
404 407
405 rc = device_register(&chan->dev); 408 rc = device_register(&chan->dev);
406 if (rc) { 409 if (rc) {
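This hunk and the dmatest.c hunks that follow replace direct use of the fixed-size bus_id buffer with dev_set_name() when naming a channel before device_register() and dev_name() whenever the name is read back. A minimal illustration of the pattern; the device and ids here are placeholders:

/*
 * Minimal illustration of the dev_set_name()/dev_name() pattern.
 */
#include <linux/kernel.h>
#include <linux/device.h>

static void example_name_channel(struct device *dev, int dev_id, int chan_id)
{
        dev_set_name(dev, "dma%dchan%d", dev_id, chan_id);
        pr_debug("registered %s\n", dev_name(dev));
}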
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index d1e381e35a9e..ed9636bfb54a 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -20,11 +20,11 @@ static unsigned int test_buf_size = 16384;
20module_param(test_buf_size, uint, S_IRUGO); 20module_param(test_buf_size, uint, S_IRUGO);
21MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer"); 21MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");
22 22
23static char test_channel[BUS_ID_SIZE]; 23static char test_channel[20];
24module_param_string(channel, test_channel, sizeof(test_channel), S_IRUGO); 24module_param_string(channel, test_channel, sizeof(test_channel), S_IRUGO);
25MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)"); 25MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");
26 26
27static char test_device[BUS_ID_SIZE]; 27static char test_device[20];
28module_param_string(device, test_device, sizeof(test_device), S_IRUGO); 28module_param_string(device, test_device, sizeof(test_device), S_IRUGO);
29MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)"); 29MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");
30 30
@@ -80,14 +80,14 @@ static bool dmatest_match_channel(struct dma_chan *chan)
80{ 80{
81 if (test_channel[0] == '\0') 81 if (test_channel[0] == '\0')
82 return true; 82 return true;
83 return strcmp(chan->dev.bus_id, test_channel) == 0; 83 return strcmp(dev_name(&chan->dev), test_channel) == 0;
84} 84}
85 85
86static bool dmatest_match_device(struct dma_device *device) 86static bool dmatest_match_device(struct dma_device *device)
87{ 87{
88 if (test_device[0] == '\0') 88 if (test_device[0] == '\0')
89 return true; 89 return true;
90 return strcmp(device->dev->bus_id, test_device) == 0; 90 return strcmp(dev_name(device->dev), test_device) == 0;
91} 91}
92 92
93static unsigned long dmatest_random(void) 93static unsigned long dmatest_random(void)
@@ -332,7 +332,7 @@ static enum dma_state_client dmatest_add_channel(struct dma_chan *chan)
332 332
333 dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL); 333 dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
334 if (!dtc) { 334 if (!dtc) {
335 pr_warning("dmatest: No memory for %s\n", chan->dev.bus_id); 335 pr_warning("dmatest: No memory for %s\n", dev_name(&chan->dev));
336 return DMA_NAK; 336 return DMA_NAK;
337 } 337 }
338 338
@@ -343,16 +343,16 @@ static enum dma_state_client dmatest_add_channel(struct dma_chan *chan)
343 thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL); 343 thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
344 if (!thread) { 344 if (!thread) {
345 pr_warning("dmatest: No memory for %s-test%u\n", 345 pr_warning("dmatest: No memory for %s-test%u\n",
346 chan->dev.bus_id, i); 346 dev_name(&chan->dev), i);
347 break; 347 break;
348 } 348 }
349 thread->chan = dtc->chan; 349 thread->chan = dtc->chan;
350 smp_wmb(); 350 smp_wmb();
351 thread->task = kthread_run(dmatest_func, thread, "%s-test%u", 351 thread->task = kthread_run(dmatest_func, thread, "%s-test%u",
352 chan->dev.bus_id, i); 352 dev_name(&chan->dev), i);
353 if (IS_ERR(thread->task)) { 353 if (IS_ERR(thread->task)) {
354 pr_warning("dmatest: Failed to run thread %s-test%u\n", 354 pr_warning("dmatest: Failed to run thread %s-test%u\n",
355 chan->dev.bus_id, i); 355 dev_name(&chan->dev), i);
356 kfree(thread); 356 kfree(thread);
357 break; 357 break;
358 } 358 }
@@ -362,7 +362,7 @@ static enum dma_state_client dmatest_add_channel(struct dma_chan *chan)
362 list_add_tail(&thread->node, &dtc->threads); 362 list_add_tail(&thread->node, &dtc->threads);
363 } 363 }
364 364
365 pr_info("dmatest: Started %u threads using %s\n", i, chan->dev.bus_id); 365 pr_info("dmatest: Started %u threads using %s\n", i, dev_name(&chan->dev));
366 366
367 list_add_tail(&dtc->node, &dmatest_channels); 367 list_add_tail(&dtc->node, &dmatest_channels);
368 nr_channels++; 368 nr_channels++;
@@ -379,7 +379,7 @@ static enum dma_state_client dmatest_remove_channel(struct dma_chan *chan)
379 list_del(&dtc->node); 379 list_del(&dtc->node);
380 dmatest_cleanup_channel(dtc); 380 dmatest_cleanup_channel(dtc);
381 pr_debug("dmatest: lost channel %s\n", 381 pr_debug("dmatest: lost channel %s\n",
382 chan->dev.bus_id); 382 dev_name(&chan->dev));
383 return DMA_ACK; 383 return DMA_ACK;
384 } 384 }
385 } 385 }
@@ -418,7 +418,7 @@ dmatest_event(struct dma_client *client, struct dma_chan *chan,
418 418
419 default: 419 default:
420 pr_info("dmatest: Unhandled event %u (%s)\n", 420 pr_info("dmatest: Unhandled event %u (%s)\n",
421 state, chan->dev.bus_id); 421 state, dev_name(&chan->dev));
422 break; 422 break;
423 } 423 }
424 424
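The dmatest changes above follow the same conversion as the dmaengine core: device names are set with dev_set_name() and read back with dev_name() instead of touching the fixed-size bus_id array directly, which is also why the module parameter buffers drop BUS_ID_SIZE. A minimal sketch of that pairing (the function and variable names are illustrative, not from the patch):

    #include <linux/device.h>

    /* Name a channel device and log the generated name before registering. */
    static int register_chan_dev(struct device *dev, struct device *parent,
                                 int dev_id, int chan_id)
    {
            dev->parent = parent;
            dev_set_name(dev, "dma%dchan%d", dev_id, chan_id);
            pr_debug("registering %s\n", dev_name(dev));
            return device_register(dev);
    }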
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index 1ef68b315657..6607fdd00b1c 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -33,6 +33,7 @@
33#include <linux/delay.h> 33#include <linux/delay.h>
34#include <linux/dma-mapping.h> 34#include <linux/dma-mapping.h>
35#include <linux/workqueue.h> 35#include <linux/workqueue.h>
36#include <linux/i7300_idle.h>
36#include "ioatdma.h" 37#include "ioatdma.h"
37#include "ioatdma_registers.h" 38#include "ioatdma_registers.h"
38#include "ioatdma_hw.h" 39#include "ioatdma_hw.h"
@@ -171,6 +172,11 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
171 xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET); 172 xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
172 xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale)); 173 xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
173 174
175#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
176 if (i7300_idle_platform_probe(NULL, NULL) == 0) {
177 device->common.chancnt--;
178 }
179#endif
174 for (i = 0; i < device->common.chancnt; i++) { 180 for (i = 0; i < device->common.chancnt; i++) {
175 ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL); 181 ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
176 if (!ioat_chan) { 182 if (!ioat_chan) {
@@ -519,7 +525,7 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
519 } 525 }
520 526
521 hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS; 527 hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
522 if (new->async_tx.callback) { 528 if (first->async_tx.callback) {
523 hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN; 529 hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
524 if (first != new) { 530 if (first != new) {
525 /* move callback into to last desc */ 531 /* move callback into to last desc */
@@ -611,7 +617,7 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
611 } 617 }
612 618
613 hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_CP_STS; 619 hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
614 if (new->async_tx.callback) { 620 if (first->async_tx.callback) {
615 hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN; 621 hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
616 if (first != new) { 622 if (first != new) {
617 /* move callback into to last desc */ 623 /* move callback into to last desc */
@@ -801,6 +807,12 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)
801 struct ioat_desc_sw *desc, *_desc; 807 struct ioat_desc_sw *desc, *_desc;
802 int in_use_descs = 0; 808 int in_use_descs = 0;
803 809
810 /* Before freeing channel resources first check
811 * if they have been previously allocated for this channel.
812 */
813 if (ioat_chan->desccount == 0)
814 return;
815
804 tasklet_disable(&ioat_chan->cleanup_task); 816 tasklet_disable(&ioat_chan->cleanup_task);
805 ioat_dma_memcpy_cleanup(ioat_chan); 817 ioat_dma_memcpy_cleanup(ioat_chan);
806 818
@@ -863,6 +875,7 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)
863 ioat_chan->last_completion = ioat_chan->completion_addr = 0; 875 ioat_chan->last_completion = ioat_chan->completion_addr = 0;
864 ioat_chan->pending = 0; 876 ioat_chan->pending = 0;
865 ioat_chan->dmacount = 0; 877 ioat_chan->dmacount = 0;
878 ioat_chan->desccount = 0;
866 ioat_chan->watchdog_completion = 0; 879 ioat_chan->watchdog_completion = 0;
867 ioat_chan->last_compl_desc_addr_hw = 0; 880 ioat_chan->last_compl_desc_addr_hw = 0;
868 ioat_chan->watchdog_tcp_cookie = 881 ioat_chan->watchdog_tcp_cookie =
@@ -1328,10 +1341,12 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
1328 */ 1341 */
1329#define IOAT_TEST_SIZE 2000 1342#define IOAT_TEST_SIZE 2000
1330 1343
1344DECLARE_COMPLETION(test_completion);
1331static void ioat_dma_test_callback(void *dma_async_param) 1345static void ioat_dma_test_callback(void *dma_async_param)
1332{ 1346{
1333 printk(KERN_ERR "ioatdma: ioat_dma_test_callback(%p)\n", 1347 printk(KERN_ERR "ioatdma: ioat_dma_test_callback(%p)\n",
1334 dma_async_param); 1348 dma_async_param);
1349 complete(&test_completion);
1335} 1350}
1336 1351
1337/** 1352/**
@@ -1397,7 +1412,8 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
1397 goto free_resources; 1412 goto free_resources;
1398 } 1413 }
1399 device->common.device_issue_pending(dma_chan); 1414 device->common.device_issue_pending(dma_chan);
1400 msleep(1); 1415
1416 wait_for_completion_timeout(&test_completion, msecs_to_jiffies(3000));
1401 1417
1402 if (device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL) 1418 if (device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL)
1403 != DMA_SUCCESS) { 1419 != DMA_SUCCESS) {
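Replacing the bare msleep(1) in the self-test with a completion gives the interrupt callback a way to signal the waiter and puts an upper bound on the wait. A minimal sketch of the pattern, assuming the callback is invoked from the DMA completion path (names are illustrative):

    #include <linux/completion.h>
    #include <linux/jiffies.h>

    static DECLARE_COMPLETION(selftest_done);       /* illustrative */

    /* Runs in the DMA driver's completion context. */
    static void selftest_callback(void *param)
    {
            complete(&selftest_done);
    }

    /* Returns true if the callback fired within 3 seconds. */
    static bool selftest_wait(void)
    {
            return wait_for_completion_timeout(&selftest_done,
                                               msecs_to_jiffies(3000)) != 0;
    }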
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 71fba82462cb..6be317262200 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -85,18 +85,28 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
85 enum dma_ctrl_flags flags = desc->async_tx.flags; 85 enum dma_ctrl_flags flags = desc->async_tx.flags;
86 u32 src_cnt; 86 u32 src_cnt;
87 dma_addr_t addr; 87 dma_addr_t addr;
88 dma_addr_t dest;
88 89
90 src_cnt = unmap->unmap_src_cnt;
91 dest = iop_desc_get_dest_addr(unmap, iop_chan);
89 if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { 92 if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
90 addr = iop_desc_get_dest_addr(unmap, iop_chan); 93 enum dma_data_direction dir;
91 dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE); 94
95 if (src_cnt > 1) /* is xor? */
96 dir = DMA_BIDIRECTIONAL;
97 else
98 dir = DMA_FROM_DEVICE;
99
100 dma_unmap_page(dev, dest, len, dir);
92 } 101 }
93 102
94 if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { 103 if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
95 src_cnt = unmap->unmap_src_cnt;
96 while (src_cnt--) { 104 while (src_cnt--) {
97 addr = iop_desc_get_src_addr(unmap, 105 addr = iop_desc_get_src_addr(unmap,
98 iop_chan, 106 iop_chan,
99 src_cnt); 107 src_cnt);
108 if (addr == dest)
109 continue;
100 dma_unmap_page(dev, addr, len, 110 dma_unmap_page(dev, addr, len,
101 DMA_TO_DEVICE); 111 DMA_TO_DEVICE);
102 } 112 }
@@ -411,6 +421,7 @@ iop_adma_tx_submit(struct dma_async_tx_descriptor *tx)
411 int slot_cnt; 421 int slot_cnt;
412 int slots_per_op; 422 int slots_per_op;
413 dma_cookie_t cookie; 423 dma_cookie_t cookie;
424 dma_addr_t next_dma;
414 425
415 grp_start = sw_desc->group_head; 426 grp_start = sw_desc->group_head;
416 slot_cnt = grp_start->slot_cnt; 427 slot_cnt = grp_start->slot_cnt;
@@ -425,12 +436,12 @@ iop_adma_tx_submit(struct dma_async_tx_descriptor *tx)
425 &old_chain_tail->chain_node); 436 &old_chain_tail->chain_node);
426 437
427 /* fix up the hardware chain */ 438 /* fix up the hardware chain */
428 iop_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys); 439 next_dma = grp_start->async_tx.phys;
440 iop_desc_set_next_desc(old_chain_tail, next_dma);
441 BUG_ON(iop_desc_get_next_desc(old_chain_tail) != next_dma); /* flush */
429 442
430 /* 1/ don't add pre-chained descriptors 443 /* check for pre-chained descriptors */
431 * 2/ dummy read to flush next_desc write 444 iop_paranoia(iop_desc_get_next_desc(sw_desc));
432 */
433 BUG_ON(iop_desc_get_next_desc(sw_desc));
434 445
435 /* increment the pending count by the number of slots 446 /* increment the pending count by the number of slots
436 * memcpy operations have a 1:1 (slot:operation) relation 447 * memcpy operations have a 1:1 (slot:operation) relation
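The iop-adma unmap changes encode two rules: an xor destination is both read and written by the engine, so it must be unmapped DMA_BIDIRECTIONAL rather than DMA_FROM_DEVICE, and a source that aliases the destination must not be unmapped a second time. A condensed sketch of the same logic (the helper and its parameters are illustrative); the identical change is repeated for mv_xor further down:

    #include <linux/dma-mapping.h>

    static void unmap_xor_buffers(struct device *dev, dma_addr_t dest,
                                  const dma_addr_t *src, int src_cnt,
                                  size_t len)
    {
            /* xor (src_cnt > 1) reads the destination back as well */
            enum dma_data_direction dir =
                    src_cnt > 1 ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
            int i;

            dma_unmap_page(dev, dest, len, dir);
            for (i = 0; i < src_cnt; i++) {
                    if (src[i] == dest)     /* already unmapped above */
                            continue;
                    dma_unmap_page(dev, src[i], len, DMA_TO_DEVICE);
            }
    }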
diff --git a/drivers/dma/iovlock.c b/drivers/dma/iovlock.c
index e763d723e4cf..9f6fe46a9b87 100644
--- a/drivers/dma/iovlock.c
+++ b/drivers/dma/iovlock.c
@@ -55,7 +55,6 @@ struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len)
55 int nr_iovecs = 0; 55 int nr_iovecs = 0;
56 int iovec_len_used = 0; 56 int iovec_len_used = 0;
57 int iovec_pages_used = 0; 57 int iovec_pages_used = 0;
58 long err;
59 58
60 /* don't pin down non-user-based iovecs */ 59 /* don't pin down non-user-based iovecs */
61 if (segment_eq(get_fs(), KERNEL_DS)) 60 if (segment_eq(get_fs(), KERNEL_DS))
@@ -72,23 +71,21 @@ struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len)
72 local_list = kmalloc(sizeof(*local_list) 71 local_list = kmalloc(sizeof(*local_list)
73 + (nr_iovecs * sizeof (struct dma_page_list)) 72 + (nr_iovecs * sizeof (struct dma_page_list))
74 + (iovec_pages_used * sizeof (struct page*)), GFP_KERNEL); 73 + (iovec_pages_used * sizeof (struct page*)), GFP_KERNEL);
75 if (!local_list) { 74 if (!local_list)
76 err = -ENOMEM;
77 goto out; 75 goto out;
78 }
79 76
80 /* list of pages starts right after the page list array */ 77 /* list of pages starts right after the page list array */
81 pages = (struct page **) &local_list->page_list[nr_iovecs]; 78 pages = (struct page **) &local_list->page_list[nr_iovecs];
82 79
80 local_list->nr_iovecs = 0;
81
83 for (i = 0; i < nr_iovecs; i++) { 82 for (i = 0; i < nr_iovecs; i++) {
84 struct dma_page_list *page_list = &local_list->page_list[i]; 83 struct dma_page_list *page_list = &local_list->page_list[i];
85 84
86 len -= iov[i].iov_len; 85 len -= iov[i].iov_len;
87 86
88 if (!access_ok(VERIFY_WRITE, iov[i].iov_base, iov[i].iov_len)) { 87 if (!access_ok(VERIFY_WRITE, iov[i].iov_base, iov[i].iov_len))
89 err = -EFAULT;
90 goto unpin; 88 goto unpin;
91 }
92 89
93 page_list->nr_pages = num_pages_spanned(&iov[i]); 90 page_list->nr_pages = num_pages_spanned(&iov[i]);
94 page_list->base_address = iov[i].iov_base; 91 page_list->base_address = iov[i].iov_base;
@@ -109,10 +106,8 @@ struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len)
109 NULL); 106 NULL);
110 up_read(&current->mm->mmap_sem); 107 up_read(&current->mm->mmap_sem);
111 108
112 if (ret != page_list->nr_pages) { 109 if (ret != page_list->nr_pages)
113 err = -ENOMEM;
114 goto unpin; 110 goto unpin;
115 }
116 111
117 local_list->nr_iovecs = i + 1; 112 local_list->nr_iovecs = i + 1;
118 } 113 }
@@ -122,7 +117,7 @@ struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len)
122unpin: 117unpin:
123 dma_unpin_iovec_pages(local_list); 118 dma_unpin_iovec_pages(local_list);
124out: 119out:
125 return ERR_PTR(err); 120 return NULL;
126} 121}
127 122
128void dma_unpin_iovec_pages(struct dma_pinned_list *pinned_list) 123void dma_unpin_iovec_pages(struct dma_pinned_list *pinned_list)
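With the error-pointer plumbing removed, dma_pin_iovec_pages() reports every failure the same way: a NULL return. Callers therefore only need a NULL check. The sketch below reuses the signature shown in this hunk and forward-declares the function rather than guessing its header; the caller itself is illustrative:

    #include <linux/uio.h>
    #include <linux/errno.h>

    struct dma_pinned_list;
    struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);

    /* Illustrative caller: treat any failure as "no DMA offload". */
    static int try_pin(struct iovec *iov, size_t len,
                       struct dma_pinned_list **out)
    {
            *out = dma_pin_iovec_pages(iov, len);
            return *out ? 0 : -ENOMEM;
    }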
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 0328da020a10..bcda17426411 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -311,17 +311,26 @@ mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
311 enum dma_ctrl_flags flags = desc->async_tx.flags; 311 enum dma_ctrl_flags flags = desc->async_tx.flags;
312 u32 src_cnt; 312 u32 src_cnt;
313 dma_addr_t addr; 313 dma_addr_t addr;
314 dma_addr_t dest;
314 315
316 src_cnt = unmap->unmap_src_cnt;
317 dest = mv_desc_get_dest_addr(unmap);
315 if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { 318 if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
316 addr = mv_desc_get_dest_addr(unmap); 319 enum dma_data_direction dir;
317 dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE); 320
321 if (src_cnt > 1) /* is xor ? */
322 dir = DMA_BIDIRECTIONAL;
323 else
324 dir = DMA_FROM_DEVICE;
325 dma_unmap_page(dev, dest, len, dir);
318 } 326 }
319 327
320 if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { 328 if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
321 src_cnt = unmap->unmap_src_cnt;
322 while (src_cnt--) { 329 while (src_cnt--) {
323 addr = mv_desc_get_src_addr(unmap, 330 addr = mv_desc_get_src_addr(unmap,
324 src_cnt); 331 src_cnt);
332 if (addr == dest)
333 continue;
325 dma_unmap_page(dev, addr, len, 334 dma_unmap_page(dev, addr, len,
326 DMA_TO_DEVICE); 335 DMA_TO_DEVICE);
327 } 336 }
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 5a11e3cbcae2..e0dbd388757f 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -102,6 +102,13 @@ config EDAC_I3000
102 Support for error detection and correction on the Intel 102 Support for error detection and correction on the Intel
103 3000 and 3010 server chipsets. 103 3000 and 3010 server chipsets.
104 104
105config EDAC_X38
106 tristate "Intel X38"
107 depends on EDAC_MM_EDAC && PCI && X86
108 help
109 Support for error detection and correction on the Intel
110 X38 server chipsets.
111
105config EDAC_I82860 112config EDAC_I82860
106 tristate "Intel 82860" 113 tristate "Intel 82860"
107 depends on EDAC_MM_EDAC && PCI && X86_32 114 depends on EDAC_MM_EDAC && PCI && X86_32
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index e5e9104b5520..62c2d9bad8dc 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_EDAC_I82443BXGX) += i82443bxgx_edac.o
26obj-$(CONFIG_EDAC_I82875P) += i82875p_edac.o 26obj-$(CONFIG_EDAC_I82875P) += i82875p_edac.o
27obj-$(CONFIG_EDAC_I82975X) += i82975x_edac.o 27obj-$(CONFIG_EDAC_I82975X) += i82975x_edac.o
28obj-$(CONFIG_EDAC_I3000) += i3000_edac.o 28obj-$(CONFIG_EDAC_I3000) += i3000_edac.o
29obj-$(CONFIG_EDAC_X38) += x38_edac.o
29obj-$(CONFIG_EDAC_I82860) += i82860_edac.o 30obj-$(CONFIG_EDAC_I82860) += i82860_edac.o
30obj-$(CONFIG_EDAC_R82600) += r82600_edac.o 31obj-$(CONFIG_EDAC_R82600) += r82600_edac.o
31obj-$(CONFIG_EDAC_PASEMI) += pasemi_edac.o 32obj-$(CONFIG_EDAC_PASEMI) += pasemi_edac.o
diff --git a/drivers/edac/cell_edac.c b/drivers/edac/cell_edac.c
index 887072f5dc8b..cd2e3b8087e7 100644
--- a/drivers/edac/cell_edac.c
+++ b/drivers/edac/cell_edac.c
@@ -9,6 +9,7 @@
9 */ 9 */
10#undef DEBUG 10#undef DEBUG
11 11
12#include <linux/edac.h>
12#include <linux/module.h> 13#include <linux/module.h>
13#include <linux/init.h> 14#include <linux/init.h>
14#include <linux/platform_device.h> 15#include <linux/platform_device.h>
@@ -164,6 +165,8 @@ static int __devinit cell_edac_probe(struct platform_device *pdev)
164 if (regs == NULL) 165 if (regs == NULL)
165 return -ENODEV; 166 return -ENODEV;
166 167
168 edac_op_state = EDAC_OPSTATE_POLL;
169
167 /* Get channel population */ 170 /* Get channel population */
168 reg = in_be64(&regs->mic_mnt_cfg); 171 reg = in_be64(&regs->mic_mnt_cfg);
169 dev_dbg(&pdev->dev, "MIC_MNT_CFG = 0x%016lx\n", reg); 172 dev_dbg(&pdev->dev, "MIC_MNT_CFG = 0x%016lx\n", reg);
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index f0d9b415db50..d335086f4a26 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -1381,6 +1381,7 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
1381 if (mci == NULL) 1381 if (mci == NULL)
1382 return -ENOMEM; 1382 return -ENOMEM;
1383 1383
1384 kobject_get(&mci->edac_mci_kobj);
1384 debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci); 1385 debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci);
1385 1386
1386 mci->dev = &pdev->dev; /* record ptr to the generic device */ 1387 mci->dev = &pdev->dev; /* record ptr to the generic device */
@@ -1453,6 +1454,7 @@ fail1:
1453 i5000_put_devices(mci); 1454 i5000_put_devices(mci);
1454 1455
1455fail0: 1456fail0:
1457 kobject_put(&mci->edac_mci_kobj);
1456 edac_mc_free(mci); 1458 edac_mc_free(mci);
1457 return -ENODEV; 1459 return -ENODEV;
1458} 1460}
@@ -1498,7 +1500,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
1498 1500
1499 /* retrieve references to resources, and free those resources */ 1501 /* retrieve references to resources, and free those resources */
1500 i5000_put_devices(mci); 1502 i5000_put_devices(mci);
1501 1503 kobject_put(&mci->edac_mci_kobj);
1502 edac_mc_free(mci); 1504 edac_mc_free(mci);
1503} 1505}
1504 1506
diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
index e43bdc43a1bf..ebb037b78758 100644
--- a/drivers/edac/i82875p_edac.c
+++ b/drivers/edac/i82875p_edac.c
@@ -182,8 +182,6 @@ static struct pci_dev *mci_pdev; /* init dev: in case that AGP code has
182 * already registered driver 182 * already registered driver
183 */ 183 */
184 184
185static int i82875p_registered = 1;
186
187static struct edac_pci_ctl_info *i82875p_pci; 185static struct edac_pci_ctl_info *i82875p_pci;
188 186
189static void i82875p_get_error_info(struct mem_ctl_info *mci, 187static void i82875p_get_error_info(struct mem_ctl_info *mci,
@@ -295,6 +293,7 @@ static int i82875p_setup_overfl_dev(struct pci_dev *pdev,
295 "%s(): pci_bus_add_device() Failed\n", 293 "%s(): pci_bus_add_device() Failed\n",
296 __func__); 294 __func__);
297 } 295 }
296 pci_bus_assign_resources(dev->bus);
298 } 297 }
299 298
300 *ovrfl_pdev = dev; 299 *ovrfl_pdev = dev;
@@ -409,6 +408,9 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
409 goto fail0; 408 goto fail0;
410 } 409 }
411 410
411 /* Keeps mci available after edac_mc_del_mc() till edac_mc_free() */
412 kobject_get(&mci->edac_mci_kobj);
413
412 debugf3("%s(): init mci\n", __func__); 414 debugf3("%s(): init mci\n", __func__);
413 mci->dev = &pdev->dev; 415 mci->dev = &pdev->dev;
414 mci->mtype_cap = MEM_FLAG_DDR; 416 mci->mtype_cap = MEM_FLAG_DDR;
@@ -451,6 +453,7 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
451 return 0; 453 return 0;
452 454
453fail1: 455fail1:
456 kobject_put(&mci->edac_mci_kobj);
454 edac_mc_free(mci); 457 edac_mc_free(mci);
455 458
456fail0: 459fail0:
@@ -578,12 +581,11 @@ static void __exit i82875p_exit(void)
578{ 581{
579 debugf3("%s()\n", __func__); 582 debugf3("%s()\n", __func__);
580 583
584 i82875p_remove_one(mci_pdev);
585 pci_dev_put(mci_pdev);
586
581 pci_unregister_driver(&i82875p_driver); 587 pci_unregister_driver(&i82875p_driver);
582 588
583 if (!i82875p_registered) {
584 i82875p_remove_one(mci_pdev);
585 pci_dev_put(mci_pdev);
586 }
587} 589}
588 590
589module_init(i82875p_init); 591module_init(i82875p_init);
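Both the i5000 and i82875p changes pin the controller's embedded kobject across teardown: the probe path takes an extra reference with kobject_get() and the remove/exit path drops it after edac_mc_del_mc(), so the mem_ctl_info stays valid until the final edac_mc_free(). A minimal sketch of the remove side, using only the field and helpers visible in these hunks and the usual EDAC includes (driver-specific cleanup omitted):

    #include <linux/pci.h>
    #include <linux/edac.h>
    #include "edac_core.h"

    /* Teardown sketch: drop the extra reference taken at probe time. */
    static void remove_sketch(struct pci_dev *pdev)
    {
            struct mem_ctl_info *mci = edac_mc_del_mc(&pdev->dev);

            if (!mci)
                    return;
            kobject_put(&mci->edac_mci_kobj);
            edac_mc_free(mci);
    }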
diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
new file mode 100644
index 000000000000..2406c2ce2844
--- /dev/null
+++ b/drivers/edac/x38_edac.c
@@ -0,0 +1,524 @@
1/*
2 * Intel X38 Memory Controller kernel module
3 * Copyright (C) 2008 Cluster Computing, Inc.
4 *
5 * This file may be distributed under the terms of the
6 * GNU General Public License.
7 *
8 * This file is based on i3200_edac.c
9 *
10 */
11
12#include <linux/module.h>
13#include <linux/init.h>
14#include <linux/pci.h>
15#include <linux/pci_ids.h>
16#include <linux/slab.h>
17#include <linux/edac.h>
18#include "edac_core.h"
19
20#define X38_REVISION "1.1"
21
22#define EDAC_MOD_STR "x38_edac"
23
24#define PCI_DEVICE_ID_INTEL_X38_HB 0x29e0
25
26#define X38_RANKS 8
27#define X38_RANKS_PER_CHANNEL 4
28#define X38_CHANNELS 2
29
30/* Intel X38 register addresses - device 0 function 0 - DRAM Controller */
31
32#define X38_MCHBAR_LOW 0x48 /* MCH Memory Mapped Register BAR */
33#define X38_MCHBAR_HIGH 0x4b
34#define X38_MCHBAR_MASK 0xfffffc000ULL /* bits 35:14 */
35#define X38_MMR_WINDOW_SIZE 16384
36
37#define X38_TOM 0xa0 /* Top of Memory (16b)
38 *
39 * 15:10 reserved
40 * 9:0 total populated physical memory
41 */
42#define X38_TOM_MASK 0x3ff /* bits 9:0 */
43#define X38_TOM_SHIFT 26 /* 64MiB grain */
44
45#define X38_ERRSTS 0xc8 /* Error Status Register (16b)
46 *
47 * 15 reserved
48 * 14 Isochronous TBWRR Run Behind FIFO Full
49 * (ITCV)
50 * 13 Isochronous TBWRR Run Behind FIFO Put
51 * (ITSTV)
52 * 12 reserved
53 * 11 MCH Thermal Sensor Event
54 * for SMI/SCI/SERR (GTSE)
55 * 10 reserved
56 * 9 LOCK to non-DRAM Memory Flag (LCKF)
57 * 8 reserved
58 * 7 DRAM Throttle Flag (DTF)
59 * 6:2 reserved
60 * 1 Multi-bit DRAM ECC Error Flag (DMERR)
61 * 0 Single-bit DRAM ECC Error Flag (DSERR)
62 */
63#define X38_ERRSTS_UE 0x0002
64#define X38_ERRSTS_CE 0x0001
65#define X38_ERRSTS_BITS (X38_ERRSTS_UE | X38_ERRSTS_CE)
66
67
68/* Intel MMIO register space - device 0 function 0 - MMR space */
69
70#define X38_C0DRB 0x200 /* Channel 0 DRAM Rank Boundary (16b x 4)
71 *
72 * 15:10 reserved
73 * 9:0 Channel 0 DRAM Rank Boundary Address
74 */
75#define X38_C1DRB 0x600 /* Channel 1 DRAM Rank Boundary (16b x 4) */
76#define X38_DRB_MASK 0x3ff /* bits 9:0 */
77#define X38_DRB_SHIFT 26 /* 64MiB grain */
78
79#define X38_C0ECCERRLOG 0x280 /* Channel 0 ECC Error Log (64b)
80 *
81 * 63:48 Error Column Address (ERRCOL)
82 * 47:32 Error Row Address (ERRROW)
83 * 31:29 Error Bank Address (ERRBANK)
84 * 28:27 Error Rank Address (ERRRANK)
85 * 26:24 reserved
86 * 23:16 Error Syndrome (ERRSYND)
87 * 15: 2 reserved
88 * 1 Multiple Bit Error Status (MERRSTS)
89 * 0 Correctable Error Status (CERRSTS)
90 */
91#define X38_C1ECCERRLOG 0x680 /* Channel 1 ECC Error Log (64b) */
92#define X38_ECCERRLOG_CE 0x1
93#define X38_ECCERRLOG_UE 0x2
94#define X38_ECCERRLOG_RANK_BITS 0x18000000
95#define X38_ECCERRLOG_SYNDROME_BITS 0xff0000
96
97#define X38_CAPID0 0xe0 /* see P.94 of spec for details */
98
99static int x38_channel_num;
100
101static int how_many_channel(struct pci_dev *pdev)
102{
103 unsigned char capid0_8b; /* 8th byte of CAPID0 */
104
105 pci_read_config_byte(pdev, X38_CAPID0 + 8, &capid0_8b);
106 if (capid0_8b & 0x20) { /* check DCD: Dual Channel Disable */
107 debugf0("In single channel mode.\n");
108 x38_channel_num = 1;
109 } else {
110 debugf0("In dual channel mode.\n");
111 x38_channel_num = 2;
112 }
113
114 return x38_channel_num;
115}
116
117static unsigned long eccerrlog_syndrome(u64 log)
118{
119 return (log & X38_ECCERRLOG_SYNDROME_BITS) >> 16;
120}
121
122static int eccerrlog_row(int channel, u64 log)
123{
124 return ((log & X38_ECCERRLOG_RANK_BITS) >> 27) |
125 (channel * X38_RANKS_PER_CHANNEL);
126}
127
128enum x38_chips {
129 X38 = 0,
130};
131
132struct x38_dev_info {
133 const char *ctl_name;
134};
135
136struct x38_error_info {
137 u16 errsts;
138 u16 errsts2;
139 u64 eccerrlog[X38_CHANNELS];
140};
141
142static const struct x38_dev_info x38_devs[] = {
143 [X38] = {
144 .ctl_name = "x38"},
145};
146
147static struct pci_dev *mci_pdev;
148static int x38_registered = 1;
149
150
151static void x38_clear_error_info(struct mem_ctl_info *mci)
152{
153 struct pci_dev *pdev;
154
155 pdev = to_pci_dev(mci->dev);
156
157 /*
158 * Clear any error bits.
159 * (Yes, we really clear bits by writing 1 to them.)
160 */
161 pci_write_bits16(pdev, X38_ERRSTS, X38_ERRSTS_BITS,
162 X38_ERRSTS_BITS);
163}
164
165static u64 x38_readq(const void __iomem *addr)
166{
167 return readl(addr) | (((u64)readl(addr + 4)) << 32);
168}
169
170static void x38_get_and_clear_error_info(struct mem_ctl_info *mci,
171 struct x38_error_info *info)
172{
173 struct pci_dev *pdev;
174 void __iomem *window = mci->pvt_info;
175
176 pdev = to_pci_dev(mci->dev);
177
178 /*
179 * This is a mess because there is no atomic way to read all the
180 * registers at once and the registers can transition from CE being
181 * overwritten by UE.
182 */
183 pci_read_config_word(pdev, X38_ERRSTS, &info->errsts);
184 if (!(info->errsts & X38_ERRSTS_BITS))
185 return;
186
187 info->eccerrlog[0] = x38_readq(window + X38_C0ECCERRLOG);
188 if (x38_channel_num == 2)
189 info->eccerrlog[1] = x38_readq(window + X38_C1ECCERRLOG);
190
191 pci_read_config_word(pdev, X38_ERRSTS, &info->errsts2);
192
193 /*
194 * If the error is the same for both reads then the first set
195 * of reads is valid. If there is a change then there is a CE
196 * with no info and the second set of reads is valid and
197 * should be UE info.
198 */
199 if ((info->errsts ^ info->errsts2) & X38_ERRSTS_BITS) {
200 info->eccerrlog[0] = x38_readq(window + X38_C0ECCERRLOG);
201 if (x38_channel_num == 2)
202 info->eccerrlog[1] =
203 x38_readq(window + X38_C1ECCERRLOG);
204 }
205
206 x38_clear_error_info(mci);
207}
208
209static void x38_process_error_info(struct mem_ctl_info *mci,
210 struct x38_error_info *info)
211{
212 int channel;
213 u64 log;
214
215 if (!(info->errsts & X38_ERRSTS_BITS))
216 return;
217
218 if ((info->errsts ^ info->errsts2) & X38_ERRSTS_BITS) {
219 edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
220 info->errsts = info->errsts2;
221 }
222
223 for (channel = 0; channel < x38_channel_num; channel++) {
224 log = info->eccerrlog[channel];
225 if (log & X38_ECCERRLOG_UE) {
226 edac_mc_handle_ue(mci, 0, 0,
227 eccerrlog_row(channel, log), "x38 UE");
228 } else if (log & X38_ECCERRLOG_CE) {
229 edac_mc_handle_ce(mci, 0, 0,
230 eccerrlog_syndrome(log),
231 eccerrlog_row(channel, log), 0, "x38 CE");
232 }
233 }
234}
235
236static void x38_check(struct mem_ctl_info *mci)
237{
238 struct x38_error_info info;
239
240 debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
241 x38_get_and_clear_error_info(mci, &info);
242 x38_process_error_info(mci, &info);
243}
244
245
246void __iomem *x38_map_mchbar(struct pci_dev *pdev)
247{
248 union {
249 u64 mchbar;
250 struct {
251 u32 mchbar_low;
252 u32 mchbar_high;
253 };
254 } u;
255 void __iomem *window;
256
257 pci_read_config_dword(pdev, X38_MCHBAR_LOW, &u.mchbar_low);
258 pci_write_config_dword(pdev, X38_MCHBAR_LOW, u.mchbar_low | 0x1);
259 pci_read_config_dword(pdev, X38_MCHBAR_HIGH, &u.mchbar_high);
260 u.mchbar &= X38_MCHBAR_MASK;
261
262 if (u.mchbar != (resource_size_t)u.mchbar) {
263 printk(KERN_ERR
264 "x38: mmio space beyond accessible range (0x%llx)\n",
265 (unsigned long long)u.mchbar);
266 return NULL;
267 }
268
269 window = ioremap_nocache(u.mchbar, X38_MMR_WINDOW_SIZE);
270 if (!window)
271 printk(KERN_ERR "x38: cannot map mmio space at 0x%llx\n",
272 (unsigned long long)u.mchbar);
273
274 return window;
275}
276
277
278static void x38_get_drbs(void __iomem *window,
279 u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL])
280{
281 int i;
282
283 for (i = 0; i < X38_RANKS_PER_CHANNEL; i++) {
284 drbs[0][i] = readw(window + X38_C0DRB + 2*i) & X38_DRB_MASK;
285 drbs[1][i] = readw(window + X38_C1DRB + 2*i) & X38_DRB_MASK;
286 }
287}
288
289static bool x38_is_stacked(struct pci_dev *pdev,
290 u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL])
291{
292 u16 tom;
293
294 pci_read_config_word(pdev, X38_TOM, &tom);
295 tom &= X38_TOM_MASK;
296
297 return drbs[X38_CHANNELS - 1][X38_RANKS_PER_CHANNEL - 1] == tom;
298}
299
300static unsigned long drb_to_nr_pages(
301 u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL],
302 bool stacked, int channel, int rank)
303{
304 int n;
305
306 n = drbs[channel][rank];
307 if (rank > 0)
308 n -= drbs[channel][rank - 1];
309 if (stacked && (channel == 1) && drbs[channel][rank] ==
310 drbs[channel][X38_RANKS_PER_CHANNEL - 1]) {
311 n -= drbs[0][X38_RANKS_PER_CHANNEL - 1];
312 }
313
314 n <<= (X38_DRB_SHIFT - PAGE_SHIFT);
315 return n;
316}
317
318static int x38_probe1(struct pci_dev *pdev, int dev_idx)
319{
320 int rc;
321 int i;
322 struct mem_ctl_info *mci = NULL;
323 unsigned long last_page;
324 u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL];
325 bool stacked;
326 void __iomem *window;
327
328 debugf0("MC: %s()\n", __func__);
329
330 window = x38_map_mchbar(pdev);
331 if (!window)
332 return -ENODEV;
333
334 x38_get_drbs(window, drbs);
335
336 how_many_channel(pdev);
337
338 /* FIXME: unconventional pvt_info usage */
339 mci = edac_mc_alloc(0, X38_RANKS, x38_channel_num, 0);
340 if (!mci)
341 return -ENOMEM;
342
343 debugf3("MC: %s(): init mci\n", __func__);
344
345 mci->dev = &pdev->dev;
346 mci->mtype_cap = MEM_FLAG_DDR2;
347
348 mci->edac_ctl_cap = EDAC_FLAG_SECDED;
349 mci->edac_cap = EDAC_FLAG_SECDED;
350
351 mci->mod_name = EDAC_MOD_STR;
352 mci->mod_ver = X38_REVISION;
353 mci->ctl_name = x38_devs[dev_idx].ctl_name;
354 mci->dev_name = pci_name(pdev);
355 mci->edac_check = x38_check;
356 mci->ctl_page_to_phys = NULL;
357 mci->pvt_info = window;
358
359 stacked = x38_is_stacked(pdev, drbs);
360
361 /*
362 * The dram rank boundary (DRB) reg values are boundary addresses
363 * for each DRAM rank with a granularity of 64MB. DRB regs are
364 * cumulative; the last one will contain the total memory
365 * contained in all ranks.
366 */
367 last_page = -1UL;
368 for (i = 0; i < mci->nr_csrows; i++) {
369 unsigned long nr_pages;
370 struct csrow_info *csrow = &mci->csrows[i];
371
372 nr_pages = drb_to_nr_pages(drbs, stacked,
373 i / X38_RANKS_PER_CHANNEL,
374 i % X38_RANKS_PER_CHANNEL);
375
376 if (nr_pages == 0) {
377 csrow->mtype = MEM_EMPTY;
378 continue;
379 }
380
381 csrow->first_page = last_page + 1;
382 last_page += nr_pages;
383 csrow->last_page = last_page;
384 csrow->nr_pages = nr_pages;
385
386 csrow->grain = nr_pages << PAGE_SHIFT;
387 csrow->mtype = MEM_DDR2;
388 csrow->dtype = DEV_UNKNOWN;
389 csrow->edac_mode = EDAC_UNKNOWN;
390 }
391
392 x38_clear_error_info(mci);
393
394 rc = -ENODEV;
395 if (edac_mc_add_mc(mci)) {
396 debugf3("MC: %s(): failed edac_mc_add_mc()\n", __func__);
397 goto fail;
398 }
399
400 /* get this far and it's successful */
401 debugf3("MC: %s(): success\n", __func__);
402 return 0;
403
404fail:
405 iounmap(window);
406 if (mci)
407 edac_mc_free(mci);
408
409 return rc;
410}
411
412static int __devinit x38_init_one(struct pci_dev *pdev,
413 const struct pci_device_id *ent)
414{
415 int rc;
416
417 debugf0("MC: %s()\n", __func__);
418
419 if (pci_enable_device(pdev) < 0)
420 return -EIO;
421
422 rc = x38_probe1(pdev, ent->driver_data);
423 if (!mci_pdev)
424 mci_pdev = pci_dev_get(pdev);
425
426 return rc;
427}
428
429static void __devexit x38_remove_one(struct pci_dev *pdev)
430{
431 struct mem_ctl_info *mci;
432
433 debugf0("%s()\n", __func__);
434
435 mci = edac_mc_del_mc(&pdev->dev);
436 if (!mci)
437 return;
438
439 iounmap(mci->pvt_info);
440
441 edac_mc_free(mci);
442}
443
444static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
445 {
446 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
447 X38},
448 {
449 0,
450 } /* 0 terminated list. */
451};
452
453MODULE_DEVICE_TABLE(pci, x38_pci_tbl);
454
455static struct pci_driver x38_driver = {
456 .name = EDAC_MOD_STR,
457 .probe = x38_init_one,
458 .remove = __devexit_p(x38_remove_one),
459 .id_table = x38_pci_tbl,
460};
461
462static int __init x38_init(void)
463{
464 int pci_rc;
465
466 debugf3("MC: %s()\n", __func__);
467
468 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
469 opstate_init();
470
471 pci_rc = pci_register_driver(&x38_driver);
472 if (pci_rc < 0)
473 goto fail0;
474
475 if (!mci_pdev) {
476 x38_registered = 0;
477 mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
478 PCI_DEVICE_ID_INTEL_X38_HB, NULL);
479 if (!mci_pdev) {
480 debugf0("x38 pci_get_device fail\n");
481 pci_rc = -ENODEV;
482 goto fail1;
483 }
484
485 pci_rc = x38_init_one(mci_pdev, x38_pci_tbl);
486 if (pci_rc < 0) {
487 debugf0("x38 init fail\n");
488 pci_rc = -ENODEV;
489 goto fail1;
490 }
491 }
492
493 return 0;
494
495fail1:
496 pci_unregister_driver(&x38_driver);
497
498fail0:
499 if (mci_pdev)
500 pci_dev_put(mci_pdev);
501
502 return pci_rc;
503}
504
505static void __exit x38_exit(void)
506{
507 debugf3("MC: %s()\n", __func__);
508
509 pci_unregister_driver(&x38_driver);
510 if (!x38_registered) {
511 x38_remove_one(mci_pdev);
512 pci_dev_put(mci_pdev);
513 }
514}
515
516module_init(x38_init);
517module_exit(x38_exit);
518
519MODULE_LICENSE("GPL");
520MODULE_AUTHOR("Cluster Computing, Inc. Hitoshi Mitake");
521MODULE_DESCRIPTION("MC support for Intel X38 memory hub controllers");
522
523module_param(edac_op_state, int, 0444);
524MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
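The new x38 driver derives csrow sizes from the cumulative DRAM rank boundary (DRB) registers, each unit of which marks a 64 MiB boundary (X38_DRB_SHIFT = 26). A worked sketch of the drb_to_nr_pages() arithmetic without the stacked-mode special case, reusing the driver's constants and made-up register values:

    /* DRBs are cumulative: with channel-0 DRBs {4, 8, 8, 8}, rank 0 spans
     * 4 units and rank 1 spans 8 - 4 = 4 units; ranks 2 and 3 are empty.
     * One unit is 1 << (26 - PAGE_SHIFT) pages, i.e. 16384 pages (64 MiB)
     * with 4 KiB pages, so each populated rank here is 65536 pages. */
    static unsigned long drb_pages(u16 drb_cur, u16 drb_prev)
    {
            return (unsigned long)(drb_cur - drb_prev)
                    << (X38_DRB_SHIFT - PAGE_SHIFT);
    }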
diff --git a/drivers/firewire/fw-device.c b/drivers/firewire/fw-device.c
index 3fccdd484100..6b9be42c7b98 100644
--- a/drivers/firewire/fw-device.c
+++ b/drivers/firewire/fw-device.c
@@ -587,8 +587,7 @@ static void create_units(struct fw_device *device)
587 unit->device.bus = &fw_bus_type; 587 unit->device.bus = &fw_bus_type;
588 unit->device.type = &fw_unit_type; 588 unit->device.type = &fw_unit_type;
589 unit->device.parent = &device->device; 589 unit->device.parent = &device->device;
590 snprintf(unit->device.bus_id, sizeof(unit->device.bus_id), 590 dev_set_name(&unit->device, "%s.%d", dev_name(&device->device), i++);
591 "%s.%d", device->device.bus_id, i++);
592 591
593 init_fw_attribute_group(&unit->device, 592 init_fw_attribute_group(&unit->device,
594 fw_unit_attributes, 593 fw_unit_attributes,
@@ -711,8 +710,7 @@ static void fw_device_init(struct work_struct *work)
711 device->device.type = &fw_device_type; 710 device->device.type = &fw_device_type;
712 device->device.parent = device->card->device; 711 device->device.parent = device->card->device;
713 device->device.devt = MKDEV(fw_cdev_major, minor); 712 device->device.devt = MKDEV(fw_cdev_major, minor);
714 snprintf(device->device.bus_id, sizeof(device->device.bus_id), 713 dev_set_name(&device->device, "fw%d", minor);
715 "fw%d", minor);
716 714
717 init_fw_attribute_group(&device->device, 715 init_fw_attribute_group(&device->device,
718 fw_device_attributes, 716 fw_device_attributes,
@@ -741,13 +739,13 @@ static void fw_device_init(struct work_struct *work)
741 if (device->config_rom_retries) 739 if (device->config_rom_retries)
742 fw_notify("created device %s: GUID %08x%08x, S%d00, " 740 fw_notify("created device %s: GUID %08x%08x, S%d00, "
743 "%d config ROM retries\n", 741 "%d config ROM retries\n",
744 device->device.bus_id, 742 dev_name(&device->device),
745 device->config_rom[3], device->config_rom[4], 743 device->config_rom[3], device->config_rom[4],
746 1 << device->max_speed, 744 1 << device->max_speed,
747 device->config_rom_retries); 745 device->config_rom_retries);
748 else 746 else
749 fw_notify("created device %s: GUID %08x%08x, S%d00\n", 747 fw_notify("created device %s: GUID %08x%08x, S%d00\n",
750 device->device.bus_id, 748 dev_name(&device->device),
751 device->config_rom[3], device->config_rom[4], 749 device->config_rom[3], device->config_rom[4],
752 1 << device->max_speed); 750 1 << device->max_speed);
753 device->config_rom_retries = 0; 751 device->config_rom_retries = 0;
@@ -883,12 +881,12 @@ static void fw_device_refresh(struct work_struct *work)
883 FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN) 881 FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN)
884 goto gone; 882 goto gone;
885 883
886 fw_notify("refreshed device %s\n", device->device.bus_id); 884 fw_notify("refreshed device %s\n", dev_name(&device->device));
887 device->config_rom_retries = 0; 885 device->config_rom_retries = 0;
888 goto out; 886 goto out;
889 887
890 give_up: 888 give_up:
891 fw_notify("giving up on refresh of device %s\n", device->device.bus_id); 889 fw_notify("giving up on refresh of device %s\n", dev_name(&device->device));
892 gone: 890 gone:
893 atomic_set(&device->state, FW_DEVICE_SHUTDOWN); 891 atomic_set(&device->state, FW_DEVICE_SHUTDOWN);
894 fw_device_shutdown(work); 892 fw_device_shutdown(work);
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
index 251416f2148f..ab9c01e462ef 100644
--- a/drivers/firewire/fw-ohci.c
+++ b/drivers/firewire/fw-ohci.c
@@ -476,6 +476,7 @@ static int ar_context_add_page(struct ar_context *ctx)
476 if (ab == NULL) 476 if (ab == NULL)
477 return -ENOMEM; 477 return -ENOMEM;
478 478
479 ab->next = NULL;
479 memset(&ab->descriptor, 0, sizeof(ab->descriptor)); 480 memset(&ab->descriptor, 0, sizeof(ab->descriptor));
480 ab->descriptor.control = cpu_to_le16(DESCRIPTOR_INPUT_MORE | 481 ab->descriptor.control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
481 DESCRIPTOR_STATUS | 482 DESCRIPTOR_STATUS |
@@ -496,6 +497,21 @@ static int ar_context_add_page(struct ar_context *ctx)
496 return 0; 497 return 0;
497} 498}
498 499
500static void ar_context_release(struct ar_context *ctx)
501{
502 struct ar_buffer *ab, *ab_next;
503 size_t offset;
504 dma_addr_t ab_bus;
505
506 for (ab = ctx->current_buffer; ab; ab = ab_next) {
507 ab_next = ab->next;
508 offset = offsetof(struct ar_buffer, data);
509 ab_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
510 dma_free_coherent(ctx->ohci->card.device, PAGE_SIZE,
511 ab, ab_bus);
512 }
513}
514
499#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32) 515#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
500#define cond_le32_to_cpu(v) \ 516#define cond_le32_to_cpu(v) \
501 (ohci->old_uninorth ? (__force __u32)(v) : le32_to_cpu(v)) 517 (ohci->old_uninorth ? (__force __u32)(v) : le32_to_cpu(v))
@@ -958,6 +974,7 @@ at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
958 packet->ack = RCODE_SEND_ERROR; 974 packet->ack = RCODE_SEND_ERROR;
959 return -1; 975 return -1;
960 } 976 }
977 packet->payload_bus = payload_bus;
961 978
962 d[2].req_count = cpu_to_le16(packet->payload_length); 979 d[2].req_count = cpu_to_le16(packet->payload_length);
963 d[2].data_address = cpu_to_le32(payload_bus); 980 d[2].data_address = cpu_to_le32(payload_bus);
@@ -1009,7 +1026,6 @@ static int handle_at_packet(struct context *context,
1009 struct driver_data *driver_data; 1026 struct driver_data *driver_data;
1010 struct fw_packet *packet; 1027 struct fw_packet *packet;
1011 struct fw_ohci *ohci = context->ohci; 1028 struct fw_ohci *ohci = context->ohci;
1012 dma_addr_t payload_bus;
1013 int evt; 1029 int evt;
1014 1030
1015 if (last->transfer_status == 0) 1031 if (last->transfer_status == 0)
@@ -1022,9 +1038,8 @@ static int handle_at_packet(struct context *context,
1022 /* This packet was cancelled, just continue. */ 1038 /* This packet was cancelled, just continue. */
1023 return 1; 1039 return 1;
1024 1040
1025 payload_bus = le32_to_cpu(last->data_address); 1041 if (packet->payload_bus)
1026 if (payload_bus != 0) 1042 dma_unmap_single(ohci->card.device, packet->payload_bus,
1027 dma_unmap_single(ohci->card.device, payload_bus,
1028 packet->payload_length, DMA_TO_DEVICE); 1043 packet->payload_length, DMA_TO_DEVICE);
1029 1044
1030 evt = le16_to_cpu(last->transfer_status) & 0x1f; 1045 evt = le16_to_cpu(last->transfer_status) & 0x1f;
@@ -1681,6 +1696,10 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
1681 if (packet->ack != 0) 1696 if (packet->ack != 0)
1682 goto out; 1697 goto out;
1683 1698
1699 if (packet->payload_bus)
1700 dma_unmap_single(ohci->card.device, packet->payload_bus,
1701 packet->payload_length, DMA_TO_DEVICE);
1702
1684 log_ar_at_event('T', packet->speed, packet->header, 0x20); 1703 log_ar_at_event('T', packet->speed, packet->header, 0x20);
1685 driver_data->packet = NULL; 1704 driver_data->packet = NULL;
1686 packet->ack = RCODE_CANCELLED; 1705 packet->ack = RCODE_CANCELLED;
@@ -2349,8 +2368,8 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
2349 2368
2350 ohci = kzalloc(sizeof(*ohci), GFP_KERNEL); 2369 ohci = kzalloc(sizeof(*ohci), GFP_KERNEL);
2351 if (ohci == NULL) { 2370 if (ohci == NULL) {
2352 fw_error("Could not malloc fw_ohci data.\n"); 2371 err = -ENOMEM;
2353 return -ENOMEM; 2372 goto fail;
2354 } 2373 }
2355 2374
2356 fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev); 2375 fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);
@@ -2359,7 +2378,7 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
2359 2378
2360 err = pci_enable_device(dev); 2379 err = pci_enable_device(dev);
2361 if (err) { 2380 if (err) {
2362 fw_error("Failed to enable OHCI hardware.\n"); 2381 fw_error("Failed to enable OHCI hardware\n");
2363 goto fail_free; 2382 goto fail_free;
2364 } 2383 }
2365 2384
@@ -2427,9 +2446,8 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
2427 ohci->ir_context_list = kzalloc(size, GFP_KERNEL); 2446 ohci->ir_context_list = kzalloc(size, GFP_KERNEL);
2428 2447
2429 if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) { 2448 if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) {
2430 fw_error("Out of memory for it/ir contexts.\n");
2431 err = -ENOMEM; 2449 err = -ENOMEM;
2432 goto fail_registers; 2450 goto fail_contexts;
2433 } 2451 }
2434 2452
2435 /* self-id dma buffer allocation */ 2453 /* self-id dma buffer allocation */
@@ -2438,9 +2456,8 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
2438 &ohci->self_id_bus, 2456 &ohci->self_id_bus,
2439 GFP_KERNEL); 2457 GFP_KERNEL);
2440 if (ohci->self_id_cpu == NULL) { 2458 if (ohci->self_id_cpu == NULL) {
2441 fw_error("Out of memory for self ID buffer.\n");
2442 err = -ENOMEM; 2459 err = -ENOMEM;
2443 goto fail_registers; 2460 goto fail_contexts;
2444 } 2461 }
2445 2462
2446 bus_options = reg_read(ohci, OHCI1394_BusOptions); 2463 bus_options = reg_read(ohci, OHCI1394_BusOptions);
@@ -2454,15 +2471,19 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
2454 goto fail_self_id; 2471 goto fail_self_id;
2455 2472
2456 fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n", 2473 fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n",
2457 dev->dev.bus_id, version >> 16, version & 0xff); 2474 dev_name(&dev->dev), version >> 16, version & 0xff);
2458 return 0; 2475 return 0;
2459 2476
2460 fail_self_id: 2477 fail_self_id:
2461 dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE, 2478 dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
2462 ohci->self_id_cpu, ohci->self_id_bus); 2479 ohci->self_id_cpu, ohci->self_id_bus);
2463 fail_registers: 2480 fail_contexts:
2464 kfree(ohci->it_context_list);
2465 kfree(ohci->ir_context_list); 2481 kfree(ohci->ir_context_list);
2482 kfree(ohci->it_context_list);
2483 context_release(&ohci->at_response_ctx);
2484 context_release(&ohci->at_request_ctx);
2485 ar_context_release(&ohci->ar_response_ctx);
2486 ar_context_release(&ohci->ar_request_ctx);
2466 pci_iounmap(dev, ohci->registers); 2487 pci_iounmap(dev, ohci->registers);
2467 fail_iomem: 2488 fail_iomem:
2468 pci_release_region(dev, 0); 2489 pci_release_region(dev, 0);
@@ -2471,6 +2492,9 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
2471 fail_free: 2492 fail_free:
2472 kfree(&ohci->card); 2493 kfree(&ohci->card);
2473 ohci_pmac_off(dev); 2494 ohci_pmac_off(dev);
2495 fail:
2496 if (err == -ENOMEM)
2497 fw_error("Out of memory\n");
2474 2498
2475 return err; 2499 return err;
2476} 2500}
@@ -2491,8 +2515,19 @@ static void pci_remove(struct pci_dev *dev)
2491 2515
2492 software_reset(ohci); 2516 software_reset(ohci);
2493 free_irq(dev->irq, ohci); 2517 free_irq(dev->irq, ohci);
2518
2519 if (ohci->next_config_rom && ohci->next_config_rom != ohci->config_rom)
2520 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
2521 ohci->next_config_rom, ohci->next_config_rom_bus);
2522 if (ohci->config_rom)
2523 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
2524 ohci->config_rom, ohci->config_rom_bus);
2494 dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE, 2525 dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
2495 ohci->self_id_cpu, ohci->self_id_bus); 2526 ohci->self_id_cpu, ohci->self_id_bus);
2527 ar_context_release(&ohci->ar_request_ctx);
2528 ar_context_release(&ohci->ar_response_ctx);
2529 context_release(&ohci->at_request_ctx);
2530 context_release(&ohci->at_response_ctx);
2496 kfree(ohci->it_context_list); 2531 kfree(ohci->it_context_list);
2497 kfree(ohci->ir_context_list); 2532 kfree(ohci->ir_context_list);
2498 pci_iounmap(dev, ohci->registers); 2533 pci_iounmap(dev, ohci->registers);
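Storing the streaming-DMA handle in the packet itself is what lets both handle_at_packet() and ohci_cancel_packet() unmap the payload, instead of re-deriving the address from the descriptor. A minimal sketch of the shared helper such a scheme implies (the helper is illustrative; the payload_bus and payload_length fields are the ones this patch adds to struct fw_packet in fw-transaction.h):

    #include <linux/dma-mapping.h>

    /* Unmap a packet's payload at most once; payload_bus == 0 means
     * "nothing mapped" (clearing it here is a sketch convenience). */
    static void unmap_payload(struct device *dev, struct fw_packet *packet)
    {
            if (!packet->payload_bus)
                    return;
            dma_unmap_single(dev, packet->payload_bus,
                             packet->payload_length, DMA_TO_DEVICE);
            packet->payload_bus = 0;
    }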
diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c
index ef0b9b419c27..e54403ee59e7 100644
--- a/drivers/firewire/fw-sbp2.c
+++ b/drivers/firewire/fw-sbp2.c
@@ -173,6 +173,9 @@ struct sbp2_target {
173 int blocked; /* ditto */ 173 int blocked; /* ditto */
174}; 174};
175 175
176/* Impossible login_id, to detect logout attempt before successful login */
177#define INVALID_LOGIN_ID 0x10000
178
176/* 179/*
177 * Per section 7.4.8 of the SBP-2 spec, a mgt_ORB_timeout value can be 180 * Per section 7.4.8 of the SBP-2 spec, a mgt_ORB_timeout value can be
178 * provided in the config rom. Most devices do provide a value, which 181 * provided in the config rom. Most devices do provide a value, which
@@ -369,6 +372,11 @@ static const struct {
369 }, 372 },
370 /* iPod mini */ { 373 /* iPod mini */ {
371 .firmware_revision = 0x0a2700, 374 .firmware_revision = 0x0a2700,
375 .model = 0x000022,
376 .workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
377 },
378 /* iPod mini */ {
379 .firmware_revision = 0x0a2700,
372 .model = 0x000023, 380 .model = 0x000023,
373 .workarounds = SBP2_WORKAROUND_FIX_CAPACITY, 381 .workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
374 }, 382 },
@@ -788,9 +796,20 @@ static void sbp2_release_target(struct kref *kref)
788 scsi_remove_device(sdev); 796 scsi_remove_device(sdev);
789 scsi_device_put(sdev); 797 scsi_device_put(sdev);
790 } 798 }
791 sbp2_send_management_orb(lu, tgt->node_id, lu->generation, 799 if (lu->login_id != INVALID_LOGIN_ID) {
792 SBP2_LOGOUT_REQUEST, lu->login_id, NULL); 800 int generation, node_id;
793 801 /*
802 * tgt->node_id may be obsolete here if we failed
803 * during initial login or after a bus reset where
804 * the topology changed.
805 */
806 generation = device->generation;
807 smp_rmb(); /* node_id vs. generation */
808 node_id = device->node_id;
809 sbp2_send_management_orb(lu, node_id, generation,
810 SBP2_LOGOUT_REQUEST,
811 lu->login_id, NULL);
812 }
794 fw_core_remove_address_handler(&lu->address_handler); 813 fw_core_remove_address_handler(&lu->address_handler);
795 list_del(&lu->link); 814 list_del(&lu->link);
796 kfree(lu); 815 kfree(lu);
@@ -805,19 +824,20 @@ static void sbp2_release_target(struct kref *kref)
805 824
806static struct workqueue_struct *sbp2_wq; 825static struct workqueue_struct *sbp2_wq;
807 826
827static void sbp2_target_put(struct sbp2_target *tgt)
828{
829 kref_put(&tgt->kref, sbp2_release_target);
830}
831
808/* 832/*
809 * Always get the target's kref when scheduling work on one its units. 833 * Always get the target's kref when scheduling work on one its units.
810 * Each workqueue job is responsible to call sbp2_target_put() upon return. 834 * Each workqueue job is responsible to call sbp2_target_put() upon return.
811 */ 835 */
812static void sbp2_queue_work(struct sbp2_logical_unit *lu, unsigned long delay) 836static void sbp2_queue_work(struct sbp2_logical_unit *lu, unsigned long delay)
813{ 837{
814 if (queue_delayed_work(sbp2_wq, &lu->work, delay)) 838 kref_get(&lu->tgt->kref);
815 kref_get(&lu->tgt->kref); 839 if (!queue_delayed_work(sbp2_wq, &lu->work, delay))
816} 840 sbp2_target_put(lu->tgt);
817
818static void sbp2_target_put(struct sbp2_target *tgt)
819{
820 kref_put(&tgt->kref, sbp2_release_target);
821} 841}
822 842
823/* 843/*
@@ -978,6 +998,7 @@ static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
978 998
979 lu->tgt = tgt; 999 lu->tgt = tgt;
980 lu->lun = lun_entry & 0xffff; 1000 lu->lun = lun_entry & 0xffff;
1001 lu->login_id = INVALID_LOGIN_ID;
981 lu->retries = 0; 1002 lu->retries = 0;
982 lu->has_sdev = false; 1003 lu->has_sdev = false;
983 lu->blocked = false; 1004 lu->blocked = false;
@@ -1119,7 +1140,7 @@ static int sbp2_probe(struct device *dev)
1119 tgt->unit = unit; 1140 tgt->unit = unit;
1120 kref_init(&tgt->kref); 1141 kref_init(&tgt->kref);
1121 INIT_LIST_HEAD(&tgt->lu_list); 1142 INIT_LIST_HEAD(&tgt->lu_list);
1122 tgt->bus_id = unit->device.bus_id; 1143 tgt->bus_id = dev_name(&unit->device);
1123 tgt->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4]; 1144 tgt->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4];
1124 1145
1125 if (fw_device_enable_phys_dma(device) < 0) 1146 if (fw_device_enable_phys_dma(device) < 0)
@@ -1147,7 +1168,7 @@ static int sbp2_probe(struct device *dev)
1147 1168
1148 /* Do the login in a workqueue so we can easily reschedule retries. */ 1169 /* Do the login in a workqueue so we can easily reschedule retries. */
1149 list_for_each_entry(lu, &tgt->lu_list, link) 1170 list_for_each_entry(lu, &tgt->lu_list, link)
1150 sbp2_queue_work(lu, 0); 1171 sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
1151 return 0; 1172 return 0;
1152 1173
1153 fail_tgt_put: 1174 fail_tgt_put:
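Moving the kref_get() ahead of queue_delayed_work() closes a small window: with the old ordering the queued work could run, and drop the target's reference, before the caller's get had happened. With the new ordering the reference is guaranteed to exist while the work is queued, and the extra get is simply undone when the work turned out to be pending already. A generic sketch of the pattern (the types and release function are illustrative):

    #include <linux/kref.h>
    #include <linux/workqueue.h>

    struct tgt { struct kref kref; struct delayed_work work; };

    static void tgt_release(struct kref *kref) { /* free the target */ }

    static void tgt_queue_work(struct workqueue_struct *wq,
                               struct tgt *t, unsigned long delay)
    {
            kref_get(&t->kref);                      /* before queueing */
            if (!queue_delayed_work(wq, &t->work, delay))
                    kref_put(&t->kref, tgt_release); /* was already queued */
    }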
diff --git a/drivers/firewire/fw-topology.c b/drivers/firewire/fw-topology.c
index c1b81077c4a8..5e204713002d 100644
--- a/drivers/firewire/fw-topology.c
+++ b/drivers/firewire/fw-topology.c
@@ -413,7 +413,7 @@ static void
413update_tree(struct fw_card *card, struct fw_node *root) 413update_tree(struct fw_card *card, struct fw_node *root)
414{ 414{
415 struct list_head list0, list1; 415 struct list_head list0, list1;
416 struct fw_node *node0, *node1; 416 struct fw_node *node0, *node1, *next1;
417 int i, event; 417 int i, event;
418 418
419 INIT_LIST_HEAD(&list0); 419 INIT_LIST_HEAD(&list0);
@@ -485,7 +485,9 @@ update_tree(struct fw_card *card, struct fw_node *root)
485 } 485 }
486 486
487 node0 = fw_node(node0->link.next); 487 node0 = fw_node(node0->link.next);
488 node1 = fw_node(node1->link.next); 488 next1 = fw_node(node1->link.next);
489 fw_node_put(node1);
490 node1 = next1;
489 } 491 }
490} 492}
491 493
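The update_tree() fix reads the next node before dropping the reference, since fw_node_put() may free node1 along with its list linkage. The same "capture next, then release current" shape applies whenever a list is walked while its elements may be freed; a generic sketch:

    #include <linux/slab.h>

    struct item { struct item *next; };

    static void free_all(struct item *head)
    {
            struct item *it, *next;

            for (it = head; it; it = next) {
                    next = it->next;        /* read before kfree() */
                    kfree(it);
            }
    }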
diff --git a/drivers/firewire/fw-transaction.c b/drivers/firewire/fw-transaction.c
index 022ac4fabb67..2884f876397b 100644
--- a/drivers/firewire/fw-transaction.c
+++ b/drivers/firewire/fw-transaction.c
@@ -207,6 +207,7 @@ fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
207 packet->speed = speed; 207 packet->speed = speed;
208 packet->generation = generation; 208 packet->generation = generation;
209 packet->ack = 0; 209 packet->ack = 0;
210 packet->payload_bus = 0;
210} 211}
211 212
212/** 213/**
@@ -581,6 +582,8 @@ fw_fill_response(struct fw_packet *response, u32 *request_header,
581 BUG(); 582 BUG();
582 return; 583 return;
583 } 584 }
585
586 response->payload_bus = 0;
584} 587}
585EXPORT_SYMBOL(fw_fill_response); 588EXPORT_SYMBOL(fw_fill_response);
586 589
diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h
index 027f58ce81ad..839466f0a795 100644
--- a/drivers/firewire/fw-transaction.h
+++ b/drivers/firewire/fw-transaction.h
@@ -27,6 +27,7 @@
27#include <linux/list.h> 27#include <linux/list.h>
28#include <linux/spinlock_types.h> 28#include <linux/spinlock_types.h>
29#include <linux/timer.h> 29#include <linux/timer.h>
30#include <linux/types.h>
30#include <linux/workqueue.h> 31#include <linux/workqueue.h>
31 32
32#define TCODE_IS_READ_REQUEST(tcode) (((tcode) & ~1) == 4) 33#define TCODE_IS_READ_REQUEST(tcode) (((tcode) & ~1) == 4)
@@ -153,6 +154,7 @@ struct fw_packet {
153 size_t header_length; 154 size_t header_length;
154 void *payload; 155 void *payload;
155 size_t payload_length; 156 size_t payload_length;
157 dma_addr_t payload_bus;
156 u32 timestamp; 158 u32 timestamp;
157 159
158 /* 160 /*
@@ -248,7 +250,7 @@ struct fw_card {
248 struct fw_node *local_node; 250 struct fw_node *local_node;
249 struct fw_node *root_node; 251 struct fw_node *root_node;
250 struct fw_node *irm_node; 252 struct fw_node *irm_node;
251 int color; 253 u8 color; /* must be u8 to match the definition in struct fw_node */
252 int gap_count; 254 int gap_count;
253 bool beta_repeaters_present; 255 bool beta_repeaters_present;
254 256
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 3e526b6d00cb..4a597d8c2f70 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -81,9 +81,9 @@ static void dmi_table(u8 *buf, int len, int num,
81 const struct dmi_header *dm = (const struct dmi_header *)data; 81 const struct dmi_header *dm = (const struct dmi_header *)data;
82 82
83 /* 83 /*
84 * We want to know the total length (formated area and strings) 84 * We want to know the total length (formatted area and
85 * before decoding to make sure we won't run off the table in 85 * strings) before decoding to make sure we won't run off the
86 * dmi_decode or dmi_string 86 * table in dmi_decode or dmi_string
87 */ 87 */
88 data += dm->length; 88 data += dm->length;
89 while ((data - buf < len - 1) && (data[0] || data[1])) 89 while ((data - buf < len - 1) && (data[0] || data[1]))
@@ -467,6 +467,17 @@ const char *dmi_get_system_info(int field)
467} 467}
468EXPORT_SYMBOL(dmi_get_system_info); 468EXPORT_SYMBOL(dmi_get_system_info);
469 469
470/**
471 * dmi_name_in_serial - Check if string is in the DMI product serial
472 * information.
473 */
474int dmi_name_in_serial(const char *str)
475{
476 int f = DMI_PRODUCT_SERIAL;
477 if (dmi_ident[f] && strstr(dmi_ident[f], str))
478 return 1;
479 return 0;
480}
470 481
471/** 482/**
472 * dmi_name_in_vendors - Check if string is anywhere in the DMI vendor information. 483 * dmi_name_in_vendors - Check if string is anywhere in the DMI vendor information.
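dmi_name_in_serial() mirrors dmi_name_in_vendors() but matches only against DMI_PRODUCT_SERIAL. A hypothetical caller, assuming the matching declaration lands in <linux/dmi.h> (not shown in this hunk); the serial substring is made up:

    #include <linux/dmi.h>

    static bool board_needs_quirk(void)
    {
            return dmi_name_in_serial("EXAMPLE-SN") != 0;
    }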
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index dbd42d6c93a7..48f49d93d249 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -65,6 +65,14 @@ config GPIO_SYSFS
65 65
66# put expanders in the right section, in alphabetical order 66# put expanders in the right section, in alphabetical order
67 67
68comment "Memory mapped GPIO expanders:"
69
70config GPIO_XILINX
71 bool "Xilinx GPIO support"
72 depends on PPC_OF
73 help
74 Say yes here to support the Xilinx FPGA GPIO device
75
68comment "I2C GPIO expanders:" 76comment "I2C GPIO expanders:"
69 77
70config GPIO_MAX732X 78config GPIO_MAX732X
@@ -127,6 +135,13 @@ config GPIO_PCF857X
127 This driver provides an in-kernel interface to those GPIOs using 135 This driver provides an in-kernel interface to those GPIOs using
128 platform-neutral GPIO calls. 136 platform-neutral GPIO calls.
129 137
138config GPIO_TWL4030
139 tristate "TWL4030, TWL5030, and TPS659x0 GPIOs"
140 depends on TWL4030_CORE
141 help
142 Say yes here to access the GPIO signals of various multi-function
143 power management chips from Texas Instruments.
144
130comment "PCI GPIO expanders:" 145comment "PCI GPIO expanders:"
131 146
132config GPIO_BT8XX 147config GPIO_BT8XX
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 01b4bbde1956..49ac64e515e6 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -9,4 +9,6 @@ obj-$(CONFIG_GPIO_MAX732X) += max732x.o
9obj-$(CONFIG_GPIO_MCP23S08) += mcp23s08.o 9obj-$(CONFIG_GPIO_MCP23S08) += mcp23s08.o
10obj-$(CONFIG_GPIO_PCA953X) += pca953x.o 10obj-$(CONFIG_GPIO_PCA953X) += pca953x.o
11obj-$(CONFIG_GPIO_PCF857X) += pcf857x.o 11obj-$(CONFIG_GPIO_PCF857X) += pcf857x.o
12obj-$(CONFIG_GPIO_TWL4030) += twl4030-gpio.o
13obj-$(CONFIG_GPIO_XILINX) += xilinx_gpio.o
12obj-$(CONFIG_GPIO_BT8XX) += bt8xxgpio.o 14obj-$(CONFIG_GPIO_BT8XX) += bt8xxgpio.o
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index faa1cc66e9cf..82020abc329e 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1134,7 +1134,7 @@ static void gpiolib_dbg_show(struct seq_file *s, struct gpio_chip *chip)
1134 continue; 1134 continue;
1135 1135
1136 is_out = test_bit(FLAG_IS_OUT, &gdesc->flags); 1136 is_out = test_bit(FLAG_IS_OUT, &gdesc->flags);
1137 seq_printf(s, " gpio-%-3d (%-12s) %s %s", 1137 seq_printf(s, " gpio-%-3d (%-20.20s) %s %s",
1138 gpio, gdesc->label, 1138 gpio, gdesc->label,
1139 is_out ? "out" : "in ", 1139 is_out ? "out" : "in ",
1140 chip->get 1140 chip->get
diff --git a/drivers/gpio/twl4030-gpio.c b/drivers/gpio/twl4030-gpio.c
new file mode 100644
index 000000000000..37d3eec8730a
--- /dev/null
+++ b/drivers/gpio/twl4030-gpio.c
@@ -0,0 +1,521 @@
1/*
2 * twl4030_gpio.c -- access to GPIOs on TWL4030/TPS659x0 chips
3 *
4 * Copyright (C) 2006-2007 Texas Instruments, Inc.
5 * Copyright (C) 2006 MontaVista Software, Inc.
6 *
7 * Code re-arranged and cleaned up by:
8 * Syed Mohammed Khasim <x0khasim@ti.com>
9 *
10 * Initial Code:
11 * Andy Lowe / Nishanth Menon
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 */
27
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/interrupt.h>
31#include <linux/kthread.h>
32#include <linux/irq.h>
33#include <linux/gpio.h>
34#include <linux/platform_device.h>
35#include <linux/slab.h>
36
37#include <linux/i2c/twl4030.h>
38
39
40/*
41 * The GPIO "subchip" supports 18 GPIOs which can be configured as
42 * inputs or outputs, with pullups or pulldowns on each pin. Each
43 * GPIO can trigger interrupts on either or both edges.
44 *
45 * GPIO interrupts can be fed to either of two IRQ lines; this is
46 * intended to support multiple hosts.
47 *
 48 * There are also two LED pins that can be used as output-only GPIOs.
49 */
50
51
52static struct gpio_chip twl_gpiochip;
53static int twl4030_gpio_irq_base;
54
55/* genirq interfaces are not available to modules */
56#ifdef MODULE
57#define is_module() true
58#else
59#define is_module() false
60#endif
61
62/* GPIO_CTRL Fields */
63#define MASK_GPIO_CTRL_GPIO0CD1 BIT(0)
64#define MASK_GPIO_CTRL_GPIO1CD2 BIT(1)
65#define MASK_GPIO_CTRL_GPIO_ON BIT(2)
66
67/* Mask for GPIO registers when aggregated into a 32-bit integer */
68#define GPIO_32_MASK 0x0003ffff
69
70/* Data structures */
71static DEFINE_MUTEX(gpio_lock);
72
 73/* store usage of each GPIO; each bit represents one GPIO */
74static unsigned int gpio_usage_count;
75
76/*----------------------------------------------------------------------*/
77
78/*
79 * To configure TWL4030 GPIO module registers
80 */
81static inline int gpio_twl4030_write(u8 address, u8 data)
82{
83 return twl4030_i2c_write_u8(TWL4030_MODULE_GPIO, data, address);
84}
85
86/*----------------------------------------------------------------------*/
87
88/*
 89 * LED register offsets (use TWL4030_MODULE_{LED,PWMA,PWMB})
90 * PWMs A and B are dedicated to LEDs A and B, respectively.
91 */
92
93#define TWL4030_LED_LEDEN 0x0
94
95/* LEDEN bits */
96#define LEDEN_LEDAON BIT(0)
97#define LEDEN_LEDBON BIT(1)
98#define LEDEN_LEDAEXT BIT(2)
99#define LEDEN_LEDBEXT BIT(3)
100#define LEDEN_LEDAPWM BIT(4)
101#define LEDEN_LEDBPWM BIT(5)
102#define LEDEN_PWM_LENGTHA BIT(6)
103#define LEDEN_PWM_LENGTHB BIT(7)
104
105#define TWL4030_PWMx_PWMxON 0x0
106#define TWL4030_PWMx_PWMxOFF 0x1
107
108#define PWMxON_LENGTH BIT(7)
109
110/*----------------------------------------------------------------------*/
111
112/*
113 * To read a TWL4030 GPIO module register
114 */
115static inline int gpio_twl4030_read(u8 address)
116{
117 u8 data;
118 int ret = 0;
119
120 ret = twl4030_i2c_read_u8(TWL4030_MODULE_GPIO, &data, address);
121 return (ret < 0) ? ret : data;
122}
123
124/*----------------------------------------------------------------------*/
125
126static u8 cached_leden; /* protected by gpio_lock */
127
128/* The LED lines are open drain outputs ... a FET pulls to GND, so an
129 * external pullup is needed. We could also expose the integrated PWM
130 * as a LED brightness control; we initialize it as "always on".
131 */
132static void twl4030_led_set_value(int led, int value)
133{
134 u8 mask = LEDEN_LEDAON | LEDEN_LEDAPWM;
135 int status;
136
137 if (led)
138 mask <<= 1;
139
140 mutex_lock(&gpio_lock);
141 if (value)
142 cached_leden &= ~mask;
143 else
144 cached_leden |= mask;
145 status = twl4030_i2c_write_u8(TWL4030_MODULE_LED, cached_leden,
146 TWL4030_LED_LEDEN);
147 mutex_unlock(&gpio_lock);
148}
149
150static int twl4030_set_gpio_direction(int gpio, int is_input)
151{
152 u8 d_bnk = gpio >> 3;
153 u8 d_msk = BIT(gpio & 0x7);
154 u8 reg = 0;
155 u8 base = REG_GPIODATADIR1 + d_bnk;
156 int ret = 0;
157
158 mutex_lock(&gpio_lock);
159 ret = gpio_twl4030_read(base);
160 if (ret >= 0) {
161 if (is_input)
162 reg = ret & ~d_msk;
163 else
164 reg = ret | d_msk;
165
166 ret = gpio_twl4030_write(base, reg);
167 }
168 mutex_unlock(&gpio_lock);
169 return ret;
170}
171
172static int twl4030_set_gpio_dataout(int gpio, int enable)
173{
174 u8 d_bnk = gpio >> 3;
175 u8 d_msk = BIT(gpio & 0x7);
176 u8 base = 0;
177
178 if (enable)
179 base = REG_SETGPIODATAOUT1 + d_bnk;
180 else
181 base = REG_CLEARGPIODATAOUT1 + d_bnk;
182
183 return gpio_twl4030_write(base, d_msk);
184}
185
186static int twl4030_get_gpio_datain(int gpio)
187{
188 u8 d_bnk = gpio >> 3;
189 u8 d_off = gpio & 0x7;
190 u8 base = 0;
191 int ret = 0;
192
193 if (unlikely((gpio >= TWL4030_GPIO_MAX)
194 || !(gpio_usage_count & BIT(gpio))))
195 return -EPERM;
196
197 base = REG_GPIODATAIN1 + d_bnk;
198 ret = gpio_twl4030_read(base);
199 if (ret > 0)
200 ret = (ret >> d_off) & 0x1;
201
202 return ret;
203}
204
205/*
206 * Enable or disable input debouncing for a GPIO pin on TWL4030
207 */
208int twl4030_set_gpio_debounce(int gpio, int enable)
209{
210 u8 d_bnk = gpio >> 3;
211 u8 d_msk = BIT(gpio & 0x7);
212 u8 reg = 0;
213 u8 base = 0;
214 int ret = 0;
215
216 if (unlikely((gpio >= TWL4030_GPIO_MAX)
217 || !(gpio_usage_count & BIT(gpio))))
218 return -EPERM;
219
220 base = REG_GPIO_DEBEN1 + d_bnk;
221 mutex_lock(&gpio_lock);
222 ret = gpio_twl4030_read(base);
223 if (ret >= 0) {
224 if (enable)
225 reg = ret | d_msk;
226 else
227 reg = ret & ~d_msk;
228
229 ret = gpio_twl4030_write(base, reg);
230 }
231 mutex_unlock(&gpio_lock);
232 return ret;
233}
234EXPORT_SYMBOL(twl4030_set_gpio_debounce);
235
236/*----------------------------------------------------------------------*/
237
238static int twl_request(struct gpio_chip *chip, unsigned offset)
239{
240 int status = 0;
241
242 mutex_lock(&gpio_lock);
243
244 /* Support the two LED outputs as output-only GPIOs. */
245 if (offset >= TWL4030_GPIO_MAX) {
246 u8 ledclr_mask = LEDEN_LEDAON | LEDEN_LEDAEXT
247 | LEDEN_LEDAPWM | LEDEN_PWM_LENGTHA;
248 u8 module = TWL4030_MODULE_PWMA;
249
250 offset -= TWL4030_GPIO_MAX;
251 if (offset) {
252 ledclr_mask <<= 1;
253 module = TWL4030_MODULE_PWMB;
254 }
255
256 /* initialize PWM to always-drive */
257 status = twl4030_i2c_write_u8(module, 0x7f,
258 TWL4030_PWMx_PWMxOFF);
259 if (status < 0)
260 goto done;
261 status = twl4030_i2c_write_u8(module, 0x7f,
262 TWL4030_PWMx_PWMxON);
263 if (status < 0)
264 goto done;
265
266 /* init LED to not-driven (high) */
267 module = TWL4030_MODULE_LED;
268 status = twl4030_i2c_read_u8(module, &cached_leden,
269 TWL4030_LED_LEDEN);
270 if (status < 0)
271 goto done;
272 cached_leden &= ~ledclr_mask;
273 status = twl4030_i2c_write_u8(module, cached_leden,
274 TWL4030_LED_LEDEN);
275 if (status < 0)
276 goto done;
277
278 status = 0;
279 goto done;
280 }
281
282 /* on first use, turn GPIO module "on" */
283 if (!gpio_usage_count) {
284 struct twl4030_gpio_platform_data *pdata;
285 u8 value = MASK_GPIO_CTRL_GPIO_ON;
286
287 /* optionally have the first two GPIOs switch vMMC1
288 * and vMMC2 power supplies based on card presence.
289 */
290 pdata = chip->dev->platform_data;
291 value |= pdata->mmc_cd & 0x03;
292
293 status = gpio_twl4030_write(REG_GPIO_CTRL, value);
294 }
295
296 if (!status)
297 gpio_usage_count |= (0x1 << offset);
298
299done:
300 mutex_unlock(&gpio_lock);
301 return status;
302}
303
304static void twl_free(struct gpio_chip *chip, unsigned offset)
305{
306 if (offset >= TWL4030_GPIO_MAX) {
307 twl4030_led_set_value(offset - TWL4030_GPIO_MAX, 1);
308 return;
309 }
310
311 mutex_lock(&gpio_lock);
312
313 gpio_usage_count &= ~BIT(offset);
314
315 /* on last use, switch off GPIO module */
316 if (!gpio_usage_count)
317 gpio_twl4030_write(REG_GPIO_CTRL, 0x0);
318
319 mutex_unlock(&gpio_lock);
320}
321
322static int twl_direction_in(struct gpio_chip *chip, unsigned offset)
323{
324 return (offset < TWL4030_GPIO_MAX)
325 ? twl4030_set_gpio_direction(offset, 1)
326 : -EINVAL;
327}
328
329static int twl_get(struct gpio_chip *chip, unsigned offset)
330{
331 int status = 0;
332
333 if (offset < TWL4030_GPIO_MAX)
334 status = twl4030_get_gpio_datain(offset);
335 else if (offset == TWL4030_GPIO_MAX)
336 status = cached_leden & LEDEN_LEDAON;
337 else
338 status = cached_leden & LEDEN_LEDBON;
339 return (status < 0) ? 0 : status;
340}
341
342static int twl_direction_out(struct gpio_chip *chip, unsigned offset, int value)
343{
344 if (offset < TWL4030_GPIO_MAX) {
345 twl4030_set_gpio_dataout(offset, value);
346 return twl4030_set_gpio_direction(offset, 0);
347 } else {
348 twl4030_led_set_value(offset - TWL4030_GPIO_MAX, value);
349 return 0;
350 }
351}
352
353static void twl_set(struct gpio_chip *chip, unsigned offset, int value)
354{
355 if (offset < TWL4030_GPIO_MAX)
356 twl4030_set_gpio_dataout(offset, value);
357 else
358 twl4030_led_set_value(offset - TWL4030_GPIO_MAX, value);
359}
360
361static int twl_to_irq(struct gpio_chip *chip, unsigned offset)
362{
363 return (twl4030_gpio_irq_base && (offset < TWL4030_GPIO_MAX))
364 ? (twl4030_gpio_irq_base + offset)
365 : -EINVAL;
366}
367
368static struct gpio_chip twl_gpiochip = {
369 .label = "twl4030",
370 .owner = THIS_MODULE,
371 .request = twl_request,
372 .free = twl_free,
373 .direction_input = twl_direction_in,
374 .get = twl_get,
375 .direction_output = twl_direction_out,
376 .set = twl_set,
377 .to_irq = twl_to_irq,
378 .can_sleep = 1,
379};
380
381/*----------------------------------------------------------------------*/
382
383static int __devinit gpio_twl4030_pulls(u32 ups, u32 downs)
384{
385 u8 message[6];
386 unsigned i, gpio_bit;
387
388 /* For most pins, a pulldown was enabled by default.
389 * We should have data that's specific to this board.
390 */
391 for (gpio_bit = 1, i = 1; i < 6; i++) {
392 u8 bit_mask;
393 unsigned j;
394
395 for (bit_mask = 0, j = 0; j < 8; j += 2, gpio_bit <<= 1) {
396 if (ups & gpio_bit)
397 bit_mask |= 1 << (j + 1);
398 else if (downs & gpio_bit)
399 bit_mask |= 1 << (j + 0);
400 }
401 message[i] = bit_mask;
402 }
403
404 return twl4030_i2c_write(TWL4030_MODULE_GPIO, message,
405 REG_GPIOPUPDCTR1, 5);
406}
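/*
 * Pull encoding, spelled out: each GPIO gets two control bits and four
 * GPIOs share one byte, with the pullup enable in the odd bit and the
 * pulldown enable in the even bit. For example, ups = BIT(2) together
 * with downs = BIT(0) produces a first data byte of 0x21: bit 0 enables
 * the pulldown on GPIO0 and bit 5 enables the pullup on GPIO2.
 */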
407
408static int gpio_twl4030_remove(struct platform_device *pdev);
409
410static int __devinit gpio_twl4030_probe(struct platform_device *pdev)
411{
412 struct twl4030_gpio_platform_data *pdata = pdev->dev.platform_data;
413 int ret;
414
415 /* maybe setup IRQs */
416 if (pdata->irq_base) {
417 if (is_module()) {
418 dev_err(&pdev->dev,
419 "can't dispatch IRQs from modules\n");
420 goto no_irqs;
421 }
422 ret = twl4030_sih_setup(TWL4030_MODULE_GPIO);
423 if (ret < 0)
424 return ret;
425 WARN_ON(ret != pdata->irq_base);
426 twl4030_gpio_irq_base = ret;
427 }
428
429no_irqs:
430 /*
431 * NOTE: boards may waste power if they don't set pullups
432 * and pulldowns correctly ... default for non-ULPI pins is
433 * pulldown, and some other pins may have external pullups
434 * or pulldowns. Careful!
435 */
436 ret = gpio_twl4030_pulls(pdata->pullups, pdata->pulldowns);
437 if (ret)
438 dev_dbg(&pdev->dev, "pullups %.05x %.05x --> %d\n",
439 pdata->pullups, pdata->pulldowns,
440 ret);
441
442 twl_gpiochip.base = pdata->gpio_base;
443 twl_gpiochip.ngpio = TWL4030_GPIO_MAX;
444 twl_gpiochip.dev = &pdev->dev;
445
446 /* NOTE: we assume VIBRA_CTL.VIBRA_EN, in MODULE_AUDIO_VOICE,
447 * is (still) clear if use_leds is set.
448 */
449 if (pdata->use_leds)
450 twl_gpiochip.ngpio += 2;
451
452 ret = gpiochip_add(&twl_gpiochip);
453 if (ret < 0) {
454 dev_err(&pdev->dev,
455 "could not register gpiochip, %d\n",
456 ret);
457 twl_gpiochip.ngpio = 0;
458 gpio_twl4030_remove(pdev);
459 } else if (pdata->setup) {
460 int status;
461
462 status = pdata->setup(&pdev->dev,
463 pdata->gpio_base, TWL4030_GPIO_MAX);
464 if (status)
465 dev_dbg(&pdev->dev, "setup --> %d\n", status);
466 }
467
468 return ret;
469}
470
471static int __devexit gpio_twl4030_remove(struct platform_device *pdev)
472{
473 struct twl4030_gpio_platform_data *pdata = pdev->dev.platform_data;
474 int status;
475
476 if (pdata->teardown) {
477 status = pdata->teardown(&pdev->dev,
478 pdata->gpio_base, TWL4030_GPIO_MAX);
479 if (status) {
480 dev_dbg(&pdev->dev, "teardown --> %d\n", status);
481 return status;
482 }
483 }
484
485 status = gpiochip_remove(&twl_gpiochip);
486 if (status < 0)
487 return status;
488
489 if (is_module())
490 return 0;
491
492 /* REVISIT no support yet for deregistering all the IRQs */
493 WARN_ON(1);
494 return -EIO;
495}
496
497/* Note: this hardware lives inside an I2C-based multi-function device. */
498MODULE_ALIAS("platform:twl4030_gpio");
499
500static struct platform_driver gpio_twl4030_driver = {
501 .driver.name = "twl4030_gpio",
502 .driver.owner = THIS_MODULE,
503 .probe = gpio_twl4030_probe,
504 .remove = __devexit_p(gpio_twl4030_remove),
505};
506
507static int __init gpio_twl4030_init(void)
508{
509 return platform_driver_register(&gpio_twl4030_driver);
510}
511subsys_initcall(gpio_twl4030_init);
512
513static void __exit gpio_twl4030_exit(void)
514{
515 platform_driver_unregister(&gpio_twl4030_driver);
516}
517module_exit(gpio_twl4030_exit);
518
519MODULE_AUTHOR("Texas Instruments, Inc.");
520MODULE_DESCRIPTION("GPIO interface for TWL4030");
521MODULE_LICENSE("GPL");
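For orientation, here is a sketch of the board-side glue this driver expects. Only the "twl4030_gpio" device name and the platform_data fields read by the probe/remove paths above are taken from the code; the header location, field types, and every value are assumptions for illustration, and the platform device itself is normally created by the TWL4030 MFD core rather than registered by hand.

#include <linux/bitops.h>
#include <linux/i2c/twl4030.h>	/* assumed home of twl4030_gpio_platform_data */

/* Hypothetical board hook; signature inferred from the pdata->setup() call
 * in gpio_twl4030_probe(). */
static int example_twl_gpio_setup(struct device *dev, unsigned gpio_base,
				  unsigned ngpio)
{
	/* request card-detect or LED lines starting at gpio_base here */
	return 0;
}

static struct twl4030_gpio_platform_data example_twl_gpio_pdata = {
	.gpio_base	= 192,			/* assumed: first GPIO number to claim */
	.irq_base	= 0,			/* assumed: skip the SIH interrupt setup */
	.use_leds	= 1,			/* expose LEDA/LEDB as two extra GPIOs */
	.pullups	= BIT(1),		/* assumed board-specific pull configuration */
	.pulldowns	= BIT(2) | BIT(7),
	.mmc_cd		= 0x3,			/* let GPIO0/GPIO1 gate the vMMC1/vMMC2 supplies */
	.setup		= example_twl_gpio_setup,
};

The pdata pointer then reaches gpio_twl4030_probe() through pdev->dev.platform_data, exactly as the probe function above reads it.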
diff --git a/drivers/gpio/xilinx_gpio.c b/drivers/gpio/xilinx_gpio.c
new file mode 100644
index 000000000000..3c1177abebd3
--- /dev/null
+++ b/drivers/gpio/xilinx_gpio.c
@@ -0,0 +1,235 @@
1/*
2 * Xilinx gpio driver
3 *
4 * Copyright 2008 Xilinx, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2
8 * as published by the Free Software Foundation.
9 *
10 * You should have received a copy of the GNU General Public License
11 * along with this program; if not, write to the Free Software
12 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
13 */
14
15#include <linux/init.h>
16#include <linux/errno.h>
17#include <linux/of_device.h>
18#include <linux/of_platform.h>
19#include <linux/of_gpio.h>
20#include <linux/io.h>
21#include <linux/gpio.h>
22
23/* Register Offset Definitions */
24#define XGPIO_DATA_OFFSET (0x0) /* Data register */
25#define XGPIO_TRI_OFFSET (0x4) /* I/O direction register */
26
27struct xgpio_instance {
28 struct of_mm_gpio_chip mmchip;
29 u32 gpio_state; /* GPIO state shadow register */
30 u32 gpio_dir; /* GPIO direction shadow register */
31 spinlock_t gpio_lock; /* Lock used for synchronization */
32};
33
34/**
35 * xgpio_get - Read the specified signal of the GPIO device.
36 * @gc: Pointer to gpio_chip device structure.
37 * @gpio: GPIO signal number.
38 *
 39 * This function reads the specified signal of the GPIO device. It returns 0 if
 40 * the signal is clear, 1 if the signal is set, or a negative value on error.
41 */
42static int xgpio_get(struct gpio_chip *gc, unsigned int gpio)
43{
44 struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
45
46 return (in_be32(mm_gc->regs + XGPIO_DATA_OFFSET) >> gpio) & 1;
47}
48
49/**
50 * xgpio_set - Write the specified signal of the GPIO device.
51 * @gc: Pointer to gpio_chip device structure.
52 * @gpio: GPIO signal number.
53 * @val: Value to be written to specified signal.
54 *
 55 * This function writes the specified value to the specified signal of the
56 * GPIO device.
57 */
58static void xgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
59{
60 unsigned long flags;
61 struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
62 struct xgpio_instance *chip =
63 container_of(mm_gc, struct xgpio_instance, mmchip);
64
65 spin_lock_irqsave(&chip->gpio_lock, flags);
66
 67 /* Write to GPIO signal */
68 if (val)
69 chip->gpio_state |= 1 << gpio;
70 else
71 chip->gpio_state &= ~(1 << gpio);
72 out_be32(mm_gc->regs + XGPIO_DATA_OFFSET, chip->gpio_state);
73
74 spin_unlock_irqrestore(&chip->gpio_lock, flags);
75}
76
77/**
78 * xgpio_dir_in - Set the direction of the specified GPIO signal as input.
79 * @gc: Pointer to gpio_chip device structure.
80 * @gpio: GPIO signal number.
81 *
 82 * This function sets the direction of the specified GPIO signal as input.
 83 * It returns 0 if the direction was set to input, otherwise it returns a
 84 * negative error value.
85 */
86static int xgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
87{
88 unsigned long flags;
89 struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
90 struct xgpio_instance *chip =
91 container_of(mm_gc, struct xgpio_instance, mmchip);
92
93 spin_lock_irqsave(&chip->gpio_lock, flags);
94
95 /* Set the GPIO bit in shadow register and set direction as input */
96 chip->gpio_dir |= (1 << gpio);
97 out_be32(mm_gc->regs + XGPIO_TRI_OFFSET, chip->gpio_dir);
98
99 spin_unlock_irqrestore(&chip->gpio_lock, flags);
100
101 return 0;
102}
103
104/**
105 * xgpio_dir_out - Set the direction of the specified GPIO signal as output.
106 * @gc: Pointer to gpio_chip device structure.
107 * @gpio: GPIO signal number.
108 * @val: Value to be written to specified signal.
109 *
110 * This function sets the direction of the specified GPIO signal as output and
111 * writes the requested value to it. It returns 0 on success, otherwise a
112 * negative error value.
113 */
114static int xgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
115{
116 unsigned long flags;
117 struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
118 struct xgpio_instance *chip =
119 container_of(mm_gc, struct xgpio_instance, mmchip);
120
121 spin_lock_irqsave(&chip->gpio_lock, flags);
122
123 /* Write state of GPIO signal */
124 if (val)
125 chip->gpio_state |= 1 << gpio;
126 else
127 chip->gpio_state &= ~(1 << gpio);
128 out_be32(mm_gc->regs + XGPIO_DATA_OFFSET, chip->gpio_state);
129
130 /* Clear the GPIO bit in shadow register and set direction as output */
131 chip->gpio_dir &= (~(1 << gpio));
132 out_be32(mm_gc->regs + XGPIO_TRI_OFFSET, chip->gpio_dir);
133
134 spin_unlock_irqrestore(&chip->gpio_lock, flags);
135
136 return 0;
137}
138
139/**
140 * xgpio_save_regs - Set initial values of GPIO pins
141 * @mm_gc: pointer to memory mapped GPIO chip structure
142 */
143static void xgpio_save_regs(struct of_mm_gpio_chip *mm_gc)
144{
145 struct xgpio_instance *chip =
146 container_of(mm_gc, struct xgpio_instance, mmchip);
147
148 out_be32(mm_gc->regs + XGPIO_DATA_OFFSET, chip->gpio_state);
149 out_be32(mm_gc->regs + XGPIO_TRI_OFFSET, chip->gpio_dir);
150}
151
152/**
153 * xgpio_of_probe - Probe method for the GPIO device.
154 * @np: pointer to device tree node
155 *
156 * This function probes the GPIO device in the device tree. It initializes the
157 * driver data structure. It returns 0 if the driver is bound to the GPIO
158 * device, or a negative value if there is an error.
159 */
160static int __devinit xgpio_of_probe(struct device_node *np)
161{
162 struct xgpio_instance *chip;
163 struct of_gpio_chip *ofchip;
164 int status = 0;
165 const u32 *tree_info;
166
167 chip = kzalloc(sizeof(*chip), GFP_KERNEL);
168 if (!chip)
169 return -ENOMEM;
170 ofchip = &chip->mmchip.of_gc;
171
172 /* Update GPIO state shadow register with default value */
173 tree_info = of_get_property(np, "xlnx,dout-default", NULL);
174 if (tree_info)
175 chip->gpio_state = *tree_info;
176
177 /* Update GPIO direction shadow register with default value */
178 chip->gpio_dir = 0xFFFFFFFF; /* By default, all pins are inputs */
179 tree_info = of_get_property(np, "xlnx,tri-default", NULL);
180 if (tree_info)
181 chip->gpio_dir = *tree_info;
182
183 /* Check device node and parent device node for device width */
184 ofchip->gc.ngpio = 32; /* By default assume full GPIO controller */
185 tree_info = of_get_property(np, "xlnx,gpio-width", NULL);
186 if (!tree_info)
187 tree_info = of_get_property(np->parent,
188 "xlnx,gpio-width", NULL);
189 if (tree_info)
190 ofchip->gc.ngpio = *tree_info;
191
192 spin_lock_init(&chip->gpio_lock);
193
194 ofchip->gpio_cells = 2;
195 ofchip->gc.direction_input = xgpio_dir_in;
196 ofchip->gc.direction_output = xgpio_dir_out;
197 ofchip->gc.get = xgpio_get;
198 ofchip->gc.set = xgpio_set;
199
200 chip->mmchip.save_regs = xgpio_save_regs;
201
202 /* Call the OF gpio helper to setup and register the GPIO device */
203 status = of_mm_gpiochip_add(np, &chip->mmchip);
204 if (status) {
205 kfree(chip);
206 pr_err("%s: error in probe function with status %d\n",
207 np->full_name, status);
208 return status;
209 }
210 pr_info("XGpio: %s: registered\n", np->full_name);
211 return 0;
212}
213
214static struct of_device_id xgpio_of_match[] __devinitdata = {
215 { .compatible = "xlnx,xps-gpio-1.00.a", },
216 { /* end of list */ },
217};
218
219static int __init xgpio_init(void)
220{
221 struct device_node *np;
222
223 for_each_matching_node(np, xgpio_of_match)
224 xgpio_of_probe(np);
225
226 return 0;
227}
228
229/* Make sure we get initialized before anyone else tries to use us */
230subsys_initcall(xgpio_init);
231/* No exit call at the moment as we cannot unregister OF GPIO chips */
232
233MODULE_AUTHOR("Xilinx, Inc.");
234MODULE_DESCRIPTION("Xilinx GPIO driver");
235MODULE_LICENSE("GPL");
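The probe above consumes three optional properties from any node compatible with "xlnx,xps-gpio-1.00.a": xlnx,dout-default (initial DATA register contents), xlnx,tri-default (initial TRI register, where a 1 bit means input), and xlnx,gpio-width (number of pins, defaulting to 32). A sketch of a consumer, under the assumption that the generic of_get_gpio()/gpiolib calls of this kernel generation are available and that "np" carries a gpios property pointing at such a controller:

#include <linux/gpio.h>
#include <linux/of_gpio.h>

static int example_claim_xgpio(struct device_node *np)
{
	int gpio = of_get_gpio(np, 0);	/* resolve the first "gpios" entry */

	if (!gpio_is_valid(gpio))
		return gpio;
	if (gpio_request(gpio, "example-led"))
		return -EBUSY;
	/* drives the matching XGPIO_DATA bit high and clears its XGPIO_TRI bit */
	return gpio_direction_output(gpio, 1);
}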
diff --git a/drivers/gpu/drm/drm_drawable.c b/drivers/gpu/drm/drm_drawable.c
index 1839c57663c5..80be1cab62af 100644
--- a/drivers/gpu/drm/drm_drawable.c
+++ b/drivers/gpu/drm/drm_drawable.c
@@ -76,11 +76,18 @@ int drm_rmdraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
76{ 76{
77 struct drm_draw *draw = data; 77 struct drm_draw *draw = data;
78 unsigned long irqflags; 78 unsigned long irqflags;
79 struct drm_drawable_info *info;
79 80
80 spin_lock_irqsave(&dev->drw_lock, irqflags); 81 spin_lock_irqsave(&dev->drw_lock, irqflags);
81 82
82 drm_free(drm_get_drawable_info(dev, draw->handle), 83 info = drm_get_drawable_info(dev, draw->handle);
83 sizeof(struct drm_drawable_info), DRM_MEM_BUFS); 84 if (info == NULL) {
85 spin_unlock_irqrestore(&dev->drw_lock, irqflags);
86 return -EINVAL;
87 }
88 drm_free(info->rects, info->num_rects * sizeof(struct drm_clip_rect),
89 DRM_MEM_BUFS);
90 drm_free(info, sizeof(struct drm_drawable_info), DRM_MEM_BUFS);
84 91
85 idr_remove(&dev->drw_idr, draw->handle); 92 idr_remove(&dev->drw_idr, draw->handle);
86 93
@@ -111,7 +118,9 @@ int drm_update_drawable_info(struct drm_device *dev, void *data, struct drm_file
111 118
112 switch (update->type) { 119 switch (update->type) {
113 case DRM_DRAWABLE_CLIPRECTS: 120 case DRM_DRAWABLE_CLIPRECTS:
114 if (update->num != info->num_rects) { 121 if (update->num == 0)
122 rects = NULL;
123 else if (update->num != info->num_rects) {
115 rects = drm_alloc(update->num * sizeof(struct drm_clip_rect), 124 rects = drm_alloc(update->num * sizeof(struct drm_clip_rect),
116 DRM_MEM_BUFS); 125 DRM_MEM_BUFS);
117 } else 126 } else
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 96f416afc3f6..996097acb5e7 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -266,11 +266,19 @@ int drm_init(struct drm_driver *driver)
266 for (i = 0; driver->pci_driver.id_table[i].vendor != 0; i++) { 266 for (i = 0; driver->pci_driver.id_table[i].vendor != 0; i++) {
267 pid = (struct pci_device_id *)&driver->pci_driver.id_table[i]; 267 pid = (struct pci_device_id *)&driver->pci_driver.id_table[i];
268 268
269 /* Loop around setting up a DRM device for each PCI device
270 * matching our ID and device class. If we had the internal
271 * function that pci_get_subsys and pci_get_class used, we'd
272 * be able to just pass pid in instead of doing a two-stage
273 * thing.
274 */
269 pdev = NULL; 275 pdev = NULL;
270 /* pass back in pdev to account for multiple identical cards */
271 while ((pdev = 276 while ((pdev =
272 pci_get_subsys(pid->vendor, pid->device, pid->subvendor, 277 pci_get_subsys(pid->vendor, pid->device, pid->subvendor,
273 pid->subdevice, pdev)) != NULL) { 278 pid->subdevice, pdev)) != NULL) {
279 if ((pdev->class & pid->class_mask) != pid->class)
280 continue;
281
274 /* stealth mode requires a manual probe */ 282 /* stealth mode requires a manual probe */
275 pci_dev_get(pdev); 283 pci_dev_get(pdev);
276 drm_get_dev(pdev, pid, driver); 284 drm_get_dev(pdev, pid, driver);
@@ -297,6 +305,8 @@ static void drm_cleanup(struct drm_device * dev)
297 return; 305 return;
298 } 306 }
299 307
308 drm_vblank_cleanup(dev);
309
300 drm_lastclose(dev); 310 drm_lastclose(dev);
301 311
302 if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) && 312 if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) &&
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 0d46627663b1..78eeed5caaff 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -406,8 +406,6 @@ int drm_release(struct inode *inode, struct file *filp)
406 if (dev->driver->driver_features & DRIVER_GEM) 406 if (dev->driver->driver_features & DRIVER_GEM)
407 drm_gem_release(dev, file_priv); 407 drm_gem_release(dev, file_priv);
408 408
409 drm_fasync(-1, filp, 0);
410
411 mutex_lock(&dev->ctxlist_mutex); 409 mutex_lock(&dev->ctxlist_mutex);
412 if (!list_empty(&dev->ctxlist)) { 410 if (!list_empty(&dev->ctxlist)) {
413 struct drm_ctx_list *pos, *n; 411 struct drm_ctx_list *pos, *n;
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index 90f5a8d9bdcb..920b72fbc958 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -64,6 +64,8 @@
64#define DRM_IOCTL_SG_ALLOC32 DRM_IOW( 0x38, drm_scatter_gather32_t) 64#define DRM_IOCTL_SG_ALLOC32 DRM_IOW( 0x38, drm_scatter_gather32_t)
65#define DRM_IOCTL_SG_FREE32 DRM_IOW( 0x39, drm_scatter_gather32_t) 65#define DRM_IOCTL_SG_FREE32 DRM_IOW( 0x39, drm_scatter_gather32_t)
66 66
67#define DRM_IOCTL_UPDATE_DRAW32 DRM_IOW( 0x3f, drm_update_draw32_t)
68
67#define DRM_IOCTL_WAIT_VBLANK32 DRM_IOWR(0x3a, drm_wait_vblank32_t) 69#define DRM_IOCTL_WAIT_VBLANK32 DRM_IOWR(0x3a, drm_wait_vblank32_t)
68 70
69typedef struct drm_version_32 { 71typedef struct drm_version_32 {
@@ -952,6 +954,37 @@ static int compat_drm_sg_free(struct file *file, unsigned int cmd,
952 DRM_IOCTL_SG_FREE, (unsigned long)request); 954 DRM_IOCTL_SG_FREE, (unsigned long)request);
953} 955}
954 956
957typedef struct drm_update_draw32 {
958 drm_drawable_t handle;
959 unsigned int type;
960 unsigned int num;
961 /* 64-bit version has a 32-bit pad here */
962 u64 data; /**< Pointer */
963} __attribute__((packed)) drm_update_draw32_t;
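/*
 * Why a separate layout: 32-bit x86 aligns u64 to 4 bytes, so 'data'
 * follows 'num' immediately, while the native 64-bit struct gets 4 bytes
 * of padding there. The handler below therefore copies the fields one by
 * one into a native struct drm_update_draw before forwarding the ioctl.
 */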
964
965static int compat_drm_update_draw(struct file *file, unsigned int cmd,
966 unsigned long arg)
967{
968 drm_update_draw32_t update32;
969 struct drm_update_draw __user *request;
970 int err;
971
972 if (copy_from_user(&update32, (void __user *)arg, sizeof(update32)))
973 return -EFAULT;
974
975 request = compat_alloc_user_space(sizeof(*request));
976 if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) ||
977 __put_user(update32.handle, &request->handle) ||
978 __put_user(update32.type, &request->type) ||
979 __put_user(update32.num, &request->num) ||
980 __put_user(update32.data, &request->data))
981 return -EFAULT;
982
983 err = drm_ioctl(file->f_path.dentry->d_inode, file,
984 DRM_IOCTL_UPDATE_DRAW, (unsigned long)request);
985 return err;
986}
987
955struct drm_wait_vblank_request32 { 988struct drm_wait_vblank_request32 {
956 enum drm_vblank_seq_type type; 989 enum drm_vblank_seq_type type;
957 unsigned int sequence; 990 unsigned int sequence;
@@ -1033,6 +1066,7 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
1033#endif 1066#endif
1034 [DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC32)] = compat_drm_sg_alloc, 1067 [DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC32)] = compat_drm_sg_alloc,
1035 [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE32)] = compat_drm_sg_free, 1068 [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE32)] = compat_drm_sg_free,
1069 [DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW32)] = compat_drm_update_draw,
1036 [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK32)] = compat_drm_wait_vblank, 1070 [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK32)] = compat_drm_wait_vblank,
1037}; 1071};
1038 1072
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 4091b9e291f9..1e787f894b3c 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -94,7 +94,7 @@ static void vblank_disable_fn(unsigned long arg)
94 } 94 }
95} 95}
96 96
97static void drm_vblank_cleanup(struct drm_device *dev) 97void drm_vblank_cleanup(struct drm_device *dev)
98{ 98{
99 /* Bail if the driver didn't call drm_vblank_init() */ 99 /* Bail if the driver didn't call drm_vblank_init() */
100 if (dev->num_crtcs == 0) 100 if (dev->num_crtcs == 0)
@@ -278,10 +278,6 @@ int drm_irq_uninstall(struct drm_device * dev)
278 278
279 free_irq(dev->pdev->irq, dev); 279 free_irq(dev->pdev->irq, dev);
280 280
281 drm_vblank_cleanup(dev);
282
283 dev->locked_tasklet_func = NULL;
284
285 return 0; 281 return 0;
286} 282}
287EXPORT_SYMBOL(drm_irq_uninstall); 283EXPORT_SYMBOL(drm_irq_uninstall);
@@ -594,11 +590,14 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
594 goto done; 590 goto done;
595 } 591 }
596 592
593 /* Get a refcount on the vblank, which will be released by
594 * drm_vbl_send_signals().
595 */
597 ret = drm_vblank_get(dev, crtc); 596 ret = drm_vblank_get(dev, crtc);
598 if (ret) { 597 if (ret) {
599 drm_free(vbl_sig, sizeof(struct drm_vbl_sig), 598 drm_free(vbl_sig, sizeof(struct drm_vbl_sig),
600 DRM_MEM_DRIVER); 599 DRM_MEM_DRIVER);
601 return ret; 600 goto done;
602 } 601 }
603 602
604 atomic_inc(&dev->vbl_signal_pending); 603 atomic_inc(&dev->vbl_signal_pending);
@@ -696,81 +695,3 @@ void drm_handle_vblank(struct drm_device *dev, int crtc)
696 drm_vbl_send_signals(dev, crtc); 695 drm_vbl_send_signals(dev, crtc);
697} 696}
698EXPORT_SYMBOL(drm_handle_vblank); 697EXPORT_SYMBOL(drm_handle_vblank);
699
700/**
701 * Tasklet wrapper function.
702 *
703 * \param data DRM device in disguise.
704 *
705 * Attempts to grab the HW lock and calls the driver callback on success. On
706 * failure, leave the lock marked as contended so the callback can be called
707 * from drm_unlock().
708 */
709static void drm_locked_tasklet_func(unsigned long data)
710{
711 struct drm_device *dev = (struct drm_device *)data;
712 unsigned long irqflags;
713 void (*tasklet_func)(struct drm_device *);
714
715 spin_lock_irqsave(&dev->tasklet_lock, irqflags);
716 tasklet_func = dev->locked_tasklet_func;
717 spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
718
719 if (!tasklet_func ||
720 !drm_lock_take(&dev->lock,
721 DRM_KERNEL_CONTEXT)) {
722 return;
723 }
724
725 dev->lock.lock_time = jiffies;
726 atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
727
728 spin_lock_irqsave(&dev->tasklet_lock, irqflags);
729 tasklet_func = dev->locked_tasklet_func;
730 dev->locked_tasklet_func = NULL;
731 spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
732
733 if (tasklet_func != NULL)
734 tasklet_func(dev);
735
736 drm_lock_free(&dev->lock,
737 DRM_KERNEL_CONTEXT);
738}
739
740/**
741 * Schedule a tasklet to call back a driver hook with the HW lock held.
742 *
743 * \param dev DRM device.
744 * \param func Driver callback.
745 *
746 * This is intended for triggering actions that require the HW lock from an
747 * interrupt handler. The lock will be grabbed ASAP after the interrupt handler
748 * completes. Note that the callback may be called from interrupt or process
749 * context, it must not make any assumptions about this. Also, the HW lock will
750 * be held with the kernel context or any client context.
751 */
752void drm_locked_tasklet(struct drm_device *dev, void (*func)(struct drm_device *))
753{
754 unsigned long irqflags;
755 static DECLARE_TASKLET(drm_tasklet, drm_locked_tasklet_func, 0);
756
757 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ) ||
758 test_bit(TASKLET_STATE_SCHED, &drm_tasklet.state))
759 return;
760
761 spin_lock_irqsave(&dev->tasklet_lock, irqflags);
762
763 if (dev->locked_tasklet_func) {
764 spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
765 return;
766 }
767
768 dev->locked_tasklet_func = func;
769
770 spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
771
772 drm_tasklet.data = (unsigned long)dev;
773
774 tasklet_hi_schedule(&drm_tasklet);
775}
776EXPORT_SYMBOL(drm_locked_tasklet);
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index a4caf95485d7..1cfa72031f8f 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -154,8 +154,6 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
154int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv) 154int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
155{ 155{
156 struct drm_lock *lock = data; 156 struct drm_lock *lock = data;
157 unsigned long irqflags;
158 void (*tasklet_func)(struct drm_device *);
159 157
160 if (lock->context == DRM_KERNEL_CONTEXT) { 158 if (lock->context == DRM_KERNEL_CONTEXT) {
161 DRM_ERROR("Process %d using kernel context %d\n", 159 DRM_ERROR("Process %d using kernel context %d\n",
@@ -163,13 +161,6 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
163 return -EINVAL; 161 return -EINVAL;
164 } 162 }
165 163
166 spin_lock_irqsave(&dev->tasklet_lock, irqflags);
167 tasklet_func = dev->locked_tasklet_func;
168 dev->locked_tasklet_func = NULL;
169 spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
170 if (tasklet_func != NULL)
171 tasklet_func(dev);
172
173 atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]); 164 atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
174 165
175 /* kernel_context_switch isn't used by any of the x86 drm 166 /* kernel_context_switch isn't used by any of the x86 drm
@@ -232,6 +223,7 @@ int drm_lock_take(struct drm_lock_data *lock_data,
232 } 223 }
233 return 0; 224 return 0;
234} 225}
226EXPORT_SYMBOL(drm_lock_take);
235 227
236/** 228/**
237 * This takes a lock forcibly and hands it to context. Should ONLY be used 229 * This takes a lock forcibly and hands it to context. Should ONLY be used
@@ -299,6 +291,7 @@ int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context)
299 wake_up_interruptible(&lock_data->lock_queue); 291 wake_up_interruptible(&lock_data->lock_queue);
300 return 0; 292 return 0;
301} 293}
294EXPORT_SYMBOL(drm_lock_free);
302 295
303/** 296/**
304 * If we get here, it means that the process has called DRM_IOCTL_LOCK 297 * If we get here, it means that the process has called DRM_IOCTL_LOCK
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 141e33004a76..66c96ec66672 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -92,7 +92,6 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
92 92
93 spin_lock_init(&dev->count_lock); 93 spin_lock_init(&dev->count_lock);
94 spin_lock_init(&dev->drw_lock); 94 spin_lock_init(&dev->drw_lock);
95 spin_lock_init(&dev->tasklet_lock);
96 spin_lock_init(&dev->lock.spinlock); 95 spin_lock_init(&dev->lock.spinlock);
97 init_timer(&dev->timer); 96 init_timer(&dev->timer);
98 mutex_init(&dev->struct_mutex); 97 mutex_init(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 5ba78e4fd2b5..d8fb5d8ee7ea 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -3,13 +3,14 @@
3# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. 3# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
4 4
5ccflags-y := -Iinclude/drm 5ccflags-y := -Iinclude/drm
6i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_opregion.o \ 6i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
7 i915_suspend.o \ 7 i915_suspend.o \
8 i915_gem.o \ 8 i915_gem.o \
9 i915_gem_debug.o \ 9 i915_gem_debug.o \
10 i915_gem_proc.o \ 10 i915_gem_proc.o \
11 i915_gem_tiling.o 11 i915_gem_tiling.o
12 12
13i915-$(CONFIG_ACPI) += i915_opregion.o
13i915-$(CONFIG_COMPAT) += i915_ioc32.o 14i915-$(CONFIG_COMPAT) += i915_ioc32.o
14 15
15obj-$(CONFIG_DRM_I915) += i915.o 16obj-$(CONFIG_DRM_I915) += i915.o
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index db34780edbb2..afa8a12cd009 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -154,6 +154,9 @@ static int i915_dma_cleanup(struct drm_device * dev)
154 if (I915_NEED_GFX_HWS(dev)) 154 if (I915_NEED_GFX_HWS(dev))
155 i915_free_hws(dev); 155 i915_free_hws(dev);
156 156
157 dev_priv->sarea = NULL;
158 dev_priv->sarea_priv = NULL;
159
157 return 0; 160 return 0;
158} 161}
159 162
@@ -442,7 +445,7 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
442 445
443 BEGIN_LP_RING(4); 446 BEGIN_LP_RING(4);
444 OUT_RING(MI_STORE_DWORD_INDEX); 447 OUT_RING(MI_STORE_DWORD_INDEX);
445 OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT); 448 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
446 OUT_RING(dev_priv->counter); 449 OUT_RING(dev_priv->counter);
447 OUT_RING(0); 450 OUT_RING(0);
448 ADVANCE_LP_RING(); 451 ADVANCE_LP_RING();
@@ -573,7 +576,7 @@ static int i915_dispatch_flip(struct drm_device * dev)
573 576
574 BEGIN_LP_RING(4); 577 BEGIN_LP_RING(4);
575 OUT_RING(MI_STORE_DWORD_INDEX); 578 OUT_RING(MI_STORE_DWORD_INDEX);
576 OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT); 579 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
577 OUT_RING(dev_priv->counter); 580 OUT_RING(dev_priv->counter);
578 OUT_RING(0); 581 OUT_RING(0);
579 ADVANCE_LP_RING(); 582 ADVANCE_LP_RING();
@@ -608,7 +611,6 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
608 struct drm_file *file_priv) 611 struct drm_file *file_priv)
609{ 612{
610 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 613 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
611 u32 *hw_status = dev_priv->hw_status_page;
612 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) 614 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
613 dev_priv->sarea_priv; 615 dev_priv->sarea_priv;
614 drm_i915_batchbuffer_t *batch = data; 616 drm_i915_batchbuffer_t *batch = data;
@@ -634,7 +636,7 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
634 mutex_unlock(&dev->struct_mutex); 636 mutex_unlock(&dev->struct_mutex);
635 637
636 if (sarea_priv) 638 if (sarea_priv)
637 sarea_priv->last_dispatch = (int)hw_status[5]; 639 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
638 return ret; 640 return ret;
639} 641}
640 642
@@ -642,7 +644,6 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
642 struct drm_file *file_priv) 644 struct drm_file *file_priv)
643{ 645{
644 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 646 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
645 u32 *hw_status = dev_priv->hw_status_page;
646 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) 647 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
647 dev_priv->sarea_priv; 648 dev_priv->sarea_priv;
648 drm_i915_cmdbuffer_t *cmdbuf = data; 649 drm_i915_cmdbuffer_t *cmdbuf = data;
@@ -670,7 +671,7 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
670 } 671 }
671 672
672 if (sarea_priv) 673 if (sarea_priv)
673 sarea_priv->last_dispatch = (int)hw_status[5]; 674 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
674 return 0; 675 return 0;
675} 676}
676 677
@@ -716,7 +717,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
716 value = dev->pci_device; 717 value = dev->pci_device;
717 break; 718 break;
718 case I915_PARAM_HAS_GEM: 719 case I915_PARAM_HAS_GEM:
719 value = 1; 720 value = dev_priv->has_gem;
720 break; 721 break;
721 default: 722 default:
722 DRM_ERROR("Unknown parameter %d\n", param->param); 723 DRM_ERROR("Unknown parameter %d\n", param->param);
@@ -829,6 +830,14 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
829 830
830 dev_priv->regs = ioremap(base, size); 831 dev_priv->regs = ioremap(base, size);
831 832
833#ifdef CONFIG_HIGHMEM64G
834 /* don't enable GEM on PAE - needs agp + set_memory_* interface fixes */
835 dev_priv->has_gem = 0;
836#else
837 /* enable GEM by default */
838 dev_priv->has_gem = 1;
839#endif
840
832 i915_gem_load(dev); 841 i915_gem_load(dev);
833 842
834 /* Init HWS */ 843 /* Init HWS */
@@ -844,15 +853,25 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
844 * correctly in testing on 945G. 853 * correctly in testing on 945G.
845 * This may be a side effect of MSI having been made available for PEG 854 * This may be a side effect of MSI having been made available for PEG
846 * and the registers being closely associated. 855 * and the registers being closely associated.
856 *
857 * According to chipset errata, on the 965GM, MSI interrupts may
 858 * be lost or delayed, but we use them anyway to avoid
859 * stuck interrupts on some machines.
847 */ 860 */
848 if (!IS_I945G(dev) && !IS_I945GM(dev)) 861 if (!IS_I945G(dev) && !IS_I945GM(dev))
849 if (pci_enable_msi(dev->pdev)) 862 pci_enable_msi(dev->pdev);
850 DRM_ERROR("failed to enable MSI\n");
851 863
852 intel_opregion_init(dev); 864 intel_opregion_init(dev);
853 865
854 spin_lock_init(&dev_priv->user_irq_lock); 866 spin_lock_init(&dev_priv->user_irq_lock);
855 867
868 ret = drm_vblank_init(dev, I915_NUM_PIPE);
869
870 if (ret) {
871 (void) i915_driver_unload(dev);
872 return ret;
873 }
874
856 return ret; 875 return ret;
857} 876}
858 877
@@ -957,6 +976,7 @@ struct drm_ioctl_desc i915_ioctls[] = {
957 DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0), 976 DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0),
958 DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0), 977 DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0),
959 DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0), 978 DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0),
979 DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, 0),
960}; 980};
961 981
962int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); 982int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index eae4ed3956e0..b3cc4731aa7c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -31,6 +31,7 @@
31#define _I915_DRV_H_ 31#define _I915_DRV_H_
32 32
33#include "i915_reg.h" 33#include "i915_reg.h"
34#include <linux/io-mapping.h>
34 35
35/* General customization: 36/* General customization:
36 */ 37 */
@@ -46,6 +47,8 @@ enum pipe {
46 PIPE_B, 47 PIPE_B,
47}; 48};
48 49
50#define I915_NUM_PIPE 2
51
49/* Interface history: 52/* Interface history:
50 * 53 *
51 * 1.1: Original. 54 * 1.1: Original.
@@ -87,13 +90,6 @@ struct mem_block {
87 struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */ 90 struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
88}; 91};
89 92
90typedef struct _drm_i915_vbl_swap {
91 struct list_head head;
92 drm_drawable_t drw_id;
93 unsigned int plane;
94 unsigned int sequence;
95} drm_i915_vbl_swap_t;
96
97struct opregion_header; 93struct opregion_header;
98struct opregion_acpi; 94struct opregion_acpi;
99struct opregion_swsci; 95struct opregion_swsci;
@@ -110,6 +106,8 @@ struct intel_opregion {
110typedef struct drm_i915_private { 106typedef struct drm_i915_private {
111 struct drm_device *dev; 107 struct drm_device *dev;
112 108
109 int has_gem;
110
113 void __iomem *regs; 111 void __iomem *regs;
114 drm_local_map_t *sarea; 112 drm_local_map_t *sarea;
115 113
@@ -138,6 +136,7 @@ typedef struct drm_i915_private {
138 int user_irq_refcount; 136 int user_irq_refcount;
139 /** Cached value of IMR to avoid reads in updating the bitfield */ 137 /** Cached value of IMR to avoid reads in updating the bitfield */
140 u32 irq_mask_reg; 138 u32 irq_mask_reg;
139 u32 pipestat[2];
141 140
142 int tex_lru_log_granularity; 141 int tex_lru_log_granularity;
143 int allow_batchbuffer; 142 int allow_batchbuffer;
@@ -145,10 +144,6 @@ typedef struct drm_i915_private {
145 unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds; 144 unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
146 int vblank_pipe; 145 int vblank_pipe;
147 146
148 spinlock_t swaps_lock;
149 drm_i915_vbl_swap_t vbl_swaps;
150 unsigned int swaps_pending;
151
152 struct intel_opregion opregion; 147 struct intel_opregion opregion;
153 148
154 /* Register state */ 149 /* Register state */
@@ -156,6 +151,8 @@ typedef struct drm_i915_private {
156 u32 saveDSPACNTR; 151 u32 saveDSPACNTR;
157 u32 saveDSPBCNTR; 152 u32 saveDSPBCNTR;
158 u32 saveDSPARB; 153 u32 saveDSPARB;
154 u32 saveRENDERSTANDBY;
155 u32 saveHWS;
159 u32 savePIPEACONF; 156 u32 savePIPEACONF;
160 u32 savePIPEBCONF; 157 u32 savePIPEBCONF;
161 u32 savePIPEASRC; 158 u32 savePIPEASRC;
@@ -243,10 +240,16 @@ typedef struct drm_i915_private {
243 struct { 240 struct {
244 struct drm_mm gtt_space; 241 struct drm_mm gtt_space;
245 242
243 struct io_mapping *gtt_mapping;
244
246 /** 245 /**
247 * List of objects currently involved in rendering from the 246 * List of objects currently involved in rendering from the
248 * ringbuffer. 247 * ringbuffer.
249 * 248 *
249 * Includes buffers having the contents of their GPU caches
250 * flushed, not necessarily primitives. last_rendering_seqno
251 * represents when the rendering involved will be completed.
252 *
250 * A reference is held on the buffer while on this list. 253 * A reference is held on the buffer while on this list.
251 */ 254 */
252 struct list_head active_list; 255 struct list_head active_list;
@@ -256,6 +259,8 @@ typedef struct drm_i915_private {
256 * still have a write_domain which needs to be flushed before 259 * still have a write_domain which needs to be flushed before
257 * unbinding. 260 * unbinding.
258 * 261 *
262 * last_rendering_seqno is 0 while an object is in this list.
263 *
259 * A reference is held on the buffer while on this list. 264 * A reference is held on the buffer while on this list.
260 */ 265 */
261 struct list_head flushing_list; 266 struct list_head flushing_list;
@@ -264,6 +269,8 @@ typedef struct drm_i915_private {
264 * LRU list of objects which are not in the ringbuffer and 269 * LRU list of objects which are not in the ringbuffer and
265 * are ready to unbind, but are still in the GTT. 270 * are ready to unbind, but are still in the GTT.
266 * 271 *
272 * last_rendering_seqno is 0 while an object is in this list.
273 *
267 * A reference is not held on the buffer while on this list, 274 * A reference is not held on the buffer while on this list,
268 * as merely being GTT-bound shouldn't prevent its being 275 * as merely being GTT-bound shouldn't prevent its being
269 * freed, and we'll pull it off the list in the free path. 276 * freed, and we'll pull it off the list in the free path.
@@ -285,9 +292,6 @@ typedef struct drm_i915_private {
285 */ 292 */
286 struct delayed_work retire_work; 293 struct delayed_work retire_work;
287 294
288 /** Work task for vblank-related ring access */
289 struct work_struct vblank_work;
290
291 uint32_t next_gem_seqno; 295 uint32_t next_gem_seqno;
292 296
293 /** 297 /**
@@ -377,8 +381,8 @@ struct drm_i915_gem_object {
377 uint32_t agp_type; 381 uint32_t agp_type;
378 382
379 /** 383 /**
380 * Flagging of which individual pages are valid in GEM_DOMAIN_CPU when 384 * If present, while GEM_DOMAIN_CPU is in the read domain this array
381 * GEM_DOMAIN_CPU is not in the object's read domain. 385 * flags which individual pages are valid.
382 */ 386 */
383 uint8_t *page_cpu_valid; 387 uint8_t *page_cpu_valid;
384}; 388};
@@ -400,9 +404,6 @@ struct drm_i915_gem_request {
400 /** Time at which this request was emitted, in jiffies. */ 404 /** Time at which this request was emitted, in jiffies. */
401 unsigned long emitted_jiffies; 405 unsigned long emitted_jiffies;
402 406
403 /** Cache domains that were flushed at the start of the request. */
404 uint32_t flush_domains;
405
406 struct list_head list; 407 struct list_head list;
407}; 408};
408 409
@@ -441,7 +442,6 @@ extern int i915_irq_wait(struct drm_device *dev, void *data,
441void i915_user_irq_get(struct drm_device *dev); 442void i915_user_irq_get(struct drm_device *dev);
442void i915_user_irq_put(struct drm_device *dev); 443void i915_user_irq_put(struct drm_device *dev);
443 444
444extern void i915_gem_vblank_work_handler(struct work_struct *work);
445extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS); 445extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
446extern void i915_driver_irq_preinstall(struct drm_device * dev); 446extern void i915_driver_irq_preinstall(struct drm_device * dev);
447extern int i915_driver_irq_postinstall(struct drm_device *dev); 447extern int i915_driver_irq_postinstall(struct drm_device *dev);
@@ -457,6 +457,13 @@ extern int i915_vblank_swap(struct drm_device *dev, void *data,
457 struct drm_file *file_priv); 457 struct drm_file *file_priv);
458extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask); 458extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask);
459 459
460void
461i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
462
463void
464i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
465
466
460/* i915_mem.c */ 467/* i915_mem.c */
461extern int i915_mem_alloc(struct drm_device *dev, void *data, 468extern int i915_mem_alloc(struct drm_device *dev, void *data,
462 struct drm_file *file_priv); 469 struct drm_file *file_priv);
@@ -502,6 +509,8 @@ int i915_gem_set_tiling(struct drm_device *dev, void *data,
502 struct drm_file *file_priv); 509 struct drm_file *file_priv);
503int i915_gem_get_tiling(struct drm_device *dev, void *data, 510int i915_gem_get_tiling(struct drm_device *dev, void *data,
504 struct drm_file *file_priv); 511 struct drm_file *file_priv);
512int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
513 struct drm_file *file_priv);
505void i915_gem_load(struct drm_device *dev); 514void i915_gem_load(struct drm_device *dev);
506int i915_gem_proc_init(struct drm_minor *minor); 515int i915_gem_proc_init(struct drm_minor *minor);
507void i915_gem_proc_cleanup(struct drm_minor *minor); 516void i915_gem_proc_cleanup(struct drm_minor *minor);
@@ -539,11 +548,18 @@ extern int i915_restore_state(struct drm_device *dev);
539extern int i915_save_state(struct drm_device *dev); 548extern int i915_save_state(struct drm_device *dev);
540extern int i915_restore_state(struct drm_device *dev); 549extern int i915_restore_state(struct drm_device *dev);
541 550
551#ifdef CONFIG_ACPI
542/* i915_opregion.c */ 552/* i915_opregion.c */
543extern int intel_opregion_init(struct drm_device *dev); 553extern int intel_opregion_init(struct drm_device *dev);
544extern void intel_opregion_free(struct drm_device *dev); 554extern void intel_opregion_free(struct drm_device *dev);
545extern void opregion_asle_intr(struct drm_device *dev); 555extern void opregion_asle_intr(struct drm_device *dev);
546extern void opregion_enable_asle(struct drm_device *dev); 556extern void opregion_enable_asle(struct drm_device *dev);
557#else
558static inline int intel_opregion_init(struct drm_device *dev) { return 0; }
559static inline void intel_opregion_free(struct drm_device *dev) { return; }
560static inline void opregion_asle_intr(struct drm_device *dev) { return; }
561static inline void opregion_enable_asle(struct drm_device *dev) { return; }
562#endif
547 563
548/** 564/**
549 * Lock test for when it's just for synchronization of ring access. 565 * Lock test for when it's just for synchronization of ring access.
@@ -610,8 +626,9 @@ extern void opregion_enable_asle(struct drm_device *dev);
610 * The area from dword 0x20 to 0x3ff is available for driver usage. 626 * The area from dword 0x20 to 0x3ff is available for driver usage.
611 */ 627 */
612#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg]) 628#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg])
613#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, 5) 629#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
614#define I915_GEM_HWS_INDEX 0x20 630#define I915_GEM_HWS_INDEX 0x20
631#define I915_BREADCRUMB_INDEX 0x21
615 632
616extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); 633extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
617 634
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index dc2e6fdb6ca3..24fe8c10b4b2 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -31,21 +31,23 @@
31#include "i915_drv.h" 31#include "i915_drv.h"
32#include <linux/swap.h> 32#include <linux/swap.h>
33 33
34static int 34#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
35i915_gem_object_set_domain(struct drm_gem_object *obj, 35
36 uint32_t read_domains, 36static void
37 uint32_t write_domain); 37i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
38static int 38 uint32_t read_domains,
39i915_gem_object_set_domain_range(struct drm_gem_object *obj, 39 uint32_t write_domain);
40 uint64_t offset, 40static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
41 uint64_t size, 41static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
42 uint32_t read_domains, 42static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
43 uint32_t write_domain); 43static int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
44static int 44 int write);
45i915_gem_set_domain(struct drm_gem_object *obj, 45static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
46 struct drm_file *file_priv, 46 int write);
47 uint32_t read_domains, 47static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
48 uint32_t write_domain); 48 uint64_t offset,
49 uint64_t size);
50static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
49static int i915_gem_object_get_page_list(struct drm_gem_object *obj); 51static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
50static void i915_gem_object_free_page_list(struct drm_gem_object *obj); 52static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
51static int i915_gem_object_wait_rendering(struct drm_gem_object *obj); 53static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
@@ -79,6 +81,22 @@ i915_gem_init_ioctl(struct drm_device *dev, void *data,
79 return 0; 81 return 0;
80} 82}
81 83
84int
85i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
86 struct drm_file *file_priv)
87{
88 struct drm_i915_gem_get_aperture *args = data;
89
90 if (!(dev->driver->driver_features & DRIVER_GEM))
91 return -ENODEV;
92
93 args->aper_size = dev->gtt_total;
94 args->aper_available_size = (args->aper_size -
95 atomic_read(&dev->pin_memory));
96
97 return 0;
98}
99
82 100
83/** 101/**
84 * Creates a new mm object and returns a handle to it. 102 * Creates a new mm object and returns a handle to it.
@@ -144,8 +162,8 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
144 162
145 mutex_lock(&dev->struct_mutex); 163 mutex_lock(&dev->struct_mutex);
146 164
147 ret = i915_gem_object_set_domain_range(obj, args->offset, args->size, 165 ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
148 I915_GEM_DOMAIN_CPU, 0); 166 args->size);
149 if (ret != 0) { 167 if (ret != 0) {
150 drm_gem_object_unreference(obj); 168 drm_gem_object_unreference(obj);
151 mutex_unlock(&dev->struct_mutex); 169 mutex_unlock(&dev->struct_mutex);
@@ -171,35 +189,50 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
171 return 0; 189 return 0;
172} 190}
173 191
174/* 192/* This is the fast write path which cannot handle
175 * Try to write quickly with an atomic kmap. Return true on success. 193 * page faults in the source data
176 *
177 * If this fails (which includes a partial write), we'll redo the whole
178 * thing with the slow version.
179 *
180 * This is a workaround for the low performance of iounmap (approximate
181 * 10% cpu cost on normal 3D workloads). kmap_atomic on HIGHMEM kernels
182 * happens to let us map card memory without taking IPIs. When the vmap
183 * rework lands we should be able to dump this hack.
184 */ 194 */
185static inline int fast_user_write(unsigned long pfn, char __user *user_data, 195
186 int l, int o) 196static inline int
197fast_user_write(struct io_mapping *mapping,
198 loff_t page_base, int page_offset,
199 char __user *user_data,
200 int length)
187{ 201{
188#ifdef CONFIG_HIGHMEM
189 unsigned long unwritten;
190 char *vaddr_atomic; 202 char *vaddr_atomic;
203 unsigned long unwritten;
191 204
192 vaddr_atomic = kmap_atomic_pfn(pfn, KM_USER0); 205 vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
193#if WATCH_PWRITE 206 unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
194 DRM_INFO("pwrite i %d o %d l %d pfn %ld vaddr %p\n", 207 user_data, length);
195 i, o, l, pfn, vaddr_atomic); 208 io_mapping_unmap_atomic(vaddr_atomic);
196#endif 209 if (unwritten)
197 unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + o, user_data, l); 210 return -EFAULT;
198 kunmap_atomic(vaddr_atomic, KM_USER0); 211 return 0;
199 return !unwritten; 212}
200#else 213
214/* Here's the write path which can sleep for
215 * page faults
216 */
217
218static inline int
219slow_user_write(struct io_mapping *mapping,
220 loff_t page_base, int page_offset,
221 char __user *user_data,
222 int length)
223{
224 char __iomem *vaddr;
225 unsigned long unwritten;
226
227 vaddr = io_mapping_map_wc(mapping, page_base);
228 if (vaddr == NULL)
229 return -EFAULT;
230 unwritten = __copy_from_user(vaddr + page_offset,
231 user_data, length);
232 io_mapping_unmap(vaddr);
233 if (unwritten)
234 return -EFAULT;
201 return 0; 235 return 0;
202#endif
203} 236}
204 237
205static int 238static int
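The two helpers above implement a common two-tier copy pattern: attempt the copy through the atomic, non-sleeping mapping first, and only fall back to the mapping that may sleep when the atomic attempt faults. A minimal userspace sketch of that control flow, with invented fast_copy/slow_copy stand-ins (an illustration, not kernel code):

#include <stdio.h>
#include <string.h>

/* Pretend the fast path cannot handle buffers longer than 8 bytes. */
static int fast_copy(char *dst, const char *src, size_t len)
{
	if (len > 8)
		return -1;	/* simulated fault: nothing copied */
	memcpy(dst, src, len);
	return 0;
}

static int slow_copy(char *dst, const char *src, size_t len)
{
	memcpy(dst, src, len);	/* the fallback is allowed to do more work */
	return 0;
}

int main(void)
{
	const char src[] = "0123456789abcdef";
	char dst[sizeof(src)];

	if (fast_copy(dst, src, sizeof(src)) != 0)
		slow_copy(dst, src, sizeof(src));

	printf("%s\n", dst);
	return 0;
}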
@@ -208,10 +241,12 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
208 struct drm_file *file_priv) 241 struct drm_file *file_priv)
209{ 242{
210 struct drm_i915_gem_object *obj_priv = obj->driver_private; 243 struct drm_i915_gem_object *obj_priv = obj->driver_private;
244 drm_i915_private_t *dev_priv = dev->dev_private;
211 ssize_t remain; 245 ssize_t remain;
212 loff_t offset; 246 loff_t offset, page_base;
213 char __user *user_data; 247 char __user *user_data;
214 int ret = 0; 248 int page_offset, page_length;
249 int ret;
215 250
216 user_data = (char __user *) (uintptr_t) args->data_ptr; 251 user_data = (char __user *) (uintptr_t) args->data_ptr;
217 remain = args->size; 252 remain = args->size;
@@ -225,8 +260,7 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
225 mutex_unlock(&dev->struct_mutex); 260 mutex_unlock(&dev->struct_mutex);
226 return ret; 261 return ret;
227 } 262 }
228 ret = i915_gem_set_domain(obj, file_priv, 263 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
229 I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
230 if (ret) 264 if (ret)
231 goto fail; 265 goto fail;
232 266
@@ -235,57 +269,37 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
235 obj_priv->dirty = 1; 269 obj_priv->dirty = 1;
236 270
237 while (remain > 0) { 271 while (remain > 0) {
238 unsigned long pfn;
239 int i, o, l;
240
241 /* Operation in this page 272 /* Operation in this page
242 * 273 *
243 * i = page number 274 * page_base = page offset within aperture
244 * o = offset within page 275 * page_offset = offset within page
245 * l = bytes to copy 276 * page_length = bytes to copy for this page
246 */ 277 */
247 i = offset >> PAGE_SHIFT; 278 page_base = (offset & ~(PAGE_SIZE-1));
248 o = offset & (PAGE_SIZE-1); 279 page_offset = offset & (PAGE_SIZE-1);
249 l = remain; 280 page_length = remain;
250 if ((o + l) > PAGE_SIZE) 281 if ((page_offset + remain) > PAGE_SIZE)
251 l = PAGE_SIZE - o; 282 page_length = PAGE_SIZE - page_offset;
252 283
253 pfn = (dev->agp->base >> PAGE_SHIFT) + i; 284 ret = fast_user_write (dev_priv->mm.gtt_mapping, page_base,
254 285 page_offset, user_data, page_length);
255 if (!fast_user_write(pfn, user_data, l, o)) { 286
256 unsigned long unwritten; 287 /* If we get a fault while copying data, then (presumably) our
257 char __iomem *vaddr; 288 * source page isn't available. In this case, use the
258 289 * non-atomic function
259 vaddr = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE); 290 */
260#if WATCH_PWRITE 291 if (ret) {
261 DRM_INFO("pwrite slow i %d o %d l %d " 292 ret = slow_user_write (dev_priv->mm.gtt_mapping,
262 "pfn %ld vaddr %p\n", 293 page_base, page_offset,
263 i, o, l, pfn, vaddr); 294 user_data, page_length);
264#endif 295 if (ret)
265 if (vaddr == NULL) {
266 ret = -EFAULT;
267 goto fail;
268 }
269 unwritten = __copy_from_user(vaddr + o, user_data, l);
270#if WATCH_PWRITE
271 DRM_INFO("unwritten %ld\n", unwritten);
272#endif
273 iounmap(vaddr);
274 if (unwritten) {
275 ret = -EFAULT;
276 goto fail; 296 goto fail;
277 }
278 } 297 }
279 298
280 remain -= l; 299 remain -= page_length;
281 user_data += l; 300 user_data += page_length;
282 offset += l; 301 offset += page_length;
283 } 302 }
284#if WATCH_PWRITE && 1
285 i915_gem_clflush_object(obj);
286 i915_gem_dump_object(obj, args->offset + args->size, __func__, ~0);
287 i915_gem_clflush_object(obj);
288#endif
289 303
290fail: 304fail:
291 i915_gem_object_unpin(obj); 305 i915_gem_object_unpin(obj);
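For reference, the page-chunking arithmetic used by the rewritten loop can be exercised on its own; the sketch below walks an (offset, size) range and prints the page_base/page_offset/page_length split for each step (standalone demo, PAGE_SIZE fixed at 4096 here):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096u

int main(void)
{
	uint64_t offset = 4000, remain = 9000;

	while (remain > 0) {
		uint64_t page_base = offset & ~(uint64_t)(PAGE_SIZE - 1);
		unsigned page_offset = offset & (PAGE_SIZE - 1);
		uint64_t page_length = remain;

		if (page_offset + page_length > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		printf("base=%llu off=%u len=%llu\n",
		       (unsigned long long)page_base, page_offset,
		       (unsigned long long)page_length);

		remain -= page_length;
		offset += page_length;
	}
	return 0;
}

The first iteration copies only up to the next page boundary; every later iteration starts page-aligned, which is what lets the loop body map one page at a time.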
@@ -305,8 +319,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
305 319
306 mutex_lock(&dev->struct_mutex); 320 mutex_lock(&dev->struct_mutex);
307 321
308 ret = i915_gem_set_domain(obj, file_priv, 322 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
309 I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
310 if (ret) { 323 if (ret) {
311 mutex_unlock(&dev->struct_mutex); 324 mutex_unlock(&dev->struct_mutex);
312 return ret; 325 return ret;
@@ -382,7 +395,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
382} 395}
383 396
384/** 397/**
385 * Called when user space prepares to use an object 398 * Called when user space prepares to use an object with the CPU, either
399 * through the mmap ioctl's mapping or a GTT mapping.
386 */ 400 */
387int 401int
388i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, 402i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
@@ -390,11 +404,26 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
390{ 404{
391 struct drm_i915_gem_set_domain *args = data; 405 struct drm_i915_gem_set_domain *args = data;
392 struct drm_gem_object *obj; 406 struct drm_gem_object *obj;
407 uint32_t read_domains = args->read_domains;
408 uint32_t write_domain = args->write_domain;
393 int ret; 409 int ret;
394 410
395 if (!(dev->driver->driver_features & DRIVER_GEM)) 411 if (!(dev->driver->driver_features & DRIVER_GEM))
396 return -ENODEV; 412 return -ENODEV;
397 413
414 /* Only handle setting domains to types used by the CPU. */
415 if (write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
416 return -EINVAL;
417
418 if (read_domains & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
419 return -EINVAL;
420
421 /* Having something in the write domain implies it's in the read
422 * domain, and only that read domain. Enforce that in the request.
423 */
424 if (write_domain != 0 && read_domains != write_domain)
425 return -EINVAL;
426
398 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 427 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
399 if (obj == NULL) 428 if (obj == NULL)
400 return -EBADF; 429 return -EBADF;
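The argument checks added to the ioctl boil down to a small predicate; the following standalone sketch reproduces it with made-up domain bit values so the accepted and rejected combinations are easy to see:

#include <stdio.h>
#include <stdint.h>

#define DOM_CPU 0x1u
#define DOM_GTT 0x2u
#define DOM_GPU 0x4u	/* stands in for any non-CPU-visible domain bit */

static int check_domains(uint32_t read_domains, uint32_t write_domain)
{
	if (write_domain & ~(DOM_CPU | DOM_GTT))
		return -1;
	if (read_domains & ~(DOM_CPU | DOM_GTT))
		return -1;
	/* a non-zero write domain must be the one and only read domain */
	if (write_domain != 0 && read_domains != write_domain)
		return -1;
	return 0;
}

int main(void)
{
	printf("%d\n", check_domains(DOM_GTT, DOM_GTT));		/* 0  */
	printf("%d\n", check_domains(DOM_CPU, 0));			/* 0  */
	printf("%d\n", check_domains(DOM_CPU | DOM_GTT, DOM_GTT));	/* -1 */
	printf("%d\n", check_domains(DOM_GPU, 0));			/* -1 */
	return 0;
}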
@@ -402,10 +431,21 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
402 mutex_lock(&dev->struct_mutex); 431 mutex_lock(&dev->struct_mutex);
403#if WATCH_BUF 432#if WATCH_BUF
404 DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n", 433 DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
405 obj, obj->size, args->read_domains, args->write_domain); 434 obj, obj->size, read_domains, write_domain);
406#endif 435#endif
407 ret = i915_gem_set_domain(obj, file_priv, 436 if (read_domains & I915_GEM_DOMAIN_GTT) {
408 args->read_domains, args->write_domain); 437 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
438
439 /* Silently promote "you're not bound, there was nothing to do"
440 * to success, since the client was just asking us to
441 * make sure everything was done.
442 */
443 if (ret == -EINVAL)
444 ret = 0;
445 } else {
446 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
447 }
448
409 drm_gem_object_unreference(obj); 449 drm_gem_object_unreference(obj);
410 mutex_unlock(&dev->struct_mutex); 450 mutex_unlock(&dev->struct_mutex);
411 return ret; 451 return ret;
@@ -440,10 +480,9 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
440 obj_priv = obj->driver_private; 480 obj_priv = obj->driver_private;
441 481
442 /* Pinned buffers may be scanout, so flush the cache */ 482 /* Pinned buffers may be scanout, so flush the cache */
443 if ((obj->write_domain & I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) { 483 if (obj_priv->pin_count)
444 i915_gem_clflush_object(obj); 484 i915_gem_object_flush_cpu_write_domain(obj);
445 drm_agp_chipset_flush(dev); 485
446 }
447 drm_gem_object_unreference(obj); 486 drm_gem_object_unreference(obj);
448 mutex_unlock(&dev->struct_mutex); 487 mutex_unlock(&dev->struct_mutex);
449 return ret; 488 return ret;
@@ -517,7 +556,7 @@ i915_gem_object_free_page_list(struct drm_gem_object *obj)
517} 556}
518 557
519static void 558static void
520i915_gem_object_move_to_active(struct drm_gem_object *obj) 559i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
521{ 560{
522 struct drm_device *dev = obj->dev; 561 struct drm_device *dev = obj->dev;
523 drm_i915_private_t *dev_priv = dev->dev_private; 562 drm_i915_private_t *dev_priv = dev->dev_private;
@@ -531,8 +570,20 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj)
531 /* Move from whatever list we were on to the tail of execution. */ 570 /* Move from whatever list we were on to the tail of execution. */
532 list_move_tail(&obj_priv->list, 571 list_move_tail(&obj_priv->list,
533 &dev_priv->mm.active_list); 572 &dev_priv->mm.active_list);
573 obj_priv->last_rendering_seqno = seqno;
534} 574}
535 575
576static void
577i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
578{
579 struct drm_device *dev = obj->dev;
580 drm_i915_private_t *dev_priv = dev->dev_private;
581 struct drm_i915_gem_object *obj_priv = obj->driver_private;
582
583 BUG_ON(!obj_priv->active);
584 list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
585 obj_priv->last_rendering_seqno = 0;
586}
536 587
537static void 588static void
538i915_gem_object_move_to_inactive(struct drm_gem_object *obj) 589i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
@@ -547,6 +598,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
547 else 598 else
548 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); 599 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
549 600
601 obj_priv->last_rendering_seqno = 0;
550 if (obj_priv->active) { 602 if (obj_priv->active) {
551 obj_priv->active = 0; 603 obj_priv->active = 0;
552 drm_gem_object_unreference(obj); 604 drm_gem_object_unreference(obj);
@@ -595,10 +647,28 @@ i915_add_request(struct drm_device *dev, uint32_t flush_domains)
595 647
596 request->seqno = seqno; 648 request->seqno = seqno;
597 request->emitted_jiffies = jiffies; 649 request->emitted_jiffies = jiffies;
598 request->flush_domains = flush_domains;
599 was_empty = list_empty(&dev_priv->mm.request_list); 650 was_empty = list_empty(&dev_priv->mm.request_list);
600 list_add_tail(&request->list, &dev_priv->mm.request_list); 651 list_add_tail(&request->list, &dev_priv->mm.request_list);
601 652
653 /* Associate any objects on the flushing list matching the write
654 * domain we're flushing with our flush.
655 */
656 if (flush_domains != 0) {
657 struct drm_i915_gem_object *obj_priv, *next;
658
659 list_for_each_entry_safe(obj_priv, next,
660 &dev_priv->mm.flushing_list, list) {
661 struct drm_gem_object *obj = obj_priv->obj;
662
663 if ((obj->write_domain & flush_domains) ==
664 obj->write_domain) {
665 obj->write_domain = 0;
666 i915_gem_object_move_to_active(obj, seqno);
667 }
668 }
669
670 }
671
602 if (was_empty && !dev_priv->mm.suspended) 672 if (was_empty && !dev_priv->mm.suspended)
603 schedule_delayed_work(&dev_priv->mm.retire_work, HZ); 673 schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
604 return seqno; 674 return seqno;
@@ -661,30 +731,10 @@ i915_gem_retire_request(struct drm_device *dev,
661 __func__, request->seqno, obj); 731 __func__, request->seqno, obj);
662#endif 732#endif
663 733
664 if (obj->write_domain != 0) { 734 if (obj->write_domain != 0)
665 list_move_tail(&obj_priv->list, 735 i915_gem_object_move_to_flushing(obj);
666 &dev_priv->mm.flushing_list); 736 else
667 } else {
668 i915_gem_object_move_to_inactive(obj); 737 i915_gem_object_move_to_inactive(obj);
669 }
670 }
671
672 if (request->flush_domains != 0) {
673 struct drm_i915_gem_object *obj_priv, *next;
674
675 /* Clear the write domain and activity from any buffers
676 * that are just waiting for a flush matching the one retired.
677 */
678 list_for_each_entry_safe(obj_priv, next,
679 &dev_priv->mm.flushing_list, list) {
680 struct drm_gem_object *obj = obj_priv->obj;
681
682 if (obj->write_domain & request->flush_domains) {
683 obj->write_domain = 0;
684 i915_gem_object_move_to_inactive(obj);
685 }
686 }
687
688 } 738 }
689} 739}
690 740
@@ -877,25 +927,10 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
877 struct drm_i915_gem_object *obj_priv = obj->driver_private; 927 struct drm_i915_gem_object *obj_priv = obj->driver_private;
878 int ret; 928 int ret;
879 929
880 /* If there are writes queued to the buffer, flush and 930 /* This function only exists to support waiting for existing rendering,
881 * create a new seqno to wait for. 931 * not for emitting required flushes.
882 */ 932 */
883 if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) { 933 BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);
884 uint32_t write_domain = obj->write_domain;
885#if WATCH_BUF
886 DRM_INFO("%s: flushing object %p from write domain %08x\n",
887 __func__, obj, write_domain);
888#endif
889 i915_gem_flush(dev, 0, write_domain);
890
891 i915_gem_object_move_to_active(obj);
892 obj_priv->last_rendering_seqno = i915_add_request(dev,
893 write_domain);
894 BUG_ON(obj_priv->last_rendering_seqno == 0);
895#if WATCH_LRU
896 DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj);
897#endif
898 }
899 934
900 /* If there is rendering queued on the buffer being evicted, wait for 935 /* If there is rendering queued on the buffer being evicted, wait for
901 * it. 936 * it.
@@ -935,24 +970,16 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
935 return -EINVAL; 970 return -EINVAL;
936 } 971 }
937 972
938 /* Wait for any rendering to complete
939 */
940 ret = i915_gem_object_wait_rendering(obj);
941 if (ret) {
942 DRM_ERROR("wait_rendering failed: %d\n", ret);
943 return ret;
944 }
945
946 /* Move the object to the CPU domain to ensure that 973 /* Move the object to the CPU domain to ensure that
947 * any possible CPU writes while it's not in the GTT 974 * any possible CPU writes while it's not in the GTT
948 * are flushed when we go to remap it. This will 975 * are flushed when we go to remap it. This will
949 * also ensure that all pending GPU writes are finished 976 * also ensure that all pending GPU writes are finished
950 * before we unbind. 977 * before we unbind.
951 */ 978 */
952 ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU, 979 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
953 I915_GEM_DOMAIN_CPU);
954 if (ret) { 980 if (ret) {
955 DRM_ERROR("set_domain failed: %d\n", ret); 981 if (ret != -ERESTARTSYS)
982 DRM_ERROR("set_domain failed: %d\n", ret);
956 return ret; 983 return ret;
957 } 984 }
958 985
@@ -1068,6 +1095,21 @@ i915_gem_evict_something(struct drm_device *dev)
1068} 1095}
1069 1096
1070static int 1097static int
1098i915_gem_evict_everything(struct drm_device *dev)
1099{
1100 int ret;
1101
1102 for (;;) {
1103 ret = i915_gem_evict_something(dev);
1104 if (ret != 0)
1105 break;
1106 }
1107 if (ret == -ENOMEM)
1108 return 0;
1109 return ret;
1110}
1111
1112static int
1071i915_gem_object_get_page_list(struct drm_gem_object *obj) 1113i915_gem_object_get_page_list(struct drm_gem_object *obj)
1072{ 1114{
1073 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1115 struct drm_i915_gem_object *obj_priv = obj->driver_private;
@@ -1153,7 +1195,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
1153 1195
1154 ret = i915_gem_evict_something(dev); 1196 ret = i915_gem_evict_something(dev);
1155 if (ret != 0) { 1197 if (ret != 0) {
1156 DRM_ERROR("Failed to evict a buffer %d\n", ret); 1198 if (ret != -ERESTARTSYS)
1199 DRM_ERROR("Failed to evict a buffer %d\n", ret);
1157 return ret; 1200 return ret;
1158 } 1201 }
1159 goto search_free; 1202 goto search_free;
@@ -1213,6 +1256,143 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
1213 drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE); 1256 drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE);
1214} 1257}
1215 1258
1259/** Flushes any GPU write domain for the object if it's dirty. */
1260static void
1261i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
1262{
1263 struct drm_device *dev = obj->dev;
1264 uint32_t seqno;
1265
1266 if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
1267 return;
1268
1269 /* Queue the GPU write cache flushing we need. */
1270 i915_gem_flush(dev, 0, obj->write_domain);
1271 seqno = i915_add_request(dev, obj->write_domain);
1272 obj->write_domain = 0;
1273 i915_gem_object_move_to_active(obj, seqno);
1274}
1275
1276/** Flushes the GTT write domain for the object if it's dirty. */
1277static void
1278i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
1279{
1280 if (obj->write_domain != I915_GEM_DOMAIN_GTT)
1281 return;
1282
1283 /* No actual flushing is required for the GTT write domain. Writes
1284 * to it immediately go to main memory as far as we know, so there's
1285 * no chipset flush. It also doesn't land in render cache.
1286 */
1287 obj->write_domain = 0;
1288}
1289
1290/** Flushes the CPU write domain for the object if it's dirty. */
1291static void
1292i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
1293{
1294 struct drm_device *dev = obj->dev;
1295
1296 if (obj->write_domain != I915_GEM_DOMAIN_CPU)
1297 return;
1298
1299 i915_gem_clflush_object(obj);
1300 drm_agp_chipset_flush(dev);
1301 obj->write_domain = 0;
1302}
1303
1304/**
1305 * Moves a single object to the GTT read, and possibly write domain.
1306 *
1307 * This function returns when the move is complete, including waiting on
1308 * flushes to occur.
1309 */
1310static int
1311i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
1312{
1313 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1314 int ret;
1315
1316 /* Not valid to be called on unbound objects. */
1317 if (obj_priv->gtt_space == NULL)
1318 return -EINVAL;
1319
1320 i915_gem_object_flush_gpu_write_domain(obj);
1321 /* Wait on any GPU rendering and flushing to occur. */
1322 ret = i915_gem_object_wait_rendering(obj);
1323 if (ret != 0)
1324 return ret;
1325
1326 /* If we're writing through the GTT domain, then CPU and GPU caches
1327 * will need to be invalidated at next use.
1328 */
1329 if (write)
1330 obj->read_domains &= I915_GEM_DOMAIN_GTT;
1331
1332 i915_gem_object_flush_cpu_write_domain(obj);
1333
1334 /* It should now be out of any other write domains, and we can update
1335 * the domain values for our changes.
1336 */
1337 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
1338 obj->read_domains |= I915_GEM_DOMAIN_GTT;
1339 if (write) {
1340 obj->write_domain = I915_GEM_DOMAIN_GTT;
1341 obj_priv->dirty = 1;
1342 }
1343
1344 return 0;
1345}
1346
1347/**
1348 * Moves a single object to the CPU read, and possibly write domain.
1349 *
1350 * This function returns when the move is complete, including waiting on
1351 * flushes to occur.
1352 */
1353static int
1354i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
1355{
1356 struct drm_device *dev = obj->dev;
1357 int ret;
1358
1359 i915_gem_object_flush_gpu_write_domain(obj);
1360 /* Wait on any GPU rendering and flushing to occur. */
1361 ret = i915_gem_object_wait_rendering(obj);
1362 if (ret != 0)
1363 return ret;
1364
1365 i915_gem_object_flush_gtt_write_domain(obj);
1366
1367 /* If we have a partially-valid cache of the object in the CPU,
1368 * finish invalidating it and free the per-page flags.
1369 */
1370 i915_gem_object_set_to_full_cpu_read_domain(obj);
1371
1372 /* Flush the CPU cache if it's still invalid. */
1373 if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
1374 i915_gem_clflush_object(obj);
1375 drm_agp_chipset_flush(dev);
1376
1377 obj->read_domains |= I915_GEM_DOMAIN_CPU;
1378 }
1379
1380 /* It should now be out of any other write domains, and we can update
1381 * the domain values for our changes.
1382 */
1383 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
1384
1385 /* If we're writing through the CPU, then the GPU read domains will
1386 * need to be invalidated at next use.
1387 */
1388 if (write) {
1389 obj->read_domains &= I915_GEM_DOMAIN_CPU;
1390 obj->write_domain = I915_GEM_DOMAIN_CPU;
1391 }
1392
1393 return 0;
1394}
1395
1216/* 1396/*
1217 * Set the next domain for the specified object. This 1397 * Set the next domain for the specified object. This
1218 * may not actually perform the necessary flushing/invalidating though, 1398
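Ignoring the flushes and waits, the flag bookkeeping done by the new i915_gem_object_set_to_gtt_domain() reduces to a few bit operations; here is a standalone model of just that bookkeeping (bit values invented, flush/wait/dirty steps elided):

#include <stdio.h>
#include <stdint.h>

#define DOM_CPU 0x1u
#define DOM_GTT 0x2u

struct obj { uint32_t read_domains, write_domain; };

static void set_to_gtt_domain(struct obj *o, int write)
{
	/* (GPU flush, wait_rendering and the CPU flush would happen here) */
	if (write)
		o->read_domains &= DOM_GTT;
	o->read_domains |= DOM_GTT;
	if (write)
		o->write_domain = DOM_GTT;
}

int main(void)
{
	struct obj o = { DOM_CPU, DOM_CPU };

	set_to_gtt_domain(&o, 1);
	printf("read %x write %x\n", (unsigned)o.read_domains,
	       (unsigned)o.write_domain);	/* 2 2: GTT only */

	o.read_domains = DOM_CPU;
	o.write_domain = 0;
	set_to_gtt_domain(&o, 0);
	printf("read %x write %x\n", (unsigned)o.read_domains,
	       (unsigned)o.write_domain);	/* 3 0: read from both */
	return 0;
}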
@@ -1324,16 +1504,18 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
1324 * MI_FLUSH 1504 * MI_FLUSH
1325 * drm_agp_chipset_flush 1505 * drm_agp_chipset_flush
1326 */ 1506 */
1327static int 1507static void
1328i915_gem_object_set_domain(struct drm_gem_object *obj, 1508i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
1329 uint32_t read_domains, 1509 uint32_t read_domains,
1330 uint32_t write_domain) 1510 uint32_t write_domain)
1331{ 1511{
1332 struct drm_device *dev = obj->dev; 1512 struct drm_device *dev = obj->dev;
1333 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1513 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1334 uint32_t invalidate_domains = 0; 1514 uint32_t invalidate_domains = 0;
1335 uint32_t flush_domains = 0; 1515 uint32_t flush_domains = 0;
1336 int ret; 1516
1517 BUG_ON(read_domains & I915_GEM_DOMAIN_CPU);
1518 BUG_ON(write_domain == I915_GEM_DOMAIN_CPU);
1337 1519
1338#if WATCH_BUF 1520#if WATCH_BUF
1339 DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n", 1521 DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
@@ -1370,34 +1552,11 @@ i915_gem_object_set_domain(struct drm_gem_object *obj,
1370 DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n", 1552 DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
1371 __func__, flush_domains, invalidate_domains); 1553 __func__, flush_domains, invalidate_domains);
1372#endif 1554#endif
1373 /*
1374 * If we're invalidating the CPU cache and flushing a GPU cache,
1375 * then pause for rendering so that the GPU caches will be
1376 * flushed before the cpu cache is invalidated
1377 */
1378 if ((invalidate_domains & I915_GEM_DOMAIN_CPU) &&
1379 (flush_domains & ~(I915_GEM_DOMAIN_CPU |
1380 I915_GEM_DOMAIN_GTT))) {
1381 ret = i915_gem_object_wait_rendering(obj);
1382 if (ret)
1383 return ret;
1384 }
1385 i915_gem_clflush_object(obj); 1555 i915_gem_clflush_object(obj);
1386 } 1556 }
1387 1557
1388 if ((write_domain | flush_domains) != 0) 1558 if ((write_domain | flush_domains) != 0)
1389 obj->write_domain = write_domain; 1559 obj->write_domain = write_domain;
1390
1391 /* If we're invalidating the CPU domain, clear the per-page CPU
1392 * domain list as well.
1393 */
1394 if (obj_priv->page_cpu_valid != NULL &&
1395 (write_domain != 0 ||
1396 read_domains & I915_GEM_DOMAIN_CPU)) {
1397 drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
1398 DRM_MEM_DRIVER);
1399 obj_priv->page_cpu_valid = NULL;
1400 }
1401 obj->read_domains = read_domains; 1560 obj->read_domains = read_domains;
1402 1561
1403 dev->invalidate_domains |= invalidate_domains; 1562 dev->invalidate_domains |= invalidate_domains;
@@ -1408,49 +1567,94 @@ i915_gem_object_set_domain(struct drm_gem_object *obj,
1408 obj->read_domains, obj->write_domain, 1567 obj->read_domains, obj->write_domain,
1409 dev->invalidate_domains, dev->flush_domains); 1568 dev->invalidate_domains, dev->flush_domains);
1410#endif 1569#endif
1411 return 0;
1412} 1570}
1413 1571
1414/** 1572/**
1415 * Set the read/write domain on a range of the object. 1573 * Moves the object from a partially CPU read to a full one.
1416 * 1574 *
1417 * Currently only implemented for CPU reads, otherwise drops to normal 1575 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
1418 * i915_gem_object_set_domain(). 1576 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
1419 */ 1577 */
1420static int 1578static void
1421i915_gem_object_set_domain_range(struct drm_gem_object *obj, 1579i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
1422 uint64_t offset,
1423 uint64_t size,
1424 uint32_t read_domains,
1425 uint32_t write_domain)
1426{ 1580{
1581 struct drm_device *dev = obj->dev;
1427 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1582 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1428 int ret, i;
1429 1583
1430 if (obj->read_domains & I915_GEM_DOMAIN_CPU) 1584 if (!obj_priv->page_cpu_valid)
1431 return 0; 1585 return;
1432 1586
1433 if (read_domains != I915_GEM_DOMAIN_CPU || 1587 /* If we're partially in the CPU read domain, finish moving it in.
1434 write_domain != 0) 1588 */
1435 return i915_gem_object_set_domain(obj, 1589 if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
1436 read_domains, write_domain); 1590 int i;
1437 1591
1438 /* Wait on any GPU rendering to the object to be flushed. */ 1592 for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
1439 if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) { 1593 if (obj_priv->page_cpu_valid[i])
1440 ret = i915_gem_object_wait_rendering(obj); 1594 continue;
1441 if (ret) 1595 drm_clflush_pages(obj_priv->page_list + i, 1);
1442 return ret; 1596 }
1597 drm_agp_chipset_flush(dev);
1443 } 1598 }
1444 1599
1600 /* Free the page_cpu_valid mappings which are now stale, whether
1601 * or not we've got I915_GEM_DOMAIN_CPU.
1602 */
1603 drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
1604 DRM_MEM_DRIVER);
1605 obj_priv->page_cpu_valid = NULL;
1606}
1607
1608/**
1609 * Set the CPU read domain on a range of the object.
1610 *
1611 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
1612 * not entirely valid. The page_cpu_valid member of the object flags which
1613 * pages have been flushed, and will be respected by
1614 * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
1615 * of the whole object.
1616 *
1617 * This function returns when the move is complete, including waiting on
1618 * flushes to occur.
1619 */
1620static int
1621i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
1622 uint64_t offset, uint64_t size)
1623{
1624 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1625 int i, ret;
1626
1627 if (offset == 0 && size == obj->size)
1628 return i915_gem_object_set_to_cpu_domain(obj, 0);
1629
1630 i915_gem_object_flush_gpu_write_domain(obj);
1631 /* Wait on any GPU rendering and flushing to occur. */
1632 ret = i915_gem_object_wait_rendering(obj);
1633 if (ret != 0)
1634 return ret;
1635 i915_gem_object_flush_gtt_write_domain(obj);
1636
1637 /* If we're already fully in the CPU read domain, we're done. */
1638 if (obj_priv->page_cpu_valid == NULL &&
1639 (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
1640 return 0;
1641
1642 /* Otherwise, create/clear the per-page CPU read domain flag if we're
1643 * newly adding I915_GEM_DOMAIN_CPU
1644 */
1445 if (obj_priv->page_cpu_valid == NULL) { 1645 if (obj_priv->page_cpu_valid == NULL) {
1446 obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE, 1646 obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
1447 DRM_MEM_DRIVER); 1647 DRM_MEM_DRIVER);
1448 } 1648 if (obj_priv->page_cpu_valid == NULL)
1649 return -ENOMEM;
1650 } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
1651 memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
1449 1652
1450 /* Flush the cache on any pages that are still invalid from the CPU's 1653 /* Flush the cache on any pages that are still invalid from the CPU's
1451 * perspective. 1654 * perspective.
1452 */ 1655 */
1453 for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) { 1656 for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
1657 i++) {
1454 if (obj_priv->page_cpu_valid[i]) 1658 if (obj_priv->page_cpu_valid[i])
1455 continue; 1659 continue;
1456 1660
@@ -1459,39 +1663,14 @@ i915_gem_object_set_domain_range(struct drm_gem_object *obj,
1459 obj_priv->page_cpu_valid[i] = 1; 1663 obj_priv->page_cpu_valid[i] = 1;
1460 } 1664 }
1461 1665
1462 return 0; 1666 /* It should now be out of any other write domains, and we can update
1463} 1667 * the domain values for our changes.
1464
1465/**
1466 * Once all of the objects have been set in the proper domain,
1467 * perform the necessary flush and invalidate operations.
1468 *
1469 * Returns the write domains flushed, for use in flush tracking.
1470 */
1471static uint32_t
1472i915_gem_dev_set_domain(struct drm_device *dev)
1473{
1474 uint32_t flush_domains = dev->flush_domains;
1475
1476 /*
1477 * Now that all the buffers are synced to the proper domains,
1478 * flush and invalidate the collected domains
1479 */ 1668 */
1480 if (dev->invalidate_domains | dev->flush_domains) { 1669 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
1481#if WATCH_EXEC
1482 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
1483 __func__,
1484 dev->invalidate_domains,
1485 dev->flush_domains);
1486#endif
1487 i915_gem_flush(dev,
1488 dev->invalidate_domains,
1489 dev->flush_domains);
1490 dev->invalidate_domains = 0;
1491 dev->flush_domains = 0;
1492 }
1493 1670
1494 return flush_domains; 1671 obj->read_domains |= I915_GEM_DOMAIN_CPU;
1672
1673 return 0;
1495} 1674}
1496 1675
1497/** 1676/**
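The per-page bookkeeping behind i915_gem_object_set_cpu_read_domain_range() is a simple "flush only what is not yet valid" scheme. The standalone sketch below models it with a byte-per-page array and a hypothetical flush_page() in place of drm_clflush_pages():

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096u
#define NPAGES 8u

static unsigned char page_valid[NPAGES];

static void flush_page(unsigned long i)
{
	printf("flush page %lu\n", i);
}

static void make_range_cpu_readable(unsigned long offset, unsigned long size)
{
	unsigned long i;

	for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) {
		if (page_valid[i])
			continue;
		flush_page(i);
		page_valid[i] = 1;
	}
}

int main(void)
{
	memset(page_valid, 0, sizeof(page_valid));
	make_range_cpu_readable(100, 5000);	/* flushes pages 0 and 1  */
	make_range_cpu_readable(0, 8192);	/* nothing left to flush  */
	make_range_cpu_readable(8192, 4096);	/* flushes page 2         */
	return 0;
}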
@@ -1503,12 +1682,12 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
1503 struct drm_i915_gem_exec_object *entry) 1682 struct drm_i915_gem_exec_object *entry)
1504{ 1683{
1505 struct drm_device *dev = obj->dev; 1684 struct drm_device *dev = obj->dev;
1685 drm_i915_private_t *dev_priv = dev->dev_private;
1506 struct drm_i915_gem_relocation_entry reloc; 1686 struct drm_i915_gem_relocation_entry reloc;
1507 struct drm_i915_gem_relocation_entry __user *relocs; 1687 struct drm_i915_gem_relocation_entry __user *relocs;
1508 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1688 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1509 int i, ret; 1689 int i, ret;
1510 uint32_t last_reloc_offset = -1; 1690 void __iomem *reloc_page;
1511 void __iomem *reloc_page = NULL;
1512 1691
1513 /* Choose the GTT offset for our buffer and put it there. */ 1692 /* Choose the GTT offset for our buffer and put it there. */
1514 ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment); 1693 ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
@@ -1572,6 +1751,18 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
1572 return -EINVAL; 1751 return -EINVAL;
1573 } 1752 }
1574 1753
1754 if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
1755 reloc.read_domains & I915_GEM_DOMAIN_CPU) {
1756 DRM_ERROR("reloc with read/write CPU domains: "
1757 "obj %p target %d offset %d "
1758 "read %08x write %08x",
1759 obj, reloc.target_handle,
1760 (int) reloc.offset,
1761 reloc.read_domains,
1762 reloc.write_domain);
1763 return -EINVAL;
1764 }
1765
1575 if (reloc.write_domain && target_obj->pending_write_domain && 1766 if (reloc.write_domain && target_obj->pending_write_domain &&
1576 reloc.write_domain != target_obj->pending_write_domain) { 1767 reloc.write_domain != target_obj->pending_write_domain) {
1577 DRM_ERROR("Write domain conflict: " 1768 DRM_ERROR("Write domain conflict: "
@@ -1612,45 +1803,22 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
1612 continue; 1803 continue;
1613 } 1804 }
1614 1805
1615 /* Now that we're going to actually write some data in, 1806 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
1616 * make sure that any rendering using this buffer's contents 1807 if (ret != 0) {
1617 * is completed. 1808 drm_gem_object_unreference(target_obj);
1618 */ 1809 i915_gem_object_unpin(obj);
1619 i915_gem_object_wait_rendering(obj); 1810 return -EINVAL;
1620
1621 /* As we're writing through the gtt, flush
1622 * any CPU writes before we write the relocations
1623 */
1624 if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
1625 i915_gem_clflush_object(obj);
1626 drm_agp_chipset_flush(dev);
1627 obj->write_domain = 0;
1628 } 1811 }
1629 1812
1630 /* Map the page containing the relocation we're going to 1813 /* Map the page containing the relocation we're going to
1631 * perform. 1814 * perform.
1632 */ 1815 */
1633 reloc_offset = obj_priv->gtt_offset + reloc.offset; 1816 reloc_offset = obj_priv->gtt_offset + reloc.offset;
1634 if (reloc_page == NULL || 1817 reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
1635 (last_reloc_offset & ~(PAGE_SIZE - 1)) != 1818 (reloc_offset &
1636 (reloc_offset & ~(PAGE_SIZE - 1))) { 1819 ~(PAGE_SIZE - 1)));
1637 if (reloc_page != NULL)
1638 iounmap(reloc_page);
1639
1640 reloc_page = ioremap_wc(dev->agp->base +
1641 (reloc_offset &
1642 ~(PAGE_SIZE - 1)),
1643 PAGE_SIZE);
1644 last_reloc_offset = reloc_offset;
1645 if (reloc_page == NULL) {
1646 drm_gem_object_unreference(target_obj);
1647 i915_gem_object_unpin(obj);
1648 return -ENOMEM;
1649 }
1650 }
1651
1652 reloc_entry = (uint32_t __iomem *)(reloc_page + 1820 reloc_entry = (uint32_t __iomem *)(reloc_page +
1653 (reloc_offset & (PAGE_SIZE - 1))); 1821 (reloc_offset & (PAGE_SIZE - 1)));
1654 reloc_val = target_obj_priv->gtt_offset + reloc.delta; 1822 reloc_val = target_obj_priv->gtt_offset + reloc.delta;
1655 1823
1656#if WATCH_BUF 1824#if WATCH_BUF
@@ -1659,6 +1827,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
1659 readl(reloc_entry), reloc_val); 1827 readl(reloc_entry), reloc_val);
1660#endif 1828#endif
1661 writel(reloc_val, reloc_entry); 1829 writel(reloc_val, reloc_entry);
1830 io_mapping_unmap_atomic(reloc_page);
1662 1831
1663 /* Write the updated presumed offset for this entry back out 1832 /* Write the updated presumed offset for this entry back out
1664 * to the user. 1833 * to the user.
@@ -1674,9 +1843,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
1674 drm_gem_object_unreference(target_obj); 1843 drm_gem_object_unreference(target_obj);
1675 } 1844 }
1676 1845
1677 if (reloc_page != NULL)
1678 iounmap(reloc_page);
1679
1680#if WATCH_BUF 1846#if WATCH_BUF
1681 if (0) 1847 if (0)
1682 i915_gem_dump_object(obj, 128, __func__, ~0); 1848 i915_gem_dump_object(obj, 128, __func__, ~0);
@@ -1783,6 +1949,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
1783 int ret, i, pinned = 0; 1949 int ret, i, pinned = 0;
1784 uint64_t exec_offset; 1950 uint64_t exec_offset;
1785 uint32_t seqno, flush_domains; 1951 uint32_t seqno, flush_domains;
1952 int pin_tries;
1786 1953
1787#if WATCH_EXEC 1954#if WATCH_EXEC
1788 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", 1955 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
@@ -1831,14 +1998,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
1831 return -EBUSY; 1998 return -EBUSY;
1832 } 1999 }
1833 2000
1834 /* Zero the global flush/invalidate flags. These 2001 /* Look up object handles */
1835 * will be modified as each object is bound to the
1836 * gtt
1837 */
1838 dev->invalidate_domains = 0;
1839 dev->flush_domains = 0;
1840
1841 /* Look up object handles and perform the relocations */
1842 for (i = 0; i < args->buffer_count; i++) { 2002 for (i = 0; i < args->buffer_count; i++) {
1843 object_list[i] = drm_gem_object_lookup(dev, file_priv, 2003 object_list[i] = drm_gem_object_lookup(dev, file_priv,
1844 exec_list[i].handle); 2004 exec_list[i].handle);
@@ -1848,17 +2008,39 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
1848 ret = -EBADF; 2008 ret = -EBADF;
1849 goto err; 2009 goto err;
1850 } 2010 }
2011 }
1851 2012
1852 object_list[i]->pending_read_domains = 0; 2013 /* Pin and relocate */
1853 object_list[i]->pending_write_domain = 0; 2014 for (pin_tries = 0; ; pin_tries++) {
1854 ret = i915_gem_object_pin_and_relocate(object_list[i], 2015 ret = 0;
1855 file_priv, 2016 for (i = 0; i < args->buffer_count; i++) {
1856 &exec_list[i]); 2017 object_list[i]->pending_read_domains = 0;
1857 if (ret) { 2018 object_list[i]->pending_write_domain = 0;
1858 DRM_ERROR("object bind and relocate failed %d\n", ret); 2019 ret = i915_gem_object_pin_and_relocate(object_list[i],
2020 file_priv,
2021 &exec_list[i]);
2022 if (ret)
2023 break;
2024 pinned = i + 1;
2025 }
2026 /* success */
2027 if (ret == 0)
2028 break;
2029
2030 /* error other than GTT full, or we've already tried again */
2031 if (ret != -ENOMEM || pin_tries >= 1) {
2032 DRM_ERROR("Failed to pin buffers %d\n", ret);
1859 goto err; 2033 goto err;
1860 } 2034 }
1861 pinned = i + 1; 2035
2036 /* unpin all of our buffers */
2037 for (i = 0; i < pinned; i++)
2038 i915_gem_object_unpin(object_list[i]);
2039
2040 /* evict everyone we can from the aperture */
2041 ret = i915_gem_evict_everything(dev);
2042 if (ret)
2043 goto err;
1862 } 2044 }
1863 2045
1864 /* Set the pending read domains for the batch buffer to COMMAND */ 2046 /* Set the pending read domains for the batch buffer to COMMAND */
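The new pin loop retries exactly once after evicting the whole aperture. A standalone model of that retry policy, with stub pin/evict helpers invented for the demo, looks like this:

#include <stdio.h>
#include <errno.h>

#define NBUF 4

static int space = 2;			/* pretend aperture slots */

static int pin_buffer(int i)
{
	if (space == 0)
		return -ENOMEM;
	space--;
	printf("pinned %d\n", i);
	return 0;
}

static void unpin_buffer(int i) { space++; printf("unpinned %d\n", i); }
static void evict_everything(void) { space = NBUF; printf("evicted\n"); }

int main(void)
{
	int i, ret, pinned, pin_tries;

	for (pin_tries = 0; ; pin_tries++) {
		ret = 0;
		pinned = 0;
		for (i = 0; i < NBUF; i++) {
			ret = pin_buffer(i);
			if (ret)
				break;
			pinned = i + 1;
		}
		if (ret == 0)
			break;			/* everything pinned      */
		if (ret != -ENOMEM || pin_tries >= 1)
			return 1;		/* give up                */
		for (i = 0; i < pinned; i++)
			unpin_buffer(i);
		evict_everything();
	}
	printf("all %d buffers pinned\n", NBUF);
	return 0;
}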
@@ -1868,32 +2050,37 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
1868 2050
1869 i915_verify_inactive(dev, __FILE__, __LINE__); 2051 i915_verify_inactive(dev, __FILE__, __LINE__);
1870 2052
2053 /* Zero the global flush/invalidate flags. These
2054 * will be modified as new domains are computed
2055 * for each object
2056 */
2057 dev->invalidate_domains = 0;
2058 dev->flush_domains = 0;
2059
1871 for (i = 0; i < args->buffer_count; i++) { 2060 for (i = 0; i < args->buffer_count; i++) {
1872 struct drm_gem_object *obj = object_list[i]; 2061 struct drm_gem_object *obj = object_list[i];
1873 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1874
1875 if (obj_priv->gtt_space == NULL) {
1876 /* We evicted the buffer in the process of validating
1877 * our set of buffers in. We could try to recover by
1878 * kicking them everything out and trying again from
1879 * the start.
1880 */
1881 ret = -ENOMEM;
1882 goto err;
1883 }
1884 2062
1885 /* make sure all previous memory operations have passed */ 2063 /* Compute new gpu domains and update invalidate/flush */
1886 ret = i915_gem_object_set_domain(obj, 2064 i915_gem_object_set_to_gpu_domain(obj,
1887 obj->pending_read_domains, 2065 obj->pending_read_domains,
1888 obj->pending_write_domain); 2066 obj->pending_write_domain);
1889 if (ret)
1890 goto err;
1891 } 2067 }
1892 2068
1893 i915_verify_inactive(dev, __FILE__, __LINE__); 2069 i915_verify_inactive(dev, __FILE__, __LINE__);
1894 2070
1895 /* Flush/invalidate caches and chipset buffer */ 2071 if (dev->invalidate_domains | dev->flush_domains) {
1896 flush_domains = i915_gem_dev_set_domain(dev); 2072#if WATCH_EXEC
2073 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
2074 __func__,
2075 dev->invalidate_domains,
2076 dev->flush_domains);
2077#endif
2078 i915_gem_flush(dev,
2079 dev->invalidate_domains,
2080 dev->flush_domains);
2081 if (dev->flush_domains)
2082 (void)i915_add_request(dev, dev->flush_domains);
2083 }
1897 2084
1898 i915_verify_inactive(dev, __FILE__, __LINE__); 2085 i915_verify_inactive(dev, __FILE__, __LINE__);
1899 2086
@@ -1913,8 +2100,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
1913 ~0); 2100 ~0);
1914#endif 2101#endif
1915 2102
1916 (void)i915_add_request(dev, flush_domains);
1917
1918 /* Exec the batchbuffer */ 2103 /* Exec the batchbuffer */
1919 ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset); 2104 ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
1920 if (ret) { 2105 if (ret) {
@@ -1942,10 +2127,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
1942 i915_file_priv->mm.last_gem_seqno = seqno; 2127 i915_file_priv->mm.last_gem_seqno = seqno;
1943 for (i = 0; i < args->buffer_count; i++) { 2128 for (i = 0; i < args->buffer_count; i++) {
1944 struct drm_gem_object *obj = object_list[i]; 2129 struct drm_gem_object *obj = object_list[i];
1945 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1946 2130
1947 i915_gem_object_move_to_active(obj); 2131 i915_gem_object_move_to_active(obj, seqno);
1948 obj_priv->last_rendering_seqno = seqno;
1949#if WATCH_LRU 2132#if WATCH_LRU
1950 DRM_INFO("%s: move to exec list %p\n", __func__, obj); 2133 DRM_INFO("%s: move to exec list %p\n", __func__, obj);
1951#endif 2134#endif
@@ -2076,11 +2259,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
2076 /* XXX - flush the CPU caches for pinned objects 2259 /* XXX - flush the CPU caches for pinned objects
2077 * as the X server doesn't manage domains yet 2260 * as the X server doesn't manage domains yet
2078 */ 2261 */
2079 if (obj->write_domain & I915_GEM_DOMAIN_CPU) { 2262 i915_gem_object_flush_cpu_write_domain(obj);
2080 i915_gem_clflush_object(obj);
2081 drm_agp_chipset_flush(dev);
2082 obj->write_domain = 0;
2083 }
2084 args->offset = obj_priv->gtt_offset; 2263 args->offset = obj_priv->gtt_offset;
2085 drm_gem_object_unreference(obj); 2264 drm_gem_object_unreference(obj);
2086 mutex_unlock(&dev->struct_mutex); 2265 mutex_unlock(&dev->struct_mutex);
@@ -2130,7 +2309,14 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
2130 } 2309 }
2131 2310
2132 obj_priv = obj->driver_private; 2311 obj_priv = obj->driver_private;
2133 args->busy = obj_priv->active; 2312 /* Don't count being on the flushing list against the object being
2313 * done. Otherwise, a buffer left on the flushing list but not getting
2314 * flushed (because nobody's flushing that domain) won't ever return
2315 * unbusy and get reused by libdrm's bo cache. The other expected
2316 * consumer of this interface, OpenGL's occlusion queries, also specs
2317 * that the objects get unbusy "eventually" without any interference.
2318 */
2319 args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0;
2134 2320
2135 drm_gem_object_unreference(obj); 2321 drm_gem_object_unreference(obj);
2136 mutex_unlock(&dev->struct_mutex); 2322 mutex_unlock(&dev->struct_mutex);
@@ -2182,29 +2368,6 @@ void i915_gem_free_object(struct drm_gem_object *obj)
2182 drm_free(obj->driver_private, 1, DRM_MEM_DRIVER); 2368 drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
2183} 2369}
2184 2370
2185static int
2186i915_gem_set_domain(struct drm_gem_object *obj,
2187 struct drm_file *file_priv,
2188 uint32_t read_domains,
2189 uint32_t write_domain)
2190{
2191 struct drm_device *dev = obj->dev;
2192 int ret;
2193 uint32_t flush_domains;
2194
2195 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
2196
2197 ret = i915_gem_object_set_domain(obj, read_domains, write_domain);
2198 if (ret)
2199 return ret;
2200 flush_domains = i915_gem_dev_set_domain(obj->dev);
2201
2202 if (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT))
2203 (void) i915_add_request(dev, flush_domains);
2204
2205 return 0;
2206}
2207
2208/** Unbinds all objects that are on the given buffer list. */ 2371/** Unbinds all objects that are on the given buffer list. */
2209static int 2372static int
2210i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head) 2373i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
@@ -2299,29 +2462,52 @@ i915_gem_idle(struct drm_device *dev)
2299 2462
2300 i915_gem_retire_requests(dev); 2463 i915_gem_retire_requests(dev);
2301 2464
2302 /* Active and flushing should now be empty as we've 2465 if (!dev_priv->mm.wedged) {
2303 * waited for a sequence higher than any pending execbuffer 2466 /* Active and flushing should now be empty as we've
2304 */ 2467 * waited for a sequence higher than any pending execbuffer
2305 BUG_ON(!list_empty(&dev_priv->mm.active_list)); 2468 */
2306 BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); 2469 WARN_ON(!list_empty(&dev_priv->mm.active_list));
2470 WARN_ON(!list_empty(&dev_priv->mm.flushing_list));
2471 /* Request should now be empty as we've also waited
2472 * for the last request in the list
2473 */
2474 WARN_ON(!list_empty(&dev_priv->mm.request_list));
2475 }
2307 2476
2308 /* Request should now be empty as we've also waited 2477 /* Empty the active and flushing lists to inactive. If there's
2309 * for the last request in the list 2478 * anything left at this point, it means that we're wedged and
2479 * nothing good's going to happen by leaving them there. So strip
2480 * the GPU domains and just stuff them onto inactive.
2310 */ 2481 */
2311 BUG_ON(!list_empty(&dev_priv->mm.request_list)); 2482 while (!list_empty(&dev_priv->mm.active_list)) {
2483 struct drm_i915_gem_object *obj_priv;
2484
2485 obj_priv = list_first_entry(&dev_priv->mm.active_list,
2486 struct drm_i915_gem_object,
2487 list);
2488 obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
2489 i915_gem_object_move_to_inactive(obj_priv->obj);
2490 }
2491
2492 while (!list_empty(&dev_priv->mm.flushing_list)) {
2493 struct drm_i915_gem_object *obj_priv;
2312 2494
2313 /* Move all buffers out of the GTT. */ 2495 obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
2496 struct drm_i915_gem_object,
2497 list);
2498 obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
2499 i915_gem_object_move_to_inactive(obj_priv->obj);
2500 }
2501
2502
2503 /* Move all inactive buffers out of the GTT. */
2314 ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list); 2504 ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
2505 WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
2315 if (ret) { 2506 if (ret) {
2316 mutex_unlock(&dev->struct_mutex); 2507 mutex_unlock(&dev->struct_mutex);
2317 return ret; 2508 return ret;
2318 } 2509 }
2319 2510
2320 BUG_ON(!list_empty(&dev_priv->mm.active_list));
2321 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
2322 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
2323 BUG_ON(!list_empty(&dev_priv->mm.request_list));
2324
2325 i915_gem_cleanup_ringbuffer(dev); 2511 i915_gem_cleanup_ringbuffer(dev);
2326 mutex_unlock(&dev->struct_mutex); 2512 mutex_unlock(&dev->struct_mutex);
2327 2513
@@ -2518,6 +2704,10 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
2518 if (ret != 0) 2704 if (ret != 0)
2519 return ret; 2705 return ret;
2520 2706
2707 dev_priv->mm.gtt_mapping = io_mapping_create_wc(dev->agp->base,
2708 dev->agp->agp_info.aper_size
2709 * 1024 * 1024);
2710
2521 mutex_lock(&dev->struct_mutex); 2711 mutex_lock(&dev->struct_mutex);
2522 BUG_ON(!list_empty(&dev_priv->mm.active_list)); 2712 BUG_ON(!list_empty(&dev_priv->mm.active_list));
2523 BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); 2713 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
@@ -2535,11 +2725,13 @@ int
2535i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, 2725i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
2536 struct drm_file *file_priv) 2726 struct drm_file *file_priv)
2537{ 2727{
2728 drm_i915_private_t *dev_priv = dev->dev_private;
2538 int ret; 2729 int ret;
2539 2730
2540 ret = i915_gem_idle(dev); 2731 ret = i915_gem_idle(dev);
2541 drm_irq_uninstall(dev); 2732 drm_irq_uninstall(dev);
2542 2733
2734 io_mapping_free(dev_priv->mm.gtt_mapping);
2543 return ret; 2735 return ret;
2544} 2736}
2545 2737
@@ -2564,8 +2756,6 @@ i915_gem_load(struct drm_device *dev)
2564 INIT_LIST_HEAD(&dev_priv->mm.request_list); 2756 INIT_LIST_HEAD(&dev_priv->mm.request_list);
2565 INIT_DELAYED_WORK(&dev_priv->mm.retire_work, 2757 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
2566 i915_gem_retire_work_handler); 2758 i915_gem_retire_work_handler);
2567 INIT_WORK(&dev_priv->mm.vblank_work,
2568 i915_gem_vblank_work_handler);
2569 dev_priv->mm.next_gem_seqno = 1; 2759 dev_priv->mm.next_gem_seqno = 1;
2570 2760
2571 i915_gem_detect_bit_6_swizzle(dev); 2761 i915_gem_detect_bit_6_swizzle(dev);
diff --git a/drivers/gpu/drm/i915/i915_gem_proc.c b/drivers/gpu/drm/i915/i915_gem_proc.c
index 15d4160415b0..e8d5abe1250e 100644
--- a/drivers/gpu/drm/i915/i915_gem_proc.c
+++ b/drivers/gpu/drm/i915/i915_gem_proc.c
@@ -166,10 +166,9 @@ static int i915_gem_request_info(char *buf, char **start, off_t offset,
166 list_for_each_entry(gem_request, &dev_priv->mm.request_list, 166 list_for_each_entry(gem_request, &dev_priv->mm.request_list,
167 list) 167 list)
168 { 168 {
169 DRM_PROC_PRINT(" %d @ %d %08x\n", 169 DRM_PROC_PRINT(" %d @ %d\n",
170 gem_request->seqno, 170 gem_request->seqno,
171 (int) (jiffies - gem_request->emitted_jiffies), 171 (int) (jiffies - gem_request->emitted_jiffies));
172 gem_request->flush_domains);
173 } 172 }
174 if (len > request + offset) 173 if (len > request + offset)
175 return request; 174 return request;
@@ -192,7 +191,12 @@ static int i915_gem_seqno_info(char *buf, char **start, off_t offset,
192 191
193 *start = &buf[offset]; 192 *start = &buf[offset];
194 *eof = 0; 193 *eof = 0;
195 DRM_PROC_PRINT("Current sequence: %d\n", i915_get_gem_seqno(dev)); 194 if (dev_priv->hw_status_page != NULL) {
195 DRM_PROC_PRINT("Current sequence: %d\n",
196 i915_get_gem_seqno(dev));
197 } else {
198 DRM_PROC_PRINT("Current sequence: hws uninitialized\n");
199 }
196 DRM_PROC_PRINT("Waiter sequence: %d\n", 200 DRM_PROC_PRINT("Waiter sequence: %d\n",
197 dev_priv->mm.waiting_gem_seqno); 201 dev_priv->mm.waiting_gem_seqno);
198 DRM_PROC_PRINT("IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno); 202 DRM_PROC_PRINT("IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno);
@@ -230,8 +234,12 @@ static int i915_interrupt_info(char *buf, char **start, off_t offset,
230 I915_READ(PIPEBSTAT)); 234 I915_READ(PIPEBSTAT));
231 DRM_PROC_PRINT("Interrupts received: %d\n", 235 DRM_PROC_PRINT("Interrupts received: %d\n",
232 atomic_read(&dev_priv->irq_received)); 236 atomic_read(&dev_priv->irq_received));
233 DRM_PROC_PRINT("Current sequence: %d\n", 237 if (dev_priv->hw_status_page != NULL) {
234 i915_get_gem_seqno(dev)); 238 DRM_PROC_PRINT("Current sequence: %d\n",
239 i915_get_gem_seqno(dev));
240 } else {
241 DRM_PROC_PRINT("Current sequence: hws uninitialized\n");
242 }
235 DRM_PROC_PRINT("Waiter sequence: %d\n", 243 DRM_PROC_PRINT("Waiter sequence: %d\n",
236 dev_priv->mm.waiting_gem_seqno); 244 dev_priv->mm.waiting_gem_seqno);
237 DRM_PROC_PRINT("IRQ sequence: %d\n", 245 DRM_PROC_PRINT("IRQ sequence: %d\n",
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index e8b85ac4ca04..a8cb69469c64 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -119,9 +119,10 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
119 dcc & DCC_CHANNEL_XOR_DISABLE) { 119 dcc & DCC_CHANNEL_XOR_DISABLE) {
120 swizzle_x = I915_BIT_6_SWIZZLE_9_10; 120 swizzle_x = I915_BIT_6_SWIZZLE_9_10;
121 swizzle_y = I915_BIT_6_SWIZZLE_9; 121 swizzle_y = I915_BIT_6_SWIZZLE_9;
122 } else if (IS_I965GM(dev) || IS_GM45(dev)) { 122 } else if ((IS_I965GM(dev) || IS_GM45(dev)) &&
123 /* GM965 only does bit 11-based channel 123 (dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
124 * randomization 124 /* GM965/GM45 does either bit 11 or bit 17
125 * swizzling.
125 */ 126 */
126 swizzle_x = I915_BIT_6_SWIZZLE_9_10_11; 127 swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
127 swizzle_y = I915_BIT_6_SWIZZLE_9_11; 128 swizzle_y = I915_BIT_6_SWIZZLE_9_11;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index baae511c785b..69b9a42da95e 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -33,11 +33,23 @@
33 33
34#define MAX_NOPID ((u32)~0) 34#define MAX_NOPID ((u32)~0)
35 35
36/** These are the interrupts used by the driver */ 36/**
37#define I915_INTERRUPT_ENABLE_MASK (I915_USER_INTERRUPT | \ 37 * Interrupts that are always left unmasked.
38 I915_ASLE_INTERRUPT | \ 38 *
39 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \ 39 * Since pipe events are edge-triggered from the PIPESTAT register to IIR,
40 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) 40 * we leave them always unmasked in IMR and then control enabling them through
41 * PIPESTAT alone.
42 */
43#define I915_INTERRUPT_ENABLE_FIX (I915_ASLE_INTERRUPT | \
44 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
45 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)
46
47/** Interrupts that we mask and unmask at runtime. */
48#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT)
49
50/** These are all of the interrupts used by the driver */
51#define I915_INTERRUPT_ENABLE_MASK (I915_INTERRUPT_ENABLE_FIX | \
52 I915_INTERRUPT_ENABLE_VAR)
41 53
42void 54void
43i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask) 55i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
@@ -59,41 +71,39 @@ i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
59 } 71 }
60} 72}
61 73
62/** 74static inline u32
63 * i915_get_pipe - return the pipe associated with a given plane 75i915_pipestat(int pipe)
64 * @dev: DRM device
65 * @plane: plane to look for
66 *
67 * The Intel Mesa & 2D drivers call the vblank routines with a plane number
68 * rather than a pipe number, since they may not always be equal. This routine
69 * maps the given @plane back to a pipe number.
70 */
71static int
72i915_get_pipe(struct drm_device *dev, int plane)
73{ 76{
74 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 77 if (pipe == 0)
75 u32 dspcntr; 78 return PIPEASTAT;
79 if (pipe == 1)
80 return PIPEBSTAT;
81 BUG();
82}
76 83
77 dspcntr = plane ? I915_READ(DSPBCNTR) : I915_READ(DSPACNTR); 84void
85i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
86{
87 if ((dev_priv->pipestat[pipe] & mask) != mask) {
88 u32 reg = i915_pipestat(pipe);
78 89
79 return dspcntr & DISPPLANE_SEL_PIPE_MASK ? 1 : 0; 90 dev_priv->pipestat[pipe] |= mask;
91 /* Enable the interrupt, clear any pending status */
92 I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
93 (void) I915_READ(reg);
94 }
80} 95}
81 96
82/** 97void
83 * i915_get_plane - return the plane associated with a given pipe 98i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
84 * @dev: DRM device
85 * @pipe: pipe to look for
86 *
87 * The Intel Mesa & 2D drivers call the vblank routines with a plane number
88 * rather than a pipe number, since they may not always be equal. This routine
89 * maps the given @pipe back to a plane number.
90 */
91static int
92i915_get_plane(struct drm_device *dev, int pipe)
93{ 99{
94 if (i915_get_pipe(dev, 0) == pipe) 100 if ((dev_priv->pipestat[pipe] & mask) != 0) {
95 return 0; 101 u32 reg = i915_pipestat(pipe);
96 return 1; 102
103 dev_priv->pipestat[pipe] &= ~mask;
104 I915_WRITE(reg, dev_priv->pipestat[pipe]);
105 (void) I915_READ(reg);
106 }
97} 107}
98 108
99/** 109/**
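The pipestat helpers introduced here keep a software shadow of each pipe's enable bits and touch the register only when that shadow changes, writing the corresponding status bits (mask >> 16) at the same time so stale events are cleared. A standalone model of that bookkeeping, where reg_write() is a stand-in that just prints:

#include <stdio.h>
#include <stdint.h>

static uint32_t pipestat[2];		/* software shadow, one per pipe */

static void reg_write(int pipe, uint32_t val)
{
	printf("PIPE%cSTAT <= %08x\n", pipe ? 'B' : 'A', (unsigned)val);
}

static void enable_pipestat(int pipe, uint32_t mask)
{
	if ((pipestat[pipe] & mask) != mask) {
		pipestat[pipe] |= mask;
		/* enable the interrupt, clear any pending status */
		reg_write(pipe, pipestat[pipe] | (mask >> 16));
	}
}

static void disable_pipestat(int pipe, uint32_t mask)
{
	if ((pipestat[pipe] & mask) != 0) {
		pipestat[pipe] &= ~mask;
		reg_write(pipe, pipestat[pipe]);
	}
}

int main(void)
{
	enable_pipestat(0, 0x00020000);		/* first enable writes reg */
	enable_pipestat(0, 0x00020000);		/* already set: no write   */
	disable_pipestat(0, 0x00020000);	/* clears and writes again */
	return 0;
}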
@@ -117,211 +127,16 @@ i915_pipe_enabled(struct drm_device *dev, int pipe)
117 return 0; 127 return 0;
118} 128}
119 129
120/** 130/* Called from drm generic code, passed a 'crtc', which
121 * Emit blits for scheduled buffer swaps. 131 * we use as a pipe index
122 *
123 * This function will be called with the HW lock held.
124 */ 132 */
125static void i915_vblank_tasklet(struct drm_device *dev) 133u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
126{
127 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
128 unsigned long irqflags;
129 struct list_head *list, *tmp, hits, *hit;
130 int nhits, nrects, slice[2], upper[2], lower[2], i;
131 unsigned counter[2];
132 struct drm_drawable_info *drw;
133 drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
134 u32 cpp = dev_priv->cpp;
135 u32 cmd = (cpp == 4) ? (XY_SRC_COPY_BLT_CMD |
136 XY_SRC_COPY_BLT_WRITE_ALPHA |
137 XY_SRC_COPY_BLT_WRITE_RGB)
138 : XY_SRC_COPY_BLT_CMD;
139 u32 src_pitch = sarea_priv->pitch * cpp;
140 u32 dst_pitch = sarea_priv->pitch * cpp;
141 u32 ropcpp = (0xcc << 16) | ((cpp - 1) << 24);
142 RING_LOCALS;
143
144 if (IS_I965G(dev) && sarea_priv->front_tiled) {
145 cmd |= XY_SRC_COPY_BLT_DST_TILED;
146 dst_pitch >>= 2;
147 }
148 if (IS_I965G(dev) && sarea_priv->back_tiled) {
149 cmd |= XY_SRC_COPY_BLT_SRC_TILED;
150 src_pitch >>= 2;
151 }
152
153 counter[0] = drm_vblank_count(dev, 0);
154 counter[1] = drm_vblank_count(dev, 1);
155
156 DRM_DEBUG("\n");
157
158 INIT_LIST_HEAD(&hits);
159
160 nhits = nrects = 0;
161
162 spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
163
164 /* Find buffer swaps scheduled for this vertical blank */
165 list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) {
166 drm_i915_vbl_swap_t *vbl_swap =
167 list_entry(list, drm_i915_vbl_swap_t, head);
168 int pipe = i915_get_pipe(dev, vbl_swap->plane);
169
170 if ((counter[pipe] - vbl_swap->sequence) > (1<<23))
171 continue;
172
173 list_del(list);
174 dev_priv->swaps_pending--;
175 drm_vblank_put(dev, pipe);
176
177 spin_unlock(&dev_priv->swaps_lock);
178 spin_lock(&dev->drw_lock);
179
180 drw = drm_get_drawable_info(dev, vbl_swap->drw_id);
181
182 if (!drw) {
183 spin_unlock(&dev->drw_lock);
184 drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
185 spin_lock(&dev_priv->swaps_lock);
186 continue;
187 }
188
189 list_for_each(hit, &hits) {
190 drm_i915_vbl_swap_t *swap_cmp =
191 list_entry(hit, drm_i915_vbl_swap_t, head);
192 struct drm_drawable_info *drw_cmp =
193 drm_get_drawable_info(dev, swap_cmp->drw_id);
194
195 if (drw_cmp &&
196 drw_cmp->rects[0].y1 > drw->rects[0].y1) {
197 list_add_tail(list, hit);
198 break;
199 }
200 }
201
202 spin_unlock(&dev->drw_lock);
203
204 /* List of hits was empty, or we reached the end of it */
205 if (hit == &hits)
206 list_add_tail(list, hits.prev);
207
208 nhits++;
209
210 spin_lock(&dev_priv->swaps_lock);
211 }
212
213 if (nhits == 0) {
214 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
215 return;
216 }
217
218 spin_unlock(&dev_priv->swaps_lock);
219
220 i915_kernel_lost_context(dev);
221
222 if (IS_I965G(dev)) {
223 BEGIN_LP_RING(4);
224
225 OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
226 OUT_RING(0);
227 OUT_RING(((sarea_priv->width - 1) & 0xffff) | ((sarea_priv->height - 1) << 16));
228 OUT_RING(0);
229 ADVANCE_LP_RING();
230 } else {
231 BEGIN_LP_RING(6);
232
233 OUT_RING(GFX_OP_DRAWRECT_INFO);
234 OUT_RING(0);
235 OUT_RING(0);
236 OUT_RING(sarea_priv->width | sarea_priv->height << 16);
237 OUT_RING(sarea_priv->width | sarea_priv->height << 16);
238 OUT_RING(0);
239
240 ADVANCE_LP_RING();
241 }
242
243 sarea_priv->ctxOwner = DRM_KERNEL_CONTEXT;
244
245 upper[0] = upper[1] = 0;
246 slice[0] = max(sarea_priv->pipeA_h / nhits, 1);
247 slice[1] = max(sarea_priv->pipeB_h / nhits, 1);
248 lower[0] = sarea_priv->pipeA_y + slice[0];
249 lower[1] = sarea_priv->pipeB_y + slice[0];
250
251 spin_lock(&dev->drw_lock);
252
253 /* Emit blits for buffer swaps, partitioning both outputs into as many
254 * slices as there are buffer swaps scheduled in order to avoid tearing
255 * (based on the assumption that a single buffer swap would always
256 * complete before scanout starts).
257 */
258 for (i = 0; i++ < nhits;
259 upper[0] = lower[0], lower[0] += slice[0],
260 upper[1] = lower[1], lower[1] += slice[1]) {
261 if (i == nhits)
262 lower[0] = lower[1] = sarea_priv->height;
263
264 list_for_each(hit, &hits) {
265 drm_i915_vbl_swap_t *swap_hit =
266 list_entry(hit, drm_i915_vbl_swap_t, head);
267 struct drm_clip_rect *rect;
268 int num_rects, plane;
269 unsigned short top, bottom;
270
271 drw = drm_get_drawable_info(dev, swap_hit->drw_id);
272
273 if (!drw)
274 continue;
275
276 rect = drw->rects;
277 plane = swap_hit->plane;
278 top = upper[plane];
279 bottom = lower[plane];
280
281 for (num_rects = drw->num_rects; num_rects--; rect++) {
282 int y1 = max(rect->y1, top);
283 int y2 = min(rect->y2, bottom);
284
285 if (y1 >= y2)
286 continue;
287
288 BEGIN_LP_RING(8);
289
290 OUT_RING(cmd);
291 OUT_RING(ropcpp | dst_pitch);
292 OUT_RING((y1 << 16) | rect->x1);
293 OUT_RING((y2 << 16) | rect->x2);
294 OUT_RING(sarea_priv->front_offset);
295 OUT_RING((y1 << 16) | rect->x1);
296 OUT_RING(src_pitch);
297 OUT_RING(sarea_priv->back_offset);
298
299 ADVANCE_LP_RING();
300 }
301 }
302 }
303
304 spin_unlock_irqrestore(&dev->drw_lock, irqflags);
305
306 list_for_each_safe(hit, tmp, &hits) {
307 drm_i915_vbl_swap_t *swap_hit =
308 list_entry(hit, drm_i915_vbl_swap_t, head);
309
310 list_del(hit);
311
312 drm_free(swap_hit, sizeof(*swap_hit), DRM_MEM_DRIVER);
313 }
314}
315
316u32 i915_get_vblank_counter(struct drm_device *dev, int plane)
317{ 134{
318 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 135 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
319 unsigned long high_frame; 136 unsigned long high_frame;
320 unsigned long low_frame; 137 unsigned long low_frame;
321 u32 high1, high2, low, count; 138 u32 high1, high2, low, count;
322 int pipe;
323 139
324 pipe = i915_get_pipe(dev, plane);
325 high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH; 140 high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
326 low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL; 141 low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
327 142
@@ -349,106 +164,106 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int plane)
349 return count; 164 return count;
350} 165}
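The high1/high2/low locals above suggest the usual split-counter read: the high half is read before and after the low half and the read is retried if it changed, so a frame rollover between the two register accesses cannot produce a torn 24-bit count. A stand-alone sketch of that pattern with the hardware access stubbed out (the 16+8 bit split is an assumption for illustration, not the exact register fields):

    #include <stdint.h>
    #include <stdio.h>

    /* Stub accessors standing in for the FRAMEHIGH/FRAMEPIXEL reads;
     * a real implementation would read the hardware registers. */
    static uint32_t frame = 0x0001ff;
    static uint32_t read_frame_high(void) { return frame >> 8;   }
    static uint32_t read_frame_low(void)  { return frame & 0xff; }

    static uint32_t read_frame_counter(void)
    {
            uint32_t high1, high2, low;

            do {
                    high1 = read_frame_high();
                    low   = read_frame_low();
                    high2 = read_frame_high();
            } while (high1 != high2);     /* retry if the count rolled over */

            return (high1 << 8) | low;    /* 24-bit frame count */
    }

    int main(void)
    {
            printf("frame counter = 0x%06x\n", read_frame_counter());
            return 0;
    }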
351 166
352void
353i915_gem_vblank_work_handler(struct work_struct *work)
354{
355 drm_i915_private_t *dev_priv;
356 struct drm_device *dev;
357
358 dev_priv = container_of(work, drm_i915_private_t,
359 mm.vblank_work);
360 dev = dev_priv->dev;
361
362 mutex_lock(&dev->struct_mutex);
363 i915_vblank_tasklet(dev);
364 mutex_unlock(&dev->struct_mutex);
365}
366
367irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) 167irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
368{ 168{
369 struct drm_device *dev = (struct drm_device *) arg; 169 struct drm_device *dev = (struct drm_device *) arg;
370 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 170 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
371 u32 iir; 171 u32 iir, new_iir;
372 u32 pipea_stats, pipeb_stats; 172 u32 pipea_stats, pipeb_stats;
173 u32 vblank_status;
174 u32 vblank_enable;
373 int vblank = 0; 175 int vblank = 0;
176 unsigned long irqflags;
177 int irq_received;
178 int ret = IRQ_NONE;
374 179
375 atomic_inc(&dev_priv->irq_received); 180 atomic_inc(&dev_priv->irq_received);
376 181
377 if (dev->pdev->msi_enabled)
378 I915_WRITE(IMR, ~0);
379 iir = I915_READ(IIR); 182 iir = I915_READ(IIR);
380 183
381 if (iir == 0) { 184 if (IS_I965G(dev)) {
382 if (dev->pdev->msi_enabled) { 185 vblank_status = I915_START_VBLANK_INTERRUPT_STATUS;
383 I915_WRITE(IMR, dev_priv->irq_mask_reg); 186 vblank_enable = PIPE_START_VBLANK_INTERRUPT_ENABLE;
384 (void) I915_READ(IMR); 187 } else {
385 } 188 vblank_status = I915_VBLANK_INTERRUPT_STATUS;
386 return IRQ_NONE; 189 vblank_enable = I915_VBLANK_INTERRUPT_ENABLE;
387 } 190 }
388 191
389 /* 192 for (;;) {
390 * Clear the PIPE(A|B)STAT regs before the IIR otherwise 193 irq_received = iir != 0;
391 * we may get extra interrupts. 194
392 */ 195 /* Can't rely on pipestat interrupt bit in iir as it might
393 if (iir & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT) { 196 * have been cleared after the pipestat interrupt was received.
197 * It doesn't set the bit in iir again, but it still produces
198 * interrupts (for non-MSI).
199 */
200 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
394 pipea_stats = I915_READ(PIPEASTAT); 201 pipea_stats = I915_READ(PIPEASTAT);
395 if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A)) 202 pipeb_stats = I915_READ(PIPEBSTAT);
396 pipea_stats &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE | 203 /*
397 PIPE_VBLANK_INTERRUPT_ENABLE); 204 * Clear the PIPE(A|B)STAT regs before the IIR
398 else if (pipea_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS| 205 */
399 PIPE_VBLANK_INTERRUPT_STATUS)) { 206 if (pipea_stats & 0x8000ffff) {
400 vblank++; 207 I915_WRITE(PIPEASTAT, pipea_stats);
401 drm_handle_vblank(dev, i915_get_plane(dev, 0)); 208 irq_received = 1;
402 } 209 }
403 210
404 I915_WRITE(PIPEASTAT, pipea_stats); 211 if (pipeb_stats & 0x8000ffff) {
405 } 212 I915_WRITE(PIPEBSTAT, pipeb_stats);
406 if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) { 213 irq_received = 1;
407 pipeb_stats = I915_READ(PIPEBSTAT);
408 /* Ack the event */
409 I915_WRITE(PIPEBSTAT, pipeb_stats);
410
411 /* The vblank interrupt gets enabled even if we didn't ask for
412 it, so make sure it's shut down again */
413 if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B))
414 pipeb_stats &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
415 PIPE_VBLANK_INTERRUPT_ENABLE);
416 else if (pipeb_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
417 PIPE_VBLANK_INTERRUPT_STATUS)) {
418 vblank++;
419 drm_handle_vblank(dev, i915_get_plane(dev, 1));
420 } 214 }
215 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
421 216
422 if (pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) 217 if (!irq_received)
423 opregion_asle_intr(dev); 218 break;
424 I915_WRITE(PIPEBSTAT, pipeb_stats);
425 }
426 219
427 I915_WRITE(IIR, iir); 220 ret = IRQ_HANDLED;
428 if (dev->pdev->msi_enabled)
429 I915_WRITE(IMR, dev_priv->irq_mask_reg);
430 (void) I915_READ(IIR); /* Flush posted writes */
431 221
432 if (dev_priv->sarea_priv) 222 I915_WRITE(IIR, iir);
433 dev_priv->sarea_priv->last_dispatch = 223 new_iir = I915_READ(IIR); /* Flush posted writes */
434 READ_BREADCRUMB(dev_priv);
435 224
436 if (iir & I915_USER_INTERRUPT) { 225 if (dev_priv->sarea_priv)
437 dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev); 226 dev_priv->sarea_priv->last_dispatch =
438 DRM_WAKEUP(&dev_priv->irq_queue); 227 READ_BREADCRUMB(dev_priv);
439 } 228
229 if (iir & I915_USER_INTERRUPT) {
230 dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
231 DRM_WAKEUP(&dev_priv->irq_queue);
232 }
233
234 if (pipea_stats & vblank_status) {
235 vblank++;
236 drm_handle_vblank(dev, 0);
237 }
440 238
441 if (iir & I915_ASLE_INTERRUPT) 239 if (pipeb_stats & vblank_status) {
442 opregion_asle_intr(dev); 240 vblank++;
241 drm_handle_vblank(dev, 1);
242 }
243
244 if ((pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
245 (iir & I915_ASLE_INTERRUPT))
246 opregion_asle_intr(dev);
443 247
444 if (vblank && dev_priv->swaps_pending > 0) { 248 /* With MSI, interrupts are only generated when iir
445 if (dev_priv->ring.ring_obj == NULL) 249 * transitions from zero to nonzero. If another bit got
446 drm_locked_tasklet(dev, i915_vblank_tasklet); 250 * set while we were handling the existing iir bits, then
447 else 251 * we would never get another interrupt.
448 schedule_work(&dev_priv->mm.vblank_work); 252 *
253 * This is fine on non-MSI as well, as if we hit this path
254 * we avoid exiting the interrupt handler only to generate
255 * another one.
256 *
257 * Note that for MSI this could cause a stray interrupt report
258 * if an interrupt landed in the time between writing IIR and
259 * the posting read. This should be rare enough to never
260 * trigger the 99% of 100,000 interrupts test for disabling
261 * stray interrupts.
262 */
263 iir = new_iir;
449 } 264 }
450 265
451 return IRQ_HANDLED; 266 return ret;
452} 267}
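The new handler loops on IIR because, as the comment notes, MSI only fires on a zero-to-nonzero transition of IIR: after acking the bits just handled, IIR is re-read and the loop continues until it comes back zero, so events that arrived in the meantime are not lost. A stand-alone sketch of that loop shape with the register mocked as a plain variable (not driver code):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t fake_iir = 0x5;                        /* pending event bits */
    static uint32_t read_iir(void)        { return fake_iir; }
    static void     write_iir(uint32_t v) { fake_iir &= ~v; }   /* W1C ack */

    static int handle_irq(void)
    {
            uint32_t iir = read_iir();
            int handled = 0;

            while (iir) {
                    handled = 1;
                    write_iir(iir);      /* ack what we just saw */
                    iir = read_iir();    /* pick up bits that arrived while
                                          * we were handling the last batch */
            }
            return handled;
    }

    int main(void)
    {
            printf("handled=%d, iir now=0x%x\n", handle_irq(), fake_iir);
            return 0;
    }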
453 268
454static int i915_emit_irq(struct drm_device * dev) 269static int i915_emit_irq(struct drm_device * dev)
@@ -466,12 +281,10 @@ static int i915_emit_irq(struct drm_device * dev)
466 if (dev_priv->sarea_priv) 281 if (dev_priv->sarea_priv)
467 dev_priv->sarea_priv->last_enqueue = dev_priv->counter; 282 dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
468 283
469 BEGIN_LP_RING(6); 284 BEGIN_LP_RING(4);
470 OUT_RING(MI_STORE_DWORD_INDEX); 285 OUT_RING(MI_STORE_DWORD_INDEX);
471 OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT); 286 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
472 OUT_RING(dev_priv->counter); 287 OUT_RING(dev_priv->counter);
473 OUT_RING(0);
474 OUT_RING(0);
475 OUT_RING(MI_USER_INTERRUPT); 288 OUT_RING(MI_USER_INTERRUPT);
476 ADVANCE_LP_RING(); 289 ADVANCE_LP_RING();
477 290
@@ -481,22 +294,24 @@ static int i915_emit_irq(struct drm_device * dev)
481void i915_user_irq_get(struct drm_device *dev) 294void i915_user_irq_get(struct drm_device *dev)
482{ 295{
483 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 296 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
297 unsigned long irqflags;
484 298
485 spin_lock(&dev_priv->user_irq_lock); 299 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
486 if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) 300 if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1))
487 i915_enable_irq(dev_priv, I915_USER_INTERRUPT); 301 i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
488 spin_unlock(&dev_priv->user_irq_lock); 302 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
489} 303}
490 304
491void i915_user_irq_put(struct drm_device *dev) 305void i915_user_irq_put(struct drm_device *dev)
492{ 306{
493 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 307 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
308 unsigned long irqflags;
494 309
495 spin_lock(&dev_priv->user_irq_lock); 310 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
496 BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0); 311 BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
497 if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) 312 if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0))
498 i915_disable_irq(dev_priv, I915_USER_INTERRUPT); 313 i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
499 spin_unlock(&dev_priv->user_irq_lock); 314 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
500} 315}
501 316
502static int i915_wait_irq(struct drm_device * dev, int irq_nr) 317static int i915_wait_irq(struct drm_device * dev, int irq_nr)
@@ -578,74 +393,38 @@ int i915_irq_wait(struct drm_device *dev, void *data,
578 return i915_wait_irq(dev, irqwait->irq_seq); 393 return i915_wait_irq(dev, irqwait->irq_seq);
579} 394}
580 395
581int i915_enable_vblank(struct drm_device *dev, int plane) 396/* Called from drm generic code, passed 'crtc' which
397 * we use as a pipe index
398 */
399int i915_enable_vblank(struct drm_device *dev, int pipe)
582{ 400{
583 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 401 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
584 int pipe = i915_get_pipe(dev, plane); 402 unsigned long irqflags;
585 u32 pipestat_reg = 0;
586 u32 pipestat;
587
588 switch (pipe) {
589 case 0:
590 pipestat_reg = PIPEASTAT;
591 i915_enable_irq(dev_priv, I915_DISPLAY_PIPE_A_EVENT_INTERRUPT);
592 break;
593 case 1:
594 pipestat_reg = PIPEBSTAT;
595 i915_enable_irq(dev_priv, I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
596 break;
597 default:
598 DRM_ERROR("tried to enable vblank on non-existent pipe %d\n",
599 pipe);
600 break;
601 }
602
603 if (pipestat_reg) {
604 pipestat = I915_READ(pipestat_reg);
605 if (IS_I965G(dev))
606 pipestat |= PIPE_START_VBLANK_INTERRUPT_ENABLE;
607 else
608 pipestat |= PIPE_VBLANK_INTERRUPT_ENABLE;
609 /* Clear any stale interrupt status */
610 pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
611 PIPE_VBLANK_INTERRUPT_STATUS);
612 I915_WRITE(pipestat_reg, pipestat);
613 }
614 403
404 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
405 if (IS_I965G(dev))
406 i915_enable_pipestat(dev_priv, pipe,
407 PIPE_START_VBLANK_INTERRUPT_ENABLE);
408 else
409 i915_enable_pipestat(dev_priv, pipe,
410 PIPE_VBLANK_INTERRUPT_ENABLE);
411 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
615 return 0; 412 return 0;
616} 413}
617 414
618void i915_disable_vblank(struct drm_device *dev, int plane) 415/* Called from drm generic code, passed 'crtc' which
416 * we use as a pipe index
417 */
418void i915_disable_vblank(struct drm_device *dev, int pipe)
619{ 419{
620 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 420 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
621 int pipe = i915_get_pipe(dev, plane); 421 unsigned long irqflags;
622 u32 pipestat_reg = 0;
623 u32 pipestat;
624
625 switch (pipe) {
626 case 0:
627 pipestat_reg = PIPEASTAT;
628 i915_disable_irq(dev_priv, I915_DISPLAY_PIPE_A_EVENT_INTERRUPT);
629 break;
630 case 1:
631 pipestat_reg = PIPEBSTAT;
632 i915_disable_irq(dev_priv, I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
633 break;
634 default:
635 DRM_ERROR("tried to disable vblank on non-existent pipe %d\n",
636 pipe);
637 break;
638 }
639 422
640 if (pipestat_reg) { 423 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
641 pipestat = I915_READ(pipestat_reg); 424 i915_disable_pipestat(dev_priv, pipe,
642 pipestat &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE | 425 PIPE_VBLANK_INTERRUPT_ENABLE |
643 PIPE_VBLANK_INTERRUPT_ENABLE); 426 PIPE_START_VBLANK_INTERRUPT_ENABLE);
644 /* Clear any stale interrupt status */ 427 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
645 pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
646 PIPE_VBLANK_INTERRUPT_STATUS);
647 I915_WRITE(pipestat_reg, pipestat);
648 }
649} 428}
650 429
651/* Set the vblank monitor pipe 430/* Set the vblank monitor pipe
@@ -685,116 +464,21 @@ int i915_vblank_pipe_get(struct drm_device *dev, void *data,
685int i915_vblank_swap(struct drm_device *dev, void *data, 464int i915_vblank_swap(struct drm_device *dev, void *data,
686 struct drm_file *file_priv) 465 struct drm_file *file_priv)
687{ 466{
688 drm_i915_private_t *dev_priv = dev->dev_private; 467 /* The delayed swap mechanism was fundamentally racy, and has been
689 drm_i915_vblank_swap_t *swap = data; 468 * removed. The model was that the client requested a delayed flip/swap
690 drm_i915_vbl_swap_t *vbl_swap; 469 * from the kernel, then waited for vblank before continuing to perform
691 unsigned int pipe, seqtype, curseq, plane; 470 * rendering. The problem was that the kernel might wake the client
692 unsigned long irqflags; 471 * up before it dispatched the vblank swap (since the lock has to be
693 struct list_head *list; 472 * held while touching the ringbuffer), in which case the client would
694 int ret; 473 * clear and start the next frame before the swap occurred, and
695 474 * flicker would occur in addition to likely missing the vblank.
696 if (!dev_priv || !dev_priv->sarea_priv) { 475 *
697 DRM_ERROR("%s called with no initialization\n", __func__); 476 * In the absence of this ioctl, userland falls back to a correct path
698 return -EINVAL; 477 * of waiting for a vblank, then dispatching the swap on its own.
699 } 478 * Context switching to userland and back is plenty fast enough for
700 479 * meeting the requirements of vblank swapping.
701 if (dev_priv->sarea_priv->rotation) {
702 DRM_DEBUG("Rotation not supported\n");
703 return -EINVAL;
704 }
705
706 if (swap->seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE |
707 _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)) {
708 DRM_ERROR("Invalid sequence type 0x%x\n", swap->seqtype);
709 return -EINVAL;
710 }
711
712 plane = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;
713 pipe = i915_get_pipe(dev, plane);
714
715 seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE);
716
717 if (!(dev_priv->vblank_pipe & (1 << pipe))) {
718 DRM_ERROR("Invalid pipe %d\n", pipe);
719 return -EINVAL;
720 }
721
722 spin_lock_irqsave(&dev->drw_lock, irqflags);
723
724 if (!drm_get_drawable_info(dev, swap->drawable)) {
725 spin_unlock_irqrestore(&dev->drw_lock, irqflags);
726 DRM_DEBUG("Invalid drawable ID %d\n", swap->drawable);
727 return -EINVAL;
728 }
729
730 spin_unlock_irqrestore(&dev->drw_lock, irqflags);
731
732 /*
733 * We take the ref here and put it when the swap actually completes
734 * in the tasklet.
735 */ 480 */
736 ret = drm_vblank_get(dev, pipe); 481 return -EINVAL;
737 if (ret)
738 return ret;
739 curseq = drm_vblank_count(dev, pipe);
740
741 if (seqtype == _DRM_VBLANK_RELATIVE)
742 swap->sequence += curseq;
743
744 if ((curseq - swap->sequence) <= (1<<23)) {
745 if (swap->seqtype & _DRM_VBLANK_NEXTONMISS) {
746 swap->sequence = curseq + 1;
747 } else {
748 DRM_DEBUG("Missed target sequence\n");
749 drm_vblank_put(dev, pipe);
750 return -EINVAL;
751 }
752 }
753
754 spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
755
756 list_for_each(list, &dev_priv->vbl_swaps.head) {
757 vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head);
758
759 if (vbl_swap->drw_id == swap->drawable &&
760 vbl_swap->plane == plane &&
761 vbl_swap->sequence == swap->sequence) {
762 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
763 DRM_DEBUG("Already scheduled\n");
764 return 0;
765 }
766 }
767
768 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
769
770 if (dev_priv->swaps_pending >= 100) {
771 DRM_DEBUG("Too many swaps queued\n");
772 drm_vblank_put(dev, pipe);
773 return -EBUSY;
774 }
775
776 vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER);
777
778 if (!vbl_swap) {
779 DRM_ERROR("Failed to allocate memory to queue swap\n");
780 drm_vblank_put(dev, pipe);
781 return -ENOMEM;
782 }
783
784 DRM_DEBUG("\n");
785
786 vbl_swap->drw_id = swap->drawable;
787 vbl_swap->plane = plane;
788 vbl_swap->sequence = swap->sequence;
789
790 spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
791
792 list_add_tail(&vbl_swap->head, &dev_priv->vbl_swaps.head);
793 dev_priv->swaps_pending++;
794
795 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
796
797 return 0;
798} 482}
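The fallback described in the comment above lives entirely in userspace: the client waits for a vblank and then performs the swap itself. A minimal sketch of that path, assuming libdrm's drmWaitVBlank() and a /dev/dri/card0 node, with the actual copy/flip left as a placeholder:

    #include <fcntl.h>
    #include <stdio.h>
    #include <xf86drm.h>

    /* Wait for the next vblank on the primary pipe, then let the caller
     * issue its own back->front copy.  Error handling trimmed. */
    static int wait_vblank_then_swap(int drm_fd)
    {
            drmVBlank vbl;

            vbl.request.type = DRM_VBLANK_RELATIVE;
            vbl.request.sequence = 1;        /* the next vblank from now */
            vbl.request.signal = 0;

            if (drmWaitVBlank(drm_fd, &vbl) != 0)
                    return -1;

            /* dispatch the swap/blit here (client-side) */
            return 0;
    }

    int main(void)
    {
            int fd = open("/dev/dri/card0", O_RDWR);   /* assumed device node */

            if (fd < 0)
                    return 1;
            printf("swap %s\n", wait_vblank_then_swap(fd) ? "failed" : "done");
            return 0;
    }

For the second pipe the request type would additionally carry DRM_VBLANK_SECONDARY.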
799 483
800/* drm_dma.h hooks 484/* drm_dma.h hooks
@@ -804,36 +488,35 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
804 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 488 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
805 489
806 I915_WRITE(HWSTAM, 0xeffe); 490 I915_WRITE(HWSTAM, 0xeffe);
491 I915_WRITE(PIPEASTAT, 0);
492 I915_WRITE(PIPEBSTAT, 0);
807 I915_WRITE(IMR, 0xffffffff); 493 I915_WRITE(IMR, 0xffffffff);
808 I915_WRITE(IER, 0x0); 494 I915_WRITE(IER, 0x0);
495 (void) I915_READ(IER);
809} 496}
810 497
811int i915_driver_irq_postinstall(struct drm_device *dev) 498int i915_driver_irq_postinstall(struct drm_device *dev)
812{ 499{
813 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 500 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
814 int ret, num_pipes = 2;
815
816 spin_lock_init(&dev_priv->swaps_lock);
817 INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
818 dev_priv->swaps_pending = 0;
819
820 /* Set initial unmasked IRQs to just the selected vblank pipes. */
821 dev_priv->irq_mask_reg = ~0;
822
823 ret = drm_vblank_init(dev, num_pipes);
824 if (ret)
825 return ret;
826 501
827 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; 502 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
828 dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
829 dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
830 503
831 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 504 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
832 505
833 dev_priv->irq_mask_reg &= I915_INTERRUPT_ENABLE_MASK; 506 /* Unmask the interrupts that we always want on. */
507 dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX;
508
509 dev_priv->pipestat[0] = 0;
510 dev_priv->pipestat[1] = 0;
511
512 /* Disable pipe interrupt enables, clear pending pipe status */
513 I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff);
514 I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff);
515 /* Clear pending interrupt status */
516 I915_WRITE(IIR, I915_READ(IIR));
834 517
835 I915_WRITE(IMR, dev_priv->irq_mask_reg);
836 I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK); 518 I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK);
519 I915_WRITE(IMR, dev_priv->irq_mask_reg);
837 (void) I915_READ(IER); 520 (void) I915_READ(IER);
838 521
839 opregion_enable_asle(dev); 522 opregion_enable_asle(dev);
@@ -845,7 +528,6 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
845void i915_driver_irq_uninstall(struct drm_device * dev) 528void i915_driver_irq_uninstall(struct drm_device * dev)
846{ 529{
847 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 530 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
848 u32 temp;
849 531
850 if (!dev_priv) 532 if (!dev_priv)
851 return; 533 return;
@@ -853,13 +535,12 @@ void i915_driver_irq_uninstall(struct drm_device * dev)
853 dev_priv->vblank_pipe = 0; 535 dev_priv->vblank_pipe = 0;
854 536
855 I915_WRITE(HWSTAM, 0xffffffff); 537 I915_WRITE(HWSTAM, 0xffffffff);
538 I915_WRITE(PIPEASTAT, 0);
539 I915_WRITE(PIPEBSTAT, 0);
856 I915_WRITE(IMR, 0xffffffff); 540 I915_WRITE(IMR, 0xffffffff);
857 I915_WRITE(IER, 0x0); 541 I915_WRITE(IER, 0x0);
858 542
859 temp = I915_READ(PIPEASTAT); 543 I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff);
860 I915_WRITE(PIPEASTAT, temp); 544 I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff);
861 temp = I915_READ(PIPEBSTAT); 545 I915_WRITE(IIR, I915_READ(IIR));
862 I915_WRITE(PIPEBSTAT, temp);
863 temp = I915_READ(IIR);
864 I915_WRITE(IIR, temp);
865} 546}
diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c
index 1787a0c7e3ab..13ae731a33db 100644
--- a/drivers/gpu/drm/i915/i915_opregion.c
+++ b/drivers/gpu/drm/i915/i915_opregion.c
@@ -235,17 +235,15 @@ void opregion_enable_asle(struct drm_device *dev)
235 struct opregion_asle *asle = dev_priv->opregion.asle; 235 struct opregion_asle *asle = dev_priv->opregion.asle;
236 236
237 if (asle) { 237 if (asle) {
238 u32 pipeb_stats = I915_READ(PIPEBSTAT);
239 if (IS_MOBILE(dev)) { 238 if (IS_MOBILE(dev)) {
240 /* Many devices trigger events with a write to the 239 unsigned long irqflags;
241 legacy backlight controller, so we need to ensure 240
242 that it's able to generate interrupts */ 241 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
243 I915_WRITE(PIPEBSTAT, pipeb_stats |= 242 i915_enable_pipestat(dev_priv, 1,
244 I915_LEGACY_BLC_EVENT_ENABLE); 243 I915_LEGACY_BLC_EVENT_ENABLE);
245 i915_enable_irq(dev_priv, I915_ASLE_INTERRUPT | 244 spin_unlock_irqrestore(&dev_priv->user_irq_lock,
246 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT); 245 irqflags);
247 } else 246 }
248 i915_enable_irq(dev_priv, I915_ASLE_INTERRUPT);
249 247
250 asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN | 248 asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
251 ASLE_PFMB_EN; 249 ASLE_PFMB_EN;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 5c2d9f206d05..9d24aaeb8a45 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -522,11 +522,15 @@
522#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0) 522#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0)
523#define DCC_ADDRESSING_MODE_MASK (3 << 0) 523#define DCC_ADDRESSING_MODE_MASK (3 << 0)
524#define DCC_CHANNEL_XOR_DISABLE (1 << 10) 524#define DCC_CHANNEL_XOR_DISABLE (1 << 10)
525#define DCC_CHANNEL_XOR_BIT_17 (1 << 9)
525 526
526/** 965 MCH register controlling DRAM channel configuration */ 527/** 965 MCH register controlling DRAM channel configuration */
527#define C0DRB3 0x10206 528#define C0DRB3 0x10206
528#define C1DRB3 0x10606 529#define C1DRB3 0x10606
529 530
531/** GM965 GM45 render standby register */
532#define MCHBAR_RENDER_STANDBY 0x111B8
533
530/* 534/*
531 * Overlay regs 535 * Overlay regs
532 */ 536 */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 603fe742ccd4..5d84027ee8f3 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -240,6 +240,13 @@ int i915_save_state(struct drm_device *dev)
240 240
241 pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); 241 pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
242 242
243 /* Render Standby */
244 if (IS_I965G(dev) && IS_MOBILE(dev))
245 dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY);
246
247 /* Hardware status page */
248 dev_priv->saveHWS = I915_READ(HWS_PGA);
249
243 /* Display arbitration control */ 250 /* Display arbitration control */
244 dev_priv->saveDSPARB = I915_READ(DSPARB); 251 dev_priv->saveDSPARB = I915_READ(DSPARB);
245 252
@@ -365,6 +372,14 @@ int i915_restore_state(struct drm_device *dev)
365 372
366 pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB); 373 pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
367 374
375 /* Render Standby */
376 if (IS_I965G(dev) && IS_MOBILE(dev))
377 I915_WRITE(MCHBAR_RENDER_STANDBY, dev_priv->saveRENDERSTANDBY);
378
379 /* Hardware status page */
380 I915_WRITE(HWS_PGA, dev_priv->saveHWS);
381
382 /* Display arbitration */
368 I915_WRITE(DSPARB, dev_priv->saveDSPARB); 383 I915_WRITE(DSPARB, dev_priv->saveDSPARB);
369 384
370 /* Pipe & plane A info */ 385 /* Pipe & plane A info */
diff --git a/drivers/gpu/drm/mga/mga_dma.c b/drivers/gpu/drm/mga/mga_dma.c
index c1d12dbfa8d8..b49c5ff29585 100644
--- a/drivers/gpu/drm/mga/mga_dma.c
+++ b/drivers/gpu/drm/mga/mga_dma.c
@@ -396,6 +396,7 @@ int mga_freelist_put(struct drm_device * dev, struct drm_buf * buf)
396int mga_driver_load(struct drm_device * dev, unsigned long flags) 396int mga_driver_load(struct drm_device * dev, unsigned long flags)
397{ 397{
398 drm_mga_private_t *dev_priv; 398 drm_mga_private_t *dev_priv;
399 int ret;
399 400
400 dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER); 401 dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER);
401 if (!dev_priv) 402 if (!dev_priv)
@@ -415,6 +416,13 @@ int mga_driver_load(struct drm_device * dev, unsigned long flags)
415 dev->types[7] = _DRM_STAT_PRIMARY; 416 dev->types[7] = _DRM_STAT_PRIMARY;
416 dev->types[8] = _DRM_STAT_SECONDARY; 417 dev->types[8] = _DRM_STAT_SECONDARY;
417 418
419 ret = drm_vblank_init(dev, 1);
420
421 if (ret) {
422 (void) mga_driver_unload(dev);
423 return ret;
424 }
425
418 return 0; 426 return 0;
419} 427}
420 428
diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
index bab42f41188b..daa6041a483a 100644
--- a/drivers/gpu/drm/mga/mga_irq.c
+++ b/drivers/gpu/drm/mga/mga_irq.c
@@ -152,11 +152,6 @@ void mga_driver_irq_preinstall(struct drm_device * dev)
152int mga_driver_irq_postinstall(struct drm_device *dev) 152int mga_driver_irq_postinstall(struct drm_device *dev)
153{ 153{
154 drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; 154 drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
155 int ret;
156
157 ret = drm_vblank_init(dev, 1);
158 if (ret)
159 return ret;
160 155
161 DRM_INIT_WAITQUEUE(&dev_priv->fence_queue); 156 DRM_INIT_WAITQUEUE(&dev_priv->fence_queue);
162 157
diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c
index 3265d53ba91f..601f4c0e5da5 100644
--- a/drivers/gpu/drm/r128/r128_drv.c
+++ b/drivers/gpu/drm/r128/r128_drv.c
@@ -45,6 +45,7 @@ static struct drm_driver driver = {
45 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | 45 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
46 DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, 46 DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
47 .dev_priv_size = sizeof(drm_r128_buf_priv_t), 47 .dev_priv_size = sizeof(drm_r128_buf_priv_t),
48 .load = r128_driver_load,
48 .preclose = r128_driver_preclose, 49 .preclose = r128_driver_preclose,
49 .lastclose = r128_driver_lastclose, 50 .lastclose = r128_driver_lastclose,
50 .get_vblank_counter = r128_get_vblank_counter, 51 .get_vblank_counter = r128_get_vblank_counter,
@@ -84,6 +85,11 @@ static struct drm_driver driver = {
84 .patchlevel = DRIVER_PATCHLEVEL, 85 .patchlevel = DRIVER_PATCHLEVEL,
85}; 86};
86 87
88int r128_driver_load(struct drm_device * dev, unsigned long flags)
89{
90 return drm_vblank_init(dev, 1);
91}
92
87static int __init r128_init(void) 93static int __init r128_init(void)
88{ 94{
89 driver.num_ioctls = r128_max_ioctl; 95 driver.num_ioctls = r128_max_ioctl;
diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
index 5898b274279d..797a26c42dab 100644
--- a/drivers/gpu/drm/r128/r128_drv.h
+++ b/drivers/gpu/drm/r128/r128_drv.h
@@ -159,6 +159,7 @@ extern void r128_driver_irq_preinstall(struct drm_device * dev);
159extern int r128_driver_irq_postinstall(struct drm_device *dev); 159extern int r128_driver_irq_postinstall(struct drm_device *dev);
160extern void r128_driver_irq_uninstall(struct drm_device * dev); 160extern void r128_driver_irq_uninstall(struct drm_device * dev);
161extern void r128_driver_lastclose(struct drm_device * dev); 161extern void r128_driver_lastclose(struct drm_device * dev);
162extern int r128_driver_load(struct drm_device * dev, unsigned long flags);
162extern void r128_driver_preclose(struct drm_device * dev, 163extern void r128_driver_preclose(struct drm_device * dev,
163 struct drm_file *file_priv); 164 struct drm_file *file_priv);
164 165
diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
index d7349012a680..69810fb8ac49 100644
--- a/drivers/gpu/drm/r128/r128_irq.c
+++ b/drivers/gpu/drm/r128/r128_irq.c
@@ -102,7 +102,7 @@ void r128_driver_irq_preinstall(struct drm_device * dev)
102 102
103int r128_driver_irq_postinstall(struct drm_device *dev) 103int r128_driver_irq_postinstall(struct drm_device *dev)
104{ 104{
105 return drm_vblank_init(dev, 1); 105 return 0;
106} 106}
107 107
108void r128_driver_irq_uninstall(struct drm_device * dev) 108void r128_driver_irq_uninstall(struct drm_device * dev)
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 59a2132a8f57..dcebb4bee7aa 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -653,15 +653,16 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev,
653 RADEON_WRITE(RADEON_SCRATCH_UMSK, 0x7); 653 RADEON_WRITE(RADEON_SCRATCH_UMSK, 0x7);
654 654
655 /* Turn on bus mastering */ 655 /* Turn on bus mastering */
656 if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) || 656 if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
657 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
658 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) { 657 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) {
659 /* rs400, rs690/rs740 */ 658 /* rs600/rs690/rs740 */
660 tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RS400_BUS_MASTER_DIS; 659 tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
661 RADEON_WRITE(RADEON_BUS_CNTL, tmp); 660 RADEON_WRITE(RADEON_BUS_CNTL, tmp);
662 } else if (!(((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV380) || 661 } else if (((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV350) ||
663 ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R423))) { 662 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) ||
664 /* r1xx, r2xx, r300, r(v)350, r420/r481, rs480 */ 663 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) ||
664 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) {
665 /* r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */
665 tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS; 666 tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
666 RADEON_WRITE(RADEON_BUS_CNTL, tmp); 667 RADEON_WRITE(RADEON_BUS_CNTL, tmp);
667 } /* PCIE cards appears to not need this */ 668 } /* PCIE cards appears to not need this */
@@ -1750,6 +1751,18 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags)
1750 else 1751 else
1751 dev_priv->flags |= RADEON_IS_PCI; 1752 dev_priv->flags |= RADEON_IS_PCI;
1752 1753
1754 ret = drm_addmap(dev, drm_get_resource_start(dev, 2),
1755 drm_get_resource_len(dev, 2), _DRM_REGISTERS,
1756 _DRM_READ_ONLY | _DRM_DRIVER, &dev_priv->mmio);
1757 if (ret != 0)
1758 return ret;
1759
1760 ret = drm_vblank_init(dev, 2);
1761 if (ret) {
1762 radeon_driver_unload(dev);
1763 return ret;
1764 }
1765
1753 DRM_DEBUG("%s card detected\n", 1766 DRM_DEBUG("%s card detected\n",
1754 ((dev_priv->flags & RADEON_IS_AGP) ? "AGP" : (((dev_priv->flags & RADEON_IS_PCIE) ? "PCIE" : "PCI")))); 1767 ((dev_priv->flags & RADEON_IS_AGP) ? "AGP" : (((dev_priv->flags & RADEON_IS_PCIE) ? "PCIE" : "PCI"))));
1755 return ret; 1768 return ret;
@@ -1766,12 +1779,6 @@ int radeon_driver_firstopen(struct drm_device *dev)
1766 1779
1767 dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE; 1780 dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE;
1768 1781
1769 ret = drm_addmap(dev, drm_get_resource_start(dev, 2),
1770 drm_get_resource_len(dev, 2), _DRM_REGISTERS,
1771 _DRM_READ_ONLY, &dev_priv->mmio);
1772 if (ret != 0)
1773 return ret;
1774
1775 dev_priv->fb_aper_offset = drm_get_resource_start(dev, 0); 1782 dev_priv->fb_aper_offset = drm_get_resource_start(dev, 0);
1776 ret = drm_addmap(dev, dev_priv->fb_aper_offset, 1783 ret = drm_addmap(dev, dev_priv->fb_aper_offset,
1777 drm_get_resource_len(dev, 0), _DRM_FRAME_BUFFER, 1784 drm_get_resource_len(dev, 0), _DRM_FRAME_BUFFER,
@@ -1787,6 +1794,9 @@ int radeon_driver_unload(struct drm_device *dev)
1787 drm_radeon_private_t *dev_priv = dev->dev_private; 1794 drm_radeon_private_t *dev_priv = dev->dev_private;
1788 1795
1789 DRM_DEBUG("\n"); 1796 DRM_DEBUG("\n");
1797
1798 drm_rmmap(dev, dev_priv->mmio);
1799
1790 drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER); 1800 drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
1791 1801
1792 dev->dev_private = NULL; 1802 dev->dev_private = NULL;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 4dbb813910c3..3bbb871b25d5 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -287,7 +287,6 @@ typedef struct drm_radeon_private {
287 unsigned long gart_textures_offset; 287 unsigned long gart_textures_offset;
288 288
289 drm_local_map_t *sarea; 289 drm_local_map_t *sarea;
290 drm_local_map_t *mmio;
291 drm_local_map_t *cp_ring; 290 drm_local_map_t *cp_ring;
292 drm_local_map_t *ring_rptr; 291 drm_local_map_t *ring_rptr;
293 drm_local_map_t *gart_textures; 292 drm_local_map_t *gart_textures;
@@ -300,7 +299,6 @@ typedef struct drm_radeon_private {
300 atomic_t swi_emitted; 299 atomic_t swi_emitted;
301 int vblank_crtc; 300 int vblank_crtc;
302 uint32_t irq_enable_reg; 301 uint32_t irq_enable_reg;
303 int irq_enabled;
304 uint32_t r500_disp_irq_reg; 302 uint32_t r500_disp_irq_reg;
305 303
306 struct radeon_surface surfaces[RADEON_MAX_SURFACES]; 304 struct radeon_surface surfaces[RADEON_MAX_SURFACES];
@@ -318,6 +316,7 @@ typedef struct drm_radeon_private {
318 316
319 int num_gb_pipes; 317 int num_gb_pipes;
320 int track_flush; 318 int track_flush;
319 drm_local_map_t *mmio;
321} drm_radeon_private_t; 320} drm_radeon_private_t;
322 321
323typedef struct drm_radeon_buf_priv { 322typedef struct drm_radeon_buf_priv {
@@ -447,12 +446,12 @@ extern int r300_do_cp_cmdbuf(struct drm_device *dev,
447 * handling, not bus mastering itself. 446 * handling, not bus mastering itself.
448 */ 447 */
449#define RADEON_BUS_CNTL 0x0030 448#define RADEON_BUS_CNTL 0x0030
450/* r1xx, r2xx, r300, r(v)350, r420/r481, rs480 */ 449/* r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */
451# define RADEON_BUS_MASTER_DIS (1 << 6) 450# define RADEON_BUS_MASTER_DIS (1 << 6)
452/* rs400, rs690/rs740 */ 451/* rs600/rs690/rs740 */
453# define RS400_BUS_MASTER_DIS (1 << 14) 452# define RS600_BUS_MASTER_DIS (1 << 14)
454# define RS400_MSI_REARM (1 << 20) 453# define RS600_MSI_REARM (1 << 20)
455/* see RS480_MSI_REARM in AIC_CNTL for rs480 */ 454/* see RS400_MSI_REARM in AIC_CNTL for rs480 */
456 455
457#define RADEON_BUS_CNTL1 0x0034 456#define RADEON_BUS_CNTL1 0x0034
458# define RADEON_PMI_BM_DIS (1 << 2) 457# define RADEON_PMI_BM_DIS (1 << 2)
@@ -937,7 +936,7 @@ extern int r300_do_cp_cmdbuf(struct drm_device *dev,
937 936
938#define RADEON_AIC_CNTL 0x01d0 937#define RADEON_AIC_CNTL 0x01d0
939# define RADEON_PCIGART_TRANSLATE_EN (1 << 0) 938# define RADEON_PCIGART_TRANSLATE_EN (1 << 0)
940# define RS480_MSI_REARM (1 << 3) 939# define RS400_MSI_REARM (1 << 3)
941#define RADEON_AIC_STAT 0x01d4 940#define RADEON_AIC_STAT 0x01d4
942#define RADEON_AIC_PT_BASE 0x01d8 941#define RADEON_AIC_PT_BASE 0x01d8
943#define RADEON_AIC_LO_ADDR 0x01dc 942#define RADEON_AIC_LO_ADDR 0x01dc
diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
index 5079f7054a2f..99be11418ac2 100644
--- a/drivers/gpu/drm/radeon/radeon_irq.c
+++ b/drivers/gpu/drm/radeon/radeon_irq.c
@@ -44,7 +44,8 @@ void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state)
44 else 44 else
45 dev_priv->irq_enable_reg &= ~mask; 45 dev_priv->irq_enable_reg &= ~mask;
46 46
47 RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg); 47 if (!dev->irq_enabled)
48 RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
48} 49}
49 50
50static void r500_vbl_irq_set_state(struct drm_device *dev, u32 mask, int state) 51static void r500_vbl_irq_set_state(struct drm_device *dev, u32 mask, int state)
@@ -56,7 +57,8 @@ static void r500_vbl_irq_set_state(struct drm_device *dev, u32 mask, int state)
56 else 57 else
57 dev_priv->r500_disp_irq_reg &= ~mask; 58 dev_priv->r500_disp_irq_reg &= ~mask;
58 59
59 RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg); 60 if (!dev->irq_enabled)
61 RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg);
60} 62}
61 63
62int radeon_enable_vblank(struct drm_device *dev, int crtc) 64int radeon_enable_vblank(struct drm_device *dev, int crtc)
@@ -337,15 +339,10 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
337{ 339{
338 drm_radeon_private_t *dev_priv = 340 drm_radeon_private_t *dev_priv =
339 (drm_radeon_private_t *) dev->dev_private; 341 (drm_radeon_private_t *) dev->dev_private;
340 int ret;
341 342
342 atomic_set(&dev_priv->swi_emitted, 0); 343 atomic_set(&dev_priv->swi_emitted, 0);
343 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue); 344 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
344 345
345 ret = drm_vblank_init(dev, 2);
346 if (ret)
347 return ret;
348
349 dev->max_vblank_count = 0x001fffff; 346 dev->max_vblank_count = 0x001fffff;
350 347
351 radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1); 348 radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1);
@@ -360,8 +357,6 @@ void radeon_driver_irq_uninstall(struct drm_device * dev)
360 if (!dev_priv) 357 if (!dev_priv)
361 return; 358 return;
362 359
363 dev_priv->irq_enabled = 0;
364
365 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) 360 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690)
366 RADEON_WRITE(R500_DxMODE_INT_MASK, 0); 361 RADEON_WRITE(R500_DxMODE_INT_MASK, 0);
367 /* Disable *all* interrupts */ 362 /* Disable *all* interrupts */
diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
index 665d319b927b..c248c1d37268 100644
--- a/drivers/gpu/drm/via/via_irq.c
+++ b/drivers/gpu/drm/via/via_irq.c
@@ -314,7 +314,6 @@ int via_driver_irq_postinstall(struct drm_device *dev)
314 if (!dev_priv) 314 if (!dev_priv)
315 return -EINVAL; 315 return -EINVAL;
316 316
317 drm_vblank_init(dev, 1);
318 status = VIA_READ(VIA_REG_INTERRUPT); 317 status = VIA_READ(VIA_REG_INTERRUPT);
319 VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL 318 VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL
320 | dev_priv->irq_enable_mask); 319 | dev_priv->irq_enable_mask);
diff --git a/drivers/gpu/drm/via/via_map.c b/drivers/gpu/drm/via/via_map.c
index a967556be014..2c4f0b485792 100644
--- a/drivers/gpu/drm/via/via_map.c
+++ b/drivers/gpu/drm/via/via_map.c
@@ -107,8 +107,17 @@ int via_driver_load(struct drm_device *dev, unsigned long chipset)
107 ret = drm_sman_init(&dev_priv->sman, 2, 12, 8); 107 ret = drm_sman_init(&dev_priv->sman, 2, 12, 8);
108 if (ret) { 108 if (ret) {
109 drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER); 109 drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
110 return ret;
110 } 111 }
111 return ret; 112
113 ret = drm_vblank_init(dev, 1);
114 if (ret) {
115 drm_sman_takedown(&dev_priv->sman);
116 drm_free(dev_priv, sizeof(drm_via_private_t), DRM_MEM_DRIVER);
117 return ret;
118 }
119
120 return 0;
112} 121}
113 122
114int via_driver_unload(struct drm_device *dev) 123int via_driver_unload(struct drm_device *dev)
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index f5999a91614e..b4fd8ca701a4 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -247,7 +247,6 @@ config HID_SUNPLUS
247 247
248config THRUSTMASTER_FF 248config THRUSTMASTER_FF
249 tristate "ThrustMaster devices support" 249 tristate "ThrustMaster devices support"
250 default m
251 depends on USB_HID 250 depends on USB_HID
252 select INPUT_FF_MEMLESS 251 select INPUT_FF_MEMLESS
253 help 252 help
@@ -256,7 +255,6 @@ config THRUSTMASTER_FF
256 255
257config ZEROPLUS_FF 256config ZEROPLUS_FF
258 tristate "Zeroplus based game controller support" 257 tristate "Zeroplus based game controller support"
259 default m
260 depends on USB_HID 258 depends on USB_HID
261 select INPUT_FF_MEMLESS 259 select INPUT_FF_MEMLESS
262 help 260 help
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index fd7f896b34f7..aa28aed0e46c 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -55,10 +55,11 @@ struct apple_key_translation {
55 55
56static struct apple_key_translation apple_fn_keys[] = { 56static struct apple_key_translation apple_fn_keys[] = {
57 { KEY_BACKSPACE, KEY_DELETE }, 57 { KEY_BACKSPACE, KEY_DELETE },
58 { KEY_ENTER, KEY_INSERT },
58 { KEY_F1, KEY_BRIGHTNESSDOWN, APPLE_FLAG_FKEY }, 59 { KEY_F1, KEY_BRIGHTNESSDOWN, APPLE_FLAG_FKEY },
59 { KEY_F2, KEY_BRIGHTNESSUP, APPLE_FLAG_FKEY }, 60 { KEY_F2, KEY_BRIGHTNESSUP, APPLE_FLAG_FKEY },
60 { KEY_F3, KEY_FN_F5, APPLE_FLAG_FKEY }, /* Exposé */ 61 { KEY_F3, KEY_SCALE, APPLE_FLAG_FKEY },
61 { KEY_F4, KEY_FN_F4, APPLE_FLAG_FKEY }, /* Dashboard */ 62 { KEY_F4, KEY_DASHBOARD, APPLE_FLAG_FKEY },
62 { KEY_F5, KEY_KBDILLUMDOWN, APPLE_FLAG_FKEY }, 63 { KEY_F5, KEY_KBDILLUMDOWN, APPLE_FLAG_FKEY },
63 { KEY_F6, KEY_KBDILLUMUP, APPLE_FLAG_FKEY }, 64 { KEY_F6, KEY_KBDILLUMUP, APPLE_FLAG_FKEY },
64 { KEY_F7, KEY_PREVIOUSSONG, APPLE_FLAG_FKEY }, 65 { KEY_F7, KEY_PREVIOUSSONG, APPLE_FLAG_FKEY },
@@ -312,13 +313,6 @@ static int apple_probe(struct hid_device *hdev,
312 unsigned int connect_mask = HID_CONNECT_DEFAULT; 313 unsigned int connect_mask = HID_CONNECT_DEFAULT;
313 int ret; 314 int ret;
314 315
315 /* return something else or move to hid layer? device will reside
316 allocated */
317 if (id->bus == BUS_USB && (quirks & APPLE_IGNORE_MOUSE) &&
318 to_usb_interface(hdev->dev.parent)->cur_altsetting->
319 desc.bInterfaceProtocol == USB_INTERFACE_PROTOCOL_MOUSE)
320 return -ENODEV;
321
322 asc = kzalloc(sizeof(*asc), GFP_KERNEL); 316 asc = kzalloc(sizeof(*asc), GFP_KERNEL);
323 if (asc == NULL) { 317 if (asc == NULL) {
324 dev_err(&hdev->dev, "can't alloc apple descriptor\n"); 318 dev_err(&hdev->dev, "can't alloc apple descriptor\n");
@@ -367,38 +361,32 @@ static const struct hid_device_id apple_devices[] = {
367 .driver_data = APPLE_MIGHTYMOUSE | APPLE_INVERT_HWHEEL }, 361 .driver_data = APPLE_MIGHTYMOUSE | APPLE_INVERT_HWHEEL },
368 362
369 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI), 363 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI),
370 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN | 364 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
371 APPLE_IGNORE_MOUSE },
372 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO), 365 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO),
373 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN | 366 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
374 APPLE_IGNORE_MOUSE },
375 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI), 367 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI),
376 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN | 368 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
377 APPLE_IGNORE_MOUSE },
378 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ISO), 369 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ISO),
379 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN | 370 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN |
380 APPLE_IGNORE_MOUSE | APPLE_ISO_KEYBOARD }, 371 APPLE_ISO_KEYBOARD },
381 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_JIS), 372 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_JIS),
382 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN | 373 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
383 APPLE_IGNORE_MOUSE },
384 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ANSI), 374 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ANSI),
385 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN | 375 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
386 APPLE_IGNORE_MOUSE },
387 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ISO), 376 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ISO),
388 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN | 377 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN |
389 APPLE_IGNORE_MOUSE | APPLE_ISO_KEYBOARD }, 378 APPLE_ISO_KEYBOARD },
390 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_JIS), 379 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_JIS),
391 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN | 380 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN |
392 APPLE_IGNORE_MOUSE | APPLE_RDESC_JIS }, 381 APPLE_RDESC_JIS },
393 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ANSI), 382 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ANSI),
394 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN | 383 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
395 APPLE_IGNORE_MOUSE },
396 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ISO), 384 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ISO),
397 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN | 385 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN |
398 APPLE_IGNORE_MOUSE | APPLE_ISO_KEYBOARD }, 386 APPLE_ISO_KEYBOARD },
399 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_JIS), 387 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_JIS),
400 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN | 388 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN |
401 APPLE_IGNORE_MOUSE | APPLE_RDESC_JIS}, 389 APPLE_RDESC_JIS },
402 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_ANSI), 390 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_ANSI),
403 .driver_data = APPLE_HAS_FN }, 391 .driver_data = APPLE_HAS_FN },
404 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_ISO), 392 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_ISO),
@@ -406,41 +394,41 @@ static const struct hid_device_id apple_devices[] = {
406 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_JIS), 394 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_JIS),
407 .driver_data = APPLE_HAS_FN }, 395 .driver_data = APPLE_HAS_FN },
408 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI), 396 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI),
409 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN | 397 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
410 APPLE_IGNORE_MOUSE },
411 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO), 398 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO),
412 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN | 399 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
413 APPLE_IGNORE_MOUSE },
414 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS), 400 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS),
415 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN | 401 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN |
416 APPLE_IGNORE_MOUSE | APPLE_RDESC_JIS }, 402 APPLE_RDESC_JIS },
417 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI), 403 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI),
418 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, 404 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
419 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO), 405 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO),
420 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN | 406 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN |
421 APPLE_ISO_KEYBOARD }, 407 APPLE_ISO_KEYBOARD },
422 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS), 408 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS),
423 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, 409 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
424 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI), 410 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI),
425 .driver_data = APPLE_HAS_FN | APPLE_IGNORE_MOUSE }, 411 .driver_data = APPLE_HAS_FN },
426 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO), 412 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO),
427 .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD | 413 .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
428 APPLE_IGNORE_MOUSE },
429 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_JIS), 414 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_JIS),
430 .driver_data = APPLE_HAS_FN | APPLE_IGNORE_MOUSE | APPLE_RDESC_JIS }, 415 .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
431 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI), 416 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI),
432 .driver_data = APPLE_HAS_FN | APPLE_IGNORE_MOUSE }, 417 .driver_data = APPLE_HAS_FN },
433 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ISO), 418 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ISO),
434 .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD | 419 .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
435 APPLE_IGNORE_MOUSE },
436 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_JIS), 420 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_JIS),
437 .driver_data = APPLE_HAS_FN | APPLE_IGNORE_MOUSE | APPLE_RDESC_JIS }, 421 .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
422 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI),
423 .driver_data = APPLE_HAS_FN },
424 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ISO),
425 .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
426 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS),
427 .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
438 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY), 428 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY),
439 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN | 429 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
440 APPLE_IGNORE_MOUSE },
441 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY), 430 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY),
442 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN | 431 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
443 APPLE_IGNORE_MOUSE },
444 432
445 /* Apple wireless Mighty Mouse */ 433 /* Apple wireless Mighty Mouse */
446 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, 0x030c), 434 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, 0x030c),
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 721a36d97582..40df3e1b4bd1 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1241,18 +1241,20 @@ static const struct hid_device_id hid_blacklist[] = {
1241 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI) }, 1241 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI) },
1242 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO) }, 1242 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO) },
1243 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS) }, 1243 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS) },
1244 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI) }, 1244 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI) },
1245 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO) }, 1245 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO) },
1246 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS) }, 1246 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS) },
1247 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI) }, 1247 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI) },
1248 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO) }, 1248 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO) },
1249 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_JIS) }, 1249 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_JIS) },
1250 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI) }, 1250 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI) },
1251 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ISO) }, 1251 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ISO) },
1252 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_JIS) }, 1252 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_JIS) },
1253 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI) },
1254 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ISO) },
1255 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS) },
1253 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) }, 1256 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
1254 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) }, 1257 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
1255 { HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) },
1256 { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) }, 1258 { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
1257 { HID_USB_DEVICE(USB_VENDOR_ID_BRIGHT, USB_DEVICE_ID_BRIGHT_ABNT2) }, 1259 { HID_USB_DEVICE(USB_VENDOR_ID_BRIGHT, USB_DEVICE_ID_BRIGHT_ABNT2) },
1258 { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) }, 1260 { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) },
@@ -1263,7 +1265,12 @@ static const struct hid_device_id hid_blacklist[] = {
1263 { HID_USB_DEVICE(USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_W7658) }, 1265 { HID_USB_DEVICE(USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_W7658) },
1264 { HID_USB_DEVICE(USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_SK8115) }, 1266 { HID_USB_DEVICE(USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_SK8115) },
1265 { HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) }, 1267 { HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) },
1268 { HID_USB_DEVICE(USB_VENDOR_ID_GENERIC_13BA, USB_DEVICE_ID_GENERIC_13BA_KBD_MOUSE) },
1269 { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) },
1270 { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR) },
1271 { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0003) },
1266 { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) }, 1272 { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) },
1273 { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
1267 { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) }, 1274 { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
1268 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) }, 1275 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) },
1269 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) }, 1276 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) },
@@ -1275,8 +1282,6 @@ static const struct hid_device_id hid_blacklist[] = {
1275 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_KBD) }, 1282 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_KBD) },
1276 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_ELITE_KBD) }, 1283 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_ELITE_KBD) },
1277 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500) }, 1284 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500) },
1278 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_LX3) },
1279 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_V150) },
1280 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_EXTREME_3D) }, 1285 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_EXTREME_3D) },
1281 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WHEEL) }, 1286 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WHEEL) },
1282 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD) }, 1287 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD) },
@@ -1295,6 +1300,7 @@ static const struct hid_device_id hid_blacklist[] = {
1295 { HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) }, 1300 { HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) },
1296 { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) }, 1301 { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
1297 { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) }, 1302 { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
1303 { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
1298 { HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) }, 1304 { HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) },
1299 1305
1300 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, 0x030c) }, 1306 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, 0x030c) },
@@ -1406,6 +1412,8 @@ static const struct hid_device_id hid_ignore_list[] = {
1406 { HID_USB_DEVICE(USB_VENDOR_ID_AIRCABLE, USB_DEVICE_ID_AIRCABLE1) }, 1412 { HID_USB_DEVICE(USB_VENDOR_ID_AIRCABLE, USB_DEVICE_ID_AIRCABLE1) },
1407 { HID_USB_DEVICE(USB_VENDOR_ID_ALCOR, USB_DEVICE_ID_ALCOR_USBRS232) }, 1413 { HID_USB_DEVICE(USB_VENDOR_ID_ALCOR, USB_DEVICE_ID_ALCOR_USBRS232) },
1408 { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_LCM)}, 1414 { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_LCM)},
1415 { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_LCM2)},
1416 { HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) },
1409 { HID_USB_DEVICE(USB_VENDOR_ID_BERKSHIRE, USB_DEVICE_ID_BERKSHIRE_PCWD) }, 1417 { HID_USB_DEVICE(USB_VENDOR_ID_BERKSHIRE, USB_DEVICE_ID_BERKSHIRE_PCWD) },
1410 { HID_USB_DEVICE(USB_VENDOR_ID_CIDC, 0x0103) }, 1418 { HID_USB_DEVICE(USB_VENDOR_ID_CIDC, 0x0103) },
1411 { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI470X) }, 1419 { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI470X) },
@@ -1415,7 +1423,6 @@ static const struct hid_device_id hid_ignore_list[] = {
1415 { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) }, 1423 { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) },
1416 { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) }, 1424 { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) },
1417 { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) }, 1425 { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
1418 { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) },
1419 { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0001) }, 1426 { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0001) },
1420 { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0002) }, 1427 { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0002) },
1421 { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0003) }, 1428 { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0003) },
@@ -1431,7 +1438,6 @@ static const struct hid_device_id hid_ignore_list[] = {
1431 { HID_USB_DEVICE(USB_VENDOR_ID_GOTOP, USB_DEVICE_ID_SUPER_Q2) }, 1438 { HID_USB_DEVICE(USB_VENDOR_ID_GOTOP, USB_DEVICE_ID_SUPER_Q2) },
1432 { HID_USB_DEVICE(USB_VENDOR_ID_GOTOP, USB_DEVICE_ID_GOGOPEN) }, 1439 { HID_USB_DEVICE(USB_VENDOR_ID_GOTOP, USB_DEVICE_ID_GOGOPEN) },
1433 { HID_USB_DEVICE(USB_VENDOR_ID_GOTOP, USB_DEVICE_ID_PENPOWER) }, 1440 { HID_USB_DEVICE(USB_VENDOR_ID_GOTOP, USB_DEVICE_ID_PENPOWER) },
1434 { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0003) },
1435 { HID_USB_DEVICE(USB_VENDOR_ID_GRETAGMACBETH, USB_DEVICE_ID_GRETAGMACBETH_HUEY) }, 1441 { HID_USB_DEVICE(USB_VENDOR_ID_GRETAGMACBETH, USB_DEVICE_ID_GRETAGMACBETH_HUEY) },
1436 { HID_USB_DEVICE(USB_VENDOR_ID_GRIFFIN, USB_DEVICE_ID_POWERMATE) }, 1442 { HID_USB_DEVICE(USB_VENDOR_ID_GRIFFIN, USB_DEVICE_ID_POWERMATE) },
1437 { HID_USB_DEVICE(USB_VENDOR_ID_GRIFFIN, USB_DEVICE_ID_SOUNDKNOB) }, 1443 { HID_USB_DEVICE(USB_VENDOR_ID_GRIFFIN, USB_DEVICE_ID_SOUNDKNOB) },
@@ -1483,6 +1489,7 @@ static const struct hid_device_id hid_ignore_list[] = {
1483 { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1007) }, 1489 { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1007) },
1484 { HID_USB_DEVICE(USB_VENDOR_ID_IMATION, USB_DEVICE_ID_DISC_STAKKA) }, 1490 { HID_USB_DEVICE(USB_VENDOR_ID_IMATION, USB_DEVICE_ID_DISC_STAKKA) },
1485 { HID_USB_DEVICE(USB_VENDOR_ID_KBGEAR, USB_DEVICE_ID_KBGEAR_JAMSTUDIO) }, 1491 { HID_USB_DEVICE(USB_VENDOR_ID_KBGEAR, USB_DEVICE_ID_KBGEAR_JAMSTUDIO) },
1492 { HID_USB_DEVICE(USB_VENDOR_ID_KWORLD, USB_DEVICE_ID_KWORLD_RADIO_FM700) },
1486 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_GPEN_560) }, 1493 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_GPEN_560) },
1487 { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CASSY) }, 1494 { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CASSY) },
1488 { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POCKETCASSY) }, 1495 { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POCKETCASSY) },
@@ -1541,6 +1548,40 @@ static const struct hid_device_id hid_ignore_list[] = {
1541 { } 1548 { }
1542}; 1549};
1543 1550
1551/**
1552 * hid_mouse_ignore_list - mouse devices which should not be handled by the hid layer
1553 *
1554 * There are composite devices for which we want to ignore only a certain
1555 * interface. This is a list of devices for which only the mouse interface will
1556 * be ignored. This allows a dedicated driver to take care of the interface.
1557 */
1558static const struct hid_device_id hid_mouse_ignore_list[] = {
1559 /* appletouch driver */
1560 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI) },
1561 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO) },
1562 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI) },
1563 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ISO) },
1564 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_JIS) },
1565 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ANSI) },
1566 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ISO) },
1567 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_JIS) },
1568 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ANSI) },
1569 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ISO) },
1570 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_JIS) },
1571 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI) },
1572 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO) },
1573 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS) },
1574 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI) },
1575 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO) },
1576 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_JIS) },
1577 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI) },
1578 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ISO) },
1579 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_JIS) },
1580 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
1581 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
1582 { }
1583};
1584
1544static bool hid_ignore(struct hid_device *hdev) 1585static bool hid_ignore(struct hid_device *hdev)
1545{ 1586{
1546 switch (hdev->vendor) { 1587 switch (hdev->vendor) {
@@ -1557,6 +1598,10 @@ static bool hid_ignore(struct hid_device *hdev)
1557 break; 1598 break;
1558 } 1599 }
1559 1600
1601 if (hdev->type == HID_TYPE_USBMOUSE &&
1602 hid_match_id(hdev, hid_mouse_ignore_list))
1603 return true;
1604
1560 return !!hid_match_id(hdev, hid_ignore_list); 1605 return !!hid_match_id(hdev, hid_ignore_list);
1561} 1606}
1562 1607
@@ -1689,7 +1734,7 @@ static int __init hid_init(void)
1689 goto err_bus; 1734 goto err_bus;
1690 1735
1691#ifdef CONFIG_HID_COMPAT 1736#ifdef CONFIG_HID_COMPAT
1692 hid_compat_wq = create_workqueue("hid_compat"); 1737 hid_compat_wq = create_singlethread_workqueue("hid_compat");
1693 if (!hid_compat_wq) { 1738 if (!hid_compat_wq) {
1694 hidraw_exit(); 1739 hidraw_exit();
1695 goto err; 1740 goto err;
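A minimal sketch, not part of the commit, tying together the two halves of the hid-core.c change above: usbhid (see the usbhid/hid-core.c hunks later in this diff) tags an interface whose bInterfaceProtocol is mouse as HID_TYPE_USBMOUSE, and hid_ignore() then refuses only that interface when it matches hid_mouse_ignore_list, so the keyboard interface of a composite Apple device keeps binding to hid-apple while appletouch can claim the touchpad. The table below is hypothetical; HID_USB_DEVICE() and hid_match_id() are the same helpers used throughout this patch.

/* Hypothetical two-entry table in the style of hid_mouse_ignore_list */
static const struct hid_device_id example_mouse_ignore[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI) },
	{ }				/* zero entry terminates the table */
};

static bool example_ignore(struct hid_device *hdev)
{
	/* Only the interface usbhid tagged as a mouse consults the
	 * mouse-only list; other interfaces of the same device still bind. */
	if (hdev->type == HID_TYPE_USBMOUSE &&
	    hid_match_id(hdev, example_mouse_ignore))
		return true;
	return false;
}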
diff --git a/drivers/hid/hid-dell.c b/drivers/hid/hid-dell.c
index 1a0d0dfc62fc..f5474300b83a 100644
--- a/drivers/hid/hid-dell.c
+++ b/drivers/hid/hid-dell.c
@@ -48,6 +48,7 @@ err_free:
48static const struct hid_device_id dell_devices[] = { 48static const struct hid_device_id dell_devices[] = {
49 { HID_USB_DEVICE(USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_W7658) }, 49 { HID_USB_DEVICE(USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_W7658) },
50 { HID_USB_DEVICE(USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_SK8115) }, 50 { HID_USB_DEVICE(USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_SK8115) },
51 { HID_USB_DEVICE(USB_VENDOR_ID_GENERIC_13BA, USB_DEVICE_ID_GENERIC_13BA_KBD_MOUSE) },
51 { } 52 { }
52}; 53};
53MODULE_DEVICE_TABLE(hid, dell_devices); 54MODULE_DEVICE_TABLE(hid, dell_devices);
diff --git a/drivers/hid/hid-gyration.c b/drivers/hid/hid-gyration.c
index ac5120f542cc..04a0afec52ac 100644
--- a/drivers/hid/hid-gyration.c
+++ b/drivers/hid/hid-gyration.c
@@ -4,9 +4,9 @@
4 * Copyright (c) 1999 Andreas Gal 4 * Copyright (c) 1999 Andreas Gal
5 * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz> 5 * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
6 * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc 6 * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
7 * Copyright (c) 2006-2007 Jiri Kosina
8 * Copyright (c) 2007 Paul Walmsley 7 * Copyright (c) 2007 Paul Walmsley
9 * Copyright (c) 2008 Jiri Slaby 8 * Copyright (c) 2008 Jiri Slaby
9 * Copyright (c) 2006-2008 Jiri Kosina
10 */ 10 */
11 11
12/* 12/*
@@ -40,6 +40,7 @@ static int gyration_input_mapping(struct hid_device *hdev, struct hid_input *hi,
40 case 0x025: gy_map_key_clear(KEY_PVR); break; 40 case 0x025: gy_map_key_clear(KEY_PVR); break;
41 case 0x046: gy_map_key_clear(KEY_MEDIA); break; 41 case 0x046: gy_map_key_clear(KEY_MEDIA); break;
42 case 0x047: gy_map_key_clear(KEY_MP3); break; 42 case 0x047: gy_map_key_clear(KEY_MP3); break;
43 case 0x048: gy_map_key_clear(KEY_MEDIA); break;
43 case 0x049: gy_map_key_clear(KEY_CAMERA); break; 44 case 0x049: gy_map_key_clear(KEY_CAMERA); break;
44 case 0x04a: gy_map_key_clear(KEY_VIDEO); break; 45 case 0x04a: gy_map_key_clear(KEY_VIDEO); break;
45 46
@@ -68,6 +69,7 @@ static int gyration_event(struct hid_device *hdev, struct hid_field *field,
68 69
69static const struct hid_device_id gyration_devices[] = { 70static const struct hid_device_id gyration_devices[] = {
70 { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) }, 71 { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) },
72 { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
71 { } 73 { }
72}; 74};
73MODULE_DEVICE_TABLE(hid, gyration_devices); 75MODULE_DEVICE_TABLE(hid, gyration_devices);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index d9a1ba920c23..39289699c32f 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -82,6 +82,9 @@
82#define USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI 0x0230 82#define USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI 0x0230
83#define USB_DEVICE_ID_APPLE_WELLSPRING2_ISO 0x0231 83#define USB_DEVICE_ID_APPLE_WELLSPRING2_ISO 0x0231
84#define USB_DEVICE_ID_APPLE_WELLSPRING2_JIS 0x0232 84#define USB_DEVICE_ID_APPLE_WELLSPRING2_JIS 0x0232
85#define USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI 0x0236
86#define USB_DEVICE_ID_APPLE_WELLSPRING3_ISO 0x0237
87#define USB_DEVICE_ID_APPLE_WELLSPRING3_JIS 0x0238
85#define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a 88#define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a
86#define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b 89#define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b
87#define USB_DEVICE_ID_APPLE_ATV_IRCONTROL 0x8241 90#define USB_DEVICE_ID_APPLE_ATV_IRCONTROL 0x8241
@@ -89,6 +92,7 @@
89 92
90#define USB_VENDOR_ID_ASUS 0x0b05 93#define USB_VENDOR_ID_ASUS 0x0b05
91#define USB_DEVICE_ID_ASUS_LCM 0x1726 94#define USB_DEVICE_ID_ASUS_LCM 0x1726
95#define USB_DEVICE_ID_ASUS_LCM2 0x175b
92 96
93#define USB_VENDOR_ID_ATEN 0x0557 97#define USB_VENDOR_ID_ATEN 0x0557
94#define USB_DEVICE_ID_ATEN_UC100KM 0x2004 98#define USB_DEVICE_ID_ATEN_UC100KM 0x2004
@@ -159,9 +163,13 @@
159 163
160#define USB_VENDOR_ID_GAMERON 0x0810 164#define USB_VENDOR_ID_GAMERON 0x0810
161#define USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR 0x0001 165#define USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR 0x0001
166#define USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR 0x0002
162 167
163#define USB_VENDOR_ID_GENERAL_TOUCH 0x0dfc 168#define USB_VENDOR_ID_GENERAL_TOUCH 0x0dfc
164 169
170#define USB_VENDOR_ID_GENERIC_13BA 0x13ba
171#define USB_DEVICE_ID_GENERIC_13BA_KBD_MOUSE 0x0017
172
165#define USB_VENDOR_ID_GLAB 0x06c2 173#define USB_VENDOR_ID_GLAB 0x06c2
166#define USB_DEVICE_ID_4_PHIDGETSERVO_30 0x0038 174#define USB_DEVICE_ID_4_PHIDGETSERVO_30 0x0038
167#define USB_DEVICE_ID_1_PHIDGETSERVO_30 0x0039 175#define USB_DEVICE_ID_1_PHIDGETSERVO_30 0x0039
@@ -236,6 +244,7 @@
236 244
237#define USB_VENDOR_ID_GYRATION 0x0c16 245#define USB_VENDOR_ID_GYRATION 0x0c16
238#define USB_DEVICE_ID_GYRATION_REMOTE 0x0002 246#define USB_DEVICE_ID_GYRATION_REMOTE 0x0002
247#define USB_DEVICE_ID_GYRATION_REMOTE_2 0x0003
239 248
240#define USB_VENDOR_ID_HAPP 0x078b 249#define USB_VENDOR_ID_HAPP 0x078b
241#define USB_DEVICE_ID_UGCI_DRIVING 0x0010 250#define USB_DEVICE_ID_UGCI_DRIVING 0x0010
@@ -248,6 +257,9 @@
248#define USB_VENDOR_ID_KBGEAR 0x084e 257#define USB_VENDOR_ID_KBGEAR 0x084e
249#define USB_DEVICE_ID_KBGEAR_JAMSTUDIO 0x1001 258#define USB_DEVICE_ID_KBGEAR_JAMSTUDIO 0x1001
250 259
260#define USB_VENDOR_ID_KWORLD 0x1b80
261#define USB_DEVICE_ID_KWORLD_RADIO_FM700 0xd700
262
251#define USB_VENDOR_ID_LABTEC 0x1020 263#define USB_VENDOR_ID_LABTEC 0x1020
252#define USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD 0x0006 264#define USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD 0x0006
253 265
@@ -268,8 +280,6 @@
268#define USB_DEVICE_ID_LD_MACHINETEST 0x2040 280#define USB_DEVICE_ID_LD_MACHINETEST 0x2040
269 281
270#define USB_VENDOR_ID_LOGITECH 0x046d 282#define USB_VENDOR_ID_LOGITECH 0x046d
271#define USB_DEVICE_ID_LOGITECH_LX3 0xc044
272#define USB_DEVICE_ID_LOGITECH_V150 0xc047
273#define USB_DEVICE_ID_LOGITECH_RECEIVER 0xc101 283#define USB_DEVICE_ID_LOGITECH_RECEIVER 0xc101
274#define USB_DEVICE_ID_LOGITECH_HARMONY_FIRST 0xc110 284#define USB_DEVICE_ID_LOGITECH_HARMONY_FIRST 0xc110
275#define USB_DEVICE_ID_LOGITECH_HARMONY_LAST 0xc14f 285#define USB_DEVICE_ID_LOGITECH_HARMONY_LAST 0xc14f
@@ -350,6 +360,7 @@
350#define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001 360#define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001
351 361
352#define USB_VENDOR_ID_SONY 0x054c 362#define USB_VENDOR_ID_SONY 0x054c
363#define USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE 0x024b
353#define USB_DEVICE_ID_SONY_PS3_CONTROLLER 0x0268 364#define USB_DEVICE_ID_SONY_PS3_CONTROLLER 0x0268
354 365
355#define USB_VENDOR_ID_SOUNDGRAPH 0x15c2 366#define USB_VENDOR_ID_SOUNDGRAPH 0x15c2
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index 406d8c82abf1..2bae340eafe2 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -287,11 +287,6 @@ static const struct hid_device_id lg_devices[] = {
287 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500), 287 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500),
288 .driver_data = LG_IGNORE_DOUBLED_WHEEL | LG_EXPANDED_KEYMAP }, 288 .driver_data = LG_IGNORE_DOUBLED_WHEEL | LG_EXPANDED_KEYMAP },
289 289
290 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_LX3),
291 .driver_data = LG_INVERT_HWHEEL },
292 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_V150),
293 .driver_data = LG_INVERT_HWHEEL },
294
295 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_EXTREME_3D), 290 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_EXTREME_3D),
296 .driver_data = LG_NOGET }, 291 .driver_data = LG_NOGET },
297 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WHEEL), 292 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WHEEL),
diff --git a/drivers/hid/hid-pl.c b/drivers/hid/hid-pl.c
index acd815586182..46941f979b9d 100644
--- a/drivers/hid/hid-pl.c
+++ b/drivers/hid/hid-pl.c
@@ -178,6 +178,8 @@ err:
178static const struct hid_device_id pl_devices[] = { 178static const struct hid_device_id pl_devices[] = {
179 { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR), 179 { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR),
180 .driver_data = 1 }, /* Twin USB Joystick */ 180 .driver_data = 1 }, /* Twin USB Joystick */
181 { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR),
182 .driver_data = 1 }, /* Twin USB Joystick */
181 { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0003), }, /* GreenAsia Inc. USB Joystick */ 183 { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0003), }, /* GreenAsia Inc. USB Joystick */
182 { } 184 { }
183}; 185};
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 3af8095a7de1..86e563b8d644 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -4,9 +4,9 @@
4 * Copyright (c) 1999 Andreas Gal 4 * Copyright (c) 1999 Andreas Gal
5 * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz> 5 * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
6 * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc 6 * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
7 * Copyright (c) 2006-2007 Jiri Kosina
8 * Copyright (c) 2007 Paul Walmsley 7 * Copyright (c) 2007 Paul Walmsley
9 * Copyright (c) 2008 Jiri Slaby 8 * Copyright (c) 2008 Jiri Slaby
9 * Copyright (c) 2006-2008 Jiri Kosina
10 */ 10 */
11 11
12/* 12/*
@@ -23,6 +23,26 @@
23 23
24#include "hid-ids.h" 24#include "hid-ids.h"
25 25
26#define VAIO_RDESC_CONSTANT 0x0001
27
28struct sony_sc {
29 unsigned long quirks;
30};
31
32/* Sony Vaio VGX has wrongly mouse pointer declared as constant */
33static void sony_report_fixup(struct hid_device *hdev, __u8 *rdesc,
34 unsigned int rsize)
35{
36 struct sony_sc *sc = hid_get_drvdata(hdev);
37
38 if ((sc->quirks & VAIO_RDESC_CONSTANT) &&
39 rsize >= 56 && rdesc[54] == 0x81 && rdesc[55] == 0x07) {
40 dev_info(&hdev->dev, "Fixing up Sony Vaio VGX report "
41 "descriptor\n");
42 rdesc[55] = 0x06;
43 }
44}
45
26/* 46/*
27 * Sending HID_REQ_GET_REPORT changes the operation mode of the ps3 controller 47 * Sending HID_REQ_GET_REPORT changes the operation mode of the ps3 controller
28 * to "operational". Without this, the ps3 controller will not report any 48 * to "operational". Without this, the ps3 controller will not report any
@@ -56,6 +76,17 @@ static int sony_set_operational(struct hid_device *hdev)
56static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id) 76static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
57{ 77{
58 int ret; 78 int ret;
79 unsigned long quirks = id->driver_data;
80 struct sony_sc *sc;
81
82 sc = kzalloc(sizeof(*sc), GFP_KERNEL);
83 if (sc == NULL) {
 84 dev_err(&hdev->dev, "can't alloc sony descriptor\n");
85 return -ENOMEM;
86 }
87
88 sc->quirks = quirks;
89 hid_set_drvdata(hdev, sc);
59 90
60 ret = hid_parse(hdev); 91 ret = hid_parse(hdev);
61 if (ret) { 92 if (ret) {
@@ -78,11 +109,20 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
78err_stop: 109err_stop:
79 hid_hw_stop(hdev); 110 hid_hw_stop(hdev);
80err_free: 111err_free:
112 kfree(sc);
81 return ret; 113 return ret;
82} 114}
83 115
116static void sony_remove(struct hid_device *hdev)
117{
118 hid_hw_stop(hdev);
119 kfree(hid_get_drvdata(hdev));
120}
121
84static const struct hid_device_id sony_devices[] = { 122static const struct hid_device_id sony_devices[] = {
85 { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) }, 123 { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
124 { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE),
125 .driver_data = VAIO_RDESC_CONSTANT },
86 { } 126 { }
87}; 127};
88MODULE_DEVICE_TABLE(hid, sony_devices); 128MODULE_DEVICE_TABLE(hid, sony_devices);
@@ -91,6 +131,8 @@ static struct hid_driver sony_driver = {
91 .name = "sony", 131 .name = "sony",
92 .id_table = sony_devices, 132 .id_table = sony_devices,
93 .probe = sony_probe, 133 .probe = sony_probe,
134 .remove = sony_remove,
135 .report_fixup = sony_report_fixup,
94}; 136};
95 137
96static int sony_init(void) 138static int sony_init(void)
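For context on the sony_report_fixup() hunk above: in a HID report descriptor, 0x81 opens a one-byte Input main item and the following byte carries the item flags (bit 0 Data/Constant, bit 1 Array/Variable, bit 2 Absolute/Relative in the HID 1.11 spec). The sketch below is only an annotated restatement of what the fixup does; the offsets 54/55 are specific to the VAIO VGX descriptor handled by the patch.

/*
 *   rdesc[54] = 0x81        Input main item, one flags byte follows
 *   rdesc[55] = 0x07        Const | Variable | Relative  -> axes dropped
 *   rdesc[55] = 0x06        Data  | Variable | Relative  -> normal mouse
 *
 * Clearing bit 0 turns the "constant" pointer field back into real data,
 * so hid-core generates relative X/Y events for the VGX mouse.
 */
if (rsize >= 56 && rdesc[54] == 0x81 && rdesc[55] == 0x07)
	rdesc[55] = 0x06;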
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index af3edb98df43..7685ae6808c4 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -38,7 +38,7 @@ static int hidraw_major;
38static struct cdev hidraw_cdev; 38static struct cdev hidraw_cdev;
39static struct class *hidraw_class; 39static struct class *hidraw_class;
40static struct hidraw *hidraw_table[HIDRAW_MAX_DEVICES]; 40static struct hidraw *hidraw_table[HIDRAW_MAX_DEVICES];
41static DEFINE_SPINLOCK(minors_lock); 41static DEFINE_MUTEX(minors_lock);
42 42
43static ssize_t hidraw_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) 43static ssize_t hidraw_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
44{ 44{
@@ -159,13 +159,13 @@ static int hidraw_open(struct inode *inode, struct file *file)
159 struct hidraw_list *list; 159 struct hidraw_list *list;
160 int err = 0; 160 int err = 0;
161 161
162 lock_kernel();
163 if (!(list = kzalloc(sizeof(struct hidraw_list), GFP_KERNEL))) { 162 if (!(list = kzalloc(sizeof(struct hidraw_list), GFP_KERNEL))) {
164 err = -ENOMEM; 163 err = -ENOMEM;
165 goto out; 164 goto out;
166 } 165 }
167 166
168 spin_lock(&minors_lock); 167 lock_kernel();
168 mutex_lock(&minors_lock);
169 if (!hidraw_table[minor]) { 169 if (!hidraw_table[minor]) {
170 printk(KERN_EMERG "hidraw device with minor %d doesn't exist\n", 170 printk(KERN_EMERG "hidraw device with minor %d doesn't exist\n",
171 minor); 171 minor);
@@ -180,13 +180,16 @@ static int hidraw_open(struct inode *inode, struct file *file)
180 file->private_data = list; 180 file->private_data = list;
181 181
182 dev = hidraw_table[minor]; 182 dev = hidraw_table[minor];
183 if (!dev->open++) 183 if (!dev->open++) {
184 dev->hid->ll_driver->open(dev->hid); 184 err = dev->hid->ll_driver->open(dev->hid);
185 if (err < 0)
186 dev->open--;
187 }
185 188
186out_unlock: 189out_unlock:
187 spin_unlock(&minors_lock); 190 mutex_unlock(&minors_lock);
188out:
189 unlock_kernel(); 191 unlock_kernel();
192out:
190 return err; 193 return err;
191 194
192} 195}
@@ -264,6 +267,7 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd,
264 default: 267 default:
265 ret = -ENOTTY; 268 ret = -ENOTTY;
266 } 269 }
270 unlock_kernel();
267 return ret; 271 return ret;
268} 272}
269 273
@@ -309,7 +313,7 @@ int hidraw_connect(struct hid_device *hid)
309 313
310 result = -EINVAL; 314 result = -EINVAL;
311 315
312 spin_lock(&minors_lock); 316 mutex_lock(&minors_lock);
313 317
314 for (minor = 0; minor < HIDRAW_MAX_DEVICES; minor++) { 318 for (minor = 0; minor < HIDRAW_MAX_DEVICES; minor++) {
315 if (hidraw_table[minor]) 319 if (hidraw_table[minor])
@@ -319,9 +323,8 @@ int hidraw_connect(struct hid_device *hid)
319 break; 323 break;
320 } 324 }
321 325
322 spin_unlock(&minors_lock);
323
324 if (result) { 326 if (result) {
327 mutex_unlock(&minors_lock);
325 kfree(dev); 328 kfree(dev);
326 goto out; 329 goto out;
327 } 330 }
@@ -330,14 +333,14 @@ int hidraw_connect(struct hid_device *hid)
330 NULL, "%s%d", "hidraw", minor); 333 NULL, "%s%d", "hidraw", minor);
331 334
332 if (IS_ERR(dev->dev)) { 335 if (IS_ERR(dev->dev)) {
333 spin_lock(&minors_lock);
334 hidraw_table[minor] = NULL; 336 hidraw_table[minor] = NULL;
335 spin_unlock(&minors_lock); 337 mutex_unlock(&minors_lock);
336 result = PTR_ERR(dev->dev); 338 result = PTR_ERR(dev->dev);
337 kfree(dev); 339 kfree(dev);
338 goto out; 340 goto out;
339 } 341 }
340 342
343 mutex_unlock(&minors_lock);
341 init_waitqueue_head(&dev->wait); 344 init_waitqueue_head(&dev->wait);
342 INIT_LIST_HEAD(&dev->list); 345 INIT_LIST_HEAD(&dev->list);
343 346
@@ -359,9 +362,9 @@ void hidraw_disconnect(struct hid_device *hid)
359 362
360 hidraw->exist = 0; 363 hidraw->exist = 0;
361 364
362 spin_lock(&minors_lock); 365 mutex_lock(&minors_lock);
363 hidraw_table[hidraw->minor] = NULL; 366 hidraw_table[hidraw->minor] = NULL;
364 spin_unlock(&minors_lock); 367 mutex_unlock(&minors_lock);
365 368
366 device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor)); 369 device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor));
367 370
@@ -403,7 +406,7 @@ out:
403 return result; 406 return result;
404} 407}
405 408
406void __exit hidraw_exit(void) 409void hidraw_exit(void)
407{ 410{
408 dev_t dev_id = MKDEV(hidraw_major, 0); 411 dev_t dev_id = MKDEV(hidraw_major, 0);
409 412
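A brief note on the hidraw locking hunks above, with a hedged sketch: minors_lock becomes a mutex because it is now held across ll_driver->open(), which may sleep, and the open count is rolled back when that transport open fails. The snippet below only condenses the resulting pattern; the names match the patch.

mutex_lock(&minors_lock);
dev = hidraw_table[minor];
if (dev && !dev->open++) {
	err = dev->hid->ll_driver->open(dev->hid);
	if (err < 0)
		dev->open--;		/* undo the count, device stays closed */
}
mutex_unlock(&minors_lock);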
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 705a43cdeea4..606369ea24ca 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -20,6 +20,7 @@
20#include <linux/kernel.h> 20#include <linux/kernel.h>
21#include <linux/list.h> 21#include <linux/list.h>
22#include <linux/mm.h> 22#include <linux/mm.h>
23#include <linux/mutex.h>
23#include <linux/smp_lock.h> 24#include <linux/smp_lock.h>
24#include <linux/spinlock.h> 25#include <linux/spinlock.h>
25#include <asm/unaligned.h> 26#include <asm/unaligned.h>
@@ -776,20 +777,11 @@ static int usbhid_start(struct hid_device *hid)
776 struct usb_interface *intf = to_usb_interface(hid->dev.parent); 777 struct usb_interface *intf = to_usb_interface(hid->dev.parent);
777 struct usb_host_interface *interface = intf->cur_altsetting; 778 struct usb_host_interface *interface = intf->cur_altsetting;
778 struct usb_device *dev = interface_to_usbdev(intf); 779 struct usb_device *dev = interface_to_usbdev(intf);
779 struct usbhid_device *usbhid; 780 struct usbhid_device *usbhid = hid->driver_data;
780 unsigned int n, insize = 0; 781 unsigned int n, insize = 0;
781 int ret; 782 int ret;
782 783
783 WARN_ON(hid->driver_data); 784 clear_bit(HID_DISCONNECTED, &usbhid->iofl);
784
785 usbhid = kzalloc(sizeof(struct usbhid_device), GFP_KERNEL);
786 if (usbhid == NULL) {
787 ret = -ENOMEM;
788 goto err;
789 }
790
791 hid->driver_data = usbhid;
792 usbhid->hid = hid;
793 785
794 usbhid->bufsize = HID_MIN_BUFFER_SIZE; 786 usbhid->bufsize = HID_MIN_BUFFER_SIZE;
795 hid_find_max_report(hid, HID_INPUT_REPORT, &usbhid->bufsize); 787 hid_find_max_report(hid, HID_INPUT_REPORT, &usbhid->bufsize);
@@ -856,12 +848,6 @@ static int usbhid_start(struct hid_device *hid)
856 } 848 }
857 } 849 }
858 850
859 if (!usbhid->urbin) {
860 err_hid("couldn't find an input interrupt endpoint");
861 ret = -ENODEV;
862 goto fail;
863 }
864
865 init_waitqueue_head(&usbhid->wait); 851 init_waitqueue_head(&usbhid->wait);
866 INIT_WORK(&usbhid->reset_work, hid_reset); 852 INIT_WORK(&usbhid->reset_work, hid_reset);
867 setup_timer(&usbhid->io_retry, hid_retry_timeout, (unsigned long) hid); 853 setup_timer(&usbhid->io_retry, hid_retry_timeout, (unsigned long) hid);
@@ -888,15 +874,18 @@ static int usbhid_start(struct hid_device *hid)
888 usbhid_init_reports(hid); 874 usbhid_init_reports(hid);
889 hid_dump_device(hid); 875 hid_dump_device(hid);
890 876
877 set_bit(HID_STARTED, &usbhid->iofl);
878
891 return 0; 879 return 0;
892 880
893fail: 881fail:
894 usb_free_urb(usbhid->urbin); 882 usb_free_urb(usbhid->urbin);
895 usb_free_urb(usbhid->urbout); 883 usb_free_urb(usbhid->urbout);
896 usb_free_urb(usbhid->urbctrl); 884 usb_free_urb(usbhid->urbctrl);
885 usbhid->urbin = NULL;
886 usbhid->urbout = NULL;
887 usbhid->urbctrl = NULL;
897 hid_free_buffers(dev, hid); 888 hid_free_buffers(dev, hid);
898 kfree(usbhid);
899err:
900 return ret; 889 return ret;
901} 890}
902 891
@@ -907,6 +896,7 @@ static void usbhid_stop(struct hid_device *hid)
907 if (WARN_ON(!usbhid)) 896 if (WARN_ON(!usbhid))
908 return; 897 return;
909 898
899 clear_bit(HID_STARTED, &usbhid->iofl);
910 spin_lock_irq(&usbhid->inlock); /* Sync with error handler */ 900 spin_lock_irq(&usbhid->inlock); /* Sync with error handler */
911 set_bit(HID_DISCONNECTED, &usbhid->iofl); 901 set_bit(HID_DISCONNECTED, &usbhid->iofl);
912 spin_unlock_irq(&usbhid->inlock); 902 spin_unlock_irq(&usbhid->inlock);
@@ -929,10 +919,11 @@ static void usbhid_stop(struct hid_device *hid)
929 usb_free_urb(usbhid->urbin); 919 usb_free_urb(usbhid->urbin);
930 usb_free_urb(usbhid->urbctrl); 920 usb_free_urb(usbhid->urbctrl);
931 usb_free_urb(usbhid->urbout); 921 usb_free_urb(usbhid->urbout);
922 usbhid->urbin = NULL; /* don't mess up next start */
923 usbhid->urbctrl = NULL;
924 usbhid->urbout = NULL;
932 925
933 hid_free_buffers(hid_to_usb_dev(hid), hid); 926 hid_free_buffers(hid_to_usb_dev(hid), hid);
934 kfree(usbhid);
935 hid->driver_data = NULL;
936} 927}
937 928
938static struct hid_ll_driver usb_hid_driver = { 929static struct hid_ll_driver usb_hid_driver = {
@@ -946,14 +937,26 @@ static struct hid_ll_driver usb_hid_driver = {
946 937
947static int hid_probe(struct usb_interface *intf, const struct usb_device_id *id) 938static int hid_probe(struct usb_interface *intf, const struct usb_device_id *id)
948{ 939{
940 struct usb_host_interface *interface = intf->cur_altsetting;
949 struct usb_device *dev = interface_to_usbdev(intf); 941 struct usb_device *dev = interface_to_usbdev(intf);
942 struct usbhid_device *usbhid;
950 struct hid_device *hid; 943 struct hid_device *hid;
944 unsigned int n, has_in = 0;
951 size_t len; 945 size_t len;
952 int ret; 946 int ret;
953 947
954 dbg_hid("HID probe called for ifnum %d\n", 948 dbg_hid("HID probe called for ifnum %d\n",
955 intf->altsetting->desc.bInterfaceNumber); 949 intf->altsetting->desc.bInterfaceNumber);
956 950
951 for (n = 0; n < interface->desc.bNumEndpoints; n++)
952 if (usb_endpoint_is_int_in(&interface->endpoint[n].desc))
953 has_in++;
954 if (!has_in) {
955 dev_err(&intf->dev, "couldn't find an input interrupt "
956 "endpoint\n");
957 return -ENODEV;
958 }
959
957 hid = hid_allocate_device(); 960 hid = hid_allocate_device();
958 if (IS_ERR(hid)) 961 if (IS_ERR(hid))
959 return PTR_ERR(hid); 962 return PTR_ERR(hid);
@@ -972,6 +975,9 @@ static int hid_probe(struct usb_interface *intf, const struct usb_device_id *id)
972 hid->vendor = le16_to_cpu(dev->descriptor.idVendor); 975 hid->vendor = le16_to_cpu(dev->descriptor.idVendor);
973 hid->product = le16_to_cpu(dev->descriptor.idProduct); 976 hid->product = le16_to_cpu(dev->descriptor.idProduct);
974 hid->name[0] = 0; 977 hid->name[0] = 0;
978 if (intf->cur_altsetting->desc.bInterfaceProtocol ==
979 USB_INTERFACE_PROTOCOL_MOUSE)
980 hid->type = HID_TYPE_USBMOUSE;
975 981
976 if (dev->manufacturer) 982 if (dev->manufacturer)
977 strlcpy(hid->name, dev->manufacturer, sizeof(hid->name)); 983 strlcpy(hid->name, dev->manufacturer, sizeof(hid->name));
@@ -997,14 +1003,25 @@ static int hid_probe(struct usb_interface *intf, const struct usb_device_id *id)
997 if (usb_string(dev, dev->descriptor.iSerialNumber, hid->uniq, 64) <= 0) 1003 if (usb_string(dev, dev->descriptor.iSerialNumber, hid->uniq, 64) <= 0)
998 hid->uniq[0] = 0; 1004 hid->uniq[0] = 0;
999 1005
1006 usbhid = kzalloc(sizeof(*usbhid), GFP_KERNEL);
1007 if (usbhid == NULL) {
1008 ret = -ENOMEM;
1009 goto err;
1010 }
1011
1012 hid->driver_data = usbhid;
1013 usbhid->hid = hid;
1014
1000 ret = hid_add_device(hid); 1015 ret = hid_add_device(hid);
1001 if (ret) { 1016 if (ret) {
1002 if (ret != -ENODEV) 1017 if (ret != -ENODEV)
1003 dev_err(&intf->dev, "can't add hid device: %d\n", ret); 1018 dev_err(&intf->dev, "can't add hid device: %d\n", ret);
1004 goto err; 1019 goto err_free;
1005 } 1020 }
1006 1021
1007 return 0; 1022 return 0;
1023err_free:
1024 kfree(usbhid);
1008err: 1025err:
1009 hid_destroy_device(hid); 1026 hid_destroy_device(hid);
1010 return ret; 1027 return ret;
@@ -1013,11 +1030,14 @@ err:
1013static void hid_disconnect(struct usb_interface *intf) 1030static void hid_disconnect(struct usb_interface *intf)
1014{ 1031{
1015 struct hid_device *hid = usb_get_intfdata(intf); 1032 struct hid_device *hid = usb_get_intfdata(intf);
1033 struct usbhid_device *usbhid;
1016 1034
1017 if (WARN_ON(!hid)) 1035 if (WARN_ON(!hid))
1018 return; 1036 return;
1019 1037
1038 usbhid = hid->driver_data;
1020 hid_destroy_device(hid); 1039 hid_destroy_device(hid);
1040 kfree(usbhid);
1021} 1041}
1022 1042
1023static int hid_suspend(struct usb_interface *intf, pm_message_t message) 1043static int hid_suspend(struct usb_interface *intf, pm_message_t message)
@@ -1025,10 +1045,13 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
1025 struct hid_device *hid = usb_get_intfdata (intf); 1045 struct hid_device *hid = usb_get_intfdata (intf);
1026 struct usbhid_device *usbhid = hid->driver_data; 1046 struct usbhid_device *usbhid = hid->driver_data;
1027 1047
1048 if (!test_bit(HID_STARTED, &usbhid->iofl))
1049 return 0;
1050
1028 spin_lock_irq(&usbhid->inlock); /* Sync with error handler */ 1051 spin_lock_irq(&usbhid->inlock); /* Sync with error handler */
1029 set_bit(HID_SUSPENDED, &usbhid->iofl); 1052 set_bit(HID_SUSPENDED, &usbhid->iofl);
1030 spin_unlock_irq(&usbhid->inlock); 1053 spin_unlock_irq(&usbhid->inlock);
1031 del_timer(&usbhid->io_retry); 1054 del_timer_sync(&usbhid->io_retry);
1032 usb_kill_urb(usbhid->urbin); 1055 usb_kill_urb(usbhid->urbin);
1033 dev_dbg(&intf->dev, "suspend\n"); 1056 dev_dbg(&intf->dev, "suspend\n");
1034 return 0; 1057 return 0;
@@ -1040,6 +1063,9 @@ static int hid_resume(struct usb_interface *intf)
1040 struct usbhid_device *usbhid = hid->driver_data; 1063 struct usbhid_device *usbhid = hid->driver_data;
1041 int status; 1064 int status;
1042 1065
1066 if (!test_bit(HID_STARTED, &usbhid->iofl))
1067 return 0;
1068
1043 clear_bit(HID_SUSPENDED, &usbhid->iofl); 1069 clear_bit(HID_SUSPENDED, &usbhid->iofl);
1044 usbhid->retry_delay = 0; 1070 usbhid->retry_delay = 0;
1045 status = hid_start_in(hid); 1071 status = hid_start_in(hid);
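A hedged sketch of the two probe-time checks the usbhid/hid-core.c hunks above introduce: reject an interface with no interrupt-IN endpoint before anything is allocated, and tag boot-protocol mouse interfaces so hid_ignore() can apply hid_mouse_ignore_list. usb_endpoint_is_int_in() and USB_INTERFACE_PROTOCOL_MOUSE are standard USB-core/ch9 definitions; the rest mirrors the patch.

struct usb_host_interface *alt = intf->cur_altsetting;
unsigned int n, has_in = 0;

for (n = 0; n < alt->desc.bNumEndpoints; n++)
	if (usb_endpoint_is_int_in(&alt->endpoint[n].desc))
		has_in++;		/* found an interrupt-IN pipe */
if (!has_in)
	return -ENODEV;			/* fail early, before hid_allocate_device() */

if (alt->desc.bInterfaceProtocol == USB_INTERFACE_PROTOCOL_MOUSE)
	hid->type = HID_TYPE_USBMOUSE;	/* consulted against hid_mouse_ignore_list */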
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index babd65dd46ad..83e851a5ed30 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -242,8 +242,6 @@ static int hiddev_release(struct inode * inode, struct file * file)
242 struct hiddev_list *list = file->private_data; 242 struct hiddev_list *list = file->private_data;
243 unsigned long flags; 243 unsigned long flags;
244 244
245 hiddev_fasync(-1, file, 0);
246
247 spin_lock_irqsave(&list->hiddev->list_lock, flags); 245 spin_lock_irqsave(&list->hiddev->list_lock, flags);
248 list_del(&list->node); 246 list_del(&list->node);
249 spin_unlock_irqrestore(&list->hiddev->list_lock, flags); 247 spin_unlock_irqrestore(&list->hiddev->list_lock, flags);
@@ -436,8 +434,7 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
436 if (copy_to_user(user_arg, uref, sizeof(*uref))) 434 if (copy_to_user(user_arg, uref, sizeof(*uref)))
437 goto fault; 435 goto fault;
438 436
439 kfree(uref_multi); 437 goto goodreturn;
440 return 0;
441 438
442 default: 439 default:
443 if (cmd != HIDIOCGUSAGE && 440 if (cmd != HIDIOCGUSAGE &&
diff --git a/drivers/hid/usbhid/usbhid.h b/drivers/hid/usbhid/usbhid.h
index abedb13c623e..332abcdf9956 100644
--- a/drivers/hid/usbhid/usbhid.h
+++ b/drivers/hid/usbhid/usbhid.h
@@ -27,6 +27,7 @@
27#include <linux/types.h> 27#include <linux/types.h>
28#include <linux/slab.h> 28#include <linux/slab.h>
29#include <linux/list.h> 29#include <linux/list.h>
30#include <linux/mutex.h>
30#include <linux/timer.h> 31#include <linux/timer.h>
31#include <linux/wait.h> 32#include <linux/wait.h>
32#include <linux/workqueue.h> 33#include <linux/workqueue.h>
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 6de1e0ffd391..c709e821f04b 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -159,6 +159,16 @@ config SENSORS_ADM9240
159 This driver can also be built as a module. If so, the module 159 This driver can also be built as a module. If so, the module
160 will be called adm9240. 160 will be called adm9240.
161 161
162config SENSORS_ADT7462
163 tristate "Analog Devices ADT7462"
164 depends on I2C && EXPERIMENTAL
165 help
166 If you say yes here you get support for the Analog Devices
167 ADT7462 temperature monitoring chips.
168
169 This driver can also be built as a module. If so, the module
170 will be called adt7462.
171
162config SENSORS_ADT7470 172config SENSORS_ADT7470
163 tristate "Analog Devices ADT7470" 173 tristate "Analog Devices ADT7470"
164 depends on I2C && EXPERIMENTAL 174 depends on I2C && EXPERIMENTAL
@@ -825,6 +835,25 @@ config SENSORS_HDAPS
825 Say Y here if you have an applicable laptop and want to experience 835 Say Y here if you have an applicable laptop and want to experience
826 the awesome power of hdaps. 836 the awesome power of hdaps.
827 837
838config SENSORS_LIS3LV02D
839 tristate "STMicroeletronics LIS3LV02Dx three-axis digital accelerometer"
840 depends on ACPI && INPUT
841 default n
842 help
843 This driver provides support for the LIS3LV02Dx accelerometer. In
844 particular, it can be found in a number of HP laptops, which have the
845 "Mobile Data Protection System 3D" or "3D DriveGuard" feature. On such
846 systems the driver should load automatically (via ACPI). The
847 accelerometer might also be found in other systems, connected via SPI
848 or I2C. The accelerometer data is readable via
849 /sys/devices/platform/lis3lv02d.
850
851 This driver also provides an absolute input class device, allowing
852 the laptop to act as a pinball machine-esque joystick.
853
854 This driver can also be built as a module. If so, the module
855 will be called lis3lv02d.
856
828config SENSORS_APPLESMC 857config SENSORS_APPLESMC
829 tristate "Apple SMC (Motion sensor, light sensor, keyboard backlight)" 858 tristate "Apple SMC (Motion sensor, light sensor, keyboard backlight)"
830 depends on INPUT && X86 859 depends on INPUT && X86
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 042d5a78622e..58fc5be5355d 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_SENSORS_ADM1029) += adm1029.o
25obj-$(CONFIG_SENSORS_ADM1031) += adm1031.o 25obj-$(CONFIG_SENSORS_ADM1031) += adm1031.o
26obj-$(CONFIG_SENSORS_ADM9240) += adm9240.o 26obj-$(CONFIG_SENSORS_ADM9240) += adm9240.o
27obj-$(CONFIG_SENSORS_ADS7828) += ads7828.o 27obj-$(CONFIG_SENSORS_ADS7828) += ads7828.o
28obj-$(CONFIG_SENSORS_ADT7462) += adt7462.o
28obj-$(CONFIG_SENSORS_ADT7470) += adt7470.o 29obj-$(CONFIG_SENSORS_ADT7470) += adt7470.o
29obj-$(CONFIG_SENSORS_ADT7473) += adt7473.o 30obj-$(CONFIG_SENSORS_ADT7473) += adt7473.o
30obj-$(CONFIG_SENSORS_APPLESMC) += applesmc.o 31obj-$(CONFIG_SENSORS_APPLESMC) += applesmc.o
@@ -48,6 +49,7 @@ obj-$(CONFIG_SENSORS_IBMAEM) += ibmaem.o
48obj-$(CONFIG_SENSORS_IBMPEX) += ibmpex.o 49obj-$(CONFIG_SENSORS_IBMPEX) += ibmpex.o
49obj-$(CONFIG_SENSORS_IT87) += it87.o 50obj-$(CONFIG_SENSORS_IT87) += it87.o
50obj-$(CONFIG_SENSORS_K8TEMP) += k8temp.o 51obj-$(CONFIG_SENSORS_K8TEMP) += k8temp.o
52obj-$(CONFIG_SENSORS_LIS3LV02D) += lis3lv02d.o
51obj-$(CONFIG_SENSORS_LM63) += lm63.o 53obj-$(CONFIG_SENSORS_LM63) += lm63.o
52obj-$(CONFIG_SENSORS_LM70) += lm70.o 54obj-$(CONFIG_SENSORS_LM70) += lm70.o
53obj-$(CONFIG_SENSORS_LM75) += lm75.o 55obj-$(CONFIG_SENSORS_LM75) += lm75.o
diff --git a/drivers/hwmon/abituguru3.c b/drivers/hwmon/abituguru3.c
index d9e7a49d6cbf..70bb854086df 100644
--- a/drivers/hwmon/abituguru3.c
+++ b/drivers/hwmon/abituguru3.c
@@ -178,7 +178,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
178 { "+3.3V", 10, 0, 20, 1, 0 }, 178 { "+3.3V", 10, 0, 20, 1, 0 },
179 { "5VSB", 11, 0, 30, 1, 0 }, 179 { "5VSB", 11, 0, 30, 1, 0 },
180 { "CPU", 24, 1, 1, 1, 0 }, 180 { "CPU", 24, 1, 1, 1, 0 },
181 { "System ", 25, 1, 1, 1, 0 }, 181 { "System", 25, 1, 1, 1, 0 },
182 { "PWM", 26, 1, 1, 1, 0 }, 182 { "PWM", 26, 1, 1, 1, 0 },
183 { "CPU Fan", 32, 2, 60, 1, 0 }, 183 { "CPU Fan", 32, 2, 60, 1, 0 },
184 { "NB Fan", 33, 2, 60, 1, 0 }, 184 { "NB Fan", 33, 2, 60, 1, 0 },
@@ -200,7 +200,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
200 { "+3.3V", 10, 0, 20, 1, 0 }, 200 { "+3.3V", 10, 0, 20, 1, 0 },
201 { "5VSB", 11, 0, 30, 1, 0 }, 201 { "5VSB", 11, 0, 30, 1, 0 },
202 { "CPU", 24, 1, 1, 1, 0 }, 202 { "CPU", 24, 1, 1, 1, 0 },
203 { "System ", 25, 1, 1, 1, 0 }, 203 { "System", 25, 1, 1, 1, 0 },
204 { "PWM1", 26, 1, 1, 1, 0 }, 204 { "PWM1", 26, 1, 1, 1, 0 },
205 { "PWM2", 27, 1, 1, 1, 0 }, 205 { "PWM2", 27, 1, 1, 1, 0 },
206 { "PWM3", 28, 1, 1, 1, 0 }, 206 { "PWM3", 28, 1, 1, 1, 0 },
@@ -229,7 +229,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
229 { "+3.3V", 10, 0, 20, 1, 0 }, 229 { "+3.3V", 10, 0, 20, 1, 0 },
230 { "5VSB", 11, 0, 30, 1, 0 }, 230 { "5VSB", 11, 0, 30, 1, 0 },
231 { "CPU", 24, 1, 1, 1, 0 }, 231 { "CPU", 24, 1, 1, 1, 0 },
232 { "System ", 25, 1, 1, 1, 0 }, 232 { "System", 25, 1, 1, 1, 0 },
233 { "PWM", 26, 1, 1, 1, 0 }, 233 { "PWM", 26, 1, 1, 1, 0 },
234 { "CPU Fan", 32, 2, 60, 1, 0 }, 234 { "CPU Fan", 32, 2, 60, 1, 0 },
235 { "NB Fan", 33, 2, 60, 1, 0 }, 235 { "NB Fan", 33, 2, 60, 1, 0 },
@@ -250,7 +250,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
250 { "+3.3V", 10, 0, 20, 1, 0 }, 250 { "+3.3V", 10, 0, 20, 1, 0 },
251 { "5VSB", 11, 0, 30, 1, 0 }, 251 { "5VSB", 11, 0, 30, 1, 0 },
252 { "CPU", 24, 1, 1, 1, 0 }, 252 { "CPU", 24, 1, 1, 1, 0 },
253 { "System ", 25, 1, 1, 1, 0 }, 253 { "System", 25, 1, 1, 1, 0 },
254 { "PWM", 26, 1, 1, 1, 0 }, 254 { "PWM", 26, 1, 1, 1, 0 },
255 { "CPU Fan", 32, 2, 60, 1, 0 }, 255 { "CPU Fan", 32, 2, 60, 1, 0 },
256 { "NB Fan", 33, 2, 60, 1, 0 }, 256 { "NB Fan", 33, 2, 60, 1, 0 },
@@ -342,7 +342,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
342 { "+3.3V", 10, 0, 20, 1, 0 }, 342 { "+3.3V", 10, 0, 20, 1, 0 },
343 { "5VSB", 11, 0, 30, 1, 0 }, 343 { "5VSB", 11, 0, 30, 1, 0 },
344 { "CPU", 24, 1, 1, 1, 0 }, 344 { "CPU", 24, 1, 1, 1, 0 },
345 { "System ", 25, 1, 1, 1, 0 }, 345 { "System", 25, 1, 1, 1, 0 },
346 { "PWM1", 26, 1, 1, 1, 0 }, 346 { "PWM1", 26, 1, 1, 1, 0 },
347 { "PWM2", 27, 1, 1, 1, 0 }, 347 { "PWM2", 27, 1, 1, 1, 0 },
348 { "PWM3", 28, 1, 1, 1, 0 }, 348 { "PWM3", 28, 1, 1, 1, 0 },
@@ -371,7 +371,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
371 { "+3.3V", 10, 0, 20, 1, 0 }, 371 { "+3.3V", 10, 0, 20, 1, 0 },
372 { "5VSB", 11, 0, 30, 1, 0 }, 372 { "5VSB", 11, 0, 30, 1, 0 },
373 { "CPU", 24, 1, 1, 1, 0 }, 373 { "CPU", 24, 1, 1, 1, 0 },
374 { "System ", 25, 1, 1, 1, 0 }, 374 { "System", 25, 1, 1, 1, 0 },
375 { "PWM", 26, 1, 1, 1, 0 }, 375 { "PWM", 26, 1, 1, 1, 0 },
376 { "CPU Fan", 32, 2, 60, 1, 0 }, 376 { "CPU Fan", 32, 2, 60, 1, 0 },
377 { "NB Fan", 33, 2, 60, 1, 0 }, 377 { "NB Fan", 33, 2, 60, 1, 0 },
@@ -402,7 +402,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
402 { "AUX3 Fan", 36, 2, 60, 1, 0 }, 402 { "AUX3 Fan", 36, 2, 60, 1, 0 },
403 { NULL, 0, 0, 0, 0, 0 } } 403 { NULL, 0, 0, 0, 0, 0 } }
404 }, 404 },
405 { 0x0016, NULL /* AW9D-MAX, need DMI string */, { 405 { 0x0016, "AW9D-MAX (Intel i975-ICH7)", {
406 { "CPU Core", 0, 0, 10, 1, 0 }, 406 { "CPU Core", 0, 0, 10, 1, 0 },
407 { "DDR2", 1, 0, 20, 1, 0 }, 407 { "DDR2", 1, 0, 20, 1, 0 },
408 { "DDR2 VTT", 2, 0, 10, 1, 0 }, 408 { "DDR2 VTT", 2, 0, 10, 1, 0 },
@@ -416,7 +416,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
416 { "+3.3V", 10, 0, 20, 1, 0 }, 416 { "+3.3V", 10, 0, 20, 1, 0 },
417 { "5VSB", 11, 0, 30, 1, 0 }, 417 { "5VSB", 11, 0, 30, 1, 0 },
418 { "CPU", 24, 1, 1, 1, 0 }, 418 { "CPU", 24, 1, 1, 1, 0 },
419 { "System ", 25, 1, 1, 1, 0 }, 419 { "System", 25, 1, 1, 1, 0 },
420 { "PWM1", 26, 1, 1, 1, 0 }, 420 { "PWM1", 26, 1, 1, 1, 0 },
421 { "PWM2", 27, 1, 1, 1, 0 }, 421 { "PWM2", 27, 1, 1, 1, 0 },
422 { "PWM3", 28, 1, 1, 1, 0 }, 422 { "PWM3", 28, 1, 1, 1, 0 },
@@ -446,7 +446,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
446 { "ATX +3.3V", 10, 0, 20, 1, 0 }, 446 { "ATX +3.3V", 10, 0, 20, 1, 0 },
447 { "ATX 5VSB", 11, 0, 30, 1, 0 }, 447 { "ATX 5VSB", 11, 0, 30, 1, 0 },
448 { "CPU", 24, 1, 1, 1, 0 }, 448 { "CPU", 24, 1, 1, 1, 0 },
449 { "System ", 26, 1, 1, 1, 0 }, 449 { "System", 26, 1, 1, 1, 0 },
450 { "PWM", 27, 1, 1, 1, 0 }, 450 { "PWM", 27, 1, 1, 1, 0 },
451 { "CPU FAN", 32, 2, 60, 1, 0 }, 451 { "CPU FAN", 32, 2, 60, 1, 0 },
452 { "SYS FAN", 34, 2, 60, 1, 0 }, 452 { "SYS FAN", 34, 2, 60, 1, 0 },
@@ -469,7 +469,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
469 { "+3.3V", 10, 0, 20, 1, 0 }, 469 { "+3.3V", 10, 0, 20, 1, 0 },
470 { "5VSB", 11, 0, 30, 1, 0 }, 470 { "5VSB", 11, 0, 30, 1, 0 },
471 { "CPU", 24, 1, 1, 1, 0 }, 471 { "CPU", 24, 1, 1, 1, 0 },
472 { "System ", 25, 1, 1, 1, 0 }, 472 { "System", 25, 1, 1, 1, 0 },
473 { "PWM Phase1", 26, 1, 1, 1, 0 }, 473 { "PWM Phase1", 26, 1, 1, 1, 0 },
474 { "PWM Phase2", 27, 1, 1, 1, 0 }, 474 { "PWM Phase2", 27, 1, 1, 1, 0 },
475 { "PWM Phase3", 28, 1, 1, 1, 0 }, 475 { "PWM Phase3", 28, 1, 1, 1, 0 },
@@ -487,7 +487,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
487 { "DDR2", 13, 0, 20, 1, 0 }, 487 { "DDR2", 13, 0, 20, 1, 0 },
488 { "DDR2 VTT", 14, 0, 10, 1, 0 }, 488 { "DDR2 VTT", 14, 0, 10, 1, 0 },
489 { "CPU VTT", 3, 0, 20, 1, 0 }, 489 { "CPU VTT", 3, 0, 20, 1, 0 },
490 { "NB 1.2V ", 4, 0, 10, 1, 0 }, 490 { "NB 1.2V", 4, 0, 10, 1, 0 },
491 { "SB 1.5V", 6, 0, 10, 1, 0 }, 491 { "SB 1.5V", 6, 0, 10, 1, 0 },
492 { "HyperTransport", 5, 0, 10, 1, 0 }, 492 { "HyperTransport", 5, 0, 10, 1, 0 },
493 { "ATX +12V (24-Pin)", 12, 0, 60, 1, 0 }, 493 { "ATX +12V (24-Pin)", 12, 0, 60, 1, 0 },
@@ -496,7 +496,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
496 { "ATX +3.3V", 10, 0, 20, 1, 0 }, 496 { "ATX +3.3V", 10, 0, 20, 1, 0 },
497 { "ATX 5VSB", 11, 0, 30, 1, 0 }, 497 { "ATX 5VSB", 11, 0, 30, 1, 0 },
498 { "CPU", 24, 1, 1, 1, 0 }, 498 { "CPU", 24, 1, 1, 1, 0 },
499 { "System ", 25, 1, 1, 1, 0 }, 499 { "System", 25, 1, 1, 1, 0 },
500 { "PWM Phase1", 26, 1, 1, 1, 0 }, 500 { "PWM Phase1", 26, 1, 1, 1, 0 },
501 { "PWM Phase2", 27, 1, 1, 1, 0 }, 501 { "PWM Phase2", 27, 1, 1, 1, 0 },
502 { "PWM Phase3", 28, 1, 1, 1, 0 }, 502 { "PWM Phase3", 28, 1, 1, 1, 0 },
@@ -523,8 +523,8 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
523 { "+3.3V", 10, 0, 20, 1, 0 }, 523 { "+3.3V", 10, 0, 20, 1, 0 },
524 { "5VSB", 11, 0, 30, 1, 0 }, 524 { "5VSB", 11, 0, 30, 1, 0 },
525 { "CPU", 24, 1, 1, 1, 0 }, 525 { "CPU", 24, 1, 1, 1, 0 },
526 { "System ", 25, 1, 1, 1, 0 }, 526 { "System", 25, 1, 1, 1, 0 },
527 { "PWM ", 26, 1, 1, 1, 0 }, 527 { "PWM", 26, 1, 1, 1, 0 },
528 { "PWM Phase2", 27, 1, 1, 1, 0 }, 528 { "PWM Phase2", 27, 1, 1, 1, 0 },
529 { "PWM Phase3", 28, 1, 1, 1, 0 }, 529 { "PWM Phase3", 28, 1, 1, 1, 0 },
530 { "PWM Phase4", 29, 1, 1, 1, 0 }, 530 { "PWM Phase4", 29, 1, 1, 1, 0 },
@@ -947,7 +947,7 @@ static int __devinit abituguru3_probe(struct platform_device *pdev)
947 if (!abituguru3_motherboards[i].dmi_name) { 947 if (!abituguru3_motherboards[i].dmi_name) {
948 printk(KERN_WARNING ABIT_UGURU3_NAME ": this motherboard was " 948 printk(KERN_WARNING ABIT_UGURU3_NAME ": this motherboard was "
949 "not detected using DMI. Please send the output of " 949 "not detected using DMI. Please send the output of "
950 "\"dmidecode\" to the abituguru3 maintainer" 950 "\"dmidecode\" to the abituguru3 maintainer "
951 "(see MAINTAINERS)\n"); 951 "(see MAINTAINERS)\n");
952 } 952 }
953#endif 953#endif
diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c
new file mode 100644
index 000000000000..66107b4dc12a
--- /dev/null
+++ b/drivers/hwmon/adt7462.c
@@ -0,0 +1,2002 @@
1/*
2 * A hwmon driver for the Analog Devices ADT7462
3 * Copyright (C) 2008 IBM
4 *
5 * Author: Darrick J. Wong <djwong@us.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22#include <linux/module.h>
23#include <linux/jiffies.h>
24#include <linux/i2c.h>
25#include <linux/hwmon.h>
26#include <linux/hwmon-sysfs.h>
27#include <linux/err.h>
28#include <linux/mutex.h>
29#include <linux/delay.h>
30#include <linux/log2.h>
31
32/* Addresses to scan */
33static const unsigned short normal_i2c[] = { 0x58, 0x5C, I2C_CLIENT_END };
34
35/* Insmod parameters */
36I2C_CLIENT_INSMOD_1(adt7462);
37
38/* ADT7462 registers */
39#define ADT7462_REG_DEVICE 0x3D
40#define ADT7462_REG_VENDOR 0x3E
41#define ADT7462_REG_REVISION 0x3F
42
43#define ADT7462_REG_MIN_TEMP_BASE_ADDR 0x44
44#define ADT7462_REG_MIN_TEMP_MAX_ADDR 0x47
45#define ADT7462_REG_MAX_TEMP_BASE_ADDR 0x48
46#define ADT7462_REG_MAX_TEMP_MAX_ADDR 0x4B
47#define ADT7462_REG_TEMP_BASE_ADDR 0x88
48#define ADT7462_REG_TEMP_MAX_ADDR 0x8F
49
50#define ADT7462_REG_FAN_BASE_ADDR 0x98
51#define ADT7462_REG_FAN_MAX_ADDR 0x9F
52#define ADT7462_REG_FAN2_BASE_ADDR 0xA2
53#define ADT7462_REG_FAN2_MAX_ADDR 0xA9
54#define ADT7462_REG_FAN_ENABLE 0x07
55#define ADT7462_REG_FAN_MIN_BASE_ADDR 0x78
56#define ADT7462_REG_FAN_MIN_MAX_ADDR 0x7F
57
58#define ADT7462_REG_CFG2 0x02
59#define ADT7462_FSPD_MASK 0x20
60
61#define ADT7462_REG_PWM_BASE_ADDR 0xAA
62#define ADT7462_REG_PWM_MAX_ADDR 0xAD
63#define ADT7462_REG_PWM_MIN_BASE_ADDR 0x28
64#define ADT7462_REG_PWM_MIN_MAX_ADDR 0x2B
65#define ADT7462_REG_PWM_MAX 0x2C
66#define ADT7462_REG_PWM_TEMP_MIN_BASE_ADDR 0x5C
67#define ADT7462_REG_PWM_TEMP_MIN_MAX_ADDR 0x5F
68#define ADT7462_REG_PWM_TEMP_RANGE_BASE_ADDR 0x60
69#define ADT7462_REG_PWM_TEMP_RANGE_MAX_ADDR 0x63
70#define ADT7462_PWM_HYST_MASK 0x0F
71#define ADT7462_PWM_RANGE_MASK 0xF0
72#define ADT7462_PWM_RANGE_SHIFT 4
73#define ADT7462_REG_PWM_CFG_BASE_ADDR 0x21
74#define ADT7462_REG_PWM_CFG_MAX_ADDR 0x24
75#define ADT7462_PWM_CHANNEL_MASK 0xE0
76#define ADT7462_PWM_CHANNEL_SHIFT 5
77
78#define ADT7462_REG_PIN_CFG_BASE_ADDR 0x10
79#define ADT7462_REG_PIN_CFG_MAX_ADDR 0x13
80#define ADT7462_PIN7_INPUT 0x01 /* cfg0 */
81#define ADT7462_DIODE3_INPUT 0x20
82#define ADT7462_DIODE1_INPUT 0x40
83#define ADT7462_VID_INPUT 0x80
84#define ADT7462_PIN22_INPUT 0x04 /* cfg1 */
85#define ADT7462_PIN21_INPUT 0x08
86#define ADT7462_PIN19_INPUT 0x10
87#define ADT7462_PIN15_INPUT 0x20
88#define ADT7462_PIN13_INPUT 0x40
89#define ADT7462_PIN8_INPUT 0x80
90#define ADT7462_PIN23_MASK 0x03
91#define ADT7462_PIN23_SHIFT 0
92#define ADT7462_PIN26_MASK 0x0C /* cfg2 */
93#define ADT7462_PIN26_SHIFT 2
94#define ADT7462_PIN25_MASK 0x30
95#define ADT7462_PIN25_SHIFT 4
96#define ADT7462_PIN24_MASK 0xC0
97#define ADT7462_PIN24_SHIFT 6
98#define ADT7462_PIN26_VOLT_INPUT 0x08
99#define ADT7462_PIN25_VOLT_INPUT 0x20
100#define ADT7462_PIN28_SHIFT 6 /* cfg3 */
101#define ADT7462_PIN28_VOLT 0x5
102
103#define ADT7462_REG_ALARM1 0xB8
104#define ADT7462_LT_ALARM 0x02
105#define ADT7462_R1T_ALARM 0x04
106#define ADT7462_R2T_ALARM 0x08
107#define ADT7462_R3T_ALARM 0x10
108#define ADT7462_REG_ALARM2 0xBB
109#define ADT7462_V0_ALARM 0x01
110#define ADT7462_V1_ALARM 0x02
111#define ADT7462_V2_ALARM 0x04
112#define ADT7462_V3_ALARM 0x08
113#define ADT7462_V4_ALARM 0x10
114#define ADT7462_V5_ALARM 0x20
115#define ADT7462_V6_ALARM 0x40
116#define ADT7462_V7_ALARM 0x80
117#define ADT7462_REG_ALARM3 0xBC
118#define ADT7462_V8_ALARM 0x08
119#define ADT7462_V9_ALARM 0x10
120#define ADT7462_V10_ALARM 0x20
121#define ADT7462_V11_ALARM 0x40
122#define ADT7462_V12_ALARM 0x80
123#define ADT7462_REG_ALARM4 0xBD
124#define ADT7462_F0_ALARM 0x01
125#define ADT7462_F1_ALARM 0x02
126#define ADT7462_F2_ALARM 0x04
127#define ADT7462_F3_ALARM 0x08
128#define ADT7462_F4_ALARM 0x10
129#define ADT7462_F5_ALARM 0x20
130#define ADT7462_F6_ALARM 0x40
131#define ADT7462_F7_ALARM 0x80
132#define ADT7462_ALARM1 0x0000
133#define ADT7462_ALARM2 0x0100
134#define ADT7462_ALARM3 0x0200
135#define ADT7462_ALARM4 0x0300
136#define ADT7462_ALARM_REG_SHIFT 8
137#define ADT7462_ALARM_FLAG_MASK	0xFF
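/*
 * The sysfs attribute index packs the alarm register selector into the
 * high byte and the status bit into the low byte, e.g.
 * ADT7462_ALARM4 | ADT7462_F7_ALARM == 0x0380 -> alarms[3] & 0x80.
 */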
138
139#define ADT7462_TEMP_COUNT 4
140#define ADT7462_TEMP_REG(x) (ADT7462_REG_TEMP_BASE_ADDR + (x * 2))
141#define ADT7462_TEMP_MIN_REG(x) (ADT7462_REG_MIN_TEMP_BASE_ADDR + (x))
142#define ADT7462_TEMP_MAX_REG(x) (ADT7462_REG_MAX_TEMP_BASE_ADDR + (x))
143#define TEMP_FRAC_OFFSET 6
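/*
 * Temperature registers carry a 64 degree offset and the top two bits of
 * the fractional register add 0.25 degree steps: a reading of 89 (0x59)
 * with fraction bits 10b is reported as (89 - 64) * 1000 + 2 * 250 =
 * 25500 (25.5 degC).
 */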
144
145#define ADT7462_FAN_COUNT 8
146#define ADT7462_REG_FAN_MIN(x) (ADT7462_REG_FAN_MIN_BASE_ADDR + (x))
147
148#define ADT7462_PWM_COUNT 4
149#define ADT7462_REG_PWM(x) (ADT7462_REG_PWM_BASE_ADDR + (x))
150#define ADT7462_REG_PWM_MIN(x) (ADT7462_REG_PWM_MIN_BASE_ADDR + (x))
151#define ADT7462_REG_PWM_TMIN(x) \
152 (ADT7462_REG_PWM_TEMP_MIN_BASE_ADDR + (x))
153#define ADT7462_REG_PWM_TRANGE(x) \
154 (ADT7462_REG_PWM_TEMP_RANGE_BASE_ADDR + (x))
155
156#define ADT7462_PIN_CFG_REG_COUNT 4
157#define ADT7462_REG_PIN_CFG(x) (ADT7462_REG_PIN_CFG_BASE_ADDR + (x))
158#define ADT7462_REG_PWM_CFG(x) (ADT7462_REG_PWM_CFG_BASE_ADDR + (x))
159
160#define ADT7462_ALARM_REG_COUNT 4
161
162/*
163 * The chip can measure 13 different voltage sources:
164 *
165 * 1. +12V1 (pin 7)
166 * 2. Vccp1/+2.5V/+1.8V/+1.5V (pin 23)
167 * 3. +12V3 (pin 22)
168 * 4. +5V (pin 21)
169 * 5. +1.25V/+0.9V (pin 19)
170 * 6. +2.5V/+1.8V (pin 15)
171 * 7. +3.3V (pin 13)
172 * 8. +12V2 (pin 8)
173 * 9. Vbatt/FSB_Vtt (pin 26)
174 * A. +3.3V/+1.2V1 (pin 25)
175 * B. Vccp2/+2.5V/+1.8V/+1.5V (pin 24)
176 * C. +1.5V ICH (only if BOTH pin 28/29 are set to +1.5V)
177 * D. +1.5V 3GPIO (only if BOTH pin 28/29 are set to +1.5V)
178 *
179 * Each of these 13 has a factor to convert raw to voltage. Even better,
180 * the pins can be connected to other sensors (tach/gpio/hot/etc), which
181 * makes the bookkeeping tricky.
182 *
183 * Some, but not all, of these voltages have low/high limits.
184 */
185#define ADT7462_VOLT_COUNT	13
186
187#define ADT7462_VENDOR 0x41
188#define ADT7462_DEVICE 0x62
189/* datasheet only mentions a revision 4 */
190#define ADT7462_REVISION 0x04
191
192/* How often do we reread sensor values? (In jiffies) */
193#define SENSOR_REFRESH_INTERVAL (2 * HZ)
194
195/* How often do we reread sensor limit values? (In jiffies) */
196#define LIMIT_REFRESH_INTERVAL (60 * HZ)
197
198/* datasheet says to divide this number by the fan reading to get fan rpm */
199#define FAN_PERIOD_TO_RPM(x) ((90000 * 60) / (x))
200#define FAN_RPM_TO_PERIOD FAN_PERIOD_TO_RPM
201#define FAN_PERIOD_INVALID 65535
202#define FAN_DATA_VALID(x) ((x) && (x) != FAN_PERIOD_INVALID)
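/*
 * Example: a tach period of 5400 corresponds to (90000 * 60) / 5400 =
 * 1000 RPM; readings of 0 and 65535 mean no valid measurement.
 */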
203
204#define MASK_AND_SHIFT(value, prefix) \
205 (((value) & prefix##_MASK) >> prefix##_SHIFT)
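/*
 * e.g. MASK_AND_SHIFT(data->pin_cfg[2], ADT7462_PIN25) expands to
 * ((data->pin_cfg[2] & 0x30) >> 4).
 */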
206
207#define ROUND_DIV(x, divisor) (((x) + ((divisor) / 2)) / (divisor))
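/*
 * Rounds to nearest for non-negative values:
 * ROUND_DIV(1499, 1000) == 1, ROUND_DIV(1500, 1000) == 2.
 */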
208
209struct adt7462_data {
210 struct device *hwmon_dev;
211 struct attribute_group attrs;
212 struct mutex lock;
213 char sensors_valid;
214 char limits_valid;
215 unsigned long sensors_last_updated; /* In jiffies */
216 unsigned long limits_last_updated; /* In jiffies */
217
218 u8 temp[ADT7462_TEMP_COUNT];
219	/* bits 6-7 hold the quarter-degree fraction of temp */
220 u8 temp_frac[ADT7462_TEMP_COUNT];
221 u8 temp_min[ADT7462_TEMP_COUNT];
222 u8 temp_max[ADT7462_TEMP_COUNT];
223 u16 fan[ADT7462_FAN_COUNT];
224 u8 fan_enabled;
225 u8 fan_min[ADT7462_FAN_COUNT];
226 u8 cfg2;
227 u8 pwm[ADT7462_PWM_COUNT];
228 u8 pin_cfg[ADT7462_PIN_CFG_REG_COUNT];
229 u8 voltages[ADT7462_VOLT_COUNT];
230 u8 volt_max[ADT7462_VOLT_COUNT];
231 u8 volt_min[ADT7462_VOLT_COUNT];
232 u8 pwm_min[ADT7462_PWM_COUNT];
233 u8 pwm_tmin[ADT7462_PWM_COUNT];
234 u8 pwm_trange[ADT7462_PWM_COUNT];
235 u8 pwm_max; /* only one per chip */
236 u8 pwm_cfg[ADT7462_PWM_COUNT];
237 u8 alarms[ADT7462_ALARM_REG_COUNT];
238};
239
240static int adt7462_probe(struct i2c_client *client,
241 const struct i2c_device_id *id);
242static int adt7462_detect(struct i2c_client *client, int kind,
243 struct i2c_board_info *info);
244static int adt7462_remove(struct i2c_client *client);
245
246static const struct i2c_device_id adt7462_id[] = {
247 { "adt7462", adt7462 },
248 { }
249};
250MODULE_DEVICE_TABLE(i2c, adt7462_id);
251
252static struct i2c_driver adt7462_driver = {
253 .class = I2C_CLASS_HWMON,
254 .driver = {
255 .name = "adt7462",
256 },
257 .probe = adt7462_probe,
258 .remove = adt7462_remove,
259 .id_table = adt7462_id,
260 .detect = adt7462_detect,
261 .address_data = &addr_data,
262};
263
264/*
265 * 16-bit registers on the ADT7462 are low-byte first. The data sheet says
266 * that the low byte must be read before the high byte.
267 */
268static inline int adt7462_read_word_data(struct i2c_client *client, u8 reg)
269{
270 u16 foo;
271 foo = i2c_smbus_read_byte_data(client, reg);
272 foo |= ((u16)i2c_smbus_read_byte_data(client, reg + 1) << 8);
273 return foo;
274}
275
276/* For some reason these registers are not contiguous. */
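/* Fans 0-3 live at 0x98/0x9A/0x9C/0x9E, fans 4-7 at 0xA2/0xA4/0xA6/0xA8. */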
277static int ADT7462_REG_FAN(int fan)
278{
279 if (fan < 4)
280 return ADT7462_REG_FAN_BASE_ADDR + (2 * fan);
281 return ADT7462_REG_FAN2_BASE_ADDR + (2 * (fan - 4));
282}
283
284/* Voltage registers are scattered everywhere */
285static int ADT7462_REG_VOLT_MAX(struct adt7462_data *data, int which)
286{
287 switch (which) {
288 case 0:
289 if (!(data->pin_cfg[0] & ADT7462_PIN7_INPUT))
290 return 0x7C;
291 break;
292 case 1:
293 return 0x69;
294 case 2:
295 if (!(data->pin_cfg[1] & ADT7462_PIN22_INPUT))
296 return 0x7F;
297 break;
298 case 3:
299 if (!(data->pin_cfg[1] & ADT7462_PIN21_INPUT))
300 return 0x7E;
301 break;
302 case 4:
303 if (!(data->pin_cfg[0] & ADT7462_DIODE3_INPUT))
304 return 0x4B;
305 break;
306 case 5:
307 if (!(data->pin_cfg[0] & ADT7462_DIODE1_INPUT))
308 return 0x49;
309 break;
310 case 6:
311 if (!(data->pin_cfg[1] & ADT7462_PIN13_INPUT))
312 return 0x68;
313 break;
314 case 7:
315 if (!(data->pin_cfg[1] & ADT7462_PIN8_INPUT))
316 return 0x7D;
317 break;
318 case 8:
319 if (!(data->pin_cfg[2] & ADT7462_PIN26_VOLT_INPUT))
320 return 0x6C;
321 break;
322 case 9:
323 if (!(data->pin_cfg[2] & ADT7462_PIN25_VOLT_INPUT))
324 return 0x6B;
325 break;
326 case 10:
327 return 0x6A;
328 case 11:
329 if (data->pin_cfg[3] >> ADT7462_PIN28_SHIFT ==
330 ADT7462_PIN28_VOLT &&
331 !(data->pin_cfg[0] & ADT7462_VID_INPUT))
332 return 0x50;
333 break;
334 case 12:
335 if (data->pin_cfg[3] >> ADT7462_PIN28_SHIFT ==
336 ADT7462_PIN28_VOLT &&
337 !(data->pin_cfg[0] & ADT7462_VID_INPUT))
338 return 0x4C;
339 break;
340 }
341	return 0;	/* channel not wired as a voltage input */
342}
343
344static int ADT7462_REG_VOLT_MIN(struct adt7462_data *data, int which)
345{
346 switch (which) {
347 case 0:
348 if (!(data->pin_cfg[0] & ADT7462_PIN7_INPUT))
349 return 0x6D;
350 break;
351 case 1:
352 return 0x72;
353 case 2:
354 if (!(data->pin_cfg[1] & ADT7462_PIN22_INPUT))
355 return 0x6F;
356 break;
357 case 3:
358 if (!(data->pin_cfg[1] & ADT7462_PIN21_INPUT))
359 return 0x71;
360 break;
361 case 4:
362 if (!(data->pin_cfg[0] & ADT7462_DIODE3_INPUT))
363 return 0x47;
364 break;
365 case 5:
366 if (!(data->pin_cfg[0] & ADT7462_DIODE1_INPUT))
367 return 0x45;
368 break;
369 case 6:
370 if (!(data->pin_cfg[1] & ADT7462_PIN13_INPUT))
371 return 0x70;
372 break;
373 case 7:
374 if (!(data->pin_cfg[1] & ADT7462_PIN8_INPUT))
375 return 0x6E;
376 break;
377 case 8:
378 if (!(data->pin_cfg[2] & ADT7462_PIN26_VOLT_INPUT))
379 return 0x75;
380 break;
381 case 9:
382 if (!(data->pin_cfg[2] & ADT7462_PIN25_VOLT_INPUT))
383 return 0x74;
384 break;
385 case 10:
386 return 0x73;
387 case 11:
388 if (data->pin_cfg[3] >> ADT7462_PIN28_SHIFT ==
389 ADT7462_PIN28_VOLT &&
390 !(data->pin_cfg[0] & ADT7462_VID_INPUT))
391 return 0x76;
392 break;
393 case 12:
394 if (data->pin_cfg[3] >> ADT7462_PIN28_SHIFT ==
395 ADT7462_PIN28_VOLT &&
396 !(data->pin_cfg[0] & ADT7462_VID_INPUT))
397 return 0x77;
398 break;
399 }
400	return 0;	/* channel not wired as a voltage input */
401}
402
403static int ADT7462_REG_VOLT(struct adt7462_data *data, int which)
404{
405 switch (which) {
406 case 0:
407 if (!(data->pin_cfg[0] & ADT7462_PIN7_INPUT))
408 return 0xA3;
409 break;
410 case 1:
411 return 0x90;
412 case 2:
413 if (!(data->pin_cfg[1] & ADT7462_PIN22_INPUT))
414 return 0xA9;
415 break;
416 case 3:
417 if (!(data->pin_cfg[1] & ADT7462_PIN21_INPUT))
418 return 0xA7;
419 break;
420 case 4:
421 if (!(data->pin_cfg[0] & ADT7462_DIODE3_INPUT))
422 return 0x8F;
423 break;
424 case 5:
425 if (!(data->pin_cfg[0] & ADT7462_DIODE1_INPUT))
426 return 0x8B;
427 break;
428 case 6:
429 if (!(data->pin_cfg[1] & ADT7462_PIN13_INPUT))
430 return 0x96;
431 break;
432 case 7:
433 if (!(data->pin_cfg[1] & ADT7462_PIN8_INPUT))
434 return 0xA5;
435 break;
436 case 8:
437 if (!(data->pin_cfg[2] & ADT7462_PIN26_VOLT_INPUT))
438 return 0x93;
439 break;
440 case 9:
441 if (!(data->pin_cfg[2] & ADT7462_PIN25_VOLT_INPUT))
442 return 0x92;
443 break;
444 case 10:
445 return 0x91;
446 case 11:
447 if (data->pin_cfg[3] >> ADT7462_PIN28_SHIFT ==
448 ADT7462_PIN28_VOLT &&
449 !(data->pin_cfg[0] & ADT7462_VID_INPUT))
450 return 0x94;
451 break;
452 case 12:
453 if (data->pin_cfg[3] >> ADT7462_PIN28_SHIFT ==
454 ADT7462_PIN28_VOLT &&
455 !(data->pin_cfg[0] & ADT7462_VID_INPUT))
456 return 0x95;
457 break;
458 }
459	return 0;	/* channel not wired as a voltage input */
460}
461
462/* Provide labels for sysfs */
463static const char *voltage_label(struct adt7462_data *data, int which)
464{
465 switch (which) {
466 case 0:
467 if (!(data->pin_cfg[0] & ADT7462_PIN7_INPUT))
468 return "+12V1";
469 break;
470 case 1:
471 switch (MASK_AND_SHIFT(data->pin_cfg[1], ADT7462_PIN23)) {
472 case 0:
473 return "Vccp1";
474 case 1:
475 return "+2.5V";
476 case 2:
477 return "+1.8V";
478 case 3:
479 return "+1.5V";
480 }
481 case 2:
482 if (!(data->pin_cfg[1] & ADT7462_PIN22_INPUT))
483 return "+12V3";
484 break;
485 case 3:
486 if (!(data->pin_cfg[1] & ADT7462_PIN21_INPUT))
487 return "+5V";
488 break;
489 case 4:
490 if (!(data->pin_cfg[0] & ADT7462_DIODE3_INPUT)) {
491 if (data->pin_cfg[1] & ADT7462_PIN19_INPUT)
492 return "+0.9V";
493 return "+1.25V";
494 }
495 break;
496 case 5:
497 if (!(data->pin_cfg[0] & ADT7462_DIODE1_INPUT)) {
498			if (data->pin_cfg[1] & ADT7462_PIN15_INPUT)
499 return "+1.8V";
500 return "+2.5V";
501 }
502 break;
503 case 6:
504 if (!(data->pin_cfg[1] & ADT7462_PIN13_INPUT))
505 return "+3.3V";
506 break;
507 case 7:
508 if (!(data->pin_cfg[1] & ADT7462_PIN8_INPUT))
509 return "+12V2";
510 break;
511 case 8:
512 switch (MASK_AND_SHIFT(data->pin_cfg[2], ADT7462_PIN26)) {
513 case 0:
514 return "Vbatt";
515 case 1:
516 return "FSB_Vtt";
517 }
518 break;
519 case 9:
520 switch (MASK_AND_SHIFT(data->pin_cfg[2], ADT7462_PIN25)) {
521 case 0:
522 return "+3.3V";
523 case 1:
524 return "+1.2V1";
525 }
526 break;
527 case 10:
528 switch (MASK_AND_SHIFT(data->pin_cfg[2], ADT7462_PIN24)) {
529 case 0:
530 return "Vccp2";
531 case 1:
532 return "+2.5V";
533 case 2:
534 return "+1.8V";
535 case 3:
536			return "+1.5V";
537 }
538 case 11:
539 if (data->pin_cfg[3] >> ADT7462_PIN28_SHIFT ==
540 ADT7462_PIN28_VOLT &&
541 !(data->pin_cfg[0] & ADT7462_VID_INPUT))
542 return "+1.5V ICH";
543 break;
544 case 12:
545 if (data->pin_cfg[3] >> ADT7462_PIN28_SHIFT ==
546 ADT7462_PIN28_VOLT &&
547 !(data->pin_cfg[0] & ADT7462_VID_INPUT))
548 return "+1.5V 3GPIO";
549 break;
550 }
551 return "N/A";
552}
553
554/* Multipliers are actually in uV, not mV. */
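/*
 * e.g. the +5V input (26000 uV per LSB) with a raw reading of 200 is
 * reported as 200 * 26000 / 1000 = 5200 mV.
 */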
555static int voltage_multiplier(struct adt7462_data *data, int which)
556{
557 switch (which) {
558 case 0:
559 if (!(data->pin_cfg[0] & ADT7462_PIN7_INPUT))
560 return 62500;
561 break;
562 case 1:
563 switch (MASK_AND_SHIFT(data->pin_cfg[1], ADT7462_PIN23)) {
564 case 0:
565 if (data->pin_cfg[0] & ADT7462_VID_INPUT)
566 return 12500;
567 return 6250;
568 case 1:
569 return 13000;
570 case 2:
571 return 9400;
572 case 3:
573 return 7800;
574 }
575 case 2:
576 if (!(data->pin_cfg[1] & ADT7462_PIN22_INPUT))
577 return 62500;
578 break;
579 case 3:
580 if (!(data->pin_cfg[1] & ADT7462_PIN21_INPUT))
581 return 26000;
582 break;
583 case 4:
584 if (!(data->pin_cfg[0] & ADT7462_DIODE3_INPUT)) {
585 if (data->pin_cfg[1] & ADT7462_PIN19_INPUT)
586 return 4690;
587 return 6500;
588 }
589 break;
590 case 5:
591 if (!(data->pin_cfg[0] & ADT7462_DIODE1_INPUT)) {
592 if (data->pin_cfg[1] & ADT7462_PIN15_INPUT)
593 return 9400;
594 return 13000;
595 }
596 break;
597 case 6:
598 if (!(data->pin_cfg[1] & ADT7462_PIN13_INPUT))
599 return 17200;
600 break;
601 case 7:
602 if (!(data->pin_cfg[1] & ADT7462_PIN8_INPUT))
603 return 62500;
604 break;
605 case 8:
606 switch (MASK_AND_SHIFT(data->pin_cfg[2], ADT7462_PIN26)) {
607 case 0:
608 return 15600;
609 case 1:
610 return 6250;
611 }
612 break;
613 case 9:
614 switch (MASK_AND_SHIFT(data->pin_cfg[2], ADT7462_PIN25)) {
615 case 0:
616 return 17200;
617 case 1:
618 return 6250;
619 }
620 break;
621 case 10:
622 switch (MASK_AND_SHIFT(data->pin_cfg[2], ADT7462_PIN24)) {
623 case 0:
624 return 6250;
625 case 1:
626 return 13000;
627 case 2:
628 return 9400;
629 case 3:
630 return 7800;
631 }
632 case 11:
633 case 12:
634 if (data->pin_cfg[3] >> ADT7462_PIN28_SHIFT ==
635 ADT7462_PIN28_VOLT &&
636 !(data->pin_cfg[0] & ADT7462_VID_INPUT))
637 return 7800;
638 }
639 return 0;
640}
641
642static int temp_enabled(struct adt7462_data *data, int which)
643{
644 switch (which) {
645 case 0:
646 case 2:
647 return 1;
648 case 1:
649 if (data->pin_cfg[0] & ADT7462_DIODE1_INPUT)
650 return 1;
651 break;
652 case 3:
653 if (data->pin_cfg[0] & ADT7462_DIODE3_INPUT)
654 return 1;
655 break;
656 }
657 return 0;
658}
659
660static const char *temp_label(struct adt7462_data *data, int which)
661{
662 switch (which) {
663 case 0:
664 return "local";
665 case 1:
666 if (data->pin_cfg[0] & ADT7462_DIODE1_INPUT)
667 return "remote1";
668 break;
669 case 2:
670 return "remote2";
671 case 3:
672 if (data->pin_cfg[0] & ADT7462_DIODE3_INPUT)
673 return "remote3";
674 break;
675 }
676 return "N/A";
677}
678
679/* Map Trange register values to mC */
680#define NUM_TRANGE_VALUES 16
681static const int trange_values[NUM_TRANGE_VALUES] = {
682 2000,
683 2500,
684 3300,
685 4000,
686 5000,
687 6700,
688 8000,
689 10000,
690 13300,
691 16000,
692 20000,
693 26700,
694 32000,
695 40000,
696 53300,
697 80000
698};
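/*
 * tmax is reported as tmin + trange: a pwm_tmin register of 122 (58 degC)
 * combined with range code 9 (16000 mC) reads back as 74000.
 */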
699
700static int find_trange_value(int trange)
701{
702 int i;
703
704 for (i = 0; i < NUM_TRANGE_VALUES; i++)
705 if (trange_values[i] == trange)
706 return i;
707
708 return -ENODEV;
709}
710
711static struct adt7462_data *adt7462_update_device(struct device *dev)
712{
713 struct i2c_client *client = to_i2c_client(dev);
714 struct adt7462_data *data = i2c_get_clientdata(client);
715 unsigned long local_jiffies = jiffies;
716 int i;
717
718 mutex_lock(&data->lock);
719 if (time_before(local_jiffies, data->sensors_last_updated +
720 SENSOR_REFRESH_INTERVAL)
721 && data->sensors_valid)
722 goto no_sensor_update;
723
724 for (i = 0; i < ADT7462_TEMP_COUNT; i++) {
725 /*
726 * Reading the fractional register locks the integral
727 * register until both have been read.
728 */
729 data->temp_frac[i] = i2c_smbus_read_byte_data(client,
730 ADT7462_TEMP_REG(i));
731 data->temp[i] = i2c_smbus_read_byte_data(client,
732 ADT7462_TEMP_REG(i) + 1);
733 }
734
735 for (i = 0; i < ADT7462_FAN_COUNT; i++)
736 data->fan[i] = adt7462_read_word_data(client,
737 ADT7462_REG_FAN(i));
738
739 data->fan_enabled = i2c_smbus_read_byte_data(client,
740 ADT7462_REG_FAN_ENABLE);
741
742 for (i = 0; i < ADT7462_PWM_COUNT; i++)
743 data->pwm[i] = i2c_smbus_read_byte_data(client,
744 ADT7462_REG_PWM(i));
745
746 for (i = 0; i < ADT7462_PIN_CFG_REG_COUNT; i++)
747 data->pin_cfg[i] = i2c_smbus_read_byte_data(client,
748 ADT7462_REG_PIN_CFG(i));
749
750 for (i = 0; i < ADT7462_VOLT_COUNT; i++) {
751 int reg = ADT7462_REG_VOLT(data, i);
752 if (!reg)
753 data->voltages[i] = 0;
754 else
755 data->voltages[i] = i2c_smbus_read_byte_data(client,
756 reg);
757 }
758
759 data->alarms[0] = i2c_smbus_read_byte_data(client, ADT7462_REG_ALARM1);
760 data->alarms[1] = i2c_smbus_read_byte_data(client, ADT7462_REG_ALARM2);
761 data->alarms[2] = i2c_smbus_read_byte_data(client, ADT7462_REG_ALARM3);
762 data->alarms[3] = i2c_smbus_read_byte_data(client, ADT7462_REG_ALARM4);
763
764 data->sensors_last_updated = local_jiffies;
765 data->sensors_valid = 1;
766
767no_sensor_update:
768 if (time_before(local_jiffies, data->limits_last_updated +
769 LIMIT_REFRESH_INTERVAL)
770 && data->limits_valid)
771 goto out;
772
773 for (i = 0; i < ADT7462_TEMP_COUNT; i++) {
774 data->temp_min[i] = i2c_smbus_read_byte_data(client,
775 ADT7462_TEMP_MIN_REG(i));
776 data->temp_max[i] = i2c_smbus_read_byte_data(client,
777 ADT7462_TEMP_MAX_REG(i));
778 }
779
780 for (i = 0; i < ADT7462_FAN_COUNT; i++)
781 data->fan_min[i] = i2c_smbus_read_byte_data(client,
782 ADT7462_REG_FAN_MIN(i));
783
784 for (i = 0; i < ADT7462_VOLT_COUNT; i++) {
785 int reg = ADT7462_REG_VOLT_MAX(data, i);
786 data->volt_max[i] =
787 (reg ? i2c_smbus_read_byte_data(client, reg) : 0);
788
789 reg = ADT7462_REG_VOLT_MIN(data, i);
790 data->volt_min[i] =
791 (reg ? i2c_smbus_read_byte_data(client, reg) : 0);
792 }
793
794 for (i = 0; i < ADT7462_PWM_COUNT; i++) {
795 data->pwm_min[i] = i2c_smbus_read_byte_data(client,
796 ADT7462_REG_PWM_MIN(i));
797 data->pwm_tmin[i] = i2c_smbus_read_byte_data(client,
798 ADT7462_REG_PWM_TMIN(i));
799 data->pwm_trange[i] = i2c_smbus_read_byte_data(client,
800 ADT7462_REG_PWM_TRANGE(i));
801 data->pwm_cfg[i] = i2c_smbus_read_byte_data(client,
802 ADT7462_REG_PWM_CFG(i));
803 }
804
805 data->pwm_max = i2c_smbus_read_byte_data(client, ADT7462_REG_PWM_MAX);
806
807 data->cfg2 = i2c_smbus_read_byte_data(client, ADT7462_REG_CFG2);
808
809 data->limits_last_updated = local_jiffies;
810 data->limits_valid = 1;
811
812out:
813 mutex_unlock(&data->lock);
814 return data;
815}
816
817static ssize_t show_temp_min(struct device *dev,
818 struct device_attribute *devattr,
819 char *buf)
820{
821 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
822 struct adt7462_data *data = adt7462_update_device(dev);
823
824 if (!temp_enabled(data, attr->index))
825 return sprintf(buf, "0\n");
826
827 return sprintf(buf, "%d\n", 1000 * (data->temp_min[attr->index] - 64));
828}
829
830static ssize_t set_temp_min(struct device *dev,
831 struct device_attribute *devattr,
832 const char *buf,
833 size_t count)
834{
835 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
836 struct i2c_client *client = to_i2c_client(dev);
837 struct adt7462_data *data = i2c_get_clientdata(client);
838 long temp;
839
840 if (strict_strtol(buf, 10, &temp) || !temp_enabled(data, attr->index))
841 return -EINVAL;
842
843 temp = ROUND_DIV(temp, 1000) + 64;
844 temp = SENSORS_LIMIT(temp, 0, 255);
845
846 mutex_lock(&data->lock);
847 data->temp_min[attr->index] = temp;
848 i2c_smbus_write_byte_data(client, ADT7462_TEMP_MIN_REG(attr->index),
849 temp);
850 mutex_unlock(&data->lock);
851
852 return count;
853}
854
855static ssize_t show_temp_max(struct device *dev,
856 struct device_attribute *devattr,
857 char *buf)
858{
859 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
860 struct adt7462_data *data = adt7462_update_device(dev);
861
862 if (!temp_enabled(data, attr->index))
863 return sprintf(buf, "0\n");
864
865 return sprintf(buf, "%d\n", 1000 * (data->temp_max[attr->index] - 64));
866}
867
868static ssize_t set_temp_max(struct device *dev,
869 struct device_attribute *devattr,
870 const char *buf,
871 size_t count)
872{
873 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
874 struct i2c_client *client = to_i2c_client(dev);
875 struct adt7462_data *data = i2c_get_clientdata(client);
876 long temp;
877
878 if (strict_strtol(buf, 10, &temp) || !temp_enabled(data, attr->index))
879 return -EINVAL;
880
881 temp = ROUND_DIV(temp, 1000) + 64;
882 temp = SENSORS_LIMIT(temp, 0, 255);
883
884 mutex_lock(&data->lock);
885 data->temp_max[attr->index] = temp;
886 i2c_smbus_write_byte_data(client, ADT7462_TEMP_MAX_REG(attr->index),
887 temp);
888 mutex_unlock(&data->lock);
889
890 return count;
891}
892
893static ssize_t show_temp(struct device *dev, struct device_attribute *devattr,
894 char *buf)
895{
896 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
897 struct adt7462_data *data = adt7462_update_device(dev);
898 u8 frac = data->temp_frac[attr->index] >> TEMP_FRAC_OFFSET;
899
900 if (!temp_enabled(data, attr->index))
901 return sprintf(buf, "0\n");
902
903 return sprintf(buf, "%d\n", 1000 * (data->temp[attr->index] - 64) +
904 250 * frac);
905}
906
907static ssize_t show_temp_label(struct device *dev,
908 struct device_attribute *devattr,
909 char *buf)
910{
911 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
912 struct adt7462_data *data = adt7462_update_device(dev);
913
914 return sprintf(buf, "%s\n", temp_label(data, attr->index));
915}
916
917static ssize_t show_volt_max(struct device *dev,
918 struct device_attribute *devattr,
919 char *buf)
920{
921 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
922 struct adt7462_data *data = adt7462_update_device(dev);
923 int x = voltage_multiplier(data, attr->index);
924
925 x *= data->volt_max[attr->index];
926 x /= 1000; /* convert from uV to mV */
927
928 return sprintf(buf, "%d\n", x);
929}
930
931static ssize_t set_volt_max(struct device *dev,
932 struct device_attribute *devattr,
933 const char *buf,
934 size_t count)
935{
936 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
937 struct i2c_client *client = to_i2c_client(dev);
938 struct adt7462_data *data = i2c_get_clientdata(client);
939 int x = voltage_multiplier(data, attr->index);
940 long temp;
941
942 if (strict_strtol(buf, 10, &temp) || !x)
943 return -EINVAL;
944
945 temp *= 1000; /* convert mV to uV */
946 temp = ROUND_DIV(temp, x);
947 temp = SENSORS_LIMIT(temp, 0, 255);
948
949 mutex_lock(&data->lock);
950 data->volt_max[attr->index] = temp;
951 i2c_smbus_write_byte_data(client,
952 ADT7462_REG_VOLT_MAX(data, attr->index),
953 temp);
954 mutex_unlock(&data->lock);
955
956 return count;
957}
958
959static ssize_t show_volt_min(struct device *dev,
960 struct device_attribute *devattr,
961 char *buf)
962{
963 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
964 struct adt7462_data *data = adt7462_update_device(dev);
965 int x = voltage_multiplier(data, attr->index);
966
967 x *= data->volt_min[attr->index];
968 x /= 1000; /* convert from uV to mV */
969
970 return sprintf(buf, "%d\n", x);
971}
972
973static ssize_t set_volt_min(struct device *dev,
974 struct device_attribute *devattr,
975 const char *buf,
976 size_t count)
977{
978 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
979 struct i2c_client *client = to_i2c_client(dev);
980 struct adt7462_data *data = i2c_get_clientdata(client);
981 int x = voltage_multiplier(data, attr->index);
982 long temp;
983
984 if (strict_strtol(buf, 10, &temp) || !x)
985 return -EINVAL;
986
987 temp *= 1000; /* convert mV to uV */
988 temp = ROUND_DIV(temp, x);
989 temp = SENSORS_LIMIT(temp, 0, 255);
990
991 mutex_lock(&data->lock);
992 data->volt_min[attr->index] = temp;
993 i2c_smbus_write_byte_data(client,
994 ADT7462_REG_VOLT_MIN(data, attr->index),
995 temp);
996 mutex_unlock(&data->lock);
997
998 return count;
999}
1000
1001static ssize_t show_voltage(struct device *dev,
1002 struct device_attribute *devattr,
1003 char *buf)
1004{
1005 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
1006 struct adt7462_data *data = adt7462_update_device(dev);
1007 int x = voltage_multiplier(data, attr->index);
1008
1009 x *= data->voltages[attr->index];
1010 x /= 1000; /* convert from uV to mV */
1011
1012 return sprintf(buf, "%d\n", x);
1013}
1014
1015static ssize_t show_voltage_label(struct device *dev,
1016 struct device_attribute *devattr,
1017 char *buf)
1018{
1019 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
1020 struct adt7462_data *data = adt7462_update_device(dev);
1021
1022 return sprintf(buf, "%s\n", voltage_label(data, attr->index));
1023}
1024
1025static ssize_t show_alarm(struct device *dev,
1026 struct device_attribute *devattr,
1027 char *buf)
1028{
1029 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
1030 struct adt7462_data *data = adt7462_update_device(dev);
1031 int reg = attr->index >> ADT7462_ALARM_REG_SHIFT;
1032 int mask = attr->index & ADT7462_ALARM_FLAG_MASK;
1033
1034 if (data->alarms[reg] & mask)
1035 return sprintf(buf, "1\n");
1036 else
1037 return sprintf(buf, "0\n");
1038}
1039
1040static int fan_enabled(struct adt7462_data *data, int fan)
1041{
1042 return data->fan_enabled & (1 << fan);
1043}
1044
1045static ssize_t show_fan_min(struct device *dev,
1046 struct device_attribute *devattr,
1047 char *buf)
1048{
1049 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
1050 struct adt7462_data *data = adt7462_update_device(dev);
1051 u16 temp;
1052
1053 /* Only the MSB of the min fan period is stored... */
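	/* e.g. a stored MSB of 0x15 means a period of 0x1500 (5376) -> 1004 RPM */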
1054 temp = data->fan_min[attr->index];
1055 temp <<= 8;
1056
1057 if (!fan_enabled(data, attr->index) ||
1058 !FAN_DATA_VALID(temp))
1059 return sprintf(buf, "0\n");
1060
1061 return sprintf(buf, "%d\n", FAN_PERIOD_TO_RPM(temp));
1062}
1063
1064static ssize_t set_fan_min(struct device *dev,
1065 struct device_attribute *devattr,
1066 const char *buf, size_t count)
1067{
1068 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
1069 struct i2c_client *client = to_i2c_client(dev);
1070 struct adt7462_data *data = i2c_get_clientdata(client);
1071 long temp;
1072
1073 if (strict_strtol(buf, 10, &temp) || !temp ||
1074 !fan_enabled(data, attr->index))
1075 return -EINVAL;
1076
1077 temp = FAN_RPM_TO_PERIOD(temp);
1078 temp >>= 8;
1079 temp = SENSORS_LIMIT(temp, 1, 255);
1080
1081 mutex_lock(&data->lock);
1082 data->fan_min[attr->index] = temp;
1083 i2c_smbus_write_byte_data(client, ADT7462_REG_FAN_MIN(attr->index),
1084 temp);
1085 mutex_unlock(&data->lock);
1086
1087 return count;
1088}
1089
1090static ssize_t show_fan(struct device *dev, struct device_attribute *devattr,
1091 char *buf)
1092{
1093 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
1094 struct adt7462_data *data = adt7462_update_device(dev);
1095
1096 if (!fan_enabled(data, attr->index) ||
1097 !FAN_DATA_VALID(data->fan[attr->index]))
1098 return sprintf(buf, "0\n");
1099
1100 return sprintf(buf, "%d\n",
1101 FAN_PERIOD_TO_RPM(data->fan[attr->index]));
1102}
1103
1104static ssize_t show_force_pwm_max(struct device *dev,
1105 struct device_attribute *devattr,
1106 char *buf)
1107{
1108 struct adt7462_data *data = adt7462_update_device(dev);
1109 return sprintf(buf, "%d\n", (data->cfg2 & ADT7462_FSPD_MASK ? 1 : 0));
1110}
1111
1112static ssize_t set_force_pwm_max(struct device *dev,
1113 struct device_attribute *devattr,
1114 const char *buf,
1115 size_t count)
1116{
1117 struct i2c_client *client = to_i2c_client(dev);
1118 struct adt7462_data *data = i2c_get_clientdata(client);
1119 long temp;
1120 u8 reg;
1121
1122 if (strict_strtol(buf, 10, &temp))
1123 return -EINVAL;
1124
1125 mutex_lock(&data->lock);
1126 reg = i2c_smbus_read_byte_data(client, ADT7462_REG_CFG2);
1127 if (temp)
1128 reg |= ADT7462_FSPD_MASK;
1129 else
1130 reg &= ~ADT7462_FSPD_MASK;
1131 data->cfg2 = reg;
1132 i2c_smbus_write_byte_data(client, ADT7462_REG_CFG2, reg);
1133 mutex_unlock(&data->lock);
1134
1135 return count;
1136}
1137
1138static ssize_t show_pwm(struct device *dev, struct device_attribute *devattr,
1139 char *buf)
1140{
1141 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
1142 struct adt7462_data *data = adt7462_update_device(dev);
1143 return sprintf(buf, "%d\n", data->pwm[attr->index]);
1144}
1145
1146static ssize_t set_pwm(struct device *dev, struct device_attribute *devattr,
1147 const char *buf, size_t count)
1148{
1149 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
1150 struct i2c_client *client = to_i2c_client(dev);
1151 struct adt7462_data *data = i2c_get_clientdata(client);
1152 long temp;
1153
1154 if (strict_strtol(buf, 10, &temp))
1155 return -EINVAL;
1156
1157 temp = SENSORS_LIMIT(temp, 0, 255);
1158
1159 mutex_lock(&data->lock);
1160 data->pwm[attr->index] = temp;
1161 i2c_smbus_write_byte_data(client, ADT7462_REG_PWM(attr->index), temp);
1162 mutex_unlock(&data->lock);
1163
1164 return count;
1165}
1166
1167static ssize_t show_pwm_max(struct device *dev,
1168 struct device_attribute *devattr,
1169 char *buf)
1170{
1171 struct adt7462_data *data = adt7462_update_device(dev);
1172 return sprintf(buf, "%d\n", data->pwm_max);
1173}
1174
1175static ssize_t set_pwm_max(struct device *dev,
1176 struct device_attribute *devattr,
1177 const char *buf,
1178 size_t count)
1179{
1180 struct i2c_client *client = to_i2c_client(dev);
1181 struct adt7462_data *data = i2c_get_clientdata(client);
1182 long temp;
1183
1184 if (strict_strtol(buf, 10, &temp))
1185 return -EINVAL;
1186
1187 temp = SENSORS_LIMIT(temp, 0, 255);
1188
1189 mutex_lock(&data->lock);
1190 data->pwm_max = temp;
1191 i2c_smbus_write_byte_data(client, ADT7462_REG_PWM_MAX, temp);
1192 mutex_unlock(&data->lock);
1193
1194 return count;
1195}
1196
1197static ssize_t show_pwm_min(struct device *dev,
1198 struct device_attribute *devattr,
1199 char *buf)
1200{
1201 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
1202 struct adt7462_data *data = adt7462_update_device(dev);
1203 return sprintf(buf, "%d\n", data->pwm_min[attr->index]);
1204}
1205
1206static ssize_t set_pwm_min(struct device *dev,
1207 struct device_attribute *devattr,
1208 const char *buf,
1209 size_t count)
1210{
1211 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
1212 struct i2c_client *client = to_i2c_client(dev);
1213 struct adt7462_data *data = i2c_get_clientdata(client);
1214 long temp;
1215
1216 if (strict_strtol(buf, 10, &temp))
1217 return -EINVAL;
1218
1219 temp = SENSORS_LIMIT(temp, 0, 255);
1220
1221 mutex_lock(&data->lock);
1222 data->pwm_min[attr->index] = temp;
1223 i2c_smbus_write_byte_data(client, ADT7462_REG_PWM_MIN(attr->index),
1224 temp);
1225 mutex_unlock(&data->lock);
1226
1227 return count;
1228}
1229
1230static ssize_t show_pwm_hyst(struct device *dev,
1231 struct device_attribute *devattr,
1232 char *buf)
1233{
1234 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
1235 struct adt7462_data *data = adt7462_update_device(dev);
1236 return sprintf(buf, "%d\n", 1000 *
1237 (data->pwm_trange[attr->index] & ADT7462_PWM_HYST_MASK));
1238}
1239
1240static ssize_t set_pwm_hyst(struct device *dev,
1241 struct device_attribute *devattr,
1242 const char *buf,
1243 size_t count)
1244{
1245 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
1246 struct i2c_client *client = to_i2c_client(dev);
1247 struct adt7462_data *data = i2c_get_clientdata(client);
1248 long temp;
1249
1250 if (strict_strtol(buf, 10, &temp))
1251 return -EINVAL;
1252
1253 temp = ROUND_DIV(temp, 1000);
1254 temp = SENSORS_LIMIT(temp, 0, 15);
1255
1256 /* package things up */
1257 temp &= ADT7462_PWM_HYST_MASK;
1258 temp |= data->pwm_trange[attr->index] & ADT7462_PWM_RANGE_MASK;
1259
1260 mutex_lock(&data->lock);
1261 data->pwm_trange[attr->index] = temp;
1262 i2c_smbus_write_byte_data(client, ADT7462_REG_PWM_TRANGE(attr->index),
1263 temp);
1264 mutex_unlock(&data->lock);
1265
1266 return count;
1267}
1268
1269static ssize_t show_pwm_tmax(struct device *dev,
1270 struct device_attribute *devattr,
1271 char *buf)
1272{
1273 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
1274 struct adt7462_data *data = adt7462_update_device(dev);
1275
1276 /* tmax = tmin + trange */
1277 int trange = trange_values[data->pwm_trange[attr->index] >>
1278 ADT7462_PWM_RANGE_SHIFT];
1279 int tmin = (data->pwm_tmin[attr->index] - 64) * 1000;
1280
1281 return sprintf(buf, "%d\n", tmin + trange);
1282}
1283
1284static ssize_t set_pwm_tmax(struct device *dev,
1285 struct device_attribute *devattr,
1286 const char *buf,
1287 size_t count)
1288{
1289 int temp;
1290 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
1291 struct i2c_client *client = to_i2c_client(dev);
1292 struct adt7462_data *data = i2c_get_clientdata(client);
1293 int tmin, trange_value;
1294 long trange;
1295
1296 if (strict_strtol(buf, 10, &trange))
1297 return -EINVAL;
1298
1299 /* trange = tmax - tmin */
1300 tmin = (data->pwm_tmin[attr->index] - 64) * 1000;
1301 trange_value = find_trange_value(trange - tmin);
1302
1303 if (trange_value < 0)
1304 return -EINVAL;
1305
1306 temp = trange_value << ADT7462_PWM_RANGE_SHIFT;
1307 temp |= data->pwm_trange[attr->index] & ADT7462_PWM_HYST_MASK;
1308
1309 mutex_lock(&data->lock);
1310 data->pwm_trange[attr->index] = temp;
1311 i2c_smbus_write_byte_data(client, ADT7462_REG_PWM_TRANGE(attr->index),
1312 temp);
1313 mutex_unlock(&data->lock);
1314
1315 return count;
1316}
1317
1318static ssize_t show_pwm_tmin(struct device *dev,
1319 struct device_attribute *devattr,
1320 char *buf)
1321{
1322 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
1323 struct adt7462_data *data = adt7462_update_device(dev);
1324 return sprintf(buf, "%d\n", 1000 * (data->pwm_tmin[attr->index] - 64));
1325}
1326
1327static ssize_t set_pwm_tmin(struct device *dev,
1328 struct device_attribute *devattr,
1329 const char *buf,
1330 size_t count)
1331{
1332 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
1333 struct i2c_client *client = to_i2c_client(dev);
1334 struct adt7462_data *data = i2c_get_clientdata(client);
1335 long temp;
1336
1337 if (strict_strtol(buf, 10, &temp))
1338 return -EINVAL;
1339
1340 temp = ROUND_DIV(temp, 1000) + 64;
1341 temp = SENSORS_LIMIT(temp, 0, 255);
1342
1343 mutex_lock(&data->lock);
1344 data->pwm_tmin[attr->index] = temp;
1345 i2c_smbus_write_byte_data(client, ADT7462_REG_PWM_TMIN(attr->index),
1346 temp);
1347 mutex_unlock(&data->lock);
1348
1349 return count;
1350}
1351
1352static ssize_t show_pwm_auto(struct device *dev,
1353 struct device_attribute *devattr,
1354 char *buf)
1355{
1356 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
1357 struct adt7462_data *data = adt7462_update_device(dev);
1358 int cfg = data->pwm_cfg[attr->index] >> ADT7462_PWM_CHANNEL_SHIFT;
1359
1360 switch (cfg) {
1361 case 4: /* off */
1362 return sprintf(buf, "0\n");
1363 case 7: /* manual */
1364 return sprintf(buf, "1\n");
1365 default: /* automatic */
1366 return sprintf(buf, "2\n");
1367 }
1368}
1369
1370static void set_pwm_channel(struct i2c_client *client,
1371 struct adt7462_data *data,
1372 int which,
1373 int value)
1374{
1375 int temp = data->pwm_cfg[which] & ~ADT7462_PWM_CHANNEL_MASK;
1376 temp |= value << ADT7462_PWM_CHANNEL_SHIFT;
1377
1378 mutex_lock(&data->lock);
1379 data->pwm_cfg[which] = temp;
1380 i2c_smbus_write_byte_data(client, ADT7462_REG_PWM_CFG(which), temp);
1381 mutex_unlock(&data->lock);
1382}
1383
1384static ssize_t set_pwm_auto(struct device *dev,
1385 struct device_attribute *devattr,
1386 const char *buf,
1387 size_t count)
1388{
1389 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
1390 struct i2c_client *client = to_i2c_client(dev);
1391 struct adt7462_data *data = i2c_get_clientdata(client);
1392 long temp;
1393
1394 if (strict_strtol(buf, 10, &temp))
1395 return -EINVAL;
1396
1397 switch (temp) {
1398 case 0: /* off */
1399 set_pwm_channel(client, data, attr->index, 4);
1400 return count;
1401 case 1: /* manual */
1402 set_pwm_channel(client, data, attr->index, 7);
1403 return count;
1404 default:
1405 return -EINVAL;
1406 }
1407}
1408
1409static ssize_t show_pwm_auto_temp(struct device *dev,
1410 struct device_attribute *devattr,
1411 char *buf)
1412{
1413 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
1414 struct adt7462_data *data = adt7462_update_device(dev);
1415 int channel = data->pwm_cfg[attr->index] >> ADT7462_PWM_CHANNEL_SHIFT;
1416
1417 switch (channel) {
1418 case 0: /* temp[1234] only */
1419 case 1:
1420 case 2:
1421 case 3:
1422 return sprintf(buf, "%d\n", (1 << channel));
1423 case 5: /* temp1 & temp4 */
1424 return sprintf(buf, "9\n");
1425 case 6:
1426 return sprintf(buf, "15\n");
1427 default:
1428 return sprintf(buf, "0\n");
1429 }
1430}
1431
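/*
 * Convert the pwmN_auto_channels_temp bitmask back into the channel code
 * used by the PWM config register: 0x1/0x2/0x4/0x8 select a single temp
 * (codes 0-3), 0x9 selects temp1+temp4 (code 5), 0xF selects all four
 * temps (code 6).
 */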
1432static int cvt_auto_temp(int input)
1433{
1434 if (input == 0xF)
1435 return 6;
1436 if (input == 0x9)
1437 return 5;
1438 if (input < 1 || !is_power_of_2(input))
1439 return -EINVAL;
1440 return ilog2(input);
1441}
1442
1443static ssize_t set_pwm_auto_temp(struct device *dev,
1444 struct device_attribute *devattr,
1445 const char *buf,
1446 size_t count)
1447{
1448 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
1449 struct i2c_client *client = to_i2c_client(dev);
1450 struct adt7462_data *data = i2c_get_clientdata(client);
1451 long temp;
1452
1453 if (strict_strtol(buf, 10, &temp))
1454 return -EINVAL;
1455
1456 temp = cvt_auto_temp(temp);
1457 if (temp < 0)
1458 return temp;
1459
1460 set_pwm_channel(client, data, attr->index, temp);
1461
1462 return count;
1463}
1464
1465static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp_max,
1466 set_temp_max, 0);
1467static SENSOR_DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_temp_max,
1468 set_temp_max, 1);
1469static SENSOR_DEVICE_ATTR(temp3_max, S_IWUSR | S_IRUGO, show_temp_max,
1470 set_temp_max, 2);
1471static SENSOR_DEVICE_ATTR(temp4_max, S_IWUSR | S_IRUGO, show_temp_max,
1472 set_temp_max, 3);
1473
1474static SENSOR_DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO, show_temp_min,
1475 set_temp_min, 0);
1476static SENSOR_DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_temp_min,
1477 set_temp_min, 1);
1478static SENSOR_DEVICE_ATTR(temp3_min, S_IWUSR | S_IRUGO, show_temp_min,
1479 set_temp_min, 2);
1480static SENSOR_DEVICE_ATTR(temp4_min, S_IWUSR | S_IRUGO, show_temp_min,
1481 set_temp_min, 3);
1482
1483static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
1484static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1);
1485static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 2);
1486static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, show_temp, NULL, 3);
1487
1488static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, show_temp_label, NULL, 0);
1489static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, show_temp_label, NULL, 1);
1490static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, show_temp_label, NULL, 2);
1491static SENSOR_DEVICE_ATTR(temp4_label, S_IRUGO, show_temp_label, NULL, 3);
1492
1493static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL,
1494 ADT7462_ALARM1 | ADT7462_LT_ALARM);
1495static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL,
1496 ADT7462_ALARM1 | ADT7462_R1T_ALARM);
1497static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL,
1498 ADT7462_ALARM1 | ADT7462_R2T_ALARM);
1499static SENSOR_DEVICE_ATTR(temp4_alarm, S_IRUGO, show_alarm, NULL,
1500 ADT7462_ALARM1 | ADT7462_R3T_ALARM);
1501
1502static SENSOR_DEVICE_ATTR(in1_max, S_IWUSR | S_IRUGO, show_volt_max,
1503 set_volt_max, 0);
1504static SENSOR_DEVICE_ATTR(in2_max, S_IWUSR | S_IRUGO, show_volt_max,
1505 set_volt_max, 1);
1506static SENSOR_DEVICE_ATTR(in3_max, S_IWUSR | S_IRUGO, show_volt_max,
1507 set_volt_max, 2);
1508static SENSOR_DEVICE_ATTR(in4_max, S_IWUSR | S_IRUGO, show_volt_max,
1509 set_volt_max, 3);
1510static SENSOR_DEVICE_ATTR(in5_max, S_IWUSR | S_IRUGO, show_volt_max,
1511 set_volt_max, 4);
1512static SENSOR_DEVICE_ATTR(in6_max, S_IWUSR | S_IRUGO, show_volt_max,
1513 set_volt_max, 5);
1514static SENSOR_DEVICE_ATTR(in7_max, S_IWUSR | S_IRUGO, show_volt_max,
1515 set_volt_max, 6);
1516static SENSOR_DEVICE_ATTR(in8_max, S_IWUSR | S_IRUGO, show_volt_max,
1517 set_volt_max, 7);
1518static SENSOR_DEVICE_ATTR(in9_max, S_IWUSR | S_IRUGO, show_volt_max,
1519 set_volt_max, 8);
1520static SENSOR_DEVICE_ATTR(in10_max, S_IWUSR | S_IRUGO, show_volt_max,
1521 set_volt_max, 9);
1522static SENSOR_DEVICE_ATTR(in11_max, S_IWUSR | S_IRUGO, show_volt_max,
1523 set_volt_max, 10);
1524static SENSOR_DEVICE_ATTR(in12_max, S_IWUSR | S_IRUGO, show_volt_max,
1525 set_volt_max, 11);
1526static SENSOR_DEVICE_ATTR(in13_max, S_IWUSR | S_IRUGO, show_volt_max,
1527 set_volt_max, 12);
1528
1529static SENSOR_DEVICE_ATTR(in1_min, S_IWUSR | S_IRUGO, show_volt_min,
1530 set_volt_min, 0);
1531static SENSOR_DEVICE_ATTR(in2_min, S_IWUSR | S_IRUGO, show_volt_min,
1532 set_volt_min, 1);
1533static SENSOR_DEVICE_ATTR(in3_min, S_IWUSR | S_IRUGO, show_volt_min,
1534 set_volt_min, 2);
1535static SENSOR_DEVICE_ATTR(in4_min, S_IWUSR | S_IRUGO, show_volt_min,
1536 set_volt_min, 3);
1537static SENSOR_DEVICE_ATTR(in5_min, S_IWUSR | S_IRUGO, show_volt_min,
1538 set_volt_min, 4);
1539static SENSOR_DEVICE_ATTR(in6_min, S_IWUSR | S_IRUGO, show_volt_min,
1540 set_volt_min, 5);
1541static SENSOR_DEVICE_ATTR(in7_min, S_IWUSR | S_IRUGO, show_volt_min,
1542 set_volt_min, 6);
1543static SENSOR_DEVICE_ATTR(in8_min, S_IWUSR | S_IRUGO, show_volt_min,
1544 set_volt_min, 7);
1545static SENSOR_DEVICE_ATTR(in9_min, S_IWUSR | S_IRUGO, show_volt_min,
1546 set_volt_min, 8);
1547static SENSOR_DEVICE_ATTR(in10_min, S_IWUSR | S_IRUGO, show_volt_min,
1548 set_volt_min, 9);
1549static SENSOR_DEVICE_ATTR(in11_min, S_IWUSR | S_IRUGO, show_volt_min,
1550 set_volt_min, 10);
1551static SENSOR_DEVICE_ATTR(in12_min, S_IWUSR | S_IRUGO, show_volt_min,
1552 set_volt_min, 11);
1553static SENSOR_DEVICE_ATTR(in13_min, S_IWUSR | S_IRUGO, show_volt_min,
1554 set_volt_min, 12);
1555
1556static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, show_voltage, NULL, 0);
1557static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, show_voltage, NULL, 1);
1558static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, show_voltage, NULL, 2);
1559static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, show_voltage, NULL, 3);
1560static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, show_voltage, NULL, 4);
1561static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, show_voltage, NULL, 5);
1562static SENSOR_DEVICE_ATTR(in7_input, S_IRUGO, show_voltage, NULL, 6);
1563static SENSOR_DEVICE_ATTR(in8_input, S_IRUGO, show_voltage, NULL, 7);
1564static SENSOR_DEVICE_ATTR(in9_input, S_IRUGO, show_voltage, NULL, 8);
1565static SENSOR_DEVICE_ATTR(in10_input, S_IRUGO, show_voltage, NULL, 9);
1566static SENSOR_DEVICE_ATTR(in11_input, S_IRUGO, show_voltage, NULL, 10);
1567static SENSOR_DEVICE_ATTR(in12_input, S_IRUGO, show_voltage, NULL, 11);
1568static SENSOR_DEVICE_ATTR(in13_input, S_IRUGO, show_voltage, NULL, 12);
1569
1570static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, show_voltage_label, NULL, 0);
1571static SENSOR_DEVICE_ATTR(in2_label, S_IRUGO, show_voltage_label, NULL, 1);
1572static SENSOR_DEVICE_ATTR(in3_label, S_IRUGO, show_voltage_label, NULL, 2);
1573static SENSOR_DEVICE_ATTR(in4_label, S_IRUGO, show_voltage_label, NULL, 3);
1574static SENSOR_DEVICE_ATTR(in5_label, S_IRUGO, show_voltage_label, NULL, 4);
1575static SENSOR_DEVICE_ATTR(in6_label, S_IRUGO, show_voltage_label, NULL, 5);
1576static SENSOR_DEVICE_ATTR(in7_label, S_IRUGO, show_voltage_label, NULL, 6);
1577static SENSOR_DEVICE_ATTR(in8_label, S_IRUGO, show_voltage_label, NULL, 7);
1578static SENSOR_DEVICE_ATTR(in9_label, S_IRUGO, show_voltage_label, NULL, 8);
1579static SENSOR_DEVICE_ATTR(in10_label, S_IRUGO, show_voltage_label, NULL, 9);
1580static SENSOR_DEVICE_ATTR(in11_label, S_IRUGO, show_voltage_label, NULL, 10);
1581static SENSOR_DEVICE_ATTR(in12_label, S_IRUGO, show_voltage_label, NULL, 11);
1582static SENSOR_DEVICE_ATTR(in13_label, S_IRUGO, show_voltage_label, NULL, 12);
1583
1584static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL,
1585 ADT7462_ALARM2 | ADT7462_V0_ALARM);
1586static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL,
1587 ADT7462_ALARM2 | ADT7462_V7_ALARM);
1588static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL,
1589 ADT7462_ALARM2 | ADT7462_V2_ALARM);
1590static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL,
1591 ADT7462_ALARM2 | ADT7462_V6_ALARM);
1592static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL,
1593 ADT7462_ALARM2 | ADT7462_V5_ALARM);
1594static SENSOR_DEVICE_ATTR(in6_alarm, S_IRUGO, show_alarm, NULL,
1595 ADT7462_ALARM2 | ADT7462_V4_ALARM);
1596static SENSOR_DEVICE_ATTR(in7_alarm, S_IRUGO, show_alarm, NULL,
1597 ADT7462_ALARM2 | ADT7462_V3_ALARM);
1598static SENSOR_DEVICE_ATTR(in8_alarm, S_IRUGO, show_alarm, NULL,
1599 ADT7462_ALARM2 | ADT7462_V1_ALARM);
1600static SENSOR_DEVICE_ATTR(in9_alarm, S_IRUGO, show_alarm, NULL,
1601 ADT7462_ALARM3 | ADT7462_V10_ALARM);
1602static SENSOR_DEVICE_ATTR(in10_alarm, S_IRUGO, show_alarm, NULL,
1603 ADT7462_ALARM3 | ADT7462_V9_ALARM);
1604static SENSOR_DEVICE_ATTR(in11_alarm, S_IRUGO, show_alarm, NULL,
1605 ADT7462_ALARM3 | ADT7462_V8_ALARM);
1606static SENSOR_DEVICE_ATTR(in12_alarm, S_IRUGO, show_alarm, NULL,
1607 ADT7462_ALARM3 | ADT7462_V11_ALARM);
1608static SENSOR_DEVICE_ATTR(in13_alarm, S_IRUGO, show_alarm, NULL,
1609 ADT7462_ALARM3 | ADT7462_V12_ALARM);
1610
1611static SENSOR_DEVICE_ATTR(fan1_min, S_IWUSR | S_IRUGO, show_fan_min,
1612 set_fan_min, 0);
1613static SENSOR_DEVICE_ATTR(fan2_min, S_IWUSR | S_IRUGO, show_fan_min,
1614 set_fan_min, 1);
1615static SENSOR_DEVICE_ATTR(fan3_min, S_IWUSR | S_IRUGO, show_fan_min,
1616 set_fan_min, 2);
1617static SENSOR_DEVICE_ATTR(fan4_min, S_IWUSR | S_IRUGO, show_fan_min,
1618 set_fan_min, 3);
1619static SENSOR_DEVICE_ATTR(fan5_min, S_IWUSR | S_IRUGO, show_fan_min,
1620 set_fan_min, 4);
1621static SENSOR_DEVICE_ATTR(fan6_min, S_IWUSR | S_IRUGO, show_fan_min,
1622 set_fan_min, 5);
1623static SENSOR_DEVICE_ATTR(fan7_min, S_IWUSR | S_IRUGO, show_fan_min,
1624 set_fan_min, 6);
1625static SENSOR_DEVICE_ATTR(fan8_min, S_IWUSR | S_IRUGO, show_fan_min,
1626 set_fan_min, 7);
1627
1628static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan, NULL, 0);
1629static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, show_fan, NULL, 1);
1630static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, show_fan, NULL, 2);
1631static SENSOR_DEVICE_ATTR(fan4_input, S_IRUGO, show_fan, NULL, 3);
1632static SENSOR_DEVICE_ATTR(fan5_input, S_IRUGO, show_fan, NULL, 4);
1633static SENSOR_DEVICE_ATTR(fan6_input, S_IRUGO, show_fan, NULL, 5);
1634static SENSOR_DEVICE_ATTR(fan7_input, S_IRUGO, show_fan, NULL, 6);
1635static SENSOR_DEVICE_ATTR(fan8_input, S_IRUGO, show_fan, NULL, 7);
1636
1637static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL,
1638 ADT7462_ALARM4 | ADT7462_F0_ALARM);
1639static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL,
1640 ADT7462_ALARM4 | ADT7462_F1_ALARM);
1641static SENSOR_DEVICE_ATTR(fan3_alarm, S_IRUGO, show_alarm, NULL,
1642 ADT7462_ALARM4 | ADT7462_F2_ALARM);
1643static SENSOR_DEVICE_ATTR(fan4_alarm, S_IRUGO, show_alarm, NULL,
1644 ADT7462_ALARM4 | ADT7462_F3_ALARM);
1645static SENSOR_DEVICE_ATTR(fan5_alarm, S_IRUGO, show_alarm, NULL,
1646 ADT7462_ALARM4 | ADT7462_F4_ALARM);
1647static SENSOR_DEVICE_ATTR(fan6_alarm, S_IRUGO, show_alarm, NULL,
1648 ADT7462_ALARM4 | ADT7462_F5_ALARM);
1649static SENSOR_DEVICE_ATTR(fan7_alarm, S_IRUGO, show_alarm, NULL,
1650 ADT7462_ALARM4 | ADT7462_F6_ALARM);
1651static SENSOR_DEVICE_ATTR(fan8_alarm, S_IRUGO, show_alarm, NULL,
1652 ADT7462_ALARM4 | ADT7462_F7_ALARM);
1653
1654static SENSOR_DEVICE_ATTR(force_pwm_max, S_IWUSR | S_IRUGO,
1655 show_force_pwm_max, set_force_pwm_max, 0);
1656
1657static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 0);
1658static SENSOR_DEVICE_ATTR(pwm2, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 1);
1659static SENSOR_DEVICE_ATTR(pwm3, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 2);
1660static SENSOR_DEVICE_ATTR(pwm4, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 3);
1661
1662static SENSOR_DEVICE_ATTR(pwm1_auto_point1_pwm, S_IWUSR | S_IRUGO,
1663 show_pwm_min, set_pwm_min, 0);
1664static SENSOR_DEVICE_ATTR(pwm2_auto_point1_pwm, S_IWUSR | S_IRUGO,
1665 show_pwm_min, set_pwm_min, 1);
1666static SENSOR_DEVICE_ATTR(pwm3_auto_point1_pwm, S_IWUSR | S_IRUGO,
1667 show_pwm_min, set_pwm_min, 2);
1668static SENSOR_DEVICE_ATTR(pwm4_auto_point1_pwm, S_IWUSR | S_IRUGO,
1669 show_pwm_min, set_pwm_min, 3);
1670
1671static SENSOR_DEVICE_ATTR(pwm1_auto_point2_pwm, S_IWUSR | S_IRUGO,
1672 show_pwm_max, set_pwm_max, 0);
1673static SENSOR_DEVICE_ATTR(pwm2_auto_point2_pwm, S_IWUSR | S_IRUGO,
1674 show_pwm_max, set_pwm_max, 1);
1675static SENSOR_DEVICE_ATTR(pwm3_auto_point2_pwm, S_IWUSR | S_IRUGO,
1676 show_pwm_max, set_pwm_max, 2);
1677static SENSOR_DEVICE_ATTR(pwm4_auto_point2_pwm, S_IWUSR | S_IRUGO,
1678 show_pwm_max, set_pwm_max, 3);
1679
1680static SENSOR_DEVICE_ATTR(temp1_auto_point1_hyst, S_IWUSR | S_IRUGO,
1681 show_pwm_hyst, set_pwm_hyst, 0);
1682static SENSOR_DEVICE_ATTR(temp2_auto_point1_hyst, S_IWUSR | S_IRUGO,
1683 show_pwm_hyst, set_pwm_hyst, 1);
1684static SENSOR_DEVICE_ATTR(temp3_auto_point1_hyst, S_IWUSR | S_IRUGO,
1685 show_pwm_hyst, set_pwm_hyst, 2);
1686static SENSOR_DEVICE_ATTR(temp4_auto_point1_hyst, S_IWUSR | S_IRUGO,
1687 show_pwm_hyst, set_pwm_hyst, 3);
1688
1689static SENSOR_DEVICE_ATTR(temp1_auto_point2_hyst, S_IWUSR | S_IRUGO,
1690 show_pwm_hyst, set_pwm_hyst, 0);
1691static SENSOR_DEVICE_ATTR(temp2_auto_point2_hyst, S_IWUSR | S_IRUGO,
1692 show_pwm_hyst, set_pwm_hyst, 1);
1693static SENSOR_DEVICE_ATTR(temp3_auto_point2_hyst, S_IWUSR | S_IRUGO,
1694 show_pwm_hyst, set_pwm_hyst, 2);
1695static SENSOR_DEVICE_ATTR(temp4_auto_point2_hyst, S_IWUSR | S_IRUGO,
1696 show_pwm_hyst, set_pwm_hyst, 3);
1697
1698static SENSOR_DEVICE_ATTR(temp1_auto_point1_temp, S_IWUSR | S_IRUGO,
1699 show_pwm_tmin, set_pwm_tmin, 0);
1700static SENSOR_DEVICE_ATTR(temp2_auto_point1_temp, S_IWUSR | S_IRUGO,
1701 show_pwm_tmin, set_pwm_tmin, 1);
1702static SENSOR_DEVICE_ATTR(temp3_auto_point1_temp, S_IWUSR | S_IRUGO,
1703 show_pwm_tmin, set_pwm_tmin, 2);
1704static SENSOR_DEVICE_ATTR(temp4_auto_point1_temp, S_IWUSR | S_IRUGO,
1705 show_pwm_tmin, set_pwm_tmin, 3);
1706
1707static SENSOR_DEVICE_ATTR(temp1_auto_point2_temp, S_IWUSR | S_IRUGO,
1708 show_pwm_tmax, set_pwm_tmax, 0);
1709static SENSOR_DEVICE_ATTR(temp2_auto_point2_temp, S_IWUSR | S_IRUGO,
1710 show_pwm_tmax, set_pwm_tmax, 1);
1711static SENSOR_DEVICE_ATTR(temp3_auto_point2_temp, S_IWUSR | S_IRUGO,
1712 show_pwm_tmax, set_pwm_tmax, 2);
1713static SENSOR_DEVICE_ATTR(temp4_auto_point2_temp, S_IWUSR | S_IRUGO,
1714 show_pwm_tmax, set_pwm_tmax, 3);
1715
1716static SENSOR_DEVICE_ATTR(pwm1_enable, S_IWUSR | S_IRUGO, show_pwm_auto,
1717 set_pwm_auto, 0);
1718static SENSOR_DEVICE_ATTR(pwm2_enable, S_IWUSR | S_IRUGO, show_pwm_auto,
1719 set_pwm_auto, 1);
1720static SENSOR_DEVICE_ATTR(pwm3_enable, S_IWUSR | S_IRUGO, show_pwm_auto,
1721 set_pwm_auto, 2);
1722static SENSOR_DEVICE_ATTR(pwm4_enable, S_IWUSR | S_IRUGO, show_pwm_auto,
1723 set_pwm_auto, 3);
1724
1725static SENSOR_DEVICE_ATTR(pwm1_auto_channels_temp, S_IWUSR | S_IRUGO,
1726 show_pwm_auto_temp, set_pwm_auto_temp, 0);
1727static SENSOR_DEVICE_ATTR(pwm2_auto_channels_temp, S_IWUSR | S_IRUGO,
1728 show_pwm_auto_temp, set_pwm_auto_temp, 1);
1729static SENSOR_DEVICE_ATTR(pwm3_auto_channels_temp, S_IWUSR | S_IRUGO,
1730 show_pwm_auto_temp, set_pwm_auto_temp, 2);
1731static SENSOR_DEVICE_ATTR(pwm4_auto_channels_temp, S_IWUSR | S_IRUGO,
1732 show_pwm_auto_temp, set_pwm_auto_temp, 3);
1733
1734static struct attribute *adt7462_attr[] =
1735{
1736 &sensor_dev_attr_temp1_max.dev_attr.attr,
1737 &sensor_dev_attr_temp2_max.dev_attr.attr,
1738 &sensor_dev_attr_temp3_max.dev_attr.attr,
1739 &sensor_dev_attr_temp4_max.dev_attr.attr,
1740
1741 &sensor_dev_attr_temp1_min.dev_attr.attr,
1742 &sensor_dev_attr_temp2_min.dev_attr.attr,
1743 &sensor_dev_attr_temp3_min.dev_attr.attr,
1744 &sensor_dev_attr_temp4_min.dev_attr.attr,
1745
1746 &sensor_dev_attr_temp1_input.dev_attr.attr,
1747 &sensor_dev_attr_temp2_input.dev_attr.attr,
1748 &sensor_dev_attr_temp3_input.dev_attr.attr,
1749 &sensor_dev_attr_temp4_input.dev_attr.attr,
1750
1751 &sensor_dev_attr_temp1_label.dev_attr.attr,
1752 &sensor_dev_attr_temp2_label.dev_attr.attr,
1753 &sensor_dev_attr_temp3_label.dev_attr.attr,
1754 &sensor_dev_attr_temp4_label.dev_attr.attr,
1755
1756 &sensor_dev_attr_temp1_alarm.dev_attr.attr,
1757 &sensor_dev_attr_temp2_alarm.dev_attr.attr,
1758 &sensor_dev_attr_temp3_alarm.dev_attr.attr,
1759 &sensor_dev_attr_temp4_alarm.dev_attr.attr,
1760
1761 &sensor_dev_attr_in1_max.dev_attr.attr,
1762 &sensor_dev_attr_in2_max.dev_attr.attr,
1763 &sensor_dev_attr_in3_max.dev_attr.attr,
1764 &sensor_dev_attr_in4_max.dev_attr.attr,
1765 &sensor_dev_attr_in5_max.dev_attr.attr,
1766 &sensor_dev_attr_in6_max.dev_attr.attr,
1767 &sensor_dev_attr_in7_max.dev_attr.attr,
1768 &sensor_dev_attr_in8_max.dev_attr.attr,
1769 &sensor_dev_attr_in9_max.dev_attr.attr,
1770 &sensor_dev_attr_in10_max.dev_attr.attr,
1771 &sensor_dev_attr_in11_max.dev_attr.attr,
1772 &sensor_dev_attr_in12_max.dev_attr.attr,
1773 &sensor_dev_attr_in13_max.dev_attr.attr,
1774
1775 &sensor_dev_attr_in1_min.dev_attr.attr,
1776 &sensor_dev_attr_in2_min.dev_attr.attr,
1777 &sensor_dev_attr_in3_min.dev_attr.attr,
1778 &sensor_dev_attr_in4_min.dev_attr.attr,
1779 &sensor_dev_attr_in5_min.dev_attr.attr,
1780 &sensor_dev_attr_in6_min.dev_attr.attr,
1781 &sensor_dev_attr_in7_min.dev_attr.attr,
1782 &sensor_dev_attr_in8_min.dev_attr.attr,
1783 &sensor_dev_attr_in9_min.dev_attr.attr,
1784 &sensor_dev_attr_in10_min.dev_attr.attr,
1785 &sensor_dev_attr_in11_min.dev_attr.attr,
1786 &sensor_dev_attr_in12_min.dev_attr.attr,
1787 &sensor_dev_attr_in13_min.dev_attr.attr,
1788
1789 &sensor_dev_attr_in1_input.dev_attr.attr,
1790 &sensor_dev_attr_in2_input.dev_attr.attr,
1791 &sensor_dev_attr_in3_input.dev_attr.attr,
1792 &sensor_dev_attr_in4_input.dev_attr.attr,
1793 &sensor_dev_attr_in5_input.dev_attr.attr,
1794 &sensor_dev_attr_in6_input.dev_attr.attr,
1795 &sensor_dev_attr_in7_input.dev_attr.attr,
1796 &sensor_dev_attr_in8_input.dev_attr.attr,
1797 &sensor_dev_attr_in9_input.dev_attr.attr,
1798 &sensor_dev_attr_in10_input.dev_attr.attr,
1799 &sensor_dev_attr_in11_input.dev_attr.attr,
1800 &sensor_dev_attr_in12_input.dev_attr.attr,
1801 &sensor_dev_attr_in13_input.dev_attr.attr,
1802
1803 &sensor_dev_attr_in1_label.dev_attr.attr,
1804 &sensor_dev_attr_in2_label.dev_attr.attr,
1805 &sensor_dev_attr_in3_label.dev_attr.attr,
1806 &sensor_dev_attr_in4_label.dev_attr.attr,
1807 &sensor_dev_attr_in5_label.dev_attr.attr,
1808 &sensor_dev_attr_in6_label.dev_attr.attr,
1809 &sensor_dev_attr_in7_label.dev_attr.attr,
1810 &sensor_dev_attr_in8_label.dev_attr.attr,
1811 &sensor_dev_attr_in9_label.dev_attr.attr,
1812 &sensor_dev_attr_in10_label.dev_attr.attr,
1813 &sensor_dev_attr_in11_label.dev_attr.attr,
1814 &sensor_dev_attr_in12_label.dev_attr.attr,
1815 &sensor_dev_attr_in13_label.dev_attr.attr,
1816
1817 &sensor_dev_attr_in1_alarm.dev_attr.attr,
1818 &sensor_dev_attr_in2_alarm.dev_attr.attr,
1819 &sensor_dev_attr_in3_alarm.dev_attr.attr,
1820 &sensor_dev_attr_in4_alarm.dev_attr.attr,
1821 &sensor_dev_attr_in5_alarm.dev_attr.attr,
1822 &sensor_dev_attr_in6_alarm.dev_attr.attr,
1823 &sensor_dev_attr_in7_alarm.dev_attr.attr,
1824 &sensor_dev_attr_in8_alarm.dev_attr.attr,
1825 &sensor_dev_attr_in9_alarm.dev_attr.attr,
1826 &sensor_dev_attr_in10_alarm.dev_attr.attr,
1827 &sensor_dev_attr_in11_alarm.dev_attr.attr,
1828 &sensor_dev_attr_in12_alarm.dev_attr.attr,
1829 &sensor_dev_attr_in13_alarm.dev_attr.attr,
1830
1831 &sensor_dev_attr_fan1_min.dev_attr.attr,
1832 &sensor_dev_attr_fan2_min.dev_attr.attr,
1833 &sensor_dev_attr_fan3_min.dev_attr.attr,
1834 &sensor_dev_attr_fan4_min.dev_attr.attr,
1835 &sensor_dev_attr_fan5_min.dev_attr.attr,
1836 &sensor_dev_attr_fan6_min.dev_attr.attr,
1837 &sensor_dev_attr_fan7_min.dev_attr.attr,
1838 &sensor_dev_attr_fan8_min.dev_attr.attr,
1839
1840 &sensor_dev_attr_fan1_input.dev_attr.attr,
1841 &sensor_dev_attr_fan2_input.dev_attr.attr,
1842 &sensor_dev_attr_fan3_input.dev_attr.attr,
1843 &sensor_dev_attr_fan4_input.dev_attr.attr,
1844 &sensor_dev_attr_fan5_input.dev_attr.attr,
1845 &sensor_dev_attr_fan6_input.dev_attr.attr,
1846 &sensor_dev_attr_fan7_input.dev_attr.attr,
1847 &sensor_dev_attr_fan8_input.dev_attr.attr,
1848
1849 &sensor_dev_attr_fan1_alarm.dev_attr.attr,
1850 &sensor_dev_attr_fan2_alarm.dev_attr.attr,
1851 &sensor_dev_attr_fan3_alarm.dev_attr.attr,
1852 &sensor_dev_attr_fan4_alarm.dev_attr.attr,
1853 &sensor_dev_attr_fan5_alarm.dev_attr.attr,
1854 &sensor_dev_attr_fan6_alarm.dev_attr.attr,
1855 &sensor_dev_attr_fan7_alarm.dev_attr.attr,
1856 &sensor_dev_attr_fan8_alarm.dev_attr.attr,
1857
1858 &sensor_dev_attr_force_pwm_max.dev_attr.attr,
1859 &sensor_dev_attr_pwm1.dev_attr.attr,
1860 &sensor_dev_attr_pwm2.dev_attr.attr,
1861 &sensor_dev_attr_pwm3.dev_attr.attr,
1862 &sensor_dev_attr_pwm4.dev_attr.attr,
1863
1864 &sensor_dev_attr_pwm1_auto_point1_pwm.dev_attr.attr,
1865 &sensor_dev_attr_pwm2_auto_point1_pwm.dev_attr.attr,
1866 &sensor_dev_attr_pwm3_auto_point1_pwm.dev_attr.attr,
1867 &sensor_dev_attr_pwm4_auto_point1_pwm.dev_attr.attr,
1868
1869 &sensor_dev_attr_pwm1_auto_point2_pwm.dev_attr.attr,
1870 &sensor_dev_attr_pwm2_auto_point2_pwm.dev_attr.attr,
1871 &sensor_dev_attr_pwm3_auto_point2_pwm.dev_attr.attr,
1872 &sensor_dev_attr_pwm4_auto_point2_pwm.dev_attr.attr,
1873
1874 &sensor_dev_attr_temp1_auto_point1_hyst.dev_attr.attr,
1875 &sensor_dev_attr_temp2_auto_point1_hyst.dev_attr.attr,
1876 &sensor_dev_attr_temp3_auto_point1_hyst.dev_attr.attr,
1877 &sensor_dev_attr_temp4_auto_point1_hyst.dev_attr.attr,
1878
1879 &sensor_dev_attr_temp1_auto_point2_hyst.dev_attr.attr,
1880 &sensor_dev_attr_temp2_auto_point2_hyst.dev_attr.attr,
1881 &sensor_dev_attr_temp3_auto_point2_hyst.dev_attr.attr,
1882 &sensor_dev_attr_temp4_auto_point2_hyst.dev_attr.attr,
1883
1884 &sensor_dev_attr_temp1_auto_point1_temp.dev_attr.attr,
1885 &sensor_dev_attr_temp2_auto_point1_temp.dev_attr.attr,
1886 &sensor_dev_attr_temp3_auto_point1_temp.dev_attr.attr,
1887 &sensor_dev_attr_temp4_auto_point1_temp.dev_attr.attr,
1888
1889 &sensor_dev_attr_temp1_auto_point2_temp.dev_attr.attr,
1890 &sensor_dev_attr_temp2_auto_point2_temp.dev_attr.attr,
1891 &sensor_dev_attr_temp3_auto_point2_temp.dev_attr.attr,
1892 &sensor_dev_attr_temp4_auto_point2_temp.dev_attr.attr,
1893
1894 &sensor_dev_attr_pwm1_enable.dev_attr.attr,
1895 &sensor_dev_attr_pwm2_enable.dev_attr.attr,
1896 &sensor_dev_attr_pwm3_enable.dev_attr.attr,
1897 &sensor_dev_attr_pwm4_enable.dev_attr.attr,
1898
1899 &sensor_dev_attr_pwm1_auto_channels_temp.dev_attr.attr,
1900 &sensor_dev_attr_pwm2_auto_channels_temp.dev_attr.attr,
1901 &sensor_dev_attr_pwm3_auto_channels_temp.dev_attr.attr,
1902 &sensor_dev_attr_pwm4_auto_channels_temp.dev_attr.attr,
1903 NULL
1904};
1905
1906/* Return 0 if detection is successful, -ENODEV otherwise */
1907static int adt7462_detect(struct i2c_client *client, int kind,
1908 struct i2c_board_info *info)
1909{
1910 struct i2c_adapter *adapter = client->adapter;
1911
1912 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
1913 return -ENODEV;
1914
1915 if (kind <= 0) {
1916 int vendor, device, revision;
1917
1918 vendor = i2c_smbus_read_byte_data(client, ADT7462_REG_VENDOR);
1919 if (vendor != ADT7462_VENDOR)
1920 return -ENODEV;
1921
1922 device = i2c_smbus_read_byte_data(client, ADT7462_REG_DEVICE);
1923 if (device != ADT7462_DEVICE)
1924 return -ENODEV;
1925
1926 revision = i2c_smbus_read_byte_data(client,
1927 ADT7462_REG_REVISION);
1928 if (revision != ADT7462_REVISION)
1929 return -ENODEV;
1930 } else
1931 dev_dbg(&adapter->dev, "detection forced\n");
1932
1933 strlcpy(info->type, "adt7462", I2C_NAME_SIZE);
1934
1935 return 0;
1936}
1937
1938static int adt7462_probe(struct i2c_client *client,
1939 const struct i2c_device_id *id)
1940{
1941 struct adt7462_data *data;
1942 int err;
1943
1944 data = kzalloc(sizeof(struct adt7462_data), GFP_KERNEL);
1945 if (!data) {
1946 err = -ENOMEM;
1947 goto exit;
1948 }
1949
1950 i2c_set_clientdata(client, data);
1951 mutex_init(&data->lock);
1952
1953 dev_info(&client->dev, "%s chip found\n", client->name);
1954
1955 /* Register sysfs hooks */
1956 data->attrs.attrs = adt7462_attr;
1957 err = sysfs_create_group(&client->dev.kobj, &data->attrs);
1958 if (err)
1959 goto exit_free;
1960
1961 data->hwmon_dev = hwmon_device_register(&client->dev);
1962 if (IS_ERR(data->hwmon_dev)) {
1963 err = PTR_ERR(data->hwmon_dev);
1964 goto exit_remove;
1965 }
1966
1967 return 0;
1968
1969exit_remove:
1970 sysfs_remove_group(&client->dev.kobj, &data->attrs);
1971exit_free:
1972 kfree(data);
1973exit:
1974 return err;
1975}
1976
1977static int adt7462_remove(struct i2c_client *client)
1978{
1979 struct adt7462_data *data = i2c_get_clientdata(client);
1980
1981 hwmon_device_unregister(data->hwmon_dev);
1982 sysfs_remove_group(&client->dev.kobj, &data->attrs);
1983 kfree(data);
1984 return 0;
1985}
1986
1987static int __init adt7462_init(void)
1988{
1989 return i2c_add_driver(&adt7462_driver);
1990}
1991
1992static void __exit adt7462_exit(void)
1993{
1994 i2c_del_driver(&adt7462_driver);
1995}
1996
1997MODULE_AUTHOR("Darrick J. Wong <djwong@us.ibm.com>");
1998MODULE_DESCRIPTION("ADT7462 driver");
1999MODULE_LICENSE("GPL");
2000
2001module_init(adt7462_init);
2002module_exit(adt7462_exit);
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
index d368d8f845e1..1311a595147e 100644
--- a/drivers/hwmon/adt7470.c
+++ b/drivers/hwmon/adt7470.c
@@ -137,6 +137,8 @@ I2C_CLIENT_INSMOD_1(adt7470);
137#define FAN_PERIOD_INVALID 65535 137#define FAN_PERIOD_INVALID 65535
138#define FAN_DATA_VALID(x) ((x) && (x) != FAN_PERIOD_INVALID) 138#define FAN_DATA_VALID(x) ((x) && (x) != FAN_PERIOD_INVALID)
139 139
140#define ROUND_DIV(x, divisor) (((x) + ((divisor) / 2)) / (divisor))
141
140struct adt7470_data { 142struct adt7470_data {
141 struct device *hwmon_dev; 143 struct device *hwmon_dev;
142 struct attribute_group attrs; 144 struct attribute_group attrs;
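The ROUND_DIV() helper added above rounds to the nearest integer instead of truncating, so a written limit of 1500 millidegrees now becomes 2 degrees rather than 1. A minimal user-space sketch of the same arithmetic (values purely illustrative):

#include <stdio.h>

/* Same rounding rule as the ROUND_DIV() macro introduced in the hunk above
 * (intended for the non-negative operands the driver feeds it). */
#define ROUND_DIV(x, divisor) (((x) + ((divisor) / 2)) / (divisor))

int main(void)
{
	/* 1499 mC rounds down to 1 degC, 1500 mC rounds up to 2 degC. */
	printf("%d %d\n", ROUND_DIV(1499, 1000), ROUND_DIV(1500, 1000));
	return 0;
}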
@@ -353,7 +355,13 @@ static ssize_t set_temp_min(struct device *dev,
353 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 355 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
354 struct i2c_client *client = to_i2c_client(dev); 356 struct i2c_client *client = to_i2c_client(dev);
355 struct adt7470_data *data = i2c_get_clientdata(client); 357 struct adt7470_data *data = i2c_get_clientdata(client);
356 int temp = simple_strtol(buf, NULL, 10) / 1000; 358 long temp;
359
360 if (strict_strtol(buf, 10, &temp))
361 return -EINVAL;
362
363 temp = ROUND_DIV(temp, 1000);
364 temp = SENSORS_LIMIT(temp, 0, 255);
357 365
358 mutex_lock(&data->lock); 366 mutex_lock(&data->lock);
359 data->temp_min[attr->index] = temp; 367 data->temp_min[attr->index] = temp;
@@ -381,7 +389,13 @@ static ssize_t set_temp_max(struct device *dev,
381 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 389 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
382 struct i2c_client *client = to_i2c_client(dev); 390 struct i2c_client *client = to_i2c_client(dev);
383 struct adt7470_data *data = i2c_get_clientdata(client); 391 struct adt7470_data *data = i2c_get_clientdata(client);
384 int temp = simple_strtol(buf, NULL, 10) / 1000; 392 long temp;
393
394 if (strict_strtol(buf, 10, &temp))
395 return -EINVAL;
396
397 temp = ROUND_DIV(temp, 1000);
398 temp = SENSORS_LIMIT(temp, 0, 255);
385 399
386 mutex_lock(&data->lock); 400 mutex_lock(&data->lock);
387 data->temp_max[attr->index] = temp; 401 data->temp_max[attr->index] = temp;
@@ -430,11 +444,13 @@ static ssize_t set_fan_max(struct device *dev,
430 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 444 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
431 struct i2c_client *client = to_i2c_client(dev); 445 struct i2c_client *client = to_i2c_client(dev);
432 struct adt7470_data *data = i2c_get_clientdata(client); 446 struct adt7470_data *data = i2c_get_clientdata(client);
433 int temp = simple_strtol(buf, NULL, 10); 447 long temp;
434 448
435 if (!temp) 449 if (strict_strtol(buf, 10, &temp) || !temp)
436 return -EINVAL; 450 return -EINVAL;
451
437 temp = FAN_RPM_TO_PERIOD(temp); 452 temp = FAN_RPM_TO_PERIOD(temp);
453 temp = SENSORS_LIMIT(temp, 1, 65534);
438 454
439 mutex_lock(&data->lock); 455 mutex_lock(&data->lock);
440 data->fan_max[attr->index] = temp; 456 data->fan_max[attr->index] = temp;
@@ -465,11 +481,13 @@ static ssize_t set_fan_min(struct device *dev,
465 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 481 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
466 struct i2c_client *client = to_i2c_client(dev); 482 struct i2c_client *client = to_i2c_client(dev);
467 struct adt7470_data *data = i2c_get_clientdata(client); 483 struct adt7470_data *data = i2c_get_clientdata(client);
468 int temp = simple_strtol(buf, NULL, 10); 484 long temp;
469 485
470 if (!temp) 486 if (strict_strtol(buf, 10, &temp) || !temp)
471 return -EINVAL; 487 return -EINVAL;
488
472 temp = FAN_RPM_TO_PERIOD(temp); 489 temp = FAN_RPM_TO_PERIOD(temp);
490 temp = SENSORS_LIMIT(temp, 1, 65534);
473 491
474 mutex_lock(&data->lock); 492 mutex_lock(&data->lock);
475 data->fan_min[attr->index] = temp; 493 data->fan_min[attr->index] = temp;
@@ -507,9 +525,12 @@ static ssize_t set_force_pwm_max(struct device *dev,
507{ 525{
508 struct i2c_client *client = to_i2c_client(dev); 526 struct i2c_client *client = to_i2c_client(dev);
509 struct adt7470_data *data = i2c_get_clientdata(client); 527 struct adt7470_data *data = i2c_get_clientdata(client);
510 int temp = simple_strtol(buf, NULL, 10); 528 long temp;
511 u8 reg; 529 u8 reg;
512 530
531 if (strict_strtol(buf, 10, &temp))
532 return -EINVAL;
533
513 mutex_lock(&data->lock); 534 mutex_lock(&data->lock);
514 data->force_pwm_max = temp; 535 data->force_pwm_max = temp;
515 reg = i2c_smbus_read_byte_data(client, ADT7470_REG_CFG); 536 reg = i2c_smbus_read_byte_data(client, ADT7470_REG_CFG);
@@ -537,7 +558,12 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *devattr,
537 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 558 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
538 struct i2c_client *client = to_i2c_client(dev); 559 struct i2c_client *client = to_i2c_client(dev);
539 struct adt7470_data *data = i2c_get_clientdata(client); 560 struct adt7470_data *data = i2c_get_clientdata(client);
540 int temp = simple_strtol(buf, NULL, 10); 561 long temp;
562
563 if (strict_strtol(buf, 10, &temp))
564 return -EINVAL;
565
566 temp = SENSORS_LIMIT(temp, 0, 255);
541 567
542 mutex_lock(&data->lock); 568 mutex_lock(&data->lock);
543 data->pwm[attr->index] = temp; 569 data->pwm[attr->index] = temp;
@@ -564,7 +590,12 @@ static ssize_t set_pwm_max(struct device *dev,
564 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 590 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
565 struct i2c_client *client = to_i2c_client(dev); 591 struct i2c_client *client = to_i2c_client(dev);
566 struct adt7470_data *data = i2c_get_clientdata(client); 592 struct adt7470_data *data = i2c_get_clientdata(client);
567 int temp = simple_strtol(buf, NULL, 10); 593 long temp;
594
595 if (strict_strtol(buf, 10, &temp))
596 return -EINVAL;
597
598 temp = SENSORS_LIMIT(temp, 0, 255);
568 599
569 mutex_lock(&data->lock); 600 mutex_lock(&data->lock);
570 data->pwm_max[attr->index] = temp; 601 data->pwm_max[attr->index] = temp;
@@ -592,7 +623,12 @@ static ssize_t set_pwm_min(struct device *dev,
592 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 623 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
593 struct i2c_client *client = to_i2c_client(dev); 624 struct i2c_client *client = to_i2c_client(dev);
594 struct adt7470_data *data = i2c_get_clientdata(client); 625 struct adt7470_data *data = i2c_get_clientdata(client);
595 int temp = simple_strtol(buf, NULL, 10); 626 long temp;
627
628 if (strict_strtol(buf, 10, &temp))
629 return -EINVAL;
630
631 temp = SENSORS_LIMIT(temp, 0, 255);
596 632
597 mutex_lock(&data->lock); 633 mutex_lock(&data->lock);
598 data->pwm_min[attr->index] = temp; 634 data->pwm_min[attr->index] = temp;
@@ -630,7 +666,13 @@ static ssize_t set_pwm_tmin(struct device *dev,
630 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 666 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
631 struct i2c_client *client = to_i2c_client(dev); 667 struct i2c_client *client = to_i2c_client(dev);
632 struct adt7470_data *data = i2c_get_clientdata(client); 668 struct adt7470_data *data = i2c_get_clientdata(client);
633 int temp = simple_strtol(buf, NULL, 10) / 1000; 669 long temp;
670
671 if (strict_strtol(buf, 10, &temp))
672 return -EINVAL;
673
674 temp = ROUND_DIV(temp, 1000);
675 temp = SENSORS_LIMIT(temp, 0, 255);
634 676
635 mutex_lock(&data->lock); 677 mutex_lock(&data->lock);
636 data->pwm_tmin[attr->index] = temp; 678 data->pwm_tmin[attr->index] = temp;
@@ -658,11 +700,14 @@ static ssize_t set_pwm_auto(struct device *dev,
658 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 700 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
659 struct i2c_client *client = to_i2c_client(dev); 701 struct i2c_client *client = to_i2c_client(dev);
660 struct adt7470_data *data = i2c_get_clientdata(client); 702 struct adt7470_data *data = i2c_get_clientdata(client);
661 int temp = simple_strtol(buf, NULL, 10);
662 int pwm_auto_reg = ADT7470_REG_PWM_CFG(attr->index); 703 int pwm_auto_reg = ADT7470_REG_PWM_CFG(attr->index);
663 int pwm_auto_reg_mask; 704 int pwm_auto_reg_mask;
705 long temp;
664 u8 reg; 706 u8 reg;
665 707
708 if (strict_strtol(buf, 10, &temp))
709 return -EINVAL;
710
666 if (attr->index % 2) 711 if (attr->index % 2)
667 pwm_auto_reg_mask = ADT7470_PWM2_AUTO_MASK; 712 pwm_auto_reg_mask = ADT7470_PWM2_AUTO_MASK;
668 else 713 else
@@ -716,10 +761,14 @@ static ssize_t set_pwm_auto_temp(struct device *dev,
716 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 761 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
717 struct i2c_client *client = to_i2c_client(dev); 762 struct i2c_client *client = to_i2c_client(dev);
718 struct adt7470_data *data = i2c_get_clientdata(client); 763 struct adt7470_data *data = i2c_get_clientdata(client);
719 int temp = cvt_auto_temp(simple_strtol(buf, NULL, 10));
720 int pwm_auto_reg = ADT7470_REG_PWM_AUTO_TEMP(attr->index); 764 int pwm_auto_reg = ADT7470_REG_PWM_AUTO_TEMP(attr->index);
765 long temp;
721 u8 reg; 766 u8 reg;
722 767
768 if (strict_strtol(buf, 10, &temp))
769 return -EINVAL;
770
771 temp = cvt_auto_temp(temp);
723 if (temp < 0) 772 if (temp < 0)
724 return temp; 773 return temp;
725 774
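Every store callback in this file is converted the same way: simple_strtol(), which silently accepts malformed input, is replaced by strict_strtol() so bad writes fail with -EINVAL, and the parsed value is then scaled and clamped with SENSORS_LIMIT() before being cached and written to the chip. A condensed sketch of the resulting pattern follows; the function name and register address are illustrative only, and it assumes <linux/i2c.h>, <linux/hwmon.h>, <linux/hwmon-sysfs.h> and the ROUND_DIV() macro above.

static ssize_t example_set_temp(struct device *dev,
				struct device_attribute *devattr,
				const char *buf, size_t count)
{
	struct i2c_client *client = to_i2c_client(dev);
	long temp;

	if (strict_strtol(buf, 10, &temp))
		return -EINVAL;			/* reject non-numeric input */

	temp = ROUND_DIV(temp, 1000);		/* sysfs value is in millidegrees */
	temp = SENSORS_LIMIT(temp, 0, 255);	/* fit the 8-bit limit register */

	/* 0x6b is a made-up register address, for illustration only */
	i2c_smbus_write_byte_data(client, 0x6b, temp);
	return count;
}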
diff --git a/drivers/hwmon/adt7473.c b/drivers/hwmon/adt7473.c
index 3a0b63136479..18aa30866a6c 100644
--- a/drivers/hwmon/adt7473.c
+++ b/drivers/hwmon/adt7473.c
@@ -129,6 +129,8 @@ I2C_CLIENT_INSMOD_1(adt7473);
129#define FAN_PERIOD_INVALID 65535 129#define FAN_PERIOD_INVALID 65535
130#define FAN_DATA_VALID(x) ((x) && (x) != FAN_PERIOD_INVALID) 130#define FAN_DATA_VALID(x) ((x) && (x) != FAN_PERIOD_INVALID)
131 131
132#define ROUND_DIV(x, divisor) (((x) + ((divisor) / 2)) / (divisor))
133
132struct adt7473_data { 134struct adt7473_data {
133 struct device *hwmon_dev; 135 struct device *hwmon_dev;
134 struct attribute_group attrs; 136 struct attribute_group attrs;
@@ -319,35 +321,24 @@ out:
319} 321}
320 322
321/* 323/*
322 * On this chip, voltages are given as a count of steps between a minimum 324 * Conversions
323 * and maximum voltage, not a direct voltage.
324 */ 325 */
325static const int volt_convert_table[][2] = { 326
326 {2997, 3}, 327/* IN are scaled according to built-in resistors */
327 {4395, 4}, 328static const int adt7473_scaling[] = { /* .001 Volts */
329 2250, 3300
328}; 330};
331#define SCALE(val, from, to) (((val) * (to) + ((from) / 2)) / (from))
329 332
330static int decode_volt(int volt_index, u8 raw) 333static int decode_volt(int volt_index, u8 raw)
331{ 334{
332 int cmax = volt_convert_table[volt_index][0]; 335 return SCALE(raw, 192, adt7473_scaling[volt_index]);
333 int cmin = volt_convert_table[volt_index][1];
334 return ((raw * (cmax - cmin)) / 255) + cmin;
335} 336}
336 337
337static u8 encode_volt(int volt_index, int cooked) 338static u8 encode_volt(int volt_index, int cooked)
338{ 339{
339 int cmax = volt_convert_table[volt_index][0]; 340 int raw = SCALE(cooked, adt7473_scaling[volt_index], 192);
340 int cmin = volt_convert_table[volt_index][1]; 341 return SENSORS_LIMIT(raw, 0, 255);
341 u8 x;
342
343 if (cooked > cmax)
344 cooked = cmax;
345 else if (cooked < cmin)
346 cooked = cmin;
347
348 x = ((cooked - cmin) * 255) / (cmax - cmin);
349
350 return x;
351} 342}
352 343
353static ssize_t show_volt_min(struct device *dev, 344static ssize_t show_volt_min(struct device *dev,
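The reworked conversion treats the raw register value as a count out of 192 at the nominal rail voltage listed in adt7473_scaling[] (2.25 V and 3.3 V, stored in millivolts), with SCALE() doing a rounded rescale between the two ranges. A quick user-space check of the arithmetic for the 3.3 V channel (values illustrative):

#include <stdio.h>

#define SCALE(val, from, to) (((val) * (to) + ((from) / 2)) / (from))

int main(void)
{
	/* 3.3 V channel: a raw count of 192 is the nominal rail voltage,
	 * 255 is the top of the measurable range. */
	printf("%d mV\n", SCALE(192, 192, 3300));	/* prints 3300 mV */
	printf("%d mV\n", SCALE(255, 192, 3300));	/* prints 4383 mV */
	return 0;
}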
@@ -368,7 +359,12 @@ static ssize_t set_volt_min(struct device *dev,
368 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 359 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
369 struct i2c_client *client = to_i2c_client(dev); 360 struct i2c_client *client = to_i2c_client(dev);
370 struct adt7473_data *data = i2c_get_clientdata(client); 361 struct adt7473_data *data = i2c_get_clientdata(client);
371 int volt = encode_volt(attr->index, simple_strtol(buf, NULL, 10)); 362 long volt;
363
364 if (strict_strtol(buf, 10, &volt))
365 return -EINVAL;
366
367 volt = encode_volt(attr->index, volt);
372 368
373 mutex_lock(&data->lock); 369 mutex_lock(&data->lock);
374 data->volt_min[attr->index] = volt; 370 data->volt_min[attr->index] = volt;
@@ -397,7 +393,12 @@ static ssize_t set_volt_max(struct device *dev,
397 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 393 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
398 struct i2c_client *client = to_i2c_client(dev); 394 struct i2c_client *client = to_i2c_client(dev);
399 struct adt7473_data *data = i2c_get_clientdata(client); 395 struct adt7473_data *data = i2c_get_clientdata(client);
400 int volt = encode_volt(attr->index, simple_strtol(buf, NULL, 10)); 396 long volt;
397
398 if (strict_strtol(buf, 10, &volt))
399 return -EINVAL;
400
401 volt = encode_volt(attr->index, volt);
401 402
402 mutex_lock(&data->lock); 403 mutex_lock(&data->lock);
403 data->volt_max[attr->index] = volt; 404 data->volt_max[attr->index] = volt;
@@ -430,7 +431,8 @@ static int decode_temp(u8 twos_complement, u8 raw)
430 431
431static u8 encode_temp(u8 twos_complement, int cooked) 432static u8 encode_temp(u8 twos_complement, int cooked)
432{ 433{
433 return twos_complement ? cooked & 0xFF : cooked + 64; 434 u8 ret = twos_complement ? cooked & 0xFF : cooked + 64;
435 return SENSORS_LIMIT(ret, 0, 255);
434} 436}
435 437
436static ssize_t show_temp_min(struct device *dev, 438static ssize_t show_temp_min(struct device *dev,
@@ -452,7 +454,12 @@ static ssize_t set_temp_min(struct device *dev,
452 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 454 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
453 struct i2c_client *client = to_i2c_client(dev); 455 struct i2c_client *client = to_i2c_client(dev);
454 struct adt7473_data *data = i2c_get_clientdata(client); 456 struct adt7473_data *data = i2c_get_clientdata(client);
455 int temp = simple_strtol(buf, NULL, 10) / 1000; 457 long temp;
458
459 if (strict_strtol(buf, 10, &temp))
460 return -EINVAL;
461
462 temp = ROUND_DIV(temp, 1000);
456 temp = encode_temp(data->temp_twos_complement, temp); 463 temp = encode_temp(data->temp_twos_complement, temp);
457 464
458 mutex_lock(&data->lock); 465 mutex_lock(&data->lock);
@@ -483,7 +490,12 @@ static ssize_t set_temp_max(struct device *dev,
483 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 490 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
484 struct i2c_client *client = to_i2c_client(dev); 491 struct i2c_client *client = to_i2c_client(dev);
485 struct adt7473_data *data = i2c_get_clientdata(client); 492 struct adt7473_data *data = i2c_get_clientdata(client);
486 int temp = simple_strtol(buf, NULL, 10) / 1000; 493 long temp;
494
495 if (strict_strtol(buf, 10, &temp))
496 return -EINVAL;
497
498 temp = ROUND_DIV(temp, 1000);
487 temp = encode_temp(data->temp_twos_complement, temp); 499 temp = encode_temp(data->temp_twos_complement, temp);
488 500
489 mutex_lock(&data->lock); 501 mutex_lock(&data->lock);
@@ -526,11 +538,13 @@ static ssize_t set_fan_min(struct device *dev,
526 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 538 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
527 struct i2c_client *client = to_i2c_client(dev); 539 struct i2c_client *client = to_i2c_client(dev);
528 struct adt7473_data *data = i2c_get_clientdata(client); 540 struct adt7473_data *data = i2c_get_clientdata(client);
529 int temp = simple_strtol(buf, NULL, 10); 541 long temp;
530 542
531 if (!temp) 543 if (strict_strtol(buf, 10, &temp) || !temp)
532 return -EINVAL; 544 return -EINVAL;
545
533 temp = FAN_RPM_TO_PERIOD(temp); 546 temp = FAN_RPM_TO_PERIOD(temp);
547 temp = SENSORS_LIMIT(temp, 1, 65534);
534 548
535 mutex_lock(&data->lock); 549 mutex_lock(&data->lock);
536 data->fan_min[attr->index] = temp; 550 data->fan_min[attr->index] = temp;
@@ -569,7 +583,10 @@ static ssize_t set_max_duty_at_crit(struct device *dev,
569 u8 reg; 583 u8 reg;
570 struct i2c_client *client = to_i2c_client(dev); 584 struct i2c_client *client = to_i2c_client(dev);
571 struct adt7473_data *data = i2c_get_clientdata(client); 585 struct adt7473_data *data = i2c_get_clientdata(client);
572 int temp = simple_strtol(buf, NULL, 10); 586 long temp;
587
588 if (strict_strtol(buf, 10, &temp))
589 return -EINVAL;
573 590
574 mutex_lock(&data->lock); 591 mutex_lock(&data->lock);
575 data->max_duty_at_overheat = !!temp; 592 data->max_duty_at_overheat = !!temp;
@@ -598,7 +615,12 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *devattr,
598 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 615 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
599 struct i2c_client *client = to_i2c_client(dev); 616 struct i2c_client *client = to_i2c_client(dev);
600 struct adt7473_data *data = i2c_get_clientdata(client); 617 struct adt7473_data *data = i2c_get_clientdata(client);
601 int temp = simple_strtol(buf, NULL, 10); 618 long temp;
619
620 if (strict_strtol(buf, 10, &temp))
621 return -EINVAL;
622
623 temp = SENSORS_LIMIT(temp, 0, 255);
602 624
603 mutex_lock(&data->lock); 625 mutex_lock(&data->lock);
604 data->pwm[attr->index] = temp; 626 data->pwm[attr->index] = temp;
@@ -625,7 +647,12 @@ static ssize_t set_pwm_max(struct device *dev,
625 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 647 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
626 struct i2c_client *client = to_i2c_client(dev); 648 struct i2c_client *client = to_i2c_client(dev);
627 struct adt7473_data *data = i2c_get_clientdata(client); 649 struct adt7473_data *data = i2c_get_clientdata(client);
628 int temp = simple_strtol(buf, NULL, 10); 650 long temp;
651
652 if (strict_strtol(buf, 10, &temp))
653 return -EINVAL;
654
655 temp = SENSORS_LIMIT(temp, 0, 255);
629 656
630 mutex_lock(&data->lock); 657 mutex_lock(&data->lock);
631 data->pwm_max[attr->index] = temp; 658 data->pwm_max[attr->index] = temp;
@@ -653,7 +680,12 @@ static ssize_t set_pwm_min(struct device *dev,
653 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 680 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
654 struct i2c_client *client = to_i2c_client(dev); 681 struct i2c_client *client = to_i2c_client(dev);
655 struct adt7473_data *data = i2c_get_clientdata(client); 682 struct adt7473_data *data = i2c_get_clientdata(client);
656 int temp = simple_strtol(buf, NULL, 10); 683 long temp;
684
685 if (strict_strtol(buf, 10, &temp))
686 return -EINVAL;
687
688 temp = SENSORS_LIMIT(temp, 0, 255);
657 689
658 mutex_lock(&data->lock); 690 mutex_lock(&data->lock);
659 data->pwm_min[attr->index] = temp; 691 data->pwm_min[attr->index] = temp;
@@ -683,7 +715,12 @@ static ssize_t set_temp_tmax(struct device *dev,
683 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 715 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
684 struct i2c_client *client = to_i2c_client(dev); 716 struct i2c_client *client = to_i2c_client(dev);
685 struct adt7473_data *data = i2c_get_clientdata(client); 717 struct adt7473_data *data = i2c_get_clientdata(client);
686 int temp = simple_strtol(buf, NULL, 10) / 1000; 718 long temp;
719
720 if (strict_strtol(buf, 10, &temp))
721 return -EINVAL;
722
723 temp = ROUND_DIV(temp, 1000);
687 temp = encode_temp(data->temp_twos_complement, temp); 724 temp = encode_temp(data->temp_twos_complement, temp);
688 725
689 mutex_lock(&data->lock); 726 mutex_lock(&data->lock);
@@ -714,7 +751,12 @@ static ssize_t set_temp_tmin(struct device *dev,
714 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 751 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
715 struct i2c_client *client = to_i2c_client(dev); 752 struct i2c_client *client = to_i2c_client(dev);
716 struct adt7473_data *data = i2c_get_clientdata(client); 753 struct adt7473_data *data = i2c_get_clientdata(client);
717 int temp = simple_strtol(buf, NULL, 10) / 1000; 754 long temp;
755
756 if (strict_strtol(buf, 10, &temp))
757 return -EINVAL;
758
759 temp = ROUND_DIV(temp, 1000);
718 temp = encode_temp(data->temp_twos_complement, temp); 760 temp = encode_temp(data->temp_twos_complement, temp);
719 761
720 mutex_lock(&data->lock); 762 mutex_lock(&data->lock);
@@ -752,7 +794,10 @@ static ssize_t set_pwm_enable(struct device *dev,
752 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 794 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
753 struct i2c_client *client = to_i2c_client(dev); 795 struct i2c_client *client = to_i2c_client(dev);
754 struct adt7473_data *data = i2c_get_clientdata(client); 796 struct adt7473_data *data = i2c_get_clientdata(client);
755 int temp = simple_strtol(buf, NULL, 10); 797 long temp;
798
799 if (strict_strtol(buf, 10, &temp))
800 return -EINVAL;
756 801
757 switch (temp) { 802 switch (temp) {
758 case 0: 803 case 0:
@@ -816,7 +861,10 @@ static ssize_t set_pwm_auto_temp(struct device *dev,
816 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 861 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
817 struct i2c_client *client = to_i2c_client(dev); 862 struct i2c_client *client = to_i2c_client(dev);
818 struct adt7473_data *data = i2c_get_clientdata(client); 863 struct adt7473_data *data = i2c_get_clientdata(client);
819 int temp = simple_strtol(buf, NULL, 10); 864 long temp;
865
866 if (strict_strtol(buf, 10, &temp))
867 return -EINVAL;
820 868
821 switch (temp) { 869 switch (temp) {
822 case 1: 870 case 1:
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index bc011da79e14..086c2a5cef0b 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -116,6 +116,21 @@ static const char* temperature_sensors_sets[][36] = {
116/* Set 9: Macbook Pro 3,1 (Santa Rosa) */ 116/* Set 9: Macbook Pro 3,1 (Santa Rosa) */
117 { "TALP", "TB0T", "TC0D", "TC0P", "TG0D", "TG0H", "TTF0", "TW0P", 117 { "TALP", "TB0T", "TC0D", "TC0P", "TG0D", "TG0H", "TTF0", "TW0P",
118 "Th0H", "Th1H", "Th2H", "Tm0P", "Ts0P", NULL }, 118 "Th0H", "Th1H", "Th2H", "Tm0P", "Ts0P", NULL },
119/* Set 10: iMac 5,1 */
120 { "TA0P", "TC0D", "TC0P", "TG0D", "TH0P", "TO0P", "Tm0P", NULL },
121/* Set 11: Macbook 5,1 */
122 { "TB0T", "TB1T", "TB2T", "TB3T", "TC0D", "TC0P", "TN0D", "TN0P",
123 "TTF0", "Th0H", "Th1H", "ThFH", "Ts0P", "Ts0S", NULL },
124/* Set 12: Macbook Pro 5,1 */
125 { "TB0T", "TB1T", "TB2T", "TB3T", "TC0D", "TC0F", "TC0P", "TG0D",
126 "TG0F", "TG0H", "TG0P", "TG0T", "TG1H", "TN0D", "TN0P", "TTF0",
127 "Th2H", "Tm0P", "Ts0P", "Ts0S", NULL },
128/* Set 13: iMac 8,1 */
129 { "TA0P", "TC0D", "TC0H", "TC0P", "TG0D", "TG0H", "TG0P", "TH0P",
130 "TL0P", "TO0P", "TW0P", "Tm0P", "Tp0P", NULL },
131/* Set 14: iMac 6,1 */
132 { "TA0P", "TC0D", "TC0H", "TC0P", "TG0D", "TG0H", "TG0P", "TH0P",
133 "TO0P", "Tp0P", NULL },
119}; 134};
120 135
121/* List of keys used to read/write fan speeds */ 136/* List of keys used to read/write fan speeds */
@@ -1268,7 +1283,7 @@ static __initdata struct dmi_match_data applesmc_dmi_data[] = {
1268 { .accelerometer = 0, .light = 0, .temperature_set = 4 }, 1283 { .accelerometer = 0, .light = 0, .temperature_set = 4 },
1269/* iMac: temperature set 5 */ 1284/* iMac: temperature set 5 */
1270 { .accelerometer = 0, .light = 0, .temperature_set = 5 }, 1285 { .accelerometer = 0, .light = 0, .temperature_set = 5 },
1271/* MacBook3: accelerometer and temperature set 6 */ 1286/* MacBook3, MacBook4: accelerometer and temperature set 6 */
1272 { .accelerometer = 1, .light = 0, .temperature_set = 6 }, 1287 { .accelerometer = 1, .light = 0, .temperature_set = 6 },
1273/* MacBook Air: accelerometer, backlight and temperature set 7 */ 1288/* MacBook Air: accelerometer, backlight and temperature set 7 */
1274 { .accelerometer = 1, .light = 1, .temperature_set = 7 }, 1289 { .accelerometer = 1, .light = 1, .temperature_set = 7 },
@@ -1276,6 +1291,16 @@ static __initdata struct dmi_match_data applesmc_dmi_data[] = {
1276 { .accelerometer = 1, .light = 1, .temperature_set = 8 }, 1291 { .accelerometer = 1, .light = 1, .temperature_set = 8 },
1277/* MacBook Pro 3: accelerometer, backlight and temperature set 9 */ 1292/* MacBook Pro 3: accelerometer, backlight and temperature set 9 */
1278 { .accelerometer = 1, .light = 1, .temperature_set = 9 }, 1293 { .accelerometer = 1, .light = 1, .temperature_set = 9 },
1294/* iMac 5: light sensor only, temperature set 10 */
1295 { .accelerometer = 0, .light = 0, .temperature_set = 10 },
1296/* MacBook 5: accelerometer, backlight and temperature set 11 */
1297 { .accelerometer = 1, .light = 1, .temperature_set = 11 },
1298/* MacBook Pro 5: accelerometer, backlight and temperature set 12 */
1299 { .accelerometer = 1, .light = 1, .temperature_set = 12 },
1300/* iMac 8: light sensor only, temperature set 13 */
1301 { .accelerometer = 0, .light = 0, .temperature_set = 13 },
1302/* iMac 6: light sensor only, temperature set 14 */
1303 { .accelerometer = 0, .light = 0, .temperature_set = 14 },
1279}; 1304};
1280 1305
1281/* Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1". 1306/* Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1".
@@ -1285,6 +1310,10 @@ static __initdata struct dmi_system_id applesmc_whitelist[] = {
1285 DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), 1310 DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
1286 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir") }, 1311 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir") },
1287 &applesmc_dmi_data[7]}, 1312 &applesmc_dmi_data[7]},
1313 { applesmc_dmi_match, "Apple MacBook Pro 5", {
1314 DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
1315 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5") },
1316 &applesmc_dmi_data[12]},
1288 { applesmc_dmi_match, "Apple MacBook Pro 4", { 1317 { applesmc_dmi_match, "Apple MacBook Pro 4", {
1289 DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), 1318 DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
1290 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro4") }, 1319 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro4") },
@@ -1305,6 +1334,14 @@ static __initdata struct dmi_system_id applesmc_whitelist[] = {
1305 DMI_MATCH(DMI_BOARD_VENDOR,"Apple"), 1334 DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
1306 DMI_MATCH(DMI_PRODUCT_NAME,"MacBook3") }, 1335 DMI_MATCH(DMI_PRODUCT_NAME,"MacBook3") },
1307 &applesmc_dmi_data[6]}, 1336 &applesmc_dmi_data[6]},
1337 { applesmc_dmi_match, "Apple MacBook 4", {
1338 DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
1339 DMI_MATCH(DMI_PRODUCT_NAME, "MacBook4") },
1340 &applesmc_dmi_data[6]},
1341 { applesmc_dmi_match, "Apple MacBook 5", {
1342 DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
1343 DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5") },
1344 &applesmc_dmi_data[11]},
1308 { applesmc_dmi_match, "Apple MacBook", { 1345 { applesmc_dmi_match, "Apple MacBook", {
1309 DMI_MATCH(DMI_BOARD_VENDOR,"Apple"), 1346 DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
1310 DMI_MATCH(DMI_PRODUCT_NAME,"MacBook") }, 1347 DMI_MATCH(DMI_PRODUCT_NAME,"MacBook") },
@@ -1317,6 +1354,22 @@ static __initdata struct dmi_system_id applesmc_whitelist[] = {
1317 DMI_MATCH(DMI_BOARD_VENDOR,"Apple"), 1354 DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
1318 DMI_MATCH(DMI_PRODUCT_NAME,"MacPro2") }, 1355 DMI_MATCH(DMI_PRODUCT_NAME,"MacPro2") },
1319 &applesmc_dmi_data[4]}, 1356 &applesmc_dmi_data[4]},
1357 { applesmc_dmi_match, "Apple MacPro", {
1358 DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
1359 DMI_MATCH(DMI_PRODUCT_NAME, "MacPro") },
1360 &applesmc_dmi_data[4]},
1361 { applesmc_dmi_match, "Apple iMac 8", {
1362 DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
1363 DMI_MATCH(DMI_PRODUCT_NAME, "iMac8") },
1364 &applesmc_dmi_data[13]},
1365 { applesmc_dmi_match, "Apple iMac 6", {
1366 DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
1367 DMI_MATCH(DMI_PRODUCT_NAME, "iMac6") },
1368 &applesmc_dmi_data[14]},
1369 { applesmc_dmi_match, "Apple iMac 5", {
1370 DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
1371 DMI_MATCH(DMI_PRODUCT_NAME, "iMac5") },
1372 &applesmc_dmi_data[10]},
1320 { applesmc_dmi_match, "Apple iMac", { 1373 { applesmc_dmi_match, "Apple iMac", {
1321 DMI_MATCH(DMI_BOARD_VENDOR,"Apple"), 1374 DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
1322 DMI_MATCH(DMI_PRODUCT_NAME,"iMac") }, 1375 DMI_MATCH(DMI_PRODUCT_NAME,"iMac") },
@@ -1511,3 +1564,4 @@ module_exit(applesmc_exit);
1511MODULE_AUTHOR("Nicolas Boichat"); 1564MODULE_AUTHOR("Nicolas Boichat");
1512MODULE_DESCRIPTION("Apple SMC"); 1565MODULE_DESCRIPTION("Apple SMC");
1513MODULE_LICENSE("GPL v2"); 1566MODULE_LICENSE("GPL v2");
1567MODULE_DEVICE_TABLE(dmi, applesmc_whitelist);
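Because a DMI_MATCH() on the product name is effectively a substring test (the table's own note points out that "MacBook" also matches "MacBookPro1,1"), the new MacBookPro5, MacBook4/5 and iMac5/6/8 entries are inserted ahead of the shorter, more generic patterns. A small user-space sketch of why that ordering matters, assuming a first-match substring lookup:

#include <stdio.h>
#include <string.h>

/* First table entry whose pattern is a substring of the DMI product name wins. */
static const char *match(const char *product, const char *const patterns[], int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (strstr(product, patterns[i]))
			return patterns[i];
	return "no match";
}

int main(void)
{
	const char *bad[]  = { "MacBook", "MacBookPro5" };	/* generic first: wrong */
	const char *good[] = { "MacBookPro5", "MacBook" };	/* specific first: right */

	printf("%s\n", match("MacBookPro5,1", bad, 2));		/* prints MacBook */
	printf("%s\n", match("MacBookPro5,1", good, 2));	/* prints MacBookPro5 */
	return 0;
}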
diff --git a/drivers/hwmon/hwmon-vid.c b/drivers/hwmon/hwmon-vid.c
index c54eff92be4a..bfc296145bba 100644
--- a/drivers/hwmon/hwmon-vid.c
+++ b/drivers/hwmon/hwmon-vid.c
@@ -180,6 +180,7 @@ static struct vrm_model vrm_models[] = {
180 {X86_VENDOR_AMD, 0x6, ANY, ANY, 90}, /* Athlon Duron etc */ 180 {X86_VENDOR_AMD, 0x6, ANY, ANY, 90}, /* Athlon Duron etc */
181 {X86_VENDOR_AMD, 0xF, 0x3F, ANY, 24}, /* Athlon 64, Opteron */ 181 {X86_VENDOR_AMD, 0xF, 0x3F, ANY, 24}, /* Athlon 64, Opteron */
182 {X86_VENDOR_AMD, 0xF, ANY, ANY, 25}, /* NPT family 0Fh */ 182 {X86_VENDOR_AMD, 0xF, ANY, ANY, 25}, /* NPT family 0Fh */
183 {X86_VENDOR_AMD, 0x10, ANY, ANY, 25}, /* NPT family 10h */
183 {X86_VENDOR_INTEL, 0x6, 0x9, ANY, 13}, /* Pentium M (130 nm) */ 184 {X86_VENDOR_INTEL, 0x6, 0x9, ANY, 13}, /* Pentium M (130 nm) */
184 {X86_VENDOR_INTEL, 0x6, 0xB, ANY, 85}, /* Tualatin */ 185 {X86_VENDOR_INTEL, 0x6, 0xB, ANY, 85}, /* Tualatin */
185 {X86_VENDOR_INTEL, 0x6, 0xD, ANY, 13}, /* Pentium M (90 nm) */ 186 {X86_VENDOR_INTEL, 0x6, 0xD, ANY, 13}, /* Pentium M (90 nm) */
diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
index 7b0ed5dea399..fe74609a7feb 100644
--- a/drivers/hwmon/ibmaem.c
+++ b/drivers/hwmon/ibmaem.c
@@ -88,9 +88,11 @@
88static DEFINE_IDR(aem_idr); 88static DEFINE_IDR(aem_idr);
89static DEFINE_SPINLOCK(aem_idr_lock); 89static DEFINE_SPINLOCK(aem_idr_lock);
90 90
91static struct device_driver aem_driver = { 91static struct platform_driver aem_driver = {
92 .name = DRVNAME, 92 .driver = {
93 .bus = &platform_bus_type, 93 .name = DRVNAME,
94 .bus = &platform_bus_type,
95 }
94}; 96};
95 97
96struct aem_ipmi_data { 98struct aem_ipmi_data {
@@ -583,7 +585,7 @@ static int aem_init_aem1_inst(struct aem_ipmi_data *probe, u8 module_handle)
583 data->pdev = platform_device_alloc(DRVNAME, data->id); 585 data->pdev = platform_device_alloc(DRVNAME, data->id);
584 if (!data->pdev) 586 if (!data->pdev)
585 goto dev_err; 587 goto dev_err;
586 data->pdev->dev.driver = &aem_driver; 588 data->pdev->dev.driver = &aem_driver.driver;
587 589
588 res = platform_device_add(data->pdev); 590 res = platform_device_add(data->pdev);
589 if (res) 591 if (res)
@@ -716,7 +718,7 @@ static int aem_init_aem2_inst(struct aem_ipmi_data *probe,
716 data->pdev = platform_device_alloc(DRVNAME, data->id); 718 data->pdev = platform_device_alloc(DRVNAME, data->id);
717 if (!data->pdev) 719 if (!data->pdev)
718 goto dev_err; 720 goto dev_err;
719 data->pdev->dev.driver = &aem_driver; 721 data->pdev->dev.driver = &aem_driver.driver;
720 722
721 res = platform_device_add(data->pdev); 723 res = platform_device_add(data->pdev);
722 if (res) 724 if (res)
@@ -1085,7 +1087,7 @@ static int __init aem_init(void)
1085{ 1087{
1086 int res; 1088 int res;
1087 1089
1088 res = driver_register(&aem_driver); 1090 res = driver_register(&aem_driver.driver);
1089 if (res) { 1091 if (res) {
1090 printk(KERN_ERR "Can't register aem driver\n"); 1092 printk(KERN_ERR "Can't register aem driver\n");
1091 return res; 1093 return res;
@@ -1097,7 +1099,7 @@ static int __init aem_init(void)
1097 return 0; 1099 return 0;
1098 1100
1099ipmi_reg_err: 1101ipmi_reg_err:
1100 driver_unregister(&aem_driver); 1102 driver_unregister(&aem_driver.driver);
1101 return res; 1103 return res;
1102 1104
1103} 1105}
@@ -1107,7 +1109,7 @@ static void __exit aem_exit(void)
1107 struct aem_data *p1, *next1; 1109 struct aem_data *p1, *next1;
1108 1110
1109 ipmi_smi_watcher_unregister(&driver_data.bmc_events); 1111 ipmi_smi_watcher_unregister(&driver_data.bmc_events);
1110 driver_unregister(&aem_driver); 1112 driver_unregister(&aem_driver.driver);
1111 list_for_each_entry_safe(p1, next1, &driver_data.aem_devices, list) 1113 list_for_each_entry_safe(p1, next1, &driver_data.aem_devices, list)
1112 aem_delete(p1); 1114 aem_delete(p1);
1113} 1115}
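The ibmaem change wraps the former bare struct device_driver inside a struct platform_driver, so registration and the pdev->dev.driver assignments now go through the embedded .driver member. A minimal sketch of the container pattern, with illustrative names (assumes <linux/platform_device.h>):

static struct platform_driver example_driver = {
	.driver = {
		.name	= "example",
		.bus	= &platform_bus_type,
	},
};

static int __init example_init(void)
{
	/* same registration call as before, just on the embedded member */
	return driver_register(&example_driver.driver);
}

Keeping the name and bus inside the embedded member lets the existing driver_register()/driver_unregister() calls keep working with only a .driver dereference added.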
diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c
new file mode 100644
index 000000000000..c002144c76bc
--- /dev/null
+++ b/drivers/hwmon/lis3lv02d.c
@@ -0,0 +1,581 @@
1/*
2 * lis3lv02d.c - ST LIS3LV02DL accelerometer driver
3 *
4 * Copyright (C) 2007-2008 Yan Burman
5 * Copyright (C) 2008 Eric Piel
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22#include <linux/kernel.h>
23#include <linux/init.h>
24#include <linux/dmi.h>
25#include <linux/module.h>
26#include <linux/types.h>
27#include <linux/platform_device.h>
28#include <linux/interrupt.h>
29#include <linux/input.h>
30#include <linux/kthread.h>
31#include <linux/semaphore.h>
32#include <linux/delay.h>
33#include <linux/wait.h>
34#include <linux/poll.h>
35#include <linux/freezer.h>
36#include <linux/uaccess.h>
37#include <acpi/acpi_drivers.h>
38#include <asm/atomic.h>
39#include "lis3lv02d.h"
40
41#define DRIVER_NAME "lis3lv02d"
42#define ACPI_MDPS_CLASS "accelerometer"
43
44/* joystick device poll interval in milliseconds */
45#define MDPS_POLL_INTERVAL 50
46/*
47 * The sensor can also generate interrupts (DRDY) but it's pretty pointless
48 * because they are generated even if the data do not change. So it's better
49 * to keep the interrupt for the free-fall event. The values are updated at
50 * 40Hz (at the lowest frequency), but as it can be pretty time consuming on
51 * some slow processors, we poll the sensor only at 20Hz... enough for the
52 * joystick.
53 */
54
55/* Maximum value our axis may get for the input device (signed 12 bits) */
56#define MDPS_MAX_VAL 2048
57
58struct axis_conversion {
59 s8 x;
60 s8 y;
61 s8 z;
62};
63
64struct acpi_lis3lv02d {
65 struct acpi_device *device; /* The ACPI device */
66 struct input_dev *idev; /* input device */
67 struct task_struct *kthread; /* kthread for input */
68 struct mutex lock;
69 struct platform_device *pdev; /* platform device */
70 atomic_t count; /* interrupt count after last read */
71 int xcalib; /* calibrated null value for x */
72 int ycalib; /* calibrated null value for y */
73 int zcalib; /* calibrated null value for z */
74 unsigned char is_on; /* whether the device is on or off */
75 unsigned char usage; /* usage counter */
76 struct axis_conversion ac; /* hw -> logical axis */
77};
78
79static struct acpi_lis3lv02d adev;
80
81static int lis3lv02d_remove_fs(void);
82static int lis3lv02d_add_fs(struct acpi_device *device);
83
84/* For automatic insertion of the module */
85static struct acpi_device_id lis3lv02d_device_ids[] = {
86 {"HPQ0004", 0}, /* HP Mobile Data Protection System PNP */
87 {"", 0},
88};
89MODULE_DEVICE_TABLE(acpi, lis3lv02d_device_ids);
90
91/**
92 * lis3lv02d_acpi_init - ACPI _INI method: initialize the device.
93 * @handle: the handle of the device
94 *
95 * Returns AE_OK on success.
96 */
97static inline acpi_status lis3lv02d_acpi_init(acpi_handle handle)
98{
99 return acpi_evaluate_object(handle, METHOD_NAME__INI, NULL, NULL);
100}
101
102/**
103 * lis3lv02d_acpi_read - ACPI ALRD method: read a register
104 * @handle: the handle of the device
105 * @reg: the register to read
106 * @ret: result of the operation
107 *
108 * Returns AE_OK on success.
109 */
110static acpi_status lis3lv02d_acpi_read(acpi_handle handle, int reg, u8 *ret)
111{
112 union acpi_object arg0 = { ACPI_TYPE_INTEGER };
113 struct acpi_object_list args = { 1, &arg0 };
114 unsigned long long lret;
115 acpi_status status;
116
117 arg0.integer.value = reg;
118
119 status = acpi_evaluate_integer(handle, "ALRD", &args, &lret);
120 *ret = lret;
121 return status;
122}
123
124/**
125 * lis3lv02d_acpi_write - ACPI ALWR method: write to a register
126 * @handle: the handle of the device
127 * @reg: the register to write to
128 * @val: the value to write
129 *
130 * Returns AE_OK on success.
131 */
132static acpi_status lis3lv02d_acpi_write(acpi_handle handle, int reg, u8 val)
133{
134 unsigned long long ret; /* Not used when writing */
135 union acpi_object in_obj[2];
136 struct acpi_object_list args = { 2, in_obj };
137
138 in_obj[0].type = ACPI_TYPE_INTEGER;
139 in_obj[0].integer.value = reg;
140 in_obj[1].type = ACPI_TYPE_INTEGER;
141 in_obj[1].integer.value = val;
142
143 return acpi_evaluate_integer(handle, "ALWR", &args, &ret);
144}
145
146static s16 lis3lv02d_read_16(acpi_handle handle, int reg)
147{
148 u8 lo, hi;
149
150 lis3lv02d_acpi_read(handle, reg, &lo);
151 lis3lv02d_acpi_read(handle, reg + 1, &hi);
152 /* In "12 bit right justified" mode, bit 6, bit 7, bit 8 = bit 5 */
153 return (s16)((hi << 8) | lo);
154}
155
156/**
157 * lis3lv02d_get_axis - For the given axis, return the converted value
158 * @axis: 1,2,3 - can also be negative
159 * @hw_values: raw values returned by the hardware
160 *
161 * Returns the converted value.
162 */
163static inline int lis3lv02d_get_axis(s8 axis, int hw_values[3])
164{
165 if (axis > 0)
166 return hw_values[axis - 1];
167 else
168 return -hw_values[-axis - 1];
169}
170
171/**
172 * lis3lv02d_get_xyz - Get X, Y and Z axis values from the accelerometer
173 * @handle: the handle to the device
174 * @x: where to store the X axis value
175 * @y: where to store the Y axis value
176 * @z: where to store the Z axis value
177 *
178 * Note that the 40Hz input device can eat up about 10% CPU at 800MHz
179 */
180static void lis3lv02d_get_xyz(acpi_handle handle, int *x, int *y, int *z)
181{
182 int position[3];
183
184 position[0] = lis3lv02d_read_16(handle, OUTX_L);
185 position[1] = lis3lv02d_read_16(handle, OUTY_L);
186 position[2] = lis3lv02d_read_16(handle, OUTZ_L);
187
188 *x = lis3lv02d_get_axis(adev.ac.x, position);
189 *y = lis3lv02d_get_axis(adev.ac.y, position);
190 *z = lis3lv02d_get_axis(adev.ac.z, position);
191}
192
193static inline void lis3lv02d_poweroff(acpi_handle handle)
194{
195 adev.is_on = 0;
196 /* disable X,Y,Z axis and power down */
197 lis3lv02d_acpi_write(handle, CTRL_REG1, 0x00);
198}
199
200static void lis3lv02d_poweron(acpi_handle handle)
201{
202 u8 val;
203
204 adev.is_on = 1;
205 lis3lv02d_acpi_init(handle);
206 lis3lv02d_acpi_write(handle, FF_WU_CFG, 0);
207 /*
208 * BDU: LSB and MSB values are not updated until both have been read.
209 * So the value read will always be correct.
210 * IEN: Interrupt for free-fall and DD, not for data-ready.
211 */
212 lis3lv02d_acpi_read(handle, CTRL_REG2, &val);
213 val |= CTRL2_BDU | CTRL2_IEN;
214 lis3lv02d_acpi_write(handle, CTRL_REG2, val);
215}
216
217#ifdef CONFIG_PM
218static int lis3lv02d_suspend(struct acpi_device *device, pm_message_t state)
219{
220 /* make sure the device is off when we suspend */
221 lis3lv02d_poweroff(device->handle);
222 return 0;
223}
224
225static int lis3lv02d_resume(struct acpi_device *device)
226{
227 /* put the device back in the right state (ACPI might turn it on) */
228 mutex_lock(&adev.lock);
229 if (adev.usage > 0)
230 lis3lv02d_poweron(device->handle);
231 else
232 lis3lv02d_poweroff(device->handle);
233 mutex_unlock(&adev.lock);
234 return 0;
235}
236#else
237#define lis3lv02d_suspend NULL
238#define lis3lv02d_resume NULL
239#endif
240
241
242/*
243 * To be called before starting to use the device. It makes sure that the
244 * device will always be on until a call to lis3lv02d_decrease_use(). Not to be
245 * used from interrupt context.
246 */
247static void lis3lv02d_increase_use(struct acpi_lis3lv02d *dev)
248{
249 mutex_lock(&dev->lock);
250 dev->usage++;
251 if (dev->usage == 1) {
252 if (!dev->is_on)
253 lis3lv02d_poweron(dev->device->handle);
254 }
255 mutex_unlock(&dev->lock);
256}
257
258/*
259 * To be called whenever use of the device is stopped.
260 * It makes sure the device is turned off once there is no usage left.
261 */
262static void lis3lv02d_decrease_use(struct acpi_lis3lv02d *dev)
263{
264 mutex_lock(&dev->lock);
265 dev->usage--;
266 if (dev->usage == 0)
267 lis3lv02d_poweroff(dev->device->handle);
268 mutex_unlock(&dev->lock);
269}
270
271/**
272 * lis3lv02d_joystick_kthread - Kthread polling function
273 * @data: unused - here to conform to threadfn prototype
274 */
275static int lis3lv02d_joystick_kthread(void *data)
276{
277 int x, y, z;
278
279 while (!kthread_should_stop()) {
280 lis3lv02d_get_xyz(adev.device->handle, &x, &y, &z);
281 input_report_abs(adev.idev, ABS_X, x - adev.xcalib);
282 input_report_abs(adev.idev, ABS_Y, y - adev.ycalib);
283 input_report_abs(adev.idev, ABS_Z, z - adev.zcalib);
284
285 input_sync(adev.idev);
286
287 try_to_freeze();
288 msleep_interruptible(MDPS_POLL_INTERVAL);
289 }
290
291 return 0;
292}
293
294static int lis3lv02d_joystick_open(struct input_dev *input)
295{
296 lis3lv02d_increase_use(&adev);
297 adev.kthread = kthread_run(lis3lv02d_joystick_kthread, NULL, "klis3lv02d");
298 if (IS_ERR(adev.kthread)) {
299 lis3lv02d_decrease_use(&adev);
300 return PTR_ERR(adev.kthread);
301 }
302
303 return 0;
304}
305
306static void lis3lv02d_joystick_close(struct input_dev *input)
307{
308 kthread_stop(adev.kthread);
309 lis3lv02d_decrease_use(&adev);
310}
311
312
313static inline void lis3lv02d_calibrate_joystick(void)
314{
315 lis3lv02d_get_xyz(adev.device->handle, &adev.xcalib, &adev.ycalib, &adev.zcalib);
316}
317
318static int lis3lv02d_joystick_enable(void)
319{
320 int err;
321
322 if (adev.idev)
323 return -EINVAL;
324
325 adev.idev = input_allocate_device();
326 if (!adev.idev)
327 return -ENOMEM;
328
329 lis3lv02d_calibrate_joystick();
330
331 adev.idev->name = "ST LIS3LV02DL Accelerometer";
332 adev.idev->phys = DRIVER_NAME "/input0";
333 adev.idev->id.bustype = BUS_HOST;
334 adev.idev->id.vendor = 0;
335 adev.idev->dev.parent = &adev.pdev->dev;
336 adev.idev->open = lis3lv02d_joystick_open;
337 adev.idev->close = lis3lv02d_joystick_close;
338
339 set_bit(EV_ABS, adev.idev->evbit);
340 input_set_abs_params(adev.idev, ABS_X, -MDPS_MAX_VAL, MDPS_MAX_VAL, 3, 3);
341 input_set_abs_params(adev.idev, ABS_Y, -MDPS_MAX_VAL, MDPS_MAX_VAL, 3, 3);
342 input_set_abs_params(adev.idev, ABS_Z, -MDPS_MAX_VAL, MDPS_MAX_VAL, 3, 3);
343
344 err = input_register_device(adev.idev);
345 if (err) {
346 input_free_device(adev.idev);
347 adev.idev = NULL;
348 }
349
350 return err;
351}
352
353static void lis3lv02d_joystick_disable(void)
354{
355 if (!adev.idev)
356 return;
357
358 input_unregister_device(adev.idev);
359 adev.idev = NULL;
360}
361
362
363/*
364 * Initialise the accelerometer and the various subsystems.
365 * Should be rather independent of the bus system.
366 */
367static int lis3lv02d_init_device(struct acpi_lis3lv02d *dev)
368{
369 mutex_init(&dev->lock);
370 lis3lv02d_add_fs(dev->device);
371 lis3lv02d_increase_use(dev);
372
373 if (lis3lv02d_joystick_enable())
374 printk(KERN_ERR DRIVER_NAME ": joystick initialization failed\n");
375
376 lis3lv02d_decrease_use(dev);
377 return 0;
378}
379
380static int lis3lv02d_dmi_matched(const struct dmi_system_id *dmi)
381{
382 adev.ac = *((struct axis_conversion *)dmi->driver_data);
383 printk(KERN_INFO DRIVER_NAME ": hardware type %s found.\n", dmi->ident);
384
385 return 1;
386}
387
388/* Represents, for each axis seen by userspace, the corresponding hw axis (+1).
389 * If the value is negative, the opposite of the hw value is used. */
390static struct axis_conversion lis3lv02d_axis_normal = {1, 2, 3};
391static struct axis_conversion lis3lv02d_axis_y_inverted = {1, -2, 3};
392static struct axis_conversion lis3lv02d_axis_x_inverted = {-1, 2, 3};
393static struct axis_conversion lis3lv02d_axis_z_inverted = {1, 2, -3};
394static struct axis_conversion lis3lv02d_axis_xy_rotated_left = {-2, 1, 3};
395static struct axis_conversion lis3lv02d_axis_xy_swap_inverted = {-2, -1, 3};
396
397#define AXIS_DMI_MATCH(_ident, _name, _axis) { \
398 .ident = _ident, \
399 .callback = lis3lv02d_dmi_matched, \
400 .matches = { \
401 DMI_MATCH(DMI_PRODUCT_NAME, _name) \
402 }, \
403 .driver_data = &lis3lv02d_axis_##_axis \
404}
405static struct dmi_system_id lis3lv02d_dmi_ids[] = {
406 /* product names are truncated to match all variants of the same model */
407 AXIS_DMI_MATCH("NC64x0", "HP Compaq nc64", x_inverted),
408 AXIS_DMI_MATCH("NC84x0", "HP Compaq nc84", z_inverted),
409 AXIS_DMI_MATCH("NX9420", "HP Compaq nx9420", x_inverted),
410 AXIS_DMI_MATCH("NW9440", "HP Compaq nw9440", x_inverted),
411 AXIS_DMI_MATCH("NC2510", "HP Compaq 2510", y_inverted),
412 AXIS_DMI_MATCH("NC8510", "HP Compaq 8510", xy_swap_inverted),
413 AXIS_DMI_MATCH("HP2133", "HP 2133", xy_rotated_left),
414 { NULL, }
415/* Laptop models without axis info (yet):
416 * "NC651xx" "HP Compaq 651"
417 * "NC671xx" "HP Compaq 671"
418 * "NC6910" "HP Compaq 6910"
419 * HP Compaq 8710x Notebook PC / Mobile Workstation
420 * "NC2400" "HP Compaq nc2400"
421 * "NX74x0" "HP Compaq nx74"
422 * "NX6325" "HP Compaq nx6325"
423 * "NC4400" "HP Compaq nc4400"
424 */
425};
426
427static int lis3lv02d_add(struct acpi_device *device)
428{
429 u8 val;
430
431 if (!device)
432 return -EINVAL;
433
434 adev.device = device;
435 strcpy(acpi_device_name(device), DRIVER_NAME);
436 strcpy(acpi_device_class(device), ACPI_MDPS_CLASS);
437 device->driver_data = &adev;
438
439 lis3lv02d_acpi_read(device->handle, WHO_AM_I, &val);
440 if ((val != LIS3LV02DL_ID) && (val != LIS302DL_ID)) {
441 printk(KERN_ERR DRIVER_NAME
442 ": Accelerometer chip not LIS3LV02D{L,Q}\n");
443 }
444
445 /* If possible use a "standard" axes order */
446 if (dmi_check_system(lis3lv02d_dmi_ids) == 0) {
447 printk(KERN_INFO DRIVER_NAME ": laptop model unknown, "
448 "using default axes configuration\n");
449 adev.ac = lis3lv02d_axis_normal;
450 }
451
452 return lis3lv02d_init_device(&adev);
453}
454
455static int lis3lv02d_remove(struct acpi_device *device, int type)
456{
457 if (!device)
458 return -EINVAL;
459
460 lis3lv02d_joystick_disable();
461 lis3lv02d_poweroff(device->handle);
462
463 return lis3lv02d_remove_fs();
464}
465
466
467/* Sysfs stuff */
468static ssize_t lis3lv02d_position_show(struct device *dev,
469 struct device_attribute *attr, char *buf)
470{
471 int x, y, z;
472
473 lis3lv02d_increase_use(&adev);
474 lis3lv02d_get_xyz(adev.device->handle, &x, &y, &z);
475 lis3lv02d_decrease_use(&adev);
476 return sprintf(buf, "(%d,%d,%d)\n", x, y, z);
477}
478
479static ssize_t lis3lv02d_calibrate_show(struct device *dev,
480 struct device_attribute *attr, char *buf)
481{
482 return sprintf(buf, "(%d,%d,%d)\n", adev.xcalib, adev.ycalib, adev.zcalib);
483}
484
485static ssize_t lis3lv02d_calibrate_store(struct device *dev,
486 struct device_attribute *attr,
487 const char *buf, size_t count)
488{
489 lis3lv02d_increase_use(&adev);
490 lis3lv02d_calibrate_joystick();
491 lis3lv02d_decrease_use(&adev);
492 return count;
493}
494
495/* conversion between sampling rate and the register values */
496static int lis3lv02dl_df_val[4] = {40, 160, 640, 2560};
497static ssize_t lis3lv02d_rate_show(struct device *dev,
498 struct device_attribute *attr, char *buf)
499{
500 u8 ctrl;
501 int val;
502
503 lis3lv02d_increase_use(&adev);
504 lis3lv02d_acpi_read(adev.device->handle, CTRL_REG1, &ctrl);
505 lis3lv02d_decrease_use(&adev);
506 val = (ctrl & (CTRL1_DF0 | CTRL1_DF1)) >> 4;
507 return sprintf(buf, "%d\n", lis3lv02dl_df_val[val]);
508}
509
510static DEVICE_ATTR(position, S_IRUGO, lis3lv02d_position_show, NULL);
511static DEVICE_ATTR(calibrate, S_IRUGO|S_IWUSR, lis3lv02d_calibrate_show,
512 lis3lv02d_calibrate_store);
513static DEVICE_ATTR(rate, S_IRUGO, lis3lv02d_rate_show, NULL);
514
515static struct attribute *lis3lv02d_attributes[] = {
516 &dev_attr_position.attr,
517 &dev_attr_calibrate.attr,
518 &dev_attr_rate.attr,
519 NULL
520};
521
522static struct attribute_group lis3lv02d_attribute_group = {
523 .attrs = lis3lv02d_attributes
524};
525
526static int lis3lv02d_add_fs(struct acpi_device *device)
527{
528 adev.pdev = platform_device_register_simple(DRIVER_NAME, -1, NULL, 0);
529 if (IS_ERR(adev.pdev))
530 return PTR_ERR(adev.pdev);
531
532 return sysfs_create_group(&adev.pdev->dev.kobj, &lis3lv02d_attribute_group);
533}
534
535static int lis3lv02d_remove_fs(void)
536{
537 sysfs_remove_group(&adev.pdev->dev.kobj, &lis3lv02d_attribute_group);
538 platform_device_unregister(adev.pdev);
539 return 0;
540}
541
542/* For the HP MDPS aka 3D Driveguard */
543static struct acpi_driver lis3lv02d_driver = {
544 .name = DRIVER_NAME,
545 .class = ACPI_MDPS_CLASS,
546 .ids = lis3lv02d_device_ids,
547 .ops = {
548 .add = lis3lv02d_add,
549 .remove = lis3lv02d_remove,
550 .suspend = lis3lv02d_suspend,
551 .resume = lis3lv02d_resume,
552 }
553};
554
555static int __init lis3lv02d_init_module(void)
556{
557 int ret;
558
559 if (acpi_disabled)
560 return -ENODEV;
561
562 ret = acpi_bus_register_driver(&lis3lv02d_driver);
563 if (ret < 0)
564 return ret;
565
566 printk(KERN_INFO DRIVER_NAME " driver loaded.\n");
567
568 return 0;
569}
570
571static void __exit lis3lv02d_exit_module(void)
572{
573 acpi_bus_unregister_driver(&lis3lv02d_driver);
574}
575
576MODULE_DESCRIPTION("ST LIS3LV02Dx three-axis digital accelerometer driver");
577MODULE_AUTHOR("Yan Burman and Eric Piel");
578MODULE_LICENSE("GPL");
579
580module_init(lis3lv02d_init_module);
581module_exit(lis3lv02d_exit_module);
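The axis_conversion triplets selected by the DMI table above are signed, 1-based indexes into the raw (x, y, z) reading; a negative entry picks the same hardware axis with its sign inverted, exactly as lis3lv02d_get_axis() does. A small user-space sketch of the xy_rotated_left mapping {-2, 1, 3}, with made-up raw values:

#include <stdio.h>

/* Same lookup rule as lis3lv02d_get_axis(): 1..3 pick a hw axis,
 * a negative index picks that axis with the sign inverted. */
static int get_axis(signed char axis, int hw_values[3])
{
	return axis > 0 ? hw_values[axis - 1] : -hw_values[-axis - 1];
}

int main(void)
{
	int hw[3] = { 100, 200, 300 };	/* hypothetical raw x, y, z */

	/* HP 2133 mapping {-2, 1, 3}: logical x = -hw y, y = hw x, z = hw z */
	printf("%d %d %d\n", get_axis(-2, hw), get_axis(1, hw), get_axis(3, hw));
	return 0;	/* prints: -200 100 300 */
}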
diff --git a/drivers/hwmon/lis3lv02d.h b/drivers/hwmon/lis3lv02d.h
new file mode 100644
index 000000000000..330cfc60e948
--- /dev/null
+++ b/drivers/hwmon/lis3lv02d.h
@@ -0,0 +1,149 @@
1/*
2 * lis3lv02d.h - ST LIS3LV02DL accelerometer driver
3 *
4 * Copyright (C) 2007-2008 Yan Burman
5 * Copyright (C) 2008 Eric Piel
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22/*
23 * The actual chip is an STMicroelectronics LIS3LV02DL or LIS3LV02DQ that seems to
24 * be connected via SPI. Several similar chips also exist (such as the LIS302DL or
25 * LIS3L02DQ), but they are not found in the HP laptops and have slightly different registers.
26 * They can also be connected via I²C.
27 */
28
29#define LIS3LV02DL_ID 0x3A /* Also the LIS3LV02DQ */
30#define LIS302DL_ID 0x3B /* Also the LIS202DL! */
31
32enum lis3lv02d_reg {
33 WHO_AM_I = 0x0F,
34 OFFSET_X = 0x16,
35 OFFSET_Y = 0x17,
36 OFFSET_Z = 0x18,
37 GAIN_X = 0x19,
38 GAIN_Y = 0x1A,
39 GAIN_Z = 0x1B,
40 CTRL_REG1 = 0x20,
41 CTRL_REG2 = 0x21,
42 CTRL_REG3 = 0x22,
43 HP_FILTER_RESET = 0x23,
44 STATUS_REG = 0x27,
45 OUTX_L = 0x28,
46 OUTX_H = 0x29,
47 OUTY_L = 0x2A,
48 OUTY_H = 0x2B,
49 OUTZ_L = 0x2C,
50 OUTZ_H = 0x2D,
51 FF_WU_CFG = 0x30,
52 FF_WU_SRC = 0x31,
53 FF_WU_ACK = 0x32,
54 FF_WU_THS_L = 0x34,
55 FF_WU_THS_H = 0x35,
56 FF_WU_DURATION = 0x36,
57 DD_CFG = 0x38,
58 DD_SRC = 0x39,
59 DD_ACK = 0x3A,
60 DD_THSI_L = 0x3C,
61 DD_THSI_H = 0x3D,
62 DD_THSE_L = 0x3E,
63 DD_THSE_H = 0x3F,
64};
65
66enum lis3lv02d_ctrl1 {
67 CTRL1_Xen = 0x01,
68 CTRL1_Yen = 0x02,
69 CTRL1_Zen = 0x04,
70 CTRL1_ST = 0x08,
71 CTRL1_DF0 = 0x10,
72 CTRL1_DF1 = 0x20,
73 CTRL1_PD0 = 0x40,
74 CTRL1_PD1 = 0x80,
75};
76enum lis3lv02d_ctrl2 {
77 CTRL2_DAS = 0x01,
78 CTRL2_SIM = 0x02,
79 CTRL2_DRDY = 0x04,
80 CTRL2_IEN = 0x08,
81 CTRL2_BOOT = 0x10,
82 CTRL2_BLE = 0x20,
83 CTRL2_BDU = 0x40, /* Block Data Update */
84 CTRL2_FS = 0x80, /* Full Scale selection */
85};
86
87
88enum lis3lv02d_ctrl3 {
89 CTRL3_CFS0 = 0x01,
90 CTRL3_CFS1 = 0x02,
91 CTRL3_FDS = 0x10,
92 CTRL3_HPFF = 0x20,
93 CTRL3_HPDD = 0x40,
94 CTRL3_ECK = 0x80,
95};
96
97enum lis3lv02d_status_reg {
98 STATUS_XDA = 0x01,
99 STATUS_YDA = 0x02,
100 STATUS_ZDA = 0x04,
101 STATUS_XYZDA = 0x08,
102 STATUS_XOR = 0x10,
103 STATUS_YOR = 0x20,
104 STATUS_ZOR = 0x40,
105 STATUS_XYZOR = 0x80,
106};
107
108enum lis3lv02d_ff_wu_cfg {
109 FF_WU_CFG_XLIE = 0x01,
110 FF_WU_CFG_XHIE = 0x02,
111 FF_WU_CFG_YLIE = 0x04,
112 FF_WU_CFG_YHIE = 0x08,
113 FF_WU_CFG_ZLIE = 0x10,
114 FF_WU_CFG_ZHIE = 0x20,
115 FF_WU_CFG_LIR = 0x40,
116 FF_WU_CFG_AOI = 0x80,
117};
118
119enum lis3lv02d_ff_wu_src {
120 FF_WU_SRC_XL = 0x01,
121 FF_WU_SRC_XH = 0x02,
122 FF_WU_SRC_YL = 0x04,
123 FF_WU_SRC_YH = 0x08,
124 FF_WU_SRC_ZL = 0x10,
125 FF_WU_SRC_ZH = 0x20,
126 FF_WU_SRC_IA = 0x40,
127};
128
129enum lis3lv02d_dd_cfg {
130 DD_CFG_XLIE = 0x01,
131 DD_CFG_XHIE = 0x02,
132 DD_CFG_YLIE = 0x04,
133 DD_CFG_YHIE = 0x08,
134 DD_CFG_ZLIE = 0x10,
135 DD_CFG_ZHIE = 0x20,
136 DD_CFG_LIR = 0x40,
137 DD_CFG_IEND = 0x80,
138};
139
140enum lis3lv02d_dd_src {
141 DD_SRC_XL = 0x01,
142 DD_SRC_XH = 0x02,
143 DD_SRC_YL = 0x04,
144 DD_SRC_YH = 0x08,
145 DD_SRC_ZL = 0x10,
146 DD_SRC_ZH = 0x20,
147 DD_SRC_IA = 0x40,
148};
149
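
Each output axis is split across a little-endian low/high register pair (OUTX_L/OUTX_H and so on), and WHO_AM_I identifies the chip variant. A minimal sketch of how a consumer of this register map typically assembles a sample, assuming a caller-supplied read_reg() accessor (hypothetical; the real driver reads the registers through ACPI methods):

#include <stdint.h>

/* Hypothetical single-register accessor supplied by the bus layer. */
typedef uint8_t (*read_reg_fn)(uint8_t reg);

/* Combine a low/high output register pair into one signed 16-bit sample. */
static int16_t lis3_read_axis(read_reg_fn read_reg, uint8_t reg_l, uint8_t reg_h)
{
        uint16_t lo = read_reg(reg_l);          /* e.g. OUTX_L (0x28) */
        uint16_t hi = read_reg(reg_h);          /* e.g. OUTX_H (0x29) */

        return (int16_t)((hi << 8) | lo);
}

/* Check the identity register before trusting anything else. */
static int lis3_check_id(read_reg_fn read_reg)
{
        uint8_t id = read_reg(0x0F);            /* WHO_AM_I */

        return id == 0x3A || id == 0x3B;        /* LIS3LV02DL/DQ or LIS302DL */
}
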
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
index 3ff0285396fa..cfc1ee90f5a3 100644
--- a/drivers/hwmon/lm85.c
+++ b/drivers/hwmon/lm85.c
@@ -39,7 +39,8 @@
39static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END }; 39static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
40 40
41/* Insmod parameters */ 41/* Insmod parameters */
42I2C_CLIENT_INSMOD_6(lm85b, lm85c, adm1027, adt7463, emc6d100, emc6d102); 42I2C_CLIENT_INSMOD_7(lm85b, lm85c, adm1027, adt7463, adt7468, emc6d100,
43 emc6d102);
43 44
44/* The LM85 registers */ 45/* The LM85 registers */
45 46
@@ -59,6 +60,12 @@ I2C_CLIENT_INSMOD_6(lm85b, lm85c, adm1027, adt7463, emc6d100, emc6d102);
59 60
60#define LM85_REG_COMPANY 0x3e 61#define LM85_REG_COMPANY 0x3e
61#define LM85_REG_VERSTEP 0x3f 62#define LM85_REG_VERSTEP 0x3f
63
64#define ADT7468_REG_CFG5 0x7c
65#define ADT7468_OFF64 0x01
66#define IS_ADT7468_OFF64(data) \
67 ((data)->type == adt7468 && !((data)->cfg5 & ADT7468_OFF64))
68
62/* These are the recognized values for the above regs */ 69/* These are the recognized values for the above regs */
63#define LM85_COMPANY_NATIONAL 0x01 70#define LM85_COMPANY_NATIONAL 0x01
64#define LM85_COMPANY_ANALOG_DEV 0x41 71#define LM85_COMPANY_ANALOG_DEV 0x41
@@ -70,6 +77,8 @@ I2C_CLIENT_INSMOD_6(lm85b, lm85c, adm1027, adt7463, emc6d100, emc6d102);
70#define LM85_VERSTEP_ADM1027 0x60 77#define LM85_VERSTEP_ADM1027 0x60
71#define LM85_VERSTEP_ADT7463 0x62 78#define LM85_VERSTEP_ADT7463 0x62
72#define LM85_VERSTEP_ADT7463C 0x6A 79#define LM85_VERSTEP_ADT7463C 0x6A
80#define LM85_VERSTEP_ADT7468_1 0x71
81#define LM85_VERSTEP_ADT7468_2 0x72
73#define LM85_VERSTEP_EMC6D100_A0 0x60 82#define LM85_VERSTEP_EMC6D100_A0 0x60
74#define LM85_VERSTEP_EMC6D100_A1 0x61 83#define LM85_VERSTEP_EMC6D100_A1 0x61
75#define LM85_VERSTEP_EMC6D102 0x65 84#define LM85_VERSTEP_EMC6D102 0x65
@@ -306,6 +315,7 @@ struct lm85_data {
306 u8 vid; /* Register value */ 315 u8 vid; /* Register value */
307 u8 vrm; /* VRM version */ 316 u8 vrm; /* VRM version */
308 u32 alarms; /* Register encoding, combined */ 317 u32 alarms; /* Register encoding, combined */
318 u8 cfg5; /* Config Register 5 on ADT7468 */
309 struct lm85_autofan autofan[3]; 319 struct lm85_autofan autofan[3];
310 struct lm85_zone zone[3]; 320 struct lm85_zone zone[3];
311}; 321};
@@ -685,6 +695,9 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
685 struct lm85_data *data = i2c_get_clientdata(client); 695 struct lm85_data *data = i2c_get_clientdata(client);
686 long val = simple_strtol(buf, NULL, 10); 696 long val = simple_strtol(buf, NULL, 10);
687 697
698 if (IS_ADT7468_OFF64(data))
699 val += 64;
700
688 mutex_lock(&data->update_lock); 701 mutex_lock(&data->update_lock);
689 data->temp_min[nr] = TEMP_TO_REG(val); 702 data->temp_min[nr] = TEMP_TO_REG(val);
690 lm85_write_value(client, LM85_REG_TEMP_MIN(nr), data->temp_min[nr]); 703 lm85_write_value(client, LM85_REG_TEMP_MIN(nr), data->temp_min[nr]);
@@ -708,6 +721,9 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
708 struct lm85_data *data = i2c_get_clientdata(client); 721 struct lm85_data *data = i2c_get_clientdata(client);
709 long val = simple_strtol(buf, NULL, 10); 722 long val = simple_strtol(buf, NULL, 10);
710 723
724 if (IS_ADT7468_OFF64(data))
725 val += 64;
726
711 mutex_lock(&data->update_lock); 727 mutex_lock(&data->update_lock);
712 data->temp_max[nr] = TEMP_TO_REG(val); 728 data->temp_max[nr] = TEMP_TO_REG(val);
713 lm85_write_value(client, LM85_REG_TEMP_MAX(nr), data->temp_max[nr]); 729 lm85_write_value(client, LM85_REG_TEMP_MAX(nr), data->temp_max[nr]);
@@ -1163,6 +1179,10 @@ static int lm85_detect(struct i2c_client *client, int kind,
1163 case LM85_VERSTEP_ADT7463C: 1179 case LM85_VERSTEP_ADT7463C:
1164 kind = adt7463; 1180 kind = adt7463;
1165 break; 1181 break;
1182 case LM85_VERSTEP_ADT7468_1:
1183 case LM85_VERSTEP_ADT7468_2:
1184 kind = adt7468;
1185 break;
1166 } 1186 }
1167 } else if (company == LM85_COMPANY_SMSC) { 1187 } else if (company == LM85_COMPANY_SMSC) {
1168 switch (verstep) { 1188 switch (verstep) {
@@ -1195,6 +1215,9 @@ static int lm85_detect(struct i2c_client *client, int kind,
1195 case adt7463: 1215 case adt7463:
1196 type_name = "adt7463"; 1216 type_name = "adt7463";
1197 break; 1217 break;
1218 case adt7468:
1219 type_name = "adt7468";
1220 break;
1198 case emc6d100: 1221 case emc6d100:
1199 type_name = "emc6d100"; 1222 type_name = "emc6d100";
1200 break; 1223 break;
@@ -1246,10 +1269,11 @@ static int lm85_probe(struct i2c_client *client,
1246 if (err) 1269 if (err)
1247 goto err_kfree; 1270 goto err_kfree;
1248 1271
1249 /* The ADT7463 has an optional VRM 10 mode where pin 21 is used 1272 /* The ADT7463/68 have an optional VRM 10 mode where pin 21 is used
1250 as a sixth digital VID input rather than an analog input. */ 1273 as a sixth digital VID input rather than an analog input. */
1251 data->vid = lm85_read_value(client, LM85_REG_VID); 1274 data->vid = lm85_read_value(client, LM85_REG_VID);
1252 if (!(data->type == adt7463 && (data->vid & 0x80))) 1275 if (!((data->type == adt7463 || data->type == adt7468) &&
1276 (data->vid & 0x80)))
1253 if ((err = sysfs_create_group(&client->dev.kobj, 1277 if ((err = sysfs_create_group(&client->dev.kobj,
1254 &lm85_group_in4))) 1278 &lm85_group_in4)))
1255 goto err_remove_files; 1279 goto err_remove_files;
@@ -1357,7 +1381,8 @@ static struct lm85_data *lm85_update_device(struct device *dev)
1357 * There are 2 additional resolution bits per channel and we 1381 * There are 2 additional resolution bits per channel and we
1358 * have room for 4, so we shift them to the left. 1382 * have room for 4, so we shift them to the left.
1359 */ 1383 */
1360 if (data->type == adm1027 || data->type == adt7463) { 1384 if (data->type == adm1027 || data->type == adt7463 ||
1385 data->type == adt7468) {
1361 int ext1 = lm85_read_value(client, 1386 int ext1 = lm85_read_value(client,
1362 ADM1027_REG_EXTEND_ADC1); 1387 ADM1027_REG_EXTEND_ADC1);
1363 int ext2 = lm85_read_value(client, 1388 int ext2 = lm85_read_value(client,
@@ -1382,16 +1407,23 @@ static struct lm85_data *lm85_update_device(struct device *dev)
1382 lm85_read_value(client, LM85_REG_FAN(i)); 1407 lm85_read_value(client, LM85_REG_FAN(i));
1383 } 1408 }
1384 1409
1385 if (!(data->type == adt7463 && (data->vid & 0x80))) { 1410 if (!((data->type == adt7463 || data->type == adt7468) &&
1411 (data->vid & 0x80))) {
1386 data->in[4] = lm85_read_value(client, 1412 data->in[4] = lm85_read_value(client,
1387 LM85_REG_IN(4)); 1413 LM85_REG_IN(4));
1388 } 1414 }
1389 1415
1416 if (data->type == adt7468)
1417 data->cfg5 = lm85_read_value(client, ADT7468_REG_CFG5);
1418
1390 for (i = 0; i <= 2; ++i) { 1419 for (i = 0; i <= 2; ++i) {
1391 data->temp[i] = 1420 data->temp[i] =
1392 lm85_read_value(client, LM85_REG_TEMP(i)); 1421 lm85_read_value(client, LM85_REG_TEMP(i));
1393 data->pwm[i] = 1422 data->pwm[i] =
1394 lm85_read_value(client, LM85_REG_PWM(i)); 1423 lm85_read_value(client, LM85_REG_PWM(i));
1424
1425 if (IS_ADT7468_OFF64(data))
1426 data->temp[i] -= 64;
1395 } 1427 }
1396 1428
1397 data->alarms = lm85_read_value(client, LM85_REG_ALARM1); 1429 data->alarms = lm85_read_value(client, LM85_REG_ALARM1);
@@ -1446,7 +1478,8 @@ static struct lm85_data *lm85_update_device(struct device *dev)
1446 lm85_read_value(client, LM85_REG_FAN_MIN(i)); 1478 lm85_read_value(client, LM85_REG_FAN_MIN(i));
1447 } 1479 }
1448 1480
1449 if (!(data->type == adt7463 && (data->vid & 0x80))) { 1481 if (!((data->type == adt7463 || data->type == adt7468) &&
1482 (data->vid & 0x80))) {
1450 data->in_min[4] = lm85_read_value(client, 1483 data->in_min[4] = lm85_read_value(client,
1451 LM85_REG_IN_MIN(4)); 1484 LM85_REG_IN_MIN(4));
1452 data->in_max[4] = lm85_read_value(client, 1485 data->in_max[4] = lm85_read_value(client,
@@ -1481,6 +1514,13 @@ static struct lm85_data *lm85_update_device(struct device *dev)
1481 lm85_read_value(client, LM85_REG_AFAN_LIMIT(i)); 1514 lm85_read_value(client, LM85_REG_AFAN_LIMIT(i));
1482 data->zone[i].critical = 1515 data->zone[i].critical =
1483 lm85_read_value(client, LM85_REG_AFAN_CRITICAL(i)); 1516 lm85_read_value(client, LM85_REG_AFAN_CRITICAL(i));
1517
1518 if (IS_ADT7468_OFF64(data)) {
1519 data->temp_min[i] -= 64;
1520 data->temp_max[i] -= 64;
1521 data->zone[i].limit -= 64;
1522 data->zone[i].critical -= 64;
1523 }
1484 } 1524 }
1485 1525
1486 i = lm85_read_value(client, LM85_REG_AFAN_SPIKE1); 1526 i = lm85_read_value(client, LM85_REG_AFAN_SPIKE1);
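
The ADT7468-specific paths above revolve around its "Offset 64" temperature format: when bit 0 of configuration register 5 is clear, the chip reports temperatures shifted up by 64 °C, so the driver subtracts 64 from every raw reading and adds 64 back before writing limits. A standalone sketch of that arithmetic (helper names are illustrative, not taken from the driver):

#include <stdio.h>

/* Offset-64 format: register value = temperature in degrees C + 64,
 * one degree per LSB, mirroring the +/- 64 adjustments made above. */
static int off64_reg_to_celsius(int reg)
{
        return reg - 64;                /* read path */
}

static int off64_celsius_to_reg(int celsius)
{
        return celsius + 64;            /* write/limit path */
}

int main(void)
{
        printf("reg 89 -> %d C\n", off64_reg_to_celsius(89));    /* 25 C */
        printf("25 C  -> reg %d\n", off64_celsius_to_reg(25));   /* 89 */
        return 0;
}
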
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 3edeebc0b835..96a701866726 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -12,9 +12,9 @@
12 * made by National Semiconductor. Both have an increased remote 12 * made by National Semiconductor. Both have an increased remote
13 * temperature measurement accuracy (1 degree), and the LM99 13 * temperature measurement accuracy (1 degree), and the LM99
14 * additionally shifts remote temperatures (measured and limits) by 16 14 * additionally shifts remote temperatures (measured and limits) by 16
15 * degrees, which allows for higher temperatures measurement. The 15 * degrees, which allows for higher temperatures measurement.
16 * driver doesn't handle it since it can be done easily in user-space.
17 * Note that there is no way to differentiate between both chips. 16 * Note that there is no way to differentiate between both chips.
17 * When device is auto-detected, the driver will assume an LM99.
18 * 18 *
19 * This driver also supports the LM86, another sensor chip made by 19 * This driver also supports the LM86, another sensor chip made by
20 * National Semiconductor. It is exactly similar to the LM90 except it 20 * National Semiconductor. It is exactly similar to the LM90 except it
@@ -169,8 +169,8 @@ static const struct i2c_device_id lm90_id[] = {
169 { "adt7461", adt7461 }, 169 { "adt7461", adt7461 },
170 { "lm90", lm90 }, 170 { "lm90", lm90 },
171 { "lm86", lm86 }, 171 { "lm86", lm86 },
172 { "lm89", lm99 }, 172 { "lm89", lm86 },
173 { "lm99", lm99 }, /* Missing temperature offset */ 173 { "lm99", lm99 },
174 { "max6646", max6646 }, 174 { "max6646", max6646 },
175 { "max6647", max6646 }, 175 { "max6647", max6646 },
176 { "max6649", max6646 }, 176 { "max6649", max6646 },
@@ -366,6 +366,10 @@ static ssize_t show_temp8(struct device *dev, struct device_attribute *devattr,
366 else 366 else
367 temp = temp_from_s8(data->temp8[attr->index]); 367 temp = temp_from_s8(data->temp8[attr->index]);
368 368
369 /* +16 degrees offset for temp2 for the LM99 */
370 if (data->kind == lm99 && attr->index == 3)
371 temp += 16000;
372
369 return sprintf(buf, "%d\n", temp); 373 return sprintf(buf, "%d\n", temp);
370} 374}
371 375
@@ -385,6 +389,10 @@ static ssize_t set_temp8(struct device *dev, struct device_attribute *devattr,
385 long val = simple_strtol(buf, NULL, 10); 389 long val = simple_strtol(buf, NULL, 10);
386 int nr = attr->index; 390 int nr = attr->index;
387 391
392 /* +16 degrees offset for temp2 for the LM99 */
393 if (data->kind == lm99 && attr->index == 3)
394 val -= 16000;
395
388 mutex_lock(&data->update_lock); 396 mutex_lock(&data->update_lock);
389 if (data->kind == adt7461) 397 if (data->kind == adt7461)
390 data->temp8[nr] = temp_to_u8_adt7461(data, val); 398 data->temp8[nr] = temp_to_u8_adt7461(data, val);
@@ -411,6 +419,10 @@ static ssize_t show_temp11(struct device *dev, struct device_attribute *devattr,
411 else 419 else
412 temp = temp_from_s16(data->temp11[attr->index]); 420 temp = temp_from_s16(data->temp11[attr->index]);
413 421
422 /* +16 degrees offset for temp2 for the LM99 */
423 if (data->kind == lm99 && attr->index <= 2)
424 temp += 16000;
425
414 return sprintf(buf, "%d\n", temp); 426 return sprintf(buf, "%d\n", temp);
415} 427}
416 428
@@ -432,6 +444,10 @@ static ssize_t set_temp11(struct device *dev, struct device_attribute *devattr,
432 long val = simple_strtol(buf, NULL, 10); 444 long val = simple_strtol(buf, NULL, 10);
433 int nr = attr->index; 445 int nr = attr->index;
434 446
447 /* +16 degrees offset for temp2 for the LM99 */
448 if (data->kind == lm99 && attr->index <= 2)
449 val -= 16000;
450
435 mutex_lock(&data->update_lock); 451 mutex_lock(&data->update_lock);
436 if (data->kind == adt7461) 452 if (data->kind == adt7461)
437 data->temp11[nr] = temp_to_u16_adt7461(data, val); 453 data->temp11[nr] = temp_to_u16_adt7461(data, val);
@@ -461,9 +477,15 @@ static ssize_t show_temphyst(struct device *dev, struct device_attribute *devatt
461 477
462 if (data->kind == adt7461) 478 if (data->kind == adt7461)
463 temp = temp_from_u8_adt7461(data, data->temp8[attr->index]); 479 temp = temp_from_u8_adt7461(data, data->temp8[attr->index]);
480 else if (data->kind == max6646)
481 temp = temp_from_u8(data->temp8[attr->index]);
464 else 482 else
465 temp = temp_from_s8(data->temp8[attr->index]); 483 temp = temp_from_s8(data->temp8[attr->index]);
466 484
485 /* +16 degrees offset for temp2 for the LM99 */
486 if (data->kind == lm99 && attr->index == 3)
487 temp += 16000;
488
467 return sprintf(buf, "%d\n", temp - temp_from_s8(data->temp_hyst)); 489 return sprintf(buf, "%d\n", temp - temp_from_s8(data->temp_hyst));
468} 490}
469 491
@@ -473,12 +495,19 @@ static ssize_t set_temphyst(struct device *dev, struct device_attribute *dummy,
473 struct i2c_client *client = to_i2c_client(dev); 495 struct i2c_client *client = to_i2c_client(dev);
474 struct lm90_data *data = i2c_get_clientdata(client); 496 struct lm90_data *data = i2c_get_clientdata(client);
475 long val = simple_strtol(buf, NULL, 10); 497 long val = simple_strtol(buf, NULL, 10);
476 long hyst; 498 int temp;
477 499
478 mutex_lock(&data->update_lock); 500 mutex_lock(&data->update_lock);
479 hyst = temp_from_s8(data->temp8[2]) - val; 501 if (data->kind == adt7461)
502 temp = temp_from_u8_adt7461(data, data->temp8[2]);
503 else if (data->kind == max6646)
504 temp = temp_from_u8(data->temp8[2]);
505 else
506 temp = temp_from_s8(data->temp8[2]);
507
508 data->temp_hyst = hyst_to_reg(temp - val);
480 i2c_smbus_write_byte_data(client, LM90_REG_W_TCRIT_HYST, 509 i2c_smbus_write_byte_data(client, LM90_REG_W_TCRIT_HYST,
481 hyst_to_reg(hyst)); 510 data->temp_hyst);
482 mutex_unlock(&data->update_lock); 511 mutex_unlock(&data->update_lock);
483 return count; 512 return count;
484} 513}
@@ -682,6 +711,15 @@ static int lm90_detect(struct i2c_client *new_client, int kind,
682 } else 711 } else
683 if ((chip_id & 0xF0) == 0x30) { /* LM89/LM99 */ 712 if ((chip_id & 0xF0) == 0x30) { /* LM89/LM99 */
684 kind = lm99; 713 kind = lm99;
714 dev_info(&adapter->dev,
715 "Assuming LM99 chip at "
716 "0x%02x\n", address);
717 dev_info(&adapter->dev,
718 "If it is an LM89, pass "
719 "force_lm86=%d,0x%02x when "
720 "loading the lm90 driver\n",
721 i2c_adapter_id(adapter),
722 address);
685 } else 723 } else
686 if (address == 0x4C 724 if (address == 0x4C
687 && (chip_id & 0xF0) == 0x10) { /* LM86 */ 725 && (chip_id & 0xF0) == 0x10) { /* LM86 */
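
The LM99 handling added above is the same idea in millidegrees: the chip encodes remote temperatures 16 °C below their real value (which extends its range upward by 16 °C), so the sysfs show paths add 16000 m°C and the store paths subtract 16000 m°C before converting back to register form. A brief sketch of the symmetric adjustment (names are illustrative):

#include <stdio.h>

/* LM99 remote channel: register encodes (temperature - 16 C), values in mC. */
static long lm99_show_adjust(long reg_mc)
{
        return reg_mc + 16000;          /* what userspace should see */
}

static long lm99_store_adjust(long user_mc)
{
        return user_mc - 16000;         /* what goes back toward the register */
}

int main(void)
{
        printf("reg 54000 mC -> shown as %ld mC\n", lm99_show_adjust(54000));
        printf("user 70000 mC -> stored as %ld mC\n", lm99_store_adjust(70000));
        return 0;
}
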
diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c
index d4d1b859d4f1..fc12bd412e3a 100644
--- a/drivers/hwmon/w83781d.c
+++ b/drivers/hwmon/w83781d.c
@@ -1968,7 +1968,7 @@ exit:
1968 return res; 1968 return res;
1969} 1969}
1970 1970
1971static void __exit 1971static void
1972w83781d_isa_unregister(void) 1972w83781d_isa_unregister(void)
1973{ 1973{
1974 if (pdev) { 1974 if (pdev) {
@@ -2017,7 +2017,7 @@ w83781d_isa_register(void)
2017 return 0; 2017 return 0;
2018} 2018}
2019 2019
2020static void __exit 2020static void
2021w83781d_isa_unregister(void) 2021w83781d_isa_unregister(void)
2022{ 2022{
2023} 2023}
diff --git a/drivers/i2c/algos/i2c-algo-pcf.c b/drivers/i2c/algos/i2c-algo-pcf.c
index 1e328d19cd6d..3e01992230b8 100644
--- a/drivers/i2c/algos/i2c-algo-pcf.c
+++ b/drivers/i2c/algos/i2c-algo-pcf.c
@@ -135,7 +135,7 @@ static int wait_for_pin(struct i2c_algo_pcf_data *adap, int *status) {
135 *status = get_pcf(adap, 1); 135 *status = get_pcf(adap, 1);
136#ifndef STUB_I2C 136#ifndef STUB_I2C
137 while (timeout-- && (*status & I2C_PCF_PIN)) { 137 while (timeout-- && (*status & I2C_PCF_PIN)) {
138 adap->waitforpin(); 138 adap->waitforpin(adap->data);
139 *status = get_pcf(adap, 1); 139 *status = get_pcf(adap, 1);
140 } 140 }
141 if (*status & I2C_PCF_LAB) { 141 if (*status & I2C_PCF_LAB) {
@@ -208,7 +208,7 @@ static int pcf_init_8584 (struct i2c_algo_pcf_data *adap)
208 return -ENXIO; 208 return -ENXIO;
209 } 209 }
210 210
211 printk(KERN_DEBUG "i2c-algo-pcf.o: deteted and initialized PCF8584.\n"); 211 printk(KERN_DEBUG "i2c-algo-pcf.o: detected and initialized PCF8584.\n");
212 212
213 return 0; 213 return 0;
214} 214}
@@ -331,13 +331,16 @@ static int pcf_xfer(struct i2c_adapter *i2c_adap,
331 int i; 331 int i;
332 int ret=0, timeout, status; 332 int ret=0, timeout, status;
333 333
334 if (adap->xfer_begin)
335 adap->xfer_begin(adap->data);
334 336
335 /* Check for bus busy */ 337 /* Check for bus busy */
336 timeout = wait_for_bb(adap); 338 timeout = wait_for_bb(adap);
337 if (timeout) { 339 if (timeout) {
338 DEB2(printk(KERN_ERR "i2c-algo-pcf.o: " 340 DEB2(printk(KERN_ERR "i2c-algo-pcf.o: "
339 "Timeout waiting for BB in pcf_xfer\n");) 341 "Timeout waiting for BB in pcf_xfer\n");)
340 return -EIO; 342 i = -EIO;
343 goto out;
341 } 344 }
342 345
343 for (i = 0;ret >= 0 && i < num; i++) { 346 for (i = 0;ret >= 0 && i < num; i++) {
@@ -359,12 +362,14 @@ static int pcf_xfer(struct i2c_adapter *i2c_adap,
359 if (timeout) { 362 if (timeout) {
360 if (timeout == -EINTR) { 363 if (timeout == -EINTR) {
361 /* arbitration lost */ 364 /* arbitration lost */
362 return (-EINTR); 365 i = -EINTR;
366 goto out;
363 } 367 }
364 i2c_stop(adap); 368 i2c_stop(adap);
365 DEB2(printk(KERN_ERR "i2c-algo-pcf.o: Timeout waiting " 369 DEB2(printk(KERN_ERR "i2c-algo-pcf.o: Timeout waiting "
366 "for PIN(1) in pcf_xfer\n");) 370 "for PIN(1) in pcf_xfer\n");)
367 return (-EREMOTEIO); 371 i = -EREMOTEIO;
372 goto out;
368 } 373 }
369 374
370#ifndef STUB_I2C 375#ifndef STUB_I2C
@@ -372,7 +377,8 @@ static int pcf_xfer(struct i2c_adapter *i2c_adap,
372 if (status & I2C_PCF_LRB) { 377 if (status & I2C_PCF_LRB) {
373 i2c_stop(adap); 378 i2c_stop(adap);
374 DEB2(printk(KERN_ERR "i2c-algo-pcf.o: No LRB(1) in pcf_xfer\n");) 379 DEB2(printk(KERN_ERR "i2c-algo-pcf.o: No LRB(1) in pcf_xfer\n");)
375 return (-EREMOTEIO); 380 i = -EREMOTEIO;
381 goto out;
376 } 382 }
377#endif 383#endif
378 384
@@ -404,6 +410,9 @@ static int pcf_xfer(struct i2c_adapter *i2c_adap,
404 } 410 }
405 } 411 }
406 412
413out:
414 if (adap->xfer_end)
415 adap->xfer_end(adap->data);
407 return (i); 416 return (i);
408} 417}
409 418
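
The pcf_xfer() rework above replaces every early return with a jump to a single out: label, so that once the optional xfer_begin() hook has run, the matching xfer_end() hook is always called, whatever bus- or board-specific resource those hooks manage. A standalone sketch of that bracketing pattern (the claim/release functions are placeholders, not the real hooks):

#include <stdio.h>

/* Placeholders standing in for adapter-specific xfer_begin()/xfer_end(). */
static void bus_claim(void)   { puts("claim bus"); }
static void bus_release(void) { puts("release bus"); }

/* Fake per-message transfer that fails on the third message. */
static int do_one_message(int i)
{
        return (i == 2) ? -1 : 0;
}

static int xfer(int num)
{
        int i, ret = 0;

        bus_claim();                    /* xfer_begin() */
        for (i = 0; i < num; i++) {
                ret = do_one_message(i);
                if (ret < 0)
                        goto out;       /* no early return: cleanup still runs */
        }
out:
        bus_release();                  /* xfer_end() always runs */
        return ret < 0 ? ret : i;       /* error code or messages completed */
}

int main(void)
{
        printf("xfer returned %d\n", xfer(5));
        return 0;
}
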
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index acadbc51fc0f..7f95905bbb9d 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -97,6 +97,7 @@ config I2C_I801
97 ICH9 97 ICH9
98 Tolapai 98 Tolapai
99 ICH10 99 ICH10
100 PCH
100 101
101 This driver can also be built as a module. If so, the module 102 This driver can also be built as a module. If so, the module
102 will be called i2c-i801. 103 will be called i2c-i801.
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index 8164de1f4d72..3fcf78e906db 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -365,6 +365,7 @@ static int cpm_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
365 pmsg = &msgs[tptr]; 365 pmsg = &msgs[tptr];
366 if (pmsg->flags & I2C_M_RD) 366 if (pmsg->flags & I2C_M_RD)
367 ret = wait_event_interruptible_timeout(cpm->i2c_wait, 367 ret = wait_event_interruptible_timeout(cpm->i2c_wait,
368 (in_be16(&tbdf[tptr].cbd_sc) & BD_SC_NAK) ||
368 !(in_be16(&rbdf[rptr].cbd_sc) & BD_SC_EMPTY), 369 !(in_be16(&rbdf[rptr].cbd_sc) & BD_SC_EMPTY),
369 1 * HZ); 370 1 * HZ);
370 else 371 else
@@ -423,7 +424,6 @@ static const struct i2c_adapter cpm_ops = {
423 .owner = THIS_MODULE, 424 .owner = THIS_MODULE,
424 .name = "i2c-cpm", 425 .name = "i2c-cpm",
425 .algo = &cpm_i2c_algo, 426 .algo = &cpm_i2c_algo,
426 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
427}; 427};
428 428
429static int __devinit cpm_i2c_setup(struct cpm_i2c *cpm) 429static int __devinit cpm_i2c_setup(struct cpm_i2c *cpm)
diff --git a/drivers/i2c/busses/i2c-elektor.c b/drivers/i2c/busses/i2c-elektor.c
index 7f38c01fb3a0..0ed3ccb81b63 100644
--- a/drivers/i2c/busses/i2c-elektor.c
+++ b/drivers/i2c/busses/i2c-elektor.c
@@ -104,7 +104,8 @@ static int pcf_isa_getclock(void *data)
104 return (clock); 104 return (clock);
105} 105}
106 106
107static void pcf_isa_waitforpin(void) { 107static void pcf_isa_waitforpin(void *data)
108{
108 DEFINE_WAIT(wait); 109 DEFINE_WAIT(wait);
109 int timeout = 2; 110 int timeout = 2;
110 unsigned long flags; 111 unsigned long flags;
diff --git a/drivers/i2c/busses/i2c-highlander.c b/drivers/i2c/busses/i2c-highlander.c
index f4d22ae9d294..e5a8dae4a289 100644
--- a/drivers/i2c/busses/i2c-highlander.c
+++ b/drivers/i2c/busses/i2c-highlander.c
@@ -92,7 +92,7 @@ static void highlander_i2c_setup(struct highlander_i2c_dev *dev)
92static void smbus_write_data(u8 *src, u16 *dst, int len) 92static void smbus_write_data(u8 *src, u16 *dst, int len)
93{ 93{
94 for (; len > 1; len -= 2) { 94 for (; len > 1; len -= 2) {
95 *dst++ = be16_to_cpup((u16 *)src); 95 *dst++ = be16_to_cpup((__be16 *)src);
96 src += 2; 96 src += 2;
97 } 97 }
98 98
@@ -103,7 +103,7 @@ static void smbus_write_data(u8 *src, u16 *dst, int len)
103static void smbus_read_data(u16 *src, u8 *dst, int len) 103static void smbus_read_data(u16 *src, u8 *dst, int len)
104{ 104{
105 for (; len > 1; len -= 2) { 105 for (; len > 1; len -= 2) {
106 *(u16 *)dst = cpu_to_be16p(src++); 106 *(__be16 *)dst = cpu_to_be16p(src++);
107 dst += 2; 107 dst += 2;
108 } 108 }
109 109
diff --git a/drivers/i2c/busses/i2c-hydra.c b/drivers/i2c/busses/i2c-hydra.c
index 1098f21ace13..648aa7baff83 100644
--- a/drivers/i2c/busses/i2c-hydra.c
+++ b/drivers/i2c/busses/i2c-hydra.c
@@ -123,7 +123,7 @@ static int __devinit hydra_probe(struct pci_dev *dev,
123 hydra_adap.name)) 123 hydra_adap.name))
124 return -EBUSY; 124 return -EBUSY;
125 125
126 hydra_bit_data.data = ioremap(base, pci_resource_len(dev, 0)); 126 hydra_bit_data.data = pci_ioremap_bar(dev, 0);
127 if (hydra_bit_data.data == NULL) { 127 if (hydra_bit_data.data == NULL) {
128 release_mem_region(base+offsetof(struct Hydra, CachePD), 4); 128 release_mem_region(base+offsetof(struct Hydra, CachePD), 4);
129 return -ENODEV; 129 return -ENODEV;
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index dc7ea32b69a8..5123eb69a971 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -41,6 +41,7 @@
41 Tolapai 0x5032 32 hard yes yes yes 41 Tolapai 0x5032 32 hard yes yes yes
42 ICH10 0x3a30 32 hard yes yes yes 42 ICH10 0x3a30 32 hard yes yes yes
43 ICH10 0x3a60 32 hard yes yes yes 43 ICH10 0x3a60 32 hard yes yes yes
44 PCH 0x3b30 32 hard yes yes yes
44 45
45 Features supported by this driver: 46 Features supported by this driver:
46 Software PEC no 47 Software PEC no
@@ -576,6 +577,7 @@ static struct pci_device_id i801_ids[] = {
576 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TOLAPAI_1) }, 577 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TOLAPAI_1) },
577 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_4) }, 578 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_4) },
578 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_5) }, 579 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_5) },
580 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PCH_SMBUS) },
579 { 0, } 581 { 0, }
580}; 582};
581 583
@@ -599,6 +601,7 @@ static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id
599 case PCI_DEVICE_ID_INTEL_TOLAPAI_1: 601 case PCI_DEVICE_ID_INTEL_TOLAPAI_1:
600 case PCI_DEVICE_ID_INTEL_ICH10_4: 602 case PCI_DEVICE_ID_INTEL_ICH10_4:
601 case PCI_DEVICE_ID_INTEL_ICH10_5: 603 case PCI_DEVICE_ID_INTEL_ICH10_5:
604 case PCI_DEVICE_ID_INTEL_PCH_SMBUS:
602 i801_features |= FEATURE_I2C_BLOCK_READ; 605 i801_features |= FEATURE_I2C_BLOCK_READ;
603 /* fall through */ 606 /* fall through */
604 case PCI_DEVICE_ID_INTEL_82801DB_3: 607 case PCI_DEVICE_ID_INTEL_82801DB_3:
diff --git a/drivers/i2c/busses/i2c-parport.c b/drivers/i2c/busses/i2c-parport.c
index 59ba2086d2f9..a257cd5cd134 100644
--- a/drivers/i2c/busses/i2c-parport.c
+++ b/drivers/i2c/busses/i2c-parport.c
@@ -189,8 +189,6 @@ static void i2c_parport_attach (struct parport *port)
189 if (adapter_parm[type].init.val) 189 if (adapter_parm[type].init.val)
190 line_set(port, 1, &adapter_parm[type].init); 190 line_set(port, 1, &adapter_parm[type].init);
191 191
192 parport_release(adapter->pdev);
193
194 if (i2c_bit_add_bus(&adapter->adapter) < 0) { 192 if (i2c_bit_add_bus(&adapter->adapter) < 0) {
195 printk(KERN_ERR "i2c-parport: Unable to register with I2C\n"); 193 printk(KERN_ERR "i2c-parport: Unable to register with I2C\n");
196 goto ERROR1; 194 goto ERROR1;
@@ -202,6 +200,7 @@ static void i2c_parport_attach (struct parport *port)
202 return; 200 return;
203 201
204ERROR1: 202ERROR1:
203 parport_release(adapter->pdev);
205 parport_unregister_device(adapter->pdev); 204 parport_unregister_device(adapter->pdev);
206ERROR0: 205ERROR0:
207 kfree(adapter); 206 kfree(adapter);
@@ -221,6 +220,7 @@ static void i2c_parport_detach (struct parport *port)
221 if (adapter_parm[type].init.val) 220 if (adapter_parm[type].init.val)
222 line_set(port, 0, &adapter_parm[type].init); 221 line_set(port, 0, &adapter_parm[type].init);
223 222
223 parport_release(adapter->pdev);
224 parport_unregister_device(adapter->pdev); 224 parport_unregister_device(adapter->pdev);
225 if (prev) 225 if (prev)
226 prev->next = adapter->next; 226 prev->next = adapter->next;
diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c
index dcf2045b5222..0bdb2d7f0570 100644
--- a/drivers/i2c/busses/i2c-pmcmsp.c
+++ b/drivers/i2c/busses/i2c-pmcmsp.c
@@ -486,7 +486,7 @@ static enum pmcmsptwi_xfer_result pmcmsptwi_xfer_cmd(
486 486
487 if (cmd->type == MSP_TWI_CMD_WRITE || 487 if (cmd->type == MSP_TWI_CMD_WRITE ||
488 cmd->type == MSP_TWI_CMD_WRITE_READ) { 488 cmd->type == MSP_TWI_CMD_WRITE_READ) {
489 __be64 tmp = cpu_to_be64p((u64 *)cmd->write_data); 489 u64 tmp = be64_to_cpup((__be64 *)cmd->write_data);
490 tmp >>= (MSP_MAX_BYTES_PER_RW - cmd->write_len) * 8; 490 tmp >>= (MSP_MAX_BYTES_PER_RW - cmd->write_len) * 8;
491 dev_dbg(&pmcmsptwi_adapter.dev, "Writing 0x%016llx\n", tmp); 491 dev_dbg(&pmcmsptwi_adapter.dev, "Writing 0x%016llx\n", tmp);
492 pmcmsptwi_writel(tmp & 0x00000000ffffffffLL, 492 pmcmsptwi_writel(tmp & 0x00000000ffffffffLL,
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index c772e02c2803..b7434d24904e 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -56,6 +56,7 @@ enum s3c24xx_i2c_state {
56struct s3c24xx_i2c { 56struct s3c24xx_i2c {
57 spinlock_t lock; 57 spinlock_t lock;
58 wait_queue_head_t wait; 58 wait_queue_head_t wait;
59 unsigned int suspended:1;
59 60
60 struct i2c_msg *msg; 61 struct i2c_msg *msg;
61 unsigned int msg_num; 62 unsigned int msg_num;
@@ -507,7 +508,7 @@ static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c, struct i2c_msg *msgs, int
507 unsigned long timeout; 508 unsigned long timeout;
508 int ret; 509 int ret;
509 510
510 if (!readl(i2c->regs + S3C2410_IICCON) & S3C2410_IICCON_IRQEN) 511 if (i2c->suspended)
511 return -EIO; 512 return -EIO;
512 513
513 ret = s3c24xx_i2c_set_master(i2c); 514 ret = s3c24xx_i2c_set_master(i2c);
@@ -986,17 +987,26 @@ static int s3c24xx_i2c_remove(struct platform_device *pdev)
986} 987}
987 988
988#ifdef CONFIG_PM 989#ifdef CONFIG_PM
990static int s3c24xx_i2c_suspend_late(struct platform_device *dev,
991 pm_message_t msg)
992{
993 struct s3c24xx_i2c *i2c = platform_get_drvdata(dev);
994 i2c->suspended = 1;
995 return 0;
996}
997
989static int s3c24xx_i2c_resume(struct platform_device *dev) 998static int s3c24xx_i2c_resume(struct platform_device *dev)
990{ 999{
991 struct s3c24xx_i2c *i2c = platform_get_drvdata(dev); 1000 struct s3c24xx_i2c *i2c = platform_get_drvdata(dev);
992 1001
993 if (i2c != NULL) 1002 i2c->suspended = 0;
994 s3c24xx_i2c_init(i2c); 1003 s3c24xx_i2c_init(i2c);
995 1004
996 return 0; 1005 return 0;
997} 1006}
998 1007
999#else 1008#else
1009#define s3c24xx_i2c_suspend_late NULL
1000#define s3c24xx_i2c_resume NULL 1010#define s3c24xx_i2c_resume NULL
1001#endif 1011#endif
1002 1012
@@ -1005,6 +1015,7 @@ static int s3c24xx_i2c_resume(struct platform_device *dev)
1005static struct platform_driver s3c2410_i2c_driver = { 1015static struct platform_driver s3c2410_i2c_driver = {
1006 .probe = s3c24xx_i2c_probe, 1016 .probe = s3c24xx_i2c_probe,
1007 .remove = s3c24xx_i2c_remove, 1017 .remove = s3c24xx_i2c_remove,
1018 .suspend_late = s3c24xx_i2c_suspend_late,
1008 .resume = s3c24xx_i2c_resume, 1019 .resume = s3c24xx_i2c_resume,
1009 .driver = { 1020 .driver = {
1010 .owner = THIS_MODULE, 1021 .owner = THIS_MODULE,
@@ -1015,6 +1026,7 @@ static struct platform_driver s3c2410_i2c_driver = {
1015static struct platform_driver s3c2440_i2c_driver = { 1026static struct platform_driver s3c2440_i2c_driver = {
1016 .probe = s3c24xx_i2c_probe, 1027 .probe = s3c24xx_i2c_probe,
1017 .remove = s3c24xx_i2c_remove, 1028 .remove = s3c24xx_i2c_remove,
1029 .suspend_late = s3c24xx_i2c_suspend_late,
1018 .resume = s3c24xx_i2c_resume, 1030 .resume = s3c24xx_i2c_resume,
1019 .driver = { 1031 .driver = {
1020 .owner = THIS_MODULE, 1032 .owner = THIS_MODULE,
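
The replaced busy-check in s3c24xx_i2c_doxfer() also appears to have had an operator-precedence problem: in `!readl(...) & S3C2410_IICCON_IRQEN` the `!` is applied before the `&`, so the expression ANDs a 0-or-1 result with the interrupt-enable mask and, unless that mask happens to include bit 0, is always false; tracking an explicit suspended flag avoids relying on that register test at all. A standalone illustration of the pitfall (the mask value is made up for the demonstration):

#include <stdio.h>

#define IRQEN   0x20    /* illustrative mask; any single bit above bit 0 */

int main(void)
{
        unsigned int reg = 0x00;        /* pretend interrupts are NOT enabled */

        /* Buggy form: '!' applies to reg first, then the mask is ANDed in,
         * so the result is always 0 here and the "disabled" case is missed. */
        printf("buggy test:   %d\n", !reg & IRQEN);

        /* Intended form: isolate the bit, then negate. */
        printf("correct test: %d\n", !(reg & IRQEN));
        return 0;
}
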
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 640cbb237328..3384a717fec0 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -318,7 +318,8 @@ static int sh_mobile_i2c_isr_rx(struct sh_mobile_i2c_data *pd)
318 } else 318 } else
319 data = i2c_op(pd, OP_RX, 0); 319 data = i2c_op(pd, OP_RX, 0);
320 320
321 pd->msg->buf[real_pos] = data; 321 if (real_pos >= 0)
322 pd->msg->buf[real_pos] = data;
322 } while (0); 323 } while (0);
323 324
324 pd->pos++; 325 pd->pos++;
diff --git a/drivers/i2c/busses/scx200_i2c.c b/drivers/i2c/busses/scx200_i2c.c
index c3022a023449..e4c98539c517 100644
--- a/drivers/i2c/busses/scx200_i2c.c
+++ b/drivers/i2c/busses/scx200_i2c.c
@@ -81,6 +81,7 @@ static struct i2c_algo_bit_data scx200_i2c_data = {
81 81
82static struct i2c_adapter scx200_i2c_ops = { 82static struct i2c_adapter scx200_i2c_ops = {
83 .owner = THIS_MODULE, 83 .owner = THIS_MODULE,
84 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
84 .id = I2C_HW_B_SCX200, 85 .id = I2C_HW_B_SCX200,
85 .algo_data = &scx200_i2c_data, 86 .algo_data = &scx200_i2c_data,
86 .name = "NatSemi SCx200 I2C", 87 .name = "NatSemi SCx200 I2C",
diff --git a/drivers/i2c/chips/Kconfig b/drivers/i2c/chips/Kconfig
index 17356827b93d..4c35702830ce 100644
--- a/drivers/i2c/chips/Kconfig
+++ b/drivers/i2c/chips/Kconfig
@@ -1,6 +1,8 @@
1# 1#
2# Miscellaneous I2C chip drivers configuration 2# Miscellaneous I2C chip drivers configuration
3# 3#
4# *** DEPRECATED! Do not add new entries! See Makefile ***
5#
4 6
5menu "Miscellaneous I2C Chip support" 7menu "Miscellaneous I2C Chip support"
6 8
diff --git a/drivers/i2c/chips/Makefile b/drivers/i2c/chips/Makefile
index ca520fa143d6..23d2a31b0a64 100644
--- a/drivers/i2c/chips/Makefile
+++ b/drivers/i2c/chips/Makefile
@@ -1,7 +1,8 @@
1# 1#
2# Makefile for miscellaneous I2C chip drivers. 2# Makefile for miscellaneous I2C chip drivers.
3# 3#
4# Think twice before you add a new driver to this directory. 4# Do not add new drivers to this directory! It is DEPRECATED.
5#
5# Device drivers are better grouped according to the functionality they 6# Device drivers are better grouped according to the functionality they
6# implement rather than to the bus they are connected to. In particular: 7# implement rather than to the bus they are connected to. In particular:
7# * Hardware monitoring chip drivers go to drivers/hwmon 8# * Hardware monitoring chip drivers go to drivers/hwmon
diff --git a/drivers/i2c/chips/isp1301_omap.c b/drivers/i2c/chips/isp1301_omap.c
index 28902ebd5539..e0d56ef2bcb0 100644
--- a/drivers/i2c/chips/isp1301_omap.c
+++ b/drivers/i2c/chips/isp1301_omap.c
@@ -25,6 +25,7 @@
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/interrupt.h> 26#include <linux/interrupt.h>
27#include <linux/platform_device.h> 27#include <linux/platform_device.h>
28#include <linux/gpio.h>
28#include <linux/usb/ch9.h> 29#include <linux/usb/ch9.h>
29#include <linux/usb/gadget.h> 30#include <linux/usb/gadget.h>
30#include <linux/usb.h> 31#include <linux/usb.h>
@@ -33,7 +34,10 @@
33#include <linux/workqueue.h> 34#include <linux/workqueue.h>
34 35
35#include <asm/irq.h> 36#include <asm/irq.h>
37#include <asm/mach-types.h>
38
36#include <mach/usb.h> 39#include <mach/usb.h>
40#include <mach/mux.h>
37 41
38 42
39#ifndef DEBUG 43#ifndef DEBUG
@@ -88,14 +92,9 @@ struct isp1301 {
88 92
89/*-------------------------------------------------------------------------*/ 93/*-------------------------------------------------------------------------*/
90 94
91#ifdef CONFIG_MACH_OMAP_H2
92
93/* board-specific PM hooks */ 95/* board-specific PM hooks */
94 96
95#include <asm/gpio.h> 97#if defined(CONFIG_MACH_OMAP_H2) || defined(CONFIG_MACH_OMAP_H3)
96#include <mach/mux.h>
97#include <asm/mach-types.h>
98
99 98
100#if defined(CONFIG_TPS65010) || defined(CONFIG_TPS65010_MODULE) 99#if defined(CONFIG_TPS65010) || defined(CONFIG_TPS65010_MODULE)
101 100
@@ -135,6 +134,33 @@ static inline void notresponding(struct isp1301 *isp)
135 134
136#endif 135#endif
137 136
137#if defined(CONFIG_MACH_OMAP_H4)
138
139static void enable_vbus_draw(struct isp1301 *isp, unsigned mA)
140{
141 /* H4 controls this by DIP switch S2.4; no soft control.
142 * ON means the charger is always enabled. Leave it OFF
143 * unless the OTG port is used only in B-peripheral mode.
144 */
145}
146
147static void enable_vbus_source(struct isp1301 *isp)
148{
149 /* this board won't supply more than 8mA vbus power.
150 * some boards can switch a 100ma "unit load" (or more).
151 */
152}
153
154
155/* products will deliver OTG messages with LEDs, GUI, etc */
156static inline void notresponding(struct isp1301 *isp)
157{
158 printk(KERN_NOTICE "OTG device not responding.\n");
159}
160
161
162#endif
163
138/*-------------------------------------------------------------------------*/ 164/*-------------------------------------------------------------------------*/
139 165
140static struct i2c_driver isp1301_driver; 166static struct i2c_driver isp1301_driver;
@@ -334,8 +360,7 @@ static int gadget_suspend(struct isp1301 *isp)
334 * NOTE: guaranteeing certain response times might mean we shouldn't 360 * NOTE: guaranteeing certain response times might mean we shouldn't
335 * share keventd's work queue; a realtime task might be safest. 361 * share keventd's work queue; a realtime task might be safest.
336 */ 362 */
337void 363static void isp1301_defer_work(struct isp1301 *isp, int work)
338isp1301_defer_work(struct isp1301 *isp, int work)
339{ 364{
340 int status; 365 int status;
341 366
@@ -512,7 +537,6 @@ static void update_otg1(struct isp1301 *isp, u8 int_src)
512 otg_ctrl &= ~OTG_XCEIV_INPUTS; 537 otg_ctrl &= ~OTG_XCEIV_INPUTS;
513 otg_ctrl &= ~(OTG_ID|OTG_ASESSVLD|OTG_VBUSVLD); 538 otg_ctrl &= ~(OTG_ID|OTG_ASESSVLD|OTG_VBUSVLD);
514 539
515
516 if (int_src & INTR_SESS_VLD) 540 if (int_src & INTR_SESS_VLD)
517 otg_ctrl |= OTG_ASESSVLD; 541 otg_ctrl |= OTG_ASESSVLD;
518 else if (isp->otg.state == OTG_STATE_A_WAIT_VFALL) { 542 else if (isp->otg.state == OTG_STATE_A_WAIT_VFALL) {
@@ -886,11 +910,11 @@ static int otg_probe(struct platform_device *dev)
886 910
887static int otg_remove(struct platform_device *dev) 911static int otg_remove(struct platform_device *dev)
888{ 912{
889 otg_dev = 0; 913 otg_dev = NULL;
890 return 0; 914 return 0;
891} 915}
892 916
893struct platform_driver omap_otg_driver = { 917static struct platform_driver omap_otg_driver = {
894 .probe = otg_probe, 918 .probe = otg_probe,
895 .remove = otg_remove, 919 .remove = otg_remove,
896 .driver = { 920 .driver = {
@@ -1212,6 +1236,8 @@ static void isp1301_release(struct device *dev)
1212 1236
1213 isp = dev_get_drvdata(dev); 1237 isp = dev_get_drvdata(dev);
1214 1238
1239 /* FIXME -- not with a "new style" driver, it doesn't!! */
1240
1215 /* ugly -- i2c hijacks our memory hook to wait_for_completion() */ 1241 /* ugly -- i2c hijacks our memory hook to wait_for_completion() */
1216 if (isp->i2c_release) 1242 if (isp->i2c_release)
1217 isp->i2c_release(dev); 1243 isp->i2c_release(dev);
@@ -1233,7 +1259,7 @@ static int __exit isp1301_remove(struct i2c_client *i2c)
1233 otg_unbind(isp); 1259 otg_unbind(isp);
1234#endif 1260#endif
1235 if (machine_is_omap_h2()) 1261 if (machine_is_omap_h2())
1236 omap_free_gpio(2); 1262 gpio_free(2);
1237 1263
1238 isp->timer.data = 0; 1264 isp->timer.data = 0;
1239 set_bit(WORK_STOP, &isp->todo); 1265 set_bit(WORK_STOP, &isp->todo);
@@ -1241,7 +1267,7 @@ static int __exit isp1301_remove(struct i2c_client *i2c)
1241 flush_scheduled_work(); 1267 flush_scheduled_work();
1242 1268
1243 put_device(&i2c->dev); 1269 put_device(&i2c->dev);
1244 the_transceiver = 0; 1270 the_transceiver = NULL;
1245 1271
1246 return 0; 1272 return 0;
1247} 1273}
@@ -1295,7 +1321,7 @@ isp1301_set_host(struct otg_transceiver *otg, struct usb_bus *host)
1295 if (!host) { 1321 if (!host) {
1296 omap_writew(0, OTG_IRQ_EN); 1322 omap_writew(0, OTG_IRQ_EN);
1297 power_down(isp); 1323 power_down(isp);
1298 isp->otg.host = 0; 1324 isp->otg.host = NULL;
1299 return 0; 1325 return 0;
1300 } 1326 }
1301 1327
@@ -1344,7 +1370,9 @@ static int
1344isp1301_set_peripheral(struct otg_transceiver *otg, struct usb_gadget *gadget) 1370isp1301_set_peripheral(struct otg_transceiver *otg, struct usb_gadget *gadget)
1345{ 1371{
1346 struct isp1301 *isp = container_of(otg, struct isp1301, otg); 1372 struct isp1301 *isp = container_of(otg, struct isp1301, otg);
1373#ifndef CONFIG_USB_OTG
1347 u32 l; 1374 u32 l;
1375#endif
1348 1376
1349 if (!otg || isp != the_transceiver) 1377 if (!otg || isp != the_transceiver)
1350 return -ENODEV; 1378 return -ENODEV;
@@ -1354,7 +1382,7 @@ isp1301_set_peripheral(struct otg_transceiver *otg, struct usb_gadget *gadget)
1354 if (!isp->otg.default_a) 1382 if (!isp->otg.default_a)
1355 enable_vbus_draw(isp, 0); 1383 enable_vbus_draw(isp, 0);
1356 usb_gadget_vbus_disconnect(isp->otg.gadget); 1384 usb_gadget_vbus_disconnect(isp->otg.gadget);
1357 isp->otg.gadget = 0; 1385 isp->otg.gadget = NULL;
1358 power_down(isp); 1386 power_down(isp);
1359 return 0; 1387 return 0;
1360 } 1388 }
@@ -1379,7 +1407,7 @@ isp1301_set_peripheral(struct otg_transceiver *otg, struct usb_gadget *gadget)
1379 power_up(isp); 1407 power_up(isp);
1380 isp->otg.state = OTG_STATE_B_IDLE; 1408 isp->otg.state = OTG_STATE_B_IDLE;
1381 1409
1382 if (machine_is_omap_h2()) 1410 if (machine_is_omap_h2() || machine_is_omap_h3())
1383 isp1301_set_bits(isp, ISP1301_MODE_CONTROL_1, MC1_DAT_SE0); 1411 isp1301_set_bits(isp, ISP1301_MODE_CONTROL_1, MC1_DAT_SE0);
1384 1412
1385 isp1301_set_bits(isp, ISP1301_INTERRUPT_RISING, 1413 isp1301_set_bits(isp, ISP1301_INTERRUPT_RISING,
@@ -1499,7 +1527,8 @@ isp1301_start_hnp(struct otg_transceiver *dev)
1499 1527
1500/*-------------------------------------------------------------------------*/ 1528/*-------------------------------------------------------------------------*/
1501 1529
1502static int __init isp1301_probe(struct i2c_client *i2c) 1530static int __init
1531isp1301_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
1503{ 1532{
1504 int status; 1533 int status;
1505 struct isp1301 *isp; 1534 struct isp1301 *isp;
@@ -1647,7 +1676,7 @@ module_init(isp_init);
1647static void __exit isp_exit(void) 1676static void __exit isp_exit(void)
1648{ 1677{
1649 if (the_transceiver) 1678 if (the_transceiver)
1650 otg_set_transceiver(0); 1679 otg_set_transceiver(NULL);
1651 i2c_del_driver(&isp1301_driver); 1680 i2c_del_driver(&isp1301_driver);
1652} 1681}
1653module_exit(isp_exit); 1682module_exit(isp_exit);
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 42e852d79ffa..c6a63f46bc15 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -266,6 +266,9 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
266 266
267 client->dev.platform_data = info->platform_data; 267 client->dev.platform_data = info->platform_data;
268 268
269 if (info->archdata)
270 client->dev.archdata = *info->archdata;
271
269 client->flags = info->flags; 272 client->flags = info->flags;
270 client->addr = info->addr; 273 client->addr = info->addr;
271 client->irq = info->irq; 274 client->irq = info->irq;
@@ -628,7 +631,7 @@ int i2c_del_adapter(struct i2c_adapter *adap)
628 631
629 /* detach any active clients. This must be done first, because 632 /* detach any active clients. This must be done first, because
630 * it can fail; in which case we give up. */ 633 * it can fail; in which case we give up. */
631 list_for_each_entry_safe(client, _n, &adap->clients, list) { 634 list_for_each_entry_safe_reverse(client, _n, &adap->clients, list) {
632 struct i2c_driver *driver; 635 struct i2c_driver *driver;
633 636
634 driver = client->driver; 637 driver = client->driver;
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index a820ca6fc327..e6857e01d1ba 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -669,10 +669,12 @@ config BLK_DEV_CELLEB
669 669
670endif 670endif
671 671
672# TODO: BLK_DEV_IDEDMA_PCI -> BLK_DEV_IDEDMA_SFF
672config BLK_DEV_IDE_PMAC 673config BLK_DEV_IDE_PMAC
673 tristate "PowerMac on-board IDE support" 674 tristate "PowerMac on-board IDE support"
674 depends on PPC_PMAC && IDE=y 675 depends on PPC_PMAC && IDE=y
675 select IDE_TIMINGS 676 select IDE_TIMINGS
677 select BLK_DEV_IDEDMA_PCI
676 help 678 help
677 This driver provides support for the on-board IDE controller on 679 This driver provides support for the on-board IDE controller on
678 most of the recent Apple Power Macintoshes and PowerBooks. 680 most of the recent Apple Power Macintoshes and PowerBooks.
@@ -689,16 +691,6 @@ config BLK_DEV_IDE_PMAC_ATA100FIRST
689 CD-ROM on hda. This option changes this to more natural hda for 691 CD-ROM on hda. This option changes this to more natural hda for
690 hard disk and hdc for CD-ROM. 692 hard disk and hdc for CD-ROM.
691 693
692config BLK_DEV_IDEDMA_PMAC
693 bool "PowerMac IDE DMA support"
694 depends on BLK_DEV_IDE_PMAC
695 select BLK_DEV_IDEDMA_PCI
696 help
697 This option allows the driver for the on-board IDE controller on
698 Power Macintoshes and PowerBooks to use DMA (direct memory access)
699 to transfer data to and from memory. Saying Y is safe and improves
700 performance.
701
702config BLK_DEV_IDE_AU1XXX 694config BLK_DEV_IDE_AU1XXX
703 bool "IDE for AMD Alchemy Au1200" 695 bool "IDE for AMD Alchemy Au1200"
704 depends on SOC_AU1200 696 depends on SOC_AU1200
@@ -720,6 +712,16 @@ config BLK_DEV_IDE_AU1XXX_SEQTS_PER_RQ
720 default "128" 712 default "128"
721 depends on BLK_DEV_IDE_AU1XXX 713 depends on BLK_DEV_IDE_AU1XXX
722 714
715config BLK_DEV_IDE_TX4938
716 tristate "TX4938 internal IDE support"
717 depends on SOC_TX4938
718 select IDE_TIMINGS
719
720config BLK_DEV_IDE_TX4939
721 tristate "TX4939 internal IDE support"
722 depends on SOC_TX4939
723 select BLK_DEV_IDEDMA_SFF
724
723config IDE_ARM 725config IDE_ARM
724 tristate "ARM IDE support" 726 tristate "ARM IDE support"
725 depends on ARM && (ARCH_CLPS7500 || ARCH_RPC || ARCH_SHARK) 727 depends on ARM && (ARCH_CLPS7500 || ARCH_RPC || ARCH_SHARK)
@@ -902,7 +904,7 @@ config BLK_DEV_UMC8672
902endif 904endif
903 905
904config BLK_DEV_IDEDMA 906config BLK_DEV_IDEDMA
905 def_bool BLK_DEV_IDEDMA_SFF || BLK_DEV_IDEDMA_PMAC || \ 907 def_bool BLK_DEV_IDEDMA_SFF || \
906 BLK_DEV_IDEDMA_ICS || BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA 908 BLK_DEV_IDEDMA_ICS || BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
907 909
908endif # IDE 910endif # IDE
diff --git a/drivers/ide/Makefile b/drivers/ide/Makefile
index 093d3248ca89..7818d402b188 100644
--- a/drivers/ide/Makefile
+++ b/drivers/ide/Makefile
@@ -18,22 +18,65 @@ ide-core-$(CONFIG_BLK_DEV_IDEACPI) += ide-acpi.o
18 18
19obj-$(CONFIG_IDE) += ide-core.o 19obj-$(CONFIG_IDE) += ide-core.o
20 20
21ifeq ($(CONFIG_IDE_ARM), y) 21obj-$(CONFIG_IDE_ARM) += ide_arm.o
22 ide-arm-core-y += arm/ide_arm.o 22
23 obj-y += ide-arm-core.o 23obj-$(CONFIG_BLK_DEV_ALI14XX) += ali14xx.o
24endif 24obj-$(CONFIG_BLK_DEV_UMC8672) += umc8672.o
25 25obj-$(CONFIG_BLK_DEV_DTC2278) += dtc2278.o
26obj-$(CONFIG_IDE) += legacy/ pci/ 26obj-$(CONFIG_BLK_DEV_HT6560B) += ht6560b.o
27obj-$(CONFIG_BLK_DEV_QD65XX) += qd65xx.o
28obj-$(CONFIG_BLK_DEV_4DRIVES) += ide-4drives.o
29
30obj-$(CONFIG_BLK_DEV_GAYLE) += gayle.o
31obj-$(CONFIG_BLK_DEV_FALCON_IDE) += falconide.o
32obj-$(CONFIG_BLK_DEV_MAC_IDE) += macide.o
33obj-$(CONFIG_BLK_DEV_Q40IDE) += q40ide.o
34obj-$(CONFIG_BLK_DEV_BUDDHA) += buddha.o
35
36obj-$(CONFIG_BLK_DEV_AEC62XX) += aec62xx.o
37obj-$(CONFIG_BLK_DEV_ALI15X3) += alim15x3.o
38obj-$(CONFIG_BLK_DEV_AMD74XX) += amd74xx.o
39obj-$(CONFIG_BLK_DEV_ATIIXP) += atiixp.o
40obj-$(CONFIG_BLK_DEV_CELLEB) += scc_pata.o
41obj-$(CONFIG_BLK_DEV_CMD64X) += cmd64x.o
42obj-$(CONFIG_BLK_DEV_CS5520) += cs5520.o
43obj-$(CONFIG_BLK_DEV_CS5530) += cs5530.o
44obj-$(CONFIG_BLK_DEV_CS5535) += cs5535.o
45obj-$(CONFIG_BLK_DEV_SC1200) += sc1200.o
46obj-$(CONFIG_BLK_DEV_CY82C693) += cy82c693.o
47obj-$(CONFIG_BLK_DEV_DELKIN) += delkin_cb.o
48obj-$(CONFIG_BLK_DEV_HPT366) += hpt366.o
49obj-$(CONFIG_BLK_DEV_IT8213) += it8213.o
50obj-$(CONFIG_BLK_DEV_IT821X) += it821x.o
51obj-$(CONFIG_BLK_DEV_JMICRON) += jmicron.o
52obj-$(CONFIG_BLK_DEV_NS87415) += ns87415.o
53obj-$(CONFIG_BLK_DEV_OPTI621) += opti621.o
54obj-$(CONFIG_BLK_DEV_PDC202XX_OLD) += pdc202xx_old.o
55obj-$(CONFIG_BLK_DEV_PDC202XX_NEW) += pdc202xx_new.o
56obj-$(CONFIG_BLK_DEV_PIIX) += piix.o
57obj-$(CONFIG_BLK_DEV_RZ1000) += rz1000.o
58obj-$(CONFIG_BLK_DEV_SVWKS) += serverworks.o
59obj-$(CONFIG_BLK_DEV_SGIIOC4) += sgiioc4.o
60obj-$(CONFIG_BLK_DEV_SIIMAGE) += siimage.o
61obj-$(CONFIG_BLK_DEV_SIS5513) += sis5513.o
62obj-$(CONFIG_BLK_DEV_SL82C105) += sl82c105.o
63obj-$(CONFIG_BLK_DEV_SLC90E66) += slc90e66.o
64obj-$(CONFIG_BLK_DEV_TC86C001) += tc86c001.o
65obj-$(CONFIG_BLK_DEV_TRIFLEX) += triflex.o
66obj-$(CONFIG_BLK_DEV_TRM290) += trm290.o
67obj-$(CONFIG_BLK_DEV_VIA82CXXX) += via82cxxx.o
68
69# Must appear at the end of the block
70obj-$(CONFIG_BLK_DEV_GENERIC) += ide-pci-generic.o
27 71
28obj-$(CONFIG_IDEPCI_PCIBUS_ORDER) += ide-scan-pci.o 72obj-$(CONFIG_IDEPCI_PCIBUS_ORDER) += ide-scan-pci.o
29 73
30ifeq ($(CONFIG_BLK_DEV_CMD640), y) 74obj-$(CONFIG_BLK_DEV_CMD640) += cmd640.o
31 cmd640-core-y += pci/cmd640.o 75
32 obj-y += cmd640-core.o 76obj-$(CONFIG_BLK_DEV_IDE_PMAC) += pmac.o
33endif 77
78obj-$(CONFIG_IDE_H8300) += ide-h8300.o
34 79
35obj-$(CONFIG_IDE) += ppc/
36obj-$(CONFIG_IDE_H8300) += h8300/
37obj-$(CONFIG_IDE_GENERIC) += ide-generic.o 80obj-$(CONFIG_IDE_GENERIC) += ide-generic.o
38obj-$(CONFIG_BLK_DEV_IDEPNP) += ide-pnp.o 81obj-$(CONFIG_BLK_DEV_IDEPNP) += ide-pnp.o
39 82
@@ -58,14 +101,15 @@ obj-$(CONFIG_IDE_GD) += ide-gd_mod.o
58obj-$(CONFIG_BLK_DEV_IDECD) += ide-cd_mod.o 101obj-$(CONFIG_BLK_DEV_IDECD) += ide-cd_mod.o
59obj-$(CONFIG_BLK_DEV_IDETAPE) += ide-tape.o 102obj-$(CONFIG_BLK_DEV_IDETAPE) += ide-tape.o
60 103
61ifeq ($(CONFIG_BLK_DEV_IDECS), y) 104obj-$(CONFIG_BLK_DEV_IDECS) += ide-cs.o
62 ide-cs-core-y += legacy/ide-cs.o
63 obj-y += ide-cs-core.o
64endif
65 105
66ifeq ($(CONFIG_BLK_DEV_PLATFORM), y) 106obj-$(CONFIG_BLK_DEV_PLATFORM) += ide_platform.o
67 ide-platform-core-y += legacy/ide_platform.o 107
68 obj-y += ide-platform-core.o 108obj-$(CONFIG_BLK_DEV_IDE_ICSIDE) += icside.o
69endif 109obj-$(CONFIG_BLK_DEV_IDE_RAPIDE) += rapide.o
110obj-$(CONFIG_BLK_DEV_PALMCHIP_BK3710) += palm_bk3710.o
111
112obj-$(CONFIG_BLK_DEV_IDE_AU1XXX) += au1xxx-ide.o
70 113
71obj-$(CONFIG_IDE) += arm/ mips/ 114obj-$(CONFIG_BLK_DEV_IDE_TX4938) += tx4938ide.o
115obj-$(CONFIG_BLK_DEV_IDE_TX4939) += tx4939ide.o
diff --git a/drivers/ide/pci/aec62xx.c b/drivers/ide/aec62xx.c
index 4142c698e0d3..4142c698e0d3 100644
--- a/drivers/ide/pci/aec62xx.c
+++ b/drivers/ide/aec62xx.c
diff --git a/drivers/ide/legacy/ali14xx.c b/drivers/ide/ali14xx.c
index 90da1f953ed0..90da1f953ed0 100644
--- a/drivers/ide/legacy/ali14xx.c
+++ b/drivers/ide/ali14xx.c
diff --git a/drivers/ide/pci/alim15x3.c b/drivers/ide/alim15x3.c
index daf9dce39e52..45d2356bb725 100644
--- a/drivers/ide/pci/alim15x3.c
+++ b/drivers/ide/alim15x3.c
@@ -5,7 +5,7 @@
5 * 5 *
6 * Copyright (C) 1998-2000 Andre Hedrick (andre@linux-ide.org) 6 * Copyright (C) 1998-2000 Andre Hedrick (andre@linux-ide.org)
7 * May be copied or modified under the terms of the GNU General Public License 7 * May be copied or modified under the terms of the GNU General Public License
8 * Copyright (C) 2002 Alan Cox <alan@redhat.com> 8 * Copyright (C) 2002 Alan Cox
9 * ALi (now ULi M5228) support by Clear Zhang <Clear.Zhang@ali.com.tw> 9 * ALi (now ULi M5228) support by Clear Zhang <Clear.Zhang@ali.com.tw>
10 * Copyright (C) 2007 MontaVista Software, Inc. <source@mvista.com> 10 * Copyright (C) 2007 MontaVista Software, Inc. <source@mvista.com>
11 * Copyright (C) 2007 Bartlomiej Zolnierkiewicz <bzolnier@gmail.com> 11 * Copyright (C) 2007 Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
@@ -591,7 +591,7 @@ static int __init ali15x3_ide_init(void)
591 591
592static void __exit ali15x3_ide_exit(void) 592static void __exit ali15x3_ide_exit(void)
593{ 593{
594 return pci_unregister_driver(&alim15x3_pci_driver); 594 pci_unregister_driver(&alim15x3_pci_driver);
595} 595}
596 596
597module_init(ali15x3_ide_init); 597module_init(ali15x3_ide_init);
diff --git a/drivers/ide/pci/amd74xx.c b/drivers/ide/amd74xx.c
index 81ec73134eda..c6bcd3014a29 100644
--- a/drivers/ide/pci/amd74xx.c
+++ b/drivers/ide/amd74xx.c
@@ -3,7 +3,7 @@
3 * IDE driver for Linux. 3 * IDE driver for Linux.
4 * 4 *
5 * Copyright (c) 2000-2002 Vojtech Pavlik 5 * Copyright (c) 2000-2002 Vojtech Pavlik
6 * Copyright (c) 2007 Bartlomiej Zolnierkiewicz 6 * Copyright (c) 2007-2008 Bartlomiej Zolnierkiewicz
7 * 7 *
8 * Based on the work of: 8 * Based on the work of:
9 * Andre Hedrick 9 * Andre Hedrick
@@ -263,6 +263,15 @@ static int __devinit amd74xx_probe(struct pci_dev *dev, const struct pci_device_
263 d.udma_mask = ATA_UDMA5; 263 d.udma_mask = ATA_UDMA5;
264 } 264 }
265 265
266 /*
267 * It seems that on some nVidia controllers using AltStatus
268 * register can be unreliable so default to Status register
269 * if the device is in Compatibility Mode.
270 */
271 if (dev->vendor == PCI_VENDOR_ID_NVIDIA &&
272 ide_pci_is_in_compatibility_mode(dev))
273 d.host_flags |= IDE_HFLAG_BROKEN_ALTSTATUS;
274
266 printk(KERN_INFO "%s %s: UDMA%s controller\n", 275 printk(KERN_INFO "%s %s: UDMA%s controller\n",
267 d.name, pci_name(dev), amd_dma[fls(d.udma_mask) - 1]); 276 d.name, pci_name(dev), amd_dma[fls(d.udma_mask) - 1]);
268 277
diff --git a/drivers/ide/arm/Makefile b/drivers/ide/arm/Makefile
deleted file mode 100644
index 5bc26053afa6..000000000000
--- a/drivers/ide/arm/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
1
2obj-$(CONFIG_BLK_DEV_IDE_ICSIDE) += icside.o
3obj-$(CONFIG_BLK_DEV_IDE_RAPIDE) += rapide.o
4obj-$(CONFIG_BLK_DEV_PALMCHIP_BK3710) += palm_bk3710.o
5
6ifeq ($(CONFIG_IDE_ARM), m)
7 obj-m += ide_arm.o
8endif
9
10EXTRA_CFLAGS := -Idrivers/ide
diff --git a/drivers/ide/pci/atiixp.c b/drivers/ide/atiixp.c
index b2735d28f5cc..b2735d28f5cc 100644
--- a/drivers/ide/pci/atiixp.c
+++ b/drivers/ide/atiixp.c
diff --git a/drivers/ide/mips/au1xxx-ide.c b/drivers/ide/au1xxx-ide.c
index 0ec8fd1e4dcb..0ec8fd1e4dcb 100644
--- a/drivers/ide/mips/au1xxx-ide.c
+++ b/drivers/ide/au1xxx-ide.c
diff --git a/drivers/ide/legacy/buddha.c b/drivers/ide/buddha.c
index c5a3c9ef6a5d..c5a3c9ef6a5d 100644
--- a/drivers/ide/legacy/buddha.c
+++ b/drivers/ide/buddha.c
diff --git a/drivers/ide/pci/cmd640.c b/drivers/ide/cmd640.c
index e4306647d00d..e4306647d00d 100644
--- a/drivers/ide/pci/cmd640.c
+++ b/drivers/ide/cmd640.c
diff --git a/drivers/ide/pci/cmd64x.c b/drivers/ide/cmd64x.c
index 935385c77e06..935385c77e06 100644
--- a/drivers/ide/pci/cmd64x.c
+++ b/drivers/ide/cmd64x.c
diff --git a/drivers/ide/pci/cs5520.c b/drivers/ide/cs5520.c
index 5efb467f8fa0..5efb467f8fa0 100644
--- a/drivers/ide/pci/cs5520.c
+++ b/drivers/ide/cs5520.c
diff --git a/drivers/ide/pci/cs5530.c b/drivers/ide/cs5530.c
index 53f079cc00af..53f079cc00af 100644
--- a/drivers/ide/pci/cs5530.c
+++ b/drivers/ide/cs5530.c
diff --git a/drivers/ide/pci/cs5535.c b/drivers/ide/cs5535.c
index 983d957a0189..983d957a0189 100644
--- a/drivers/ide/pci/cs5535.c
+++ b/drivers/ide/cs5535.c
diff --git a/drivers/ide/pci/cy82c693.c b/drivers/ide/cy82c693.c
index 5297f07d2933..5297f07d2933 100644
--- a/drivers/ide/pci/cy82c693.c
+++ b/drivers/ide/cy82c693.c
diff --git a/drivers/ide/pci/delkin_cb.c b/drivers/ide/delkin_cb.c
index 8f1b2d9f0513..8f1b2d9f0513 100644
--- a/drivers/ide/pci/delkin_cb.c
+++ b/drivers/ide/delkin_cb.c
diff --git a/drivers/ide/legacy/dtc2278.c b/drivers/ide/dtc2278.c
index 689b2e493413..689b2e493413 100644
--- a/drivers/ide/legacy/dtc2278.c
+++ b/drivers/ide/dtc2278.c
diff --git a/drivers/ide/legacy/falconide.c b/drivers/ide/falconide.c
index 39d500d84b07..39d500d84b07 100644
--- a/drivers/ide/legacy/falconide.c
+++ b/drivers/ide/falconide.c
diff --git a/drivers/ide/legacy/gayle.c b/drivers/ide/gayle.c
index 691506886561..691506886561 100644
--- a/drivers/ide/legacy/gayle.c
+++ b/drivers/ide/gayle.c
diff --git a/drivers/ide/h8300/Makefile b/drivers/ide/h8300/Makefile
deleted file mode 100644
index 5eba16f423f4..000000000000
--- a/drivers/ide/h8300/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
1
2obj-$(CONFIG_IDE_H8300) += ide-h8300.o
diff --git a/drivers/ide/pci/hpt366.c b/drivers/ide/hpt366.c
index a7909e9c720e..f5afd46ed51c 100644
--- a/drivers/ide/pci/hpt366.c
+++ b/drivers/ide/hpt366.c
@@ -52,7 +52,7 @@
52 * different clocks on read/write. This requires overloading rw_disk and 52 * different clocks on read/write. This requires overloading rw_disk and
53 * other deeply crazy things. Thanks to <http://www.hoerstreich.de> for 53 * other deeply crazy things. Thanks to <http://www.hoerstreich.de> for
54 * keeping me sane. 54 * keeping me sane.
55 * Alan Cox <alan@redhat.com> 55 * Alan Cox <alan@lxorguk.ukuu.org.uk>
56 * 56 *
57 * - fix the clock turnaround code: it was writing to the wrong ports when 57 * - fix the clock turnaround code: it was writing to the wrong ports when
58 * called for the secondary channel, caching the current clock mode per- 58 * called for the secondary channel, caching the current clock mode per-
diff --git a/drivers/ide/legacy/ht6560b.c b/drivers/ide/ht6560b.c
index c7e5c2246b79..c7e5c2246b79 100644
--- a/drivers/ide/legacy/ht6560b.c
+++ b/drivers/ide/ht6560b.c
diff --git a/drivers/ide/arm/icside.c b/drivers/ide/icside.c
index 76bdc9a27f6f..81f70caeb40f 100644
--- a/drivers/ide/arm/icside.c
+++ b/drivers/ide/icside.c
@@ -419,7 +419,7 @@ static void icside_setup_ports(hw_regs_t *hw, void __iomem *base,
419 hw->chipset = ide_acorn; 419 hw->chipset = ide_acorn;
420} 420}
421 421
422static int __init 422static int __devinit
423icside_register_v5(struct icside_state *state, struct expansion_card *ec) 423icside_register_v5(struct icside_state *state, struct expansion_card *ec)
424{ 424{
425 void __iomem *base; 425 void __iomem *base;
@@ -473,7 +473,7 @@ static const struct ide_port_info icside_v6_port_info __initdata = {
473 .swdma_mask = ATA_SWDMA2, 473 .swdma_mask = ATA_SWDMA2,
474}; 474};
475 475
476static int __init 476static int __devinit
477icside_register_v6(struct icside_state *state, struct expansion_card *ec) 477icside_register_v6(struct icside_state *state, struct expansion_card *ec)
478{ 478{
479 void __iomem *ioc_base, *easi_base; 479 void __iomem *ioc_base, *easi_base;
@@ -690,9 +690,9 @@ static int __init icside_init(void)
690 return ecard_register_driver(&icside_driver); 690 return ecard_register_driver(&icside_driver);
691} 691}
692 692
693static void __exit icside_exit(void) 693static void __exit icside_exit(void)
694{ 694{
695 ecard_unregister_driver(&icside_driver); 695 ecard_remove_driver(&icside_driver);
696} 696}
697 697
698MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>"); 698MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
diff --git a/drivers/ide/legacy/ide-4drives.c b/drivers/ide/ide-4drives.c
index 9e85b1ec9607..9e85b1ec9607 100644
--- a/drivers/ide/legacy/ide-4drives.c
+++ b/drivers/ide/ide-4drives.c
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 13265a8827da..42ab6d8715f2 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -1250,7 +1250,8 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
1250 * separate masks. 1250 * separate masks.
1251 */ 1251 */
1252 alignment = queue_dma_alignment(q) | q->dma_pad_mask; 1252 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
1253 if ((unsigned long)buf & alignment || rq->data_len & alignment 1253 if ((unsigned long)buf & alignment
1254 || rq->data_len & q->dma_pad_mask
1254 || object_is_on_stack(buf)) 1255 || object_is_on_stack(buf))
1255 drive->dma = 0; 1256 drive->dma = 0;
1256 } 1257 }
@@ -1966,6 +1967,7 @@ static const struct cd_list_entry ide_cd_quirks_list[] = {
1966 { "Optiarc DVD RW AD-5200A", NULL, IDE_AFLAG_PLAY_AUDIO_OK }, 1967 { "Optiarc DVD RW AD-5200A", NULL, IDE_AFLAG_PLAY_AUDIO_OK },
1967 { "Optiarc DVD RW AD-7200A", NULL, IDE_AFLAG_PLAY_AUDIO_OK }, 1968 { "Optiarc DVD RW AD-7200A", NULL, IDE_AFLAG_PLAY_AUDIO_OK },
1968 { "Optiarc DVD RW AD-7543A", NULL, IDE_AFLAG_NO_AUTOCLOSE }, 1969 { "Optiarc DVD RW AD-7543A", NULL, IDE_AFLAG_NO_AUTOCLOSE },
1970 { "TEAC CD-ROM CD-224E", NULL, IDE_AFLAG_NO_AUTOCLOSE },
1969 { NULL, NULL, 0 } 1971 { NULL, NULL, 0 }
1970}; 1972};
1971 1973
@@ -2089,17 +2091,15 @@ static ide_driver_t ide_cdrom_driver = {
2089#endif 2091#endif
2090}; 2092};
2091 2093
2092static int idecd_open(struct inode *inode, struct file *file) 2094static int idecd_open(struct block_device *bdev, fmode_t mode)
2093{ 2095{
2094 struct gendisk *disk = inode->i_bdev->bd_disk; 2096 struct cdrom_info *info = ide_cd_get(bdev->bd_disk);
2095 struct cdrom_info *info;
2096 int rc = -ENOMEM; 2097 int rc = -ENOMEM;
2097 2098
2098 info = ide_cd_get(disk);
2099 if (!info) 2099 if (!info)
2100 return -ENXIO; 2100 return -ENXIO;
2101 2101
2102 rc = cdrom_open(&info->devinfo, inode, file); 2102 rc = cdrom_open(&info->devinfo, bdev, mode);
2103 2103
2104 if (rc < 0) 2104 if (rc < 0)
2105 ide_cd_put(info); 2105 ide_cd_put(info);
@@ -2107,12 +2107,11 @@ static int idecd_open(struct inode *inode, struct file *file)
2107 return rc; 2107 return rc;
2108} 2108}
2109 2109
2110static int idecd_release(struct inode *inode, struct file *file) 2110static int idecd_release(struct gendisk *disk, fmode_t mode)
2111{ 2111{
2112 struct gendisk *disk = inode->i_bdev->bd_disk;
2113 struct cdrom_info *info = ide_drv_g(disk, cdrom_info); 2112 struct cdrom_info *info = ide_drv_g(disk, cdrom_info);
2114 2113
2115 cdrom_release(&info->devinfo, file); 2114 cdrom_release(&info->devinfo, mode);
2116 2115
2117 ide_cd_put(info); 2116 ide_cd_put(info);
2118 2117
@@ -2158,10 +2157,9 @@ static int idecd_get_spindown(struct cdrom_device_info *cdi, unsigned long arg)
2158 return 0; 2157 return 0;
2159} 2158}
2160 2159
2161static int idecd_ioctl(struct inode *inode, struct file *file, 2160static int idecd_ioctl(struct block_device *bdev, fmode_t mode,
2162 unsigned int cmd, unsigned long arg) 2161 unsigned int cmd, unsigned long arg)
2163{ 2162{
2164 struct block_device *bdev = inode->i_bdev;
2165 struct cdrom_info *info = ide_drv_g(bdev->bd_disk, cdrom_info); 2163 struct cdrom_info *info = ide_drv_g(bdev->bd_disk, cdrom_info);
2166 int err; 2164 int err;
2167 2165
@@ -2174,9 +2172,9 @@ static int idecd_ioctl(struct inode *inode, struct file *file,
2174 break; 2172 break;
2175 } 2173 }
2176 2174
2177 err = generic_ide_ioctl(info->drive, file, bdev, cmd, arg); 2175 err = generic_ide_ioctl(info->drive, bdev, cmd, arg);
2178 if (err == -EINVAL) 2176 if (err == -EINVAL)
2179 err = cdrom_ioctl(file, &info->devinfo, inode, cmd, arg); 2177 err = cdrom_ioctl(&info->devinfo, bdev, mode, cmd, arg);
2180 2178
2181 return err; 2179 return err;
2182} 2180}
@@ -2201,7 +2199,7 @@ static struct block_device_operations idecd_ops = {
2201 .owner = THIS_MODULE, 2199 .owner = THIS_MODULE,
2202 .open = idecd_open, 2200 .open = idecd_open,
2203 .release = idecd_release, 2201 .release = idecd_release,
2204 .ioctl = idecd_ioctl, 2202 .locked_ioctl = idecd_ioctl,
2205 .media_changed = idecd_media_changed, 2203 .media_changed = idecd_media_changed,
2206 .revalidate_disk = idecd_revalidate_disk 2204 .revalidate_disk = idecd_revalidate_disk
2207}; 2205};
diff --git a/drivers/ide/legacy/ide-cs.c b/drivers/ide/ide-cs.c
index cb199c815b53..f50210fe558f 100644
--- a/drivers/ide/legacy/ide-cs.c
+++ b/drivers/ide/ide-cs.c
@@ -444,6 +444,7 @@ static struct pcmcia_device_id ide_ids[] = {
444 PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209), 444 PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209),
445 PCMCIA_DEVICE_PROD_ID12("STI", "Flash 5.0", 0xbf2df18d, 0x8cb57a0e), 445 PCMCIA_DEVICE_PROD_ID12("STI", "Flash 5.0", 0xbf2df18d, 0x8cb57a0e),
446 PCMCIA_MFC_DEVICE_PROD_ID12(1, "SanDisk", "ConnectPlus", 0x7a954bd9, 0x74be00c6), 446 PCMCIA_MFC_DEVICE_PROD_ID12(1, "SanDisk", "ConnectPlus", 0x7a954bd9, 0x74be00c6),
447 PCMCIA_DEVICE_PROD_ID2("Flash Card", 0x5a362506),
447 PCMCIA_DEVICE_NULL, 448 PCMCIA_DEVICE_NULL,
448}; 449};
449MODULE_DEVICE_TABLE(pcmcia, ide_ids); 450MODULE_DEVICE_TABLE(pcmcia, ide_ids);
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 223750c1b5a6..eb9fac4d0f0c 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -2,7 +2,7 @@
2 * Copyright (C) 1994-1998 Linus Torvalds & authors (see below) 2 * Copyright (C) 1994-1998 Linus Torvalds & authors (see below)
3 * Copyright (C) 1998-2002 Linux ATA Development 3 * Copyright (C) 1998-2002 Linux ATA Development
4 * Andre Hedrick <andre@linux-ide.org> 4 * Andre Hedrick <andre@linux-ide.org>
5 * Copyright (C) 2003 Red Hat <alan@redhat.com> 5 * Copyright (C) 2003 Red Hat
6 * Copyright (C) 2003-2005, 2007 Bartlomiej Zolnierkiewicz 6 * Copyright (C) 2003-2005, 2007 Bartlomiej Zolnierkiewicz
7 */ 7 */
8 8
@@ -600,6 +600,7 @@ static int ide_disk_check(ide_drive_t *drive, const char *s)
600static void ide_disk_setup(ide_drive_t *drive) 600static void ide_disk_setup(ide_drive_t *drive)
601{ 601{
602 struct ide_disk_obj *idkp = drive->driver_data; 602 struct ide_disk_obj *idkp = drive->driver_data;
603 struct request_queue *q = drive->queue;
603 ide_hwif_t *hwif = drive->hwif; 604 ide_hwif_t *hwif = drive->hwif;
604 u16 *id = drive->id; 605 u16 *id = drive->id;
605 char *m = (char *)&id[ATA_ID_PROD]; 606 char *m = (char *)&id[ATA_ID_PROD];
@@ -626,11 +627,14 @@ static void ide_disk_setup(ide_drive_t *drive)
626 if (max_s > hwif->rqsize) 627 if (max_s > hwif->rqsize)
627 max_s = hwif->rqsize; 628 max_s = hwif->rqsize;
628 629
629 blk_queue_max_sectors(drive->queue, max_s); 630 blk_queue_max_sectors(q, max_s);
630 } 631 }
631 632
632 printk(KERN_INFO "%s: max request size: %dKiB\n", drive->name, 633 printk(KERN_INFO "%s: max request size: %dKiB\n", drive->name,
633 drive->queue->max_sectors / 2); 634 q->max_sectors / 2);
635
636 if (ata_id_is_ssd(id) || ata_id_is_cfa(id))
637 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
634 638
635 /* calculate drive capacity, and select LBA if possible */ 639 /* calculate drive capacity, and select LBA if possible */
636 ide_disk_get_capacity(drive); 640 ide_disk_get_capacity(drive);
diff --git a/drivers/ide/ide-disk.h b/drivers/ide/ide-disk.h
index b234b0feaf7b..d511dab7c4aa 100644
--- a/drivers/ide/ide-disk.h
+++ b/drivers/ide/ide-disk.h
@@ -13,7 +13,7 @@ ide_decl_devset(wcache);
13ide_decl_devset(acoustic); 13ide_decl_devset(acoustic);
14 14
15/* ide-disk_ioctl.c */ 15/* ide-disk_ioctl.c */
16int ide_disk_ioctl(ide_drive_t *, struct inode *, struct file *, unsigned int, 16int ide_disk_ioctl(ide_drive_t *, struct block_device *, fmode_t, unsigned int,
17 unsigned long); 17 unsigned long);
18 18
19#ifdef CONFIG_IDE_PROC_FS 19#ifdef CONFIG_IDE_PROC_FS
diff --git a/drivers/ide/ide-disk_ioctl.c b/drivers/ide/ide-disk_ioctl.c
index a49698bcf966..7b783dd7c0be 100644
--- a/drivers/ide/ide-disk_ioctl.c
+++ b/drivers/ide/ide-disk_ioctl.c
@@ -13,15 +13,14 @@ static const struct ide_ioctl_devset ide_disk_ioctl_settings[] = {
13{ 0 } 13{ 0 }
14}; 14};
15 15
16int ide_disk_ioctl(ide_drive_t *drive, struct inode *inode, struct file *file, 16int ide_disk_ioctl(ide_drive_t *drive, struct block_device *bdev, fmode_t mode,
17 unsigned int cmd, unsigned long arg) 17 unsigned int cmd, unsigned long arg)
18{ 18{
19 struct block_device *bdev = inode->i_bdev;
20 int err; 19 int err;
21 20
22 err = ide_setting_ioctl(drive, bdev, cmd, arg, ide_disk_ioctl_settings); 21 err = ide_setting_ioctl(drive, bdev, cmd, arg, ide_disk_ioctl_settings);
23 if (err != -EOPNOTSUPP) 22 if (err != -EOPNOTSUPP)
24 return err; 23 return err;
25 24
26 return generic_ide_ioctl(drive, file, bdev, cmd, arg); 25 return generic_ide_ioctl(drive, bdev, cmd, arg);
27} 26}
diff --git a/drivers/ide/ide-floppy.h b/drivers/ide/ide-floppy.h
index c17124dd6079..6dd2beb48434 100644
--- a/drivers/ide/ide-floppy.h
+++ b/drivers/ide/ide-floppy.h
@@ -23,8 +23,8 @@ void ide_floppy_create_mode_sense_cmd(struct ide_atapi_pc *, u8);
23void ide_floppy_create_read_capacity_cmd(struct ide_atapi_pc *); 23void ide_floppy_create_read_capacity_cmd(struct ide_atapi_pc *);
24 24
25/* ide-floppy_ioctl.c */ 25/* ide-floppy_ioctl.c */
26int ide_floppy_ioctl(ide_drive_t *, struct inode *, struct file *, unsigned int, 26int ide_floppy_ioctl(ide_drive_t *, struct block_device *, fmode_t,
27 unsigned long); 27 unsigned int, unsigned long);
28 28
29#ifdef CONFIG_IDE_PROC_FS 29#ifdef CONFIG_IDE_PROC_FS
30/* ide-floppy_proc.c */ 30/* ide-floppy_proc.c */
diff --git a/drivers/ide/ide-floppy_ioctl.c b/drivers/ide/ide-floppy_ioctl.c
index 409e4c15f9b7..2bc51ff73fee 100644
--- a/drivers/ide/ide-floppy_ioctl.c
+++ b/drivers/ide/ide-floppy_ioctl.c
@@ -241,7 +241,7 @@ static int ide_floppy_lockdoor(ide_drive_t *drive, struct ide_atapi_pc *pc,
241 return 0; 241 return 0;
242} 242}
243 243
244static int ide_floppy_format_ioctl(ide_drive_t *drive, struct file *file, 244static int ide_floppy_format_ioctl(ide_drive_t *drive, fmode_t mode,
245 unsigned int cmd, void __user *argp) 245 unsigned int cmd, void __user *argp)
246{ 246{
247 switch (cmd) { 247 switch (cmd) {
@@ -250,7 +250,7 @@ static int ide_floppy_format_ioctl(ide_drive_t *drive, struct file *file,
250 case IDEFLOPPY_IOCTL_FORMAT_GET_CAPACITY: 250 case IDEFLOPPY_IOCTL_FORMAT_GET_CAPACITY:
251 return ide_floppy_get_format_capacities(drive, argp); 251 return ide_floppy_get_format_capacities(drive, argp);
252 case IDEFLOPPY_IOCTL_FORMAT_START: 252 case IDEFLOPPY_IOCTL_FORMAT_START:
253 if (!(file->f_mode & 2)) 253 if (!(mode & FMODE_WRITE))
254 return -EPERM; 254 return -EPERM;
255 return ide_floppy_format_unit(drive, (int __user *)argp); 255 return ide_floppy_format_unit(drive, (int __user *)argp);
256 case IDEFLOPPY_IOCTL_FORMAT_GET_PROGRESS: 256 case IDEFLOPPY_IOCTL_FORMAT_GET_PROGRESS:
@@ -260,10 +260,9 @@ static int ide_floppy_format_ioctl(ide_drive_t *drive, struct file *file,
260 } 260 }
261} 261}
262 262
263int ide_floppy_ioctl(ide_drive_t *drive, struct inode *inode, 263int ide_floppy_ioctl(ide_drive_t *drive, struct block_device *bdev,
264 struct file *file, unsigned int cmd, unsigned long arg) 264 fmode_t mode, unsigned int cmd, unsigned long arg)
265{ 265{
266 struct block_device *bdev = inode->i_bdev;
267 struct ide_atapi_pc pc; 266 struct ide_atapi_pc pc;
268 void __user *argp = (void __user *)arg; 267 void __user *argp = (void __user *)arg;
269 int err; 268 int err;
@@ -271,7 +270,7 @@ int ide_floppy_ioctl(ide_drive_t *drive, struct inode *inode,
271 if (cmd == CDROMEJECT || cmd == CDROM_LOCKDOOR) 270 if (cmd == CDROMEJECT || cmd == CDROM_LOCKDOOR)
272 return ide_floppy_lockdoor(drive, &pc, arg, cmd); 271 return ide_floppy_lockdoor(drive, &pc, arg, cmd);
273 272
274 err = ide_floppy_format_ioctl(drive, file, cmd, argp); 273 err = ide_floppy_format_ioctl(drive, mode, cmd, argp);
275 if (err != -ENOTTY) 274 if (err != -ENOTTY)
276 return err; 275 return err;
277 276
@@ -280,11 +279,11 @@ int ide_floppy_ioctl(ide_drive_t *drive, struct inode *inode,
280 * and CDROM_SEND_PACKET (legacy) ioctls 279 * and CDROM_SEND_PACKET (legacy) ioctls
281 */ 280 */
282 if (cmd != CDROM_SEND_PACKET && cmd != SCSI_IOCTL_SEND_COMMAND) 281 if (cmd != CDROM_SEND_PACKET && cmd != SCSI_IOCTL_SEND_COMMAND)
283 err = scsi_cmd_ioctl(file, bdev->bd_disk->queue, 282 err = scsi_cmd_ioctl(bdev->bd_disk->queue, bdev->bd_disk,
284 bdev->bd_disk, cmd, argp); 283 mode, cmd, argp);
285 284
286 if (err == -ENOTTY) 285 if (err == -ENOTTY)
287 err = generic_ide_ioctl(drive, file, bdev, cmd, arg); 286 err = generic_ide_ioctl(drive, bdev, cmd, arg);
288 287
289 return err; 288 return err;
290} 289}
diff --git a/drivers/ide/ide-gd.c b/drivers/ide/ide-gd.c
index d44898f46c33..b8078b3231f7 100644
--- a/drivers/ide/ide-gd.c
+++ b/drivers/ide/ide-gd.c
@@ -169,9 +169,9 @@ static ide_driver_t ide_gd_driver = {
169#endif 169#endif
170}; 170};
171 171
172static int ide_gd_open(struct inode *inode, struct file *filp) 172static int ide_gd_open(struct block_device *bdev, fmode_t mode)
173{ 173{
174 struct gendisk *disk = inode->i_bdev->bd_disk; 174 struct gendisk *disk = bdev->bd_disk;
175 struct ide_disk_obj *idkp; 175 struct ide_disk_obj *idkp;
176 ide_drive_t *drive; 176 ide_drive_t *drive;
177 int ret = 0; 177 int ret = 0;
@@ -197,12 +197,12 @@ static int ide_gd_open(struct inode *inode, struct file *filp)
197 * unreadable disk, so that we can get the format capacity 197 * unreadable disk, so that we can get the format capacity
198 * of the drive or begin the format - Sam 198 * of the drive or begin the format - Sam
199 */ 199 */
200 if (ret && (filp->f_flags & O_NDELAY) == 0) { 200 if (ret && (mode & FMODE_NDELAY) == 0) {
201 ret = -EIO; 201 ret = -EIO;
202 goto out_put_idkp; 202 goto out_put_idkp;
203 } 203 }
204 204
205 if ((drive->dev_flags & IDE_DFLAG_WP) && (filp->f_mode & 2)) { 205 if ((drive->dev_flags & IDE_DFLAG_WP) && (mode & FMODE_WRITE)) {
206 ret = -EROFS; 206 ret = -EROFS;
207 goto out_put_idkp; 207 goto out_put_idkp;
208 } 208 }
@@ -214,7 +214,7 @@ static int ide_gd_open(struct inode *inode, struct file *filp)
214 */ 214 */
215 drive->disk_ops->set_doorlock(drive, disk, 1); 215 drive->disk_ops->set_doorlock(drive, disk, 1);
216 drive->dev_flags |= IDE_DFLAG_MEDIA_CHANGED; 216 drive->dev_flags |= IDE_DFLAG_MEDIA_CHANGED;
217 check_disk_change(inode->i_bdev); 217 check_disk_change(bdev);
218 } else if (drive->dev_flags & IDE_DFLAG_FORMAT_IN_PROGRESS) { 218 } else if (drive->dev_flags & IDE_DFLAG_FORMAT_IN_PROGRESS) {
219 ret = -EBUSY; 219 ret = -EBUSY;
220 goto out_put_idkp; 220 goto out_put_idkp;
@@ -227,9 +227,8 @@ out_put_idkp:
227 return ret; 227 return ret;
228} 228}
229 229
230static int ide_gd_release(struct inode *inode, struct file *filp) 230static int ide_gd_release(struct gendisk *disk, fmode_t mode)
231{ 231{
232 struct gendisk *disk = inode->i_bdev->bd_disk;
233 struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj); 232 struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj);
234 ide_drive_t *drive = idkp->drive; 233 ide_drive_t *drive = idkp->drive;
235 234
@@ -282,25 +281,29 @@ static int ide_gd_media_changed(struct gendisk *disk)
282static int ide_gd_revalidate_disk(struct gendisk *disk) 281static int ide_gd_revalidate_disk(struct gendisk *disk)
283{ 282{
284 struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj); 283 struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj);
285 set_capacity(disk, ide_gd_capacity(idkp->drive)); 284 ide_drive_t *drive = idkp->drive;
285
286 if (ide_gd_media_changed(disk))
287 drive->disk_ops->get_capacity(drive);
288
289 set_capacity(disk, ide_gd_capacity(drive));
286 return 0; 290 return 0;
287} 291}
288 292
289static int ide_gd_ioctl(struct inode *inode, struct file *file, 293static int ide_gd_ioctl(struct block_device *bdev, fmode_t mode,
290 unsigned int cmd, unsigned long arg) 294 unsigned int cmd, unsigned long arg)
291{ 295{
292 struct block_device *bdev = inode->i_bdev;
293 struct ide_disk_obj *idkp = ide_drv_g(bdev->bd_disk, ide_disk_obj); 296 struct ide_disk_obj *idkp = ide_drv_g(bdev->bd_disk, ide_disk_obj);
294 ide_drive_t *drive = idkp->drive; 297 ide_drive_t *drive = idkp->drive;
295 298
296 return drive->disk_ops->ioctl(drive, inode, file, cmd, arg); 299 return drive->disk_ops->ioctl(drive, bdev, mode, cmd, arg);
297} 300}
298 301
299static struct block_device_operations ide_gd_ops = { 302static struct block_device_operations ide_gd_ops = {
300 .owner = THIS_MODULE, 303 .owner = THIS_MODULE,
301 .open = ide_gd_open, 304 .open = ide_gd_open,
302 .release = ide_gd_release, 305 .release = ide_gd_release,
303 .ioctl = ide_gd_ioctl, 306 .locked_ioctl = ide_gd_ioctl,
304 .getgeo = ide_gd_getgeo, 307 .getgeo = ide_gd_getgeo,
305 .media_changed = ide_gd_media_changed, 308 .media_changed = ide_gd_media_changed,
306 .revalidate_disk = ide_gd_revalidate_disk 309 .revalidate_disk = ide_gd_revalidate_disk
diff --git a/drivers/ide/h8300/ide-h8300.c b/drivers/ide/ide-h8300.c
index e2cdd2e9cdec..e2cdd2e9cdec 100644
--- a/drivers/ide/h8300/ide-h8300.c
+++ b/drivers/ide/ide-h8300.c
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 7162d67562af..cc35d6dbd410 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -132,10 +132,14 @@ int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
132} 132}
133EXPORT_SYMBOL(ide_end_request); 133EXPORT_SYMBOL(ide_end_request);
134 134
135static void ide_complete_power_step(ide_drive_t *drive, struct request *rq, u8 stat, u8 error) 135static void ide_complete_power_step(ide_drive_t *drive, struct request *rq)
136{ 136{
137 struct request_pm_state *pm = rq->data; 137 struct request_pm_state *pm = rq->data;
138 138
139#ifdef DEBUG_PM
140 printk(KERN_INFO "%s: complete_power_step(step: %d)\n",
141 drive->name, pm->pm_step);
142#endif
139 if (drive->media != ide_disk) 143 if (drive->media != ide_disk)
140 return; 144 return;
141 145
@@ -172,7 +176,7 @@ static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *
172 /* Not supported? Switch to next step now. */ 176 /* Not supported? Switch to next step now. */
173 if (ata_id_flush_enabled(drive->id) == 0 || 177 if (ata_id_flush_enabled(drive->id) == 0 ||
174 (drive->dev_flags & IDE_DFLAG_WCACHE) == 0) { 178 (drive->dev_flags & IDE_DFLAG_WCACHE) == 0) {
175 ide_complete_power_step(drive, rq, 0, 0); 179 ide_complete_power_step(drive, rq);
176 return ide_stopped; 180 return ide_stopped;
177 } 181 }
178 if (ata_id_flush_ext_enabled(drive->id)) 182 if (ata_id_flush_ext_enabled(drive->id))
@@ -191,7 +195,7 @@ static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *
191 if (drive->media != ide_disk) 195 if (drive->media != ide_disk)
192 pm->pm_step = IDE_PM_RESTORE_DMA; 196 pm->pm_step = IDE_PM_RESTORE_DMA;
193 else 197 else
194 ide_complete_power_step(drive, rq, 0, 0); 198 ide_complete_power_step(drive, rq);
195 return ide_stopped; 199 return ide_stopped;
196 case IDE_PM_IDLE: /* Resume step 2 (idle) */ 200 case IDE_PM_IDLE: /* Resume step 2 (idle) */
197 args->tf.command = ATA_CMD_IDLEIMMEDIATE; 201 args->tf.command = ATA_CMD_IDLEIMMEDIATE;
@@ -322,11 +326,8 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
322 } 326 }
323 } else if (blk_pm_request(rq)) { 327 } else if (blk_pm_request(rq)) {
324 struct request_pm_state *pm = rq->data; 328 struct request_pm_state *pm = rq->data;
325#ifdef DEBUG_PM 329
326 printk("%s: complete_power_step(step: %d, stat: %x, err: %x)\n", 330 ide_complete_power_step(drive, rq);
327 drive->name, rq->pm->pm_step, stat, err);
328#endif
329 ide_complete_power_step(drive, rq, stat, err);
330 if (pm->pm_step == IDE_PM_COMPLETED) 331 if (pm->pm_step == IDE_PM_COMPLETED)
331 ide_complete_pm_request(drive, rq); 332 ide_complete_pm_request(drive, rq);
332 return; 333 return;
@@ -804,7 +805,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
804 struct request_pm_state *pm = rq->data; 805 struct request_pm_state *pm = rq->data;
805#ifdef DEBUG_PM 806#ifdef DEBUG_PM
806 printk("%s: start_power_step(step: %d)\n", 807 printk("%s: start_power_step(step: %d)\n",
807 drive->name, rq->pm->pm_step); 808 drive->name, pm->pm_step);
808#endif 809#endif
809 startstop = ide_start_power_step(drive, rq); 810 startstop = ide_start_power_step(drive, rq);
810 if (startstop == ide_stopped && 811 if (startstop == ide_stopped &&
@@ -967,14 +968,13 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
967 ide_startstop_t startstop; 968 ide_startstop_t startstop;
968 int loops = 0; 969 int loops = 0;
969 970
970 /* for atari only: POSSIBLY BROKEN HERE(?) */
971 ide_get_lock(ide_intr, hwgroup);
972
973 /* caller must own ide_lock */ 971 /* caller must own ide_lock */
974 BUG_ON(!irqs_disabled()); 972 BUG_ON(!irqs_disabled());
975 973
976 while (!hwgroup->busy) { 974 while (!hwgroup->busy) {
977 hwgroup->busy = 1; 975 hwgroup->busy = 1;
976 /* for atari only */
977 ide_get_lock(ide_intr, hwgroup);
978 drive = choose_drive(hwgroup); 978 drive = choose_drive(hwgroup);
979 if (drive == NULL) { 979 if (drive == NULL) {
980 int sleeping = 0; 980 int sleeping = 0;
diff --git a/drivers/ide/ide-ioctls.c b/drivers/ide/ide-ioctls.c
index a90945f49792..fcde16bb53a7 100644
--- a/drivers/ide/ide-ioctls.c
+++ b/drivers/ide/ide-ioctls.c
@@ -240,8 +240,7 @@ static int generic_drive_reset(ide_drive_t *drive)
240 return ret; 240 return ret;
241} 241}
242 242
243int generic_ide_ioctl(ide_drive_t *drive, struct file *file, 243int generic_ide_ioctl(ide_drive_t *drive, struct block_device *bdev,
244 struct block_device *bdev,
245 unsigned int cmd, unsigned long arg) 244 unsigned int cmd, unsigned long arg)
246{ 245{
247 int err; 246 int err;
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index bb7a1ed8094e..c41c3b9b6f02 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 2000-2002 Andre Hedrick <andre@linux-ide.org> 2 * Copyright (C) 2000-2002 Andre Hedrick <andre@linux-ide.org>
3 * Copyright (C) 2003 Red Hat <alan@redhat.com> 3 * Copyright (C) 2003 Red Hat
4 * 4 *
5 */ 5 */
6 6
@@ -457,18 +457,14 @@ int drive_is_ready (ide_drive_t *drive)
457 if (drive->waiting_for_dma) 457 if (drive->waiting_for_dma)
458 return hwif->dma_ops->dma_test_irq(drive); 458 return hwif->dma_ops->dma_test_irq(drive);
459 459
460#if 0
461 /* need to guarantee 400ns since last command was issued */
462 udelay(1);
463#endif
464
465 /* 460 /*
466 * We do a passive status test under shared PCI interrupts on 461 * We do a passive status test under shared PCI interrupts on
467 * cards that truly share the ATA side interrupt, but may also share 462 * cards that truly share the ATA side interrupt, but may also share
468 * an interrupt with another pci card/device. We make no assumptions 463 * an interrupt with another pci card/device. We make no assumptions
469 * about possible isa-pnp and pci-pnp issues yet. 464 * about possible isa-pnp and pci-pnp issues yet.
470 */ 465 */
471 if (hwif->io_ports.ctl_addr) 466 if (hwif->io_ports.ctl_addr &&
467 (hwif->host_flags & IDE_HFLAG_BROKEN_ALTSTATUS) == 0)
472 stat = hwif->tp_ops->read_altstatus(hwif); 468 stat = hwif->tp_ops->read_altstatus(hwif);
473 else 469 else
474 /* Note: this may clear a pending IRQ!! */ 470 /* Note: this may clear a pending IRQ!! */
@@ -610,6 +606,7 @@ static const struct drive_list_entry ivb_list[] = {
610 { "TSSTcorp CDDVDW SH-S202N" , "SB01" }, 606 { "TSSTcorp CDDVDW SH-S202N" , "SB01" },
611 { "TSSTcorp CDDVDW SH-S202H" , "SB00" }, 607 { "TSSTcorp CDDVDW SH-S202H" , "SB00" },
612 { "TSSTcorp CDDVDW SH-S202H" , "SB01" }, 608 { "TSSTcorp CDDVDW SH-S202H" , "SB01" },
609 { "SAMSUNG SP0822N" , "WA100-10" },
613 { NULL , NULL } 610 { NULL , NULL }
614}; 611};
615 612
diff --git a/drivers/ide/pci/generic.c b/drivers/ide/ide-pci-generic.c
index 474f96a7c076..bddae2b329a0 100644
--- a/drivers/ide/pci/generic.c
+++ b/drivers/ide/ide-pci-generic.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 2001-2002 Andre Hedrick <andre@linux-ide.org> 2 * Copyright (C) 2001-2002 Andre Hedrick <andre@linux-ide.org>
3 * Portions (C) Copyright 2002 Red Hat Inc <alan@redhat.com> 3 * Portions (C) Copyright 2002 Red Hat Inc
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the 6 * under the terms of the GNU General Public License as published by the
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 1649ea54f76c..c55bdbd22314 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -266,7 +266,8 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
266 /* take a deep breath */ 266 /* take a deep breath */
267 msleep(50); 267 msleep(50);
268 268
269 if (io_ports->ctl_addr) { 269 if (io_ports->ctl_addr &&
270 (hwif->host_flags & IDE_HFLAG_BROKEN_ALTSTATUS) == 0) {
270 a = tp_ops->read_altstatus(hwif); 271 a = tp_ops->read_altstatus(hwif);
271 s = tp_ops->read_status(hwif); 272 s = tp_ops->read_status(hwif);
272 if ((a ^ s) & ~ATA_IDX) 273 if ((a ^ s) & ~ATA_IDX)
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c
index c31d0dd7a532..f3cddd1b2f8f 100644
--- a/drivers/ide/ide-proc.c
+++ b/drivers/ide/ide-proc.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 1997-1998 Mark Lord 2 * Copyright (C) 1997-1998 Mark Lord
3 * Copyright (C) 2003 Red Hat <alan@redhat.com> 3 * Copyright (C) 2003 Red Hat
4 * 4 *
5 * Some code was moved here from ide.c, see it for original copyrights. 5 * Some code was moved here from ide.c, see it for original copyrights.
6 */ 6 */
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index b2b2e5e8d38e..a2d470eb2b55 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -2340,35 +2340,30 @@ static const struct file_operations idetape_fops = {
2340 .release = idetape_chrdev_release, 2340 .release = idetape_chrdev_release,
2341}; 2341};
2342 2342
2343static int idetape_open(struct inode *inode, struct file *filp) 2343static int idetape_open(struct block_device *bdev, fmode_t mode)
2344{ 2344{
2345 struct gendisk *disk = inode->i_bdev->bd_disk; 2345 struct ide_tape_obj *tape = ide_tape_get(bdev->bd_disk);
2346 struct ide_tape_obj *tape;
2347 2346
2348 tape = ide_tape_get(disk);
2349 if (!tape) 2347 if (!tape)
2350 return -ENXIO; 2348 return -ENXIO;
2351 2349
2352 return 0; 2350 return 0;
2353} 2351}
2354 2352
2355static int idetape_release(struct inode *inode, struct file *filp) 2353static int idetape_release(struct gendisk *disk, fmode_t mode)
2356{ 2354{
2357 struct gendisk *disk = inode->i_bdev->bd_disk;
2358 struct ide_tape_obj *tape = ide_drv_g(disk, ide_tape_obj); 2355 struct ide_tape_obj *tape = ide_drv_g(disk, ide_tape_obj);
2359 2356
2360 ide_tape_put(tape); 2357 ide_tape_put(tape);
2361
2362 return 0; 2358 return 0;
2363} 2359}
2364 2360
2365static int idetape_ioctl(struct inode *inode, struct file *file, 2361static int idetape_ioctl(struct block_device *bdev, fmode_t mode,
2366 unsigned int cmd, unsigned long arg) 2362 unsigned int cmd, unsigned long arg)
2367{ 2363{
2368 struct block_device *bdev = inode->i_bdev;
2369 struct ide_tape_obj *tape = ide_drv_g(bdev->bd_disk, ide_tape_obj); 2364 struct ide_tape_obj *tape = ide_drv_g(bdev->bd_disk, ide_tape_obj);
2370 ide_drive_t *drive = tape->drive; 2365 ide_drive_t *drive = tape->drive;
2371 int err = generic_ide_ioctl(drive, file, bdev, cmd, arg); 2366 int err = generic_ide_ioctl(drive, bdev, cmd, arg);
2372 if (err == -EINVAL) 2367 if (err == -EINVAL)
2373 err = idetape_blkdev_ioctl(drive, cmd, arg); 2368 err = idetape_blkdev_ioctl(drive, cmd, arg);
2374 return err; 2369 return err;
@@ -2378,7 +2373,7 @@ static struct block_device_operations idetape_block_ops = {
2378 .owner = THIS_MODULE, 2373 .owner = THIS_MODULE,
2379 .open = idetape_open, 2374 .open = idetape_open,
2380 .release = idetape_release, 2375 .release = idetape_release,
2381 .ioctl = idetape_ioctl, 2376 .locked_ioctl = idetape_ioctl,
2382}; 2377};
2383 2378
2384static int ide_tape_probe(ide_drive_t *drive) 2379static int ide_tape_probe(ide_drive_t *drive)
diff --git a/drivers/ide/arm/ide_arm.c b/drivers/ide/ide_arm.c
index f728f2927b5a..f728f2927b5a 100644
--- a/drivers/ide/arm/ide_arm.c
+++ b/drivers/ide/ide_arm.c
diff --git a/drivers/ide/legacy/ide_platform.c b/drivers/ide/ide_platform.c
index 051b4ab0f359..051b4ab0f359 100644
--- a/drivers/ide/legacy/ide_platform.c
+++ b/drivers/ide/ide_platform.c
diff --git a/drivers/ide/pci/it8213.c b/drivers/ide/it8213.c
index 7c2feeb3c5ec..7c2feeb3c5ec 100644
--- a/drivers/ide/pci/it8213.c
+++ b/drivers/ide/it8213.c
diff --git a/drivers/ide/pci/it821x.c b/drivers/ide/it821x.c
index 995e18bb3139..ef004089761b 100644
--- a/drivers/ide/pci/it821x.c
+++ b/drivers/ide/it821x.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2004 Red Hat <alan@redhat.com> 2 * Copyright (C) 2004 Red Hat
3 * Copyright (C) 2007 Bartlomiej Zolnierkiewicz 3 * Copyright (C) 2007 Bartlomiej Zolnierkiewicz
4 * 4 *
5 * May be copied or modified under the terms of the GNU General Public License 5 * May be copied or modified under the terms of the GNU General Public License
diff --git a/drivers/ide/pci/jmicron.c b/drivers/ide/jmicron.c
index 9a68433cf46d..bf2be6431b20 100644
--- a/drivers/ide/pci/jmicron.c
+++ b/drivers/ide/jmicron.c
@@ -1,6 +1,6 @@
1 1
2/* 2/*
3 * Copyright (C) 2006 Red Hat <alan@redhat.com> 3 * Copyright (C) 2006 Red Hat
4 * 4 *
5 * May be copied or modified under the terms of the GNU General Public License 5 * May be copied or modified under the terms of the GNU General Public License
6 */ 6 */
diff --git a/drivers/ide/legacy/Makefile b/drivers/ide/legacy/Makefile
deleted file mode 100644
index 6939329f89e8..000000000000
--- a/drivers/ide/legacy/Makefile
+++ /dev/null
@@ -1,25 +0,0 @@
1
2# link order is important here
3
4obj-$(CONFIG_BLK_DEV_ALI14XX) += ali14xx.o
5obj-$(CONFIG_BLK_DEV_UMC8672) += umc8672.o
6obj-$(CONFIG_BLK_DEV_DTC2278) += dtc2278.o
7obj-$(CONFIG_BLK_DEV_HT6560B) += ht6560b.o
8obj-$(CONFIG_BLK_DEV_QD65XX) += qd65xx.o
9obj-$(CONFIG_BLK_DEV_4DRIVES) += ide-4drives.o
10
11obj-$(CONFIG_BLK_DEV_GAYLE) += gayle.o
12obj-$(CONFIG_BLK_DEV_FALCON_IDE) += falconide.o
13obj-$(CONFIG_BLK_DEV_MAC_IDE) += macide.o
14obj-$(CONFIG_BLK_DEV_Q40IDE) += q40ide.o
15obj-$(CONFIG_BLK_DEV_BUDDHA) += buddha.o
16
17ifeq ($(CONFIG_BLK_DEV_IDECS), m)
18 obj-m += ide-cs.o
19endif
20
21ifeq ($(CONFIG_BLK_DEV_PLATFORM), m)
22 obj-m += ide_platform.o
23endif
24
25EXTRA_CFLAGS := -Idrivers/ide
diff --git a/drivers/ide/legacy/macide.c b/drivers/ide/macide.c
index 43f97cc1d30e..43f97cc1d30e 100644
--- a/drivers/ide/legacy/macide.c
+++ b/drivers/ide/macide.c
diff --git a/drivers/ide/mips/Makefile b/drivers/ide/mips/Makefile
deleted file mode 100644
index 5873fa0b8769..000000000000
--- a/drivers/ide/mips/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
1obj-$(CONFIG_BLK_DEV_IDE_AU1XXX) += au1xxx-ide.o
2
3EXTRA_CFLAGS := -Idrivers/ide
diff --git a/drivers/ide/pci/ns87415.c b/drivers/ide/ns87415.c
index 13789060f407..13789060f407 100644
--- a/drivers/ide/pci/ns87415.c
+++ b/drivers/ide/ns87415.c
diff --git a/drivers/ide/pci/opti621.c b/drivers/ide/opti621.c
index 6048eda3cd61..6048eda3cd61 100644
--- a/drivers/ide/pci/opti621.c
+++ b/drivers/ide/opti621.c
diff --git a/drivers/ide/arm/palm_bk3710.c b/drivers/ide/palm_bk3710.c
index 122ed3c072fd..122ed3c072fd 100644
--- a/drivers/ide/arm/palm_bk3710.c
+++ b/drivers/ide/palm_bk3710.c
diff --git a/drivers/ide/pci/Makefile b/drivers/ide/pci/Makefile
deleted file mode 100644
index ab44a1f5f5a9..000000000000
--- a/drivers/ide/pci/Makefile
+++ /dev/null
@@ -1,43 +0,0 @@
1
2obj-$(CONFIG_BLK_DEV_AEC62XX) += aec62xx.o
3obj-$(CONFIG_BLK_DEV_ALI15X3) += alim15x3.o
4obj-$(CONFIG_BLK_DEV_AMD74XX) += amd74xx.o
5obj-$(CONFIG_BLK_DEV_ATIIXP) += atiixp.o
6obj-$(CONFIG_BLK_DEV_CELLEB) += scc_pata.o
7obj-$(CONFIG_BLK_DEV_CMD64X) += cmd64x.o
8obj-$(CONFIG_BLK_DEV_CS5520) += cs5520.o
9obj-$(CONFIG_BLK_DEV_CS5530) += cs5530.o
10obj-$(CONFIG_BLK_DEV_CS5535) += cs5535.o
11obj-$(CONFIG_BLK_DEV_SC1200) += sc1200.o
12obj-$(CONFIG_BLK_DEV_CY82C693) += cy82c693.o
13obj-$(CONFIG_BLK_DEV_DELKIN) += delkin_cb.o
14obj-$(CONFIG_BLK_DEV_HPT366) += hpt366.o
15obj-$(CONFIG_BLK_DEV_IT8213) += it8213.o
16obj-$(CONFIG_BLK_DEV_IT821X) += it821x.o
17obj-$(CONFIG_BLK_DEV_JMICRON) += jmicron.o
18obj-$(CONFIG_BLK_DEV_NS87415) += ns87415.o
19obj-$(CONFIG_BLK_DEV_OPTI621) += opti621.o
20obj-$(CONFIG_BLK_DEV_PDC202XX_OLD) += pdc202xx_old.o
21obj-$(CONFIG_BLK_DEV_PDC202XX_NEW) += pdc202xx_new.o
22obj-$(CONFIG_BLK_DEV_PIIX) += piix.o
23obj-$(CONFIG_BLK_DEV_RZ1000) += rz1000.o
24obj-$(CONFIG_BLK_DEV_SVWKS) += serverworks.o
25obj-$(CONFIG_BLK_DEV_SGIIOC4) += sgiioc4.o
26obj-$(CONFIG_BLK_DEV_SIIMAGE) += siimage.o
27obj-$(CONFIG_BLK_DEV_SIS5513) += sis5513.o
28obj-$(CONFIG_BLK_DEV_SL82C105) += sl82c105.o
29obj-$(CONFIG_BLK_DEV_SLC90E66) += slc90e66.o
30obj-$(CONFIG_BLK_DEV_TC86C001) += tc86c001.o
31obj-$(CONFIG_BLK_DEV_TRIFLEX) += triflex.o
32obj-$(CONFIG_BLK_DEV_TRM290) += trm290.o
33obj-$(CONFIG_BLK_DEV_VIA82CXXX) += via82cxxx.o
34
35# Must appear at the end of the block
36obj-$(CONFIG_BLK_DEV_GENERIC) += ide-pci-generic.o
37ide-pci-generic-y += generic.o
38
39ifeq ($(CONFIG_BLK_DEV_CMD640), m)
40 obj-m += cmd640.o
41endif
42
43EXTRA_CFLAGS := -Idrivers/ide
diff --git a/drivers/ide/pci/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
index 211ae46e3e0c..211ae46e3e0c 100644
--- a/drivers/ide/pci/pdc202xx_new.c
+++ b/drivers/ide/pdc202xx_new.c
diff --git a/drivers/ide/pci/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
index 799557c25eef..799557c25eef 100644
--- a/drivers/ide/pci/pdc202xx_old.c
+++ b/drivers/ide/pdc202xx_old.c
diff --git a/drivers/ide/pci/piix.c b/drivers/ide/piix.c
index d63f9fdca76b..61d2d920a5cd 100644
--- a/drivers/ide/pci/piix.c
+++ b/drivers/ide/piix.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer 2 * Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer
3 * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org> 3 * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
4 * Copyright (C) 2003 Red Hat Inc <alan@redhat.com> 4 * Copyright (C) 2003 Red Hat
5 * Copyright (C) 2006-2007 MontaVista Software, Inc. <source@mvista.com> 5 * Copyright (C) 2006-2007 MontaVista Software, Inc. <source@mvista.com>
6 * 6 *
7 * May be copied or modified under the terms of the GNU General Public License 7 * May be copied or modified under the terms of the GNU General Public License
diff --git a/drivers/ide/ppc/pmac.c b/drivers/ide/pmac.c
index 2e19d6298536..7c481bb56fab 100644
--- a/drivers/ide/ppc/pmac.c
+++ b/drivers/ide/pmac.c
@@ -66,7 +66,6 @@ typedef struct pmac_ide_hwif {
66 struct macio_dev *mdev; 66 struct macio_dev *mdev;
67 u32 timings[4]; 67 u32 timings[4];
68 volatile u32 __iomem * *kauai_fcr; 68 volatile u32 __iomem * *kauai_fcr;
69#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
70 /* Those fields are duplicating what is in hwif. We currently 69 /* Those fields are duplicating what is in hwif. We currently
71 * can't use the hwif ones because of some assumptions that are 70 * can't use the hwif ones because of some assumptions that are
72 * beeing done by the generic code about the kind of dma controller 71 * beeing done by the generic code about the kind of dma controller
@@ -74,8 +73,6 @@ typedef struct pmac_ide_hwif {
74 */ 73 */
75 volatile struct dbdma_regs __iomem * dma_regs; 74 volatile struct dbdma_regs __iomem * dma_regs;
76 struct dbdma_cmd* dma_table_cpu; 75 struct dbdma_cmd* dma_table_cpu;
77#endif
78
79} pmac_ide_hwif_t; 76} pmac_ide_hwif_t;
80 77
81enum { 78enum {
@@ -222,8 +219,6 @@ static const char* model_name[] = {
222#define KAUAI_FCR_UATA_RESET_N 0x00000002 219#define KAUAI_FCR_UATA_RESET_N 0x00000002
223#define KAUAI_FCR_UATA_ENABLE 0x00000001 220#define KAUAI_FCR_UATA_ENABLE 0x00000001
224 221
225#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
226
227/* Rounded Multiword DMA timings 222/* Rounded Multiword DMA timings
228 * 223 *
229 * I gave up finding a generic formula for all controller 224 * I gave up finding a generic formula for all controller
@@ -413,8 +408,6 @@ static int pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq);
413static void pmac_ide_selectproc(ide_drive_t *drive); 408static void pmac_ide_selectproc(ide_drive_t *drive);
414static void pmac_ide_kauai_selectproc(ide_drive_t *drive); 409static void pmac_ide_kauai_selectproc(ide_drive_t *drive);
415 410
416#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
417
418#define PMAC_IDE_REG(x) \ 411#define PMAC_IDE_REG(x) \
419 ((void __iomem *)((drive)->hwif->io_ports.data_addr + (x))) 412 ((void __iomem *)((drive)->hwif->io_ports.data_addr + (x)))
420 413
@@ -584,8 +577,6 @@ pmac_ide_set_pio_mode(ide_drive_t *drive, const u8 pio)
584 pmac_ide_do_update_timings(drive); 577 pmac_ide_do_update_timings(drive);
585} 578}
586 579
587#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
588
589/* 580/*
590 * Calculate KeyLargo ATA/66 UDMA timings 581 * Calculate KeyLargo ATA/66 UDMA timings
591 */ 582 */
@@ -786,7 +777,6 @@ set_timings_mdma(ide_drive_t *drive, int intf_type, u32 *timings, u32 *timings2,
786 drive->name, speed & 0xf, *timings); 777 drive->name, speed & 0xf, *timings);
787#endif 778#endif
788} 779}
789#endif /* #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC */
790 780
791static void pmac_ide_set_dma_mode(ide_drive_t *drive, const u8 speed) 781static void pmac_ide_set_dma_mode(ide_drive_t *drive, const u8 speed)
792{ 782{
@@ -804,7 +794,6 @@ static void pmac_ide_set_dma_mode(ide_drive_t *drive, const u8 speed)
804 tl[0] = *timings; 794 tl[0] = *timings;
805 tl[1] = *timings2; 795 tl[1] = *timings2;
806 796
807#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
808 if (speed >= XFER_UDMA_0) { 797 if (speed >= XFER_UDMA_0) {
809 if (pmif->kind == controller_kl_ata4) 798 if (pmif->kind == controller_kl_ata4)
810 ret = set_timings_udma_ata4(&tl[0], speed); 799 ret = set_timings_udma_ata4(&tl[0], speed);
@@ -817,7 +806,7 @@ static void pmac_ide_set_dma_mode(ide_drive_t *drive, const u8 speed)
817 ret = -1; 806 ret = -1;
818 } else 807 } else
819 set_timings_mdma(drive, pmif->kind, &tl[0], &tl[1], speed); 808 set_timings_mdma(drive, pmif->kind, &tl[0], &tl[1], speed);
820#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */ 809
821 if (ret) 810 if (ret)
822 return; 811 return;
823 812
@@ -1008,9 +997,7 @@ static const struct ide_port_info pmac_port_info = {
1008 .chipset = ide_pmac, 997 .chipset = ide_pmac,
1009 .tp_ops = &pmac_tp_ops, 998 .tp_ops = &pmac_tp_ops,
1010 .port_ops = &pmac_ide_port_ops, 999 .port_ops = &pmac_ide_port_ops,
1011#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
1012 .dma_ops = &pmac_dma_ops, 1000 .dma_ops = &pmac_dma_ops,
1013#endif
1014 .host_flags = IDE_HFLAG_SET_PIO_MODE_KEEP_DMA | 1001 .host_flags = IDE_HFLAG_SET_PIO_MODE_KEEP_DMA |
1015 IDE_HFLAG_POST_SET_MODE | 1002 IDE_HFLAG_POST_SET_MODE |
1016 IDE_HFLAG_MMIO | 1003 IDE_HFLAG_MMIO |
@@ -1182,7 +1169,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
1182 pmif->regbase = regbase; 1169 pmif->regbase = regbase;
1183 pmif->irq = irq; 1170 pmif->irq = irq;
1184 pmif->kauai_fcr = NULL; 1171 pmif->kauai_fcr = NULL;
1185#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC 1172
1186 if (macio_resource_count(mdev) >= 2) { 1173 if (macio_resource_count(mdev) >= 2) {
1187 if (macio_request_resource(mdev, 1, "ide-pmac (dma)")) 1174 if (macio_request_resource(mdev, 1, "ide-pmac (dma)"))
1188 printk(KERN_WARNING "ide-pmac: can't request DMA " 1175 printk(KERN_WARNING "ide-pmac: can't request DMA "
@@ -1192,7 +1179,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
1192 pmif->dma_regs = ioremap(macio_resource_start(mdev, 1), 0x1000); 1179 pmif->dma_regs = ioremap(macio_resource_start(mdev, 1), 0x1000);
1193 } else 1180 } else
1194 pmif->dma_regs = NULL; 1181 pmif->dma_regs = NULL;
1195#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */ 1182
1196 dev_set_drvdata(&mdev->ofdev.dev, pmif); 1183 dev_set_drvdata(&mdev->ofdev.dev, pmif);
1197 1184
1198 memset(&hw, 0, sizeof(hw)); 1185 memset(&hw, 0, sizeof(hw));
@@ -1300,9 +1287,7 @@ pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1300 1287
1301 base = ioremap(rbase, rlen); 1288 base = ioremap(rbase, rlen);
1302 pmif->regbase = (unsigned long) base + 0x2000; 1289 pmif->regbase = (unsigned long) base + 0x2000;
1303#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
1304 pmif->dma_regs = base + 0x1000; 1290 pmif->dma_regs = base + 0x1000;
1305#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
1306 pmif->kauai_fcr = base; 1291 pmif->kauai_fcr = base;
1307 pmif->irq = pdev->irq; 1292 pmif->irq = pdev->irq;
1308 1293
@@ -1434,8 +1419,6 @@ out:
1434 return error; 1419 return error;
1435} 1420}
1436 1421
1437#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
1438
1439/* 1422/*
1440 * pmac_ide_build_dmatable builds the DBDMA command list 1423 * pmac_ide_build_dmatable builds the DBDMA command list
1441 * for a transfer and sets the DBDMA channel to point to it. 1424 * for a transfer and sets the DBDMA channel to point to it.
@@ -1723,13 +1706,6 @@ static int __devinit pmac_ide_init_dma(ide_hwif_t *hwif,
1723 1706
1724 return 0; 1707 return 0;
1725} 1708}
1726#else
1727static int __devinit pmac_ide_init_dma(ide_hwif_t *hwif,
1728 const struct ide_port_info *d)
1729{
1730 return -EOPNOTSUPP;
1731}
1732#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
1733 1709
1734module_init(pmac_ide_probe); 1710module_init(pmac_ide_probe);
1735 1711
diff --git a/drivers/ide/ppc/Makefile b/drivers/ide/ppc/Makefile
deleted file mode 100644
index 74e52adcdf4b..000000000000
--- a/drivers/ide/ppc/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
1
2obj-$(CONFIG_BLK_DEV_IDE_PMAC) += pmac.o
diff --git a/drivers/ide/legacy/q40ide.c b/drivers/ide/q40ide.c
index 4af4a8ce4cdf..4af4a8ce4cdf 100644
--- a/drivers/ide/legacy/q40ide.c
+++ b/drivers/ide/q40ide.c
diff --git a/drivers/ide/legacy/qd65xx.c b/drivers/ide/qd65xx.c
index bc27c7aba936..bc27c7aba936 100644
--- a/drivers/ide/legacy/qd65xx.c
+++ b/drivers/ide/qd65xx.c
diff --git a/drivers/ide/legacy/qd65xx.h b/drivers/ide/qd65xx.h
index c83dea85e621..c83dea85e621 100644
--- a/drivers/ide/legacy/qd65xx.h
+++ b/drivers/ide/qd65xx.h
diff --git a/drivers/ide/arm/rapide.c b/drivers/ide/rapide.c
index 78d27d9ae430..d5003ca69801 100644
--- a/drivers/ide/arm/rapide.c
+++ b/drivers/ide/rapide.c
@@ -11,7 +11,7 @@
11 11
12#include <asm/ecard.h> 12#include <asm/ecard.h>
13 13
14static struct const ide_port_info rapide_port_info = { 14static const struct ide_port_info rapide_port_info = {
15 .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA, 15 .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA,
16}; 16};
17 17
@@ -97,7 +97,7 @@ static int __init rapide_init(void)
97 97
98static void __exit rapide_exit(void) 98static void __exit rapide_exit(void)
99{ 99{
100 ecard_unregister_driver(&rapide_driver); 100 ecard_remove_driver(&rapide_driver);
101} 101}
102 102
103MODULE_LICENSE("GPL"); 103MODULE_LICENSE("GPL");
diff --git a/drivers/ide/pci/rz1000.c b/drivers/ide/rz1000.c
index 7daf0135cbac..7daf0135cbac 100644
--- a/drivers/ide/pci/rz1000.c
+++ b/drivers/ide/rz1000.c
diff --git a/drivers/ide/pci/sc1200.c b/drivers/ide/sc1200.c
index f1a8758e3a99..f1a8758e3a99 100644
--- a/drivers/ide/pci/sc1200.c
+++ b/drivers/ide/sc1200.c
diff --git a/drivers/ide/pci/scc_pata.c b/drivers/ide/scc_pata.c
index 49f163aa51e3..0f48f9dacfa5 100644
--- a/drivers/ide/pci/scc_pata.c
+++ b/drivers/ide/scc_pata.c
@@ -5,7 +5,7 @@
5 * 5 *
6 * This code is based on drivers/ide/pci/siimage.c: 6 * This code is based on drivers/ide/pci/siimage.c:
7 * Copyright (C) 2001-2002 Andre Hedrick <andre@linux-ide.org> 7 * Copyright (C) 2001-2002 Andre Hedrick <andre@linux-ide.org>
8 * Copyright (C) 2003 Red Hat <alan@redhat.com> 8 * Copyright (C) 2003 Red Hat
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by 11 * it under the terms of the GNU General Public License as published by
@@ -536,10 +536,6 @@ static u8 scc_udma_filter(ide_drive_t *drive)
536 536
537static int setup_mmio_scc (struct pci_dev *dev, const char *name) 537static int setup_mmio_scc (struct pci_dev *dev, const char *name)
538{ 538{
539 unsigned long ctl_base = pci_resource_start(dev, 0);
540 unsigned long dma_base = pci_resource_start(dev, 1);
541 unsigned long ctl_size = pci_resource_len(dev, 0);
542 unsigned long dma_size = pci_resource_len(dev, 1);
543 void __iomem *ctl_addr; 539 void __iomem *ctl_addr;
544 void __iomem *dma_addr; 540 void __iomem *dma_addr;
545 int i, ret; 541 int i, ret;
@@ -557,10 +553,12 @@ static int setup_mmio_scc (struct pci_dev *dev, const char *name)
557 return ret; 553 return ret;
558 } 554 }
559 555
560 if ((ctl_addr = ioremap(ctl_base, ctl_size)) == NULL) 556 ctl_addr = pci_ioremap_bar(dev, 0);
557 if (!ctl_addr)
561 goto fail_0; 558 goto fail_0;
562 559
563 if ((dma_addr = ioremap(dma_base, dma_size)) == NULL) 560 dma_addr = pci_ioremap_bar(dev, 1);
561 if (!dma_addr)
564 goto fail_1; 562 goto fail_1;
565 563
566 pci_set_master(dev); 564 pci_set_master(dev);
diff --git a/drivers/ide/pci/serverworks.c b/drivers/ide/serverworks.c
index 437bc919dafd..437bc919dafd 100644
--- a/drivers/ide/pci/serverworks.c
+++ b/drivers/ide/serverworks.c
diff --git a/drivers/ide/pci/sgiioc4.c b/drivers/ide/sgiioc4.c
index 8af9b23499fd..a687a7dfea6f 100644
--- a/drivers/ide/pci/sgiioc4.c
+++ b/drivers/ide/sgiioc4.c
@@ -550,7 +550,7 @@ static const struct ide_dma_ops sgiioc4_dma_ops = {
550 .dma_timeout = ide_dma_timeout, 550 .dma_timeout = ide_dma_timeout,
551}; 551};
552 552
553static const struct ide_port_info sgiioc4_port_info __devinitdata = { 553static const struct ide_port_info sgiioc4_port_info __devinitconst = {
554 .name = DRV_NAME, 554 .name = DRV_NAME,
555 .chipset = ide_pci, 555 .chipset = ide_pci,
556 .init_dma = ide_dma_sgiioc4, 556 .init_dma = ide_dma_sgiioc4,
@@ -567,14 +567,12 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
567 unsigned long cmd_base, irqport; 567 unsigned long cmd_base, irqport;
568 unsigned long bar0, cmd_phys_base, ctl; 568 unsigned long bar0, cmd_phys_base, ctl;
569 void __iomem *virt_base; 569 void __iomem *virt_base;
570 struct ide_host *host;
571 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; 570 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
572 struct ide_port_info d = sgiioc4_port_info;
573 int rc; 571 int rc;
574 572
575 /* Get the CmdBlk and CtrlBlk Base Registers */ 573 /* Get the CmdBlk and CtrlBlk Base Registers */
576 bar0 = pci_resource_start(dev, 0); 574 bar0 = pci_resource_start(dev, 0);
577 virt_base = ioremap(bar0, pci_resource_len(dev, 0)); 575 virt_base = pci_ioremap_bar(dev, 0);
578 if (virt_base == NULL) { 576 if (virt_base == NULL) {
579 printk(KERN_ERR "%s: Unable to remap BAR 0 address: 0x%lx\n", 577 printk(KERN_ERR "%s: Unable to remap BAR 0 address: 0x%lx\n",
580 DRV_NAME, bar0); 578 DRV_NAME, bar0);
@@ -590,7 +588,8 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
590 printk(KERN_ERR "%s %s -- ERROR: addresses 0x%08lx to 0x%08lx " 588 printk(KERN_ERR "%s %s -- ERROR: addresses 0x%08lx to 0x%08lx "
591 "already in use\n", DRV_NAME, pci_name(dev), 589 "already in use\n", DRV_NAME, pci_name(dev),
592 cmd_phys_base, cmd_phys_base + IOC4_CMD_CTL_BLK_SIZE); 590 cmd_phys_base, cmd_phys_base + IOC4_CMD_CTL_BLK_SIZE);
593 return -EBUSY; 591 rc = -EBUSY;
592 goto req_mem_rgn_err;
594 } 593 }
595 594
596 /* Initialize the IO registers */ 595 /* Initialize the IO registers */
@@ -603,21 +602,12 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
603 /* Initializing chipset IRQ Registers */ 602 /* Initializing chipset IRQ Registers */
604 writel(0x03, (void __iomem *)(irqport + IOC4_INTR_SET * 4)); 603 writel(0x03, (void __iomem *)(irqport + IOC4_INTR_SET * 4));
605 604
606 host = ide_host_alloc(&d, hws); 605 rc = ide_host_add(&sgiioc4_port_info, hws, NULL);
607 if (host == NULL) { 606 if (!rc)
608 rc = -ENOMEM; 607 return 0;
609 goto err;
610 }
611
612 rc = ide_host_register(host, &d, hws);
613 if (rc)
614 goto err_free;
615 608
616 return 0;
617err_free:
618 ide_host_free(host);
619err:
620 release_mem_region(cmd_phys_base, IOC4_CMD_CTL_BLK_SIZE); 609 release_mem_region(cmd_phys_base, IOC4_CMD_CTL_BLK_SIZE);
610req_mem_rgn_err:
621 iounmap(virt_base); 611 iounmap(virt_base);
622 return rc; 612 return rc;
623} 613}
@@ -643,7 +633,7 @@ out:
643 return ret; 633 return ret;
644} 634}
645 635
646int 636int __devinit
647ioc4_ide_attach_one(struct ioc4_driver_data *idd) 637ioc4_ide_attach_one(struct ioc4_driver_data *idd)
648{ 638{
649 /* PCI-RT does not bring out IDE connection. 639 /* PCI-RT does not bring out IDE connection.
@@ -655,7 +645,7 @@ ioc4_ide_attach_one(struct ioc4_driver_data *idd)
655 return pci_init_sgiioc4(idd->idd_pdev); 645 return pci_init_sgiioc4(idd->idd_pdev);
656} 646}
657 647
658static struct ioc4_submodule ioc4_ide_submodule = { 648static struct ioc4_submodule __devinitdata ioc4_ide_submodule = {
659 .is_name = "IOC4_ide", 649 .is_name = "IOC4_ide",
660 .is_owner = THIS_MODULE, 650 .is_owner = THIS_MODULE,
661 .is_probe = ioc4_ide_attach_one, 651 .is_probe = ioc4_ide_attach_one,
diff --git a/drivers/ide/pci/siimage.c b/drivers/ide/siimage.c
index eb4faf92c571..7d622d20bc4c 100644
--- a/drivers/ide/pci/siimage.c
+++ b/drivers/ide/siimage.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 2001-2002 Andre Hedrick <andre@linux-ide.org> 2 * Copyright (C) 2001-2002 Andre Hedrick <andre@linux-ide.org>
3 * Copyright (C) 2003 Red Hat <alan@redhat.com> 3 * Copyright (C) 2003 Red Hat
4 * Copyright (C) 2007-2008 MontaVista Software, Inc. 4 * Copyright (C) 2007-2008 MontaVista Software, Inc.
5 * Copyright (C) 2007-2008 Bartlomiej Zolnierkiewicz 5 * Copyright (C) 2007-2008 Bartlomiej Zolnierkiewicz
6 * 6 *
@@ -784,7 +784,7 @@ static int __devinit siimage_init_one(struct pci_dev *dev,
784 printk(KERN_WARNING DRV_NAME " %s: MMIO ports not " 784 printk(KERN_WARNING DRV_NAME " %s: MMIO ports not "
785 "available\n", pci_name(dev)); 785 "available\n", pci_name(dev));
786 } else { 786 } else {
787 ioaddr = ioremap(bar5, barsize); 787 ioaddr = pci_ioremap_bar(dev, 5);
788 if (ioaddr == NULL) 788 if (ioaddr == NULL)
789 release_mem_region(bar5, barsize); 789 release_mem_region(bar5, barsize);
790 } 790 }
diff --git a/drivers/ide/pci/sis5513.c b/drivers/ide/sis5513.c
index ad32e18c5ba3..ad32e18c5ba3 100644
--- a/drivers/ide/pci/sis5513.c
+++ b/drivers/ide/sis5513.c
diff --git a/drivers/ide/pci/sl82c105.c b/drivers/ide/sl82c105.c
index 84dc33602ff8..84dc33602ff8 100644
--- a/drivers/ide/pci/sl82c105.c
+++ b/drivers/ide/sl82c105.c
diff --git a/drivers/ide/pci/slc90e66.c b/drivers/ide/slc90e66.c
index 0f759e4ed779..0f759e4ed779 100644
--- a/drivers/ide/pci/slc90e66.c
+++ b/drivers/ide/slc90e66.c
diff --git a/drivers/ide/pci/tc86c001.c b/drivers/ide/tc86c001.c
index 93e2cce4b296..93e2cce4b296 100644
--- a/drivers/ide/pci/tc86c001.c
+++ b/drivers/ide/tc86c001.c
diff --git a/drivers/ide/pci/triflex.c b/drivers/ide/triflex.c
index b6ff40336aa9..b6ff40336aa9 100644
--- a/drivers/ide/pci/triflex.c
+++ b/drivers/ide/triflex.c
diff --git a/drivers/ide/pci/trm290.c b/drivers/ide/trm290.c
index 75ea61526566..75ea61526566 100644
--- a/drivers/ide/pci/trm290.c
+++ b/drivers/ide/trm290.c
diff --git a/drivers/ide/tx4938ide.c b/drivers/ide/tx4938ide.c
new file mode 100644
index 000000000000..9120063e8f87
--- /dev/null
+++ b/drivers/ide/tx4938ide.c
@@ -0,0 +1,323 @@
1/*
2 * TX4938 internal IDE driver
3 * Based on tx4939ide.c.
4 *
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * (C) Copyright TOSHIBA CORPORATION 2005-2007
10 */
11
12#include <linux/module.h>
13#include <linux/types.h>
14#include <linux/ide.h>
15#include <linux/init.h>
16#include <linux/platform_device.h>
17#include <linux/io.h>
18#include <asm/txx9/tx4938.h>
19
20static void tx4938ide_tune_ebusc(unsigned int ebus_ch,
21 unsigned int gbus_clock,
22 u8 pio)
23{
24 struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio);
25 u64 cr = __raw_readq(&tx4938_ebuscptr->cr[ebus_ch]);
26 unsigned int sp = (cr >> 4) & 3;
27 unsigned int clock = gbus_clock / (4 - sp);
28 unsigned int cycle = 1000000000 / clock;
29 unsigned int shwt;
30 int wt;
31
32 /* Minimum DIOx- active time */
33 wt = DIV_ROUND_UP(t->act8b, cycle) - 2;
34 /* IORDY setup time: 35ns */
35 wt = max_t(int, wt, DIV_ROUND_UP(35, cycle));
36 /* actual wait-cycle is max(wt & ~1, 1) */
37 if (wt > 2 && (wt & 1))
38 wt++;
39 wt &= ~1;
40 /* Address-valid to DIOR/DIOW setup */
41 shwt = DIV_ROUND_UP(t->setup, cycle);
42
43 /* -DIOx recovery time (SHWT * 4) and cycle time requirement */
44 while ((shwt * 4 + wt + (wt ? 2 : 3)) * cycle < t->cycle)
45 shwt++;
46 if (shwt > 7) {
47 pr_warning("tx4938ide: SHWT violation (%d)\n", shwt);
48 shwt = 7;
49 }
50 pr_debug("tx4938ide: ebus %d, bus cycle %dns, WT %d, SHWT %d\n",
51 ebus_ch, cycle, wt, shwt);
52
53 __raw_writeq((cr & ~0x3f007ull) | (wt << 12) | shwt,
54 &tx4938_ebuscptr->cr[ebus_ch]);
55}
56
57static void tx4938ide_set_pio_mode(ide_drive_t *drive, const u8 pio)
58{
59 ide_hwif_t *hwif = drive->hwif;
60 struct tx4938ide_platform_info *pdata = hwif->dev->platform_data;
61 u8 safe = pio;
62 ide_drive_t *pair;
63
64 pair = ide_get_pair_dev(drive);
65 if (pair)
66 safe = min(safe, ide_get_best_pio_mode(pair, 255, 5));
67 tx4938ide_tune_ebusc(pdata->ebus_ch, pdata->gbus_clock, safe);
68}
69
70#ifdef __BIG_ENDIAN
71
72/* custom iops (independent from SWAP_IO_SPACE) */
73static u8 tx4938ide_inb(unsigned long port)
74{
75 return __raw_readb((void __iomem *)port);
76}
77
78static void tx4938ide_outb(u8 value, unsigned long port)
79{
80 __raw_writeb(value, (void __iomem *)port);
81}
82
83static void tx4938ide_tf_load(ide_drive_t *drive, ide_task_t *task)
84{
85 ide_hwif_t *hwif = drive->hwif;
86 struct ide_io_ports *io_ports = &hwif->io_ports;
87 struct ide_taskfile *tf = &task->tf;
88 u8 HIHI = task->tf_flags & IDE_TFLAG_LBA48 ? 0xE0 : 0xEF;
89
90 if (task->tf_flags & IDE_TFLAG_FLAGGED)
91 HIHI = 0xFF;
92
93 if (task->tf_flags & IDE_TFLAG_OUT_DATA) {
94 u16 data = (tf->hob_data << 8) | tf->data;
95
96 /* no endian swap */
97 __raw_writew(data, (void __iomem *)io_ports->data_addr);
98 }
99
100 if (task->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE)
101 tx4938ide_outb(tf->hob_feature, io_ports->feature_addr);
102 if (task->tf_flags & IDE_TFLAG_OUT_HOB_NSECT)
103 tx4938ide_outb(tf->hob_nsect, io_ports->nsect_addr);
104 if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAL)
105 tx4938ide_outb(tf->hob_lbal, io_ports->lbal_addr);
106 if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAM)
107 tx4938ide_outb(tf->hob_lbam, io_ports->lbam_addr);
108 if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAH)
109 tx4938ide_outb(tf->hob_lbah, io_ports->lbah_addr);
110
111 if (task->tf_flags & IDE_TFLAG_OUT_FEATURE)
112 tx4938ide_outb(tf->feature, io_ports->feature_addr);
113 if (task->tf_flags & IDE_TFLAG_OUT_NSECT)
114 tx4938ide_outb(tf->nsect, io_ports->nsect_addr);
115 if (task->tf_flags & IDE_TFLAG_OUT_LBAL)
116 tx4938ide_outb(tf->lbal, io_ports->lbal_addr);
117 if (task->tf_flags & IDE_TFLAG_OUT_LBAM)
118 tx4938ide_outb(tf->lbam, io_ports->lbam_addr);
119 if (task->tf_flags & IDE_TFLAG_OUT_LBAH)
120 tx4938ide_outb(tf->lbah, io_ports->lbah_addr);
121
122 if (task->tf_flags & IDE_TFLAG_OUT_DEVICE)
123 tx4938ide_outb((tf->device & HIHI) | drive->select,
124 io_ports->device_addr);
125}
126
127static void tx4938ide_tf_read(ide_drive_t *drive, ide_task_t *task)
128{
129 ide_hwif_t *hwif = drive->hwif;
130 struct ide_io_ports *io_ports = &hwif->io_ports;
131 struct ide_taskfile *tf = &task->tf;
132
133 if (task->tf_flags & IDE_TFLAG_IN_DATA) {
134 u16 data;
135
136 /* no endian swap */
137 data = __raw_readw((void __iomem *)io_ports->data_addr);
138 tf->data = data & 0xff;
139 tf->hob_data = (data >> 8) & 0xff;
140 }
141
142 /* be sure we're looking at the low order bits */
143 tx4938ide_outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr);
144
145 if (task->tf_flags & IDE_TFLAG_IN_FEATURE)
146 tf->feature = tx4938ide_inb(io_ports->feature_addr);
147 if (task->tf_flags & IDE_TFLAG_IN_NSECT)
148 tf->nsect = tx4938ide_inb(io_ports->nsect_addr);
149 if (task->tf_flags & IDE_TFLAG_IN_LBAL)
150 tf->lbal = tx4938ide_inb(io_ports->lbal_addr);
151 if (task->tf_flags & IDE_TFLAG_IN_LBAM)
152 tf->lbam = tx4938ide_inb(io_ports->lbam_addr);
153 if (task->tf_flags & IDE_TFLAG_IN_LBAH)
154 tf->lbah = tx4938ide_inb(io_ports->lbah_addr);
155 if (task->tf_flags & IDE_TFLAG_IN_DEVICE)
156 tf->device = tx4938ide_inb(io_ports->device_addr);
157
158 if (task->tf_flags & IDE_TFLAG_LBA48) {
159 tx4938ide_outb(ATA_DEVCTL_OBS | 0x80, io_ports->ctl_addr);
160
161 if (task->tf_flags & IDE_TFLAG_IN_HOB_FEATURE)
162 tf->hob_feature =
163 tx4938ide_inb(io_ports->feature_addr);
164 if (task->tf_flags & IDE_TFLAG_IN_HOB_NSECT)
165 tf->hob_nsect = tx4938ide_inb(io_ports->nsect_addr);
166 if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAL)
167 tf->hob_lbal = tx4938ide_inb(io_ports->lbal_addr);
168 if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAM)
169 tf->hob_lbam = tx4938ide_inb(io_ports->lbam_addr);
170 if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAH)
171 tf->hob_lbah = tx4938ide_inb(io_ports->lbah_addr);
172 }
173}
174
175static void tx4938ide_input_data_swap(ide_drive_t *drive, struct request *rq,
176 void *buf, unsigned int len)
177{
178 unsigned long port = drive->hwif->io_ports.data_addr;
179 unsigned short *ptr = buf;
180 unsigned int count = (len + 1) / 2;
181
182 while (count--)
183 *ptr++ = cpu_to_le16(__raw_readw((void __iomem *)port));
184 __ide_flush_dcache_range((unsigned long)buf, roundup(len, 2));
185}
186
187static void tx4938ide_output_data_swap(ide_drive_t *drive, struct request *rq,
188 void *buf, unsigned int len)
189{
190 unsigned long port = drive->hwif->io_ports.data_addr;
191 unsigned short *ptr = buf;
192 unsigned int count = (len + 1) / 2;
193
194 while (count--) {
195 __raw_writew(le16_to_cpu(*ptr), (void __iomem *)port);
196 ptr++;
197 }
198 __ide_flush_dcache_range((unsigned long)buf, roundup(len, 2));
199}
200
201static const struct ide_tp_ops tx4938ide_tp_ops = {
202 .exec_command = ide_exec_command,
203 .read_status = ide_read_status,
204 .read_altstatus = ide_read_altstatus,
205 .read_sff_dma_status = ide_read_sff_dma_status,
206
207 .set_irq = ide_set_irq,
208
209 .tf_load = tx4938ide_tf_load,
210 .tf_read = tx4938ide_tf_read,
211
212 .input_data = tx4938ide_input_data_swap,
213 .output_data = tx4938ide_output_data_swap,
214};
215
216#endif /* __BIG_ENDIAN */
217
218static const struct ide_port_ops tx4938ide_port_ops = {
219 .set_pio_mode = tx4938ide_set_pio_mode,
220};
221
222static const struct ide_port_info tx4938ide_port_info __initdata = {
223 .port_ops = &tx4938ide_port_ops,
224#ifdef __BIG_ENDIAN
225 .tp_ops = &tx4938ide_tp_ops,
226#endif
227 .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA,
228 .pio_mask = ATA_PIO5,
229};
230
231static int __init tx4938ide_probe(struct platform_device *pdev)
232{
233 hw_regs_t hw;
234 hw_regs_t *hws[] = { &hw, NULL, NULL, NULL };
235 struct ide_host *host;
236 struct resource *res;
237 struct tx4938ide_platform_info *pdata = pdev->dev.platform_data;
238 int irq, ret, i;
239 unsigned long mapbase, mapctl;
240 struct ide_port_info d = tx4938ide_port_info;
241
242 irq = platform_get_irq(pdev, 0);
243 if (irq < 0)
244 return -ENODEV;
245 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
246 if (!res)
247 return -ENODEV;
248
249 if (!devm_request_mem_region(&pdev->dev, res->start,
250 res->end - res->start + 1, "tx4938ide"))
251 return -EBUSY;
252 mapbase = (unsigned long)devm_ioremap(&pdev->dev, res->start,
253 8 << pdata->ioport_shift);
254 mapctl = (unsigned long)devm_ioremap(&pdev->dev,
255 res->start + 0x10000 +
256 (6 << pdata->ioport_shift),
257 1 << pdata->ioport_shift);
258 if (!mapbase || !mapctl)
259 return -EBUSY;
260
261 memset(&hw, 0, sizeof(hw));
262 if (pdata->ioport_shift) {
263 unsigned long port = mapbase;
264 unsigned long ctl = mapctl;
265
266 hw.io_ports_array[0] = port;
267#ifdef __BIG_ENDIAN
268 port++;
269 ctl++;
270#endif
271 for (i = 1; i <= 7; i++)
272 hw.io_ports_array[i] =
273 port + (i << pdata->ioport_shift);
274 hw.io_ports.ctl_addr = ctl;
275 } else
276 ide_std_init_ports(&hw, mapbase, mapctl);
277 hw.irq = irq;
278 hw.dev = &pdev->dev;
279
280 pr_info("TX4938 IDE interface (base %#lx, ctl %#lx, irq %d)\n",
281 mapbase, mapctl, hw.irq);
282 if (pdata->gbus_clock)
283 tx4938ide_tune_ebusc(pdata->ebus_ch, pdata->gbus_clock, 0);
284 else
285 d.port_ops = NULL;
286 ret = ide_host_add(&d, hws, &host);
287 if (!ret)
288 platform_set_drvdata(pdev, host);
289 return ret;
290}
291
292static int __exit tx4938ide_remove(struct platform_device *pdev)
293{
294 struct ide_host *host = platform_get_drvdata(pdev);
295
296 ide_host_remove(host);
297 return 0;
298}
299
300static struct platform_driver tx4938ide_driver = {
301 .driver = {
302 .name = "tx4938ide",
303 .owner = THIS_MODULE,
304 },
305 .remove = __exit_p(tx4938ide_remove),
306};
307
308static int __init tx4938ide_init(void)
309{
310 return platform_driver_probe(&tx4938ide_driver, tx4938ide_probe);
311}
312
313static void __exit tx4938ide_exit(void)
314{
315 platform_driver_unregister(&tx4938ide_driver);
316}
317
318module_init(tx4938ide_init);
319module_exit(tx4938ide_exit);
320
321MODULE_DESCRIPTION("TX4938 internal IDE driver");
322MODULE_LICENSE("GPL");
323MODULE_ALIAS("platform:tx4938ide");
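The driver binds by platform-device name, so board code has to register a "tx4938ide" device carrying the resources and platform data the probe routine above reads. A minimal sketch follows; the MMIO window, IRQ number, EBUS channel and clock value are made-up placeholders rather than values from any real TX4938 board, and struct tx4938ide_platform_info is assumed to come from <asm/txx9/tx4938.h> as included by the driver.

#include <linux/platform_device.h>
#include <asm/txx9/tx4938.h>

static struct resource tx4938ide_resources[] = {
	{	/* ATA shadow register window (placeholder address) */
		.start	= 0x1f000000,
		.end	= 0x1f01ffff,
		.flags	= IORESOURCE_MEM,
	}, {	/* IDE interrupt (placeholder number) */
		.start	= 2,
		.end	= 2,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct tx4938ide_platform_info tx4938ide_pdata = {
	.ebus_ch	= 3,		/* EBUSC channel wired to the IDE socket */
	.gbus_clock	= 100000000,	/* in Hz; 0 would skip PIO tuning */
	.ioport_shift	= 1,		/* task-file registers spaced 2 bytes apart */
};

static struct platform_device tx4938ide_device = {
	.name		= "tx4938ide",
	.id		= -1,
	.dev		= {
		.platform_data	= &tx4938ide_pdata,
	},
	.resource	= tx4938ide_resources,
	.num_resources	= ARRAY_SIZE(tx4938ide_resources),
};

Registering such a device from board setup code (platform_device_register(&tx4938ide_device)) before the driver initializes would let platform_driver_probe() find it and run tx4938ide_probe().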
diff --git a/drivers/ide/tx4939ide.c b/drivers/ide/tx4939ide.c
new file mode 100644
index 000000000000..bafb7d1a22e2
--- /dev/null
+++ b/drivers/ide/tx4939ide.c
@@ -0,0 +1,754 @@
1/*
2 * TX4939 internal IDE driver
3 * Based on RBTX49xx patch from CELF patch archive.
4 *
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * (C) Copyright TOSHIBA CORPORATION 2005-2007
10 */
11
12#include <linux/module.h>
13#include <linux/types.h>
14#include <linux/ide.h>
15#include <linux/init.h>
16#include <linux/delay.h>
17#include <linux/platform_device.h>
18#include <linux/io.h>
19#include <linux/scatterlist.h>
20
21#define MODNAME "tx4939ide"
22
23/* ATA Shadow Registers (8-bit except for Data which is 16-bit) */
24#define TX4939IDE_Data 0x000
25#define TX4939IDE_Error_Feature 0x001
26#define TX4939IDE_Sec 0x002
27#define TX4939IDE_LBA0 0x003
28#define TX4939IDE_LBA1 0x004
29#define TX4939IDE_LBA2 0x005
30#define TX4939IDE_DevHead 0x006
31#define TX4939IDE_Stat_Cmd 0x007
32#define TX4939IDE_AltStat_DevCtl 0x402
33/* H/W DMA Registers */
34#define TX4939IDE_DMA_Cmd 0x800 /* 8-bit */
35#define TX4939IDE_DMA_Stat 0x802 /* 8-bit */
36#define TX4939IDE_PRD_Ptr 0x804 /* 32-bit */
37/* ATA100 CORE Registers (16-bit) */
38#define TX4939IDE_Sys_Ctl 0xc00
39#define TX4939IDE_Xfer_Cnt_1 0xc08
40#define TX4939IDE_Xfer_Cnt_2 0xc0a
41#define TX4939IDE_Sec_Cnt 0xc10
42#define TX4939IDE_Start_Lo_Addr 0xc18
43#define TX4939IDE_Start_Up_Addr 0xc20
44#define TX4939IDE_Add_Ctl 0xc28
45#define TX4939IDE_Lo_Burst_Cnt 0xc30
46#define TX4939IDE_Up_Burst_Cnt 0xc38
47#define TX4939IDE_PIO_Addr 0xc88
48#define TX4939IDE_H_Rst_Tim 0xc90
49#define TX4939IDE_Int_Ctl 0xc98
50#define TX4939IDE_Pkt_Cmd 0xcb8
51#define TX4939IDE_Bxfer_Cnt_Hi 0xcc0
52#define TX4939IDE_Bxfer_Cnt_Lo 0xcc8
53#define TX4939IDE_Dev_TErr 0xcd0
54#define TX4939IDE_Pkt_Xfer_Ctl 0xcd8
55#define TX4939IDE_Start_TAddr 0xce0
56
57/* bits for Int_Ctl */
58#define TX4939IDE_INT_ADDRERR 0x80
59#define TX4939IDE_INT_REACHMUL 0x40
60#define TX4939IDE_INT_DEVTIMING 0x20
61#define TX4939IDE_INT_UDMATERM 0x10
62#define TX4939IDE_INT_TIMER 0x08
63#define TX4939IDE_INT_BUSERR 0x04
64#define TX4939IDE_INT_XFEREND 0x02
65#define TX4939IDE_INT_HOST 0x01
66
67#define TX4939IDE_IGNORE_INTS \
68 (TX4939IDE_INT_ADDRERR | TX4939IDE_INT_REACHMUL | \
69 TX4939IDE_INT_DEVTIMING | TX4939IDE_INT_UDMATERM | \
70 TX4939IDE_INT_TIMER | TX4939IDE_INT_XFEREND)
71
72#ifdef __BIG_ENDIAN
73#define tx4939ide_swizzlel(a) ((a) ^ 4)
74#define tx4939ide_swizzlew(a) ((a) ^ 6)
75#define tx4939ide_swizzleb(a) ((a) ^ 7)
76#else
77#define tx4939ide_swizzlel(a) (a)
78#define tx4939ide_swizzlew(a) (a)
79#define tx4939ide_swizzleb(a) (a)
80#endif
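/*
 * Editorial note, not in the original source: on big-endian kernels the XOR
 * swizzling above folds the little-endian register block into the correct
 * byte lanes, e.g.
 *   tx4939ide_swizzleb(TX4939IDE_DMA_Cmd) == 0x800 ^ 7 == 0x807
 *   tx4939ide_swizzlew(TX4939IDE_Sys_Ctl) == 0xc00 ^ 6 == 0xc06
 * while little-endian builds use the offsets unchanged.
 */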
81
82static u16 tx4939ide_readw(void __iomem *base, u32 reg)
83{
84 return __raw_readw(base + tx4939ide_swizzlew(reg));
85}
86static u8 tx4939ide_readb(void __iomem *base, u32 reg)
87{
88 return __raw_readb(base + tx4939ide_swizzleb(reg));
89}
90static void tx4939ide_writel(u32 val, void __iomem *base, u32 reg)
91{
92 __raw_writel(val, base + tx4939ide_swizzlel(reg));
93}
94static void tx4939ide_writew(u16 val, void __iomem *base, u32 reg)
95{
96 __raw_writew(val, base + tx4939ide_swizzlew(reg));
97}
98static void tx4939ide_writeb(u8 val, void __iomem *base, u32 reg)
99{
100 __raw_writeb(val, base + tx4939ide_swizzleb(reg));
101}
102
103#define TX4939IDE_BASE(hwif) ((void __iomem *)(hwif)->extra_base)
104
105static void tx4939ide_set_pio_mode(ide_drive_t *drive, const u8 pio)
106{
107 ide_hwif_t *hwif = drive->hwif;
108 int is_slave = drive->dn;
109 u32 mask, val;
110 u8 safe = pio;
111 ide_drive_t *pair;
112
113 pair = ide_get_pair_dev(drive);
114 if (pair)
115 safe = min(safe, ide_get_best_pio_mode(pair, 255, 4));
116 /*
117 * Update Command Transfer Mode for master/slave and Data
118 * Transfer Mode for this drive.
119 */
120 mask = is_slave ? 0x07f00000 : 0x000007f0;
121 val = ((safe << 8) | (pio << 4)) << (is_slave ? 16 : 0);
122 hwif->select_data = (hwif->select_data & ~mask) | val;
123 /* tx4939ide_tf_load_fixup() will set the Sys_Ctl register */
124}
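/*
 * Editorial worked example: for the master device (drive->dn == 0) running
 * PIO 4 with an equally capable pair (safe == 4), mask is 0x000007f0 and
 * val is (4 << 8) | (4 << 4) == 0x440, so the command and data transfer
 * modes land in bits 4-10 of select_data; the slave uses the same layout
 * shifted left by 16 bits.
 */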
125
126static void tx4939ide_set_dma_mode(ide_drive_t *drive, const u8 mode)
127{
128 ide_hwif_t *hwif = drive->hwif;
129 u32 mask, val;
130
131 /* Update Data Transfer Mode for this drive. */
132 if (mode >= XFER_UDMA_0)
133 val = mode - XFER_UDMA_0 + 8;
134 else
135 val = mode - XFER_MW_DMA_0 + 5;
136 if (drive->dn) {
137 mask = 0x00f00000;
138 val <<= 20;
139 } else {
140 mask = 0x000000f0;
141 val <<= 4;
142 }
143 hwif->select_data = (hwif->select_data & ~mask) | val;
144 /* tx4939ide_tf_load_fixup() will set the Sys_Ctl register */
145}
146
147static u16 tx4939ide_check_error_ints(ide_hwif_t *hwif)
148{
149 void __iomem *base = TX4939IDE_BASE(hwif);
150 u16 ctl = tx4939ide_readw(base, TX4939IDE_Int_Ctl);
151
152 if (ctl & TX4939IDE_INT_BUSERR) {
153 /* reset FIFO */
154 u16 sysctl = tx4939ide_readw(base, TX4939IDE_Sys_Ctl);
155
156 tx4939ide_writew(sysctl | 0x4000, base, TX4939IDE_Sys_Ctl);
157 mmiowb();
158 /* wait 12GBUSCLK (typ. 60ns @ GBUS200MHz, max 270ns) */
159 ndelay(270);
160 tx4939ide_writew(sysctl, base, TX4939IDE_Sys_Ctl);
161 }
162 if (ctl & (TX4939IDE_INT_ADDRERR |
163 TX4939IDE_INT_DEVTIMING | TX4939IDE_INT_BUSERR))
164 pr_err("%s: Error interrupt %#x (%s%s%s )\n",
165 hwif->name, ctl,
166 ctl & TX4939IDE_INT_ADDRERR ? " Address-Error" : "",
167 ctl & TX4939IDE_INT_DEVTIMING ? " DEV-Timing" : "",
168 ctl & TX4939IDE_INT_BUSERR ? " Bus-Error" : "");
169 return ctl;
170}
171
172static void tx4939ide_clear_irq(ide_drive_t *drive)
173{
174 ide_hwif_t *hwif;
175 void __iomem *base;
176 u16 ctl;
177
178 /*
179 * tx4939ide_dma_test_irq() and tx4939ide_dma_end() do all the work
180 * for the DMA case.
181 */
182 if (drive->waiting_for_dma)
183 return;
184 hwif = drive->hwif;
185 base = TX4939IDE_BASE(hwif);
186 ctl = tx4939ide_check_error_ints(hwif);
187 tx4939ide_writew(ctl, base, TX4939IDE_Int_Ctl);
188}
189
190static u8 tx4939ide_cable_detect(ide_hwif_t *hwif)
191{
192 void __iomem *base = TX4939IDE_BASE(hwif);
193
194 return tx4939ide_readw(base, TX4939IDE_Sys_Ctl) & 0x2000 ?
195 ATA_CBL_PATA40 : ATA_CBL_PATA80;
196}
197
198#ifdef __BIG_ENDIAN
199static void tx4939ide_dma_host_set(ide_drive_t *drive, int on)
200{
201 ide_hwif_t *hwif = drive->hwif;
202 u8 unit = drive->dn;
203 void __iomem *base = TX4939IDE_BASE(hwif);
204 u8 dma_stat = tx4939ide_readb(base, TX4939IDE_DMA_Stat);
205
206 if (on)
207 dma_stat |= (1 << (5 + unit));
208 else
209 dma_stat &= ~(1 << (5 + unit));
210
211 tx4939ide_writeb(dma_stat, base, TX4939IDE_DMA_Stat);
212}
213#else
214#define tx4939ide_dma_host_set ide_dma_host_set
215#endif
216
217static u8 tx4939ide_clear_dma_status(void __iomem *base)
218{
219 u8 dma_stat;
220
221 /* read DMA status for INTR & ERROR flags */
222 dma_stat = tx4939ide_readb(base, TX4939IDE_DMA_Stat);
223 /* clear INTR & ERROR flags */
224 tx4939ide_writeb(dma_stat | ATA_DMA_INTR | ATA_DMA_ERR, base,
225 TX4939IDE_DMA_Stat);
226 /* recover intmask cleared by writing to bit2 of DMA_Stat */
227 tx4939ide_writew(TX4939IDE_IGNORE_INTS << 8, base, TX4939IDE_Int_Ctl);
228 return dma_stat;
229}
230
231#ifdef __BIG_ENDIAN
232/* custom ide_build_dmatable to handle swapped layout */
233static int tx4939ide_build_dmatable(ide_drive_t *drive, struct request *rq)
234{
235 ide_hwif_t *hwif = drive->hwif;
236 u32 *table = (u32 *)hwif->dmatable_cpu;
237 unsigned int count = 0;
238 int i;
239 struct scatterlist *sg;
240
241 hwif->sg_nents = ide_build_sglist(drive, rq);
242 if (hwif->sg_nents == 0)
243 return 0;
244
245 for_each_sg(hwif->sg_table, sg, hwif->sg_nents, i) {
246 u32 cur_addr, cur_len, bcount;
247
248 cur_addr = sg_dma_address(sg);
249 cur_len = sg_dma_len(sg);
250
251 /*
252 * Fill in the DMA table, without crossing any 64kB boundaries.
253 */
254
255 while (cur_len) {
256 if (count++ >= PRD_ENTRIES)
257 goto use_pio_instead;
258
259 bcount = 0x10000 - (cur_addr & 0xffff);
260 if (bcount > cur_len)
261 bcount = cur_len;
262 *table++ = bcount & 0xffff;
263 *table++ = cur_addr;
264 cur_addr += bcount;
265 cur_len -= bcount;
266 }
267 }
268
269 if (count) {
270 *(table - 2) |= 0x80000000;
271 return count;
272 }
273
274use_pio_instead:
275 printk(KERN_ERR "%s: %s\n", drive->name,
276 count ? "DMA table too small" : "empty DMA table?");
277
278 ide_destroy_dmatable(drive);
279
280 return 0; /* revert to PIO for this request */
281}
282#else
283#define tx4939ide_build_dmatable ide_build_dmatable
284#endif
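/*
 * Editorial note: each PRD entry built above is { byte count, physical
 * address }, i.e. the reverse of the usual SFF-8038i { address, count }
 * layout, and the 0x80000000 end-of-table flag is set in the byte-count
 * word of the last entry; that swapped layout is why the generic
 * ide_build_dmatable() is only used in the little-endian case.
 */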
285
286static int tx4939ide_dma_setup(ide_drive_t *drive)
287{
288 ide_hwif_t *hwif = drive->hwif;
289 void __iomem *base = TX4939IDE_BASE(hwif);
290 struct request *rq = hwif->hwgroup->rq;
291 u8 reading;
292 int nent;
293
294 if (rq_data_dir(rq))
295 reading = 0;
296 else
297 reading = ATA_DMA_WR;
298
299 /* fall back to PIO! */
300 nent = tx4939ide_build_dmatable(drive, rq);
301 if (!nent) {
302 ide_map_sg(drive, rq);
303 return 1;
304 }
305
306 /* PRD table */
307 tx4939ide_writel(hwif->dmatable_dma, base, TX4939IDE_PRD_Ptr);
308
309 /* specify r/w */
310 tx4939ide_writeb(reading, base, TX4939IDE_DMA_Cmd);
311
312 /* clear INTR & ERROR flags */
313 tx4939ide_clear_dma_status(base);
314
315 drive->waiting_for_dma = 1;
316
317 tx4939ide_writew(SECTOR_SIZE / 2, base, drive->dn ?
318 TX4939IDE_Xfer_Cnt_2 : TX4939IDE_Xfer_Cnt_1);
319 tx4939ide_writew(rq->nr_sectors, base, TX4939IDE_Sec_Cnt);
320 return 0;
321}
322
323static int tx4939ide_dma_end(ide_drive_t *drive)
324{
325 ide_hwif_t *hwif = drive->hwif;
326 u8 dma_stat, dma_cmd;
327 void __iomem *base = TX4939IDE_BASE(hwif);
328 u16 ctl = tx4939ide_readw(base, TX4939IDE_Int_Ctl);
329
330 drive->waiting_for_dma = 0;
331
332 /* get DMA command mode */
333 dma_cmd = tx4939ide_readb(base, TX4939IDE_DMA_Cmd);
334 /* stop DMA */
335 tx4939ide_writeb(dma_cmd & ~ATA_DMA_START, base, TX4939IDE_DMA_Cmd);
336
337 /* read and clear the INTR & ERROR bits */
338 dma_stat = tx4939ide_clear_dma_status(base);
339
340 /* purge DMA mappings */
341 ide_destroy_dmatable(drive);
342 /* verify good DMA status */
343 wmb();
344
345 if ((dma_stat & (ATA_DMA_INTR | ATA_DMA_ERR | ATA_DMA_ACTIVE)) == 0 &&
346 (ctl & (TX4939IDE_INT_XFEREND | TX4939IDE_INT_HOST)) ==
347 (TX4939IDE_INT_XFEREND | TX4939IDE_INT_HOST))
348 /* INT_IDE lost... bug? */
349 return 0;
350 return ((dma_stat & (ATA_DMA_INTR | ATA_DMA_ERR | ATA_DMA_ACTIVE)) !=
351 ATA_DMA_INTR) ? 0x10 | dma_stat : 0;
352}
353
354/* returns 1 if DMA IRQ issued, 0 otherwise */
355static int tx4939ide_dma_test_irq(ide_drive_t *drive)
356{
357 ide_hwif_t *hwif = drive->hwif;
358 void __iomem *base = TX4939IDE_BASE(hwif);
359 u16 ctl, ide_int;
360 u8 dma_stat, stat;
361 int found = 0;
362
363 ctl = tx4939ide_check_error_ints(hwif);
364 ide_int = ctl & (TX4939IDE_INT_XFEREND | TX4939IDE_INT_HOST);
365 switch (ide_int) {
366 case TX4939IDE_INT_HOST:
367 /* On error, XFEREND might not be asserted. */
368 stat = tx4939ide_readb(base, TX4939IDE_AltStat_DevCtl);
369 if ((stat & (ATA_BUSY | ATA_DRQ | ATA_ERR)) == ATA_ERR)
370 found = 1;
371 else
372 /* Wait for XFEREND (Mask HOST and unmask XFEREND) */
373 ctl &= ~TX4939IDE_INT_XFEREND << 8;
374 ctl |= ide_int << 8;
375 break;
376 case TX4939IDE_INT_HOST | TX4939IDE_INT_XFEREND:
377 dma_stat = tx4939ide_readb(base, TX4939IDE_DMA_Stat);
378 if (!(dma_stat & ATA_DMA_INTR))
379 pr_warning("%s: weird interrupt status. "
380 "DMA_Stat %#02x int_ctl %#04x\n",
381 hwif->name, dma_stat, ctl);
382 found = 1;
383 break;
384 }
385 /*
386 * Do not clear XFEREND, HOST now. They will be cleared by
387 * clearing bit2 of DMA_Stat.
388 */
389 ctl &= ~ide_int;
390 tx4939ide_writew(ctl, base, TX4939IDE_Int_Ctl);
391 return found;
392}
393
394static void tx4939ide_init_hwif(ide_hwif_t *hwif)
395{
396 void __iomem *base = TX4939IDE_BASE(hwif);
397
398 /* Soft Reset */
399 tx4939ide_writew(0x8000, base, TX4939IDE_Sys_Ctl);
400 mmiowb();
401 /* at least 20 GBUSCLK (typ. 100ns @ GBUS200MHz, max 450ns) */
402 ndelay(450);
403 tx4939ide_writew(0x0000, base, TX4939IDE_Sys_Ctl);
404 /* mask some interrupts and clear all interrupts */
405 tx4939ide_writew((TX4939IDE_IGNORE_INTS << 8) | 0xff, base,
406 TX4939IDE_Int_Ctl);
407
408 tx4939ide_writew(0x0008, base, TX4939IDE_Lo_Burst_Cnt);
409 tx4939ide_writew(0, base, TX4939IDE_Up_Burst_Cnt);
410}
411
412static int tx4939ide_init_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
413{
414 hwif->dma_base =
415 hwif->extra_base + tx4939ide_swizzleb(TX4939IDE_DMA_Cmd);
416 /*
417 * Note that we cannot use ATA_DMA_TABLE_OFS, ATA_DMA_STATUS
418 * for big endian.
419 */
420 return ide_allocate_dma_engine(hwif);
421}
422
423static void tx4939ide_tf_load_fixup(ide_drive_t *drive, ide_task_t *task)
424{
425 ide_hwif_t *hwif = drive->hwif;
426 void __iomem *base = TX4939IDE_BASE(hwif);
427 u16 sysctl = hwif->select_data >> (drive->dn ? 16 : 0);
428
429 /*
430 * Fix ATA100 CORE System Control Register. (The write to the
431 * Device/Head register may write wrong data to the System
432 * Control Register)
433 * Since Sys_Ctl is written here, a separate selectproc is not needed.
434 */
435 tx4939ide_writew(sysctl, base, TX4939IDE_Sys_Ctl);
436}
437
438#ifdef __BIG_ENDIAN
439
440static u8 tx4939ide_read_sff_dma_status(ide_hwif_t *hwif)
441{
442 void __iomem *base = TX4939IDE_BASE(hwif);
443
444 return tx4939ide_readb(base, TX4939IDE_DMA_Stat);
445}
446
447/* custom iops (independent from SWAP_IO_SPACE) */
448static u8 tx4939ide_inb(unsigned long port)
449{
450 return __raw_readb((void __iomem *)port);
451}
452
453static void tx4939ide_outb(u8 value, unsigned long port)
454{
455 __raw_writeb(value, (void __iomem *)port);
456}
457
458static void tx4939ide_tf_load(ide_drive_t *drive, ide_task_t *task)
459{
460 ide_hwif_t *hwif = drive->hwif;
461 struct ide_io_ports *io_ports = &hwif->io_ports;
462 struct ide_taskfile *tf = &task->tf;
463 u8 HIHI = task->tf_flags & IDE_TFLAG_LBA48 ? 0xE0 : 0xEF;
464
465 if (task->tf_flags & IDE_TFLAG_FLAGGED)
466 HIHI = 0xFF;
467
468 if (task->tf_flags & IDE_TFLAG_OUT_DATA) {
469 u16 data = (tf->hob_data << 8) | tf->data;
470
471 /* no endian swap */
472 __raw_writew(data, (void __iomem *)io_ports->data_addr);
473 }
474
475 if (task->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE)
476 tx4939ide_outb(tf->hob_feature, io_ports->feature_addr);
477 if (task->tf_flags & IDE_TFLAG_OUT_HOB_NSECT)
478 tx4939ide_outb(tf->hob_nsect, io_ports->nsect_addr);
479 if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAL)
480 tx4939ide_outb(tf->hob_lbal, io_ports->lbal_addr);
481 if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAM)
482 tx4939ide_outb(tf->hob_lbam, io_ports->lbam_addr);
483 if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAH)
484 tx4939ide_outb(tf->hob_lbah, io_ports->lbah_addr);
485
486 if (task->tf_flags & IDE_TFLAG_OUT_FEATURE)
487 tx4939ide_outb(tf->feature, io_ports->feature_addr);
488 if (task->tf_flags & IDE_TFLAG_OUT_NSECT)
489 tx4939ide_outb(tf->nsect, io_ports->nsect_addr);
490 if (task->tf_flags & IDE_TFLAG_OUT_LBAL)
491 tx4939ide_outb(tf->lbal, io_ports->lbal_addr);
492 if (task->tf_flags & IDE_TFLAG_OUT_LBAM)
493 tx4939ide_outb(tf->lbam, io_ports->lbam_addr);
494 if (task->tf_flags & IDE_TFLAG_OUT_LBAH)
495 tx4939ide_outb(tf->lbah, io_ports->lbah_addr);
496
497 if (task->tf_flags & IDE_TFLAG_OUT_DEVICE) {
498 tx4939ide_outb((tf->device & HIHI) | drive->select,
499 io_ports->device_addr);
500 tx4939ide_tf_load_fixup(drive, task);
501 }
502}
503
504static void tx4939ide_tf_read(ide_drive_t *drive, ide_task_t *task)
505{
506 ide_hwif_t *hwif = drive->hwif;
507 struct ide_io_ports *io_ports = &hwif->io_ports;
508 struct ide_taskfile *tf = &task->tf;
509
510 if (task->tf_flags & IDE_TFLAG_IN_DATA) {
511 u16 data;
512
513 /* no endian swap */
514 data = __raw_readw((void __iomem *)io_ports->data_addr);
515 tf->data = data & 0xff;
516 tf->hob_data = (data >> 8) & 0xff;
517 }
518
519 /* be sure we're looking at the low order bits */
520 tx4939ide_outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr);
521
522 if (task->tf_flags & IDE_TFLAG_IN_FEATURE)
523 tf->feature = tx4939ide_inb(io_ports->feature_addr);
524 if (task->tf_flags & IDE_TFLAG_IN_NSECT)
525 tf->nsect = tx4939ide_inb(io_ports->nsect_addr);
526 if (task->tf_flags & IDE_TFLAG_IN_LBAL)
527 tf->lbal = tx4939ide_inb(io_ports->lbal_addr);
528 if (task->tf_flags & IDE_TFLAG_IN_LBAM)
529 tf->lbam = tx4939ide_inb(io_ports->lbam_addr);
530 if (task->tf_flags & IDE_TFLAG_IN_LBAH)
531 tf->lbah = tx4939ide_inb(io_ports->lbah_addr);
532 if (task->tf_flags & IDE_TFLAG_IN_DEVICE)
533 tf->device = tx4939ide_inb(io_ports->device_addr);
534
535 if (task->tf_flags & IDE_TFLAG_LBA48) {
536 tx4939ide_outb(ATA_DEVCTL_OBS | 0x80, io_ports->ctl_addr);
537
538 if (task->tf_flags & IDE_TFLAG_IN_HOB_FEATURE)
539 tf->hob_feature =
540 tx4939ide_inb(io_ports->feature_addr);
541 if (task->tf_flags & IDE_TFLAG_IN_HOB_NSECT)
542 tf->hob_nsect = tx4939ide_inb(io_ports->nsect_addr);
543 if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAL)
544 tf->hob_lbal = tx4939ide_inb(io_ports->lbal_addr);
545 if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAM)
546 tf->hob_lbam = tx4939ide_inb(io_ports->lbam_addr);
547 if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAH)
548 tf->hob_lbah = tx4939ide_inb(io_ports->lbah_addr);
549 }
550}
551
552static void tx4939ide_input_data_swap(ide_drive_t *drive, struct request *rq,
553 void *buf, unsigned int len)
554{
555 unsigned long port = drive->hwif->io_ports.data_addr;
556 unsigned short *ptr = buf;
557 unsigned int count = (len + 1) / 2;
558
559 while (count--)
560 *ptr++ = cpu_to_le16(__raw_readw((void __iomem *)port));
561 __ide_flush_dcache_range((unsigned long)buf, roundup(len, 2));
562}
563
564static void tx4939ide_output_data_swap(ide_drive_t *drive, struct request *rq,
565 void *buf, unsigned int len)
566{
567 unsigned long port = drive->hwif->io_ports.data_addr;
568 unsigned short *ptr = buf;
569 unsigned int count = (len + 1) / 2;
570
571 while (count--) {
572 __raw_writew(le16_to_cpu(*ptr), (void __iomem *)port);
573 ptr++;
574 }
575 __ide_flush_dcache_range((unsigned long)buf, roundup(len, 2));
576}
577
578static const struct ide_tp_ops tx4939ide_tp_ops = {
579 .exec_command = ide_exec_command,
580 .read_status = ide_read_status,
581 .read_altstatus = ide_read_altstatus,
582 .read_sff_dma_status = tx4939ide_read_sff_dma_status,
583
584 .set_irq = ide_set_irq,
585
586 .tf_load = tx4939ide_tf_load,
587 .tf_read = tx4939ide_tf_read,
588
589 .input_data = tx4939ide_input_data_swap,
590 .output_data = tx4939ide_output_data_swap,
591};
592
593#else /* __LITTLE_ENDIAN */
594
595static void tx4939ide_tf_load(ide_drive_t *drive, ide_task_t *task)
596{
597 ide_tf_load(drive, task);
598 if (task->tf_flags & IDE_TFLAG_OUT_DEVICE)
599 tx4939ide_tf_load_fixup(drive, task);
600}
601
602static const struct ide_tp_ops tx4939ide_tp_ops = {
603 .exec_command = ide_exec_command,
604 .read_status = ide_read_status,
605 .read_altstatus = ide_read_altstatus,
606 .read_sff_dma_status = ide_read_sff_dma_status,
607
608 .set_irq = ide_set_irq,
609
610 .tf_load = tx4939ide_tf_load,
611 .tf_read = ide_tf_read,
612
613 .input_data = ide_input_data,
614 .output_data = ide_output_data,
615};
616
617#endif /* __LITTLE_ENDIAN */
618
619static const struct ide_port_ops tx4939ide_port_ops = {
620 .set_pio_mode = tx4939ide_set_pio_mode,
621 .set_dma_mode = tx4939ide_set_dma_mode,
622 .clear_irq = tx4939ide_clear_irq,
623 .cable_detect = tx4939ide_cable_detect,
624};
625
626static const struct ide_dma_ops tx4939ide_dma_ops = {
627 .dma_host_set = tx4939ide_dma_host_set,
628 .dma_setup = tx4939ide_dma_setup,
629 .dma_exec_cmd = ide_dma_exec_cmd,
630 .dma_start = ide_dma_start,
631 .dma_end = tx4939ide_dma_end,
632 .dma_test_irq = tx4939ide_dma_test_irq,
633 .dma_lost_irq = ide_dma_lost_irq,
634 .dma_timeout = ide_dma_timeout,
635};
636
637static const struct ide_port_info tx4939ide_port_info __initdata = {
638 .init_hwif = tx4939ide_init_hwif,
639 .init_dma = tx4939ide_init_dma,
640 .port_ops = &tx4939ide_port_ops,
641 .dma_ops = &tx4939ide_dma_ops,
642 .tp_ops = &tx4939ide_tp_ops,
643 .host_flags = IDE_HFLAG_MMIO,
644 .pio_mask = ATA_PIO4,
645 .mwdma_mask = ATA_MWDMA2,
646 .udma_mask = ATA_UDMA5,
647};
648
649static int __init tx4939ide_probe(struct platform_device *pdev)
650{
651 hw_regs_t hw;
652 hw_regs_t *hws[] = { &hw, NULL, NULL, NULL };
653 struct ide_host *host;
654 struct resource *res;
655 int irq, ret;
656 unsigned long mapbase;
657
658 irq = platform_get_irq(pdev, 0);
659 if (irq < 0)
660 return -ENODEV;
661 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
662 if (!res)
663 return -ENODEV;
664
665 if (!devm_request_mem_region(&pdev->dev, res->start,
666 res->end - res->start + 1, MODNAME))
667 return -EBUSY;
668 mapbase = (unsigned long)devm_ioremap(&pdev->dev, res->start,
669 res->end - res->start + 1);
670 if (!mapbase)
671 return -EBUSY;
672 memset(&hw, 0, sizeof(hw));
673 hw.io_ports.data_addr =
674 mapbase + tx4939ide_swizzlew(TX4939IDE_Data);
675 hw.io_ports.error_addr =
676 mapbase + tx4939ide_swizzleb(TX4939IDE_Error_Feature);
677 hw.io_ports.nsect_addr =
678 mapbase + tx4939ide_swizzleb(TX4939IDE_Sec);
679 hw.io_ports.lbal_addr =
680 mapbase + tx4939ide_swizzleb(TX4939IDE_LBA0);
681 hw.io_ports.lbam_addr =
682 mapbase + tx4939ide_swizzleb(TX4939IDE_LBA1);
683 hw.io_ports.lbah_addr =
684 mapbase + tx4939ide_swizzleb(TX4939IDE_LBA2);
685 hw.io_ports.device_addr =
686 mapbase + tx4939ide_swizzleb(TX4939IDE_DevHead);
687 hw.io_ports.command_addr =
688 mapbase + tx4939ide_swizzleb(TX4939IDE_Stat_Cmd);
689 hw.io_ports.ctl_addr =
690 mapbase + tx4939ide_swizzleb(TX4939IDE_AltStat_DevCtl);
691 hw.irq = irq;
692 hw.dev = &pdev->dev;
693
694 pr_info("TX4939 IDE interface (base %#lx, irq %d)\n", mapbase, irq);
695 host = ide_host_alloc(&tx4939ide_port_info, hws);
696 if (!host)
697 return -ENOMEM;
698 /* use extra_base for the base address of all registers */
699 host->ports[0]->extra_base = mapbase;
700 ret = ide_host_register(host, &tx4939ide_port_info, hws);
701 if (ret) {
702 ide_host_free(host);
703 return ret;
704 }
705 platform_set_drvdata(pdev, host);
706 return 0;
707}
708
709static int __exit tx4939ide_remove(struct platform_device *pdev)
710{
711 struct ide_host *host = platform_get_drvdata(pdev);
712
713 ide_host_remove(host);
714 return 0;
715}
716
717#ifdef CONFIG_PM
718static int tx4939ide_resume(struct platform_device *dev)
719{
720 struct ide_host *host = platform_get_drvdata(dev);
721 ide_hwif_t *hwif = host->ports[0];
722
723 tx4939ide_init_hwif(hwif);
724 return 0;
725}
726#else
727#define tx4939ide_resume NULL
728#endif
729
730static struct platform_driver tx4939ide_driver = {
731 .driver = {
732 .name = MODNAME,
733 .owner = THIS_MODULE,
734 },
735 .remove = __exit_p(tx4939ide_remove),
736 .resume = tx4939ide_resume,
737};
738
739static int __init tx4939ide_init(void)
740{
741 return platform_driver_probe(&tx4939ide_driver, tx4939ide_probe);
742}
743
744static void __exit tx4939ide_exit(void)
745{
746 platform_driver_unregister(&tx4939ide_driver);
747}
748
749module_init(tx4939ide_init);
750module_exit(tx4939ide_exit);
751
752MODULE_DESCRIPTION("TX4939 internal IDE driver");
753MODULE_LICENSE("GPL");
754MODULE_ALIAS("platform:tx4939ide");
diff --git a/drivers/ide/legacy/umc8672.c b/drivers/ide/umc8672.c
index 1da076e0c917..1da076e0c917 100644
--- a/drivers/ide/legacy/umc8672.c
+++ b/drivers/ide/umc8672.c
diff --git a/drivers/ide/pci/via82cxxx.c b/drivers/ide/via82cxxx.c
index 2a812d3207e9..2a812d3207e9 100644
--- a/drivers/ide/pci/via82cxxx.c
+++ b/drivers/ide/via82cxxx.c
diff --git a/drivers/idle/Kconfig b/drivers/idle/Kconfig
new file mode 100644
index 000000000000..f15e90a453d1
--- /dev/null
+++ b/drivers/idle/Kconfig
@@ -0,0 +1,18 @@
1
2menu "Memory power savings"
3depends on X86_64
4
5config I7300_IDLE_IOAT_CHANNEL
6 bool
7
8config I7300_IDLE
9 tristate "Intel chipset idle memory power saving driver"
10 select I7300_IDLE_IOAT_CHANNEL
11 depends on EXPERIMENTAL
12 help
13 Enable memory power savings when idle with certain Intel server
14 chipsets. The chipset must have I/O AT support, such as the
15 Intel 7300. The power savings depend on the type and quantity of
16 DRAM devices.
17
18endmenu
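For reference, a hypothetical .config fragment that would build the new driver as a module on an x86-64 configuration satisfying the dependencies above (I7300_IDLE_IOAT_CHANNEL is selected automatically):

CONFIG_EXPERIMENTAL=y
CONFIG_I7300_IDLE=m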
diff --git a/drivers/idle/Makefile b/drivers/idle/Makefile
new file mode 100644
index 000000000000..5f68fc377e21
--- /dev/null
+++ b/drivers/idle/Makefile
@@ -0,0 +1,2 @@
1obj-$(CONFIG_I7300_IDLE) += i7300_idle.o
2
diff --git a/drivers/idle/i7300_idle.c b/drivers/idle/i7300_idle.c
new file mode 100644
index 000000000000..fb176f6ef9f8
--- /dev/null
+++ b/drivers/idle/i7300_idle.c
@@ -0,0 +1,609 @@
1/*
2 * (C) Copyright 2008 Intel Corporation
3 * Authors:
4 * Andy Henroid <andrew.d.henroid@intel.com>
5 * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
6 */
7
8/*
9 * Save DIMM power on Intel 7300-based platforms when all CPUs/cores
10 * are idle, using the DIMM thermal throttling capability.
11 *
12 * This driver depends on the Intel integrated DMA controller (I/O AT).
13 * If the driver for I/O AT (drivers/dma/ioatdma*) is also enabled,
14 * this driver should work cooperatively.
15 */
16
17/* #define DEBUG */
18
19#include <linux/module.h>
20#include <linux/pci.h>
21#include <linux/sched.h>
22#include <linux/notifier.h>
23#include <linux/cpumask.h>
24#include <linux/ktime.h>
25#include <linux/delay.h>
26#include <linux/debugfs.h>
27#include <linux/stop_machine.h>
28#include <linux/i7300_idle.h>
29
30#include <asm/idle.h>
31
32#include "../dma/ioatdma_hw.h"
33#include "../dma/ioatdma_registers.h"
34
35#define I7300_IDLE_DRIVER_VERSION "1.55"
36#define I7300_PRINT "i7300_idle:"
37
38#define MAX_STOP_RETRIES 10
39
40static int debug;
41module_param_named(debug, debug, uint, 0644);
42MODULE_PARM_DESC(debug, "Enable debug printks in this driver");
43
44#define dprintk(fmt, arg...) \
45 do { if (debug) printk(KERN_INFO I7300_PRINT fmt, ##arg); } while (0)
46
47/*
48 * Value to set THRTLOW to when initiating throttling
49 * 0 = No throttling
50 * 1 = Throttle when > 4 activations per eval window (Maximum throttling)
51 * 2 = Throttle when > 8 activations
52 * 168 = Throttle when > 672 activations (Minimum throttling)
53 */
54#define MAX_THROTTLE_LOW_LIMIT 168
55static uint throttle_low_limit = 1;
56module_param_named(throttle_low_limit, throttle_low_limit, uint, 0644);
57MODULE_PARM_DESC(throttle_low_limit,
58 "Value for THRTLOWLM activation field "
59 "(0 = disable throttle, 1 = Max throttle, 168 = Min throttle)");
60
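/*
 * Editorial usage note: with the 0644 permissions above the limit can be set
 * at load time, e.g. "modprobe i7300_idle throttle_low_limit=2", or adjusted
 * later by root via /sys/module/i7300_idle/parameters/throttle_low_limit.
 */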
61/*
62 * simple invocation and duration statistics
63 */
64static unsigned long total_starts;
65static unsigned long total_us;
66
67#ifdef DEBUG
68static unsigned long past_skip;
69#endif
70
71static struct pci_dev *fbd_dev;
72
73static spinlock_t i7300_idle_lock;
74static int i7300_idle_active;
75
76static u8 i7300_idle_thrtctl_saved;
77static u8 i7300_idle_thrtlow_saved;
78static u32 i7300_idle_mc_saved;
79
80static cpumask_t idle_cpumask;
81static ktime_t start_ktime;
82static unsigned long avg_idle_us;
83
84static struct dentry *debugfs_dir;
85
86/* Begin: I/O AT Helper routines */
87
88#define IOAT_CHANBASE(ioat_ctl, chan) (ioat_ctl + 0x80 + 0x80 * chan)
89/* Snoop control (disable snoops when coherency is not important) */
90#define IOAT_DESC_SADDR_SNP_CTL (1UL << 1)
91#define IOAT_DESC_DADDR_SNP_CTL (1UL << 2)
92
93static struct pci_dev *ioat_dev;
94static struct ioat_dma_descriptor *ioat_desc; /* I/O AT desc & data (1 page) */
95static unsigned long ioat_desc_phys;
96static u8 *ioat_iomap; /* I/O AT memory-mapped control regs (aka CB_BAR) */
97static u8 *ioat_chanbase;
98
99/* Start I/O AT memory copy */
100static int i7300_idle_ioat_start(void)
101{
102 u32 err;
103 /* Clear error (due to circular descriptor pointer) */
104 err = readl(ioat_chanbase + IOAT_CHANERR_OFFSET);
105 if (err)
106 writel(err, ioat_chanbase + IOAT_CHANERR_OFFSET);
107
108 writeb(IOAT_CHANCMD_START, ioat_chanbase + IOAT1_CHANCMD_OFFSET);
109 return 0;
110}
111
112/* Stop I/O AT memory copy */
113static void i7300_idle_ioat_stop(void)
114{
115 int i;
116 u64 sts;
117
118 for (i = 0; i < MAX_STOP_RETRIES; i++) {
119 writeb(IOAT_CHANCMD_RESET,
120 ioat_chanbase + IOAT1_CHANCMD_OFFSET);
121
122 udelay(10);
123
124 sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) &
125 IOAT_CHANSTS_DMA_TRANSFER_STATUS;
126
127 if (sts != IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE)
128 break;
129
130 }
131
132 if (i == MAX_STOP_RETRIES) {
133 dprintk("failed to stop I/O AT after %d retries\n",
134 MAX_STOP_RETRIES);
135 }
136}
137
138/* Test I/O AT by copying 1024 bytes from 2k to 1k */
139static int __init i7300_idle_ioat_selftest(u8 *ctl,
140 struct ioat_dma_descriptor *desc, unsigned long desc_phys)
141{
142 u64 chan_sts;
143
144 memset(desc, 0, 2048);
145 memset((u8 *) desc + 2048, 0xab, 1024);
146
147 desc[0].size = 1024;
148 desc[0].ctl = 0;
149 desc[0].src_addr = desc_phys + 2048;
150 desc[0].dst_addr = desc_phys + 1024;
151 desc[0].next = 0;
152
153 writeb(IOAT_CHANCMD_RESET, ioat_chanbase + IOAT1_CHANCMD_OFFSET);
154 writeb(IOAT_CHANCMD_START, ioat_chanbase + IOAT1_CHANCMD_OFFSET);
155
156 udelay(1000);
157
158 chan_sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) &
159 IOAT_CHANSTS_DMA_TRANSFER_STATUS;
160
161 if (chan_sts != IOAT_CHANSTS_DMA_TRANSFER_STATUS_DONE) {
162 /* Not complete, reset the channel */
163 writeb(IOAT_CHANCMD_RESET,
164 ioat_chanbase + IOAT1_CHANCMD_OFFSET);
165 return -1;
166 }
167
168 if (*(u32 *) ((u8 *) desc + 3068) != 0xabababab ||
169 *(u32 *) ((u8 *) desc + 2044) != 0xabababab) {
170 dprintk("Data values src 0x%x, dest 0x%x, memset 0x%x\n",
171 *(u32 *) ((u8 *) desc + 2048),
172 *(u32 *) ((u8 *) desc + 1024),
173 *(u32 *) ((u8 *) desc + 3072));
174 return -1;
175 }
176 return 0;
177}
178
179static struct device dummy_dma_dev = {
180 .bus_id = "fallback device",
181 .coherent_dma_mask = DMA_64BIT_MASK,
182 .dma_mask = &dummy_dma_dev.coherent_dma_mask,
183};
184
185/* Setup and initialize I/O AT */
186/* This driver needs I/O AT because the throttling takes effect only when
187 * there is some memory activity. We use I/O AT to keep a dummy copy running
188 * while all CPUs are idle and memory is throttled.
189 */
190static int __init i7300_idle_ioat_init(void)
191{
192 u8 ver, chan_count, ioat_chan;
193 u16 chan_ctl;
194
195 ioat_iomap = (u8 *) ioremap_nocache(pci_resource_start(ioat_dev, 0),
196 pci_resource_len(ioat_dev, 0));
197
198 if (!ioat_iomap) {
199 printk(KERN_ERR I7300_PRINT "failed to map I/O AT registers\n");
200 goto err_ret;
201 }
202
203 ver = readb(ioat_iomap + IOAT_VER_OFFSET);
204 if (ver != IOAT_VER_1_2) {
205 printk(KERN_ERR I7300_PRINT "unknown I/O AT version (%u.%u)\n",
206 ver >> 4, ver & 0xf);
207 goto err_unmap;
208 }
209
210 chan_count = readb(ioat_iomap + IOAT_CHANCNT_OFFSET);
211 if (!chan_count) {
212 printk(KERN_ERR I7300_PRINT "unexpected # of I/O AT channels "
213 "(%u)\n",
214 chan_count);
215 goto err_unmap;
216 }
217
218 ioat_chan = chan_count - 1;
219 ioat_chanbase = IOAT_CHANBASE(ioat_iomap, ioat_chan);
220
221 chan_ctl = readw(ioat_chanbase + IOAT_CHANCTRL_OFFSET);
222 if (chan_ctl & IOAT_CHANCTRL_CHANNEL_IN_USE) {
223 printk(KERN_ERR I7300_PRINT "channel %d in use\n", ioat_chan);
224 goto err_unmap;
225 }
226
227 writew(IOAT_CHANCTRL_CHANNEL_IN_USE,
228 ioat_chanbase + IOAT_CHANCTRL_OFFSET);
229
230 ioat_desc = (struct ioat_dma_descriptor *)dma_alloc_coherent(
231 &dummy_dma_dev, 4096,
232 (dma_addr_t *)&ioat_desc_phys, GFP_KERNEL);
233 if (!ioat_desc) {
234 printk(KERN_ERR I7300_PRINT "failed to allocate I/O AT desc\n");
235 goto err_mark_unused;
236 }
237
238 writel(ioat_desc_phys & 0xffffffffUL,
239 ioat_chanbase + IOAT1_CHAINADDR_OFFSET_LOW);
240 writel(ioat_desc_phys >> 32,
241 ioat_chanbase + IOAT1_CHAINADDR_OFFSET_HIGH);
242
243 if (i7300_idle_ioat_selftest(ioat_iomap, ioat_desc, ioat_desc_phys)) {
244 printk(KERN_ERR I7300_PRINT "I/O AT self-test failed\n");
245 goto err_free;
246 }
247
248 /* Setup circular I/O AT descriptor chain */
249 ioat_desc[0].ctl = IOAT_DESC_SADDR_SNP_CTL | IOAT_DESC_DADDR_SNP_CTL;
250 ioat_desc[0].src_addr = ioat_desc_phys + 2048;
251 ioat_desc[0].dst_addr = ioat_desc_phys + 3072;
252 ioat_desc[0].size = 128;
253 ioat_desc[0].next = ioat_desc_phys + sizeof(struct ioat_dma_descriptor);
254
255 ioat_desc[1].ctl = ioat_desc[0].ctl;
256 ioat_desc[1].src_addr = ioat_desc[0].src_addr;
257 ioat_desc[1].dst_addr = ioat_desc[0].dst_addr;
258 ioat_desc[1].size = ioat_desc[0].size;
259 ioat_desc[1].next = ioat_desc_phys;
260
261 return 0;
262
263err_free:
264 dma_free_coherent(&dummy_dma_dev, 4096, (void *)ioat_desc, 0);
265err_mark_unused:
266 writew(0, ioat_chanbase + IOAT_CHANCTRL_OFFSET);
267err_unmap:
268 iounmap(ioat_iomap);
269err_ret:
270 return -ENODEV;
271}
272
273/* Cleanup I/O AT */
274static void __exit i7300_idle_ioat_exit(void)
275{
276 int i;
277 u64 chan_sts;
278
279 i7300_idle_ioat_stop();
280
281 /* Wait a while for the channel to halt before releasing it */
282 for (i = 0; i < MAX_STOP_RETRIES; i++) {
283 writeb(IOAT_CHANCMD_RESET,
284 ioat_chanbase + IOAT1_CHANCMD_OFFSET);
285
286 chan_sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) &
287 IOAT_CHANSTS_DMA_TRANSFER_STATUS;
288
289 if (chan_sts != IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE) {
290 writew(0, ioat_chanbase + IOAT_CHANCTRL_OFFSET);
291 break;
292 }
293 udelay(1000);
294 }
295
296 chan_sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) &
297 IOAT_CHANSTS_DMA_TRANSFER_STATUS;
298
299 /*
300 * We tried to reset multiple times. If the I/O AT channel is still
301 * active, flag an error and return without cleanup. A memory leak is
302 * better than random corruption in that extreme error situation.
303 */
304 if (chan_sts == IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE) {
305 printk(KERN_ERR I7300_PRINT "Unable to stop IO A/T channels."
306 " Not freeing resources\n");
307 return;
308 }
309
310 dma_free_coherent(&dummy_dma_dev, 4096, (void *)ioat_desc, 0);
311 iounmap(ioat_iomap);
312}
313
314/* End: I/O AT Helper routines */
315
316#define DIMM_THRTLOW 0x64
317#define DIMM_THRTCTL 0x67
318#define DIMM_THRTCTL_THRMHUNT (1UL << 0)
319#define DIMM_MC 0x40
320#define DIMM_GTW_MODE (1UL << 17)
321#define DIMM_GBLACT 0x60
322
323/*
324 * Keep track of an exponential-decaying average of recent idle durations.
325 * The latest duration gets DURATION_WEIGHT_PCT percentage weight
326 * in this average, with the old average getting the remaining weight.
327 *
328 * High weights emphasize recent history, low weights include long history.
329 */
330#define DURATION_WEIGHT_PCT 55
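/*
 * Editorial worked example: with DURATION_WEIGHT_PCT == 55, an old average
 * of 200 us and a most recent all-CPUs-idle period of 50 us, the update in
 * the idle notifier gives
 *   avg_idle_us = (45 * 200 + 55 * 50) / 100 = 117
 * which is still above DURATION_THRESHOLD_US, so throttling would again be
 * attempted on the next fully idle period.
 */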
331
332/*
333 * When the decaying average of recent durations or the predicted duration
334 * of the next timer interrupt is shorter than duration_threshold, the
335 * driver will decline to throttle.
336 */
337#define DURATION_THRESHOLD_US 100
338
339
340/* Store DIMM thermal throttle configuration */
341static int i7300_idle_thrt_save(void)
342{
343 u32 new_mc_val;
344 u8 gblactlm;
345
346 pci_read_config_byte(fbd_dev, DIMM_THRTCTL, &i7300_idle_thrtctl_saved);
347 pci_read_config_byte(fbd_dev, DIMM_THRTLOW, &i7300_idle_thrtlow_saved);
348 pci_read_config_dword(fbd_dev, DIMM_MC, &i7300_idle_mc_saved);
349 /*
350 * Make sure we have Global Throttling Window Mode set to have a
351 * "short" window. This (mostly) works around an issue where
352 * throttling persists until the end of the global throttling window
353 * size. On the tested system, this was resulting in a maximum of
354 * 64 ms to exit throttling (average 32 ms). The actual numbers
355 * depend on system frequencies. Setting the short window reduces
356 * this by a factor of 4096.
357 *
358 * We will do this only if the system is set for
359 * unlimited-activations while in open-loop throttling (i.e., when
360 * Global Activation Throttle Limit is zero).
361 */
362 pci_read_config_byte(fbd_dev, DIMM_GBLACT, &gblactlm);
363 dprintk("thrtctl_saved = 0x%02x, thrtlow_saved = 0x%02x\n",
364 i7300_idle_thrtctl_saved,
365 i7300_idle_thrtlow_saved);
366 dprintk("mc_saved = 0x%08x, gblactlm = 0x%02x\n",
367 i7300_idle_mc_saved,
368 gblactlm);
369 if (gblactlm == 0) {
370 new_mc_val = i7300_idle_mc_saved | DIMM_GTW_MODE;
371 pci_write_config_dword(fbd_dev, DIMM_MC, new_mc_val);
372 return 0;
373 } else {
374 dprintk("could not set GTW_MODE = 1 (OLTT enabled)\n");
375 return -ENODEV;
376 }
377}
378
379/* Restore DIMM thermal throttle configuration */
380static void i7300_idle_thrt_restore(void)
381{
382 pci_write_config_dword(fbd_dev, DIMM_MC, i7300_idle_mc_saved);
383 pci_write_config_byte(fbd_dev, DIMM_THRTLOW, i7300_idle_thrtlow_saved);
384 pci_write_config_byte(fbd_dev, DIMM_THRTCTL, i7300_idle_thrtctl_saved);
385}
386
387/* Enable DIMM thermal throttling */
388static void i7300_idle_start(void)
389{
390 u8 new_ctl;
391 u8 limit;
392
393 new_ctl = i7300_idle_thrtctl_saved & ~DIMM_THRTCTL_THRMHUNT;
394 pci_write_config_byte(fbd_dev, DIMM_THRTCTL, new_ctl);
395
396 limit = throttle_low_limit;
397 if (unlikely(limit > MAX_THROTTLE_LOW_LIMIT))
398 limit = MAX_THROTTLE_LOW_LIMIT;
399
400 pci_write_config_byte(fbd_dev, DIMM_THRTLOW, limit);
401
402 new_ctl = i7300_idle_thrtctl_saved | DIMM_THRTCTL_THRMHUNT;
403 pci_write_config_byte(fbd_dev, DIMM_THRTCTL, new_ctl);
404}
405
406/* Disable DIMM thermal throttling */
407static void i7300_idle_stop(void)
408{
409 u8 new_ctl;
410 u8 got_ctl;
411
412 new_ctl = i7300_idle_thrtctl_saved & ~DIMM_THRTCTL_THRMHUNT;
413 pci_write_config_byte(fbd_dev, DIMM_THRTCTL, new_ctl);
414
415 pci_write_config_byte(fbd_dev, DIMM_THRTLOW, i7300_idle_thrtlow_saved);
416 pci_write_config_byte(fbd_dev, DIMM_THRTCTL, i7300_idle_thrtctl_saved);
417 pci_read_config_byte(fbd_dev, DIMM_THRTCTL, &got_ctl);
418 WARN_ON_ONCE(got_ctl != i7300_idle_thrtctl_saved);
419}
420
421
422/*
423 * i7300_avg_duration_check()
424 * returns 0 if the decaying average of recent idle durations is
425 * more than DURATION_THRESHOLD_US
426 */
427static int i7300_avg_duration_check(void)
428{
429 if (avg_idle_us >= DURATION_THRESHOLD_US)
430 return 0;
431
432#ifdef DEBUG
433 past_skip++;
434#endif
435 return 1;
436}
437
438/* Idle notifier to look at idle CPUs */
439static int i7300_idle_notifier(struct notifier_block *nb, unsigned long val,
440 void *data)
441{
442 unsigned long flags;
443 ktime_t now_ktime;
444 static ktime_t idle_begin_time;
445 static int time_init = 1;
446
447 if (!throttle_low_limit)
448 return 0;
449
450 if (unlikely(time_init)) {
451 time_init = 0;
452 idle_begin_time = ktime_get();
453 }
454
455 spin_lock_irqsave(&i7300_idle_lock, flags);
456 if (val == IDLE_START) {
457
458 cpu_set(smp_processor_id(), idle_cpumask);
459
460 if (cpus_weight(idle_cpumask) != num_online_cpus())
461 goto end;
462
463 now_ktime = ktime_get();
464 idle_begin_time = now_ktime;
465
466 if (i7300_avg_duration_check())
467 goto end;
468
469 i7300_idle_active = 1;
470 total_starts++;
471 start_ktime = now_ktime;
472
473 i7300_idle_start();
474 i7300_idle_ioat_start();
475
476 } else if (val == IDLE_END) {
477 cpu_clear(smp_processor_id(), idle_cpumask);
478 if (cpus_weight(idle_cpumask) == (num_online_cpus() - 1)) {
479 /* First CPU coming out of idle */
480 u64 idle_duration_us;
481
482 now_ktime = ktime_get();
483
484 idle_duration_us = ktime_to_us(ktime_sub
485 (now_ktime, idle_begin_time));
486
487 avg_idle_us =
488 ((100 - DURATION_WEIGHT_PCT) * avg_idle_us +
489 DURATION_WEIGHT_PCT * idle_duration_us) / 100;
490
491 if (i7300_idle_active) {
492 ktime_t idle_ktime;
493
494 idle_ktime = ktime_sub(now_ktime, start_ktime);
495 total_us += ktime_to_us(idle_ktime);
496
497 i7300_idle_ioat_stop();
498 i7300_idle_stop();
499 i7300_idle_active = 0;
500 }
501 }
502 }
503end:
504 spin_unlock_irqrestore(&i7300_idle_lock, flags);
505 return 0;
506}
507
508static struct notifier_block i7300_idle_nb = {
509 .notifier_call = i7300_idle_notifier,
510};
511
512MODULE_DEVICE_TABLE(pci, pci_tbl);
513
514int stats_open_generic(struct inode *inode, struct file *fp)
515{
516 fp->private_data = inode->i_private;
517 return 0;
518}
519
520static ssize_t stats_read_ul(struct file *fp, char __user *ubuf, size_t count,
521 loff_t *off)
522{
523 unsigned long *p = fp->private_data;
524 char buf[32];
525 int len;
526
527 len = snprintf(buf, 32, "%lu\n", *p);
528 return simple_read_from_buffer(ubuf, count, off, buf, len);
529}
530
531static const struct file_operations idle_fops = {
532 .open = stats_open_generic,
533 .read = stats_read_ul,
534};
535
536struct debugfs_file_info {
537 void *ptr;
538 char name[32];
539 struct dentry *file;
540} debugfs_file_list[] = {
541 {&total_starts, "total_starts", NULL},
542 {&total_us, "total_us", NULL},
543#ifdef DEBUG
544 {&past_skip, "past_skip", NULL},
545#endif
546 {NULL, "", NULL}
547 };
548
549static int __init i7300_idle_init(void)
550{
551 spin_lock_init(&i7300_idle_lock);
552 cpus_clear(idle_cpumask);
553 total_us = 0;
554
555 if (i7300_idle_platform_probe(&fbd_dev, &ioat_dev))
556 return -ENODEV;
557
558 if (i7300_idle_thrt_save())
559 return -ENODEV;
560
561 if (i7300_idle_ioat_init())
562 return -ENODEV;
563
564 debugfs_dir = debugfs_create_dir("i7300_idle", NULL);
565 if (debugfs_dir) {
566 int i = 0;
567
568 while (debugfs_file_list[i].ptr != NULL) {
569 debugfs_file_list[i].file = debugfs_create_file(
570 debugfs_file_list[i].name,
571 S_IRUSR,
572 debugfs_dir,
573 debugfs_file_list[i].ptr,
574 &idle_fops);
575 i++;
576 }
577 }
578
579 idle_notifier_register(&i7300_idle_nb);
580
581 printk(KERN_INFO "i7300_idle: loaded v%s\n", I7300_IDLE_DRIVER_VERSION);
582 return 0;
583}
584
585static void __exit i7300_idle_exit(void)
586{
587 idle_notifier_unregister(&i7300_idle_nb);
588
589 if (debugfs_dir) {
590 int i = 0;
591
592 while (debugfs_file_list[i].file != NULL) {
593 debugfs_remove(debugfs_file_list[i].file);
594 i++;
595 }
596
597 debugfs_remove(debugfs_dir);
598 }
599 i7300_idle_thrt_restore();
600 i7300_idle_ioat_exit();
601}
602
603module_init(i7300_idle_init);
604module_exit(i7300_idle_exit);
605
606MODULE_AUTHOR("Andy Henroid <andrew.d.henroid@intel.com>");
607MODULE_DESCRIPTION("Intel Chipset DIMM Idle Power Saving Driver v"
608 I7300_IDLE_DRIVER_VERSION);
609MODULE_LICENSE("GPL");
diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
index 2f83543a9dfc..c19f23267157 100644
--- a/drivers/ieee1394/dv1394.c
+++ b/drivers/ieee1394/dv1394.c
@@ -1270,8 +1270,14 @@ static int dv1394_mmap(struct file *file, struct vm_area_struct *vma)
 	struct video_card *video = file_to_video_card(file);
 	int retval = -EINVAL;
 
-	/* serialize mmap */
-	mutex_lock(&video->mtx);
+	/*
+	 * We cannot use the blocking variant mutex_lock here because .mmap
+	 * is called with mmap_sem held, while .ioctl, .read, .write acquire
+	 * video->mtx and subsequently call copy_to/from_user which will
+	 * grab mmap_sem in case of a page fault.
+	 */
+	if (!mutex_trylock(&video->mtx))
+		return -EAGAIN;
 
 	if ( ! video_card_initialized(video) ) {
 		retval = do_dv1394_init_default(video);
@@ -1828,9 +1834,6 @@ static int dv1394_release(struct inode *inode, struct file *file)
 	/* OK to free the DMA buffer, no more mappings can exist */
 	do_dv1394_shutdown(video, 1);
 
-	/* clean up async I/O users */
-	dv1394_fasync(-1, file, 0);
-
 	/* give someone else a turn */
 	clear_bit(0, &video->open);
 
diff --git a/drivers/ieee1394/highlevel.c b/drivers/ieee1394/highlevel.c
index 918ffc4fc8ac..272543a42a43 100644
--- a/drivers/ieee1394/highlevel.c
+++ b/drivers/ieee1394/highlevel.c
@@ -46,10 +46,6 @@ static DEFINE_RWLOCK(hl_irqs_lock);
 
 static DEFINE_RWLOCK(addr_space_lock);
 
-/* addr_space list will have zero and max already included as bounds */
-static struct hpsb_address_ops dummy_ops = { NULL, NULL, NULL, NULL };
-static struct hpsb_address_serve dummy_zero_addr, dummy_max_addr;
-
 
 static struct hl_host_info *hl_get_hostinfo(struct hpsb_highlevel *hl,
 					    struct hpsb_host *host)
@@ -481,20 +477,23 @@ int hpsb_unregister_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
 	return retval;
 }
 
+static struct hpsb_address_ops dummy_ops;
+
+/* dummy address spaces as lower and upper bounds of the host's a.s. list */
 static void init_hpsb_highlevel(struct hpsb_host *host)
 {
-	INIT_LIST_HEAD(&dummy_zero_addr.host_list);
-	INIT_LIST_HEAD(&dummy_zero_addr.hl_list);
-	INIT_LIST_HEAD(&dummy_max_addr.host_list);
-	INIT_LIST_HEAD(&dummy_max_addr.hl_list);
+	INIT_LIST_HEAD(&host->dummy_zero_addr.host_list);
+	INIT_LIST_HEAD(&host->dummy_zero_addr.hl_list);
+	INIT_LIST_HEAD(&host->dummy_max_addr.host_list);
+	INIT_LIST_HEAD(&host->dummy_max_addr.hl_list);
 
-	dummy_zero_addr.op = dummy_max_addr.op = &dummy_ops;
+	host->dummy_zero_addr.op = host->dummy_max_addr.op = &dummy_ops;
 
-	dummy_zero_addr.start = dummy_zero_addr.end = 0;
-	dummy_max_addr.start = dummy_max_addr.end = ((u64) 1) << 48;
+	host->dummy_zero_addr.start = host->dummy_zero_addr.end = 0;
+	host->dummy_max_addr.start = host->dummy_max_addr.end = ((u64) 1) << 48;
 
-	list_add_tail(&dummy_zero_addr.host_list, &host->addr_space);
-	list_add_tail(&dummy_max_addr.host_list, &host->addr_space);
+	list_add_tail(&host->dummy_zero_addr.host_list, &host->addr_space);
+	list_add_tail(&host->dummy_max_addr.host_list, &host->addr_space);
 }
 
 void highlevel_add_host(struct hpsb_host *host)
diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c
index 8dd09d850419..237d0c9d69c6 100644
--- a/drivers/ieee1394/hosts.c
+++ b/drivers/ieee1394/hosts.c
@@ -155,11 +155,11 @@ struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra,
 	memcpy(&h->device, &nodemgr_dev_template_host, sizeof(h->device));
 	h->device.parent = dev;
 	set_dev_node(&h->device, dev_to_node(dev));
-	snprintf(h->device.bus_id, BUS_ID_SIZE, "fw-host%d", h->id);
+	dev_set_name(&h->device, "fw-host%d", h->id);
 
 	h->host_dev.parent = &h->device;
 	h->host_dev.class = &hpsb_host_class;
-	snprintf(h->host_dev.bus_id, BUS_ID_SIZE, "fw-host%d", h->id);
+	dev_set_name(&h->host_dev, "fw-host%d", h->id);
 
 	if (device_register(&h->device))
 		goto fail;
diff --git a/drivers/ieee1394/hosts.h b/drivers/ieee1394/hosts.h
index e4e8aeb4d778..dd229950acca 100644
--- a/drivers/ieee1394/hosts.h
+++ b/drivers/ieee1394/hosts.h
@@ -13,6 +13,7 @@ struct module;
 
 #include "ieee1394_types.h"
 #include "csr.h"
+#include "highlevel.h"
 
 struct hpsb_packet;
 struct hpsb_iso;
@@ -72,6 +73,9 @@ struct hpsb_host {
 	struct { DECLARE_BITMAP(map, 64); } tl_pool[ALL_NODES];
 
 	struct csr_control csr;
+
+	struct hpsb_address_serve dummy_zero_addr;
+	struct hpsb_address_serve dummy_max_addr;
 };
 
 enum devctl_cmd {
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
index 2376b729e876..79ef5fd928ae 100644
--- a/drivers/ieee1394/nodemgr.c
+++ b/drivers/ieee1394/nodemgr.c
@@ -115,8 +115,14 @@ static int nodemgr_bus_read(struct csr1212_csr *csr, u64 addr, u16 length,
 	return error;
 }
 
+#define OUI_FREECOM_TECHNOLOGIES_GMBH	0x0001db
+
 static int nodemgr_get_max_rom(quadlet_t *bus_info_data, void *__ci)
 {
+	/* Freecom FireWire Hard Drive firmware bug */
+	if (be32_to_cpu(bus_info_data[3]) >> 8 == OUI_FREECOM_TECHNOLOGIES_GMBH)
+		return 0;
+
 	return (be32_to_cpu(bus_info_data[2]) >> 8) & 0x3;
 }
 
@@ -826,13 +832,11 @@ static struct node_entry *nodemgr_create_node(octlet_t guid,
 	memcpy(&ne->device, &nodemgr_dev_template_ne,
 	       sizeof(ne->device));
 	ne->device.parent = &host->device;
-	snprintf(ne->device.bus_id, BUS_ID_SIZE, "%016Lx",
-		 (unsigned long long)(ne->guid));
+	dev_set_name(&ne->device, "%016Lx", (unsigned long long)(ne->guid));
 
 	ne->node_dev.parent = &ne->device;
 	ne->node_dev.class = &nodemgr_ne_class;
-	snprintf(ne->node_dev.bus_id, BUS_ID_SIZE, "%016Lx",
-		 (unsigned long long)(ne->guid));
+	dev_set_name(&ne->node_dev, "%016Lx", (unsigned long long)(ne->guid));
 
 	if (device_register(&ne->device))
 		goto fail_devreg;
@@ -932,13 +936,11 @@ static void nodemgr_register_device(struct node_entry *ne,
 
 	ud->device.parent = parent;
 
-	snprintf(ud->device.bus_id, BUS_ID_SIZE, "%s-%u",
-		 ne->device.bus_id, ud->id);
+	dev_set_name(&ud->device, "%s-%u", dev_name(&ne->device), ud->id);
 
 	ud->unit_dev.parent = &ud->device;
 	ud->unit_dev.class = &nodemgr_ud_class;
-	snprintf(ud->unit_dev.bus_id, BUS_ID_SIZE, "%s-%u",
-		 ne->device.bus_id, ud->id);
+	dev_set_name(&ud->unit_dev, "%s-%u", dev_name(&ne->device), ud->id);
 
 	if (device_register(&ud->device))
 		goto fail_devreg;
@@ -953,7 +955,7 @@ static void nodemgr_register_device(struct node_entry *ne,
 fail_classdevreg:
 	device_unregister(&ud->device);
 fail_devreg:
-	HPSB_ERR("Failed to create unit %s", ud->device.bus_id);
+	HPSB_ERR("Failed to create unit %s", dev_name(&ud->device));
 }
 
 
@@ -1689,6 +1691,7 @@ static int nodemgr_host_thread(void *data)
 		g = get_hpsb_generation(host);
 		for (i = 0; i < 4 ; i++) {
 			msleep_interruptible(63);
+			try_to_freeze();
 			if (kthread_should_stop())
 				goto exit;
 
@@ -1729,6 +1732,7 @@ static int nodemgr_host_thread(void *data)
 		/* Sleep 3 seconds */
 		for (i = 3000/200; i; i--) {
 			msleep_interruptible(200);
+			try_to_freeze();
 			if (kthread_should_stop())
 				goto exit;
 
diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c
index 9f19ac492106..bf7e761c12b1 100644
--- a/drivers/ieee1394/raw1394.c
+++ b/drivers/ieee1394/raw1394.c
@@ -2268,7 +2268,8 @@ static ssize_t raw1394_write(struct file *file, const char __user * buffer,
2268 return -EFAULT; 2268 return -EFAULT;
2269 } 2269 }
2270 2270
2271 mutex_lock(&fi->state_mutex); 2271 if (!mutex_trylock(&fi->state_mutex))
2272 return -EAGAIN;
2272 2273
2273 switch (fi->state) { 2274 switch (fi->state) {
2274 case opened: 2275 case opened:
@@ -2548,7 +2549,8 @@ static int raw1394_mmap(struct file *file, struct vm_area_struct *vma)
2548 struct file_info *fi = file->private_data; 2549 struct file_info *fi = file->private_data;
2549 int ret; 2550 int ret;
2550 2551
2551 mutex_lock(&fi->state_mutex); 2552 if (!mutex_trylock(&fi->state_mutex))
2553 return -EAGAIN;
2552 2554
2553 if (fi->iso_state == RAW1394_ISO_INACTIVE) 2555 if (fi->iso_state == RAW1394_ISO_INACTIVE)
2554 ret = -EINVAL; 2556 ret = -EINVAL;
@@ -2669,7 +2671,8 @@ static long raw1394_ioctl(struct file *file, unsigned int cmd,
2669 break; 2671 break;
2670 } 2672 }
2671 2673
2672 mutex_lock(&fi->state_mutex); 2674 if (!mutex_trylock(&fi->state_mutex))
2675 return -EAGAIN;
2673 2676
2674 switch (fi->iso_state) { 2677 switch (fi->iso_state) {
2675 case RAW1394_ISO_INACTIVE: 2678 case RAW1394_ISO_INACTIVE:
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index c52f6e6e8af2..a373c18cf7b8 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -402,6 +402,11 @@ static const struct {
402 }, 402 },
403 /* iPod mini */ { 403 /* iPod mini */ {
404 .firmware_revision = 0x0a2700, 404 .firmware_revision = 0x0a2700,
405 .model_id = 0x000022,
406 .workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
407 },
408 /* iPod mini */ {
409 .firmware_revision = 0x0a2700,
405 .model_id = 0x000023, 410 .model_id = 0x000023,
406 .workarounds = SBP2_WORKAROUND_FIX_CAPACITY, 411 .workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
407 }, 412 },
@@ -890,12 +895,13 @@ static void sbp2_host_reset(struct hpsb_host *host)
890 return; 895 return;
891 896
892 read_lock_irqsave(&sbp2_hi_logical_units_lock, flags); 897 read_lock_irqsave(&sbp2_hi_logical_units_lock, flags);
898
893 list_for_each_entry(lu, &hi->logical_units, lu_list) 899 list_for_each_entry(lu, &hi->logical_units, lu_list)
894 if (likely(atomic_read(&lu->state) != 900 if (atomic_cmpxchg(&lu->state,
895 SBP2LU_STATE_IN_SHUTDOWN)) { 901 SBP2LU_STATE_RUNNING, SBP2LU_STATE_IN_RESET)
896 atomic_set(&lu->state, SBP2LU_STATE_IN_RESET); 902 == SBP2LU_STATE_RUNNING)
897 scsi_block_requests(lu->shost); 903 scsi_block_requests(lu->shost);
898 } 904
899 read_unlock_irqrestore(&sbp2_hi_logical_units_lock, flags); 905 read_unlock_irqrestore(&sbp2_hi_logical_units_lock, flags);
900} 906}
901 907
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 49c45feccd5b..5c54fc2350be 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -406,19 +406,15 @@ static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
406 406
407 if (i == qp_info->snoop_table_size) { 407 if (i == qp_info->snoop_table_size) {
408 /* Grow table. */ 408 /* Grow table. */
409 new_snoop_table = kmalloc(sizeof mad_snoop_priv * 409 new_snoop_table = krealloc(qp_info->snoop_table,
410 qp_info->snoop_table_size + 1, 410 sizeof mad_snoop_priv *
411 GFP_ATOMIC); 411 (qp_info->snoop_table_size + 1),
412 GFP_ATOMIC);
412 if (!new_snoop_table) { 413 if (!new_snoop_table) {
413 i = -ENOMEM; 414 i = -ENOMEM;
414 goto out; 415 goto out;
415 } 416 }
416 if (qp_info->snoop_table) { 417
417 memcpy(new_snoop_table, qp_info->snoop_table,
418 sizeof mad_snoop_priv *
419 qp_info->snoop_table_size);
420 kfree(qp_info->snoop_table);
421 }
422 qp_info->snoop_table = new_snoop_table; 418 qp_info->snoop_table = new_snoop_table;
423 qp_info->snoop_table_size++; 419 qp_info->snoop_table_size++;
424 } 420 }
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 3ddacf39b7ba..4346a24568fb 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -904,8 +904,8 @@ static ssize_t ucma_join_multicast(struct ucma_file *file,
904 904
905 mutex_lock(&file->mut); 905 mutex_lock(&file->mut);
906 mc = ucma_alloc_multicast(ctx); 906 mc = ucma_alloc_multicast(ctx);
907 if (IS_ERR(mc)) { 907 if (!mc) {
908 ret = PTR_ERR(mc); 908 ret = -ENOMEM;
909 goto err1; 909 goto err1;
910 } 910 }
911 911
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index d85af1b67027..eb36a81dd09b 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -358,8 +358,6 @@ static int ib_uverbs_event_close(struct inode *inode, struct file *filp)
358 } 358 }
359 spin_unlock_irq(&file->lock); 359 spin_unlock_irq(&file->lock);
360 360
361 ib_uverbs_event_fasync(-1, filp, 0);
362
363 if (file->is_async) { 361 if (file->is_async) {
364 ib_unregister_event_handler(&file->uverbs_file->event_handler); 362 ib_unregister_event_handler(&file->uverbs_file->event_handler);
365 kref_put(&file->uverbs_file->ref, ib_uverbs_release_file); 363 kref_put(&file->uverbs_file->ref, ib_uverbs_release_file);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index c325c44807e8..44e936e48a31 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -1942,6 +1942,7 @@ fail4:
1942fail3: 1942fail3:
1943 cxgb3_free_atid(ep->com.tdev, ep->atid); 1943 cxgb3_free_atid(ep->com.tdev, ep->atid);
1944fail2: 1944fail2:
1945 cm_id->rem_ref(cm_id);
1945 put_ep(&ep->com); 1946 put_ep(&ep->com);
1946out: 1947out:
1947 return err; 1948 return err;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index ecff98043589..160ef482712d 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -1102,9 +1102,7 @@ static u64 fw_vers_string_to_u64(struct iwch_dev *iwch_dev)
1102 char *cp, *next; 1102 char *cp, *next;
1103 unsigned fw_maj, fw_min, fw_mic; 1103 unsigned fw_maj, fw_min, fw_mic;
1104 1104
1105 rtnl_lock();
1106 lldev->ethtool_ops->get_drvinfo(lldev, &info); 1105 lldev->ethtool_ops->get_drvinfo(lldev, &info);
1107 rtnl_unlock();
1108 1106
1109 next = info.fw_version + 1; 1107 next = info.fw_version + 1;
1110 cp = strsep(&next, "."); 1108 cp = strsep(&next, ".");
@@ -1192,9 +1190,7 @@ static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr, ch
1192 struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev; 1190 struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
1193 1191
1194 PDBG("%s dev 0x%p\n", __func__, dev); 1192 PDBG("%s dev 0x%p\n", __func__, dev);
1195 rtnl_lock();
1196 lldev->ethtool_ops->get_drvinfo(lldev, &info); 1193 lldev->ethtool_ops->get_drvinfo(lldev, &info);
1197 rtnl_unlock();
1198 return sprintf(buf, "%s\n", info.fw_version); 1194 return sprintf(buf, "%s\n", info.fw_version);
1199} 1195}
1200 1196
@@ -1207,9 +1203,7 @@ static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
1207 struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev; 1203 struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
1208 1204
1209 PDBG("%s dev 0x%p\n", __func__, dev); 1205 PDBG("%s dev 0x%p\n", __func__, dev);
1210 rtnl_lock();
1211 lldev->ethtool_ops->get_drvinfo(lldev, &info); 1206 lldev->ethtool_ops->get_drvinfo(lldev, &info);
1212 rtnl_unlock();
1213 return sprintf(buf, "%s\n", info.driver); 1207 return sprintf(buf, "%s\n", info.driver);
1214} 1208}
1215 1209
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 3e4585c2318a..19661b2f0406 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -745,7 +745,6 @@ int iwch_post_zb_read(struct iwch_qp *qhp)
745 wqe->read.rdmaop = T3_READ_REQ; 745 wqe->read.rdmaop = T3_READ_REQ;
746 wqe->read.reserved[0] = 0; 746 wqe->read.reserved[0] = 0;
747 wqe->read.reserved[1] = 0; 747 wqe->read.reserved[1] = 0;
748 wqe->read.reserved[2] = 0;
749 wqe->read.rem_stag = cpu_to_be32(1); 748 wqe->read.rem_stag = cpu_to_be32(1);
750 wqe->read.rem_to = cpu_to_be64(1); 749 wqe->read.rem_to = cpu_to_be64(1);
751 wqe->read.local_stag = cpu_to_be32(1); 750 wqe->read.local_stag = cpu_to_be32(1);
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 5d7b7855afb9..7fc35cf0cddf 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -128,6 +128,8 @@ struct ehca_shca {
128 /* MR pgsize: bit 0-3 means 4K, 64K, 1M, 16M respectively */ 128 /* MR pgsize: bit 0-3 means 4K, 64K, 1M, 16M respectively */
129 u32 hca_cap_mr_pgsize; 129 u32 hca_cap_mr_pgsize;
130 int max_mtu; 130 int max_mtu;
131 int max_num_qps;
132 int max_num_cqs;
131 atomic_t num_cqs; 133 atomic_t num_cqs;
132 atomic_t num_qps; 134 atomic_t num_qps;
133}; 135};
@@ -161,7 +163,8 @@ struct ehca_mod_qp_parm {
161/* struct for tracking if cqes have been reported to the application */ 163/* struct for tracking if cqes have been reported to the application */
162struct ehca_qmap_entry { 164struct ehca_qmap_entry {
163 u16 app_wr_id; 165 u16 app_wr_id;
164 u16 reported; 166 u8 reported;
167 u8 cqe_req;
165}; 168};
166 169
167struct ehca_queue_map { 170struct ehca_queue_map {
@@ -169,6 +172,7 @@ struct ehca_queue_map {
169 unsigned int entries; 172 unsigned int entries;
170 unsigned int tail; 173 unsigned int tail;
171 unsigned int left_to_poll; 174 unsigned int left_to_poll;
175 unsigned int next_wqe_idx; /* Idx to first wqe to be flushed */
172}; 176};
173 177
174struct ehca_qp { 178struct ehca_qp {
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
index 33647a95eb9a..2f4c28a30271 100644
--- a/drivers/infiniband/hw/ehca/ehca_cq.c
+++ b/drivers/infiniband/hw/ehca/ehca_cq.c
@@ -132,9 +132,9 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
132 if (cqe >= 0xFFFFFFFF - 64 - additional_cqe) 132 if (cqe >= 0xFFFFFFFF - 64 - additional_cqe)
133 return ERR_PTR(-EINVAL); 133 return ERR_PTR(-EINVAL);
134 134
135 if (!atomic_add_unless(&shca->num_cqs, 1, ehca_max_cq)) { 135 if (!atomic_add_unless(&shca->num_cqs, 1, shca->max_num_cqs)) {
136 ehca_err(device, "Unable to create CQ, max number of %i " 136 ehca_err(device, "Unable to create CQ, max number of %i "
137 "CQs reached.", ehca_max_cq); 137 "CQs reached.", shca->max_num_cqs);
138 ehca_err(device, "To increase the maximum number of CQs " 138 ehca_err(device, "To increase the maximum number of CQs "
139 "use the number_of_cqs module parameter.\n"); 139 "use the number_of_cqs module parameter.\n");
140 return ERR_PTR(-ENOSPC); 140 return ERR_PTR(-ENOSPC);
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index cb55be04442c..757035ea246f 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -359,36 +359,48 @@ static void notify_port_conf_change(struct ehca_shca *shca, int port_num)
359 *old_attr = new_attr; 359 *old_attr = new_attr;
360} 360}
361 361
362/* replay modify_qp for sqps -- return 0 if all is well, 1 if AQP1 destroyed */
363static int replay_modify_qp(struct ehca_sport *sport)
364{
365 int aqp1_destroyed;
366 unsigned long flags;
367
368 spin_lock_irqsave(&sport->mod_sqp_lock, flags);
369
370 aqp1_destroyed = !sport->ibqp_sqp[IB_QPT_GSI];
371
372 if (sport->ibqp_sqp[IB_QPT_SMI])
373 ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_SMI]);
374 if (!aqp1_destroyed)
375 ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_GSI]);
376
377 spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
378
379 return aqp1_destroyed;
380}
381
362static void parse_ec(struct ehca_shca *shca, u64 eqe) 382static void parse_ec(struct ehca_shca *shca, u64 eqe)
363{ 383{
364 u8 ec = EHCA_BMASK_GET(NEQE_EVENT_CODE, eqe); 384 u8 ec = EHCA_BMASK_GET(NEQE_EVENT_CODE, eqe);
365 u8 port = EHCA_BMASK_GET(NEQE_PORT_NUMBER, eqe); 385 u8 port = EHCA_BMASK_GET(NEQE_PORT_NUMBER, eqe);
366 u8 spec_event; 386 u8 spec_event;
367 struct ehca_sport *sport = &shca->sport[port - 1]; 387 struct ehca_sport *sport = &shca->sport[port - 1];
368 unsigned long flags;
369 388
370 switch (ec) { 389 switch (ec) {
371 case 0x30: /* port availability change */ 390 case 0x30: /* port availability change */
372 if (EHCA_BMASK_GET(NEQE_PORT_AVAILABILITY, eqe)) { 391 if (EHCA_BMASK_GET(NEQE_PORT_AVAILABILITY, eqe)) {
373 int suppress_event; 392 /* only replay modify_qp calls in autodetect mode;
374 /* replay modify_qp for sqps */ 393 * if AQP1 was destroyed, the port is already down
375 spin_lock_irqsave(&sport->mod_sqp_lock, flags); 394 * again and we can drop the event.
376 suppress_event = !sport->ibqp_sqp[IB_QPT_GSI]; 395 */
377 if (sport->ibqp_sqp[IB_QPT_SMI]) 396 if (ehca_nr_ports < 0)
378 ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_SMI]); 397 if (replay_modify_qp(sport))
379 if (!suppress_event) 398 break;
380 ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_GSI]);
381 spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
382
383 /* AQP1 was destroyed, ignore this event */
384 if (suppress_event)
385 break;
386 399
387 sport->port_state = IB_PORT_ACTIVE; 400 sport->port_state = IB_PORT_ACTIVE;
388 dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE, 401 dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
389 "is active"); 402 "is active");
390 ehca_query_sma_attr(shca, port, 403 ehca_query_sma_attr(shca, port, &sport->saved_attr);
391 &sport->saved_attr);
392 } else { 404 } else {
393 sport->port_state = IB_PORT_DOWN; 405 sport->port_state = IB_PORT_DOWN;
394 dispatch_port_event(shca, port, IB_EVENT_PORT_ERR, 406 dispatch_port_event(shca, port, IB_EVENT_PORT_ERR,
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index 598844d2edc9..bec7e0249358 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -44,6 +44,8 @@
44#include <linux/slab.h> 44#include <linux/slab.h>
45#endif 45#endif
46 46
47#include <linux/notifier.h>
48#include <linux/memory.h>
47#include "ehca_classes.h" 49#include "ehca_classes.h"
48#include "ehca_iverbs.h" 50#include "ehca_iverbs.h"
49#include "ehca_mrmw.h" 51#include "ehca_mrmw.h"
@@ -366,22 +368,23 @@ static int ehca_sense_attributes(struct ehca_shca *shca)
366 shca->hca_cap_mr_pgsize |= pgsize_map[i + 1]; 368 shca->hca_cap_mr_pgsize |= pgsize_map[i + 1];
367 369
368 /* Set maximum number of CQs and QPs to calculate EQ size */ 370 /* Set maximum number of CQs and QPs to calculate EQ size */
369 if (ehca_max_qp == -1) 371 if (shca->max_num_qps == -1)
370 ehca_max_qp = min_t(int, rblock->max_qp, EHCA_MAX_NUM_QUEUES); 372 shca->max_num_qps = min_t(int, rblock->max_qp,
371 else if (ehca_max_qp < 1 || ehca_max_qp > rblock->max_qp) { 373 EHCA_MAX_NUM_QUEUES);
372 ehca_gen_err("Requested number of QPs is out of range (1 - %i) " 374 else if (shca->max_num_qps < 1 || shca->max_num_qps > rblock->max_qp) {
373 "specified by HW", rblock->max_qp); 375 ehca_gen_warn("The requested number of QPs is out of range "
374 ret = -EINVAL; 376 "(1 - %i) specified by HW. Value is set to %i",
375 goto sense_attributes1; 377 rblock->max_qp, rblock->max_qp);
378 shca->max_num_qps = rblock->max_qp;
376 } 379 }
377 380
378 if (ehca_max_cq == -1) 381 if (shca->max_num_cqs == -1)
379 ehca_max_cq = min_t(int, rblock->max_cq, EHCA_MAX_NUM_QUEUES); 382 shca->max_num_cqs = min_t(int, rblock->max_cq,
380 else if (ehca_max_cq < 1 || ehca_max_cq > rblock->max_cq) { 383 EHCA_MAX_NUM_QUEUES);
381 ehca_gen_err("Requested number of CQs is out of range (1 - %i) " 384 else if (shca->max_num_cqs < 1 || shca->max_num_cqs > rblock->max_cq) {
382 "specified by HW", rblock->max_cq); 385 ehca_gen_warn("The requested number of CQs is out of range "
383 ret = -EINVAL; 386 "(1 - %i) specified by HW. Value is set to %i",
384 goto sense_attributes1; 387 rblock->max_cq, rblock->max_cq);
385 } 388 }
386 389
387 /* query max MTU from first port -- it's the same for all ports */ 390 /* query max MTU from first port -- it's the same for all ports */
@@ -733,9 +736,13 @@ static int __devinit ehca_probe(struct of_device *dev,
733 ehca_gen_err("Cannot allocate shca memory."); 736 ehca_gen_err("Cannot allocate shca memory.");
734 return -ENOMEM; 737 return -ENOMEM;
735 } 738 }
739
736 mutex_init(&shca->modify_mutex); 740 mutex_init(&shca->modify_mutex);
737 atomic_set(&shca->num_cqs, 0); 741 atomic_set(&shca->num_cqs, 0);
738 atomic_set(&shca->num_qps, 0); 742 atomic_set(&shca->num_qps, 0);
743 shca->max_num_qps = ehca_max_qp;
744 shca->max_num_cqs = ehca_max_cq;
745
739 for (i = 0; i < ARRAY_SIZE(shca->sport); i++) 746 for (i = 0; i < ARRAY_SIZE(shca->sport); i++)
740 spin_lock_init(&shca->sport[i].mod_sqp_lock); 747 spin_lock_init(&shca->sport[i].mod_sqp_lock);
741 748
@@ -755,7 +762,7 @@ static int __devinit ehca_probe(struct of_device *dev,
755 goto probe1; 762 goto probe1;
756 } 763 }
757 764
758 eq_size = 2 * ehca_max_cq + 4 * ehca_max_qp; 765 eq_size = 2 * shca->max_num_cqs + 4 * shca->max_num_qps;
759 /* create event queues */ 766 /* create event queues */
760 ret = ehca_create_eq(shca, &shca->eq, EHCA_EQ, eq_size); 767 ret = ehca_create_eq(shca, &shca->eq, EHCA_EQ, eq_size);
761 if (ret) { 768 if (ret) {
@@ -964,6 +971,40 @@ void ehca_poll_eqs(unsigned long data)
964 spin_unlock(&shca_list_lock); 971 spin_unlock(&shca_list_lock);
965} 972}
966 973
974static int ehca_mem_notifier(struct notifier_block *nb,
975 unsigned long action, void *data)
976{
977 static unsigned long ehca_dmem_warn_time;
978
979 switch (action) {
980 case MEM_CANCEL_OFFLINE:
981 case MEM_CANCEL_ONLINE:
982 case MEM_ONLINE:
983 case MEM_OFFLINE:
984 return NOTIFY_OK;
985 case MEM_GOING_ONLINE:
986 case MEM_GOING_OFFLINE:
987 /* only ok if no hca is attached to the lpar */
988 spin_lock(&shca_list_lock);
989 if (list_empty(&shca_list)) {
990 spin_unlock(&shca_list_lock);
991 return NOTIFY_OK;
992 } else {
993 spin_unlock(&shca_list_lock);
994 if (printk_timed_ratelimit(&ehca_dmem_warn_time,
995 30 * 1000))
 996 ehca_gen_err("DMEM operations are not allowed "
 997 "in conjunction with eHCA");
998 return NOTIFY_BAD;
999 }
1000 }
1001 return NOTIFY_OK;
1002}
1003
1004static struct notifier_block ehca_mem_nb = {
1005 .notifier_call = ehca_mem_notifier,
1006};
1007
967static int __init ehca_module_init(void) 1008static int __init ehca_module_init(void)
968{ 1009{
969 int ret; 1010 int ret;
@@ -991,6 +1032,12 @@ static int __init ehca_module_init(void)
991 goto module_init2; 1032 goto module_init2;
992 } 1033 }
993 1034
1035 ret = register_memory_notifier(&ehca_mem_nb);
1036 if (ret) {
1037 ehca_gen_err("Failed registering memory add/remove notifier");
1038 goto module_init3;
1039 }
1040
994 if (ehca_poll_all_eqs != 1) { 1041 if (ehca_poll_all_eqs != 1) {
995 ehca_gen_err("WARNING!!!"); 1042 ehca_gen_err("WARNING!!!");
996 ehca_gen_err("It is possible to lose interrupts."); 1043 ehca_gen_err("It is possible to lose interrupts.");
@@ -1003,6 +1050,9 @@ static int __init ehca_module_init(void)
1003 1050
1004 return 0; 1051 return 0;
1005 1052
1053module_init3:
1054 ibmebus_unregister_driver(&ehca_driver);
1055
1006module_init2: 1056module_init2:
1007 ehca_destroy_slab_caches(); 1057 ehca_destroy_slab_caches();
1008 1058
@@ -1018,6 +1068,8 @@ static void __exit ehca_module_exit(void)
1018 1068
1019 ibmebus_unregister_driver(&ehca_driver); 1069 ibmebus_unregister_driver(&ehca_driver);
1020 1070
1071 unregister_memory_notifier(&ehca_mem_nb);
1072
1021 ehca_destroy_slab_caches(); 1073 ehca_destroy_slab_caches();
1022 1074
1023 ehca_destroy_comp_pool(); 1075 ehca_destroy_comp_pool();
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 4dbe2870e014..cadbf0cdd910 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -435,9 +435,13 @@ static void reset_queue_map(struct ehca_queue_map *qmap)
435{ 435{
436 int i; 436 int i;
437 437
438 qmap->tail = 0; 438 qmap->tail = qmap->entries - 1;
439 for (i = 0; i < qmap->entries; i++) 439 qmap->left_to_poll = 0;
440 qmap->next_wqe_idx = 0;
441 for (i = 0; i < qmap->entries; i++) {
440 qmap->map[i].reported = 1; 442 qmap->map[i].reported = 1;
443 qmap->map[i].cqe_req = 0;
444 }
441} 445}
442 446
443/* 447/*
@@ -465,9 +469,9 @@ static struct ehca_qp *internal_create_qp(
465 u32 swqe_size = 0, rwqe_size = 0, ib_qp_num; 469 u32 swqe_size = 0, rwqe_size = 0, ib_qp_num;
466 unsigned long flags; 470 unsigned long flags;
467 471
468 if (!atomic_add_unless(&shca->num_qps, 1, ehca_max_qp)) { 472 if (!atomic_add_unless(&shca->num_qps, 1, shca->max_num_qps)) {
469 ehca_err(pd->device, "Unable to create QP, max number of %i " 473 ehca_err(pd->device, "Unable to create QP, max number of %i "
470 "QPs reached.", ehca_max_qp); 474 "QPs reached.", shca->max_num_qps);
471 ehca_err(pd->device, "To increase the maximum number of QPs " 475 ehca_err(pd->device, "To increase the maximum number of QPs "
472 "use the number_of_qps module parameter.\n"); 476 "use the number_of_qps module parameter.\n");
473 return ERR_PTR(-ENOSPC); 477 return ERR_PTR(-ENOSPC);
@@ -502,6 +506,12 @@ static struct ehca_qp *internal_create_qp(
502 if (init_attr->srq) { 506 if (init_attr->srq) {
503 my_srq = container_of(init_attr->srq, struct ehca_qp, ib_srq); 507 my_srq = container_of(init_attr->srq, struct ehca_qp, ib_srq);
504 508
509 if (qp_type == IB_QPT_UC) {
510 ehca_err(pd->device, "UC with SRQ not supported");
511 atomic_dec(&shca->num_qps);
512 return ERR_PTR(-EINVAL);
513 }
514
505 has_srq = 1; 515 has_srq = 1;
506 parms.ext_type = EQPT_SRQBASE; 516 parms.ext_type = EQPT_SRQBASE;
507 parms.srq_qpn = my_srq->real_qp_num; 517 parms.srq_qpn = my_srq->real_qp_num;
@@ -854,6 +864,11 @@ static struct ehca_qp *internal_create_qp(
854 if (qp_type == IB_QPT_GSI) { 864 if (qp_type == IB_QPT_GSI) {
855 h_ret = ehca_define_sqp(shca, my_qp, init_attr); 865 h_ret = ehca_define_sqp(shca, my_qp, init_attr);
856 if (h_ret != H_SUCCESS) { 866 if (h_ret != H_SUCCESS) {
867 kfree(my_qp->mod_qp_parm);
868 my_qp->mod_qp_parm = NULL;
869 /* the QP pointer is no longer valid */
870 shca->sport[init_attr->port_num - 1].ibqp_sqp[qp_type] =
871 NULL;
857 ret = ehca2ib_return_code(h_ret); 872 ret = ehca2ib_return_code(h_ret);
858 goto create_qp_exit6; 873 goto create_qp_exit6;
859 } 874 }
@@ -1110,6 +1125,7 @@ static int calc_left_cqes(u64 wqe_p, struct ipz_queue *ipz_queue,
1110 void *wqe_v; 1125 void *wqe_v;
1111 u64 q_ofs; 1126 u64 q_ofs;
1112 u32 wqe_idx; 1127 u32 wqe_idx;
1128 unsigned int tail_idx;
1113 1129
1114 /* convert real to abs address */ 1130 /* convert real to abs address */
1115 wqe_p = wqe_p & (~(1UL << 63)); 1131 wqe_p = wqe_p & (~(1UL << 63));
@@ -1122,12 +1138,17 @@ static int calc_left_cqes(u64 wqe_p, struct ipz_queue *ipz_queue,
1122 return -EFAULT; 1138 return -EFAULT;
1123 } 1139 }
1124 1140
1141 tail_idx = (qmap->tail + 1) % qmap->entries;
1125 wqe_idx = q_ofs / ipz_queue->qe_size; 1142 wqe_idx = q_ofs / ipz_queue->qe_size;
1126 if (wqe_idx < qmap->tail)
1127 qmap->left_to_poll = (qmap->entries - qmap->tail) + wqe_idx;
1128 else
1129 qmap->left_to_poll = wqe_idx - qmap->tail;
1130 1143
1144 /* check all processed wqes, whether a cqe is requested or not */
1145 while (tail_idx != wqe_idx) {
1146 if (qmap->map[tail_idx].cqe_req)
1147 qmap->left_to_poll++;
1148 tail_idx = (tail_idx + 1) % qmap->entries;
1149 }
1150 /* save index in queue, where we have to start flushing */
1151 qmap->next_wqe_idx = wqe_idx;
1131 return 0; 1152 return 0;
1132} 1153}
1133 1154
@@ -1174,10 +1195,14 @@ static int check_for_left_cqes(struct ehca_qp *my_qp, struct ehca_shca *shca)
1174 } else { 1195 } else {
1175 spin_lock_irqsave(&my_qp->send_cq->spinlock, flags); 1196 spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
1176 my_qp->sq_map.left_to_poll = 0; 1197 my_qp->sq_map.left_to_poll = 0;
1198 my_qp->sq_map.next_wqe_idx = (my_qp->sq_map.tail + 1) %
1199 my_qp->sq_map.entries;
1177 spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags); 1200 spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);
1178 1201
1179 spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags); 1202 spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
1180 my_qp->rq_map.left_to_poll = 0; 1203 my_qp->rq_map.left_to_poll = 0;
1204 my_qp->rq_map.next_wqe_idx = (my_qp->rq_map.tail + 1) %
1205 my_qp->rq_map.entries;
1181 spin_unlock_irqrestore(&my_qp->recv_cq->spinlock, flags); 1206 spin_unlock_irqrestore(&my_qp->recv_cq->spinlock, flags);
1182 } 1207 }
1183 1208
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
index 64928079eafa..00a648f4316c 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -179,6 +179,7 @@ static inline int ehca_write_swqe(struct ehca_qp *qp,
179 179
180 qmap_entry->app_wr_id = get_app_wr_id(send_wr->wr_id); 180 qmap_entry->app_wr_id = get_app_wr_id(send_wr->wr_id);
181 qmap_entry->reported = 0; 181 qmap_entry->reported = 0;
182 qmap_entry->cqe_req = 0;
182 183
183 switch (send_wr->opcode) { 184 switch (send_wr->opcode) {
184 case IB_WR_SEND: 185 case IB_WR_SEND:
@@ -203,8 +204,10 @@ static inline int ehca_write_swqe(struct ehca_qp *qp,
203 204
204 if ((send_wr->send_flags & IB_SEND_SIGNALED || 205 if ((send_wr->send_flags & IB_SEND_SIGNALED ||
205 qp->init_attr.sq_sig_type == IB_SIGNAL_ALL_WR) 206 qp->init_attr.sq_sig_type == IB_SIGNAL_ALL_WR)
206 && !hidden) 207 && !hidden) {
207 wqe_p->wr_flag |= WQE_WRFLAG_REQ_SIGNAL_COM; 208 wqe_p->wr_flag |= WQE_WRFLAG_REQ_SIGNAL_COM;
209 qmap_entry->cqe_req = 1;
210 }
208 211
209 if (send_wr->opcode == IB_WR_SEND_WITH_IMM || 212 if (send_wr->opcode == IB_WR_SEND_WITH_IMM ||
210 send_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) { 213 send_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
@@ -569,6 +572,7 @@ static int internal_post_recv(struct ehca_qp *my_qp,
569 qmap_entry = &my_qp->rq_map.map[rq_map_idx]; 572 qmap_entry = &my_qp->rq_map.map[rq_map_idx];
570 qmap_entry->app_wr_id = get_app_wr_id(cur_recv_wr->wr_id); 573 qmap_entry->app_wr_id = get_app_wr_id(cur_recv_wr->wr_id);
571 qmap_entry->reported = 0; 574 qmap_entry->reported = 0;
575 qmap_entry->cqe_req = 1;
572 576
573 wqe_cnt++; 577 wqe_cnt++;
574 } /* eof for cur_recv_wr */ 578 } /* eof for cur_recv_wr */
@@ -706,27 +710,34 @@ repoll:
706 goto repoll; 710 goto repoll;
707 wc->qp = &my_qp->ib_qp; 711 wc->qp = &my_qp->ib_qp;
708 712
713 qmap_tail_idx = get_app_wr_id(cqe->work_request_id);
714 if (!(cqe->w_completion_flags & WC_SEND_RECEIVE_BIT))
715 /* We got a send completion. */
716 qmap = &my_qp->sq_map;
717 else
718 /* We got a receive completion. */
719 qmap = &my_qp->rq_map;
720
721 /* advance the tail pointer */
722 qmap->tail = qmap_tail_idx;
723
709 if (is_error) { 724 if (is_error) {
710 /* 725 /*
711 * set left_to_poll to 0 because in error state, we will not 726 * set left_to_poll to 0 because in error state, we will not
712 * get any additional CQEs 727 * get any additional CQEs
713 */ 728 */
714 ehca_add_to_err_list(my_qp, 1); 729 my_qp->sq_map.next_wqe_idx = (my_qp->sq_map.tail + 1) %
730 my_qp->sq_map.entries;
715 my_qp->sq_map.left_to_poll = 0; 731 my_qp->sq_map.left_to_poll = 0;
732 ehca_add_to_err_list(my_qp, 1);
716 733
734 my_qp->rq_map.next_wqe_idx = (my_qp->rq_map.tail + 1) %
735 my_qp->rq_map.entries;
736 my_qp->rq_map.left_to_poll = 0;
717 if (HAS_RQ(my_qp)) 737 if (HAS_RQ(my_qp))
718 ehca_add_to_err_list(my_qp, 0); 738 ehca_add_to_err_list(my_qp, 0);
719 my_qp->rq_map.left_to_poll = 0;
720 } 739 }
721 740
722 qmap_tail_idx = get_app_wr_id(cqe->work_request_id);
723 if (!(cqe->w_completion_flags & WC_SEND_RECEIVE_BIT))
724 /* We got a send completion. */
725 qmap = &my_qp->sq_map;
726 else
727 /* We got a receive completion. */
728 qmap = &my_qp->rq_map;
729
730 qmap_entry = &qmap->map[qmap_tail_idx]; 741 qmap_entry = &qmap->map[qmap_tail_idx];
731 if (qmap_entry->reported) { 742 if (qmap_entry->reported) {
732 ehca_warn(cq->device, "Double cqe on qp_num=%#x", 743 ehca_warn(cq->device, "Double cqe on qp_num=%#x",
@@ -738,10 +749,6 @@ repoll:
738 wc->wr_id = replace_wr_id(cqe->work_request_id, qmap_entry->app_wr_id); 749 wc->wr_id = replace_wr_id(cqe->work_request_id, qmap_entry->app_wr_id);
739 qmap_entry->reported = 1; 750 qmap_entry->reported = 1;
740 751
741 /* this is a proper completion, we need to advance the tail pointer */
742 if (++qmap->tail == qmap->entries)
743 qmap->tail = 0;
744
745 /* if left_to_poll is decremented to 0, add the QP to the error list */ 752 /* if left_to_poll is decremented to 0, add the QP to the error list */
746 if (qmap->left_to_poll > 0) { 753 if (qmap->left_to_poll > 0) {
747 qmap->left_to_poll--; 754 qmap->left_to_poll--;
@@ -805,13 +812,14 @@ static int generate_flush_cqes(struct ehca_qp *my_qp, struct ib_cq *cq,
805 else 812 else
806 qmap = &my_qp->rq_map; 813 qmap = &my_qp->rq_map;
807 814
808 qmap_entry = &qmap->map[qmap->tail]; 815 qmap_entry = &qmap->map[qmap->next_wqe_idx];
809 816
810 while ((nr < num_entries) && (qmap_entry->reported == 0)) { 817 while ((nr < num_entries) && (qmap_entry->reported == 0)) {
811 /* generate flush CQE */ 818 /* generate flush CQE */
819
812 memset(wc, 0, sizeof(*wc)); 820 memset(wc, 0, sizeof(*wc));
813 821
814 offset = qmap->tail * ipz_queue->qe_size; 822 offset = qmap->next_wqe_idx * ipz_queue->qe_size;
815 wqe = (struct ehca_wqe *)ipz_qeit_calc(ipz_queue, offset); 823 wqe = (struct ehca_wqe *)ipz_qeit_calc(ipz_queue, offset);
816 if (!wqe) { 824 if (!wqe) {
817 ehca_err(cq->device, "Invalid wqe offset=%#lx on " 825 ehca_err(cq->device, "Invalid wqe offset=%#lx on "
@@ -850,11 +858,12 @@ static int generate_flush_cqes(struct ehca_qp *my_qp, struct ib_cq *cq,
850 858
851 wc->qp = &my_qp->ib_qp; 859 wc->qp = &my_qp->ib_qp;
852 860
853 /* mark as reported and advance tail pointer */ 861 /* mark as reported and advance next_wqe pointer */
854 qmap_entry->reported = 1; 862 qmap_entry->reported = 1;
855 if (++qmap->tail == qmap->entries) 863 qmap->next_wqe_idx++;
856 qmap->tail = 0; 864 if (qmap->next_wqe_idx == qmap->entries)
857 qmap_entry = &qmap->map[qmap->tail]; 865 qmap->next_wqe_idx = 0;
866 qmap_entry = &qmap->map[qmap->next_wqe_idx];
858 867
859 wc++; nr++; 868 wc++; nr++;
860 } 869 }
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
index fc0f6d9e6030..2296832f94da 100644
--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
@@ -156,7 +156,7 @@ bail:
156/** 156/**
157 * ipath_get_rwqe - copy the next RWQE into the QP's RWQE 157 * ipath_get_rwqe - copy the next RWQE into the QP's RWQE
158 * @qp: the QP 158 * @qp: the QP
159 * @wr_id_only: update wr_id only, not SGEs 159 * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
160 * 160 *
161 * Return 0 if no RWQE is available, otherwise return 1. 161 * Return 0 if no RWQE is available, otherwise return 1.
162 * 162 *
@@ -173,8 +173,6 @@ int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
173 u32 tail; 173 u32 tail;
174 int ret; 174 int ret;
175 175
176 qp->r_sge.sg_list = qp->r_sg_list;
177
178 if (qp->ibqp.srq) { 176 if (qp->ibqp.srq) {
179 srq = to_isrq(qp->ibqp.srq); 177 srq = to_isrq(qp->ibqp.srq);
180 handler = srq->ibsrq.event_handler; 178 handler = srq->ibsrq.event_handler;
@@ -206,8 +204,10 @@ int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
206 wqe = get_rwqe_ptr(rq, tail); 204 wqe = get_rwqe_ptr(rq, tail);
207 if (++tail >= rq->size) 205 if (++tail >= rq->size)
208 tail = 0; 206 tail = 0;
209 } while (!wr_id_only && !ipath_init_sge(qp, wqe, &qp->r_len, 207 if (wr_id_only)
210 &qp->r_sge)); 208 break;
209 qp->r_sge.sg_list = qp->r_sg_list;
210 } while (!ipath_init_sge(qp, wqe, &qp->r_len, &qp->r_sge));
211 qp->r_wr_id = wqe->wr_id; 211 qp->r_wr_id = wqe->wr_id;
212 wq->tail = tail; 212 wq->tail = tail;
213 213
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index d0866a3636e2..18308494a195 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -343,6 +343,7 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
343{ 343{
344 struct mlx4_ib_dev *dev = to_mdev(ibcq->device); 344 struct mlx4_ib_dev *dev = to_mdev(ibcq->device);
345 struct mlx4_ib_cq *cq = to_mcq(ibcq); 345 struct mlx4_ib_cq *cq = to_mcq(ibcq);
346 struct mlx4_mtt mtt;
346 int outst_cqe; 347 int outst_cqe;
347 int err; 348 int err;
348 349
@@ -376,10 +377,13 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
376 goto out; 377 goto out;
377 } 378 }
378 379
380 mtt = cq->buf.mtt;
381
379 err = mlx4_cq_resize(dev->dev, &cq->mcq, entries, &cq->resize_buf->buf.mtt); 382 err = mlx4_cq_resize(dev->dev, &cq->mcq, entries, &cq->resize_buf->buf.mtt);
380 if (err) 383 if (err)
381 goto err_buf; 384 goto err_buf;
382 385
386 mlx4_mtt_cleanup(dev->dev, &mtt);
383 if (ibcq->uobject) { 387 if (ibcq->uobject) {
384 cq->buf = cq->resize_buf->buf; 388 cq->buf = cq->resize_buf->buf;
385 cq->ibcq.cqe = cq->resize_buf->cqe; 389 cq->ibcq.cqe = cq->resize_buf->cqe;
@@ -406,6 +410,7 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
406 goto out; 410 goto out;
407 411
408err_buf: 412err_buf:
413 mlx4_mtt_cleanup(dev->dev, &cq->resize_buf->buf.mtt);
409 if (!ibcq->uobject) 414 if (!ibcq->uobject)
410 mlx4_ib_free_cq_buf(dev, &cq->resize_buf->buf, 415 mlx4_ib_free_cq_buf(dev, &cq->resize_buf->buf,
411 cq->resize_buf->cqe); 416 cq->resize_buf->cqe);
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index cdca3a511e1c..606f1e2ef284 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -298,7 +298,7 @@ int mlx4_ib_mad_init(struct mlx4_ib_dev *dev)
298 int p, q; 298 int p, q;
299 int ret; 299 int ret;
300 300
301 for (p = 0; p < dev->dev->caps.num_ports; ++p) 301 for (p = 0; p < dev->num_ports; ++p)
302 for (q = 0; q <= 1; ++q) { 302 for (q = 0; q <= 1; ++q) {
303 agent = ib_register_mad_agent(&dev->ib_dev, p + 1, 303 agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
304 q ? IB_QPT_GSI : IB_QPT_SMI, 304 q ? IB_QPT_GSI : IB_QPT_SMI,
@@ -314,7 +314,7 @@ int mlx4_ib_mad_init(struct mlx4_ib_dev *dev)
314 return 0; 314 return 0;
315 315
316err: 316err:
317 for (p = 0; p < dev->dev->caps.num_ports; ++p) 317 for (p = 0; p < dev->num_ports; ++p)
318 for (q = 0; q <= 1; ++q) 318 for (q = 0; q <= 1; ++q)
319 if (dev->send_agent[p][q]) 319 if (dev->send_agent[p][q])
320 ib_unregister_mad_agent(dev->send_agent[p][q]); 320 ib_unregister_mad_agent(dev->send_agent[p][q]);
@@ -327,7 +327,7 @@ void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev)
327 struct ib_mad_agent *agent; 327 struct ib_mad_agent *agent;
328 int p, q; 328 int p, q;
329 329
330 for (p = 0; p < dev->dev->caps.num_ports; ++p) { 330 for (p = 0; p < dev->num_ports; ++p) {
331 for (q = 0; q <= 1; ++q) { 331 for (q = 0; q <= 1; ++q) {
332 agent = dev->send_agent[p][q]; 332 agent = dev->send_agent[p][q];
333 dev->send_agent[p][q] = NULL; 333 dev->send_agent[p][q] = NULL;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index a3c2851c0545..2e80f8f47b02 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -574,7 +574,10 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
574 ibdev->ib_dev.owner = THIS_MODULE; 574 ibdev->ib_dev.owner = THIS_MODULE;
575 ibdev->ib_dev.node_type = RDMA_NODE_IB_CA; 575 ibdev->ib_dev.node_type = RDMA_NODE_IB_CA;
576 ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey; 576 ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey;
577 ibdev->ib_dev.phys_port_cnt = dev->caps.num_ports; 577 ibdev->num_ports = 0;
578 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
579 ibdev->num_ports++;
580 ibdev->ib_dev.phys_port_cnt = ibdev->num_ports;
578 ibdev->ib_dev.num_comp_vectors = 1; 581 ibdev->ib_dev.num_comp_vectors = 1;
579 ibdev->ib_dev.dma_device = &dev->pdev->dev; 582 ibdev->ib_dev.dma_device = &dev->pdev->dev;
580 583
@@ -691,7 +694,7 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
691 struct mlx4_ib_dev *ibdev = ibdev_ptr; 694 struct mlx4_ib_dev *ibdev = ibdev_ptr;
692 int p; 695 int p;
693 696
694 for (p = 1; p <= dev->caps.num_ports; ++p) 697 for (p = 1; p <= ibdev->num_ports; ++p)
695 mlx4_CLOSE_PORT(dev, p); 698 mlx4_CLOSE_PORT(dev, p);
696 699
697 mlx4_ib_mad_cleanup(ibdev); 700 mlx4_ib_mad_cleanup(ibdev);
@@ -706,6 +709,10 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
706 enum mlx4_dev_event event, int port) 709 enum mlx4_dev_event event, int port)
707{ 710{
708 struct ib_event ibev; 711 struct ib_event ibev;
712 struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
713
714 if (port > ibdev->num_ports)
715 return;
709 716
710 switch (event) { 717 switch (event) {
711 case MLX4_DEV_EVENT_PORT_UP: 718 case MLX4_DEV_EVENT_PORT_UP:
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 6e2b0dc21b61..9974e886b8de 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -162,6 +162,7 @@ struct mlx4_ib_ah {
162struct mlx4_ib_dev { 162struct mlx4_ib_dev {
163 struct ib_device ib_dev; 163 struct ib_device ib_dev;
164 struct mlx4_dev *dev; 164 struct mlx4_dev *dev;
165 int num_ports;
165 void __iomem *uar_map; 166 void __iomem *uar_map;
166 167
167 struct mlx4_uar priv_uar; 168 struct mlx4_uar priv_uar;
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 87f5c5a87b98..8e4d26d56a95 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -205,6 +205,7 @@ struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd,
205 goto err_mr; 205 goto err_mr;
206 206
207 mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key; 207 mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
208 mr->umem = NULL;
208 209
209 return &mr->ibmr; 210 return &mr->ibmr;
210 211
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index baa01deb2436..39167a797f99 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -451,6 +451,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
451 struct ib_qp_init_attr *init_attr, 451 struct ib_qp_init_attr *init_attr,
452 struct ib_udata *udata, int sqpn, struct mlx4_ib_qp *qp) 452 struct ib_udata *udata, int sqpn, struct mlx4_ib_qp *qp)
453{ 453{
454 int qpn;
454 int err; 455 int err;
455 456
456 mutex_init(&qp->mutex); 457 mutex_init(&qp->mutex);
@@ -545,9 +546,17 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
545 } 546 }
546 } 547 }
547 548
548 err = mlx4_qp_alloc(dev->dev, sqpn, &qp->mqp); 549 if (sqpn) {
550 qpn = sqpn;
551 } else {
552 err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn);
553 if (err)
554 goto err_wrid;
555 }
556
557 err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
549 if (err) 558 if (err)
550 goto err_wrid; 559 goto err_qpn;
551 560
552 /* 561 /*
553 * Hardware wants QPN written in big-endian order (after 562 * Hardware wants QPN written in big-endian order (after
@@ -560,6 +569,10 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
560 569
561 return 0; 570 return 0;
562 571
572err_qpn:
573 if (!sqpn)
574 mlx4_qp_release_range(dev->dev, qpn, 1);
575
563err_wrid: 576err_wrid:
564 if (pd->uobject) { 577 if (pd->uobject) {
565 if (!init_attr->srq) 578 if (!init_attr->srq)
@@ -655,6 +668,10 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
655 mlx4_ib_unlock_cqs(send_cq, recv_cq); 668 mlx4_ib_unlock_cqs(send_cq, recv_cq);
656 669
657 mlx4_qp_free(dev->dev, &qp->mqp); 670 mlx4_qp_free(dev->dev, &qp->mqp);
671
672 if (!is_sqp(dev, qp))
673 mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);
674
658 mlx4_mtt_cleanup(dev->dev, &qp->mtt); 675 mlx4_mtt_cleanup(dev->dev, &qp->mtt);
659 676
660 if (is_user) { 677 if (is_user) {
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index a2b04d62b1a4..aa1dc41f04c8 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -95,6 +95,10 @@ unsigned int wqm_quanta = 0x10000;
95module_param(wqm_quanta, int, 0644); 95module_param(wqm_quanta, int, 0644);
96MODULE_PARM_DESC(wqm_quanta, "WQM quanta"); 96MODULE_PARM_DESC(wqm_quanta, "WQM quanta");
97 97
98static unsigned int limit_maxrdreqsz;
99module_param(limit_maxrdreqsz, bool, 0644);
100MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
101
98LIST_HEAD(nes_adapter_list); 102LIST_HEAD(nes_adapter_list);
99static LIST_HEAD(nes_dev_list); 103static LIST_HEAD(nes_dev_list);
100 104
@@ -588,6 +592,18 @@ static int __devinit nes_probe(struct pci_dev *pcidev, const struct pci_device_i
588 nesdev->nesadapter->port_count; 592 nesdev->nesadapter->port_count;
589 } 593 }
590 594
595 if ((limit_maxrdreqsz ||
596 ((nesdev->nesadapter->phy_type[0] == NES_PHY_TYPE_GLADIUS) &&
597 (hw_rev == NE020_REV1))) &&
598 (pcie_get_readrq(pcidev) > 256)) {
599 if (pcie_set_readrq(pcidev, 256))
600 printk(KERN_ERR PFX "Unable to set max read request"
601 " to 256 bytes\n");
602 else
603 nes_debug(NES_DBG_INIT, "Max read request size set"
604 " to 256 bytes\n");
605 }
606
591 tasklet_init(&nesdev->dpc_tasklet, nes_dpc, (unsigned long)nesdev); 607 tasklet_init(&nesdev->dpc_tasklet, nes_dpc, (unsigned long)nesdev);
592 608
593 /* bring up the Control QP */ 609 /* bring up the Control QP */
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
index 610b9d859597..bc0b4de04450 100644
--- a/drivers/infiniband/hw/nes/nes_hw.h
+++ b/drivers/infiniband/hw/nes/nes_hw.h
@@ -40,6 +40,7 @@
40#define NES_PHY_TYPE_ARGUS 4 40#define NES_PHY_TYPE_ARGUS 4
41#define NES_PHY_TYPE_PUMA_1G 5 41#define NES_PHY_TYPE_PUMA_1G 5
42#define NES_PHY_TYPE_PUMA_10G 6 42#define NES_PHY_TYPE_PUMA_10G 6
43#define NES_PHY_TYPE_GLADIUS 7
43 44
44#define NES_MULTICAST_PF_MAX 8 45#define NES_MULTICAST_PF_MAX 8
45 46
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 932e56fcf774..d36c9a0bf1bb 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -220,14 +220,14 @@ static int nes_bind_mw(struct ib_qp *ibqp, struct ib_mw *ibmw,
220 if (nesqp->ibqp_state > IB_QPS_RTS) 220 if (nesqp->ibqp_state > IB_QPS_RTS)
221 return -EINVAL; 221 return -EINVAL;
222 222
223 spin_lock_irqsave(&nesqp->lock, flags); 223 spin_lock_irqsave(&nesqp->lock, flags);
224 224
225 head = nesqp->hwqp.sq_head; 225 head = nesqp->hwqp.sq_head;
226 qsize = nesqp->hwqp.sq_tail; 226 qsize = nesqp->hwqp.sq_tail;
227 227
228 /* Check for SQ overflow */ 228 /* Check for SQ overflow */
229 if (((head + (2 * qsize) - nesqp->hwqp.sq_tail) % qsize) == (qsize - 1)) { 229 if (((head + (2 * qsize) - nesqp->hwqp.sq_tail) % qsize) == (qsize - 1)) {
230 spin_unlock_irqrestore(&nesqp->lock, flags); 230 spin_unlock_irqrestore(&nesqp->lock, flags);
231 return -EINVAL; 231 return -EINVAL;
232 } 232 }
233 233
@@ -269,7 +269,7 @@ static int nes_bind_mw(struct ib_qp *ibqp, struct ib_mw *ibmw,
269 nes_write32(nesdev->regs+NES_WQE_ALLOC, 269 nes_write32(nesdev->regs+NES_WQE_ALLOC,
270 (1 << 24) | 0x00800000 | nesqp->hwqp.qp_id); 270 (1 << 24) | 0x00800000 | nesqp->hwqp.qp_id);
271 271
272 spin_unlock_irqrestore(&nesqp->lock, flags); 272 spin_unlock_irqrestore(&nesqp->lock, flags);
273 273
274 return 0; 274 return 0;
275} 275}
@@ -349,7 +349,7 @@ static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd,
349 if (nesfmr->nesmr.pbls_used > nesadapter->free_4kpbl) { 349 if (nesfmr->nesmr.pbls_used > nesadapter->free_4kpbl) {
350 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); 350 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
351 ret = -ENOMEM; 351 ret = -ENOMEM;
352 goto failed_vpbl_alloc; 352 goto failed_vpbl_avail;
353 } else { 353 } else {
354 nesadapter->free_4kpbl -= nesfmr->nesmr.pbls_used; 354 nesadapter->free_4kpbl -= nesfmr->nesmr.pbls_used;
355 } 355 }
@@ -357,7 +357,7 @@ static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd,
357 if (nesfmr->nesmr.pbls_used > nesadapter->free_256pbl) { 357 if (nesfmr->nesmr.pbls_used > nesadapter->free_256pbl) {
358 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); 358 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
359 ret = -ENOMEM; 359 ret = -ENOMEM;
360 goto failed_vpbl_alloc; 360 goto failed_vpbl_avail;
361 } else { 361 } else {
362 nesadapter->free_256pbl -= nesfmr->nesmr.pbls_used; 362 nesadapter->free_256pbl -= nesfmr->nesmr.pbls_used;
363 } 363 }
@@ -391,14 +391,14 @@ static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd,
391 goto failed_vpbl_alloc; 391 goto failed_vpbl_alloc;
392 } 392 }
393 393
394 nesfmr->root_vpbl.leaf_vpbl = kzalloc(sizeof(*nesfmr->root_vpbl.leaf_vpbl)*1024, GFP_KERNEL); 394 nesfmr->leaf_pbl_cnt = nesfmr->nesmr.pbls_used-1;
395 nesfmr->root_vpbl.leaf_vpbl = kzalloc(sizeof(*nesfmr->root_vpbl.leaf_vpbl)*1024, GFP_ATOMIC);
395 if (!nesfmr->root_vpbl.leaf_vpbl) { 396 if (!nesfmr->root_vpbl.leaf_vpbl) {
396 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); 397 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
397 ret = -ENOMEM; 398 ret = -ENOMEM;
398 goto failed_leaf_vpbl_alloc; 399 goto failed_leaf_vpbl_alloc;
399 } 400 }
400 401
401 nesfmr->leaf_pbl_cnt = nesfmr->nesmr.pbls_used-1;
402 nes_debug(NES_DBG_MR, "two level pbl, root_vpbl.pbl_vbase=%p" 402 nes_debug(NES_DBG_MR, "two level pbl, root_vpbl.pbl_vbase=%p"
403 " leaf_pbl_cnt=%d root_vpbl.leaf_vpbl=%p\n", 403 " leaf_pbl_cnt=%d root_vpbl.leaf_vpbl=%p\n",
404 nesfmr->root_vpbl.pbl_vbase, nesfmr->leaf_pbl_cnt, nesfmr->root_vpbl.leaf_vpbl); 404 nesfmr->root_vpbl.pbl_vbase, nesfmr->leaf_pbl_cnt, nesfmr->root_vpbl.leaf_vpbl);
@@ -519,6 +519,16 @@ static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd,
519 nesfmr->root_vpbl.pbl_pbase); 519 nesfmr->root_vpbl.pbl_pbase);
520 520
521 failed_vpbl_alloc: 521 failed_vpbl_alloc:
522 if (nesfmr->nesmr.pbls_used != 0) {
523 spin_lock_irqsave(&nesadapter->pbl_lock, flags);
524 if (nesfmr->nesmr.pbl_4k)
525 nesadapter->free_4kpbl += nesfmr->nesmr.pbls_used;
526 else
527 nesadapter->free_256pbl += nesfmr->nesmr.pbls_used;
528 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
529 }
530
531failed_vpbl_avail:
522 kfree(nesfmr); 532 kfree(nesfmr);
523 533
524 failed_fmr_alloc: 534 failed_fmr_alloc:
@@ -534,18 +544,14 @@ static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd,
534 */ 544 */
535static int nes_dealloc_fmr(struct ib_fmr *ibfmr) 545static int nes_dealloc_fmr(struct ib_fmr *ibfmr)
536{ 546{
547 unsigned long flags;
537 struct nes_mr *nesmr = to_nesmr_from_ibfmr(ibfmr); 548 struct nes_mr *nesmr = to_nesmr_from_ibfmr(ibfmr);
538 struct nes_fmr *nesfmr = to_nesfmr(nesmr); 549 struct nes_fmr *nesfmr = to_nesfmr(nesmr);
539 struct nes_vnic *nesvnic = to_nesvnic(ibfmr->device); 550 struct nes_vnic *nesvnic = to_nesvnic(ibfmr->device);
540 struct nes_device *nesdev = nesvnic->nesdev; 551 struct nes_device *nesdev = nesvnic->nesdev;
541 struct nes_mr temp_nesmr = *nesmr; 552 struct nes_adapter *nesadapter = nesdev->nesadapter;
542 int i = 0; 553 int i = 0;
543 554
544 temp_nesmr.ibmw.device = ibfmr->device;
545 temp_nesmr.ibmw.pd = ibfmr->pd;
546 temp_nesmr.ibmw.rkey = ibfmr->rkey;
547 temp_nesmr.ibmw.uobject = NULL;
548
549 /* free the resources */ 555 /* free the resources */
550 if (nesfmr->leaf_pbl_cnt == 0) { 556 if (nesfmr->leaf_pbl_cnt == 0) {
551 /* single PBL case */ 557 /* single PBL case */
@@ -561,8 +567,24 @@ static int nes_dealloc_fmr(struct ib_fmr *ibfmr)
561 pci_free_consistent(nesdev->pcidev, 8192, nesfmr->root_vpbl.pbl_vbase, 567 pci_free_consistent(nesdev->pcidev, 8192, nesfmr->root_vpbl.pbl_vbase,
562 nesfmr->root_vpbl.pbl_pbase); 568 nesfmr->root_vpbl.pbl_pbase);
563 } 569 }
570 nesmr->ibmw.device = ibfmr->device;
571 nesmr->ibmw.pd = ibfmr->pd;
572 nesmr->ibmw.rkey = ibfmr->rkey;
573 nesmr->ibmw.uobject = NULL;
564 574
565 return nes_dealloc_mw(&temp_nesmr.ibmw); 575 if (nesfmr->nesmr.pbls_used != 0) {
576 spin_lock_irqsave(&nesadapter->pbl_lock, flags);
577 if (nesfmr->nesmr.pbl_4k) {
578 nesadapter->free_4kpbl += nesfmr->nesmr.pbls_used;
579 WARN_ON(nesadapter->free_4kpbl > nesadapter->max_4kpbl);
580 } else {
581 nesadapter->free_256pbl += nesfmr->nesmr.pbls_used;
582 WARN_ON(nesadapter->free_256pbl > nesadapter->max_256pbl);
583 }
584 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
585 }
586
587 return nes_dealloc_mw(&nesmr->ibmw);
566} 588}
567 589
568 590
@@ -1595,7 +1617,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries,
1595 nes_ucontext->mcrqf = req.mcrqf; 1617 nes_ucontext->mcrqf = req.mcrqf;
1596 if (nes_ucontext->mcrqf) { 1618 if (nes_ucontext->mcrqf) {
1597 if (nes_ucontext->mcrqf & 0x80000000) 1619 if (nes_ucontext->mcrqf & 0x80000000)
1598 nescq->hw_cq.cq_number = nesvnic->nic.qp_id + 12 + (nes_ucontext->mcrqf & 0xf) - 1; 1620 nescq->hw_cq.cq_number = nesvnic->nic.qp_id + 28 + 2 * ((nes_ucontext->mcrqf & 0xf) - 1);
1599 else if (nes_ucontext->mcrqf & 0x40000000) 1621 else if (nes_ucontext->mcrqf & 0x40000000)
1600 nescq->hw_cq.cq_number = nes_ucontext->mcrqf & 0xffff; 1622 nescq->hw_cq.cq_number = nes_ucontext->mcrqf & 0xffff;
1601 else 1623 else
@@ -3212,7 +3234,7 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
3212 if (nesqp->ibqp_state > IB_QPS_RTS) 3234 if (nesqp->ibqp_state > IB_QPS_RTS)
3213 return -EINVAL; 3235 return -EINVAL;
3214 3236
3215 spin_lock_irqsave(&nesqp->lock, flags); 3237 spin_lock_irqsave(&nesqp->lock, flags);
3216 3238
3217 head = nesqp->hwqp.sq_head; 3239 head = nesqp->hwqp.sq_head;
3218 3240
@@ -3337,7 +3359,7 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
3337 (counter << 24) | 0x00800000 | nesqp->hwqp.qp_id); 3359 (counter << 24) | 0x00800000 | nesqp->hwqp.qp_id);
3338 } 3360 }
3339 3361
3340 spin_unlock_irqrestore(&nesqp->lock, flags); 3362 spin_unlock_irqrestore(&nesqp->lock, flags);
3341 3363
3342 if (err) 3364 if (err)
3343 *bad_wr = ib_wr; 3365 *bad_wr = ib_wr;
@@ -3368,7 +3390,7 @@ static int nes_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
3368 if (nesqp->ibqp_state > IB_QPS_RTS) 3390 if (nesqp->ibqp_state > IB_QPS_RTS)
3369 return -EINVAL; 3391 return -EINVAL;
3370 3392
3371 spin_lock_irqsave(&nesqp->lock, flags); 3393 spin_lock_irqsave(&nesqp->lock, flags);
3372 3394
3373 head = nesqp->hwqp.rq_head; 3395 head = nesqp->hwqp.rq_head;
3374 3396
@@ -3421,7 +3443,7 @@ static int nes_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
3421 nes_write32(nesdev->regs+NES_WQE_ALLOC, (counter<<24) | nesqp->hwqp.qp_id); 3443 nes_write32(nesdev->regs+NES_WQE_ALLOC, (counter<<24) | nesqp->hwqp.qp_id);
3422 } 3444 }
3423 3445
3424 spin_unlock_irqrestore(&nesqp->lock, flags); 3446 spin_unlock_irqrestore(&nesqp->lock, flags);
3425 3447
3426 if (err) 3448 if (err)
3427 *bad_wr = ib_wr; 3449 *bad_wr = ib_wr;
@@ -3453,7 +3475,7 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
3453 3475
3454 nes_debug(NES_DBG_CQ, "\n"); 3476 nes_debug(NES_DBG_CQ, "\n");
3455 3477
3456 spin_lock_irqsave(&nescq->lock, flags); 3478 spin_lock_irqsave(&nescq->lock, flags);
3457 3479
3458 head = nescq->hw_cq.cq_head; 3480 head = nescq->hw_cq.cq_head;
3459 cq_size = nescq->hw_cq.cq_size; 3481 cq_size = nescq->hw_cq.cq_size;
@@ -3562,7 +3584,7 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
3562 nes_debug(NES_DBG_CQ, "Reporting %u completions for CQ%u.\n", 3584 nes_debug(NES_DBG_CQ, "Reporting %u completions for CQ%u.\n",
3563 cqe_count, nescq->hw_cq.cq_number); 3585 cqe_count, nescq->hw_cq.cq_number);
3564 3586
3565 spin_unlock_irqrestore(&nescq->lock, flags); 3587 spin_unlock_irqrestore(&nescq->lock, flags);
3566 3588
3567 return cqe_count; 3589 return cqe_count;
3568} 3590}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 68ba5c3482e4..e0c7dfabf2b4 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -507,6 +507,7 @@ int ipoib_pkey_dev_delay_open(struct net_device *dev);
507void ipoib_drain_cq(struct net_device *dev); 507void ipoib_drain_cq(struct net_device *dev);
508 508
509void ipoib_set_ethtool_ops(struct net_device *dev); 509void ipoib_set_ethtool_ops(struct net_device *dev);
510int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca);
510 511
511#ifdef CONFIG_INFINIBAND_IPOIB_CM 512#ifdef CONFIG_INFINIBAND_IPOIB_CM
512 513
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index 66af5c1a76e5..e9795f60e5d6 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -42,6 +42,13 @@ static void ipoib_get_drvinfo(struct net_device *netdev,
42 strncpy(drvinfo->driver, "ipoib", sizeof(drvinfo->driver) - 1); 42 strncpy(drvinfo->driver, "ipoib", sizeof(drvinfo->driver) - 1);
43} 43}
44 44
45static u32 ipoib_get_rx_csum(struct net_device *dev)
46{
47 struct ipoib_dev_priv *priv = netdev_priv(dev);
48 return test_bit(IPOIB_FLAG_CSUM, &priv->flags) &&
49 !test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
50}
51
45static int ipoib_get_coalesce(struct net_device *dev, 52static int ipoib_get_coalesce(struct net_device *dev,
46 struct ethtool_coalesce *coal) 53 struct ethtool_coalesce *coal)
47{ 54{
@@ -129,7 +136,7 @@ static void ipoib_get_ethtool_stats(struct net_device *dev,
129 136
130static const struct ethtool_ops ipoib_ethtool_ops = { 137static const struct ethtool_ops ipoib_ethtool_ops = {
131 .get_drvinfo = ipoib_get_drvinfo, 138 .get_drvinfo = ipoib_get_drvinfo,
132 .get_tso = ethtool_op_get_tso, 139 .get_rx_csum = ipoib_get_rx_csum,
133 .get_coalesce = ipoib_get_coalesce, 140 .get_coalesce = ipoib_get_coalesce,
134 .set_coalesce = ipoib_set_coalesce, 141 .set_coalesce = ipoib_set_coalesce,
135 .get_flags = ethtool_op_get_flags, 142 .get_flags = ethtool_op_get_flags,
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 0e748aeeae99..28eb6f03c588 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -685,10 +685,6 @@ int ipoib_ib_dev_open(struct net_device *dev)
685 queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task, 685 queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
686 round_jiffies_relative(HZ)); 686 round_jiffies_relative(HZ));
687 687
688 init_timer(&priv->poll_timer);
689 priv->poll_timer.function = ipoib_ib_tx_timer_func;
690 priv->poll_timer.data = (unsigned long)dev;
691
692 set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags); 688 set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
693 689
694 return 0; 690 return 0;
@@ -906,6 +902,9 @@ int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
906 return -ENODEV; 902 return -ENODEV;
907 } 903 }
908 904
905 setup_timer(&priv->poll_timer, ipoib_ib_tx_timer_func,
906 (unsigned long) dev);
907
909 if (dev->flags & IFF_UP) { 908 if (dev->flags & IFF_UP) {
910 if (ipoib_ib_dev_open(dev)) { 909 if (ipoib_ib_dev_open(dev)) {
911 ipoib_transport_dev_cleanup(dev); 910 ipoib_transport_dev_cleanup(dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index c0ee514396df..85257f6b9576 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -106,12 +106,13 @@ int ipoib_open(struct net_device *dev)
106 106
107 ipoib_dbg(priv, "bringing up interface\n"); 107 ipoib_dbg(priv, "bringing up interface\n");
108 108
109 napi_enable(&priv->napi);
110 set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); 109 set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
111 110
112 if (ipoib_pkey_dev_delay_open(dev)) 111 if (ipoib_pkey_dev_delay_open(dev))
113 return 0; 112 return 0;
114 113
114 napi_enable(&priv->napi);
115
115 if (ipoib_ib_dev_open(dev)) { 116 if (ipoib_ib_dev_open(dev)) {
116 napi_disable(&priv->napi); 117 napi_disable(&priv->napi);
117 return -EINVAL; 118 return -EINVAL;
@@ -546,6 +547,7 @@ static int path_rec_start(struct net_device *dev,
546 if (path->query_id < 0) { 547 if (path->query_id < 0) {
547 ipoib_warn(priv, "ib_sa_path_rec_get failed: %d\n", path->query_id); 548 ipoib_warn(priv, "ib_sa_path_rec_get failed: %d\n", path->query_id);
548 path->query = NULL; 549 path->query = NULL;
550 complete(&path->done);
549 return path->query_id; 551 return path->query_id;
550 } 552 }
551 553
@@ -662,7 +664,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
662 skb_push(skb, sizeof *phdr); 664 skb_push(skb, sizeof *phdr);
663 __skb_queue_tail(&path->queue, skb); 665 __skb_queue_tail(&path->queue, skb);
664 666
665 if (path_rec_start(dev, path)) { 667 if (!path->query && path_rec_start(dev, path)) {
666 spin_unlock_irqrestore(&priv->lock, flags); 668 spin_unlock_irqrestore(&priv->lock, flags);
667 path_free(dev, path); 669 path_free(dev, path);
668 return; 670 return;
@@ -1173,11 +1175,48 @@ int ipoib_add_pkey_attr(struct net_device *dev)
1173 return device_create_file(&dev->dev, &dev_attr_pkey); 1175 return device_create_file(&dev->dev, &dev_attr_pkey);
1174} 1176}
1175 1177
1178int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca)
1179{
1180 struct ib_device_attr *device_attr;
1181 int result = -ENOMEM;
1182
1183 device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
1184 if (!device_attr) {
1185 printk(KERN_WARNING "%s: allocation of %zu bytes failed\n",
1186 hca->name, sizeof *device_attr);
1187 return result;
1188 }
1189
1190 result = ib_query_device(hca, device_attr);
1191 if (result) {
1192 printk(KERN_WARNING "%s: ib_query_device failed (ret = %d)\n",
1193 hca->name, result);
1194 kfree(device_attr);
1195 return result;
1196 }
1197 priv->hca_caps = device_attr->device_cap_flags;
1198
1199 kfree(device_attr);
1200
1201 if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
1202 set_bit(IPOIB_FLAG_CSUM, &priv->flags);
1203 priv->dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
1204 }
1205
1206 if (lro)
1207 priv->dev->features |= NETIF_F_LRO;
1208
1209 if (priv->dev->features & NETIF_F_SG && priv->hca_caps & IB_DEVICE_UD_TSO)
1210 priv->dev->features |= NETIF_F_TSO;
1211
1212 return 0;
1213}
1214
1215
1176static struct net_device *ipoib_add_port(const char *format, 1216static struct net_device *ipoib_add_port(const char *format,
1177 struct ib_device *hca, u8 port) 1217 struct ib_device *hca, u8 port)
1178{ 1218{
1179 struct ipoib_dev_priv *priv; 1219 struct ipoib_dev_priv *priv;
1180 struct ib_device_attr *device_attr;
1181 struct ib_port_attr attr; 1220 struct ib_port_attr attr;
1182 int result = -ENOMEM; 1221 int result = -ENOMEM;
1183 1222
@@ -1206,31 +1245,8 @@ static struct net_device *ipoib_add_port(const char *format,
1206 goto device_init_failed; 1245 goto device_init_failed;
1207 } 1246 }
1208 1247
1209 device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL); 1248 if (ipoib_set_dev_features(priv, hca))
1210 if (!device_attr) {
1211 printk(KERN_WARNING "%s: allocation of %zu bytes failed\n",
1212 hca->name, sizeof *device_attr);
1213 goto device_init_failed; 1249 goto device_init_failed;
1214 }
1215
1216 result = ib_query_device(hca, device_attr);
1217 if (result) {
1218 printk(KERN_WARNING "%s: ib_query_device failed (ret = %d)\n",
1219 hca->name, result);
1220 kfree(device_attr);
1221 goto device_init_failed;
1222 }
1223 priv->hca_caps = device_attr->device_cap_flags;
1224
1225 kfree(device_attr);
1226
1227 if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
1228 set_bit(IPOIB_FLAG_CSUM, &priv->flags);
1229 priv->dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
1230 }
1231
1232 if (lro)
1233 priv->dev->features |= NETIF_F_LRO;
1234 1250
1235 /* 1251 /*
1236 * Set the full membership bit, so that we join the right 1252 * Set the full membership bit, so that we join the right
@@ -1266,9 +1282,6 @@ static struct net_device *ipoib_add_port(const char *format,
1266 goto event_failed; 1282 goto event_failed;
1267 } 1283 }
1268 1284
1269 if (priv->dev->features & NETIF_F_SG && priv->hca_caps & IB_DEVICE_UD_TSO)
1270 priv->dev->features |= NETIF_F_TSO;
1271
1272 result = register_netdev(priv->dev); 1285 result = register_netdev(priv->dev);
1273 if (result) { 1286 if (result) {
1274 printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n", 1287 printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index b08eb56196d3..2cf1a4088718 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -93,6 +93,10 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
93 priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu; 93 priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;
94 set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags); 94 set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags);
95 95
96 result = ipoib_set_dev_features(priv, ppriv->ca);
97 if (result)
98 goto device_init_failed;
99
96 priv->pkey = pkey; 100 priv->pkey = pkey;
97 101
98 memcpy(priv->dev->dev_addr, ppriv->dev->dev_addr, INFINIBAND_ALEN); 102 memcpy(priv->dev->dev_addr, ppriv->dev->dev_addr, INFINIBAND_ALEN);
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 3524bef62be6..1070db330d35 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -235,7 +235,6 @@ static int evdev_release(struct inode *inode, struct file *file)
235 evdev_ungrab(evdev, client); 235 evdev_ungrab(evdev, client);
236 mutex_unlock(&evdev->mutex); 236 mutex_unlock(&evdev->mutex);
237 237
238 evdev_fasync(-1, file, 0);
239 evdev_detach_client(evdev, client); 238 evdev_detach_client(evdev, client);
240 kfree(client); 239 kfree(client);
241 240
diff --git a/drivers/input/ff-memless.c b/drivers/input/ff-memless.c
index 6790e975a98c..bc4e40f3ede7 100644
--- a/drivers/input/ff-memless.c
+++ b/drivers/input/ff-memless.c
@@ -397,8 +397,9 @@ static int ml_ff_playback(struct input_dev *dev, int effect_id, int value)
397{ 397{
398 struct ml_device *ml = dev->ff->private; 398 struct ml_device *ml = dev->ff->private;
399 struct ml_effect_state *state = &ml->states[effect_id]; 399 struct ml_effect_state *state = &ml->states[effect_id];
400 unsigned long flags;
400 401
401 spin_lock_bh(&ml->timer_lock); 402 spin_lock_irqsave(&ml->timer_lock, flags);
402 403
403 if (value > 0) { 404 if (value > 0) {
404 debug("initiated play"); 405 debug("initiated play");
@@ -424,7 +425,7 @@ static int ml_ff_playback(struct input_dev *dev, int effect_id, int value)
424 ml_play_effects(ml); 425 ml_play_effects(ml);
425 } 426 }
426 427
427 spin_unlock_bh(&ml->timer_lock); 428 spin_unlock_irqrestore(&ml->timer_lock, flags);
428 429
429 return 0; 430 return 0;
430} 431}
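The ff-memless hunk above converts timer_lock from spin_lock_bh() to spin_lock_irqsave(), presumably because the playback path can now be entered from atomic or interrupt context, where the _bh variant is not allowed. A minimal sketch of the pattern, with an illustrative lock that is not part of ff-memless:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(example_lock);	/* illustrative, not from the driver */

	static void example_critical_section(void)
	{
		unsigned long flags;

		/*
		 * spin_lock_irqsave() disables local interrupts and remembers
		 * their previous state in 'flags'; spin_lock_bh() only blocks
		 * softirqs, so it is not enough once the lock may be taken
		 * with interrupts already disabled.
		 */
		spin_lock_irqsave(&example_lock, flags);
		/* ... touch data shared with interrupt context ... */
		spin_unlock_irqrestore(&example_lock, flags);
	}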
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index 65d7077a75a1..a85b1485e774 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -244,7 +244,6 @@ static int joydev_release(struct inode *inode, struct file *file)
244 struct joydev_client *client = file->private_data; 244 struct joydev_client *client = file->private_data;
245 struct joydev *joydev = client->joydev; 245 struct joydev *joydev = client->joydev;
246 246
247 joydev_fasync(-1, file, 0);
248 joydev_detach_client(joydev, client); 247 joydev_detach_client(joydev, client);
249 kfree(client); 248 kfree(client);
250 249
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index 22016ca15351..379b7ff354ec 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -824,7 +824,7 @@ static void atkbd_disconnect(struct serio *serio)
824 atkbd_disable(atkbd); 824 atkbd_disable(atkbd);
825 825
826 /* make sure we don't have a command in flight */ 826 /* make sure we don't have a command in flight */
827 flush_scheduled_work(); 827 cancel_delayed_work_sync(&atkbd->event_work);
828 828
829 sysfs_remove_group(&serio->dev.kobj, &atkbd_attribute_group); 829 sysfs_remove_group(&serio->dev.kobj, &atkbd_attribute_group);
830 input_unregister_device(atkbd->dev); 830 input_unregister_device(atkbd->dev);
@@ -868,6 +868,22 @@ static void atkbd_hp_keymap_fixup(struct atkbd *atkbd)
868} 868}
869 869
870/* 870/*
871 * Inventec system with broken key release on volume keys
872 */
873static void atkbd_inventec_keymap_fixup(struct atkbd *atkbd)
874{
875 const unsigned int forced_release_keys[] = {
876 0xae, 0xb0,
877 };
878 int i;
879
880 if (atkbd->set == 2)
881 for (i = 0; i < ARRAY_SIZE(forced_release_keys); i++)
882 __set_bit(forced_release_keys[i],
883 atkbd->force_release_mask);
884}
885
886/*
871 * atkbd_set_keycode_table() initializes keyboard's keycode table 887 * atkbd_set_keycode_table() initializes keyboard's keycode table
872 * according to the selected scancode set 888 * according to the selected scancode set
873 */ 889 */
@@ -1468,6 +1484,15 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = {
1468 .callback = atkbd_setup_fixup, 1484 .callback = atkbd_setup_fixup,
1469 .driver_data = atkbd_hp_keymap_fixup, 1485 .driver_data = atkbd_hp_keymap_fixup,
1470 }, 1486 },
1487 {
1488 .ident = "Inventec Symphony",
1489 .matches = {
1490 DMI_MATCH(DMI_SYS_VENDOR, "INVENTEC"),
1491 DMI_MATCH(DMI_PRODUCT_NAME, "SYMPHONY 6.0/7.0"),
1492 },
1493 .callback = atkbd_setup_fixup,
1494 .driver_data = atkbd_inventec_keymap_fixup,
1495 },
1471 { } 1496 { }
1472}; 1497};
1473 1498
diff --git a/drivers/input/misc/cm109.c b/drivers/input/misc/cm109.c
index bce160f4349b..86457feccfc4 100644
--- a/drivers/input/misc/cm109.c
+++ b/drivers/input/misc/cm109.c
@@ -42,7 +42,7 @@
42 42
43static char *phone = "kip1000"; 43static char *phone = "kip1000";
44module_param(phone, charp, S_IRUSR); 44module_param(phone, charp, S_IRUSR);
45MODULE_PARM_DESC(phone, "Phone name {kip1000, gtalk, usbph01}"); 45MODULE_PARM_DESC(phone, "Phone name {kip1000, gtalk, usbph01, atcom}");
46 46
47enum { 47enum {
48 /* HID Registers */ 48 /* HID Registers */
@@ -258,6 +258,37 @@ static unsigned short keymap_usbph01(int scancode)
258 } 258 }
259} 259}
260 260
261/*
262 * Keymap for ATCom AU-100
263 * http://www.atcom.cn/En_products_AU100.html
264 * http://www.packetizer.com/products/au100/
265 * http://www.voip-info.org/wiki/view/AU-100
266 *
267 * Contributed by daniel@gimpelevich.san-francisco.ca.us
268 */
269static unsigned short keymap_atcom(int scancode)
270{
271 switch (scancode) { /* phone key: */
272 case 0x82: return KEY_NUMERIC_0; /* 0 */
273 case 0x11: return KEY_NUMERIC_1; /* 1 */
274 case 0x12: return KEY_NUMERIC_2; /* 2 */
275 case 0x14: return KEY_NUMERIC_3; /* 3 */
276 case 0x21: return KEY_NUMERIC_4; /* 4 */
277 case 0x22: return KEY_NUMERIC_5; /* 5 */
278 case 0x24: return KEY_NUMERIC_6; /* 6 */
279 case 0x41: return KEY_NUMERIC_7; /* 7 */
280 case 0x42: return KEY_NUMERIC_8; /* 8 */
281 case 0x44: return KEY_NUMERIC_9; /* 9 */
282 case 0x84: return KEY_NUMERIC_POUND; /* # */
283 case 0x81: return KEY_NUMERIC_STAR; /* * */
284 case 0x18: return KEY_ENTER; /* pickup */
285 case 0x28: return KEY_ESC; /* hangup */
286 case 0x48: return KEY_LEFT; /* left arrow */
287 case 0x88: return KEY_RIGHT; /* right arrow */
288 default: return special_keymap(scancode);
289 }
290}
291
261static unsigned short (*keymap)(int) = keymap_kip1000; 292static unsigned short (*keymap)(int) = keymap_kip1000;
262 293
263/* 294/*
@@ -840,6 +871,10 @@ static int __init cm109_select_keymap(void)
840 keymap = keymap_usbph01; 871 keymap = keymap_usbph01;
841 printk(KERN_INFO KBUILD_MODNAME ": " 872 printk(KERN_INFO KBUILD_MODNAME ": "
842 "Keymap for Allied-Telesis Corega USBPH01 phone loaded\n"); 873 "Keymap for Allied-Telesis Corega USBPH01 phone loaded\n");
874 } else if (!strcasecmp(phone, "atcom")) {
875 keymap = keymap_atcom;
876 printk(KERN_INFO KBUILD_MODNAME ": "
877 "Keymap for ATCom AU-100 phone loaded\n");
843 } else { 878 } else {
844 printk(KERN_ERR KBUILD_MODNAME ": " 879 printk(KERN_ERR KBUILD_MODNAME ": "
845 "Unsupported phone: %s\n", phone); 880 "Unsupported phone: %s\n", phone);
diff --git a/drivers/input/misc/hp_sdc_rtc.c b/drivers/input/misc/hp_sdc_rtc.c
index 82ec6b1b6467..216a559f55ea 100644
--- a/drivers/input/misc/hp_sdc_rtc.c
+++ b/drivers/input/misc/hp_sdc_rtc.c
@@ -71,7 +71,6 @@ static int hp_sdc_rtc_ioctl(struct inode *inode, struct file *file,
71static unsigned int hp_sdc_rtc_poll(struct file *file, poll_table *wait); 71static unsigned int hp_sdc_rtc_poll(struct file *file, poll_table *wait);
72 72
73static int hp_sdc_rtc_open(struct inode *inode, struct file *file); 73static int hp_sdc_rtc_open(struct inode *inode, struct file *file);
74static int hp_sdc_rtc_release(struct inode *inode, struct file *file);
75static int hp_sdc_rtc_fasync (int fd, struct file *filp, int on); 74static int hp_sdc_rtc_fasync (int fd, struct file *filp, int on);
76 75
77static int hp_sdc_rtc_read_proc(char *page, char **start, off_t off, 76static int hp_sdc_rtc_read_proc(char *page, char **start, off_t off,
@@ -414,17 +413,6 @@ static int hp_sdc_rtc_open(struct inode *inode, struct file *file)
414 return 0; 413 return 0;
415} 414}
416 415
417static int hp_sdc_rtc_release(struct inode *inode, struct file *file)
418{
419 /* Turn off interrupts? */
420
421 if (file->f_flags & FASYNC) {
422 hp_sdc_rtc_fasync (-1, file, 0);
423 }
424
425 return 0;
426}
427
428static int hp_sdc_rtc_fasync (int fd, struct file *filp, int on) 416static int hp_sdc_rtc_fasync (int fd, struct file *filp, int on)
429{ 417{
430 return fasync_helper (fd, filp, on, &hp_sdc_rtc_async_queue); 418 return fasync_helper (fd, filp, on, &hp_sdc_rtc_async_queue);
@@ -680,7 +668,6 @@ static const struct file_operations hp_sdc_rtc_fops = {
680 .poll = hp_sdc_rtc_poll, 668 .poll = hp_sdc_rtc_poll,
681 .ioctl = hp_sdc_rtc_ioctl, 669 .ioctl = hp_sdc_rtc_ioctl,
682 .open = hp_sdc_rtc_open, 670 .open = hp_sdc_rtc_open,
683 .release = hp_sdc_rtc_release,
684 .fasync = hp_sdc_rtc_fasync, 671 .fasync = hp_sdc_rtc_fasync,
685}; 672};
686 673
diff --git a/drivers/input/misc/sgi_btns.c b/drivers/input/misc/sgi_btns.c
index ce238f59b3c8..be3a15f5b25d 100644
--- a/drivers/input/misc/sgi_btns.c
+++ b/drivers/input/misc/sgi_btns.c
@@ -174,5 +174,6 @@ static void __exit sgi_buttons_exit(void)
174 platform_driver_unregister(&sgi_buttons_driver); 174 platform_driver_unregister(&sgi_buttons_driver);
175} 175}
176 176
177MODULE_LICENSE("GPL");
177module_init(sgi_buttons_init); 178module_init(sgi_buttons_init);
178module_exit(sgi_buttons_exit); 179module_exit(sgi_buttons_exit);
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig
index f488b6852baf..4e9934259775 100644
--- a/drivers/input/mouse/Kconfig
+++ b/drivers/input/mouse/Kconfig
@@ -25,8 +25,8 @@ config MOUSE_PS2
25 mice with wheels and extra buttons, Microsoft, Logitech or Genius 25 mice with wheels and extra buttons, Microsoft, Logitech or Genius
26 compatible. 26 compatible.
27 27
28 Synaptics TouchPad users might be interested in a specialized 28 Synaptics, ALPS or Elantech TouchPad users might be interested
29 XFree86 driver at: 29 in a specialized Xorg/XFree86 driver at:
30 <http://w1.894.telia.com/~u89404340/touchpad/index.html> 30 <http://w1.894.telia.com/~u89404340/touchpad/index.html>
31 and a new version of GPM at: 31 and a new version of GPM at:
32 <http://www.geocities.com/dt_or/gpm/gpm.html> 32 <http://www.geocities.com/dt_or/gpm/gpm.html>
@@ -87,6 +87,27 @@ config MOUSE_PS2_TRACKPOINT
87 87
88 If unsure, say Y. 88 If unsure, say Y.
89 89
90config MOUSE_PS2_ELANTECH
91 bool "Elantech PS/2 protocol extension"
92 depends on MOUSE_PS2
93 help
94 Say Y here if you have an Elantech PS/2 touchpad connected
95 to your system.
96
97 Note that if you enable this driver you will need an updated
98 X.org Synaptics driver that does not require ABS_PRESSURE
99 reports from the touchpad (i.e. post 1.5.0 version). You can
100 grab a patch for the driver here:
101
102 http://userweb.kernel.org/~dtor/synaptics-no-abspressure.patch
103
104 If unsure, say N.
105
106 This driver exposes some configuration registers via sysfs
107 entries. For further information,
108 see <file:Documentation/input/elantech.txt>.
109
110
90config MOUSE_PS2_TOUCHKIT 111config MOUSE_PS2_TOUCHKIT
91 bool "eGalax TouchKit PS/2 protocol extension" 112 bool "eGalax TouchKit PS/2 protocol extension"
92 depends on MOUSE_PS2 113 depends on MOUSE_PS2
diff --git a/drivers/input/mouse/Makefile b/drivers/input/mouse/Makefile
index 8e6e69097801..96f1dd8037f8 100644
--- a/drivers/input/mouse/Makefile
+++ b/drivers/input/mouse/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_MOUSE_GPIO) += gpio_mouse.o
21psmouse-objs := psmouse-base.o synaptics.o 21psmouse-objs := psmouse-base.o synaptics.o
22 22
23psmouse-$(CONFIG_MOUSE_PS2_ALPS) += alps.o 23psmouse-$(CONFIG_MOUSE_PS2_ALPS) += alps.o
24psmouse-$(CONFIG_MOUSE_PS2_ELANTECH) += elantech.o
24psmouse-$(CONFIG_MOUSE_PS2_OLPC) += hgpk.o 25psmouse-$(CONFIG_MOUSE_PS2_OLPC) += hgpk.o
25psmouse-$(CONFIG_MOUSE_PS2_LOGIPS2PP) += logips2pp.o 26psmouse-$(CONFIG_MOUSE_PS2_LOGIPS2PP) += logips2pp.o
26psmouse-$(CONFIG_MOUSE_PS2_LIFEBOOK) += lifebook.o 27psmouse-$(CONFIG_MOUSE_PS2_LIFEBOOK) += lifebook.o
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
new file mode 100644
index 000000000000..b9a25d57bc5e
--- /dev/null
+++ b/drivers/input/mouse/elantech.c
@@ -0,0 +1,674 @@
1/*
2 * Elantech Touchpad driver (v5)
3 *
4 * Copyright (C) 2007-2008 Arjan Opmeer <arjan@opmeer.net>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation.
9 *
10 * Trademarks are the property of their respective owners.
11 */
12
13#include <linux/delay.h>
14#include <linux/module.h>
15#include <linux/input.h>
16#include <linux/serio.h>
17#include <linux/libps2.h>
18#include "psmouse.h"
19#include "elantech.h"
20
21#define elantech_debug(format, arg...) \
22 do { \
23 if (etd->debug) \
24 printk(KERN_DEBUG format, ##arg); \
25 } while (0)
26
27/*
28 * Send a Synaptics style sliced query command
29 */
30static int synaptics_send_cmd(struct psmouse *psmouse, unsigned char c,
31 unsigned char *param)
32{
33 if (psmouse_sliced_command(psmouse, c) ||
34 ps2_command(&psmouse->ps2dev, param, PSMOUSE_CMD_GETINFO)) {
35 pr_err("elantech.c: synaptics_send_cmd query 0x%02x failed.\n", c);
36 return -1;
37 }
38
39 return 0;
40}
41
42/*
43 * A retrying version of ps2_command
44 */
45static int elantech_ps2_command(struct psmouse *psmouse,
46 unsigned char *param, int command)
47{
48 struct ps2dev *ps2dev = &psmouse->ps2dev;
49 struct elantech_data *etd = psmouse->private;
50 int rc;
51 int tries = ETP_PS2_COMMAND_TRIES;
52
53 do {
54 rc = ps2_command(ps2dev, param, command);
55 if (rc == 0)
56 break;
57 tries--;
58 elantech_debug("elantech.c: retrying ps2 command 0x%02x (%d).\n",
59 command, tries);
60 msleep(ETP_PS2_COMMAND_DELAY);
61 } while (tries > 0);
62
63 if (rc)
64 pr_err("elantech.c: ps2 command 0x%02x failed.\n", command);
65
66 return rc;
67}
68
69/*
70 * Send an Elantech style special command to read a value from a register
71 */
72static int elantech_read_reg(struct psmouse *psmouse, unsigned char reg,
73 unsigned char *val)
74{
75 struct elantech_data *etd = psmouse->private;
76 unsigned char param[3];
77 int rc = 0;
78
79 if (reg < 0x10 || reg > 0x26)
80 return -1;
81
82 if (reg > 0x11 && reg < 0x20)
83 return -1;
84
85 switch (etd->hw_version) {
86 case 1:
87 if (psmouse_sliced_command(psmouse, ETP_REGISTER_READ) ||
88 psmouse_sliced_command(psmouse, reg) ||
89 ps2_command(&psmouse->ps2dev, param, PSMOUSE_CMD_GETINFO)) {
90 rc = -1;
91 }
92 break;
93
94 case 2:
95 if (elantech_ps2_command(psmouse, NULL, ETP_PS2_CUSTOM_COMMAND) ||
96 elantech_ps2_command(psmouse, NULL, ETP_REGISTER_READ) ||
97 elantech_ps2_command(psmouse, NULL, ETP_PS2_CUSTOM_COMMAND) ||
98 elantech_ps2_command(psmouse, NULL, reg) ||
99 elantech_ps2_command(psmouse, param, PSMOUSE_CMD_GETINFO)) {
100 rc = -1;
101 }
102 break;
103 }
104
105 if (rc)
106 pr_err("elantech.c: failed to read register 0x%02x.\n", reg);
107 else
108 *val = param[0];
109
110 return rc;
111}
112
113/*
114 * Send an Elantech style special command to write a register with a value
115 */
116static int elantech_write_reg(struct psmouse *psmouse, unsigned char reg,
117 unsigned char val)
118{
119 struct elantech_data *etd = psmouse->private;
120 int rc = 0;
121
122 if (reg < 0x10 || reg > 0x26)
123 return -1;
124
125 if (reg > 0x11 && reg < 0x20)
126 return -1;
127
128 switch (etd->hw_version) {
129 case 1:
130 if (psmouse_sliced_command(psmouse, ETP_REGISTER_WRITE) ||
131 psmouse_sliced_command(psmouse, reg) ||
132 psmouse_sliced_command(psmouse, val) ||
133 ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETSCALE11)) {
134 rc = -1;
135 }
136 break;
137
138 case 2:
139 if (elantech_ps2_command(psmouse, NULL, ETP_PS2_CUSTOM_COMMAND) ||
140 elantech_ps2_command(psmouse, NULL, ETP_REGISTER_WRITE) ||
141 elantech_ps2_command(psmouse, NULL, ETP_PS2_CUSTOM_COMMAND) ||
142 elantech_ps2_command(psmouse, NULL, reg) ||
143 elantech_ps2_command(psmouse, NULL, ETP_PS2_CUSTOM_COMMAND) ||
144 elantech_ps2_command(psmouse, NULL, val) ||
145 elantech_ps2_command(psmouse, NULL, PSMOUSE_CMD_SETSCALE11)) {
146 rc = -1;
147 }
148 break;
149 }
150
151 if (rc)
152 pr_err("elantech.c: failed to write register 0x%02x with value 0x%02x.\n",
153 reg, val);
154
155 return rc;
156}
157
158/*
159 * Dump a complete mouse movement packet to the syslog
160 */
161static void elantech_packet_dump(unsigned char *packet, int size)
162{
163 int i;
164
165 printk(KERN_DEBUG "elantech.c: PS/2 packet [");
166 for (i = 0; i < size; i++)
167 printk("%s0x%02x ", (i) ? ", " : " ", packet[i]);
168 printk("]\n");
169}
170
171/*
172 * Interpret complete data packets and report absolute mode input events for
173 * hardware version 1. (4 byte packets)
174 */
175static void elantech_report_absolute_v1(struct psmouse *psmouse)
176{
177 struct input_dev *dev = psmouse->dev;
178 struct elantech_data *etd = psmouse->private;
179 unsigned char *packet = psmouse->packet;
180 int fingers;
181
182 if (etd->fw_version_maj == 0x01) {
183 /* byte 0: D U p1 p2 1 p3 R L
184 byte 1: f 0 th tw x9 x8 y9 y8 */
185 fingers = ((packet[1] & 0x80) >> 7) +
186 ((packet[1] & 0x30) >> 4);
187 } else {
188 /* byte 0: n1 n0 p2 p1 1 p3 R L
189 byte 1: 0 0 0 0 x9 x8 y9 y8 */
190 fingers = (packet[0] & 0xc0) >> 6;
191 }
192
193 input_report_key(dev, BTN_TOUCH, fingers != 0);
194
195 /* byte 2: x7 x6 x5 x4 x3 x2 x1 x0
196 byte 3: y7 y6 y5 y4 y3 y2 y1 y0 */
197 if (fingers) {
198 input_report_abs(dev, ABS_X,
199 ((packet[1] & 0x0c) << 6) | packet[2]);
200 input_report_abs(dev, ABS_Y, ETP_YMAX_V1 -
201 (((packet[1] & 0x03) << 8) | packet[3]));
202 }
203
204 input_report_key(dev, BTN_TOOL_FINGER, fingers == 1);
205 input_report_key(dev, BTN_TOOL_DOUBLETAP, fingers == 2);
206 input_report_key(dev, BTN_TOOL_TRIPLETAP, fingers == 3);
207 input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
208 input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
209
210 if ((etd->fw_version_maj == 0x01) &&
211 (etd->capabilities & ETP_CAP_HAS_ROCKER)) {
212 /* rocker up */
213 input_report_key(dev, BTN_FORWARD, packet[0] & 0x40);
214 /* rocker down */
215 input_report_key(dev, BTN_BACK, packet[0] & 0x80);
216 }
217
218 input_sync(dev);
219}
220
221/*
222 * Interpret complete data packets and report absolute mode input events for
223 * hardware version 2. (6 byte packets)
224 */
225static void elantech_report_absolute_v2(struct psmouse *psmouse)
226{
227 struct input_dev *dev = psmouse->dev;
228 unsigned char *packet = psmouse->packet;
229 int fingers, x1, y1, x2, y2;
230
231 /* byte 0: n1 n0 . . . . R L */
232 fingers = (packet[0] & 0xc0) >> 6;
233 input_report_key(dev, BTN_TOUCH, fingers != 0);
234
235 switch (fingers) {
236 case 1:
237 /* byte 1: x15 x14 x13 x12 x11 x10 x9 x8
 238 byte 2: x7 x6 x5 x4 x3 x2 x1 x0 */
239 input_report_abs(dev, ABS_X, (packet[1] << 8) | packet[2]);
 240 /* byte 4: y15 y14 y13 y12 y11 y10 y9 y8
241 byte 5: y7 y6 y5 y4 y3 y2 y1 y0 */
242 input_report_abs(dev, ABS_Y, ETP_YMAX_V2 -
243 ((packet[4] << 8) | packet[5]));
244 break;
245
246 case 2:
247 /* The coordinate of each finger is reported separately with
248 a lower resolution for two finger touches */
249 /* byte 0: . . ay8 ax8 . . . .
250 byte 1: ax7 ax6 ax5 ax4 ax3 ax2 ax1 ax0 */
251 x1 = ((packet[0] & 0x10) << 4) | packet[1];
252 /* byte 2: ay7 ay6 ay5 ay4 ay3 ay2 ay1 ay0 */
253 y1 = ETP_2FT_YMAX - (((packet[0] & 0x20) << 3) | packet[2]);
254 /* byte 3: . . by8 bx8 . . . .
255 byte 4: bx7 bx6 bx5 bx4 bx3 bx2 bx1 bx0 */
256 x2 = ((packet[3] & 0x10) << 4) | packet[4];
 257 /* byte 5: by7 by6 by5 by4 by3 by2 by1 by0 */
258 y2 = ETP_2FT_YMAX - (((packet[3] & 0x20) << 3) | packet[5]);
259 /* For compatibility with the X Synaptics driver scale up one
 260 coordinate and report as ordinary mouse movement */
261 input_report_abs(dev, ABS_X, x1 << 2);
262 input_report_abs(dev, ABS_Y, y1 << 2);
263 /* For compatibility with the proprietary X Elantech driver
264 report both coordinates as hat coordinates */
265 input_report_abs(dev, ABS_HAT0X, x1);
266 input_report_abs(dev, ABS_HAT0Y, y1);
267 input_report_abs(dev, ABS_HAT1X, x2);
268 input_report_abs(dev, ABS_HAT1Y, y2);
269 break;
270 }
271
272 input_report_key(dev, BTN_TOOL_FINGER, fingers == 1);
273 input_report_key(dev, BTN_TOOL_DOUBLETAP, fingers == 2);
274 input_report_key(dev, BTN_TOOL_TRIPLETAP, fingers == 3);
275 input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
276 input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
277
278 input_sync(dev);
279}
280
281static int elantech_check_parity_v1(struct psmouse *psmouse)
282{
283 struct elantech_data *etd = psmouse->private;
284 unsigned char *packet = psmouse->packet;
285 unsigned char p1, p2, p3;
286
287 /* Parity bits are placed differently */
288 if (etd->fw_version_maj == 0x01) {
289 /* byte 0: D U p1 p2 1 p3 R L */
290 p1 = (packet[0] & 0x20) >> 5;
291 p2 = (packet[0] & 0x10) >> 4;
292 } else {
293 /* byte 0: n1 n0 p2 p1 1 p3 R L */
294 p1 = (packet[0] & 0x10) >> 4;
295 p2 = (packet[0] & 0x20) >> 5;
296 }
297
298 p3 = (packet[0] & 0x04) >> 2;
299
300 return etd->parity[packet[1]] == p1 &&
301 etd->parity[packet[2]] == p2 &&
302 etd->parity[packet[3]] == p3;
303}
304
305/*
306 * Process byte stream from mouse and handle complete packets
307 */
308static psmouse_ret_t elantech_process_byte(struct psmouse *psmouse)
309{
310 struct elantech_data *etd = psmouse->private;
311
312 if (psmouse->pktcnt < psmouse->pktsize)
313 return PSMOUSE_GOOD_DATA;
314
315 if (etd->debug > 1)
316 elantech_packet_dump(psmouse->packet, psmouse->pktsize);
317
318 switch (etd->hw_version) {
319 case 1:
320 if (etd->paritycheck && !elantech_check_parity_v1(psmouse))
321 return PSMOUSE_BAD_DATA;
322
323 elantech_report_absolute_v1(psmouse);
324 break;
325
326 case 2:
327 /* We don't know how to check parity in protocol v2 */
328 elantech_report_absolute_v2(psmouse);
329 break;
330 }
331
332 return PSMOUSE_FULL_PACKET;
333}
334
335/*
336 * Put the touchpad into absolute mode
337 */
338static int elantech_set_absolute_mode(struct psmouse *psmouse)
339{
340 struct elantech_data *etd = psmouse->private;
341 unsigned char val;
342 int tries = ETP_READ_BACK_TRIES;
343 int rc = 0;
344
345 switch (etd->hw_version) {
346 case 1:
347 etd->reg_10 = 0x16;
348 etd->reg_11 = 0x8f;
349 if (elantech_write_reg(psmouse, 0x10, etd->reg_10) ||
350 elantech_write_reg(psmouse, 0x11, etd->reg_11)) {
351 rc = -1;
352 }
353 break;
354
355 case 2:
356 /* Windows driver values */
357 etd->reg_10 = 0x54;
358 etd->reg_11 = 0x88; /* 0x8a */
359 etd->reg_21 = 0x60; /* 0x00 */
360 if (elantech_write_reg(psmouse, 0x10, etd->reg_10) ||
361 elantech_write_reg(psmouse, 0x11, etd->reg_11) ||
362 elantech_write_reg(psmouse, 0x21, etd->reg_21)) {
363 rc = -1;
364 break;
365 }
366 /*
 367 * Read back reg 0x10. The touchpad is probably initialising
368 * and not ready until we read back the value we just wrote.
369 */
370 do {
371 rc = elantech_read_reg(psmouse, 0x10, &val);
372 if (rc == 0)
373 break;
374 tries--;
375 elantech_debug("elantech.c: retrying read (%d).\n",
376 tries);
377 msleep(ETP_READ_BACK_DELAY);
378 } while (tries > 0);
379 if (rc)
380 pr_err("elantech.c: failed to read back register 0x10.\n");
381 break;
382 }
383
384 if (rc)
385 pr_err("elantech.c: failed to initialise registers.\n");
386
387 return rc;
388}
389
390/*
391 * Set the appropriate event bits for the input subsystem
392 */
393static void elantech_set_input_params(struct psmouse *psmouse)
394{
395 struct input_dev *dev = psmouse->dev;
396 struct elantech_data *etd = psmouse->private;
397
398 __set_bit(EV_KEY, dev->evbit);
399 __set_bit(EV_ABS, dev->evbit);
400
401 __set_bit(BTN_LEFT, dev->keybit);
402 __set_bit(BTN_RIGHT, dev->keybit);
403
404 __set_bit(BTN_TOUCH, dev->keybit);
405 __set_bit(BTN_TOOL_FINGER, dev->keybit);
406 __set_bit(BTN_TOOL_DOUBLETAP, dev->keybit);
407 __set_bit(BTN_TOOL_TRIPLETAP, dev->keybit);
408
409 switch (etd->hw_version) {
410 case 1:
411 /* Rocker button */
412 if ((etd->fw_version_maj == 0x01) &&
413 (etd->capabilities & ETP_CAP_HAS_ROCKER)) {
414 __set_bit(BTN_FORWARD, dev->keybit);
415 __set_bit(BTN_BACK, dev->keybit);
416 }
417 input_set_abs_params(dev, ABS_X, ETP_XMIN_V1, ETP_XMAX_V1, 0, 0);
418 input_set_abs_params(dev, ABS_Y, ETP_YMIN_V1, ETP_YMAX_V1, 0, 0);
419 break;
420
421 case 2:
422 input_set_abs_params(dev, ABS_X, ETP_XMIN_V2, ETP_XMAX_V2, 0, 0);
423 input_set_abs_params(dev, ABS_Y, ETP_YMIN_V2, ETP_YMAX_V2, 0, 0);
424 input_set_abs_params(dev, ABS_HAT0X, ETP_2FT_XMIN, ETP_2FT_XMAX, 0, 0);
425 input_set_abs_params(dev, ABS_HAT0Y, ETP_2FT_YMIN, ETP_2FT_YMAX, 0, 0);
426 input_set_abs_params(dev, ABS_HAT1X, ETP_2FT_XMIN, ETP_2FT_XMAX, 0, 0);
427 input_set_abs_params(dev, ABS_HAT1Y, ETP_2FT_YMIN, ETP_2FT_YMAX, 0, 0);
428 break;
429 }
430}
431
432struct elantech_attr_data {
433 size_t field_offset;
434 unsigned char reg;
435};
436
437/*
438 * Display a register value by reading a sysfs entry
439 */
440static ssize_t elantech_show_int_attr(struct psmouse *psmouse, void *data,
441 char *buf)
442{
443 struct elantech_data *etd = psmouse->private;
444 struct elantech_attr_data *attr = data;
445 unsigned char *reg = (unsigned char *) etd + attr->field_offset;
446 int rc = 0;
447
448 if (attr->reg)
449 rc = elantech_read_reg(psmouse, attr->reg, reg);
450
451 return sprintf(buf, "0x%02x\n", (attr->reg && rc) ? -1 : *reg);
452}
453
454/*
455 * Write a register value by writing a sysfs entry
456 */
457static ssize_t elantech_set_int_attr(struct psmouse *psmouse,
458 void *data, const char *buf, size_t count)
459{
460 struct elantech_data *etd = psmouse->private;
461 struct elantech_attr_data *attr = data;
462 unsigned char *reg = (unsigned char *) etd + attr->field_offset;
463 unsigned long value;
464 int err;
465
466 err = strict_strtoul(buf, 16, &value);
467 if (err)
468 return err;
469
470 if (value > 0xff)
471 return -EINVAL;
472
473 /* Do we need to preserve some bits for version 2 hardware too? */
474 if (etd->hw_version == 1) {
475 if (attr->reg == 0x10)
476 /* Force absolute mode always on */
477 value |= ETP_R10_ABSOLUTE_MODE;
478 else if (attr->reg == 0x11)
479 /* Force 4 byte mode always on */
480 value |= ETP_R11_4_BYTE_MODE;
481 }
482
483 if (!attr->reg || elantech_write_reg(psmouse, attr->reg, value) == 0)
484 *reg = value;
485
486 return count;
487}
488
489#define ELANTECH_INT_ATTR(_name, _register) \
490 static struct elantech_attr_data elantech_attr_##_name = { \
491 .field_offset = offsetof(struct elantech_data, _name), \
492 .reg = _register, \
493 }; \
494 PSMOUSE_DEFINE_ATTR(_name, S_IWUSR | S_IRUGO, \
495 &elantech_attr_##_name, \
496 elantech_show_int_attr, \
497 elantech_set_int_attr)
498
499ELANTECH_INT_ATTR(reg_10, 0x10);
500ELANTECH_INT_ATTR(reg_11, 0x11);
501ELANTECH_INT_ATTR(reg_20, 0x20);
502ELANTECH_INT_ATTR(reg_21, 0x21);
503ELANTECH_INT_ATTR(reg_22, 0x22);
504ELANTECH_INT_ATTR(reg_23, 0x23);
505ELANTECH_INT_ATTR(reg_24, 0x24);
506ELANTECH_INT_ATTR(reg_25, 0x25);
507ELANTECH_INT_ATTR(reg_26, 0x26);
508ELANTECH_INT_ATTR(debug, 0);
509ELANTECH_INT_ATTR(paritycheck, 0);
510
511static struct attribute *elantech_attrs[] = {
512 &psmouse_attr_reg_10.dattr.attr,
513 &psmouse_attr_reg_11.dattr.attr,
514 &psmouse_attr_reg_20.dattr.attr,
515 &psmouse_attr_reg_21.dattr.attr,
516 &psmouse_attr_reg_22.dattr.attr,
517 &psmouse_attr_reg_23.dattr.attr,
518 &psmouse_attr_reg_24.dattr.attr,
519 &psmouse_attr_reg_25.dattr.attr,
520 &psmouse_attr_reg_26.dattr.attr,
521 &psmouse_attr_debug.dattr.attr,
522 &psmouse_attr_paritycheck.dattr.attr,
523 NULL
524};
525
526static struct attribute_group elantech_attr_group = {
527 .attrs = elantech_attrs,
528};
529
530/*
531 * Use magic knock to detect Elantech touchpad
532 */
533int elantech_detect(struct psmouse *psmouse, int set_properties)
534{
535 struct ps2dev *ps2dev = &psmouse->ps2dev;
536 unsigned char param[3];
537
538 ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_RESET_DIS);
539
540 if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE) ||
541 ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
542 ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
543 ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
544 ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO)) {
545 pr_err("elantech.c: sending Elantech magic knock failed.\n");
546 return -1;
547 }
548
549 /*
550 * Report this in case there are Elantech models that use a different
551 * set of magic numbers
552 */
553 if (param[0] != 0x3c || param[1] != 0x03 || param[2] != 0xc8) {
554 pr_info("elantech.c: unexpected magic knock result 0x%02x, 0x%02x, 0x%02x.\n",
555 param[0], param[1], param[2]);
556 return -1;
557 }
558
559 if (set_properties) {
560 psmouse->vendor = "Elantech";
561 psmouse->name = "Touchpad";
562 }
563
564 return 0;
565}
566
567/*
568 * Clean up sysfs entries when disconnecting
569 */
570static void elantech_disconnect(struct psmouse *psmouse)
571{
572 sysfs_remove_group(&psmouse->ps2dev.serio->dev.kobj,
573 &elantech_attr_group);
574 kfree(psmouse->private);
575 psmouse->private = NULL;
576}
577
578/*
579 * Put the touchpad back into absolute mode when reconnecting
580 */
581static int elantech_reconnect(struct psmouse *psmouse)
582{
583 if (elantech_detect(psmouse, 0))
584 return -1;
585
586 if (elantech_set_absolute_mode(psmouse)) {
587 pr_err("elantech.c: failed to put touchpad back into absolute mode.\n");
588 return -1;
589 }
590
591 return 0;
592}
593
594/*
595 * Initialize the touchpad and create sysfs entries
596 */
597int elantech_init(struct psmouse *psmouse)
598{
599 struct elantech_data *etd;
600 int i, error;
601 unsigned char param[3];
602
603 etd = kzalloc(sizeof(struct elantech_data), GFP_KERNEL);
604 psmouse->private = etd;
605 if (!etd)
606 return -1;
607
608 etd->parity[0] = 1;
609 for (i = 1; i < 256; i++)
610 etd->parity[i] = etd->parity[i & (i - 1)] ^ 1;
611
612 /*
613 * Find out what version hardware this is
614 */
615 if (synaptics_send_cmd(psmouse, ETP_FW_VERSION_QUERY, param)) {
616 pr_err("elantech.c: failed to query firmware version.\n");
617 goto init_fail;
618 }
619 pr_info("elantech.c: Elantech version query result 0x%02x, 0x%02x, 0x%02x.\n",
620 param[0], param[1], param[2]);
621 etd->fw_version_maj = param[0];
622 etd->fw_version_min = param[2];
623
624 /*
625 * Assume every version greater than this is new EeePC style
626 * hardware with 6 byte packets
627 */
628 if (etd->fw_version_maj >= 0x02 && etd->fw_version_min >= 0x30) {
629 etd->hw_version = 2;
630 /* For now show extra debug information */
631 etd->debug = 1;
632 /* Don't know how to do parity checking for version 2 */
633 etd->paritycheck = 0;
634 } else {
635 etd->hw_version = 1;
636 etd->paritycheck = 1;
637 }
638 pr_info("elantech.c: assuming hardware version %d, firmware version %d.%d\n",
639 etd->hw_version, etd->fw_version_maj, etd->fw_version_min);
640
641 if (synaptics_send_cmd(psmouse, ETP_CAPABILITIES_QUERY, param)) {
642 pr_err("elantech.c: failed to query capabilities.\n");
643 goto init_fail;
644 }
645 pr_info("elantech.c: Synaptics capabilities query result 0x%02x, 0x%02x, 0x%02x.\n",
646 param[0], param[1], param[2]);
647 etd->capabilities = param[0];
648
649 if (elantech_set_absolute_mode(psmouse)) {
650 pr_err("elantech.c: failed to put touchpad into absolute mode.\n");
651 goto init_fail;
652 }
653
654 elantech_set_input_params(psmouse);
655
656 error = sysfs_create_group(&psmouse->ps2dev.serio->dev.kobj,
657 &elantech_attr_group);
658 if (error) {
659 pr_err("elantech.c: failed to create sysfs attributes, error: %d.\n",
660 error);
661 goto init_fail;
662 }
663
664 psmouse->protocol_handler = elantech_process_byte;
665 psmouse->disconnect = elantech_disconnect;
666 psmouse->reconnect = elantech_reconnect;
667 psmouse->pktsize = etd->hw_version == 2 ? 6 : 4;
668
669 return 0;
670
671 init_fail:
672 kfree(etd);
673 return -1;
674}
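elantech_init() above fills a 256-entry parity lookup table with parity[i] = parity[i & (i - 1)] ^ 1: clearing the lowest set bit of i flips the parity, and parity[0] is seeded with 1, so the table ends up holding 1 for bytes with an even number of set bits and 0 otherwise, which is what elantech_check_parity_v1() compares against the parity bits in byte 0. A standalone sketch of the recurrence (ordinary userspace C, purely illustrative):

	#include <stdio.h>

	int main(void)
	{
		unsigned char parity[256];
		int i;

		parity[0] = 1;			/* zero set bits -> even parity */
		for (i = 1; i < 256; i++)
			parity[i] = parity[i & (i - 1)] ^ 1;

		/* 0x03 has two bits set -> 1; 0x07 has three bits set -> 0 */
		printf("parity[0x03] = %d, parity[0x07] = %d\n",
		       parity[0x03], parity[0x07]);
		return 0;
	}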
diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h
new file mode 100644
index 000000000000..bee282b540bc
--- /dev/null
+++ b/drivers/input/mouse/elantech.h
@@ -0,0 +1,124 @@
1/*
2 * Elantech Touchpad driver (v5)
3 *
4 * Copyright (C) 2007-2008 Arjan Opmeer <arjan@opmeer.net>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation.
9 *
10 * Trademarks are the property of their respective owners.
11 */
12
13#ifndef _ELANTECH_H
14#define _ELANTECH_H
15
16/*
17 * Command values for Synaptics style queries
18 */
19#define ETP_FW_VERSION_QUERY 0x01
20#define ETP_CAPABILITIES_QUERY 0x02
21
22/*
23 * Command values for register reading or writing
24 */
25#define ETP_REGISTER_READ 0x10
26#define ETP_REGISTER_WRITE 0x11
27
28/*
29 * Hardware version 2 custom PS/2 command value
30 */
31#define ETP_PS2_CUSTOM_COMMAND 0xf8
32
33/*
34 * Times to retry a ps2_command and millisecond delay between tries
35 */
36#define ETP_PS2_COMMAND_TRIES 3
37#define ETP_PS2_COMMAND_DELAY 500
38
39/*
40 * Times to try to read back a register and millisecond delay between tries
41 */
42#define ETP_READ_BACK_TRIES 5
43#define ETP_READ_BACK_DELAY 2000
44
45/*
46 * Register bitmasks for hardware version 1
47 */
48#define ETP_R10_ABSOLUTE_MODE 0x04
49#define ETP_R11_4_BYTE_MODE 0x02
50
51/*
52 * Capability bitmasks
53 */
54#define ETP_CAP_HAS_ROCKER 0x04
55
56/*
 57 * One hard-to-find application note states that the X axis range is 0 to 576
 58 * and the Y axis range is 0 to 384 for hardware version 1.
59 * Edge fuzz might be necessary because of bezel around the touchpad
60 */
61#define ETP_EDGE_FUZZ_V1 32
62
63#define ETP_XMIN_V1 ( 0 + ETP_EDGE_FUZZ_V1)
64#define ETP_XMAX_V1 (576 - ETP_EDGE_FUZZ_V1)
65#define ETP_YMIN_V1 ( 0 + ETP_EDGE_FUZZ_V1)
66#define ETP_YMAX_V1 (384 - ETP_EDGE_FUZZ_V1)
67
68/*
69 * It seems the resolution for hardware version 2 doubled.
70 * Hence the X and Y ranges are doubled too.
71 * The bezel around the pad also appears to be smaller
72 */
73#define ETP_EDGE_FUZZ_V2 8
74
75#define ETP_XMIN_V2 ( 0 + ETP_EDGE_FUZZ_V2)
76#define ETP_XMAX_V2 (1152 - ETP_EDGE_FUZZ_V2)
77#define ETP_YMIN_V2 ( 0 + ETP_EDGE_FUZZ_V2)
78#define ETP_YMAX_V2 ( 768 - ETP_EDGE_FUZZ_V2)
79
80/*
81 * For two finger touches the coordinate of each finger gets reported
82 * separately but with reduced resolution.
83 */
84#define ETP_2FT_FUZZ 4
85
86#define ETP_2FT_XMIN ( 0 + ETP_2FT_FUZZ)
87#define ETP_2FT_XMAX (288 - ETP_2FT_FUZZ)
88#define ETP_2FT_YMIN ( 0 + ETP_2FT_FUZZ)
89#define ETP_2FT_YMAX (192 - ETP_2FT_FUZZ)
90
91struct elantech_data {
92 unsigned char reg_10;
93 unsigned char reg_11;
94 unsigned char reg_20;
95 unsigned char reg_21;
96 unsigned char reg_22;
97 unsigned char reg_23;
98 unsigned char reg_24;
99 unsigned char reg_25;
100 unsigned char reg_26;
101 unsigned char debug;
102 unsigned char capabilities;
103 unsigned char fw_version_maj;
104 unsigned char fw_version_min;
105 unsigned char hw_version;
106 unsigned char paritycheck;
107 unsigned char parity[256];
108};
109
110#ifdef CONFIG_MOUSE_PS2_ELANTECH
111int elantech_detect(struct psmouse *psmouse, int set_properties);
112int elantech_init(struct psmouse *psmouse);
113#else
114static inline int elantech_detect(struct psmouse *psmouse, int set_properties)
115{
116 return -ENOSYS;
117}
118static inline int elantech_init(struct psmouse *psmouse)
119{
120 return -ENOSYS;
121}
122#endif /* CONFIG_MOUSE_PS2_ELANTECH */
123
124#endif
diff --git a/drivers/input/mouse/hgpk.c b/drivers/input/mouse/hgpk.c
index e82d34201e97..88f04bf2ad6c 100644
--- a/drivers/input/mouse/hgpk.c
+++ b/drivers/input/mouse/hgpk.c
@@ -125,7 +125,7 @@ static void hgpk_spewing_hack(struct psmouse *psmouse,
125 */ 125 */
126static int hgpk_validate_byte(unsigned char *packet) 126static int hgpk_validate_byte(unsigned char *packet)
127{ 127{
128 return (packet[0] & 0x0C) == 0x08; 128 return (packet[0] & 0x0C) != 0x08;
129} 129}
130 130
131static void hgpk_process_packet(struct psmouse *psmouse) 131static void hgpk_process_packet(struct psmouse *psmouse)
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index 126e977e199e..f8f86de694bb 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -29,6 +29,7 @@
29#include "lifebook.h" 29#include "lifebook.h"
30#include "trackpoint.h" 30#include "trackpoint.h"
31#include "touchkit_ps2.h" 31#include "touchkit_ps2.h"
32#include "elantech.h"
32 33
33#define DRIVER_DESC "PS/2 mouse driver" 34#define DRIVER_DESC "PS/2 mouse driver"
34 35
@@ -650,6 +651,19 @@ static int psmouse_extensions(struct psmouse *psmouse,
650 max_proto = PSMOUSE_IMEX; 651 max_proto = PSMOUSE_IMEX;
651 } 652 }
652 653
654/*
655 * Try Elantech touchpad.
656 */
657 if (max_proto > PSMOUSE_IMEX &&
658 elantech_detect(psmouse, set_properties) == 0) {
659 if (!set_properties || elantech_init(psmouse) == 0)
660 return PSMOUSE_ELANTECH;
661/*
662 * Init failed, try basic relative protocols
663 */
664 max_proto = PSMOUSE_IMEX;
665 }
666
653 if (max_proto > PSMOUSE_IMEX) { 667 if (max_proto > PSMOUSE_IMEX) {
654 if (genius_detect(psmouse, set_properties) == 0) 668 if (genius_detect(psmouse, set_properties) == 0)
655 return PSMOUSE_GENPS; 669 return PSMOUSE_GENPS;
@@ -789,6 +803,15 @@ static const struct psmouse_protocol psmouse_protocols[] = {
789 .detect = hgpk_detect, 803 .detect = hgpk_detect,
790 }, 804 },
791#endif 805#endif
806#ifdef CONFIG_MOUSE_PS2_ELANTECH
807 {
808 .type = PSMOUSE_ELANTECH,
809 .name = "ETPS/2",
810 .alias = "elantech",
811 .detect = elantech_detect,
812 .init = elantech_init,
813 },
814 #endif
792 { 815 {
793 .type = PSMOUSE_CORTRON, 816 .type = PSMOUSE_CORTRON,
794 .name = "CortronPS/2", 817 .name = "CortronPS/2",
diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
index 8b608a1cdd12..54ed267894bd 100644
--- a/drivers/input/mouse/psmouse.h
+++ b/drivers/input/mouse/psmouse.h
@@ -90,6 +90,7 @@ enum psmouse_type {
90 PSMOUSE_TOUCHKIT_PS2, 90 PSMOUSE_TOUCHKIT_PS2,
91 PSMOUSE_CORTRON, 91 PSMOUSE_CORTRON,
92 PSMOUSE_HGPK, 92 PSMOUSE_HGPK,
93 PSMOUSE_ELANTECH,
93 PSMOUSE_AUTO /* This one should always be last */ 94 PSMOUSE_AUTO /* This one should always be last */
94}; 95};
95 96
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
index 8137e50ded87..d8c056fe7e98 100644
--- a/drivers/input/mousedev.c
+++ b/drivers/input/mousedev.c
@@ -519,7 +519,6 @@ static int mousedev_release(struct inode *inode, struct file *file)
519 struct mousedev_client *client = file->private_data; 519 struct mousedev_client *client = file->private_data;
520 struct mousedev *mousedev = client->mousedev; 520 struct mousedev *mousedev = client->mousedev;
521 521
522 mousedev_fasync(-1, file, 0);
523 mousedev_detach_client(mousedev, client); 522 mousedev_detach_client(mousedev, client);
524 kfree(client); 523 kfree(client);
525 524
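Several hunks in this series (evdev, joydev, hp_sdc_rtc, mousedev, and serio_raw) drop the explicit ->fasync(-1, file, 0) call from their release paths. By this point the VFS drops FASYNC subscribers itself when such a file is closed, so a driver only has to keep its fasync_helper() callback. A minimal sketch with hypothetical example_* names:

	#include <linux/fs.h>
	#include <linux/module.h>

	static struct fasync_struct *example_async_queue;

	static int example_fasync(int fd, struct file *file, int on)
	{
		/* the only fasync bookkeeping the driver still owns */
		return fasync_helper(fd, file, on, &example_async_queue);
	}

	static int example_release(struct inode *inode, struct file *file)
	{
		/*
		 * No example_fasync(-1, file, 0) here: the core calls
		 * ->fasync() with on == 0 when a FASYNC file is closed,
		 * which is why the hunks above could drop those calls.
		 */
		return 0;
	}

	static const struct file_operations example_fops = {
		.owner   = THIS_MODULE,
		.release = example_release,
		.fasync  = example_fasync,
	};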
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index a321aea2c7b5..29e686388a2c 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -135,6 +135,14 @@ static struct dmi_system_id __initdata i8042_dmi_noloop_table[] = {
135 DMI_MATCH(DMI_PRODUCT_VERSION, "5a"), 135 DMI_MATCH(DMI_PRODUCT_VERSION, "5a"),
136 }, 136 },
137 }, 137 },
138 {
139 .ident = "Blue FB5601",
140 .matches = {
141 DMI_MATCH(DMI_SYS_VENDOR, "blue"),
142 DMI_MATCH(DMI_PRODUCT_NAME, "FB5601"),
143 DMI_MATCH(DMI_PRODUCT_VERSION, "M606"),
144 },
145 },
138 { } 146 { }
139}; 147};
140 148
@@ -329,6 +337,20 @@ static struct dmi_system_id __initdata i8042_dmi_nomux_table[] = {
329 DMI_MATCH(DMI_PRODUCT_NAME, "2656"), 337 DMI_MATCH(DMI_PRODUCT_NAME, "2656"),
330 }, 338 },
331 }, 339 },
340 {
341 .ident = "Dell XPS M1530",
342 .matches = {
343 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
344 DMI_MATCH(DMI_PRODUCT_NAME, "XPS M1530"),
345 },
346 },
347 {
348 .ident = "Compal HEL80I",
349 .matches = {
350 DMI_MATCH(DMI_SYS_VENDOR, "COMPAL"),
351 DMI_MATCH(DMI_PRODUCT_NAME, "HEL80I"),
352 },
353 },
332 { } 354 { }
333}; 355};
334 356
diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
index 470770c09260..06bbd0e74c6f 100644
--- a/drivers/input/serio/serio_raw.c
+++ b/drivers/input/serio/serio_raw.c
@@ -135,7 +135,6 @@ static int serio_raw_release(struct inode *inode, struct file *file)
135 135
136 mutex_lock(&serio_raw_mutex); 136 mutex_lock(&serio_raw_mutex);
137 137
138 serio_raw_fasync(-1, file, 0);
139 serio_raw_cleanup(serio_raw); 138 serio_raw_cleanup(serio_raw);
140 139
141 mutex_unlock(&serio_raw_mutex); 140 mutex_unlock(&serio_raw_mutex);
diff --git a/drivers/input/tablet/wacom.h b/drivers/input/tablet/wacom.h
index ca62ec639f8f..677680e9f54f 100644
--- a/drivers/input/tablet/wacom.h
+++ b/drivers/input/tablet/wacom.h
@@ -66,6 +66,7 @@
66 * - Support Intuos3 4x6 66 * - Support Intuos3 4x6
67 * v1.47 (pc) - Added support for Bamboo 67 * v1.47 (pc) - Added support for Bamboo
68 * v1.48 (pc) - Added support for Bamboo1, BambooFun, and Cintiq 12WX 68 * v1.48 (pc) - Added support for Bamboo1, BambooFun, and Cintiq 12WX
69 * v1.49 (pc) - Added support for USB Tablet PC (0x90, 0x93, and 0x9A)
69 */ 70 */
70 71
71/* 72/*
@@ -86,7 +87,7 @@
86/* 87/*
87 * Version Information 88 * Version Information
88 */ 89 */
89#define DRIVER_VERSION "v1.48" 90#define DRIVER_VERSION "v1.49"
90#define DRIVER_AUTHOR "Vojtech Pavlik <vojtech@ucw.cz>" 91#define DRIVER_AUTHOR "Vojtech Pavlik <vojtech@ucw.cz>"
91#define DRIVER_DESC "USB Wacom Graphire and Wacom Intuos tablet driver" 92#define DRIVER_DESC "USB Wacom Graphire and Wacom Intuos tablet driver"
92#define DRIVER_LICENSE "GPL" 93#define DRIVER_LICENSE "GPL"
@@ -103,15 +104,15 @@ struct wacom {
103 struct usb_device *usbdev; 104 struct usb_device *usbdev;
104 struct usb_interface *intf; 105 struct usb_interface *intf;
105 struct urb *irq; 106 struct urb *irq;
106 struct wacom_wac * wacom_wac; 107 struct wacom_wac *wacom_wac;
107 struct mutex lock; 108 struct mutex lock;
108 unsigned int open:1; 109 unsigned int open:1;
109 char phys[32]; 110 char phys[32];
110}; 111};
111 112
112struct wacom_combo { 113struct wacom_combo {
113 struct wacom * wacom; 114 struct wacom *wacom;
114 struct urb * urb; 115 struct urb *urb;
115}; 116};
116 117
117extern int wacom_wac_irq(struct wacom_wac * wacom_wac, void * wcombo); 118extern int wacom_wac_irq(struct wacom_wac * wacom_wac, void * wcombo);
@@ -132,7 +133,7 @@ extern void input_dev_mo(struct input_dev *input_dev, struct wacom_wac *wacom_wa
132extern void input_dev_bee(struct input_dev *input_dev, struct wacom_wac *wacom_wac); 133extern void input_dev_bee(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
133extern __u16 wacom_le16_to_cpu(unsigned char *data); 134extern __u16 wacom_le16_to_cpu(unsigned char *data);
134extern __u16 wacom_be16_to_cpu(unsigned char *data); 135extern __u16 wacom_be16_to_cpu(unsigned char *data);
135extern struct wacom_features * get_wacom_feature(const struct usb_device_id *id); 136extern struct wacom_features *get_wacom_feature(const struct usb_device_id *id);
136extern const struct usb_device_id * get_device_table(void); 137extern const struct usb_device_id *get_device_table(void);
137 138
138#endif 139#endif
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index 09e227aa0d49..484496daa0f3 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -14,8 +14,41 @@
14#include "wacom.h" 14#include "wacom.h"
15#include "wacom_wac.h" 15#include "wacom_wac.h"
16 16
17/* defines to get HID report descriptor */
18#define HID_DEVICET_HID (USB_TYPE_CLASS | 0x01)
19#define HID_DEVICET_REPORT (USB_TYPE_CLASS | 0x02)
20#define HID_USAGE_UNDEFINED 0x00
21#define HID_USAGE_PAGE 0x05
22#define HID_USAGE_PAGE_DIGITIZER 0x0d
23#define HID_USAGE_PAGE_DESKTOP 0x01
24#define HID_USAGE 0x09
25#define HID_USAGE_X 0x30
26#define HID_USAGE_Y 0x31
27#define HID_USAGE_X_TILT 0x3d
28#define HID_USAGE_Y_TILT 0x3e
29#define HID_USAGE_FINGER 0x22
30#define HID_USAGE_STYLUS 0x20
31#define HID_COLLECTION 0xc0
32
33enum {
34 WCM_UNDEFINED = 0,
35 WCM_DESKTOP,
36 WCM_DIGITIZER,
37};
38
39struct hid_descriptor {
40 struct usb_descriptor_header header;
41 __le16 bcdHID;
42 u8 bCountryCode;
43 u8 bNumDescriptors;
44 u8 bDescriptorType;
45 __le16 wDescriptorLength;
46} __attribute__ ((packed));
47
48/* defines to get/set USB message */
17#define USB_REQ_GET_REPORT 0x01 49#define USB_REQ_GET_REPORT 0x01
18#define USB_REQ_SET_REPORT 0x09 50#define USB_REQ_SET_REPORT 0x09
51#define WAC_HID_FEATURE_REPORT 0x03
19 52
20static int usb_get_report(struct usb_interface *intf, unsigned char type, 53static int usb_get_report(struct usb_interface *intf, unsigned char type,
21 unsigned char id, void *buf, int size) 54 unsigned char id, void *buf, int size)
@@ -80,25 +113,21 @@ static void wacom_sys_irq(struct urb *urb)
80void wacom_report_key(void *wcombo, unsigned int key_type, int key_data) 113void wacom_report_key(void *wcombo, unsigned int key_type, int key_data)
81{ 114{
82 input_report_key(get_input_dev((struct wacom_combo *)wcombo), key_type, key_data); 115 input_report_key(get_input_dev((struct wacom_combo *)wcombo), key_type, key_data);
83 return;
84} 116}
85 117
86void wacom_report_abs(void *wcombo, unsigned int abs_type, int abs_data) 118void wacom_report_abs(void *wcombo, unsigned int abs_type, int abs_data)
87{ 119{
88 input_report_abs(get_input_dev((struct wacom_combo *)wcombo), abs_type, abs_data); 120 input_report_abs(get_input_dev((struct wacom_combo *)wcombo), abs_type, abs_data);
89 return;
90} 121}
91 122
92void wacom_report_rel(void *wcombo, unsigned int rel_type, int rel_data) 123void wacom_report_rel(void *wcombo, unsigned int rel_type, int rel_data)
93{ 124{
94 input_report_rel(get_input_dev((struct wacom_combo *)wcombo), rel_type, rel_data); 125 input_report_rel(get_input_dev((struct wacom_combo *)wcombo), rel_type, rel_data);
95 return;
96} 126}
97 127
98void wacom_input_event(void *wcombo, unsigned int type, unsigned int code, int value) 128void wacom_input_event(void *wcombo, unsigned int type, unsigned int code, int value)
99{ 129{
100 input_event(get_input_dev((struct wacom_combo *)wcombo), type, code, value); 130 input_event(get_input_dev((struct wacom_combo *)wcombo), type, code, value);
101 return;
102} 131}
103 132
104__u16 wacom_be16_to_cpu(unsigned char *data) 133__u16 wacom_be16_to_cpu(unsigned char *data)
@@ -118,7 +147,6 @@ __u16 wacom_le16_to_cpu(unsigned char *data)
118void wacom_input_sync(void *wcombo) 147void wacom_input_sync(void *wcombo)
119{ 148{
120 input_sync(get_input_dev((struct wacom_combo *)wcombo)); 149 input_sync(get_input_dev((struct wacom_combo *)wcombo));
121 return;
122} 150}
123 151
124static int wacom_open(struct input_dev *dev) 152static int wacom_open(struct input_dev *dev)
@@ -160,7 +188,7 @@ static void wacom_close(struct input_dev *dev)
160 188
161void input_dev_mo(struct input_dev *input_dev, struct wacom_wac *wacom_wac) 189void input_dev_mo(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
162{ 190{
163 input_dev->keybit[BIT_WORD(BTN_LEFT)] |= BIT_MASK(BTN_1) | 191 input_dev->keybit[BIT_WORD(BTN_MISC)] |= BIT_MASK(BTN_1) |
164 BIT_MASK(BTN_5); 192 BIT_MASK(BTN_5);
165 input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0); 193 input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0);
166} 194}
@@ -170,7 +198,7 @@ void input_dev_g4(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
170 input_dev->evbit[0] |= BIT_MASK(EV_MSC); 198 input_dev->evbit[0] |= BIT_MASK(EV_MSC);
171 input_dev->mscbit[0] |= BIT_MASK(MSC_SERIAL); 199 input_dev->mscbit[0] |= BIT_MASK(MSC_SERIAL);
172 input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_FINGER); 200 input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_FINGER);
173 input_dev->keybit[BIT_WORD(BTN_LEFT)] |= BIT_MASK(BTN_0) | 201 input_dev->keybit[BIT_WORD(BTN_MISC)] |= BIT_MASK(BTN_0) |
174 BIT_MASK(BTN_4); 202 BIT_MASK(BTN_4);
175} 203}
176 204
@@ -178,7 +206,7 @@ void input_dev_g(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
178{ 206{
179 input_dev->evbit[0] |= BIT_MASK(EV_REL); 207 input_dev->evbit[0] |= BIT_MASK(EV_REL);
180 input_dev->relbit[0] |= BIT_MASK(REL_WHEEL); 208 input_dev->relbit[0] |= BIT_MASK(REL_WHEEL);
181 input_dev->keybit[BIT_WORD(BTN_LEFT)] |= BIT_MASK(BTN_LEFT) | 209 input_dev->keybit[BIT_WORD(BTN_MOUSE)] |= BIT_MASK(BTN_LEFT) |
182 BIT_MASK(BTN_RIGHT) | BIT_MASK(BTN_MIDDLE); 210 BIT_MASK(BTN_RIGHT) | BIT_MASK(BTN_MIDDLE);
183 input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_RUBBER) | 211 input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_RUBBER) |
184 BIT_MASK(BTN_TOOL_MOUSE) | BIT_MASK(BTN_STYLUS2); 212 BIT_MASK(BTN_TOOL_MOUSE) | BIT_MASK(BTN_STYLUS2);
@@ -188,7 +216,7 @@ void input_dev_g(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
188void input_dev_i3s(struct input_dev *input_dev, struct wacom_wac *wacom_wac) 216void input_dev_i3s(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
189{ 217{
190 input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_FINGER); 218 input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_FINGER);
191 input_dev->keybit[BIT_WORD(BTN_LEFT)] |= BIT_MASK(BTN_0) | 219 input_dev->keybit[BIT_WORD(BTN_MISC)] |= BIT_MASK(BTN_0) |
192 BIT_MASK(BTN_1) | BIT_MASK(BTN_2) | BIT_MASK(BTN_3); 220 BIT_MASK(BTN_1) | BIT_MASK(BTN_2) | BIT_MASK(BTN_3);
193 input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0); 221 input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0);
194 input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); 222 input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
@@ -196,14 +224,14 @@ void input_dev_i3s(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
196 224
197void input_dev_i3(struct input_dev *input_dev, struct wacom_wac *wacom_wac) 225void input_dev_i3(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
198{ 226{
199 input_dev->keybit[BIT_WORD(BTN_LEFT)] |= BIT_MASK(BTN_4) | 227 input_dev->keybit[BIT_WORD(BTN_MISC)] |= BIT_MASK(BTN_4) |
200 BIT_MASK(BTN_5) | BIT_MASK(BTN_6) | BIT_MASK(BTN_7); 228 BIT_MASK(BTN_5) | BIT_MASK(BTN_6) | BIT_MASK(BTN_7);
201 input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0); 229 input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0);
202} 230}
203 231
204void input_dev_bee(struct input_dev *input_dev, struct wacom_wac *wacom_wac) 232void input_dev_bee(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
205{ 233{
206 input_dev->keybit[BIT_WORD(BTN_LEFT)] |= BIT_MASK(BTN_8) | BIT_MASK(BTN_9); 234 input_dev->keybit[BIT_WORD(BTN_MISC)] |= BIT_MASK(BTN_8) | BIT_MASK(BTN_9);
207} 235}
208 236
209void input_dev_i(struct input_dev *input_dev, struct wacom_wac *wacom_wac) 237void input_dev_i(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
@@ -211,7 +239,7 @@ void input_dev_i(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
211 input_dev->evbit[0] |= BIT_MASK(EV_MSC) | BIT_MASK(EV_REL); 239 input_dev->evbit[0] |= BIT_MASK(EV_MSC) | BIT_MASK(EV_REL);
212 input_dev->mscbit[0] |= BIT_MASK(MSC_SERIAL); 240 input_dev->mscbit[0] |= BIT_MASK(MSC_SERIAL);
213 input_dev->relbit[0] |= BIT_MASK(REL_WHEEL); 241 input_dev->relbit[0] |= BIT_MASK(REL_WHEEL);
214 input_dev->keybit[BIT_WORD(BTN_LEFT)] |= BIT_MASK(BTN_LEFT) | 242 input_dev->keybit[BIT_WORD(BTN_MOUSE)] |= BIT_MASK(BTN_LEFT) |
215 BIT_MASK(BTN_RIGHT) | BIT_MASK(BTN_MIDDLE) | 243 BIT_MASK(BTN_RIGHT) | BIT_MASK(BTN_MIDDLE) |
216 BIT_MASK(BTN_SIDE) | BIT_MASK(BTN_EXTRA); 244 BIT_MASK(BTN_SIDE) | BIT_MASK(BTN_EXTRA);
217 input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_RUBBER) | 245 input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_RUBBER) |
@@ -228,8 +256,7 @@ void input_dev_i(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
228 256
229void input_dev_pl(struct input_dev *input_dev, struct wacom_wac *wacom_wac) 257void input_dev_pl(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
230{ 258{
231 input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_STYLUS2) | 259 input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_STYLUS2);
232 BIT_MASK(BTN_TOOL_RUBBER);
233} 260}
234 261
235void input_dev_pt(struct input_dev *input_dev, struct wacom_wac *wacom_wac) 262void input_dev_pt(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
@@ -237,15 +264,129 @@ void input_dev_pt(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
237 input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_RUBBER); 264 input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_RUBBER);
238} 265}
239 266
267static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hid_desc,
268 struct wacom_wac *wacom_wac)
269{
270 struct usb_device *dev = interface_to_usbdev(intf);
271 struct wacom_features *features = wacom_wac->features;
272 char limit = 0, result = 0;
273 int i = 0, usage = WCM_UNDEFINED, finger = 0, pen = 0;
274 unsigned char *report;
275
276 report = kzalloc(hid_desc->wDescriptorLength, GFP_KERNEL);
277 if (!report)
278 return -ENOMEM;
279
 280	/* retrieve report descriptors */
281 do {
282 result = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
283 USB_REQ_GET_DESCRIPTOR,
284 USB_RECIP_INTERFACE | USB_DIR_IN,
285 HID_DEVICET_REPORT << 8,
286 intf->altsetting[0].desc.bInterfaceNumber, /* interface */
287 report,
288 hid_desc->wDescriptorLength,
289 5000); /* 5 secs */
290 } while (result < 0 && limit++ < 5);
291
292 if (result < 0)
293 goto out;
294
295 for (i = 0; i < hid_desc->wDescriptorLength; i++) {
296
297 switch (report[i]) {
298 case HID_USAGE_PAGE:
299 switch (report[i + 1]) {
300 case HID_USAGE_PAGE_DIGITIZER:
301 usage = WCM_DIGITIZER;
302 i++;
303 break;
304
305 case HID_USAGE_PAGE_DESKTOP:
306 usage = WCM_DESKTOP;
307 i++;
308 break;
309 }
310 break;
311
312 case HID_USAGE:
313 switch (report[i + 1]) {
314 case HID_USAGE_X:
315 if (usage == WCM_DESKTOP) {
316 if (finger) {
317 features->touch_x_max =
318 features->touch_y_max =
319 wacom_le16_to_cpu(&report[i + 3]);
320 features->x_max =
321 wacom_le16_to_cpu(&report[i + 6]);
322 i += 7;
323 } else if (pen) {
324 features->x_max =
325 wacom_le16_to_cpu(&report[i + 3]);
326 i += 4;
327 }
328 } else if (usage == WCM_DIGITIZER) {
329 /* max pressure isn't reported
330 features->pressure_max = (unsigned short)
331 (report[i+4] << 8 | report[i + 3]);
332 */
333 features->pressure_max = 255;
334 i += 4;
335 }
336 break;
337
338 case HID_USAGE_Y:
339 if (usage == WCM_DESKTOP)
340 features->y_max =
341 wacom_le16_to_cpu(&report[i + 3]);
342 i += 4;
343 break;
344
345 case HID_USAGE_FINGER:
346 finger = 1;
347 i++;
348 break;
349
350 case HID_USAGE_STYLUS:
351 pen = 1;
352 i++;
353 break;
354
355 case HID_USAGE_UNDEFINED:
356 if (usage == WCM_DESKTOP && finger) /* capacity */
357 features->pressure_max =
358 wacom_le16_to_cpu(&report[i + 3]);
359 i += 4;
360 break;
361 }
362 break;
363
364 case HID_COLLECTION:
 365			/* reset UsagePage and Finger */
366 finger = usage = 0;
367 break;
368 }
369 }
370
371 result = 0;
372
373 out:
374 kfree(report);
375 return result;
376}
377
240static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *id) 378static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *id)
241{ 379{
242 struct usb_device *dev = interface_to_usbdev(intf); 380 struct usb_device *dev = interface_to_usbdev(intf);
381 struct usb_host_interface *interface = intf->cur_altsetting;
243 struct usb_endpoint_descriptor *endpoint; 382 struct usb_endpoint_descriptor *endpoint;
244 struct wacom *wacom; 383 struct wacom *wacom;
245 struct wacom_wac *wacom_wac; 384 struct wacom_wac *wacom_wac;
385 struct wacom_features *features;
246 struct input_dev *input_dev; 386 struct input_dev *input_dev;
247 int error = -ENOMEM; 387 int error = -ENOMEM;
248 char rep_data[2], limit = 0; 388 char rep_data[2], limit = 0;
389 struct hid_descriptor *hid_desc;
249 390
250 wacom = kzalloc(sizeof(struct wacom), GFP_KERNEL); 391 wacom = kzalloc(sizeof(struct wacom), GFP_KERNEL);
251 wacom_wac = kzalloc(sizeof(struct wacom_wac), GFP_KERNEL); 392 wacom_wac = kzalloc(sizeof(struct wacom_wac), GFP_KERNEL);
@@ -268,8 +409,8 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
268 usb_make_path(dev, wacom->phys, sizeof(wacom->phys)); 409 usb_make_path(dev, wacom->phys, sizeof(wacom->phys));
269 strlcat(wacom->phys, "/input0", sizeof(wacom->phys)); 410 strlcat(wacom->phys, "/input0", sizeof(wacom->phys));
270 411
271 wacom_wac->features = get_wacom_feature(id); 412 wacom_wac->features = features = get_wacom_feature(id);
272 BUG_ON(wacom_wac->features->pktlen > 10); 413 BUG_ON(features->pktlen > 10);
273 414
274 input_dev->name = wacom_wac->features->name; 415 input_dev->name = wacom_wac->features->name;
275 wacom->wacom_wac = wacom_wac; 416 wacom->wacom_wac = wacom_wac;
@@ -282,18 +423,37 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
282 input_dev->open = wacom_open; 423 input_dev->open = wacom_open;
283 input_dev->close = wacom_close; 424 input_dev->close = wacom_close;
284 425
426 endpoint = &intf->cur_altsetting->endpoint[0].desc;
427
 428	/* TabletPC needs to retrieve the physical and logical maxima from the report descriptor */
429 if (wacom_wac->features->type == TABLETPC) {
430 if (usb_get_extra_descriptor(interface, HID_DEVICET_HID, &hid_desc)) {
431 if (usb_get_extra_descriptor(&interface->endpoint[0],
432 HID_DEVICET_REPORT, &hid_desc)) {
 433				printk("wacom: cannot retrieve extra class descriptor\n");
434 goto fail2;
435 }
436 }
437 error = wacom_parse_hid(intf, hid_desc, wacom_wac);
438 if (error)
439 goto fail2;
440 }
441
285 input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); 442 input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
286 input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_PEN) | 443 input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_PEN) |
287 BIT_MASK(BTN_TOUCH) | BIT_MASK(BTN_STYLUS); 444 BIT_MASK(BTN_TOUCH) | BIT_MASK(BTN_STYLUS);
288 input_set_abs_params(input_dev, ABS_X, 0, wacom_wac->features->x_max, 4, 0); 445 input_set_abs_params(input_dev, ABS_X, 0, features->x_max, 4, 0);
289 input_set_abs_params(input_dev, ABS_Y, 0, wacom_wac->features->y_max, 4, 0); 446 input_set_abs_params(input_dev, ABS_Y, 0, features->y_max, 4, 0);
290 input_set_abs_params(input_dev, ABS_PRESSURE, 0, wacom_wac->features->pressure_max, 0, 0); 447 input_set_abs_params(input_dev, ABS_PRESSURE, 0, features->pressure_max, 0, 0);
448 if (features->type == TABLETPC) {
449 input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_DOUBLETAP);
450 input_set_abs_params(input_dev, ABS_RX, 0, features->touch_x_max, 4, 0);
451 input_set_abs_params(input_dev, ABS_RY, 0, features->touch_y_max, 4, 0);
452 }
291 input_dev->absbit[BIT_WORD(ABS_MISC)] |= BIT_MASK(ABS_MISC); 453 input_dev->absbit[BIT_WORD(ABS_MISC)] |= BIT_MASK(ABS_MISC);
292 454
293 wacom_init_input_dev(input_dev, wacom_wac); 455 wacom_init_input_dev(input_dev, wacom_wac);
294 456
295 endpoint = &intf->cur_altsetting->endpoint[0].desc;
296
297 usb_fill_int_urb(wacom->irq, dev, 457 usb_fill_int_urb(wacom->irq, dev,
298 usb_rcvintpipe(dev, endpoint->bEndpointAddress), 458 usb_rcvintpipe(dev, endpoint->bEndpointAddress),
299 wacom_wac->data, wacom_wac->features->pktlen, 459 wacom_wac->data, wacom_wac->features->pktlen,
@@ -305,13 +465,22 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
305 if (error) 465 if (error)
306 goto fail3; 466 goto fail3;
307 467
308 /* Ask the tablet to report tablet data. Repeat until it succeeds */ 468 /*
309 do { 469 * Ask the tablet to report tablet data if it is not a Tablet PC.
310 rep_data[0] = 2; 470 * Repeat until it succeeds
311 rep_data[1] = 2; 471 */
312 usb_set_report(intf, 3, 2, rep_data, 2); 472 if (wacom_wac->features->type != TABLETPC) {
313 usb_get_report(intf, 3, 2, rep_data, 2); 473 do {
314 } while (rep_data[1] != 2 && limit++ < 5); 474 rep_data[0] = 2;
475 rep_data[1] = 2;
476 error = usb_set_report(intf, WAC_HID_FEATURE_REPORT,
477 2, rep_data, 2);
478 if (error >= 0)
479 error = usb_get_report(intf,
480 WAC_HID_FEATURE_REPORT, 2,
481 rep_data, 2);
482 } while ((error < 0 || rep_data[1] != 2) && limit++ < 5);
483 }
315 484
316 usb_set_intfdata(intf, wacom); 485 usb_set_intfdata(intf, wacom);
317 return 0; 486 return 0;
@@ -333,7 +502,8 @@ static void wacom_disconnect(struct usb_interface *intf)
333 usb_kill_urb(wacom->irq); 502 usb_kill_urb(wacom->irq);
334 input_unregister_device(wacom->dev); 503 input_unregister_device(wacom->dev);
335 usb_free_urb(wacom->irq); 504 usb_free_urb(wacom->irq);
336 usb_buffer_free(interface_to_usbdev(intf), 10, wacom->wacom_wac->data, wacom->data_dma); 505 usb_buffer_free(interface_to_usbdev(intf), 10,
506 wacom->wacom_wac->data, wacom->data_dma);
337 kfree(wacom->wacom_wac); 507 kfree(wacom->wacom_wac);
338 kfree(wacom); 508 kfree(wacom);
339} 509}
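
The new wacom_parse_hid() above pulls 16-bit maxima out of the raw report descriptor with wacom_le16_to_cpu(). That helper is not shown in this hunk; a minimal sketch of what it presumably looks like, modeled on the usual wacom.h style (the exact definition is an assumption, not part of this patch):

	/* Sketch only: assumed little-endian helper, not shown in this patch. */
	static inline __u16 wacom_le16_to_cpu(unsigned char *data)
	{
		/* interpret two consecutive descriptor bytes as a little-endian 16-bit value */
		return le16_to_cpu(*(__le16 *)data);
	}
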
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index bf3d9a8b2c1b..8dc8d1e59bea 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -535,31 +535,147 @@ static int wacom_intuos_irq(struct wacom_wac *wacom, void *wcombo)
535 return 1; 535 return 1;
536} 536}
537 537
538int wacom_tpc_irq(struct wacom_wac *wacom, void *wcombo)
539{
540 char *data = wacom->data;
541 int prox = 0, pressure;
542 static int stylusInProx, touchInProx = 1, touchOut;
543 struct urb *urb = ((struct wacom_combo *)wcombo)->urb;
544
545 dbg("wacom_tpc_irq: received report #%d", data[0]);
546
547 if (urb->actual_length == 5 || data[0] == 6) { /* Touch data */
548 if (urb->actual_length == 5) { /* with touch */
549 prox = data[0] & 0x03;
550 } else { /* with capacity */
551 prox = data[1] & 0x03;
552 }
553
554 if (!stylusInProx) { /* stylus not in prox */
555 if (prox) {
556 if (touchInProx) {
557 wacom->tool[1] = BTN_TOOL_DOUBLETAP;
558 wacom->id[0] = TOUCH_DEVICE_ID;
559 if (urb->actual_length != 5) {
560 wacom_report_abs(wcombo, ABS_X, wacom_le16_to_cpu(&data[2]));
561 wacom_report_abs(wcombo, ABS_Y, wacom_le16_to_cpu(&data[4]));
562 wacom_report_abs(wcombo, ABS_PRESSURE, wacom_le16_to_cpu(&data[6]));
563 wacom_report_key(wcombo, BTN_TOUCH, wacom_le16_to_cpu(&data[6]));
564 } else {
565 wacom_report_abs(wcombo, ABS_X, wacom_le16_to_cpu(&data[1]));
566 wacom_report_abs(wcombo, ABS_Y, wacom_le16_to_cpu(&data[3]));
567 wacom_report_key(wcombo, BTN_TOUCH, 1);
568 }
569 wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]);
570 wacom_report_key(wcombo, wacom->tool[1], prox & 0x01);
571 touchOut = 1;
572 return 1;
573 }
574 } else {
575 wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]);
576 wacom_report_key(wcombo, wacom->tool[1], prox & 0x01);
577 wacom_report_key(wcombo, BTN_TOUCH, 0);
578 touchOut = 0;
579 touchInProx = 1;
580 return 1;
581 }
582 } else if (touchOut || !prox) { /* force touch out-prox */
583 wacom_report_abs(wcombo, ABS_MISC, TOUCH_DEVICE_ID);
584 wacom_report_key(wcombo, BTN_TOUCH, 0);
585 touchOut = 0;
586 touchInProx = 1;
587 return 1;
588 }
589 } else if (data[0] == 2) { /* Penabled */
590 prox = data[1] & 0x20;
591
592 touchInProx = 0;
593
594 wacom->id[0] = ERASER_DEVICE_ID;
595
596 /*
597 * if going from out of proximity into proximity select between the eraser
598 * and the pen based on the state of the stylus2 button, choose eraser if
599 * pressed else choose pen. if not a proximity change from out to in, send
600 * an out of proximity for previous tool then a in for new tool.
601 */
602 if (prox) { /* in prox */
603 if (!wacom->tool[0]) {
604 /* Going into proximity select tool */
605 wacom->tool[1] = (data[1] & 0x08) ? BTN_TOOL_RUBBER : BTN_TOOL_PEN;
606 if (wacom->tool[1] == BTN_TOOL_PEN)
607 wacom->id[0] = STYLUS_DEVICE_ID;
608 } else if (wacom->tool[1] == BTN_TOOL_RUBBER && !(data[1] & 0x08)) {
609 /*
 610				 * proximity was entered with stylus2 pressed;
 611				 * report out-of-proximity for the previous tool
612 */
613 wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]);
614 wacom_report_key(wcombo, wacom->tool[1], 0);
615 wacom_input_sync(wcombo);
616
617 /* set new tool */
618 wacom->tool[1] = BTN_TOOL_PEN;
619 wacom->id[0] = STYLUS_DEVICE_ID;
620 return 0;
621 }
622 if (wacom->tool[1] != BTN_TOOL_RUBBER) {
 623				/* Unknown tool selected, default to pen tool */
624 wacom->tool[1] = BTN_TOOL_PEN;
625 wacom->id[0] = STYLUS_DEVICE_ID;
626 }
627 wacom_report_key(wcombo, BTN_STYLUS, data[1] & 0x02);
628 wacom_report_key(wcombo, BTN_STYLUS2, data[1] & 0x10);
629 wacom_report_abs(wcombo, ABS_X, wacom_le16_to_cpu(&data[2]));
630 wacom_report_abs(wcombo, ABS_Y, wacom_le16_to_cpu(&data[4]));
631 pressure = ((data[7] & 0x01) << 8) | data[6];
632 if (pressure < 0)
633 pressure = wacom->features->pressure_max + pressure + 1;
634 wacom_report_abs(wcombo, ABS_PRESSURE, pressure);
635 wacom_report_key(wcombo, BTN_TOUCH, pressure);
636 } else {
637 wacom_report_abs(wcombo, ABS_PRESSURE, 0);
638 wacom_report_key(wcombo, BTN_STYLUS, 0);
639 wacom_report_key(wcombo, BTN_STYLUS2, 0);
640 wacom_report_key(wcombo, BTN_TOUCH, 0);
641 }
642 wacom_report_key(wcombo, wacom->tool[1], prox);
643 wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]);
644 stylusInProx = prox;
645 wacom->tool[0] = prox;
646 return 1;
647 }
648 return 0;
649}
650
538int wacom_wac_irq(struct wacom_wac *wacom_wac, void *wcombo) 651int wacom_wac_irq(struct wacom_wac *wacom_wac, void *wcombo)
539{ 652{
540 switch (wacom_wac->features->type) { 653 switch (wacom_wac->features->type) {
541 case PENPARTNER: 654 case PENPARTNER:
542 return (wacom_penpartner_irq(wacom_wac, wcombo)); 655 return wacom_penpartner_irq(wacom_wac, wcombo);
543 break; 656
544 case PL: 657 case PL:
545 return (wacom_pl_irq(wacom_wac, wcombo)); 658 return wacom_pl_irq(wacom_wac, wcombo);
546 break; 659
547 case WACOM_G4: 660 case WACOM_G4:
548 case GRAPHIRE: 661 case GRAPHIRE:
549 case WACOM_MO: 662 case WACOM_MO:
550 return (wacom_graphire_irq(wacom_wac, wcombo)); 663 return wacom_graphire_irq(wacom_wac, wcombo);
551 break; 664
552 case PTU: 665 case PTU:
553 return (wacom_ptu_irq(wacom_wac, wcombo)); 666 return wacom_ptu_irq(wacom_wac, wcombo);
554 break; 667
555 case INTUOS: 668 case INTUOS:
556 case INTUOS3S: 669 case INTUOS3S:
557 case INTUOS3: 670 case INTUOS3:
558 case INTUOS3L: 671 case INTUOS3L:
559 case CINTIQ: 672 case CINTIQ:
560 case WACOM_BEE: 673 case WACOM_BEE:
561 return (wacom_intuos_irq(wacom_wac, wcombo)); 674 return wacom_intuos_irq(wacom_wac, wcombo);
562 break; 675
676 case TABLETPC:
677 return wacom_tpc_irq(wacom_wac, wcombo);
678
563 default: 679 default:
564 return 0; 680 return 0;
565 } 681 }
@@ -586,13 +702,15 @@ void wacom_init_input_dev(struct input_dev *input_dev, struct wacom_wac *wacom_w
586 /* fall through */ 702 /* fall through */
587 case INTUOS3S: 703 case INTUOS3S:
588 input_dev_i3s(input_dev, wacom_wac); 704 input_dev_i3s(input_dev, wacom_wac);
705 /* fall through */
589 case INTUOS: 706 case INTUOS:
590 input_dev_i(input_dev, wacom_wac); 707 input_dev_i(input_dev, wacom_wac);
591 break; 708 break;
592 case PL: 709 case PL:
593 case PTU: 710 case PTU:
711 case TABLETPC:
594 input_dev_pl(input_dev, wacom_wac); 712 input_dev_pl(input_dev, wacom_wac);
595 break; 713 /* fall through */
596 case PENPARTNER: 714 case PENPARTNER:
597 input_dev_pt(input_dev, wacom_wac); 715 input_dev_pt(input_dev, wacom_wac);
598 break; 716 break;
@@ -611,6 +729,7 @@ static struct wacom_features wacom_features[] = {
611 { "Wacom Graphire4 6x8", 8, 16704, 12064, 511, 63, WACOM_G4 }, 729 { "Wacom Graphire4 6x8", 8, 16704, 12064, 511, 63, WACOM_G4 },
612 { "Wacom BambooFun 4x5", 9, 14760, 9225, 511, 63, WACOM_MO }, 730 { "Wacom BambooFun 4x5", 9, 14760, 9225, 511, 63, WACOM_MO },
613 { "Wacom BambooFun 6x8", 9, 21648, 13530, 511, 63, WACOM_MO }, 731 { "Wacom BambooFun 6x8", 9, 21648, 13530, 511, 63, WACOM_MO },
732 { "Wacom Bamboo1 Medium",8, 16704, 12064, 511, 63, GRAPHIRE },
614 { "Wacom Volito", 8, 5104, 3712, 511, 63, GRAPHIRE }, 733 { "Wacom Volito", 8, 5104, 3712, 511, 63, GRAPHIRE },
615 { "Wacom PenStation2", 8, 3250, 2320, 255, 63, GRAPHIRE }, 734 { "Wacom PenStation2", 8, 3250, 2320, 255, 63, GRAPHIRE },
616 { "Wacom Volito2 4x5", 8, 5104, 3712, 511, 63, GRAPHIRE }, 735 { "Wacom Volito2 4x5", 8, 5104, 3712, 511, 63, GRAPHIRE },
@@ -650,6 +769,10 @@ static struct wacom_features wacom_features[] = {
650 { "Wacom Cintiq 21UX", 10, 87200, 65600, 1023, 63, CINTIQ }, 769 { "Wacom Cintiq 21UX", 10, 87200, 65600, 1023, 63, CINTIQ },
651 { "Wacom Cintiq 20WSX", 10, 86680, 54180, 1023, 63, WACOM_BEE }, 770 { "Wacom Cintiq 20WSX", 10, 86680, 54180, 1023, 63, WACOM_BEE },
652 { "Wacom Cintiq 12WX", 10, 53020, 33440, 1023, 63, WACOM_BEE }, 771 { "Wacom Cintiq 12WX", 10, 53020, 33440, 1023, 63, WACOM_BEE },
772 { "Wacom DTU1931", 8, 37832, 30305, 511, 0, PL },
773 { "Wacom ISDv4 90", 8, 26202, 16325, 255, 0, TABLETPC },
774 { "Wacom ISDv4 93", 8, 26202, 16325, 255, 0, TABLETPC },
775 { "Wacom ISDv4 9A", 8, 26202, 16325, 255, 0, TABLETPC },
653 { "Wacom Intuos2 6x8", 10, 20320, 16240, 1023, 31, INTUOS }, 776 { "Wacom Intuos2 6x8", 10, 20320, 16240, 1023, 31, INTUOS },
654 { } 777 { }
655}; 778};
@@ -665,6 +788,7 @@ static struct usb_device_id wacom_ids[] = {
665 { USB_DEVICE(USB_VENDOR_ID_WACOM, 0x16) }, 788 { USB_DEVICE(USB_VENDOR_ID_WACOM, 0x16) },
666 { USB_DEVICE(USB_VENDOR_ID_WACOM, 0x17) }, 789 { USB_DEVICE(USB_VENDOR_ID_WACOM, 0x17) },
667 { USB_DEVICE(USB_VENDOR_ID_WACOM, 0x18) }, 790 { USB_DEVICE(USB_VENDOR_ID_WACOM, 0x18) },
791 { USB_DEVICE(USB_VENDOR_ID_WACOM, 0x19) },
668 { USB_DEVICE(USB_VENDOR_ID_WACOM, 0x60) }, 792 { USB_DEVICE(USB_VENDOR_ID_WACOM, 0x60) },
669 { USB_DEVICE(USB_VENDOR_ID_WACOM, 0x61) }, 793 { USB_DEVICE(USB_VENDOR_ID_WACOM, 0x61) },
670 { USB_DEVICE(USB_VENDOR_ID_WACOM, 0x62) }, 794 { USB_DEVICE(USB_VENDOR_ID_WACOM, 0x62) },
@@ -704,18 +828,26 @@ static struct usb_device_id wacom_ids[] = {
704 { USB_DEVICE(USB_VENDOR_ID_WACOM, 0x3F) }, 828 { USB_DEVICE(USB_VENDOR_ID_WACOM, 0x3F) },
705 { USB_DEVICE(USB_VENDOR_ID_WACOM, 0xC5) }, 829 { USB_DEVICE(USB_VENDOR_ID_WACOM, 0xC5) },
706 { USB_DEVICE(USB_VENDOR_ID_WACOM, 0xC6) }, 830 { USB_DEVICE(USB_VENDOR_ID_WACOM, 0xC6) },
831 { USB_DEVICE(USB_VENDOR_ID_WACOM, 0xC7) },
832 { USB_DEVICE(USB_VENDOR_ID_WACOM, 0x90) },
833 { USB_DEVICE(USB_VENDOR_ID_WACOM, 0x93) },
834 { USB_DEVICE(USB_VENDOR_ID_WACOM, 0x9A) },
707 { USB_DEVICE(USB_VENDOR_ID_WACOM, 0x47) }, 835 { USB_DEVICE(USB_VENDOR_ID_WACOM, 0x47) },
708 { } 836 { }
709}; 837};
710 838
711const struct usb_device_id * get_device_table(void) { 839const struct usb_device_id *get_device_table(void)
712 const struct usb_device_id * id_table = wacom_ids; 840{
841 const struct usb_device_id *id_table = wacom_ids;
842
713 return id_table; 843 return id_table;
714} 844}
715 845
716struct wacom_features * get_wacom_feature(const struct usb_device_id * id) { 846struct wacom_features * get_wacom_feature(const struct usb_device_id *id)
847{
717 int index = id - wacom_ids; 848 int index = id - wacom_ids;
718 struct wacom_features *wf = &wacom_features[index]; 849 struct wacom_features *wf = &wacom_features[index];
850
719 return wf; 851 return wf;
720} 852}
721 853
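
In wacom_tpc_irq() above, pen pressure is assembled from a 9-bit field split across data[6] and data[7]. A standalone worked example of that assembly (the packet bytes here are hypothetical, chosen only to illustrate the arithmetic):

	/* Worked example of the 9-bit pressure assembly; the bytes are made up. */
	#include <stdio.h>

	int main(void)
	{
		unsigned char data[8] = { 2, 0x20, 0, 0, 0, 0, 0x34, 0x01 };
		int pressure = ((data[7] & 0x01) << 8) | data[6];

		/* (1 << 8) | 0x34 = 256 + 52 = 308, within the 0..511 range */
		printf("pressure = %d\n", pressure);
		return 0;
	}
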
diff --git a/drivers/input/tablet/wacom_wac.h b/drivers/input/tablet/wacom_wac.h
index 3342bc05847d..f9c8b69673b7 100644
--- a/drivers/input/tablet/wacom_wac.h
+++ b/drivers/input/tablet/wacom_wac.h
@@ -10,6 +10,7 @@
10#define WACOM_WAC_H 10#define WACOM_WAC_H
11 11
12#define STYLUS_DEVICE_ID 0x02 12#define STYLUS_DEVICE_ID 0x02
13#define TOUCH_DEVICE_ID 0x03
13#define CURSOR_DEVICE_ID 0x06 14#define CURSOR_DEVICE_ID 0x06
14#define ERASER_DEVICE_ID 0x0A 15#define ERASER_DEVICE_ID 0x0A
15#define PAD_DEVICE_ID 0x0F 16#define PAD_DEVICE_ID 0x0F
@@ -27,6 +28,7 @@ enum {
27 CINTIQ, 28 CINTIQ,
28 WACOM_BEE, 29 WACOM_BEE,
29 WACOM_MO, 30 WACOM_MO,
31 TABLETPC,
30 MAX_TYPE 32 MAX_TYPE
31}; 33};
32 34
@@ -38,6 +40,8 @@ struct wacom_features {
38 int pressure_max; 40 int pressure_max;
39 int distance_max; 41 int distance_max;
40 int type; 42 int type;
43 int touch_x_max;
44 int touch_y_max;
41}; 45};
42 46
43struct wacom_wac { 47struct wacom_wac {
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 8317fdef1691..3d1ab8fa9acc 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -42,8 +42,9 @@ config TOUCHSCREEN_BITSY
42 module will be called h3600_ts_input. 42 module will be called h3600_ts_input.
43 43
44config TOUCHSCREEN_CORGI 44config TOUCHSCREEN_CORGI
45 tristate "SharpSL (Corgi and Spitz series) touchscreen driver" 45 tristate "SharpSL (Corgi and Spitz series) touchscreen driver (DEPRECATED)"
46 depends on PXA_SHARPSL 46 depends on PXA_SHARPSL
47 select CORGI_SSP_DEPRECATED
47 default y 48 default y
48 help 49 help
49 Say Y here to enable the driver for the touchscreen on the 50 Say Y here to enable the driver for the touchscreen on the
@@ -54,6 +55,9 @@ config TOUCHSCREEN_CORGI
54 To compile this driver as a module, choose M here: the 55 To compile this driver as a module, choose M here: the
55 module will be called corgi_ts. 56 module will be called corgi_ts.
56 57
 58	  NOTE: this driver is deprecated, try enabling SPI and the generic
 59	  ADS7846-based touchscreen driver instead.
60
57config TOUCHSCREEN_FUJITSU 61config TOUCHSCREEN_FUJITSU
58 tristate "Fujitsu serial touchscreen" 62 tristate "Fujitsu serial touchscreen"
59 select SERIO 63 select SERIO
diff --git a/drivers/input/touchscreen/elo.c b/drivers/input/touchscreen/elo.c
index d20689cdbd5d..8f38c5e55ce6 100644
--- a/drivers/input/touchscreen/elo.c
+++ b/drivers/input/touchscreen/elo.c
@@ -262,7 +262,7 @@ static int elo_setup_10(struct elo *elo)
262 input_set_abs_params(dev, ABS_PRESSURE, 0, 255, 0, 0); 262 input_set_abs_params(dev, ABS_PRESSURE, 0, 255, 0, 0);
263 263
264 printk(KERN_INFO "elo: %sTouch touchscreen, fw: %02x.%02x, " 264 printk(KERN_INFO "elo: %sTouch touchscreen, fw: %02x.%02x, "
265 "features: %x02x, controller: 0x%02x\n", 265 "features: 0x%02x, controller: 0x%02x\n",
266 elo_types[(packet[1] -'0') & 0x03], 266 elo_types[(packet[1] -'0') & 0x03],
267 packet[5], packet[4], packet[3], packet[7]); 267 packet[5], packet[4], packet[3], packet[7]);
268 268
diff --git a/drivers/input/xen-kbdfront.c b/drivers/input/xen-kbdfront.c
index 3ab6362f043c..928d2ed8865f 100644
--- a/drivers/input/xen-kbdfront.c
+++ b/drivers/input/xen-kbdfront.c
@@ -323,7 +323,7 @@ static struct xenbus_device_id xenkbd_ids[] = {
323 { "" } 323 { "" }
324}; 324};
325 325
326static struct xenbus_driver xenkbd = { 326static struct xenbus_driver xenkbd_driver = {
327 .name = "vkbd", 327 .name = "vkbd",
328 .owner = THIS_MODULE, 328 .owner = THIS_MODULE,
329 .ids = xenkbd_ids, 329 .ids = xenkbd_ids,
@@ -342,12 +342,12 @@ static int __init xenkbd_init(void)
342 if (xen_initial_domain()) 342 if (xen_initial_domain())
343 return -ENODEV; 343 return -ENODEV;
344 344
345 return xenbus_register_frontend(&xenkbd); 345 return xenbus_register_frontend(&xenkbd_driver);
346} 346}
347 347
348static void __exit xenkbd_cleanup(void) 348static void __exit xenkbd_cleanup(void)
349{ 349{
350 xenbus_unregister_driver(&xenkbd); 350 xenbus_unregister_driver(&xenkbd_driver);
351} 351}
352 352
353module_init(xenkbd_init); 353module_init(xenkbd_init);
diff --git a/drivers/isdn/hardware/avm/b1isa.c b/drivers/isdn/hardware/avm/b1isa.c
index 1e288eeb5e2a..6461a32bc838 100644
--- a/drivers/isdn/hardware/avm/b1isa.c
+++ b/drivers/isdn/hardware/avm/b1isa.c
@@ -233,10 +233,8 @@ static void __exit b1isa_exit(void)
233 int i; 233 int i;
234 234
235 for (i = 0; i < MAX_CARDS; i++) { 235 for (i = 0; i < MAX_CARDS; i++) {
236 if (!io[i]) 236 if (isa_dev[i].resource[0].start)
237 break; 237 b1isa_remove(&isa_dev[i]);
238
239 b1isa_remove(&isa_dev[i]);
240 } 238 }
241 unregister_capi_driver(&capi_driver_b1isa); 239 unregister_capi_driver(&capi_driver_b1isa);
242} 240}
diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c
index 84d75a3f5d17..ded9d0baf607 100644
--- a/drivers/isdn/hisax/config.c
+++ b/drivers/isdn/hisax/config.c
@@ -1213,7 +1213,7 @@ static void HiSax_shiftcards(int idx)
1213 memcpy(&cards[i], &cards[i + 1], sizeof(cards[i])); 1213 memcpy(&cards[i], &cards[i + 1], sizeof(cards[i]));
1214} 1214}
1215 1215
1216static int HiSax_inithardware(int *busy_flag) 1216static int __init HiSax_inithardware(int *busy_flag)
1217{ 1217{
1218 int foundcards = 0; 1218 int foundcards = 0;
1219 int i = 0; 1219 int i = 0;
@@ -1542,7 +1542,9 @@ static void __exit HiSax_exit(void)
1542 printk(KERN_INFO "HiSax module removed\n"); 1542 printk(KERN_INFO "HiSax module removed\n");
1543} 1543}
1544 1544
1545int hisax_init_pcmcia(void *pcm_iob, int *busy_flag, struct IsdnCard *card) 1545#ifdef CONFIG_HOTPLUG
1546
1547int __devinit hisax_init_pcmcia(void *pcm_iob, int *busy_flag, struct IsdnCard *card)
1546{ 1548{
1547 u_char ids[16]; 1549 u_char ids[16];
1548 int ret = -1; 1550 int ret = -1;
@@ -1563,6 +1565,8 @@ error:
1563} 1565}
1564 1566
1565EXPORT_SYMBOL(hisax_init_pcmcia); 1567EXPORT_SYMBOL(hisax_init_pcmcia);
1568#endif
1569
1566EXPORT_SYMBOL(HiSax_closecard); 1570EXPORT_SYMBOL(HiSax_closecard);
1567 1571
1568#include "hisax_if.h" 1572#include "hisax_if.h"
@@ -1580,6 +1584,11 @@ static void hisax_bc_close(struct BCState *bcs);
1580static void hisax_bh(struct work_struct *work); 1584static void hisax_bh(struct work_struct *work);
1581static void EChannel_proc_rcv(struct hisax_d_if *d_if); 1585static void EChannel_proc_rcv(struct hisax_d_if *d_if);
1582 1586
1587static int hisax_setup_card_dynamic(struct IsdnCard *card)
1588{
1589 return 2;
1590}
1591
1583int hisax_register(struct hisax_d_if *hisax_d_if, struct hisax_b_if *b_if[], 1592int hisax_register(struct hisax_d_if *hisax_d_if, struct hisax_b_if *b_if[],
1584 char *name, int protocol) 1593 char *name, int protocol)
1585{ 1594{
@@ -1599,7 +1608,8 @@ int hisax_register(struct hisax_d_if *hisax_d_if, struct hisax_b_if *b_if[],
1599 cards[i].protocol = protocol; 1608 cards[i].protocol = protocol;
1600 sprintf(id, "%s%d", name, i); 1609 sprintf(id, "%s%d", name, i);
1601 nrcards++; 1610 nrcards++;
1602 retval = checkcard(i, id, NULL, hisax_d_if->owner, hisax_cs_setup_card); 1611 retval = checkcard(i, id, NULL, hisax_d_if->owner,
1612 hisax_setup_card_dynamic);
1603 if (retval == 0) { // yuck 1613 if (retval == 0) { // yuck
1604 cards[i].typ = 0; 1614 cards[i].typ = 0;
1605 nrcards--; 1615 nrcards--;
diff --git a/drivers/isdn/hysdn/hysdn_net.c b/drivers/isdn/hysdn/hysdn_net.c
index cfa8fa5e44ab..3f2a0a20c19b 100644
--- a/drivers/isdn/hysdn/hysdn_net.c
+++ b/drivers/isdn/hysdn/hysdn_net.c
@@ -83,12 +83,12 @@ net_open(struct net_device *dev)
83 83
84 /* Fill in the MAC-level header (if not already set) */ 84 /* Fill in the MAC-level header (if not already set) */
85 if (!card->mac_addr[0]) { 85 if (!card->mac_addr[0]) {
86 for (i = 0; i < ETH_ALEN - sizeof(unsigned long); i++) 86 for (i = 0; i < ETH_ALEN; i++)
87 dev->dev_addr[i] = 0xfc; 87 dev->dev_addr[i] = 0xfc;
88 if ((in_dev = dev->ip_ptr) != NULL) { 88 if ((in_dev = dev->ip_ptr) != NULL) {
89 struct in_ifaddr *ifa = in_dev->ifa_list; 89 struct in_ifaddr *ifa = in_dev->ifa_list;
90 if (ifa != NULL) 90 if (ifa != NULL)
91 memcpy(dev->dev_addr + (ETH_ALEN - sizeof(unsigned long)), &ifa->ifa_local, sizeof(unsigned long)); 91 memcpy(dev->dev_addr + (ETH_ALEN - sizeof(ifa->ifa_local)), &ifa->ifa_local, sizeof(ifa->ifa_local));
92 } 92 }
93 } else 93 } else
94 memcpy(dev->dev_addr, card->mac_addr, ETH_ALEN); 94 memcpy(dev->dev_addr, card->mac_addr, ETH_ALEN);
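
The hysdn_net.c change above stops assuming sizeof(unsigned long) == 4 when faking a MAC address: it now fills all six bytes with 0xfc and overlays the last sizeof(ifa->ifa_local) bytes with the local IPv4 address. A small sketch of the resulting layout, assuming a 4-byte __be32 ifa_local:

	/* Sketch of the synthetic MAC layout produced by the fixed code. */
	unsigned char dev_addr[6];		/* ETH_ALEN */
	__be32 ip = ifa->ifa_local;		/* e.g. 192.168.1.2, already in network byte order */
	int i;

	for (i = 0; i < 6; i++)
		dev_addr[i] = 0xfc;
	memcpy(dev_addr + (6 - sizeof(ip)), &ip, sizeof(ip));
	/* result: fc:fc:c0:a8:01:02 for 192.168.1.2 */
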
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index bb904a0a98bd..1bfc55d7a26c 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -1641,8 +1641,10 @@ isdn_net_ciscohdlck_slarp_send_reply(isdn_net_local *lp)
1641 /* slarp reply, send own ip/netmask; if values are nonsense remote 1641 /* slarp reply, send own ip/netmask; if values are nonsense remote
1642 * should think we are unable to provide it with an address via SLARP */ 1642 * should think we are unable to provide it with an address via SLARP */
1643 p += put_u32(p, CISCO_SLARP_REPLY); 1643 p += put_u32(p, CISCO_SLARP_REPLY);
1644 p += put_u32(p, addr); // address 1644 *(__be32 *)p = addr; // address
1645 p += put_u32(p, mask); // netmask 1645 p += 4;
1646 *(__be32 *)p = mask; // netmask
1647 p += 4;
1646 p += put_u16(p, 0); // unused 1648 p += put_u16(p, 0); // unused
1647 1649
1648 isdn_net_write_super(lp, skb); 1650 isdn_net_write_super(lp, skb);
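
The isdn_net.c hunk above writes addr and mask into the SLARP reply as raw __be32 values instead of going through put_u32(), because both are already in network byte order and the helper would swap them again. For context, the helper being bypassed presumably has roughly this shape (an assumption drawn from its use, not shown in the patch):

	/* Assumed shape of put_u32(); illustrative only. */
	static inline int put_u32(unsigned char *p, u32 x)
	{
		*((u32 *)p) = htonl(x);	/* converts host order to network order */
		return 4;		/* callers advance their cursor by the return value */
	}
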
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index c7ff1e11ea85..e7fb7d2fcbfc 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -113,11 +113,12 @@ config LEDS_GPIO
113 outputs. To be useful the particular board must have LEDs 113 outputs. To be useful the particular board must have LEDs
114 and they must be connected to the GPIO lines. 114 and they must be connected to the GPIO lines.
115 115
116config LEDS_CM_X270 116config LEDS_HP_DISK
117 tristate "LED Support for the CM-X270 LEDs" 117 tristate "LED Support for disk protection LED on HP notebooks"
118 depends on LEDS_CLASS && MACH_ARMCORE 118 depends on LEDS_CLASS && ACPI
119 help 119 help
120 This option enables support for the CM-X270 LEDs. 120 This option enable support for disk protection LED, found on
121 newer HP notebooks.
121 122
122config LEDS_CLEVO_MAIL 123config LEDS_CLEVO_MAIL
123 tristate "Mail LED on Clevo notebook (EXPERIMENTAL)" 124 tristate "Mail LED on Clevo notebook (EXPERIMENTAL)"
@@ -157,6 +158,13 @@ config LEDS_PCA955X
157 LED driver chips accessed via the I2C bus. Supported 158 LED driver chips accessed via the I2C bus. Supported
158 devices include PCA9550, PCA9551, PCA9552, and PCA9553. 159 devices include PCA9550, PCA9551, PCA9552, and PCA9553.
159 160
161config LEDS_DA903X
162 tristate "LED Support for DA9030/DA9034 PMIC"
163 depends on LEDS_CLASS && PMIC_DA903X
164 help
165 This option enables support for on-chip LED drivers found
166 on Dialog Semiconductor DA9030/DA9034 PMICs.
167
160comment "LED Triggers" 168comment "LED Triggers"
161 169
162config LEDS_TRIGGERS 170config LEDS_TRIGGERS
@@ -193,6 +201,15 @@ config LEDS_TRIGGER_HEARTBEAT
193 load average. 201 load average.
194 If unsure, say Y. 202 If unsure, say Y.
195 203
204config LEDS_TRIGGER_BACKLIGHT
205 tristate "LED backlight Trigger"
206 depends on LEDS_TRIGGERS
207 help
208 This allows LEDs to be controlled as a backlight device: they
209 turn off and on when the display is blanked and unblanked.
210
211 If unsure, say N.
212
196config LEDS_TRIGGER_DEFAULT_ON 213config LEDS_TRIGGER_DEFAULT_ON
197 tristate "LED Default ON Trigger" 214 tristate "LED Default ON Trigger"
198 depends on LEDS_TRIGGERS 215 depends on LEDS_TRIGGERS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index eb186c351a1c..e1967a29850e 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -17,14 +17,16 @@ obj-$(CONFIG_LEDS_COBALT_RAQ) += leds-cobalt-raq.o
17obj-$(CONFIG_LEDS_SUNFIRE) += leds-sunfire.o 17obj-$(CONFIG_LEDS_SUNFIRE) += leds-sunfire.o
18obj-$(CONFIG_LEDS_PCA9532) += leds-pca9532.o 18obj-$(CONFIG_LEDS_PCA9532) += leds-pca9532.o
19obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o 19obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o
20obj-$(CONFIG_LEDS_CM_X270) += leds-cm-x270.o
21obj-$(CONFIG_LEDS_CLEVO_MAIL) += leds-clevo-mail.o 20obj-$(CONFIG_LEDS_CLEVO_MAIL) += leds-clevo-mail.o
22obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o 21obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o
23obj-$(CONFIG_LEDS_FSG) += leds-fsg.o 22obj-$(CONFIG_LEDS_FSG) += leds-fsg.o
24obj-$(CONFIG_LEDS_PCA955X) += leds-pca955x.o 23obj-$(CONFIG_LEDS_PCA955X) += leds-pca955x.o
24obj-$(CONFIG_LEDS_DA903X) += leds-da903x.o
25obj-$(CONFIG_LEDS_HP_DISK) += leds-hp-disk.o
25 26
26# LED Triggers 27# LED Triggers
27obj-$(CONFIG_LEDS_TRIGGER_TIMER) += ledtrig-timer.o 28obj-$(CONFIG_LEDS_TRIGGER_TIMER) += ledtrig-timer.o
28obj-$(CONFIG_LEDS_TRIGGER_IDE_DISK) += ledtrig-ide-disk.o 29obj-$(CONFIG_LEDS_TRIGGER_IDE_DISK) += ledtrig-ide-disk.o
29obj-$(CONFIG_LEDS_TRIGGER_HEARTBEAT) += ledtrig-heartbeat.o 30obj-$(CONFIG_LEDS_TRIGGER_HEARTBEAT) += ledtrig-heartbeat.o
31obj-$(CONFIG_LEDS_TRIGGER_BACKLIGHT) += ledtrig-backlight.o
30obj-$(CONFIG_LEDS_TRIGGER_DEFAULT_ON) += ledtrig-default-on.o 32obj-$(CONFIG_LEDS_TRIGGER_DEFAULT_ON) += ledtrig-default-on.o
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index ee74ee7b2acc..6c4a326176d7 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -34,14 +34,11 @@ static ssize_t led_brightness_show(struct device *dev,
34 struct device_attribute *attr, char *buf) 34 struct device_attribute *attr, char *buf)
35{ 35{
36 struct led_classdev *led_cdev = dev_get_drvdata(dev); 36 struct led_classdev *led_cdev = dev_get_drvdata(dev);
37 ssize_t ret = 0;
38 37
39 /* no lock needed for this */ 38 /* no lock needed for this */
40 led_update_brightness(led_cdev); 39 led_update_brightness(led_cdev);
41 sprintf(buf, "%u\n", led_cdev->brightness);
42 ret = strlen(buf) + 1;
43 40
44 return ret; 41 return sprintf(buf, "%u\n", led_cdev->brightness);
45} 42}
46 43
47static ssize_t led_brightness_store(struct device *dev, 44static ssize_t led_brightness_store(struct device *dev,
@@ -113,6 +110,9 @@ int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
113 if (rc) 110 if (rc)
114 goto err_out; 111 goto err_out;
115 112
113#ifdef CONFIG_LEDS_TRIGGERS
114 init_rwsem(&led_cdev->trigger_lock);
115#endif
116 /* add to the list of leds */ 116 /* add to the list of leds */
117 down_write(&leds_list_lock); 117 down_write(&leds_list_lock);
118 list_add_tail(&led_cdev->node, &leds_list); 118 list_add_tail(&led_cdev->node, &leds_list);
@@ -121,8 +121,6 @@ int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
121 led_update_brightness(led_cdev); 121 led_update_brightness(led_cdev);
122 122
123#ifdef CONFIG_LEDS_TRIGGERS 123#ifdef CONFIG_LEDS_TRIGGERS
124 init_rwsem(&led_cdev->trigger_lock);
125
126 rc = device_create_file(led_cdev->dev, &dev_attr_trigger); 124 rc = device_create_file(led_cdev->dev, &dev_attr_trigger);
127 if (rc) 125 if (rc)
128 goto err_out_led_list; 126 goto err_out_led_list;
@@ -147,7 +145,7 @@ err_out:
147EXPORT_SYMBOL_GPL(led_classdev_register); 145EXPORT_SYMBOL_GPL(led_classdev_register);
148 146
149/** 147/**
 150 * __led_classdev_unregister - unregisters a object of led_properties class. 148 * led_classdev_unregister - unregisters an object of the led_properties class.
151 * @led_cdev: the led device to unregister 149 * @led_cdev: the led device to unregister
152 * 150 *
 153 * Unregisters an object previously registered via led_classdev_register. 151 * Unregisters an object previously registered via led_classdev_register.
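
The led_brightness_show() change above replaces the sprintf()-then-strlen()+1 pattern with the standard sysfs idiom of returning sprintf()'s own byte count. A minimal illustrative show callback using that idiom (the attribute name and the value being exposed are made up for the example):

	/* Illustrative sysfs show callback; the names are hypothetical. */
	#include <linux/device.h>

	static unsigned int example_value;	/* hypothetical state being exposed */

	static ssize_t example_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
	{
		/* sprintf returns the number of bytes written, which sysfs expects back */
		return sprintf(buf, "%u\n", example_value);
	}
	static DEVICE_ATTR(example, 0444, example_show, NULL);
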
diff --git a/drivers/leds/leds-ams-delta.c b/drivers/leds/leds-ams-delta.c
index 32c98b2efa3f..1bd590bb3a6e 100644
--- a/drivers/leds/leds-ams-delta.c
+++ b/drivers/leds/leds-ams-delta.c
@@ -107,27 +107,27 @@ static int ams_delta_led_resume(struct platform_device *dev)
107 107
108static int ams_delta_led_probe(struct platform_device *pdev) 108static int ams_delta_led_probe(struct platform_device *pdev)
109{ 109{
110 int i; 110 int i, ret;
111 int ret;
112 111
113 for (i = ret = 0; ret >= 0 && i < ARRAY_SIZE(ams_delta_leds); i++) { 112 for (i = 0; i < ARRAY_SIZE(ams_delta_leds); i++) {
114 ret = led_classdev_register(&pdev->dev, 113 ret = led_classdev_register(&pdev->dev,
115 &ams_delta_leds[i].cdev); 114 &ams_delta_leds[i].cdev);
115 if (ret < 0)
116 goto fail;
116 } 117 }
117 118
118 if (ret < 0 && i > 1) { 119 return 0;
119 for (i = i - 2; i >= 0; i--) 120fail:
120 led_classdev_unregister(&ams_delta_leds[i].cdev); 121 while (--i >= 0)
121 } 122 led_classdev_unregister(&ams_delta_leds[i].cdev);
122 123 return ret;
123 return ret;
124} 124}
125 125
126static int ams_delta_led_remove(struct platform_device *pdev) 126static int ams_delta_led_remove(struct platform_device *pdev)
127{ 127{
128 int i; 128 int i;
129 129
 130	for (i = ARRAY_SIZE(ams_delta_leds) - 1; i >= 0; i--) 130	for (i = 0; i < ARRAY_SIZE(ams_delta_leds); i++)
131 led_classdev_unregister(&ams_delta_leds[i].cdev); 131 led_classdev_unregister(&ams_delta_leds[i].cdev);
132 132
133 return 0; 133 return 0;
diff --git a/drivers/leds/leds-cm-x270.c b/drivers/leds/leds-cm-x270.c
deleted file mode 100644
index 836a43d776e6..000000000000
--- a/drivers/leds/leds-cm-x270.c
+++ /dev/null
@@ -1,124 +0,0 @@
1/*
2 * drivers/leds/leds-cm-x270.c
3 *
4 * Copyright 2007 CompuLab Ltd.
5 * Author: Mike Rapoport <mike@compulab.co.il>
6 *
7 * Based on leds-corgi.c
8 * Author: Richard Purdie <rpurdie@openedhand.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 */
15
16#include <linux/kernel.h>
17#include <linux/init.h>
18#include <linux/platform_device.h>
19#include <linux/leds.h>
20
21#include <mach/hardware.h>
22#include <mach/pxa-regs.h>
23
24#define GPIO_RED_LED (93)
25#define GPIO_GREEN_LED (94)
26
27static void cmx270_red_set(struct led_classdev *led_cdev,
28 enum led_brightness value)
29{
30 if (value)
31 GPCR(GPIO_RED_LED) = GPIO_bit(GPIO_RED_LED);
32 else
33 GPSR(GPIO_RED_LED) = GPIO_bit(GPIO_RED_LED);
34}
35
36static void cmx270_green_set(struct led_classdev *led_cdev,
37 enum led_brightness value)
38{
39 if (value)
40 GPCR(GPIO_GREEN_LED) = GPIO_bit(GPIO_GREEN_LED);
41 else
42 GPSR(GPIO_GREEN_LED) = GPIO_bit(GPIO_GREEN_LED);
43}
44
45static struct led_classdev cmx270_red_led = {
46 .name = "cm-x270:red",
47 .default_trigger = "nand-disk",
48 .brightness_set = cmx270_red_set,
49};
50
51static struct led_classdev cmx270_green_led = {
52 .name = "cm-x270:green",
53 .default_trigger = "heartbeat",
54 .brightness_set = cmx270_green_set,
55};
56
57#ifdef CONFIG_PM
58static int cmx270led_suspend(struct platform_device *dev, pm_message_t state)
59{
60 led_classdev_suspend(&cmx270_red_led);
61 led_classdev_suspend(&cmx270_green_led);
62 return 0;
63}
64
65static int cmx270led_resume(struct platform_device *dev)
66{
67 led_classdev_resume(&cmx270_red_led);
68 led_classdev_resume(&cmx270_green_led);
69 return 0;
70}
71#endif
72
73static int cmx270led_probe(struct platform_device *pdev)
74{
75 int ret;
76
77 ret = led_classdev_register(&pdev->dev, &cmx270_red_led);
78 if (ret < 0)
79 return ret;
80
81 ret = led_classdev_register(&pdev->dev, &cmx270_green_led);
82 if (ret < 0)
83 led_classdev_unregister(&cmx270_red_led);
84
85 return ret;
86}
87
88static int cmx270led_remove(struct platform_device *pdev)
89{
90 led_classdev_unregister(&cmx270_red_led);
91 led_classdev_unregister(&cmx270_green_led);
92 return 0;
93}
94
95static struct platform_driver cmx270led_driver = {
96 .probe = cmx270led_probe,
97 .remove = cmx270led_remove,
98#ifdef CONFIG_PM
99 .suspend = cmx270led_suspend,
100 .resume = cmx270led_resume,
101#endif
102 .driver = {
103 .name = "cm-x270-led",
104 .owner = THIS_MODULE,
105 },
106};
107
108static int __init cmx270led_init(void)
109{
110 return platform_driver_register(&cmx270led_driver);
111}
112
113static void __exit cmx270led_exit(void)
114{
115 platform_driver_unregister(&cmx270led_driver);
116}
117
118module_init(cmx270led_init);
119module_exit(cmx270led_exit);
120
121MODULE_AUTHOR("Mike Rapoport <mike@compulab.co.il>");
122MODULE_DESCRIPTION("CM-x270 LED driver");
123MODULE_LICENSE("GPL");
124MODULE_ALIAS("platform:cm-x270-led");
diff --git a/drivers/leds/leds-da903x.c b/drivers/leds/leds-da903x.c
new file mode 100644
index 000000000000..1f3cc512eff8
--- /dev/null
+++ b/drivers/leds/leds-da903x.c
@@ -0,0 +1,176 @@
1/*
2 * LEDs driver for Dialog Semiconductor DA9030/DA9034
3 *
4 * Copyright (C) 2008 Compulab, Ltd.
5 * Mike Rapoport <mike@compulab.co.il>
6 *
7 * Copyright (C) 2006-2008 Marvell International Ltd.
8 * Eric Miao <eric.miao@marvell.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14
15#include <linux/module.h>
16#include <linux/kernel.h>
17#include <linux/init.h>
18#include <linux/platform_device.h>
19#include <linux/leds.h>
20#include <linux/workqueue.h>
21#include <linux/mfd/da903x.h>
22
23#define DA9030_LED1_CONTROL 0x20
24#define DA9030_LED2_CONTROL 0x21
25#define DA9030_LED3_CONTROL 0x22
26#define DA9030_LED4_CONTROL 0x23
27#define DA9030_LEDPC_CONTROL 0x24
28#define DA9030_MISC_CONTROL_A 0x26 /* Vibrator Control */
29
30#define DA9034_LED1_CONTROL 0x35
31#define DA9034_LED2_CONTROL 0x36
32#define DA9034_VIBRA 0x40
33
34struct da903x_led {
35 struct led_classdev cdev;
36 struct work_struct work;
37 struct device *master;
38 enum led_brightness new_brightness;
39 int id;
40 int flags;
41};
42
43#define DA9030_LED_OFFSET(id) ((id) - DA9030_ID_LED_1)
44#define DA9034_LED_OFFSET(id) ((id) - DA9034_ID_LED_1)
45
46static void da903x_led_work(struct work_struct *work)
47{
48 struct da903x_led *led = container_of(work, struct da903x_led, work);
49 uint8_t val;
50 int offset;
51
52 switch (led->id) {
53 case DA9030_ID_LED_1:
54 case DA9030_ID_LED_2:
55 case DA9030_ID_LED_3:
56 case DA9030_ID_LED_4:
57 case DA9030_ID_LED_PC:
58 offset = DA9030_LED_OFFSET(led->id);
59 val = led->flags & ~0x87;
60 val |= (led->new_brightness) ? 0x80 : 0; /* EN bit */
61 val |= (0x7 - (led->new_brightness >> 5)) & 0x7; /* PWM<2:0> */
62 da903x_write(led->master, DA9030_LED1_CONTROL + offset, val);
63 break;
64 case DA9030_ID_VIBRA:
65 val = led->flags & ~0x80;
66 val |= (led->new_brightness) ? 0x80 : 0; /* EN bit */
67 da903x_write(led->master, DA9030_MISC_CONTROL_A, val);
68 break;
69 case DA9034_ID_LED_1:
70 case DA9034_ID_LED_2:
71 offset = DA9034_LED_OFFSET(led->id);
72 val = (led->new_brightness * 0x5f / LED_FULL) & 0x7f;
73 val |= (led->flags & DA9034_LED_RAMP) ? 0x80 : 0;
74 da903x_write(led->master, DA9034_LED1_CONTROL + offset, val);
75 break;
76 case DA9034_ID_VIBRA:
77 val = led->new_brightness & 0xfe;
78 da903x_write(led->master, DA9034_VIBRA, val);
79 break;
80 }
81}
82
83static void da903x_led_set(struct led_classdev *led_cdev,
84 enum led_brightness value)
85{
86 struct da903x_led *led;
87
88 led = container_of(led_cdev, struct da903x_led, cdev);
89 led->new_brightness = value;
90 schedule_work(&led->work);
91}
92
93static int __devinit da903x_led_probe(struct platform_device *pdev)
94{
95 struct led_info *pdata = pdev->dev.platform_data;
96 struct da903x_led *led;
97 int id, ret;
98
99 if (pdata == NULL)
100 return 0;
101
102 id = pdev->id;
103
104 if (!((id >= DA9030_ID_LED_1 && id <= DA9030_ID_VIBRA) ||
105 (id >= DA9034_ID_LED_1 && id <= DA9034_ID_VIBRA))) {
106 dev_err(&pdev->dev, "invalid LED ID (%d) specified\n", id);
107 return -EINVAL;
108 }
109
110 led = kzalloc(sizeof(struct da903x_led), GFP_KERNEL);
111 if (led == NULL) {
112 dev_err(&pdev->dev, "failed to alloc memory for LED%d\n", id);
113 return -ENOMEM;
114 }
115
116 led->cdev.name = pdata->name;
117 led->cdev.default_trigger = pdata->default_trigger;
118 led->cdev.brightness_set = da903x_led_set;
119 led->cdev.brightness = LED_OFF;
120
121 led->id = id;
122 led->flags = pdata->flags;
123 led->master = pdev->dev.parent;
124 led->new_brightness = LED_OFF;
125
126 INIT_WORK(&led->work, da903x_led_work);
127
128 ret = led_classdev_register(led->master, &led->cdev);
129 if (ret) {
130 dev_err(&pdev->dev, "failed to register LED %d\n", id);
131 goto err;
132 }
133
134 platform_set_drvdata(pdev, led);
135 return 0;
136
137err:
138 kfree(led);
139 return ret;
140}
141
142static int __devexit da903x_led_remove(struct platform_device *pdev)
143{
144 struct da903x_led *led = platform_get_drvdata(pdev);
145
146 led_classdev_unregister(&led->cdev);
147 kfree(led);
148 return 0;
149}
150
151static struct platform_driver da903x_led_driver = {
152 .driver = {
153 .name = "da903x-led",
154 .owner = THIS_MODULE,
155 },
156 .probe = da903x_led_probe,
157 .remove = __devexit_p(da903x_led_remove),
158};
159
160static int __init da903x_led_init(void)
161{
162 return platform_driver_register(&da903x_led_driver);
163}
164module_init(da903x_led_init);
165
166static void __exit da903x_led_exit(void)
167{
168 platform_driver_unregister(&da903x_led_driver);
169}
170module_exit(da903x_led_exit);
171
172MODULE_DESCRIPTION("LEDs driver for Dialog Semiconductor DA9030/DA9034");
173MODULE_AUTHOR("Eric Miao <eric.miao@marvell.com>"
174 "Mike Rapoport <mike@compulab.co.il>");
175MODULE_LICENSE("GPL");
176MODULE_ALIAS("platform:da903x-led");
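
The new leds-da903x.c driver expects each platform device to carry a struct led_info as platform data and an id in the DA9030/DA9034 LED ranges, as its probe above checks. A rough sketch of the platform data a board might supply (the LED name and trigger are placeholders, and how the parent da903x MFD instantiates the child device is not shown here):

	/* Sketch: platform data consumed by da903x_led_probe(); names are placeholders. */
	static struct led_info board_da9030_led1 = {
		.name			= "board:red:status",
		.default_trigger	= "heartbeat",
		.flags			= 0,	/* no extra control-register bits */
	};
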
diff --git a/drivers/leds/leds-hp-disk.c b/drivers/leds/leds-hp-disk.c
new file mode 100644
index 000000000000..44fa757d8254
--- /dev/null
+++ b/drivers/leds/leds-hp-disk.c
@@ -0,0 +1,155 @@
1/*
2 * leds-hp-disk.c - driver for HP "hard disk protection" LED
3 *
4 * Copyright (C) 2008 Pavel Machek
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#include <linux/kernel.h>
22#include <linux/init.h>
23#include <linux/dmi.h>
24#include <linux/module.h>
25#include <linux/types.h>
26#include <linux/platform_device.h>
27#include <linux/interrupt.h>
28#include <linux/input.h>
29#include <linux/kthread.h>
30#include <linux/leds.h>
31#include <acpi/acpi_drivers.h>
32
33#define DRIVER_NAME "leds-hp-disk"
34#define ACPI_MDPS_CLASS "led"
35
36/* For automatic insertion of the module */
37static struct acpi_device_id hpled_device_ids[] = {
38 {"HPQ0004", 0}, /* HP Mobile Data Protection System PNP */
39 {"", 0},
40};
41MODULE_DEVICE_TABLE(acpi, hpled_device_ids);
42
43struct acpi_hpled {
44 struct acpi_device *device; /* The ACPI device */
45};
46
47static struct acpi_hpled adev;
48
49static acpi_status hpled_acpi_write(acpi_handle handle, int reg)
50{
51 unsigned long long ret; /* Not used when writing */
52 union acpi_object in_obj[1];
53 struct acpi_object_list args = { 1, in_obj };
54
55 in_obj[0].type = ACPI_TYPE_INTEGER;
56 in_obj[0].integer.value = reg;
57
58 return acpi_evaluate_integer(handle, "ALED", &args, &ret);
59}
60
61static void hpled_set(struct led_classdev *led_cdev,
62 enum led_brightness value)
63{
64 hpled_acpi_write(adev.device->handle, !!value);
65}
66
67static struct led_classdev hpled_led = {
68 .name = "hp:red:hddprotection",
69 .default_trigger = "heartbeat",
70 .brightness_set = hpled_set,
71};
72
73#ifdef CONFIG_PM
74static int hpled_suspend(struct acpi_device *dev, pm_message_t state)
75{
76 led_classdev_suspend(&hpled_led);
77 return 0;
78}
79
80static int hpled_resume(struct acpi_device *dev)
81{
82 led_classdev_resume(&hpled_led);
83 return 0;
84}
85#else
86#define hpled_suspend NULL
87#define hpled_resume NULL
88#endif
89
90static int hpled_add(struct acpi_device *device)
91{
92 int ret;
93
94 if (!device)
95 return -EINVAL;
96
97 adev.device = device;
98 strcpy(acpi_device_name(device), DRIVER_NAME);
99 strcpy(acpi_device_class(device), ACPI_MDPS_CLASS);
100 device->driver_data = &adev;
101
102 ret = led_classdev_register(NULL, &hpled_led);
103 return ret;
104}
105
106static int hpled_remove(struct acpi_device *device, int type)
107{
108 if (!device)
109 return -EINVAL;
110
111 led_classdev_unregister(&hpled_led);
112 return 0;
113}
114
115
116
117static struct acpi_driver leds_hp_driver = {
118 .name = DRIVER_NAME,
119 .class = ACPI_MDPS_CLASS,
120 .ids = hpled_device_ids,
121 .ops = {
122 .add = hpled_add,
123 .remove = hpled_remove,
124 .suspend = hpled_suspend,
125 .resume = hpled_resume,
126 }
127};
128
129static int __init hpled_init_module(void)
130{
131 int ret;
132
133 if (acpi_disabled)
134 return -ENODEV;
135
136 ret = acpi_bus_register_driver(&leds_hp_driver);
137 if (ret < 0)
138 return ret;
139
140 printk(KERN_INFO DRIVER_NAME " driver loaded.\n");
141
142 return 0;
143}
144
145static void __exit hpled_exit_module(void)
146{
147 acpi_bus_unregister_driver(&leds_hp_driver);
148}
149
150MODULE_DESCRIPTION("Driver for HP disk protection LED");
151MODULE_AUTHOR("Pavel Machek <pavel@suse.cz>");
152MODULE_LICENSE("GPL");
153
154module_init(hpled_init_module);
155module_exit(hpled_exit_module);
diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c
index f508729123b5..4e2d1a42b48f 100644
--- a/drivers/leds/leds-pca955x.c
+++ b/drivers/leds/leds-pca955x.c
@@ -226,7 +226,7 @@ static void pca955x_led_work(struct work_struct *work)
226 pca955x_write_ls(pca955x->client, chip_ls, ls); 226 pca955x_write_ls(pca955x->client, chip_ls, ls);
227} 227}
228 228
229void pca955x_led_set(struct led_classdev *led_cdev, enum led_brightness value) 229static void pca955x_led_set(struct led_classdev *led_cdev, enum led_brightness value)
230{ 230{
231 struct pca955x_led *pca955x; 231 struct pca955x_led *pca955x;
232 232
diff --git a/drivers/leds/leds-wrap.c b/drivers/leds/leds-wrap.c
index 7ac61a7b56ad..2f3aa87f2a1f 100644
--- a/drivers/leds/leds-wrap.c
+++ b/drivers/leds/leds-wrap.c
@@ -53,8 +53,9 @@ static void wrap_extra_led_set(struct led_classdev *led_cdev,
53} 53}
54 54
55static struct led_classdev wrap_power_led = { 55static struct led_classdev wrap_power_led = {
56 .name = "wrap::power", 56 .name = "wrap::power",
57 .brightness_set = wrap_power_led_set, 57 .brightness_set = wrap_power_led_set,
58 .default_trigger = "default-on",
58}; 59};
59 60
60static struct led_classdev wrap_error_led = { 61static struct led_classdev wrap_error_led = {
diff --git a/drivers/leds/ledtrig-backlight.c b/drivers/leds/ledtrig-backlight.c
new file mode 100644
index 000000000000..d3dfcfb417b8
--- /dev/null
+++ b/drivers/leds/ledtrig-backlight.c
@@ -0,0 +1,110 @@
1/*
2 * Backlight emulation LED trigger
3 *
4 * Copyright 2008 (C) Rodolfo Giometti <giometti@linux.it>
5 * Copyright 2008 (C) Eurotech S.p.A. <info@eurotech.it>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 */
12
13#include <linux/module.h>
14#include <linux/kernel.h>
15#include <linux/init.h>
16#include <linux/fb.h>
17#include <linux/leds.h>
18#include "leds.h"
19
20#define BLANK 1
21#define UNBLANK 0
22
23struct bl_trig_notifier {
24 struct led_classdev *led;
25 int brightness;
26 int old_status;
27 struct notifier_block notifier;
28};
29
30static int fb_notifier_callback(struct notifier_block *p,
31 unsigned long event, void *data)
32{
33 struct bl_trig_notifier *n = container_of(p,
34 struct bl_trig_notifier, notifier);
35 struct led_classdev *led = n->led;
36 struct fb_event *fb_event = data;
37 int *blank = fb_event->data;
38
39 switch (event) {
40 case FB_EVENT_BLANK :
41 if (*blank && n->old_status == UNBLANK) {
42 n->brightness = led->brightness;
43 led_set_brightness(led, LED_OFF);
44 n->old_status = BLANK;
45 } else if (!*blank && n->old_status == BLANK) {
46 led_set_brightness(led, n->brightness);
47 n->old_status = UNBLANK;
48 }
49 break;
50 }
51
52 return 0;
53}
54
55static void bl_trig_activate(struct led_classdev *led)
56{
57 int ret;
58
59 struct bl_trig_notifier *n;
60
61 n = kzalloc(sizeof(struct bl_trig_notifier), GFP_KERNEL);
62 led->trigger_data = n;
63 if (!n) {
64 dev_err(led->dev, "unable to allocate backlight trigger\n");
65 return;
66 }
67
68 n->led = led;
69 n->brightness = led->brightness;
70 n->old_status = UNBLANK;
71 n->notifier.notifier_call = fb_notifier_callback;
72
73 ret = fb_register_client(&n->notifier);
74 if (ret)
75 dev_err(led->dev, "unable to register backlight trigger\n");
76}
77
78static void bl_trig_deactivate(struct led_classdev *led)
79{
80 struct bl_trig_notifier *n =
81 (struct bl_trig_notifier *) led->trigger_data;
82
83 if (n) {
84 fb_unregister_client(&n->notifier);
85 kfree(n);
86 }
87}
88
89static struct led_trigger bl_led_trigger = {
90 .name = "backlight",
91 .activate = bl_trig_activate,
92 .deactivate = bl_trig_deactivate
93};
94
95static int __init bl_trig_init(void)
96{
97 return led_trigger_register(&bl_led_trigger);
98}
99
100static void __exit bl_trig_exit(void)
101{
102 led_trigger_unregister(&bl_led_trigger);
103}
104
105module_init(bl_trig_init);
106module_exit(bl_trig_exit);
107
108MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>");
109MODULE_DESCRIPTION("Backlight emulation LED trigger");
110MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/ledtrig-timer.c b/drivers/leds/ledtrig-timer.c
index 5c99f4f0c692..db681962d7bb 100644
--- a/drivers/leds/ledtrig-timer.c
+++ b/drivers/leds/ledtrig-timer.c
@@ -70,9 +70,7 @@ static ssize_t led_delay_on_show(struct device *dev,
70 struct led_classdev *led_cdev = dev_get_drvdata(dev); 70 struct led_classdev *led_cdev = dev_get_drvdata(dev);
71 struct timer_trig_data *timer_data = led_cdev->trigger_data; 71 struct timer_trig_data *timer_data = led_cdev->trigger_data;
72 72
73 sprintf(buf, "%lu\n", timer_data->delay_on); 73 return sprintf(buf, "%lu\n", timer_data->delay_on);
74
75 return strlen(buf) + 1;
76} 74}
77 75
78static ssize_t led_delay_on_store(struct device *dev, 76static ssize_t led_delay_on_store(struct device *dev,
@@ -116,9 +114,7 @@ static ssize_t led_delay_off_show(struct device *dev,
116 struct led_classdev *led_cdev = dev_get_drvdata(dev); 114 struct led_classdev *led_cdev = dev_get_drvdata(dev);
117 struct timer_trig_data *timer_data = led_cdev->trigger_data; 115 struct timer_trig_data *timer_data = led_cdev->trigger_data;
118 116
119 sprintf(buf, "%lu\n", timer_data->delay_off); 117 return sprintf(buf, "%lu\n", timer_data->delay_off);
120
121 return strlen(buf) + 1;
122} 118}
123 119
124static ssize_t led_delay_off_store(struct device *dev, 120static ssize_t led_delay_off_store(struct device *dev,
diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c
index 2c21d4f25cc8..a98ab72adf95 100644
--- a/drivers/macintosh/rack-meter.c
+++ b/drivers/macintosh/rack-meter.c
@@ -288,7 +288,7 @@ static void __devexit rackmeter_stop_cpu_sniffer(struct rackmeter *rm)
288 cancel_rearming_delayed_work(&rm->cpu[1].sniffer); 288 cancel_rearming_delayed_work(&rm->cpu[1].sniffer);
289} 289}
290 290
291static int rackmeter_setup(struct rackmeter *rm) 291static int __devinit rackmeter_setup(struct rackmeter *rm)
292{ 292{
293 pr_debug("rackmeter: setting up i2s..\n"); 293 pr_debug("rackmeter: setting up i2s..\n");
294 rackmeter_setup_i2s(rm); 294 rackmeter_setup_i2s(rm);
@@ -582,12 +582,12 @@ static struct of_device_id rackmeter_match[] = {
582 { } 582 { }
583}; 583};
584 584
585static struct macio_driver rackmeter_drv = { 585static struct macio_driver rackmeter_driver = {
586 .name = "rackmeter", 586 .name = "rackmeter",
587 .owner = THIS_MODULE, 587 .owner = THIS_MODULE,
588 .match_table = rackmeter_match, 588 .match_table = rackmeter_match,
589 .probe = rackmeter_probe, 589 .probe = rackmeter_probe,
590 .remove = rackmeter_remove, 590 .remove = __devexit_p(rackmeter_remove),
591 .shutdown = rackmeter_shutdown, 591 .shutdown = rackmeter_shutdown,
592}; 592};
593 593
@@ -596,14 +596,14 @@ static int __init rackmeter_init(void)
596{ 596{
597 pr_debug("rackmeter_init()\n"); 597 pr_debug("rackmeter_init()\n");
598 598
599 return macio_register_driver(&rackmeter_drv); 599 return macio_register_driver(&rackmeter_driver);
600} 600}
601 601
602static void __exit rackmeter_exit(void) 602static void __exit rackmeter_exit(void)
603{ 603{
604 pr_debug("rackmeter_exit()\n"); 604 pr_debug("rackmeter_exit()\n");
605 605
606 macio_unregister_driver(&rackmeter_drv); 606 macio_unregister_driver(&rackmeter_driver);
607} 607}
608 608
609module_init(rackmeter_init); 609module_init(rackmeter_init);
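
Editor's note: besides renaming rackmeter_drv to rackmeter_driver, the rack-meter hunks annotate the setup path with __devinit and wrap the remove callback in __devexit_p(). On kernels of this vintage those markers allow hotplug-only code to be discarded when CONFIG_HOTPLUG is disabled, with __devexit_p() collapsing to NULL so the driver structure stays valid. A hedged sketch of the general pattern, using a hypothetical platform driver rather than the macio driver above:

    /* Sketch of the __devinit/__devexit annotation pattern (hypothetical
     * driver). With CONFIG_HOTPLUG=n, __devexit functions may be dropped
     * from the image and __devexit_p() evaluates to NULL. */
    static int __devinit example_probe(struct platform_device *pdev)
    {
            return 0;       /* device setup would go here */
    }

    static int __devexit example_remove(struct platform_device *pdev)
    {
            return 0;       /* teardown would go here */
    }

    static struct platform_driver example_driver = {
            .driver = { .name = "example", .owner = THIS_MODULE },
            .probe  = example_probe,
            .remove = __devexit_p(example_remove),
    };
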
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index f1ef33dfd8cf..1c615804ea76 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -34,7 +34,7 @@ obj-$(CONFIG_DM_CRYPT) += dm-crypt.o
34obj-$(CONFIG_DM_DELAY) += dm-delay.o 34obj-$(CONFIG_DM_DELAY) += dm-delay.o
35obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o 35obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o
36obj-$(CONFIG_DM_SNAPSHOT) += dm-snapshot.o 36obj-$(CONFIG_DM_SNAPSHOT) += dm-snapshot.o
37obj-$(CONFIG_DM_MIRROR) += dm-mirror.o dm-log.o 37obj-$(CONFIG_DM_MIRROR) += dm-mirror.o dm-log.o dm-region-hash.o
38obj-$(CONFIG_DM_ZERO) += dm-zero.o 38obj-$(CONFIG_DM_ZERO) += dm-zero.o
39 39
40quiet_cmd_unroll = UNROLL $@ 40quiet_cmd_unroll = UNROLL $@
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index ac89a5deaca2..ab7c8e4a61f9 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -208,16 +208,19 @@ static void bitmap_checkfree(struct bitmap *bitmap, unsigned long page)
208 */ 208 */
209 209
210/* IO operations when bitmap is stored near all superblocks */ 210/* IO operations when bitmap is stored near all superblocks */
211static struct page *read_sb_page(mddev_t *mddev, long offset, unsigned long index) 211static struct page *read_sb_page(mddev_t *mddev, long offset,
212 struct page *page,
213 unsigned long index, int size)
212{ 214{
213 /* choose a good rdev and read the page from there */ 215 /* choose a good rdev and read the page from there */
214 216
215 mdk_rdev_t *rdev; 217 mdk_rdev_t *rdev;
216 struct list_head *tmp; 218 struct list_head *tmp;
217 struct page *page = alloc_page(GFP_KERNEL);
218 sector_t target; 219 sector_t target;
219 220
220 if (!page) 221 if (!page)
222 page = alloc_page(GFP_KERNEL);
223 if (!page)
221 return ERR_PTR(-ENOMEM); 224 return ERR_PTR(-ENOMEM);
222 225
223 rdev_for_each(rdev, tmp, mddev) { 226 rdev_for_each(rdev, tmp, mddev) {
@@ -227,7 +230,9 @@ static struct page *read_sb_page(mddev_t *mddev, long offset, unsigned long inde
227 230
228 target = rdev->sb_start + offset + index * (PAGE_SIZE/512); 231 target = rdev->sb_start + offset + index * (PAGE_SIZE/512);
229 232
230 if (sync_page_io(rdev->bdev, target, PAGE_SIZE, page, READ)) { 233 if (sync_page_io(rdev->bdev, target,
234 roundup(size, bdev_hardsect_size(rdev->bdev)),
235 page, READ)) {
231 page->index = index; 236 page->index = index;
232 attach_page_buffers(page, NULL); /* so that free_buffer will 237 attach_page_buffers(page, NULL); /* so that free_buffer will
233 * quietly no-op */ 238 * quietly no-op */
@@ -544,7 +549,9 @@ static int bitmap_read_sb(struct bitmap *bitmap)
544 549
545 bitmap->sb_page = read_page(bitmap->file, 0, bitmap, bytes); 550 bitmap->sb_page = read_page(bitmap->file, 0, bitmap, bytes);
546 } else { 551 } else {
547 bitmap->sb_page = read_sb_page(bitmap->mddev, bitmap->offset, 0); 552 bitmap->sb_page = read_sb_page(bitmap->mddev, bitmap->offset,
553 NULL,
554 0, sizeof(bitmap_super_t));
548 } 555 }
549 if (IS_ERR(bitmap->sb_page)) { 556 if (IS_ERR(bitmap->sb_page)) {
550 err = PTR_ERR(bitmap->sb_page); 557 err = PTR_ERR(bitmap->sb_page);
@@ -957,11 +964,16 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
957 */ 964 */
958 page = bitmap->sb_page; 965 page = bitmap->sb_page;
959 offset = sizeof(bitmap_super_t); 966 offset = sizeof(bitmap_super_t);
967 read_sb_page(bitmap->mddev, bitmap->offset,
968 page,
969 index, count);
960 } else if (file) { 970 } else if (file) {
961 page = read_page(file, index, bitmap, count); 971 page = read_page(file, index, bitmap, count);
962 offset = 0; 972 offset = 0;
963 } else { 973 } else {
964 page = read_sb_page(bitmap->mddev, bitmap->offset, index); 974 page = read_sb_page(bitmap->mddev, bitmap->offset,
975 NULL,
976 index, count);
965 offset = 0; 977 offset = 0;
966 } 978 }
967 if (IS_ERR(page)) { /* read error */ 979 if (IS_ERR(page)) { /* read error */
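
Editor's note: read_sb_page() now accepts an optional pre-allocated page plus an explicit byte count, and rounds that count up to the device's hardware sector size so sync_page_io() is never asked for a partial sector. A rough sketch of the size calculation, assuming bdev_hardsect_size() reports the device's logical sector size as it did in this era:

    /* Sketch: round a byte count up to whole device sectors before
     * issuing the read (assumes the 2.6.2x-era bdev_hardsect_size()). */
    static int read_rounded(struct block_device *bdev, sector_t sector,
                            struct page *page, int size)
    {
            int sectsize = bdev_hardsect_size(bdev);   /* e.g. 512 or 4096 */
            int bytes = roundup(size, sectsize);       /* never a partial sector */

            return sync_page_io(bdev, sector, bytes, page, READ);
    }
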
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 682ef9e6acd3..ce26c84af064 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -23,7 +23,7 @@
23#include <asm/page.h> 23#include <asm/page.h>
24#include <asm/unaligned.h> 24#include <asm/unaligned.h>
25 25
26#include "dm.h" 26#include <linux/device-mapper.h>
27 27
28#define DM_MSG_PREFIX "crypt" 28#define DM_MSG_PREFIX "crypt"
29#define MESG_STR(x) x, sizeof(x) 29#define MESG_STR(x) x, sizeof(x)
@@ -56,6 +56,7 @@ struct dm_crypt_io {
56 atomic_t pending; 56 atomic_t pending;
57 int error; 57 int error;
58 sector_t sector; 58 sector_t sector;
59 struct dm_crypt_io *base_io;
59}; 60};
60 61
61struct dm_crypt_request { 62struct dm_crypt_request {
@@ -93,7 +94,6 @@ struct crypt_config {
93 94
94 struct workqueue_struct *io_queue; 95 struct workqueue_struct *io_queue;
95 struct workqueue_struct *crypt_queue; 96 struct workqueue_struct *crypt_queue;
96 wait_queue_head_t writeq;
97 97
98 /* 98 /*
99 * crypto related data 99 * crypto related data
@@ -534,6 +534,7 @@ static struct dm_crypt_io *crypt_io_alloc(struct dm_target *ti,
534 io->base_bio = bio; 534 io->base_bio = bio;
535 io->sector = sector; 535 io->sector = sector;
536 io->error = 0; 536 io->error = 0;
537 io->base_io = NULL;
537 atomic_set(&io->pending, 0); 538 atomic_set(&io->pending, 0);
538 539
539 return io; 540 return io;
@@ -547,6 +548,7 @@ static void crypt_inc_pending(struct dm_crypt_io *io)
547/* 548/*
548 * One of the bios was finished. Check for completion of 549 * One of the bios was finished. Check for completion of
549 * the whole request and correctly clean up the buffer. 550 * the whole request and correctly clean up the buffer.
551 * If base_io is set, wait for the last fragment to complete.
550 */ 552 */
551static void crypt_dec_pending(struct dm_crypt_io *io) 553static void crypt_dec_pending(struct dm_crypt_io *io)
552{ 554{
@@ -555,7 +557,14 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
555 if (!atomic_dec_and_test(&io->pending)) 557 if (!atomic_dec_and_test(&io->pending))
556 return; 558 return;
557 559
558 bio_endio(io->base_bio, io->error); 560 if (likely(!io->base_io))
561 bio_endio(io->base_bio, io->error);
562 else {
563 if (io->error && !io->base_io->error)
564 io->base_io->error = io->error;
565 crypt_dec_pending(io->base_io);
566 }
567
559 mempool_free(io, cc->io_pool); 568 mempool_free(io, cc->io_pool);
560} 569}
561 570
@@ -646,10 +655,7 @@ static void kcryptd_io_read(struct dm_crypt_io *io)
646static void kcryptd_io_write(struct dm_crypt_io *io) 655static void kcryptd_io_write(struct dm_crypt_io *io)
647{ 656{
648 struct bio *clone = io->ctx.bio_out; 657 struct bio *clone = io->ctx.bio_out;
649 struct crypt_config *cc = io->target->private;
650
651 generic_make_request(clone); 658 generic_make_request(clone);
652 wake_up(&cc->writeq);
653} 659}
654 660
655static void kcryptd_io(struct work_struct *work) 661static void kcryptd_io(struct work_struct *work)
@@ -688,7 +694,6 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
688 BUG_ON(io->ctx.idx_out < clone->bi_vcnt); 694 BUG_ON(io->ctx.idx_out < clone->bi_vcnt);
689 695
690 clone->bi_sector = cc->start + io->sector; 696 clone->bi_sector = cc->start + io->sector;
691 io->sector += bio_sectors(clone);
692 697
693 if (async) 698 if (async)
694 kcryptd_queue_io(io); 699 kcryptd_queue_io(io);
@@ -700,16 +705,18 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
700{ 705{
701 struct crypt_config *cc = io->target->private; 706 struct crypt_config *cc = io->target->private;
702 struct bio *clone; 707 struct bio *clone;
708 struct dm_crypt_io *new_io;
703 int crypt_finished; 709 int crypt_finished;
704 unsigned out_of_pages = 0; 710 unsigned out_of_pages = 0;
705 unsigned remaining = io->base_bio->bi_size; 711 unsigned remaining = io->base_bio->bi_size;
712 sector_t sector = io->sector;
706 int r; 713 int r;
707 714
708 /* 715 /*
709 * Prevent io from disappearing until this function completes. 716 * Prevent io from disappearing until this function completes.
710 */ 717 */
711 crypt_inc_pending(io); 718 crypt_inc_pending(io);
712 crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, io->sector); 719 crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);
713 720
714 /* 721 /*
715 * The allocated buffers can be smaller than the whole bio, 722 * The allocated buffers can be smaller than the whole bio,
@@ -726,6 +733,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
726 io->ctx.idx_out = 0; 733 io->ctx.idx_out = 0;
727 734
728 remaining -= clone->bi_size; 735 remaining -= clone->bi_size;
736 sector += bio_sectors(clone);
729 737
730 crypt_inc_pending(io); 738 crypt_inc_pending(io);
731 r = crypt_convert(cc, &io->ctx); 739 r = crypt_convert(cc, &io->ctx);
@@ -741,6 +749,8 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
741 */ 749 */
742 if (unlikely(r < 0)) 750 if (unlikely(r < 0))
743 break; 751 break;
752
753 io->sector = sector;
744 } 754 }
745 755
746 /* 756 /*
@@ -750,8 +760,33 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
750 if (unlikely(out_of_pages)) 760 if (unlikely(out_of_pages))
751 congestion_wait(WRITE, HZ/100); 761 congestion_wait(WRITE, HZ/100);
752 762
753 if (unlikely(remaining)) 763 /*
754 wait_event(cc->writeq, !atomic_read(&io->ctx.pending)); 764 * With async crypto it is unsafe to share the crypto context
765 * between fragments, so switch to a new dm_crypt_io structure.
766 */
767 if (unlikely(!crypt_finished && remaining)) {
768 new_io = crypt_io_alloc(io->target, io->base_bio,
769 sector);
770 crypt_inc_pending(new_io);
771 crypt_convert_init(cc, &new_io->ctx, NULL,
772 io->base_bio, sector);
773 new_io->ctx.idx_in = io->ctx.idx_in;
774 new_io->ctx.offset_in = io->ctx.offset_in;
775
776 /*
777 * Fragments after the first use the base_io
778 * pending count.
779 */
780 if (!io->base_io)
781 new_io->base_io = io;
782 else {
783 new_io->base_io = io->base_io;
784 crypt_inc_pending(io->base_io);
785 crypt_dec_pending(io);
786 }
787
788 io = new_io;
789 }
755 } 790 }
756 791
757 crypt_dec_pending(io); 792 crypt_dec_pending(io);
@@ -1078,7 +1113,6 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1078 goto bad_crypt_queue; 1113 goto bad_crypt_queue;
1079 } 1114 }
1080 1115
1081 init_waitqueue_head(&cc->writeq);
1082 ti->private = cc; 1116 ti->private = cc;
1083 return 0; 1117 return 0;
1084 1118
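
Editor's note: the dm-crypt changes allow one write to be split into several dm_crypt_io fragments when the async crypto path cannot safely share a single context. The first fragment becomes base_io; later fragments take a reference on it instead of completing the original bio themselves, and only when base_io's pending count reaches zero is base_bio ended with the first recorded error. A simplified sketch of that completion chaining (names follow the patch, mempool freeing omitted):

    /* Simplified sketch of the base_io reference chaining used above. */
    static void frag_dec_pending(struct dm_crypt_io *io)
    {
            if (!atomic_dec_and_test(&io->pending))
                    return;         /* this fragment still has work in flight */

            if (!io->base_io) {
                    /* first (or only) fragment owns the original bio */
                    bio_endio(io->base_bio, io->error);
            } else {
                    /* later fragments report through base_io instead */
                    if (io->error && !io->base_io->error)
                            io->base_io->error = io->error;
                    frag_dec_pending(io->base_io);
            }
            /* the dm_crypt_io itself is returned to cc->io_pool here */
    }
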
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index bdd37f881c42..848b381f1173 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -13,7 +13,8 @@
13#include <linux/bio.h> 13#include <linux/bio.h>
14#include <linux/slab.h> 14#include <linux/slab.h>
15 15
16#include "dm.h" 16#include <linux/device-mapper.h>
17
17#include "dm-bio-list.h" 18#include "dm-bio-list.h"
18 19
19#define DM_MSG_PREFIX "delay" 20#define DM_MSG_PREFIX "delay"
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index 769ab677f8e0..01590f3e0009 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -7,7 +7,6 @@
7 * This file is released under the GPL. 7 * This file is released under the GPL.
8 */ 8 */
9 9
10#include "dm.h"
11#include "dm-snap.h" 10#include "dm-snap.h"
12 11
13#include <linux/mm.h> 12#include <linux/mm.h>
@@ -105,6 +104,11 @@ struct pstore {
105 void *area; 104 void *area;
106 105
107 /* 106 /*
107 * An area of zeros used to clear the next area.
108 */
109 void *zero_area;
110
111 /*
108 * Used to keep track of which metadata area the data in 112 * Used to keep track of which metadata area the data in
109 * 'chunk' refers to. 113 * 'chunk' refers to.
110 */ 114 */
@@ -149,6 +153,13 @@ static int alloc_area(struct pstore *ps)
149 if (!ps->area) 153 if (!ps->area)
150 return r; 154 return r;
151 155
156 ps->zero_area = vmalloc(len);
157 if (!ps->zero_area) {
158 vfree(ps->area);
159 return r;
160 }
161 memset(ps->zero_area, 0, len);
162
152 return 0; 163 return 0;
153} 164}
154 165
@@ -156,6 +167,8 @@ static void free_area(struct pstore *ps)
156{ 167{
157 vfree(ps->area); 168 vfree(ps->area);
158 ps->area = NULL; 169 ps->area = NULL;
170 vfree(ps->zero_area);
171 ps->zero_area = NULL;
159} 172}
160 173
161struct mdata_req { 174struct mdata_req {
@@ -220,25 +233,41 @@ static chunk_t area_location(struct pstore *ps, chunk_t area)
220 * Read or write a metadata area. Remembering to skip the first 233 * Read or write a metadata area. Remembering to skip the first
221 * chunk which holds the header. 234 * chunk which holds the header.
222 */ 235 */
223static int area_io(struct pstore *ps, chunk_t area, int rw) 236static int area_io(struct pstore *ps, int rw)
224{ 237{
225 int r; 238 int r;
226 chunk_t chunk; 239 chunk_t chunk;
227 240
228 chunk = area_location(ps, area); 241 chunk = area_location(ps, ps->current_area);
229 242
230 r = chunk_io(ps, chunk, rw, 0); 243 r = chunk_io(ps, chunk, rw, 0);
231 if (r) 244 if (r)
232 return r; 245 return r;
233 246
234 ps->current_area = area;
235 return 0; 247 return 0;
236} 248}
237 249
238static int zero_area(struct pstore *ps, chunk_t area) 250static void zero_memory_area(struct pstore *ps)
239{ 251{
240 memset(ps->area, 0, ps->snap->chunk_size << SECTOR_SHIFT); 252 memset(ps->area, 0, ps->snap->chunk_size << SECTOR_SHIFT);
241 return area_io(ps, area, WRITE); 253}
254
255static int zero_disk_area(struct pstore *ps, chunk_t area)
256{
257 struct dm_io_region where = {
258 .bdev = ps->snap->cow->bdev,
259 .sector = ps->snap->chunk_size * area_location(ps, area),
260 .count = ps->snap->chunk_size,
261 };
262 struct dm_io_request io_req = {
263 .bi_rw = WRITE,
264 .mem.type = DM_IO_VMA,
265 .mem.ptr.vma = ps->zero_area,
266 .client = ps->io_client,
267 .notify.fn = NULL,
268 };
269
270 return dm_io(&io_req, 1, &where, NULL);
242} 271}
243 272
244static int read_header(struct pstore *ps, int *new_snapshot) 273static int read_header(struct pstore *ps, int *new_snapshot)
@@ -411,15 +440,14 @@ static int insert_exceptions(struct pstore *ps, int *full)
411 440
412static int read_exceptions(struct pstore *ps) 441static int read_exceptions(struct pstore *ps)
413{ 442{
414 chunk_t area;
415 int r, full = 1; 443 int r, full = 1;
416 444
417 /* 445 /*
418 * Keeping reading chunks and inserting exceptions until 446 * Keeping reading chunks and inserting exceptions until
419 * we find a partially full area. 447 * we find a partially full area.
420 */ 448 */
421 for (area = 0; full; area++) { 449 for (ps->current_area = 0; full; ps->current_area++) {
422 r = area_io(ps, area, READ); 450 r = area_io(ps, READ);
423 if (r) 451 if (r)
424 return r; 452 return r;
425 453
@@ -428,6 +456,8 @@ static int read_exceptions(struct pstore *ps)
428 return r; 456 return r;
429 } 457 }
430 458
459 ps->current_area--;
460
431 return 0; 461 return 0;
432} 462}
433 463
@@ -486,12 +516,13 @@ static int persistent_read_metadata(struct exception_store *store)
486 return r; 516 return r;
487 } 517 }
488 518
489 r = zero_area(ps, 0); 519 ps->current_area = 0;
520 zero_memory_area(ps);
521 r = zero_disk_area(ps, 0);
490 if (r) { 522 if (r) {
491 DMWARN("zero_area(0) failed"); 523 DMWARN("zero_disk_area(0) failed");
492 return r; 524 return r;
493 } 525 }
494
495 } else { 526 } else {
496 /* 527 /*
497 * Sanity checks. 528 * Sanity checks.
@@ -551,7 +582,6 @@ static void persistent_commit(struct exception_store *store,
551 void (*callback) (void *, int success), 582 void (*callback) (void *, int success),
552 void *callback_context) 583 void *callback_context)
553{ 584{
554 int r;
555 unsigned int i; 585 unsigned int i;
556 struct pstore *ps = get_info(store); 586 struct pstore *ps = get_info(store);
557 struct disk_exception de; 587 struct disk_exception de;
@@ -572,33 +602,41 @@ static void persistent_commit(struct exception_store *store,
572 cb->context = callback_context; 602 cb->context = callback_context;
573 603
574 /* 604 /*
575 * If there are no more exceptions in flight, or we have 605 * If there are exceptions in flight and we have not yet
576 * filled this metadata area we commit the exceptions to 606 * filled this metadata area there's nothing more to do.
577 * disk.
578 */ 607 */
579 if (atomic_dec_and_test(&ps->pending_count) || 608 if (!atomic_dec_and_test(&ps->pending_count) &&
580 (ps->current_committed == ps->exceptions_per_area)) { 609 (ps->current_committed != ps->exceptions_per_area))
581 r = area_io(ps, ps->current_area, WRITE); 610 return;
582 if (r)
583 ps->valid = 0;
584 611
585 /* 612 /*
586 * Have we completely filled the current area ? 613 * If we completely filled the current area, then wipe the next one.
587 */ 614 */
588 if (ps->current_committed == ps->exceptions_per_area) { 615 if ((ps->current_committed == ps->exceptions_per_area) &&
589 ps->current_committed = 0; 616 zero_disk_area(ps, ps->current_area + 1))
590 r = zero_area(ps, ps->current_area + 1); 617 ps->valid = 0;
591 if (r)
592 ps->valid = 0;
593 }
594 618
595 for (i = 0; i < ps->callback_count; i++) { 619 /*
596 cb = ps->callbacks + i; 620 * Commit exceptions to disk.
597 cb->callback(cb->context, r == 0 ? 1 : 0); 621 */
598 } 622 if (ps->valid && area_io(ps, WRITE))
623 ps->valid = 0;
599 624
600 ps->callback_count = 0; 625 /*
626 * Advance to the next area if this one is full.
627 */
628 if (ps->current_committed == ps->exceptions_per_area) {
629 ps->current_committed = 0;
630 ps->current_area++;
631 zero_memory_area(ps);
601 } 632 }
633
634 for (i = 0; i < ps->callback_count; i++) {
635 cb = ps->callbacks + i;
636 cb->callback(cb->context, ps->valid);
637 }
638
639 ps->callback_count = 0;
602} 640}
603 641
604static void persistent_drop(struct exception_store *store) 642static void persistent_drop(struct exception_store *store)
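
Editor's note: the snapshot exception store now keeps a vmalloc'ed zero_area so an on-disk metadata area can be wiped with one dm_io write from that buffer (zero_disk_area), instead of memset-ting the working area and pushing it through area_io(). Because the buffer comes from vmalloc(), the request uses the DM_IO_VMA memory type, and a NULL notify function makes the call synchronous. A stripped-down sketch of that call shape, with placeholder arguments standing in for the pstore's fields:

    /* Sketch of a synchronous dm_io write from a vmalloc'ed buffer,
     * mirroring zero_disk_area() above. */
    static int write_vma_buffer(struct dm_io_client *client,
                                struct block_device *bdev, sector_t sector,
                                sector_t count, void *vma_buf)
    {
            struct dm_io_region where = {
                    .bdev   = bdev,
                    .sector = sector,
                    .count  = count,
            };
            struct dm_io_request io_req = {
                    .bi_rw       = WRITE,
                    .mem.type    = DM_IO_VMA,   /* buffer came from vmalloc() */
                    .mem.ptr.vma = vma_buf,
                    .client      = client,
                    .notify.fn   = NULL,        /* NULL notify => synchronous */
            };

            return dm_io(&io_req, 1, &where, NULL);
    }
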
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 4789c42d9a3a..2fd6d4450637 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -5,7 +5,7 @@
5 * This file is released under the GPL. 5 * This file is released under the GPL.
6 */ 6 */
7 7
8#include "dm.h" 8#include <linux/device-mapper.h>
9 9
10#include <linux/bio.h> 10#include <linux/bio.h>
11#include <linux/mempool.h> 11#include <linux/mempool.h>
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index dca401dc70a0..777c948180f9 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -988,9 +988,9 @@ static int dev_wait(struct dm_ioctl *param, size_t param_size)
988 return r; 988 return r;
989} 989}
990 990
991static inline int get_mode(struct dm_ioctl *param) 991static inline fmode_t get_mode(struct dm_ioctl *param)
992{ 992{
993 int mode = FMODE_READ | FMODE_WRITE; 993 fmode_t mode = FMODE_READ | FMODE_WRITE;
994 994
995 if (param->flags & DM_READONLY_FLAG) 995 if (param->flags & DM_READONLY_FLAG)
996 mode = FMODE_READ; 996 mode = FMODE_READ;
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 996802b8a452..3073618269ea 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -22,6 +22,7 @@
22#include <linux/vmalloc.h> 22#include <linux/vmalloc.h>
23#include <linux/workqueue.h> 23#include <linux/workqueue.h>
24#include <linux/mutex.h> 24#include <linux/mutex.h>
25#include <linux/device-mapper.h>
25#include <linux/dm-kcopyd.h> 26#include <linux/dm-kcopyd.h>
26 27
27#include "dm.h" 28#include "dm.h"
@@ -268,6 +269,17 @@ static void push(struct list_head *jobs, struct kcopyd_job *job)
268 spin_unlock_irqrestore(&kc->job_lock, flags); 269 spin_unlock_irqrestore(&kc->job_lock, flags);
269} 270}
270 271
272
273static void push_head(struct list_head *jobs, struct kcopyd_job *job)
274{
275 unsigned long flags;
276 struct dm_kcopyd_client *kc = job->kc;
277
278 spin_lock_irqsave(&kc->job_lock, flags);
279 list_add(&job->list, jobs);
280 spin_unlock_irqrestore(&kc->job_lock, flags);
281}
282
271/* 283/*
272 * These three functions process 1 item from the corresponding 284 * These three functions process 1 item from the corresponding
273 * job list. 285 * job list.
@@ -398,7 +410,7 @@ static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
398 * We couldn't service this job ATM, so 410 * We couldn't service this job ATM, so
399 * push this job back onto the list. 411 * push this job back onto the list.
400 */ 412 */
401 push(jobs, job); 413 push_head(jobs, job);
402 break; 414 break;
403 } 415 }
404 416
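
Editor's note: when a kcopyd job cannot be serviced right now, it used to be push()ed back, which (assuming push() appends with list_add_tail(), as its name and the FIFO processing suggest) lets later jobs overtake it; the new push_head() re-queues the job at the front so submission order is preserved. The difference is only list_add() versus list_add_tail() under the job lock, roughly:

    /* Sketch of the two queueing directions, same spinlock discipline
     * as the patch; push() itself is not shown in the hunk, so the
     * tail variant here is an assumption about its body. */
    static void push_tail_sketch(struct list_head *jobs, struct kcopyd_job *job)
    {
            unsigned long flags;
            struct dm_kcopyd_client *kc = job->kc;

            spin_lock_irqsave(&kc->job_lock, flags);
            list_add_tail(&job->list, jobs);    /* normal submission: FIFO */
            spin_unlock_irqrestore(&kc->job_lock, flags);
    }

    static void push_head_sketch(struct list_head *jobs, struct kcopyd_job *job)
    {
            unsigned long flags;
            struct dm_kcopyd_client *kc = job->kc;

            spin_lock_irqsave(&kc->job_lock, flags);
            list_add(&job->list, jobs);         /* retry: back to the front */
            spin_unlock_irqrestore(&kc->job_lock, flags);
    }
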
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 6449bcdf84ca..44042becad8a 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -5,12 +5,12 @@
5 */ 5 */
6 6
7#include "dm.h" 7#include "dm.h"
8
9#include <linux/module.h> 8#include <linux/module.h>
10#include <linux/init.h> 9#include <linux/init.h>
11#include <linux/blkdev.h> 10#include <linux/blkdev.h>
12#include <linux/bio.h> 11#include <linux/bio.h>
13#include <linux/slab.h> 12#include <linux/slab.h>
13#include <linux/device-mapper.h>
14 14
15#define DM_MSG_PREFIX "linear" 15#define DM_MSG_PREFIX "linear"
16 16
@@ -110,20 +110,11 @@ static int linear_status(struct dm_target *ti, status_type_t type,
110 return 0; 110 return 0;
111} 111}
112 112
113static int linear_ioctl(struct dm_target *ti, struct inode *inode, 113static int linear_ioctl(struct dm_target *ti, unsigned int cmd,
114 struct file *filp, unsigned int cmd,
115 unsigned long arg) 114 unsigned long arg)
116{ 115{
117 struct linear_c *lc = (struct linear_c *) ti->private; 116 struct linear_c *lc = (struct linear_c *) ti->private;
118 struct block_device *bdev = lc->dev->bdev; 117 return __blkdev_driver_ioctl(lc->dev->bdev, lc->dev->mode, cmd, arg);
119 struct file fake_file = {};
120 struct dentry fake_dentry = {};
121
122 fake_file.f_mode = lc->dev->mode;
123 fake_file.f_path.dentry = &fake_dentry;
124 fake_dentry.d_inode = bdev->bd_inode;
125
126 return blkdev_driver_ioctl(bdev->bd_inode, &fake_file, bdev->bd_disk, cmd, arg);
127} 118}
128 119
129static int linear_merge(struct dm_target *ti, struct bvec_merge_data *bvm, 120static int linear_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
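
Editor's note: linear_ioctl() no longer fabricates a struct file and dentry just to reach the block driver's ioctl; with the new target ioctl prototype it forwards directly through __blkdev_driver_ioctl(), using the block device and the fmode_t recorded when the device was opened. The forwarding shape, as used in the hunk:

    /* Sketch of the simplified ioctl pass-through used by dm-linear above;
     * "dev" stands for the target's struct dm_dev with ->bdev and ->mode. */
    static int passthrough_ioctl(struct dm_dev *dev, unsigned int cmd,
                                 unsigned long arg)
    {
            /* forward the command to the underlying block driver */
            return __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
    }
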
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 5b48478c79f5..a8c0fc79ca78 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -12,7 +12,7 @@
12#include <linux/dm-io.h> 12#include <linux/dm-io.h>
13#include <linux/dm-dirty-log.h> 13#include <linux/dm-dirty-log.h>
14 14
15#include "dm.h" 15#include <linux/device-mapper.h>
16 16
17#define DM_MSG_PREFIX "dirty region log" 17#define DM_MSG_PREFIX "dirty region log"
18 18
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 9bf3460c5540..3d7f4923cd13 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -5,7 +5,8 @@
5 * This file is released under the GPL. 5 * This file is released under the GPL.
6 */ 6 */
7 7
8#include "dm.h" 8#include <linux/device-mapper.h>
9
9#include "dm-path-selector.h" 10#include "dm-path-selector.h"
10#include "dm-bio-list.h" 11#include "dm-bio-list.h"
11#include "dm-bio-record.h" 12#include "dm-bio-record.h"
@@ -440,13 +441,13 @@ static void process_queued_ios(struct work_struct *work)
440 __choose_pgpath(m); 441 __choose_pgpath(m);
441 442
442 pgpath = m->current_pgpath; 443 pgpath = m->current_pgpath;
443 m->pgpath_to_activate = m->current_pgpath;
444 444
445 if ((pgpath && !m->queue_io) || 445 if ((pgpath && !m->queue_io) ||
446 (!pgpath && !m->queue_if_no_path)) 446 (!pgpath && !m->queue_if_no_path))
447 must_queue = 0; 447 must_queue = 0;
448 448
449 if (m->pg_init_required && !m->pg_init_in_progress) { 449 if (m->pg_init_required && !m->pg_init_in_progress && pgpath) {
450 m->pgpath_to_activate = pgpath;
450 m->pg_init_count++; 451 m->pg_init_count++;
451 m->pg_init_required = 0; 452 m->pg_init_required = 0;
452 m->pg_init_in_progress = 1; 453 m->pg_init_in_progress = 1;
@@ -707,6 +708,10 @@ static int parse_hw_handler(struct arg_set *as, struct multipath *m)
707 m->hw_handler_name = NULL; 708 m->hw_handler_name = NULL;
708 return -EINVAL; 709 return -EINVAL;
709 } 710 }
711
712 if (hw_argc > 1)
713 DMWARN("Ignoring user-specified arguments for "
714 "hardware handler \"%s\"", m->hw_handler_name);
710 consume(as, hw_argc - 1); 715 consume(as, hw_argc - 1);
711 716
712 return 0; 717 return 0;
@@ -1395,19 +1400,15 @@ error:
1395 return -EINVAL; 1400 return -EINVAL;
1396} 1401}
1397 1402
1398static int multipath_ioctl(struct dm_target *ti, struct inode *inode, 1403static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
1399 struct file *filp, unsigned int cmd,
1400 unsigned long arg) 1404 unsigned long arg)
1401{ 1405{
1402 struct multipath *m = (struct multipath *) ti->private; 1406 struct multipath *m = (struct multipath *) ti->private;
1403 struct block_device *bdev = NULL; 1407 struct block_device *bdev = NULL;
1408 fmode_t mode = 0;
1404 unsigned long flags; 1409 unsigned long flags;
1405 struct file fake_file = {};
1406 struct dentry fake_dentry = {};
1407 int r = 0; 1410 int r = 0;
1408 1411
1409 fake_file.f_path.dentry = &fake_dentry;
1410
1411 spin_lock_irqsave(&m->lock, flags); 1412 spin_lock_irqsave(&m->lock, flags);
1412 1413
1413 if (!m->current_pgpath) 1414 if (!m->current_pgpath)
@@ -1415,8 +1416,7 @@ static int multipath_ioctl(struct dm_target *ti, struct inode *inode,
1415 1416
1416 if (m->current_pgpath) { 1417 if (m->current_pgpath) {
1417 bdev = m->current_pgpath->path.dev->bdev; 1418 bdev = m->current_pgpath->path.dev->bdev;
1418 fake_dentry.d_inode = bdev->bd_inode; 1419 mode = m->current_pgpath->path.dev->mode;
1419 fake_file.f_mode = m->current_pgpath->path.dev->mode;
1420 } 1420 }
1421 1421
1422 if (m->queue_io) 1422 if (m->queue_io)
@@ -1426,8 +1426,7 @@ static int multipath_ioctl(struct dm_target *ti, struct inode *inode,
1426 1426
1427 spin_unlock_irqrestore(&m->lock, flags); 1427 spin_unlock_irqrestore(&m->lock, flags);
1428 1428
1429 return r ? : blkdev_driver_ioctl(bdev->bd_inode, &fake_file, 1429 return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
1430 bdev->bd_disk, cmd, arg);
1431} 1430}
1432 1431
1433/*----------------------------------------------------------------- 1432/*-----------------------------------------------------------------
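
Editor's note: dm-mpath gets the same ioctl treatment but must pick a device first: under m->lock it records the current path's block device and open mode, defers the caller while I/O is still being queued, and only then forwards with __blkdev_driver_ioctl(). The earlier hunk also starts warning when extra hardware-handler arguments are supplied, since they are ignored. A condensed sketch of the select-then-forward flow; the exact error codes for the "still queueing" and "no path" cases are not visible in the hunk, so -EAGAIN and -EIO below are assumptions:

    /* Condensed sketch of the multipath ioctl forwarding above. */
    static int mpath_ioctl_sketch(struct multipath *m, unsigned int cmd,
                                  unsigned long arg)
    {
            struct block_device *bdev = NULL;
            fmode_t mode = 0;
            unsigned long flags;
            int r = 0;

            spin_lock_irqsave(&m->lock, flags);
            if (m->current_pgpath) {
                    bdev = m->current_pgpath->path.dev->bdev;
                    mode = m->current_pgpath->path.dev->mode;
            }
            if (m->queue_io)
                    r = -EAGAIN;    /* assumption: caller should retry */
            else if (!bdev)
                    r = -EIO;       /* assumption: no usable path */
            spin_unlock_irqrestore(&m->lock, flags);

            return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
    }
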
diff --git a/drivers/md/dm-path-selector.c b/drivers/md/dm-path-selector.c
index ca1bb636a3e4..96ea226155b1 100644
--- a/drivers/md/dm-path-selector.c
+++ b/drivers/md/dm-path-selector.c
@@ -9,7 +9,8 @@
9 * Path selector registration. 9 * Path selector registration.
10 */ 10 */
11 11
12#include "dm.h" 12#include <linux/device-mapper.h>
13
13#include "dm-path-selector.h" 14#include "dm-path-selector.h"
14 15
15#include <linux/slab.h> 16#include <linux/slab.h>
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 29913e42c4ab..ec43f9fa4b2a 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -1,30 +1,30 @@
1/* 1/*
2 * Copyright (C) 2003 Sistina Software Limited. 2 * Copyright (C) 2003 Sistina Software Limited.
3 * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
3 * 4 *
4 * This file is released under the GPL. 5 * This file is released under the GPL.
5 */ 6 */
6 7
7#include "dm.h"
8#include "dm-bio-list.h" 8#include "dm-bio-list.h"
9#include "dm-bio-record.h" 9#include "dm-bio-record.h"
10 10
11#include <linux/ctype.h>
12#include <linux/init.h> 11#include <linux/init.h>
13#include <linux/mempool.h> 12#include <linux/mempool.h>
14#include <linux/module.h> 13#include <linux/module.h>
15#include <linux/pagemap.h> 14#include <linux/pagemap.h>
16#include <linux/slab.h> 15#include <linux/slab.h>
17#include <linux/time.h>
18#include <linux/vmalloc.h>
19#include <linux/workqueue.h> 16#include <linux/workqueue.h>
20#include <linux/log2.h> 17#include <linux/device-mapper.h>
21#include <linux/hardirq.h>
22#include <linux/dm-io.h> 18#include <linux/dm-io.h>
23#include <linux/dm-dirty-log.h> 19#include <linux/dm-dirty-log.h>
24#include <linux/dm-kcopyd.h> 20#include <linux/dm-kcopyd.h>
21#include <linux/dm-region-hash.h>
25 22
26#define DM_MSG_PREFIX "raid1" 23#define DM_MSG_PREFIX "raid1"
24
25#define MAX_RECOVERY 1 /* Maximum number of regions recovered in parallel. */
27#define DM_IO_PAGES 64 26#define DM_IO_PAGES 64
27#define DM_KCOPYD_PAGES 64
28 28
29#define DM_RAID1_HANDLE_ERRORS 0x01 29#define DM_RAID1_HANDLE_ERRORS 0x01
30#define errors_handled(p) ((p)->features & DM_RAID1_HANDLE_ERRORS) 30#define errors_handled(p) ((p)->features & DM_RAID1_HANDLE_ERRORS)
@@ -32,87 +32,6 @@
32static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped); 32static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);
33 33
34/*----------------------------------------------------------------- 34/*-----------------------------------------------------------------
35 * Region hash
36 *
37 * The mirror splits itself up into discrete regions. Each
38 * region can be in one of three states: clean, dirty,
39 * nosync. There is no need to put clean regions in the hash.
40 *
41 * In addition to being present in the hash table a region _may_
42 * be present on one of three lists.
43 *
44 * clean_regions: Regions on this list have no io pending to
45 * them, they are in sync, we are no longer interested in them,
46 * they are dull. rh_update_states() will remove them from the
47 * hash table.
48 *
49 * quiesced_regions: These regions have been spun down, ready
50 * for recovery. rh_recovery_start() will remove regions from
51 * this list and hand them to kmirrord, which will schedule the
52 * recovery io with kcopyd.
53 *
54 * recovered_regions: Regions that kcopyd has successfully
55 * recovered. rh_update_states() will now schedule any delayed
56 * io, up the recovery_count, and remove the region from the
57 * hash.
58 *
59 * There are 2 locks:
60 * A rw spin lock 'hash_lock' protects just the hash table,
61 * this is never held in write mode from interrupt context,
62 * which I believe means that we only have to disable irqs when
63 * doing a write lock.
64 *
65 * An ordinary spin lock 'region_lock' that protects the three
66 * lists in the region_hash, with the 'state', 'list' and
67 * 'bhs_delayed' fields of the regions. This is used from irq
68 * context, so all other uses will have to suspend local irqs.
69 *---------------------------------------------------------------*/
70struct mirror_set;
71struct region_hash {
72 struct mirror_set *ms;
73 uint32_t region_size;
74 unsigned region_shift;
75
76 /* holds persistent region state */
77 struct dm_dirty_log *log;
78
79 /* hash table */
80 rwlock_t hash_lock;
81 mempool_t *region_pool;
82 unsigned int mask;
83 unsigned int nr_buckets;
84 struct list_head *buckets;
85
86 spinlock_t region_lock;
87 atomic_t recovery_in_flight;
88 struct semaphore recovery_count;
89 struct list_head clean_regions;
90 struct list_head quiesced_regions;
91 struct list_head recovered_regions;
92 struct list_head failed_recovered_regions;
93};
94
95enum {
96 RH_CLEAN,
97 RH_DIRTY,
98 RH_NOSYNC,
99 RH_RECOVERING
100};
101
102struct region {
103 struct region_hash *rh; /* FIXME: can we get rid of this ? */
104 region_t key;
105 int state;
106
107 struct list_head hash_list;
108 struct list_head list;
109
110 atomic_t pending;
111 struct bio_list delayed_bios;
112};
113
114
115/*-----------------------------------------------------------------
116 * Mirror set structures. 35 * Mirror set structures.
117 *---------------------------------------------------------------*/ 36 *---------------------------------------------------------------*/
118enum dm_raid1_error { 37enum dm_raid1_error {
@@ -132,8 +51,7 @@ struct mirror {
132struct mirror_set { 51struct mirror_set {
133 struct dm_target *ti; 52 struct dm_target *ti;
134 struct list_head list; 53 struct list_head list;
135 struct region_hash rh; 54
136 struct dm_kcopyd_client *kcopyd_client;
137 uint64_t features; 55 uint64_t features;
138 56
139 spinlock_t lock; /* protects the lists */ 57 spinlock_t lock; /* protects the lists */
@@ -141,6 +59,8 @@ struct mirror_set {
141 struct bio_list writes; 59 struct bio_list writes;
142 struct bio_list failures; 60 struct bio_list failures;
143 61
62 struct dm_region_hash *rh;
63 struct dm_kcopyd_client *kcopyd_client;
144 struct dm_io_client *io_client; 64 struct dm_io_client *io_client;
145 mempool_t *read_record_pool; 65 mempool_t *read_record_pool;
146 66
@@ -159,25 +79,14 @@ struct mirror_set {
159 79
160 struct work_struct trigger_event; 80 struct work_struct trigger_event;
161 81
162 unsigned int nr_mirrors; 82 unsigned nr_mirrors;
163 struct mirror mirror[0]; 83 struct mirror mirror[0];
164}; 84};
165 85
166/* 86static void wakeup_mirrord(void *context)
167 * Conversion fns
168 */
169static inline region_t bio_to_region(struct region_hash *rh, struct bio *bio)
170{
171 return (bio->bi_sector - rh->ms->ti->begin) >> rh->region_shift;
172}
173
174static inline sector_t region_to_sector(struct region_hash *rh, region_t region)
175{ 87{
176 return region << rh->region_shift; 88 struct mirror_set *ms = context;
177}
178 89
179static void wake(struct mirror_set *ms)
180{
181 queue_work(ms->kmirrord_wq, &ms->kmirrord_work); 90 queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
182} 91}
183 92
@@ -186,7 +95,7 @@ static void delayed_wake_fn(unsigned long data)
186 struct mirror_set *ms = (struct mirror_set *) data; 95 struct mirror_set *ms = (struct mirror_set *) data;
187 96
188 clear_bit(0, &ms->timer_pending); 97 clear_bit(0, &ms->timer_pending);
189 wake(ms); 98 wakeup_mirrord(ms);
190} 99}
191 100
192static void delayed_wake(struct mirror_set *ms) 101static void delayed_wake(struct mirror_set *ms)
@@ -200,473 +109,34 @@ static void delayed_wake(struct mirror_set *ms)
200 add_timer(&ms->timer); 109 add_timer(&ms->timer);
201} 110}
202 111
203/* FIXME move this */ 112static void wakeup_all_recovery_waiters(void *context)
204static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw);
205
206#define MIN_REGIONS 64
207#define MAX_RECOVERY 1
208static int rh_init(struct region_hash *rh, struct mirror_set *ms,
209 struct dm_dirty_log *log, uint32_t region_size,
210 region_t nr_regions)
211{
212 unsigned int nr_buckets, max_buckets;
213 size_t i;
214
215 /*
216 * Calculate a suitable number of buckets for our hash
217 * table.
218 */
219 max_buckets = nr_regions >> 6;
220 for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1)
221 ;
222 nr_buckets >>= 1;
223
224 rh->ms = ms;
225 rh->log = log;
226 rh->region_size = region_size;
227 rh->region_shift = ffs(region_size) - 1;
228 rwlock_init(&rh->hash_lock);
229 rh->mask = nr_buckets - 1;
230 rh->nr_buckets = nr_buckets;
231
232 rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets));
233 if (!rh->buckets) {
234 DMERR("unable to allocate region hash memory");
235 return -ENOMEM;
236 }
237
238 for (i = 0; i < nr_buckets; i++)
239 INIT_LIST_HEAD(rh->buckets + i);
240
241 spin_lock_init(&rh->region_lock);
242 sema_init(&rh->recovery_count, 0);
243 atomic_set(&rh->recovery_in_flight, 0);
244 INIT_LIST_HEAD(&rh->clean_regions);
245 INIT_LIST_HEAD(&rh->quiesced_regions);
246 INIT_LIST_HEAD(&rh->recovered_regions);
247 INIT_LIST_HEAD(&rh->failed_recovered_regions);
248
249 rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
250 sizeof(struct region));
251 if (!rh->region_pool) {
252 vfree(rh->buckets);
253 rh->buckets = NULL;
254 return -ENOMEM;
255 }
256
257 return 0;
258}
259
260static void rh_exit(struct region_hash *rh)
261{
262 unsigned int h;
263 struct region *reg, *nreg;
264
265 BUG_ON(!list_empty(&rh->quiesced_regions));
266 for (h = 0; h < rh->nr_buckets; h++) {
267 list_for_each_entry_safe(reg, nreg, rh->buckets + h, hash_list) {
268 BUG_ON(atomic_read(&reg->pending));
269 mempool_free(reg, rh->region_pool);
270 }
271 }
272
273 if (rh->log)
274 dm_dirty_log_destroy(rh->log);
275 if (rh->region_pool)
276 mempool_destroy(rh->region_pool);
277 vfree(rh->buckets);
278}
279
280#define RH_HASH_MULT 2654435387U
281
282static inline unsigned int rh_hash(struct region_hash *rh, region_t region)
283{
284 return (unsigned int) ((region * RH_HASH_MULT) >> 12) & rh->mask;
285}
286
287static struct region *__rh_lookup(struct region_hash *rh, region_t region)
288{
289 struct region *reg;
290
291 list_for_each_entry (reg, rh->buckets + rh_hash(rh, region), hash_list)
292 if (reg->key == region)
293 return reg;
294
295 return NULL;
296}
297
298static void __rh_insert(struct region_hash *rh, struct region *reg)
299{
300 unsigned int h = rh_hash(rh, reg->key);
301 list_add(&reg->hash_list, rh->buckets + h);
302}
303
304static struct region *__rh_alloc(struct region_hash *rh, region_t region)
305{
306 struct region *reg, *nreg;
307
308 read_unlock(&rh->hash_lock);
309 nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
310 if (unlikely(!nreg))
311 nreg = kmalloc(sizeof(struct region), GFP_NOIO);
312 nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
313 RH_CLEAN : RH_NOSYNC;
314 nreg->rh = rh;
315 nreg->key = region;
316
317 INIT_LIST_HEAD(&nreg->list);
318
319 atomic_set(&nreg->pending, 0);
320 bio_list_init(&nreg->delayed_bios);
321 write_lock_irq(&rh->hash_lock);
322
323 reg = __rh_lookup(rh, region);
324 if (reg)
325 /* we lost the race */
326 mempool_free(nreg, rh->region_pool);
327
328 else {
329 __rh_insert(rh, nreg);
330 if (nreg->state == RH_CLEAN) {
331 spin_lock(&rh->region_lock);
332 list_add(&nreg->list, &rh->clean_regions);
333 spin_unlock(&rh->region_lock);
334 }
335 reg = nreg;
336 }
337 write_unlock_irq(&rh->hash_lock);
338 read_lock(&rh->hash_lock);
339
340 return reg;
341}
342
343static inline struct region *__rh_find(struct region_hash *rh, region_t region)
344{
345 struct region *reg;
346
347 reg = __rh_lookup(rh, region);
348 if (!reg)
349 reg = __rh_alloc(rh, region);
350
351 return reg;
352}
353
354static int rh_state(struct region_hash *rh, region_t region, int may_block)
355{
356 int r;
357 struct region *reg;
358
359 read_lock(&rh->hash_lock);
360 reg = __rh_lookup(rh, region);
361 read_unlock(&rh->hash_lock);
362
363 if (reg)
364 return reg->state;
365
366 /*
367 * The region wasn't in the hash, so we fall back to the
368 * dirty log.
369 */
370 r = rh->log->type->in_sync(rh->log, region, may_block);
371
372 /*
373 * Any error from the dirty log (eg. -EWOULDBLOCK) gets
374 * taken as a RH_NOSYNC
375 */
376 return r == 1 ? RH_CLEAN : RH_NOSYNC;
377}
378
379static inline int rh_in_sync(struct region_hash *rh,
380 region_t region, int may_block)
381{ 113{
382 int state = rh_state(rh, region, may_block); 114 wake_up_all(&_kmirrord_recovery_stopped);
383 return state == RH_CLEAN || state == RH_DIRTY;
384} 115}
385 116
386static void dispatch_bios(struct mirror_set *ms, struct bio_list *bio_list) 117static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
387{
388 struct bio *bio;
389
390 while ((bio = bio_list_pop(bio_list))) {
391 queue_bio(ms, bio, WRITE);
392 }
393}
394
395static void complete_resync_work(struct region *reg, int success)
396{
397 struct region_hash *rh = reg->rh;
398
399 rh->log->type->set_region_sync(rh->log, reg->key, success);
400
401 /*
402 * Dispatch the bios before we call 'wake_up_all'.
403 * This is important because if we are suspending,
404 * we want to know that recovery is complete and
405 * the work queue is flushed. If we wake_up_all
406 * before we dispatch_bios (queue bios and call wake()),
407 * then we risk suspending before the work queue
408 * has been properly flushed.
409 */
410 dispatch_bios(rh->ms, &reg->delayed_bios);
411 if (atomic_dec_and_test(&rh->recovery_in_flight))
412 wake_up_all(&_kmirrord_recovery_stopped);
413 up(&rh->recovery_count);
414}
415
416static void rh_update_states(struct region_hash *rh)
417{
418 struct region *reg, *next;
419
420 LIST_HEAD(clean);
421 LIST_HEAD(recovered);
422 LIST_HEAD(failed_recovered);
423
424 /*
425 * Quickly grab the lists.
426 */
427 write_lock_irq(&rh->hash_lock);
428 spin_lock(&rh->region_lock);
429 if (!list_empty(&rh->clean_regions)) {
430 list_splice_init(&rh->clean_regions, &clean);
431
432 list_for_each_entry(reg, &clean, list)
433 list_del(&reg->hash_list);
434 }
435
436 if (!list_empty(&rh->recovered_regions)) {
437 list_splice_init(&rh->recovered_regions, &recovered);
438
439 list_for_each_entry (reg, &recovered, list)
440 list_del(&reg->hash_list);
441 }
442
443 if (!list_empty(&rh->failed_recovered_regions)) {
444 list_splice_init(&rh->failed_recovered_regions,
445 &failed_recovered);
446
447 list_for_each_entry(reg, &failed_recovered, list)
448 list_del(&reg->hash_list);
449 }
450
451 spin_unlock(&rh->region_lock);
452 write_unlock_irq(&rh->hash_lock);
453
454 /*
455 * All the regions on the recovered and clean lists have
456 * now been pulled out of the system, so no need to do
457 * any more locking.
458 */
459 list_for_each_entry_safe (reg, next, &recovered, list) {
460 rh->log->type->clear_region(rh->log, reg->key);
461 complete_resync_work(reg, 1);
462 mempool_free(reg, rh->region_pool);
463 }
464
465 list_for_each_entry_safe(reg, next, &failed_recovered, list) {
466 complete_resync_work(reg, errors_handled(rh->ms) ? 0 : 1);
467 mempool_free(reg, rh->region_pool);
468 }
469
470 list_for_each_entry_safe(reg, next, &clean, list) {
471 rh->log->type->clear_region(rh->log, reg->key);
472 mempool_free(reg, rh->region_pool);
473 }
474
475 rh->log->type->flush(rh->log);
476}
477
478static void rh_inc(struct region_hash *rh, region_t region)
479{
480 struct region *reg;
481
482 read_lock(&rh->hash_lock);
483 reg = __rh_find(rh, region);
484
485 spin_lock_irq(&rh->region_lock);
486 atomic_inc(&reg->pending);
487
488 if (reg->state == RH_CLEAN) {
489 reg->state = RH_DIRTY;
490 list_del_init(&reg->list); /* take off the clean list */
491 spin_unlock_irq(&rh->region_lock);
492
493 rh->log->type->mark_region(rh->log, reg->key);
494 } else
495 spin_unlock_irq(&rh->region_lock);
496
497
498 read_unlock(&rh->hash_lock);
499}
500
501static void rh_inc_pending(struct region_hash *rh, struct bio_list *bios)
502{
503 struct bio *bio;
504
505 for (bio = bios->head; bio; bio = bio->bi_next)
506 rh_inc(rh, bio_to_region(rh, bio));
507}
508
509static void rh_dec(struct region_hash *rh, region_t region)
510{ 118{
511 unsigned long flags; 119 unsigned long flags;
512 struct region *reg;
513 int should_wake = 0; 120 int should_wake = 0;
121 struct bio_list *bl;
514 122
515 read_lock(&rh->hash_lock); 123 bl = (rw == WRITE) ? &ms->writes : &ms->reads;
516 reg = __rh_lookup(rh, region); 124 spin_lock_irqsave(&ms->lock, flags);
517 read_unlock(&rh->hash_lock); 125 should_wake = !(bl->head);
518 126 bio_list_add(bl, bio);
519 spin_lock_irqsave(&rh->region_lock, flags); 127 spin_unlock_irqrestore(&ms->lock, flags);
520 if (atomic_dec_and_test(&reg->pending)) {
521 /*
522 * There is no pending I/O for this region.
523 * We can move the region to corresponding list for next action.
524 * At this point, the region is not yet connected to any list.
525 *
526 * If the state is RH_NOSYNC, the region should be kept off
527 * from clean list.
528 * The hash entry for RH_NOSYNC will remain in memory
529 * until the region is recovered or the map is reloaded.
530 */
531
532 /* do nothing for RH_NOSYNC */
533 if (reg->state == RH_RECOVERING) {
534 list_add_tail(&reg->list, &rh->quiesced_regions);
535 } else if (reg->state == RH_DIRTY) {
536 reg->state = RH_CLEAN;
537 list_add(&reg->list, &rh->clean_regions);
538 }
539 should_wake = 1;
540 }
541 spin_unlock_irqrestore(&rh->region_lock, flags);
542 128
543 if (should_wake) 129 if (should_wake)
544 wake(rh->ms); 130 wakeup_mirrord(ms);
545} 131}
546 132
547/* 133static void dispatch_bios(void *context, struct bio_list *bio_list)
548 * Starts quiescing a region in preparation for recovery.
549 */
550static int __rh_recovery_prepare(struct region_hash *rh)
551{ 134{
552 int r; 135 struct mirror_set *ms = context;
553 struct region *reg; 136 struct bio *bio;
554 region_t region;
555
556 /*
557 * Ask the dirty log what's next.
558 */
559 r = rh->log->type->get_resync_work(rh->log, &region);
560 if (r <= 0)
561 return r;
562
563 /*
564 * Get this region, and start it quiescing by setting the
565 * recovering flag.
566 */
567 read_lock(&rh->hash_lock);
568 reg = __rh_find(rh, region);
569 read_unlock(&rh->hash_lock);
570
571 spin_lock_irq(&rh->region_lock);
572 reg->state = RH_RECOVERING;
573
574 /* Already quiesced ? */
575 if (atomic_read(&reg->pending))
576 list_del_init(&reg->list);
577 else
578 list_move(&reg->list, &rh->quiesced_regions);
579
580 spin_unlock_irq(&rh->region_lock);
581
582 return 1;
583}
584
585static void rh_recovery_prepare(struct region_hash *rh)
586{
587 /* Extra reference to avoid race with rh_stop_recovery */
588 atomic_inc(&rh->recovery_in_flight);
589
590 while (!down_trylock(&rh->recovery_count)) {
591 atomic_inc(&rh->recovery_in_flight);
592 if (__rh_recovery_prepare(rh) <= 0) {
593 atomic_dec(&rh->recovery_in_flight);
594 up(&rh->recovery_count);
595 break;
596 }
597 }
598
599 /* Drop the extra reference */
600 if (atomic_dec_and_test(&rh->recovery_in_flight))
601 wake_up_all(&_kmirrord_recovery_stopped);
602}
603
604/*
605 * Returns any quiesced regions.
606 */
607static struct region *rh_recovery_start(struct region_hash *rh)
608{
609 struct region *reg = NULL;
610
611 spin_lock_irq(&rh->region_lock);
612 if (!list_empty(&rh->quiesced_regions)) {
613 reg = list_entry(rh->quiesced_regions.next,
614 struct region, list);
615 list_del_init(&reg->list); /* remove from the quiesced list */
616 }
617 spin_unlock_irq(&rh->region_lock);
618
619 return reg;
620}
621
622static void rh_recovery_end(struct region *reg, int success)
623{
624 struct region_hash *rh = reg->rh;
625
626 spin_lock_irq(&rh->region_lock);
627 if (success)
628 list_add(&reg->list, &reg->rh->recovered_regions);
629 else {
630 reg->state = RH_NOSYNC;
631 list_add(&reg->list, &reg->rh->failed_recovered_regions);
632 }
633 spin_unlock_irq(&rh->region_lock);
634
635 wake(rh->ms);
636}
637
638static int rh_flush(struct region_hash *rh)
639{
640 return rh->log->type->flush(rh->log);
641}
642
643static void rh_delay(struct region_hash *rh, struct bio *bio)
644{
645 struct region *reg;
646
647 read_lock(&rh->hash_lock);
648 reg = __rh_find(rh, bio_to_region(rh, bio));
649 bio_list_add(&reg->delayed_bios, bio);
650 read_unlock(&rh->hash_lock);
651}
652
653static void rh_stop_recovery(struct region_hash *rh)
654{
655 int i;
656
657 /* wait for any recovering regions */
658 for (i = 0; i < MAX_RECOVERY; i++)
659 down(&rh->recovery_count);
660}
661
662static void rh_start_recovery(struct region_hash *rh)
663{
664 int i;
665
666 for (i = 0; i < MAX_RECOVERY; i++)
667 up(&rh->recovery_count);
668 137
669 wake(rh->ms); 138 while ((bio = bio_list_pop(bio_list)))
139 queue_bio(ms, bio, WRITE);
670} 140}
671 141
672#define MIN_READ_RECORDS 20 142#define MIN_READ_RECORDS 20
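
Editor's note: this large removal deletes dm-raid1's private region hash; the mirror now builds on the shared dm-region-hash code (linked in by the Makefile change above) and only supplies callbacks: wakeup_mirrord() to kick kmirrord, dispatch_bios() to requeue delayed writes, and wakeup_all_recovery_waiters() for suspend. The consumer-side calls that replace the old rh_* helpers appear in the later hunks of this file; a sketch of write classification against the generic API, following those hunks, with DM_RH_* states replacing the old private RH_* enum:

    /* Sketch of write classification via the shared region hash, following
     * the do_writes() changes later in this diff. */
    static struct bio_list *classify_write(struct mirror_set *ms, struct bio *bio,
                                           struct bio_list *sync,
                                           struct bio_list *nosync,
                                           struct bio_list *recover)
    {
            int state = dm_rh_get_state(ms->rh,
                                        dm_rh_bio_to_region(ms->rh, bio), 1);

            switch (state) {
            case DM_RH_CLEAN:
            case DM_RH_DIRTY:
                    return sync;    /* in sync: mirror the write everywhere */
            case DM_RH_NOSYNC:
                    return nosync;  /* out of sync: default mirror only */
            case DM_RH_RECOVERING:
            default:
                    return recover; /* being recovered: delay via dm_rh_delay() */
            }
    }
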
@@ -776,8 +246,8 @@ out:
776static void recovery_complete(int read_err, unsigned long write_err, 246static void recovery_complete(int read_err, unsigned long write_err,
777 void *context) 247 void *context)
778{ 248{
779 struct region *reg = (struct region *)context; 249 struct dm_region *reg = context;
780 struct mirror_set *ms = reg->rh->ms; 250 struct mirror_set *ms = dm_rh_region_context(reg);
781 int m, bit = 0; 251 int m, bit = 0;
782 252
783 if (read_err) { 253 if (read_err) {
@@ -803,31 +273,33 @@ static void recovery_complete(int read_err, unsigned long write_err,
803 } 273 }
804 } 274 }
805 275
806 rh_recovery_end(reg, !(read_err || write_err)); 276 dm_rh_recovery_end(reg, !(read_err || write_err));
807} 277}
808 278
809static int recover(struct mirror_set *ms, struct region *reg) 279static int recover(struct mirror_set *ms, struct dm_region *reg)
810{ 280{
811 int r; 281 int r;
812 unsigned int i; 282 unsigned i;
813 struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest; 283 struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest;
814 struct mirror *m; 284 struct mirror *m;
815 unsigned long flags = 0; 285 unsigned long flags = 0;
286 region_t key = dm_rh_get_region_key(reg);
287 sector_t region_size = dm_rh_get_region_size(ms->rh);
816 288
817 /* fill in the source */ 289 /* fill in the source */
818 m = get_default_mirror(ms); 290 m = get_default_mirror(ms);
819 from.bdev = m->dev->bdev; 291 from.bdev = m->dev->bdev;
820 from.sector = m->offset + region_to_sector(reg->rh, reg->key); 292 from.sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
821 if (reg->key == (ms->nr_regions - 1)) { 293 if (key == (ms->nr_regions - 1)) {
822 /* 294 /*
823 * The final region may be smaller than 295 * The final region may be smaller than
824 * region_size. 296 * region_size.
825 */ 297 */
826 from.count = ms->ti->len & (reg->rh->region_size - 1); 298 from.count = ms->ti->len & (region_size - 1);
827 if (!from.count) 299 if (!from.count)
828 from.count = reg->rh->region_size; 300 from.count = region_size;
829 } else 301 } else
830 from.count = reg->rh->region_size; 302 from.count = region_size;
831 303
832 /* fill in the destinations */ 304 /* fill in the destinations */
833 for (i = 0, dest = to; i < ms->nr_mirrors; i++) { 305 for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
@@ -836,7 +308,7 @@ static int recover(struct mirror_set *ms, struct region *reg)
836 308
837 m = ms->mirror + i; 309 m = ms->mirror + i;
838 dest->bdev = m->dev->bdev; 310 dest->bdev = m->dev->bdev;
839 dest->sector = m->offset + region_to_sector(reg->rh, reg->key); 311 dest->sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
840 dest->count = from.count; 312 dest->count = from.count;
841 dest++; 313 dest++;
842 } 314 }
@@ -853,22 +325,22 @@ static int recover(struct mirror_set *ms, struct region *reg)
853 325
854static void do_recovery(struct mirror_set *ms) 326static void do_recovery(struct mirror_set *ms)
855{ 327{
328 struct dm_region *reg;
329 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
856 int r; 330 int r;
857 struct region *reg;
858 struct dm_dirty_log *log = ms->rh.log;
859 331
860 /* 332 /*
861 * Start quiescing some regions. 333 * Start quiescing some regions.
862 */ 334 */
863 rh_recovery_prepare(&ms->rh); 335 dm_rh_recovery_prepare(ms->rh);
864 336
865 /* 337 /*
866 * Copy any already quiesced regions. 338 * Copy any already quiesced regions.
867 */ 339 */
868 while ((reg = rh_recovery_start(&ms->rh))) { 340 while ((reg = dm_rh_recovery_start(ms->rh))) {
869 r = recover(ms, reg); 341 r = recover(ms, reg);
870 if (r) 342 if (r)
871 rh_recovery_end(reg, 0); 343 dm_rh_recovery_end(reg, 0);
872 } 344 }
873 345
874 /* 346 /*
@@ -909,9 +381,10 @@ static int default_ok(struct mirror *m)
909 381
910static int mirror_available(struct mirror_set *ms, struct bio *bio) 382static int mirror_available(struct mirror_set *ms, struct bio *bio)
911{ 383{
912 region_t region = bio_to_region(&ms->rh, bio); 384 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
385 region_t region = dm_rh_bio_to_region(ms->rh, bio);
913 386
914 if (ms->rh.log->type->in_sync(ms->rh.log, region, 0)) 387 if (log->type->in_sync(log, region, 0))
915 return choose_mirror(ms, bio->bi_sector) ? 1 : 0; 388 return choose_mirror(ms, bio->bi_sector) ? 1 : 0;
916 389
917 return 0; 390 return 0;
@@ -985,7 +458,14 @@ static void read_async_bio(struct mirror *m, struct bio *bio)
985 458
986 map_region(&io, m, bio); 459 map_region(&io, m, bio);
987 bio_set_m(bio, m); 460 bio_set_m(bio, m);
988 (void) dm_io(&io_req, 1, &io, NULL); 461 BUG_ON(dm_io(&io_req, 1, &io, NULL));
462}
463
464static inline int region_in_sync(struct mirror_set *ms, region_t region,
465 int may_block)
466{
467 int state = dm_rh_get_state(ms->rh, region, may_block);
468 return state == DM_RH_CLEAN || state == DM_RH_DIRTY;
989} 469}
990 470
991static void do_reads(struct mirror_set *ms, struct bio_list *reads) 471static void do_reads(struct mirror_set *ms, struct bio_list *reads)
@@ -995,13 +475,13 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
995 struct mirror *m; 475 struct mirror *m;
996 476
997 while ((bio = bio_list_pop(reads))) { 477 while ((bio = bio_list_pop(reads))) {
998 region = bio_to_region(&ms->rh, bio); 478 region = dm_rh_bio_to_region(ms->rh, bio);
999 m = get_default_mirror(ms); 479 m = get_default_mirror(ms);
1000 480
1001 /* 481 /*
1002 * We can only read balance if the region is in sync. 482 * We can only read balance if the region is in sync.
1003 */ 483 */
1004 if (likely(rh_in_sync(&ms->rh, region, 1))) 484 if (likely(region_in_sync(ms, region, 1)))
1005 m = choose_mirror(ms, bio->bi_sector); 485 m = choose_mirror(ms, bio->bi_sector);
1006 else if (m && atomic_read(&m->error_count)) 486 else if (m && atomic_read(&m->error_count))
1007 m = NULL; 487 m = NULL;
@@ -1024,57 +504,6 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
1024 * NOSYNC: increment pending, just write to the default mirror 504 * NOSYNC: increment pending, just write to the default mirror
1025 *---------------------------------------------------------------*/ 505 *---------------------------------------------------------------*/
1026 506
1027/* __bio_mark_nosync
1028 * @ms
1029 * @bio
1030 * @done
1031 * @error
1032 *
1033 * The bio was written on some mirror(s) but failed on other mirror(s).
1034 * We can successfully endio the bio but should avoid the region being
1035 * marked clean by setting the state RH_NOSYNC.
1036 *
1037 * This function is _not_ safe in interrupt context!
1038 */
1039static void __bio_mark_nosync(struct mirror_set *ms,
1040 struct bio *bio, unsigned done, int error)
1041{
1042 unsigned long flags;
1043 struct region_hash *rh = &ms->rh;
1044 struct dm_dirty_log *log = ms->rh.log;
1045 struct region *reg;
1046 region_t region = bio_to_region(rh, bio);
1047 int recovering = 0;
1048
1049 /* We must inform the log that the sync count has changed. */
1050 log->type->set_region_sync(log, region, 0);
1051 ms->in_sync = 0;
1052
1053 read_lock(&rh->hash_lock);
1054 reg = __rh_find(rh, region);
1055 read_unlock(&rh->hash_lock);
1056
1057 /* region hash entry should exist because write was in-flight */
1058 BUG_ON(!reg);
1059 BUG_ON(!list_empty(&reg->list));
1060
1061 spin_lock_irqsave(&rh->region_lock, flags);
1062 /*
1063 * Possible cases:
1064 * 1) RH_DIRTY
1065 * 2) RH_NOSYNC: was dirty, other preceeding writes failed
1066 * 3) RH_RECOVERING: flushing pending writes
1067 * Either case, the region should have not been connected to list.
1068 */
1069 recovering = (reg->state == RH_RECOVERING);
1070 reg->state = RH_NOSYNC;
1071 BUG_ON(!list_empty(&reg->list));
1072 spin_unlock_irqrestore(&rh->region_lock, flags);
1073
1074 bio_endio(bio, error);
1075 if (recovering)
1076 complete_resync_work(reg, 0);
1077}
1078 507
1079static void write_callback(unsigned long error, void *context) 508static void write_callback(unsigned long error, void *context)
1080{ 509{
@@ -1119,7 +548,7 @@ static void write_callback(unsigned long error, void *context)
1119 bio_list_add(&ms->failures, bio); 548 bio_list_add(&ms->failures, bio);
1120 spin_unlock_irqrestore(&ms->lock, flags); 549 spin_unlock_irqrestore(&ms->lock, flags);
1121 if (should_wake) 550 if (should_wake)
1122 wake(ms); 551 wakeup_mirrord(ms);
1123 return; 552 return;
1124 } 553 }
1125out: 554out:
@@ -1149,7 +578,7 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
1149 */ 578 */
1150 bio_set_m(bio, get_default_mirror(ms)); 579 bio_set_m(bio, get_default_mirror(ms));
1151 580
1152 (void) dm_io(&io_req, ms->nr_mirrors, io, NULL); 581 BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL));
1153} 582}
1154 583
1155static void do_writes(struct mirror_set *ms, struct bio_list *writes) 584static void do_writes(struct mirror_set *ms, struct bio_list *writes)
@@ -1169,18 +598,19 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
1169 bio_list_init(&recover); 598 bio_list_init(&recover);
1170 599
1171 while ((bio = bio_list_pop(writes))) { 600 while ((bio = bio_list_pop(writes))) {
1172 state = rh_state(&ms->rh, bio_to_region(&ms->rh, bio), 1); 601 state = dm_rh_get_state(ms->rh,
602 dm_rh_bio_to_region(ms->rh, bio), 1);
1173 switch (state) { 603 switch (state) {
1174 case RH_CLEAN: 604 case DM_RH_CLEAN:
1175 case RH_DIRTY: 605 case DM_RH_DIRTY:
1176 this_list = &sync; 606 this_list = &sync;
1177 break; 607 break;
1178 608
1179 case RH_NOSYNC: 609 case DM_RH_NOSYNC:
1180 this_list = &nosync; 610 this_list = &nosync;
1181 break; 611 break;
1182 612
1183 case RH_RECOVERING: 613 case DM_RH_RECOVERING:
1184 this_list = &recover; 614 this_list = &recover;
1185 break; 615 break;
1186 } 616 }
@@ -1193,9 +623,9 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
1193 * be written to (writes to recover regions are going to 623 * be written to (writes to recover regions are going to
1194 * be delayed). 624 * be delayed).
1195 */ 625 */
1196 rh_inc_pending(&ms->rh, &sync); 626 dm_rh_inc_pending(ms->rh, &sync);
1197 rh_inc_pending(&ms->rh, &nosync); 627 dm_rh_inc_pending(ms->rh, &nosync);
1198 ms->log_failure = rh_flush(&ms->rh) ? 1 : 0; 628 ms->log_failure = dm_rh_flush(ms->rh) ? 1 : 0;
1199 629
1200 /* 630 /*
1201 * Dispatch io. 631 * Dispatch io.
@@ -1204,13 +634,13 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
1204 spin_lock_irq(&ms->lock); 634 spin_lock_irq(&ms->lock);
1205 bio_list_merge(&ms->failures, &sync); 635 bio_list_merge(&ms->failures, &sync);
1206 spin_unlock_irq(&ms->lock); 636 spin_unlock_irq(&ms->lock);
1207 wake(ms); 637 wakeup_mirrord(ms);
1208 } else 638 } else
1209 while ((bio = bio_list_pop(&sync))) 639 while ((bio = bio_list_pop(&sync)))
1210 do_write(ms, bio); 640 do_write(ms, bio);
1211 641
1212 while ((bio = bio_list_pop(&recover))) 642 while ((bio = bio_list_pop(&recover)))
1213 rh_delay(&ms->rh, bio); 643 dm_rh_delay(ms->rh, bio);
1214 644
1215 while ((bio = bio_list_pop(&nosync))) { 645 while ((bio = bio_list_pop(&nosync))) {
1216 map_bio(get_default_mirror(ms), bio); 646 map_bio(get_default_mirror(ms), bio);
@@ -1226,8 +656,10 @@ static void do_failures(struct mirror_set *ms, struct bio_list *failures)
1226 return; 656 return;
1227 657
1228 if (!ms->log_failure) { 658 if (!ms->log_failure) {
1229 while ((bio = bio_list_pop(failures))) 659 while ((bio = bio_list_pop(failures))) {
1230 __bio_mark_nosync(ms, bio, bio->bi_size, 0); 660 ms->in_sync = 0;
661 dm_rh_mark_nosync(ms->rh, bio, bio->bi_size, 0);
662 }
1231 return; 663 return;
1232 } 664 }
1233 665
@@ -1280,8 +712,8 @@ static void trigger_event(struct work_struct *work)
1280 *---------------------------------------------------------------*/ 712 *---------------------------------------------------------------*/
1281static void do_mirror(struct work_struct *work) 713static void do_mirror(struct work_struct *work)
1282{ 714{
1283 struct mirror_set *ms =container_of(work, struct mirror_set, 715 struct mirror_set *ms = container_of(work, struct mirror_set,
1284 kmirrord_work); 716 kmirrord_work);
1285 struct bio_list reads, writes, failures; 717 struct bio_list reads, writes, failures;
1286 unsigned long flags; 718 unsigned long flags;
1287 719
@@ -1294,7 +726,7 @@ static void do_mirror(struct work_struct *work)
1294 bio_list_init(&ms->failures); 726 bio_list_init(&ms->failures);
1295 spin_unlock_irqrestore(&ms->lock, flags); 727 spin_unlock_irqrestore(&ms->lock, flags);
1296 728
1297 rh_update_states(&ms->rh); 729 dm_rh_update_states(ms->rh, errors_handled(ms));
1298 do_recovery(ms); 730 do_recovery(ms);
1299 do_reads(ms, &reads); 731 do_reads(ms, &reads);
1300 do_writes(ms, &writes); 732 do_writes(ms, &writes);
@@ -1303,7 +735,6 @@ static void do_mirror(struct work_struct *work)
1303 dm_table_unplug_all(ms->ti->table); 735 dm_table_unplug_all(ms->ti->table);
1304} 736}
1305 737
1306
1307/*----------------------------------------------------------------- 738/*-----------------------------------------------------------------
1308 * Target functions 739 * Target functions
1309 *---------------------------------------------------------------*/ 740 *---------------------------------------------------------------*/
@@ -1315,9 +746,6 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors,
1315 size_t len; 746 size_t len;
1316 struct mirror_set *ms = NULL; 747 struct mirror_set *ms = NULL;
1317 748
1318 if (array_too_big(sizeof(*ms), sizeof(ms->mirror[0]), nr_mirrors))
1319 return NULL;
1320
1321 len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors); 749 len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);
1322 750
1323 ms = kzalloc(len, GFP_KERNEL); 751 ms = kzalloc(len, GFP_KERNEL);
@@ -1353,7 +781,11 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors,
1353 return NULL; 781 return NULL;
1354 } 782 }
1355 783
1356 if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) { 784 ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord,
785 wakeup_all_recovery_waiters,
786 ms->ti->begin, MAX_RECOVERY,
787 dl, region_size, ms->nr_regions);
788 if (IS_ERR(ms->rh)) {
1357 ti->error = "Error creating dirty region hash"; 789 ti->error = "Error creating dirty region hash";
1358 dm_io_client_destroy(ms->io_client); 790 dm_io_client_destroy(ms->io_client);
1359 mempool_destroy(ms->read_record_pool); 791 mempool_destroy(ms->read_record_pool);
@@ -1371,7 +803,7 @@ static void free_context(struct mirror_set *ms, struct dm_target *ti,
1371 dm_put_device(ti, ms->mirror[m].dev); 803 dm_put_device(ti, ms->mirror[m].dev);
1372 804
1373 dm_io_client_destroy(ms->io_client); 805 dm_io_client_destroy(ms->io_client);
1374 rh_exit(&ms->rh); 806 dm_region_hash_destroy(ms->rh);
1375 mempool_destroy(ms->read_record_pool); 807 mempool_destroy(ms->read_record_pool);
1376 kfree(ms); 808 kfree(ms);
1377} 809}
@@ -1411,10 +843,10 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
1411 * Create dirty log: log_type #log_params <log_params> 843 * Create dirty log: log_type #log_params <log_params>
1412 */ 844 */
1413static struct dm_dirty_log *create_dirty_log(struct dm_target *ti, 845static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
1414 unsigned int argc, char **argv, 846 unsigned argc, char **argv,
1415 unsigned int *args_used) 847 unsigned *args_used)
1416{ 848{
1417 unsigned int param_count; 849 unsigned param_count;
1418 struct dm_dirty_log *dl; 850 struct dm_dirty_log *dl;
1419 851
1420 if (argc < 2) { 852 if (argc < 2) {
@@ -1545,7 +977,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1545 } 977 }
1546 978
1547 ti->private = ms; 979 ti->private = ms;
1548 ti->split_io = ms->rh.region_size; 980 ti->split_io = dm_rh_get_region_size(ms->rh);
1549 981
1550 ms->kmirrord_wq = create_singlethread_workqueue("kmirrord"); 982 ms->kmirrord_wq = create_singlethread_workqueue("kmirrord");
1551 if (!ms->kmirrord_wq) { 983 if (!ms->kmirrord_wq) {
@@ -1580,11 +1012,11 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1580 goto err_destroy_wq; 1012 goto err_destroy_wq;
1581 } 1013 }
1582 1014
1583 r = dm_kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client); 1015 r = dm_kcopyd_client_create(DM_KCOPYD_PAGES, &ms->kcopyd_client);
1584 if (r) 1016 if (r)
1585 goto err_destroy_wq; 1017 goto err_destroy_wq;
1586 1018
1587 wake(ms); 1019 wakeup_mirrord(ms);
1588 return 0; 1020 return 0;
1589 1021
1590err_destroy_wq: 1022err_destroy_wq:
@@ -1600,27 +1032,12 @@ static void mirror_dtr(struct dm_target *ti)
1600 1032
1601 del_timer_sync(&ms->timer); 1033 del_timer_sync(&ms->timer);
1602 flush_workqueue(ms->kmirrord_wq); 1034 flush_workqueue(ms->kmirrord_wq);
1035 flush_scheduled_work();
1603 dm_kcopyd_client_destroy(ms->kcopyd_client); 1036 dm_kcopyd_client_destroy(ms->kcopyd_client);
1604 destroy_workqueue(ms->kmirrord_wq); 1037 destroy_workqueue(ms->kmirrord_wq);
1605 free_context(ms, ti, ms->nr_mirrors); 1038 free_context(ms, ti, ms->nr_mirrors);
1606} 1039}
1607 1040
1608static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
1609{
1610 unsigned long flags;
1611 int should_wake = 0;
1612 struct bio_list *bl;
1613
1614 bl = (rw == WRITE) ? &ms->writes : &ms->reads;
1615 spin_lock_irqsave(&ms->lock, flags);
1616 should_wake = !(bl->head);
1617 bio_list_add(bl, bio);
1618 spin_unlock_irqrestore(&ms->lock, flags);
1619
1620 if (should_wake)
1621 wake(ms);
1622}
1623
1624/* 1041/*
1625 * Mirror mapping function 1042 * Mirror mapping function
1626 */ 1043 */
@@ -1631,16 +1048,16 @@ static int mirror_map(struct dm_target *ti, struct bio *bio,
1631 struct mirror *m; 1048 struct mirror *m;
1632 struct mirror_set *ms = ti->private; 1049 struct mirror_set *ms = ti->private;
1633 struct dm_raid1_read_record *read_record = NULL; 1050 struct dm_raid1_read_record *read_record = NULL;
1051 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
1634 1052
1635 if (rw == WRITE) { 1053 if (rw == WRITE) {
1636 /* Save region for mirror_end_io() handler */ 1054 /* Save region for mirror_end_io() handler */
1637 map_context->ll = bio_to_region(&ms->rh, bio); 1055 map_context->ll = dm_rh_bio_to_region(ms->rh, bio);
1638 queue_bio(ms, bio, rw); 1056 queue_bio(ms, bio, rw);
1639 return DM_MAPIO_SUBMITTED; 1057 return DM_MAPIO_SUBMITTED;
1640 } 1058 }
1641 1059
1642 r = ms->rh.log->type->in_sync(ms->rh.log, 1060 r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0);
1643 bio_to_region(&ms->rh, bio), 0);
1644 if (r < 0 && r != -EWOULDBLOCK) 1061 if (r < 0 && r != -EWOULDBLOCK)
1645 return r; 1062 return r;
1646 1063
@@ -1688,7 +1105,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
1688 * We need to dec pending if this was a write. 1105 * We need to dec pending if this was a write.
1689 */ 1106 */
1690 if (rw == WRITE) { 1107 if (rw == WRITE) {
1691 rh_dec(&ms->rh, map_context->ll); 1108 dm_rh_dec(ms->rh, map_context->ll);
1692 return error; 1109 return error;
1693 } 1110 }
1694 1111
@@ -1744,7 +1161,7 @@ out:
1744static void mirror_presuspend(struct dm_target *ti) 1161static void mirror_presuspend(struct dm_target *ti)
1745{ 1162{
1746 struct mirror_set *ms = (struct mirror_set *) ti->private; 1163 struct mirror_set *ms = (struct mirror_set *) ti->private;
1747 struct dm_dirty_log *log = ms->rh.log; 1164 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
1748 1165
1749 atomic_set(&ms->suspend, 1); 1166 atomic_set(&ms->suspend, 1);
1750 1167
@@ -1752,10 +1169,10 @@ static void mirror_presuspend(struct dm_target *ti)
1752 * We must finish up all the work that we've 1169 * We must finish up all the work that we've
1753 * generated (i.e. recovery work). 1170 * generated (i.e. recovery work).
1754 */ 1171 */
1755 rh_stop_recovery(&ms->rh); 1172 dm_rh_stop_recovery(ms->rh);
1756 1173
1757 wait_event(_kmirrord_recovery_stopped, 1174 wait_event(_kmirrord_recovery_stopped,
1758 !atomic_read(&ms->rh.recovery_in_flight)); 1175 !dm_rh_recovery_in_flight(ms->rh));
1759 1176
1760 if (log->type->presuspend && log->type->presuspend(log)) 1177 if (log->type->presuspend && log->type->presuspend(log))
1761 /* FIXME: need better error handling */ 1178 /* FIXME: need better error handling */
@@ -1773,7 +1190,7 @@ static void mirror_presuspend(struct dm_target *ti)
1773static void mirror_postsuspend(struct dm_target *ti) 1190static void mirror_postsuspend(struct dm_target *ti)
1774{ 1191{
1775 struct mirror_set *ms = ti->private; 1192 struct mirror_set *ms = ti->private;
1776 struct dm_dirty_log *log = ms->rh.log; 1193 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
1777 1194
1778 if (log->type->postsuspend && log->type->postsuspend(log)) 1195 if (log->type->postsuspend && log->type->postsuspend(log))
1779 /* FIXME: need better error handling */ 1196 /* FIXME: need better error handling */
@@ -1783,13 +1200,13 @@ static void mirror_postsuspend(struct dm_target *ti)
1783static void mirror_resume(struct dm_target *ti) 1200static void mirror_resume(struct dm_target *ti)
1784{ 1201{
1785 struct mirror_set *ms = ti->private; 1202 struct mirror_set *ms = ti->private;
1786 struct dm_dirty_log *log = ms->rh.log; 1203 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
1787 1204
1788 atomic_set(&ms->suspend, 0); 1205 atomic_set(&ms->suspend, 0);
1789 if (log->type->resume && log->type->resume(log)) 1206 if (log->type->resume && log->type->resume(log))
1790 /* FIXME: need better error handling */ 1207 /* FIXME: need better error handling */
1791 DMWARN("log resume failed"); 1208 DMWARN("log resume failed");
1792 rh_start_recovery(&ms->rh); 1209 dm_rh_start_recovery(ms->rh);
1793} 1210}
1794 1211
1795/* 1212/*
@@ -1821,7 +1238,7 @@ static int mirror_status(struct dm_target *ti, status_type_t type,
1821{ 1238{
1822 unsigned int m, sz = 0; 1239 unsigned int m, sz = 0;
1823 struct mirror_set *ms = (struct mirror_set *) ti->private; 1240 struct mirror_set *ms = (struct mirror_set *) ti->private;
1824 struct dm_dirty_log *log = ms->rh.log; 1241 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
1825 char buffer[ms->nr_mirrors + 1]; 1242 char buffer[ms->nr_mirrors + 1];
1826 1243
1827 switch (type) { 1244 switch (type) {
@@ -1834,15 +1251,15 @@ static int mirror_status(struct dm_target *ti, status_type_t type,
1834 buffer[m] = '\0'; 1251 buffer[m] = '\0';
1835 1252
1836 DMEMIT("%llu/%llu 1 %s ", 1253 DMEMIT("%llu/%llu 1 %s ",
1837 (unsigned long long)log->type->get_sync_count(ms->rh.log), 1254 (unsigned long long)log->type->get_sync_count(log),
1838 (unsigned long long)ms->nr_regions, buffer); 1255 (unsigned long long)ms->nr_regions, buffer);
1839 1256
1840 sz += log->type->status(ms->rh.log, type, result+sz, maxlen-sz); 1257 sz += log->type->status(log, type, result+sz, maxlen-sz);
1841 1258
1842 break; 1259 break;
1843 1260
1844 case STATUSTYPE_TABLE: 1261 case STATUSTYPE_TABLE:
1845 sz = log->type->status(ms->rh.log, type, result, maxlen); 1262 sz = log->type->status(log, type, result, maxlen);
1846 1263
1847 DMEMIT("%d", ms->nr_mirrors); 1264 DMEMIT("%d", ms->nr_mirrors);
1848 for (m = 0; m < ms->nr_mirrors; m++) 1265 for (m = 0; m < ms->nr_mirrors; m++)
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
new file mode 100644
index 000000000000..59f8d9df9e1a
--- /dev/null
+++ b/drivers/md/dm-region-hash.c
@@ -0,0 +1,704 @@
1/*
2 * Copyright (C) 2003 Sistina Software Limited.
3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
4 *
5 * This file is released under the GPL.
6 */
7
8#include <linux/dm-dirty-log.h>
9#include <linux/dm-region-hash.h>
10
11#include <linux/ctype.h>
12#include <linux/init.h>
13#include <linux/module.h>
14#include <linux/vmalloc.h>
15
16#include "dm.h"
17#include "dm-bio-list.h"
18
19#define DM_MSG_PREFIX "region hash"
20
21/*-----------------------------------------------------------------
22 * Region hash
23 *
24 * The mirror splits itself up into discrete regions. Each
25 * region can be in one of three states: clean, dirty,
26 * nosync. There is no need to put clean regions in the hash.
27 *
28 * In addition to being present in the hash table a region _may_
29 * be present on one of three lists.
30 *
31 * clean_regions: Regions on this list have no io pending to
32 * them, they are in sync, we are no longer interested in them,
33 * they are dull. dm_rh_update_states() will remove them from the
34 * hash table.
35 *
36 * quiesced_regions: These regions have been spun down, ready
37 * for recovery. dm_rh_recovery_start() will remove regions from
38 * this list and hand them to kmirrord, which will schedule the
39 * recovery io with kcopyd.
40 *
41 * recovered_regions: Regions that kcopyd has successfully
42 * recovered. dm_rh_update_states() will now schedule any delayed
43 * io, up the recovery_count, and remove the region from the
44 * hash.
45 *
46 * There are 2 locks:
47 * A rw spin lock 'hash_lock' protects just the hash table,
48 * this is never held in write mode from interrupt context,
49 * which I believe means that we only have to disable irqs when
50 * doing a write lock.
51 *
52 * An ordinary spin lock 'region_lock' that protects the three
53 * lists in the region_hash, with the 'state', 'list' and
54 * 'delayed_bios' fields of the regions. This is used from irq
55 * context, so all other uses will have to suspend local irqs.
56 *---------------------------------------------------------------*/
57struct dm_region_hash {
58 uint32_t region_size;
59 unsigned region_shift;
60
61 /* holds persistent region state */
62 struct dm_dirty_log *log;
63
64 /* hash table */
65 rwlock_t hash_lock;
66 mempool_t *region_pool;
67 unsigned mask;
68 unsigned nr_buckets;
69 unsigned prime;
70 unsigned shift;
71 struct list_head *buckets;
72
73 unsigned max_recovery; /* Max # of regions to recover in parallel */
74
75 spinlock_t region_lock;
76 atomic_t recovery_in_flight;
77 struct semaphore recovery_count;
78 struct list_head clean_regions;
79 struct list_head quiesced_regions;
80 struct list_head recovered_regions;
81 struct list_head failed_recovered_regions;
82
83 void *context;
84 sector_t target_begin;
85
86 /* Callback function to schedule bio writes */
87 void (*dispatch_bios)(void *context, struct bio_list *bios);
88
89 /* Callback function to wake up the caller's worker thread. */
90 void (*wakeup_workers)(void *context);
91
92 /* Callback function to wake up the caller's recovery waiters. */
93 void (*wakeup_all_recovery_waiters)(void *context);
94};
95
96struct dm_region {
97 struct dm_region_hash *rh; /* FIXME: can we get rid of this ? */
98 region_t key;
99 int state;
100
101 struct list_head hash_list;
102 struct list_head list;
103
104 atomic_t pending;
105 struct bio_list delayed_bios;
106};
107
108/*
109 * Conversion fns
110 */
111static region_t dm_rh_sector_to_region(struct dm_region_hash *rh, sector_t sector)
112{
113 return sector >> rh->region_shift;
114}
115
116sector_t dm_rh_region_to_sector(struct dm_region_hash *rh, region_t region)
117{
118 return region << rh->region_shift;
119}
120EXPORT_SYMBOL_GPL(dm_rh_region_to_sector);
121
122region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio)
123{
124 return dm_rh_sector_to_region(rh, bio->bi_sector - rh->target_begin);
125}
126EXPORT_SYMBOL_GPL(dm_rh_bio_to_region);
127
128void *dm_rh_region_context(struct dm_region *reg)
129{
130 return reg->rh->context;
131}
132EXPORT_SYMBOL_GPL(dm_rh_region_context);
133
134region_t dm_rh_get_region_key(struct dm_region *reg)
135{
136 return reg->key;
137}
138EXPORT_SYMBOL_GPL(dm_rh_get_region_key);
139
140sector_t dm_rh_get_region_size(struct dm_region_hash *rh)
141{
142 return rh->region_size;
143}
144EXPORT_SYMBOL_GPL(dm_rh_get_region_size);
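dm_rh_bio_to_region() and dm_rh_region_to_sector() are pure shift arithmetic: the bio's start sector is made relative to the target and divided by the region size, which has to be a power of two for region_shift = ffs(region_size) - 1 to be exact. A minimal user-space sketch of the same conversion, with hypothetical values:

#include <stdint.h>
#include <stdio.h>
#include <strings.h>	/* ffs() */

typedef uint64_t sector_t;
typedef uint64_t region_t;

int main(void)
{
	uint32_t region_size = 1024;		/* sectors per region (power of two) */
	unsigned region_shift = ffs(region_size) - 1;	/* 1024 -> shift by 10 */
	sector_t target_begin = 2048;		/* hypothetical target start */
	sector_t bi_sector = 10240;		/* hypothetical bio start sector */

	/* Mirrors dm_rh_bio_to_region(): offset into the target, then shift. */
	region_t region = (bi_sector - target_begin) >> region_shift;

	/* Mirrors dm_rh_region_to_sector(): sector offset of the region
	 * relative to the start of the target. */
	sector_t region_start = (sector_t)region << region_shift;

	printf("sector %llu -> region %llu (region begins at target offset %llu)\n",
	       (unsigned long long)bi_sector, (unsigned long long)region,
	       (unsigned long long)region_start);
	return 0;
}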
145
146/*
147 * FIXME: shall we pass in a structure instead of all these args to
148 * dm_region_hash_create()????
149 */
150#define RH_HASH_MULT 2654435387U
151#define RH_HASH_SHIFT 12
152
153#define MIN_REGIONS 64
154struct dm_region_hash *dm_region_hash_create(
155 void *context, void (*dispatch_bios)(void *context,
156 struct bio_list *bios),
157 void (*wakeup_workers)(void *context),
158 void (*wakeup_all_recovery_waiters)(void *context),
159 sector_t target_begin, unsigned max_recovery,
160 struct dm_dirty_log *log, uint32_t region_size,
161 region_t nr_regions)
162{
163 struct dm_region_hash *rh;
164 unsigned nr_buckets, max_buckets;
165 size_t i;
166
167 /*
168 * Calculate a suitable number of buckets for our hash
169 * table.
170 */
171 max_buckets = nr_regions >> 6;
172 for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1)
173 ;
174 nr_buckets >>= 1;
175
176 rh = kmalloc(sizeof(*rh), GFP_KERNEL);
177 if (!rh) {
178 DMERR("unable to allocate region hash memory");
179 return ERR_PTR(-ENOMEM);
180 }
181
182 rh->context = context;
183 rh->dispatch_bios = dispatch_bios;
184 rh->wakeup_workers = wakeup_workers;
185 rh->wakeup_all_recovery_waiters = wakeup_all_recovery_waiters;
186 rh->target_begin = target_begin;
187 rh->max_recovery = max_recovery;
188 rh->log = log;
189 rh->region_size = region_size;
190 rh->region_shift = ffs(region_size) - 1;
191 rwlock_init(&rh->hash_lock);
192 rh->mask = nr_buckets - 1;
193 rh->nr_buckets = nr_buckets;
194
195 rh->shift = RH_HASH_SHIFT;
196 rh->prime = RH_HASH_MULT;
197
198 rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets));
199 if (!rh->buckets) {
200 DMERR("unable to allocate region hash bucket memory");
201 kfree(rh);
202 return ERR_PTR(-ENOMEM);
203 }
204
205 for (i = 0; i < nr_buckets; i++)
206 INIT_LIST_HEAD(rh->buckets + i);
207
208 spin_lock_init(&rh->region_lock);
209 sema_init(&rh->recovery_count, 0);
210 atomic_set(&rh->recovery_in_flight, 0);
211 INIT_LIST_HEAD(&rh->clean_regions);
212 INIT_LIST_HEAD(&rh->quiesced_regions);
213 INIT_LIST_HEAD(&rh->recovered_regions);
214 INIT_LIST_HEAD(&rh->failed_recovered_regions);
215
216 rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
217 sizeof(struct dm_region));
218 if (!rh->region_pool) {
219 vfree(rh->buckets);
220 kfree(rh);
221 rh = ERR_PTR(-ENOMEM);
222 }
223
224 return rh;
225}
226EXPORT_SYMBOL_GPL(dm_region_hash_create);
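The dm-raid1 hunk earlier in this patch shows the intended call shape for this constructor: the mirror set passes itself as the context, its dispatch_bios/wakeup_mirrord/wakeup_all_recovery_waiters helpers as the three callbacks, plus ti->begin, MAX_RECOVERY, the dirty log, the region size and the region count, and then checks the result with IS_ERR() rather than comparing against NULL.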
227
228void dm_region_hash_destroy(struct dm_region_hash *rh)
229{
230 unsigned h;
231 struct dm_region *reg, *nreg;
232
233 BUG_ON(!list_empty(&rh->quiesced_regions));
234 for (h = 0; h < rh->nr_buckets; h++) {
235 list_for_each_entry_safe(reg, nreg, rh->buckets + h,
236 hash_list) {
237 BUG_ON(atomic_read(&reg->pending));
238 mempool_free(reg, rh->region_pool);
239 }
240 }
241
242 if (rh->log)
243 dm_dirty_log_destroy(rh->log);
244
245 if (rh->region_pool)
246 mempool_destroy(rh->region_pool);
247
248 vfree(rh->buckets);
249 kfree(rh);
250}
251EXPORT_SYMBOL_GPL(dm_region_hash_destroy);
252
253struct dm_dirty_log *dm_rh_dirty_log(struct dm_region_hash *rh)
254{
255 return rh->log;
256}
257EXPORT_SYMBOL_GPL(dm_rh_dirty_log);
258
259static unsigned rh_hash(struct dm_region_hash *rh, region_t region)
260{
261 return (unsigned) ((region * rh->prime) >> rh->shift) & rh->mask;
262}
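rh_hash() is a simple multiplicative hash: the region number is multiplied by a large odd constant, shifted down by RH_HASH_SHIFT and masked to the power-of-two bucket count (dm_region_hash_create() above picks at least 64 buckets, roughly one per 64-128 regions for large region counts). A self-contained sketch of the same bucket selection, using the constants from this file and a hypothetical region count:

#include <stdint.h>
#include <stdio.h>

#define RH_HASH_MULT	2654435387U
#define RH_HASH_SHIFT	12

typedef uint64_t region_t;

/* Same arithmetic as rh_hash(): multiply, shift, mask to the bucket count. */
static unsigned rh_hash_sketch(region_t region, unsigned mask)
{
	return (unsigned)((region * RH_HASH_MULT) >> RH_HASH_SHIFT) & mask;
}

int main(void)
{
	region_t nr_regions = 100000;			/* hypothetical */
	unsigned max_buckets = nr_regions >> 6;
	unsigned nr_buckets;
	region_t r;

	/* Same sizing loop as dm_region_hash_create(). */
	for (nr_buckets = 128; nr_buckets < max_buckets; nr_buckets <<= 1)
		;
	nr_buckets >>= 1;				/* 1024 buckets here */

	for (r = 0; r < 8; r++)
		printf("region %llu -> bucket %u of %u\n",
		       (unsigned long long)r,
		       rh_hash_sketch(r, nr_buckets - 1), nr_buckets);
	return 0;
}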
263
264static struct dm_region *__rh_lookup(struct dm_region_hash *rh, region_t region)
265{
266 struct dm_region *reg;
267 struct list_head *bucket = rh->buckets + rh_hash(rh, region);
268
269 list_for_each_entry(reg, bucket, hash_list)
270 if (reg->key == region)
271 return reg;
272
273 return NULL;
274}
275
276static void __rh_insert(struct dm_region_hash *rh, struct dm_region *reg)
277{
278 list_add(&reg->hash_list, rh->buckets + rh_hash(rh, reg->key));
279}
280
281static struct dm_region *__rh_alloc(struct dm_region_hash *rh, region_t region)
282{
283 struct dm_region *reg, *nreg;
284
285 nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
286 if (unlikely(!nreg))
287 nreg = kmalloc(sizeof(*nreg), GFP_NOIO);
288
289 nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
290 DM_RH_CLEAN : DM_RH_NOSYNC;
291 nreg->rh = rh;
292 nreg->key = region;
293 INIT_LIST_HEAD(&nreg->list);
294 atomic_set(&nreg->pending, 0);
295 bio_list_init(&nreg->delayed_bios);
296
297 write_lock_irq(&rh->hash_lock);
298 reg = __rh_lookup(rh, region);
299 if (reg)
300 /* We lost the race. */
301 mempool_free(nreg, rh->region_pool);
302 else {
303 __rh_insert(rh, nreg);
304 if (nreg->state == DM_RH_CLEAN) {
305 spin_lock(&rh->region_lock);
306 list_add(&nreg->list, &rh->clean_regions);
307 spin_unlock(&rh->region_lock);
308 }
309
310 reg = nreg;
311 }
312 write_unlock_irq(&rh->hash_lock);
313
314 return reg;
315}
316
317static struct dm_region *__rh_find(struct dm_region_hash *rh, region_t region)
318{
319 struct dm_region *reg;
320
321 reg = __rh_lookup(rh, region);
322 if (!reg) {
323 read_unlock(&rh->hash_lock);
324 reg = __rh_alloc(rh, region);
325 read_lock(&rh->hash_lock);
326 }
327
328 return reg;
329}
330
331int dm_rh_get_state(struct dm_region_hash *rh, region_t region, int may_block)
332{
333 int r;
334 struct dm_region *reg;
335
336 read_lock(&rh->hash_lock);
337 reg = __rh_lookup(rh, region);
338 read_unlock(&rh->hash_lock);
339
340 if (reg)
341 return reg->state;
342
343 /*
344 * The region wasn't in the hash, so we fall back to the
345 * dirty log.
346 */
347 r = rh->log->type->in_sync(rh->log, region, may_block);
348
349 /*
350 * Any error from the dirty log (eg. -EWOULDBLOCK) gets
351 * taken as a DM_RH_NOSYNC
352 */
353 return r == 1 ? DM_RH_CLEAN : DM_RH_NOSYNC;
354}
355EXPORT_SYMBOL_GPL(dm_rh_get_state);
356
357static void complete_resync_work(struct dm_region *reg, int success)
358{
359 struct dm_region_hash *rh = reg->rh;
360
361 rh->log->type->set_region_sync(rh->log, reg->key, success);
362
363 /*
364 * Dispatch the bios before we call 'wake_up_all'.
365 * This is important because if we are suspending,
366 * we want to know that recovery is complete and
367 * the work queue is flushed. If we wake_up_all
368 * before we dispatch_bios (queue bios and call wake()),
369 * then we risk suspending before the work queue
370 * has been properly flushed.
371 */
372 rh->dispatch_bios(rh->context, &reg->delayed_bios);
373 if (atomic_dec_and_test(&rh->recovery_in_flight))
374 rh->wakeup_all_recovery_waiters(rh->context);
375 up(&rh->recovery_count);
376}
377
378/* dm_rh_mark_nosync
379 * @rh
380 * @bio
381 * @done
382 * @error
383 *
384 * The bio was written on some mirror(s) but failed on other mirror(s).
385 * We can still successfully endio the bio, but we must prevent the
386 * region from being marked clean by setting its state to DM_RH_NOSYNC.
387 *
388 * This function is _not_ safe in interrupt context!
389 */
390void dm_rh_mark_nosync(struct dm_region_hash *rh,
391 struct bio *bio, unsigned done, int error)
392{
393 unsigned long flags;
394 struct dm_dirty_log *log = rh->log;
395 struct dm_region *reg;
396 region_t region = dm_rh_bio_to_region(rh, bio);
397 int recovering = 0;
398
399 /* We must inform the log that the sync count has changed. */
400 log->type->set_region_sync(log, region, 0);
401
402 read_lock(&rh->hash_lock);
403 reg = __rh_find(rh, region);
404 read_unlock(&rh->hash_lock);
405
406 /* region hash entry should exist because write was in-flight */
407 BUG_ON(!reg);
408 BUG_ON(!list_empty(&reg->list));
409
410 spin_lock_irqsave(&rh->region_lock, flags);
411 /*
412 * Possible cases:
413 * 1) DM_RH_DIRTY
414 * 2) DM_RH_NOSYNC: was dirty, other preceding writes failed
415 * 3) DM_RH_RECOVERING: flushing pending writes
416 * In either case, the region should not have been connected to the list.
417 */
418 recovering = (reg->state == DM_RH_RECOVERING);
419 reg->state = DM_RH_NOSYNC;
420 BUG_ON(!list_empty(&reg->list));
421 spin_unlock_irqrestore(&rh->region_lock, flags);
422
423 bio_endio(bio, error);
424 if (recovering)
425 complete_resync_work(reg, 0);
426}
427EXPORT_SYMBOL_GPL(dm_rh_mark_nosync);
428
429void dm_rh_update_states(struct dm_region_hash *rh, int errors_handled)
430{
431 struct dm_region *reg, *next;
432
433 LIST_HEAD(clean);
434 LIST_HEAD(recovered);
435 LIST_HEAD(failed_recovered);
436
437 /*
438 * Quickly grab the lists.
439 */
440 write_lock_irq(&rh->hash_lock);
441 spin_lock(&rh->region_lock);
442 if (!list_empty(&rh->clean_regions)) {
443 list_splice_init(&rh->clean_regions, &clean);
444
445 list_for_each_entry(reg, &clean, list)
446 list_del(&reg->hash_list);
447 }
448
449 if (!list_empty(&rh->recovered_regions)) {
450 list_splice_init(&rh->recovered_regions, &recovered);
451
452 list_for_each_entry(reg, &recovered, list)
453 list_del(&reg->hash_list);
454 }
455
456 if (!list_empty(&rh->failed_recovered_regions)) {
457 list_splice_init(&rh->failed_recovered_regions,
458 &failed_recovered);
459
460 list_for_each_entry(reg, &failed_recovered, list)
461 list_del(&reg->hash_list);
462 }
463
464 spin_unlock(&rh->region_lock);
465 write_unlock_irq(&rh->hash_lock);
466
467 /*
468 * All the regions on the recovered and clean lists have
469 * now been pulled out of the system, so no need to do
470 * any more locking.
471 */
472 list_for_each_entry_safe(reg, next, &recovered, list) {
473 rh->log->type->clear_region(rh->log, reg->key);
474 complete_resync_work(reg, 1);
475 mempool_free(reg, rh->region_pool);
476 }
477
478 list_for_each_entry_safe(reg, next, &failed_recovered, list) {
479 complete_resync_work(reg, errors_handled ? 0 : 1);
480 mempool_free(reg, rh->region_pool);
481 }
482
483 list_for_each_entry_safe(reg, next, &clean, list) {
484 rh->log->type->clear_region(rh->log, reg->key);
485 mempool_free(reg, rh->region_pool);
486 }
487
488 rh->log->type->flush(rh->log);
489}
490EXPORT_SYMBOL_GPL(dm_rh_update_states);
491
492static void rh_inc(struct dm_region_hash *rh, region_t region)
493{
494 struct dm_region *reg;
495
496 read_lock(&rh->hash_lock);
497 reg = __rh_find(rh, region);
498
499 spin_lock_irq(&rh->region_lock);
500 atomic_inc(&reg->pending);
501
502 if (reg->state == DM_RH_CLEAN) {
503 reg->state = DM_RH_DIRTY;
504 list_del_init(&reg->list); /* take off the clean list */
505 spin_unlock_irq(&rh->region_lock);
506
507 rh->log->type->mark_region(rh->log, reg->key);
508 } else
509 spin_unlock_irq(&rh->region_lock);
510
511
512 read_unlock(&rh->hash_lock);
513}
514
515void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios)
516{
517 struct bio *bio;
518
519 for (bio = bios->head; bio; bio = bio->bi_next)
520 rh_inc(rh, dm_rh_bio_to_region(rh, bio));
521}
522EXPORT_SYMBOL_GPL(dm_rh_inc_pending);
523
524void dm_rh_dec(struct dm_region_hash *rh, region_t region)
525{
526 unsigned long flags;
527 struct dm_region *reg;
528 int should_wake = 0;
529
530 read_lock(&rh->hash_lock);
531 reg = __rh_lookup(rh, region);
532 read_unlock(&rh->hash_lock);
533
534 spin_lock_irqsave(&rh->region_lock, flags);
535 if (atomic_dec_and_test(&reg->pending)) {
536 /*
537 * There is no pending I/O for this region.
538 * We can move the region to the corresponding list for the next action.
539 * At this point, the region is not yet connected to any list.
540 *
541 * If the state is DM_RH_NOSYNC, the region should be kept off
542 * the clean list.
543 * The hash entry for DM_RH_NOSYNC will remain in memory
544 * until the region is recovered or the map is reloaded.
545 */
546
547 /* do nothing for DM_RH_NOSYNC */
548 if (reg->state == DM_RH_RECOVERING) {
549 list_add_tail(&reg->list, &rh->quiesced_regions);
550 } else if (reg->state == DM_RH_DIRTY) {
551 reg->state = DM_RH_CLEAN;
552 list_add(&reg->list, &rh->clean_regions);
553 }
554 should_wake = 1;
555 }
556 spin_unlock_irqrestore(&rh->region_lock, flags);
557
558 if (should_wake)
559 rh->wakeup_workers(rh->context);
560}
561EXPORT_SYMBOL_GPL(dm_rh_dec);
562
563/*
564 * Starts quiescing a region in preparation for recovery.
565 */
566static int __rh_recovery_prepare(struct dm_region_hash *rh)
567{
568 int r;
569 region_t region;
570 struct dm_region *reg;
571
572 /*
573 * Ask the dirty log what's next.
574 */
575 r = rh->log->type->get_resync_work(rh->log, &region);
576 if (r <= 0)
577 return r;
578
579 /*
580 * Get this region, and start it quiescing by setting the
581 * recovering flag.
582 */
583 read_lock(&rh->hash_lock);
584 reg = __rh_find(rh, region);
585 read_unlock(&rh->hash_lock);
586
587 spin_lock_irq(&rh->region_lock);
588 reg->state = DM_RH_RECOVERING;
589
590 /* Already quiesced ? */
591 if (atomic_read(&reg->pending))
592 list_del_init(&reg->list);
593 else
594 list_move(&reg->list, &rh->quiesced_regions);
595
596 spin_unlock_irq(&rh->region_lock);
597
598 return 1;
599}
600
601void dm_rh_recovery_prepare(struct dm_region_hash *rh)
602{
603 /* Extra reference to avoid race with dm_rh_stop_recovery */
604 atomic_inc(&rh->recovery_in_flight);
605
606 while (!down_trylock(&rh->recovery_count)) {
607 atomic_inc(&rh->recovery_in_flight);
608 if (__rh_recovery_prepare(rh) <= 0) {
609 atomic_dec(&rh->recovery_in_flight);
610 up(&rh->recovery_count);
611 break;
612 }
613 }
614
615 /* Drop the extra reference */
616 if (atomic_dec_and_test(&rh->recovery_in_flight))
617 rh->wakeup_all_recovery_waiters(rh->context);
618}
619EXPORT_SYMBOL_GPL(dm_rh_recovery_prepare);
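dm_rh_recovery_prepare() takes one extra recovery_in_flight reference up front so the count cannot transiently hit zero (and wake suspend waiters) while regions are still being handed out; only the final decrement after the loop may perform that wakeup. A rough single-threaded user-space sketch of the bookkeeping, with plain counters standing in for the semaphore and the dirty log (all names and numbers are illustrative):

#include <stdbool.h>
#include <stdio.h>

static int recovery_count = 5;	/* stands in for the MAX_RECOVERY-initialised semaphore */
static int recovery_in_flight;	/* stands in for rh->recovery_in_flight */
static int work_remaining = 3;	/* stub: the dirty log has 3 regions to resync */

static bool down_trylock_sketch(void)
{
	if (recovery_count > 0) {
		recovery_count--;
		return false;	/* acquired, mirroring down_trylock() returning 0 */
	}
	return true;
}

static int get_resync_work_stub(void)
{
	return work_remaining-- > 0 ? 1 : 0;
}

int main(void)
{
	/* Extra reference so recovery_in_flight cannot reach zero mid-loop. */
	recovery_in_flight++;

	while (!down_trylock_sketch()) {
		recovery_in_flight++;
		if (get_resync_work_stub() <= 0) {
			recovery_in_flight--;
			recovery_count++;	/* up() the unused slot */
			break;
		}
	}

	/* Drop the extra reference; only now may the waiters be woken. */
	if (--recovery_in_flight == 0)
		printf("nothing in flight, waking recovery waiters\n");
	else
		printf("%d region(s) now quiescing for recovery\n", recovery_in_flight);
	return 0;
}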
620
621/*
622 * Returns any quiesced regions.
623 */
624struct dm_region *dm_rh_recovery_start(struct dm_region_hash *rh)
625{
626 struct dm_region *reg = NULL;
627
628 spin_lock_irq(&rh->region_lock);
629 if (!list_empty(&rh->quiesced_regions)) {
630 reg = list_entry(rh->quiesced_regions.next,
631 struct dm_region, list);
632 list_del_init(&reg->list); /* remove from the quiesced list */
633 }
634 spin_unlock_irq(&rh->region_lock);
635
636 return reg;
637}
638EXPORT_SYMBOL_GPL(dm_rh_recovery_start);
639
640void dm_rh_recovery_end(struct dm_region *reg, int success)
641{
642 struct dm_region_hash *rh = reg->rh;
643
644 spin_lock_irq(&rh->region_lock);
645 if (success)
646 list_add(&reg->list, &reg->rh->recovered_regions);
647 else {
648 reg->state = DM_RH_NOSYNC;
649 list_add(&reg->list, &reg->rh->failed_recovered_regions);
650 }
651 spin_unlock_irq(&rh->region_lock);
652
653 rh->wakeup_workers(rh->context);
654}
655EXPORT_SYMBOL_GPL(dm_rh_recovery_end);
656
657/* Return recovery in flight count. */
658int dm_rh_recovery_in_flight(struct dm_region_hash *rh)
659{
660 return atomic_read(&rh->recovery_in_flight);
661}
662EXPORT_SYMBOL_GPL(dm_rh_recovery_in_flight);
663
664int dm_rh_flush(struct dm_region_hash *rh)
665{
666 return rh->log->type->flush(rh->log);
667}
668EXPORT_SYMBOL_GPL(dm_rh_flush);
669
670void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio)
671{
672 struct dm_region *reg;
673
674 read_lock(&rh->hash_lock);
675 reg = __rh_find(rh, dm_rh_bio_to_region(rh, bio));
676 bio_list_add(&reg->delayed_bios, bio);
677 read_unlock(&rh->hash_lock);
678}
679EXPORT_SYMBOL_GPL(dm_rh_delay);
680
681void dm_rh_stop_recovery(struct dm_region_hash *rh)
682{
683 int i;
684
685 /* wait for any recovering regions */
686 for (i = 0; i < rh->max_recovery; i++)
687 down(&rh->recovery_count);
688}
689EXPORT_SYMBOL_GPL(dm_rh_stop_recovery);
690
691void dm_rh_start_recovery(struct dm_region_hash *rh)
692{
693 int i;
694
695 for (i = 0; i < rh->max_recovery; i++)
696 up(&rh->recovery_count);
697
698 rh->wakeup_workers(rh->context);
699}
700EXPORT_SYMBOL_GPL(dm_rh_start_recovery);
701
702MODULE_DESCRIPTION(DM_NAME " region hash");
703MODULE_AUTHOR("Joe Thornber/Heinz Mauelshagen <dm-devel@redhat.com>");
704MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-round-robin.c b/drivers/md/dm-round-robin.c
index 391dfa2ad434..cdfbf65b28cb 100644
--- a/drivers/md/dm-round-robin.c
+++ b/drivers/md/dm-round-robin.c
@@ -9,7 +9,8 @@
9 * Round-robin path selector. 9 * Round-robin path selector.
10 */ 10 */
11 11
12#include "dm.h" 12#include <linux/device-mapper.h>
13
13#include "dm-path-selector.h" 14#include "dm-path-selector.h"
14 15
15#include <linux/slab.h> 16#include <linux/slab.h>
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 6e5528aecc98..6c96db26b87c 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -229,19 +229,21 @@ static void __insert_origin(struct origin *o)
229 */ 229 */
230static int register_snapshot(struct dm_snapshot *snap) 230static int register_snapshot(struct dm_snapshot *snap)
231{ 231{
232 struct origin *o; 232 struct origin *o, *new_o;
233 struct block_device *bdev = snap->origin->bdev; 233 struct block_device *bdev = snap->origin->bdev;
234 234
235 new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
236 if (!new_o)
237 return -ENOMEM;
238
235 down_write(&_origins_lock); 239 down_write(&_origins_lock);
236 o = __lookup_origin(bdev); 240 o = __lookup_origin(bdev);
237 241
238 if (!o) { 242 if (o)
243 kfree(new_o);
244 else {
239 /* New origin */ 245 /* New origin */
240 o = kmalloc(sizeof(*o), GFP_KERNEL); 246 o = new_o;
241 if (!o) {
242 up_write(&_origins_lock);
243 return -ENOMEM;
244 }
245 247
246 /* Initialise the struct */ 248 /* Initialise the struct */
247 INIT_LIST_HEAD(&o->snapshots); 249 INIT_LIST_HEAD(&o->snapshots);
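The register_snapshot() change above moves the allocation out from under _origins_lock: the new origin is allocated unconditionally before the lock is taken and is simply freed again if the lookup finds that another snapshot already registered the same origin. A minimal pthread-based sketch of that allocate-before-lock pattern (the structures and lookup here are stand-ins, not the dm-snap ones):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct origin_sketch {
	char name[32];
	struct origin_sketch *next;
};

static pthread_mutex_t origins_lock = PTHREAD_MUTEX_INITIALIZER;
static struct origin_sketch *origins;

static struct origin_sketch *lookup_origin(const char *name)
{
	struct origin_sketch *o;

	for (o = origins; o; o = o->next)
		if (!strcmp(o->name, name))
			return o;
	return NULL;
}

static int register_origin(const char *name)
{
	/* Allocate before taking the lock so the lock never covers a
	 * potentially blocking allocation. */
	struct origin_sketch *new_o = malloc(sizeof(*new_o));
	struct origin_sketch *o;

	if (!new_o)
		return -1;

	pthread_mutex_lock(&origins_lock);
	o = lookup_origin(name);
	if (o) {
		free(new_o);	/* lost the race: the origin already exists */
	} else {
		snprintf(new_o->name, sizeof(new_o->name), "%s", name);
		new_o->next = origins;
		origins = new_o;
	}
	pthread_mutex_unlock(&origins_lock);
	return 0;
}

int main(void)
{
	register_origin("sda");
	register_origin("sda");		/* second call frees its pre-allocation */
	printf("registered origin: %s\n", origins->name);
	return 0;
}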
@@ -368,6 +370,7 @@ static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snaps
368 struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool, 370 struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
369 GFP_NOIO); 371 GFP_NOIO);
370 372
373 atomic_inc(&s->pending_exceptions_count);
371 pe->snap = s; 374 pe->snap = s;
372 375
373 return pe; 376 return pe;
@@ -375,7 +378,11 @@ static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snaps
375 378
376static void free_pending_exception(struct dm_snap_pending_exception *pe) 379static void free_pending_exception(struct dm_snap_pending_exception *pe)
377{ 380{
378 mempool_free(pe, pe->snap->pending_pool); 381 struct dm_snapshot *s = pe->snap;
382
383 mempool_free(pe, s->pending_pool);
384 smp_mb__before_atomic_dec();
385 atomic_dec(&s->pending_exceptions_count);
379} 386}
380 387
381static void insert_completed_exception(struct dm_snapshot *s, 388static void insert_completed_exception(struct dm_snapshot *s,
@@ -600,7 +607,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
600 607
601 s->valid = 1; 608 s->valid = 1;
602 s->active = 0; 609 s->active = 0;
603 s->last_percent = 0; 610 atomic_set(&s->pending_exceptions_count, 0);
604 init_rwsem(&s->lock); 611 init_rwsem(&s->lock);
605 spin_lock_init(&s->pe_lock); 612 spin_lock_init(&s->pe_lock);
606 s->ti = ti; 613 s->ti = ti;
@@ -727,6 +734,14 @@ static void snapshot_dtr(struct dm_target *ti)
727 /* After this returns there can be no new kcopyd jobs. */ 734 /* After this returns there can be no new kcopyd jobs. */
728 unregister_snapshot(s); 735 unregister_snapshot(s);
729 736
737 while (atomic_read(&s->pending_exceptions_count))
738 yield();
739 /*
740 * Ensure instructions in mempool_destroy aren't reordered
741 * before atomic_read.
742 */
743 smp_mb();
744
730#ifdef CONFIG_DM_DEBUG 745#ifdef CONFIG_DM_DEBUG
731 for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++) 746 for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
732 BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i])); 747 BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
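The new pending_exceptions_count lets snapshot_dtr() wait until every pending exception taken from the mempool has been returned before the pool itself is destroyed; the barriers keep each mempool_free() ordered before its counter decrement and mempool_destroy() ordered after the final counter read. A compact C11-atomics sketch of that drain-before-destroy pattern (sched_yield() stands in for yield(); all names here are illustrative):

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_PENDING 4

static atomic_int pending_count;	/* stands in for pending_exceptions_count */
static void *pending[NR_PENDING];	/* "in flight" exceptions handed to the worker */

static void *completion_worker(void *arg)
{
	int i;

	(void)arg;
	for (i = 0; i < NR_PENDING; i++) {
		free(pending[i]);	/* stands in for mempool_free(pe, ...) */
		/* Drop the count only after the free is done, as with
		 * smp_mb__before_atomic_dec() followed by atomic_dec(). */
		atomic_fetch_sub_explicit(&pending_count, 1, memory_order_release);
	}
	return NULL;
}

int main(void)
{
	pthread_t t;
	int i;

	/* I/O path: these allocations were already in flight before teardown. */
	for (i = 0; i < NR_PENDING; i++) {
		pending[i] = malloc(64);	/* stands in for mempool_alloc() */
		atomic_fetch_add(&pending_count, 1);
	}

	pthread_create(&t, NULL, completion_worker, NULL);

	/* Teardown: no new allocations can appear; wait out the in-flight ones. */
	while (atomic_load_explicit(&pending_count, memory_order_acquire))
		sched_yield();

	printf("pending count drained, pool may be destroyed\n");
	pthread_join(t, NULL);
	return 0;
}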
@@ -824,8 +839,10 @@ static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe)
824 * the bios for the original write to the origin. 839 * the bios for the original write to the origin.
825 */ 840 */
826 if (primary_pe && 841 if (primary_pe &&
827 atomic_dec_and_test(&primary_pe->ref_count)) 842 atomic_dec_and_test(&primary_pe->ref_count)) {
828 origin_bios = bio_list_get(&primary_pe->origin_bios); 843 origin_bios = bio_list_get(&primary_pe->origin_bios);
844 free_pending_exception(primary_pe);
845 }
829 846
830 /* 847 /*
831 * Free the pe if it's not linked to an origin write or if 848 * Free the pe if it's not linked to an origin write or if
@@ -834,12 +851,6 @@ static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe)
834 if (!primary_pe || primary_pe != pe) 851 if (!primary_pe || primary_pe != pe)
835 free_pending_exception(pe); 852 free_pending_exception(pe);
836 853
837 /*
838 * Free the primary pe if nothing references it.
839 */
840 if (primary_pe && !atomic_read(&primary_pe->ref_count))
841 free_pending_exception(primary_pe);
842
843 return origin_bios; 854 return origin_bios;
844} 855}
845 856
diff --git a/drivers/md/dm-snap.h b/drivers/md/dm-snap.h
index 292c15609ae3..99c0106ede2d 100644
--- a/drivers/md/dm-snap.h
+++ b/drivers/md/dm-snap.h
@@ -9,7 +9,7 @@
9#ifndef DM_SNAPSHOT_H 9#ifndef DM_SNAPSHOT_H
10#define DM_SNAPSHOT_H 10#define DM_SNAPSHOT_H
11 11
12#include "dm.h" 12#include <linux/device-mapper.h>
13#include "dm-bio-list.h" 13#include "dm-bio-list.h"
14#include <linux/blkdev.h> 14#include <linux/blkdev.h>
15#include <linux/workqueue.h> 15#include <linux/workqueue.h>
@@ -158,11 +158,10 @@ struct dm_snapshot {
158 /* Used for display of table */ 158 /* Used for display of table */
159 char type; 159 char type;
160 160
161 /* The last percentage we notified */
162 int last_percent;
163
164 mempool_t *pending_pool; 161 mempool_t *pending_pool;
165 162
163 atomic_t pending_exceptions_count;
164
166 struct exception_table pending; 165 struct exception_table pending;
167 struct exception_table complete; 166 struct exception_table complete;
168 167
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index b745d8ac625b..9e4ef88d421e 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -4,7 +4,7 @@
4 * This file is released under the GPL. 4 * This file is released under the GPL.
5 */ 5 */
6 6
7#include "dm.h" 7#include <linux/device-mapper.h>
8 8
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/init.h> 10#include <linux/init.h>
@@ -60,8 +60,8 @@ static inline struct stripe_c *alloc_context(unsigned int stripes)
60{ 60{
61 size_t len; 61 size_t len;
62 62
63 if (array_too_big(sizeof(struct stripe_c), sizeof(struct stripe), 63 if (dm_array_too_big(sizeof(struct stripe_c), sizeof(struct stripe),
64 stripes)) 64 stripes))
65 return NULL; 65 return NULL;
66 66
67 len = sizeof(struct stripe_c) + (sizeof(struct stripe) * stripes); 67 len = sizeof(struct stripe_c) + (sizeof(struct stripe) * stripes);
@@ -320,8 +320,10 @@ int __init dm_stripe_init(void)
320 int r; 320 int r;
321 321
322 r = dm_register_target(&stripe_target); 322 r = dm_register_target(&stripe_target);
323 if (r < 0) 323 if (r < 0) {
324 DMWARN("target registration failed"); 324 DMWARN("target registration failed");
325 return r;
326 }
325 327
326 kstriped = create_singlethread_workqueue("kstriped"); 328 kstriped = create_singlethread_workqueue("kstriped");
327 if (!kstriped) { 329 if (!kstriped) {
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index a740a6950f59..04e5fd742c2c 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -43,7 +43,7 @@ struct dm_table {
43 * device. This should be a combination of FMODE_READ 43 * device. This should be a combination of FMODE_READ
44 * and FMODE_WRITE. 44 * and FMODE_WRITE.
45 */ 45 */
46 int mode; 46 fmode_t mode;
47 47
48 /* a list of devices used by this table */ 48 /* a list of devices used by this table */
49 struct list_head devices; 49 struct list_head devices;
@@ -217,7 +217,7 @@ static int alloc_targets(struct dm_table *t, unsigned int num)
217 return 0; 217 return 0;
218} 218}
219 219
220int dm_table_create(struct dm_table **result, int mode, 220int dm_table_create(struct dm_table **result, fmode_t mode,
221 unsigned num_targets, struct mapped_device *md) 221 unsigned num_targets, struct mapped_device *md)
222{ 222{
223 struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL); 223 struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);
@@ -313,19 +313,6 @@ static inline int check_space(struct dm_table *t)
313} 313}
314 314
315/* 315/*
316 * Convert a device path to a dev_t.
317 */
318static int lookup_device(const char *path, dev_t *dev)
319{
320 struct block_device *bdev = lookup_bdev(path);
321 if (IS_ERR(bdev))
322 return PTR_ERR(bdev);
323 *dev = bdev->bd_dev;
324 bdput(bdev);
325 return 0;
326}
327
328/*
329 * See if we've already got a device in the list. 316 * See if we've already got a device in the list.
330 */ 317 */
331static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev) 318static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
@@ -357,7 +344,7 @@ static int open_dev(struct dm_dev_internal *d, dev_t dev,
357 return PTR_ERR(bdev); 344 return PTR_ERR(bdev);
358 r = bd_claim_by_disk(bdev, _claim_ptr, dm_disk(md)); 345 r = bd_claim_by_disk(bdev, _claim_ptr, dm_disk(md));
359 if (r) 346 if (r)
360 blkdev_put(bdev); 347 blkdev_put(bdev, d->dm_dev.mode);
361 else 348 else
362 d->dm_dev.bdev = bdev; 349 d->dm_dev.bdev = bdev;
363 return r; 350 return r;
@@ -372,7 +359,7 @@ static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
372 return; 359 return;
373 360
374 bd_release_from_disk(d->dm_dev.bdev, dm_disk(md)); 361 bd_release_from_disk(d->dm_dev.bdev, dm_disk(md));
375 blkdev_put(d->dm_dev.bdev); 362 blkdev_put(d->dm_dev.bdev, d->dm_dev.mode);
376 d->dm_dev.bdev = NULL; 363 d->dm_dev.bdev = NULL;
377} 364}
378 365
@@ -395,7 +382,7 @@ static int check_device_area(struct dm_dev_internal *dd, sector_t start,
395 * careful to leave things as they were if we fail to reopen the 382 * careful to leave things as they were if we fail to reopen the
396 * device. 383 * device.
397 */ 384 */
398static int upgrade_mode(struct dm_dev_internal *dd, int new_mode, 385static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
399 struct mapped_device *md) 386 struct mapped_device *md)
400{ 387{
401 int r; 388 int r;
@@ -421,7 +408,7 @@ static int upgrade_mode(struct dm_dev_internal *dd, int new_mode,
421 */ 408 */
422static int __table_get_device(struct dm_table *t, struct dm_target *ti, 409static int __table_get_device(struct dm_table *t, struct dm_target *ti,
423 const char *path, sector_t start, sector_t len, 410 const char *path, sector_t start, sector_t len,
424 int mode, struct dm_dev **result) 411 fmode_t mode, struct dm_dev **result)
425{ 412{
426 int r; 413 int r;
427 dev_t uninitialized_var(dev); 414 dev_t uninitialized_var(dev);
@@ -437,8 +424,12 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
437 return -EOVERFLOW; 424 return -EOVERFLOW;
438 } else { 425 } else {
439 /* convert the path to a device */ 426 /* convert the path to a device */
440 if ((r = lookup_device(path, &dev))) 427 struct block_device *bdev = lookup_bdev(path);
441 return r; 428
429 if (IS_ERR(bdev))
430 return PTR_ERR(bdev);
431 dev = bdev->bd_dev;
432 bdput(bdev);
442 } 433 }
443 434
444 dd = find_device(&t->devices, dev); 435 dd = find_device(&t->devices, dev);
@@ -537,7 +528,7 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
537EXPORT_SYMBOL_GPL(dm_set_device_limits); 528EXPORT_SYMBOL_GPL(dm_set_device_limits);
538 529
539int dm_get_device(struct dm_target *ti, const char *path, sector_t start, 530int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
540 sector_t len, int mode, struct dm_dev **result) 531 sector_t len, fmode_t mode, struct dm_dev **result)
541{ 532{
542 int r = __table_get_device(ti->table, ti, path, 533 int r = __table_get_device(ti->table, ti, path,
543 start, len, mode, result); 534 start, len, mode, result);
@@ -677,7 +668,7 @@ static void check_for_valid_limits(struct io_restrictions *rs)
677 if (!rs->max_segment_size) 668 if (!rs->max_segment_size)
678 rs->max_segment_size = MAX_SEGMENT_SIZE; 669 rs->max_segment_size = MAX_SEGMENT_SIZE;
679 if (!rs->seg_boundary_mask) 670 if (!rs->seg_boundary_mask)
680 rs->seg_boundary_mask = -1; 671 rs->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
681 if (!rs->bounce_pfn) 672 if (!rs->bounce_pfn)
682 rs->bounce_pfn = -1; 673 rs->bounce_pfn = -1;
683} 674}
@@ -887,7 +878,7 @@ struct list_head *dm_table_get_devices(struct dm_table *t)
887 return &t->devices; 878 return &t->devices;
888} 879}
889 880
890int dm_table_get_mode(struct dm_table *t) 881fmode_t dm_table_get_mode(struct dm_table *t)
891{ 882{
892 return t->mode; 883 return t->mode;
893} 884}
diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c
index bdec206c404b..cdbf126ec106 100644
--- a/drivers/md/dm-zero.c
+++ b/drivers/md/dm-zero.c
@@ -4,7 +4,7 @@
4 * This file is released under the GPL. 4 * This file is released under the GPL.
5 */ 5 */
6 6
7#include "dm.h" 7#include <linux/device-mapper.h>
8 8
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/init.h> 10#include <linux/init.h>
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 327de03a5bdf..c99e4728ff41 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -21,7 +21,6 @@
21#include <linux/idr.h> 21#include <linux/idr.h>
22#include <linux/hdreg.h> 22#include <linux/hdreg.h>
23#include <linux/blktrace_api.h> 23#include <linux/blktrace_api.h>
24#include <linux/smp_lock.h>
25 24
26#define DM_MSG_PREFIX "core" 25#define DM_MSG_PREFIX "core"
27 26
@@ -76,7 +75,6 @@ union map_info *dm_get_mapinfo(struct bio *bio)
76 */ 75 */
77struct dm_wq_req { 76struct dm_wq_req {
78 enum { 77 enum {
79 DM_WQ_FLUSH_ALL,
80 DM_WQ_FLUSH_DEFERRED, 78 DM_WQ_FLUSH_DEFERRED,
81 } type; 79 } type;
82 struct work_struct work; 80 struct work_struct work;
@@ -151,40 +149,40 @@ static struct kmem_cache *_tio_cache;
151 149
152static int __init local_init(void) 150static int __init local_init(void)
153{ 151{
154 int r; 152 int r = -ENOMEM;
155 153
156 /* allocate a slab for the dm_ios */ 154 /* allocate a slab for the dm_ios */
157 _io_cache = KMEM_CACHE(dm_io, 0); 155 _io_cache = KMEM_CACHE(dm_io, 0);
158 if (!_io_cache) 156 if (!_io_cache)
159 return -ENOMEM; 157 return r;
160 158
161 /* allocate a slab for the target ios */ 159 /* allocate a slab for the target ios */
162 _tio_cache = KMEM_CACHE(dm_target_io, 0); 160 _tio_cache = KMEM_CACHE(dm_target_io, 0);
163 if (!_tio_cache) { 161 if (!_tio_cache)
164 kmem_cache_destroy(_io_cache); 162 goto out_free_io_cache;
165 return -ENOMEM;
166 }
167 163
168 r = dm_uevent_init(); 164 r = dm_uevent_init();
169 if (r) { 165 if (r)
170 kmem_cache_destroy(_tio_cache); 166 goto out_free_tio_cache;
171 kmem_cache_destroy(_io_cache);
172 return r;
173 }
174 167
175 _major = major; 168 _major = major;
176 r = register_blkdev(_major, _name); 169 r = register_blkdev(_major, _name);
177 if (r < 0) { 170 if (r < 0)
178 kmem_cache_destroy(_tio_cache); 171 goto out_uevent_exit;
179 kmem_cache_destroy(_io_cache);
180 dm_uevent_exit();
181 return r;
182 }
183 172
184 if (!_major) 173 if (!_major)
185 _major = r; 174 _major = r;
186 175
187 return 0; 176 return 0;
177
178out_uevent_exit:
179 dm_uevent_exit();
180out_free_tio_cache:
181 kmem_cache_destroy(_tio_cache);
182out_free_io_cache:
183 kmem_cache_destroy(_io_cache);
184
185 return r;
188} 186}
189 187
190static void local_exit(void) 188static void local_exit(void)
@@ -249,13 +247,13 @@ static void __exit dm_exit(void)
249/* 247/*
250 * Block device functions 248 * Block device functions
251 */ 249 */
252static int dm_blk_open(struct inode *inode, struct file *file) 250static int dm_blk_open(struct block_device *bdev, fmode_t mode)
253{ 251{
254 struct mapped_device *md; 252 struct mapped_device *md;
255 253
256 spin_lock(&_minor_lock); 254 spin_lock(&_minor_lock);
257 255
258 md = inode->i_bdev->bd_disk->private_data; 256 md = bdev->bd_disk->private_data;
259 if (!md) 257 if (!md)
260 goto out; 258 goto out;
261 259
@@ -274,11 +272,9 @@ out:
274 return md ? 0 : -ENXIO; 272 return md ? 0 : -ENXIO;
275} 273}
276 274
277static int dm_blk_close(struct inode *inode, struct file *file) 275static int dm_blk_close(struct gendisk *disk, fmode_t mode)
278{ 276{
279 struct mapped_device *md; 277 struct mapped_device *md = disk->private_data;
280
281 md = inode->i_bdev->bd_disk->private_data;
282 atomic_dec(&md->open_count); 278 atomic_dec(&md->open_count);
283 dm_put(md); 279 dm_put(md);
284 return 0; 280 return 0;
@@ -315,21 +311,14 @@ static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
315 return dm_get_geometry(md, geo); 311 return dm_get_geometry(md, geo);
316} 312}
317 313
318static int dm_blk_ioctl(struct inode *inode, struct file *file, 314static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
319 unsigned int cmd, unsigned long arg) 315 unsigned int cmd, unsigned long arg)
320{ 316{
321 struct mapped_device *md; 317 struct mapped_device *md = bdev->bd_disk->private_data;
322 struct dm_table *map; 318 struct dm_table *map = dm_get_table(md);
323 struct dm_target *tgt; 319 struct dm_target *tgt;
324 int r = -ENOTTY; 320 int r = -ENOTTY;
325 321
326 /* We don't really need this lock, but we do need 'inode'. */
327 unlock_kernel();
328
329 md = inode->i_bdev->bd_disk->private_data;
330
331 map = dm_get_table(md);
332
333 if (!map || !dm_table_get_size(map)) 322 if (!map || !dm_table_get_size(map))
334 goto out; 323 goto out;
335 324
@@ -345,12 +334,11 @@ static int dm_blk_ioctl(struct inode *inode, struct file *file,
345 } 334 }
346 335
347 if (tgt->type->ioctl) 336 if (tgt->type->ioctl)
348 r = tgt->type->ioctl(tgt, inode, file, cmd, arg); 337 r = tgt->type->ioctl(tgt, cmd, arg);
349 338
350out: 339out:
351 dm_table_put(map); 340 dm_table_put(map);
352 341
353 lock_kernel();
354 return r; 342 return r;
355} 343}
356 344
@@ -387,7 +375,7 @@ static void start_io_acct(struct dm_io *io)
387 dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending); 375 dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
388} 376}
389 377
390static int end_io_acct(struct dm_io *io) 378static void end_io_acct(struct dm_io *io)
391{ 379{
392 struct mapped_device *md = io->md; 380 struct mapped_device *md = io->md;
393 struct bio *bio = io->bio; 381 struct bio *bio = io->bio;
@@ -403,7 +391,9 @@ static int end_io_acct(struct dm_io *io)
403 dm_disk(md)->part0.in_flight = pending = 391 dm_disk(md)->part0.in_flight = pending =
404 atomic_dec_return(&md->pending); 392 atomic_dec_return(&md->pending);
405 393
406 return !pending; 394 /* nudge anyone waiting on suspend queue */
395 if (!pending)
396 wake_up(&md->wait);
407} 397}
408 398
409/* 399/*
@@ -511,9 +501,7 @@ static void dec_pending(struct dm_io *io, int error)
511 spin_unlock_irqrestore(&io->md->pushback_lock, flags); 501 spin_unlock_irqrestore(&io->md->pushback_lock, flags);
512 } 502 }
513 503
514 if (end_io_acct(io)) 504 end_io_acct(io);
515 /* nudge anyone waiting on suspend queue */
516 wake_up(&io->md->wait);
517 505
518 if (io->error != DM_ENDIO_REQUEUE) { 506 if (io->error != DM_ENDIO_REQUEUE) {
519 blk_add_trace_bio(io->md->queue, io->bio, 507 blk_add_trace_bio(io->md->queue, io->bio,
@@ -669,6 +657,7 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector,
669 clone->bi_size = to_bytes(len); 657 clone->bi_size = to_bytes(len);
670 clone->bi_io_vec->bv_offset = offset; 658 clone->bi_io_vec->bv_offset = offset;
671 clone->bi_io_vec->bv_len = clone->bi_size; 659 clone->bi_io_vec->bv_len = clone->bi_size;
660 clone->bi_flags |= 1 << BIO_CLONED;
672 661
673 return clone; 662 return clone;
674} 663}
@@ -948,16 +937,24 @@ static void dm_unplug_all(struct request_queue *q)
948 937
949static int dm_any_congested(void *congested_data, int bdi_bits) 938static int dm_any_congested(void *congested_data, int bdi_bits)
950{ 939{
951 int r; 940 int r = bdi_bits;
952 struct mapped_device *md = (struct mapped_device *) congested_data; 941 struct mapped_device *md = congested_data;
953 struct dm_table *map = dm_get_table(md); 942 struct dm_table *map;
954 943
955 if (!map || test_bit(DMF_BLOCK_IO, &md->flags)) 944 atomic_inc(&md->pending);
956 r = bdi_bits; 945
957 else 946 if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
958 r = dm_table_any_congested(map, bdi_bits); 947 map = dm_get_table(md);
948 if (map) {
949 r = dm_table_any_congested(map, bdi_bits);
950 dm_table_put(map);
951 }
952 }
953
954 if (!atomic_dec_return(&md->pending))
955 /* nudge anyone waiting on suspend queue */
956 wake_up(&md->wait);
959 957
960 dm_table_put(map);
961 return r; 958 return r;
962} 959}
963 960
@@ -1394,9 +1391,6 @@ static void dm_wq_work(struct work_struct *work)
1394 1391
1395 down_write(&md->io_lock); 1392 down_write(&md->io_lock);
1396 switch (req->type) { 1393 switch (req->type) {
1397 case DM_WQ_FLUSH_ALL:
1398 __merge_pushback_list(md);
1399 /* pass through */
1400 case DM_WQ_FLUSH_DEFERRED: 1394 case DM_WQ_FLUSH_DEFERRED:
1401 __flush_deferred_io(md); 1395 __flush_deferred_io(md);
1402 break; 1396 break;
@@ -1526,7 +1520,7 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
1526 if (!md->suspended_bdev) { 1520 if (!md->suspended_bdev) {
1527 DMWARN("bdget failed in dm_suspend"); 1521 DMWARN("bdget failed in dm_suspend");
1528 r = -ENOMEM; 1522 r = -ENOMEM;
1529 goto flush_and_out; 1523 goto out;
1530 } 1524 }
1531 1525
1532 /* 1526 /*
@@ -1577,14 +1571,6 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
1577 1571
1578 set_bit(DMF_SUSPENDED, &md->flags); 1572 set_bit(DMF_SUSPENDED, &md->flags);
1579 1573
1580flush_and_out:
1581 if (r && noflush)
1582 /*
1583 * Because there may be already I/Os in the pushback list,
1584 * flush them before return.
1585 */
1586 dm_queue_flush(md, DM_WQ_FLUSH_ALL, NULL);
1587
1588out: 1574out:
1589 if (r && md->suspended_bdev) { 1575 if (r && md->suspended_bdev) {
1590 bdput(md->suspended_bdev); 1576 bdput(md->suspended_bdev);
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index cd189da2b2fa..0ade60cdef42 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -62,15 +62,6 @@ void dm_put_target_type(struct target_type *t);
62int dm_target_iterate(void (*iter_func)(struct target_type *tt, 62int dm_target_iterate(void (*iter_func)(struct target_type *tt,
63 void *param), void *param); 63 void *param), void *param);
64 64
65/*-----------------------------------------------------------------
66 * Useful inlines.
67 *---------------------------------------------------------------*/
68static inline int array_too_big(unsigned long fixed, unsigned long obj,
69 unsigned long num)
70{
71 return (num > (ULONG_MAX - fixed) / obj);
72}
73
74int dm_split_args(int *argc, char ***argvp, char *input); 65int dm_split_args(int *argc, char ***argvp, char *input);
75 66
76/* 67/*
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 190147c79e79..3b90c5c924ec 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -148,6 +148,8 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
148 148
149 min_sectors = conf->array_sectors; 149 min_sectors = conf->array_sectors;
150 sector_div(min_sectors, PAGE_SIZE/sizeof(struct dev_info *)); 150 sector_div(min_sectors, PAGE_SIZE/sizeof(struct dev_info *));
151 if (min_sectors == 0)
152 min_sectors = 1;
151 153
152 /* min_sectors is the minimum spacing that will fit the hash 154 /* min_sectors is the minimum spacing that will fit the hash
153 * table in one PAGE. This may be much smaller than needed. 155 * table in one PAGE. This may be much smaller than needed.
diff --git a/drivers/md/md.c b/drivers/md/md.c
index aaa3d465de4e..1b1d32694f6f 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -222,6 +222,9 @@ static void mddev_put(mddev_t *mddev)
222 list_del(&mddev->all_mddevs); 222 list_del(&mddev->all_mddevs);
223 spin_unlock(&all_mddevs_lock); 223 spin_unlock(&all_mddevs_lock);
224 blk_cleanup_queue(mddev->queue); 224 blk_cleanup_queue(mddev->queue);
225 if (mddev->sysfs_state)
226 sysfs_put(mddev->sysfs_state);
227 mddev->sysfs_state = NULL;
225 kobject_put(&mddev->kobj); 228 kobject_put(&mddev->kobj);
226 } else 229 } else
227 spin_unlock(&all_mddevs_lock); 230 spin_unlock(&all_mddevs_lock);
@@ -1459,6 +1462,8 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
1459 kobject_del(&rdev->kobj); 1462 kobject_del(&rdev->kobj);
1460 goto fail; 1463 goto fail;
1461 } 1464 }
1465 rdev->sysfs_state = sysfs_get_dirent(rdev->kobj.sd, "state");
1466
1462 list_add_rcu(&rdev->same_set, &mddev->disks); 1467 list_add_rcu(&rdev->same_set, &mddev->disks);
1463 bd_claim_by_disk(rdev->bdev, rdev->bdev->bd_holder, mddev->gendisk); 1468 bd_claim_by_disk(rdev->bdev, rdev->bdev->bd_holder, mddev->gendisk);
1464 return 0; 1469 return 0;
@@ -1488,7 +1493,8 @@ static void unbind_rdev_from_array(mdk_rdev_t * rdev)
1488 printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b)); 1493 printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
1489 rdev->mddev = NULL; 1494 rdev->mddev = NULL;
1490 sysfs_remove_link(&rdev->kobj, "block"); 1495 sysfs_remove_link(&rdev->kobj, "block");
1491 1496 sysfs_put(rdev->sysfs_state);
1497 rdev->sysfs_state = NULL;
1492 /* We need to delay this, otherwise we can deadlock when 1498 /* We need to delay this, otherwise we can deadlock when
1493 * writing to 'remove' to "dev/state". We also need 1499 * writing to 'remove' to "dev/state". We also need
1494 * to delay it due to rcu usage. 1500 * to delay it due to rcu usage.
@@ -1520,7 +1526,7 @@ static int lock_rdev(mdk_rdev_t *rdev, dev_t dev, int shared)
1520 if (err) { 1526 if (err) {
1521 printk(KERN_ERR "md: could not bd_claim %s.\n", 1527 printk(KERN_ERR "md: could not bd_claim %s.\n",
1522 bdevname(bdev, b)); 1528 bdevname(bdev, b));
1523 blkdev_put(bdev); 1529 blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
1524 return err; 1530 return err;
1525 } 1531 }
1526 if (!shared) 1532 if (!shared)
@@ -1536,7 +1542,7 @@ static void unlock_rdev(mdk_rdev_t *rdev)
1536 if (!bdev) 1542 if (!bdev)
1537 MD_BUG(); 1543 MD_BUG();
1538 bd_release(bdev); 1544 bd_release(bdev);
1539 blkdev_put(bdev); 1545 blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
1540} 1546}
1541 1547
1542void md_autodetect_dev(dev_t dev); 1548void md_autodetect_dev(dev_t dev);
@@ -1923,8 +1929,8 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1923 1929
1924 err = 0; 1930 err = 0;
1925 } 1931 }
1926 if (!err) 1932 if (!err && rdev->sysfs_state)
1927 sysfs_notify(&rdev->kobj, NULL, "state"); 1933 sysfs_notify_dirent(rdev->sysfs_state);
1928 return err ? err : len; 1934 return err ? err : len;
1929} 1935}
1930static struct rdev_sysfs_entry rdev_state = 1936static struct rdev_sysfs_entry rdev_state =
@@ -2019,7 +2025,7 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2019 rdev->raid_disk = -1; 2025 rdev->raid_disk = -1;
2020 return err; 2026 return err;
2021 } else 2027 } else
2022 sysfs_notify(&rdev->kobj, NULL, "state"); 2028 sysfs_notify_dirent(rdev->sysfs_state);
2023 sprintf(nm, "rd%d", rdev->raid_disk); 2029 sprintf(nm, "rd%d", rdev->raid_disk);
2024 if (sysfs_create_link(&rdev->mddev->kobj, &rdev->kobj, nm)) 2030 if (sysfs_create_link(&rdev->mddev->kobj, &rdev->kobj, nm))
2025 printk(KERN_WARNING 2031 printk(KERN_WARNING
@@ -2036,7 +2042,7 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2036 clear_bit(Faulty, &rdev->flags); 2042 clear_bit(Faulty, &rdev->flags);
2037 clear_bit(WriteMostly, &rdev->flags); 2043 clear_bit(WriteMostly, &rdev->flags);
2038 set_bit(In_sync, &rdev->flags); 2044 set_bit(In_sync, &rdev->flags);
2039 sysfs_notify(&rdev->kobj, NULL, "state"); 2045 sysfs_notify_dirent(rdev->sysfs_state);
2040 } 2046 }
2041 return len; 2047 return len;
2042} 2048}
@@ -2770,7 +2776,7 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
2770 if (err) 2776 if (err)
2771 return err; 2777 return err;
2772 else { 2778 else {
2773 sysfs_notify(&mddev->kobj, NULL, "array_state"); 2779 sysfs_notify_dirent(mddev->sysfs_state);
2774 return len; 2780 return len;
2775 } 2781 }
2776} 2782}
@@ -3457,6 +3463,11 @@ static struct kobject *md_probe(dev_t dev, int *part, void *data)
3457 disk->fops = &md_fops; 3463 disk->fops = &md_fops;
3458 disk->private_data = mddev; 3464 disk->private_data = mddev;
3459 disk->queue = mddev->queue; 3465 disk->queue = mddev->queue;
3466 /* Allow extended partitions. This makes the
 3467 * 'mdp' device redundant, but we can't really
3468 * remove it now.
3469 */
3470 disk->flags |= GENHD_FL_EXT_DEVT;
3460 add_disk(disk); 3471 add_disk(disk);
3461 mddev->gendisk = disk; 3472 mddev->gendisk = disk;
3462 error = kobject_init_and_add(&mddev->kobj, &md_ktype, 3473 error = kobject_init_and_add(&mddev->kobj, &md_ktype,
@@ -3465,8 +3476,10 @@ static struct kobject *md_probe(dev_t dev, int *part, void *data)
3465 if (error) 3476 if (error)
3466 printk(KERN_WARNING "md: cannot register %s/md - name in use\n", 3477 printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
3467 disk->disk_name); 3478 disk->disk_name);
3468 else 3479 else {
3469 kobject_uevent(&mddev->kobj, KOBJ_ADD); 3480 kobject_uevent(&mddev->kobj, KOBJ_ADD);
3481 mddev->sysfs_state = sysfs_get_dirent(mddev->kobj.sd, "array_state");
3482 }
3470 return NULL; 3483 return NULL;
3471} 3484}
3472 3485
@@ -3477,7 +3490,7 @@ static void md_safemode_timeout(unsigned long data)
3477 if (!atomic_read(&mddev->writes_pending)) { 3490 if (!atomic_read(&mddev->writes_pending)) {
3478 mddev->safemode = 1; 3491 mddev->safemode = 1;
3479 if (mddev->external) 3492 if (mddev->external)
3480 set_bit(MD_NOTIFY_ARRAY_STATE, &mddev->flags); 3493 sysfs_notify_dirent(mddev->sysfs_state);
3481 } 3494 }
3482 md_wakeup_thread(mddev->thread); 3495 md_wakeup_thread(mddev->thread);
3483} 3496}
@@ -3578,7 +3591,7 @@ static int do_md_run(mddev_t * mddev)
3578 return -EINVAL; 3591 return -EINVAL;
3579 } 3592 }
3580 } 3593 }
3581 sysfs_notify(&rdev->kobj, NULL, "state"); 3594 sysfs_notify_dirent(rdev->sysfs_state);
3582 } 3595 }
3583 3596
3584 md_probe(mddev->unit, NULL, NULL); 3597 md_probe(mddev->unit, NULL, NULL);
@@ -3740,7 +3753,7 @@ static int do_md_run(mddev_t * mddev)
3740 3753
3741 mddev->changed = 1; 3754 mddev->changed = 1;
3742 md_new_event(mddev); 3755 md_new_event(mddev);
3743 sysfs_notify(&mddev->kobj, NULL, "array_state"); 3756 sysfs_notify_dirent(mddev->sysfs_state);
3744 sysfs_notify(&mddev->kobj, NULL, "sync_action"); 3757 sysfs_notify(&mddev->kobj, NULL, "sync_action");
3745 sysfs_notify(&mddev->kobj, NULL, "degraded"); 3758 sysfs_notify(&mddev->kobj, NULL, "degraded");
3746 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); 3759 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
@@ -3767,7 +3780,7 @@ static int restart_array(mddev_t *mddev)
3767 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3780 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3768 md_wakeup_thread(mddev->thread); 3781 md_wakeup_thread(mddev->thread);
3769 md_wakeup_thread(mddev->sync_thread); 3782 md_wakeup_thread(mddev->sync_thread);
3770 sysfs_notify(&mddev->kobj, NULL, "array_state"); 3783 sysfs_notify_dirent(mddev->sysfs_state);
3771 return 0; 3784 return 0;
3772} 3785}
3773 3786
@@ -3847,7 +3860,7 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
3847 module_put(mddev->pers->owner); 3860 module_put(mddev->pers->owner);
3848 mddev->pers = NULL; 3861 mddev->pers = NULL;
3849 /* tell userspace to handle 'inactive' */ 3862 /* tell userspace to handle 'inactive' */
3850 sysfs_notify(&mddev->kobj, NULL, "array_state"); 3863 sysfs_notify_dirent(mddev->sysfs_state);
3851 3864
3852 set_capacity(disk, 0); 3865 set_capacity(disk, 0);
3853 mddev->changed = 1; 3866 mddev->changed = 1;
@@ -3927,13 +3940,14 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
3927 mddev->degraded = 0; 3940 mddev->degraded = 0;
3928 mddev->barriers_work = 0; 3941 mddev->barriers_work = 0;
3929 mddev->safemode = 0; 3942 mddev->safemode = 0;
3943 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
3930 3944
3931 } else if (mddev->pers) 3945 } else if (mddev->pers)
3932 printk(KERN_INFO "md: %s switched to read-only mode.\n", 3946 printk(KERN_INFO "md: %s switched to read-only mode.\n",
3933 mdname(mddev)); 3947 mdname(mddev));
3934 err = 0; 3948 err = 0;
3935 md_new_event(mddev); 3949 md_new_event(mddev);
3936 sysfs_notify(&mddev->kobj, NULL, "array_state"); 3950 sysfs_notify_dirent(mddev->sysfs_state);
3937out: 3951out:
3938 return err; 3952 return err;
3939} 3953}
@@ -4297,7 +4311,7 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
4297 if (err) 4311 if (err)
4298 export_rdev(rdev); 4312 export_rdev(rdev);
4299 else 4313 else
4300 sysfs_notify(&rdev->kobj, NULL, "state"); 4314 sysfs_notify_dirent(rdev->sysfs_state);
4301 4315
4302 md_update_sb(mddev, 1); 4316 md_update_sb(mddev, 1);
4303 if (mddev->degraded) 4317 if (mddev->degraded)
@@ -4785,7 +4799,7 @@ static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
4785 return 0; 4799 return 0;
4786} 4800}
4787 4801
4788static int md_ioctl(struct inode *inode, struct file *file, 4802static int md_ioctl(struct block_device *bdev, fmode_t mode,
4789 unsigned int cmd, unsigned long arg) 4803 unsigned int cmd, unsigned long arg)
4790{ 4804{
4791 int err = 0; 4805 int err = 0;
@@ -4823,7 +4837,7 @@ static int md_ioctl(struct inode *inode, struct file *file,
4823 * Commands creating/starting a new array: 4837 * Commands creating/starting a new array:
4824 */ 4838 */
4825 4839
4826 mddev = inode->i_bdev->bd_disk->private_data; 4840 mddev = bdev->bd_disk->private_data;
4827 4841
4828 if (!mddev) { 4842 if (!mddev) {
4829 BUG(); 4843 BUG();
@@ -4938,7 +4952,7 @@ static int md_ioctl(struct inode *inode, struct file *file,
4938 if (_IOC_TYPE(cmd) == MD_MAJOR && mddev->ro && mddev->pers) { 4952 if (_IOC_TYPE(cmd) == MD_MAJOR && mddev->ro && mddev->pers) {
4939 if (mddev->ro == 2) { 4953 if (mddev->ro == 2) {
4940 mddev->ro = 0; 4954 mddev->ro = 0;
4941 sysfs_notify(&mddev->kobj, NULL, "array_state"); 4955 sysfs_notify_dirent(mddev->sysfs_state);
4942 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4956 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4943 md_wakeup_thread(mddev->thread); 4957 md_wakeup_thread(mddev->thread);
4944 } else { 4958 } else {
@@ -4996,13 +5010,13 @@ abort:
4996 return err; 5010 return err;
4997} 5011}
4998 5012
4999static int md_open(struct inode *inode, struct file *file) 5013static int md_open(struct block_device *bdev, fmode_t mode)
5000{ 5014{
5001 /* 5015 /*
5002 * Succeed if we can lock the mddev, which confirms that 5016 * Succeed if we can lock the mddev, which confirms that
5003 * it isn't being stopped right now. 5017 * it isn't being stopped right now.
5004 */ 5018 */
5005 mddev_t *mddev = inode->i_bdev->bd_disk->private_data; 5019 mddev_t *mddev = bdev->bd_disk->private_data;
5006 int err; 5020 int err;
5007 5021
5008 if ((err = mutex_lock_interruptible_nested(&mddev->reconfig_mutex, 1))) 5022 if ((err = mutex_lock_interruptible_nested(&mddev->reconfig_mutex, 1)))
@@ -5013,14 +5027,14 @@ static int md_open(struct inode *inode, struct file *file)
5013 atomic_inc(&mddev->openers); 5027 atomic_inc(&mddev->openers);
5014 mddev_unlock(mddev); 5028 mddev_unlock(mddev);
5015 5029
5016 check_disk_change(inode->i_bdev); 5030 check_disk_change(bdev);
5017 out: 5031 out:
5018 return err; 5032 return err;
5019} 5033}
5020 5034
5021static int md_release(struct inode *inode, struct file * file) 5035static int md_release(struct gendisk *disk, fmode_t mode)
5022{ 5036{
5023 mddev_t *mddev = inode->i_bdev->bd_disk->private_data; 5037 mddev_t *mddev = disk->private_data;
5024 5038
5025 BUG_ON(!mddev); 5039 BUG_ON(!mddev);
5026 atomic_dec(&mddev->openers); 5040 atomic_dec(&mddev->openers);
@@ -5048,7 +5062,7 @@ static struct block_device_operations md_fops =
5048 .owner = THIS_MODULE, 5062 .owner = THIS_MODULE,
5049 .open = md_open, 5063 .open = md_open,
5050 .release = md_release, 5064 .release = md_release,
5051 .ioctl = md_ioctl, 5065 .locked_ioctl = md_ioctl,
5052 .getgeo = md_getgeo, 5066 .getgeo = md_getgeo,
5053 .media_changed = md_media_changed, 5067 .media_changed = md_media_changed,
5054 .revalidate_disk= md_revalidate, 5068 .revalidate_disk= md_revalidate,
@@ -5612,7 +5626,7 @@ void md_write_start(mddev_t *mddev, struct bio *bi)
5612 spin_unlock_irq(&mddev->write_lock); 5626 spin_unlock_irq(&mddev->write_lock);
5613 } 5627 }
5614 if (did_change) 5628 if (did_change)
5615 sysfs_notify(&mddev->kobj, NULL, "array_state"); 5629 sysfs_notify_dirent(mddev->sysfs_state);
5616 wait_event(mddev->sb_wait, 5630 wait_event(mddev->sb_wait,
5617 !test_bit(MD_CHANGE_CLEAN, &mddev->flags) && 5631 !test_bit(MD_CHANGE_CLEAN, &mddev->flags) &&
5618 !test_bit(MD_CHANGE_PENDING, &mddev->flags)); 5632 !test_bit(MD_CHANGE_PENDING, &mddev->flags));
@@ -5655,7 +5669,7 @@ int md_allow_write(mddev_t *mddev)
5655 mddev->safemode = 1; 5669 mddev->safemode = 1;
5656 spin_unlock_irq(&mddev->write_lock); 5670 spin_unlock_irq(&mddev->write_lock);
5657 md_update_sb(mddev, 0); 5671 md_update_sb(mddev, 0);
5658 sysfs_notify(&mddev->kobj, NULL, "array_state"); 5672 sysfs_notify_dirent(mddev->sysfs_state);
5659 } else 5673 } else
5660 spin_unlock_irq(&mddev->write_lock); 5674 spin_unlock_irq(&mddev->write_lock);
5661 5675
@@ -6048,9 +6062,6 @@ void md_check_recovery(mddev_t *mddev)
6048 if (mddev->bitmap) 6062 if (mddev->bitmap)
6049 bitmap_daemon_work(mddev->bitmap); 6063 bitmap_daemon_work(mddev->bitmap);
6050 6064
6051 if (test_and_clear_bit(MD_NOTIFY_ARRAY_STATE, &mddev->flags))
6052 sysfs_notify(&mddev->kobj, NULL, "array_state");
6053
6054 if (mddev->ro) 6065 if (mddev->ro)
6055 return; 6066 return;
6056 6067
@@ -6103,7 +6114,7 @@ void md_check_recovery(mddev_t *mddev)
6103 mddev->safemode = 0; 6114 mddev->safemode = 0;
6104 spin_unlock_irq(&mddev->write_lock); 6115 spin_unlock_irq(&mddev->write_lock);
6105 if (did_change) 6116 if (did_change)
6106 sysfs_notify(&mddev->kobj, NULL, "array_state"); 6117 sysfs_notify_dirent(mddev->sysfs_state);
6107 } 6118 }
6108 6119
6109 if (mddev->flags) 6120 if (mddev->flags)
@@ -6111,7 +6122,7 @@ void md_check_recovery(mddev_t *mddev)
6111 6122
6112 rdev_for_each(rdev, rtmp, mddev) 6123 rdev_for_each(rdev, rtmp, mddev)
6113 if (test_and_clear_bit(StateChanged, &rdev->flags)) 6124 if (test_and_clear_bit(StateChanged, &rdev->flags))
6114 sysfs_notify(&rdev->kobj, NULL, "state"); 6125 sysfs_notify_dirent(rdev->sysfs_state);
6115 6126
6116 6127
6117 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && 6128 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
@@ -6221,7 +6232,7 @@ void md_check_recovery(mddev_t *mddev)
6221 6232
6222void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev) 6233void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
6223{ 6234{
6224 sysfs_notify(&rdev->kobj, NULL, "state"); 6235 sysfs_notify_dirent(rdev->sysfs_state);
6225 wait_event_timeout(rdev->blocked_wait, 6236 wait_event_timeout(rdev->blocked_wait,
6226 !test_bit(Blocked, &rdev->flags), 6237 !test_bit(Blocked, &rdev->flags),
6227 msecs_to_jiffies(5000)); 6238 msecs_to_jiffies(5000));
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index da5129a24b18..970a96ef9b18 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1137,7 +1137,7 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
1137 if (!enough(conf)) 1137 if (!enough(conf))
1138 return -EINVAL; 1138 return -EINVAL;
1139 1139
1140 if (rdev->raid_disk) 1140 if (rdev->raid_disk >= 0)
1141 first = last = rdev->raid_disk; 1141 first = last = rdev->raid_disk;
1142 1142
1143 if (rdev->saved_raid_disk >= 0 && 1143 if (rdev->saved_raid_disk >= 0 &&
diff --git a/drivers/media/common/saa7146_fops.c b/drivers/media/common/saa7146_fops.c
index 5b34c134aa25..127b0526a727 100644
--- a/drivers/media/common/saa7146_fops.c
+++ b/drivers/media/common/saa7146_fops.c
@@ -545,11 +545,11 @@ int saa7146_register_device(struct video_device **vid, struct saa7146_dev* dev,
545 if( VFL_TYPE_GRABBER == type ) { 545 if( VFL_TYPE_GRABBER == type ) {
546 vv->video_minor = vfd->minor; 546 vv->video_minor = vfd->minor;
547 INFO(("%s: registered device video%d [v4l2]\n", 547 INFO(("%s: registered device video%d [v4l2]\n",
548 dev->name, vfd->minor & 0x1f)); 548 dev->name, vfd->num));
549 } else { 549 } else {
550 vv->vbi_minor = vfd->minor; 550 vv->vbi_minor = vfd->minor;
551 INFO(("%s: registered device vbi%d [v4l2]\n", 551 INFO(("%s: registered device vbi%d [v4l2]\n",
552 dev->name, vfd->minor & 0x1f)); 552 dev->name, vfd->num));
553 } 553 }
554 554
555 *vid = vfd; 555 *vid = vfd;
diff --git a/drivers/media/common/saa7146_video.c b/drivers/media/common/saa7146_video.c
index 99be9e5c85f7..fe0bd55977e3 100644
--- a/drivers/media/common/saa7146_video.c
+++ b/drivers/media/common/saa7146_video.c
@@ -834,7 +834,7 @@ static int video_end(struct saa7146_fh *fh, struct file *file)
834 * copying is done already, arg is a kernel pointer. 834 * copying is done already, arg is a kernel pointer.
835 */ 835 */
836 836
837int saa7146_video_do_ioctl(struct inode *inode, struct file *file, unsigned int cmd, void *arg) 837static int __saa7146_video_do_ioctl(struct file *file, unsigned int cmd, void *arg)
838{ 838{
839 struct saa7146_fh *fh = file->private_data; 839 struct saa7146_fh *fh = file->private_data;
840 struct saa7146_dev *dev = fh->dev; 840 struct saa7146_dev *dev = fh->dev;
@@ -1215,12 +1215,18 @@ int saa7146_video_do_ioctl(struct inode *inode, struct file *file, unsigned int
1215 } 1215 }
1216#endif 1216#endif
1217 default: 1217 default:
1218 return v4l_compat_translate_ioctl(inode,file,cmd,arg, 1218 return v4l_compat_translate_ioctl(file, cmd, arg,
1219 saa7146_video_do_ioctl); 1219 __saa7146_video_do_ioctl);
1220 } 1220 }
1221 return 0; 1221 return 0;
1222} 1222}
1223 1223
1224int saa7146_video_do_ioctl(struct inode *inode, struct file *file,
1225 unsigned int cmd, void *arg)
1226{
1227 return __saa7146_video_do_ioctl(file, cmd, arg);
1228}
1229
1224/*********************************************************************************/ 1230/*********************************************************************************/
1225/* buffer handling functions */ 1231/* buffer handling functions */
1226 1232
diff --git a/drivers/media/dvb/dm1105/dm1105.c b/drivers/media/dvb/dm1105/dm1105.c
index 14e627ef6465..c1d92f838ca8 100644
--- a/drivers/media/dvb/dm1105/dm1105.c
+++ b/drivers/media/dvb/dm1105/dm1105.c
@@ -376,7 +376,7 @@ static void dm1105dvb_dma_unmap(struct dm1105dvb *dm1105dvb)
376 pci_free_consistent(dm1105dvb->pdev, 6*DM1105_DMA_BYTES, dm1105dvb->ts_buf, dm1105dvb->dma_addr); 376 pci_free_consistent(dm1105dvb->pdev, 6*DM1105_DMA_BYTES, dm1105dvb->ts_buf, dm1105dvb->dma_addr);
377} 377}
378 378
379static void __devinit dm1105dvb_enable_irqs(struct dm1105dvb *dm1105dvb) 379static void dm1105dvb_enable_irqs(struct dm1105dvb *dm1105dvb)
380{ 380{
381 outb(INTMAK_ALLMASK, dm_io_mem(DM1105_INTMAK)); 381 outb(INTMAK_ALLMASK, dm_io_mem(DM1105_INTMAK));
382 outb(1, dm_io_mem(DM1105_CR)); 382 outb(1, dm_io_mem(DM1105_CR));
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c
index 5689d1f1d444..7a421e9dba5a 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.c
@@ -223,6 +223,8 @@ static void dvb_frontend_init(struct dvb_frontend *fe)
223 if (fe->ops.init) 223 if (fe->ops.init)
224 fe->ops.init(fe); 224 fe->ops.init(fe);
225 if (fe->ops.tuner_ops.init) { 225 if (fe->ops.tuner_ops.init) {
226 if (fe->ops.i2c_gate_ctrl)
227 fe->ops.i2c_gate_ctrl(fe, 1);
226 fe->ops.tuner_ops.init(fe); 228 fe->ops.tuner_ops.init(fe);
227 if (fe->ops.i2c_gate_ctrl) 229 if (fe->ops.i2c_gate_ctrl)
228 fe->ops.i2c_gate_ctrl(fe, 0); 230 fe->ops.i2c_gate_ctrl(fe, 0);
@@ -583,6 +585,8 @@ restart:
583 if (fe->ops.set_voltage) 585 if (fe->ops.set_voltage)
584 fe->ops.set_voltage(fe, SEC_VOLTAGE_OFF); 586 fe->ops.set_voltage(fe, SEC_VOLTAGE_OFF);
585 if (fe->ops.tuner_ops.sleep) { 587 if (fe->ops.tuner_ops.sleep) {
588 if (fe->ops.i2c_gate_ctrl)
589 fe->ops.i2c_gate_ctrl(fe, 1);
586 fe->ops.tuner_ops.sleep(fe); 590 fe->ops.tuner_ops.sleep(fe);
587 if (fe->ops.i2c_gate_ctrl) 591 if (fe->ops.i2c_gate_ctrl)
588 fe->ops.i2c_gate_ctrl(fe, 0); 592 fe->ops.i2c_gate_ctrl(fe, 0);
@@ -932,7 +936,8 @@ void dtv_property_dump(struct dtv_property *tvp)
932int is_legacy_delivery_system(fe_delivery_system_t s) 936int is_legacy_delivery_system(fe_delivery_system_t s)
933{ 937{
934 if((s == SYS_UNDEFINED) || (s == SYS_DVBC_ANNEX_AC) || 938 if((s == SYS_UNDEFINED) || (s == SYS_DVBC_ANNEX_AC) ||
935 (s == SYS_DVBC_ANNEX_B) || (s == SYS_DVBT) || (s == SYS_DVBS)) 939 (s == SYS_DVBC_ANNEX_B) || (s == SYS_DVBT) || (s == SYS_DVBS) ||
940 (s == SYS_ATSC))
936 return 1; 941 return 1;
937 942
938 return 0; 943 return 0;
diff --git a/drivers/media/dvb/dvb-usb/Kconfig b/drivers/media/dvb/dvb-usb/Kconfig
index 3c13bcfa6385..62b68c291d99 100644
--- a/drivers/media/dvb/dvb-usb/Kconfig
+++ b/drivers/media/dvb/dvb-usb/Kconfig
@@ -261,7 +261,7 @@ config DVB_USB_DW2102
261 Say Y here to support the DvbWorld DVB-S/S2 USB2.0 receivers 261 Say Y here to support the DvbWorld DVB-S/S2 USB2.0 receivers
262 and the TeVii S650. 262 and the TeVii S650.
263 263
264config DVB_USB_CINERGY_T2 264config DVB_USB_CINERGY_T2
265 tristate "Terratec CinergyT2/qanu USB 2.0 DVB-T receiver" 265 tristate "Terratec CinergyT2/qanu USB 2.0 DVB-T receiver"
266 depends on DVB_USB 266 depends on DVB_USB
267 help 267 help
@@ -283,6 +283,7 @@ config DVB_USB_ANYSEE
283config DVB_USB_DTV5100 283config DVB_USB_DTV5100
284 tristate "AME DTV-5100 USB2.0 DVB-T support" 284 tristate "AME DTV-5100 USB2.0 DVB-T support"
285 depends on DVB_USB 285 depends on DVB_USB
286 select DVB_ZL10353 if !DVB_FE_CUSTOMISE
286 select MEDIA_TUNER_QT1010 if !DVB_FE_CUSTOMISE 287 select MEDIA_TUNER_QT1010 if !DVB_FE_CUSTOMISE
287 help 288 help
288 Say Y here to support the AME DTV-5100 USB2.0 DVB-T receiver. 289 Say Y here to support the AME DTV-5100 USB2.0 DVB-T receiver.
diff --git a/drivers/media/dvb/dvb-usb/af9015.c b/drivers/media/dvb/dvb-usb/af9015.c
index cb0829c038ce..e9ab0249d133 100644
--- a/drivers/media/dvb/dvb-usb/af9015.c
+++ b/drivers/media/dvb/dvb-usb/af9015.c
@@ -31,13 +31,13 @@
31#include "mc44s80x.h" 31#include "mc44s80x.h"
32#endif 32#endif
33 33
34int dvb_usb_af9015_debug; 34static int dvb_usb_af9015_debug;
35module_param_named(debug, dvb_usb_af9015_debug, int, 0644); 35module_param_named(debug, dvb_usb_af9015_debug, int, 0644);
36MODULE_PARM_DESC(debug, "set debugging level" DVB_USB_DEBUG_STATUS); 36MODULE_PARM_DESC(debug, "set debugging level" DVB_USB_DEBUG_STATUS);
37int dvb_usb_af9015_remote; 37static int dvb_usb_af9015_remote;
38module_param_named(remote, dvb_usb_af9015_remote, int, 0644); 38module_param_named(remote, dvb_usb_af9015_remote, int, 0644);
39MODULE_PARM_DESC(remote, "select remote"); 39MODULE_PARM_DESC(remote, "select remote");
40int dvb_usb_af9015_dual_mode; 40static int dvb_usb_af9015_dual_mode;
41module_param_named(dual_mode, dvb_usb_af9015_dual_mode, int, 0644); 41module_param_named(dual_mode, dvb_usb_af9015_dual_mode, int, 0644);
42MODULE_PARM_DESC(dual_mode, "enable dual mode"); 42MODULE_PARM_DESC(dual_mode, "enable dual mode");
43DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); 43DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
@@ -46,7 +46,7 @@ static DEFINE_MUTEX(af9015_usb_mutex);
46 46
47static struct af9015_config af9015_config; 47static struct af9015_config af9015_config;
48static struct dvb_usb_device_properties af9015_properties[2]; 48static struct dvb_usb_device_properties af9015_properties[2];
49int af9015_properties_count = ARRAY_SIZE(af9015_properties); 49static int af9015_properties_count = ARRAY_SIZE(af9015_properties);
50 50
51static struct af9013_config af9015_af9013_config[] = { 51static struct af9013_config af9015_af9013_config[] = {
52 { 52 {
@@ -549,7 +549,7 @@ static int af9015_eeprom_dump(struct dvb_usb_device *d)
549 return 0; 549 return 0;
550} 550}
551 551
552int af9015_download_ir_table(struct dvb_usb_device *d) 552static int af9015_download_ir_table(struct dvb_usb_device *d)
553{ 553{
554 int i, packets = 0, ret; 554 int i, packets = 0, ret;
555 u16 addr = 0x9a56; /* ir-table start address */ 555 u16 addr = 0x9a56; /* ir-table start address */
@@ -681,12 +681,6 @@ static int af9015_download_firmware(struct usb_device *udev,
681 goto error; 681 goto error;
682 } 682 }
683 683
684 /* firmware is running, reconnect device in the usb bus */
685 req.cmd = RECONNECT_USB;
686 ret = af9015_rw_udev(udev, &req);
687 if (ret)
688 err("reconnect failed: %d", ret);
689
690error: 684error:
691 return ret; 685 return ret;
692} 686}
@@ -999,7 +993,7 @@ static int af9015_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
999} 993}
1000 994
1001/* init 2nd I2C adapter */ 995/* init 2nd I2C adapter */
1002int af9015_i2c_init(struct dvb_usb_device *d) 996static int af9015_i2c_init(struct dvb_usb_device *d)
1003{ 997{
1004 int ret; 998 int ret;
1005 struct af9015_state *state = d->priv; 999 struct af9015_state *state = d->priv;
@@ -1208,6 +1202,7 @@ static struct dvb_usb_device_properties af9015_properties[] = {
1208 .usb_ctrl = DEVICE_SPECIFIC, 1202 .usb_ctrl = DEVICE_SPECIFIC,
1209 .download_firmware = af9015_download_firmware, 1203 .download_firmware = af9015_download_firmware,
1210 .firmware = "dvb-usb-af9015.fw", 1204 .firmware = "dvb-usb-af9015.fw",
1205 .no_reconnect = 1,
1211 1206
1212 .size_of_priv = sizeof(struct af9015_state), \ 1207 .size_of_priv = sizeof(struct af9015_state), \
1213 1208
@@ -1306,6 +1301,7 @@ static struct dvb_usb_device_properties af9015_properties[] = {
1306 .usb_ctrl = DEVICE_SPECIFIC, 1301 .usb_ctrl = DEVICE_SPECIFIC,
1307 .download_firmware = af9015_download_firmware, 1302 .download_firmware = af9015_download_firmware,
1308 .firmware = "dvb-usb-af9015.fw", 1303 .firmware = "dvb-usb-af9015.fw",
1304 .no_reconnect = 1,
1309 1305
1310 .size_of_priv = sizeof(struct af9015_state), \ 1306 .size_of_priv = sizeof(struct af9015_state), \
1311 1307
@@ -1419,7 +1415,7 @@ static int af9015_usb_probe(struct usb_interface *intf,
1419 return ret; 1415 return ret;
1420} 1416}
1421 1417
1422void af9015_i2c_exit(struct dvb_usb_device *d) 1418static void af9015_i2c_exit(struct dvb_usb_device *d)
1423{ 1419{
1424 struct af9015_state *state = d->priv; 1420 struct af9015_state *state = d->priv;
1425 deb_info("%s: \n", __func__); 1421 deb_info("%s: \n", __func__);
diff --git a/drivers/media/dvb/dvb-usb/af9015.h b/drivers/media/dvb/dvb-usb/af9015.h
index 882e8a4b3681..6c3c97293316 100644
--- a/drivers/media/dvb/dvb-usb/af9015.h
+++ b/drivers/media/dvb/dvb-usb/af9015.h
@@ -27,7 +27,6 @@
27#define DVB_USB_LOG_PREFIX "af9015" 27#define DVB_USB_LOG_PREFIX "af9015"
28#include "dvb-usb.h" 28#include "dvb-usb.h"
29 29
30extern int dvb_usb_af9015_debug;
31#define deb_info(args...) dprintk(dvb_usb_af9015_debug, 0x01, args) 30#define deb_info(args...) dprintk(dvb_usb_af9015_debug, 0x01, args)
32#define deb_rc(args...) dprintk(dvb_usb_af9015_debug, 0x02, args) 31#define deb_rc(args...) dprintk(dvb_usb_af9015_debug, 0x02, args)
33#define deb_xfer(args...) dprintk(dvb_usb_af9015_debug, 0x04, args) 32#define deb_xfer(args...) dprintk(dvb_usb_af9015_debug, 0x04, args)
diff --git a/drivers/media/dvb/dvb-usb/anysee.c b/drivers/media/dvb/dvb-usb/anysee.c
index c786359fba03..cd2edbcaa097 100644
--- a/drivers/media/dvb/dvb-usb/anysee.c
+++ b/drivers/media/dvb/dvb-usb/anysee.c
@@ -46,7 +46,7 @@ module_param_named(delsys, dvb_usb_anysee_delsys, int, 0644);
46MODULE_PARM_DESC(delsys, "select delivery mode (0=DVB-C, 1=DVB-T)"); 46MODULE_PARM_DESC(delsys, "select delivery mode (0=DVB-C, 1=DVB-T)");
47DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); 47DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
48 48
49static struct mutex anysee_usb_mutex; 49static DEFINE_MUTEX(anysee_usb_mutex);
50 50
51static int anysee_ctrl_msg(struct dvb_usb_device *d, u8 *sbuf, u8 slen, 51static int anysee_ctrl_msg(struct dvb_usb_device *d, u8 *sbuf, u8 slen,
52 u8 *rbuf, u8 rlen) 52 u8 *rbuf, u8 rlen)
@@ -456,8 +456,6 @@ static int anysee_probe(struct usb_interface *intf,
456 struct usb_host_interface *alt; 456 struct usb_host_interface *alt;
457 int ret; 457 int ret;
458 458
459 mutex_init(&anysee_usb_mutex);
460
461 /* There is one interface with two alternate settings. 459 /* There is one interface with two alternate settings.
462 Alternate setting 0 is for bulk transfer. 460 Alternate setting 0 is for bulk transfer.
463 Alternate setting 1 is for isochronous transfer. 461 Alternate setting 1 is for isochronous transfer.
diff --git a/drivers/media/dvb/dvb-usb/dib0700.h b/drivers/media/dvb/dvb-usb/dib0700.h
index 739193943c17..8b544fe79b0d 100644
--- a/drivers/media/dvb/dvb-usb/dib0700.h
+++ b/drivers/media/dvb/dvb-usb/dib0700.h
@@ -22,7 +22,7 @@ extern int dvb_usb_dib0700_debug;
22 22
23#define REQUEST_I2C_READ 0x2 23#define REQUEST_I2C_READ 0x2
24#define REQUEST_I2C_WRITE 0x3 24#define REQUEST_I2C_WRITE 0x3
25#define REQUEST_POLL_RC 0x4 25#define REQUEST_POLL_RC 0x4 /* deprecated in firmware v1.20 */
26#define REQUEST_JUMPRAM 0x8 26#define REQUEST_JUMPRAM 0x8
27#define REQUEST_SET_CLOCK 0xB 27#define REQUEST_SET_CLOCK 0xB
28#define REQUEST_SET_GPIO 0xC 28#define REQUEST_SET_GPIO 0xC
@@ -40,11 +40,14 @@ struct dib0700_state {
40 u16 mt2060_if1[2]; 40 u16 mt2060_if1[2];
41 u8 rc_toggle; 41 u8 rc_toggle;
42 u8 rc_counter; 42 u8 rc_counter;
43 u8 rc_func_version;
43 u8 is_dib7000pc; 44 u8 is_dib7000pc;
44 u8 fw_use_new_i2c_api; 45 u8 fw_use_new_i2c_api;
45 u8 disable_streaming_master_mode; 46 u8 disable_streaming_master_mode;
46}; 47};
47 48
49extern int dib0700_get_version(struct dvb_usb_device *d, u32 *hwversion,
50 u32 *romversion, u32 *ramversion, u32 *fwtype);
48extern int dib0700_set_gpio(struct dvb_usb_device *, enum dib07x0_gpios gpio, u8 gpio_dir, u8 gpio_val); 51extern int dib0700_set_gpio(struct dvb_usb_device *, enum dib07x0_gpios gpio, u8 gpio_dir, u8 gpio_val);
49extern int dib0700_ctrl_clock(struct dvb_usb_device *d, u32 clk_MHz, u8 clock_out_gp3); 52extern int dib0700_ctrl_clock(struct dvb_usb_device *d, u32 clk_MHz, u8 clock_out_gp3);
50extern int dib0700_ctrl_rd(struct dvb_usb_device *d, u8 *tx, u8 txlen, u8 *rx, u8 rxlen); 53extern int dib0700_ctrl_rd(struct dvb_usb_device *d, u8 *tx, u8 txlen, u8 *rx, u8 rxlen);
diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c
index dd53cee3896d..200b215f4d8b 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_core.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_core.c
@@ -19,6 +19,22 @@ MODULE_PARM_DESC(dvb_usb_dib0700_ir_proto, "set ir protocol (0=NEC, 1=RC5 (defau
19 19
20DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); 20DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
21 21
22
23int dib0700_get_version(struct dvb_usb_device *d, u32 *hwversion,
24 u32 *romversion, u32 *ramversion, u32 *fwtype)
25{
26 u8 b[16];
27 int ret = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0),
28 REQUEST_GET_VERSION,
29 USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
30 b, sizeof(b), USB_CTRL_GET_TIMEOUT);
31 *hwversion = (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];
32 *romversion = (b[4] << 24) | (b[5] << 16) | (b[6] << 8) | b[7];
33 *ramversion = (b[8] << 24) | (b[9] << 16) | (b[10] << 8) | b[11];
34 *fwtype = (b[12] << 24) | (b[13] << 16) | (b[14] << 8) | b[15];
35 return ret;
36}
37
22/* expecting rx buffer: request data[0] data[1] ... data[2] */ 38/* expecting rx buffer: request data[0] data[1] ... data[2] */
23static int dib0700_ctrl_wr(struct dvb_usb_device *d, u8 *tx, u8 txlen) 39static int dib0700_ctrl_wr(struct dvb_usb_device *d, u8 *tx, u8 txlen)
24{ 40{
diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
index 0cfccc24b190..f28d3ae59e04 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
@@ -38,6 +38,7 @@ static struct mt2060_config bristol_mt2060_config[2] = {
38 } 38 }
39}; 39};
40 40
41
41static struct dibx000_agc_config bristol_dib3000p_mt2060_agc_config = { 42static struct dibx000_agc_config bristol_dib3000p_mt2060_agc_config = {
42 .band_caps = BAND_VHF | BAND_UHF, 43 .band_caps = BAND_VHF | BAND_UHF,
43 .setup = (1 << 8) | (5 << 5) | (0 << 4) | (0 << 3) | (0 << 2) | (2 << 0), 44 .setup = (1 << 8) | (5 << 5) | (0 << 4) | (0 << 3) | (0 << 2) | (2 << 0),
@@ -451,8 +452,13 @@ static u8 rc_request[] = { REQUEST_POLL_RC, 0 };
451 452
452/* Number of keypresses to ignore before start repeating */ 453/* Number of keypresses to ignore before start repeating */
453#define RC_REPEAT_DELAY 2 454#define RC_REPEAT_DELAY 2
455#define RC_REPEAT_DELAY_V1_20 5
454 456
455static int dib0700_rc_query(struct dvb_usb_device *d, u32 *event, int *state) 457
458
459/* Used by firmware versions < 1.20 (deprecated) */
460static int dib0700_rc_query_legacy(struct dvb_usb_device *d, u32 *event,
461 int *state)
456{ 462{
457 u8 key[4]; 463 u8 key[4];
458 int i; 464 int i;
@@ -529,6 +535,137 @@ static int dib0700_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
529 return 0; 535 return 0;
530} 536}
531 537
538/* This is the structure of the RC response packet starting in firmware 1.20 */
539struct dib0700_rc_response {
540 u8 report_id;
541 u8 data_state;
542 u8 system_msb;
543 u8 system_lsb;
544 u8 data;
545 u8 not_data;
546};
547
548/* This supports the new IR response format for firmware v1.20 */
549static int dib0700_rc_query_v1_20(struct dvb_usb_device *d, u32 *event,
550 int *state)
551{
552 struct dvb_usb_rc_key *keymap = d->props.rc_key_map;
553 struct dib0700_state *st = d->priv;
554 struct dib0700_rc_response poll_reply;
555 u8 buf[6];
556 int i;
557 int status;
558 int actlen;
559 int found = 0;
560
561 /* Set initial results in case we exit the function early */
562 *event = 0;
563 *state = REMOTE_NO_KEY_PRESSED;
564
565 /* Firmware v1.20 provides RC data via bulk endpoint 1 */
566 status = usb_bulk_msg(d->udev, usb_rcvbulkpipe(d->udev, 1), buf,
567 sizeof(buf), &actlen, 50);
568 if (status < 0) {
569 /* No data available (meaning no key press) */
570 return 0;
571 }
572
573 if (actlen != sizeof(buf)) {
574 /* We didn't get back the 6 byte message we expected */
575 err("Unexpected RC response size [%d]", actlen);
576 return -1;
577 }
578
579 poll_reply.report_id = buf[0];
580 poll_reply.data_state = buf[1];
581 poll_reply.system_msb = buf[2];
582 poll_reply.system_lsb = buf[3];
583 poll_reply.data = buf[4];
584 poll_reply.not_data = buf[5];
585
586 /*
587 info("rid=%02x ds=%02x sm=%02x sl=%02x d=%02x nd=%02x\n",
588 poll_reply.report_id, poll_reply.data_state,
589 poll_reply.system_msb, poll_reply.system_lsb,
590 poll_reply.data, poll_reply.not_data);
591 */
592
593 if ((poll_reply.data + poll_reply.not_data) != 0xff) {
594 /* Key failed integrity check */
595 err("key failed integrity check: %02x %02x %02x %02x",
596 poll_reply.system_msb, poll_reply.system_lsb,
597 poll_reply.data, poll_reply.not_data);
598 return -1;
599 }
600
601 /* Find the key in the map */
602 for (i = 0; i < d->props.rc_key_map_size; i++) {
603 if (keymap[i].custom == poll_reply.system_lsb &&
604 keymap[i].data == poll_reply.data) {
605 *event = keymap[i].event;
606 found = 1;
607 break;
608 }
609 }
610
611 if (found == 0) {
612 err("Unknown remote controller key: %02x %02x %02x %02x",
613 poll_reply.system_msb, poll_reply.system_lsb,
614 poll_reply.data, poll_reply.not_data);
615 d->last_event = 0;
616 return 0;
617 }
618
619 if (poll_reply.data_state == 1) {
620 /* New key hit */
621 st->rc_counter = 0;
622 *event = keymap[i].event;
623 *state = REMOTE_KEY_PRESSED;
624 d->last_event = keymap[i].event;
625 } else if (poll_reply.data_state == 2) {
626 /* Key repeated */
627 st->rc_counter++;
628
629 /* prevents unwanted double hits */
630 if (st->rc_counter > RC_REPEAT_DELAY_V1_20) {
631 *event = d->last_event;
632 *state = REMOTE_KEY_PRESSED;
633 st->rc_counter = RC_REPEAT_DELAY_V1_20;
634 }
635 } else {
636 err("Unknown data state [%d]", poll_reply.data_state);
637 }
638
639 return 0;
640}
641
642static int dib0700_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
643{
644 struct dib0700_state *st = d->priv;
645
646 /* Because some people may have improperly named firmware files,
647 let's figure out whether to use the new firmware call or the legacy
648 call based on the firmware version embedded in the file */
649 if (st->rc_func_version == 0) {
650 u32 hwver, romver, ramver, fwtype;
651 int ret = dib0700_get_version(d, &hwver, &romver, &ramver,
652 &fwtype);
653 if (ret < 0) {
654 err("Could not determine version info");
655 return -1;
656 }
657 if (ramver < 0x10200)
658 st->rc_func_version = 1;
659 else
660 st->rc_func_version = 2;
661 }
662
663 if (st->rc_func_version == 2)
664 return dib0700_rc_query_v1_20(d, event, state);
665 else
666 return dib0700_rc_query_legacy(d, event, state);
667}
668
532static struct dvb_usb_rc_key dib0700_rc_keys[] = { 669static struct dvb_usb_rc_key dib0700_rc_keys[] = {
533 /* Key codes for the tiny Pinnacle remote*/ 670 /* Key codes for the tiny Pinnacle remote*/
534 { 0x07, 0x00, KEY_MUTE }, 671 { 0x07, 0x00, KEY_MUTE },
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-urb.c b/drivers/media/dvb/dvb-usb/dvb-usb-urb.c
index 5cef12a07f72..6fe71c6745eb 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-urb.c
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-urb.c
@@ -13,14 +13,14 @@ int dvb_usb_generic_rw(struct dvb_usb_device *d, u8 *wbuf, u16 wlen, u8 *rbuf,
13{ 13{
14 int actlen,ret = -ENOMEM; 14 int actlen,ret = -ENOMEM;
15 15
16 if (!d || wbuf == NULL || wlen == 0)
17 return -EINVAL;
18
16 if (d->props.generic_bulk_ctrl_endpoint == 0) { 19 if (d->props.generic_bulk_ctrl_endpoint == 0) {
17 err("endpoint for generic control not specified."); 20 err("endpoint for generic control not specified.");
18 return -EINVAL; 21 return -EINVAL;
19 } 22 }
20 23
21 if (wbuf == NULL || wlen == 0)
22 return -EINVAL;
23
24 if ((ret = mutex_lock_interruptible(&d->usb_mutex))) 24 if ((ret = mutex_lock_interruptible(&d->usb_mutex)))
25 return ret; 25 return ret;
26 26
diff --git a/drivers/media/dvb/dvb-usb/usb-urb.c b/drivers/media/dvb/dvb-usb/usb-urb.c
index 397f51a7b2ad..da93b9e982c0 100644
--- a/drivers/media/dvb/dvb-usb/usb-urb.c
+++ b/drivers/media/dvb/dvb-usb/usb-urb.c
@@ -135,7 +135,7 @@ stream->buf_list[stream->buf_num], (long long)stream->dma_addr[stream->buf_num])
135 135
136static int usb_bulk_urb_init(struct usb_data_stream *stream) 136static int usb_bulk_urb_init(struct usb_data_stream *stream)
137{ 137{
138 int i; 138 int i, j;
139 139
140 if ((i = usb_allocate_stream_buffers(stream,stream->props.count, 140 if ((i = usb_allocate_stream_buffers(stream,stream->props.count,
141 stream->props.u.bulk.buffersize)) < 0) 141 stream->props.u.bulk.buffersize)) < 0)
@@ -143,9 +143,13 @@ static int usb_bulk_urb_init(struct usb_data_stream *stream)
143 143
144 /* allocate the URBs */ 144 /* allocate the URBs */
145 for (i = 0; i < stream->props.count; i++) { 145 for (i = 0; i < stream->props.count; i++) {
146 if ((stream->urb_list[i] = usb_alloc_urb(0,GFP_ATOMIC)) == NULL) 146 stream->urb_list[i] = usb_alloc_urb(0, GFP_ATOMIC);
147 if (!stream->urb_list[i]) {
148 deb_mem("not enough memory for urb_alloc_urb!.\n");
149 for (j = 0; j < i; j++)
 150 usb_free_urb(stream->urb_list[j]);
147 return -ENOMEM; 151 return -ENOMEM;
148 152 }
149 usb_fill_bulk_urb( stream->urb_list[i], stream->udev, 153 usb_fill_bulk_urb( stream->urb_list[i], stream->udev,
150 usb_rcvbulkpipe(stream->udev,stream->props.endpoint), 154 usb_rcvbulkpipe(stream->udev,stream->props.endpoint),
151 stream->buf_list[i], 155 stream->buf_list[i],
@@ -170,9 +174,14 @@ static int usb_isoc_urb_init(struct usb_data_stream *stream)
170 for (i = 0; i < stream->props.count; i++) { 174 for (i = 0; i < stream->props.count; i++) {
171 struct urb *urb; 175 struct urb *urb;
172 int frame_offset = 0; 176 int frame_offset = 0;
173 if ((stream->urb_list[i] = 177
174 usb_alloc_urb(stream->props.u.isoc.framesperurb,GFP_ATOMIC)) == NULL) 178 stream->urb_list[i] = usb_alloc_urb(stream->props.u.isoc.framesperurb, GFP_ATOMIC);
179 if (!stream->urb_list[i]) {
180 deb_mem("not enough memory for urb_alloc_urb!\n");
181 for (j = 0; j < i; j++)
 182 usb_free_urb(stream->urb_list[j]);
175 return -ENOMEM; 183 return -ENOMEM;
184 }
176 185
177 urb = stream->urb_list[i]; 186 urb = stream->urb_list[i];
178 187
diff --git a/drivers/media/dvb/frontends/af9013.c b/drivers/media/dvb/frontends/af9013.c
index 21c1060cf10e..692b68a9e73b 100644
--- a/drivers/media/dvb/frontends/af9013.c
+++ b/drivers/media/dvb/frontends/af9013.c
@@ -1187,7 +1187,7 @@ static int af9013_read_status(struct dvb_frontend *fe, fe_status_t *status)
1187 if (tmp) 1187 if (tmp)
1188 *status |= FE_HAS_SYNC | FE_HAS_LOCK; 1188 *status |= FE_HAS_SYNC | FE_HAS_LOCK;
1189 1189
1190 if (!*status & FE_HAS_SIGNAL) { 1190 if (!(*status & FE_HAS_SIGNAL)) {
1191 /* AGC lock */ 1191 /* AGC lock */
1192 ret = af9013_read_reg_bits(state, 0xd1a0, 6, 1, &tmp); 1192 ret = af9013_read_reg_bits(state, 0xd1a0, 6, 1, &tmp);
1193 if (ret) 1193 if (ret)
@@ -1196,7 +1196,7 @@ static int af9013_read_status(struct dvb_frontend *fe, fe_status_t *status)
1196 *status |= FE_HAS_SIGNAL; 1196 *status |= FE_HAS_SIGNAL;
1197 } 1197 }
1198 1198
1199 if (!*status & FE_HAS_CARRIER) { 1199 if (!(*status & FE_HAS_CARRIER)) {
1200 /* CFO lock */ 1200 /* CFO lock */
1201 ret = af9013_read_reg_bits(state, 0xd333, 7, 1, &tmp); 1201 ret = af9013_read_reg_bits(state, 0xd333, 7, 1, &tmp);
1202 if (ret) 1202 if (ret)
@@ -1205,7 +1205,7 @@ static int af9013_read_status(struct dvb_frontend *fe, fe_status_t *status)
1205 *status |= FE_HAS_CARRIER; 1205 *status |= FE_HAS_CARRIER;
1206 } 1206 }
1207 1207
1208 if (!*status & FE_HAS_CARRIER) { 1208 if (!(*status & FE_HAS_CARRIER)) {
1209 /* SFOE lock */ 1209 /* SFOE lock */
1210 ret = af9013_read_reg_bits(state, 0xd334, 6, 1, &tmp); 1210 ret = af9013_read_reg_bits(state, 0xd334, 6, 1, &tmp);
1211 if (ret) 1211 if (ret)
diff --git a/drivers/media/dvb/frontends/s5h1411.c b/drivers/media/dvb/frontends/s5h1411.c
index 2febfb5a846b..40644aacffcb 100644
--- a/drivers/media/dvb/frontends/s5h1411.c
+++ b/drivers/media/dvb/frontends/s5h1411.c
@@ -38,6 +38,7 @@ struct s5h1411_state {
38 struct dvb_frontend frontend; 38 struct dvb_frontend frontend;
39 39
40 fe_modulation_t current_modulation; 40 fe_modulation_t current_modulation;
41 unsigned int first_tune:1;
41 42
42 u32 current_frequency; 43 u32 current_frequency;
43 int if_freq; 44 int if_freq;
@@ -62,7 +63,7 @@ static struct init_tab {
62 { S5H1411_I2C_TOP_ADDR, 0x08, 0x0047, }, 63 { S5H1411_I2C_TOP_ADDR, 0x08, 0x0047, },
63 { S5H1411_I2C_TOP_ADDR, 0x1c, 0x0400, }, 64 { S5H1411_I2C_TOP_ADDR, 0x1c, 0x0400, },
64 { S5H1411_I2C_TOP_ADDR, 0x1e, 0x0370, }, 65 { S5H1411_I2C_TOP_ADDR, 0x1e, 0x0370, },
65 { S5H1411_I2C_TOP_ADDR, 0x1f, 0x342a, }, 66 { S5H1411_I2C_TOP_ADDR, 0x1f, 0x342c, },
66 { S5H1411_I2C_TOP_ADDR, 0x24, 0x0231, }, 67 { S5H1411_I2C_TOP_ADDR, 0x24, 0x0231, },
67 { S5H1411_I2C_TOP_ADDR, 0x25, 0x1011, }, 68 { S5H1411_I2C_TOP_ADDR, 0x25, 0x1011, },
68 { S5H1411_I2C_TOP_ADDR, 0x26, 0x0f07, }, 69 { S5H1411_I2C_TOP_ADDR, 0x26, 0x0f07, },
@@ -100,7 +101,6 @@ static struct init_tab {
100 { S5H1411_I2C_TOP_ADDR, 0x78, 0x3141, }, 101 { S5H1411_I2C_TOP_ADDR, 0x78, 0x3141, },
101 { S5H1411_I2C_TOP_ADDR, 0x7a, 0x3141, }, 102 { S5H1411_I2C_TOP_ADDR, 0x7a, 0x3141, },
102 { S5H1411_I2C_TOP_ADDR, 0xb3, 0x8003, }, 103 { S5H1411_I2C_TOP_ADDR, 0xb3, 0x8003, },
103 { S5H1411_I2C_TOP_ADDR, 0xb5, 0xafbb, },
104 { S5H1411_I2C_TOP_ADDR, 0xb5, 0xa6bb, }, 104 { S5H1411_I2C_TOP_ADDR, 0xb5, 0xa6bb, },
105 { S5H1411_I2C_TOP_ADDR, 0xb6, 0x0609, }, 105 { S5H1411_I2C_TOP_ADDR, 0xb6, 0x0609, },
106 { S5H1411_I2C_TOP_ADDR, 0xb7, 0x2f06, }, 106 { S5H1411_I2C_TOP_ADDR, 0xb7, 0x2f06, },
@@ -393,7 +393,7 @@ static int s5h1411_set_if_freq(struct dvb_frontend *fe, int KHz)
393 393
394 switch (KHz) { 394 switch (KHz) {
395 case 3250: 395 case 3250:
396 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x38, 0x10d9); 396 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x38, 0x10d5);
397 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x39, 0x5342); 397 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x39, 0x5342);
398 s5h1411_writereg(state, S5H1411_I2C_QAM_ADDR, 0x2c, 0x10d9); 398 s5h1411_writereg(state, S5H1411_I2C_QAM_ADDR, 0x2c, 0x10d9);
399 break; 399 break;
@@ -464,13 +464,25 @@ static int s5h1411_set_spectralinversion(struct dvb_frontend *fe, int inversion)
464 464
465 if (inversion == 1) 465 if (inversion == 1)
466 val |= 0x1000; /* Inverted */ 466 val |= 0x1000; /* Inverted */
467 else
468 val |= 0x0000;
469 467
470 state->inversion = inversion; 468 state->inversion = inversion;
471 return s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x24, val); 469 return s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x24, val);
472} 470}
473 471
472static int s5h1411_set_serialmode(struct dvb_frontend *fe, int serial)
473{
474 struct s5h1411_state *state = fe->demodulator_priv;
475 u16 val;
476
477 dprintk("%s(%d)\n", __func__, serial);
478 val = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0xbd) & ~0x100;
479
480 if (serial == 1)
481 val |= 0x100;
482
483 return s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xbd, val);
484}
485
474static int s5h1411_enable_modulation(struct dvb_frontend *fe, 486static int s5h1411_enable_modulation(struct dvb_frontend *fe,
475 fe_modulation_t m) 487 fe_modulation_t m)
476{ 488{
@@ -478,6 +490,12 @@ static int s5h1411_enable_modulation(struct dvb_frontend *fe,
478 490
479 dprintk("%s(0x%08x)\n", __func__, m); 491 dprintk("%s(0x%08x)\n", __func__, m);
480 492
493 if ((state->first_tune == 0) && (m == state->current_modulation)) {
494 dprintk("%s() Already at desired modulation. Skipping...\n",
495 __func__);
496 return 0;
497 }
498
481 switch (m) { 499 switch (m) {
482 case VSB_8: 500 case VSB_8:
483 dprintk("%s() VSB_8\n", __func__); 501 dprintk("%s() VSB_8\n", __func__);
@@ -502,6 +520,7 @@ static int s5h1411_enable_modulation(struct dvb_frontend *fe,
502 } 520 }
503 521
504 state->current_modulation = m; 522 state->current_modulation = m;
523 state->first_tune = 0;
505 s5h1411_softreset(fe); 524 s5h1411_softreset(fe);
506 525
507 return 0; 526 return 0;
@@ -535,7 +554,7 @@ static int s5h1411_set_gpio(struct dvb_frontend *fe, int enable)
535 return s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xe0, val); 554 return s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xe0, val);
536} 555}
537 556
538static int s5h1411_sleep(struct dvb_frontend *fe, int enable) 557static int s5h1411_set_powerstate(struct dvb_frontend *fe, int enable)
539{ 558{
540 struct s5h1411_state *state = fe->demodulator_priv; 559 struct s5h1411_state *state = fe->demodulator_priv;
541 560
@@ -551,6 +570,11 @@ static int s5h1411_sleep(struct dvb_frontend *fe, int enable)
551 return 0; 570 return 0;
552} 571}
553 572
573static int s5h1411_sleep(struct dvb_frontend *fe)
574{
575 return s5h1411_set_powerstate(fe, 1);
576}
577
554static int s5h1411_register_reset(struct dvb_frontend *fe) 578static int s5h1411_register_reset(struct dvb_frontend *fe)
555{ 579{
556 struct s5h1411_state *state = fe->demodulator_priv; 580 struct s5h1411_state *state = fe->demodulator_priv;
@@ -574,9 +598,6 @@ static int s5h1411_set_frontend(struct dvb_frontend *fe,
574 598
575 s5h1411_enable_modulation(fe, p->u.vsb.modulation); 599 s5h1411_enable_modulation(fe, p->u.vsb.modulation);
576 600
577 /* Allow the demod to settle */
578 msleep(100);
579
580 if (fe->ops.tuner_ops.set_params) { 601 if (fe->ops.tuner_ops.set_params) {
581 if (fe->ops.i2c_gate_ctrl) 602 if (fe->ops.i2c_gate_ctrl)
582 fe->ops.i2c_gate_ctrl(fe, 1); 603 fe->ops.i2c_gate_ctrl(fe, 1);
@@ -587,6 +608,10 @@ static int s5h1411_set_frontend(struct dvb_frontend *fe,
587 fe->ops.i2c_gate_ctrl(fe, 0); 608 fe->ops.i2c_gate_ctrl(fe, 0);
588 } 609 }
589 610
611 /* Issue a reset to the demod so it knows to resync against the
612 newly tuned frequency */
613 s5h1411_softreset(fe);
614
590 return 0; 615 return 0;
591} 616}
592 617
@@ -599,7 +624,7 @@ static int s5h1411_init(struct dvb_frontend *fe)
599 624
600 dprintk("%s()\n", __func__); 625 dprintk("%s()\n", __func__);
601 626
602 s5h1411_sleep(fe, 0); 627 s5h1411_set_powerstate(fe, 0);
603 s5h1411_register_reset(fe); 628 s5h1411_register_reset(fe);
604 629
605 for (i = 0; i < ARRAY_SIZE(init_tab); i++) 630 for (i = 0; i < ARRAY_SIZE(init_tab); i++)
@@ -610,12 +635,17 @@ static int s5h1411_init(struct dvb_frontend *fe)
610 /* The datasheet says that after initialisation, VSB is default */ 635 /* The datasheet says that after initialisation, VSB is default */
611 state->current_modulation = VSB_8; 636 state->current_modulation = VSB_8;
612 637
638 /* Although the datasheet says it's in VSB, empirical evidence
639 shows problems getting lock on the first tuning request. Make
640 sure we call enable_modulation the first time around */
641 state->first_tune = 1;
642
613 if (state->config->output_mode == S5H1411_SERIAL_OUTPUT) 643 if (state->config->output_mode == S5H1411_SERIAL_OUTPUT)
614 /* Serial */ 644 /* Serial */
615 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xbd, 0x1101); 645 s5h1411_set_serialmode(fe, 1);
616 else 646 else
617 /* Parallel */ 647 /* Parallel */
618 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xbd, 0x1001); 648 s5h1411_set_serialmode(fe, 0);
619 649
620 s5h1411_set_spectralinversion(fe, state->config->inversion); 650 s5h1411_set_spectralinversion(fe, state->config->inversion);
621 s5h1411_set_if_freq(fe, state->config->vsb_if); 651 s5h1411_set_if_freq(fe, state->config->vsb_if);
@@ -637,28 +667,29 @@ static int s5h1411_read_status(struct dvb_frontend *fe, fe_status_t *status)
637 667
638 *status = 0; 668 *status = 0;
639 669
640 /* Get the demodulator status */ 670 /* Register F2 bit 15 = Master Lock, removed */
641 reg = (s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0xf2) >> 15)
642 & 0x0001;
643 if (reg)
644 *status |= FE_HAS_LOCK | FE_HAS_CARRIER | FE_HAS_SIGNAL;
645 671
646 switch (state->current_modulation) { 672 switch (state->current_modulation) {
647 case QAM_64: 673 case QAM_64:
648 case QAM_256: 674 case QAM_256:
649 reg = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0xf0); 675 reg = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0xf0);
650 if (reg & 0x100) 676 if (reg & 0x10) /* QAM FEC Lock */
651 *status |= FE_HAS_VITERBI; 677 *status |= FE_HAS_SYNC | FE_HAS_LOCK;
652 if (reg & 0x10) 678 if (reg & 0x100) /* QAM EQ Lock */
653 *status |= FE_HAS_SYNC; 679 *status |= FE_HAS_VITERBI | FE_HAS_CARRIER | FE_HAS_SIGNAL;
680
654 break; 681 break;
655 case VSB_8: 682 case VSB_8:
656 reg = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0x5e);
657 if (reg & 0x0001)
658 *status |= FE_HAS_SYNC;
659 reg = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0xf2); 683 reg = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0xf2);
660 if (reg & 0x1000) 684 if (reg & 0x1000) /* FEC Lock */
661 *status |= FE_HAS_VITERBI; 685 *status |= FE_HAS_SYNC | FE_HAS_LOCK;
686 if (reg & 0x2000) /* EQ Lock */
687 *status |= FE_HAS_VITERBI | FE_HAS_CARRIER | FE_HAS_SIGNAL;
688
689 reg = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0x53);
690 if (reg & 0x1) /* AFC Lock */
691 *status |= FE_HAS_SIGNAL;
692
662 break; 693 break;
663 default: 694 default:
664 return -EINVAL; 695 return -EINVAL;
@@ -863,6 +894,7 @@ static struct dvb_frontend_ops s5h1411_ops = {
863 }, 894 },
864 895
865 .init = s5h1411_init, 896 .init = s5h1411_init,
897 .sleep = s5h1411_sleep,
866 .i2c_gate_ctrl = s5h1411_i2c_gate_ctrl, 898 .i2c_gate_ctrl = s5h1411_i2c_gate_ctrl,
867 .set_frontend = s5h1411_set_frontend, 899 .set_frontend = s5h1411_set_frontend,
868 .get_frontend = s5h1411_get_frontend, 900 .get_frontend = s5h1411_get_frontend,
diff --git a/drivers/media/dvb/frontends/s5h1411.h b/drivers/media/dvb/frontends/s5h1411.h
index 7d542bc00c48..45ec0f82989c 100644
--- a/drivers/media/dvb/frontends/s5h1411.h
+++ b/drivers/media/dvb/frontends/s5h1411.h
@@ -47,7 +47,7 @@ struct s5h1411_config {
47 u16 mpeg_timing; 47 u16 mpeg_timing;
48 48
49 /* IF Freq for QAM and VSB in KHz */ 49 /* IF Freq for QAM and VSB in KHz */
50#define S5H1411_IF_2500 2500 50#define S5H1411_IF_3250 3250
51#define S5H1411_IF_3500 3500 51#define S5H1411_IF_3500 3500
52#define S5H1411_IF_4000 4000 52#define S5H1411_IF_4000 4000
53#define S5H1411_IF_5380 5380 53#define S5H1411_IF_5380 5380
diff --git a/drivers/media/dvb/siano/sms-cards.c b/drivers/media/dvb/siano/sms-cards.c
index 6f9b77360440..e98d6caf2c23 100644
--- a/drivers/media/dvb/siano/sms-cards.c
+++ b/drivers/media/dvb/siano/sms-cards.c
@@ -95,7 +95,7 @@ static struct sms_board sms_boards[] = {
95 [SMS1XXX_BOARD_HAUPPAUGE_WINDHAM] = { 95 [SMS1XXX_BOARD_HAUPPAUGE_WINDHAM] = {
96 .name = "Hauppauge WinTV MiniStick", 96 .name = "Hauppauge WinTV MiniStick",
97 .type = SMS_NOVA_B0, 97 .type = SMS_NOVA_B0,
98 .fw[DEVICE_MODE_DVBT_BDA] = "sms1xxx-hcw-55xxx-dvbt-01.fw", 98 .fw[DEVICE_MODE_DVBT_BDA] = "sms1xxx-hcw-55xxx-dvbt-02.fw",
99 }, 99 },
100}; 100};
101 101
diff --git a/drivers/media/dvb/ttusb-budget/dvb-ttusb-budget.c b/drivers/media/dvb/ttusb-budget/dvb-ttusb-budget.c
index 66ab0c6e9783..4a3f2b8ea37d 100644
--- a/drivers/media/dvb/ttusb-budget/dvb-ttusb-budget.c
+++ b/drivers/media/dvb/ttusb-budget/dvb-ttusb-budget.c
@@ -808,6 +808,12 @@ static int ttusb_alloc_iso_urbs(struct ttusb *ttusb)
808 ISO_BUF_COUNT, 808 ISO_BUF_COUNT,
809 &ttusb->iso_dma_handle); 809 &ttusb->iso_dma_handle);
810 810
811 if (!ttusb->iso_buffer) {
812 dprintk("%s: pci_alloc_consistent - not enough memory\n",
813 __func__);
814 return -ENOMEM;
815 }
816
811 memset(ttusb->iso_buffer, 0, 817 memset(ttusb->iso_buffer, 0,
812 ISO_FRAME_SIZE * FRAMES_PER_ISO_BUF * ISO_BUF_COUNT); 818 ISO_FRAME_SIZE * FRAMES_PER_ISO_BUF * ISO_BUF_COUNT);
813 819
@@ -1659,7 +1665,14 @@ static int ttusb_probe(struct usb_interface *intf, const struct usb_device_id *i
1659 1665
1660 ttusb_setup_interfaces(ttusb); 1666 ttusb_setup_interfaces(ttusb);
1661 1667
1662 ttusb_alloc_iso_urbs(ttusb); 1668 result = ttusb_alloc_iso_urbs(ttusb);
1669 if (result < 0) {
1670 dprintk("%s: ttusb_alloc_iso_urbs - failed\n", __func__);
1671 mutex_unlock(&ttusb->semi2c);
1672 kfree(ttusb);
1673 return result;
1674 }
1675
1663 if (ttusb_init_controller(ttusb)) 1676 if (ttusb_init_controller(ttusb))
1664 printk("ttusb_init_controller: error\n"); 1677 printk("ttusb_init_controller: error\n");
1665 1678
diff --git a/drivers/media/dvb/ttusb-dec/ttusb_dec.c b/drivers/media/dvb/ttusb-dec/ttusb_dec.c
index ab33fec8a19f..0aa96df80fc2 100644
--- a/drivers/media/dvb/ttusb-dec/ttusb_dec.c
+++ b/drivers/media/dvb/ttusb-dec/ttusb_dec.c
@@ -1157,6 +1157,12 @@ static int ttusb_dec_alloc_iso_urbs(struct ttusb_dec *dec)
1157 ISO_BUF_COUNT), 1157 ISO_BUF_COUNT),
1158 &dec->iso_dma_handle); 1158 &dec->iso_dma_handle);
1159 1159
1160 if (!dec->iso_buffer) {
1161 dprintk("%s: pci_alloc_consistent - not enough memory\n",
1162 __func__);
1163 return -ENOMEM;
1164 }
1165
1160 memset(dec->iso_buffer, 0, 1166 memset(dec->iso_buffer, 0,
1161 ISO_FRAME_SIZE * (FRAMES_PER_ISO_BUF * ISO_BUF_COUNT)); 1167 ISO_FRAME_SIZE * (FRAMES_PER_ISO_BUF * ISO_BUF_COUNT));
1162 1168
@@ -1254,6 +1260,7 @@ static int ttusb_dec_init_usb(struct ttusb_dec *dec)
1254 dec->irq_buffer = usb_buffer_alloc(dec->udev,IRQ_PACKET_SIZE, 1260 dec->irq_buffer = usb_buffer_alloc(dec->udev,IRQ_PACKET_SIZE,
1255 GFP_ATOMIC, &dec->irq_dma_handle); 1261 GFP_ATOMIC, &dec->irq_dma_handle);
1256 if(!dec->irq_buffer) { 1262 if(!dec->irq_buffer) {
1263 usb_free_urb(dec->irq_urb);
1257 return -ENOMEM; 1264 return -ENOMEM;
1258 } 1265 }
1259 usb_fill_int_urb(dec->irq_urb, dec->udev,dec->irq_pipe, 1266 usb_fill_int_urb(dec->irq_urb, dec->udev,dec->irq_pipe,
diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig
index 04cd7c04bdde..5189c4eb439f 100644
--- a/drivers/media/radio/Kconfig
+++ b/drivers/media/radio/Kconfig
@@ -355,6 +355,20 @@ config USB_SI470X
355 tristate "Silicon Labs Si470x FM Radio Receiver support" 355 tristate "Silicon Labs Si470x FM Radio Receiver support"
356 depends on USB && VIDEO_V4L2 356 depends on USB && VIDEO_V4L2
357 ---help--- 357 ---help---
358 This is a driver for USB devices with the Silicon Labs SI470x
359 chip. Currently these devices are known to work:
360 - 10c4:818a: Silicon Labs USB FM Radio Reference Design
361 - 06e1:a155: ADS/Tech FM Radio Receiver (formerly Instant FM Music)
362 - 1b80:d700: KWorld USB FM Radio SnapMusic Mobile 700 (FM700)
363
364 Sound is provided by the ALSA USB Audio/MIDI driver. Therefore
365 if you don't want to use the device solely for RDS receiving,
366 it is recommended to also select SND_USB_AUDIO.
367
368 Please have a look at the documentation, especially on how
369 to redirect the audio stream from the radio to your sound device:
370 Documentation/video4linux/si470x.txt
371
358 Say Y here if you want to connect this type of radio to your 372 Say Y here if you want to connect this type of radio to your
359 computer's USB port. 373 computer's USB port.
360 374
diff --git a/drivers/media/radio/dsbr100.c b/drivers/media/radio/dsbr100.c
index 78f56944e640..a5ca176a7b08 100644
--- a/drivers/media/radio/dsbr100.c
+++ b/drivers/media/radio/dsbr100.c
@@ -171,11 +171,11 @@ static int dsbr100_start(struct dsbr100_device *radio)
171 if (usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0), 171 if (usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0),
172 USB_REQ_GET_STATUS, 172 USB_REQ_GET_STATUS,
173 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 173 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
174 0x00, 0xC7, radio->transfer_buffer, 8, 300)<0 || 174 0x00, 0xC7, radio->transfer_buffer, 8, 300) < 0 ||
175 usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0), 175 usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0),
176 DSB100_ONOFF, 176 DSB100_ONOFF,
177 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 177 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
178 0x01, 0x00, radio->transfer_buffer, 8, 300)<0) 178 0x01, 0x00, radio->transfer_buffer, 8, 300) < 0)
179 return -1; 179 return -1;
180 radio->muted=0; 180 radio->muted=0;
181 return (radio->transfer_buffer)[0]; 181 return (radio->transfer_buffer)[0];
@@ -188,11 +188,11 @@ static int dsbr100_stop(struct dsbr100_device *radio)
188 if (usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0), 188 if (usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0),
189 USB_REQ_GET_STATUS, 189 USB_REQ_GET_STATUS,
190 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 190 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
191 0x16, 0x1C, radio->transfer_buffer, 8, 300)<0 || 191 0x16, 0x1C, radio->transfer_buffer, 8, 300) < 0 ||
192 usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0), 192 usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0),
193 DSB100_ONOFF, 193 DSB100_ONOFF,
194 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 194 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
195 0x00, 0x00, radio->transfer_buffer, 8, 300)<0) 195 0x00, 0x00, radio->transfer_buffer, 8, 300) < 0)
196 return -1; 196 return -1;
197 radio->muted=1; 197 radio->muted=1;
198 return (radio->transfer_buffer)[0]; 198 return (radio->transfer_buffer)[0];
@@ -201,24 +201,24 @@ static int dsbr100_stop(struct dsbr100_device *radio)
201/* set a frequency, freq is defined by v4l's TUNER_LOW, i.e. 1/16th kHz */ 201/* set a frequency, freq is defined by v4l's TUNER_LOW, i.e. 1/16th kHz */
202static int dsbr100_setfreq(struct dsbr100_device *radio, int freq) 202static int dsbr100_setfreq(struct dsbr100_device *radio, int freq)
203{ 203{
204 freq = (freq/16*80)/1000+856; 204 freq = (freq / 16 * 80) / 1000 + 856;
205 if (usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0), 205 if (usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0),
206 DSB100_TUNE, 206 DSB100_TUNE,
207 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 207 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
208 (freq>>8)&0x00ff, freq&0xff, 208 (freq >> 8) & 0x00ff, freq & 0xff,
209 radio->transfer_buffer, 8, 300)<0 || 209 radio->transfer_buffer, 8, 300) < 0 ||
210 usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0), 210 usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0),
211 USB_REQ_GET_STATUS, 211 USB_REQ_GET_STATUS,
212 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 212 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
213 0x96, 0xB7, radio->transfer_buffer, 8, 300)<0 || 213 0x96, 0xB7, radio->transfer_buffer, 8, 300) < 0 ||
214 usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0), 214 usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0),
215 USB_REQ_GET_STATUS, 215 USB_REQ_GET_STATUS,
216 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 216 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
217 0x00, 0x24, radio->transfer_buffer, 8, 300)<0) { 217 0x00, 0x24, radio->transfer_buffer, 8, 300) < 0) {
218 radio->stereo = -1; 218 radio->stereo = -1;
219 return -1; 219 return -1;
220 } 220 }
221 radio->stereo = ! ((radio->transfer_buffer)[0]&0x01); 221 radio->stereo = !((radio->transfer_buffer)[0] & 0x01);
222 return (radio->transfer_buffer)[0]; 222 return (radio->transfer_buffer)[0];
223} 223}
224 224
@@ -229,10 +229,10 @@ static void dsbr100_getstat(struct dsbr100_device *radio)
229 if (usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0), 229 if (usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0),
230 USB_REQ_GET_STATUS, 230 USB_REQ_GET_STATUS,
231 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 231 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
232 0x00 , 0x24, radio->transfer_buffer, 8, 300)<0) 232 0x00 , 0x24, radio->transfer_buffer, 8, 300) < 0)
233 radio->stereo = -1; 233 radio->stereo = -1;
234 else 234 else
235 radio->stereo = ! (radio->transfer_buffer[0]&0x01); 235 radio->stereo = !(radio->transfer_buffer[0] & 0x01);
236} 236}
237 237
238 238
@@ -265,7 +265,7 @@ static int vidioc_querycap(struct file *file, void *priv,
265{ 265{
266 strlcpy(v->driver, "dsbr100", sizeof(v->driver)); 266 strlcpy(v->driver, "dsbr100", sizeof(v->driver));
267 strlcpy(v->card, "D-Link R-100 USB FM Radio", sizeof(v->card)); 267 strlcpy(v->card, "D-Link R-100 USB FM Radio", sizeof(v->card));
268 sprintf(v->bus_info, "ISA"); 268 sprintf(v->bus_info, "USB");
269 v->version = RADIO_VERSION; 269 v->version = RADIO_VERSION;
270 v->capabilities = V4L2_CAP_TUNER; 270 v->capabilities = V4L2_CAP_TUNER;
271 return 0; 271 return 0;
@@ -282,9 +282,9 @@ static int vidioc_g_tuner(struct file *file, void *priv,
282 dsbr100_getstat(radio); 282 dsbr100_getstat(radio);
283 strcpy(v->name, "FM"); 283 strcpy(v->name, "FM");
284 v->type = V4L2_TUNER_RADIO; 284 v->type = V4L2_TUNER_RADIO;
285 v->rangelow = FREQ_MIN*FREQ_MUL; 285 v->rangelow = FREQ_MIN * FREQ_MUL;
286 v->rangehigh = FREQ_MAX*FREQ_MUL; 286 v->rangehigh = FREQ_MAX * FREQ_MUL;
287 v->rxsubchans = V4L2_TUNER_SUB_MONO|V4L2_TUNER_SUB_STEREO; 287 v->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO;
288 v->capability = V4L2_TUNER_CAP_LOW; 288 v->capability = V4L2_TUNER_CAP_LOW;
289 if(radio->stereo) 289 if(radio->stereo)
290 v->audmode = V4L2_TUNER_MODE_STEREO; 290 v->audmode = V4L2_TUNER_MODE_STEREO;
@@ -309,7 +309,7 @@ static int vidioc_s_frequency(struct file *file, void *priv,
309 struct dsbr100_device *radio = video_drvdata(file); 309 struct dsbr100_device *radio = video_drvdata(file);
310 310
311 radio->curfreq = f->frequency; 311 radio->curfreq = f->frequency;
312 if (dsbr100_setfreq(radio, radio->curfreq)==-1) 312 if (dsbr100_setfreq(radio, radio->curfreq) == -1)
313 dev_warn(&radio->usbdev->dev, "Set frequency failed\n"); 313 dev_warn(&radio->usbdev->dev, "Set frequency failed\n");
314 return 0; 314 return 0;
315} 315}
@@ -331,8 +331,7 @@ static int vidioc_queryctrl(struct file *file, void *priv,
331 331
332 for (i = 0; i < ARRAY_SIZE(radio_qctrl); i++) { 332 for (i = 0; i < ARRAY_SIZE(radio_qctrl); i++) {
333 if (qc->id && qc->id == radio_qctrl[i].id) { 333 if (qc->id && qc->id == radio_qctrl[i].id) {
334 memcpy(qc, &(radio_qctrl[i]), 334 memcpy(qc, &(radio_qctrl[i]), sizeof(*qc));
335 sizeof(*qc));
336 return 0; 335 return 0;
337 } 336 }
338 } 337 }
@@ -412,19 +411,25 @@ static int vidioc_s_audio(struct file *file, void *priv,
412static int usb_dsbr100_open(struct inode *inode, struct file *file) 411static int usb_dsbr100_open(struct inode *inode, struct file *file)
413{ 412{
414 struct dsbr100_device *radio = video_drvdata(file); 413 struct dsbr100_device *radio = video_drvdata(file);
414 int retval;
415 415
416 lock_kernel(); 416 lock_kernel();
417 radio->users = 1; 417 radio->users = 1;
418 radio->muted = 1; 418 radio->muted = 1;
419 419
420 if (dsbr100_start(radio)<0) { 420 if (dsbr100_start(radio) < 0) {
421 dev_warn(&radio->usbdev->dev, 421 dev_warn(&radio->usbdev->dev,
422 "Radio did not start up properly\n"); 422 "Radio did not start up properly\n");
423 radio->users = 0; 423 radio->users = 0;
424 unlock_kernel(); 424 unlock_kernel();
425 return -EIO; 425 return -EIO;
426 } 426 }
427 dsbr100_setfreq(radio, radio->curfreq); 427
428 retval = dsbr100_setfreq(radio, radio->curfreq);
429
430 if (retval == -1)
431 printk(KERN_WARNING KBUILD_MODNAME ": Set frequency failed\n");
432
428 unlock_kernel(); 433 unlock_kernel();
429 return 0; 434 return 0;
430} 435}
@@ -485,13 +490,20 @@ static int usb_dsbr100_probe(struct usb_interface *intf,
485{ 490{
486 struct dsbr100_device *radio; 491 struct dsbr100_device *radio;
487 492
488 if (!(radio = kmalloc(sizeof(struct dsbr100_device), GFP_KERNEL))) 493 radio = kmalloc(sizeof(struct dsbr100_device), GFP_KERNEL);
494
495 if (!radio)
489 return -ENOMEM; 496 return -ENOMEM;
490 if (!(radio->transfer_buffer = kmalloc(TB_LEN, GFP_KERNEL))) { 497
498 radio->transfer_buffer = kmalloc(TB_LEN, GFP_KERNEL);
499
500 if (!(radio->transfer_buffer)) {
491 kfree(radio); 501 kfree(radio);
492 return -ENOMEM; 502 return -ENOMEM;
493 } 503 }
494 if (!(radio->videodev = video_device_alloc())) { 504 radio->videodev = video_device_alloc();
505
506 if (!(radio->videodev)) {
495 kfree(radio->transfer_buffer); 507 kfree(radio->transfer_buffer);
496 kfree(radio); 508 kfree(radio);
497 return -ENOMEM; 509 return -ENOMEM;
@@ -501,7 +513,7 @@ static int usb_dsbr100_probe(struct usb_interface *intf,
501 radio->removed = 0; 513 radio->removed = 0;
502 radio->users = 0; 514 radio->users = 0;
503 radio->usbdev = interface_to_usbdev(intf); 515 radio->usbdev = interface_to_usbdev(intf);
504 radio->curfreq = FREQ_MIN*FREQ_MUL; 516 radio->curfreq = FREQ_MIN * FREQ_MUL;
505 video_set_drvdata(radio->videodev, radio); 517 video_set_drvdata(radio->videodev, radio);
506 if (video_register_device(radio->videodev, VFL_TYPE_RADIO, radio_nr) < 0) { 518 if (video_register_device(radio->videodev, VFL_TYPE_RADIO, radio_nr) < 0) {
507 dev_warn(&intf->dev, "Could not register video device\n"); 519 dev_warn(&intf->dev, "Could not register video device\n");
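
Note: the dsbr100 probe() rework above splits each allocation onto its own statement and keeps the staged cleanup, where each failure frees only what was allocated before it. A compact plain-C sketch of that structure follows; struct radio here and the malloc() calls merely stand in for the driver's kmalloc()/video_device_alloc() chain.

#include <errno.h>
#include <stdlib.h>

struct radio {
	void *transfer_buffer;
	void *videodev;
};

static int probe(struct radio **out)
{
	struct radio *radio = malloc(sizeof(*radio));

	if (!radio)
		return -ENOMEM;

	radio->transfer_buffer = malloc(64);
	if (!radio->transfer_buffer) {
		free(radio);			/* only the first allocation exists yet */
		return -ENOMEM;
	}

	radio->videodev = malloc(128);		/* stands in for video_device_alloc() */
	if (!radio->videodev) {
		free(radio->transfer_buffer);	/* unwind in reverse order */
		free(radio);
		return -ENOMEM;
	}

	*out = radio;
	return 0;
}

int main(void)
{
	struct radio *r;

	if (probe(&r))
		return 1;
	free(r->videodev);
	free(r->transfer_buffer);
	free(r);
	return 0;
}
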
diff --git a/drivers/media/radio/radio-mr800.c b/drivers/media/radio/radio-mr800.c
index a33717c48003..256cbeffdcb6 100644
--- a/drivers/media/radio/radio-mr800.c
+++ b/drivers/media/radio/radio-mr800.c
@@ -469,16 +469,21 @@ static int usb_amradio_open(struct inode *inode, struct file *file)
469{ 469{
470 struct amradio_device *radio = video_get_drvdata(video_devdata(file)); 470 struct amradio_device *radio = video_get_drvdata(video_devdata(file));
471 471
472 lock_kernel();
473
472 radio->users = 1; 474 radio->users = 1;
473 radio->muted = 1; 475 radio->muted = 1;
474 476
475 if (amradio_start(radio) < 0) { 477 if (amradio_start(radio) < 0) {
476 warn("Radio did not start up properly"); 478 warn("Radio did not start up properly");
477 radio->users = 0; 479 radio->users = 0;
480 unlock_kernel();
478 return -EIO; 481 return -EIO;
479 } 482 }
480 if (amradio_setfreq(radio, radio->curfreq) < 0) 483 if (amradio_setfreq(radio, radio->curfreq) < 0)
481 warn("Set frequency failed"); 484 warn("Set frequency failed");
485
486 unlock_kernel();
482 return 0; 487 return 0;
483} 488}
484 489
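
Note: the radio-mr800 open() fix above is about symmetry: once lock_kernel() is taken at entry, every return path, including the early -EIO error return, must call unlock_kernel(). The same discipline, sketched with an ordinary pthread mutex standing in for the BKL (device_open and device_ok are illustrative names only):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;
static int device_ok = 1;

static int device_open(void)
{
	pthread_mutex_lock(&big_lock);		/* stands in for lock_kernel() */

	if (!device_ok) {
		pthread_mutex_unlock(&big_lock);	/* error path unlocks too */
		return -EIO;
	}

	/* ... start the hardware, set the initial frequency ... */

	pthread_mutex_unlock(&big_lock);	/* normal path unlocks as well */
	return 0;
}

int main(void)
{
	printf("open: %d\n", device_open());
	return 0;
}
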
diff --git a/drivers/media/radio/radio-si470x.c b/drivers/media/radio/radio-si470x.c
index 5920cd306975..3e1830293de5 100644
--- a/drivers/media/radio/radio-si470x.c
+++ b/drivers/media/radio/radio-si470x.c
@@ -4,6 +4,7 @@
4 * Driver for USB radios for the Silicon Labs Si470x FM Radio Receivers: 4 * Driver for USB radios for the Silicon Labs Si470x FM Radio Receivers:
5 * - Silicon Labs USB FM Radio Reference Design 5 * - Silicon Labs USB FM Radio Reference Design
6 * - ADS/Tech FM Radio Receiver (formerly Instant FM Music) (RDX-155-EF) 6 * - ADS/Tech FM Radio Receiver (formerly Instant FM Music) (RDX-155-EF)
7 * - KWorld USB FM Radio SnapMusic Mobile 700 (FM700)
7 * 8 *
8 * Copyright (c) 2008 Tobias Lorenz <tobias.lorenz@gmx.net> 9 * Copyright (c) 2008 Tobias Lorenz <tobias.lorenz@gmx.net>
9 * 10 *
@@ -24,19 +25,6 @@
24 25
25 26
26/* 27/*
27 * User Notes:
28 * - USB Audio is provided by the alsa snd_usb_audio module.
29 * For listing you have to redirect the sound, for example using:
30 * arecord -D hw:1,0 -r96000 -c2 -f S16_LE | artsdsp aplay -B -
31 * - regarding module parameters in /sys/module/radio_si470x/parameters:
32 * the contents of read-only files (0444) are not updated, even if
33 * space, band and de are changed using private video controls
34 * - increase tune_timeout, if you often get -EIO errors
35 * - hw_freq_seek returns -EAGAIN, when timed out or band limit is reached
36 */
37
38
39/*
40 * History: 28 * History:
41 * 2008-01-12 Tobias Lorenz <tobias.lorenz@gmx.net> 29 * 2008-01-12 Tobias Lorenz <tobias.lorenz@gmx.net>
42 * Version 1.0.0 30 * Version 1.0.0
@@ -105,6 +93,9 @@
105 * - afc indication 93 * - afc indication
106 * - more safety checks, let si470x_get_freq return errno 94 * - more safety checks, let si470x_get_freq return errno
107 * - vidioc behavior corrected according to v4l2 spec 95 * - vidioc behavior corrected according to v4l2 spec
96 * 2008-10-20 Alexey Klimov <klimov.linux@gmail.com>
97 * - add support for KWorld USB FM Radio FM700
98 * - blacklisted KWorld radio in hid-core.c and hid-ids.h
108 * 99 *
109 * ToDo: 100 * ToDo:
110 * - add firmware download/update support 101 * - add firmware download/update support
@@ -145,6 +136,8 @@ static struct usb_device_id si470x_usb_driver_id_table[] = {
145 { USB_DEVICE_AND_INTERFACE_INFO(0x10c4, 0x818a, USB_CLASS_HID, 0, 0) }, 136 { USB_DEVICE_AND_INTERFACE_INFO(0x10c4, 0x818a, USB_CLASS_HID, 0, 0) },
146 /* ADS/Tech FM Radio Receiver (formerly Instant FM Music) */ 137 /* ADS/Tech FM Radio Receiver (formerly Instant FM Music) */
147 { USB_DEVICE_AND_INTERFACE_INFO(0x06e1, 0xa155, USB_CLASS_HID, 0, 0) }, 138 { USB_DEVICE_AND_INTERFACE_INFO(0x06e1, 0xa155, USB_CLASS_HID, 0, 0) },
139 /* KWorld USB FM Radio SnapMusic Mobile 700 (FM700) */
140 { USB_DEVICE_AND_INTERFACE_INFO(0x1b80, 0xd700, USB_CLASS_HID, 0, 0) },
148 /* Terminating entry */ 141 /* Terminating entry */
149 { } 142 { }
150}; 143};
diff --git a/drivers/media/video/arv.c b/drivers/media/video/arv.c
index 218754b4906a..e09b00693230 100644
--- a/drivers/media/video/arv.c
+++ b/drivers/media/video/arv.c
@@ -866,7 +866,7 @@ static int __init ar_init(void)
866 } 866 }
867 867
868 printk("video%d: Found M64278 VGA (IRQ %d, Freq %dMHz).\n", 868 printk("video%d: Found M64278 VGA (IRQ %d, Freq %dMHz).\n",
869 ar->vdev->minor, M32R_IRQ_INT3, freq); 869 ar->vdev->num, M32R_IRQ_INT3, freq);
870 870
871 return 0; 871 return 0;
872 872
diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c
index 5858bf5ff41c..9ec4cec2e52d 100644
--- a/drivers/media/video/bt8xx/bttv-driver.c
+++ b/drivers/media/video/bt8xx/bttv-driver.c
@@ -4246,7 +4246,7 @@ static int __devinit bttv_register_video(struct bttv *btv)
4246 video_nr[btv->c.nr]) < 0) 4246 video_nr[btv->c.nr]) < 0)
4247 goto err; 4247 goto err;
4248 printk(KERN_INFO "bttv%d: registered device video%d\n", 4248 printk(KERN_INFO "bttv%d: registered device video%d\n",
4249 btv->c.nr,btv->video_dev->minor & 0x1f); 4249 btv->c.nr, btv->video_dev->num);
4250 if (device_create_file(&btv->video_dev->dev, 4250 if (device_create_file(&btv->video_dev->dev,
4251 &dev_attr_card)<0) { 4251 &dev_attr_card)<0) {
4252 printk(KERN_ERR "bttv%d: device_create_file 'card' " 4252 printk(KERN_ERR "bttv%d: device_create_file 'card' "
@@ -4263,7 +4263,7 @@ static int __devinit bttv_register_video(struct bttv *btv)
4263 vbi_nr[btv->c.nr]) < 0) 4263 vbi_nr[btv->c.nr]) < 0)
4264 goto err; 4264 goto err;
4265 printk(KERN_INFO "bttv%d: registered device vbi%d\n", 4265 printk(KERN_INFO "bttv%d: registered device vbi%d\n",
4266 btv->c.nr,btv->vbi_dev->minor & 0x1f); 4266 btv->c.nr, btv->vbi_dev->num);
4267 4267
4268 if (!btv->has_radio) 4268 if (!btv->has_radio)
4269 return 0; 4269 return 0;
@@ -4275,7 +4275,7 @@ static int __devinit bttv_register_video(struct bttv *btv)
4275 radio_nr[btv->c.nr]) < 0) 4275 radio_nr[btv->c.nr]) < 0)
4276 goto err; 4276 goto err;
4277 printk(KERN_INFO "bttv%d: registered device radio%d\n", 4277 printk(KERN_INFO "bttv%d: registered device radio%d\n",
4278 btv->c.nr,btv->radio_dev->minor & 0x1f); 4278 btv->c.nr, btv->radio_dev->num);
4279 4279
4280 /* all done */ 4280 /* all done */
4281 return 0; 4281 return 0;
diff --git a/drivers/media/video/c-qcam.c b/drivers/media/video/c-qcam.c
index 17aa0adb3467..0f930d351466 100644
--- a/drivers/media/video/c-qcam.c
+++ b/drivers/media/video/c-qcam.c
@@ -815,7 +815,7 @@ static int init_cqcam(struct parport *port)
815 } 815 }
816 816
817 printk(KERN_INFO "video%d: Colour QuickCam found on %s\n", 817 printk(KERN_INFO "video%d: Colour QuickCam found on %s\n",
818 qcam->vdev.minor, qcam->pport->name); 818 qcam->vdev.num, qcam->pport->name);
819 819
820 qcams[num_cams++] = qcam; 820 qcams[num_cams++] = qcam;
821 821
diff --git a/drivers/media/video/cafe_ccic.c b/drivers/media/video/cafe_ccic.c
index fc9497bdd322..1740b9ebdcef 100644
--- a/drivers/media/video/cafe_ccic.c
+++ b/drivers/media/video/cafe_ccic.c
@@ -1476,12 +1476,9 @@ static int cafe_v4l_open(struct inode *inode, struct file *filp)
1476{ 1476{
1477 struct cafe_camera *cam; 1477 struct cafe_camera *cam;
1478 1478
1479 lock_kernel();
1480 cam = cafe_find_dev(iminor(inode)); 1479 cam = cafe_find_dev(iminor(inode));
1481 if (cam == NULL) { 1480 if (cam == NULL)
1482 unlock_kernel();
1483 return -ENODEV; 1481 return -ENODEV;
1484 }
1485 filp->private_data = cam; 1482 filp->private_data = cam;
1486 1483
1487 mutex_lock(&cam->s_mutex); 1484 mutex_lock(&cam->s_mutex);
@@ -1493,7 +1490,6 @@ static int cafe_v4l_open(struct inode *inode, struct file *filp)
1493 } 1490 }
1494 (cam->users)++; 1491 (cam->users)++;
1495 mutex_unlock(&cam->s_mutex); 1492 mutex_unlock(&cam->s_mutex);
1496 unlock_kernel();
1497 return 0; 1493 return 0;
1498} 1494}
1499 1495
@@ -2059,10 +2055,10 @@ static void cafe_dfs_cam_setup(struct cafe_camera *cam)
2059 2055
2060 if (!cafe_dfs_root) 2056 if (!cafe_dfs_root)
2061 return; 2057 return;
2062 sprintf(fname, "regs-%d", cam->v4ldev.minor); 2058 sprintf(fname, "regs-%d", cam->v4ldev.num);
2063 cam->dfs_regs = debugfs_create_file(fname, 0444, cafe_dfs_root, 2059 cam->dfs_regs = debugfs_create_file(fname, 0444, cafe_dfs_root,
2064 cam, &cafe_dfs_reg_ops); 2060 cam, &cafe_dfs_reg_ops);
2065 sprintf(fname, "cam-%d", cam->v4ldev.minor); 2061 sprintf(fname, "cam-%d", cam->v4ldev.num);
2066 cam->dfs_cam_regs = debugfs_create_file(fname, 0444, cafe_dfs_root, 2062 cam->dfs_cam_regs = debugfs_create_file(fname, 0444, cafe_dfs_root,
2067 cam, &cafe_dfs_cam_ops); 2063 cam, &cafe_dfs_cam_ops);
2068} 2064}
diff --git a/drivers/media/video/compat_ioctl32.c b/drivers/media/video/compat_ioctl32.c
index bd5d9de5a008..e6ca4012b5f0 100644
--- a/drivers/media/video/compat_ioctl32.c
+++ b/drivers/media/video/compat_ioctl32.c
@@ -867,6 +867,7 @@ long v4l_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg)
867 case VIDIOC_STREAMON32: 867 case VIDIOC_STREAMON32:
868 case VIDIOC_STREAMOFF32: 868 case VIDIOC_STREAMOFF32:
869 case VIDIOC_G_PARM: 869 case VIDIOC_G_PARM:
870 case VIDIOC_S_PARM:
870 case VIDIOC_G_STD: 871 case VIDIOC_G_STD:
871 case VIDIOC_S_STD: 872 case VIDIOC_S_STD:
872 case VIDIOC_G_TUNER: 873 case VIDIOC_G_TUNER:
@@ -885,6 +886,8 @@ long v4l_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg)
885 case VIDIOC_S_INPUT32: 886 case VIDIOC_S_INPUT32:
886 case VIDIOC_TRY_FMT32: 887 case VIDIOC_TRY_FMT32:
887 case VIDIOC_S_HW_FREQ_SEEK: 888 case VIDIOC_S_HW_FREQ_SEEK:
889 case VIDIOC_ENUM_FRAMESIZES:
890 case VIDIOC_ENUM_FRAMEINTERVALS:
888 ret = do_video_ioctl(file, cmd, arg); 891 ret = do_video_ioctl(file, cmd, arg);
889 break; 892 break;
890 893
diff --git a/drivers/media/video/cpia.c b/drivers/media/video/cpia.c
index 1798b779a25a..16c094f77852 100644
--- a/drivers/media/video/cpia.c
+++ b/drivers/media/video/cpia.c
@@ -1347,7 +1347,7 @@ static void create_proc_cpia_cam(struct cam_data *cam)
1347 if (!cpia_proc_root || !cam) 1347 if (!cpia_proc_root || !cam)
1348 return; 1348 return;
1349 1349
1350 snprintf(name, sizeof(name), "video%d", cam->vdev.minor); 1350 snprintf(name, sizeof(name), "video%d", cam->vdev.num);
1351 1351
1352 ent = create_proc_entry(name, S_IFREG|S_IRUGO|S_IWUSR, cpia_proc_root); 1352 ent = create_proc_entry(name, S_IFREG|S_IRUGO|S_IWUSR, cpia_proc_root);
1353 if (!ent) 1353 if (!ent)
@@ -1372,7 +1372,7 @@ static void destroy_proc_cpia_cam(struct cam_data *cam)
1372 if (!cam || !cam->proc_entry) 1372 if (!cam || !cam->proc_entry)
1373 return; 1373 return;
1374 1374
1375 snprintf(name, sizeof(name), "video%d", cam->vdev.minor); 1375 snprintf(name, sizeof(name), "video%d", cam->vdev.num);
1376 remove_proc_entry(name, cpia_proc_root); 1376 remove_proc_entry(name, cpia_proc_root);
1377 cam->proc_entry = NULL; 1377 cam->proc_entry = NULL;
1378} 1378}
@@ -4005,7 +4005,7 @@ void cpia_unregister_camera(struct cam_data *cam)
4005 } 4005 }
4006 4006
4007#ifdef CONFIG_PROC_FS 4007#ifdef CONFIG_PROC_FS
4008 DBG("destroying /proc/cpia/video%d\n", cam->vdev.minor); 4008 DBG("destroying /proc/cpia/video%d\n", cam->vdev.num);
4009 destroy_proc_cpia_cam(cam); 4009 destroy_proc_cpia_cam(cam);
4010#endif 4010#endif
4011 if (!cam->open_count) { 4011 if (!cam->open_count) {
diff --git a/drivers/media/video/cpia2/cpia2_v4l.c b/drivers/media/video/cpia2/cpia2_v4l.c
index 897e8d1a5c3c..1c6bd633f193 100644
--- a/drivers/media/video/cpia2/cpia2_v4l.c
+++ b/drivers/media/video/cpia2/cpia2_v4l.c
@@ -1973,7 +1973,7 @@ void cpia2_unregister_camera(struct camera_data *cam)
1973 } else { 1973 } else {
1974 LOG("/dev/video%d removed while open, " 1974 LOG("/dev/video%d removed while open, "
1975 "deferring video_unregister_device\n", 1975 "deferring video_unregister_device\n",
1976 cam->vdev->minor); 1976 cam->vdev->num);
1977 } 1977 }
1978} 1978}
1979 1979
diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
index 085121c2b47f..7874d9790a51 100644
--- a/drivers/media/video/cx18/cx18-driver.c
+++ b/drivers/media/video/cx18/cx18-driver.c
@@ -448,7 +448,14 @@ static int __devinit cx18_init_struct1(struct cx18 *cx)
448 mutex_init(&cx->gpio_lock); 448 mutex_init(&cx->gpio_lock);
449 449
450 spin_lock_init(&cx->lock); 450 spin_lock_init(&cx->lock);
451 spin_lock_init(&cx->dma_reg_lock); 451
452 cx->work_queue = create_singlethread_workqueue(cx->name);
453 if (cx->work_queue == NULL) {
454 CX18_ERR("Could not create work queue\n");
455 return -1;
456 }
457
458 INIT_WORK(&cx->work, cx18_work_handler);
452 459
453 /* start counting open_id at 1 */ 460 /* start counting open_id at 1 */
454 cx->open_id = 1; 461 cx->open_id = 1;
@@ -581,10 +588,10 @@ static void cx18_load_and_init_modules(struct cx18 *cx)
581 588
582#ifdef MODULE 589#ifdef MODULE
583 /* load modules */ 590 /* load modules */
584#ifndef CONFIG_MEDIA_TUNER 591#ifdef CONFIG_MEDIA_TUNER_MODULE
585 hw = cx18_request_module(cx, hw, "tuner", CX18_HW_TUNER); 592 hw = cx18_request_module(cx, hw, "tuner", CX18_HW_TUNER);
586#endif 593#endif
587#ifndef CONFIG_VIDEO_CS5345 594#ifdef CONFIG_VIDEO_CS5345_MODULE
588 hw = cx18_request_module(cx, hw, "cs5345", CX18_HW_CS5345); 595 hw = cx18_request_module(cx, hw, "cs5345", CX18_HW_CS5345);
589#endif 596#endif
590#endif 597#endif
@@ -613,6 +620,7 @@ static int __devinit cx18_probe(struct pci_dev *dev,
613 const struct pci_device_id *pci_id) 620 const struct pci_device_id *pci_id)
614{ 621{
615 int retval = 0; 622 int retval = 0;
623 int i;
616 int vbi_buf_size; 624 int vbi_buf_size;
617 u32 devtype; 625 u32 devtype;
618 struct cx18 *cx; 626 struct cx18 *cx;
@@ -698,7 +706,8 @@ static int __devinit cx18_probe(struct pci_dev *dev,
698 706
699 /* active i2c */ 707 /* active i2c */
700 CX18_DEBUG_INFO("activating i2c...\n"); 708 CX18_DEBUG_INFO("activating i2c...\n");
701 if (init_cx18_i2c(cx)) { 709 retval = init_cx18_i2c(cx);
710 if (retval) {
702 CX18_ERR("Could not initialize i2c\n"); 711 CX18_ERR("Could not initialize i2c\n");
703 goto free_map; 712 goto free_map;
704 } 713 }
@@ -830,14 +839,18 @@ free_map:
830free_mem: 839free_mem:
831 release_mem_region(cx->base_addr, CX18_MEM_SIZE); 840 release_mem_region(cx->base_addr, CX18_MEM_SIZE);
832free_workqueue: 841free_workqueue:
842 destroy_workqueue(cx->work_queue);
833err: 843err:
834 if (retval == 0) 844 if (retval == 0)
835 retval = -ENODEV; 845 retval = -ENODEV;
836 CX18_ERR("Error %d on initialization\n", retval); 846 CX18_ERR("Error %d on initialization\n", retval);
837 cx18_log_statistics(cx); 847 cx18_log_statistics(cx);
838 848
839 kfree(cx18_cards[cx18_cards_active]); 849 i = cx->num;
840 cx18_cards[cx18_cards_active] = NULL; 850 spin_lock(&cx18_cards_lock);
851 kfree(cx18_cards[i]);
852 cx18_cards[i] = NULL;
853 spin_unlock(&cx18_cards_lock);
841 return retval; 854 return retval;
842} 855}
843 856
@@ -927,6 +940,9 @@ static void cx18_remove(struct pci_dev *pci_dev)
927 940
928 cx18_halt_firmware(cx); 941 cx18_halt_firmware(cx);
929 942
943 flush_workqueue(cx->work_queue);
944 destroy_workqueue(cx->work_queue);
945
930 cx18_streams_cleanup(cx, 1); 946 cx18_streams_cleanup(cx, 1);
931 947
932 exit_cx18_i2c(cx); 948 exit_cx18_i2c(cx);
diff --git a/drivers/media/video/cx18/cx18-driver.h b/drivers/media/video/cx18/cx18-driver.h
index a4b1708fafe7..bbdd5f25041d 100644
--- a/drivers/media/video/cx18/cx18-driver.h
+++ b/drivers/media/video/cx18/cx18-driver.h
@@ -199,12 +199,15 @@ struct cx18_options {
199#define CX18_F_S_APPL_IO 8 /* this stream is used read/written by an application */ 199#define CX18_F_S_APPL_IO 8 /* this stream is used read/written by an application */
200 200
201/* per-cx18, i_flags */ 201/* per-cx18, i_flags */
202#define CX18_F_I_LOADED_FW 0 /* Loaded the firmware the first time */ 202#define CX18_F_I_LOADED_FW 0 /* Loaded firmware 1st time */
203#define CX18_F_I_EOS 4 /* End of encoder stream reached */ 203#define CX18_F_I_EOS 4 /* End of encoder stream */
204#define CX18_F_I_RADIO_USER 5 /* The radio tuner is selected */ 204#define CX18_F_I_RADIO_USER 5 /* radio tuner is selected */
205#define CX18_F_I_ENC_PAUSED 13 /* the encoder is paused */ 205#define CX18_F_I_ENC_PAUSED 13 /* the encoder is paused */
206#define CX18_F_I_INITED 21 /* set after first open */ 206#define CX18_F_I_HAVE_WORK 15 /* there is work to be done */
207#define CX18_F_I_FAILED 22 /* set if first open failed */ 207#define CX18_F_I_WORK_HANDLER_DVB 18 /* work to be done for DVB */
208#define CX18_F_I_INITED 21 /* set after first open */
209#define CX18_F_I_FAILED 22 /* set if first open failed */
210#define CX18_F_I_WORK_INITED 23 /* worker thread initialized */
208 211
209/* These are the VBI types as they appear in the embedded VBI private packets. */ 212/* These are the VBI types as they appear in the embedded VBI private packets. */
210#define CX18_SLICED_TYPE_TELETEXT_B (1) 213#define CX18_SLICED_TYPE_TELETEXT_B (1)
@@ -402,8 +405,6 @@ struct cx18 {
402 spinlock_t lock; /* lock access to this struct */ 405 spinlock_t lock; /* lock access to this struct */
403 int search_pack_header; 406 int search_pack_header;
404 407
405 spinlock_t dma_reg_lock; /* lock access to DMA engine registers */
406
407 int open_id; /* incremented each time an open occurs, used as 408 int open_id; /* incremented each time an open occurs, used as
408 unique ID. Starts at 1, so 0 can be used as 409 unique ID. Starts at 1, so 0 can be used as
409 uninitialized value in the stream->id. */ 410 uninitialized value in the stream->id. */
@@ -433,6 +434,9 @@ struct cx18 {
433 /* when the current DMA is finished this queue is woken up */ 434 /* when the current DMA is finished this queue is woken up */
434 wait_queue_head_t dma_waitq; 435 wait_queue_head_t dma_waitq;
435 436
437 struct workqueue_struct *work_queue;
438 struct work_struct work;
439
436 /* i2c */ 440 /* i2c */
437 struct i2c_adapter i2c_adap[2]; 441 struct i2c_adapter i2c_adap[2];
438 struct i2c_algo_bit_data i2c_algo[2]; 442 struct i2c_algo_bit_data i2c_algo[2];
diff --git a/drivers/media/video/cx18/cx18-dvb.c b/drivers/media/video/cx18/cx18-dvb.c
index afc694e7bdb2..4542e2e5e3d7 100644
--- a/drivers/media/video/cx18/cx18-dvb.c
+++ b/drivers/media/video/cx18/cx18-dvb.c
@@ -23,6 +23,8 @@
23#include "cx18-dvb.h" 23#include "cx18-dvb.h"
24#include "cx18-io.h" 24#include "cx18-io.h"
25#include "cx18-streams.h" 25#include "cx18-streams.h"
26#include "cx18-queue.h"
27#include "cx18-scb.h"
26#include "cx18-cards.h" 28#include "cx18-cards.h"
27#include "s5h1409.h" 29#include "s5h1409.h"
28#include "mxl5005s.h" 30#include "mxl5005s.h"
@@ -300,3 +302,24 @@ static int dvb_register(struct cx18_stream *stream)
300 302
301 return ret; 303 return ret;
302} 304}
305
306void cx18_dvb_work_handler(struct cx18 *cx)
307{
308 struct cx18_buffer *buf;
309 struct cx18_stream *s = &cx->streams[CX18_ENC_STREAM_TYPE_TS];
310
311 while ((buf = cx18_dequeue(s, &s->q_full)) != NULL) {
312 if (s->dvb.enabled)
313 dvb_dmx_swfilter(&s->dvb.demux, buf->buf,
314 buf->bytesused);
315
316 cx18_enqueue(s, buf, &s->q_free);
317 cx18_buf_sync_for_device(s, buf);
318 if (s->handle == CX18_INVALID_TASK_HANDLE) /* FIXME: improve */
319 continue;
320
321 cx18_vapi(cx, CX18_CPU_DE_SET_MDL, 5, s->handle,
322 (void __iomem *)&cx->scb->cpu_mdl[buf->id] - cx->enc_mem,
323 1, buf->id, s->buf_size);
324 }
325}
diff --git a/drivers/media/video/cx18/cx18-dvb.h b/drivers/media/video/cx18/cx18-dvb.h
index bf8d8f6f5455..bbdcefc87f28 100644
--- a/drivers/media/video/cx18/cx18-dvb.h
+++ b/drivers/media/video/cx18/cx18-dvb.h
@@ -23,3 +23,4 @@
23 23
24int cx18_dvb_register(struct cx18_stream *stream); 24int cx18_dvb_register(struct cx18_stream *stream);
25void cx18_dvb_unregister(struct cx18_stream *stream); 25void cx18_dvb_unregister(struct cx18_stream *stream);
26void cx18_dvb_work_handler(struct cx18 *cx);
diff --git a/drivers/media/video/cx18/cx18-io.c b/drivers/media/video/cx18/cx18-io.c
index 700ab9439c16..220fae8d4ad7 100644
--- a/drivers/media/video/cx18/cx18-io.c
+++ b/drivers/media/video/cx18/cx18-io.c
@@ -88,6 +88,19 @@ void cx18_writel_retry(struct cx18 *cx, u32 val, void __iomem *addr)
88 cx18_log_write_retries(cx, i, addr); 88 cx18_log_write_retries(cx, i, addr);
89} 89}
90 90
91void _cx18_writel_expect(struct cx18 *cx, u32 val, void __iomem *addr,
92 u32 eval, u32 mask)
93{
94 int i;
95 eval &= mask;
96 for (i = 0; i < CX18_MAX_MMIO_RETRIES; i++) {
97 cx18_writel_noretry(cx, val, addr);
98 if (eval == (cx18_readl_noretry(cx, addr) & mask))
99 break;
100 }
101 cx18_log_write_retries(cx, i, addr);
102}
103
91void cx18_writew_retry(struct cx18 *cx, u16 val, void __iomem *addr) 104void cx18_writew_retry(struct cx18 *cx, u16 val, void __iomem *addr)
92{ 105{
93 int i; 106 int i;
@@ -218,7 +231,7 @@ void cx18_memset_io(struct cx18 *cx, void __iomem *addr, int val, size_t count)
218void cx18_sw1_irq_enable(struct cx18 *cx, u32 val) 231void cx18_sw1_irq_enable(struct cx18 *cx, u32 val)
219{ 232{
220 u32 r; 233 u32 r;
221 cx18_write_reg(cx, val, SW1_INT_STATUS); 234 cx18_write_reg_expect(cx, val, SW1_INT_STATUS, ~val, val);
222 r = cx18_read_reg(cx, SW1_INT_ENABLE_PCI); 235 r = cx18_read_reg(cx, SW1_INT_ENABLE_PCI);
223 cx18_write_reg(cx, r | val, SW1_INT_ENABLE_PCI); 236 cx18_write_reg(cx, r | val, SW1_INT_ENABLE_PCI);
224} 237}
@@ -233,7 +246,7 @@ void cx18_sw1_irq_disable(struct cx18 *cx, u32 val)
233void cx18_sw2_irq_enable(struct cx18 *cx, u32 val) 246void cx18_sw2_irq_enable(struct cx18 *cx, u32 val)
234{ 247{
235 u32 r; 248 u32 r;
236 cx18_write_reg(cx, val, SW2_INT_STATUS); 249 cx18_write_reg_expect(cx, val, SW2_INT_STATUS, ~val, val);
237 r = cx18_read_reg(cx, SW2_INT_ENABLE_PCI); 250 r = cx18_read_reg(cx, SW2_INT_ENABLE_PCI);
238 cx18_write_reg(cx, r | val, SW2_INT_ENABLE_PCI); 251 cx18_write_reg(cx, r | val, SW2_INT_ENABLE_PCI);
239} 252}
diff --git a/drivers/media/video/cx18/cx18-io.h b/drivers/media/video/cx18/cx18-io.h
index 197d4fbd9f95..425244453ea7 100644
--- a/drivers/media/video/cx18/cx18-io.h
+++ b/drivers/media/video/cx18/cx18-io.h
@@ -39,7 +39,7 @@ static inline void cx18_io_delay(struct cx18 *cx)
39 39
40/* Statistics gathering */ 40/* Statistics gathering */
41static inline 41static inline
42void cx18_log_write_retries(struct cx18 *cx, int i, const void *addr) 42void cx18_log_write_retries(struct cx18 *cx, int i, const void __iomem *addr)
43{ 43{
44 if (i > CX18_MAX_MMIO_RETRIES) 44 if (i > CX18_MAX_MMIO_RETRIES)
45 i = CX18_MAX_MMIO_RETRIES; 45 i = CX18_MAX_MMIO_RETRIES;
@@ -48,7 +48,7 @@ void cx18_log_write_retries(struct cx18 *cx, int i, const void *addr)
48} 48}
49 49
50static inline 50static inline
51void cx18_log_read_retries(struct cx18 *cx, int i, const void *addr) 51void cx18_log_read_retries(struct cx18 *cx, int i, const void __iomem *addr)
52{ 52{
53 if (i > CX18_MAX_MMIO_RETRIES) 53 if (i > CX18_MAX_MMIO_RETRIES)
54 i = CX18_MAX_MMIO_RETRIES; 54 i = CX18_MAX_MMIO_RETRIES;
@@ -133,6 +133,8 @@ static inline void cx18_writel(struct cx18 *cx, u32 val, void __iomem *addr)
133 cx18_writel_noretry(cx, val, addr); 133 cx18_writel_noretry(cx, val, addr);
134} 134}
135 135
136void _cx18_writel_expect(struct cx18 *cx, u32 val, void __iomem *addr,
137 u32 eval, u32 mask);
136 138
137static inline 139static inline
138void cx18_writew_noretry(struct cx18 *cx, u16 val, void __iomem *addr) 140void cx18_writew_noretry(struct cx18 *cx, u16 val, void __iomem *addr)
@@ -271,6 +273,21 @@ static inline void cx18_write_reg(struct cx18 *cx, u32 val, u32 reg)
271 cx18_write_reg_noretry(cx, val, reg); 273 cx18_write_reg_noretry(cx, val, reg);
272} 274}
273 275
276static inline void _cx18_write_reg_expect(struct cx18 *cx, u32 val, u32 reg,
277 u32 eval, u32 mask)
278{
279 _cx18_writel_expect(cx, val, cx->reg_mem + reg, eval, mask);
280}
281
282static inline void cx18_write_reg_expect(struct cx18 *cx, u32 val, u32 reg,
283 u32 eval, u32 mask)
284{
285 if (cx18_retry_mmio)
286 _cx18_write_reg_expect(cx, val, reg, eval, mask);
287 else
288 cx18_write_reg_noretry(cx, val, reg);
289}
290
274 291
275static inline u32 cx18_read_reg_noretry(struct cx18 *cx, u32 reg) 292static inline u32 cx18_read_reg_noretry(struct cx18 *cx, u32 reg)
276{ 293{
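
Note: the new _cx18_writel_expect()/cx18_write_reg_expect() helpers above retry an MMIO write until the masked readback matches the expected value, bounded by CX18_MAX_MMIO_RETRIES. The following standalone sketch shows only that write-then-verify loop, using an ordinary variable in place of a device register (so the readback succeeds on the first pass); it is not the kernel helper itself.

#include <stdint.h>
#include <stdio.h>

#define MAX_MMIO_RETRIES 10

static volatile uint32_t fake_reg;	/* stands in for a memory-mapped register */

static void write_expect(volatile uint32_t *reg, uint32_t val,
			 uint32_t eval, uint32_t mask)
{
	int i;

	eval &= mask;
	for (i = 0; i < MAX_MMIO_RETRIES; i++) {
		*reg = val;				/* write ... */
		if ((*reg & mask) == eval)		/* ... then verify the bits we care about */
			break;
	}
	if (i == MAX_MMIO_RETRIES)
		fprintf(stderr, "write of %#x never read back as expected\n",
			(unsigned int)val);
}

int main(void)
{
	/* Raise an interrupt bit and expect to read it back set, in the
	 * spirit of cx18_write_reg_expect(cx, irq, SW1_INT_SET, irq, irq). */
	write_expect(&fake_reg, 0x4, 0x4, 0x4);
	printf("reg = %#x\n", (unsigned int)fake_reg);
	return 0;
}
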
diff --git a/drivers/media/video/cx18/cx18-irq.c b/drivers/media/video/cx18/cx18-irq.c
index 360330f5463f..5fbfbd0f1493 100644
--- a/drivers/media/video/cx18/cx18-irq.c
+++ b/drivers/media/video/cx18/cx18-irq.c
@@ -29,8 +29,20 @@
29#include "cx18-mailbox.h" 29#include "cx18-mailbox.h"
30#include "cx18-vbi.h" 30#include "cx18-vbi.h"
31#include "cx18-scb.h" 31#include "cx18-scb.h"
32#include "cx18-dvb.h"
32 33
33#define DMA_MAGIC_COOKIE 0x000001fe 34void cx18_work_handler(struct work_struct *work)
35{
36 struct cx18 *cx = container_of(work, struct cx18, work);
37 if (test_and_clear_bit(CX18_F_I_WORK_INITED, &cx->i_flags)) {
38 struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
39 /* This thread must use the FIFO scheduler as it
40 * is realtime sensitive. */
41 sched_setscheduler(current, SCHED_FIFO, &param);
42 }
43 if (test_and_clear_bit(CX18_F_I_WORK_HANDLER_DVB, &cx->i_flags))
44 cx18_dvb_work_handler(cx);
45}
34 46
35static void epu_dma_done(struct cx18 *cx, struct cx18_mailbox *mb) 47static void epu_dma_done(struct cx18 *cx, struct cx18_mailbox *mb)
36{ 48{
@@ -67,17 +79,11 @@ static void epu_dma_done(struct cx18 *cx, struct cx18_mailbox *mb)
67 if (buf) { 79 if (buf) {
68 cx18_buf_sync_for_cpu(s, buf); 80 cx18_buf_sync_for_cpu(s, buf);
69 if (s->type == CX18_ENC_STREAM_TYPE_TS && s->dvb.enabled) { 81 if (s->type == CX18_ENC_STREAM_TYPE_TS && s->dvb.enabled) {
70 /* process the buffer here */ 82 CX18_DEBUG_HI_DMA("TS recv bytesused = %d\n",
71 CX18_DEBUG_HI_DMA("TS recv and sent bytesused=%d\n",
72 buf->bytesused);
73
74 dvb_dmx_swfilter(&s->dvb.demux, buf->buf,
75 buf->bytesused); 83 buf->bytesused);
76 84
77 cx18_buf_sync_for_device(s, buf); 85 set_bit(CX18_F_I_WORK_HANDLER_DVB, &cx->i_flags);
78 cx18_vapi(cx, CX18_CPU_DE_SET_MDL, 5, s->handle, 86 set_bit(CX18_F_I_HAVE_WORK, &cx->i_flags);
79 (void __iomem *)&cx->scb->cpu_mdl[buf->id] - cx->enc_mem,
80 1, buf->id, s->buf_size);
81 } else 87 } else
82 set_bit(CX18_F_B_NEED_BUF_SWAP, &buf->b_flags); 88 set_bit(CX18_F_B_NEED_BUF_SWAP, &buf->b_flags);
83 } else { 89 } else {
@@ -109,7 +115,7 @@ static void epu_debug(struct cx18 *cx, struct cx18_mailbox *mb)
109 CX18_INFO("FW version: %s\n", p - 1); 115 CX18_INFO("FW version: %s\n", p - 1);
110} 116}
111 117
112static void hpu_cmd(struct cx18 *cx, u32 sw1) 118static void epu_cmd(struct cx18 *cx, u32 sw1)
113{ 119{
114 struct cx18_mailbox mb; 120 struct cx18_mailbox mb;
115 121
@@ -125,12 +131,31 @@ static void hpu_cmd(struct cx18 *cx, u32 sw1)
125 epu_debug(cx, &mb); 131 epu_debug(cx, &mb);
126 break; 132 break;
127 default: 133 default:
128 CX18_WARN("Unexpected mailbox command %08x\n", mb.cmd); 134 CX18_WARN("Unknown CPU_TO_EPU mailbox command %#08x\n",
135 mb.cmd);
129 break; 136 break;
130 } 137 }
131 } 138 }
132 if (sw1 & (IRQ_APU_TO_EPU | IRQ_HPU_TO_EPU)) 139
133 CX18_WARN("Unexpected interrupt %08x\n", sw1); 140 if (sw1 & IRQ_APU_TO_EPU) {
141 cx18_memcpy_fromio(cx, &mb, &cx->scb->apu2epu_mb, sizeof(mb));
142 CX18_WARN("Unknown APU_TO_EPU mailbox command %#08x\n", mb.cmd);
143 }
144
145 if (sw1 & IRQ_HPU_TO_EPU) {
146 cx18_memcpy_fromio(cx, &mb, &cx->scb->hpu2epu_mb, sizeof(mb));
147 CX18_WARN("Unknown HPU_TO_EPU mailbox command %#08x\n", mb.cmd);
148 }
149}
150
151static void xpu_ack(struct cx18 *cx, u32 sw2)
152{
153 if (sw2 & IRQ_CPU_TO_EPU_ACK)
154 wake_up(&cx->mb_cpu_waitq);
155 if (sw2 & IRQ_APU_TO_EPU_ACK)
156 wake_up(&cx->mb_apu_waitq);
157 if (sw2 & IRQ_HPU_TO_EPU_ACK)
158 wake_up(&cx->mb_hpu_waitq);
134} 159}
135 160
136irqreturn_t cx18_irq_handler(int irq, void *dev_id) 161irqreturn_t cx18_irq_handler(int irq, void *dev_id)
@@ -140,43 +165,36 @@ irqreturn_t cx18_irq_handler(int irq, void *dev_id)
140 u32 sw2, sw2_mask; 165 u32 sw2, sw2_mask;
141 u32 hw2, hw2_mask; 166 u32 hw2, hw2_mask;
142 167
143 spin_lock(&cx->dma_reg_lock); 168 sw1_mask = cx18_read_reg(cx, SW1_INT_ENABLE_PCI);
144 169 sw1 = cx18_read_reg(cx, SW1_INT_STATUS) & sw1_mask;
170 sw2_mask = cx18_read_reg(cx, SW2_INT_ENABLE_PCI);
171 sw2 = cx18_read_reg(cx, SW2_INT_STATUS) & sw2_mask;
145 hw2_mask = cx18_read_reg(cx, HW2_INT_MASK5_PCI); 172 hw2_mask = cx18_read_reg(cx, HW2_INT_MASK5_PCI);
146 hw2 = cx18_read_reg(cx, HW2_INT_CLR_STATUS) & hw2_mask; 173 hw2 = cx18_read_reg(cx, HW2_INT_CLR_STATUS) & hw2_mask;
147 sw2_mask = cx18_read_reg(cx, SW2_INT_ENABLE_PCI) | IRQ_EPU_TO_HPU_ACK;
148 sw2 = cx18_read_reg(cx, SW2_INT_STATUS) & sw2_mask;
149 sw1_mask = cx18_read_reg(cx, SW1_INT_ENABLE_PCI) | IRQ_EPU_TO_HPU;
150 sw1 = cx18_read_reg(cx, SW1_INT_STATUS) & sw1_mask;
151 174
152 cx18_write_reg(cx, sw2&sw2_mask, SW2_INT_STATUS); 175 if (sw1)
153 cx18_write_reg(cx, sw1&sw1_mask, SW1_INT_STATUS); 176 cx18_write_reg_expect(cx, sw1, SW1_INT_STATUS, ~sw1, sw1);
154 cx18_write_reg(cx, hw2&hw2_mask, HW2_INT_CLR_STATUS); 177 if (sw2)
178 cx18_write_reg_expect(cx, sw2, SW2_INT_STATUS, ~sw2, sw2);
179 if (hw2)
180 cx18_write_reg_expect(cx, hw2, HW2_INT_CLR_STATUS, ~hw2, hw2);
155 181
156 if (sw1 || sw2 || hw2) 182 if (sw1 || sw2 || hw2)
157 CX18_DEBUG_HI_IRQ("SW1: %x SW2: %x HW2: %x\n", sw1, sw2, hw2); 183 CX18_DEBUG_HI_IRQ("SW1: %x SW2: %x HW2: %x\n", sw1, sw2, hw2);
158 184
159 /* To do: interrupt-based I2C handling 185 /* To do: interrupt-based I2C handling
160 if (hw2 & 0x00c00000) { 186 if (hw2 & (HW2_I2C1_INT|HW2_I2C2_INT)) {
161 } 187 }
162 */ 188 */
163 189
164 if (sw2) { 190 if (sw2)
165 if (sw2 & (cx18_readl(cx, &cx->scb->cpu2hpu_irq_ack) | 191 xpu_ack(cx, sw2);
166 cx18_readl(cx, &cx->scb->cpu2epu_irq_ack)))
167 wake_up(&cx->mb_cpu_waitq);
168 if (sw2 & (cx18_readl(cx, &cx->scb->apu2hpu_irq_ack) |
169 cx18_readl(cx, &cx->scb->apu2epu_irq_ack)))
170 wake_up(&cx->mb_apu_waitq);
171 if (sw2 & cx18_readl(cx, &cx->scb->epu2hpu_irq_ack))
172 wake_up(&cx->mb_epu_waitq);
173 if (sw2 & cx18_readl(cx, &cx->scb->hpu2epu_irq_ack))
174 wake_up(&cx->mb_hpu_waitq);
175 }
176 192
177 if (sw1) 193 if (sw1)
178 hpu_cmd(cx, sw1); 194 epu_cmd(cx, sw1);
179 spin_unlock(&cx->dma_reg_lock); 195
196 if (test_and_clear_bit(CX18_F_I_HAVE_WORK, &cx->i_flags))
197 queue_work(cx->work_queue, &cx->work);
180 198
181 return (hw2 | sw1 | sw2) ? IRQ_HANDLED : IRQ_NONE; 199 return (sw1 || sw2 || hw2) ? IRQ_HANDLED : IRQ_NONE;
182} 200}
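
Note: the cx18-irq.c rework above splits TS handling into a top half and a bottom half: the interrupt handler only flags that DVB work exists and queues cx->work, while the worker later drains the full-buffer queue and feeds the demux in process context. Below is a simplified single-threaded sketch of that split, not the driver code; the flags, counters and function names are illustrative.

#include <stdio.h>

#define F_HAVE_WORK	0x1
#define F_WORK_DVB	0x2

static unsigned int i_flags;
static int full_buffers;		/* stand-in for the stream's q_full queue */

/* Top half: runs in interrupt context, so it only records the work. */
static void irq_top_half(void)
{
	full_buffers++;
	i_flags |= F_WORK_DVB | F_HAVE_WORK;
	/* driver equivalent: queue_work(cx->work_queue, &cx->work); */
}

/* Bottom half: runs later in process context and drains the queue. */
static void work_handler(void)
{
	if (!(i_flags & F_WORK_DVB))
		return;
	i_flags &= ~(F_WORK_DVB | F_HAVE_WORK);

	while (full_buffers > 0) {
		full_buffers--;
		/* driver equivalent: dvb_dmx_swfilter(), then hand the buffer
		 * back to the firmware via CX18_CPU_DE_SET_MDL */
		printf("filtered one TS buffer\n");
	}
}

int main(void)
{
	irq_top_half();
	irq_top_half();
	work_handler();			/* runs later, outside interrupt context */
	return 0;
}
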
diff --git a/drivers/media/video/cx18/cx18-irq.h b/drivers/media/video/cx18/cx18-irq.h
index 379f704f5cba..6173ca3bc9e4 100644
--- a/drivers/media/video/cx18/cx18-irq.h
+++ b/drivers/media/video/cx18/cx18-irq.h
@@ -32,6 +32,4 @@
32 32
33irqreturn_t cx18_irq_handler(int irq, void *dev_id); 33irqreturn_t cx18_irq_handler(int irq, void *dev_id);
34 34
35void cx18_irq_work_handler(struct work_struct *work); 35void cx18_work_handler(struct work_struct *work);
36void cx18_dma_stream_dec_prepare(struct cx18_stream *s, u32 offset, int lock);
37void cx18_unfinished_dma(unsigned long arg);
diff --git a/drivers/media/video/cx18/cx18-mailbox.c b/drivers/media/video/cx18/cx18-mailbox.c
index 9d18dd22de76..acff7dfb60df 100644
--- a/drivers/media/video/cx18/cx18-mailbox.c
+++ b/drivers/media/video/cx18/cx18-mailbox.c
@@ -83,7 +83,7 @@ static const struct cx18_api_info api_info[] = {
83 API_ENTRY(CPU, CX18_CPU_DE_SET_MDL_ACK, 0), 83 API_ENTRY(CPU, CX18_CPU_DE_SET_MDL_ACK, 0),
84 API_ENTRY(CPU, CX18_CPU_DE_SET_MDL, API_FAST), 84 API_ENTRY(CPU, CX18_CPU_DE_SET_MDL, API_FAST),
85 API_ENTRY(CPU, CX18_APU_RESETAI, API_FAST), 85 API_ENTRY(CPU, CX18_APU_RESETAI, API_FAST),
86 API_ENTRY(CPU, CX18_CPU_DE_RELEASE_MDL, 0), 86 API_ENTRY(CPU, CX18_CPU_DE_RELEASE_MDL, API_SLOW),
87 API_ENTRY(0, 0, 0), 87 API_ENTRY(0, 0, 0),
88}; 88};
89 89
@@ -176,7 +176,7 @@ long cx18_mb_ack(struct cx18 *cx, const struct cx18_mailbox *mb)
176 176
177 cx18_setup_page(cx, SCB_OFFSET); 177 cx18_setup_page(cx, SCB_OFFSET);
178 cx18_write_sync(cx, mb->request, &ack_mb->ack); 178 cx18_write_sync(cx, mb->request, &ack_mb->ack);
179 cx18_write_reg(cx, ack_irq, SW2_INT_SET); 179 cx18_write_reg_expect(cx, ack_irq, SW2_INT_SET, ack_irq, ack_irq);
180 return 0; 180 return 0;
181} 181}
182 182
@@ -225,7 +225,7 @@ static int cx18_api_call(struct cx18 *cx, u32 cmd, int args, u32 data[])
225 } 225 }
226 if (info->flags & API_FAST) 226 if (info->flags & API_FAST)
227 timeout /= 2; 227 timeout /= 2;
228 cx18_write_reg(cx, irq, SW1_INT_SET); 228 cx18_write_reg_expect(cx, irq, SW1_INT_SET, irq, irq);
229 229
230 while (!sig && cx18_readl(cx, &mb->ack) != cx18_readl(cx, &mb->request) 230 while (!sig && cx18_readl(cx, &mb->ack) != cx18_readl(cx, &mb->request)
231 && cnt < 660) { 231 && cnt < 660) {
diff --git a/drivers/media/video/cx18/cx18-queue.c b/drivers/media/video/cx18/cx18-queue.c
index a33ba04a2686..174682c2582f 100644
--- a/drivers/media/video/cx18/cx18-queue.c
+++ b/drivers/media/video/cx18/cx18-queue.c
@@ -88,15 +88,13 @@ struct cx18_buffer *cx18_queue_get_buf_irq(struct cx18_stream *s, u32 id,
88 88
89 if (buf->id != id) 89 if (buf->id != id)
90 continue; 90 continue;
91
91 buf->bytesused = bytesused; 92 buf->bytesused = bytesused;
92 /* the transport buffers are handled differently, 93 atomic_dec(&s->q_free.buffers);
93 they are not moved to the full queue */ 94 atomic_inc(&s->q_full.buffers);
94 if (s->type != CX18_ENC_STREAM_TYPE_TS) { 95 s->q_full.bytesused += buf->bytesused;
95 atomic_dec(&s->q_free.buffers); 96 list_move_tail(&buf->list, &s->q_full.list);
96 atomic_inc(&s->q_full.buffers); 97
97 s->q_full.bytesused += buf->bytesused;
98 list_move_tail(&buf->list, &s->q_full.list);
99 }
100 spin_unlock(&s->qlock); 98 spin_unlock(&s->qlock);
101 return buf; 99 return buf;
102 } 100 }
diff --git a/drivers/media/video/cx18/cx18-scb.h b/drivers/media/video/cx18/cx18-scb.h
index 86b4cb15d163..594713bbed68 100644
--- a/drivers/media/video/cx18/cx18-scb.h
+++ b/drivers/media/video/cx18/cx18-scb.h
@@ -128,22 +128,22 @@ struct cx18_scb {
128 u32 apu2cpu_irq; 128 u32 apu2cpu_irq;
129 /* Value to write to register SW2 register set (0xC7003140) after the 129 /* Value to write to register SW2 register set (0xC7003140) after the
130 command is cleared */ 130 command is cleared */
131 u32 apu2cpu_irq_ack; 131 u32 cpu2apu_irq_ack;
132 u32 reserved2[13]; 132 u32 reserved2[13];
133 133
134 u32 hpu2cpu_mb_offset; 134 u32 hpu2cpu_mb_offset;
135 u32 hpu2cpu_irq; 135 u32 hpu2cpu_irq;
136 u32 hpu2cpu_irq_ack; 136 u32 cpu2hpu_irq_ack;
137 u32 reserved3[13]; 137 u32 reserved3[13];
138 138
139 u32 ppu2cpu_mb_offset; 139 u32 ppu2cpu_mb_offset;
140 u32 ppu2cpu_irq; 140 u32 ppu2cpu_irq;
141 u32 ppu2cpu_irq_ack; 141 u32 cpu2ppu_irq_ack;
142 u32 reserved4[13]; 142 u32 reserved4[13];
143 143
144 u32 epu2cpu_mb_offset; 144 u32 epu2cpu_mb_offset;
145 u32 epu2cpu_irq; 145 u32 epu2cpu_irq;
146 u32 epu2cpu_irq_ack; 146 u32 cpu2epu_irq_ack;
147 u32 reserved5[13]; 147 u32 reserved5[13];
148 u32 reserved6[8]; 148 u32 reserved6[8];
149 149
@@ -153,22 +153,22 @@ struct cx18_scb {
153 u32 reserved11[7]; 153 u32 reserved11[7];
154 u32 cpu2apu_mb_offset; 154 u32 cpu2apu_mb_offset;
155 u32 cpu2apu_irq; 155 u32 cpu2apu_irq;
156 u32 cpu2apu_irq_ack; 156 u32 apu2cpu_irq_ack;
157 u32 reserved12[13]; 157 u32 reserved12[13];
158 158
159 u32 hpu2apu_mb_offset; 159 u32 hpu2apu_mb_offset;
160 u32 hpu2apu_irq; 160 u32 hpu2apu_irq;
161 u32 hpu2apu_irq_ack; 161 u32 apu2hpu_irq_ack;
162 u32 reserved13[13]; 162 u32 reserved13[13];
163 163
164 u32 ppu2apu_mb_offset; 164 u32 ppu2apu_mb_offset;
165 u32 ppu2apu_irq; 165 u32 ppu2apu_irq;
166 u32 ppu2apu_irq_ack; 166 u32 apu2ppu_irq_ack;
167 u32 reserved14[13]; 167 u32 reserved14[13];
168 168
169 u32 epu2apu_mb_offset; 169 u32 epu2apu_mb_offset;
170 u32 epu2apu_irq; 170 u32 epu2apu_irq;
171 u32 epu2apu_irq_ack; 171 u32 apu2epu_irq_ack;
172 u32 reserved15[13]; 172 u32 reserved15[13];
173 u32 reserved16[8]; 173 u32 reserved16[8];
174 174
@@ -178,22 +178,22 @@ struct cx18_scb {
178 u32 reserved21[7]; 178 u32 reserved21[7];
179 u32 cpu2hpu_mb_offset; 179 u32 cpu2hpu_mb_offset;
180 u32 cpu2hpu_irq; 180 u32 cpu2hpu_irq;
181 u32 cpu2hpu_irq_ack; 181 u32 hpu2cpu_irq_ack;
182 u32 reserved22[13]; 182 u32 reserved22[13];
183 183
184 u32 apu2hpu_mb_offset; 184 u32 apu2hpu_mb_offset;
185 u32 apu2hpu_irq; 185 u32 apu2hpu_irq;
186 u32 apu2hpu_irq_ack; 186 u32 hpu2apu_irq_ack;
187 u32 reserved23[13]; 187 u32 reserved23[13];
188 188
189 u32 ppu2hpu_mb_offset; 189 u32 ppu2hpu_mb_offset;
190 u32 ppu2hpu_irq; 190 u32 ppu2hpu_irq;
191 u32 ppu2hpu_irq_ack; 191 u32 hpu2ppu_irq_ack;
192 u32 reserved24[13]; 192 u32 reserved24[13];
193 193
194 u32 epu2hpu_mb_offset; 194 u32 epu2hpu_mb_offset;
195 u32 epu2hpu_irq; 195 u32 epu2hpu_irq;
196 u32 epu2hpu_irq_ack; 196 u32 hpu2epu_irq_ack;
197 u32 reserved25[13]; 197 u32 reserved25[13];
198 u32 reserved26[8]; 198 u32 reserved26[8];
199 199
@@ -203,22 +203,22 @@ struct cx18_scb {
203 u32 reserved31[7]; 203 u32 reserved31[7];
204 u32 cpu2ppu_mb_offset; 204 u32 cpu2ppu_mb_offset;
205 u32 cpu2ppu_irq; 205 u32 cpu2ppu_irq;
206 u32 cpu2ppu_irq_ack; 206 u32 ppu2cpu_irq_ack;
207 u32 reserved32[13]; 207 u32 reserved32[13];
208 208
209 u32 apu2ppu_mb_offset; 209 u32 apu2ppu_mb_offset;
210 u32 apu2ppu_irq; 210 u32 apu2ppu_irq;
211 u32 apu2ppu_irq_ack; 211 u32 ppu2apu_irq_ack;
212 u32 reserved33[13]; 212 u32 reserved33[13];
213 213
214 u32 hpu2ppu_mb_offset; 214 u32 hpu2ppu_mb_offset;
215 u32 hpu2ppu_irq; 215 u32 hpu2ppu_irq;
216 u32 hpu2ppu_irq_ack; 216 u32 ppu2hpu_irq_ack;
217 u32 reserved34[13]; 217 u32 reserved34[13];
218 218
219 u32 epu2ppu_mb_offset; 219 u32 epu2ppu_mb_offset;
220 u32 epu2ppu_irq; 220 u32 epu2ppu_irq;
221 u32 epu2ppu_irq_ack; 221 u32 ppu2epu_irq_ack;
222 u32 reserved35[13]; 222 u32 reserved35[13];
223 u32 reserved36[8]; 223 u32 reserved36[8];
224 224
@@ -228,22 +228,22 @@ struct cx18_scb {
228 u32 reserved41[7]; 228 u32 reserved41[7];
229 u32 cpu2epu_mb_offset; 229 u32 cpu2epu_mb_offset;
230 u32 cpu2epu_irq; 230 u32 cpu2epu_irq;
231 u32 cpu2epu_irq_ack; 231 u32 epu2cpu_irq_ack;
232 u32 reserved42[13]; 232 u32 reserved42[13];
233 233
234 u32 apu2epu_mb_offset; 234 u32 apu2epu_mb_offset;
235 u32 apu2epu_irq; 235 u32 apu2epu_irq;
236 u32 apu2epu_irq_ack; 236 u32 epu2apu_irq_ack;
237 u32 reserved43[13]; 237 u32 reserved43[13];
238 238
239 u32 hpu2epu_mb_offset; 239 u32 hpu2epu_mb_offset;
240 u32 hpu2epu_irq; 240 u32 hpu2epu_irq;
241 u32 hpu2epu_irq_ack; 241 u32 epu2hpu_irq_ack;
242 u32 reserved44[13]; 242 u32 reserved44[13];
243 243
244 u32 ppu2epu_mb_offset; 244 u32 ppu2epu_mb_offset;
245 u32 ppu2epu_irq; 245 u32 ppu2epu_irq;
246 u32 ppu2epu_irq_ack; 246 u32 epu2ppu_irq_ack;
247 u32 reserved45[13]; 247 u32 reserved45[13];
248 u32 reserved46[8]; 248 u32 reserved46[8];
249 249
diff --git a/drivers/media/video/cx18/cx18-streams.c b/drivers/media/video/cx18/cx18-streams.c
index 0c8e7542cf60..e5ff7705b7a1 100644
--- a/drivers/media/video/cx18/cx18-streams.c
+++ b/drivers/media/video/cx18/cx18-streams.c
@@ -200,16 +200,18 @@ static int cx18_prep_dev(struct cx18 *cx, int type)
200/* Initialize v4l2 variables and register v4l2 devices */ 200/* Initialize v4l2 variables and register v4l2 devices */
201int cx18_streams_setup(struct cx18 *cx) 201int cx18_streams_setup(struct cx18 *cx)
202{ 202{
203 int type; 203 int type, ret;
204 204
205 /* Setup V4L2 Devices */ 205 /* Setup V4L2 Devices */
206 for (type = 0; type < CX18_MAX_STREAMS; type++) { 206 for (type = 0; type < CX18_MAX_STREAMS; type++) {
207 /* Prepare device */ 207 /* Prepare device */
208 if (cx18_prep_dev(cx, type)) 208 ret = cx18_prep_dev(cx, type);
209 if (ret < 0)
209 break; 210 break;
210 211
211 /* Allocate Stream */ 212 /* Allocate Stream */
212 if (cx18_stream_alloc(&cx->streams[type])) 213 ret = cx18_stream_alloc(&cx->streams[type]);
214 if (ret < 0)
213 break; 215 break;
214 } 216 }
215 if (type == CX18_MAX_STREAMS) 217 if (type == CX18_MAX_STREAMS)
@@ -217,14 +219,14 @@ int cx18_streams_setup(struct cx18 *cx)
217 219
218 /* One or more streams could not be initialized. Clean 'em all up. */ 220 /* One or more streams could not be initialized. Clean 'em all up. */
219 cx18_streams_cleanup(cx, 0); 221 cx18_streams_cleanup(cx, 0);
220 return -ENOMEM; 222 return ret;
221} 223}
222 224
223static int cx18_reg_dev(struct cx18 *cx, int type) 225static int cx18_reg_dev(struct cx18 *cx, int type)
224{ 226{
225 struct cx18_stream *s = &cx->streams[type]; 227 struct cx18_stream *s = &cx->streams[type];
226 int vfl_type = cx18_stream_info[type].vfl_type; 228 int vfl_type = cx18_stream_info[type].vfl_type;
227 int num; 229 int num, ret;
228 230
229 /* TODO: Shouldn't this be a VFL_TYPE_TRANSPORT or something? 231 /* TODO: Shouldn't this be a VFL_TYPE_TRANSPORT or something?
230 * We need a VFL_TYPE_TS defined. 232 * We need a VFL_TYPE_TS defined.
@@ -233,9 +235,10 @@ static int cx18_reg_dev(struct cx18 *cx, int type)
233 /* just return if no DVB is supported */ 235 /* just return if no DVB is supported */
234 if ((cx->card->hw_all & CX18_HW_DVB) == 0) 236 if ((cx->card->hw_all & CX18_HW_DVB) == 0)
235 return 0; 237 return 0;
236 if (cx18_dvb_register(s) < 0) { 238 ret = cx18_dvb_register(s);
239 if (ret < 0) {
237 CX18_ERR("DVB failed to register\n"); 240 CX18_ERR("DVB failed to register\n");
238 return -EINVAL; 241 return ret;
239 } 242 }
240 } 243 }
241 244
@@ -252,12 +255,13 @@ static int cx18_reg_dev(struct cx18 *cx, int type)
252 } 255 }
253 256
254 /* Register device. First try the desired minor, then any free one. */ 257 /* Register device. First try the desired minor, then any free one. */
255 if (video_register_device(s->v4l2dev, vfl_type, num)) { 258 ret = video_register_device(s->v4l2dev, vfl_type, num);
259 if (ret < 0) {
256 CX18_ERR("Couldn't register v4l2 device for %s kernel number %d\n", 260 CX18_ERR("Couldn't register v4l2 device for %s kernel number %d\n",
257 s->name, num); 261 s->name, num);
258 video_device_release(s->v4l2dev); 262 video_device_release(s->v4l2dev);
259 s->v4l2dev = NULL; 263 s->v4l2dev = NULL;
260 return -ENOMEM; 264 return ret;
261 } 265 }
262 num = s->v4l2dev->num; 266 num = s->v4l2dev->num;
263 267
@@ -290,18 +294,22 @@ static int cx18_reg_dev(struct cx18 *cx, int type)
290int cx18_streams_register(struct cx18 *cx) 294int cx18_streams_register(struct cx18 *cx)
291{ 295{
292 int type; 296 int type;
293 int err = 0; 297 int err;
298 int ret = 0;
294 299
295 /* Register V4L2 devices */ 300 /* Register V4L2 devices */
296 for (type = 0; type < CX18_MAX_STREAMS; type++) 301 for (type = 0; type < CX18_MAX_STREAMS; type++) {
297 err |= cx18_reg_dev(cx, type); 302 err = cx18_reg_dev(cx, type);
303 if (err && ret == 0)
304 ret = err;
305 }
298 306
299 if (err == 0) 307 if (ret == 0)
300 return 0; 308 return 0;
301 309
302 /* One or more streams could not be initialized. Clean 'em all up. */ 310 /* One or more streams could not be initialized. Clean 'em all up. */
303 cx18_streams_cleanup(cx, 1); 311 cx18_streams_cleanup(cx, 1);
304 return -ENOMEM; 312 return ret;
305} 313}
306 314
307/* Unregister v4l2 devices */ 315/* Unregister v4l2 devices */
diff --git a/drivers/media/video/cx23885/cx23885-417.c b/drivers/media/video/cx23885/cx23885-417.c
index 395c11fa47ce..00831f3ef8f5 100644
--- a/drivers/media/video/cx23885/cx23885-417.c
+++ b/drivers/media/video/cx23885/cx23885-417.c
@@ -1815,7 +1815,7 @@ int cx23885_417_register(struct cx23885_dev *dev)
1815 cx23885_mc417_init(dev); 1815 cx23885_mc417_init(dev);
1816 1816
1817 printk(KERN_INFO "%s: registered device video%d [mpeg]\n", 1817 printk(KERN_INFO "%s: registered device video%d [mpeg]\n",
1818 dev->name, dev->v4l_device->minor & 0x1f); 1818 dev->name, dev->v4l_device->num);
1819 1819
1820 return 0; 1820 return 0;
1821} 1821}
diff --git a/drivers/media/video/cx23885/cx23885-video.c b/drivers/media/video/cx23885/cx23885-video.c
index ab3110d6046c..c742a10be5cb 100644
--- a/drivers/media/video/cx23885/cx23885-video.c
+++ b/drivers/media/video/cx23885/cx23885-video.c
@@ -1543,7 +1543,7 @@ int cx23885_video_register(struct cx23885_dev *dev)
1543 goto fail_unreg; 1543 goto fail_unreg;
1544 } 1544 }
1545 printk(KERN_INFO "%s/0: registered device video%d [v4l2]\n", 1545 printk(KERN_INFO "%s/0: registered device video%d [v4l2]\n",
1546 dev->name, dev->video_dev->minor & 0x1f); 1546 dev->name, dev->video_dev->num);
1547 /* initial device configuration */ 1547 /* initial device configuration */
1548 mutex_lock(&dev->lock); 1548 mutex_lock(&dev->lock);
1549 cx23885_set_tvnorm(dev, dev->tvnorm); 1549 cx23885_set_tvnorm(dev, dev->tvnorm);
diff --git a/drivers/media/video/cx88/cx88-blackbird.c b/drivers/media/video/cx88/cx88-blackbird.c
index e71369754305..d3ae5b4dfca7 100644
--- a/drivers/media/video/cx88/cx88-blackbird.c
+++ b/drivers/media/video/cx88/cx88-blackbird.c
@@ -1078,7 +1078,7 @@ static int mpeg_open(struct inode *inode, struct file *file)
1078 } 1078 }
1079 } 1079 }
1080 1080
1081 if (blackbird_initialize_codec(dev) < 0) { 1081 if (!atomic_read(&dev->core->mpeg_users) && blackbird_initialize_codec(dev) < 0) {
1082 if (drv) 1082 if (drv)
1083 drv->request_release(drv); 1083 drv->request_release(drv);
1084 unlock_kernel(); 1084 unlock_kernel();
@@ -1109,6 +1109,8 @@ static int mpeg_open(struct inode *inode, struct file *file)
1109 fh->mpegq.field); 1109 fh->mpegq.field);
1110 unlock_kernel(); 1110 unlock_kernel();
1111 1111
1112 atomic_inc(&dev->core->mpeg_users);
1113
1112 return 0; 1114 return 0;
1113} 1115}
1114 1116
@@ -1118,7 +1120,7 @@ static int mpeg_release(struct inode *inode, struct file *file)
1118 struct cx8802_dev *dev = fh->dev; 1120 struct cx8802_dev *dev = fh->dev;
1119 struct cx8802_driver *drv = NULL; 1121 struct cx8802_driver *drv = NULL;
1120 1122
1121 if (dev->mpeg_active) 1123 if (dev->mpeg_active && atomic_read(&dev->core->mpeg_users) == 1)
1122 blackbird_stop_codec(dev); 1124 blackbird_stop_codec(dev);
1123 1125
1124 cx8802_cancel_buffers(fh->dev); 1126 cx8802_cancel_buffers(fh->dev);
@@ -1138,6 +1140,8 @@ static int mpeg_release(struct inode *inode, struct file *file)
1138 if (drv) 1140 if (drv)
1139 drv->request_release(drv); 1141 drv->request_release(drv);
1140 1142
1143 atomic_dec(&dev->core->mpeg_users);
1144
1141 return 0; 1145 return 0;
1142} 1146}
1143 1147
@@ -1158,6 +1162,10 @@ static unsigned int
1158mpeg_poll(struct file *file, struct poll_table_struct *wait) 1162mpeg_poll(struct file *file, struct poll_table_struct *wait)
1159{ 1163{
1160 struct cx8802_fh *fh = file->private_data; 1164 struct cx8802_fh *fh = file->private_data;
1165 struct cx8802_dev *dev = fh->dev;
1166
1167 if (!dev->mpeg_active)
1168 blackbird_start_codec(file, fh);
1161 1169
1162 return videobuf_poll_stream(file, &fh->mpegq, wait); 1170 return videobuf_poll_stream(file, &fh->mpegq, wait);
1163} 1171}
@@ -1285,7 +1293,7 @@ static int blackbird_register_video(struct cx8802_dev *dev)
1285 return err; 1293 return err;
1286 } 1294 }
1287 printk(KERN_INFO "%s/2: registered device video%d [mpeg]\n", 1295 printk(KERN_INFO "%s/2: registered device video%d [mpeg]\n",
1288 dev->core->name,dev->mpeg_dev->minor & 0x1f); 1296 dev->core->name, dev->mpeg_dev->num);
1289 return 0; 1297 return 0;
1290} 1298}
1291 1299
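[Note on the cx88-blackbird hunks above: open/release now keep a per-core mpeg_users count so the codec firmware is brought up only on the first open and stopped only when the last user releases, and poll lazily starts the codec if it is not yet active. A minimal sketch of that first-open/last-close accounting, reusing the names from the patch (the two wrapper functions are stand-ins):]

/* Sketch of the first-open / last-close accounting added here. */
static int mpeg_users_open(struct cx8802_dev *dev)
{
	/* only the first opener pays for codec bring-up */
	if (!atomic_read(&dev->core->mpeg_users) &&
	    blackbird_initialize_codec(dev) < 0)
		return -EINVAL;

	atomic_inc(&dev->core->mpeg_users);
	return 0;
}

static void mpeg_users_release(struct cx8802_dev *dev)
{
	/* only the last opener stops the codec */
	if (dev->mpeg_active && atomic_read(&dev->core->mpeg_users) == 1)
		blackbird_stop_codec(dev);

	atomic_dec(&dev->core->mpeg_users);
}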
diff --git a/drivers/media/video/cx88/cx88-cards.c b/drivers/media/video/cx88/cx88-cards.c
index fbc224f46e0e..5bcbb4cc7c2a 100644
--- a/drivers/media/video/cx88/cx88-cards.c
+++ b/drivers/media/video/cx88/cx88-cards.c
@@ -3044,8 +3044,8 @@ struct cx88_core *cx88_core_create(struct pci_dev *pci, int nr)
3044 3044
3045 memcpy(&core->board, &cx88_boards[core->boardnr], sizeof(core->board)); 3045 memcpy(&core->board, &cx88_boards[core->boardnr], sizeof(core->board));
3046 3046
3047 if (!core->board.num_frontends) 3047 if (!core->board.num_frontends && (core->board.mpeg & CX88_MPEG_DVB))
3048 core->board.num_frontends=1; 3048 core->board.num_frontends = 1;
3049 3049
3050 info_printk(core, "subsystem: %04x:%04x, board: %s [card=%d,%s], frontend(s): %d\n", 3050 info_printk(core, "subsystem: %04x:%04x, board: %s [card=%d,%s], frontend(s): %d\n",
3051 pci->subsystem_vendor, pci->subsystem_device, core->board.name, 3051 pci->subsystem_vendor, pci->subsystem_device, core->board.name,
diff --git a/drivers/media/video/cx88/cx88-dvb.c b/drivers/media/video/cx88/cx88-dvb.c
index 6968ab0181aa..309ca5e68063 100644
--- a/drivers/media/video/cx88/cx88-dvb.c
+++ b/drivers/media/video/cx88/cx88-dvb.c
@@ -598,6 +598,11 @@ static int dvb_register(struct cx8802_dev *dev)
598 struct videobuf_dvb_frontend *fe0, *fe1 = NULL; 598 struct videobuf_dvb_frontend *fe0, *fe1 = NULL;
599 int mfe_shared = 0; /* bus not shared by default */ 599 int mfe_shared = 0; /* bus not shared by default */
600 600
601 if (0 != core->i2c_rc) {
602 printk(KERN_ERR "%s/2: no i2c-bus available, cannot attach dvb drivers\n", core->name);
603 goto frontend_detach;
604 }
605
601 /* Get the first frontend */ 606 /* Get the first frontend */
602 fe0 = videobuf_dvb_get_frontend(&dev->frontends, 1); 607 fe0 = videobuf_dvb_get_frontend(&dev->frontends, 1);
603 if (!fe0) 608 if (!fe0)
@@ -789,7 +794,7 @@ static int dvb_register(struct cx8802_dev *dev)
789 if (fe0->dvb.frontend) 794 if (fe0->dvb.frontend)
790 fe0->dvb.frontend->ops.i2c_gate_ctrl = NULL; 795 fe0->dvb.frontend->ops.i2c_gate_ctrl = NULL;
791 if (attach_xc3028(0x61, dev) < 0) 796 if (attach_xc3028(0x61, dev) < 0)
792 return -EINVAL; 797 goto frontend_detach;
793 break; 798 break;
794 case CX88_BOARD_PCHDTV_HD3000: 799 case CX88_BOARD_PCHDTV_HD3000:
795 fe0->dvb.frontend = dvb_attach(or51132_attach, &pchdtv_hd3000, 800 fe0->dvb.frontend = dvb_attach(or51132_attach, &pchdtv_hd3000,
@@ -1058,7 +1063,6 @@ static int dvb_register(struct cx8802_dev *dev)
1058 goto frontend_detach; 1063 goto frontend_detach;
1059 core->prev_set_voltage = fe0->dvb.frontend->ops.set_voltage; 1064 core->prev_set_voltage = fe0->dvb.frontend->ops.set_voltage;
1060 fe0->dvb.frontend->ops.set_voltage = tevii_dvbs_set_voltage; 1065 fe0->dvb.frontend->ops.set_voltage = tevii_dvbs_set_voltage;
1061
1062 } 1066 }
1063 } 1067 }
1064 break; 1068 break;
@@ -1110,10 +1114,7 @@ static int dvb_register(struct cx8802_dev *dev)
1110 &dev->pci->dev, adapter_nr, mfe_shared); 1114 &dev->pci->dev, adapter_nr, mfe_shared);
1111 1115
1112frontend_detach: 1116frontend_detach:
1113 if (fe0->dvb.frontend) { 1117 videobuf_dvb_dealloc_frontends(&dev->frontends);
1114 dvb_frontend_detach(fe0->dvb.frontend);
1115 fe0->dvb.frontend = NULL;
1116 }
1117 return -EINVAL; 1118 return -EINVAL;
1118} 1119}
1119 1120
@@ -1246,8 +1247,11 @@ fail_core:
1246 1247
1247static int cx8802_dvb_remove(struct cx8802_driver *drv) 1248static int cx8802_dvb_remove(struct cx8802_driver *drv)
1248{ 1249{
1250 struct cx88_core *core = drv->core;
1249 struct cx8802_dev *dev = drv->core->dvbdev; 1251 struct cx8802_dev *dev = drv->core->dvbdev;
1250 1252
1253 dprintk( 1, "%s\n", __func__);
1254
1251 videobuf_dvb_unregister_bus(&dev->frontends); 1255 videobuf_dvb_unregister_bus(&dev->frontends);
1252 1256
1253 vp3054_i2c_remove(dev); 1257 vp3054_i2c_remove(dev);
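[Note on the cx88-dvb hunks above: dvb_register() now bails out early when the i2c bus never came up, and every attach failure funnels through the frontend_detach label, which frees whatever frontends were allocated via videobuf_dvb_dealloc_frontends() instead of detaching only fe0. A condensed sketch of the reworked error path, assuming dealloc_frontends releases everything allocated so far:]

/* Sketch of the centralized error handling (function name illustrative). */
static int dvb_register_sketch(struct cx8802_dev *dev)
{
	struct cx88_core *core = dev->core;

	if (core->i2c_rc != 0) {
		printk(KERN_ERR "%s/2: no i2c-bus available, "
		       "cannot attach dvb drivers\n", core->name);
		goto frontend_detach;
	}

	/* ... per-board frontend attach code; each failure now does
	 *     "goto frontend_detach" instead of returning -EINVAL ... */

	return 0;

frontend_detach:
	videobuf_dvb_dealloc_frontends(&dev->frontends);
	return -EINVAL;
}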
diff --git a/drivers/media/video/cx88/cx88-i2c.c b/drivers/media/video/cx88/cx88-i2c.c
index 01de23007095..1ab691d20692 100644
--- a/drivers/media/video/cx88/cx88-i2c.c
+++ b/drivers/media/video/cx88/cx88-i2c.c
@@ -116,8 +116,10 @@ static int detach_inform(struct i2c_client *client)
116 116
117void cx88_call_i2c_clients(struct cx88_core *core, unsigned int cmd, void *arg) 117void cx88_call_i2c_clients(struct cx88_core *core, unsigned int cmd, void *arg)
118{ 118{
119#if defined(CONFIG_VIDEO_CX88_DVB) || defined(CONFIG_VIDEO_CX88_DVB_MODULE)
119 struct videobuf_dvb_frontends *f = &core->dvbdev->frontends; 120 struct videobuf_dvb_frontends *f = &core->dvbdev->frontends;
120 struct videobuf_dvb_frontend *fe = NULL; 121 struct videobuf_dvb_frontend *fe = NULL;
122#endif
121 if (0 != core->i2c_rc) 123 if (0 != core->i2c_rc)
122 return; 124 return;
123 125
diff --git a/drivers/media/video/cx88/cx88-mpeg.c b/drivers/media/video/cx88/cx88-mpeg.c
index 6df5cf314186..3ebdcd1d83f8 100644
--- a/drivers/media/video/cx88/cx88-mpeg.c
+++ b/drivers/media/video/cx88/cx88-mpeg.c
@@ -768,8 +768,7 @@ static int __devinit cx8802_probe(struct pci_dev *pci_dev,
768{ 768{
769 struct cx8802_dev *dev; 769 struct cx8802_dev *dev;
770 struct cx88_core *core; 770 struct cx88_core *core;
771 struct videobuf_dvb_frontend *demod; 771 int err;
772 int err,i;
773 772
774 /* general setup */ 773 /* general setup */
775 core = cx88_core_get(pci_dev); 774 core = cx88_core_get(pci_dev);
@@ -782,11 +781,6 @@ static int __devinit cx8802_probe(struct pci_dev *pci_dev,
782 if (!core->board.mpeg) 781 if (!core->board.mpeg)
783 goto fail_core; 782 goto fail_core;
784 783
785 if (!core->board.num_frontends) {
786 printk(KERN_ERR "%s() .num_frontends should be non-zero, err = %d\n", __func__, err);
787 goto fail_core;
788 }
789
790 err = -ENOMEM; 784 err = -ENOMEM;
791 dev = kzalloc(sizeof(*dev),GFP_KERNEL); 785 dev = kzalloc(sizeof(*dev),GFP_KERNEL);
792 if (NULL == dev) 786 if (NULL == dev)
@@ -801,19 +795,28 @@ static int __devinit cx8802_probe(struct pci_dev *pci_dev,
801 INIT_LIST_HEAD(&dev->drvlist); 795 INIT_LIST_HEAD(&dev->drvlist);
802 list_add_tail(&dev->devlist,&cx8802_devlist); 796 list_add_tail(&dev->devlist,&cx8802_devlist);
803 797
798#if defined(CONFIG_VIDEO_CX88_DVB) || defined(CONFIG_VIDEO_CX88_DVB_MODULE)
804 mutex_init(&dev->frontends.lock); 799 mutex_init(&dev->frontends.lock);
805 INIT_LIST_HEAD(&dev->frontends.felist); 800 INIT_LIST_HEAD(&dev->frontends.felist);
806 801
807 printk(KERN_INFO "%s() allocating %d frontend(s)\n", __func__, core->board.num_frontends); 802 if (core->board.num_frontends) {
808 803 struct videobuf_dvb_frontend *fe;
809 for (i = 1; i <= core->board.num_frontends; i++) { 804 int i;
810 demod = videobuf_dvb_alloc_frontend(&dev->frontends, i); 805
811 if(demod == NULL) { 806 printk(KERN_INFO "%s() allocating %d frontend(s)\n", __func__,
812 printk(KERN_ERR "%s() failed to alloc\n", __func__); 807 core->board.num_frontends);
813 err = -ENOMEM; 808 for (i = 1; i <= core->board.num_frontends; i++) {
814 goto fail_free; 809 fe = videobuf_dvb_alloc_frontend(&dev->frontends, i);
810 if(fe == NULL) {
811 printk(KERN_ERR "%s() failed to alloc\n",
812 __func__);
813 videobuf_dvb_dealloc_frontends(&dev->frontends);
814 err = -ENOMEM;
815 goto fail_free;
816 }
815 } 817 }
816 } 818 }
819#endif
817 820
818 /* Maintain a reference so cx88-video can query the 8802 device. */ 821 /* Maintain a reference so cx88-video can query the 8802 device. */
819 core->dvbdev = dev; 822 core->dvbdev = dev;
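[Note on the cx88-mpeg hunks above: the frontend list is now only set up when the DVB side of the driver is built, zero frontends is no longer fatal, and a partial allocation failure tears down whatever was already allocated. A condensed sketch of that loop; the wrapper name is illustrative:]

#if defined(CONFIG_VIDEO_CX88_DVB) || defined(CONFIG_VIDEO_CX88_DVB_MODULE)
/* Sketch: allocate the board's frontends, freeing all of them on failure. */
static int alloc_board_frontends(struct cx8802_dev *dev, int num_frontends)
{
	struct videobuf_dvb_frontend *fe;
	int i;

	mutex_init(&dev->frontends.lock);
	INIT_LIST_HEAD(&dev->frontends.felist);

	for (i = 1; i <= num_frontends; i++) {
		fe = videobuf_dvb_alloc_frontend(&dev->frontends, i);
		if (fe == NULL) {
			videobuf_dvb_dealloc_frontends(&dev->frontends);
			return -ENOMEM;
		}
	}
	return 0;
}
#endif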
diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c
index 3904b73f52ee..b96ce991d968 100644
--- a/drivers/media/video/cx88/cx88-video.c
+++ b/drivers/media/video/cx88/cx88-video.c
@@ -1216,8 +1216,12 @@ static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
1216 struct cx8800_fh *fh = priv; 1216 struct cx8800_fh *fh = priv;
1217 struct cx8800_dev *dev = fh->dev; 1217 struct cx8800_dev *dev = fh->dev;
1218 1218
1219 if (unlikely(fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)) 1219 /* We should remember that this driver also supports teletext, */
1220 /* so we have to test if the v4l2_buf_type is VBI capture data. */
1221 if (unlikely((fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
1222 (fh->type != V4L2_BUF_TYPE_VBI_CAPTURE)))
1220 return -EINVAL; 1223 return -EINVAL;
1224
1221 if (unlikely(i != fh->type)) 1225 if (unlikely(i != fh->type))
1222 return -EINVAL; 1226 return -EINVAL;
1223 1227
@@ -1232,8 +1236,10 @@ static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
1232 struct cx8800_dev *dev = fh->dev; 1236 struct cx8800_dev *dev = fh->dev;
1233 int err, res; 1237 int err, res;
1234 1238
1235 if (fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) 1239 if ((fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
1240 (fh->type != V4L2_BUF_TYPE_VBI_CAPTURE))
1236 return -EINVAL; 1241 return -EINVAL;
1242
1237 if (i != fh->type) 1243 if (i != fh->type)
1238 return -EINVAL; 1244 return -EINVAL;
1239 1245
@@ -1911,7 +1917,7 @@ static int __devinit cx8800_initdev(struct pci_dev *pci_dev,
1911 goto fail_unreg; 1917 goto fail_unreg;
1912 } 1918 }
1913 printk(KERN_INFO "%s/0: registered device video%d [v4l2]\n", 1919 printk(KERN_INFO "%s/0: registered device video%d [v4l2]\n",
1914 core->name,dev->video_dev->minor & 0x1f); 1920 core->name, dev->video_dev->num);
1915 1921
1916 dev->vbi_dev = cx88_vdev_init(core,dev->pci,&cx8800_vbi_template,"vbi"); 1922 dev->vbi_dev = cx88_vdev_init(core,dev->pci,&cx8800_vbi_template,"vbi");
1917 err = video_register_device(dev->vbi_dev,VFL_TYPE_VBI, 1923 err = video_register_device(dev->vbi_dev,VFL_TYPE_VBI,
@@ -1922,7 +1928,7 @@ static int __devinit cx8800_initdev(struct pci_dev *pci_dev,
1922 goto fail_unreg; 1928 goto fail_unreg;
1923 } 1929 }
1924 printk(KERN_INFO "%s/0: registered device vbi%d\n", 1930 printk(KERN_INFO "%s/0: registered device vbi%d\n",
1925 core->name,dev->vbi_dev->minor & 0x1f); 1931 core->name, dev->vbi_dev->num);
1926 1932
1927 if (core->board.radio.type == CX88_RADIO) { 1933 if (core->board.radio.type == CX88_RADIO) {
1928 dev->radio_dev = cx88_vdev_init(core,dev->pci, 1934 dev->radio_dev = cx88_vdev_init(core,dev->pci,
@@ -1935,7 +1941,7 @@ static int __devinit cx8800_initdev(struct pci_dev *pci_dev,
1935 goto fail_unreg; 1941 goto fail_unreg;
1936 } 1942 }
1937 printk(KERN_INFO "%s/0: registered device radio%d\n", 1943 printk(KERN_INFO "%s/0: registered device radio%d\n",
1938 core->name,dev->radio_dev->minor & 0x1f); 1944 core->name, dev->radio_dev->num);
1939 } 1945 }
1940 1946
1941 /* everything worked */ 1947 /* everything worked */
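[Note on the cx88-video hunks above: because the same file operations also serve the VBI node, VIDIOC_STREAMON/STREAMOFF must not reject handles of type V4L2_BUF_TYPE_VBI_CAPTURE. A small sketch of the relaxed check (helper name illustrative):]

/* Sketch: accept both video and VBI capture handles, reject the rest. */
static int check_buf_type(struct cx8800_fh *fh, enum v4l2_buf_type i)
{
	if ((fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
	    (fh->type != V4L2_BUF_TYPE_VBI_CAPTURE))
		return -EINVAL;
	if (i != fh->type)		/* request must match the handle */
		return -EINVAL;
	return 0;
}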
diff --git a/drivers/media/video/cx88/cx88.h b/drivers/media/video/cx88/cx88.h
index 76207c2856b7..f4240965be32 100644
--- a/drivers/media/video/cx88/cx88.h
+++ b/drivers/media/video/cx88/cx88.h
@@ -352,6 +352,7 @@ struct cx88_core {
352 /* various v4l controls */ 352 /* various v4l controls */
353 u32 freq; 353 u32 freq;
354 atomic_t users; 354 atomic_t users;
355 atomic_t mpeg_users;
355 356
356 /* cx88-video needs to access cx8802 for hybrid tuner pll access. */ 357 /* cx88-video needs to access cx8802 for hybrid tuner pll access. */
357 struct cx8802_dev *dvbdev; 358 struct cx8802_dev *dvbdev;
diff --git a/drivers/media/video/em28xx/em28xx-audio.c b/drivers/media/video/em28xx/em28xx-audio.c
index ac3292d7646c..7a8d49ef646e 100644
--- a/drivers/media/video/em28xx/em28xx-audio.c
+++ b/drivers/media/video/em28xx/em28xx-audio.c
@@ -62,7 +62,7 @@ static int em28xx_isoc_audio_deinit(struct em28xx *dev)
62 62
63 dprintk("Stopping isoc\n"); 63 dprintk("Stopping isoc\n");
64 for (i = 0; i < EM28XX_AUDIO_BUFS; i++) { 64 for (i = 0; i < EM28XX_AUDIO_BUFS; i++) {
65 usb_kill_urb(dev->adev->urb[i]); 65 usb_unlink_urb(dev->adev->urb[i]);
66 usb_free_urb(dev->adev->urb[i]); 66 usb_free_urb(dev->adev->urb[i]);
67 dev->adev->urb[i] = NULL; 67 dev->adev->urb[i] = NULL;
68 } 68 }
@@ -75,7 +75,6 @@ static void em28xx_audio_isocirq(struct urb *urb)
75 struct em28xx *dev = urb->context; 75 struct em28xx *dev = urb->context;
76 int i; 76 int i;
77 unsigned int oldptr; 77 unsigned int oldptr;
78 unsigned long flags;
79 int period_elapsed = 0; 78 int period_elapsed = 0;
80 int status; 79 int status;
81 unsigned char *cp; 80 unsigned char *cp;
@@ -96,9 +95,21 @@ static void em28xx_audio_isocirq(struct urb *urb)
96 if (!length) 95 if (!length)
97 continue; 96 continue;
98 97
99 spin_lock_irqsave(&dev->adev->slock, flags);
100
101 oldptr = dev->adev->hwptr_done_capture; 98 oldptr = dev->adev->hwptr_done_capture;
99 if (oldptr + length >= runtime->buffer_size) {
100 unsigned int cnt =
101 runtime->buffer_size - oldptr;
102 memcpy(runtime->dma_area + oldptr * stride, cp,
103 cnt * stride);
104 memcpy(runtime->dma_area, cp + cnt * stride,
105 length * stride - cnt * stride);
106 } else {
107 memcpy(runtime->dma_area + oldptr * stride, cp,
108 length * stride);
109 }
110
111 snd_pcm_stream_lock(substream);
112
102 dev->adev->hwptr_done_capture += length; 113 dev->adev->hwptr_done_capture += length;
103 if (dev->adev->hwptr_done_capture >= 114 if (dev->adev->hwptr_done_capture >=
104 runtime->buffer_size) 115 runtime->buffer_size)
@@ -113,19 +124,7 @@ static void em28xx_audio_isocirq(struct urb *urb)
113 period_elapsed = 1; 124 period_elapsed = 1;
114 } 125 }
115 126
116 spin_unlock_irqrestore(&dev->adev->slock, flags); 127 snd_pcm_stream_unlock(substream);
117
118 if (oldptr + length >= runtime->buffer_size) {
119 unsigned int cnt =
120 runtime->buffer_size - oldptr;
121 memcpy(runtime->dma_area + oldptr * stride, cp,
122 cnt * stride);
123 memcpy(runtime->dma_area, cp + cnt * stride,
124 length * stride - cnt * stride);
125 } else {
126 memcpy(runtime->dma_area + oldptr * stride, cp,
127 length * stride);
128 }
129 } 128 }
130 if (period_elapsed) 129 if (period_elapsed)
131 snd_pcm_period_elapsed(substream); 130 snd_pcm_period_elapsed(substream);
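[Note on the em28xx-audio hunks above: teardown switches to usb_unlink_urb(), and the isoc completion handler now copies samples into the ALSA ring buffer outside any lock and only takes snd_pcm_stream_lock() around the hardware-pointer update, replacing the private spinlock. A condensed sketch of that copy-then-lock sequence, mirroring the patch (the helper and its parameters are stand-ins; stride is bytes per frame, length is in frames):]

/* Sketch: wrap-around copy first, then pointer update under the stream lock. */
static void copy_and_advance(struct snd_pcm_substream *substream,
			     unsigned int oldptr, const unsigned char *cp,
			     unsigned int length, unsigned int stride,
			     unsigned int *hwptr_done_capture)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	if (oldptr + length >= runtime->buffer_size) {
		unsigned int cnt = runtime->buffer_size - oldptr;

		memcpy(runtime->dma_area + oldptr * stride, cp, cnt * stride);
		memcpy(runtime->dma_area, cp + cnt * stride,
		       (length - cnt) * stride);
	} else {
		memcpy(runtime->dma_area + oldptr * stride, cp,
		       length * stride);
	}

	snd_pcm_stream_lock(substream);
	*hwptr_done_capture += length;
	if (*hwptr_done_capture >= runtime->buffer_size)
		*hwptr_done_capture -= runtime->buffer_size;
	snd_pcm_stream_unlock(substream);
}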
diff --git a/drivers/media/video/em28xx/em28xx-core.c b/drivers/media/video/em28xx/em28xx-core.c
index 5d837c16ee22..15e2b525310d 100644
--- a/drivers/media/video/em28xx/em28xx-core.c
+++ b/drivers/media/video/em28xx/em28xx-core.c
@@ -69,19 +69,33 @@ int em28xx_read_reg_req_len(struct em28xx *dev, u8 req, u16 reg,
69 int ret, byte; 69 int ret, byte;
70 70
71 if (dev->state & DEV_DISCONNECTED) 71 if (dev->state & DEV_DISCONNECTED)
72 return(-ENODEV); 72 return -ENODEV;
73
74 if (len > URB_MAX_CTRL_SIZE)
75 return -EINVAL;
73 76
74 em28xx_regdbg("req=%02x, reg=%02x ", req, reg); 77 em28xx_regdbg("req=%02x, reg=%02x ", req, reg);
75 78
79 mutex_lock(&dev->ctrl_urb_lock);
76 ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), req, 80 ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), req,
77 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 81 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
78 0x0000, reg, buf, len, HZ); 82 0x0000, reg, dev->urb_buf, len, HZ);
83 if (ret < 0) {
84 if (reg_debug)
85 printk(" failed!\n");
86 mutex_unlock(&dev->ctrl_urb_lock);
87 return ret;
88 }
89
90 if (len)
91 memcpy(buf, dev->urb_buf, len);
92
93 mutex_unlock(&dev->ctrl_urb_lock);
79 94
80 if (reg_debug) { 95 if (reg_debug) {
81 printk(ret < 0 ? " failed!\n" : "%02x values: ", ret); 96 printk("%02x values: ", ret);
82 for (byte = 0; byte < len; byte++) 97 for (byte = 0; byte < len; byte++)
83 printk(" %02x", (unsigned char)buf[byte]); 98 printk(" %02x", (unsigned char)buf[byte]);
84
85 printk("\n"); 99 printk("\n");
86 } 100 }
87 101
@@ -102,16 +116,20 @@ int em28xx_read_reg_req(struct em28xx *dev, u8 req, u16 reg)
102 116
103 em28xx_regdbg("req=%02x, reg=%02x:", req, reg); 117 em28xx_regdbg("req=%02x, reg=%02x:", req, reg);
104 118
119 mutex_lock(&dev->ctrl_urb_lock);
105 ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), req, 120 ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), req,
106 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 121 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
107 0x0000, reg, &val, 1, HZ); 122 0x0000, reg, dev->urb_buf, 1, HZ);
108 123 val = dev->urb_buf[0];
109 if (reg_debug) 124 mutex_unlock(&dev->ctrl_urb_lock);
110 printk(ret < 0 ? " failed!\n" :
111 "%02x\n", (unsigned char) val);
112 125
113 if (ret < 0) 126 if (ret < 0) {
127 printk(" failed!\n");
114 return ret; 128 return ret;
129 }
130
131 if (reg_debug)
132 printk("%02x\n", (unsigned char) val);
115 133
116 return val; 134 return val;
117} 135}
@@ -130,19 +148,13 @@ int em28xx_write_regs_req(struct em28xx *dev, u8 req, u16 reg, char *buf,
130{ 148{
131 int ret; 149 int ret;
132 150
133 /*usb_control_msg seems to expect a kmalloced buffer */
134 unsigned char *bufs;
135
136 if (dev->state & DEV_DISCONNECTED) 151 if (dev->state & DEV_DISCONNECTED)
137 return -ENODEV; 152 return -ENODEV;
138 153
139 if (len < 1) 154 if ((len < 1) || (len > URB_MAX_CTRL_SIZE))
140 return -EINVAL; 155 return -EINVAL;
141 156
142 bufs = kmalloc(len, GFP_KERNEL);
143
144 em28xx_regdbg("req=%02x reg=%02x:", req, reg); 157 em28xx_regdbg("req=%02x reg=%02x:", req, reg);
145
146 if (reg_debug) { 158 if (reg_debug) {
147 int i; 159 int i;
148 for (i = 0; i < len; ++i) 160 for (i = 0; i < len; ++i)
@@ -150,16 +162,16 @@ int em28xx_write_regs_req(struct em28xx *dev, u8 req, u16 reg, char *buf,
150 printk("\n"); 162 printk("\n");
151 } 163 }
152 164
153 if (!bufs) 165 mutex_lock(&dev->ctrl_urb_lock);
154 return -ENOMEM; 166 memcpy(dev->urb_buf, buf, len);
155 memcpy(bufs, buf, len);
156 ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), req, 167 ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), req,
157 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 168 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
158 0x0000, reg, bufs, len, HZ); 169 0x0000, reg, dev->urb_buf, len, HZ);
170 mutex_unlock(&dev->ctrl_urb_lock);
171
159 if (dev->wait_after_write) 172 if (dev->wait_after_write)
160 msleep(dev->wait_after_write); 173 msleep(dev->wait_after_write);
161 174
162 kfree(bufs);
163 return ret; 175 return ret;
164} 176}
165 177
@@ -270,6 +282,8 @@ static int em28xx_set_audio_source(struct em28xx *dev)
270 break; 282 break;
271 case EM28XX_AMUX_LINE_IN: 283 case EM28XX_AMUX_LINE_IN:
272 input = EM28XX_AUDIO_SRC_LINE; 284 input = EM28XX_AUDIO_SRC_LINE;
285 video = disable;
286 line = enable;
273 break; 287 break;
274 case EM28XX_AMUX_AC97_VIDEO: 288 case EM28XX_AMUX_AC97_VIDEO:
275 input = EM28XX_AUDIO_SRC_LINE; 289 input = EM28XX_AUDIO_SRC_LINE;
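[Note on the em28xx-core hunks above: register reads and writes now go through a preallocated dev->urb_buf bounce buffer, bounded by URB_MAX_CTRL_SIZE and serialized with the ctrl_urb_lock mutex, instead of kmalloc'ing a buffer for every usb_control_msg() call. A condensed sketch of the read side under those assumptions (function name illustrative):]

/* Sketch: shared, mutex-protected bounce buffer for control transfers. */
static int em28xx_ctrl_read_sketch(struct em28xx *dev, u8 req, u16 reg,
				   char *buf, int len)
{
	int ret;

	if (dev->state & DEV_DISCONNECTED)
		return -ENODEV;
	if (len > URB_MAX_CTRL_SIZE)
		return -EINVAL;

	mutex_lock(&dev->ctrl_urb_lock);
	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), req,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0x0000, reg, dev->urb_buf, len, HZ);
	if (ret >= 0 && len)
		memcpy(buf, dev->urb_buf, len);
	mutex_unlock(&dev->ctrl_urb_lock);

	return ret;
}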
diff --git a/drivers/media/video/em28xx/em28xx-i2c.c b/drivers/media/video/em28xx/em28xx-i2c.c
index 3bab56b997fc..2360c61ddca9 100644
--- a/drivers/media/video/em28xx/em28xx-i2c.c
+++ b/drivers/media/video/em28xx/em28xx-i2c.c
@@ -337,9 +337,9 @@ static int em28xx_i2c_eeprom(struct em28xx *dev, unsigned char *eedata, int len)
337 /* Check if board has eeprom */ 337 /* Check if board has eeprom */
338 err = i2c_master_recv(&dev->i2c_client, &buf, 0); 338 err = i2c_master_recv(&dev->i2c_client, &buf, 0);
339 if (err < 0) { 339 if (err < 0) {
340 em28xx_errdev("%s: i2c_master_recv failed! err [%d]\n", 340 em28xx_errdev("board has no eeprom\n");
341 __func__, err); 341 memset(eedata, 0, len);
342 return err; 342 return -ENODEV;
343 } 343 }
344 344
345 buf = 0; 345 buf = 0;
@@ -609,14 +609,16 @@ int em28xx_i2c_register(struct em28xx *dev)
609 dev->i2c_client.adapter = &dev->i2c_adap; 609 dev->i2c_client.adapter = &dev->i2c_adap;
610 610
611 retval = em28xx_i2c_eeprom(dev, dev->eedata, sizeof(dev->eedata)); 611 retval = em28xx_i2c_eeprom(dev, dev->eedata, sizeof(dev->eedata));
612 if (retval < 0) { 612 if ((retval < 0) && (retval != -ENODEV)) {
613 em28xx_errdev("%s: em28xx_i2_eeprom failed! retval [%d]\n", 613 em28xx_errdev("%s: em28xx_i2_eeprom failed! retval [%d]\n",
614 __func__, retval); 614 __func__, retval);
615
615 return retval; 616 return retval;
616 } 617 }
617 618
618 if (i2c_scan) 619 if (i2c_scan)
619 em28xx_do_i2c_scan(dev); 620 em28xx_do_i2c_scan(dev);
621
620 return 0; 622 return 0;
621} 623}
622 624
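[Note on the em28xx-i2c hunks above: a board without an eeprom is no longer treated as a fatal probe error; the eeprom helper zeroes eedata and returns -ENODEV, and i2c registration continues, aborting only on other errors. A small sketch of that policy (function name illustrative):]

/* Sketch: tolerate missing eeprom, abort only on real failures. */
static int em28xx_i2c_register_sketch(struct em28xx *dev)
{
	int retval;

	retval = em28xx_i2c_eeprom(dev, dev->eedata, sizeof(dev->eedata));
	if ((retval < 0) && (retval != -ENODEV))
		return retval;	/* genuine probe failure */

	/* -ENODEV: no eeprom; eedata is zeroed and registration continues */
	return 0;
}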
diff --git a/drivers/media/video/em28xx/em28xx-video.c b/drivers/media/video/em28xx/em28xx-video.c
index c53649e5315b..610f535a257c 100644
--- a/drivers/media/video/em28xx/em28xx-video.c
+++ b/drivers/media/video/em28xx/em28xx-video.c
@@ -73,6 +73,7 @@ MODULE_DESCRIPTION(DRIVER_DESC);
73MODULE_LICENSE("GPL"); 73MODULE_LICENSE("GPL");
74 74
75static LIST_HEAD(em28xx_devlist); 75static LIST_HEAD(em28xx_devlist);
76static DEFINE_MUTEX(em28xx_devlist_mutex);
76 77
77static unsigned int card[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = UNSET }; 78static unsigned int card[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = UNSET };
78static unsigned int video_nr[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = UNSET }; 79static unsigned int video_nr[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = UNSET };
@@ -1519,7 +1520,7 @@ static int em28xx_v4l2_open(struct inode *inode, struct file *filp)
1519 struct em28xx_fh *fh; 1520 struct em28xx_fh *fh;
1520 enum v4l2_buf_type fh_type = 0; 1521 enum v4l2_buf_type fh_type = 0;
1521 1522
1522 lock_kernel(); 1523 mutex_lock(&em28xx_devlist_mutex);
1523 list_for_each_entry(h, &em28xx_devlist, devlist) { 1524 list_for_each_entry(h, &em28xx_devlist, devlist) {
1524 if (h->vdev->minor == minor) { 1525 if (h->vdev->minor == minor) {
1525 dev = h; 1526 dev = h;
@@ -1535,10 +1536,11 @@ static int em28xx_v4l2_open(struct inode *inode, struct file *filp)
1535 dev = h; 1536 dev = h;
1536 } 1537 }
1537 } 1538 }
1538 if (NULL == dev) { 1539 mutex_unlock(&em28xx_devlist_mutex);
1539 unlock_kernel(); 1540 if (NULL == dev)
1540 return -ENODEV; 1541 return -ENODEV;
1541 } 1542
1543 mutex_lock(&dev->lock);
1542 1544
1543 em28xx_videodbg("open minor=%d type=%s users=%d\n", 1545 em28xx_videodbg("open minor=%d type=%s users=%d\n",
1544 minor, v4l2_type_names[fh_type], dev->users); 1546 minor, v4l2_type_names[fh_type], dev->users);
@@ -1547,10 +1549,9 @@ static int em28xx_v4l2_open(struct inode *inode, struct file *filp)
1547 fh = kzalloc(sizeof(struct em28xx_fh), GFP_KERNEL); 1549 fh = kzalloc(sizeof(struct em28xx_fh), GFP_KERNEL);
1548 if (!fh) { 1550 if (!fh) {
1549 em28xx_errdev("em28xx-video.c: Out of memory?!\n"); 1551 em28xx_errdev("em28xx-video.c: Out of memory?!\n");
1550 unlock_kernel(); 1552 mutex_unlock(&dev->lock);
1551 return -ENOMEM; 1553 return -ENOMEM;
1552 } 1554 }
1553 mutex_lock(&dev->lock);
1554 fh->dev = dev; 1555 fh->dev = dev;
1555 fh->radio = radio; 1556 fh->radio = radio;
1556 fh->type = fh_type; 1557 fh->type = fh_type;
@@ -1584,7 +1585,6 @@ static int em28xx_v4l2_open(struct inode *inode, struct file *filp)
1584 sizeof(struct em28xx_buffer), fh); 1585 sizeof(struct em28xx_buffer), fh);
1585 1586
1586 mutex_unlock(&dev->lock); 1587 mutex_unlock(&dev->lock);
1587 unlock_kernel();
1588 1588
1589 return errCode; 1589 return errCode;
1590} 1590}
@@ -1871,6 +1871,7 @@ int em28xx_register_extension(struct em28xx_ops *ops)
1871{ 1871{
1872 struct em28xx *dev = NULL; 1872 struct em28xx *dev = NULL;
1873 1873
1874 mutex_lock(&em28xx_devlist_mutex);
1874 mutex_lock(&em28xx_extension_devlist_lock); 1875 mutex_lock(&em28xx_extension_devlist_lock);
1875 list_add_tail(&ops->next, &em28xx_extension_devlist); 1876 list_add_tail(&ops->next, &em28xx_extension_devlist);
1876 list_for_each_entry(dev, &em28xx_devlist, devlist) { 1877 list_for_each_entry(dev, &em28xx_devlist, devlist) {
@@ -1879,6 +1880,7 @@ int em28xx_register_extension(struct em28xx_ops *ops)
1879 } 1880 }
1880 printk(KERN_INFO "Em28xx: Initialized (%s) extension\n", ops->name); 1881 printk(KERN_INFO "Em28xx: Initialized (%s) extension\n", ops->name);
1881 mutex_unlock(&em28xx_extension_devlist_lock); 1882 mutex_unlock(&em28xx_extension_devlist_lock);
1883 mutex_unlock(&em28xx_devlist_mutex);
1882 return 0; 1884 return 0;
1883} 1885}
1884EXPORT_SYMBOL(em28xx_register_extension); 1886EXPORT_SYMBOL(em28xx_register_extension);
@@ -1887,6 +1889,7 @@ void em28xx_unregister_extension(struct em28xx_ops *ops)
1887{ 1889{
1888 struct em28xx *dev = NULL; 1890 struct em28xx *dev = NULL;
1889 1891
1892 mutex_lock(&em28xx_devlist_mutex);
1890 list_for_each_entry(dev, &em28xx_devlist, devlist) { 1893 list_for_each_entry(dev, &em28xx_devlist, devlist) {
1891 if (dev) 1894 if (dev)
1892 ops->fini(dev); 1895 ops->fini(dev);
@@ -1896,6 +1899,7 @@ void em28xx_unregister_extension(struct em28xx_ops *ops)
1896 printk(KERN_INFO "Em28xx: Removed (%s) extension\n", ops->name); 1899 printk(KERN_INFO "Em28xx: Removed (%s) extension\n", ops->name);
1897 list_del(&ops->next); 1900 list_del(&ops->next);
1898 mutex_unlock(&em28xx_extension_devlist_lock); 1901 mutex_unlock(&em28xx_extension_devlist_lock);
1902 mutex_unlock(&em28xx_devlist_mutex);
1899} 1903}
1900EXPORT_SYMBOL(em28xx_unregister_extension); 1904EXPORT_SYMBOL(em28xx_unregister_extension);
1901 1905
@@ -1921,6 +1925,60 @@ static struct video_device *em28xx_vdev_init(struct em28xx *dev,
1921} 1925}
1922 1926
1923 1927
1928static int register_analog_devices(struct em28xx *dev)
1929{
1930 int ret;
1931
1932 /* allocate and fill video video_device struct */
1933 dev->vdev = em28xx_vdev_init(dev, &em28xx_video_template, "video");
1934 if (!dev->vdev) {
1935 em28xx_errdev("cannot allocate video_device.\n");
1936 return -ENODEV;
1937 }
1938
1939 /* register v4l2 video video_device */
1940 ret = video_register_device(dev->vdev, VFL_TYPE_GRABBER,
1941 video_nr[dev->devno]);
1942 if (ret) {
1943 em28xx_errdev("unable to register video device (error=%i).\n",
1944 ret);
1945 return ret;
1946 }
1947
1948 /* Allocate and fill vbi video_device struct */
1949 dev->vbi_dev = em28xx_vdev_init(dev, &em28xx_video_template, "vbi");
1950
1951 /* register v4l2 vbi video_device */
1952 ret = video_register_device(dev->vbi_dev, VFL_TYPE_VBI,
1953 vbi_nr[dev->devno]);
1954 if (ret < 0) {
1955 em28xx_errdev("unable to register vbi device\n");
1956 return ret;
1957 }
1958
1959 if (em28xx_boards[dev->model].radio.type == EM28XX_RADIO) {
1960 dev->radio_dev = em28xx_vdev_init(dev, &em28xx_radio_template, "radio");
1961 if (!dev->radio_dev) {
1962 em28xx_errdev("cannot allocate video_device.\n");
1963 return -ENODEV;
1964 }
1965 ret = video_register_device(dev->radio_dev, VFL_TYPE_RADIO,
1966 radio_nr[dev->devno]);
1967 if (ret < 0) {
1968 em28xx_errdev("can't register radio device\n");
1969 return ret;
1970 }
1971 em28xx_info("Registered radio device as /dev/radio%d\n",
1972 dev->radio_dev->num);
1973 }
1974
1975 em28xx_info("V4L2 device registered as /dev/video%d and /dev/vbi%d\n",
1976 dev->vdev->num, dev->vbi_dev->num);
1977
1978 return 0;
1979}
1980
1981
1924/* 1982/*
1925 * em28xx_init_dev() 1983 * em28xx_init_dev()
1926 * allocates and inits the device structs, registers i2c bus and v4l device 1984 * allocates and inits the device structs, registers i2c bus and v4l device
@@ -1936,6 +1994,7 @@ static int em28xx_init_dev(struct em28xx **devhandle, struct usb_device *udev,
1936 1994
1937 dev->udev = udev; 1995 dev->udev = udev;
1938 mutex_init(&dev->lock); 1996 mutex_init(&dev->lock);
1997 mutex_init(&dev->ctrl_urb_lock);
1939 spin_lock_init(&dev->slock); 1998 spin_lock_init(&dev->slock);
1940 init_waitqueue_head(&dev->open); 1999 init_waitqueue_head(&dev->open);
1941 init_waitqueue_head(&dev->wait_frame); 2000 init_waitqueue_head(&dev->wait_frame);
@@ -1953,8 +2012,6 @@ static int em28xx_init_dev(struct em28xx **devhandle, struct usb_device *udev,
1953 errCode = em28xx_config(dev); 2012 errCode = em28xx_config(dev);
1954 if (errCode) { 2013 if (errCode) {
1955 em28xx_errdev("error configuring device\n"); 2014 em28xx_errdev("error configuring device\n");
1956 em28xx_devused &= ~(1<<dev->devno);
1957 kfree(dev);
1958 return -ENOMEM; 2015 return -ENOMEM;
1959 } 2016 }
1960 2017
@@ -2001,50 +2058,6 @@ static int em28xx_init_dev(struct em28xx **devhandle, struct usb_device *udev,
2001 return errCode; 2058 return errCode;
2002 } 2059 }
2003 2060
2004 list_add_tail(&dev->devlist, &em28xx_devlist);
2005
2006 /* allocate and fill video video_device struct */
2007 dev->vdev = em28xx_vdev_init(dev, &em28xx_video_template, "video");
2008 if (NULL == dev->vdev) {
2009 em28xx_errdev("cannot allocate video_device.\n");
2010 goto fail_unreg;
2011 }
2012
2013 /* register v4l2 video video_device */
2014 retval = video_register_device(dev->vdev, VFL_TYPE_GRABBER,
2015 video_nr[dev->devno]);
2016 if (retval) {
2017 em28xx_errdev("unable to register video device (error=%i).\n",
2018 retval);
2019 goto fail_unreg;
2020 }
2021
2022 /* Allocate and fill vbi video_device struct */
2023 dev->vbi_dev = em28xx_vdev_init(dev, &em28xx_video_template, "vbi");
2024 /* register v4l2 vbi video_device */
2025 if (video_register_device(dev->vbi_dev, VFL_TYPE_VBI,
2026 vbi_nr[dev->devno]) < 0) {
2027 em28xx_errdev("unable to register vbi device\n");
2028 retval = -ENODEV;
2029 goto fail_unreg;
2030 }
2031
2032 if (em28xx_boards[dev->model].radio.type == EM28XX_RADIO) {
2033 dev->radio_dev = em28xx_vdev_init(dev, &em28xx_radio_template, "radio");
2034 if (NULL == dev->radio_dev) {
2035 em28xx_errdev("cannot allocate video_device.\n");
2036 goto fail_unreg;
2037 }
2038 retval = video_register_device(dev->radio_dev, VFL_TYPE_RADIO,
2039 radio_nr[dev->devno]);
2040 if (retval < 0) {
2041 em28xx_errdev("can't register radio device\n");
2042 goto fail_unreg;
2043 }
2044 em28xx_info("Registered radio device as /dev/radio%d\n",
2045 dev->radio_dev->minor & 0x1f);
2046 }
2047
2048 /* init video dma queues */ 2061 /* init video dma queues */
2049 INIT_LIST_HEAD(&dev->vidq.active); 2062 INIT_LIST_HEAD(&dev->vidq.active);
2050 INIT_LIST_HEAD(&dev->vidq.queued); 2063 INIT_LIST_HEAD(&dev->vidq.queued);
@@ -2071,8 +2084,14 @@ static int em28xx_init_dev(struct em28xx **devhandle, struct usb_device *udev,
2071 2084
2072 video_mux(dev, 0); 2085 video_mux(dev, 0);
2073 2086
2074 em28xx_info("V4L2 device registered as /dev/video%d and /dev/vbi%d\n", 2087 mutex_lock(&em28xx_devlist_mutex);
2075 dev->vdev->num, dev->vbi_dev->num); 2088 list_add_tail(&dev->devlist, &em28xx_devlist);
2089 retval = register_analog_devices(dev);
2090 if (retval < 0) {
2091 em28xx_release_resources(dev);
2092 mutex_unlock(&em28xx_devlist_mutex);
2093 goto fail_reg_devices;
2094 }
2076 2095
2077 mutex_lock(&em28xx_extension_devlist_lock); 2096 mutex_lock(&em28xx_extension_devlist_lock);
2078 if (!list_empty(&em28xx_extension_devlist)) { 2097 if (!list_empty(&em28xx_extension_devlist)) {
@@ -2082,13 +2101,12 @@ static int em28xx_init_dev(struct em28xx **devhandle, struct usb_device *udev,
2082 } 2101 }
2083 } 2102 }
2084 mutex_unlock(&em28xx_extension_devlist_lock); 2103 mutex_unlock(&em28xx_extension_devlist_lock);
2104 mutex_unlock(&em28xx_devlist_mutex);
2085 2105
2086 return 0; 2106 return 0;
2087 2107
2088fail_unreg: 2108fail_reg_devices:
2089 em28xx_release_resources(dev);
2090 mutex_unlock(&dev->lock); 2109 mutex_unlock(&dev->lock);
2091 kfree(dev);
2092 return retval; 2110 return retval;
2093} 2111}
2094 2112
@@ -2231,8 +2249,12 @@ static int em28xx_usb_probe(struct usb_interface *interface,
2231 2249
2232 /* allocate device struct */ 2250 /* allocate device struct */
2233 retval = em28xx_init_dev(&dev, udev, nr); 2251 retval = em28xx_init_dev(&dev, udev, nr);
2234 if (retval) 2252 if (retval) {
2253 em28xx_devused &= ~(1<<dev->devno);
2254 kfree(dev);
2255
2235 return retval; 2256 return retval;
2257 }
2236 2258
2237 em28xx_info("Found %s\n", em28xx_boards[dev->model].name); 2259 em28xx_info("Found %s\n", em28xx_boards[dev->model].name);
2238 2260
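[Note on the em28xx-video hunks above: the big kernel lock in open is replaced by a dedicated em28xx_devlist_mutex that protects walks of the global device list (open, extension register/unregister, probe), analog node registration is factored into register_analog_devices(), and freeing dev plus clearing the devused bit moves out to the probe error path. A condensed sketch of the lookup that replaces lock_kernel(); the helper name is illustrative:]

/* Sketch: device-list walk under its own mutex instead of the BKL. */
static struct em28xx *em28xx_find_by_minor(int minor,
					   enum v4l2_buf_type *type)
{
	struct em28xx *h, *dev = NULL;

	mutex_lock(&em28xx_devlist_mutex);
	list_for_each_entry(h, &em28xx_devlist, devlist) {
		if (h->vdev->minor == minor) {
			dev = h;
			*type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		}
	}
	mutex_unlock(&em28xx_devlist_mutex);

	return dev;	/* NULL means -ENODEV for the caller */
}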
diff --git a/drivers/media/video/em28xx/em28xx.h b/drivers/media/video/em28xx/em28xx.h
index 82781178e0a3..5956e9b3062f 100644
--- a/drivers/media/video/em28xx/em28xx.h
+++ b/drivers/media/video/em28xx/em28xx.h
@@ -102,6 +102,9 @@
102#define EM28XX_MIN_BUF 4 102#define EM28XX_MIN_BUF 4
103#define EM28XX_DEF_BUF 8 103#define EM28XX_DEF_BUF 8
104 104
105/*Limits the max URB message size */
106#define URB_MAX_CTRL_SIZE 80
107
105/* Params for validated field */ 108/* Params for validated field */
106#define EM28XX_BOARD_NOT_VALIDATED 1 109#define EM28XX_BOARD_NOT_VALIDATED 1
107#define EM28XX_BOARD_VALIDATED 0 110#define EM28XX_BOARD_VALIDATED 0
@@ -430,6 +433,7 @@ struct em28xx {
430 433
431 /* locks */ 434 /* locks */
432 struct mutex lock; 435 struct mutex lock;
436 struct mutex ctrl_urb_lock; /* protects urb_buf */
433 /* spinlock_t queue_lock; */ 437 /* spinlock_t queue_lock; */
434 struct list_head inqueue, outqueue; 438 struct list_head inqueue, outqueue;
435 wait_queue_head_t open, wait_frame, wait_stream; 439 wait_queue_head_t open, wait_frame, wait_stream;
@@ -451,6 +455,8 @@ struct em28xx {
451 unsigned int *alt_max_pkt_size; /* array of wMaxPacketSize */ 455 unsigned int *alt_max_pkt_size; /* array of wMaxPacketSize */
452 struct urb *urb[EM28XX_NUM_BUFS]; /* urb for isoc transfers */ 456 struct urb *urb[EM28XX_NUM_BUFS]; /* urb for isoc transfers */
453 char *transfer_buffer[EM28XX_NUM_BUFS]; /* transfer buffers for isoc transfer */ 457 char *transfer_buffer[EM28XX_NUM_BUFS]; /* transfer buffers for isoc transfer */
458 char urb_buf[URB_MAX_CTRL_SIZE]; /* urb control msg buffer */
459
454 /* helper funcs that call usb_control_msg */ 460 /* helper funcs that call usb_control_msg */
455 int (*em28xx_write_regs) (struct em28xx *dev, u16 reg, 461 int (*em28xx_write_regs) (struct em28xx *dev, u16 reg,
456 char *buf, int len); 462 char *buf, int len);
diff --git a/drivers/media/video/et61x251/et61x251_core.c b/drivers/media/video/et61x251/et61x251_core.c
index 7a85c41b0eea..9d0ef96c23ff 100644
--- a/drivers/media/video/et61x251/et61x251_core.c
+++ b/drivers/media/video/et61x251/et61x251_core.c
@@ -588,7 +588,7 @@ static int et61x251_stream_interrupt(struct et61x251_device* cam)
588 cam->state |= DEV_MISCONFIGURED; 588 cam->state |= DEV_MISCONFIGURED;
589 DBG(1, "URB timeout reached. The camera is misconfigured. To " 589 DBG(1, "URB timeout reached. The camera is misconfigured. To "
590 "use it, close and open /dev/video%d again.", 590 "use it, close and open /dev/video%d again.",
591 cam->v4ldev->minor); 591 cam->v4ldev->num);
592 return -EIO; 592 return -EIO;
593 } 593 }
594 594
@@ -1195,7 +1195,7 @@ static void et61x251_release_resources(struct kref *kref)
1195 1195
1196 cam = container_of(kref, struct et61x251_device, kref); 1196 cam = container_of(kref, struct et61x251_device, kref);
1197 1197
1198 DBG(2, "V4L2 device /dev/video%d deregistered", cam->v4ldev->minor); 1198 DBG(2, "V4L2 device /dev/video%d deregistered", cam->v4ldev->num);
1199 video_set_drvdata(cam->v4ldev, NULL); 1199 video_set_drvdata(cam->v4ldev, NULL);
1200 video_unregister_device(cam->v4ldev); 1200 video_unregister_device(cam->v4ldev);
1201 usb_put_dev(cam->usbdev); 1201 usb_put_dev(cam->usbdev);
@@ -1237,7 +1237,7 @@ static int et61x251_open(struct inode* inode, struct file* filp)
1237 1237
1238 if (cam->users) { 1238 if (cam->users) {
1239 DBG(2, "Device /dev/video%d is already in use", 1239 DBG(2, "Device /dev/video%d is already in use",
1240 cam->v4ldev->minor); 1240 cam->v4ldev->num);
1241 DBG(3, "Simultaneous opens are not supported"); 1241 DBG(3, "Simultaneous opens are not supported");
1242 if ((filp->f_flags & O_NONBLOCK) || 1242 if ((filp->f_flags & O_NONBLOCK) ||
1243 (filp->f_flags & O_NDELAY)) { 1243 (filp->f_flags & O_NDELAY)) {
@@ -1280,7 +1280,7 @@ static int et61x251_open(struct inode* inode, struct file* filp)
1280 cam->frame_count = 0; 1280 cam->frame_count = 0;
1281 et61x251_empty_framequeues(cam); 1281 et61x251_empty_framequeues(cam);
1282 1282
1283 DBG(3, "Video device /dev/video%d is open", cam->v4ldev->minor); 1283 DBG(3, "Video device /dev/video%d is open", cam->v4ldev->num);
1284 1284
1285out: 1285out:
1286 mutex_unlock(&cam->open_mutex); 1286 mutex_unlock(&cam->open_mutex);
@@ -1304,7 +1304,7 @@ static int et61x251_release(struct inode* inode, struct file* filp)
1304 cam->users--; 1304 cam->users--;
1305 wake_up_interruptible_nr(&cam->wait_open, 1); 1305 wake_up_interruptible_nr(&cam->wait_open, 1);
1306 1306
1307 DBG(3, "Video device /dev/video%d closed", cam->v4ldev->minor); 1307 DBG(3, "Video device /dev/video%d closed", cam->v4ldev->num);
1308 1308
1309 kref_put(&cam->kref, et61x251_release_resources); 1309 kref_put(&cam->kref, et61x251_release_resources);
1310 1310
@@ -1845,7 +1845,7 @@ et61x251_vidioc_s_crop(struct et61x251_device* cam, void __user * arg)
1845 cam->state |= DEV_MISCONFIGURED; 1845 cam->state |= DEV_MISCONFIGURED;
1846 DBG(1, "VIDIOC_S_CROP failed because of hardware problems. To " 1846 DBG(1, "VIDIOC_S_CROP failed because of hardware problems. To "
1847 "use the camera, close and open /dev/video%d again.", 1847 "use the camera, close and open /dev/video%d again.",
1848 cam->v4ldev->minor); 1848 cam->v4ldev->num);
1849 return -EIO; 1849 return -EIO;
1850 } 1850 }
1851 1851
@@ -1858,7 +1858,7 @@ et61x251_vidioc_s_crop(struct et61x251_device* cam, void __user * arg)
1858 cam->state |= DEV_MISCONFIGURED; 1858 cam->state |= DEV_MISCONFIGURED;
1859 DBG(1, "VIDIOC_S_CROP failed because of not enough memory. To " 1859 DBG(1, "VIDIOC_S_CROP failed because of not enough memory. To "
1860 "use the camera, close and open /dev/video%d again.", 1860 "use the camera, close and open /dev/video%d again.",
1861 cam->v4ldev->minor); 1861 cam->v4ldev->num);
1862 return -ENOMEM; 1862 return -ENOMEM;
1863 } 1863 }
1864 1864
@@ -2068,7 +2068,7 @@ et61x251_vidioc_try_s_fmt(struct et61x251_device* cam, unsigned int cmd,
2068 cam->state |= DEV_MISCONFIGURED; 2068 cam->state |= DEV_MISCONFIGURED;
2069 DBG(1, "VIDIOC_S_FMT failed because of hardware problems. To " 2069 DBG(1, "VIDIOC_S_FMT failed because of hardware problems. To "
2070 "use the camera, close and open /dev/video%d again.", 2070 "use the camera, close and open /dev/video%d again.",
2071 cam->v4ldev->minor); 2071 cam->v4ldev->num);
2072 return -EIO; 2072 return -EIO;
2073 } 2073 }
2074 2074
@@ -2080,7 +2080,7 @@ et61x251_vidioc_try_s_fmt(struct et61x251_device* cam, unsigned int cmd,
2080 cam->state |= DEV_MISCONFIGURED; 2080 cam->state |= DEV_MISCONFIGURED;
2081 DBG(1, "VIDIOC_S_FMT failed because of not enough memory. To " 2081 DBG(1, "VIDIOC_S_FMT failed because of not enough memory. To "
2082 "use the camera, close and open /dev/video%d again.", 2082 "use the camera, close and open /dev/video%d again.",
2083 cam->v4ldev->minor); 2083 cam->v4ldev->num);
2084 return -ENOMEM; 2084 return -ENOMEM;
2085 } 2085 }
2086 2086
@@ -2128,7 +2128,7 @@ et61x251_vidioc_s_jpegcomp(struct et61x251_device* cam, void __user * arg)
2128 cam->state |= DEV_MISCONFIGURED; 2128 cam->state |= DEV_MISCONFIGURED;
2129 DBG(1, "VIDIOC_S_JPEGCOMP failed because of hardware " 2129 DBG(1, "VIDIOC_S_JPEGCOMP failed because of hardware "
2130 "problems. To use the camera, close and open " 2130 "problems. To use the camera, close and open "
2131 "/dev/video%d again.", cam->v4ldev->minor); 2131 "/dev/video%d again.", cam->v4ldev->num);
2132 return -EIO; 2132 return -EIO;
2133 } 2133 }
2134 2134
@@ -2605,7 +2605,7 @@ et61x251_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
2605 goto fail; 2605 goto fail;
2606 } 2606 }
2607 2607
2608 DBG(2, "V4L2 device registered as /dev/video%d", cam->v4ldev->minor); 2608 DBG(2, "V4L2 device registered as /dev/video%d", cam->v4ldev->num);
2609 2609
2610 cam->module_param.force_munmap = force_munmap[dev_nr]; 2610 cam->module_param.force_munmap = force_munmap[dev_nr];
2611 cam->module_param.frame_timeout = frame_timeout[dev_nr]; 2611 cam->module_param.frame_timeout = frame_timeout[dev_nr];
@@ -2658,7 +2658,7 @@ static void et61x251_usb_disconnect(struct usb_interface* intf)
2658 if (cam->users) { 2658 if (cam->users) {
2659 DBG(2, "Device /dev/video%d is open! Deregistration and " 2659 DBG(2, "Device /dev/video%d is open! Deregistration and "
2660 "memory deallocation are deferred.", 2660 "memory deallocation are deferred.",
2661 cam->v4ldev->minor); 2661 cam->v4ldev->num);
2662 cam->state |= DEV_MISCONFIGURED; 2662 cam->state |= DEV_MISCONFIGURED;
2663 et61x251_stop_transfer(cam); 2663 et61x251_stop_transfer(cam);
2664 cam->state |= DEV_DISCONNECTED; 2664 cam->state |= DEV_DISCONNECTED;
diff --git a/drivers/media/video/gspca/Kconfig b/drivers/media/video/gspca/Kconfig
index 4d0817471c9f..6b557c057fac 100644
--- a/drivers/media/video/gspca/Kconfig
+++ b/drivers/media/video/gspca/Kconfig
@@ -3,16 +3,16 @@ menuconfig USB_GSPCA
3 depends on VIDEO_V4L2 3 depends on VIDEO_V4L2
4 default m 4 default m
5 ---help--- 5 ---help---
6 Say Y here if you want to enable selecting webcams based 6 Say Y here if you want to enable selecting webcams based
7 on the GSPCA framework. 7 on the GSPCA framework.
8 8
9 See <file:Documentation/video4linux/gspca.txt> for more info. 9 See <file:Documentation/video4linux/gspca.txt> for more info.
10 10
11 This driver uses the Video For Linux API. You must say Y or M to 11 This driver uses the Video For Linux API. You must say Y or M to
12 "Video For Linux" to use this driver. 12 "Video For Linux" to use this driver.
13 13
14 To compile this driver as modules, choose M here: the 14 To compile this driver as modules, choose M here: the
15 modules will be called gspca_main. 15 modules will be called gspca_main.
16 16
17 17
18if USB_GSPCA && VIDEO_V4L2 18if USB_GSPCA && VIDEO_V4L2
@@ -23,190 +23,190 @@ config USB_GSPCA_CONEX
23 tristate "Conexant Camera Driver" 23 tristate "Conexant Camera Driver"
24 depends on VIDEO_V4L2 && USB_GSPCA 24 depends on VIDEO_V4L2 && USB_GSPCA
25 help 25 help
26 Say Y here if you want support for cameras based on the Conexant chip. 26 Say Y here if you want support for cameras based on the Conexant chip.
27 27
28 To compile this driver as a module, choose M here: the 28 To compile this driver as a module, choose M here: the
29 module will be called gspca_conex. 29 module will be called gspca_conex.
30 30
31config USB_GSPCA_ETOMS 31config USB_GSPCA_ETOMS
32 tristate "Etoms USB Camera Driver" 32 tristate "Etoms USB Camera Driver"
33 depends on VIDEO_V4L2 && USB_GSPCA 33 depends on VIDEO_V4L2 && USB_GSPCA
34 help 34 help
35 Say Y here if you want support for cameras based on the Etoms chip. 35 Say Y here if you want support for cameras based on the Etoms chip.
36 36
37 To compile this driver as a module, choose M here: the 37 To compile this driver as a module, choose M here: the
38 module will be called gspca_etoms. 38 module will be called gspca_etoms.
39 39
40config USB_GSPCA_FINEPIX 40config USB_GSPCA_FINEPIX
41 tristate "Fujifilm FinePix USB V4L2 driver" 41 tristate "Fujifilm FinePix USB V4L2 driver"
42 depends on VIDEO_V4L2 && USB_GSPCA 42 depends on VIDEO_V4L2 && USB_GSPCA
43 help 43 help
44 Say Y here if you want support for cameras based on the FinePix chip. 44 Say Y here if you want support for cameras based on the FinePix chip.
45 45
46 To compile this driver as a module, choose M here: the 46 To compile this driver as a module, choose M here: the
47 module will be called gspca_finepix. 47 module will be called gspca_finepix.
48 48
49config USB_GSPCA_MARS 49config USB_GSPCA_MARS
50 tristate "Mars USB Camera Driver" 50 tristate "Mars USB Camera Driver"
51 depends on VIDEO_V4L2 && USB_GSPCA 51 depends on VIDEO_V4L2 && USB_GSPCA
52 help 52 help
53 Say Y here if you want support for cameras based on the Mars chip. 53 Say Y here if you want support for cameras based on the Mars chip.
54 54
55 To compile this driver as a module, choose M here: the 55 To compile this driver as a module, choose M here: the
56 module will be called gspca_mars. 56 module will be called gspca_mars.
57 57
58config USB_GSPCA_OV519 58config USB_GSPCA_OV519
59 tristate "OV519 USB Camera Driver" 59 tristate "OV519 USB Camera Driver"
60 depends on VIDEO_V4L2 && USB_GSPCA 60 depends on VIDEO_V4L2 && USB_GSPCA
61 help 61 help
62 Say Y here if you want support for cameras based on the OV519 chip. 62 Say Y here if you want support for cameras based on the OV519 chip.
63 63
64 To compile this driver as a module, choose M here: the 64 To compile this driver as a module, choose M here: the
65 module will be called gspca_ov519. 65 module will be called gspca_ov519.
66 66
67config USB_GSPCA_PAC207 67config USB_GSPCA_PAC207
68 tristate "Pixart PAC207 USB Camera Driver" 68 tristate "Pixart PAC207 USB Camera Driver"
69 depends on VIDEO_V4L2 && USB_GSPCA 69 depends on VIDEO_V4L2 && USB_GSPCA
70 help 70 help
71 Say Y here if you want support for cameras based on the PAC207 chip. 71 Say Y here if you want support for cameras based on the PAC207 chip.
72 72
73 To compile this driver as a module, choose M here: the 73 To compile this driver as a module, choose M here: the
74 module will be called gspca_pac207. 74 module will be called gspca_pac207.
75 75
76config USB_GSPCA_PAC7311 76config USB_GSPCA_PAC7311
77 tristate "Pixart PAC7311 USB Camera Driver" 77 tristate "Pixart PAC7311 USB Camera Driver"
78 depends on VIDEO_V4L2 && USB_GSPCA 78 depends on VIDEO_V4L2 && USB_GSPCA
79 help 79 help
80 Say Y here if you want support for cameras based on the PAC7311 chip. 80 Say Y here if you want support for cameras based on the PAC7311 chip.
81 81
82 To compile this driver as a module, choose M here: the 82 To compile this driver as a module, choose M here: the
83 module will be called gspca_pac7311. 83 module will be called gspca_pac7311.
84 84
85config USB_GSPCA_SONIXB 85config USB_GSPCA_SONIXB
86 tristate "SN9C102 USB Camera Driver" 86 tristate "SN9C102 USB Camera Driver"
87 depends on VIDEO_V4L2 && USB_GSPCA 87 depends on VIDEO_V4L2 && USB_GSPCA
88 help 88 help
89 Say Y here if you want support for cameras based on the SONIXB chip. 89 Say Y here if you want support for cameras based on the SONIXB chip.
90 90
91 To compile this driver as a module, choose M here: the 91 To compile this driver as a module, choose M here: the
92 module will be called gspca_sonixb. 92 module will be called gspca_sonixb.
93 93
94config USB_GSPCA_SONIXJ 94config USB_GSPCA_SONIXJ
95 tristate "SONIX JPEG USB Camera Driver" 95 tristate "SONIX JPEG USB Camera Driver"
96 depends on VIDEO_V4L2 && USB_GSPCA 96 depends on VIDEO_V4L2 && USB_GSPCA
97 help 97 help
98 Say Y here if you want support for cameras based on the SONIXJ chip. 98 Say Y here if you want support for cameras based on the SONIXJ chip.
99 99
100 To compile this driver as a module, choose M here: the 100 To compile this driver as a module, choose M here: the
101 module will be called gspca_sonixj 101 module will be called gspca_sonixj
102 102
103config USB_GSPCA_SPCA500 103config USB_GSPCA_SPCA500
104 tristate "SPCA500 USB Camera Driver" 104 tristate "SPCA500 USB Camera Driver"
105 depends on VIDEO_V4L2 && USB_GSPCA 105 depends on VIDEO_V4L2 && USB_GSPCA
106 help 106 help
107 Say Y here if you want support for cameras based on the SPCA500 chip. 107 Say Y here if you want support for cameras based on the SPCA500 chip.
108 108
109 To compile this driver as a module, choose M here: the 109 To compile this driver as a module, choose M here: the
110 module will be called gspca_spca500. 110 module will be called gspca_spca500.
111 111
112config USB_GSPCA_SPCA501 112config USB_GSPCA_SPCA501
113 tristate "SPCA501 USB Camera Driver" 113 tristate "SPCA501 USB Camera Driver"
114 depends on VIDEO_V4L2 && USB_GSPCA 114 depends on VIDEO_V4L2 && USB_GSPCA
115 help 115 help
116 Say Y here if you want support for cameras based on the SPCA501 chip. 116 Say Y here if you want support for cameras based on the SPCA501 chip.
117 117
118 To compile this driver as a module, choose M here: the 118 To compile this driver as a module, choose M here: the
119 module will be called gspca_spca501. 119 module will be called gspca_spca501.
120 120
121config USB_GSPCA_SPCA505 121config USB_GSPCA_SPCA505
122 tristate "SPCA505 USB Camera Driver" 122 tristate "SPCA505 USB Camera Driver"
123 depends on VIDEO_V4L2 && USB_GSPCA 123 depends on VIDEO_V4L2 && USB_GSPCA
124 help 124 help
125 Say Y here if you want support for cameras based on the SPCA505 chip. 125 Say Y here if you want support for cameras based on the SPCA505 chip.
126 126
127 To compile this driver as a module, choose M here: the 127 To compile this driver as a module, choose M here: the
128 module will be called gspca_spca505. 128 module will be called gspca_spca505.
129 129
130config USB_GSPCA_SPCA506 130config USB_GSPCA_SPCA506
131 tristate "SPCA506 USB Camera Driver" 131 tristate "SPCA506 USB Camera Driver"
132 depends on VIDEO_V4L2 && USB_GSPCA 132 depends on VIDEO_V4L2 && USB_GSPCA
133 help 133 help
134 Say Y here if you want support for cameras based on the SPCA506 chip. 134 Say Y here if you want support for cameras based on the SPCA506 chip.
135 135
136 To compile this driver as a module, choose M here: the 136 To compile this driver as a module, choose M here: the
137 module will be called gspca_spca506. 137 module will be called gspca_spca506.
138 138
139config USB_GSPCA_SPCA508 139config USB_GSPCA_SPCA508
140 tristate "SPCA508 USB Camera Driver" 140 tristate "SPCA508 USB Camera Driver"
141 depends on VIDEO_V4L2 && USB_GSPCA 141 depends on VIDEO_V4L2 && USB_GSPCA
142 help 142 help
143 Say Y here if you want support for cameras based on the SPCA508 chip. 143 Say Y here if you want support for cameras based on the SPCA508 chip.
144 144
145 To compile this driver as a module, choose M here: the 145 To compile this driver as a module, choose M here: the
146 module will be called gspca_spca508. 146 module will be called gspca_spca508.
147 147
148config USB_GSPCA_SPCA561 148config USB_GSPCA_SPCA561
149 tristate "SPCA561 USB Camera Driver" 149 tristate "SPCA561 USB Camera Driver"
150 depends on VIDEO_V4L2 && USB_GSPCA 150 depends on VIDEO_V4L2 && USB_GSPCA
151 help 151 help
152 Say Y here if you want support for cameras based on the SPCA561 chip. 152 Say Y here if you want support for cameras based on the SPCA561 chip.
153 153
154 To compile this driver as a module, choose M here: the 154 To compile this driver as a module, choose M here: the
155 module will be called gspca_spca561. 155 module will be called gspca_spca561.
156 156
157config USB_GSPCA_STK014 157config USB_GSPCA_STK014
158 tristate "Syntek DV4000 (STK014) USB Camera Driver" 158 tristate "Syntek DV4000 (STK014) USB Camera Driver"
159 depends on VIDEO_V4L2 && USB_GSPCA 159 depends on VIDEO_V4L2 && USB_GSPCA
160 help 160 help
161 Say Y here if you want support for cameras based on the STK014 chip. 161 Say Y here if you want support for cameras based on the STK014 chip.
162 162
163 To compile this driver as a module, choose M here: the 163 To compile this driver as a module, choose M here: the
164 module will be called gspca_stk014. 164 module will be called gspca_stk014.
165 165
166config USB_GSPCA_SUNPLUS 166config USB_GSPCA_SUNPLUS
167 tristate "SUNPLUS USB Camera Driver" 167 tristate "SUNPLUS USB Camera Driver"
168 depends on VIDEO_V4L2 && USB_GSPCA 168 depends on VIDEO_V4L2 && USB_GSPCA
169 help 169 help
170 Say Y here if you want support for cameras based on the Sunplus 170 Say Y here if you want support for cameras based on the Sunplus
171 SPCA504(abc) SPCA533 SPCA536 chips. 171 SPCA504(abc) SPCA533 SPCA536 chips.
172 172
173 To compile this driver as a module, choose M here: the 173 To compile this driver as a module, choose M here: the
174 module will be called gspca_spca5xx. 174 module will be called gspca_spca5xx.
175 175
176config USB_GSPCA_T613 176config USB_GSPCA_T613
177 tristate "T613 (JPEG Compliance) USB Camera Driver" 177 tristate "T613 (JPEG Compliance) USB Camera Driver"
178 depends on VIDEO_V4L2 && USB_GSPCA 178 depends on VIDEO_V4L2 && USB_GSPCA
179 help 179 help
180 Say Y here if you want support for cameras based on the T613 chip. 180 Say Y here if you want support for cameras based on the T613 chip.
181 181
182 To compile this driver as a module, choose M here: the 182 To compile this driver as a module, choose M here: the
183 module will be called gspca_t613. 183 module will be called gspca_t613.
184 184
185config USB_GSPCA_TV8532 185config USB_GSPCA_TV8532
186 tristate "TV8532 USB Camera Driver" 186 tristate "TV8532 USB Camera Driver"
187 depends on VIDEO_V4L2 && USB_GSPCA 187 depends on VIDEO_V4L2 && USB_GSPCA
188 help 188 help
189 Say Y here if you want support for cameras based on the TV8531 chip. 189 Say Y here if you want support for cameras based on the TV8531 chip.
190 190
191 To compile this driver as a module, choose M here: the 191 To compile this driver as a module, choose M here: the
192 module will be called gspca_tv8532. 192 module will be called gspca_tv8532.
193 193
194config USB_GSPCA_VC032X 194config USB_GSPCA_VC032X
195 tristate "VC032X USB Camera Driver" 195 tristate "VC032X USB Camera Driver"
196 depends on VIDEO_V4L2 && USB_GSPCA 196 depends on VIDEO_V4L2 && USB_GSPCA
197 help 197 help
198 Say Y here if you want support for cameras based on the VC032X chip. 198 Say Y here if you want support for cameras based on the VC032X chip.
199 199
200 To compile this driver as a module, choose M here: the 200 To compile this driver as a module, choose M here: the
201 module will be called gspca_vc032x. 201 module will be called gspca_vc032x.
202 202
203config USB_GSPCA_ZC3XX 203config USB_GSPCA_ZC3XX
204 tristate "VC3xx USB Camera Driver" 204 tristate "ZC3XX USB Camera Driver"
205 depends on VIDEO_V4L2 && USB_GSPCA 205 depends on VIDEO_V4L2 && USB_GSPCA
206 help 206 help
207 Say Y here if you want support for cameras based on the ZC3XX chip. 207 Say Y here if you want support for cameras based on the ZC3XX chip.
208 208
209 To compile this driver as a module, choose M here: the 209 To compile this driver as a module, choose M here: the
210 module will be called gspca_zc3xx. 210 module will be called gspca_zc3xx.
211 211
212endif 212endif
diff --git a/drivers/media/video/gspca/conex.c b/drivers/media/video/gspca/conex.c
index a9d51ba7c57c..de28354ea5ba 100644
--- a/drivers/media/video/gspca/conex.c
+++ b/drivers/media/video/gspca/conex.c
@@ -846,10 +846,13 @@ static int sd_start(struct gspca_dev *gspca_dev)
846 return 0; 846 return 0;
847} 847}
848 848
849/* called on streamoff with alt 0 and on disconnect */
849static void sd_stop0(struct gspca_dev *gspca_dev) 850static void sd_stop0(struct gspca_dev *gspca_dev)
850{ 851{
851 int retry = 50; 852 int retry = 50;
852 853
854 if (!gspca_dev->present)
855 return;
853 reg_w_val(gspca_dev, 0x0000, 0x00); 856 reg_w_val(gspca_dev, 0x0000, 0x00);
854 reg_r(gspca_dev, 0x0002, 1); 857 reg_r(gspca_dev, 0x0002, 1);
855 reg_w_val(gspca_dev, 0x0053, 0x00); 858 reg_w_val(gspca_dev, 0x0053, 0x00);
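[Note on the gspca/conex hunk above: sd_stop0 is now also invoked on disconnect, so it must not touch the hardware once the camera is gone. A minimal sketch of the guard (function name illustrative):]

/* Sketch: skip the stream-off register writes after disconnect. */
static void sd_stop0_sketch(struct gspca_dev *gspca_dev)
{
	if (!gspca_dev->present)	/* device already unplugged */
		return;

	reg_w_val(gspca_dev, 0x0000, 0x00);	/* normal stream-off path */
}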
diff --git a/drivers/media/video/gspca/finepix.c b/drivers/media/video/gspca/finepix.c
index 65d3cbfe6b27..607942fd7970 100644
--- a/drivers/media/video/gspca/finepix.c
+++ b/drivers/media/video/gspca/finepix.c
@@ -276,6 +276,12 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
276 /* Stop the state machine */ 276 /* Stop the state machine */
277 if (dev->state != FPIX_NOP) 277 if (dev->state != FPIX_NOP)
278 wait_for_completion(&dev->can_close); 278 wait_for_completion(&dev->can_close);
279}
280
281/* called on streamoff with alt 0 and disconnect */
282static void sd_stop0(struct gspca_dev *gspca_dev)
283{
284 struct usb_fpix *dev = (struct usb_fpix *) gspca_dev;
279 285
280 usb_free_urb(dev->control_urb); 286 usb_free_urb(dev->control_urb);
281 dev->control_urb = NULL; 287 dev->control_urb = NULL;
@@ -385,6 +391,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
385error: 391error:
386 /* Free the ressources */ 392 /* Free the ressources */
387 sd_stopN(gspca_dev); 393 sd_stopN(gspca_dev);
394 sd_stop0(gspca_dev);
388 return ret; 395 return ret;
389} 396}
390 397
@@ -425,6 +432,7 @@ static const struct sd_desc sd_desc = {
425 .init = sd_init, 432 .init = sd_init,
426 .start = sd_start, 433 .start = sd_start,
427 .stopN = sd_stopN, 434 .stopN = sd_stopN,
435 .stop0 = sd_stop0,
428}; 436};
429 437
430/* -- device connect -- */ 438/* -- device connect -- */
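[Note on the gspca/finepix hunks above: URB freeing is split out of sd_stopN into a new sd_stop0 callback, which the core also calls on disconnect, so the control URB is not leaked when the camera is unplugged while streaming; sd_start's error path now calls both. A minimal sketch of the stop0 half (function name illustrative):]

/* Sketch: stop0 frees the control URB; stopN only quiesces the state machine. */
static void fpix_stop0_sketch(struct usb_fpix *dev)
{
	usb_free_urb(dev->control_urb);
	dev->control_urb = NULL;
}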
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
index e48fbfc8ad05..748a87e82e44 100644
--- a/drivers/media/video/gspca/gspca.c
+++ b/drivers/media/video/gspca/gspca.c
@@ -646,15 +646,14 @@ static void gspca_stream_off(struct gspca_dev *gspca_dev)
646{ 646{
647 gspca_dev->streaming = 0; 647 gspca_dev->streaming = 0;
648 atomic_set(&gspca_dev->nevent, 0); 648 atomic_set(&gspca_dev->nevent, 0);
649 if (gspca_dev->present) { 649 if (gspca_dev->present
650 if (gspca_dev->sd_desc->stopN) 650 && gspca_dev->sd_desc->stopN)
651 gspca_dev->sd_desc->stopN(gspca_dev); 651 gspca_dev->sd_desc->stopN(gspca_dev);
652 destroy_urbs(gspca_dev); 652 destroy_urbs(gspca_dev);
653 gspca_set_alt0(gspca_dev); 653 gspca_set_alt0(gspca_dev);
654 if (gspca_dev->sd_desc->stop0) 654 if (gspca_dev->sd_desc->stop0)
655 gspca_dev->sd_desc->stop0(gspca_dev); 655 gspca_dev->sd_desc->stop0(gspca_dev);
656 PDEBUG(D_STREAM, "stream off OK"); 656 PDEBUG(D_STREAM, "stream off OK");
657 }
658} 657}
659 658
660static void gspca_set_default_mode(struct gspca_dev *gspca_dev) 659static void gspca_set_default_mode(struct gspca_dev *gspca_dev)
@@ -863,7 +862,7 @@ static int dev_open(struct inode *inode, struct file *file)
863 int ret; 862 int ret;
864 863
865 PDEBUG(D_STREAM, "%s open", current->comm); 864 PDEBUG(D_STREAM, "%s open", current->comm);
866 gspca_dev = (struct gspca_dev *) video_devdata(file); 865 gspca_dev = video_drvdata(file);
867 if (mutex_lock_interruptible(&gspca_dev->queue_lock)) 866 if (mutex_lock_interruptible(&gspca_dev->queue_lock))
868 return -ERESTARTSYS; 867 return -ERESTARTSYS;
869 if (!gspca_dev->present) { 868 if (!gspca_dev->present) {
@@ -875,6 +874,13 @@ static int dev_open(struct inode *inode, struct file *file)
875 ret = -EBUSY; 874 ret = -EBUSY;
876 goto out; 875 goto out;
877 } 876 }
877
878 /* protect the subdriver against rmmod */
879 if (!try_module_get(gspca_dev->module)) {
880 ret = -ENODEV;
881 goto out;
882 }
883
878 gspca_dev->users++; 884 gspca_dev->users++;
879 885
880 /* one more user */ 886 /* one more user */
@@ -884,10 +890,10 @@ static int dev_open(struct inode *inode, struct file *file)
884#ifdef GSPCA_DEBUG 890#ifdef GSPCA_DEBUG
885 /* activate the v4l2 debug */ 891 /* activate the v4l2 debug */
886 if (gspca_debug & D_V4L2) 892 if (gspca_debug & D_V4L2)
887 gspca_dev->vdev.debug |= V4L2_DEBUG_IOCTL 893 gspca_dev->vdev->debug |= V4L2_DEBUG_IOCTL
888 | V4L2_DEBUG_IOCTL_ARG; 894 | V4L2_DEBUG_IOCTL_ARG;
889 else 895 else
890 gspca_dev->vdev.debug &= ~(V4L2_DEBUG_IOCTL 896 gspca_dev->vdev->debug &= ~(V4L2_DEBUG_IOCTL
891 | V4L2_DEBUG_IOCTL_ARG); 897 | V4L2_DEBUG_IOCTL_ARG);
892#endif 898#endif
893 ret = 0; 899 ret = 0;
@@ -921,6 +927,7 @@ static int dev_close(struct inode *inode, struct file *file)
921 gspca_dev->memory = GSPCA_MEMORY_NO; 927 gspca_dev->memory = GSPCA_MEMORY_NO;
922 } 928 }
923 file->private_data = NULL; 929 file->private_data = NULL;
930 module_put(gspca_dev->module);
924 mutex_unlock(&gspca_dev->queue_lock); 931 mutex_unlock(&gspca_dev->queue_lock);
925 932
926 PDEBUG(D_STREAM, "close done"); 933 PDEBUG(D_STREAM, "close done");
@@ -1748,11 +1755,6 @@ out:
1748 return ret; 1755 return ret;
1749} 1756}
1750 1757
1751static void dev_release(struct video_device *vfd)
1752{
1753 /* nothing */
1754}
1755
1756static struct file_operations dev_fops = { 1758static struct file_operations dev_fops = {
1757 .owner = THIS_MODULE, 1759 .owner = THIS_MODULE,
1758 .open = dev_open, 1760 .open = dev_open,
@@ -1800,7 +1802,7 @@ static struct video_device gspca_template = {
1800 .name = "gspca main driver", 1802 .name = "gspca main driver",
1801 .fops = &dev_fops, 1803 .fops = &dev_fops,
1802 .ioctl_ops = &dev_ioctl_ops, 1804 .ioctl_ops = &dev_ioctl_ops,
1803 .release = dev_release, /* mandatory */ 1805 .release = video_device_release,
1804 .minor = -1, 1806 .minor = -1,
1805}; 1807};
1806 1808
@@ -1869,17 +1871,18 @@ int gspca_dev_probe(struct usb_interface *intf,
1869 init_waitqueue_head(&gspca_dev->wq); 1871 init_waitqueue_head(&gspca_dev->wq);
1870 1872
1871 /* init video stuff */ 1873 /* init video stuff */
1872 memcpy(&gspca_dev->vdev, &gspca_template, sizeof gspca_template); 1874 gspca_dev->vdev = video_device_alloc();
1873 gspca_dev->vdev.parent = &dev->dev; 1875 memcpy(gspca_dev->vdev, &gspca_template, sizeof gspca_template);
1874 memcpy(&gspca_dev->fops, &dev_fops, sizeof gspca_dev->fops); 1876 gspca_dev->vdev->parent = &dev->dev;
1875 gspca_dev->vdev.fops = &gspca_dev->fops; 1877 gspca_dev->module = module;
1876 gspca_dev->fops.owner = module; /* module protection */
1877 gspca_dev->present = 1; 1878 gspca_dev->present = 1;
1878 ret = video_register_device(&gspca_dev->vdev, 1879 video_set_drvdata(gspca_dev->vdev, gspca_dev);
1880 ret = video_register_device(gspca_dev->vdev,
1879 VFL_TYPE_GRABBER, 1881 VFL_TYPE_GRABBER,
1880 video_nr); 1882 video_nr);
1881 if (ret < 0) { 1883 if (ret < 0) {
1882 err("video_register_device err %d", ret); 1884 err("video_register_device err %d", ret);
1885 video_device_release(gspca_dev->vdev);
1883 goto out; 1886 goto out;
1884 } 1887 }
1885 1888
@@ -1887,7 +1890,8 @@ int gspca_dev_probe(struct usb_interface *intf,
1887 PDEBUG(D_PROBE, "probe ok"); 1890 PDEBUG(D_PROBE, "probe ok");
1888 return 0; 1891 return 0;
1889out: 1892out:
1890 kref_put(&gspca_dev->kref, gspca_delete); 1893 kfree(gspca_dev->usb_buf);
1894 kfree(gspca_dev);
1891 return ret; 1895 return ret;
1892} 1896}
1893EXPORT_SYMBOL(gspca_dev_probe); 1897EXPORT_SYMBOL(gspca_dev_probe);
@@ -1905,7 +1909,7 @@ void gspca_disconnect(struct usb_interface *intf)
1905 usb_set_intfdata(intf, NULL); 1909 usb_set_intfdata(intf, NULL);
1906 1910
1907/* We don't want people trying to open up the device */ 1911/* We don't want people trying to open up the device */
1908 video_unregister_device(&gspca_dev->vdev); 1912 video_unregister_device(gspca_dev->vdev);
1909 1913
1910 gspca_dev->present = 0; 1914 gspca_dev->present = 0;
1911 gspca_dev->streaming = 0; 1915 gspca_dev->streaming = 0;
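
The gspca.c hunks above replace the old trick of copying the subdriver's file_operations just to set .owner with an explicit module reference: dev_open pins the subdriver with try_module_get() and dev_close drops it with module_put(), so the sensor-specific module cannot be removed while a /dev/video node is open. A minimal sketch of that pattern, assuming a hypothetical driver-private struct that records which module supplies the hardware callbacks:

        #include <linux/module.h>
        #include <linux/errno.h>

        struct my_dev {                         /* hypothetical device state */
                struct module *module;          /* subdriver owning the hw ops */
                int users;
        };

        static int my_open(struct my_dev *dev)
        {
                /* Refuse the open rather than let the subdriver be rmmod'ed
                 * while a file handle still points at its callbacks. */
                if (!try_module_get(dev->module))
                        return -ENODEV;
                dev->users++;
                return 0;
        }

        static void my_close(struct my_dev *dev)
        {
                dev->users--;
                module_put(dev->module);        /* balances the get in my_open() */
        }
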
diff --git a/drivers/media/video/gspca/gspca.h b/drivers/media/video/gspca/gspca.h
index 1d9dc90b4791..d25e8d69373b 100644
--- a/drivers/media/video/gspca/gspca.h
+++ b/drivers/media/video/gspca/gspca.h
@@ -97,7 +97,7 @@ struct sd_desc {
97 cam_pkt_op pkt_scan; 97 cam_pkt_op pkt_scan;
98/* optional operations */ 98/* optional operations */
99 cam_v_op stopN; /* called on stream off - main alt */ 99 cam_v_op stopN; /* called on stream off - main alt */
100 cam_v_op stop0; /* called on stream off - alt 0 */ 100 cam_v_op stop0; /* called on stream off & disconnect - alt 0 */
101 cam_v_op dq_callback; /* called when a frame has been dequeued */ 101 cam_v_op dq_callback; /* called when a frame has been dequeued */
102 cam_jpg_op get_jcomp; 102 cam_jpg_op get_jcomp;
103 cam_jpg_op set_jcomp; 103 cam_jpg_op set_jcomp;
@@ -120,8 +120,8 @@ struct gspca_frame {
120}; 120};
121 121
122struct gspca_dev { 122struct gspca_dev {
123 struct video_device vdev; /* !! must be the first item */ 123 struct video_device *vdev;
124 struct file_operations fops; 124 struct module *module; /* subdriver handling the device */
125 struct usb_device *dev; 125 struct usb_device *dev;
126 struct kref kref; 126 struct kref kref;
127 struct file *capt_file; /* file doing video capture */ 127 struct file *capt_file; /* file doing video capture */
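
With the header change above, the video_device no longer has to be the first member of gspca_dev (the old cast trick), because the core allocates it separately, stores the driver pointer with video_set_drvdata() and gets it back with video_drvdata(file); the template's .release is video_device_release, which frees the allocation when the last reference goes away. A hedged sketch of that allocate/register lifecycle with hypothetical names (.fops and .ioctl_ops omitted, error handling trimmed):

        #include <linux/string.h>
        #include <linux/device.h>
        #include <media/v4l2-dev.h>

        struct my_state {                               /* hypothetical driver state */
                struct video_device *vdev;              /* allocated, not embedded */
        };

        static struct video_device my_template = {
                .name    = "example",
                .release = video_device_release,        /* core frees the allocation */
                .minor   = -1,
        };

        static int my_register(struct my_state *st, struct device *parent)
        {
                st->vdev = video_device_alloc();
                if (!st->vdev)
                        return -ENOMEM;
                memcpy(st->vdev, &my_template, sizeof(my_template));
                st->vdev->parent = parent;
                video_set_drvdata(st->vdev, st);        /* later: st = video_drvdata(file) */

                if (video_register_device(st->vdev, VFL_TYPE_GRABBER, -1) < 0) {
                        video_device_release(st->vdev); /* never registered: free by hand */
                        st->vdev = NULL;
                        return -ENODEV;
                }
                return 0;
        }
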
diff --git a/drivers/media/video/gspca/pac7311.c b/drivers/media/video/gspca/pac7311.c
index e5ff9a6199ef..fbd45e235d97 100644
--- a/drivers/media/video/gspca/pac7311.c
+++ b/drivers/media/video/gspca/pac7311.c
@@ -749,10 +749,13 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
749 reg_w(gspca_dev, 0x78, 0x44); /* Bit_0=start stream, Bit_6=LED */ 749 reg_w(gspca_dev, 0x78, 0x44); /* Bit_0=start stream, Bit_6=LED */
750} 750}
751 751
752/* called on streamoff with alt 0 and on disconnect */
752static void sd_stop0(struct gspca_dev *gspca_dev) 753static void sd_stop0(struct gspca_dev *gspca_dev)
753{ 754{
754 struct sd *sd = (struct sd *) gspca_dev; 755 struct sd *sd = (struct sd *) gspca_dev;
755 756
757 if (!gspca_dev->present)
758 return;
756 if (sd->sensor == SENSOR_PAC7302) { 759 if (sd->sensor == SENSOR_PAC7302) {
757 reg_w(gspca_dev, 0xff, 0x01); 760 reg_w(gspca_dev, 0xff, 0x01);
758 reg_w(gspca_dev, 0x78, 0x40); 761 reg_w(gspca_dev, 0x78, 0x40);
diff --git a/drivers/media/video/gspca/spca501.c b/drivers/media/video/gspca/spca501.c
index b742f260c7ca..e29954c1c38c 100644
--- a/drivers/media/video/gspca/spca501.c
+++ b/drivers/media/video/gspca/spca501.c
@@ -2022,8 +2022,11 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
2022 reg_write(gspca_dev->dev, SPCA501_REG_CTLRL, 0x01, 0x00); 2022 reg_write(gspca_dev->dev, SPCA501_REG_CTLRL, 0x01, 0x00);
2023} 2023}
2024 2024
2025/* called on streamoff with alt 0 and on disconnect */
2025static void sd_stop0(struct gspca_dev *gspca_dev) 2026static void sd_stop0(struct gspca_dev *gspca_dev)
2026{ 2027{
2028 if (!gspca_dev->present)
2029 return;
2027 reg_write(gspca_dev->dev, SPCA501_REG_CTLRL, 0x05, 0x00); 2030 reg_write(gspca_dev->dev, SPCA501_REG_CTLRL, 0x05, 0x00);
2028} 2031}
2029 2032
diff --git a/drivers/media/video/gspca/spca505.c b/drivers/media/video/gspca/spca505.c
index b345749213cf..895b9fe4018c 100644
--- a/drivers/media/video/gspca/spca505.c
+++ b/drivers/media/video/gspca/spca505.c
@@ -742,8 +742,12 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
742 reg_write(gspca_dev->dev, 0x02, 0x00, 0x00); 742 reg_write(gspca_dev->dev, 0x02, 0x00, 0x00);
743} 743}
744 744
745/* called on streamoff with alt 0 and on disconnect */
745static void sd_stop0(struct gspca_dev *gspca_dev) 746static void sd_stop0(struct gspca_dev *gspca_dev)
746{ 747{
748 if (!gspca_dev->present)
749 return;
750
747 /* This maybe reset or power control */ 751 /* This maybe reset or power control */
748 reg_write(gspca_dev->dev, 0x03, 0x03, 0x20); 752 reg_write(gspca_dev->dev, 0x03, 0x03, 0x20);
749 reg_write(gspca_dev->dev, 0x03, 0x01, 0x0); 753 reg_write(gspca_dev->dev, 0x03, 0x01, 0x0);
diff --git a/drivers/media/video/gspca/spca561.c b/drivers/media/video/gspca/spca561.c
index 020a03c466c1..c3de4e44123d 100644
--- a/drivers/media/video/gspca/spca561.c
+++ b/drivers/media/video/gspca/spca561.c
@@ -766,10 +766,13 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
766 } 766 }
767} 767}
768 768
769/* called on streamoff with alt 0 and on disconnect */
769static void sd_stop0(struct gspca_dev *gspca_dev) 770static void sd_stop0(struct gspca_dev *gspca_dev)
770{ 771{
771 struct sd *sd = (struct sd *) gspca_dev; 772 struct sd *sd = (struct sd *) gspca_dev;
772 773
774 if (!gspca_dev->present)
775 return;
773 if (sd->chip_revision == Rev012A) { 776 if (sd->chip_revision == Rev012A) {
774 reg_w_val(gspca_dev->dev, 0x8118, 0x29); 777 reg_w_val(gspca_dev->dev, 0x8118, 0x29);
775 reg_w_val(gspca_dev->dev, 0x8114, 0x08); 778 reg_w_val(gspca_dev->dev, 0x8114, 0x08);
diff --git a/drivers/media/video/gspca/vc032x.c b/drivers/media/video/gspca/vc032x.c
index be46d9232540..17af353ddd1c 100644
--- a/drivers/media/video/gspca/vc032x.c
+++ b/drivers/media/video/gspca/vc032x.c
@@ -1633,10 +1633,13 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
1633 reg_w(dev, 0xa0, 0x09, 0xb003); 1633 reg_w(dev, 0xa0, 0x09, 0xb003);
1634} 1634}
1635 1635
1636/* called on streamoff with alt 0 and on disconnect */
1636static void sd_stop0(struct gspca_dev *gspca_dev) 1637static void sd_stop0(struct gspca_dev *gspca_dev)
1637{ 1638{
1638 struct usb_device *dev = gspca_dev->dev; 1639 struct usb_device *dev = gspca_dev->dev;
1639 1640
1641 if (!gspca_dev->present)
1642 return;
1640 reg_w(dev, 0x89, 0xffff, 0xffff); 1643 reg_w(dev, 0x89, 0xffff, 0xffff);
1641} 1644}
1642 1645
diff --git a/drivers/media/video/gspca/zc3xx.c b/drivers/media/video/gspca/zc3xx.c
index d0a4451dc46f..0befacf49855 100644
--- a/drivers/media/video/gspca/zc3xx.c
+++ b/drivers/media/video/gspca/zc3xx.c
@@ -2266,7 +2266,7 @@ static const struct usb_action hdcs2020b_NoFliker[] = {
2266 {} 2266 {}
2267}; 2267};
2268 2268
2269static const struct usb_action hv7131bxx_Initial[] = { 2269static const struct usb_action hv7131bxx_Initial[] = { /* 320x240 */
2270 {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, 2270 {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL},
2271 {0xa0, 0x10, ZC3XX_R002_CLOCKSELECT}, 2271 {0xa0, 0x10, ZC3XX_R002_CLOCKSELECT},
2272 {0xa0, 0x00, ZC3XX_R010_CMOSSENSORSELECT}, 2272 {0xa0, 0x00, ZC3XX_R010_CMOSSENSORSELECT},
@@ -2290,7 +2290,7 @@ static const struct usb_action hv7131bxx_Initial[] = {
2290 {0xaa, 0x14, 0x0001}, 2290 {0xaa, 0x14, 0x0001},
2291 {0xaa, 0x15, 0x00e8}, 2291 {0xaa, 0x15, 0x00e8},
2292 {0xaa, 0x16, 0x0002}, 2292 {0xaa, 0x16, 0x0002},
2293 {0xaa, 0x17, 0x0086}, 2293 {0xaa, 0x17, 0x0086}, /* 00,17,88,aa */
2294 {0xaa, 0x31, 0x0038}, 2294 {0xaa, 0x31, 0x0038},
2295 {0xaa, 0x32, 0x0038}, 2295 {0xaa, 0x32, 0x0038},
2296 {0xaa, 0x33, 0x0038}, 2296 {0xaa, 0x33, 0x0038},
@@ -2309,7 +2309,7 @@ static const struct usb_action hv7131bxx_Initial[] = {
2309 {0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05}, 2309 {0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05},
2310 {0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE}, 2310 {0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE},
2311 {0xa0, 0x08, ZC3XX_R301_EEPROMACCESS}, 2311 {0xa0, 0x08, ZC3XX_R301_EEPROMACCESS},
2312 {0xaa, 0x02, 0x0080}, /* {0xaa, 0x02, 0x0090}; */ 2312 {0xaa, 0x02, 0x0090}, /* 00,02,80,aa */
2313 {0xa1, 0x01, 0x0002}, 2313 {0xa1, 0x01, 0x0002},
2314 {0xa0, 0x00, ZC3XX_R092_I2CADDRESSSELECT}, 2314 {0xa0, 0x00, ZC3XX_R092_I2CADDRESSSELECT},
2315 {0xa0, 0x02, ZC3XX_R090_I2CCOMMAND}, 2315 {0xa0, 0x02, ZC3XX_R090_I2CCOMMAND},
@@ -2374,7 +2374,7 @@ static const struct usb_action hv7131bxx_Initial[] = {
2374 {} 2374 {}
2375}; 2375};
2376 2376
2377static const struct usb_action hv7131bxx_InitialScale[] = { 2377static const struct usb_action hv7131bxx_InitialScale[] = { /* 640x480*/
2378 {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, 2378 {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL},
2379 {0xa0, 0x00, ZC3XX_R002_CLOCKSELECT}, 2379 {0xa0, 0x00, ZC3XX_R002_CLOCKSELECT},
2380 {0xa0, 0x00, ZC3XX_R010_CMOSSENSORSELECT}, 2380 {0xa0, 0x00, ZC3XX_R010_CMOSSENSORSELECT},
@@ -6388,6 +6388,8 @@ static void setbrightness(struct gspca_dev *gspca_dev)
6388/*fixme: is it really write to 011d and 018d for all other sensors? */ 6388/*fixme: is it really write to 011d and 018d for all other sensors? */
6389 brightness = sd->brightness; 6389 brightness = sd->brightness;
6390 reg_w(gspca_dev->dev, brightness, 0x011d); 6390 reg_w(gspca_dev->dev, brightness, 0x011d);
6391 if (sd->sensor == SENSOR_HV7131B)
6392 return;
6391 if (brightness < 0x70) 6393 if (brightness < 0x70)
6392 brightness += 0x10; 6394 brightness += 0x10;
6393 else 6395 else
@@ -6529,6 +6531,7 @@ static void setquality(struct gspca_dev *gspca_dev)
6529 6531
6530 switch (sd->sensor) { 6532 switch (sd->sensor) {
6531 case SENSOR_GC0305: 6533 case SENSOR_GC0305:
6534 case SENSOR_HV7131B:
6532 case SENSOR_OV7620: 6535 case SENSOR_OV7620:
6533 case SENSOR_PO2030: 6536 case SENSOR_PO2030:
6534 return; 6537 return;
@@ -7209,7 +7212,6 @@ static int sd_start(struct gspca_dev *gspca_dev)
7209 mode = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv; 7212 mode = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv;
7210 zc3_init = init_tb[(int) sd->sensor][mode]; 7213 zc3_init = init_tb[(int) sd->sensor][mode];
7211 switch (sd->sensor) { 7214 switch (sd->sensor) {
7212 case SENSOR_HV7131B:
7213 case SENSOR_HV7131C: 7215 case SENSOR_HV7131C:
7214 zcxx_probeSensor(gspca_dev); 7216 zcxx_probeSensor(gspca_dev);
7215 break; 7217 break;
@@ -7334,10 +7336,13 @@ static int sd_start(struct gspca_dev *gspca_dev)
7334 return 0; 7336 return 0;
7335} 7337}
7336 7338
7339/* called on streamoff with alt 0 and on disconnect */
7337static void sd_stop0(struct gspca_dev *gspca_dev) 7340static void sd_stop0(struct gspca_dev *gspca_dev)
7338{ 7341{
7339 struct sd *sd = (struct sd *) gspca_dev; 7342 struct sd *sd = (struct sd *) gspca_dev;
7340 7343
7344 if (!gspca_dev->present)
7345 return;
7341 send_unknown(gspca_dev->dev, sd->sensor); 7346 send_unknown(gspca_dev->dev, sd->sensor);
7342} 7347}
7343 7348
diff --git a/drivers/media/video/ivtv/Kconfig b/drivers/media/video/ivtv/Kconfig
index 0069898bddab..c46bfb1569e3 100644
--- a/drivers/media/video/ivtv/Kconfig
+++ b/drivers/media/video/ivtv/Kconfig
@@ -1,6 +1,6 @@
1config VIDEO_IVTV 1config VIDEO_IVTV
2 tristate "Conexant cx23416/cx23415 MPEG encoder/decoder support" 2 tristate "Conexant cx23416/cx23415 MPEG encoder/decoder support"
3 depends on VIDEO_V4L1 && VIDEO_V4L2 && PCI && I2C && EXPERIMENTAL 3 depends on VIDEO_V4L2 && PCI && I2C
4 depends on INPUT # due to VIDEO_IR 4 depends on INPUT # due to VIDEO_IR
5 select I2C_ALGOBIT 5 select I2C_ALGOBIT
6 select VIDEO_IR 6 select VIDEO_IR
@@ -12,7 +12,6 @@ config VIDEO_IVTV
12 select VIDEO_SAA711X 12 select VIDEO_SAA711X
13 select VIDEO_SAA717X 13 select VIDEO_SAA717X
14 select VIDEO_SAA7127 14 select VIDEO_SAA7127
15 select VIDEO_TVAUDIO
16 select VIDEO_CS53L32A 15 select VIDEO_CS53L32A
17 select VIDEO_M52790 16 select VIDEO_M52790
18 select VIDEO_WM8775 17 select VIDEO_WM8775
@@ -32,7 +31,7 @@ config VIDEO_IVTV
32 31
33config VIDEO_FB_IVTV 32config VIDEO_FB_IVTV
34 tristate "Conexant cx23415 framebuffer support" 33 tristate "Conexant cx23415 framebuffer support"
35 depends on VIDEO_IVTV && FB && EXPERIMENTAL 34 depends on VIDEO_IVTV && FB
36 select FB_CFB_FILLRECT 35 select FB_CFB_FILLRECT
37 select FB_CFB_COPYAREA 36 select FB_CFB_COPYAREA
38 select FB_CFB_IMAGEBLIT 37 select FB_CFB_IMAGEBLIT
diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
index aeaa13f6cb36..b69cc1d55e5b 100644
--- a/drivers/media/video/ivtv/ivtv-driver.c
+++ b/drivers/media/video/ivtv/ivtv-driver.c
@@ -875,43 +875,43 @@ static void ivtv_load_and_init_modules(struct ivtv *itv)
875 875
876#ifdef MODULE 876#ifdef MODULE
877 /* load modules */ 877 /* load modules */
878#ifndef CONFIG_MEDIA_TUNER 878#ifdef CONFIG_MEDIA_TUNER_MODULE
879 hw = ivtv_request_module(itv, hw, "tuner", IVTV_HW_TUNER); 879 hw = ivtv_request_module(itv, hw, "tuner", IVTV_HW_TUNER);
880#endif 880#endif
881#ifndef CONFIG_VIDEO_CX25840 881#ifdef CONFIG_VIDEO_CX25840_MODULE
882 hw = ivtv_request_module(itv, hw, "cx25840", IVTV_HW_CX25840); 882 hw = ivtv_request_module(itv, hw, "cx25840", IVTV_HW_CX25840);
883#endif 883#endif
884#ifndef CONFIG_VIDEO_SAA711X 884#ifdef CONFIG_VIDEO_SAA711X_MODULE
885 hw = ivtv_request_module(itv, hw, "saa7115", IVTV_HW_SAA711X); 885 hw = ivtv_request_module(itv, hw, "saa7115", IVTV_HW_SAA711X);
886#endif 886#endif
887#ifndef CONFIG_VIDEO_SAA7127 887#ifdef CONFIG_VIDEO_SAA7127_MODULE
888 hw = ivtv_request_module(itv, hw, "saa7127", IVTV_HW_SAA7127); 888 hw = ivtv_request_module(itv, hw, "saa7127", IVTV_HW_SAA7127);
889#endif 889#endif
890#ifndef CONFIG_VIDEO_SAA717X 890#ifdef CONFIG_VIDEO_SAA717X_MODULE
891 hw = ivtv_request_module(itv, hw, "saa717x", IVTV_HW_SAA717X); 891 hw = ivtv_request_module(itv, hw, "saa717x", IVTV_HW_SAA717X);
892#endif 892#endif
893#ifndef CONFIG_VIDEO_UPD64031A 893#ifdef CONFIG_VIDEO_UPD64031A_MODULE
894 hw = ivtv_request_module(itv, hw, "upd64031a", IVTV_HW_UPD64031A); 894 hw = ivtv_request_module(itv, hw, "upd64031a", IVTV_HW_UPD64031A);
895#endif 895#endif
896#ifndef CONFIG_VIDEO_UPD64083 896#ifdef CONFIG_VIDEO_UPD64083_MODULE
897 hw = ivtv_request_module(itv, hw, "upd64083", IVTV_HW_UPD6408X); 897 hw = ivtv_request_module(itv, hw, "upd64083", IVTV_HW_UPD6408X);
898#endif 898#endif
899#ifndef CONFIG_VIDEO_MSP3400 899#ifdef CONFIG_VIDEO_MSP3400_MODULE
900 hw = ivtv_request_module(itv, hw, "msp3400", IVTV_HW_MSP34XX); 900 hw = ivtv_request_module(itv, hw, "msp3400", IVTV_HW_MSP34XX);
901#endif 901#endif
902#ifndef CONFIG_VIDEO_VP27SMPX 902#ifdef CONFIG_VIDEO_VP27SMPX_MODULE
903 hw = ivtv_request_module(itv, hw, "vp27smpx", IVTV_HW_VP27SMPX); 903 hw = ivtv_request_module(itv, hw, "vp27smpx", IVTV_HW_VP27SMPX);
904#endif 904#endif
905#ifndef CONFIG_VIDEO_WM8775 905#ifdef CONFIG_VIDEO_WM8775_MODULE
906 hw = ivtv_request_module(itv, hw, "wm8775", IVTV_HW_WM8775); 906 hw = ivtv_request_module(itv, hw, "wm8775", IVTV_HW_WM8775);
907#endif 907#endif
908#ifndef CONFIG_VIDEO_WM8739 908#ifdef CONFIG_VIDEO_WM8739_MODULE
909 hw = ivtv_request_module(itv, hw, "wm8739", IVTV_HW_WM8739); 909 hw = ivtv_request_module(itv, hw, "wm8739", IVTV_HW_WM8739);
910#endif 910#endif
911#ifndef CONFIG_VIDEO_CS53L32A 911#ifdef CONFIG_VIDEO_CS53L32A_MODULE
912 hw = ivtv_request_module(itv, hw, "cs53l32a", IVTV_HW_CS53L32A); 912 hw = ivtv_request_module(itv, hw, "cs53l32a", IVTV_HW_CS53L32A);
913#endif 913#endif
914#ifndef CONFIG_VIDEO_M52790 914#ifdef CONFIG_VIDEO_M52790_MODULE
915 hw = ivtv_request_module(itv, hw, "m52790", IVTV_HW_M52790); 915 hw = ivtv_request_module(itv, hw, "m52790", IVTV_HW_M52790);
916#endif 916#endif
917#endif 917#endif
@@ -1211,6 +1211,10 @@ static int __devinit ivtv_probe(struct pci_dev *dev,
1211 1211
1212 if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) { 1212 if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) {
1213 ivtv_call_i2c_clients(itv, VIDIOC_INT_S_STD_OUTPUT, &itv->std); 1213 ivtv_call_i2c_clients(itv, VIDIOC_INT_S_STD_OUTPUT, &itv->std);
1214 /* Turn off the output signal. The mpeg decoder is not yet
1215 active so without this you would get a green image until the
1216 mpeg decoder becomes active. */
1217 ivtv_saa7127(itv, VIDIOC_STREAMOFF, NULL);
1214 } 1218 }
1215 1219
1216 /* clear interrupt mask, effectively disabling interrupts */ 1220 /* clear interrupt mask, effectively disabling interrupts */
@@ -1330,6 +1334,10 @@ int ivtv_init_on_first_open(struct ivtv *itv)
1330 ivtv_s_frequency(NULL, &fh, &vf); 1334 ivtv_s_frequency(NULL, &fh, &vf);
1331 1335
1332 if (itv->card->v4l2_capabilities & V4L2_CAP_VIDEO_OUTPUT) { 1336 if (itv->card->v4l2_capabilities & V4L2_CAP_VIDEO_OUTPUT) {
1337 /* Turn on the TV-out: ivtv_init_mpeg_decoder() initializes
1338 the mpeg decoder so now the saa7127 receives a proper
1339 signal. */
1340 ivtv_saa7127(itv, VIDIOC_STREAMON, NULL);
1333 ivtv_init_mpeg_decoder(itv); 1341 ivtv_init_mpeg_decoder(itv);
1334 } 1342 }
1335 ivtv_s_std(NULL, &fh, &itv->tuner_std); 1343 ivtv_s_std(NULL, &fh, &itv->tuner_std);
@@ -1366,6 +1374,10 @@ static void ivtv_remove(struct pci_dev *pci_dev)
1366 1374
1367 /* Stop all decoding */ 1375 /* Stop all decoding */
1368 IVTV_DEBUG_INFO("Stopping decoding\n"); 1376 IVTV_DEBUG_INFO("Stopping decoding\n");
1377
1378 /* Turn off the TV-out */
1379 if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)
1380 ivtv_saa7127(itv, VIDIOC_STREAMOFF, NULL);
1369 if (atomic_read(&itv->decoding) > 0) { 1381 if (atomic_read(&itv->decoding) > 0) {
1370 int type; 1382 int type;
1371 1383
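
The module-loading hunk in ivtv-driver.c flips the test from "#ifndef CONFIG_VIDEO_X" (load unless built in) to "#ifdef CONFIG_VIDEO_X_MODULE" (load only when the helper is actually configured as a module), which stops ivtv from trying to load drivers that were never enabled at all. Kconfig defines CONFIG_FOO for a built-in option and CONFIG_FOO_MODULE when it is built as a module, so the idiom looks like this (CONFIG_VIDEO_EXAMPLE is a stand-in name):

        #include <linux/kmod.h>

        static void load_example_helper(void)
        {
        #ifdef CONFIG_VIDEO_EXAMPLE_MODULE
                /* Configured as a module: ask userspace to load it now. */
                request_module("video-example");
        #endif
                /* Built-in (CONFIG_VIDEO_EXAMPLE defined): nothing to load.
                 * Not configured at all: nothing to load either -- the case the
                 * old "#ifndef CONFIG_VIDEO_EXAMPLE" test got wrong. */
        }
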
diff --git a/drivers/media/video/ivtv/ivtv-i2c.c b/drivers/media/video/ivtv/ivtv-i2c.c
index 24700c211d52..41dbbe9621a1 100644
--- a/drivers/media/video/ivtv/ivtv-i2c.c
+++ b/drivers/media/video/ivtv/ivtv-i2c.c
@@ -726,6 +726,7 @@ int ivtv_saa7127(struct ivtv *itv, unsigned int cmd, void *arg)
726{ 726{
727 return ivtv_call_i2c_client(itv, IVTV_SAA7127_I2C_ADDR, cmd, arg); 727 return ivtv_call_i2c_client(itv, IVTV_SAA7127_I2C_ADDR, cmd, arg);
728} 728}
729EXPORT_SYMBOL(ivtv_saa7127);
729 730
730int ivtv_saa717x(struct ivtv *itv, unsigned int cmd, void *arg) 731int ivtv_saa717x(struct ivtv *itv, unsigned int cmd, void *arg)
731{ 732{
diff --git a/drivers/media/video/ivtv/ivtv-ioctl.c b/drivers/media/video/ivtv/ivtv-ioctl.c
index 208fb54842f2..4bae38d21ef6 100644
--- a/drivers/media/video/ivtv/ivtv-ioctl.c
+++ b/drivers/media/video/ivtv/ivtv-ioctl.c
@@ -1756,12 +1756,12 @@ static int ivtv_default(struct file *file, void *fh, int cmd, void *arg)
1756 return 0; 1756 return 0;
1757} 1757}
1758 1758
1759static int ivtv_serialized_ioctl(struct ivtv *itv, struct inode *inode, struct file *filp, 1759static long ivtv_serialized_ioctl(struct ivtv *itv, struct file *filp,
1760 unsigned int cmd, unsigned long arg) 1760 unsigned int cmd, unsigned long arg)
1761{ 1761{
1762 struct video_device *vfd = video_devdata(filp); 1762 struct video_device *vfd = video_devdata(filp);
1763 struct ivtv_open_id *id = (struct ivtv_open_id *)filp->private_data; 1763 struct ivtv_open_id *id = (struct ivtv_open_id *)filp->private_data;
1764 int ret; 1764 long ret;
1765 1765
1766 /* Filter dvb ioctls that cannot be handled by the v4l ioctl framework */ 1766 /* Filter dvb ioctls that cannot be handled by the v4l ioctl framework */
1767 switch (cmd) { 1767 switch (cmd) {
@@ -1830,20 +1830,19 @@ static int ivtv_serialized_ioctl(struct ivtv *itv, struct inode *inode, struct f
1830 1830
1831 if (ivtv_debug & IVTV_DBGFLG_IOCTL) 1831 if (ivtv_debug & IVTV_DBGFLG_IOCTL)
1832 vfd->debug = V4L2_DEBUG_IOCTL | V4L2_DEBUG_IOCTL_ARG; 1832 vfd->debug = V4L2_DEBUG_IOCTL | V4L2_DEBUG_IOCTL_ARG;
1833 ret = video_ioctl2(inode, filp, cmd, arg); 1833 ret = __video_ioctl2(filp, cmd, arg);
1834 vfd->debug = 0; 1834 vfd->debug = 0;
1835 return ret; 1835 return ret;
1836} 1836}
1837 1837
1838int ivtv_v4l2_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, 1838long ivtv_v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1839 unsigned long arg)
1840{ 1839{
1841 struct ivtv_open_id *id = (struct ivtv_open_id *)filp->private_data; 1840 struct ivtv_open_id *id = (struct ivtv_open_id *)filp->private_data;
1842 struct ivtv *itv = id->itv; 1841 struct ivtv *itv = id->itv;
1843 int res; 1842 long res;
1844 1843
1845 mutex_lock(&itv->serialize_lock); 1844 mutex_lock(&itv->serialize_lock);
1846 res = ivtv_serialized_ioctl(itv, inode, filp, cmd, arg); 1845 res = ivtv_serialized_ioctl(itv, filp, cmd, arg);
1847 mutex_unlock(&itv->serialize_lock); 1846 mutex_unlock(&itv->serialize_lock);
1848 return res; 1847 return res;
1849} 1848}
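
The ioctl conversion above drops the inode argument and returns long so the handler can be wired to .unlocked_ioctl (see the ivtv-streams.c hunk below); the big kernel lock is then no longer taken around it, and the driver's own serialize_lock mutex is what keeps ioctls serialized. A minimal sketch of that shape with placeholder names:

        #include <linux/fs.h>
        #include <linux/module.h>
        #include <linux/mutex.h>

        static DEFINE_MUTEX(serialize_lock);

        static long my_do_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
        {
                /* ... the real per-command work ... */
                return 0;
        }

        static long my_unlocked_ioctl(struct file *filp, unsigned int cmd,
                                      unsigned long arg)
        {
                long ret;

                /* No BKL with .unlocked_ioctl: the driver serializes explicitly. */
                mutex_lock(&serialize_lock);
                ret = my_do_ioctl(filp, cmd, arg);
                mutex_unlock(&serialize_lock);
                return ret;
        }

        static const struct file_operations my_fops = {
                .owner          = THIS_MODULE,
                .unlocked_ioctl = my_unlocked_ioctl,
        };
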
diff --git a/drivers/media/video/ivtv/ivtv-ioctl.h b/drivers/media/video/ivtv/ivtv-ioctl.h
index 70188588b4f4..58f003412afd 100644
--- a/drivers/media/video/ivtv/ivtv-ioctl.h
+++ b/drivers/media/video/ivtv/ivtv-ioctl.h
@@ -30,7 +30,6 @@ void ivtv_set_funcs(struct video_device *vdev);
30int ivtv_s_std(struct file *file, void *fh, v4l2_std_id *std); 30int ivtv_s_std(struct file *file, void *fh, v4l2_std_id *std);
31int ivtv_s_frequency(struct file *file, void *fh, struct v4l2_frequency *vf); 31int ivtv_s_frequency(struct file *file, void *fh, struct v4l2_frequency *vf);
32int ivtv_s_input(struct file *file, void *fh, unsigned int inp); 32int ivtv_s_input(struct file *file, void *fh, unsigned int inp);
33int ivtv_v4l2_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, 33long ivtv_v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
34 unsigned long arg);
35 34
36#endif 35#endif
diff --git a/drivers/media/video/ivtv/ivtv-streams.c b/drivers/media/video/ivtv/ivtv-streams.c
index 5bbf31e39304..9b7aa79eb267 100644
--- a/drivers/media/video/ivtv/ivtv-streams.c
+++ b/drivers/media/video/ivtv/ivtv-streams.c
@@ -48,7 +48,7 @@ static const struct file_operations ivtv_v4l2_enc_fops = {
48 .read = ivtv_v4l2_read, 48 .read = ivtv_v4l2_read,
49 .write = ivtv_v4l2_write, 49 .write = ivtv_v4l2_write,
50 .open = ivtv_v4l2_open, 50 .open = ivtv_v4l2_open,
51 .ioctl = ivtv_v4l2_ioctl, 51 .unlocked_ioctl = ivtv_v4l2_ioctl,
52 .compat_ioctl = v4l_compat_ioctl32, 52 .compat_ioctl = v4l_compat_ioctl32,
53 .release = ivtv_v4l2_close, 53 .release = ivtv_v4l2_close,
54 .poll = ivtv_v4l2_enc_poll, 54 .poll = ivtv_v4l2_enc_poll,
@@ -59,7 +59,7 @@ static const struct file_operations ivtv_v4l2_dec_fops = {
59 .read = ivtv_v4l2_read, 59 .read = ivtv_v4l2_read,
60 .write = ivtv_v4l2_write, 60 .write = ivtv_v4l2_write,
61 .open = ivtv_v4l2_open, 61 .open = ivtv_v4l2_open,
62 .ioctl = ivtv_v4l2_ioctl, 62 .unlocked_ioctl = ivtv_v4l2_ioctl,
63 .compat_ioctl = v4l_compat_ioctl32, 63 .compat_ioctl = v4l_compat_ioctl32,
64 .release = ivtv_v4l2_close, 64 .release = ivtv_v4l2_close,
65 .poll = ivtv_v4l2_dec_poll, 65 .poll = ivtv_v4l2_dec_poll,
diff --git a/drivers/media/video/ivtv/ivtvfb.c b/drivers/media/video/ivtv/ivtvfb.c
index 8a4a150b12fb..921e281876f8 100644
--- a/drivers/media/video/ivtv/ivtvfb.c
+++ b/drivers/media/video/ivtv/ivtvfb.c
@@ -48,6 +48,7 @@
48#endif 48#endif
49 49
50#include "ivtv-driver.h" 50#include "ivtv-driver.h"
51#include "ivtv-i2c.h"
51#include "ivtv-udma.h" 52#include "ivtv-udma.h"
52#include "ivtv-mailbox.h" 53#include "ivtv-mailbox.h"
53 54
@@ -894,11 +895,16 @@ static int ivtvfb_blank(int blank_mode, struct fb_info *info)
894 switch (blank_mode) { 895 switch (blank_mode) {
895 case FB_BLANK_UNBLANK: 896 case FB_BLANK_UNBLANK:
896 ivtv_vapi(itv, CX2341X_OSD_SET_STATE, 1, 1); 897 ivtv_vapi(itv, CX2341X_OSD_SET_STATE, 1, 1);
898 ivtv_saa7127(itv, VIDIOC_STREAMON, NULL);
897 break; 899 break;
898 case FB_BLANK_NORMAL: 900 case FB_BLANK_NORMAL:
899 case FB_BLANK_HSYNC_SUSPEND: 901 case FB_BLANK_HSYNC_SUSPEND:
900 case FB_BLANK_VSYNC_SUSPEND: 902 case FB_BLANK_VSYNC_SUSPEND:
903 ivtv_vapi(itv, CX2341X_OSD_SET_STATE, 1, 0);
904 ivtv_saa7127(itv, VIDIOC_STREAMON, NULL);
905 break;
901 case FB_BLANK_POWERDOWN: 906 case FB_BLANK_POWERDOWN:
907 ivtv_saa7127(itv, VIDIOC_STREAMOFF, NULL);
902 ivtv_vapi(itv, CX2341X_OSD_SET_STATE, 1, 0); 908 ivtv_vapi(itv, CX2341X_OSD_SET_STATE, 1, 0);
903 break; 909 break;
904 } 910 }
diff --git a/drivers/media/video/pvrusb2/pvrusb2-encoder.c b/drivers/media/video/pvrusb2/pvrusb2-encoder.c
index a1252d673b41..273d2a1aa220 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-encoder.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-encoder.c
@@ -402,6 +402,10 @@ static int pvr2_encoder_prep_config(struct pvr2_hdw *hdw)
402 ret |= pvr2_encoder_vcmd(hdw, CX2341X_ENC_MISC,4, 0,3,0,0); 402 ret |= pvr2_encoder_vcmd(hdw, CX2341X_ENC_MISC,4, 0,3,0,0);
403 ret |= pvr2_encoder_vcmd(hdw, CX2341X_ENC_MISC,4,15,0,0,0); 403 ret |= pvr2_encoder_vcmd(hdw, CX2341X_ENC_MISC,4,15,0,0,0);
404 404
405 /* prevent the PTSs from slowly drifting away in the generated
406 MPEG stream */
407 ret |= pvr2_encoder_vcmd(hdw, CX2341X_ENC_MISC, 2, 4, 1);
408
405 return ret; 409 return ret;
406} 410}
407 411
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw.c b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
index 94265bd3d926..5b81ba469641 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
@@ -60,7 +60,6 @@ static struct pvr2_hdw *unit_pointers[PVR_NUM] = {[ 0 ... PVR_NUM-1 ] = NULL};
60static DEFINE_MUTEX(pvr2_unit_mtx); 60static DEFINE_MUTEX(pvr2_unit_mtx);
61 61
62static int ctlchg; 62static int ctlchg;
63static int initusbreset = 1;
64static int procreload; 63static int procreload;
65static int tuner[PVR_NUM] = { [0 ... PVR_NUM-1] = -1 }; 64static int tuner[PVR_NUM] = { [0 ... PVR_NUM-1] = -1 };
66static int tolerance[PVR_NUM] = { [0 ... PVR_NUM-1] = 0 }; 65static int tolerance[PVR_NUM] = { [0 ... PVR_NUM-1] = 0 };
@@ -71,8 +70,6 @@ module_param(ctlchg, int, S_IRUGO|S_IWUSR);
71MODULE_PARM_DESC(ctlchg, "0=optimize ctl change 1=always accept new ctl value"); 70MODULE_PARM_DESC(ctlchg, "0=optimize ctl change 1=always accept new ctl value");
72module_param(init_pause_msec, int, S_IRUGO|S_IWUSR); 71module_param(init_pause_msec, int, S_IRUGO|S_IWUSR);
73MODULE_PARM_DESC(init_pause_msec, "hardware initialization settling delay"); 72MODULE_PARM_DESC(init_pause_msec, "hardware initialization settling delay");
74module_param(initusbreset, int, S_IRUGO|S_IWUSR);
75MODULE_PARM_DESC(initusbreset, "Do USB reset device on probe");
76module_param(procreload, int, S_IRUGO|S_IWUSR); 73module_param(procreload, int, S_IRUGO|S_IWUSR);
77MODULE_PARM_DESC(procreload, 74MODULE_PARM_DESC(procreload,
78 "Attempt init failure recovery with firmware reload"); 75 "Attempt init failure recovery with firmware reload");
@@ -1967,9 +1964,6 @@ static void pvr2_hdw_setup_low(struct pvr2_hdw *hdw)
1967 } 1964 }
1968 hdw->fw1_state = FW1_STATE_OK; 1965 hdw->fw1_state = FW1_STATE_OK;
1969 1966
1970 if (initusbreset) {
1971 pvr2_hdw_device_reset(hdw);
1972 }
1973 if (!pvr2_hdw_dev_ok(hdw)) return; 1967 if (!pvr2_hdw_dev_ok(hdw)) return;
1974 1968
1975 for (idx = 0; idx < hdw->hdw_desc->client_modules.cnt; idx++) { 1969 for (idx = 0; idx < hdw->hdw_desc->client_modules.cnt; idx++) {
diff --git a/drivers/media/video/pvrusb2/pvrusb2-v4l2.c b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
index f048d80b77e5..97ed95957992 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
@@ -168,7 +168,7 @@ static const char *get_v4l_name(int v4l_type)
168 * This is part of Video 4 Linux API. The procedure handles ioctl() calls. 168 * This is part of Video 4 Linux API. The procedure handles ioctl() calls.
169 * 169 *
170 */ 170 */
171static int pvr2_v4l2_do_ioctl(struct inode *inode, struct file *file, 171static int __pvr2_v4l2_do_ioctl(struct file *file,
172 unsigned int cmd, void *arg) 172 unsigned int cmd, void *arg)
173{ 173{
174 struct pvr2_v4l2_fh *fh = file->private_data; 174 struct pvr2_v4l2_fh *fh = file->private_data;
@@ -863,8 +863,8 @@ static int pvr2_v4l2_do_ioctl(struct inode *inode, struct file *file,
863#endif 863#endif
864 864
865 default : 865 default :
866 ret = v4l_compat_translate_ioctl(inode,file,cmd, 866 ret = v4l_compat_translate_ioctl(file, cmd,
867 arg,pvr2_v4l2_do_ioctl); 867 arg, __pvr2_v4l2_do_ioctl);
868 } 868 }
869 869
870 pvr2_hdw_commit_ctl(hdw); 870 pvr2_hdw_commit_ctl(hdw);
@@ -890,10 +890,15 @@ static int pvr2_v4l2_do_ioctl(struct inode *inode, struct file *file,
890 return ret; 890 return ret;
891} 891}
892 892
893static int pvr2_v4l2_do_ioctl(struct inode *inode, struct file *file,
894 unsigned int cmd, void *arg)
895{
896 return __pvr2_v4l2_do_ioctl(file, cmd, arg);
897}
893 898
894static void pvr2_v4l2_dev_destroy(struct pvr2_v4l2_dev *dip) 899static void pvr2_v4l2_dev_destroy(struct pvr2_v4l2_dev *dip)
895{ 900{
896 int minor_id = dip->devbase.minor; 901 int num = dip->devbase.num;
897 struct pvr2_hdw *hdw = dip->v4lp->channel.mc_head->hdw; 902 struct pvr2_hdw *hdw = dip->v4lp->channel.mc_head->hdw;
898 enum pvr2_config cfg = dip->config; 903 enum pvr2_config cfg = dip->config;
899 int v4l_type = dip->v4l_type; 904 int v4l_type = dip->v4l_type;
@@ -909,7 +914,7 @@ static void pvr2_v4l2_dev_destroy(struct pvr2_v4l2_dev *dip)
909 video_unregister_device(&dip->devbase); 914 video_unregister_device(&dip->devbase);
910 915
911 printk(KERN_INFO "pvrusb2: unregistered device %s%u [%s]\n", 916 printk(KERN_INFO "pvrusb2: unregistered device %s%u [%s]\n",
912 get_v4l_name(v4l_type),minor_id & 0x1f, 917 get_v4l_name(v4l_type), num,
913 pvr2_config_get_name(cfg)); 918 pvr2_config_get_name(cfg));
914 919
915} 920}
@@ -1310,7 +1315,7 @@ static void pvr2_v4l2_dev_init(struct pvr2_v4l2_dev *dip,
1310 } 1315 }
1311 1316
1312 printk(KERN_INFO "pvrusb2: registered device %s%u [%s]\n", 1317 printk(KERN_INFO "pvrusb2: registered device %s%u [%s]\n",
1313 get_v4l_name(dip->v4l_type),dip->devbase.minor & 0x1f, 1318 get_v4l_name(dip->v4l_type), dip->devbase.num,
1314 pvr2_config_get_name(dip->config)); 1319 pvr2_config_get_name(dip->config));
1315 1320
1316 pvr2_hdw_v4l_store_minor_number(vp->channel.mc_head->hdw, 1321 pvr2_hdw_v4l_store_minor_number(vp->channel.mc_head->hdw,
diff --git a/drivers/media/video/pwc/pwc-if.c b/drivers/media/video/pwc/pwc-if.c
index ab28389b4cda..f3897a3fdb75 100644
--- a/drivers/media/video/pwc/pwc-if.c
+++ b/drivers/media/video/pwc/pwc-if.c
@@ -1795,7 +1795,7 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
1795 goto err; 1795 goto err;
1796 } 1796 }
1797 else { 1797 else {
1798 PWC_INFO("Registered as /dev/video%d.\n", pdev->vdev->minor & 0x3F); 1798 PWC_INFO("Registered as /dev/video%d.\n", pdev->vdev->num);
1799 } 1799 }
1800 1800
1801 /* occupy slot */ 1801 /* occupy slot */
diff --git a/drivers/media/video/s2255drv.c b/drivers/media/video/s2255drv.c
index 5272926db73e..3c3f8cf73108 100644
--- a/drivers/media/video/s2255drv.c
+++ b/drivers/media/video/s2255drv.c
@@ -192,7 +192,7 @@ struct s2255_dmaqueue {
192#define S2255_FW_FAILED 3 192#define S2255_FW_FAILED 3
193#define S2255_FW_DISCONNECTING 4 193#define S2255_FW_DISCONNECTING 4
194 194
195#define S2255_FW_MARKER 0x22552f2f 195#define S2255_FW_MARKER cpu_to_le32(0x22552f2f)
196/* 2255 read states */ 196/* 2255 read states */
197#define S2255_READ_IDLE 0 197#define S2255_READ_IDLE 0
198#define S2255_READ_FRAME 1 198#define S2255_READ_FRAME 1
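
Redefining S2255_FW_MARKER as cpu_to_le32(0x22552f2f) stores the constant in the same byte order as the little-endian 32-bit word read out of the firmware image, so the comparison also works on big-endian hosts. A small user-space illustration of the idea, using the C library's endian helper instead of the kernel's cpu_to_le32():

        #include <stdint.h>
        #include <stdio.h>
        #include <string.h>
        #include <endian.h>

        int main(void)
        {
                /* Four bytes exactly as they sit in a little-endian firmware file. */
                const uint8_t fw[4] = { 0x2f, 0x2f, 0x55, 0x22 };
                uint32_t word;

                memcpy(&word, fw, sizeof(word));        /* raw, unconverted load */

                /* Comparing against a host-order constant only works on
                 * little-endian machines; converting the constant makes the
                 * test portable, which is what cpu_to_le32() does in-kernel. */
                if (word == htole32(0x22552f2fu))
                        puts("firmware marker found");
                return 0;
        }
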
diff --git a/drivers/media/video/saa7110.c b/drivers/media/video/saa7110.c
index adf2ba79496a..37860698f782 100644
--- a/drivers/media/video/saa7110.c
+++ b/drivers/media/video/saa7110.c
@@ -47,7 +47,7 @@ module_param(debug, int, 0);
47MODULE_PARM_DESC(debug, "Debug level (0-1)"); 47MODULE_PARM_DESC(debug, "Debug level (0-1)");
48 48
49#define SAA7110_MAX_INPUT 9 /* 6 CVBS, 3 SVHS */ 49#define SAA7110_MAX_INPUT 9 /* 6 CVBS, 3 SVHS */
50#define SAA7110_MAX_OUTPUT 0 /* its a decoder only */ 50#define SAA7110_MAX_OUTPUT 1 /* 1 YUV */
51 51
52#define SAA7110_NR_REG 0x35 52#define SAA7110_NR_REG 0x35
53 53
@@ -327,7 +327,7 @@ saa7110_command (struct i2c_client *client,
327 327
328 case DECODER_SET_INPUT: 328 case DECODER_SET_INPUT:
329 v = *(int *) arg; 329 v = *(int *) arg;
330 if (v < 0 || v > SAA7110_MAX_INPUT) { 330 if (v < 0 || v >= SAA7110_MAX_INPUT) {
331 v4l_dbg(1, debug, client, "input=%d not available\n", v); 331 v4l_dbg(1, debug, client, "input=%d not available\n", v);
332 return -EINVAL; 332 return -EINVAL;
333 } 333 }
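
SAA7110_MAX_INPUT is the number of inputs (9), so valid indices are 0..8 and the old test "v > SAA7110_MAX_INPUT" let index 9 through; the fix above is the usual "count versus highest index" check. A tiny user-space illustration:

        #include <stdio.h>

        #define MAX_INPUT 9     /* number of inputs; valid indices are 0 .. MAX_INPUT-1 */

        static int input_valid(int v)
        {
                return v >= 0 && v < MAX_INPUT;         /* '< count', not '<= count' */
        }

        int main(void)
        {
                printf("8 valid? %d\n", input_valid(8));        /* 1: last real input */
                printf("9 valid? %d\n", input_valid(9));        /* 0: one past the end */
                return 0;
        }
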
diff --git a/drivers/media/video/saa7134/saa7134-core.c b/drivers/media/video/saa7134/saa7134-core.c
index b686bfabbde0..dfbe08a9ad9b 100644
--- a/drivers/media/video/saa7134/saa7134-core.c
+++ b/drivers/media/video/saa7134/saa7134-core.c
@@ -941,7 +941,8 @@ static int __devinit saa7134_initdev(struct pci_dev *pci_dev,
941 dev->name,(unsigned long long)pci_resource_start(pci_dev,0)); 941 dev->name,(unsigned long long)pci_resource_start(pci_dev,0));
942 goto fail1; 942 goto fail1;
943 } 943 }
944 dev->lmmio = ioremap(pci_resource_start(pci_dev,0), 0x1000); 944 dev->lmmio = ioremap(pci_resource_start(pci_dev, 0),
945 pci_resource_len(pci_dev, 0));
945 dev->bmmio = (__u8 __iomem *)dev->lmmio; 946 dev->bmmio = (__u8 __iomem *)dev->lmmio;
946 if (NULL == dev->lmmio) { 947 if (NULL == dev->lmmio) {
947 err = -EIO; 948 err = -EIO;
@@ -996,7 +997,7 @@ static int __devinit saa7134_initdev(struct pci_dev *pci_dev,
996 goto fail4; 997 goto fail4;
997 } 998 }
998 printk(KERN_INFO "%s: registered device video%d [v4l2]\n", 999 printk(KERN_INFO "%s: registered device video%d [v4l2]\n",
999 dev->name,dev->video_dev->minor & 0x1f); 1000 dev->name, dev->video_dev->num);
1000 1001
1001 dev->vbi_dev = vdev_init(dev, &saa7134_video_template, "vbi"); 1002 dev->vbi_dev = vdev_init(dev, &saa7134_video_template, "vbi");
1002 1003
@@ -1005,7 +1006,7 @@ static int __devinit saa7134_initdev(struct pci_dev *pci_dev,
1005 if (err < 0) 1006 if (err < 0)
1006 goto fail4; 1007 goto fail4;
1007 printk(KERN_INFO "%s: registered device vbi%d\n", 1008 printk(KERN_INFO "%s: registered device vbi%d\n",
1008 dev->name,dev->vbi_dev->minor & 0x1f); 1009 dev->name, dev->vbi_dev->num);
1009 1010
1010 if (card_has_radio(dev)) { 1011 if (card_has_radio(dev)) {
1011 dev->radio_dev = vdev_init(dev,&saa7134_radio_template,"radio"); 1012 dev->radio_dev = vdev_init(dev,&saa7134_radio_template,"radio");
@@ -1014,7 +1015,7 @@ static int __devinit saa7134_initdev(struct pci_dev *pci_dev,
1014 if (err < 0) 1015 if (err < 0)
1015 goto fail4; 1016 goto fail4;
1016 printk(KERN_INFO "%s: registered device radio%d\n", 1017 printk(KERN_INFO "%s: registered device radio%d\n",
1017 dev->name,dev->radio_dev->minor & 0x1f); 1018 dev->name, dev->radio_dev->num);
1018 } 1019 }
1019 1020
1020 /* everything worked */ 1021 /* everything worked */
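
Besides the switch to the v4l device's ->num for log messages, the saa7134 hunk replaces a hard-coded ioremap of 0x1000 bytes with pci_resource_len(), so the mapping always covers exactly the region BAR 0 advertises. A hedged sketch of the pattern (error handling trimmed, names are placeholders; the BAR is assumed to have been claimed beforehand, e.g. with request_mem_region or pci_request_regions):

        #include <linux/pci.h>
        #include <linux/io.h>

        static int map_bar0(struct pci_dev *pdev, void __iomem **regs)
        {
                /* Map all of BAR 0 as the device reports it, instead of
                 * assuming a fixed size such as 0x1000. */
                *regs = ioremap(pci_resource_start(pdev, 0),
                                pci_resource_len(pdev, 0));
                if (!*regs)
                        return -EIO;
                return 0;                /* pair with iounmap(*regs) on teardown */
        }
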
diff --git a/drivers/media/video/saa7134/saa7134-empress.c b/drivers/media/video/saa7134/saa7134-empress.c
index 9a8766a78a0c..7f40511bcc04 100644
--- a/drivers/media/video/saa7134/saa7134-empress.c
+++ b/drivers/media/video/saa7134/saa7134-empress.c
@@ -534,7 +534,7 @@ static int empress_init(struct saa7134_dev *dev)
534 return err; 534 return err;
535 } 535 }
536 printk(KERN_INFO "%s: registered device video%d [mpeg]\n", 536 printk(KERN_INFO "%s: registered device video%d [mpeg]\n",
537 dev->name,dev->empress_dev->minor & 0x1f); 537 dev->name, dev->empress_dev->num);
538 538
539 videobuf_queue_sg_init(&dev->empress_tsq, &saa7134_ts_qops, 539 videobuf_queue_sg_init(&dev->empress_tsq, &saa7134_ts_qops,
540 &dev->pci->dev, &dev->slock, 540 &dev->pci->dev, &dev->slock,
diff --git a/drivers/media/video/se401.c b/drivers/media/video/se401.c
index ae3949180c4e..044a2e94c34d 100644
--- a/drivers/media/video/se401.c
+++ b/drivers/media/video/se401.c
@@ -1412,7 +1412,7 @@ static int se401_probe(struct usb_interface *intf,
1412 return -EIO; 1412 return -EIO;
1413 } 1413 }
1414 dev_info(&intf->dev, "registered new video device: video%d\n", 1414 dev_info(&intf->dev, "registered new video device: video%d\n",
1415 se401->vdev.minor); 1415 se401->vdev.num);
1416 1416
1417 usb_set_intfdata (intf, se401); 1417 usb_set_intfdata (intf, se401);
1418 return 0; 1418 return 0;
diff --git a/drivers/media/video/sn9c102/sn9c102_core.c b/drivers/media/video/sn9c102/sn9c102_core.c
index 20e30bd9364b..fcd2b62f92c4 100644
--- a/drivers/media/video/sn9c102/sn9c102_core.c
+++ b/drivers/media/video/sn9c102/sn9c102_core.c
@@ -1008,7 +1008,7 @@ static int sn9c102_stream_interrupt(struct sn9c102_device* cam)
1008 cam->state |= DEV_MISCONFIGURED; 1008 cam->state |= DEV_MISCONFIGURED;
1009 DBG(1, "URB timeout reached. The camera is misconfigured. " 1009 DBG(1, "URB timeout reached. The camera is misconfigured. "
1010 "To use it, close and open /dev/video%d again.", 1010 "To use it, close and open /dev/video%d again.",
1011 cam->v4ldev->minor); 1011 cam->v4ldev->num);
1012 return -EIO; 1012 return -EIO;
1013 } 1013 }
1014 1014
@@ -1734,7 +1734,7 @@ static void sn9c102_release_resources(struct kref *kref)
1734 1734
1735 cam = container_of(kref, struct sn9c102_device, kref); 1735 cam = container_of(kref, struct sn9c102_device, kref);
1736 1736
1737 DBG(2, "V4L2 device /dev/video%d deregistered", cam->v4ldev->minor); 1737 DBG(2, "V4L2 device /dev/video%d deregistered", cam->v4ldev->num);
1738 video_set_drvdata(cam->v4ldev, NULL); 1738 video_set_drvdata(cam->v4ldev, NULL);
1739 video_unregister_device(cam->v4ldev); 1739 video_unregister_device(cam->v4ldev);
1740 usb_put_dev(cam->usbdev); 1740 usb_put_dev(cam->usbdev);
@@ -1792,7 +1792,7 @@ static int sn9c102_open(struct inode* inode, struct file* filp)
1792 1792
1793 if (cam->users) { 1793 if (cam->users) {
1794 DBG(2, "Device /dev/video%d is already in use", 1794 DBG(2, "Device /dev/video%d is already in use",
1795 cam->v4ldev->minor); 1795 cam->v4ldev->num);
1796 DBG(3, "Simultaneous opens are not supported"); 1796 DBG(3, "Simultaneous opens are not supported");
1797 /* 1797 /*
1798 open() must follow the open flags and should block 1798 open() must follow the open flags and should block
@@ -1845,7 +1845,7 @@ static int sn9c102_open(struct inode* inode, struct file* filp)
1845 cam->frame_count = 0; 1845 cam->frame_count = 0;
1846 sn9c102_empty_framequeues(cam); 1846 sn9c102_empty_framequeues(cam);
1847 1847
1848 DBG(3, "Video device /dev/video%d is open", cam->v4ldev->minor); 1848 DBG(3, "Video device /dev/video%d is open", cam->v4ldev->num);
1849 1849
1850out: 1850out:
1851 mutex_unlock(&cam->open_mutex); 1851 mutex_unlock(&cam->open_mutex);
@@ -1870,7 +1870,7 @@ static int sn9c102_release(struct inode* inode, struct file* filp)
1870 cam->users--; 1870 cam->users--;
1871 wake_up_interruptible_nr(&cam->wait_open, 1); 1871 wake_up_interruptible_nr(&cam->wait_open, 1);
1872 1872
1873 DBG(3, "Video device /dev/video%d closed", cam->v4ldev->minor); 1873 DBG(3, "Video device /dev/video%d closed", cam->v4ldev->num);
1874 1874
1875 kref_put(&cam->kref, sn9c102_release_resources); 1875 kref_put(&cam->kref, sn9c102_release_resources);
1876 1876
@@ -2432,7 +2432,7 @@ sn9c102_vidioc_s_crop(struct sn9c102_device* cam, void __user * arg)
2432 cam->state |= DEV_MISCONFIGURED; 2432 cam->state |= DEV_MISCONFIGURED;
2433 DBG(1, "VIDIOC_S_CROP failed because of hardware problems. To " 2433 DBG(1, "VIDIOC_S_CROP failed because of hardware problems. To "
2434 "use the camera, close and open /dev/video%d again.", 2434 "use the camera, close and open /dev/video%d again.",
2435 cam->v4ldev->minor); 2435 cam->v4ldev->num);
2436 return -EIO; 2436 return -EIO;
2437 } 2437 }
2438 2438
@@ -2445,7 +2445,7 @@ sn9c102_vidioc_s_crop(struct sn9c102_device* cam, void __user * arg)
2445 cam->state |= DEV_MISCONFIGURED; 2445 cam->state |= DEV_MISCONFIGURED;
2446 DBG(1, "VIDIOC_S_CROP failed because of not enough memory. To " 2446 DBG(1, "VIDIOC_S_CROP failed because of not enough memory. To "
2447 "use the camera, close and open /dev/video%d again.", 2447 "use the camera, close and open /dev/video%d again.",
2448 cam->v4ldev->minor); 2448 cam->v4ldev->num);
2449 return -ENOMEM; 2449 return -ENOMEM;
2450 } 2450 }
2451 2451
@@ -2689,7 +2689,7 @@ sn9c102_vidioc_try_s_fmt(struct sn9c102_device* cam, unsigned int cmd,
2689 cam->state |= DEV_MISCONFIGURED; 2689 cam->state |= DEV_MISCONFIGURED;
2690 DBG(1, "VIDIOC_S_FMT failed because of hardware problems. To " 2690 DBG(1, "VIDIOC_S_FMT failed because of hardware problems. To "
2691 "use the camera, close and open /dev/video%d again.", 2691 "use the camera, close and open /dev/video%d again.",
2692 cam->v4ldev->minor); 2692 cam->v4ldev->num);
2693 return -EIO; 2693 return -EIO;
2694 } 2694 }
2695 2695
@@ -2701,7 +2701,7 @@ sn9c102_vidioc_try_s_fmt(struct sn9c102_device* cam, unsigned int cmd,
2701 cam->state |= DEV_MISCONFIGURED; 2701 cam->state |= DEV_MISCONFIGURED;
2702 DBG(1, "VIDIOC_S_FMT failed because of not enough memory. To " 2702 DBG(1, "VIDIOC_S_FMT failed because of not enough memory. To "
2703 "use the camera, close and open /dev/video%d again.", 2703 "use the camera, close and open /dev/video%d again.",
2704 cam->v4ldev->minor); 2704 cam->v4ldev->num);
2705 return -ENOMEM; 2705 return -ENOMEM;
2706 } 2706 }
2707 2707
@@ -2748,7 +2748,7 @@ sn9c102_vidioc_s_jpegcomp(struct sn9c102_device* cam, void __user * arg)
2748 cam->state |= DEV_MISCONFIGURED; 2748 cam->state |= DEV_MISCONFIGURED;
2749 DBG(1, "VIDIOC_S_JPEGCOMP failed because of hardware " 2749 DBG(1, "VIDIOC_S_JPEGCOMP failed because of hardware "
2750 "problems. To use the camera, close and open " 2750 "problems. To use the camera, close and open "
2751 "/dev/video%d again.", cam->v4ldev->minor); 2751 "/dev/video%d again.", cam->v4ldev->num);
2752 return -EIO; 2752 return -EIO;
2753 } 2753 }
2754 2754
@@ -3348,7 +3348,7 @@ sn9c102_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
3348 goto fail; 3348 goto fail;
3349 } 3349 }
3350 3350
3351 DBG(2, "V4L2 device registered as /dev/video%d", cam->v4ldev->minor); 3351 DBG(2, "V4L2 device registered as /dev/video%d", cam->v4ldev->num);
3352 3352
3353 video_set_drvdata(cam->v4ldev, cam); 3353 video_set_drvdata(cam->v4ldev, cam);
3354 cam->module_param.force_munmap = force_munmap[dev_nr]; 3354 cam->module_param.force_munmap = force_munmap[dev_nr];
@@ -3402,7 +3402,7 @@ static void sn9c102_usb_disconnect(struct usb_interface* intf)
3402 if (cam->users) { 3402 if (cam->users) {
3403 DBG(2, "Device /dev/video%d is open! Deregistration and " 3403 DBG(2, "Device /dev/video%d is open! Deregistration and "
3404 "memory deallocation are deferred.", 3404 "memory deallocation are deferred.",
3405 cam->v4ldev->minor); 3405 cam->v4ldev->num);
3406 cam->state |= DEV_MISCONFIGURED; 3406 cam->state |= DEV_MISCONFIGURED;
3407 sn9c102_stop_transfer(cam); 3407 sn9c102_stop_transfer(cam);
3408 cam->state |= DEV_DISCONNECTED; 3408 cam->state |= DEV_DISCONNECTED;
diff --git a/drivers/media/video/stk-webcam.c b/drivers/media/video/stk-webcam.c
index edaea4964513..e9eb6d754d5c 100644
--- a/drivers/media/video/stk-webcam.c
+++ b/drivers/media/video/stk-webcam.c
@@ -1331,7 +1331,7 @@ static int stk_register_video_device(struct stk_camera *dev)
1331 STK_ERROR("v4l registration failed\n"); 1331 STK_ERROR("v4l registration failed\n");
1332 else 1332 else
1333 STK_INFO("Syntek USB2.0 Camera is now controlling video device" 1333 STK_INFO("Syntek USB2.0 Camera is now controlling video device"
1334 " /dev/video%d\n", dev->vdev.minor); 1334 " /dev/video%d\n", dev->vdev.num);
1335 return err; 1335 return err;
1336} 1336}
1337 1337
@@ -1426,7 +1426,7 @@ static void stk_camera_disconnect(struct usb_interface *interface)
1426 stk_remove_sysfs_files(&dev->vdev); 1426 stk_remove_sysfs_files(&dev->vdev);
1427 1427
1428 STK_INFO("Syntek USB2.0 Camera release resources " 1428 STK_INFO("Syntek USB2.0 Camera release resources "
1429 "video device /dev/video%d\n", dev->vdev.minor); 1429 "video device /dev/video%d\n", dev->vdev.num);
1430 1430
1431 video_unregister_device(&dev->vdev); 1431 video_unregister_device(&dev->vdev);
1432} 1432}
diff --git a/drivers/media/video/stv680.c b/drivers/media/video/stv680.c
index 9c549d935994..328c41b1517d 100644
--- a/drivers/media/video/stv680.c
+++ b/drivers/media/video/stv680.c
@@ -1470,7 +1470,8 @@ static int stv680_probe (struct usb_interface *intf, const struct usb_device_id
1470 retval = -EIO; 1470 retval = -EIO;
1471 goto error_vdev; 1471 goto error_vdev;
1472 } 1472 }
1473 PDEBUG (0, "STV(i): registered new video device: video%d", stv680->vdev->minor); 1473 PDEBUG(0, "STV(i): registered new video device: video%d",
1474 stv680->vdev->num);
1474 1475
1475 usb_set_intfdata (intf, stv680); 1476 usb_set_intfdata (intf, stv680);
1476 retval = stv680_create_sysfs_files(stv680->vdev); 1477 retval = stv680_create_sysfs_files(stv680->vdev);
diff --git a/drivers/media/video/tvaudio.c b/drivers/media/video/tvaudio.c
index b59e47272abf..3720f0e03a16 100644
--- a/drivers/media/video/tvaudio.c
+++ b/drivers/media/video/tvaudio.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * experimental driver for simple i2c audio chips. 2 * Driver for simple i2c audio chips.
3 * 3 *
4 * Copyright (c) 2000 Gerd Knorr 4 * Copyright (c) 2000 Gerd Knorr
5 * based on code by: 5 * based on code by:
@@ -7,6 +7,10 @@
7 * Steve VanDeBogart (vandebo@uclink.berkeley.edu) 7 * Steve VanDeBogart (vandebo@uclink.berkeley.edu)
8 * Greg Alexander (galexand@acm.org) 8 * Greg Alexander (galexand@acm.org)
9 * 9 *
10 * Copyright(c) 2005-2008 Mauro Carvalho Chehab
11 * - Some cleanups, code fixes, etc
12 * - Convert it to V4L2 API
13 *
10 * This code is placed under the terms of the GNU General Public License 14 * This code is placed under the terms of the GNU General Public License
11 * 15 *
12 * OPTIONS: 16 * OPTIONS:
@@ -30,6 +34,7 @@
30 34
31#include <media/tvaudio.h> 35#include <media/tvaudio.h>
32#include <media/v4l2-common.h> 36#include <media/v4l2-common.h>
37#include <media/v4l2-ioctl.h>
33#include <media/v4l2-chip-ident.h> 38#include <media/v4l2-chip-ident.h>
34#include <media/v4l2-i2c-drv-legacy.h> 39#include <media/v4l2-i2c-drv-legacy.h>
35 40
@@ -58,7 +63,6 @@ typedef int (*checkit)(struct CHIPSTATE*);
58typedef int (*initialize)(struct CHIPSTATE*); 63typedef int (*initialize)(struct CHIPSTATE*);
59typedef int (*getmode)(struct CHIPSTATE*); 64typedef int (*getmode)(struct CHIPSTATE*);
60typedef void (*setmode)(struct CHIPSTATE*, int mode); 65typedef void (*setmode)(struct CHIPSTATE*, int mode);
61typedef void (*checkmode)(struct CHIPSTATE*);
62 66
63/* i2c command */ 67/* i2c command */
64typedef struct AUDIOCMD { 68typedef struct AUDIOCMD {
@@ -79,6 +83,7 @@ struct CHIPDESC {
79#define CHIP_HAS_VOLUME 1 83#define CHIP_HAS_VOLUME 1
80#define CHIP_HAS_BASSTREBLE 2 84#define CHIP_HAS_BASSTREBLE 2
81#define CHIP_HAS_INPUTSEL 4 85#define CHIP_HAS_INPUTSEL 4
86#define CHIP_NEED_CHECKMODE 8
82 87
83 /* various i2c command sequences */ 88 /* various i2c command sequences */
84 audiocmd init; 89 audiocmd init;
@@ -96,23 +101,20 @@ struct CHIPDESC {
96 getmode getmode; 101 getmode getmode;
97 setmode setmode; 102 setmode setmode;
98 103
99 /* check / autoswitch audio after channel switches */
100 checkmode checkmode;
101
102 /* input switch register + values for v4l inputs */ 104 /* input switch register + values for v4l inputs */
103 int inputreg; 105 int inputreg;
104 int inputmap[4]; 106 int inputmap[4];
105 int inputmute; 107 int inputmute;
106 int inputmask; 108 int inputmask;
107}; 109};
108static struct CHIPDESC chiplist[];
109 110
110/* current state of the chip */ 111/* current state of the chip */
111struct CHIPSTATE { 112struct CHIPSTATE {
112 struct i2c_client *c; 113 struct i2c_client *c;
113 114
114 /* index into CHIPDESC array */ 115 /* chip-specific description - should point to
115 int type; 116 an entry at CHIPDESC table */
117 struct CHIPDESC *desc;
116 118
117 /* shadow register set */ 119 /* shadow register set */
118 audiocmd shadow; 120 audiocmd shadow;
@@ -152,7 +154,7 @@ static int chip_write(struct CHIPSTATE *chip, int subaddr, int val)
152{ 154{
153 unsigned char buffer[2]; 155 unsigned char buffer[2];
154 156
155 if (-1 == subaddr) { 157 if (subaddr < 0) {
156 v4l_dbg(1, debug, chip->c, "%s: chip_write: 0x%x\n", 158 v4l_dbg(1, debug, chip->c, "%s: chip_write: 0x%x\n",
157 chip->c->name, val); 159 chip->c->name, val);
158 chip->shadow.bytes[1] = val; 160 chip->shadow.bytes[1] = val;
@@ -163,6 +165,13 @@ static int chip_write(struct CHIPSTATE *chip, int subaddr, int val)
163 return -1; 165 return -1;
164 } 166 }
165 } else { 167 } else {
168 if (subaddr + 1 >= ARRAY_SIZE(chip->shadow.bytes)) {
169 v4l_info(chip->c,
170 "Tried to access a non-existent register: %d\n",
171 subaddr);
172 return -EINVAL;
173 }
174
166 v4l_dbg(1, debug, chip->c, "%s: chip_write: reg%d=0x%x\n", 175 v4l_dbg(1, debug, chip->c, "%s: chip_write: reg%d=0x%x\n",
167 chip->c->name, subaddr, val); 176 chip->c->name, subaddr, val);
168 chip->shadow.bytes[subaddr+1] = val; 177 chip->shadow.bytes[subaddr+1] = val;
@@ -177,12 +186,20 @@ static int chip_write(struct CHIPSTATE *chip, int subaddr, int val)
177 return 0; 186 return 0;
178} 187}
179 188
180static int chip_write_masked(struct CHIPSTATE *chip, int subaddr, int val, int mask) 189static int chip_write_masked(struct CHIPSTATE *chip,
190 int subaddr, int val, int mask)
181{ 191{
182 if (mask != 0) { 192 if (mask != 0) {
183 if (-1 == subaddr) { 193 if (subaddr < 0) {
184 val = (chip->shadow.bytes[1] & ~mask) | (val & mask); 194 val = (chip->shadow.bytes[1] & ~mask) | (val & mask);
185 } else { 195 } else {
196 if (subaddr + 1 >= ARRAY_SIZE(chip->shadow.bytes)) {
197 v4l_info(chip->c,
198 "Tried to access a non-existent register: %d\n",
199 subaddr);
200 return -EINVAL;
201 }
202
186 val = (chip->shadow.bytes[subaddr+1] & ~mask) | (val & mask); 203 val = (chip->shadow.bytes[subaddr+1] & ~mask) | (val & mask);
187 } 204 }
188 } 205 }
@@ -228,6 +245,15 @@ static int chip_cmd(struct CHIPSTATE *chip, char *name, audiocmd *cmd)
228 if (0 == cmd->count) 245 if (0 == cmd->count)
229 return 0; 246 return 0;
230 247
248 if (cmd->count + cmd->bytes[0] - 1 >= ARRAY_SIZE(chip->shadow.bytes)) {
249 v4l_info(chip->c,
250 "Tried to access a non-existent register range: %d to %d\n",
251 cmd->bytes[0] + 1, cmd->bytes[0] + cmd->count - 1);
252 return -EINVAL;
253 }
254
255 /* FIXME: it seems that the shadow bytes are wrong bellow !*/
256
231 /* update our shadow register set; print bytes if (debug > 0) */ 257 /* update our shadow register set; print bytes if (debug > 0) */
232 v4l_dbg(1, debug, chip->c, "%s: chip_cmd(%s): reg=%d, data:", 258 v4l_dbg(1, debug, chip->c, "%s: chip_cmd(%s): reg=%d, data:",
233 chip->c->name, name,cmd->bytes[0]); 259 chip->c->name, name,cmd->bytes[0]);
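
The two hunks above add range checks before touching the shadow register set: chip_write and chip_write_masked reject subaddresses past the end of shadow.bytes[], and chip_cmd rejects command sequences whose register range would run off the array (bytes[0] holds the start register, then one shadow byte per register, hence the "subaddr + 1" test). A small user-space version of the same guard, with made-up sizes:

        #include <stdio.h>

        #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

        struct shadow {
                unsigned char bytes[5]; /* bytes[0] = start reg, bytes[1..4] = regs 0..3 */
        };

        static int shadow_write(struct shadow *s, int subaddr, unsigned char val)
        {
                if (subaddr < 0)        /* "no subaddress": treat as register 0 */
                        subaddr = 0;
                if (subaddr + 1 >= (int)ARRAY_SIZE(s->bytes)) {
                        fprintf(stderr, "register %d does not exist\n", subaddr);
                        return -1;
                }
                s->bytes[subaddr + 1] = val;
                return 0;
        }

        int main(void)
        {
                struct shadow s = { {0} };
                shadow_write(&s, 3, 0xaa);      /* last valid register */
                shadow_write(&s, 4, 0xbb);      /* rejected: out of range */
                return 0;
        }
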
@@ -263,7 +289,8 @@ static void chip_thread_wake(unsigned long data)
263static int chip_thread(void *data) 289static int chip_thread(void *data)
264{ 290{
265 struct CHIPSTATE *chip = data; 291 struct CHIPSTATE *chip = data;
266 struct CHIPDESC *desc = chiplist + chip->type; 292 struct CHIPDESC *desc = chip->desc;
293 int mode;
267 294
268 v4l_dbg(1, debug, chip->c, "%s: thread started\n", chip->c->name); 295 v4l_dbg(1, debug, chip->c, "%s: thread started\n", chip->c->name);
269 set_freezable(); 296 set_freezable();
@@ -282,7 +309,26 @@ static int chip_thread(void *data)
282 continue; 309 continue;
283 310
284 /* have a look what's going on */ 311 /* have a look what's going on */
285 desc->checkmode(chip); 312 mode = desc->getmode(chip);
313 if (mode == chip->prevmode)
314 continue;
315
316 /* chip detected a new audio mode - set it */
317 v4l_dbg(1, debug, chip->c, "%s: thread checkmode\n",
318 chip->c->name);
319
320 chip->prevmode = mode;
321
322 if (mode & V4L2_TUNER_MODE_STEREO)
323 desc->setmode(chip, V4L2_TUNER_MODE_STEREO);
324 if (mode & V4L2_TUNER_MODE_LANG1_LANG2)
325 desc->setmode(chip, V4L2_TUNER_MODE_STEREO);
326 else if (mode & V4L2_TUNER_MODE_LANG1)
327 desc->setmode(chip, V4L2_TUNER_MODE_LANG1);
328 else if (mode & V4L2_TUNER_MODE_LANG2)
329 desc->setmode(chip, V4L2_TUNER_MODE_LANG2);
330 else
331 desc->setmode(chip, V4L2_TUNER_MODE_MONO);
286 332
287 /* schedule next check */ 333 /* schedule next check */
288 mod_timer(&chip->wt, jiffies+msecs_to_jiffies(2000)); 334 mod_timer(&chip->wt, jiffies+msecs_to_jiffies(2000));
@@ -292,29 +338,6 @@ static int chip_thread(void *data)
292 return 0; 338 return 0;
293} 339}
294 340
295static void generic_checkmode(struct CHIPSTATE *chip)
296{
297 struct CHIPDESC *desc = chiplist + chip->type;
298 int mode = desc->getmode(chip);
299
300 if (mode == chip->prevmode)
301 return;
302
303 v4l_dbg(1, debug, chip->c, "%s: thread checkmode\n", chip->c->name);
304 chip->prevmode = mode;
305
306 if (mode & V4L2_TUNER_MODE_STEREO)
307 desc->setmode(chip,V4L2_TUNER_MODE_STEREO);
308 if (mode & V4L2_TUNER_MODE_LANG1_LANG2)
309 desc->setmode(chip,V4L2_TUNER_MODE_STEREO);
310 else if (mode & V4L2_TUNER_MODE_LANG1)
311 desc->setmode(chip,V4L2_TUNER_MODE_LANG1);
312 else if (mode & V4L2_TUNER_MODE_LANG2)
313 desc->setmode(chip,V4L2_TUNER_MODE_LANG2);
314 else
315 desc->setmode(chip,V4L2_TUNER_MODE_MONO);
316}
317
318/* ---------------------------------------------------------------------- */ 341/* ---------------------------------------------------------------------- */
319/* audio chip descriptions - defines+functions for tda9840 */ 342/* audio chip descriptions - defines+functions for tda9840 */
320 343
@@ -777,7 +800,7 @@ static struct tda9874a_MODES {
777 char *name; 800 char *name;
778 audiocmd cmd; 801 audiocmd cmd;
779} tda9874a_modelist[9] = { 802} tda9874a_modelist[9] = {
780 { "A2, B/G", 803 { "A2, B/G", /* default */
781 { 9, { TDA9874A_C1FRA, 0x72,0x95,0x55, 0x77,0xA0,0x00, 0x00,0x00 }} }, 804 { 9, { TDA9874A_C1FRA, 0x72,0x95,0x55, 0x77,0xA0,0x00, 0x00,0x00 }} },
782 { "A2, M (Korea)", 805 { "A2, M (Korea)",
783 { 9, { TDA9874A_C1FRA, 0x5D,0xC0,0x00, 0x62,0x6A,0xAA, 0x20,0x22 }} }, 806 { 9, { TDA9874A_C1FRA, 0x5D,0xC0,0x00, 0x62,0x6A,0xAA, 0x20,0x22 }} },
@@ -791,7 +814,7 @@ static struct tda9874a_MODES {
791 { 9, { TDA9874A_C1FRA, 0x7D,0x00,0x00, 0x88,0x8A,0xAA, 0x08,0x33 }} }, 814 { 9, { TDA9874A_C1FRA, 0x7D,0x00,0x00, 0x88,0x8A,0xAA, 0x08,0x33 }} },
792 { "NICAM, B/G", 815 { "NICAM, B/G",
793 { 9, { TDA9874A_C1FRA, 0x72,0x95,0x55, 0x79,0xEA,0xAA, 0x08,0x33 }} }, 816 { 9, { TDA9874A_C1FRA, 0x72,0x95,0x55, 0x79,0xEA,0xAA, 0x08,0x33 }} },
794 { "NICAM, D/K", /* default */ 817 { "NICAM, D/K",
795 { 9, { TDA9874A_C1FRA, 0x87,0x6A,0xAA, 0x79,0xEA,0xAA, 0x08,0x33 }} }, 818 { 9, { TDA9874A_C1FRA, 0x87,0x6A,0xAA, 0x79,0xEA,0xAA, 0x08,0x33 }} },
796 { "NICAM, L", 819 { "NICAM, L",
797 { 9, { TDA9874A_C1FRA, 0x87,0x6A,0xAA, 0x79,0xEA,0xAA, 0x09,0x33 }} } 820 { 9, { TDA9874A_C1FRA, 0x87,0x6A,0xAA, 0x79,0xEA,0xAA, 0x09,0x33 }} }
@@ -981,7 +1004,7 @@ static int tda9874a_initialize(struct CHIPSTATE *chip)
981{ 1004{
982 if (tda9874a_SIF > 2) 1005 if (tda9874a_SIF > 2)
983 tda9874a_SIF = 1; 1006 tda9874a_SIF = 1;
984 if (tda9874a_STD > 8) 1007 if (tda9874a_STD >= ARRAY_SIZE(tda9874a_modelist))
985 tda9874a_STD = 0; 1008 tda9874a_STD = 0;
986 if(tda9874a_AMSEL > 1) 1009 if(tda9874a_AMSEL > 1)
987 tda9874a_AMSEL = 0; 1010 tda9874a_AMSEL = 0;
@@ -1089,7 +1112,7 @@ static int tda8425_shift12(int val) { return (val >> 12) | 0xf0; }
1089 1112
1090static int tda8425_initialize(struct CHIPSTATE *chip) 1113static int tda8425_initialize(struct CHIPSTATE *chip)
1091{ 1114{
1092 struct CHIPDESC *desc = chiplist + chip->type; 1115 struct CHIPDESC *desc = chip->desc;
1093 int inputmap[4] = { /* tuner */ TDA8425_S1_CH2, /* radio */ TDA8425_S1_CH1, 1116 int inputmap[4] = { /* tuner */ TDA8425_S1_CH2, /* radio */ TDA8425_S1_CH1,
1094 /* extern */ TDA8425_S1_CH1, /* intern */ TDA8425_S1_OFF}; 1117 /* extern */ TDA8425_S1_CH1, /* intern */ TDA8425_S1_OFF};
1095 1118
@@ -1259,27 +1282,28 @@ static struct CHIPDESC chiplist[] = {
1259 .addr_lo = I2C_ADDR_TDA9840 >> 1, 1282 .addr_lo = I2C_ADDR_TDA9840 >> 1,
1260 .addr_hi = I2C_ADDR_TDA9840 >> 1, 1283 .addr_hi = I2C_ADDR_TDA9840 >> 1,
1261 .registers = 5, 1284 .registers = 5,
1285 .flags = CHIP_NEED_CHECKMODE,
1262 1286
1287 /* callbacks */
1263 .checkit = tda9840_checkit, 1288 .checkit = tda9840_checkit,
1264 .getmode = tda9840_getmode, 1289 .getmode = tda9840_getmode,
1265 .setmode = tda9840_setmode, 1290 .setmode = tda9840_setmode,
1266 .checkmode = generic_checkmode,
1267 1291
1268 .init = { 2, { TDA9840_TEST, TDA9840_TEST_INT1SN 1292 .init = { 2, { TDA9840_TEST, TDA9840_TEST_INT1SN
1269 /* ,TDA9840_SW, TDA9840_MONO */} } 1293 /* ,TDA9840_SW, TDA9840_MONO */} }
1270 }, 1294 },
1271 { 1295 {
1272 .name = "tda9873h", 1296 .name = "tda9873h",
1273 .checkit = tda9873_checkit,
1274 .insmodopt = &tda9873, 1297 .insmodopt = &tda9873,
1275 .addr_lo = I2C_ADDR_TDA985x_L >> 1, 1298 .addr_lo = I2C_ADDR_TDA985x_L >> 1,
1276 .addr_hi = I2C_ADDR_TDA985x_H >> 1, 1299 .addr_hi = I2C_ADDR_TDA985x_H >> 1,
1277 .registers = 3, 1300 .registers = 3,
1278 .flags = CHIP_HAS_INPUTSEL, 1301 .flags = CHIP_HAS_INPUTSEL | CHIP_NEED_CHECKMODE,
1279 1302
1303 /* callbacks */
1304 .checkit = tda9873_checkit,
1280 .getmode = tda9873_getmode, 1305 .getmode = tda9873_getmode,
1281 .setmode = tda9873_setmode, 1306 .setmode = tda9873_setmode,
1282 .checkmode = generic_checkmode,
1283 1307
1284 .init = { 4, { TDA9873_SW, 0xa4, 0x06, 0x03 } }, 1308 .init = { 4, { TDA9873_SW, 0xa4, 0x06, 0x03 } },
1285 .inputreg = TDA9873_SW, 1309 .inputreg = TDA9873_SW,
@@ -1290,15 +1314,16 @@ static struct CHIPDESC chiplist[] = {
1290 }, 1314 },
1291 { 1315 {
1292 .name = "tda9874h/a", 1316 .name = "tda9874h/a",
1293 .checkit = tda9874a_checkit,
1294 .initialize = tda9874a_initialize,
1295 .insmodopt = &tda9874a, 1317 .insmodopt = &tda9874a,
1296 .addr_lo = I2C_ADDR_TDA9874 >> 1, 1318 .addr_lo = I2C_ADDR_TDA9874 >> 1,
1297 .addr_hi = I2C_ADDR_TDA9874 >> 1, 1319 .addr_hi = I2C_ADDR_TDA9874 >> 1,
1320 .flags = CHIP_NEED_CHECKMODE,
1298 1321
1322 /* callbacks */
1323 .initialize = tda9874a_initialize,
1324 .checkit = tda9874a_checkit,
1299 .getmode = tda9874a_getmode, 1325 .getmode = tda9874a_getmode,
1300 .setmode = tda9874a_setmode, 1326 .setmode = tda9874a_setmode,
1301 .checkmode = generic_checkmode,
1302 }, 1327 },
1303 { 1328 {
1304 .name = "tda9850", 1329 .name = "tda9850",
@@ -1324,10 +1349,11 @@ static struct CHIPDESC chiplist[] = {
1324 .rightreg = TDA9855_VR, 1349 .rightreg = TDA9855_VR,
1325 .bassreg = TDA9855_BA, 1350 .bassreg = TDA9855_BA,
1326 .treblereg = TDA9855_TR, 1351 .treblereg = TDA9855_TR,
1352
1353 /* callbacks */
1327 .volfunc = tda9855_volume, 1354 .volfunc = tda9855_volume,
1328 .bassfunc = tda9855_bass, 1355 .bassfunc = tda9855_bass,
1329 .treblefunc = tda9855_treble, 1356 .treblefunc = tda9855_treble,
1330
1331 .getmode = tda985x_getmode, 1357 .getmode = tda985x_getmode,
1332 .setmode = tda985x_setmode, 1358 .setmode = tda985x_setmode,
1333 1359
@@ -1348,6 +1374,8 @@ static struct CHIPDESC chiplist[] = {
1348 .rightreg = TEA6300_VL, 1374 .rightreg = TEA6300_VL,
1349 .bassreg = TEA6300_BA, 1375 .bassreg = TEA6300_BA,
1350 .treblereg = TEA6300_TR, 1376 .treblereg = TEA6300_TR,
1377
1378 /* callbacks */
1351 .volfunc = tea6300_shift10, 1379 .volfunc = tea6300_shift10,
1352 .bassfunc = tea6300_shift12, 1380 .bassfunc = tea6300_shift12,
1353 .treblefunc = tea6300_shift12, 1381 .treblefunc = tea6300_shift12,
@@ -1358,7 +1386,6 @@ static struct CHIPDESC chiplist[] = {
1358 }, 1386 },
1359 { 1387 {
1360 .name = "tea6320", 1388 .name = "tea6320",
1361 .initialize = tea6320_initialize,
1362 .insmodopt = &tea6320, 1389 .insmodopt = &tea6320,
1363 .addr_lo = I2C_ADDR_TEA6300 >> 1, 1390 .addr_lo = I2C_ADDR_TEA6300 >> 1,
1364 .addr_hi = I2C_ADDR_TEA6300 >> 1, 1391 .addr_hi = I2C_ADDR_TEA6300 >> 1,
@@ -1369,6 +1396,9 @@ static struct CHIPDESC chiplist[] = {
1369 .rightreg = TEA6320_V, 1396 .rightreg = TEA6320_V,
1370 .bassreg = TEA6320_BA, 1397 .bassreg = TEA6320_BA,
1371 .treblereg = TEA6320_TR, 1398 .treblereg = TEA6320_TR,
1399
1400 /* callbacks */
1401 .initialize = tea6320_initialize,
1372 .volfunc = tea6320_volume, 1402 .volfunc = tea6320_volume,
1373 .bassfunc = tea6320_shift11, 1403 .bassfunc = tea6320_shift11,
1374 .treblefunc = tea6320_shift11, 1404 .treblefunc = tea6320_shift11,
@@ -1401,16 +1431,18 @@ static struct CHIPDESC chiplist[] = {
1401 .rightreg = TDA8425_VR, 1431 .rightreg = TDA8425_VR,
1402 .bassreg = TDA8425_BA, 1432 .bassreg = TDA8425_BA,
1403 .treblereg = TDA8425_TR, 1433 .treblereg = TDA8425_TR,
1434
1435 /* callbacks */
1436 .initialize = tda8425_initialize,
1404 .volfunc = tda8425_shift10, 1437 .volfunc = tda8425_shift10,
1405 .bassfunc = tda8425_shift12, 1438 .bassfunc = tda8425_shift12,
1406 .treblefunc = tda8425_shift12, 1439 .treblefunc = tda8425_shift12,
1440 .setmode = tda8425_setmode,
1407 1441
1408 .inputreg = TDA8425_S1, 1442 .inputreg = TDA8425_S1,
1409 .inputmap = { TDA8425_S1_CH1, TDA8425_S1_CH1, TDA8425_S1_CH1 }, 1443 .inputmap = { TDA8425_S1_CH1, TDA8425_S1_CH1, TDA8425_S1_CH1 },
1410 .inputmute = TDA8425_S1_OFF, 1444 .inputmute = TDA8425_S1_OFF,
1411 1445
1412 .setmode = tda8425_setmode,
1413 .initialize = tda8425_initialize,
1414 }, 1446 },
1415 { 1447 {
1416 .name = "pic16c54 (PV951)", 1448 .name = "pic16c54 (PV951)",
@@ -1434,10 +1466,11 @@ static struct CHIPDESC chiplist[] = {
1434 .addr_lo = I2C_ADDR_TDA9840 >> 1, 1466 .addr_lo = I2C_ADDR_TDA9840 >> 1,
1435 .addr_hi = I2C_ADDR_TDA9840 >> 1, 1467 .addr_hi = I2C_ADDR_TDA9840 >> 1,
1436 .registers = 2, 1468 .registers = 2,
1469 .flags = CHIP_NEED_CHECKMODE,
1437 1470
1471 /* callbacks */
1438 .getmode = ta8874z_getmode, 1472 .getmode = ta8874z_getmode,
1439 .setmode = ta8874z_setmode, 1473 .setmode = ta8874z_setmode,
1440 .checkmode = generic_checkmode,
1441 1474
1442 .init = {2, { TA8874Z_MONO_SET, TA8874Z_SEPARATION_DEFAULT}}, 1475 .init = {2, { TA8874Z_MONO_SET, TA8874Z_SEPARATION_DEFAULT}},
1443 }, 1476 },
@@ -1481,6 +1514,7 @@ static int chip_probe(struct i2c_client *client, const struct i2c_device_id *id)
1481 } 1514 }
1482 if (desc->name == NULL) { 1515 if (desc->name == NULL) {
1483 v4l_dbg(1, debug, client, "no matching chip description found\n"); 1516 v4l_dbg(1, debug, client, "no matching chip description found\n");
1517 kfree(chip);
1484 return -EIO; 1518 return -EIO;
1485 } 1519 }
1486 v4l_info(client, "%s found @ 0x%x (%s)\n", desc->name, client->addr<<1, client->adapter->name); 1520 v4l_info(client, "%s found @ 0x%x (%s)\n", desc->name, client->addr<<1, client->adapter->name);
@@ -1494,7 +1528,7 @@ static int chip_probe(struct i2c_client *client, const struct i2c_device_id *id)
1494 /* fill required data structures */ 1528 /* fill required data structures */
1495 if (!id) 1529 if (!id)
1496 strlcpy(client->name, desc->name, I2C_NAME_SIZE); 1530 strlcpy(client->name, desc->name, I2C_NAME_SIZE);
1497 chip->type = desc-chiplist; 1531 chip->desc = desc;
1498 chip->shadow.count = desc->registers+1; 1532 chip->shadow.count = desc->registers+1;
1499 chip->prevmode = -1; 1533 chip->prevmode = -1;
1500 chip->audmode = V4L2_TUNER_MODE_LANG1; 1534 chip->audmode = V4L2_TUNER_MODE_LANG1;
@@ -1506,20 +1540,49 @@ static int chip_probe(struct i2c_client *client, const struct i2c_device_id *id)
1506 chip_cmd(chip,"init",&desc->init); 1540 chip_cmd(chip,"init",&desc->init);
1507 1541
1508 if (desc->flags & CHIP_HAS_VOLUME) { 1542 if (desc->flags & CHIP_HAS_VOLUME) {
1509 chip->left = desc->leftinit ? desc->leftinit : 65535; 1543 if (!desc->volfunc) {
1510 chip->right = desc->rightinit ? desc->rightinit : 65535; 1544 /* This shouldn't happen. Warn the user, but keep working
1511 chip_write(chip,desc->leftreg,desc->volfunc(chip->left)); 1545 without volume controls
1512 chip_write(chip,desc->rightreg,desc->volfunc(chip->right)); 1546 */
1547 v4l_info(chip->c, "volume callback undefined!\n");
1548 desc->flags &= ~CHIP_HAS_VOLUME;
1549 } else {
1550 chip->left = desc->leftinit ? desc->leftinit : 65535;
1551 chip->right = desc->rightinit ? desc->rightinit : 65535;
1552 chip_write(chip, desc->leftreg,
1553 desc->volfunc(chip->left));
1554 chip_write(chip, desc->rightreg,
1555 desc->volfunc(chip->right));
1556 }
1513 } 1557 }
1514 if (desc->flags & CHIP_HAS_BASSTREBLE) { 1558 if (desc->flags & CHIP_HAS_BASSTREBLE) {
1515 chip->treble = desc->trebleinit ? desc->trebleinit : 32768; 1559 if (!desc->bassfunc || !desc->treblefunc) {
1516 chip->bass = desc->bassinit ? desc->bassinit : 32768; 1560 /* This shouldn't happen. Warn the user, but keep working
1517 chip_write(chip,desc->bassreg,desc->bassfunc(chip->bass)); 1561 without bass/treble controls
1518 chip_write(chip,desc->treblereg,desc->treblefunc(chip->treble)); 1562 */
1563 v4l_info(chip->c, "bass/treble callbacks undefined!\n");
1564 desc->flags &= ~CHIP_HAS_BASSTREBLE;
1565 } else {
1566 chip->treble = desc->trebleinit ?
1567 desc->trebleinit : 32768;
1568 chip->bass = desc->bassinit ?
1569 desc->bassinit : 32768;
1570 chip_write(chip, desc->bassreg,
1571 desc->bassfunc(chip->bass));
1572 chip_write(chip, desc->treblereg,
1573 desc->treblefunc(chip->treble));
1574 }
1519 } 1575 }
1520 1576
1521 chip->thread = NULL; 1577 chip->thread = NULL;
1522 if (desc->checkmode) { 1578 if (desc->flags & CHIP_NEED_CHECKMODE) {
1579 if (!desc->getmode || !desc->setmode) {
1580 /* This shouldn't happen. Warn the user, but keep working
1581 without the kthread
1582 */
1583 v4l_info(chip->c, "set/get mode callbacks undefined!\n");
1584 return 0;
1585 }
1523 /* start async thread */ 1586 /* start async thread */
1524 init_timer(&chip->wt); 1587 init_timer(&chip->wt);
1525 chip->wt.function = chip_thread_wake; 1588 chip->wt.function = chip_thread_wake;
@@ -1552,7 +1615,7 @@ static int chip_remove(struct i2c_client *client)
1552static int tvaudio_get_ctrl(struct CHIPSTATE *chip, 1615static int tvaudio_get_ctrl(struct CHIPSTATE *chip,
1553 struct v4l2_control *ctrl) 1616 struct v4l2_control *ctrl)
1554{ 1617{
1555 struct CHIPDESC *desc = chiplist + chip->type; 1618 struct CHIPDESC *desc = chip->desc;
1556 1619
1557 switch (ctrl->id) { 1620 switch (ctrl->id) {
1558 case V4L2_CID_AUDIO_MUTE: 1621 case V4L2_CID_AUDIO_MUTE:
@@ -1576,13 +1639,13 @@ static int tvaudio_get_ctrl(struct CHIPSTATE *chip,
1576 return 0; 1639 return 0;
1577 } 1640 }
1578 case V4L2_CID_AUDIO_BASS: 1641 case V4L2_CID_AUDIO_BASS:
1579 if (desc->flags & CHIP_HAS_BASSTREBLE) 1642 if (!(desc->flags & CHIP_HAS_BASSTREBLE))
1580 break; 1643 break;
1581 ctrl->value = chip->bass; 1644 ctrl->value = chip->bass;
1582 return 0; 1645 return 0;
1583 case V4L2_CID_AUDIO_TREBLE: 1646 case V4L2_CID_AUDIO_TREBLE:
1584 if (desc->flags & CHIP_HAS_BASSTREBLE) 1647 if (!(desc->flags & CHIP_HAS_BASSTREBLE))
1585 return -EINVAL; 1648 break;
1586 ctrl->value = chip->treble; 1649 ctrl->value = chip->treble;
1587 return 0; 1650 return 0;
1588 } 1651 }
@@ -1592,7 +1655,7 @@ static int tvaudio_get_ctrl(struct CHIPSTATE *chip,
1592static int tvaudio_set_ctrl(struct CHIPSTATE *chip, 1655static int tvaudio_set_ctrl(struct CHIPSTATE *chip,
1593 struct v4l2_control *ctrl) 1656 struct v4l2_control *ctrl)
1594{ 1657{
1595 struct CHIPDESC *desc = chiplist + chip->type; 1658 struct CHIPDESC *desc = chip->desc;
1596 1659
1597 switch (ctrl->id) { 1660 switch (ctrl->id) {
1598 case V4L2_CID_AUDIO_MUTE: 1661 case V4L2_CID_AUDIO_MUTE:
@@ -1642,16 +1705,15 @@ static int tvaudio_set_ctrl(struct CHIPSTATE *chip,
1642 return 0; 1705 return 0;
1643 } 1706 }
1644 case V4L2_CID_AUDIO_BASS: 1707 case V4L2_CID_AUDIO_BASS:
1645 if (desc->flags & CHIP_HAS_BASSTREBLE) 1708 if (!(desc->flags & CHIP_HAS_BASSTREBLE))
1646 break; 1709 break;
1647 chip->bass = ctrl->value; 1710 chip->bass = ctrl->value;
1648 chip_write(chip,desc->bassreg,desc->bassfunc(chip->bass)); 1711 chip_write(chip,desc->bassreg,desc->bassfunc(chip->bass));
1649 1712
1650 return 0; 1713 return 0;
1651 case V4L2_CID_AUDIO_TREBLE: 1714 case V4L2_CID_AUDIO_TREBLE:
1652 if (desc->flags & CHIP_HAS_BASSTREBLE) 1715 if (!(desc->flags & CHIP_HAS_BASSTREBLE))
1653 return -EINVAL; 1716 break;
1654
1655 chip->treble = ctrl->value; 1717 chip->treble = ctrl->value;
1656 chip_write(chip,desc->treblereg,desc->treblefunc(chip->treble)); 1718 chip_write(chip,desc->treblereg,desc->treblefunc(chip->treble));
1657 1719
@@ -1668,9 +1730,12 @@ static int chip_command(struct i2c_client *client,
1668 unsigned int cmd, void *arg) 1730 unsigned int cmd, void *arg)
1669{ 1731{
1670 struct CHIPSTATE *chip = i2c_get_clientdata(client); 1732 struct CHIPSTATE *chip = i2c_get_clientdata(client);
1671 struct CHIPDESC *desc = chiplist + chip->type; 1733 struct CHIPDESC *desc = chip->desc;
1672 1734
1673 v4l_dbg(1, debug, chip->c, "%s: chip_command 0x%x\n", chip->c->name, cmd); 1735 if (debug > 0) {
1736 v4l_i2c_print_ioctl(chip->c, cmd);
1737 printk("\n");
1738 }
1674 1739
1675 switch (cmd) { 1740 switch (cmd) {
1676 case AUDC_SET_RADIO: 1741 case AUDC_SET_RADIO:
@@ -1695,7 +1760,7 @@ static int chip_command(struct i2c_client *client,
1695 break; 1760 break;
1696 case V4L2_CID_AUDIO_BASS: 1761 case V4L2_CID_AUDIO_BASS:
1697 case V4L2_CID_AUDIO_TREBLE: 1762 case V4L2_CID_AUDIO_TREBLE:
1698 if (desc->flags & CHIP_HAS_BASSTREBLE) 1763 if (!(desc->flags & CHIP_HAS_BASSTREBLE))
1699 return -EINVAL; 1764 return -EINVAL;
1700 break; 1765 break;
1701 default: 1766 default:
@@ -1792,12 +1857,20 @@ static int chip_command(struct i2c_client *client,
1792 break; 1857 break;
1793 case VIDIOC_S_FREQUENCY: 1858 case VIDIOC_S_FREQUENCY:
1794 chip->mode = 0; /* automatic */ 1859 chip->mode = 0; /* automatic */
1795 if (desc->checkmode && desc->setmode) { 1860
1861 /* For chips that provide getmode and setmode, and don't
1862 automatically follow the stereo carrier, a kthread is
1863 created to set the audio standard. In this case, when
1864 the video channel is changed, tvaudio starts in MONO mode.
1865 After waiting for 2 seconds, the kernel thread is called
1866 to follow whatever audio standard is indicated by the
1867 audio carrier.
1868 */
1869 if (chip->thread) {
1796 desc->setmode(chip,V4L2_TUNER_MODE_MONO); 1870 desc->setmode(chip,V4L2_TUNER_MODE_MONO);
1797 if (chip->prevmode != V4L2_TUNER_MODE_MONO) 1871 if (chip->prevmode != V4L2_TUNER_MODE_MONO)
1798 chip->prevmode = -1; /* reset previous mode */ 1872 chip->prevmode = -1; /* reset previous mode */
1799 mod_timer(&chip->wt, jiffies+msecs_to_jiffies(2000)); 1873 mod_timer(&chip->wt, jiffies+msecs_to_jiffies(2000));
1800 /* the thread will call checkmode() later */
1801 } 1874 }
1802 break; 1875 break;
1803 1876
@@ -1836,9 +1909,3 @@ static struct v4l2_i2c_driver_data v4l2_i2c_data = {
1836 .legacy_probe = chip_legacy_probe, 1909 .legacy_probe = chip_legacy_probe,
1837 .id_table = chip_id, 1910 .id_table = chip_id,
1838}; 1911};
1839
1840/*
1841 * Local variables:
1842 * c-basic-offset: 8
1843 * End:
1844 */
diff --git a/drivers/media/video/usbvideo/ibmcam.c b/drivers/media/video/usbvideo/ibmcam.c
index 28421d386f1e..c710bcd1df48 100644
--- a/drivers/media/video/usbvideo/ibmcam.c
+++ b/drivers/media/video/usbvideo/ibmcam.c
@@ -3695,7 +3695,7 @@ static int ibmcam_probe(struct usb_interface *intf, const struct usb_device_id *
3695 unsigned char video_ep = 0; 3695 unsigned char video_ep = 0;
3696 3696
3697 if (debug >= 1) 3697 if (debug >= 1)
3698 dev_info(&uvd->dev->dev, "ibmcam_probe(%p,%u.)\n", intf, ifnum); 3698 dev_info(&dev->dev, "ibmcam_probe(%p,%u.)\n", intf, ifnum);
3699 3699
3700 /* We don't handle multi-config cameras */ 3700 /* We don't handle multi-config cameras */
3701 if (dev->descriptor.bNumConfigurations != 1) 3701 if (dev->descriptor.bNumConfigurations != 1)
@@ -3746,7 +3746,7 @@ static int ibmcam_probe(struct usb_interface *intf, const struct usb_device_id *
3746 brand = "IBM PC Camera"; /* a.k.a. Xirlink C-It */ 3746 brand = "IBM PC Camera"; /* a.k.a. Xirlink C-It */
3747 break; 3747 break;
3748 } 3748 }
3749 dev_info(&uvd->dev->dev, 3749 dev_info(&dev->dev,
3750 "%s USB camera found (model %d, rev. 0x%04x)\n", 3750 "%s USB camera found (model %d, rev. 0x%04x)\n",
3751 brand, model, le16_to_cpu(dev->descriptor.bcdDevice)); 3751 brand, model, le16_to_cpu(dev->descriptor.bcdDevice));
3752 } while (0); 3752 } while (0);
@@ -3754,7 +3754,7 @@ static int ibmcam_probe(struct usb_interface *intf, const struct usb_device_id *
3754 /* Validate found interface: must have one ISO endpoint */ 3754 /* Validate found interface: must have one ISO endpoint */
3755 nas = intf->num_altsetting; 3755 nas = intf->num_altsetting;
3756 if (debug > 0) 3756 if (debug > 0)
3757 dev_info(&uvd->dev->dev, "Number of alternate settings=%d.\n", 3757 dev_info(&dev->dev, "Number of alternate settings=%d.\n",
3758 nas); 3758 nas);
3759 if (nas < 2) { 3759 if (nas < 2) {
3760 err("Too few alternate settings for this camera!"); 3760 err("Too few alternate settings for this camera!");
@@ -3799,7 +3799,7 @@ static int ibmcam_probe(struct usb_interface *intf, const struct usb_device_id *
3799 actInterface = i; 3799 actInterface = i;
3800 maxPS = le16_to_cpu(endpoint->wMaxPacketSize); 3800 maxPS = le16_to_cpu(endpoint->wMaxPacketSize);
3801 if (debug > 0) 3801 if (debug > 0)
3802 dev_info(&uvd->dev->dev, 3802 dev_info(&dev->dev,
3803 "Active setting=%d. " 3803 "Active setting=%d. "
3804 "maxPS=%d.\n", i, maxPS); 3804 "maxPS=%d.\n", i, maxPS);
3805 } else 3805 } else
@@ -3840,7 +3840,7 @@ static int ibmcam_probe(struct usb_interface *intf, const struct usb_device_id *
3840 RESTRICT_TO_RANGE(framerate, 0, 5); 3840 RESTRICT_TO_RANGE(framerate, 0, 5);
3841 break; 3841 break;
3842 default: 3842 default:
3843 dev_info(&uvd->dev->dev, "IBM camera: using 320x240\n"); 3843 dev_info(&dev->dev, "IBM camera: using 320x240\n");
3844 size = SIZE_320x240; 3844 size = SIZE_320x240;
3845 /* No break here */ 3845 /* No break here */
3846 case SIZE_320x240: 3846 case SIZE_320x240:
@@ -3869,7 +3869,7 @@ static int ibmcam_probe(struct usb_interface *intf, const struct usb_device_id *
3869 canvasY = 120; 3869 canvasY = 120;
3870 break; 3870 break;
3871 default: 3871 default:
3872 dev_info(&uvd->dev->dev, "IBM NetCamera: using 176x144\n"); 3872 dev_info(&dev->dev, "IBM NetCamera: using 176x144\n");
3873 size = SIZE_176x144; 3873 size = SIZE_176x144;
3874 /* No break here */ 3874 /* No break here */
3875 case SIZE_176x144: 3875 case SIZE_176x144:
diff --git a/drivers/media/video/usbvideo/usbvideo.c b/drivers/media/video/usbvideo/usbvideo.c
index 07cd87d16f69..7c575bb8184f 100644
--- a/drivers/media/video/usbvideo/usbvideo.c
+++ b/drivers/media/video/usbvideo/usbvideo.c
@@ -1059,7 +1059,7 @@ int usbvideo_RegisterVideoDevice(struct uvd *uvd)
1059 1059
1060 dev_info(&uvd->dev->dev, "%s on /dev/video%d: canvas=%s videosize=%s\n", 1060 dev_info(&uvd->dev->dev, "%s on /dev/video%d: canvas=%s videosize=%s\n",
1061 (uvd->handle != NULL) ? uvd->handle->drvName : "???", 1061 (uvd->handle != NULL) ? uvd->handle->drvName : "???",
1062 uvd->vdev.minor, tmp2, tmp1); 1062 uvd->vdev.num, tmp2, tmp1);
1063 1063
1064 usb_get_dev(uvd->dev); 1064 usb_get_dev(uvd->dev);
1065 return 0; 1065 return 0;
diff --git a/drivers/media/video/usbvideo/vicam.c b/drivers/media/video/usbvideo/vicam.c
index 7a127d6bfdee..8e2d58bec481 100644
--- a/drivers/media/video/usbvideo/vicam.c
+++ b/drivers/media/video/usbvideo/vicam.c
@@ -877,7 +877,8 @@ vicam_probe( struct usb_interface *intf, const struct usb_device_id *id)
877 return -EIO; 877 return -EIO;
878 } 878 }
879 879
880 printk(KERN_INFO "ViCam webcam driver now controlling video device %d\n",cam->vdev.minor); 880 printk(KERN_INFO "ViCam webcam driver now controlling video device %d\n",
881 cam->vdev.num);
881 882
882 usb_set_intfdata (intf, cam); 883 usb_set_intfdata (intf, cam);
883 884
diff --git a/drivers/media/video/usbvision/usbvision-i2c.c b/drivers/media/video/usbvision/usbvision-i2c.c
index 92427fdc1459..9907b9aff2b9 100644
--- a/drivers/media/video/usbvision/usbvision-i2c.c
+++ b/drivers/media/video/usbvision/usbvision-i2c.c
@@ -236,7 +236,7 @@ int usbvision_i2c_register(struct usb_usbvision *usbvision)
236 sizeof(struct i2c_client)); 236 sizeof(struct i2c_client));
237 237
238 sprintf(usbvision->i2c_adap.name + strlen(usbvision->i2c_adap.name), 238 sprintf(usbvision->i2c_adap.name + strlen(usbvision->i2c_adap.name),
239 " #%d", usbvision->vdev->minor & 0x1f); 239 " #%d", usbvision->vdev->num);
240 PDEBUG(DBG_I2C,"Adaptername: %s", usbvision->i2c_adap.name); 240 PDEBUG(DBG_I2C,"Adaptername: %s", usbvision->i2c_adap.name);
241 usbvision->i2c_adap.dev.parent = &usbvision->dev->dev; 241 usbvision->i2c_adap.dev.parent = &usbvision->dev->dev;
242 242
diff --git a/drivers/media/video/usbvision/usbvision-video.c b/drivers/media/video/usbvision/usbvision-video.c
index 77aeb39b2750..d185b57fdcd0 100644
--- a/drivers/media/video/usbvision/usbvision-video.c
+++ b/drivers/media/video/usbvision/usbvision-video.c
@@ -1440,7 +1440,7 @@ static void usbvision_unregister_video(struct usb_usbvision *usbvision)
1440 // vbi Device: 1440 // vbi Device:
1441 if (usbvision->vbi) { 1441 if (usbvision->vbi) {
1442 PDEBUG(DBG_PROBE, "unregister /dev/vbi%d [v4l2]", 1442 PDEBUG(DBG_PROBE, "unregister /dev/vbi%d [v4l2]",
1443 usbvision->vbi->minor & 0x1f); 1443 usbvision->vbi->num);
1444 if (usbvision->vbi->minor != -1) { 1444 if (usbvision->vbi->minor != -1) {
1445 video_unregister_device(usbvision->vbi); 1445 video_unregister_device(usbvision->vbi);
1446 } else { 1446 } else {
@@ -1452,7 +1452,7 @@ static void usbvision_unregister_video(struct usb_usbvision *usbvision)
1452 // Radio Device: 1452 // Radio Device:
1453 if (usbvision->rdev) { 1453 if (usbvision->rdev) {
1454 PDEBUG(DBG_PROBE, "unregister /dev/radio%d [v4l2]", 1454 PDEBUG(DBG_PROBE, "unregister /dev/radio%d [v4l2]",
1455 usbvision->rdev->minor & 0x1f); 1455 usbvision->rdev->num);
1456 if (usbvision->rdev->minor != -1) { 1456 if (usbvision->rdev->minor != -1) {
1457 video_unregister_device(usbvision->rdev); 1457 video_unregister_device(usbvision->rdev);
1458 } else { 1458 } else {
@@ -1464,7 +1464,7 @@ static void usbvision_unregister_video(struct usb_usbvision *usbvision)
1464 // Video Device: 1464 // Video Device:
1465 if (usbvision->vdev) { 1465 if (usbvision->vdev) {
1466 PDEBUG(DBG_PROBE, "unregister /dev/video%d [v4l2]", 1466 PDEBUG(DBG_PROBE, "unregister /dev/video%d [v4l2]",
1467 usbvision->vdev->minor & 0x1f); 1467 usbvision->vdev->num);
1468 if (usbvision->vdev->minor != -1) { 1468 if (usbvision->vdev->minor != -1) {
1469 video_unregister_device(usbvision->vdev); 1469 video_unregister_device(usbvision->vdev);
1470 } else { 1470 } else {
@@ -1490,7 +1490,7 @@ static int __devinit usbvision_register_video(struct usb_usbvision *usbvision)
1490 goto err_exit; 1490 goto err_exit;
1491 } 1491 }
1492 printk(KERN_INFO "USBVision[%d]: registered USBVision Video device /dev/video%d [v4l2]\n", 1492 printk(KERN_INFO "USBVision[%d]: registered USBVision Video device /dev/video%d [v4l2]\n",
1493 usbvision->nr,usbvision->vdev->minor & 0x1f); 1493 usbvision->nr, usbvision->vdev->num);
1494 1494
1495 // Radio Device: 1495 // Radio Device:
1496 if (usbvision_device_data[usbvision->DevModel].Radio) { 1496 if (usbvision_device_data[usbvision->DevModel].Radio) {
@@ -1507,7 +1507,7 @@ static int __devinit usbvision_register_video(struct usb_usbvision *usbvision)
1507 goto err_exit; 1507 goto err_exit;
1508 } 1508 }
1509 printk(KERN_INFO "USBVision[%d]: registered USBVision Radio device /dev/radio%d [v4l2]\n", 1509 printk(KERN_INFO "USBVision[%d]: registered USBVision Radio device /dev/radio%d [v4l2]\n",
1510 usbvision->nr, usbvision->rdev->minor & 0x1f); 1510 usbvision->nr, usbvision->rdev->num);
1511 } 1511 }
1512 // vbi Device: 1512 // vbi Device:
1513 if (usbvision_device_data[usbvision->DevModel].vbi) { 1513 if (usbvision_device_data[usbvision->DevModel].vbi) {
@@ -1523,7 +1523,7 @@ static int __devinit usbvision_register_video(struct usb_usbvision *usbvision)
1523 goto err_exit; 1523 goto err_exit;
1524 } 1524 }
1525 printk(KERN_INFO "USBVision[%d]: registered USBVision VBI device /dev/vbi%d [v4l2] (Not Working Yet!)\n", 1525 printk(KERN_INFO "USBVision[%d]: registered USBVision VBI device /dev/vbi%d [v4l2] (Not Working Yet!)\n",
1526 usbvision->nr,usbvision->vbi->minor & 0x1f); 1526 usbvision->nr, usbvision->vbi->num);
1527 } 1527 }
1528 // all done 1528 // all done
1529 return 0; 1529 return 0;
diff --git a/drivers/media/video/uvc/uvc_v4l2.c b/drivers/media/video/uvc/uvc_v4l2.c
index 78e4c4e09d89..758dfefaba8d 100644
--- a/drivers/media/video/uvc/uvc_v4l2.c
+++ b/drivers/media/video/uvc/uvc_v4l2.c
@@ -464,7 +464,7 @@ static int uvc_v4l2_release(struct inode *inode, struct file *file)
464 return 0; 464 return 0;
465} 465}
466 466
467static int uvc_v4l2_do_ioctl(struct inode *inode, struct file *file, 467static int __uvc_v4l2_do_ioctl(struct file *file,
468 unsigned int cmd, void *arg) 468 unsigned int cmd, void *arg)
469{ 469{
470 struct video_device *vdev = video_devdata(file); 470 struct video_device *vdev = video_devdata(file);
@@ -978,8 +978,8 @@ static int uvc_v4l2_do_ioctl(struct inode *inode, struct file *file,
978 return uvc_xu_ctrl_query(video, arg, 1); 978 return uvc_xu_ctrl_query(video, arg, 1);
979 979
980 default: 980 default:
981 if ((ret = v4l_compat_translate_ioctl(inode, file, cmd, arg, 981 if ((ret = v4l_compat_translate_ioctl(file, cmd, arg,
982 uvc_v4l2_do_ioctl)) == -ENOIOCTLCMD) 982 __uvc_v4l2_do_ioctl)) == -ENOIOCTLCMD)
983 uvc_trace(UVC_TRACE_IOCTL, "Unknown ioctl 0x%08x\n", 983 uvc_trace(UVC_TRACE_IOCTL, "Unknown ioctl 0x%08x\n",
984 cmd); 984 cmd);
985 return ret; 985 return ret;
@@ -988,6 +988,12 @@ static int uvc_v4l2_do_ioctl(struct inode *inode, struct file *file,
988 return ret; 988 return ret;
989} 989}
990 990
991static int uvc_v4l2_do_ioctl(struct inode *inode, struct file *file,
992 unsigned int cmd, void *arg)
993{
994 return __uvc_v4l2_do_ioctl(file, cmd, arg);
995}
996
991static int uvc_v4l2_ioctl(struct inode *inode, struct file *file, 997static int uvc_v4l2_ioctl(struct inode *inode, struct file *file,
992 unsigned int cmd, unsigned long arg) 998 unsigned int cmd, unsigned long arg)
993{ 999{
diff --git a/drivers/media/video/v4l1-compat.c b/drivers/media/video/v4l1-compat.c
index 928cb4037372..f13c0a9d684f 100644
--- a/drivers/media/video/v4l1-compat.c
+++ b/drivers/media/video/v4l1-compat.c
@@ -57,8 +57,7 @@ MODULE_LICENSE("GPL");
57 */ 57 */
58 58
59static int 59static int
60get_v4l_control(struct inode *inode, 60get_v4l_control(struct file *file,
61 struct file *file,
62 int cid, 61 int cid,
63 v4l2_kioctl drv) 62 v4l2_kioctl drv)
64{ 63{
@@ -67,12 +66,12 @@ get_v4l_control(struct inode *inode,
67 int err; 66 int err;
68 67
69 qctrl2.id = cid; 68 qctrl2.id = cid;
70 err = drv(inode, file, VIDIOC_QUERYCTRL, &qctrl2); 69 err = drv(file, VIDIOC_QUERYCTRL, &qctrl2);
71 if (err < 0) 70 if (err < 0)
72 dprintk("VIDIOC_QUERYCTRL: %d\n", err); 71 dprintk("VIDIOC_QUERYCTRL: %d\n", err);
73 if (err == 0 && !(qctrl2.flags & V4L2_CTRL_FLAG_DISABLED)) { 72 if (err == 0 && !(qctrl2.flags & V4L2_CTRL_FLAG_DISABLED)) {
74 ctrl2.id = qctrl2.id; 73 ctrl2.id = qctrl2.id;
75 err = drv(inode, file, VIDIOC_G_CTRL, &ctrl2); 74 err = drv(file, VIDIOC_G_CTRL, &ctrl2);
76 if (err < 0) { 75 if (err < 0) {
77 dprintk("VIDIOC_G_CTRL: %d\n", err); 76 dprintk("VIDIOC_G_CTRL: %d\n", err);
78 return 0; 77 return 0;
@@ -85,8 +84,7 @@ get_v4l_control(struct inode *inode,
85} 84}
86 85
87static int 86static int
88set_v4l_control(struct inode *inode, 87set_v4l_control(struct file *file,
89 struct file *file,
90 int cid, 88 int cid,
91 int value, 89 int value,
92 v4l2_kioctl drv) 90 v4l2_kioctl drv)
@@ -96,7 +94,7 @@ set_v4l_control(struct inode *inode,
96 int err; 94 int err;
97 95
98 qctrl2.id = cid; 96 qctrl2.id = cid;
99 err = drv(inode, file, VIDIOC_QUERYCTRL, &qctrl2); 97 err = drv(file, VIDIOC_QUERYCTRL, &qctrl2);
100 if (err < 0) 98 if (err < 0)
101 dprintk("VIDIOC_QUERYCTRL: %d\n", err); 99 dprintk("VIDIOC_QUERYCTRL: %d\n", err);
102 if (err == 0 && 100 if (err == 0 &&
@@ -114,7 +112,7 @@ set_v4l_control(struct inode *inode,
114 + 32767) 112 + 32767)
115 / 65535; 113 / 65535;
116 ctrl2.value += qctrl2.minimum; 114 ctrl2.value += qctrl2.minimum;
117 err = drv(inode, file, VIDIOC_S_CTRL, &ctrl2); 115 err = drv(file, VIDIOC_S_CTRL, &ctrl2);
118 if (err < 0) 116 if (err < 0)
119 dprintk("VIDIOC_S_CTRL: %d\n", err); 117 dprintk("VIDIOC_S_CTRL: %d\n", err);
120 } 118 }
@@ -222,7 +220,6 @@ static int poll_one(struct file *file, struct poll_wqueues *pwq)
222} 220}
223 221
224static int count_inputs( 222static int count_inputs(
225 struct inode *inode,
226 struct file *file, 223 struct file *file,
227 v4l2_kioctl drv) 224 v4l2_kioctl drv)
228{ 225{
@@ -232,14 +229,13 @@ static int count_inputs(
232 for (i = 0;; i++) { 229 for (i = 0;; i++) {
233 memset(&input2, 0, sizeof(input2)); 230 memset(&input2, 0, sizeof(input2));
234 input2.index = i; 231 input2.index = i;
235 if (0 != drv(inode, file, VIDIOC_ENUMINPUT, &input2)) 232 if (0 != drv(file, VIDIOC_ENUMINPUT, &input2))
236 break; 233 break;
237 } 234 }
238 return i; 235 return i;
239} 236}
240 237
241static int check_size( 238static int check_size(
242 struct inode *inode,
243 struct file *file, 239 struct file *file,
244 v4l2_kioctl drv, 240 v4l2_kioctl drv,
245 int *maxw, 241 int *maxw,
@@ -252,14 +248,14 @@ static int check_size(
252 memset(&fmt2, 0, sizeof(fmt2)); 248 memset(&fmt2, 0, sizeof(fmt2));
253 249
254 desc2.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 250 desc2.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
255 if (0 != drv(inode, file, VIDIOC_ENUM_FMT, &desc2)) 251 if (0 != drv(file, VIDIOC_ENUM_FMT, &desc2))
256 goto done; 252 goto done;
257 253
258 fmt2.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 254 fmt2.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
259 fmt2.fmt.pix.width = 10000; 255 fmt2.fmt.pix.width = 10000;
260 fmt2.fmt.pix.height = 10000; 256 fmt2.fmt.pix.height = 10000;
261 fmt2.fmt.pix.pixelformat = desc2.pixelformat; 257 fmt2.fmt.pix.pixelformat = desc2.pixelformat;
262 if (0 != drv(inode, file, VIDIOC_TRY_FMT, &fmt2)) 258 if (0 != drv(file, VIDIOC_TRY_FMT, &fmt2))
263 goto done; 259 goto done;
264 260
265 *maxw = fmt2.fmt.pix.width; 261 *maxw = fmt2.fmt.pix.width;
@@ -273,7 +269,6 @@ done:
273 269
274static noinline int v4l1_compat_get_capabilities( 270static noinline int v4l1_compat_get_capabilities(
275 struct video_capability *cap, 271 struct video_capability *cap,
276 struct inode *inode,
277 struct file *file, 272 struct file *file,
278 v4l2_kioctl drv) 273 v4l2_kioctl drv)
279{ 274{
@@ -289,13 +284,13 @@ static noinline int v4l1_compat_get_capabilities(
289 memset(cap, 0, sizeof(*cap)); 284 memset(cap, 0, sizeof(*cap));
290 memset(&fbuf, 0, sizeof(fbuf)); 285 memset(&fbuf, 0, sizeof(fbuf));
291 286
292 err = drv(inode, file, VIDIOC_QUERYCAP, cap2); 287 err = drv(file, VIDIOC_QUERYCAP, cap2);
293 if (err < 0) { 288 if (err < 0) {
294 dprintk("VIDIOCGCAP / VIDIOC_QUERYCAP: %d\n", err); 289 dprintk("VIDIOCGCAP / VIDIOC_QUERYCAP: %d\n", err);
295 goto done; 290 goto done;
296 } 291 }
297 if (cap2->capabilities & V4L2_CAP_VIDEO_OVERLAY) { 292 if (cap2->capabilities & V4L2_CAP_VIDEO_OVERLAY) {
298 err = drv(inode, file, VIDIOC_G_FBUF, &fbuf); 293 err = drv(file, VIDIOC_G_FBUF, &fbuf);
299 if (err < 0) { 294 if (err < 0) {
300 dprintk("VIDIOCGCAP / VIDIOC_G_FBUF: %d\n", err); 295 dprintk("VIDIOCGCAP / VIDIOC_G_FBUF: %d\n", err);
301 memset(&fbuf, 0, sizeof(fbuf)); 296 memset(&fbuf, 0, sizeof(fbuf));
@@ -317,8 +312,8 @@ static noinline int v4l1_compat_get_capabilities(
317 if (fbuf.capability & V4L2_FBUF_CAP_LIST_CLIPPING) 312 if (fbuf.capability & V4L2_FBUF_CAP_LIST_CLIPPING)
318 cap->type |= VID_TYPE_CLIPPING; 313 cap->type |= VID_TYPE_CLIPPING;
319 314
320 cap->channels = count_inputs(inode, file, drv); 315 cap->channels = count_inputs(file, drv);
321 check_size(inode, file, drv, 316 check_size(file, drv,
322 &cap->maxwidth, &cap->maxheight); 317 &cap->maxwidth, &cap->maxheight);
323 cap->audios = 0; /* FIXME */ 318 cap->audios = 0; /* FIXME */
324 cap->minwidth = 48; /* FIXME */ 319 cap->minwidth = 48; /* FIXME */
@@ -331,7 +326,6 @@ done:
331 326
332static noinline int v4l1_compat_get_frame_buffer( 327static noinline int v4l1_compat_get_frame_buffer(
333 struct video_buffer *buffer, 328 struct video_buffer *buffer,
334 struct inode *inode,
335 struct file *file, 329 struct file *file,
336 v4l2_kioctl drv) 330 v4l2_kioctl drv)
337{ 331{
@@ -341,7 +335,7 @@ static noinline int v4l1_compat_get_frame_buffer(
341 memset(buffer, 0, sizeof(*buffer)); 335 memset(buffer, 0, sizeof(*buffer));
342 memset(&fbuf, 0, sizeof(fbuf)); 336 memset(&fbuf, 0, sizeof(fbuf));
343 337
344 err = drv(inode, file, VIDIOC_G_FBUF, &fbuf); 338 err = drv(file, VIDIOC_G_FBUF, &fbuf);
345 if (err < 0) { 339 if (err < 0) {
346 dprintk("VIDIOCGFBUF / VIDIOC_G_FBUF: %d\n", err); 340 dprintk("VIDIOCGFBUF / VIDIOC_G_FBUF: %d\n", err);
347 goto done; 341 goto done;
@@ -386,7 +380,6 @@ done:
386 380
387static noinline int v4l1_compat_set_frame_buffer( 381static noinline int v4l1_compat_set_frame_buffer(
388 struct video_buffer *buffer, 382 struct video_buffer *buffer,
389 struct inode *inode,
390 struct file *file, 383 struct file *file,
391 v4l2_kioctl drv) 384 v4l2_kioctl drv)
392{ 385{
@@ -415,7 +408,7 @@ static noinline int v4l1_compat_set_frame_buffer(
415 break; 408 break;
416 } 409 }
417 fbuf.fmt.bytesperline = buffer->bytesperline; 410 fbuf.fmt.bytesperline = buffer->bytesperline;
418 err = drv(inode, file, VIDIOC_S_FBUF, &fbuf); 411 err = drv(file, VIDIOC_S_FBUF, &fbuf);
419 if (err < 0) 412 if (err < 0)
420 dprintk("VIDIOCSFBUF / VIDIOC_S_FBUF: %d\n", err); 413 dprintk("VIDIOCSFBUF / VIDIOC_S_FBUF: %d\n", err);
421 return err; 414 return err;
@@ -423,7 +416,6 @@ static noinline int v4l1_compat_set_frame_buffer(
423 416
424static noinline int v4l1_compat_get_win_cap_dimensions( 417static noinline int v4l1_compat_get_win_cap_dimensions(
425 struct video_window *win, 418 struct video_window *win,
426 struct inode *inode,
427 struct file *file, 419 struct file *file,
428 v4l2_kioctl drv) 420 v4l2_kioctl drv)
429{ 421{
@@ -438,7 +430,7 @@ static noinline int v4l1_compat_get_win_cap_dimensions(
438 memset(win, 0, sizeof(*win)); 430 memset(win, 0, sizeof(*win));
439 431
440 fmt->type = V4L2_BUF_TYPE_VIDEO_OVERLAY; 432 fmt->type = V4L2_BUF_TYPE_VIDEO_OVERLAY;
441 err = drv(inode, file, VIDIOC_G_FMT, fmt); 433 err = drv(file, VIDIOC_G_FMT, fmt);
442 if (err < 0) 434 if (err < 0)
443 dprintk("VIDIOCGWIN / VIDIOC_G_WIN: %d\n", err); 435 dprintk("VIDIOCGWIN / VIDIOC_G_WIN: %d\n", err);
444 if (err == 0) { 436 if (err == 0) {
@@ -453,7 +445,7 @@ static noinline int v4l1_compat_get_win_cap_dimensions(
453 } 445 }
454 446
455 fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 447 fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
456 err = drv(inode, file, VIDIOC_G_FMT, fmt); 448 err = drv(file, VIDIOC_G_FMT, fmt);
457 if (err < 0) { 449 if (err < 0) {
458 dprintk("VIDIOCGWIN / VIDIOC_G_FMT: %d\n", err); 450 dprintk("VIDIOCGWIN / VIDIOC_G_FMT: %d\n", err);
459 goto done; 451 goto done;
@@ -472,7 +464,6 @@ done:
472 464
473static noinline int v4l1_compat_set_win_cap_dimensions( 465static noinline int v4l1_compat_set_win_cap_dimensions(
474 struct video_window *win, 466 struct video_window *win,
475 struct inode *inode,
476 struct file *file, 467 struct file *file,
477 v4l2_kioctl drv) 468 v4l2_kioctl drv)
478{ 469{
@@ -485,8 +476,8 @@ static noinline int v4l1_compat_set_win_cap_dimensions(
485 return err; 476 return err;
486 } 477 }
487 fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 478 fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
488 drv(inode, file, VIDIOC_STREAMOFF, &fmt->type); 479 drv(file, VIDIOC_STREAMOFF, &fmt->type);
489 err1 = drv(inode, file, VIDIOC_G_FMT, fmt); 480 err1 = drv(file, VIDIOC_G_FMT, fmt);
490 if (err1 < 0) 481 if (err1 < 0)
491 dprintk("VIDIOCSWIN / VIDIOC_G_FMT: %d\n", err1); 482 dprintk("VIDIOCSWIN / VIDIOC_G_FMT: %d\n", err1);
492 if (err1 == 0) { 483 if (err1 == 0) {
@@ -494,7 +485,7 @@ static noinline int v4l1_compat_set_win_cap_dimensions(
494 fmt->fmt.pix.height = win->height; 485 fmt->fmt.pix.height = win->height;
495 fmt->fmt.pix.field = V4L2_FIELD_ANY; 486 fmt->fmt.pix.field = V4L2_FIELD_ANY;
496 fmt->fmt.pix.bytesperline = 0; 487 fmt->fmt.pix.bytesperline = 0;
497 err = drv(inode, file, VIDIOC_S_FMT, fmt); 488 err = drv(file, VIDIOC_S_FMT, fmt);
498 if (err < 0) 489 if (err < 0)
499 dprintk("VIDIOCSWIN / VIDIOC_S_FMT #1: %d\n", 490 dprintk("VIDIOCSWIN / VIDIOC_S_FMT #1: %d\n",
500 err); 491 err);
@@ -511,7 +502,7 @@ static noinline int v4l1_compat_set_win_cap_dimensions(
511 fmt->fmt.win.chromakey = win->chromakey; 502 fmt->fmt.win.chromakey = win->chromakey;
512 fmt->fmt.win.clips = (void __user *)win->clips; 503 fmt->fmt.win.clips = (void __user *)win->clips;
513 fmt->fmt.win.clipcount = win->clipcount; 504 fmt->fmt.win.clipcount = win->clipcount;
514 err2 = drv(inode, file, VIDIOC_S_FMT, fmt); 505 err2 = drv(file, VIDIOC_S_FMT, fmt);
515 if (err2 < 0) 506 if (err2 < 0)
516 dprintk("VIDIOCSWIN / VIDIOC_S_FMT #2: %d\n", err2); 507 dprintk("VIDIOCSWIN / VIDIOC_S_FMT #2: %d\n", err2);
517 508
@@ -525,7 +516,6 @@ static noinline int v4l1_compat_set_win_cap_dimensions(
525 516
526static noinline int v4l1_compat_turn_preview_on_off( 517static noinline int v4l1_compat_turn_preview_on_off(
527 int *on, 518 int *on,
528 struct inode *inode,
529 struct file *file, 519 struct file *file,
530 v4l2_kioctl drv) 520 v4l2_kioctl drv)
531{ 521{
@@ -536,9 +526,9 @@ static noinline int v4l1_compat_turn_preview_on_off(
536 /* dirty hack time. But v4l1 has no STREAMOFF 526 /* dirty hack time. But v4l1 has no STREAMOFF
537 * equivalent in the API, and this one at 527 * equivalent in the API, and this one at
538 * least comes close ... */ 528 * least comes close ... */
539 drv(inode, file, VIDIOC_STREAMOFF, &captype); 529 drv(file, VIDIOC_STREAMOFF, &captype);
540 } 530 }
541 err = drv(inode, file, VIDIOC_OVERLAY, on); 531 err = drv(file, VIDIOC_OVERLAY, on);
542 if (err < 0) 532 if (err < 0)
543 dprintk("VIDIOCCAPTURE / VIDIOC_PREVIEW: %d\n", err); 533 dprintk("VIDIOCCAPTURE / VIDIOC_PREVIEW: %d\n", err);
544 return err; 534 return err;
@@ -546,7 +536,6 @@ static noinline int v4l1_compat_turn_preview_on_off(
546 536
547static noinline int v4l1_compat_get_input_info( 537static noinline int v4l1_compat_get_input_info(
548 struct video_channel *chan, 538 struct video_channel *chan,
549 struct inode *inode,
550 struct file *file, 539 struct file *file,
551 v4l2_kioctl drv) 540 v4l2_kioctl drv)
552{ 541{
@@ -556,7 +545,7 @@ static noinline int v4l1_compat_get_input_info(
556 545
557 memset(&input2, 0, sizeof(input2)); 546 memset(&input2, 0, sizeof(input2));
558 input2.index = chan->channel; 547 input2.index = chan->channel;
559 err = drv(inode, file, VIDIOC_ENUMINPUT, &input2); 548 err = drv(file, VIDIOC_ENUMINPUT, &input2);
560 if (err < 0) { 549 if (err < 0) {
561 dprintk("VIDIOCGCHAN / VIDIOC_ENUMINPUT: " 550 dprintk("VIDIOCGCHAN / VIDIOC_ENUMINPUT: "
562 "channel=%d err=%d\n", chan->channel, err); 551 "channel=%d err=%d\n", chan->channel, err);
@@ -578,7 +567,7 @@ static noinline int v4l1_compat_get_input_info(
578 break; 567 break;
579 } 568 }
580 chan->norm = 0; 569 chan->norm = 0;
581 err = drv(inode, file, VIDIOC_G_STD, &sid); 570 err = drv(file, VIDIOC_G_STD, &sid);
582 if (err < 0) 571 if (err < 0)
583 dprintk("VIDIOCGCHAN / VIDIOC_G_STD: %d\n", err); 572 dprintk("VIDIOCGCHAN / VIDIOC_G_STD: %d\n", err);
584 if (err == 0) { 573 if (err == 0) {
@@ -595,14 +584,13 @@ done:
595 584
596static noinline int v4l1_compat_set_input( 585static noinline int v4l1_compat_set_input(
597 struct video_channel *chan, 586 struct video_channel *chan,
598 struct inode *inode,
599 struct file *file, 587 struct file *file,
600 v4l2_kioctl drv) 588 v4l2_kioctl drv)
601{ 589{
602 int err; 590 int err;
603 v4l2_std_id sid = 0; 591 v4l2_std_id sid = 0;
604 592
605 err = drv(inode, file, VIDIOC_S_INPUT, &chan->channel); 593 err = drv(file, VIDIOC_S_INPUT, &chan->channel);
606 if (err < 0) 594 if (err < 0)
607 dprintk("VIDIOCSCHAN / VIDIOC_S_INPUT: %d\n", err); 595 dprintk("VIDIOCSCHAN / VIDIOC_S_INPUT: %d\n", err);
608 switch (chan->norm) { 596 switch (chan->norm) {
@@ -617,7 +605,7 @@ static noinline int v4l1_compat_set_input(
617 break; 605 break;
618 } 606 }
619 if (0 != sid) { 607 if (0 != sid) {
620 err = drv(inode, file, VIDIOC_S_STD, &sid); 608 err = drv(file, VIDIOC_S_STD, &sid);
621 if (err < 0) 609 if (err < 0)
622 dprintk("VIDIOCSCHAN / VIDIOC_S_STD: %d\n", err); 610 dprintk("VIDIOCSCHAN / VIDIOC_S_STD: %d\n", err);
623 } 611 }
@@ -626,7 +614,6 @@ static noinline int v4l1_compat_set_input(
626 614
627static noinline int v4l1_compat_get_picture( 615static noinline int v4l1_compat_get_picture(
628 struct video_picture *pict, 616 struct video_picture *pict,
629 struct inode *inode,
630 struct file *file, 617 struct file *file,
631 v4l2_kioctl drv) 618 v4l2_kioctl drv)
632{ 619{
@@ -639,19 +626,19 @@ static noinline int v4l1_compat_get_picture(
639 return err; 626 return err;
640 } 627 }
641 628
642 pict->brightness = get_v4l_control(inode, file, 629 pict->brightness = get_v4l_control(file,
643 V4L2_CID_BRIGHTNESS, drv); 630 V4L2_CID_BRIGHTNESS, drv);
644 pict->hue = get_v4l_control(inode, file, 631 pict->hue = get_v4l_control(file,
645 V4L2_CID_HUE, drv); 632 V4L2_CID_HUE, drv);
646 pict->contrast = get_v4l_control(inode, file, 633 pict->contrast = get_v4l_control(file,
647 V4L2_CID_CONTRAST, drv); 634 V4L2_CID_CONTRAST, drv);
648 pict->colour = get_v4l_control(inode, file, 635 pict->colour = get_v4l_control(file,
649 V4L2_CID_SATURATION, drv); 636 V4L2_CID_SATURATION, drv);
650 pict->whiteness = get_v4l_control(inode, file, 637 pict->whiteness = get_v4l_control(file,
651 V4L2_CID_WHITENESS, drv); 638 V4L2_CID_WHITENESS, drv);
652 639
653 fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 640 fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
654 err = drv(inode, file, VIDIOC_G_FMT, fmt); 641 err = drv(file, VIDIOC_G_FMT, fmt);
655 if (err < 0) { 642 if (err < 0) {
656 dprintk("VIDIOCGPICT / VIDIOC_G_FMT: %d\n", err); 643 dprintk("VIDIOCGPICT / VIDIOC_G_FMT: %d\n", err);
657 goto done; 644 goto done;
@@ -669,7 +656,6 @@ done:
669 656
670static noinline int v4l1_compat_set_picture( 657static noinline int v4l1_compat_set_picture(
671 struct video_picture *pict, 658 struct video_picture *pict,
672 struct inode *inode,
673 struct file *file, 659 struct file *file,
674 v4l2_kioctl drv) 660 v4l2_kioctl drv)
675{ 661{
@@ -685,15 +671,15 @@ static noinline int v4l1_compat_set_picture(
685 } 671 }
686 memset(&fbuf, 0, sizeof(fbuf)); 672 memset(&fbuf, 0, sizeof(fbuf));
687 673
688 set_v4l_control(inode, file, 674 set_v4l_control(file,
689 V4L2_CID_BRIGHTNESS, pict->brightness, drv); 675 V4L2_CID_BRIGHTNESS, pict->brightness, drv);
690 set_v4l_control(inode, file, 676 set_v4l_control(file,
691 V4L2_CID_HUE, pict->hue, drv); 677 V4L2_CID_HUE, pict->hue, drv);
692 set_v4l_control(inode, file, 678 set_v4l_control(file,
693 V4L2_CID_CONTRAST, pict->contrast, drv); 679 V4L2_CID_CONTRAST, pict->contrast, drv);
694 set_v4l_control(inode, file, 680 set_v4l_control(file,
695 V4L2_CID_SATURATION, pict->colour, drv); 681 V4L2_CID_SATURATION, pict->colour, drv);
696 set_v4l_control(inode, file, 682 set_v4l_control(file,
697 V4L2_CID_WHITENESS, pict->whiteness, drv); 683 V4L2_CID_WHITENESS, pict->whiteness, drv);
698 /* 684 /*
699 * V4L1 uses this ioctl to set both memory capture and overlay 685 * V4L1 uses this ioctl to set both memory capture and overlay
@@ -703,7 +689,7 @@ static noinline int v4l1_compat_set_picture(
703 */ 689 */
704 690
705 fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 691 fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
706 err = drv(inode, file, VIDIOC_G_FMT, fmt); 692 err = drv(file, VIDIOC_G_FMT, fmt);
707 /* If VIDIOC_G_FMT failed, then the driver likely doesn't 693 /* If VIDIOC_G_FMT failed, then the driver likely doesn't
708 support memory capture. Trying to set the memory capture 694 support memory capture. Trying to set the memory capture
709 parameters would be pointless. */ 695 parameters would be pointless. */
@@ -714,13 +700,13 @@ static noinline int v4l1_compat_set_picture(
714 palette_to_pixelformat(pict->palette)) { 700 palette_to_pixelformat(pict->palette)) {
715 fmt->fmt.pix.pixelformat = palette_to_pixelformat( 701 fmt->fmt.pix.pixelformat = palette_to_pixelformat(
716 pict->palette); 702 pict->palette);
717 mem_err = drv(inode, file, VIDIOC_S_FMT, fmt); 703 mem_err = drv(file, VIDIOC_S_FMT, fmt);
718 if (mem_err < 0) 704 if (mem_err < 0)
719 dprintk("VIDIOCSPICT / VIDIOC_S_FMT: %d\n", 705 dprintk("VIDIOCSPICT / VIDIOC_S_FMT: %d\n",
720 mem_err); 706 mem_err);
721 } 707 }
722 708
723 err = drv(inode, file, VIDIOC_G_FBUF, &fbuf); 709 err = drv(file, VIDIOC_G_FBUF, &fbuf);
724 /* If VIDIOC_G_FBUF failed, then the driver likely doesn't 710 /* If VIDIOC_G_FBUF failed, then the driver likely doesn't
725 support overlay. Trying to set the overlay parameters 711 support overlay. Trying to set the overlay parameters
726 would be quite pointless. */ 712 would be quite pointless. */
@@ -731,7 +717,7 @@ static noinline int v4l1_compat_set_picture(
731 palette_to_pixelformat(pict->palette)) { 717 palette_to_pixelformat(pict->palette)) {
732 fbuf.fmt.pixelformat = palette_to_pixelformat( 718 fbuf.fmt.pixelformat = palette_to_pixelformat(
733 pict->palette); 719 pict->palette);
734 ovl_err = drv(inode, file, VIDIOC_S_FBUF, &fbuf); 720 ovl_err = drv(file, VIDIOC_S_FBUF, &fbuf);
735 if (ovl_err < 0) 721 if (ovl_err < 0)
736 dprintk("VIDIOCSPICT / VIDIOC_S_FBUF: %d\n", 722 dprintk("VIDIOCSPICT / VIDIOC_S_FBUF: %d\n",
737 ovl_err); 723 ovl_err);
@@ -752,7 +738,6 @@ static noinline int v4l1_compat_set_picture(
752 738
753static noinline int v4l1_compat_get_tuner( 739static noinline int v4l1_compat_get_tuner(
754 struct video_tuner *tun, 740 struct video_tuner *tun,
755 struct inode *inode,
756 struct file *file, 741 struct file *file,
757 v4l2_kioctl drv) 742 v4l2_kioctl drv)
758{ 743{
@@ -762,7 +747,7 @@ static noinline int v4l1_compat_get_tuner(
762 v4l2_std_id sid; 747 v4l2_std_id sid;
763 748
764 memset(&tun2, 0, sizeof(tun2)); 749 memset(&tun2, 0, sizeof(tun2));
765 err = drv(inode, file, VIDIOC_G_TUNER, &tun2); 750 err = drv(file, VIDIOC_G_TUNER, &tun2);
766 if (err < 0) { 751 if (err < 0) {
767 dprintk("VIDIOCGTUNER / VIDIOC_G_TUNER: %d\n", err); 752 dprintk("VIDIOCGTUNER / VIDIOC_G_TUNER: %d\n", err);
768 goto done; 753 goto done;
@@ -778,7 +763,7 @@ static noinline int v4l1_compat_get_tuner(
778 for (i = 0; i < 64; i++) { 763 for (i = 0; i < 64; i++) {
779 memset(&std2, 0, sizeof(std2)); 764 memset(&std2, 0, sizeof(std2));
780 std2.index = i; 765 std2.index = i;
781 if (0 != drv(inode, file, VIDIOC_ENUMSTD, &std2)) 766 if (0 != drv(file, VIDIOC_ENUMSTD, &std2))
782 break; 767 break;
783 if (std2.id & V4L2_STD_PAL) 768 if (std2.id & V4L2_STD_PAL)
784 tun->flags |= VIDEO_TUNER_PAL; 769 tun->flags |= VIDEO_TUNER_PAL;
@@ -788,7 +773,7 @@ static noinline int v4l1_compat_get_tuner(
788 tun->flags |= VIDEO_TUNER_SECAM; 773 tun->flags |= VIDEO_TUNER_SECAM;
789 } 774 }
790 775
791 err = drv(inode, file, VIDIOC_G_STD, &sid); 776 err = drv(file, VIDIOC_G_STD, &sid);
792 if (err < 0) 777 if (err < 0)
793 dprintk("VIDIOCGTUNER / VIDIOC_G_STD: %d\n", err); 778 dprintk("VIDIOCGTUNER / VIDIOC_G_STD: %d\n", err);
794 if (err == 0) { 779 if (err == 0) {
@@ -811,7 +796,6 @@ done:
811 796
812static noinline int v4l1_compat_select_tuner( 797static noinline int v4l1_compat_select_tuner(
813 struct video_tuner *tun, 798 struct video_tuner *tun,
814 struct inode *inode,
815 struct file *file, 799 struct file *file,
816 v4l2_kioctl drv) 800 v4l2_kioctl drv)
817{ 801{
@@ -821,7 +805,7 @@ static noinline int v4l1_compat_select_tuner(
821 805
822 t.index = tun->tuner; 806 t.index = tun->tuner;
823 807
824 err = drv(inode, file, VIDIOC_S_INPUT, &t); 808 err = drv(file, VIDIOC_S_INPUT, &t);
825 if (err < 0) 809 if (err < 0)
826 dprintk("VIDIOCSTUNER / VIDIOC_S_INPUT: %d\n", err); 810 dprintk("VIDIOCSTUNER / VIDIOC_S_INPUT: %d\n", err);
827 return err; 811 return err;
@@ -829,7 +813,6 @@ static noinline int v4l1_compat_select_tuner(
829 813
830static noinline int v4l1_compat_get_frequency( 814static noinline int v4l1_compat_get_frequency(
831 unsigned long *freq, 815 unsigned long *freq,
832 struct inode *inode,
833 struct file *file, 816 struct file *file,
834 v4l2_kioctl drv) 817 v4l2_kioctl drv)
835{ 818{
@@ -838,7 +821,7 @@ static noinline int v4l1_compat_get_frequency(
838 memset(&freq2, 0, sizeof(freq2)); 821 memset(&freq2, 0, sizeof(freq2));
839 822
840 freq2.tuner = 0; 823 freq2.tuner = 0;
841 err = drv(inode, file, VIDIOC_G_FREQUENCY, &freq2); 824 err = drv(file, VIDIOC_G_FREQUENCY, &freq2);
842 if (err < 0) 825 if (err < 0)
843 dprintk("VIDIOCGFREQ / VIDIOC_G_FREQUENCY: %d\n", err); 826 dprintk("VIDIOCGFREQ / VIDIOC_G_FREQUENCY: %d\n", err);
844 if (0 == err) 827 if (0 == err)
@@ -848,7 +831,6 @@ static noinline int v4l1_compat_get_frequency(
848 831
849static noinline int v4l1_compat_set_frequency( 832static noinline int v4l1_compat_set_frequency(
850 unsigned long *freq, 833 unsigned long *freq,
851 struct inode *inode,
852 struct file *file, 834 struct file *file,
853 v4l2_kioctl drv) 835 v4l2_kioctl drv)
854{ 836{
@@ -856,9 +838,9 @@ static noinline int v4l1_compat_set_frequency(
856 struct v4l2_frequency freq2; 838 struct v4l2_frequency freq2;
857 memset(&freq2, 0, sizeof(freq2)); 839 memset(&freq2, 0, sizeof(freq2));
858 840
859 drv(inode, file, VIDIOC_G_FREQUENCY, &freq2); 841 drv(file, VIDIOC_G_FREQUENCY, &freq2);
860 freq2.frequency = *freq; 842 freq2.frequency = *freq;
861 err = drv(inode, file, VIDIOC_S_FREQUENCY, &freq2); 843 err = drv(file, VIDIOC_S_FREQUENCY, &freq2);
862 if (err < 0) 844 if (err < 0)
863 dprintk("VIDIOCSFREQ / VIDIOC_S_FREQUENCY: %d\n", err); 845 dprintk("VIDIOCSFREQ / VIDIOC_S_FREQUENCY: %d\n", err);
864 return err; 846 return err;
@@ -866,7 +848,6 @@ static noinline int v4l1_compat_set_frequency(
866 848
867static noinline int v4l1_compat_get_audio( 849static noinline int v4l1_compat_get_audio(
868 struct video_audio *aud, 850 struct video_audio *aud,
869 struct inode *inode,
870 struct file *file, 851 struct file *file,
871 v4l2_kioctl drv) 852 v4l2_kioctl drv)
872{ 853{
@@ -876,7 +857,7 @@ static noinline int v4l1_compat_get_audio(
876 struct v4l2_tuner tun2; 857 struct v4l2_tuner tun2;
877 memset(&aud2, 0, sizeof(aud2)); 858 memset(&aud2, 0, sizeof(aud2));
878 859
879 err = drv(inode, file, VIDIOC_G_AUDIO, &aud2); 860 err = drv(file, VIDIOC_G_AUDIO, &aud2);
880 if (err < 0) { 861 if (err < 0) {
881 dprintk("VIDIOCGAUDIO / VIDIOC_G_AUDIO: %d\n", err); 862 dprintk("VIDIOCGAUDIO / VIDIOC_G_AUDIO: %d\n", err);
882 goto done; 863 goto done;
@@ -886,27 +867,27 @@ static noinline int v4l1_compat_get_audio(
886 aud->name[sizeof(aud->name) - 1] = 0; 867 aud->name[sizeof(aud->name) - 1] = 0;
887 aud->audio = aud2.index; 868 aud->audio = aud2.index;
888 aud->flags = 0; 869 aud->flags = 0;
889 i = get_v4l_control(inode, file, V4L2_CID_AUDIO_VOLUME, drv); 870 i = get_v4l_control(file, V4L2_CID_AUDIO_VOLUME, drv);
890 if (i >= 0) { 871 if (i >= 0) {
891 aud->volume = i; 872 aud->volume = i;
892 aud->flags |= VIDEO_AUDIO_VOLUME; 873 aud->flags |= VIDEO_AUDIO_VOLUME;
893 } 874 }
894 i = get_v4l_control(inode, file, V4L2_CID_AUDIO_BASS, drv); 875 i = get_v4l_control(file, V4L2_CID_AUDIO_BASS, drv);
895 if (i >= 0) { 876 if (i >= 0) {
896 aud->bass = i; 877 aud->bass = i;
897 aud->flags |= VIDEO_AUDIO_BASS; 878 aud->flags |= VIDEO_AUDIO_BASS;
898 } 879 }
899 i = get_v4l_control(inode, file, V4L2_CID_AUDIO_TREBLE, drv); 880 i = get_v4l_control(file, V4L2_CID_AUDIO_TREBLE, drv);
900 if (i >= 0) { 881 if (i >= 0) {
901 aud->treble = i; 882 aud->treble = i;
902 aud->flags |= VIDEO_AUDIO_TREBLE; 883 aud->flags |= VIDEO_AUDIO_TREBLE;
903 } 884 }
904 i = get_v4l_control(inode, file, V4L2_CID_AUDIO_BALANCE, drv); 885 i = get_v4l_control(file, V4L2_CID_AUDIO_BALANCE, drv);
905 if (i >= 0) { 886 if (i >= 0) {
906 aud->balance = i; 887 aud->balance = i;
907 aud->flags |= VIDEO_AUDIO_BALANCE; 888 aud->flags |= VIDEO_AUDIO_BALANCE;
908 } 889 }
909 i = get_v4l_control(inode, file, V4L2_CID_AUDIO_MUTE, drv); 890 i = get_v4l_control(file, V4L2_CID_AUDIO_MUTE, drv);
910 if (i >= 0) { 891 if (i >= 0) {
911 if (i) 892 if (i)
912 aud->flags |= VIDEO_AUDIO_MUTE; 893 aud->flags |= VIDEO_AUDIO_MUTE;
@@ -914,13 +895,13 @@ static noinline int v4l1_compat_get_audio(
914 } 895 }
915 aud->step = 1; 896 aud->step = 1;
916 qctrl2.id = V4L2_CID_AUDIO_VOLUME; 897 qctrl2.id = V4L2_CID_AUDIO_VOLUME;
917 if (drv(inode, file, VIDIOC_QUERYCTRL, &qctrl2) == 0 && 898 if (drv(file, VIDIOC_QUERYCTRL, &qctrl2) == 0 &&
918 !(qctrl2.flags & V4L2_CTRL_FLAG_DISABLED)) 899 !(qctrl2.flags & V4L2_CTRL_FLAG_DISABLED))
919 aud->step = qctrl2.step; 900 aud->step = qctrl2.step;
920 aud->mode = 0; 901 aud->mode = 0;
921 902
922 memset(&tun2, 0, sizeof(tun2)); 903 memset(&tun2, 0, sizeof(tun2));
923 err = drv(inode, file, VIDIOC_G_TUNER, &tun2); 904 err = drv(file, VIDIOC_G_TUNER, &tun2);
924 if (err < 0) { 905 if (err < 0) {
925 dprintk("VIDIOCGAUDIO / VIDIOC_G_TUNER: %d\n", err); 906 dprintk("VIDIOCGAUDIO / VIDIOC_G_TUNER: %d\n", err);
926 err = 0; 907 err = 0;
@@ -939,7 +920,6 @@ done:
939 920
940static noinline int v4l1_compat_set_audio( 921static noinline int v4l1_compat_set_audio(
941 struct video_audio *aud, 922 struct video_audio *aud,
942 struct inode *inode,
943 struct file *file, 923 struct file *file,
944 v4l2_kioctl drv) 924 v4l2_kioctl drv)
945{ 925{
@@ -951,24 +931,24 @@ static noinline int v4l1_compat_set_audio(
951 memset(&tun2, 0, sizeof(tun2)); 931 memset(&tun2, 0, sizeof(tun2));
952 932
953 aud2.index = aud->audio; 933 aud2.index = aud->audio;
954 err = drv(inode, file, VIDIOC_S_AUDIO, &aud2); 934 err = drv(file, VIDIOC_S_AUDIO, &aud2);
955 if (err < 0) { 935 if (err < 0) {
956 dprintk("VIDIOCSAUDIO / VIDIOC_S_AUDIO: %d\n", err); 936 dprintk("VIDIOCSAUDIO / VIDIOC_S_AUDIO: %d\n", err);
957 goto done; 937 goto done;
958 } 938 }
959 939
960 set_v4l_control(inode, file, V4L2_CID_AUDIO_VOLUME, 940 set_v4l_control(file, V4L2_CID_AUDIO_VOLUME,
961 aud->volume, drv); 941 aud->volume, drv);
962 set_v4l_control(inode, file, V4L2_CID_AUDIO_BASS, 942 set_v4l_control(file, V4L2_CID_AUDIO_BASS,
963 aud->bass, drv); 943 aud->bass, drv);
964 set_v4l_control(inode, file, V4L2_CID_AUDIO_TREBLE, 944 set_v4l_control(file, V4L2_CID_AUDIO_TREBLE,
965 aud->treble, drv); 945 aud->treble, drv);
966 set_v4l_control(inode, file, V4L2_CID_AUDIO_BALANCE, 946 set_v4l_control(file, V4L2_CID_AUDIO_BALANCE,
967 aud->balance, drv); 947 aud->balance, drv);
968 set_v4l_control(inode, file, V4L2_CID_AUDIO_MUTE, 948 set_v4l_control(file, V4L2_CID_AUDIO_MUTE,
969 !!(aud->flags & VIDEO_AUDIO_MUTE), drv); 949 !!(aud->flags & VIDEO_AUDIO_MUTE), drv);
970 950
971 err = drv(inode, file, VIDIOC_G_TUNER, &tun2); 951 err = drv(file, VIDIOC_G_TUNER, &tun2);
972 if (err < 0) 952 if (err < 0)
973 dprintk("VIDIOCSAUDIO / VIDIOC_G_TUNER: %d\n", err); 953 dprintk("VIDIOCSAUDIO / VIDIOC_G_TUNER: %d\n", err);
974 if (err == 0) { 954 if (err == 0) {
@@ -985,7 +965,7 @@ static noinline int v4l1_compat_set_audio(
985 tun2.audmode = V4L2_TUNER_MODE_LANG2; 965 tun2.audmode = V4L2_TUNER_MODE_LANG2;
986 break; 966 break;
987 } 967 }
988 err = drv(inode, file, VIDIOC_S_TUNER, &tun2); 968 err = drv(file, VIDIOC_S_TUNER, &tun2);
989 if (err < 0) 969 if (err < 0)
990 dprintk("VIDIOCSAUDIO / VIDIOC_S_TUNER: %d\n", err); 970 dprintk("VIDIOCSAUDIO / VIDIOC_S_TUNER: %d\n", err);
991 } 971 }
@@ -996,7 +976,6 @@ done:
996 976
997static noinline int v4l1_compat_capture_frame( 977static noinline int v4l1_compat_capture_frame(
998 struct video_mmap *mm, 978 struct video_mmap *mm,
999 struct inode *inode,
1000 struct file *file, 979 struct file *file,
1001 v4l2_kioctl drv) 980 v4l2_kioctl drv)
1002{ 981{
@@ -1013,7 +992,7 @@ static noinline int v4l1_compat_capture_frame(
1013 memset(&buf, 0, sizeof(buf)); 992 memset(&buf, 0, sizeof(buf));
1014 993
1015 fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 994 fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
1016 err = drv(inode, file, VIDIOC_G_FMT, fmt); 995 err = drv(file, VIDIOC_G_FMT, fmt);
1017 if (err < 0) { 996 if (err < 0) {
1018 dprintk("VIDIOCMCAPTURE / VIDIOC_G_FMT: %d\n", err); 997 dprintk("VIDIOCMCAPTURE / VIDIOC_G_FMT: %d\n", err);
1019 goto done; 998 goto done;
@@ -1029,7 +1008,7 @@ static noinline int v4l1_compat_capture_frame(
1029 palette_to_pixelformat(mm->format); 1008 palette_to_pixelformat(mm->format);
1030 fmt->fmt.pix.field = V4L2_FIELD_ANY; 1009 fmt->fmt.pix.field = V4L2_FIELD_ANY;
1031 fmt->fmt.pix.bytesperline = 0; 1010 fmt->fmt.pix.bytesperline = 0;
1032 err = drv(inode, file, VIDIOC_S_FMT, fmt); 1011 err = drv(file, VIDIOC_S_FMT, fmt);
1033 if (err < 0) { 1012 if (err < 0) {
1034 dprintk("VIDIOCMCAPTURE / VIDIOC_S_FMT: %d\n", err); 1013 dprintk("VIDIOCMCAPTURE / VIDIOC_S_FMT: %d\n", err);
1035 goto done; 1014 goto done;
@@ -1037,17 +1016,17 @@ static noinline int v4l1_compat_capture_frame(
1037 } 1016 }
1038 buf.index = mm->frame; 1017 buf.index = mm->frame;
1039 buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 1018 buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
1040 err = drv(inode, file, VIDIOC_QUERYBUF, &buf); 1019 err = drv(file, VIDIOC_QUERYBUF, &buf);
1041 if (err < 0) { 1020 if (err < 0) {
1042 dprintk("VIDIOCMCAPTURE / VIDIOC_QUERYBUF: %d\n", err); 1021 dprintk("VIDIOCMCAPTURE / VIDIOC_QUERYBUF: %d\n", err);
1043 goto done; 1022 goto done;
1044 } 1023 }
1045 err = drv(inode, file, VIDIOC_QBUF, &buf); 1024 err = drv(file, VIDIOC_QBUF, &buf);
1046 if (err < 0) { 1025 if (err < 0) {
1047 dprintk("VIDIOCMCAPTURE / VIDIOC_QBUF: %d\n", err); 1026 dprintk("VIDIOCMCAPTURE / VIDIOC_QBUF: %d\n", err);
1048 goto done; 1027 goto done;
1049 } 1028 }
1050 err = drv(inode, file, VIDIOC_STREAMON, &captype); 1029 err = drv(file, VIDIOC_STREAMON, &captype);
1051 if (err < 0) 1030 if (err < 0)
1052 dprintk("VIDIOCMCAPTURE / VIDIOC_STREAMON: %d\n", err); 1031 dprintk("VIDIOCMCAPTURE / VIDIOC_STREAMON: %d\n", err);
1053done: 1032done:
@@ -1057,7 +1036,6 @@ done:
1057 1036
1058static noinline int v4l1_compat_sync( 1037static noinline int v4l1_compat_sync(
1059 int *i, 1038 int *i,
1060 struct inode *inode,
1061 struct file *file, 1039 struct file *file,
1062 v4l2_kioctl drv) 1040 v4l2_kioctl drv)
1063{ 1041{
@@ -1069,7 +1047,7 @@ static noinline int v4l1_compat_sync(
1069 memset(&buf, 0, sizeof(buf)); 1047 memset(&buf, 0, sizeof(buf));
1070 buf.index = *i; 1048 buf.index = *i;
1071 buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 1049 buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
1072 err = drv(inode, file, VIDIOC_QUERYBUF, &buf); 1050 err = drv(file, VIDIOC_QUERYBUF, &buf);
1073 if (err < 0) { 1051 if (err < 0) {
1074 /* No such buffer */ 1052 /* No such buffer */
1075 dprintk("VIDIOCSYNC / VIDIOC_QUERYBUF: %d\n", err); 1053 dprintk("VIDIOCSYNC / VIDIOC_QUERYBUF: %d\n", err);
@@ -1082,7 +1060,7 @@ static noinline int v4l1_compat_sync(
1082 } 1060 }
1083 1061
1084 /* make sure capture actually runs so we don't block forever */ 1062 /* make sure capture actually runs so we don't block forever */
1085 err = drv(inode, file, VIDIOC_STREAMON, &captype); 1063 err = drv(file, VIDIOC_STREAMON, &captype);
1086 if (err < 0) { 1064 if (err < 0) {
1087 dprintk("VIDIOCSYNC / VIDIOC_STREAMON: %d\n", err); 1065 dprintk("VIDIOCSYNC / VIDIOC_STREAMON: %d\n", err);
1088 goto done; 1066 goto done;
@@ -1096,7 +1074,7 @@ static noinline int v4l1_compat_sync(
1096 if (err < 0 || /* error or sleep was interrupted */ 1074 if (err < 0 || /* error or sleep was interrupted */
1097 err == 0) /* timeout? Shouldn't occur. */ 1075 err == 0) /* timeout? Shouldn't occur. */
1098 break; 1076 break;
1099 err = drv(inode, file, VIDIOC_QUERYBUF, &buf); 1077 err = drv(file, VIDIOC_QUERYBUF, &buf);
1100 if (err < 0) 1078 if (err < 0)
1101 dprintk("VIDIOCSYNC / VIDIOC_QUERYBUF: %d\n", err); 1079 dprintk("VIDIOCSYNC / VIDIOC_QUERYBUF: %d\n", err);
1102 } 1080 }
@@ -1104,7 +1082,7 @@ static noinline int v4l1_compat_sync(
1104 if (!(buf.flags & V4L2_BUF_FLAG_DONE)) /* not done */ 1082 if (!(buf.flags & V4L2_BUF_FLAG_DONE)) /* not done */
1105 goto done; 1083 goto done;
1106 do { 1084 do {
1107 err = drv(inode, file, VIDIOC_DQBUF, &buf); 1085 err = drv(file, VIDIOC_DQBUF, &buf);
1108 if (err < 0) 1086 if (err < 0)
1109 dprintk("VIDIOCSYNC / VIDIOC_DQBUF: %d\n", err); 1087 dprintk("VIDIOCSYNC / VIDIOC_DQBUF: %d\n", err);
1110 } while (err == 0 && buf.index != *i); 1088 } while (err == 0 && buf.index != *i);
@@ -1114,7 +1092,6 @@ done:
1114 1092
1115static noinline int v4l1_compat_get_vbi_format( 1093static noinline int v4l1_compat_get_vbi_format(
1116 struct vbi_format *fmt, 1094 struct vbi_format *fmt,
1117 struct inode *inode,
1118 struct file *file, 1095 struct file *file,
1119 v4l2_kioctl drv) 1096 v4l2_kioctl drv)
1120{ 1097{
@@ -1128,7 +1105,7 @@ static noinline int v4l1_compat_get_vbi_format(
1128 } 1105 }
1129 fmt2->type = V4L2_BUF_TYPE_VBI_CAPTURE; 1106 fmt2->type = V4L2_BUF_TYPE_VBI_CAPTURE;
1130 1107
1131 err = drv(inode, file, VIDIOC_G_FMT, fmt2); 1108 err = drv(file, VIDIOC_G_FMT, fmt2);
1132 if (err < 0) { 1109 if (err < 0) {
1133 dprintk("VIDIOCGVBIFMT / VIDIOC_G_FMT: %d\n", err); 1110 dprintk("VIDIOCGVBIFMT / VIDIOC_G_FMT: %d\n", err);
1134 goto done; 1111 goto done;
@@ -1153,7 +1130,6 @@ done:
1153 1130
1154static noinline int v4l1_compat_set_vbi_format( 1131static noinline int v4l1_compat_set_vbi_format(
1155 struct vbi_format *fmt, 1132 struct vbi_format *fmt,
1156 struct inode *inode,
1157 struct file *file, 1133 struct file *file,
1158 v4l2_kioctl drv) 1134 v4l2_kioctl drv)
1159{ 1135{
@@ -1179,7 +1155,7 @@ static noinline int v4l1_compat_set_vbi_format(
1179 fmt2->fmt.vbi.start[1] = fmt->start[1]; 1155 fmt2->fmt.vbi.start[1] = fmt->start[1];
1180 fmt2->fmt.vbi.count[1] = fmt->count[1]; 1156 fmt2->fmt.vbi.count[1] = fmt->count[1];
1181 fmt2->fmt.vbi.flags = fmt->flags; 1157 fmt2->fmt.vbi.flags = fmt->flags;
1182 err = drv(inode, file, VIDIOC_TRY_FMT, fmt2); 1158 err = drv(file, VIDIOC_TRY_FMT, fmt2);
1183 if (err < 0) { 1159 if (err < 0) {
1184 dprintk("VIDIOCSVBIFMT / VIDIOC_TRY_FMT: %d\n", err); 1160 dprintk("VIDIOCSVBIFMT / VIDIOC_TRY_FMT: %d\n", err);
1185 goto done; 1161 goto done;
@@ -1196,7 +1172,7 @@ static noinline int v4l1_compat_set_vbi_format(
1196 err = -EINVAL; 1172 err = -EINVAL;
1197 goto done; 1173 goto done;
1198 } 1174 }
1199 err = drv(inode, file, VIDIOC_S_FMT, fmt2); 1175 err = drv(file, VIDIOC_S_FMT, fmt2);
1200 if (err < 0) 1176 if (err < 0)
1201 dprintk("VIDIOCSVBIFMT / VIDIOC_S_FMT: %d\n", err); 1177 dprintk("VIDIOCSVBIFMT / VIDIOC_S_FMT: %d\n", err);
1202done: 1178done:
@@ -1208,8 +1184,7 @@ done:
1208 * This function is exported. 1184 * This function is exported.
1209 */ 1185 */
1210int 1186int
1211v4l_compat_translate_ioctl(struct inode *inode, 1187v4l_compat_translate_ioctl(struct file *file,
1212 struct file *file,
1213 int cmd, 1188 int cmd,
1214 void *arg, 1189 void *arg,
1215 v4l2_kioctl drv) 1190 v4l2_kioctl drv)
@@ -1218,64 +1193,64 @@ v4l_compat_translate_ioctl(struct inode *inode,
1218 1193
1219 switch (cmd) { 1194 switch (cmd) {
1220 case VIDIOCGCAP: /* capability */ 1195 case VIDIOCGCAP: /* capability */
1221 err = v4l1_compat_get_capabilities(arg, inode, file, drv); 1196 err = v4l1_compat_get_capabilities(arg, file, drv);
1222 break; 1197 break;
1223 case VIDIOCGFBUF: /* get frame buffer */ 1198 case VIDIOCGFBUF: /* get frame buffer */
1224 err = v4l1_compat_get_frame_buffer(arg, inode, file, drv); 1199 err = v4l1_compat_get_frame_buffer(arg, file, drv);
1225 break; 1200 break;
1226 case VIDIOCSFBUF: /* set frame buffer */ 1201 case VIDIOCSFBUF: /* set frame buffer */
1227 err = v4l1_compat_set_frame_buffer(arg, inode, file, drv); 1202 err = v4l1_compat_set_frame_buffer(arg, file, drv);
1228 break; 1203 break;
1229 case VIDIOCGWIN: /* get window or capture dimensions */ 1204 case VIDIOCGWIN: /* get window or capture dimensions */
1230 err = v4l1_compat_get_win_cap_dimensions(arg, inode, file, drv); 1205 err = v4l1_compat_get_win_cap_dimensions(arg, file, drv);
1231 break; 1206 break;
1232 case VIDIOCSWIN: /* set window and/or capture dimensions */ 1207 case VIDIOCSWIN: /* set window and/or capture dimensions */
1233 err = v4l1_compat_set_win_cap_dimensions(arg, inode, file, drv); 1208 err = v4l1_compat_set_win_cap_dimensions(arg, file, drv);
1234 break; 1209 break;
1235 case VIDIOCCAPTURE: /* turn on/off preview */ 1210 case VIDIOCCAPTURE: /* turn on/off preview */
1236 err = v4l1_compat_turn_preview_on_off(arg, inode, file, drv); 1211 err = v4l1_compat_turn_preview_on_off(arg, file, drv);
1237 break; 1212 break;
1238 case VIDIOCGCHAN: /* get input information */ 1213 case VIDIOCGCHAN: /* get input information */
1239 err = v4l1_compat_get_input_info(arg, inode, file, drv); 1214 err = v4l1_compat_get_input_info(arg, file, drv);
1240 break; 1215 break;
1241 case VIDIOCSCHAN: /* set input */ 1216 case VIDIOCSCHAN: /* set input */
1242 err = v4l1_compat_set_input(arg, inode, file, drv); 1217 err = v4l1_compat_set_input(arg, file, drv);
1243 break; 1218 break;
1244 case VIDIOCGPICT: /* get tone controls & partial capture format */ 1219 case VIDIOCGPICT: /* get tone controls & partial capture format */
1245 err = v4l1_compat_get_picture(arg, inode, file, drv); 1220 err = v4l1_compat_get_picture(arg, file, drv);
1246 break; 1221 break;
1247 case VIDIOCSPICT: /* set tone controls & partial capture format */ 1222 case VIDIOCSPICT: /* set tone controls & partial capture format */
1248 err = v4l1_compat_set_picture(arg, inode, file, drv); 1223 err = v4l1_compat_set_picture(arg, file, drv);
1249 break; 1224 break;
1250 case VIDIOCGTUNER: /* get tuner information */ 1225 case VIDIOCGTUNER: /* get tuner information */
1251 err = v4l1_compat_get_tuner(arg, inode, file, drv); 1226 err = v4l1_compat_get_tuner(arg, file, drv);
1252 break; 1227 break;
1253 case VIDIOCSTUNER: /* select a tuner input */ 1228 case VIDIOCSTUNER: /* select a tuner input */
1254 err = v4l1_compat_select_tuner(arg, inode, file, drv); 1229 err = v4l1_compat_select_tuner(arg, file, drv);
1255 break; 1230 break;
1256 case VIDIOCGFREQ: /* get frequency */ 1231 case VIDIOCGFREQ: /* get frequency */
1257 err = v4l1_compat_get_frequency(arg, inode, file, drv); 1232 err = v4l1_compat_get_frequency(arg, file, drv);
1258 break; 1233 break;
1259 case VIDIOCSFREQ: /* set frequency */ 1234 case VIDIOCSFREQ: /* set frequency */
1260 err = v4l1_compat_set_frequency(arg, inode, file, drv); 1235 err = v4l1_compat_set_frequency(arg, file, drv);
1261 break; 1236 break;
1262 case VIDIOCGAUDIO: /* get audio properties/controls */ 1237 case VIDIOCGAUDIO: /* get audio properties/controls */
1263 err = v4l1_compat_get_audio(arg, inode, file, drv); 1238 err = v4l1_compat_get_audio(arg, file, drv);
1264 break; 1239 break;
1265 case VIDIOCSAUDIO: /* set audio controls */ 1240 case VIDIOCSAUDIO: /* set audio controls */
1266 err = v4l1_compat_set_audio(arg, inode, file, drv); 1241 err = v4l1_compat_set_audio(arg, file, drv);
1267 break; 1242 break;
1268 case VIDIOCMCAPTURE: /* capture a frame */ 1243 case VIDIOCMCAPTURE: /* capture a frame */
1269 err = v4l1_compat_capture_frame(arg, inode, file, drv); 1244 err = v4l1_compat_capture_frame(arg, file, drv);
1270 break; 1245 break;
1271 case VIDIOCSYNC: /* wait for a frame */ 1246 case VIDIOCSYNC: /* wait for a frame */
1272 err = v4l1_compat_sync(arg, inode, file, drv); 1247 err = v4l1_compat_sync(arg, file, drv);
1273 break; 1248 break;
1274 case VIDIOCGVBIFMT: /* query VBI data capture format */ 1249 case VIDIOCGVBIFMT: /* query VBI data capture format */
1275 err = v4l1_compat_get_vbi_format(arg, inode, file, drv); 1250 err = v4l1_compat_get_vbi_format(arg, file, drv);
1276 break; 1251 break;
1277 case VIDIOCSVBIFMT: 1252 case VIDIOCSVBIFMT:
1278 err = v4l1_compat_set_vbi_format(arg, inode, file, drv); 1253 err = v4l1_compat_set_vbi_format(arg, file, drv);
1279 break; 1254 break;
1280 default: 1255 default:
1281 err = -ENOIOCTLCMD; 1256 err = -ENOIOCTLCMD;
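The hunks above all follow one pattern: the v4l2_kioctl callback and every v4l1_compat_* helper drop their struct inode argument, so the V4L1 compatibility layer identifies the device through the struct file alone. A minimal sketch of the resulting call shape (the callback signature is inferred from how drv() is invoked above, and the handler body is illustrative):

    #include <linux/fs.h>
    #include <linux/errno.h>

    /* post-conversion callback type: no inode, only the open file */
    typedef int (*v4l2_kioctl)(struct file *file, unsigned int cmd, void *arg);

    /* prototype as changed by this patch */
    extern int v4l_compat_translate_ioctl(struct file *file, int cmd,
                                          void *arg, v4l2_kioctl drv);

    static int my_do_ioctl(struct file *file, unsigned int cmd, void *arg)
    {
            /* the video_device is reachable via video_devdata(file) */
            return -ENOIOCTLCMD;
    }

    /* a V4L1 ioctl is then translated without ever touching the inode */
    static int my_translate(struct file *file, unsigned int cmd, void *arg)
    {
            return v4l_compat_translate_ioctl(file, cmd, arg, my_do_ioctl);
    }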
diff --git a/drivers/media/video/v4l2-int-device.c b/drivers/media/video/v4l2-int-device.c
index 0e4549922f26..a935bae538ef 100644
--- a/drivers/media/video/v4l2-int-device.c
+++ b/drivers/media/video/v4l2-int-device.c
@@ -32,7 +32,7 @@
32static DEFINE_MUTEX(mutex); 32static DEFINE_MUTEX(mutex);
33static LIST_HEAD(int_list); 33static LIST_HEAD(int_list);
34 34
35static void v4l2_int_device_try_attach_all(void) 35void v4l2_int_device_try_attach_all(void)
36{ 36{
37 struct v4l2_int_device *m, *s; 37 struct v4l2_int_device *m, *s;
38 38
@@ -66,6 +66,7 @@ static void v4l2_int_device_try_attach_all(void)
66 } 66 }
67 } 67 }
68} 68}
69EXPORT_SYMBOL_GPL(v4l2_int_device_try_attach_all);
69 70
70static int ioctl_sort_cmp(const void *a, const void *b) 71static int ioctl_sort_cmp(const void *a, const void *b)
71{ 72{
@@ -144,6 +145,7 @@ int v4l2_int_ioctl_0(struct v4l2_int_device *d, int cmd)
144 find_ioctl(d->u.slave, cmd, 145 find_ioctl(d->u.slave, cmd,
145 (v4l2_int_ioctl_func *)no_such_ioctl_0))(d); 146 (v4l2_int_ioctl_func *)no_such_ioctl_0))(d);
146} 147}
148EXPORT_SYMBOL_GPL(v4l2_int_ioctl_0);
147 149
148static int no_such_ioctl_1(struct v4l2_int_device *d, void *arg) 150static int no_such_ioctl_1(struct v4l2_int_device *d, void *arg)
149{ 151{
@@ -156,5 +158,6 @@ int v4l2_int_ioctl_1(struct v4l2_int_device *d, int cmd, void *arg)
156 find_ioctl(d->u.slave, cmd, 158 find_ioctl(d->u.slave, cmd,
157 (v4l2_int_ioctl_func *)no_such_ioctl_1))(d, arg); 159 (v4l2_int_ioctl_func *)no_such_ioctl_1))(d, arg);
158} 160}
161EXPORT_SYMBOL_GPL(v4l2_int_ioctl_1);
159 162
160MODULE_LICENSE("GPL"); 163MODULE_LICENSE("GPL");
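Here v4l2_int_device_try_attach_all(), v4l2_int_ioctl_0() and v4l2_int_ioctl_1() become available to other modules through EXPORT_SYMBOL_GPL. A sketch of a client module forwarding a slave ioctl through the now-exported helper; the header path and the vidioc_int_*_num ioctl numbering are assumptions about the v4l2-int-device API of this era, not shown in the hunks:

    #include <linux/videodev2.h>
    #include <media/v4l2-int-device.h>      /* assumed header location */

    static int query_capture_format(struct v4l2_int_device *dev,
                                    struct v4l2_format *f)
    {
            /* dispatch a one-argument slave ioctl via the exported helper */
            return v4l2_int_ioctl_1(dev, vidioc_int_g_fmt_cap_num, f);
    }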
diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c
index 155c9d77a463..710e1a40c422 100644
--- a/drivers/media/video/v4l2-ioctl.c
+++ b/drivers/media/video/v4l2-ioctl.c
@@ -625,13 +625,13 @@ static int check_fmt(const struct v4l2_ioctl_ops *ops, enum v4l2_buf_type type)
625 return -EINVAL; 625 return -EINVAL;
626} 626}
627 627
628static int __video_do_ioctl(struct inode *inode, struct file *file, 628static int __video_do_ioctl(struct file *file,
629 unsigned int cmd, void *arg) 629 unsigned int cmd, void *arg)
630{ 630{
631 struct video_device *vfd = video_devdata(file); 631 struct video_device *vfd = video_devdata(file);
632 const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops; 632 const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
633 void *fh = file->private_data; 633 void *fh = file->private_data;
634 int ret = -EINVAL; 634 int ret = -EINVAL;
635 635
636 if ((vfd->debug & V4L2_DEBUG_IOCTL) && 636 if ((vfd->debug & V4L2_DEBUG_IOCTL) &&
637 !(vfd->debug & V4L2_DEBUG_IOCTL_ARG)) { 637 !(vfd->debug & V4L2_DEBUG_IOCTL_ARG)) {
@@ -675,7 +675,7 @@ static int __video_do_ioctl(struct inode *inode, struct file *file,
675 V4L2 ioctls. 675 V4L2 ioctls.
676 ********************************************************/ 676 ********************************************************/
677 if (_IOC_TYPE(cmd) == 'v' && _IOC_NR(cmd) < BASE_VIDIOCPRIVATE) 677 if (_IOC_TYPE(cmd) == 'v' && _IOC_NR(cmd) < BASE_VIDIOCPRIVATE)
678 return v4l_compat_translate_ioctl(inode, file, cmd, arg, 678 return v4l_compat_translate_ioctl(file, cmd, arg,
679 __video_do_ioctl); 679 __video_do_ioctl);
680#endif 680#endif
681 681
@@ -1768,7 +1768,7 @@ static int __video_do_ioctl(struct inode *inode, struct file *file,
1768 return ret; 1768 return ret;
1769} 1769}
1770 1770
1771int video_ioctl2(struct inode *inode, struct file *file, 1771int __video_ioctl2(struct file *file,
1772 unsigned int cmd, unsigned long arg) 1772 unsigned int cmd, unsigned long arg)
1773{ 1773{
1774 char sbuf[128]; 1774 char sbuf[128];
@@ -1832,7 +1832,7 @@ int video_ioctl2(struct inode *inode, struct file *file,
1832 } 1832 }
1833 1833
1834 /* Handles IOCTL */ 1834 /* Handles IOCTL */
1835 err = __video_do_ioctl(inode, file, cmd, parg); 1835 err = __video_do_ioctl(file, cmd, parg);
1836 if (err == -ENOIOCTLCMD) 1836 if (err == -ENOIOCTLCMD)
1837 err = -EINVAL; 1837 err = -EINVAL;
1838 if (is_ext_ctrl) { 1838 if (is_ext_ctrl) {
@@ -1860,4 +1860,11 @@ out:
1860 kfree(mbuf); 1860 kfree(mbuf);
1861 return err; 1861 return err;
1862} 1862}
1863EXPORT_SYMBOL(__video_ioctl2);
1864
1865int video_ioctl2(struct inode *inode, struct file *file,
1866 unsigned int cmd, unsigned long arg)
1867{
1868 return __video_ioctl2(file, cmd, arg);
1869}
1863EXPORT_SYMBOL(video_ioctl2); 1870EXPORT_SYMBOL(video_ioctl2);
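video_ioctl2() keeps its historical inode-taking prototype but is reduced to a one-line wrapper around the exported __video_ioctl2(), which works from the struct file alone. A driver that dispatches ioctls itself can call the new entry point directly; the shim below is illustrative, not part of this patch:

    #include <linux/fs.h>

    /* prototype as introduced by this patch */
    extern int __video_ioctl2(struct file *file,
                              unsigned int cmd, unsigned long arg);

    static long my_unlocked_ioctl(struct file *file, unsigned int cmd,
                                  unsigned long arg)
    {
            /* no inode needed: the video_device is reachable via file */
            return __video_ioctl2(file, cmd, arg);
    }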
diff --git a/drivers/media/video/videobuf-dvb.c b/drivers/media/video/videobuf-dvb.c
index 917277d36605..0e7dcba8e4ae 100644
--- a/drivers/media/video/videobuf-dvb.c
+++ b/drivers/media/video/videobuf-dvb.c
@@ -296,29 +296,7 @@ EXPORT_SYMBOL(videobuf_dvb_register_bus);
296 296
297void videobuf_dvb_unregister_bus(struct videobuf_dvb_frontends *f) 297void videobuf_dvb_unregister_bus(struct videobuf_dvb_frontends *f)
298{ 298{
299 struct list_head *list, *q; 299 videobuf_dvb_dealloc_frontends(f);
300 struct videobuf_dvb_frontend *fe;
301
302 mutex_lock(&f->lock);
303 list_for_each_safe(list, q, &f->felist) {
304 fe = list_entry(list, struct videobuf_dvb_frontend, felist);
305 if (fe->dvb.net.dvbdev) {
306 dvb_net_release(&fe->dvb.net);
307 fe->dvb.demux.dmx.remove_frontend(&fe->dvb.demux.dmx,
308 &fe->dvb.fe_mem);
309 fe->dvb.demux.dmx.remove_frontend(&fe->dvb.demux.dmx,
310 &fe->dvb.fe_hw);
311 dvb_dmxdev_release(&fe->dvb.dmxdev);
312 dvb_dmx_release(&fe->dvb.demux);
313 dvb_unregister_frontend(fe->dvb.frontend);
314 }
315 if (fe->dvb.frontend)
316 /* always allocated, may have been reset */
317 dvb_frontend_detach(fe->dvb.frontend);
318 list_del(list);
319 kfree(fe);
320 }
321 mutex_unlock(&f->lock);
322 300
323 dvb_unregister_adapter(&f->adapter); 301 dvb_unregister_adapter(&f->adapter);
324} 302}
@@ -389,3 +367,31 @@ fail_alloc:
389 return fe; 367 return fe;
390} 368}
391EXPORT_SYMBOL(videobuf_dvb_alloc_frontend); 369EXPORT_SYMBOL(videobuf_dvb_alloc_frontend);
370
371void videobuf_dvb_dealloc_frontends(struct videobuf_dvb_frontends *f)
372{
373 struct list_head *list, *q;
374 struct videobuf_dvb_frontend *fe;
375
376 mutex_lock(&f->lock);
377 list_for_each_safe(list, q, &f->felist) {
378 fe = list_entry(list, struct videobuf_dvb_frontend, felist);
379 if (fe->dvb.net.dvbdev) {
380 dvb_net_release(&fe->dvb.net);
381 fe->dvb.demux.dmx.remove_frontend(&fe->dvb.demux.dmx,
382 &fe->dvb.fe_mem);
383 fe->dvb.demux.dmx.remove_frontend(&fe->dvb.demux.dmx,
384 &fe->dvb.fe_hw);
385 dvb_dmxdev_release(&fe->dvb.dmxdev);
386 dvb_dmx_release(&fe->dvb.demux);
387 dvb_unregister_frontend(fe->dvb.frontend);
388 }
389 if (fe->dvb.frontend)
390 /* always allocated, may have been reset */
391 dvb_frontend_detach(fe->dvb.frontend);
392 list_del(list); /* remove list entry */
393 kfree(fe); /* free frontend allocation */
394 }
395 mutex_unlock(&f->lock);
396}
397EXPORT_SYMBOL(videobuf_dvb_dealloc_frontends);
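The frontend teardown loop moves out of videobuf_dvb_unregister_bus() into the new exported videobuf_dvb_dealloc_frontends(), so a bridge driver can also release every allocated frontend on an error path without unregistering the DVB adapter. A sketch of that use; the videobuf_dvb_alloc_frontend() arguments and the error-path structure are assumptions, not taken from this patch:

    #include <linux/errno.h>
    #include <media/videobuf-dvb.h>

    static int my_dvb_init(struct videobuf_dvb_frontends *f)
    {
            struct videobuf_dvb_frontend *fe;

            fe = videobuf_dvb_alloc_frontend(f, 1);    /* id argument: assumed signature */
            if (!fe)
                    return -ENOMEM;

            /* demod/tuner attach would go here; if it leaves no frontend behind: */
            if (!fe->dvb.frontend) {
                    videobuf_dvb_dealloc_frontends(f); /* drop frontends, keep the adapter */
                    return -ENODEV;
            }
            return 0;
    }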
diff --git a/drivers/media/video/vivi.c b/drivers/media/video/vivi.c
index 7d7e51def461..e15e48f04be7 100644
--- a/drivers/media/video/vivi.c
+++ b/drivers/media/video/vivi.c
@@ -1163,11 +1163,11 @@ static int vivi_release(void)
1163 1163
1164 if (-1 != dev->vfd->minor) { 1164 if (-1 != dev->vfd->minor) {
1165 printk(KERN_INFO "%s: unregistering /dev/video%d\n", 1165 printk(KERN_INFO "%s: unregistering /dev/video%d\n",
1166 VIVI_MODULE_NAME, dev->vfd->minor); 1166 VIVI_MODULE_NAME, dev->vfd->num);
1167 video_unregister_device(dev->vfd); 1167 video_unregister_device(dev->vfd);
1168 } else { 1168 } else {
1169 printk(KERN_INFO "%s: releasing /dev/video%d\n", 1169 printk(KERN_INFO "%s: releasing /dev/video%d\n",
1170 VIVI_MODULE_NAME, dev->vfd->minor); 1170 VIVI_MODULE_NAME, dev->vfd->num);
1171 video_device_release(dev->vfd); 1171 video_device_release(dev->vfd);
1172 } 1172 }
1173 1173
@@ -1307,7 +1307,7 @@ static int __init vivi_init(void)
1307 1307
1308 dev->vfd = vfd; 1308 dev->vfd = vfd;
1309 printk(KERN_INFO "%s: V4L2 device registered as /dev/video%d\n", 1309 printk(KERN_INFO "%s: V4L2 device registered as /dev/video%d\n",
1310 VIVI_MODULE_NAME, vfd->minor); 1310 VIVI_MODULE_NAME, vfd->num);
1311 } 1311 }
1312 1312
1313 if (ret < 0) { 1313 if (ret < 0) {
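The vivi log messages (and those in the drivers below) switch from vfd->minor to vfd->num: the number in the /dev/videoN node name is the per-type device number, which does not have to equal the character-device minor. A small sketch printing both; the field names are as used in the hunks, and the observation that the two can differ is our reading of the change rather than something it states:

    #include <linux/kernel.h>
    #include <media/v4l2-dev.h>

    static void report_node(struct video_device *vfd)
    {
            printk(KERN_INFO "registered as /dev/video%d (chardev minor %d)\n",
                   vfd->num, vfd->minor);
    }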
diff --git a/drivers/media/video/w9968cf.c b/drivers/media/video/w9968cf.c
index dcd45dbd82dc..4dfb43bd1846 100644
--- a/drivers/media/video/w9968cf.c
+++ b/drivers/media/video/w9968cf.c
@@ -2398,7 +2398,7 @@ error:
2398 cam->sensor = CC_UNKNOWN; 2398 cam->sensor = CC_UNKNOWN;
2399 DBG(1, "Image sensor initialization failed for %s (/dev/video%d). " 2399 DBG(1, "Image sensor initialization failed for %s (/dev/video%d). "
2400 "Try to detach and attach this device again", 2400 "Try to detach and attach this device again",
2401 symbolic(camlist, cam->id), cam->v4ldev->minor) 2401 symbolic(camlist, cam->id), cam->v4ldev->num)
2402 return err; 2402 return err;
2403} 2403}
2404 2404
@@ -2644,7 +2644,7 @@ static void w9968cf_release_resources(struct w9968cf_device* cam)
2644{ 2644{
2645 mutex_lock(&w9968cf_devlist_mutex); 2645 mutex_lock(&w9968cf_devlist_mutex);
2646 2646
2647 DBG(2, "V4L device deregistered: /dev/video%d", cam->v4ldev->minor) 2647 DBG(2, "V4L device deregistered: /dev/video%d", cam->v4ldev->num)
2648 2648
2649 video_unregister_device(cam->v4ldev); 2649 video_unregister_device(cam->v4ldev);
2650 list_del(&cam->v4llist); 2650 list_del(&cam->v4llist);
@@ -2679,7 +2679,7 @@ static int w9968cf_open(struct inode* inode, struct file* filp)
2679 DBG(2, "No supported image sensor has been detected by the " 2679 DBG(2, "No supported image sensor has been detected by the "
2680 "'ovcamchip' module for the %s (/dev/video%d). Make " 2680 "'ovcamchip' module for the %s (/dev/video%d). Make "
2681 "sure it is loaded *before* (re)connecting the camera.", 2681 "sure it is loaded *before* (re)connecting the camera.",
2682 symbolic(camlist, cam->id), cam->v4ldev->minor) 2682 symbolic(camlist, cam->id), cam->v4ldev->num)
2683 mutex_unlock(&cam->dev_mutex); 2683 mutex_unlock(&cam->dev_mutex);
2684 up_read(&w9968cf_disconnect); 2684 up_read(&w9968cf_disconnect);
2685 return -ENODEV; 2685 return -ENODEV;
@@ -2687,7 +2687,7 @@ static int w9968cf_open(struct inode* inode, struct file* filp)
2687 2687
2688 if (cam->users) { 2688 if (cam->users) {
2689 DBG(2, "%s (/dev/video%d) has been already occupied by '%s'", 2689 DBG(2, "%s (/dev/video%d) has been already occupied by '%s'",
2690 symbolic(camlist, cam->id),cam->v4ldev->minor,cam->command) 2690 symbolic(camlist, cam->id), cam->v4ldev->num, cam->command)
2691 if ((filp->f_flags & O_NONBLOCK)||(filp->f_flags & O_NDELAY)) { 2691 if ((filp->f_flags & O_NONBLOCK)||(filp->f_flags & O_NDELAY)) {
2692 mutex_unlock(&cam->dev_mutex); 2692 mutex_unlock(&cam->dev_mutex);
2693 up_read(&w9968cf_disconnect); 2693 up_read(&w9968cf_disconnect);
@@ -2709,7 +2709,7 @@ static int w9968cf_open(struct inode* inode, struct file* filp)
2709 } 2709 }
2710 2710
2711 DBG(5, "Opening '%s', /dev/video%d ...", 2711 DBG(5, "Opening '%s', /dev/video%d ...",
2712 symbolic(camlist, cam->id), cam->v4ldev->minor) 2712 symbolic(camlist, cam->id), cam->v4ldev->num)
2713 2713
2714 cam->streaming = 0; 2714 cam->streaming = 0;
2715 cam->misconfigured = 0; 2715 cam->misconfigured = 0;
@@ -2947,7 +2947,7 @@ static int w9968cf_v4l_ioctl(struct inode* inode, struct file* filp,
2947 .minheight = cam->minheight, 2947 .minheight = cam->minheight,
2948 }; 2948 };
2949 sprintf(cap.name, "W996[87]CF USB Camera #%d", 2949 sprintf(cap.name, "W996[87]CF USB Camera #%d",
2950 cam->v4ldev->minor); 2950 cam->v4ldev->num);
2951 cap.maxwidth = (cam->upscaling && w9968cf_vpp) 2951 cap.maxwidth = (cam->upscaling && w9968cf_vpp)
2952 ? max((u16)W9968CF_MAX_WIDTH, cam->maxwidth) 2952 ? max((u16)W9968CF_MAX_WIDTH, cam->maxwidth)
2953 : cam->maxwidth; 2953 : cam->maxwidth;
@@ -3567,7 +3567,7 @@ w9968cf_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
3567 goto fail; 3567 goto fail;
3568 } 3568 }
3569 3569
3570 DBG(2, "V4L device registered as /dev/video%d", cam->v4ldev->minor) 3570 DBG(2, "V4L device registered as /dev/video%d", cam->v4ldev->num)
3571 3571
3572 /* Set some basic constants */ 3572 /* Set some basic constants */
3573 w9968cf_configure_camera(cam, udev, mod_id, dev_nr); 3573 w9968cf_configure_camera(cam, udev, mod_id, dev_nr);
@@ -3618,7 +3618,7 @@ static void w9968cf_usb_disconnect(struct usb_interface* intf)
3618 DBG(2, "The device is open (/dev/video%d)! " 3618 DBG(2, "The device is open (/dev/video%d)! "
3619 "Process name: %s. Deregistration and memory " 3619 "Process name: %s. Deregistration and memory "
3620 "deallocation are deferred on close.", 3620 "deallocation are deferred on close.",
3621 cam->v4ldev->minor, cam->command) 3621 cam->v4ldev->num, cam->command)
3622 cam->misconfigured = 1; 3622 cam->misconfigured = 1;
3623 w9968cf_stop_transfer(cam); 3623 w9968cf_stop_transfer(cam);
3624 wake_up_interruptible(&cam->wait_queue); 3624 wake_up_interruptible(&cam->wait_queue);
diff --git a/drivers/media/video/zc0301/zc0301_core.c b/drivers/media/video/zc0301/zc0301_core.c
index 6a0902bcba6b..9fc581707638 100644
--- a/drivers/media/video/zc0301/zc0301_core.c
+++ b/drivers/media/video/zc0301/zc0301_core.c
@@ -539,7 +539,7 @@ static int zc0301_stream_interrupt(struct zc0301_device* cam)
539 cam->state |= DEV_MISCONFIGURED; 539 cam->state |= DEV_MISCONFIGURED;
540 DBG(1, "URB timeout reached. The camera is misconfigured. To " 540 DBG(1, "URB timeout reached. The camera is misconfigured. To "
541 "use it, close and open /dev/video%d again.", 541 "use it, close and open /dev/video%d again.",
542 cam->v4ldev->minor); 542 cam->v4ldev->num);
543 return -EIO; 543 return -EIO;
544 } 544 }
545 545
@@ -640,7 +640,7 @@ static void zc0301_release_resources(struct kref *kref)
640{ 640{
641 struct zc0301_device *cam = container_of(kref, struct zc0301_device, 641 struct zc0301_device *cam = container_of(kref, struct zc0301_device,
642 kref); 642 kref);
643 DBG(2, "V4L2 device /dev/video%d deregistered", cam->v4ldev->minor); 643 DBG(2, "V4L2 device /dev/video%d deregistered", cam->v4ldev->num);
644 video_set_drvdata(cam->v4ldev, NULL); 644 video_set_drvdata(cam->v4ldev, NULL);
645 video_unregister_device(cam->v4ldev); 645 video_unregister_device(cam->v4ldev);
646 usb_put_dev(cam->usbdev); 646 usb_put_dev(cam->usbdev);
@@ -679,7 +679,7 @@ static int zc0301_open(struct inode* inode, struct file* filp)
679 } 679 }
680 680
681 if (cam->users) { 681 if (cam->users) {
682 DBG(2, "Device /dev/video%d is busy...", cam->v4ldev->minor); 682 DBG(2, "Device /dev/video%d is busy...", cam->v4ldev->num);
683 DBG(3, "Simultaneous opens are not supported"); 683 DBG(3, "Simultaneous opens are not supported");
684 if ((filp->f_flags & O_NONBLOCK) || 684 if ((filp->f_flags & O_NONBLOCK) ||
685 (filp->f_flags & O_NDELAY)) { 685 (filp->f_flags & O_NDELAY)) {
@@ -722,7 +722,7 @@ static int zc0301_open(struct inode* inode, struct file* filp)
722 cam->frame_count = 0; 722 cam->frame_count = 0;
723 zc0301_empty_framequeues(cam); 723 zc0301_empty_framequeues(cam);
724 724
725 DBG(3, "Video device /dev/video%d is open", cam->v4ldev->minor); 725 DBG(3, "Video device /dev/video%d is open", cam->v4ldev->num);
726 726
727out: 727out:
728 mutex_unlock(&cam->open_mutex); 728 mutex_unlock(&cam->open_mutex);
@@ -746,7 +746,7 @@ static int zc0301_release(struct inode* inode, struct file* filp)
746 cam->users--; 746 cam->users--;
747 wake_up_interruptible_nr(&cam->wait_open, 1); 747 wake_up_interruptible_nr(&cam->wait_open, 1);
748 748
749 DBG(3, "Video device /dev/video%d closed", cam->v4ldev->minor); 749 DBG(3, "Video device /dev/video%d closed", cam->v4ldev->num);
750 750
751 kref_put(&cam->kref, zc0301_release_resources); 751 kref_put(&cam->kref, zc0301_release_resources);
752 752
@@ -1275,7 +1275,7 @@ zc0301_vidioc_s_crop(struct zc0301_device* cam, void __user * arg)
1275 cam->state |= DEV_MISCONFIGURED; 1275 cam->state |= DEV_MISCONFIGURED;
1276 DBG(1, "VIDIOC_S_CROP failed because of hardware problems. To " 1276 DBG(1, "VIDIOC_S_CROP failed because of hardware problems. To "
1277 "use the camera, close and open /dev/video%d again.", 1277 "use the camera, close and open /dev/video%d again.",
1278 cam->v4ldev->minor); 1278 cam->v4ldev->num);
1279 return -EIO; 1279 return -EIO;
1280 } 1280 }
1281 1281
@@ -1288,7 +1288,7 @@ zc0301_vidioc_s_crop(struct zc0301_device* cam, void __user * arg)
1288 cam->state |= DEV_MISCONFIGURED; 1288 cam->state |= DEV_MISCONFIGURED;
1289 DBG(1, "VIDIOC_S_CROP failed because of not enough memory. To " 1289 DBG(1, "VIDIOC_S_CROP failed because of not enough memory. To "
1290 "use the camera, close and open /dev/video%d again.", 1290 "use the camera, close and open /dev/video%d again.",
1291 cam->v4ldev->minor); 1291 cam->v4ldev->num);
1292 return -ENOMEM; 1292 return -ENOMEM;
1293 } 1293 }
1294 1294
@@ -1470,7 +1470,7 @@ zc0301_vidioc_try_s_fmt(struct zc0301_device* cam, unsigned int cmd,
1470 cam->state |= DEV_MISCONFIGURED; 1470 cam->state |= DEV_MISCONFIGURED;
1471 DBG(1, "VIDIOC_S_FMT failed because of hardware problems. To " 1471 DBG(1, "VIDIOC_S_FMT failed because of hardware problems. To "
1472 "use the camera, close and open /dev/video%d again.", 1472 "use the camera, close and open /dev/video%d again.",
1473 cam->v4ldev->minor); 1473 cam->v4ldev->num);
1474 return -EIO; 1474 return -EIO;
1475 } 1475 }
1476 1476
@@ -1482,7 +1482,7 @@ zc0301_vidioc_try_s_fmt(struct zc0301_device* cam, unsigned int cmd,
1482 cam->state |= DEV_MISCONFIGURED; 1482 cam->state |= DEV_MISCONFIGURED;
1483 DBG(1, "VIDIOC_S_FMT failed because of not enough memory. To " 1483 DBG(1, "VIDIOC_S_FMT failed because of not enough memory. To "
1484 "use the camera, close and open /dev/video%d again.", 1484 "use the camera, close and open /dev/video%d again.",
1485 cam->v4ldev->minor); 1485 cam->v4ldev->num);
1486 return -ENOMEM; 1486 return -ENOMEM;
1487 } 1487 }
1488 1488
@@ -1529,7 +1529,7 @@ zc0301_vidioc_s_jpegcomp(struct zc0301_device* cam, void __user * arg)
1529 cam->state |= DEV_MISCONFIGURED; 1529 cam->state |= DEV_MISCONFIGURED;
1530 DBG(1, "VIDIOC_S_JPEGCOMP failed because of hardware " 1530 DBG(1, "VIDIOC_S_JPEGCOMP failed because of hardware "
1531 "problems. To use the camera, close and open " 1531 "problems. To use the camera, close and open "
1532 "/dev/video%d again.", cam->v4ldev->minor); 1532 "/dev/video%d again.", cam->v4ldev->num);
1533 return -EIO; 1533 return -EIO;
1534 } 1534 }
1535 1535
@@ -2005,7 +2005,7 @@ zc0301_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
2005 goto fail; 2005 goto fail;
2006 } 2006 }
2007 2007
2008 DBG(2, "V4L2 device registered as /dev/video%d", cam->v4ldev->minor); 2008 DBG(2, "V4L2 device registered as /dev/video%d", cam->v4ldev->num);
2009 2009
2010 cam->module_param.force_munmap = force_munmap[dev_nr]; 2010 cam->module_param.force_munmap = force_munmap[dev_nr];
2011 cam->module_param.frame_timeout = frame_timeout[dev_nr]; 2011 cam->module_param.frame_timeout = frame_timeout[dev_nr];
@@ -2044,7 +2044,7 @@ static void zc0301_usb_disconnect(struct usb_interface* intf)
2044 if (cam->users) { 2044 if (cam->users) {
2045 DBG(2, "Device /dev/video%d is open! Deregistration and " 2045 DBG(2, "Device /dev/video%d is open! Deregistration and "
2046 "memory deallocation are deferred.", 2046 "memory deallocation are deferred.",
2047 cam->v4ldev->minor); 2047 cam->v4ldev->num);
2048 cam->state |= DEV_MISCONFIGURED; 2048 cam->state |= DEV_MISCONFIGURED;
2049 zc0301_stop_transfer(cam); 2049 zc0301_stop_transfer(cam);
2050 cam->state |= DEV_DISCONNECTED; 2050 cam->state |= DEV_DISCONNECTED;
diff --git a/drivers/media/video/zr364xx.c b/drivers/media/video/zr364xx.c
index 7cdac99deea6..a1d81ed44c7c 100644
--- a/drivers/media/video/zr364xx.c
+++ b/drivers/media/video/zr364xx.c
@@ -885,7 +885,7 @@ static int zr364xx_probe(struct usb_interface *intf,
885 usb_set_intfdata(intf, cam); 885 usb_set_intfdata(intf, cam);
886 886
887 dev_info(&udev->dev, DRIVER_DESC " controlling video device %d\n", 887 dev_info(&udev->dev, DRIVER_DESC " controlling video device %d\n",
888 cam->vdev->minor); 888 cam->vdev->num);
889 return 0; 889 return 0;
890} 890}
891 891
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index 5263913e0c69..7911151e56a3 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -172,9 +172,9 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error);
172 172
173/*** Block device ***/ 173/*** Block device ***/
174 174
175static int mspro_block_bd_open(struct inode *inode, struct file *filp) 175static int mspro_block_bd_open(struct block_device *bdev, fmode_t mode)
176{ 176{
177 struct gendisk *disk = inode->i_bdev->bd_disk; 177 struct gendisk *disk = bdev->bd_disk;
178 struct mspro_block_data *msb = disk->private_data; 178 struct mspro_block_data *msb = disk->private_data;
179 int rc = -ENXIO; 179 int rc = -ENXIO;
180 180
@@ -182,7 +182,7 @@ static int mspro_block_bd_open(struct inode *inode, struct file *filp)
182 182
183 if (msb && msb->card) { 183 if (msb && msb->card) {
184 msb->usage_count++; 184 msb->usage_count++;
185 if ((filp->f_mode & FMODE_WRITE) && msb->read_only) 185 if ((mode & FMODE_WRITE) && msb->read_only)
186 rc = -EROFS; 186 rc = -EROFS;
187 else 187 else
188 rc = 0; 188 rc = 0;
@@ -218,9 +218,8 @@ static int mspro_block_disk_release(struct gendisk *disk)
218 return 0; 218 return 0;
219} 219}
220 220
221static int mspro_block_bd_release(struct inode *inode, struct file *filp) 221static int mspro_block_bd_release(struct gendisk *disk, fmode_t mode)
222{ 222{
223 struct gendisk *disk = inode->i_bdev->bd_disk;
224 return mspro_block_disk_release(disk); 223 return mspro_block_disk_release(disk);
225} 224}
226 225
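mspro_block's open and release are converted to the new block_device_operations method signatures: open receives the struct block_device plus an fmode_t, release receives the struct gendisk plus the mode, and neither sees an inode or file any more. A minimal sketch of the post-conversion shape; struct my_disk_data and the fops wiring are illustrative:

    #include <linux/fs.h>
    #include <linux/genhd.h>
    #include <linux/module.h>

    struct my_disk_data {
            int read_only;
    };

    static int my_bd_open(struct block_device *bdev, fmode_t mode)
    {
            struct my_disk_data *d = bdev->bd_disk->private_data;

            if ((mode & FMODE_WRITE) && d->read_only)
                    return -EROFS;          /* same check as mspro_block above */
            return 0;
    }

    static int my_bd_release(struct gendisk *disk, fmode_t mode)
    {
            return 0;                       /* nothing to undo in this sketch */
    }

    static struct block_device_operations my_bd_ops = {
            .owner   = THIS_MODULE,
            .open    = my_bd_open,
            .release = my_bd_release,
    };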
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index f5233f3d9eff..b89f476cd0a9 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -559,12 +559,6 @@ mptctl_fasync(int fd, struct file *filep, int mode)
559 return ret; 559 return ret;
560} 560}
561 561
562static int
563mptctl_release(struct inode *inode, struct file *filep)
564{
565 return fasync_helper(-1, filep, 0, &async_queue);
566}
567
568/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 562/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
569/* 563/*
570 * MPT ioctl handler 564 * MPT ioctl handler
@@ -2706,7 +2700,6 @@ mptctl_hp_targetinfo(unsigned long arg)
2706static const struct file_operations mptctl_fops = { 2700static const struct file_operations mptctl_fops = {
2707 .owner = THIS_MODULE, 2701 .owner = THIS_MODULE,
2708 .llseek = no_llseek, 2702 .llseek = no_llseek,
2709 .release = mptctl_release,
2710 .fasync = mptctl_fasync, 2703 .fasync = mptctl_fasync,
2711 .unlocked_ioctl = mptctl_ioctl, 2704 .unlocked_ioctl = mptctl_ioctl,
2712#ifdef CONFIG_COMPAT 2705#ifdef CONFIG_COMPAT
diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c
index a1abf95cf751..603ffd008c73 100644
--- a/drivers/message/fusion/mptlan.c
+++ b/drivers/message/fusion/mptlan.c
@@ -77,12 +77,6 @@ MODULE_VERSION(my_VERSION);
77 * Fusion MPT LAN private structures 77 * Fusion MPT LAN private structures
78 */ 78 */
79 79
80struct NAA_Hosed {
81 u16 NAA;
82 u8 ieee[FC_ALEN];
83 struct NAA_Hosed *next;
84};
85
86struct BufferControl { 80struct BufferControl {
87 struct sk_buff *skb; 81 struct sk_buff *skb;
88 dma_addr_t dma; 82 dma_addr_t dma;
@@ -159,11 +153,6 @@ static u8 LanCtx = MPT_MAX_PROTOCOL_DRIVERS;
159static u32 max_buckets_out = 127; 153static u32 max_buckets_out = 127;
160static u32 tx_max_out_p = 127 - 16; 154static u32 tx_max_out_p = 127 - 16;
161 155
162#ifdef QLOGIC_NAA_WORKAROUND
163static struct NAA_Hosed *mpt_bad_naa = NULL;
164DEFINE_RWLOCK(bad_naa_lock);
165#endif
166
167/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 156/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
168/** 157/**
169 * lan_reply - Handle all data sent from the hardware. 158 * lan_reply - Handle all data sent from the hardware.
@@ -780,30 +769,6 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
780// ctx, skb, skb->data)); 769// ctx, skb, skb->data));
781 770
782 mac = skb_mac_header(skb); 771 mac = skb_mac_header(skb);
783#ifdef QLOGIC_NAA_WORKAROUND
784{
785 struct NAA_Hosed *nh;
786
787 /* Munge the NAA for Tx packets to QLogic boards, which don't follow
788 RFC 2625. The longer I look at this, the more my opinion of Qlogic
789 drops. */
790 read_lock_irq(&bad_naa_lock);
791 for (nh = mpt_bad_naa; nh != NULL; nh=nh->next) {
792 if ((nh->ieee[0] == mac[0]) &&
793 (nh->ieee[1] == mac[1]) &&
794 (nh->ieee[2] == mac[2]) &&
795 (nh->ieee[3] == mac[3]) &&
796 (nh->ieee[4] == mac[4]) &&
797 (nh->ieee[5] == mac[5])) {
798 cur_naa = nh->NAA;
799 dlprintk ((KERN_INFO "mptlan/sdu_send: using NAA value "
800 "= %04x.\n", cur_naa));
801 break;
802 }
803 }
804 read_unlock_irq(&bad_naa_lock);
805}
806#endif
807 772
808 pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa << 16) | 773 pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa << 16) |
809 (mac[0] << 8) | 774 (mac[0] << 8) |
@@ -1572,79 +1537,6 @@ mpt_lan_type_trans(struct sk_buff *skb, struct net_device *dev)
1572 1537
1573 fcllc = (struct fcllc *)skb->data; 1538 fcllc = (struct fcllc *)skb->data;
1574 1539
1575#ifdef QLOGIC_NAA_WORKAROUND
1576{
1577 u16 source_naa = fch->stype, found = 0;
1578
1579 /* Workaround for QLogic not following RFC 2625 in regards to the NAA
1580 value. */
1581
1582 if ((source_naa & 0xF000) == 0)
1583 source_naa = swab16(source_naa);
1584
1585 if (fcllc->ethertype == htons(ETH_P_ARP))
1586 dlprintk ((KERN_INFO "mptlan/type_trans: got arp req/rep w/ naa of "
1587 "%04x.\n", source_naa));
1588
1589 if ((fcllc->ethertype == htons(ETH_P_ARP)) &&
1590 ((source_naa >> 12) != MPT_LAN_NAA_RFC2625)){
1591 struct NAA_Hosed *nh, *prevnh;
1592 int i;
1593
1594 dlprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep from "
1595 "system with non-RFC 2625 NAA value (%04x).\n",
1596 source_naa));
1597
1598 write_lock_irq(&bad_naa_lock);
1599 for (prevnh = nh = mpt_bad_naa; nh != NULL;
1600 prevnh=nh, nh=nh->next) {
1601 if ((nh->ieee[0] == fch->saddr[0]) &&
1602 (nh->ieee[1] == fch->saddr[1]) &&
1603 (nh->ieee[2] == fch->saddr[2]) &&
1604 (nh->ieee[3] == fch->saddr[3]) &&
1605 (nh->ieee[4] == fch->saddr[4]) &&
1606 (nh->ieee[5] == fch->saddr[5])) {
1607 found = 1;
1608 dlprintk ((KERN_INFO "mptlan/type_trans: ARP Re"
1609 "q/Rep w/ bad NAA from system already"
1610 " in DB.\n"));
1611 break;
1612 }
1613 }
1614
1615 if ((!found) && (nh == NULL)) {
1616
1617 nh = kmalloc(sizeof(struct NAA_Hosed), GFP_KERNEL);
1618 dlprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep w/"
1619 " bad NAA from system not yet in DB.\n"));
1620
1621 if (nh != NULL) {
1622 nh->next = NULL;
1623 if (!mpt_bad_naa)
1624 mpt_bad_naa = nh;
1625 if (prevnh)
1626 prevnh->next = nh;
1627
1628 nh->NAA = source_naa; /* Set the S_NAA value. */
1629 for (i = 0; i < FC_ALEN; i++)
1630 nh->ieee[i] = fch->saddr[i];
1631 dlprintk ((KERN_INFO "Got ARP from %02x:%02x:%02x:%02x:"
1632 "%02x:%02x with non-compliant S_NAA value.\n",
1633 fch->saddr[0], fch->saddr[1], fch->saddr[2],
1634 fch->saddr[3], fch->saddr[4],fch->saddr[5]));
1635 } else {
1636 printk (KERN_ERR "mptlan/type_trans: Unable to"
1637 " kmalloc a NAA_Hosed struct.\n");
1638 }
1639 } else if (!found) {
1640 printk (KERN_ERR "mptlan/type_trans: found not"
1641 " set, but nh isn't null. Evil "
1642 "funkiness abounds.\n");
1643 }
1644 write_unlock_irq(&bad_naa_lock);
1645 }
1646}
1647#endif
1648 1540
1649 /* Strip the SNAP header from ARP packets since we don't 1541 /* Strip the SNAP header from ARP packets since we don't
1650 * pass them through to the 802.2/SNAP layers. 1542 * pass them through to the 802.2/SNAP layers.
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 9f9354fd3516..ee090413e598 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -1760,10 +1760,9 @@ mptscsih_get_tm_timeout(MPT_ADAPTER *ioc)
1760 case FC: 1760 case FC:
1761 return 40; 1761 return 40;
1762 case SAS: 1762 case SAS:
1763 return 10;
1764 case SPI: 1763 case SPI:
1765 default: 1764 default:
1766 return 2; 1765 return 10;
1767 } 1766 }
1768} 1767}
1769 1768
@@ -2009,6 +2008,9 @@ mptscsih_host_reset(struct scsi_cmnd *SCpnt)
2009 return FAILED; 2008 return FAILED;
2010 } 2009 }
2011 2010
2011 /* make sure we have no outstanding commands at this stage */
2012 mptscsih_flush_running_cmds(hd);
2013
2012 ioc = hd->ioc; 2014 ioc = hd->ioc;
2013 printk(MYIOC_s_INFO_FMT "attempting host reset! (sc=%p)\n", 2015 printk(MYIOC_s_INFO_FMT "attempting host reset! (sc=%p)\n",
2014 ioc->name, SCpnt); 2016 ioc->name, SCpnt);
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index 81483de8c0fd..a443e136dc41 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -354,7 +354,7 @@ static inline void i2o_block_sglist_free(struct i2o_block_request *ireq)
354 * @req: the request to prepare 354 * @req: the request to prepare
355 * 355 *
356 * Allocate the necessary i2o_block_request struct and connect it to 356 * Allocate the necessary i2o_block_request struct and connect it to
357 * the request. This is needed that we not loose the SG list later on. 357 * the request. This is needed that we not lose the SG list later on.
358 * 358 *
359 * Returns BLKPREP_OK on success or BLKPREP_DEFER on failure. 359 * Returns BLKPREP_OK on success or BLKPREP_DEFER on failure.
360 */ 360 */
@@ -567,17 +567,17 @@ static void i2o_block_biosparam(unsigned long capacity, unsigned short *cyls,
567 567
568/** 568/**
569 * i2o_block_open - Open the block device 569 * i2o_block_open - Open the block device
570 * @inode: inode for block device being opened 570 * @bdev: block device being opened
571 * @file: file to open 571 * @mode: file open mode
572 * 572 *
573 * Power up the device, mount and lock the media. This function is called, 573 * Power up the device, mount and lock the media. This function is called,
574 * if the block device is opened for access. 574 * if the block device is opened for access.
575 * 575 *
576 * Returns 0 on success or negative error code on failure. 576 * Returns 0 on success or negative error code on failure.
577 */ 577 */
578static int i2o_block_open(struct inode *inode, struct file *file) 578static int i2o_block_open(struct block_device *bdev, fmode_t mode)
579{ 579{
580 struct i2o_block_device *dev = inode->i_bdev->bd_disk->private_data; 580 struct i2o_block_device *dev = bdev->bd_disk->private_data;
581 581
582 if (!dev->i2o_dev) 582 if (!dev->i2o_dev)
583 return -ENODEV; 583 return -ENODEV;
@@ -596,17 +596,16 @@ static int i2o_block_open(struct inode *inode, struct file *file)
596 596
597/** 597/**
598 * i2o_block_release - Release the I2O block device 598 * i2o_block_release - Release the I2O block device
599 * @inode: inode for block device being released 599 * @disk: gendisk device being released
600 * @file: file to close 600 * @mode: file open mode
601 * 601 *
602 * Unlock and unmount the media, and power down the device. Gets called if 602 * Unlock and unmount the media, and power down the device. Gets called if
603 * the block device is closed. 603 * the block device is closed.
604 * 604 *
605 * Returns 0 on success or negative error code on failure. 605 * Returns 0 on success or negative error code on failure.
606 */ 606 */
607static int i2o_block_release(struct inode *inode, struct file *file) 607static int i2o_block_release(struct gendisk *disk, fmode_t mode)
608{ 608{
609 struct gendisk *disk = inode->i_bdev->bd_disk;
610 struct i2o_block_device *dev = disk->private_data; 609 struct i2o_block_device *dev = disk->private_data;
611 u8 operation; 610 u8 operation;
612 611
@@ -644,8 +643,8 @@ static int i2o_block_getgeo(struct block_device *bdev, struct hd_geometry *geo)
644 643
645/** 644/**
646 * i2o_block_ioctl - Issue device specific ioctl calls. 645 * i2o_block_ioctl - Issue device specific ioctl calls.
647 * @inode: inode for block device ioctl 646 * @bdev: block device being opened
648 * @file: file for ioctl 647 * @mode: file open mode
649 * @cmd: ioctl command 648 * @cmd: ioctl command
650 * @arg: arg 649 * @arg: arg
651 * 650 *
@@ -653,10 +652,10 @@ static int i2o_block_getgeo(struct block_device *bdev, struct hd_geometry *geo)
653 * 652 *
654 * Return 0 on success or negative error on failure. 653 * Return 0 on success or negative error on failure.
655 */ 654 */
656static int i2o_block_ioctl(struct inode *inode, struct file *file, 655static int i2o_block_ioctl(struct block_device *bdev, fmode_t mode,
657 unsigned int cmd, unsigned long arg) 656 unsigned int cmd, unsigned long arg)
658{ 657{
659 struct gendisk *disk = inode->i_bdev->bd_disk; 658 struct gendisk *disk = bdev->bd_disk;
660 struct i2o_block_device *dev = disk->private_data; 659 struct i2o_block_device *dev = disk->private_data;
661 660
662 /* Anyone capable of this syscall can do *real bad* things */ 661 /* Anyone capable of this syscall can do *real bad* things */
@@ -933,7 +932,7 @@ static struct block_device_operations i2o_block_fops = {
933 .owner = THIS_MODULE, 932 .owner = THIS_MODULE,
934 .open = i2o_block_open, 933 .open = i2o_block_open,
935 .release = i2o_block_release, 934 .release = i2o_block_release,
936 .ioctl = i2o_block_ioctl, 935 .locked_ioctl = i2o_block_ioctl,
937 .getgeo = i2o_block_getgeo, 936 .getgeo = i2o_block_getgeo,
938 .media_changed = i2o_block_media_changed 937 .media_changed = i2o_block_media_changed
939}; 938};
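Besides the open/release conversion, i2o_block's ioctl moves from .ioctl to .locked_ioctl and takes the block_device and fmode_t instead of inode/file. Our reading of this series (not spelled out in the hunk itself) is that .locked_ioctl is the transitional slot still called under the big kernel lock, while handlers that no longer need it use the lock-less .ioctl. A sketch of the converted handler shape:

    #include <linux/fs.h>
    #include <linux/capability.h>
    #include <linux/module.h>

    static int my_blk_ioctl(struct block_device *bdev, fmode_t mode,
                            unsigned int cmd, unsigned long arg)
    {
            /* privilege gate, as hinted by the comment in the hunk above */
            if (!capable(CAP_SYS_ADMIN))
                    return -EPERM;
            return -ENOTTY;                 /* no commands handled in this sketch */
    }

    static struct block_device_operations my_blk_ops = {
            .owner        = THIS_MODULE,
            .locked_ioctl = my_blk_ioctl,
    };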
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
index a3fabdbe6ca6..f3384c32b9a1 100644
--- a/drivers/message/i2o/i2o_config.c
+++ b/drivers/message/i2o/i2o_config.c
@@ -1097,28 +1097,17 @@ static int cfg_fasync(int fd, struct file *fp, int on)
1097static int cfg_release(struct inode *inode, struct file *file) 1097static int cfg_release(struct inode *inode, struct file *file)
1098{ 1098{
1099 ulong id = (ulong) file->private_data; 1099 ulong id = (ulong) file->private_data;
1100 struct i2o_cfg_info *p1, *p2; 1100 struct i2o_cfg_info *p, **q;
1101 unsigned long flags; 1101 unsigned long flags;
1102 1102
1103 lock_kernel(); 1103 lock_kernel();
1104 p1 = p2 = NULL;
1105
1106 spin_lock_irqsave(&i2o_config_lock, flags); 1104 spin_lock_irqsave(&i2o_config_lock, flags);
1107 for (p1 = open_files; p1;) { 1105 for (q = &open_files; (p = *q) != NULL; q = &p->next) {
1108 if (p1->q_id == id) { 1106 if (p->q_id == id) {
1109 1107 *q = p->next;
1110 if (p1->fasync) 1108 kfree(p);
1111 cfg_fasync(-1, file, 0);
1112 if (p2)
1113 p2->next = p1->next;
1114 else
1115 open_files = p1->next;
1116
1117 kfree(p1);
1118 break; 1109 break;
1119 } 1110 }
1120 p2 = p1;
1121 p1 = p1->next;
1122 } 1111 }
1123 spin_unlock_irqrestore(&i2o_config_lock, flags); 1112 spin_unlock_irqrestore(&i2o_config_lock, flags);
1124 unlock_kernel(); 1113 unlock_kernel();
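The rewritten cfg_release() walks the open_files list with a pointer-to-pointer cursor: instead of carrying a separate "previous" pointer (the old p1/p2 pair), it iterates over the address of each ->next link, so the matching entry can be unlinked with a single assignment. The same idiom in isolation, with a generic node type and the kernel list helpers deliberately left out so the pointer manipulation stays visible:

    #include <linux/slab.h>

    struct node {
            int id;
            struct node *next;
    };

    static void unlink_by_id(struct node **head, int id)
    {
            struct node *p, **q;

            for (q = head; (p = *q) != NULL; q = &p->next) {
                    if (p->id == id) {
                            *q = p->next;   /* bypass the matched node */
                            kfree(p);
                            break;
                    }
            }
    }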
diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
index be2b5926d26c..6e53a30bfd38 100644
--- a/drivers/message/i2o/iop.c
+++ b/drivers/message/i2o/iop.c
@@ -49,7 +49,6 @@ static int i2o_hrt_get(struct i2o_controller *c);
49/** 49/**
50 * i2o_msg_get_wait - obtain an I2O message from the IOP 50 * i2o_msg_get_wait - obtain an I2O message from the IOP
51 * @c: I2O controller 51 * @c: I2O controller
52 * @msg: pointer to a I2O message pointer
53 * @wait: how long to wait until timeout 52 * @wait: how long to wait until timeout
54 * 53 *
55 * This function waits up to wait seconds for a message slot to be 54 * This function waits up to wait seconds for a message slot to be
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 5a79d2d4cdae..257277394f8c 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -81,7 +81,7 @@ config MFD_TMIO
81 81
82config MFD_T7L66XB 82config MFD_T7L66XB
83 bool "Support Toshiba T7L66XB" 83 bool "Support Toshiba T7L66XB"
84 depends on ARM 84 depends on ARM && HAVE_CLK
85 select MFD_CORE 85 select MFD_CORE
86 select MFD_TMIO 86 select MFD_TMIO
87 help 87 help
@@ -89,7 +89,7 @@ config MFD_T7L66XB
89 89
90config MFD_TC6387XB 90config MFD_TC6387XB
91 bool "Support Toshiba TC6387XB" 91 bool "Support Toshiba TC6387XB"
92 depends on ARM 92 depends on ARM && HAVE_CLK
93 select MFD_CORE 93 select MFD_CORE
94 select MFD_TMIO 94 select MFD_TMIO
95 help 95 help
@@ -103,8 +103,20 @@ config MFD_TC6393XB
103 help 103 help
104 Support for Toshiba Mobile IO Controller TC6393XB 104 Support for Toshiba Mobile IO Controller TC6393XB
105 105
106config PMIC_DA903X
107 bool "Dialog Semiconductor DA9030/DA9034 PMIC Support"
108 depends on I2C=y
109 help
110 Say yes here to support for Dialog Semiconductor DA9030 (a.k.a
111 ARAVA) and DA9034 (a.k.a MICCO), these are Power Management IC
112 usually found on PXA processors-based platforms. This includes
113 the I2C driver and the core APIs _only_, you have to select
114 individual components like LCD backlight, voltage regulators,
115 LEDs and battery-charger under the corresponding menus.
116
106config MFD_WM8400 117config MFD_WM8400
107 tristate "Support Wolfson Microelectronics WM8400" 118 tristate "Support Wolfson Microelectronics WM8400"
119 depends on I2C
108 help 120 help
109 Support for the Wolfson Microelecronics WM8400 PMIC and audio 121 Support for the Wolfson Microelecronics WM8400 PMIC and audio
110 CODEC. This driver adds provides common support for accessing 122 CODEC. This driver adds provides common support for accessing
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 68e237b830ad..9a5ad8af9116 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -17,7 +17,7 @@ wm8350-objs := wm8350-core.o wm8350-regmap.o wm8350-gpio.o
17obj-$(CONFIG_MFD_WM8350) += wm8350.o 17obj-$(CONFIG_MFD_WM8350) += wm8350.o
18obj-$(CONFIG_MFD_WM8350_I2C) += wm8350-i2c.o 18obj-$(CONFIG_MFD_WM8350_I2C) += wm8350-i2c.o
19 19
20obj-$(CONFIG_TWL4030_CORE) += twl4030-core.o 20obj-$(CONFIG_TWL4030_CORE) += twl4030-core.o twl4030-irq.o
21 21
22obj-$(CONFIG_MFD_CORE) += mfd-core.o 22obj-$(CONFIG_MFD_CORE) += mfd-core.o
23 23
@@ -30,3 +30,5 @@ ifeq ($(CONFIG_SA1100_ASSABET),y)
30obj-$(CONFIG_MCP_UCB1200) += ucb1x00-assabet.o 30obj-$(CONFIG_MCP_UCB1200) += ucb1x00-assabet.o
31endif 31endif
32obj-$(CONFIG_UCB1400_CORE) += ucb1400_core.o 32obj-$(CONFIG_UCB1400_CORE) += ucb1400_core.o
33
34obj-$(CONFIG_PMIC_DA903X) += da903x.o \ No newline at end of file
diff --git a/drivers/mfd/da903x.c b/drivers/mfd/da903x.c
index b57326ae464d..0b5bd85dfcec 100644
--- a/drivers/mfd/da903x.c
+++ b/drivers/mfd/da903x.c
@@ -267,7 +267,7 @@ static int da9030_mask_events(struct da903x_chip *chip, unsigned int events)
267{ 267{
268 uint8_t v[3]; 268 uint8_t v[3];
269 269
270 chip->events_mask &= ~events; 270 chip->events_mask |= events;
271 271
272 v[0] = (chip->events_mask & 0xff); 272 v[0] = (chip->events_mask & 0xff);
273 v[1] = (chip->events_mask >> 8) & 0xff; 273 v[1] = (chip->events_mask >> 8) & 0xff;
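The da903x change is a one-character bug fix: masking events must set the corresponding bits in events_mask before the mask registers are written, whereas the old "&= ~events" cleared them and effectively unmasked the events instead. Reduced to its essence, with a generic state struct; the "bit set = masked" convention follows from the fix:

    struct events_state {
            unsigned int mask;              /* bit set = event masked */
    };

    static void mask_events(struct events_state *s, unsigned int events)
    {
            s->mask |= events;              /* was "&= ~events", which unmasked */
    }

    static void unmask_events(struct events_state *s, unsigned int events)
    {
            s->mask &= ~events;
    }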
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
index 220e4371266b..170f9d47c2f9 100644
--- a/drivers/mfd/sm501.c
+++ b/drivers/mfd/sm501.c
@@ -1374,31 +1374,31 @@ static int sm501_init_dev(struct sm501_devdata *sm)
1374static int sm501_plat_probe(struct platform_device *dev) 1374static int sm501_plat_probe(struct platform_device *dev)
1375{ 1375{
1376 struct sm501_devdata *sm; 1376 struct sm501_devdata *sm;
1377 int err; 1377 int ret;
1378 1378
1379 sm = kzalloc(sizeof(struct sm501_devdata), GFP_KERNEL); 1379 sm = kzalloc(sizeof(struct sm501_devdata), GFP_KERNEL);
1380 if (sm == NULL) { 1380 if (sm == NULL) {
1381 dev_err(&dev->dev, "no memory for device data\n"); 1381 dev_err(&dev->dev, "no memory for device data\n");
1382 err = -ENOMEM; 1382 ret = -ENOMEM;
1383 goto err1; 1383 goto err1;
1384 } 1384 }
1385 1385
1386 sm->dev = &dev->dev; 1386 sm->dev = &dev->dev;
1387 sm->pdev_id = dev->id; 1387 sm->pdev_id = dev->id;
1388 sm->irq = platform_get_irq(dev, 0);
1389 sm->io_res = platform_get_resource(dev, IORESOURCE_MEM, 1);
1390 sm->mem_res = platform_get_resource(dev, IORESOURCE_MEM, 0);
1391 sm->platdata = dev->dev.platform_data; 1388 sm->platdata = dev->dev.platform_data;
1392 1389
1393 if (sm->irq < 0) { 1390 ret = platform_get_irq(dev, 0);
1391 if (ret < 0) {
1394 dev_err(&dev->dev, "failed to get irq resource\n"); 1392 dev_err(&dev->dev, "failed to get irq resource\n");
1395 err = sm->irq;
1396 goto err_res; 1393 goto err_res;
1397 } 1394 }
1395 sm->irq = ret;
1398 1396
1397 sm->io_res = platform_get_resource(dev, IORESOURCE_MEM, 1);
1398 sm->mem_res = platform_get_resource(dev, IORESOURCE_MEM, 0);
1399 if (sm->io_res == NULL || sm->mem_res == NULL) { 1399 if (sm->io_res == NULL || sm->mem_res == NULL) {
1400 dev_err(&dev->dev, "failed to get IO resource\n"); 1400 dev_err(&dev->dev, "failed to get IO resource\n");
1401 err = -ENOENT; 1401 ret = -ENOENT;
1402 goto err_res; 1402 goto err_res;
1403 } 1403 }
1404 1404
@@ -1407,7 +1407,7 @@ static int sm501_plat_probe(struct platform_device *dev)
1407 1407
1408 if (sm->regs_claim == NULL) { 1408 if (sm->regs_claim == NULL) {
1409 dev_err(&dev->dev, "cannot claim registers\n"); 1409 dev_err(&dev->dev, "cannot claim registers\n");
1410 err= -EBUSY; 1410 ret = -EBUSY;
1411 goto err_res; 1411 goto err_res;
1412 } 1412 }
1413 1413
@@ -1418,7 +1418,7 @@ static int sm501_plat_probe(struct platform_device *dev)
1418 1418
1419 if (sm->regs == NULL) { 1419 if (sm->regs == NULL) {
1420 dev_err(&dev->dev, "cannot remap registers\n"); 1420 dev_err(&dev->dev, "cannot remap registers\n");
1421 err = -EIO; 1421 ret = -EIO;
1422 goto err_claim; 1422 goto err_claim;
1423 } 1423 }
1424 1424
@@ -1430,7 +1430,7 @@ static int sm501_plat_probe(struct platform_device *dev)
1430 err_res: 1430 err_res:
1431 kfree(sm); 1431 kfree(sm);
1432 err1: 1432 err1:
1433 return err; 1433 return ret;
1434 1434
1435} 1435}
1436 1436
@@ -1625,8 +1625,7 @@ static int sm501_pci_probe(struct pci_dev *dev,
1625 goto err3; 1625 goto err3;
1626 } 1626 }
1627 1627
1628 sm->regs = ioremap(pci_resource_start(dev, 1), 1628 sm->regs = pci_ioremap_bar(dev, 1);
1629 pci_resource_len(dev, 1));
1630 1629
1631 if (sm->regs == NULL) { 1630 if (sm->regs == NULL) {
1632 dev_err(&dev->dev, "cannot remap registers\n"); 1631 dev_err(&dev->dev, "cannot remap registers\n");
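Two small hardening patterns appear in the sm501 probe paths: the platform probe now keeps the platform_get_irq() result in a signed local and only stores it in the device structure once it is known not to be a negative errno, and the PCI probe maps its register BAR with pci_ioremap_bar() instead of an open-coded ioremap of start and length. The same shape in isolation; struct my_sm and the surrounding error handling are illustrative:

    #include <linux/platform_device.h>
    #include <linux/pci.h>
    #include <linux/io.h>
    #include <linux/errno.h>

    struct my_sm {
            unsigned int irq;
            void __iomem *regs;
    };

    static int my_plat_probe_irq(struct platform_device *pdev, struct my_sm *sm)
    {
            int ret = platform_get_irq(pdev, 0);

            if (ret < 0)
                    return ret;             /* propagate the errno, don't store it */
            sm->irq = ret;
            return 0;
    }

    static int my_pci_probe_regs(struct pci_dev *pdev, struct my_sm *sm)
    {
            sm->regs = pci_ioremap_bar(pdev, 1);    /* maps the whole of BAR 1 */
            if (!sm->regs)
                    return -EIO;
            return 0;
    }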
diff --git a/drivers/mfd/twl4030-core.c b/drivers/mfd/twl4030-core.c
index fd9a0160202c..dd843c4fbcc7 100644
--- a/drivers/mfd/twl4030-core.c
+++ b/drivers/mfd/twl4030-core.c
@@ -27,15 +27,11 @@
27 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 27 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
28 */ 28 */
29 29
30#include <linux/kernel_stat.h>
31#include <linux/init.h> 30#include <linux/init.h>
32#include <linux/mutex.h> 31#include <linux/mutex.h>
33#include <linux/interrupt.h>
34#include <linux/irq.h>
35#include <linux/random.h>
36#include <linux/kthread.h>
37#include <linux/platform_device.h> 32#include <linux/platform_device.h>
38#include <linux/clk.h> 33#include <linux/clk.h>
34#include <linux/err.h>
39 35
40#include <linux/i2c.h> 36#include <linux/i2c.h>
41#include <linux/i2c/twl4030.h> 37#include <linux/i2c/twl4030.h>
@@ -93,26 +89,6 @@
93#define twl_has_usb() false 89#define twl_has_usb() false
94#endif 90#endif
95 91
96static inline void activate_irq(int irq)
97{
98#ifdef CONFIG_ARM
99 /* ARM requires an extra step to clear IRQ_NOREQUEST, which it
100 * sets on behalf of every irq_chip. Also sets IRQ_NOPROBE.
101 */
102 set_irq_flags(irq, IRQF_VALID);
103#else
104 /* same effect on other architectures */
105 set_irq_noprobe(irq);
106#endif
107}
108
109/* Primary Interrupt Handler on TWL4030 Registers */
110
111/* Register Definitions */
112
113#define REG_PIH_ISR_P1 (0x1)
114#define REG_PIH_ISR_P2 (0x2)
115#define REG_PIH_SIR (0x3)
116 92
117/* Triton Core internal information (BEGIN) */ 93/* Triton Core internal information (BEGIN) */
118 94
@@ -175,138 +151,6 @@ static inline void activate_irq(int irq)
175 151
176/*----------------------------------------------------------------------*/ 152/*----------------------------------------------------------------------*/
177 153
178/**
179 * struct twl4030_mod_iregs - TWL module IMR/ISR regs to mask/clear at init
180 * @mod_no: TWL4030 module number (e.g., TWL4030_MODULE_GPIO)
181 * @sih_ctrl: address of module SIH_CTRL register
182 * @reg_cnt: number of IMR/ISR regs
183 * @imrs: pointer to array of TWL module interrupt mask register indices
184 * @isrs: pointer to array of TWL module interrupt status register indices
185 *
186 * Ties together TWL4030 modules and lists of IMR/ISR registers to mask/clear
187 * during twl_init_irq().
188 */
189struct twl4030_mod_iregs {
190 const u8 mod_no;
191 const u8 sih_ctrl;
192 const u8 reg_cnt;
193 const u8 *imrs;
194 const u8 *isrs;
195};
196
197/* TWL4030 INT module interrupt mask registers */
198static const u8 __initconst twl4030_int_imr_regs[] = {
199 TWL4030_INT_PWR_IMR1,
200 TWL4030_INT_PWR_IMR2,
201};
202
203/* TWL4030 INT module interrupt status registers */
204static const u8 __initconst twl4030_int_isr_regs[] = {
205 TWL4030_INT_PWR_ISR1,
206 TWL4030_INT_PWR_ISR2,
207};
208
209/* TWL4030 INTERRUPTS module interrupt mask registers */
210static const u8 __initconst twl4030_interrupts_imr_regs[] = {
211 TWL4030_INTERRUPTS_BCIIMR1A,
212 TWL4030_INTERRUPTS_BCIIMR1B,
213 TWL4030_INTERRUPTS_BCIIMR2A,
214 TWL4030_INTERRUPTS_BCIIMR2B,
215};
216
217/* TWL4030 INTERRUPTS module interrupt status registers */
218static const u8 __initconst twl4030_interrupts_isr_regs[] = {
219 TWL4030_INTERRUPTS_BCIISR1A,
220 TWL4030_INTERRUPTS_BCIISR1B,
221 TWL4030_INTERRUPTS_BCIISR2A,
222 TWL4030_INTERRUPTS_BCIISR2B,
223};
224
225/* TWL4030 MADC module interrupt mask registers */
226static const u8 __initconst twl4030_madc_imr_regs[] = {
227 TWL4030_MADC_IMR1,
228 TWL4030_MADC_IMR2,
229};
230
231/* TWL4030 MADC module interrupt status registers */
232static const u8 __initconst twl4030_madc_isr_regs[] = {
233 TWL4030_MADC_ISR1,
234 TWL4030_MADC_ISR2,
235};
236
237/* TWL4030 keypad module interrupt mask registers */
238static const u8 __initconst twl4030_keypad_imr_regs[] = {
239 TWL4030_KEYPAD_KEYP_IMR1,
240 TWL4030_KEYPAD_KEYP_IMR2,
241};
242
243/* TWL4030 keypad module interrupt status registers */
244static const u8 __initconst twl4030_keypad_isr_regs[] = {
245 TWL4030_KEYPAD_KEYP_ISR1,
246 TWL4030_KEYPAD_KEYP_ISR2,
247};
248
249/* TWL4030 GPIO module interrupt mask registers */
250static const u8 __initconst twl4030_gpio_imr_regs[] = {
251 REG_GPIO_IMR1A,
252 REG_GPIO_IMR1B,
253 REG_GPIO_IMR2A,
254 REG_GPIO_IMR2B,
255 REG_GPIO_IMR3A,
256 REG_GPIO_IMR3B,
257};
258
259/* TWL4030 GPIO module interrupt status registers */
260static const u8 __initconst twl4030_gpio_isr_regs[] = {
261 REG_GPIO_ISR1A,
262 REG_GPIO_ISR1B,
263 REG_GPIO_ISR2A,
264 REG_GPIO_ISR2B,
265 REG_GPIO_ISR3A,
266 REG_GPIO_ISR3B,
267};
268
269/* TWL4030 modules that have IMR/ISR registers that must be masked/cleared */
270static const struct twl4030_mod_iregs __initconst twl4030_mod_regs[] = {
271 {
272 .mod_no = TWL4030_MODULE_INT,
273 .sih_ctrl = TWL4030_INT_PWR_SIH_CTRL,
274 .reg_cnt = ARRAY_SIZE(twl4030_int_imr_regs),
275 .imrs = twl4030_int_imr_regs,
276 .isrs = twl4030_int_isr_regs,
277 },
278 {
279 .mod_no = TWL4030_MODULE_INTERRUPTS,
280 .sih_ctrl = TWL4030_INTERRUPTS_BCISIHCTRL,
281 .reg_cnt = ARRAY_SIZE(twl4030_interrupts_imr_regs),
282 .imrs = twl4030_interrupts_imr_regs,
283 .isrs = twl4030_interrupts_isr_regs,
284 },
285 {
286 .mod_no = TWL4030_MODULE_MADC,
287 .sih_ctrl = TWL4030_MADC_SIH_CTRL,
288 .reg_cnt = ARRAY_SIZE(twl4030_madc_imr_regs),
289 .imrs = twl4030_madc_imr_regs,
290 .isrs = twl4030_madc_isr_regs,
291 },
292 {
293 .mod_no = TWL4030_MODULE_KEYPAD,
294 .sih_ctrl = TWL4030_KEYPAD_KEYP_SIH_CTRL,
295 .reg_cnt = ARRAY_SIZE(twl4030_keypad_imr_regs),
296 .imrs = twl4030_keypad_imr_regs,
297 .isrs = twl4030_keypad_isr_regs,
298 },
299 {
300 .mod_no = TWL4030_MODULE_GPIO,
301 .sih_ctrl = REG_GPIO_SIH_CTRL,
302 .reg_cnt = ARRAY_SIZE(twl4030_gpio_imr_regs),
303 .imrs = twl4030_gpio_imr_regs,
304 .isrs = twl4030_gpio_isr_regs,
305 },
306};
307
308/*----------------------------------------------------------------*/
309
310/* is driver active, bound to a chip? */ 154/* is driver active, bound to a chip? */
311static bool inuse; 155static bool inuse;
312 156
@@ -367,33 +211,6 @@ static struct twl4030mapping twl4030_map[TWL4030_MODULE_LAST + 1] = {
367 211
368/*----------------------------------------------------------------------*/ 212/*----------------------------------------------------------------------*/
369 213
370/*
371 * TWL4030 doesn't have PIH mask, hence dummy function for mask
372 * and unmask of the (eight) interrupts reported at that level ...
373 * masking is only available from SIH (secondary) modules.
374 */
375
376static void twl4030_i2c_ackirq(unsigned int irq)
377{
378}
379
380static void twl4030_i2c_disableint(unsigned int irq)
381{
382}
383
384static void twl4030_i2c_enableint(unsigned int irq)
385{
386}
387
388static struct irq_chip twl4030_irq_chip = {
389 .name = "twl4030",
390 .ack = twl4030_i2c_ackirq,
391 .mask = twl4030_i2c_disableint,
392 .unmask = twl4030_i2c_enableint,
393};
394
395/*----------------------------------------------------------------------*/
396
397/* Exported Functions */ 214/* Exported Functions */
398 215
399/** 216/**
@@ -535,108 +352,11 @@ EXPORT_SYMBOL(twl4030_i2c_read_u8);
535 352
536/*----------------------------------------------------------------------*/ 353/*----------------------------------------------------------------------*/
537 354
538static unsigned twl4030_irq_base;
539
540static struct completion irq_event;
541
542/*
543 * This thread processes interrupts reported by the Primary Interrupt Handler.
544 */
545static int twl4030_irq_thread(void *data)
546{
547 long irq = (long)data;
548 irq_desc_t *desc = irq_desc + irq;
549 static unsigned i2c_errors;
550 const static unsigned max_i2c_errors = 100;
551
552 current->flags |= PF_NOFREEZE;
553
554 while (!kthread_should_stop()) {
555 int ret;
556 int module_irq;
557 u8 pih_isr;
558
559 /* Wait for IRQ, then read PIH irq status (also blocking) */
560 wait_for_completion_interruptible(&irq_event);
561
562 ret = twl4030_i2c_read_u8(TWL4030_MODULE_PIH, &pih_isr,
563 REG_PIH_ISR_P1);
564 if (ret) {
565 pr_warning("%s: I2C error %d reading PIH ISR\n",
566 DRIVER_NAME, ret);
567 if (++i2c_errors >= max_i2c_errors) {
568 printk(KERN_ERR "Maximum I2C error count"
569 " exceeded. Terminating %s.\n",
570 __func__);
571 break;
572 }
573 complete(&irq_event);
574 continue;
575 }
576
577 /* these handlers deal with the relevant SIH irq status */
578 local_irq_disable();
579 for (module_irq = twl4030_irq_base;
580 pih_isr;
581 pih_isr >>= 1, module_irq++) {
582 if (pih_isr & 0x1) {
583 irq_desc_t *d = irq_desc + module_irq;
584
585 d->handle_irq(module_irq, d);
586 }
587 }
588 local_irq_enable();
589
590 desc->chip->unmask(irq);
591 }
592
593 return 0;
594}
595
596/* 355/*
597 * do_twl4030_irq() is the desc->handle method for the twl4030 interrupt. 356 * NOTE: We know the first 8 IRQs after pdata->base_irq are
598 * This is a chained interrupt, so there is no desc->action method for it. 357 * for the PIH, and the next are for the PWR_INT SIH, since
599 * Now we need to query the interrupt controller in the twl4030 to determine 358 * that's how twl_init_irq() sets things up.
600 * which module is generating the interrupt request. However, we can't do i2c
601 * transactions in interrupt context, so we must defer that work to a kernel
602 * thread. All we do here is acknowledge and mask the interrupt and wakeup
603 * the kernel thread.
604 */ 359 */
605static void do_twl4030_irq(unsigned int irq, irq_desc_t *desc)
606{
607 const unsigned int cpu = smp_processor_id();
608
609 /*
610 * Earlier this was desc->triggered = 1;
611 */
612 desc->status |= IRQ_LEVEL;
613
614 /*
615 * Acknowledge, clear _AND_ disable the interrupt.
616 */
617 desc->chip->ack(irq);
618
619 if (!desc->depth) {
620 kstat_cpu(cpu).irqs[irq]++;
621
622 complete(&irq_event);
623 }
624}
625
626static struct task_struct * __init start_twl4030_irq_thread(long irq)
627{
628 struct task_struct *thread;
629
630 init_completion(&irq_event);
631 thread = kthread_run(twl4030_irq_thread, (void *)irq, "twl4030-irq");
632 if (!thread)
633 pr_err("%s: could not create twl4030 irq %ld thread!\n",
634 DRIVER_NAME, irq);
635
636 return thread;
637}
638
639/*----------------------------------------------------------------------*/
640 360
641static int add_children(struct twl4030_platform_data *pdata) 361static int add_children(struct twl4030_platform_data *pdata)
642{ 362{
@@ -668,7 +388,7 @@ static int add_children(struct twl4030_platform_data *pdata)
668 388
669 if (status == 0) { 389 if (status == 0) {
670 struct resource r = { 390 struct resource r = {
671 .start = TWL4030_PWRIRQ_CHG_PRES, 391 .start = pdata->irq_base + 8 + 1,
672 .flags = IORESOURCE_IRQ, 392 .flags = IORESOURCE_IRQ,
673 }; 393 };
674 394
@@ -817,8 +537,7 @@ static int add_children(struct twl4030_platform_data *pdata)
817 /* RTC module IRQ */ 537 /* RTC module IRQ */
818 if (status == 0) { 538 if (status == 0) {
819 struct resource r = { 539 struct resource r = {
820 /* REVISIT don't hard-wire this stuff */ 540 .start = pdata->irq_base + 8 + 3,
821 .start = TWL4030_PWRIRQ_RTC,
822 .flags = IORESOURCE_IRQ, 541 .flags = IORESOURCE_IRQ,
823 }; 542 };
824 543
@@ -863,7 +582,7 @@ static int add_children(struct twl4030_platform_data *pdata)
863 582
864 if (status == 0) { 583 if (status == 0) {
865 struct resource r = { 584 struct resource r = {
866 .start = TWL4030_PWRIRQ_USB_PRES, 585 .start = pdata->irq_base + 8 + 2,
867 .flags = IORESOURCE_IRQ, 586 .flags = IORESOURCE_IRQ,
868 }; 587 };
869 588
@@ -965,123 +684,17 @@ static void __init clocks_init(void)
965 684
966/*----------------------------------------------------------------------*/ 685/*----------------------------------------------------------------------*/
967 686
968/** 687int twl_init_irq(int irq_num, unsigned irq_base, unsigned irq_end);
969 * twl4030_i2c_clear_isr - clear TWL4030 SIH ISR regs via read + write 688int twl_exit_irq(void);
970 * @mod_no: TWL4030 module number
971 * @reg: register index to clear
972 * @cor: value of the <module>_SIH_CTRL.COR bit (1 or 0)
973 *
974 * Either reads (cor == 1) or writes (cor == 0) to a TWL4030 interrupt
975 * status register to ensure that any prior interrupts are cleared.
976 * Returns the status from the I2C read operation.
977 */
978static int __init twl4030_i2c_clear_isr(u8 mod_no, u8 reg, u8 cor)
979{
980 u8 tmp;
981
982 return (cor) ? twl4030_i2c_read_u8(mod_no, &tmp, reg) :
983 twl4030_i2c_write_u8(mod_no, 0xff, reg);
984}
985
986/**
987 * twl4030_read_cor_bit - are TWL module ISRs cleared by reads or writes?
988 * @mod_no: TWL4030 module number
989 * @reg: register index to clear
990 *
991 * Returns 1 if the TWL4030 SIH interrupt status registers (ISRs) for
992 * the specified TWL module are cleared by reads, or 0 if cleared by
993 * writes.
994 */
995static int twl4030_read_cor_bit(u8 mod_no, u8 reg)
996{
997 u8 tmp = 0;
998
999 WARN_ON(twl4030_i2c_read_u8(mod_no, &tmp, reg) < 0);
1000
1001 tmp &= TWL4030_SIH_CTRL_COR_MASK;
1002 tmp >>= __ffs(TWL4030_SIH_CTRL_COR_MASK);
1003
1004 return tmp;
1005}
1006
1007/**
1008 * twl4030_mask_clear_intrs - mask and clear all TWL4030 interrupts
1009 * @t: pointer to twl4030_mod_iregs array
1010 * @t_sz: ARRAY_SIZE(t) (starting at 1)
1011 *
1012 * Mask all TWL4030 interrupt mask registers (IMRs) and clear all
1013 * interrupt status registers (ISRs). No return value, but will WARN if
1014 * any I2C operations fail.
1015 */
1016static void __init twl4030_mask_clear_intrs(const struct twl4030_mod_iregs *t,
1017 const u8 t_sz)
1018{
1019 int i, j;
1020
1021 /*
1022 * N.B. - further efficiency is possible here. Eight I2C
1023 * operations on BCI and GPIO modules are avoidable if I2C
1024 * burst read/write transactions were implemented. Would
1025 * probably save about 1ms of boot time and a small amount of
1026 * power.
1027 */
1028 for (i = 0; i < t_sz; i++) {
1029 const struct twl4030_mod_iregs tmr = t[i];
1030 int cor;
1031
1032 /* Are ISRs cleared by reads or writes? */
1033 cor = twl4030_read_cor_bit(tmr.mod_no, tmr.sih_ctrl);
1034
1035 for (j = 0; j < tmr.reg_cnt; j++) {
1036
1037 /* Mask interrupts at the TWL4030 */
1038 WARN_ON(twl4030_i2c_write_u8(tmr.mod_no, 0xff,
1039 tmr.imrs[j]) < 0);
1040
1041 /* Clear TWL4030 ISRs */
1042 WARN_ON(twl4030_i2c_clear_isr(tmr.mod_no,
1043 tmr.isrs[j], cor) < 0);
1044 }
1045 }
1046}
1047
1048
1049static void twl_init_irq(int irq_num, unsigned irq_base, unsigned irq_end)
1050{
1051 int i;
1052
1053 /*
1054 * Mask and clear all TWL4030 interrupts since initially we do
1055 * not have any TWL4030 module interrupt handlers present
1056 */
1057 twl4030_mask_clear_intrs(twl4030_mod_regs,
1058 ARRAY_SIZE(twl4030_mod_regs));
1059
1060 twl4030_irq_base = irq_base;
1061
1062 /* install an irq handler for each of the PIH modules */
1063 for (i = irq_base; i < irq_end; i++) {
1064 set_irq_chip_and_handler(i, &twl4030_irq_chip,
1065 handle_simple_irq);
1066 activate_irq(i);
1067 }
1068
1069 /* install an irq handler to demultiplex the TWL4030 interrupt */
1070 set_irq_data(irq_num, start_twl4030_irq_thread(irq_num));
1071 set_irq_chained_handler(irq_num, do_twl4030_irq);
1072}
1073
1074/*----------------------------------------------------------------------*/
1075 689
1076static int twl4030_remove(struct i2c_client *client) 690static int twl4030_remove(struct i2c_client *client)
1077{ 691{
1078 unsigned i; 692 unsigned i;
693 int status;
1079 694
1080 /* FIXME undo twl_init_irq() */ 695 status = twl_exit_irq();
1081 if (twl4030_irq_base) { 696 if (status < 0)
1082 dev_err(&client->dev, "can't yet clean up IRQs?\n"); 697 return status;
1083 return -ENOSYS;
1084 }
1085 698
1086 for (i = 0; i < TWL4030_NUM_SLAVES; i++) { 699 for (i = 0; i < TWL4030_NUM_SLAVES; i++) {
1087 struct twl4030_client *twl = &twl4030_modules[i]; 700 struct twl4030_client *twl = &twl4030_modules[i];
@@ -1112,7 +725,7 @@ twl4030_probe(struct i2c_client *client, const struct i2c_device_id *id)
1112 return -EIO; 725 return -EIO;
1113 } 726 }
1114 727
1115 if (inuse || twl4030_irq_base) { 728 if (inuse) {
1116 dev_dbg(&client->dev, "driver is already in use\n"); 729 dev_dbg(&client->dev, "driver is already in use\n");
1117 return -EBUSY; 730 return -EBUSY;
1118 } 731 }
@@ -1146,9 +759,9 @@ twl4030_probe(struct i2c_client *client, const struct i2c_device_id *id)
1146 if (client->irq 759 if (client->irq
1147 && pdata->irq_base 760 && pdata->irq_base
1148 && pdata->irq_end > pdata->irq_base) { 761 && pdata->irq_end > pdata->irq_base) {
1149 twl_init_irq(client->irq, pdata->irq_base, pdata->irq_end); 762 status = twl_init_irq(client->irq, pdata->irq_base, pdata->irq_end);
1150 dev_info(&client->dev, "IRQ %d chains IRQs %d..%d\n", 763 if (status < 0)
1151 client->irq, pdata->irq_base, pdata->irq_end - 1); 764 goto fail;
1152 } 765 }
1153 766
1154 status = add_children(pdata); 767 status = add_children(pdata);
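
The hard-wired TWL4030_PWRIRQ_* constants are replaced by offsets from the platform-assigned base; per the NOTE above, PIH occupies irq_base + 0..7 and the PWR_INT SIH starts at irq_base + 8. A sketch of how a child's IRQ resource could be built, with a hypothetical helper name:

#include <linux/ioport.h>
#include <linux/i2c/twl4030.h>

/* hypothetical helper: PWR_INT bit N lives at irq_base + 8 + N */
static struct resource twl_pwr_irq_resource(struct twl4030_platform_data *pdata,
					    unsigned pwr_bit)
{
	struct resource r = {
		.start	= pdata->irq_base + 8 + pwr_bit,
		.flags	= IORESOURCE_IRQ,
	};

	return r;
}
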
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
new file mode 100644
index 000000000000..fae868a8d499
--- /dev/null
+++ b/drivers/mfd/twl4030-irq.c
@@ -0,0 +1,743 @@
1/*
2 * twl4030-irq.c - TWL4030/TPS659x0 irq support
3 *
4 * Copyright (C) 2005-2006 Texas Instruments, Inc.
5 *
6 * Modifications to defer interrupt handling to a kernel thread:
7 * Copyright (C) 2006 MontaVista Software, Inc.
8 *
9 * Based on tlv320aic23.c:
10 * Copyright (c) by Kai Svahn <kai.svahn@nokia.com>
11 *
12 * Code cleanup and modifications to IRQ handler.
13 * by syed khasim <x0khasim@ti.com>
14 *
15 * This program is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License as published by
17 * the Free Software Foundation; either version 2 of the License, or
18 * (at your option) any later version.
19 *
20 * This program is distributed in the hope that it will be useful,
21 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 * GNU General Public License for more details.
24 *
25 * You should have received a copy of the GNU General Public License
26 * along with this program; if not, write to the Free Software
27 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
28 */
29
30#include <linux/init.h>
31#include <linux/interrupt.h>
32#include <linux/irq.h>
33#include <linux/kthread.h>
34
35#include <linux/i2c/twl4030.h>
36
37
38/*
39 * TWL4030 IRQ handling has two stages in hardware, and thus in software.
40 * The Primary Interrupt Handler (PIH) stage exposes status bits saying
41 * which Secondary Interrupt Handler (SIH) stage is raising an interrupt.
42 * SIH modules are more traditional IRQ components, which support per-IRQ
43 * enable/disable and trigger controls; they do most of the work.
44 *
45 * These chips are designed to support IRQ handling from two different
46 * I2C masters. Each has a dedicated IRQ line, and dedicated IRQ status
47 * and mask registers in the PIH and SIH modules.
48 *
49 * We set up IRQs starting at a platform-specified base, always starting
50 * with PIH and the SIH for PWR_INT and then usually adding GPIO:
51 * base + 0 .. base + 7 PIH
52 * base + 8 .. base + 15 SIH for PWR_INT
53 * base + 16 .. base + 33 SIH for GPIO
54 */
55
56/* PIH register offsets */
57#define REG_PIH_ISR_P1 0x01
58#define REG_PIH_ISR_P2 0x02
59#define REG_PIH_SIR 0x03 /* for testing */
60
61
62/* Linux could (eventually) use either IRQ line */
63static int irq_line;
64
65struct sih {
66 char name[8];
67 u8 module; /* module id */
68 u8 control_offset; /* for SIH_CTRL */
69 bool set_cor;
70
71 u8 bits; /* valid in isr/imr */
72 u8 bytes_ixr; /* bytelen of ISR/IMR/SIR */
73
74 u8 edr_offset;
75 u8 bytes_edr; /* bytelen of EDR */
76
77 /* SIR ignored -- set interrupt, for testing only */
78 struct irq_data {
79 u8 isr_offset;
80 u8 imr_offset;
81 } mask[2];
82 /* + 2 bytes padding */
83};
84
85#define SIH_INITIALIZER(modname, nbits) \
86 .module = TWL4030_MODULE_ ## modname, \
87 .control_offset = TWL4030_ ## modname ## _SIH_CTRL, \
88 .bits = nbits, \
89 .bytes_ixr = DIV_ROUND_UP(nbits, 8), \
90 .edr_offset = TWL4030_ ## modname ## _EDR, \
91 .bytes_edr = DIV_ROUND_UP((2*(nbits)), 8), \
92 .mask = { { \
93 .isr_offset = TWL4030_ ## modname ## _ISR1, \
94 .imr_offset = TWL4030_ ## modname ## _IMR1, \
95 }, \
96 { \
97 .isr_offset = TWL4030_ ## modname ## _ISR2, \
98 .imr_offset = TWL4030_ ## modname ## _IMR2, \
99 }, },
100
101/* register naming policies are inconsistent ... */
102#define TWL4030_INT_PWR_EDR TWL4030_INT_PWR_EDR1
103#define TWL4030_MODULE_KEYPAD_KEYP TWL4030_MODULE_KEYPAD
104#define TWL4030_MODULE_INT_PWR TWL4030_MODULE_INT
105
106
107/* Order in this table matches order in PIH_ISR. That is,
108 * BIT(n) in PIH_ISR is sih_modules[n].
109 */
110static const struct sih sih_modules[6] = {
111 [0] = {
112 .name = "gpio",
113 .module = TWL4030_MODULE_GPIO,
114 .control_offset = REG_GPIO_SIH_CTRL,
115 .set_cor = true,
116 .bits = TWL4030_GPIO_MAX,
117 .bytes_ixr = 3,
118 /* Note: *all* of these IRQs default to no-trigger */
119 .edr_offset = REG_GPIO_EDR1,
120 .bytes_edr = 5,
121 .mask = { {
122 .isr_offset = REG_GPIO_ISR1A,
123 .imr_offset = REG_GPIO_IMR1A,
124 }, {
125 .isr_offset = REG_GPIO_ISR1B,
126 .imr_offset = REG_GPIO_IMR1B,
127 }, },
128 },
129 [1] = {
130 .name = "keypad",
131 .set_cor = true,
132 SIH_INITIALIZER(KEYPAD_KEYP, 4)
133 },
134 [2] = {
135 .name = "bci",
136 .module = TWL4030_MODULE_INTERRUPTS,
137 .control_offset = TWL4030_INTERRUPTS_BCISIHCTRL,
138 .bits = 12,
139 .bytes_ixr = 2,
140 .edr_offset = TWL4030_INTERRUPTS_BCIEDR1,
141 /* Note: most of these IRQs default to no-trigger */
142 .bytes_edr = 3,
143 .mask = { {
144 .isr_offset = TWL4030_INTERRUPTS_BCIISR1A,
145 .imr_offset = TWL4030_INTERRUPTS_BCIIMR1A,
146 }, {
147 .isr_offset = TWL4030_INTERRUPTS_BCIISR1B,
148 .imr_offset = TWL4030_INTERRUPTS_BCIIMR1B,
149 }, },
150 },
151 [3] = {
152 .name = "madc",
153 SIH_INITIALIZER(MADC, 4)
154 },
155 [4] = {
156 /* USB doesn't use the same SIH organization */
157 .name = "usb",
158 },
159 [5] = {
160 .name = "power",
161 .set_cor = true,
162 SIH_INITIALIZER(INT_PWR, 8)
163 },
164 /* there are no SIH modules #6 or #7 ... */
165};
166
167#undef TWL4030_MODULE_KEYPAD_KEYP
168#undef TWL4030_MODULE_INT_PWR
169#undef TWL4030_INT_PWR_EDR
170
171/*----------------------------------------------------------------------*/
172
173static unsigned twl4030_irq_base;
174
175static struct completion irq_event;
176
177/*
178 * This thread processes interrupts reported by the Primary Interrupt Handler.
179 */
180static int twl4030_irq_thread(void *data)
181{
182 long irq = (long)data;
183 irq_desc_t *desc = irq_desc + irq;
184 static unsigned i2c_errors;
185 const static unsigned max_i2c_errors = 100;
186
187 current->flags |= PF_NOFREEZE;
188
189 while (!kthread_should_stop()) {
190 int ret;
191 int module_irq;
192 u8 pih_isr;
193
194 /* Wait for IRQ, then read PIH irq status (also blocking) */
195 wait_for_completion_interruptible(&irq_event);
196
197 ret = twl4030_i2c_read_u8(TWL4030_MODULE_PIH, &pih_isr,
198 REG_PIH_ISR_P1);
199 if (ret) {
200 pr_warning("twl4030: I2C error %d reading PIH ISR\n",
201 ret);
202 if (++i2c_errors >= max_i2c_errors) {
203 printk(KERN_ERR "Maximum I2C error count"
204 " exceeded. Terminating %s.\n",
205 __func__);
206 break;
207 }
208 complete(&irq_event);
209 continue;
210 }
211
212 /* these handlers deal with the relevant SIH irq status */
213 local_irq_disable();
214 for (module_irq = twl4030_irq_base;
215 pih_isr;
216 pih_isr >>= 1, module_irq++) {
217 if (pih_isr & 0x1) {
218 irq_desc_t *d = irq_desc + module_irq;
219
220 /* These can't be masked ... always warn
221 * if we get any surprises.
222 */
223 if (d->status & IRQ_DISABLED)
224 note_interrupt(module_irq, d,
225 IRQ_NONE);
226 else
227 d->handle_irq(module_irq, d);
228 }
229 }
230 local_irq_enable();
231
232 desc->chip->unmask(irq);
233 }
234
235 return 0;
236}
237
238/*
239 * handle_twl4030_pih() is the desc->handle method for the twl4030 interrupt.
240 * This is a chained interrupt, so there is no desc->action method for it.
241 * Now we need to query the interrupt controller in the twl4030 to determine
242 * which module is generating the interrupt request. However, we can't do i2c
243 * transactions in interrupt context, so we must defer that work to a kernel
244 * thread. All we do here is acknowledge and mask the interrupt and wakeup
245 * the kernel thread.
246 */
247static void handle_twl4030_pih(unsigned int irq, irq_desc_t *desc)
248{
249 /* Acknowledge, clear *AND* mask the interrupt... */
250 desc->chip->ack(irq);
251 complete(&irq_event);
252}
253
254static struct task_struct *start_twl4030_irq_thread(long irq)
255{
256 struct task_struct *thread;
257
258 init_completion(&irq_event);
259 thread = kthread_run(twl4030_irq_thread, (void *)irq, "twl4030-irq");
260 if (!thread)
261 pr_err("twl4030: could not create irq %ld thread!\n", irq);
262
263 return thread;
264}
265
266/*----------------------------------------------------------------------*/
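
The demultiplexing above follows a common hand-off: the chained flow handler runs in hard-IRQ context where I2C is off limits, so it only acks/masks and signals a completion, and a kernel thread does the sleeping work. A stripped-down sketch of that shape (hypothetical names, same 2.6.28-era desc->chip API as the file):

#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/irq.h>

static DECLARE_COMPLETION(demux_event);

static void demux_flow_handler(unsigned int irq, struct irq_desc *desc)
{
	desc->chip->ack(irq);		/* ack + mask, nothing that sleeps */
	complete(&demux_event);		/* wake the worker thread */
}

static int demux_thread(void *data)
{
	while (!kthread_should_stop()) {
		wait_for_completion_interruptible(&demux_event);
		/* ... sleeping bus transactions and IRQ dispatch go here ... */
	}
	return 0;
}
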
267
268/*
269 * twl4030_init_sih_modules() ... start from a known state where no
270 * IRQs will be coming in, and where we can quickly enable them then
271 * handle them as they arrive. Mask all IRQs: maybe init SIH_CTRL.
272 *
273 * NOTE: we don't touch EDR registers here; they stay with hardware
274 * defaults or whatever the last value was. Note that when both EDR
275 * bits for an IRQ are clear, that's as if its IMR bit is set...
276 */
277static int twl4030_init_sih_modules(unsigned line)
278{
279 const struct sih *sih;
280 u8 buf[4];
281 int i;
282 int status;
283
284 /* line 0 == int1_n signal; line 1 == int2_n signal */
285 if (line > 1)
286 return -EINVAL;
287
288 irq_line = line;
289
290 /* disable all interrupts on our line */
291 memset(buf, 0xff, sizeof buf);
292 sih = sih_modules;
293 for (i = 0; i < ARRAY_SIZE(sih_modules); i++, sih++) {
294
295 /* skip USB -- it's funky */
296 if (!sih->bytes_ixr)
297 continue;
298
299 status = twl4030_i2c_write(sih->module, buf,
300 sih->mask[line].imr_offset, sih->bytes_ixr);
301 if (status < 0)
302 pr_err("twl4030: err %d initializing %s %s\n",
303 status, sih->name, "IMR");
304
305 /* Maybe disable "exclusive" mode; buffer second pending irq;
306 * set Clear-On-Read (COR) bit.
307 *
308 * NOTE that sometimes COR polarity is documented as being
309 * inverted: for MADC and BCI, COR=1 means "clear on write".
310 * And for PWR_INT it's not documented...
311 */
312 if (sih->set_cor) {
313 status = twl4030_i2c_write_u8(sih->module,
314 TWL4030_SIH_CTRL_COR_MASK,
315 sih->control_offset);
316 if (status < 0)
317 pr_err("twl4030: err %d initializing %s %s\n",
318 status, sih->name, "SIH_CTRL");
319 }
320 }
321
322 sih = sih_modules;
323 for (i = 0; i < ARRAY_SIZE(sih_modules); i++, sih++) {
324 u8 rxbuf[4];
325 int j;
326
327 /* skip USB */
328 if (!sih->bytes_ixr)
329 continue;
330
331 /* Clear pending interrupt status. Either the read was
332 * enough, or we need to write those bits. Repeat, in
333 * case an IRQ is pending (PENDDIS=0) ... that's not
334 * uncommon with PWR_INT.PWRON.
335 */
336 for (j = 0; j < 2; j++) {
337 status = twl4030_i2c_read(sih->module, rxbuf,
338 sih->mask[line].isr_offset, sih->bytes_ixr);
339 if (status < 0)
340 pr_err("twl4030: err %d initializing %s %s\n",
341 status, sih->name, "ISR");
342
343 if (!sih->set_cor)
344 status = twl4030_i2c_write(sih->module, buf,
345 sih->mask[line].isr_offset,
346 sih->bytes_ixr);
347 /* else COR=1 means read sufficed.
348 * (for most SIH modules...)
349 */
350 }
351 }
352
353 return 0;
354}
355
356static inline void activate_irq(int irq)
357{
358#ifdef CONFIG_ARM
359 /* ARM requires an extra step to clear IRQ_NOREQUEST, which it
360 * sets on behalf of every irq_chip. Also sets IRQ_NOPROBE.
361 */
362 set_irq_flags(irq, IRQF_VALID);
363#else
364 /* same effect on other architectures */
365 set_irq_noprobe(irq);
366#endif
367}
368
369/*----------------------------------------------------------------------*/
370
371static DEFINE_SPINLOCK(sih_agent_lock);
372
373static struct workqueue_struct *wq;
374
375struct sih_agent {
376 int irq_base;
377 const struct sih *sih;
378
379 u32 imr;
380 bool imr_change_pending;
381 struct work_struct mask_work;
382
383 u32 edge_change;
384 struct work_struct edge_work;
385};
386
387static void twl4030_sih_do_mask(struct work_struct *work)
388{
389 struct sih_agent *agent;
390 const struct sih *sih;
391 union {
392 u8 bytes[4];
393 u32 word;
394 } imr;
395 int status;
396
397 agent = container_of(work, struct sih_agent, mask_work);
398
399 /* see what work we have */
400 spin_lock_irq(&sih_agent_lock);
401 if (agent->imr_change_pending) {
402 sih = agent->sih;
403 /* byte[0] gets overwritten as we write ... */
404 imr.word = cpu_to_le32(agent->imr << 8);
405 agent->imr_change_pending = false;
406 } else
407 sih = NULL;
408 spin_unlock_irq(&sih_agent_lock);
409 if (!sih)
410 return;
411
412 /* write the whole mask ... simpler than subsetting it */
413 status = twl4030_i2c_write(sih->module, imr.bytes,
414 sih->mask[irq_line].imr_offset, sih->bytes_ixr);
415 if (status)
416 pr_err("twl4030: %s, %s --> %d\n", __func__,
417 "write", status);
418}
419
420static void twl4030_sih_do_edge(struct work_struct *work)
421{
422 struct sih_agent *agent;
423 const struct sih *sih;
424 u8 bytes[6];
425 u32 edge_change;
426 int status;
427
428 agent = container_of(work, struct sih_agent, edge_work);
429
430 /* see what work we have */
431 spin_lock_irq(&sih_agent_lock);
432 edge_change = agent->edge_change;
 433	agent->edge_change = 0;
434 sih = edge_change ? agent->sih : NULL;
435 spin_unlock_irq(&sih_agent_lock);
436 if (!sih)
437 return;
438
439 /* Read, reserving first byte for write scratch. Yes, this
440 * could be cached for some speedup ... but be careful about
441 * any processor on the other IRQ line, EDR registers are
442 * shared.
443 */
444 status = twl4030_i2c_read(sih->module, bytes + 1,
445 sih->edr_offset, sih->bytes_edr);
446 if (status) {
447 pr_err("twl4030: %s, %s --> %d\n", __func__,
448 "read", status);
449 return;
450 }
451
452 /* Modify only the bits we know must change */
453 while (edge_change) {
454 int i = fls(edge_change) - 1;
455 struct irq_desc *d = irq_desc + i + agent->irq_base;
456 int byte = 1 + (i >> 2);
457 int off = (i & 0x3) * 2;
458
459 bytes[byte] &= ~(0x03 << off);
460
461 spin_lock_irq(&d->lock);
462 if (d->status & IRQ_TYPE_EDGE_RISING)
463 bytes[byte] |= BIT(off + 1);
464 if (d->status & IRQ_TYPE_EDGE_FALLING)
465 bytes[byte] |= BIT(off + 0);
466 spin_unlock_irq(&d->lock);
467
468 edge_change &= ~BIT(i);
469 }
470
471 /* Write */
472 status = twl4030_i2c_write(sih->module, bytes,
473 sih->edr_offset, sih->bytes_edr);
474 if (status)
475 pr_err("twl4030: %s, %s --> %d\n", __func__,
476 "write", status);
477}
478
479/*----------------------------------------------------------------------*/
480
481/*
482 * All irq_chip methods get issued from code holding irq_desc[irq].lock,
483 * which can't perform the underlying I2C operations (because they sleep).
484 * So we must hand them off to a thread (workqueue) and cope with asynch
485 * completion, potentially including some re-ordering, of these requests.
486 */
487
488static void twl4030_sih_mask(unsigned irq)
489{
490 struct sih_agent *sih = get_irq_chip_data(irq);
491 unsigned long flags;
492
493 spin_lock_irqsave(&sih_agent_lock, flags);
494 sih->imr |= BIT(irq - sih->irq_base);
495 sih->imr_change_pending = true;
496 queue_work(wq, &sih->mask_work);
497 spin_unlock_irqrestore(&sih_agent_lock, flags);
498}
499
500static void twl4030_sih_unmask(unsigned irq)
501{
502 struct sih_agent *sih = get_irq_chip_data(irq);
503 unsigned long flags;
504
505 spin_lock_irqsave(&sih_agent_lock, flags);
506 sih->imr &= ~BIT(irq - sih->irq_base);
507 sih->imr_change_pending = true;
508 queue_work(wq, &sih->mask_work);
509 spin_unlock_irqrestore(&sih_agent_lock, flags);
510}
511
512static int twl4030_sih_set_type(unsigned irq, unsigned trigger)
513{
514 struct sih_agent *sih = get_irq_chip_data(irq);
515 struct irq_desc *desc = irq_desc + irq;
516 unsigned long flags;
517
518 if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
519 return -EINVAL;
520
521 spin_lock_irqsave(&sih_agent_lock, flags);
522 if ((desc->status & IRQ_TYPE_SENSE_MASK) != trigger) {
523 desc->status &= ~IRQ_TYPE_SENSE_MASK;
524 desc->status |= trigger;
525 sih->edge_change |= BIT(irq - sih->irq_base);
526 queue_work(wq, &sih->edge_work);
527 }
528 spin_unlock_irqrestore(&sih_agent_lock, flags);
529 return 0;
530}
531
532static struct irq_chip twl4030_sih_irq_chip = {
533 .name = "twl4030",
534 .mask = twl4030_sih_mask,
535 .unmask = twl4030_sih_unmask,
536 .set_type = twl4030_sih_set_type,
537};
538
539/*----------------------------------------------------------------------*/
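
Because irq_chip methods are called with irq_desc[irq].lock held, the SIH mask/unmask/set_type callbacks above only record the requested change and queue work; the I2C write happens later in process context. A generic sketch of that pattern with hypothetical names:

#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/types.h>

static DEFINE_SPINLOCK(mask_lock);
static u32 pending_mask;
static bool mask_dirty;
static unsigned bank_irq_base;			/* first Linux IRQ of this bank */

static void push_mask(struct work_struct *work)
{
	u32 mask;

	spin_lock_irq(&mask_lock);
	mask = pending_mask;
	mask_dirty = false;
	spin_unlock_irq(&mask_lock);

	/* ... sleeping bus write of 'mask' goes here ... */
	(void)mask;
}
static DECLARE_WORK(mask_work, push_mask);

static void chip_mask(unsigned irq)		/* irq_chip .mask: may not sleep */
{
	unsigned long flags;

	spin_lock_irqsave(&mask_lock, flags);
	pending_mask |= BIT(irq - bank_irq_base);
	mask_dirty = true;
	schedule_work(&mask_work);		/* does not sleep, safe here */
	spin_unlock_irqrestore(&mask_lock, flags);
}
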
540
541static inline int sih_read_isr(const struct sih *sih)
542{
543 int status;
544 union {
545 u8 bytes[4];
546 u32 word;
547 } isr;
548
549 /* FIXME need retry-on-error ... */
550
551 isr.word = 0;
552 status = twl4030_i2c_read(sih->module, isr.bytes,
553 sih->mask[irq_line].isr_offset, sih->bytes_ixr);
554
555 return (status < 0) ? status : le32_to_cpu(isr.word);
556}
557
558/*
559 * Generic handler for SIH interrupts ... we "know" this is called
560 * in task context, with IRQs enabled.
561 */
562static void handle_twl4030_sih(unsigned irq, struct irq_desc *desc)
563{
564 struct sih_agent *agent = get_irq_data(irq);
565 const struct sih *sih = agent->sih;
566 int isr;
567
568 /* reading ISR acks the IRQs, using clear-on-read mode */
569 local_irq_enable();
570 isr = sih_read_isr(sih);
571 local_irq_disable();
572
573 if (isr < 0) {
574 pr_err("twl4030: %s SIH, read ISR error %d\n",
575 sih->name, isr);
576 /* REVISIT: recover; eventually mask it all, etc */
577 return;
578 }
579
580 while (isr) {
581 irq = fls(isr);
582 irq--;
583 isr &= ~BIT(irq);
584
585 if (irq < sih->bits)
586 generic_handle_irq(agent->irq_base + irq);
587 else
588 pr_err("twl4030: %s SIH, invalid ISR bit %d\n",
589 sih->name, irq);
590 }
591}
592
593static unsigned twl4030_irq_next;
594
595/* returns the first IRQ used by this SIH bank,
596 * or negative errno
597 */
598int twl4030_sih_setup(int module)
599{
600 int sih_mod;
601 const struct sih *sih = NULL;
602 struct sih_agent *agent;
603 int i, irq;
604 int status = -EINVAL;
605 unsigned irq_base = twl4030_irq_next;
606
607 /* only support modules with standard clear-on-read for now */
608 for (sih_mod = 0, sih = sih_modules;
609 sih_mod < ARRAY_SIZE(sih_modules);
610 sih_mod++, sih++) {
611 if (sih->module == module && sih->set_cor) {
612 if (!WARN((irq_base + sih->bits) > NR_IRQS,
613 "irq %d for %s too big\n",
614 irq_base + sih->bits,
615 sih->name))
616 status = 0;
617 break;
618 }
619 }
620 if (status < 0)
621 return status;
622
623 agent = kzalloc(sizeof *agent, GFP_KERNEL);
624 if (!agent)
625 return -ENOMEM;
626
627 status = 0;
628
629 agent->irq_base = irq_base;
630 agent->sih = sih;
631 agent->imr = ~0;
632 INIT_WORK(&agent->mask_work, twl4030_sih_do_mask);
633 INIT_WORK(&agent->edge_work, twl4030_sih_do_edge);
634
635 for (i = 0; i < sih->bits; i++) {
636 irq = irq_base + i;
637
638 set_irq_chip_and_handler(irq, &twl4030_sih_irq_chip,
639 handle_edge_irq);
640 set_irq_chip_data(irq, agent);
641 activate_irq(irq);
642 }
643
644 status = irq_base;
645 twl4030_irq_next += i;
646
647 /* replace generic PIH handler (handle_simple_irq) */
648 irq = sih_mod + twl4030_irq_base;
649 set_irq_data(irq, agent);
650 set_irq_chained_handler(irq, handle_twl4030_sih);
651
652 pr_info("twl4030: %s (irq %d) chaining IRQs %d..%d\n", sih->name,
653 irq, irq_base, twl4030_irq_next - 1);
654
655 return status;
656}
657
658/* FIXME need a call to reverse twl4030_sih_setup() ... */
659
660
661/*----------------------------------------------------------------------*/
662
663/* FIXME pass in which interrupt line we'll use ... */
664#define twl_irq_line 0
665
666int twl_init_irq(int irq_num, unsigned irq_base, unsigned irq_end)
667{
668 static struct irq_chip twl4030_irq_chip;
669
670 int status;
671 int i;
672 struct task_struct *task;
673
674 /*
675 * Mask and clear all TWL4030 interrupts since initially we do
676 * not have any TWL4030 module interrupt handlers present
677 */
678 status = twl4030_init_sih_modules(twl_irq_line);
679 if (status < 0)
680 return status;
681
682 wq = create_singlethread_workqueue("twl4030-irqchip");
683 if (!wq) {
684 pr_err("twl4030: workqueue FAIL\n");
685 return -ESRCH;
686 }
687
688 twl4030_irq_base = irq_base;
689
690 /* install an irq handler for each of the SIH modules;
691 * clone dummy irq_chip since PIH can't *do* anything
692 */
693 twl4030_irq_chip = dummy_irq_chip;
694 twl4030_irq_chip.name = "twl4030";
695
696 twl4030_sih_irq_chip.ack = dummy_irq_chip.ack;
697
698 for (i = irq_base; i < irq_end; i++) {
699 set_irq_chip_and_handler(i, &twl4030_irq_chip,
700 handle_simple_irq);
701 activate_irq(i);
702 }
703 twl4030_irq_next = i;
704 pr_info("twl4030: %s (irq %d) chaining IRQs %d..%d\n", "PIH",
705 irq_num, irq_base, twl4030_irq_next - 1);
706
707 /* ... and the PWR_INT module ... */
708 status = twl4030_sih_setup(TWL4030_MODULE_INT);
709 if (status < 0) {
710 pr_err("twl4030: sih_setup PWR INT --> %d\n", status);
711 goto fail;
712 }
713
714 /* install an irq handler to demultiplex the TWL4030 interrupt */
715 task = start_twl4030_irq_thread(irq_num);
716 if (!task) {
717 pr_err("twl4030: irq thread FAIL\n");
718 status = -ESRCH;
719 goto fail;
720 }
721
722 set_irq_data(irq_num, task);
723 set_irq_chained_handler(irq_num, handle_twl4030_pih);
724
725 return status;
726
727fail:
728 for (i = irq_base; i < irq_end; i++)
729 set_irq_chip_and_handler(i, NULL, NULL);
730 destroy_workqueue(wq);
731 wq = NULL;
732 return status;
733}
734
735int twl_exit_irq(void)
736{
737 /* FIXME undo twl_init_irq() */
738 if (twl4030_irq_base) {
739 pr_err("twl4030: can't yet clean up IRQs?\n");
740 return -ENOSYS;
741 }
742 return 0;
743}
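
The new file takes its IRQ range from platform data rather than compile-time constants. A board-side sketch (the TWL4030_IRQ_BASE constant and the range size of 34 IRQs are assumptions derived from the layout comment above, not values from this patch):

#include <linux/i2c/twl4030.h>

static struct twl4030_platform_data board_twl_data = {
	.irq_base	= TWL4030_IRQ_BASE,		/* board-chosen base */
	.irq_end	= TWL4030_IRQ_BASE + 34,	/* PIH + PWR_INT + GPIO SIH */
	/* ... regulator/GPIO/keypad child configuration ... */
};
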
diff --git a/drivers/mfd/wm8350-core.c b/drivers/mfd/wm8350-core.c
index bf87f675e7fa..0d47fb9e4b3b 100644
--- a/drivers/mfd/wm8350-core.c
+++ b/drivers/mfd/wm8350-core.c
@@ -183,6 +183,9 @@ static int wm8350_write(struct wm8350 *wm8350, u8 reg, int num_regs, u16 *src)
183 (wm8350->reg_cache[i] & ~wm8350_reg_io_map[i].writable) 183 (wm8350->reg_cache[i] & ~wm8350_reg_io_map[i].writable)
184 | src[i - reg]; 184 | src[i - reg];
185 185
186 /* Don't store volatile bits */
187 wm8350->reg_cache[i] &= ~wm8350_reg_io_map[i].vol;
188
186 src[i - reg] = cpu_to_be16(src[i - reg]); 189 src[i - reg] = cpu_to_be16(src[i - reg]);
187 } 190 }
188 191
@@ -1120,6 +1123,7 @@ static int wm8350_create_cache(struct wm8350 *wm8350, int mode)
1120 } 1123 }
1121 value = be16_to_cpu(value); 1124 value = be16_to_cpu(value);
1122 value &= wm8350_reg_io_map[i].readable; 1125 value &= wm8350_reg_io_map[i].readable;
1126 value &= ~wm8350_reg_io_map[i].vol;
1123 wm8350->reg_cache[i] = value; 1127 wm8350->reg_cache[i] = value;
1124 } else 1128 } else
1125 wm8350->reg_cache[i] = reg_map[i]; 1129 wm8350->reg_cache[i] = reg_map[i];
@@ -1128,7 +1132,6 @@ static int wm8350_create_cache(struct wm8350 *wm8350, int mode)
1128out: 1132out:
1129 return ret; 1133 return ret;
1130} 1134}
1131EXPORT_SYMBOL_GPL(wm8350_create_cache);
1132 1135
1133/* 1136/*
1134 * Register a client device. This is non-fatal since there is no need to 1137 * Register a client device. This is non-fatal since there is no need to
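
Both wm8350-core hunks enforce the same rule: volatile bits never land in the register cache, whether the cache is being written through or initially filled. The rule in sketch form, with hypothetical names (struct reg_io_map mirrors the readable/vol fields used above):

#include <linux/types.h>

/* hypothetical per-register I/O map, one entry per register */
struct reg_io_map {
	u16 readable;
	u16 vol;
};

static void cache_store(u16 *cache, const struct reg_io_map *map,
			unsigned reg, u16 value)
{
	value &= map[reg].readable;	/* keep only readable bits */
	value &= ~map[reg].vol;		/* never cache volatile bits */
	cache[reg] = value;
}
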
diff --git a/drivers/mfd/wm8350-i2c.c b/drivers/mfd/wm8350-i2c.c
index 8dfe21bb3bd1..3e0ce0e50ea2 100644
--- a/drivers/mfd/wm8350-i2c.c
+++ b/drivers/mfd/wm8350-i2c.c
@@ -30,7 +30,12 @@ static int wm8350_i2c_read_device(struct wm8350 *wm8350, char reg,
30 ret = i2c_master_send(wm8350->i2c_client, &reg, 1); 30 ret = i2c_master_send(wm8350->i2c_client, &reg, 1);
31 if (ret < 0) 31 if (ret < 0)
32 return ret; 32 return ret;
33 return i2c_master_recv(wm8350->i2c_client, dest, bytes); 33 ret = i2c_master_recv(wm8350->i2c_client, dest, bytes);
34 if (ret < 0)
35 return ret;
36 if (ret != bytes)
37 return -EIO;
38 return 0;
34} 39}
35 40
36static int wm8350_i2c_write_device(struct wm8350 *wm8350, char reg, 41static int wm8350_i2c_write_device(struct wm8350 *wm8350, char reg,
@@ -38,13 +43,19 @@ static int wm8350_i2c_write_device(struct wm8350 *wm8350, char reg,
38{ 43{
39 /* we add 1 byte for device register */ 44 /* we add 1 byte for device register */
40 u8 msg[(WM8350_MAX_REGISTER << 1) + 1]; 45 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
46 int ret;
41 47
42 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1)) 48 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
43 return -EINVAL; 49 return -EINVAL;
44 50
45 msg[0] = reg; 51 msg[0] = reg;
46 memcpy(&msg[1], src, bytes); 52 memcpy(&msg[1], src, bytes);
47 return i2c_master_send(wm8350->i2c_client, msg, bytes + 1); 53 ret = i2c_master_send(wm8350->i2c_client, msg, bytes + 1);
54 if (ret < 0)
55 return ret;
56 if (ret != bytes + 1)
57 return -EIO;
58 return 0;
48} 59}
49 60
50static int wm8350_i2c_probe(struct i2c_client *i2c, 61static int wm8350_i2c_probe(struct i2c_client *i2c,
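
i2c_master_send() and i2c_master_recv() return either a negative errno or the number of bytes actually transferred, so a short transfer has to be converted into an error explicitly, as the hunks above now do. The same check as a small helper (hypothetical name):

#include <linux/errno.h>
#include <linux/i2c.h>

static int i2c_read_exact(struct i2c_client *client, char *buf, int len)
{
	int ret = i2c_master_recv(client, buf, len);

	if (ret < 0)
		return ret;			/* bus error */
	return (ret == len) ? 0 : -EIO;		/* short read is an I/O error */
}
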
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index efd3aa08b88b..fee7304102af 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -145,6 +145,7 @@ config ACER_WMI
145 depends on NEW_LEDS 145 depends on NEW_LEDS
146 depends on BACKLIGHT_CLASS_DEVICE 146 depends on BACKLIGHT_CLASS_DEVICE
147 depends on SERIO_I8042 147 depends on SERIO_I8042
148 depends on RFKILL
148 select ACPI_WMI 149 select ACPI_WMI
149 ---help--- 150 ---help---
150 This is a driver for newer Acer (and Wistron) laptops. It adds 151 This is a driver for newer Acer (and Wistron) laptops. It adds
@@ -226,10 +227,20 @@ config HP_WMI
226 To compile this driver as a module, choose M here: the module will 227 To compile this driver as a module, choose M here: the module will
227 be called hp-wmi. 228 be called hp-wmi.
228 229
230config ICS932S401
231 tristate "Integrated Circuits ICS932S401"
232 depends on I2C && EXPERIMENTAL
233 help
234 If you say yes here you get support for the Integrated Circuits
235 ICS932S401 clock control chips.
236
237 This driver can also be built as a module. If so, the module
238 will be called ics932s401.
239
229config MSI_LAPTOP 240config MSI_LAPTOP
230 tristate "MSI Laptop Extras" 241 tristate "MSI Laptop Extras"
231 depends on X86 242 depends on X86
232 depends on ACPI_EC 243 depends on ACPI
233 depends on BACKLIGHT_CLASS_DEVICE 244 depends on BACKLIGHT_CLASS_DEVICE
234 ---help--- 245 ---help---
235 This is a driver for laptops built by MSI (MICRO-STAR 246 This is a driver for laptops built by MSI (MICRO-STAR
@@ -245,10 +256,21 @@ config MSI_LAPTOP
245 256
246 If you have an MSI S270 laptop, say Y or M here. 257 If you have an MSI S270 laptop, say Y or M here.
247 258
259config PANASONIC_LAPTOP
260 tristate "Panasonic Laptop Extras"
261 depends on X86 && INPUT && ACPI
262 depends on BACKLIGHT_CLASS_DEVICE
263 ---help---
264 This driver adds support for access to backlight control and hotkeys
265 on Panasonic Let's Note laptops.
266
267 If you have a Panasonic Let's note laptop (such as the R1(N variant),
268 R2, R3, R5, T2, W2 and Y2 series), say Y.
269
248config COMPAL_LAPTOP 270config COMPAL_LAPTOP
249 tristate "Compal Laptop Extras" 271 tristate "Compal Laptop Extras"
250 depends on X86 272 depends on X86
251 depends on ACPI_EC 273 depends on ACPI
252 depends on BACKLIGHT_CLASS_DEVICE 274 depends on BACKLIGHT_CLASS_DEVICE
253 ---help--- 275 ---help---
254 This is a driver for laptops built by Compal: 276 This is a driver for laptops built by Compal:
@@ -476,4 +498,6 @@ config SGI_GRU_DEBUG
 476	  This option enables additional debugging code for the SGI GRU driver. If	 498	  This option enables additional debugging code for the SGI GRU driver. If
477 you are unsure, say N. 499 you are unsure, say N.
478 500
501source "drivers/misc/c2port/Kconfig"
502
479endif # MISC_DEVICES 503endif # MISC_DEVICES
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index c6c13f60b452..817f7f5ab3bd 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_ATMEL_PWM) += atmel_pwm.o
14obj-$(CONFIG_ATMEL_SSC) += atmel-ssc.o 14obj-$(CONFIG_ATMEL_SSC) += atmel-ssc.o
15obj-$(CONFIG_ATMEL_TCLIB) += atmel_tclib.o 15obj-$(CONFIG_ATMEL_TCLIB) += atmel_tclib.o
16obj-$(CONFIG_HP_WMI) += hp-wmi.o 16obj-$(CONFIG_HP_WMI) += hp-wmi.o
17obj-$(CONFIG_ICS932S401) += ics932s401.o
17obj-$(CONFIG_TC1100_WMI) += tc1100-wmi.o 18obj-$(CONFIG_TC1100_WMI) += tc1100-wmi.o
18obj-$(CONFIG_LKDTM) += lkdtm.o 19obj-$(CONFIG_LKDTM) += lkdtm.o
19obj-$(CONFIG_TIFM_CORE) += tifm_core.o 20obj-$(CONFIG_TIFM_CORE) += tifm_core.o
@@ -23,6 +24,7 @@ obj-$(CONFIG_SGI_IOC4) += ioc4.o
23obj-$(CONFIG_SONY_LAPTOP) += sony-laptop.o 24obj-$(CONFIG_SONY_LAPTOP) += sony-laptop.o
24obj-$(CONFIG_THINKPAD_ACPI) += thinkpad_acpi.o 25obj-$(CONFIG_THINKPAD_ACPI) += thinkpad_acpi.o
25obj-$(CONFIG_FUJITSU_LAPTOP) += fujitsu-laptop.o 26obj-$(CONFIG_FUJITSU_LAPTOP) += fujitsu-laptop.o
27obj-$(CONFIG_PANASONIC_LAPTOP) += panasonic-laptop.o
26obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o 28obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o
27obj-$(CONFIG_INTEL_MENLOW) += intel_menlow.o 29obj-$(CONFIG_INTEL_MENLOW) += intel_menlow.o
28obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o 30obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o
@@ -30,3 +32,4 @@ obj-$(CONFIG_KGDB_TESTS) += kgdbts.o
30obj-$(CONFIG_SGI_XP) += sgi-xp/ 32obj-$(CONFIG_SGI_XP) += sgi-xp/
31obj-$(CONFIG_SGI_GRU) += sgi-gru/ 33obj-$(CONFIG_SGI_GRU) += sgi-gru/
32obj-$(CONFIG_HP_ILO) += hpilo.o 34obj-$(CONFIG_HP_ILO) += hpilo.o
35obj-$(CONFIG_C2PORT) += c2port/
diff --git a/drivers/misc/acer-wmi.c b/drivers/misc/acer-wmi.c
index d8b0d326e452..94c9f911824e 100644
--- a/drivers/misc/acer-wmi.c
+++ b/drivers/misc/acer-wmi.c
@@ -33,6 +33,8 @@
33#include <linux/platform_device.h> 33#include <linux/platform_device.h>
34#include <linux/acpi.h> 34#include <linux/acpi.h>
35#include <linux/i8042.h> 35#include <linux/i8042.h>
36#include <linux/rfkill.h>
37#include <linux/workqueue.h>
36#include <linux/debugfs.h> 38#include <linux/debugfs.h>
37 39
38#include <acpi/acpi_drivers.h> 40#include <acpi/acpi_drivers.h>
@@ -123,21 +125,15 @@ enum interface_flags {
123 125
124static int max_brightness = 0xF; 126static int max_brightness = 0xF;
125 127
126static int wireless = -1;
127static int bluetooth = -1;
128static int mailled = -1; 128static int mailled = -1;
129static int brightness = -1; 129static int brightness = -1;
130static int threeg = -1; 130static int threeg = -1;
131static int force_series; 131static int force_series;
132 132
133module_param(mailled, int, 0444); 133module_param(mailled, int, 0444);
134module_param(wireless, int, 0444);
135module_param(bluetooth, int, 0444);
136module_param(brightness, int, 0444); 134module_param(brightness, int, 0444);
137module_param(threeg, int, 0444); 135module_param(threeg, int, 0444);
138module_param(force_series, int, 0444); 136module_param(force_series, int, 0444);
139MODULE_PARM_DESC(wireless, "Set initial state of Wireless hardware");
140MODULE_PARM_DESC(bluetooth, "Set initial state of Bluetooth hardware");
141MODULE_PARM_DESC(mailled, "Set initial state of Mail LED"); 137MODULE_PARM_DESC(mailled, "Set initial state of Mail LED");
142MODULE_PARM_DESC(brightness, "Set initial LCD backlight brightness"); 138MODULE_PARM_DESC(brightness, "Set initial LCD backlight brightness");
143MODULE_PARM_DESC(threeg, "Set initial state of 3G hardware"); 139MODULE_PARM_DESC(threeg, "Set initial state of 3G hardware");
@@ -145,8 +141,6 @@ MODULE_PARM_DESC(force_series, "Force a different laptop series");
145 141
146struct acer_data { 142struct acer_data {
147 int mailled; 143 int mailled;
148 int wireless;
149 int bluetooth;
150 int threeg; 144 int threeg;
151 int brightness; 145 int brightness;
152}; 146};
@@ -157,6 +151,9 @@ struct acer_debug {
157 u32 wmid_devices; 151 u32 wmid_devices;
158}; 152};
159 153
154static struct rfkill *wireless_rfkill;
155static struct rfkill *bluetooth_rfkill;
156
160/* Each low-level interface must define at least some of the following */ 157/* Each low-level interface must define at least some of the following */
161struct wmi_interface { 158struct wmi_interface {
162 /* The WMI device type */ 159 /* The WMI device type */
@@ -476,7 +473,7 @@ struct wmi_interface *iface)
476 } 473 }
477 break; 474 break;
478 default: 475 default:
479 return AE_BAD_ADDRESS; 476 return AE_ERROR;
480 } 477 }
481 return AE_OK; 478 return AE_OK;
482} 479}
@@ -514,7 +511,7 @@ static acpi_status AMW0_set_u32(u32 value, u32 cap, struct wmi_interface *iface)
514 break; 511 break;
515 } 512 }
516 default: 513 default:
517 return AE_BAD_ADDRESS; 514 return AE_ERROR;
518 } 515 }
519 516
520 /* Actually do the set */ 517 /* Actually do the set */
@@ -689,7 +686,7 @@ struct wmi_interface *iface)
689 return 0; 686 return 0;
690 } 687 }
691 default: 688 default:
692 return AE_BAD_ADDRESS; 689 return AE_ERROR;
693 } 690 }
694 status = WMI_execute_u32(method_id, 0, &result); 691 status = WMI_execute_u32(method_id, 0, &result);
695 692
@@ -735,7 +732,7 @@ static acpi_status WMID_set_u32(u32 value, u32 cap, struct wmi_interface *iface)
735 } 732 }
736 break; 733 break;
737 default: 734 default:
738 return AE_BAD_ADDRESS; 735 return AE_ERROR;
739 } 736 }
740 return WMI_execute_u32(method_id, (u32)value, NULL); 737 return WMI_execute_u32(method_id, (u32)value, NULL);
741} 738}
@@ -785,7 +782,7 @@ static struct wmi_interface wmid_interface = {
785 782
786static acpi_status get_u32(u32 *value, u32 cap) 783static acpi_status get_u32(u32 *value, u32 cap)
787{ 784{
788 acpi_status status = AE_BAD_ADDRESS; 785 acpi_status status = AE_ERROR;
789 786
790 switch (interface->type) { 787 switch (interface->type) {
791 case ACER_AMW0: 788 case ACER_AMW0:
@@ -846,8 +843,6 @@ static void __init acer_commandline_init(void)
846 * capability isn't available on the given interface 843 * capability isn't available on the given interface
847 */ 844 */
848 set_u32(mailled, ACER_CAP_MAILLED); 845 set_u32(mailled, ACER_CAP_MAILLED);
849 set_u32(wireless, ACER_CAP_WIRELESS);
850 set_u32(bluetooth, ACER_CAP_BLUETOOTH);
851 set_u32(threeg, ACER_CAP_THREEG); 846 set_u32(threeg, ACER_CAP_THREEG);
852 set_u32(brightness, ACER_CAP_BRIGHTNESS); 847 set_u32(brightness, ACER_CAP_BRIGHTNESS);
853} 848}
@@ -933,40 +928,135 @@ static void acer_backlight_exit(void)
933} 928}
934 929
935/* 930/*
936 * Read/ write bool sysfs macro 931 * Rfkill devices
937 */ 932 */
938#define show_set_bool(value, cap) \ 933static void acer_rfkill_update(struct work_struct *ignored);
939static ssize_t \ 934static DECLARE_DELAYED_WORK(acer_rfkill_work, acer_rfkill_update);
940show_bool_##value(struct device *dev, struct device_attribute *attr, \ 935static void acer_rfkill_update(struct work_struct *ignored)
941 char *buf) \ 936{
942{ \ 937 u32 state;
943 u32 result; \ 938 acpi_status status;
944 acpi_status status = get_u32(&result, cap); \ 939
945 if (ACPI_SUCCESS(status)) \ 940 status = get_u32(&state, ACER_CAP_WIRELESS);
946 return sprintf(buf, "%u\n", result); \ 941 if (ACPI_SUCCESS(status))
947 return sprintf(buf, "Read error\n"); \ 942 rfkill_force_state(wireless_rfkill, state ?
948} \ 943 RFKILL_STATE_UNBLOCKED : RFKILL_STATE_SOFT_BLOCKED);
949\ 944
950static ssize_t \ 945 if (has_cap(ACER_CAP_BLUETOOTH)) {
951set_bool_##value(struct device *dev, struct device_attribute *attr, \ 946 status = get_u32(&state, ACER_CAP_BLUETOOTH);
952 const char *buf, size_t count) \ 947 if (ACPI_SUCCESS(status))
953{ \ 948 rfkill_force_state(bluetooth_rfkill, state ?
954 u32 tmp = simple_strtoul(buf, NULL, 10); \ 949 RFKILL_STATE_UNBLOCKED :
955 acpi_status status = set_u32(tmp, cap); \ 950 RFKILL_STATE_SOFT_BLOCKED);
956 if (ACPI_FAILURE(status)) \ 951 }
957 return -EINVAL; \ 952
958 return count; \ 953 schedule_delayed_work(&acer_rfkill_work, round_jiffies_relative(HZ));
959} \ 954}
960static DEVICE_ATTR(value, S_IWUGO | S_IRUGO | S_IWUSR, \ 955
961 show_bool_##value, set_bool_##value); 956static int acer_rfkill_set(void *data, enum rfkill_state state)
962 957{
963show_set_bool(wireless, ACER_CAP_WIRELESS); 958 acpi_status status;
964show_set_bool(bluetooth, ACER_CAP_BLUETOOTH); 959 u32 *cap = data;
965show_set_bool(threeg, ACER_CAP_THREEG); 960 status = set_u32((u32) (state == RFKILL_STATE_UNBLOCKED), *cap);
961 if (ACPI_FAILURE(status))
962 return -ENODEV;
963 return 0;
964}
965
966static struct rfkill * acer_rfkill_register(struct device *dev,
967enum rfkill_type type, char *name, u32 cap)
968{
969 int err;
970 u32 state;
971 u32 *data;
972 struct rfkill *rfkill_dev;
973
974 rfkill_dev = rfkill_allocate(dev, type);
975 if (!rfkill_dev)
976 return ERR_PTR(-ENOMEM);
977 rfkill_dev->name = name;
978 get_u32(&state, cap);
979 rfkill_dev->state = state ? RFKILL_STATE_UNBLOCKED :
980 RFKILL_STATE_SOFT_BLOCKED;
981 data = kzalloc(sizeof(u32), GFP_KERNEL);
982 if (!data) {
983 rfkill_free(rfkill_dev);
984 return ERR_PTR(-ENOMEM);
985 }
986 *data = cap;
987 rfkill_dev->data = data;
988 rfkill_dev->toggle_radio = acer_rfkill_set;
989 rfkill_dev->user_claim_unsupported = 1;
990
991 err = rfkill_register(rfkill_dev);
992 if (err) {
993 kfree(rfkill_dev->data);
994 rfkill_free(rfkill_dev);
995 return ERR_PTR(err);
996 }
997 return rfkill_dev;
998}
999
1000static int acer_rfkill_init(struct device *dev)
1001{
1002 wireless_rfkill = acer_rfkill_register(dev, RFKILL_TYPE_WLAN,
1003 "acer-wireless", ACER_CAP_WIRELESS);
1004 if (IS_ERR(wireless_rfkill))
1005 return PTR_ERR(wireless_rfkill);
1006
1007 if (has_cap(ACER_CAP_BLUETOOTH)) {
1008 bluetooth_rfkill = acer_rfkill_register(dev,
1009 RFKILL_TYPE_BLUETOOTH, "acer-bluetooth",
1010 ACER_CAP_BLUETOOTH);
1011 if (IS_ERR(bluetooth_rfkill)) {
1012 kfree(wireless_rfkill->data);
1013 rfkill_unregister(wireless_rfkill);
1014 return PTR_ERR(bluetooth_rfkill);
1015 }
1016 }
1017
1018 schedule_delayed_work(&acer_rfkill_work, round_jiffies_relative(HZ));
1019
1020 return 0;
1021}
1022
1023static void acer_rfkill_exit(void)
1024{
1025 cancel_delayed_work_sync(&acer_rfkill_work);
1026 kfree(wireless_rfkill->data);
1027 rfkill_unregister(wireless_rfkill);
1028 if (has_cap(ACER_CAP_BLUETOOTH)) {
 1029	kfree(bluetooth_rfkill->data);
1030 rfkill_unregister(bluetooth_rfkill);
1031 }
1032 return;
1033}
966 1034
967/* 1035/*
968 * Read interface sysfs macro 1036 * sysfs interface
969 */ 1037 */
1038static ssize_t show_bool_threeg(struct device *dev,
1039 struct device_attribute *attr, char *buf)
1040{
 1041	u32 result;
1042 acpi_status status = get_u32(&result, ACER_CAP_THREEG);
1043 if (ACPI_SUCCESS(status))
1044 return sprintf(buf, "%u\n", result);
1045 return sprintf(buf, "Read error\n");
1046}
1047
1048static ssize_t set_bool_threeg(struct device *dev,
1049 struct device_attribute *attr, const char *buf, size_t count)
1050{
1051 u32 tmp = simple_strtoul(buf, NULL, 10);
1052 acpi_status status = set_u32(tmp, ACER_CAP_THREEG);
1053 if (ACPI_FAILURE(status))
1054 return -EINVAL;
1055 return count;
1056}
1057static DEVICE_ATTR(threeg, S_IWUGO | S_IRUGO | S_IWUSR, show_bool_threeg,
1058 set_bool_threeg);
1059
970static ssize_t show_interface(struct device *dev, struct device_attribute *attr, 1060static ssize_t show_interface(struct device *dev, struct device_attribute *attr,
971 char *buf) 1061 char *buf)
972{ 1062{
@@ -1026,7 +1116,9 @@ static int __devinit acer_platform_probe(struct platform_device *device)
1026 goto error_brightness; 1116 goto error_brightness;
1027 } 1117 }
1028 1118
1029 return 0; 1119 err = acer_rfkill_init(&device->dev);
1120
1121 return err;
1030 1122
1031error_brightness: 1123error_brightness:
1032 acer_led_exit(); 1124 acer_led_exit();
@@ -1040,6 +1132,8 @@ static int acer_platform_remove(struct platform_device *device)
1040 acer_led_exit(); 1132 acer_led_exit();
1041 if (has_cap(ACER_CAP_BRIGHTNESS)) 1133 if (has_cap(ACER_CAP_BRIGHTNESS))
1042 acer_backlight_exit(); 1134 acer_backlight_exit();
1135
1136 acer_rfkill_exit();
1043 return 0; 1137 return 0;
1044} 1138}
1045 1139
@@ -1052,16 +1146,6 @@ pm_message_t state)
1052 if (!data) 1146 if (!data)
1053 return -ENOMEM; 1147 return -ENOMEM;
1054 1148
1055 if (has_cap(ACER_CAP_WIRELESS)) {
1056 get_u32(&value, ACER_CAP_WIRELESS);
1057 data->wireless = value;
1058 }
1059
1060 if (has_cap(ACER_CAP_BLUETOOTH)) {
1061 get_u32(&value, ACER_CAP_BLUETOOTH);
1062 data->bluetooth = value;
1063 }
1064
1065 if (has_cap(ACER_CAP_MAILLED)) { 1149 if (has_cap(ACER_CAP_MAILLED)) {
1066 get_u32(&value, ACER_CAP_MAILLED); 1150 get_u32(&value, ACER_CAP_MAILLED);
1067 data->mailled = value; 1151 data->mailled = value;
@@ -1082,15 +1166,6 @@ static int acer_platform_resume(struct platform_device *device)
1082 if (!data) 1166 if (!data)
1083 return -ENOMEM; 1167 return -ENOMEM;
1084 1168
1085 if (has_cap(ACER_CAP_WIRELESS))
1086 set_u32(data->wireless, ACER_CAP_WIRELESS);
1087
1088 if (has_cap(ACER_CAP_BLUETOOTH))
1089 set_u32(data->bluetooth, ACER_CAP_BLUETOOTH);
1090
1091 if (has_cap(ACER_CAP_THREEG))
1092 set_u32(data->threeg, ACER_CAP_THREEG);
1093
1094 if (has_cap(ACER_CAP_MAILLED)) 1169 if (has_cap(ACER_CAP_MAILLED))
1095 set_u32(data->mailled, ACER_CAP_MAILLED); 1170 set_u32(data->mailled, ACER_CAP_MAILLED);
1096 1171
@@ -1115,12 +1190,6 @@ static struct platform_device *acer_platform_device;
1115 1190
1116static int remove_sysfs(struct platform_device *device) 1191static int remove_sysfs(struct platform_device *device)
1117{ 1192{
1118 if (has_cap(ACER_CAP_WIRELESS))
1119 device_remove_file(&device->dev, &dev_attr_wireless);
1120
1121 if (has_cap(ACER_CAP_BLUETOOTH))
1122 device_remove_file(&device->dev, &dev_attr_bluetooth);
1123
1124 if (has_cap(ACER_CAP_THREEG)) 1193 if (has_cap(ACER_CAP_THREEG))
1125 device_remove_file(&device->dev, &dev_attr_threeg); 1194 device_remove_file(&device->dev, &dev_attr_threeg);
1126 1195
@@ -1133,20 +1202,6 @@ static int create_sysfs(void)
1133{ 1202{
1134 int retval = -ENOMEM; 1203 int retval = -ENOMEM;
1135 1204
1136 if (has_cap(ACER_CAP_WIRELESS)) {
1137 retval = device_create_file(&acer_platform_device->dev,
1138 &dev_attr_wireless);
1139 if (retval)
1140 goto error_sysfs;
1141 }
1142
1143 if (has_cap(ACER_CAP_BLUETOOTH)) {
1144 retval = device_create_file(&acer_platform_device->dev,
1145 &dev_attr_bluetooth);
1146 if (retval)
1147 goto error_sysfs;
1148 }
1149
1150 if (has_cap(ACER_CAP_THREEG)) { 1205 if (has_cap(ACER_CAP_THREEG)) {
1151 retval = device_create_file(&acer_platform_device->dev, 1206 retval = device_create_file(&acer_platform_device->dev,
1152 &dev_attr_threeg); 1207 &dev_attr_threeg);
@@ -1242,6 +1297,12 @@ static int __init acer_wmi_init(void)
1242 1297
1243 set_quirks(); 1298 set_quirks();
1244 1299
1300	if (acpi_video_backlight_support() && has_cap(ACER_CAP_BRIGHTNESS)) {
1301 interface->capability &= ~ACER_CAP_BRIGHTNESS;
1302 printk(ACER_INFO "Brightness must be controlled by "
1303 "generic video driver\n");
1304 }
1305
1245 if (platform_driver_register(&acer_platform_driver)) { 1306 if (platform_driver_register(&acer_platform_driver)) {
1246 printk(ACER_ERR "Unable to register platform driver.\n"); 1307 printk(ACER_ERR "Unable to register platform driver.\n");
1247 goto error_platform_register; 1308 goto error_platform_register;
diff --git a/drivers/misc/asus-laptop.c b/drivers/misc/asus-laptop.c
index 7c6dfd03de9f..8fb8b3591048 100644
--- a/drivers/misc/asus-laptop.c
+++ b/drivers/misc/asus-laptop.c
@@ -139,6 +139,7 @@ ASUS_HANDLE(lcd_switch, "\\_SB.PCI0.SBRG.EC0._Q10", /* All new models */
139 "\\_SB.PCI0.PX40.ECD0._Q10", /* L3C */ 139 "\\_SB.PCI0.PX40.ECD0._Q10", /* L3C */
140 "\\_SB.PCI0.PX40.EC0.Q10", /* M1A */ 140 "\\_SB.PCI0.PX40.EC0.Q10", /* M1A */
141 "\\_SB.PCI0.LPCB.EC0._Q10", /* P30 */ 141 "\\_SB.PCI0.LPCB.EC0._Q10", /* P30 */
142 "\\_SB.PCI0.LPCB.EC0._Q0E", /* P30/P35 */
142 "\\_SB.PCI0.PX40.Q10", /* S1x */ 143 "\\_SB.PCI0.PX40.Q10", /* S1x */
143 "\\Q10"); /* A2x, L2D, L3D, M2E */ 144 "\\Q10"); /* A2x, L2D, L3D, M2E */
144 145
@@ -280,7 +281,7 @@ static int write_acpi_int(acpi_handle handle, const char *method, int val,
280 281
281static int read_wireless_status(int mask) 282static int read_wireless_status(int mask)
282{ 283{
283 ulong status; 284 unsigned long long status;
284 acpi_status rv = AE_OK; 285 acpi_status rv = AE_OK;
285 286
286 if (!wireless_status_handle) 287 if (!wireless_status_handle)
@@ -297,7 +298,7 @@ static int read_wireless_status(int mask)
297 298
298static int read_gps_status(void) 299static int read_gps_status(void)
299{ 300{
300 ulong status; 301 unsigned long long status;
301 acpi_status rv = AE_OK; 302 acpi_status rv = AE_OK;
302 303
303 rv = acpi_evaluate_integer(gps_status_handle, NULL, NULL, &status); 304 rv = acpi_evaluate_integer(gps_status_handle, NULL, NULL, &status);
@@ -350,7 +351,7 @@ static void write_status(acpi_handle handle, int out, int mask)
350 static void object##_led_set(struct led_classdev *led_cdev, \ 351 static void object##_led_set(struct led_classdev *led_cdev, \
351 enum led_brightness value) \ 352 enum led_brightness value) \
352 { \ 353 { \
353 object##_led_wk = value; \ 354 object##_led_wk = (value > 0) ? 1 : 0; \
354 queue_work(led_workqueue, &object##_led_work); \ 355 queue_work(led_workqueue, &object##_led_work); \
355 } \ 356 } \
356 static void object##_led_update(struct work_struct *ignored) \ 357 static void object##_led_update(struct work_struct *ignored) \
@@ -404,7 +405,7 @@ static void lcd_blank(int blank)
404 405
405static int read_brightness(struct backlight_device *bd) 406static int read_brightness(struct backlight_device *bd)
406{ 407{
407 ulong value; 408 unsigned long long value;
408 acpi_status rv = AE_OK; 409 acpi_status rv = AE_OK;
409 410
410 rv = acpi_evaluate_integer(brightness_get_handle, NULL, NULL, &value); 411 rv = acpi_evaluate_integer(brightness_get_handle, NULL, NULL, &value);
@@ -455,7 +456,7 @@ static ssize_t show_infos(struct device *dev,
455 struct device_attribute *attr, char *page) 456 struct device_attribute *attr, char *page)
456{ 457{
457 int len = 0; 458 int len = 0;
458 ulong temp; 459 unsigned long long temp;
459 char buf[16]; //enough for all info 460 char buf[16]; //enough for all info
460 acpi_status rv = AE_OK; 461 acpi_status rv = AE_OK;
461 462
@@ -603,7 +604,7 @@ static void set_display(int value)
603 604
604static int read_display(void) 605static int read_display(void)
605{ 606{
606 ulong value = 0; 607 unsigned long long value = 0;
607 acpi_status rv = AE_OK; 608 acpi_status rv = AE_OK;
608 609
609 /* In most of the case, we know how to set the display, but sometime 610 /* In most of the case, we know how to set the display, but sometime
@@ -849,7 +850,7 @@ static int asus_hotk_get_info(void)
849{ 850{
850 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 851 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
851 union acpi_object *model = NULL; 852 union acpi_object *model = NULL;
852 ulong bsts_result, hwrs_result; 853 unsigned long long bsts_result, hwrs_result;
853 char *string = NULL; 854 char *string = NULL;
854 acpi_status status; 855 acpi_status status;
855 856
@@ -996,7 +997,7 @@ static int asus_hotk_add(struct acpi_device *device)
996 hotk->handle = device->handle; 997 hotk->handle = device->handle;
997 strcpy(acpi_device_name(device), ASUS_HOTK_DEVICE_NAME); 998 strcpy(acpi_device_name(device), ASUS_HOTK_DEVICE_NAME);
998 strcpy(acpi_device_class(device), ASUS_HOTK_CLASS); 999 strcpy(acpi_device_class(device), ASUS_HOTK_CLASS);
999 acpi_driver_data(device) = hotk; 1000 device->driver_data = hotk;
1000 hotk->device = device; 1001 hotk->device = device;
1001 1002
1002 result = asus_hotk_check(); 1003 result = asus_hotk_check();
@@ -1207,9 +1208,13 @@ static int __init asus_laptop_init(void)
1207 1208
1208 dev = acpi_get_physical_device(hotk->device->handle); 1209 dev = acpi_get_physical_device(hotk->device->handle);
1209 1210
1210 result = asus_backlight_init(dev); 1211 if (!acpi_video_backlight_support()) {
1211 if (result) 1212 result = asus_backlight_init(dev);
1212 goto fail_backlight; 1213 if (result)
1214 goto fail_backlight;
1215 } else
1216 printk(ASUS_INFO "Brightness ignored, must be controlled by "
1217 "ACPI video driver\n");
1213 1218
1214 result = asus_led_init(dev); 1219 result = asus_led_init(dev);
1215 if (result) 1220 if (result)
diff --git a/drivers/misc/c2port/Kconfig b/drivers/misc/c2port/Kconfig
new file mode 100644
index 000000000000..e46af9a5810d
--- /dev/null
+++ b/drivers/misc/c2port/Kconfig
@@ -0,0 +1,35 @@
1#
2# C2 port devices
3#
4
5menuconfig C2PORT
6 tristate "Silicon Labs C2 port support (EXPERIMENTAL)"
7 depends on EXPERIMENTAL
8	default n
9 help
10	  This option enables support for the Silicon Labs C2 port, used to
11	  program Silicon Labs microcontrollers (and other 8051-compatible chips).
12
13	  If your board has no such microcontrollers, you don't need this
14	  interface at all.
15
16 To compile this driver as a module, choose M here: the module will
17 be called c2port_core. Note that you also need a client module
18 usually called c2port-*.
19
20 If you are not sure, say N here.
21
22if C2PORT
23
24config C2PORT_DURAMAR_2150
25 tristate "C2 port support for Eurotech's Duramar 2150 (EXPERIMENTAL)"
26 depends on X86 && C2PORT
27	default n
28 help
29	  This option enables C2 support for the on-board microcontroller
30	  of Eurotech's Duramar 2150.
31
32 To compile this driver as a module, choose M here: the module will
33 be called c2port-duramar2150.
34
35endif # C2PORT
diff --git a/drivers/misc/c2port/Makefile b/drivers/misc/c2port/Makefile
new file mode 100644
index 000000000000..3b2cf43d60f5
--- /dev/null
+++ b/drivers/misc/c2port/Makefile
@@ -0,0 +1,3 @@
1obj-$(CONFIG_C2PORT) += core.o
2
3obj-$(CONFIG_C2PORT_DURAMAR_2150) += c2port-duramar2150.o
diff --git a/drivers/misc/c2port/c2port-duramar2150.c b/drivers/misc/c2port/c2port-duramar2150.c
new file mode 100644
index 000000000000..338dcc121507
--- /dev/null
+++ b/drivers/misc/c2port/c2port-duramar2150.c
@@ -0,0 +1,158 @@
1/*
2 * Silicon Labs C2 port Linux support for Eurotech Duramar 2150
3 *
4 * Copyright (c) 2008 Rodolfo Giometti <giometti@linux.it>
5 * Copyright (c) 2008 Eurotech S.p.A. <info@eurotech.it>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 as published by
9 * the Free Software Foundation
10 */
11
12#include <linux/errno.h>
13#include <linux/init.h>
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/delay.h>
17#include <linux/io.h>
18#include <linux/c2port.h>
19
20#define DATA_PORT 0x325
21#define DIR_PORT 0x326
22#define C2D (1 << 0)
23#define C2CK (1 << 1)
24
25static DEFINE_MUTEX(update_lock);
26
27/*
28 * C2 port operations
29 */
30
31static void duramar2150_c2port_access(struct c2port_device *dev, int status)
32{
33 u8 v;
34
35 mutex_lock(&update_lock);
36
37 v = inb(DIR_PORT);
38
39 /* 0 = input, 1 = output */
40 if (status)
41 outb(v | (C2D | C2CK), DIR_PORT);
42 else
43		/* When access is "off" it is important that both lines are
44		 * set as inputs or high impedance */
45 outb(v & ~(C2D | C2CK), DIR_PORT);
46
47 mutex_unlock(&update_lock);
48}
49
50static void duramar2150_c2port_c2d_dir(struct c2port_device *dev, int dir)
51{
52 u8 v;
53
54 mutex_lock(&update_lock);
55
56 v = inb(DIR_PORT);
57
58 if (dir)
59 outb(v & ~C2D, DIR_PORT);
60 else
61 outb(v | C2D, DIR_PORT);
62
63 mutex_unlock(&update_lock);
64}
65
66static int duramar2150_c2port_c2d_get(struct c2port_device *dev)
67{
68 return inb(DATA_PORT) & C2D;
69}
70
71static void duramar2150_c2port_c2d_set(struct c2port_device *dev, int status)
72{
73 u8 v;
74
75 mutex_lock(&update_lock);
76
77 v = inb(DATA_PORT);
78
79 if (status)
80 outb(v | C2D, DATA_PORT);
81 else
82 outb(v & ~C2D, DATA_PORT);
83
84 mutex_unlock(&update_lock);
85}
86
87static void duramar2150_c2port_c2ck_set(struct c2port_device *dev, int status)
88{
89 u8 v;
90
91 mutex_lock(&update_lock);
92
93 v = inb(DATA_PORT);
94
95 if (status)
96 outb(v | C2CK, DATA_PORT);
97 else
98 outb(v & ~C2CK, DATA_PORT);
99
100 mutex_unlock(&update_lock);
101}
102
103static struct c2port_ops duramar2150_c2port_ops = {
104 .block_size = 512, /* bytes */
105 .blocks_num = 30, /* total flash size: 15360 bytes */
106
107 .access = duramar2150_c2port_access,
108 .c2d_dir = duramar2150_c2port_c2d_dir,
109 .c2d_get = duramar2150_c2port_c2d_get,
110 .c2d_set = duramar2150_c2port_c2d_set,
111 .c2ck_set = duramar2150_c2port_c2ck_set,
112};
113
114static struct c2port_device *duramar2150_c2port_dev;
115
116/*
117 * Module stuff
118 */
119
120static int __init duramar2150_c2port_init(void)
121{
122 struct resource *res;
123 int ret = 0;
124
125 res = request_region(0x325, 2, "c2port");
126 if (!res)
127 return -EBUSY;
128
129 duramar2150_c2port_dev = c2port_device_register("uc",
130 &duramar2150_c2port_ops, NULL);
131 if (!duramar2150_c2port_dev) {
132 ret = -ENODEV;
133 goto free_region;
134 }
135
136 return 0;
137
138free_region:
139 release_region(0x325, 2);
140 return ret;
141}
142
143static void __exit duramar2150_c2port_exit(void)
144{
145	/* Set the C2 lines back to inputs by default (access = 0) */
146 duramar2150_c2port_access(duramar2150_c2port_dev, 0);
147
148 c2port_device_unregister(duramar2150_c2port_dev);
149
150 release_region(0x325, 2);
151}
152
153module_init(duramar2150_c2port_init);
154module_exit(duramar2150_c2port_exit);
155
156MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>");
157MODULE_DESCRIPTION("Silicon Labs C2 port Linux support for Duramar 2150");
158MODULE_LICENSE("GPL");
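
The Duramar client above is the entire board-specific surface of the C2 framework: a handful of pin-level callbacks plus one c2port_device_register() call. As a purely illustrative sketch (not part of this patch set), the same ops could be wired to gpiolib on a board that exposes C2D/C2CK as GPIO lines; the GPIO numbers and all "myboard" names below are invented for the example.

/* Hypothetical GPIO-based C2 port client -- illustrative only. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/c2port.h>

#define MYBOARD_GPIO_C2D	100	/* invented GPIO numbers */
#define MYBOARD_GPIO_C2CK	101

static void myboard_c2port_access(struct c2port_device *dev, int status)
{
	if (status) {
		/* Drive both lines; the core raises C2CK before enabling access. */
		gpio_direction_output(MYBOARD_GPIO_C2CK, 1);
		gpio_direction_output(MYBOARD_GPIO_C2D, 1);
	} else {
		/* Release the bus: both lines back to inputs (hi-Z). */
		gpio_direction_input(MYBOARD_GPIO_C2CK);
		gpio_direction_input(MYBOARD_GPIO_C2D);
	}
}

static void myboard_c2port_c2d_dir(struct c2port_device *dev, int dir)
{
	if (dir)	/* 1 = input, same convention as the Duramar client */
		gpio_direction_input(MYBOARD_GPIO_C2D);
	else
		gpio_direction_output(MYBOARD_GPIO_C2D,
				      gpio_get_value(MYBOARD_GPIO_C2D));
}

static int myboard_c2port_c2d_get(struct c2port_device *dev)
{
	return gpio_get_value(MYBOARD_GPIO_C2D);
}

static void myboard_c2port_c2d_set(struct c2port_device *dev, int status)
{
	gpio_set_value(MYBOARD_GPIO_C2D, status);
}

static void myboard_c2port_c2ck_set(struct c2port_device *dev, int status)
{
	gpio_set_value(MYBOARD_GPIO_C2CK, status);
}

static struct c2port_ops myboard_c2port_ops = {
	.block_size	= 512,	/* per-target values from the MCU datasheet */
	.blocks_num	= 30,
	.access		= myboard_c2port_access,
	.c2d_dir	= myboard_c2port_c2d_dir,
	.c2d_get	= myboard_c2port_c2d_get,
	.c2d_set	= myboard_c2port_c2d_set,
	.c2ck_set	= myboard_c2port_c2ck_set,
};

static struct c2port_device *myboard_c2port_dev;

static int __init myboard_c2port_init(void)
{
	int ret;

	ret = gpio_request(MYBOARD_GPIO_C2D, "c2port-c2d");
	if (ret)
		return ret;
	ret = gpio_request(MYBOARD_GPIO_C2CK, "c2port-c2ck");
	if (ret)
		goto err_free_c2d;

	myboard_c2port_dev = c2port_device_register("uc",
			&myboard_c2port_ops, NULL);
	if (IS_ERR(myboard_c2port_dev)) {
		ret = PTR_ERR(myboard_c2port_dev);
		goto err_free_c2ck;
	}
	return 0;

err_free_c2ck:
	gpio_free(MYBOARD_GPIO_C2CK);
err_free_c2d:
	gpio_free(MYBOARD_GPIO_C2D);
	return ret;
}

static void __exit myboard_c2port_exit(void)
{
	c2port_device_unregister(myboard_c2port_dev);
	gpio_free(MYBOARD_GPIO_C2CK);
	gpio_free(MYBOARD_GPIO_C2D);
}

module_init(myboard_c2port_init);
module_exit(myboard_c2port_exit);
MODULE_LICENSE("GPL");
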
diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
new file mode 100644
index 000000000000..0207dd59090d
--- /dev/null
+++ b/drivers/misc/c2port/core.c
@@ -0,0 +1,1003 @@
1/*
2 * Silicon Labs C2 port core Linux support
3 *
4 * Copyright (c) 2007 Rodolfo Giometti <giometti@linux.it>
5 * Copyright (c) 2007 Eurotech S.p.A. <info@eurotech.it>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 as published by
9 * the Free Software Foundation
10 */
11
12#include <linux/module.h>
13#include <linux/init.h>
14#include <linux/device.h>
15#include <linux/errno.h>
16#include <linux/err.h>
17#include <linux/kernel.h>
18#include <linux/ctype.h>
19#include <linux/delay.h>
20#include <linux/idr.h>
21#include <linux/sched.h>
22
23#include <linux/c2port.h>
24
25#define DRIVER_NAME "c2port"
26#define DRIVER_VERSION "0.51.0"
27
28static DEFINE_SPINLOCK(c2port_idr_lock);
29static DEFINE_IDR(c2port_idr);
30
31/*
32 * Local variables
33 */
34
35static struct class *c2port_class;
36
37/*
38 * C2 registers & commands defines
39 */
40
41/* C2 registers */
42#define C2PORT_DEVICEID 0x00
43#define C2PORT_REVID 0x01
44#define C2PORT_FPCTL 0x02
45#define C2PORT_FPDAT 0xB4
46
47/* C2 interface commands */
48#define C2PORT_GET_VERSION 0x01
49#define C2PORT_DEVICE_ERASE 0x03
50#define C2PORT_BLOCK_READ 0x06
51#define C2PORT_BLOCK_WRITE 0x07
52#define C2PORT_PAGE_ERASE 0x08
53
54/* C2 status return codes */
55#define C2PORT_INVALID_COMMAND 0x00
56#define C2PORT_COMMAND_FAILED 0x02
57#define C2PORT_COMMAND_OK 0x0d
58
59/*
60 * C2 port low-level signal management
61 */
62
63static void c2port_reset(struct c2port_device *dev)
64{
65 struct c2port_ops *ops = dev->ops;
66
67	/* To reset the device we have to keep the clock line low for at
68	 * least 20us.
69	 */
70 local_irq_disable();
71 ops->c2ck_set(dev, 0);
72 udelay(25);
73 ops->c2ck_set(dev, 1);
74 local_irq_enable();
75
76 udelay(1);
77}
78
79static void c2port_strobe_ck(struct c2port_device *dev)
80{
81 struct c2port_ops *ops = dev->ops;
82
83	/* During the high-low-high transition we disable local IRQs to avoid
84	 * interruptions, since the C2 port specification says the transition
85	 * must be shorter than 5us, otherwise the microcontroller may treat
86	 * it as a reset signal!
87	 */
88 local_irq_disable();
89 ops->c2ck_set(dev, 0);
90 udelay(1);
91 ops->c2ck_set(dev, 1);
92 local_irq_enable();
93
94 udelay(1);
95}
96
97/*
98 * C2 port basic functions
99 */
100
101static void c2port_write_ar(struct c2port_device *dev, u8 addr)
102{
103 struct c2port_ops *ops = dev->ops;
104 int i;
105
106 /* START field */
107 c2port_strobe_ck(dev);
108
109 /* INS field (11b, LSB first) */
110 ops->c2d_dir(dev, 0);
111 ops->c2d_set(dev, 1);
112 c2port_strobe_ck(dev);
113 ops->c2d_set(dev, 1);
114 c2port_strobe_ck(dev);
115
116 /* ADDRESS field */
117 for (i = 0; i < 8; i++) {
118 ops->c2d_set(dev, addr & 0x01);
119 c2port_strobe_ck(dev);
120
121 addr >>= 1;
122 }
123
124 /* STOP field */
125 ops->c2d_dir(dev, 1);
126 c2port_strobe_ck(dev);
127}
128
129static int c2port_read_ar(struct c2port_device *dev, u8 *addr)
130{
131 struct c2port_ops *ops = dev->ops;
132 int i;
133
134 /* START field */
135 c2port_strobe_ck(dev);
136
137 /* INS field (10b, LSB first) */
138 ops->c2d_dir(dev, 0);
139 ops->c2d_set(dev, 0);
140 c2port_strobe_ck(dev);
141 ops->c2d_set(dev, 1);
142 c2port_strobe_ck(dev);
143
144 /* ADDRESS field */
145 ops->c2d_dir(dev, 1);
146 *addr = 0;
147 for (i = 0; i < 8; i++) {
148 *addr >>= 1; /* shift in 8-bit ADDRESS field LSB first */
149
150 c2port_strobe_ck(dev);
151 if (ops->c2d_get(dev))
152 *addr |= 0x80;
153 }
154
155 /* STOP field */
156 c2port_strobe_ck(dev);
157
158 return 0;
159}
160
161static int c2port_write_dr(struct c2port_device *dev, u8 data)
162{
163 struct c2port_ops *ops = dev->ops;
164 int timeout, i;
165
166 /* START field */
167 c2port_strobe_ck(dev);
168
169 /* INS field (01b, LSB first) */
170 ops->c2d_dir(dev, 0);
171 ops->c2d_set(dev, 1);
172 c2port_strobe_ck(dev);
173 ops->c2d_set(dev, 0);
174 c2port_strobe_ck(dev);
175
176 /* LENGTH field (00b, LSB first -> 1 byte) */
177 ops->c2d_set(dev, 0);
178 c2port_strobe_ck(dev);
179 ops->c2d_set(dev, 0);
180 c2port_strobe_ck(dev);
181
182 /* DATA field */
183 for (i = 0; i < 8; i++) {
184 ops->c2d_set(dev, data & 0x01);
185 c2port_strobe_ck(dev);
186
187 data >>= 1;
188 }
189
190 /* WAIT field */
191 ops->c2d_dir(dev, 1);
192 timeout = 20;
193 do {
194 c2port_strobe_ck(dev);
195 if (ops->c2d_get(dev))
196 break;
197
198 udelay(1);
199 } while (--timeout > 0);
200 if (timeout == 0)
201 return -EIO;
202
203 /* STOP field */
204 c2port_strobe_ck(dev);
205
206 return 0;
207}
208
209static int c2port_read_dr(struct c2port_device *dev, u8 *data)
210{
211 struct c2port_ops *ops = dev->ops;
212 int timeout, i;
213
214 /* START field */
215 c2port_strobe_ck(dev);
216
217 /* INS field (00b, LSB first) */
218 ops->c2d_dir(dev, 0);
219 ops->c2d_set(dev, 0);
220 c2port_strobe_ck(dev);
221 ops->c2d_set(dev, 0);
222 c2port_strobe_ck(dev);
223
224 /* LENGTH field (00b, LSB first -> 1 byte) */
225 ops->c2d_set(dev, 0);
226 c2port_strobe_ck(dev);
227 ops->c2d_set(dev, 0);
228 c2port_strobe_ck(dev);
229
230 /* WAIT field */
231 ops->c2d_dir(dev, 1);
232 timeout = 20;
233 do {
234 c2port_strobe_ck(dev);
235 if (ops->c2d_get(dev))
236 break;
237
238 udelay(1);
239 } while (--timeout > 0);
240 if (timeout == 0)
241 return -EIO;
242
243 /* DATA field */
244 *data = 0;
245 for (i = 0; i < 8; i++) {
246 *data >>= 1; /* shift in 8-bit DATA field LSB first */
247
248 c2port_strobe_ck(dev);
249 if (ops->c2d_get(dev))
250 *data |= 0x80;
251 }
252
253 /* STOP field */
254 c2port_strobe_ck(dev);
255
256 return 0;
257}
258
259static int c2port_poll_in_busy(struct c2port_device *dev)
260{
261 u8 addr;
262 int ret, timeout = 20;
263
264 do {
265 ret = (c2port_read_ar(dev, &addr));
266 if (ret < 0)
267 return -EIO;
268
269 if (!(addr & 0x02))
270 break;
271
272 udelay(1);
273 } while (--timeout > 0);
274 if (timeout == 0)
275 return -EIO;
276
277 return 0;
278}
279
280static int c2port_poll_out_ready(struct c2port_device *dev)
281{
282 u8 addr;
283	int ret, timeout = 10000;	/* flash erase needs a long time... */
284
285 do {
286 ret = (c2port_read_ar(dev, &addr));
287 if (ret < 0)
288 return -EIO;
289
290 if (addr & 0x01)
291 break;
292
293 udelay(1);
294 } while (--timeout > 0);
295 if (timeout == 0)
296 return -EIO;
297
298 return 0;
299}
300
301/*
302 * sysfs methods
303 */
304
305static ssize_t c2port_show_name(struct device *dev,
306 struct device_attribute *attr, char *buf)
307{
308 struct c2port_device *c2dev = dev_get_drvdata(dev);
309
310 return sprintf(buf, "%s\n", c2dev->name);
311}
312
313static ssize_t c2port_show_flash_blocks_num(struct device *dev,
314 struct device_attribute *attr, char *buf)
315{
316 struct c2port_device *c2dev = dev_get_drvdata(dev);
317 struct c2port_ops *ops = c2dev->ops;
318
319 return sprintf(buf, "%d\n", ops->blocks_num);
320}
321
322static ssize_t c2port_show_flash_block_size(struct device *dev,
323 struct device_attribute *attr, char *buf)
324{
325 struct c2port_device *c2dev = dev_get_drvdata(dev);
326 struct c2port_ops *ops = c2dev->ops;
327
328 return sprintf(buf, "%d\n", ops->block_size);
329}
330
331static ssize_t c2port_show_flash_size(struct device *dev,
332 struct device_attribute *attr, char *buf)
333{
334 struct c2port_device *c2dev = dev_get_drvdata(dev);
335 struct c2port_ops *ops = c2dev->ops;
336
337 return sprintf(buf, "%d\n", ops->blocks_num * ops->block_size);
338}
339
340static ssize_t c2port_show_access(struct device *dev,
341 struct device_attribute *attr, char *buf)
342{
343 struct c2port_device *c2dev = dev_get_drvdata(dev);
344
345 return sprintf(buf, "%d\n", c2dev->access);
346}
347
348static ssize_t c2port_store_access(struct device *dev,
349 struct device_attribute *attr,
350 const char *buf, size_t count)
351{
352 struct c2port_device *c2dev = dev_get_drvdata(dev);
353 struct c2port_ops *ops = c2dev->ops;
354 int status, ret;
355
356 ret = sscanf(buf, "%d", &status);
357 if (ret != 1)
358 return -EINVAL;
359
360 mutex_lock(&c2dev->mutex);
361
362 c2dev->access = !!status;
363
364	/* If access is "on", the clock should be HIGH _before_ setting the
365	 * line as an output, and the data line should be set as an INPUT anyway */
366 if (c2dev->access)
367 ops->c2ck_set(c2dev, 1);
368 ops->access(c2dev, c2dev->access);
369 if (c2dev->access)
370 ops->c2d_dir(c2dev, 1);
371
372 mutex_unlock(&c2dev->mutex);
373
374 return count;
375}
376
377static ssize_t c2port_store_reset(struct device *dev,
378 struct device_attribute *attr,
379 const char *buf, size_t count)
380{
381 struct c2port_device *c2dev = dev_get_drvdata(dev);
382
383 /* Check the device access status */
384 if (!c2dev->access)
385 return -EBUSY;
386
387 mutex_lock(&c2dev->mutex);
388
389 c2port_reset(c2dev);
390 c2dev->flash_access = 0;
391
392 mutex_unlock(&c2dev->mutex);
393
394 return count;
395}
396
397static ssize_t __c2port_show_dev_id(struct c2port_device *dev, char *buf)
398{
399 u8 data;
400 int ret;
401
402 /* Select DEVICEID register for C2 data register accesses */
403 c2port_write_ar(dev, C2PORT_DEVICEID);
404
405 /* Read and return the device ID register */
406 ret = c2port_read_dr(dev, &data);
407 if (ret < 0)
408 return ret;
409
410 return sprintf(buf, "%d\n", data);
411}
412
413static ssize_t c2port_show_dev_id(struct device *dev,
414 struct device_attribute *attr, char *buf)
415{
416 struct c2port_device *c2dev = dev_get_drvdata(dev);
417 ssize_t ret;
418
419 /* Check the device access status */
420 if (!c2dev->access)
421 return -EBUSY;
422
423 mutex_lock(&c2dev->mutex);
424 ret = __c2port_show_dev_id(c2dev, buf);
425 mutex_unlock(&c2dev->mutex);
426
427 if (ret < 0)
428 dev_err(dev, "cannot read from %s\n", c2dev->name);
429
430 return ret;
431}
432
433static ssize_t __c2port_show_rev_id(struct c2port_device *dev, char *buf)
434{
435 u8 data;
436 int ret;
437
438 /* Select REVID register for C2 data register accesses */
439 c2port_write_ar(dev, C2PORT_REVID);
440
441 /* Read and return the revision ID register */
442 ret = c2port_read_dr(dev, &data);
443 if (ret < 0)
444 return ret;
445
446 return sprintf(buf, "%d\n", data);
447}
448
449static ssize_t c2port_show_rev_id(struct device *dev,
450 struct device_attribute *attr, char *buf)
451{
452 struct c2port_device *c2dev = dev_get_drvdata(dev);
453 ssize_t ret;
454
455 /* Check the device access status */
456 if (!c2dev->access)
457 return -EBUSY;
458
459 mutex_lock(&c2dev->mutex);
460 ret = __c2port_show_rev_id(c2dev, buf);
461 mutex_unlock(&c2dev->mutex);
462
463 if (ret < 0)
464 dev_err(c2dev->dev, "cannot read from %s\n", c2dev->name);
465
466 return ret;
467}
468
469static ssize_t c2port_show_flash_access(struct device *dev,
470 struct device_attribute *attr, char *buf)
471{
472 struct c2port_device *c2dev = dev_get_drvdata(dev);
473
474 return sprintf(buf, "%d\n", c2dev->flash_access);
475}
476
477static ssize_t __c2port_store_flash_access(struct c2port_device *dev,
478 int status)
479{
480 int ret;
481
482 /* Check the device access status */
483 if (!dev->access)
484 return -EBUSY;
485
486 dev->flash_access = !!status;
487
488 /* If flash_access is off we have nothing to do... */
489 if (dev->flash_access == 0)
490 return 0;
491
492 /* Target the C2 flash programming control register for C2 data
493 * register access */
494 c2port_write_ar(dev, C2PORT_FPCTL);
495
496 /* Write the first keycode to enable C2 Flash programming */
497 ret = c2port_write_dr(dev, 0x02);
498 if (ret < 0)
499 return ret;
500
501 /* Write the second keycode to enable C2 Flash programming */
502 ret = c2port_write_dr(dev, 0x01);
503 if (ret < 0)
504 return ret;
505
506 /* Delay for at least 20ms to ensure the target is ready for
507 * C2 flash programming */
508 mdelay(25);
509
510 return 0;
511}
512
513static ssize_t c2port_store_flash_access(struct device *dev,
514 struct device_attribute *attr,
515 const char *buf, size_t count)
516{
517 struct c2port_device *c2dev = dev_get_drvdata(dev);
518 int status;
519 ssize_t ret;
520
521 ret = sscanf(buf, "%d", &status);
522 if (ret != 1)
523 return -EINVAL;
524
525 mutex_lock(&c2dev->mutex);
526 ret = __c2port_store_flash_access(c2dev, status);
527 mutex_unlock(&c2dev->mutex);
528
529 if (ret < 0) {
530 dev_err(c2dev->dev, "cannot enable %s flash programming\n",
531 c2dev->name);
532 return ret;
533 }
534
535 return count;
536}
537
538static ssize_t __c2port_write_flash_erase(struct c2port_device *dev)
539{
540 u8 status;
541 int ret;
542
543 /* Target the C2 flash programming data register for C2 data register
544 * access.
545 */
546 c2port_write_ar(dev, C2PORT_FPDAT);
547
548 /* Send device erase command */
549 c2port_write_dr(dev, C2PORT_DEVICE_ERASE);
550
551 /* Wait for input acknowledge */
552 ret = c2port_poll_in_busy(dev);
553 if (ret < 0)
554 return ret;
555
556 /* Should check status before starting FLASH access sequence */
557
558 /* Wait for status information */
559 ret = c2port_poll_out_ready(dev);
560 if (ret < 0)
561 return ret;
562
563 /* Read flash programming interface status */
564 ret = c2port_read_dr(dev, &status);
565 if (ret < 0)
566 return ret;
567 if (status != C2PORT_COMMAND_OK)
568 return -EBUSY;
569
570 /* Send a three-byte arming sequence to enable the device erase.
571 * If the sequence is not received correctly, the command will be
572 * ignored.
573 * Sequence is: 0xde, 0xad, 0xa5.
574 */
575 c2port_write_dr(dev, 0xde);
576 ret = c2port_poll_in_busy(dev);
577 if (ret < 0)
578 return ret;
579 c2port_write_dr(dev, 0xad);
580 ret = c2port_poll_in_busy(dev);
581 if (ret < 0)
582 return ret;
583 c2port_write_dr(dev, 0xa5);
584 ret = c2port_poll_in_busy(dev);
585 if (ret < 0)
586 return ret;
587
588 ret = c2port_poll_out_ready(dev);
589 if (ret < 0)
590 return ret;
591
592 return 0;
593}
594
595static ssize_t c2port_store_flash_erase(struct device *dev,
596 struct device_attribute *attr,
597 const char *buf, size_t count)
598{
599 struct c2port_device *c2dev = dev_get_drvdata(dev);
600 int ret;
601
602 /* Check the device and flash access status */
603 if (!c2dev->access || !c2dev->flash_access)
604 return -EBUSY;
605
606 mutex_lock(&c2dev->mutex);
607 ret = __c2port_write_flash_erase(c2dev);
608 mutex_unlock(&c2dev->mutex);
609
610 if (ret < 0) {
611 dev_err(c2dev->dev, "cannot erase %s flash\n", c2dev->name);
612 return ret;
613 }
614
615 return count;
616}
617
618static ssize_t __c2port_read_flash_data(struct c2port_device *dev,
619 char *buffer, loff_t offset, size_t count)
620{
621 struct c2port_ops *ops = dev->ops;
622 u8 status, nread = 128;
623 int i, ret;
624
625 /* Check for flash end */
626 if (offset >= ops->block_size * ops->blocks_num)
627 return 0;
628
629 if (ops->block_size * ops->blocks_num - offset < nread)
630 nread = ops->block_size * ops->blocks_num - offset;
631 if (count < nread)
632 nread = count;
633 if (nread == 0)
634 return nread;
635
636 /* Target the C2 flash programming data register for C2 data register
637 * access */
638 c2port_write_ar(dev, C2PORT_FPDAT);
639
640 /* Send flash block read command */
641 c2port_write_dr(dev, C2PORT_BLOCK_READ);
642
643 /* Wait for input acknowledge */
644 ret = c2port_poll_in_busy(dev);
645 if (ret < 0)
646 return ret;
647
648 /* Should check status before starting FLASH access sequence */
649
650 /* Wait for status information */
651 ret = c2port_poll_out_ready(dev);
652 if (ret < 0)
653 return ret;
654
655 /* Read flash programming interface status */
656 ret = c2port_read_dr(dev, &status);
657 if (ret < 0)
658 return ret;
659 if (status != C2PORT_COMMAND_OK)
660 return -EBUSY;
661
662 /* Send address high byte */
663 c2port_write_dr(dev, offset >> 8);
664 ret = c2port_poll_in_busy(dev);
665 if (ret < 0)
666 return ret;
667
668 /* Send address low byte */
669 c2port_write_dr(dev, offset & 0x00ff);
670 ret = c2port_poll_in_busy(dev);
671 if (ret < 0)
672 return ret;
673
674 /* Send address block size */
675 c2port_write_dr(dev, nread);
676 ret = c2port_poll_in_busy(dev);
677 if (ret < 0)
678 return ret;
679
680 /* Should check status before reading FLASH block */
681
682 /* Wait for status information */
683 ret = c2port_poll_out_ready(dev);
684 if (ret < 0)
685 return ret;
686
687 /* Read flash programming interface status */
688 ret = c2port_read_dr(dev, &status);
689 if (ret < 0)
690 return ret;
691 if (status != C2PORT_COMMAND_OK)
692 return -EBUSY;
693
694 /* Read flash block */
695 for (i = 0; i < nread; i++) {
696 ret = c2port_poll_out_ready(dev);
697 if (ret < 0)
698 return ret;
699
700 ret = c2port_read_dr(dev, buffer+i);
701 if (ret < 0)
702 return ret;
703 }
704
705 return nread;
706}
707
708static ssize_t c2port_read_flash_data(struct kobject *kobj,
709 struct bin_attribute *attr,
710 char *buffer, loff_t offset, size_t count)
711{
712 struct c2port_device *c2dev =
713 dev_get_drvdata(container_of(kobj,
714 struct device, kobj));
715 ssize_t ret;
716
717 /* Check the device and flash access status */
718 if (!c2dev->access || !c2dev->flash_access)
719 return -EBUSY;
720
721 mutex_lock(&c2dev->mutex);
722 ret = __c2port_read_flash_data(c2dev, buffer, offset, count);
723 mutex_unlock(&c2dev->mutex);
724
725 if (ret < 0)
726 dev_err(c2dev->dev, "cannot read %s flash\n", c2dev->name);
727
728 return ret;
729}
730
731static ssize_t __c2port_write_flash_data(struct c2port_device *dev,
732 char *buffer, loff_t offset, size_t count)
733{
734 struct c2port_ops *ops = dev->ops;
735 u8 status, nwrite = 128;
736 int i, ret;
737
738 if (nwrite > count)
739 nwrite = count;
740 if (ops->block_size * ops->blocks_num - offset < nwrite)
741 nwrite = ops->block_size * ops->blocks_num - offset;
742
743 /* Check for flash end */
744 if (offset >= ops->block_size * ops->blocks_num)
745 return -EINVAL;
746
747 /* Target the C2 flash programming data register for C2 data register
748 * access */
749 c2port_write_ar(dev, C2PORT_FPDAT);
750
751 /* Send flash block write command */
752 c2port_write_dr(dev, C2PORT_BLOCK_WRITE);
753
754 /* Wait for input acknowledge */
755 ret = c2port_poll_in_busy(dev);
756 if (ret < 0)
757 return ret;
758
759 /* Should check status before starting FLASH access sequence */
760
761 /* Wait for status information */
762 ret = c2port_poll_out_ready(dev);
763 if (ret < 0)
764 return ret;
765
766 /* Read flash programming interface status */
767 ret = c2port_read_dr(dev, &status);
768 if (ret < 0)
769 return ret;
770 if (status != C2PORT_COMMAND_OK)
771 return -EBUSY;
772
773 /* Send address high byte */
774 c2port_write_dr(dev, offset >> 8);
775 ret = c2port_poll_in_busy(dev);
776 if (ret < 0)
777 return ret;
778
779 /* Send address low byte */
780 c2port_write_dr(dev, offset & 0x00ff);
781 ret = c2port_poll_in_busy(dev);
782 if (ret < 0)
783 return ret;
784
785 /* Send address block size */
786 c2port_write_dr(dev, nwrite);
787 ret = c2port_poll_in_busy(dev);
788 if (ret < 0)
789 return ret;
790
791 /* Should check status before writing FLASH block */
792
793 /* Wait for status information */
794 ret = c2port_poll_out_ready(dev);
795 if (ret < 0)
796 return ret;
797
798 /* Read flash programming interface status */
799 ret = c2port_read_dr(dev, &status);
800 if (ret < 0)
801 return ret;
802 if (status != C2PORT_COMMAND_OK)
803 return -EBUSY;
804
805 /* Write flash block */
806 for (i = 0; i < nwrite; i++) {
807 ret = c2port_write_dr(dev, *(buffer+i));
808 if (ret < 0)
809 return ret;
810
811 ret = c2port_poll_in_busy(dev);
812 if (ret < 0)
813 return ret;
814
815 }
816
817 /* Wait for last flash write to complete */
818 ret = c2port_poll_out_ready(dev);
819 if (ret < 0)
820 return ret;
821
822 return nwrite;
823}
824
825static ssize_t c2port_write_flash_data(struct kobject *kobj,
826 struct bin_attribute *attr,
827 char *buffer, loff_t offset, size_t count)
828{
829 struct c2port_device *c2dev =
830 dev_get_drvdata(container_of(kobj,
831 struct device, kobj));
832 int ret;
833
834 /* Check the device access status */
835 if (!c2dev->access || !c2dev->flash_access)
836 return -EBUSY;
837
838 mutex_lock(&c2dev->mutex);
839 ret = __c2port_write_flash_data(c2dev, buffer, offset, count);
840 mutex_unlock(&c2dev->mutex);
841
842 if (ret < 0)
843 dev_err(c2dev->dev, "cannot write %s flash\n", c2dev->name);
844
845 return ret;
846}
847
848/*
849 * Class attributes
850 */
851
852static struct device_attribute c2port_attrs[] = {
853 __ATTR(name, 0444, c2port_show_name, NULL),
854 __ATTR(flash_blocks_num, 0444, c2port_show_flash_blocks_num, NULL),
855 __ATTR(flash_block_size, 0444, c2port_show_flash_block_size, NULL),
856 __ATTR(flash_size, 0444, c2port_show_flash_size, NULL),
857 __ATTR(access, 0644, c2port_show_access, c2port_store_access),
858 __ATTR(reset, 0200, NULL, c2port_store_reset),
859 __ATTR(dev_id, 0444, c2port_show_dev_id, NULL),
860 __ATTR(rev_id, 0444, c2port_show_rev_id, NULL),
861
862 __ATTR(flash_access, 0644, c2port_show_flash_access,
863 c2port_store_flash_access),
864 __ATTR(flash_erase, 0200, NULL, c2port_store_flash_erase),
865 __ATTR_NULL,
866};
867
868static struct bin_attribute c2port_bin_attrs = {
869 .attr = {
870 .name = "flash_data",
871 .mode = 0644
872 },
873 .read = c2port_read_flash_data,
874 .write = c2port_write_flash_data,
875 /* .size is computed at run-time */
876};
877
878/*
879 * Exported functions
880 */
881
882struct c2port_device *c2port_device_register(char *name,
883 struct c2port_ops *ops, void *devdata)
884{
885 struct c2port_device *c2dev;
886 int id, ret;
887
888	if (unlikely(!ops) || unlikely(!ops->access) ||
889			unlikely(!ops->c2d_dir) || unlikely(!ops->c2ck_set) ||
890			unlikely(!ops->c2d_get) || unlikely(!ops->c2d_set))
891 return ERR_PTR(-EINVAL);
892
893 c2dev = kmalloc(sizeof(struct c2port_device), GFP_KERNEL);
894 if (unlikely(!c2dev))
895 return ERR_PTR(-ENOMEM);
896
897 ret = idr_pre_get(&c2port_idr, GFP_KERNEL);
898 if (!ret) {
899 ret = -ENOMEM;
900 goto error_idr_get_new;
901 }
902
903 spin_lock_irq(&c2port_idr_lock);
904 ret = idr_get_new(&c2port_idr, c2dev, &id);
905 spin_unlock_irq(&c2port_idr_lock);
906
907 if (ret < 0)
908 goto error_idr_get_new;
909 c2dev->id = id;
910
911 c2dev->dev = device_create(c2port_class, NULL, 0, c2dev,
912 "c2port%d", id);
913	if (unlikely(IS_ERR(c2dev->dev))) {
914		ret = PTR_ERR(c2dev->dev);
915 goto error_device_create;
916 }
917 dev_set_drvdata(c2dev->dev, c2dev);
918
919 strncpy(c2dev->name, name, C2PORT_NAME_LEN);
920 c2dev->ops = ops;
921 mutex_init(&c2dev->mutex);
922
923 /* Create binary file */
924 c2port_bin_attrs.size = ops->blocks_num * ops->block_size;
925 ret = device_create_bin_file(c2dev->dev, &c2port_bin_attrs);
926 if (unlikely(ret))
927 goto error_device_create_bin_file;
928
929 /* By default C2 port access is off */
930 c2dev->access = c2dev->flash_access = 0;
931 ops->access(c2dev, 0);
932
933 dev_info(c2dev->dev, "C2 port %s added\n", name);
934 dev_info(c2dev->dev, "%s flash has %d blocks x %d bytes "
935 "(%d bytes total)\n",
936 name, ops->blocks_num, ops->block_size,
937 ops->blocks_num * ops->block_size);
938
939 return c2dev;
940
941error_device_create_bin_file:
942 device_destroy(c2port_class, 0);
943
944error_device_create:
945 spin_lock_irq(&c2port_idr_lock);
946 idr_remove(&c2port_idr, id);
947 spin_unlock_irq(&c2port_idr_lock);
948
949error_idr_get_new:
950 kfree(c2dev);
951
952 return ERR_PTR(ret);
953}
954EXPORT_SYMBOL(c2port_device_register);
955
956void c2port_device_unregister(struct c2port_device *c2dev)
957{
958 if (!c2dev)
959 return;
960
961 dev_info(c2dev->dev, "C2 port %s removed\n", c2dev->name);
962
963 device_remove_bin_file(c2dev->dev, &c2port_bin_attrs);
964 spin_lock_irq(&c2port_idr_lock);
965 idr_remove(&c2port_idr, c2dev->id);
966 spin_unlock_irq(&c2port_idr_lock);
967
968 device_destroy(c2port_class, c2dev->id);
969
970 kfree(c2dev);
971}
972EXPORT_SYMBOL(c2port_device_unregister);
973
974/*
975 * Module stuff
976 */
977
978static int __init c2port_init(void)
979{
980 printk(KERN_INFO "Silicon Labs C2 port support v. " DRIVER_VERSION
981 " - (C) 2007 Rodolfo Giometti\n");
982
983 c2port_class = class_create(THIS_MODULE, "c2port");
984	if (IS_ERR(c2port_class)) {
985		printk(KERN_ERR "c2port: failed to allocate class\n");
986		return PTR_ERR(c2port_class);
987 }
988 c2port_class->dev_attrs = c2port_attrs;
989
990 return 0;
991}
992
993static void __exit c2port_exit(void)
994{
995 class_destroy(c2port_class);
996}
997
998module_init(c2port_init);
999module_exit(c2port_exit);
1000
1001MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>");
1002MODULE_DESCRIPTION("Silicon Labs C2 port support v. " DRIVER_VERSION);
1003MODULE_LICENSE("GPL");
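
Everything the core exports to userspace is the sysfs directory created above: /sys/class/c2port/c2portN/ with the name, dev_id, rev_id, access, reset, flash_access, flash_erase attributes and the flash_data binary file. A minimal userspace sketch of how a host tool might drive that interface follows; only the attribute names and paths come from the code above, while the tool itself and its error handling are invented for illustration.

/* c2dump.c -- hypothetical host-side helper, not part of this patch. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

#define SYSFS_DIR "/sys/class/c2port/c2port0"

static void write_attr(const char *attr, const char *val)
{
	char path[256];
	int fd;

	snprintf(path, sizeof(path), SYSFS_DIR "/%s", attr);
	fd = open(path, O_WRONLY);
	if (fd < 0 || write(fd, val, strlen(val)) < 0) {
		perror(path);
		exit(EXIT_FAILURE);
	}
	close(fd);
}

int main(void)
{
	char id[16];
	unsigned char buf[128];
	ssize_t n;
	int fd;

	write_attr("access", "1");	/* take over the C2 lines */
	write_attr("reset", "1");	/* any write pulses C2CK low (reset) */

	fd = open(SYSFS_DIR "/dev_id", O_RDONLY);
	if (fd >= 0) {
		n = read(fd, id, sizeof(id) - 1);
		if (n > 0) {
			id[n] = '\0';
			printf("device id: %s", id);
		}
		close(fd);
	}

	write_attr("flash_access", "1");	/* sends the FPCTL keycodes */

	fd = open(SYSFS_DIR "/flash_data", O_RDONLY);
	if (fd >= 0) {
		/* the core returns at most 128 bytes per read */
		n = read(fd, buf, sizeof(buf));
		printf("read %zd bytes from flash offset 0\n", n);
		close(fd);
	}

	write_attr("access", "0");	/* release the lines (back to hi-Z) */
	return 0;
}
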
diff --git a/drivers/misc/compal-laptop.c b/drivers/misc/compal-laptop.c
index 344b790a6253..11003bba10d3 100644
--- a/drivers/misc/compal-laptop.c
+++ b/drivers/misc/compal-laptop.c
@@ -326,12 +326,14 @@ static int __init compal_init(void)
326 326
327 /* Register backlight stuff */ 327 /* Register backlight stuff */
328 328
329 compalbl_device = backlight_device_register("compal-laptop", NULL, NULL, 329 if (!acpi_video_backlight_support()) {
330 &compalbl_ops); 330 compalbl_device = backlight_device_register("compal-laptop", NULL, NULL,
331 if (IS_ERR(compalbl_device)) 331 &compalbl_ops);
332 return PTR_ERR(compalbl_device); 332 if (IS_ERR(compalbl_device))
333 return PTR_ERR(compalbl_device);
333 334
334 compalbl_device->props.max_brightness = COMPAL_LCD_LEVEL_MAX-1; 335 compalbl_device->props.max_brightness = COMPAL_LCD_LEVEL_MAX-1;
336 }
335 337
336 ret = platform_driver_register(&compal_driver); 338 ret = platform_driver_register(&compal_driver);
337 if (ret) 339 if (ret)
diff --git a/drivers/misc/eeepc-laptop.c b/drivers/misc/eeepc-laptop.c
index 1ee8501e90f1..02fe2b8b8939 100644
--- a/drivers/misc/eeepc-laptop.c
+++ b/drivers/misc/eeepc-laptop.c
@@ -28,6 +28,8 @@
28#include <acpi/acpi_drivers.h> 28#include <acpi/acpi_drivers.h>
29#include <acpi/acpi_bus.h> 29#include <acpi/acpi_bus.h>
30#include <linux/uaccess.h> 30#include <linux/uaccess.h>
31#include <linux/input.h>
32#include <linux/rfkill.h>
31 33
32#define EEEPC_LAPTOP_VERSION "0.1" 34#define EEEPC_LAPTOP_VERSION "0.1"
33 35
@@ -125,6 +127,10 @@ struct eeepc_hotk {
125 by this BIOS */ 127 by this BIOS */
126 uint init_flag; /* Init flags */ 128 uint init_flag; /* Init flags */
127 u16 event_count[128]; /* count for each event */ 129 u16 event_count[128]; /* count for each event */
130 struct input_dev *inputdev;
131 u16 *keycode_map;
132 struct rfkill *eeepc_wlan_rfkill;
133 struct rfkill *eeepc_bluetooth_rfkill;
128}; 134};
129 135
130/* The actual device the driver binds to */ 136/* The actual device the driver binds to */
@@ -140,6 +146,27 @@ static struct platform_driver platform_driver = {
140 146
141static struct platform_device *platform_device; 147static struct platform_device *platform_device;
142 148
149struct key_entry {
150 char type;
151 u8 code;
152 u16 keycode;
153};
154
155enum { KE_KEY, KE_END };
156
157static struct key_entry eeepc_keymap[] = {
158 /* Sleep already handled via generic ACPI code */
159 {KE_KEY, 0x10, KEY_WLAN },
160 {KE_KEY, 0x12, KEY_PROG1 },
161 {KE_KEY, 0x13, KEY_MUTE },
162 {KE_KEY, 0x14, KEY_VOLUMEDOWN },
163 {KE_KEY, 0x15, KEY_VOLUMEUP },
164 {KE_KEY, 0x30, KEY_SWITCHVIDEOMODE },
165 {KE_KEY, 0x31, KEY_SWITCHVIDEOMODE },
166 {KE_KEY, 0x32, KEY_SWITCHVIDEOMODE },
167 {KE_END, 0},
168};
169
143/* 170/*
144 * The hotkey driver declaration 171 * The hotkey driver declaration
145 */ 172 */
@@ -204,7 +231,7 @@ static int write_acpi_int(acpi_handle handle, const char *method, int val,
204static int read_acpi_int(acpi_handle handle, const char *method, int *val) 231static int read_acpi_int(acpi_handle handle, const char *method, int *val)
205{ 232{
206 acpi_status status; 233 acpi_status status;
207 ulong result; 234 unsigned long long result;
208 235
209 status = acpi_evaluate_integer(handle, (char *)method, NULL, &result); 236 status = acpi_evaluate_integer(handle, (char *)method, NULL, &result);
210 if (ACPI_FAILURE(status)) { 237 if (ACPI_FAILURE(status)) {
@@ -261,6 +288,44 @@ static int update_bl_status(struct backlight_device *bd)
261} 288}
262 289
263/* 290/*
291 * Rfkill helpers
292 */
293
294static int eeepc_wlan_rfkill_set(void *data, enum rfkill_state state)
295{
296 if (state == RFKILL_STATE_SOFT_BLOCKED)
297 return set_acpi(CM_ASL_WLAN, 0);
298 else
299 return set_acpi(CM_ASL_WLAN, 1);
300}
301
302static int eeepc_wlan_rfkill_state(void *data, enum rfkill_state *state)
303{
304 if (get_acpi(CM_ASL_WLAN) == 1)
305 *state = RFKILL_STATE_UNBLOCKED;
306 else
307 *state = RFKILL_STATE_SOFT_BLOCKED;
308 return 0;
309}
310
311static int eeepc_bluetooth_rfkill_set(void *data, enum rfkill_state state)
312{
313 if (state == RFKILL_STATE_SOFT_BLOCKED)
314 return set_acpi(CM_ASL_BLUETOOTH, 0);
315 else
316 return set_acpi(CM_ASL_BLUETOOTH, 1);
317}
318
319static int eeepc_bluetooth_rfkill_state(void *data, enum rfkill_state *state)
320{
321 if (get_acpi(CM_ASL_BLUETOOTH) == 1)
322 *state = RFKILL_STATE_UNBLOCKED;
323 else
324 *state = RFKILL_STATE_SOFT_BLOCKED;
325 return 0;
326}
327
328/*
264 * Sys helpers 329 * Sys helpers
265 */ 330 */
266static int parse_arg(const char *buf, unsigned long count, int *val) 331static int parse_arg(const char *buf, unsigned long count, int *val)
@@ -311,13 +376,11 @@ static ssize_t show_sys_acpi(int cm, char *buf)
311EEEPC_CREATE_DEVICE_ATTR(camera, CM_ASL_CAMERA); 376EEEPC_CREATE_DEVICE_ATTR(camera, CM_ASL_CAMERA);
312EEEPC_CREATE_DEVICE_ATTR(cardr, CM_ASL_CARDREADER); 377EEEPC_CREATE_DEVICE_ATTR(cardr, CM_ASL_CARDREADER);
313EEEPC_CREATE_DEVICE_ATTR(disp, CM_ASL_DISPLAYSWITCH); 378EEEPC_CREATE_DEVICE_ATTR(disp, CM_ASL_DISPLAYSWITCH);
314EEEPC_CREATE_DEVICE_ATTR(wlan, CM_ASL_WLAN);
315 379
316static struct attribute *platform_attributes[] = { 380static struct attribute *platform_attributes[] = {
317 &dev_attr_camera.attr, 381 &dev_attr_camera.attr,
318 &dev_attr_cardr.attr, 382 &dev_attr_cardr.attr,
319 &dev_attr_disp.attr, 383 &dev_attr_disp.attr,
320 &dev_attr_wlan.attr,
321 NULL 384 NULL
322}; 385};
323 386
@@ -328,8 +391,64 @@ static struct attribute_group platform_attribute_group = {
328/* 391/*
329 * Hotkey functions 392 * Hotkey functions
330 */ 393 */
394static struct key_entry *eepc_get_entry_by_scancode(int code)
395{
396 struct key_entry *key;
397
398 for (key = eeepc_keymap; key->type != KE_END; key++)
399 if (code == key->code)
400 return key;
401
402 return NULL;
403}
404
405static struct key_entry *eepc_get_entry_by_keycode(int code)
406{
407 struct key_entry *key;
408
409 for (key = eeepc_keymap; key->type != KE_END; key++)
410 if (code == key->keycode && key->type == KE_KEY)
411 return key;
412
413 return NULL;
414}
415
416static int eeepc_getkeycode(struct input_dev *dev, int scancode, int *keycode)
417{
418 struct key_entry *key = eepc_get_entry_by_scancode(scancode);
419
420 if (key && key->type == KE_KEY) {
421 *keycode = key->keycode;
422 return 0;
423 }
424
425 return -EINVAL;
426}
427
428static int eeepc_setkeycode(struct input_dev *dev, int scancode, int keycode)
429{
430 struct key_entry *key;
431 int old_keycode;
432
433 if (keycode < 0 || keycode > KEY_MAX)
434 return -EINVAL;
435
436 key = eepc_get_entry_by_scancode(scancode);
437 if (key && key->type == KE_KEY) {
438 old_keycode = key->keycode;
439 key->keycode = keycode;
440 set_bit(keycode, dev->keybit);
441 if (!eepc_get_entry_by_keycode(old_keycode))
442 clear_bit(old_keycode, dev->keybit);
443 return 0;
444 }
445
446 return -EINVAL;
447}
448
331static int eeepc_hotk_check(void) 449static int eeepc_hotk_check(void)
332{ 450{
451 const struct key_entry *key;
333 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 452 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
334 int result; 453 int result;
335 454
@@ -356,6 +475,31 @@ static int eeepc_hotk_check(void)
356 "Get control methods supported: 0x%x\n", 475 "Get control methods supported: 0x%x\n",
357 ehotk->cm_supported); 476 ehotk->cm_supported);
358 } 477 }
478 ehotk->inputdev = input_allocate_device();
479 if (!ehotk->inputdev) {
480 printk(EEEPC_INFO "Unable to allocate input device\n");
481 return 0;
482 }
483 ehotk->inputdev->name = "Asus EeePC extra buttons";
484 ehotk->inputdev->phys = EEEPC_HOTK_FILE "/input0";
485 ehotk->inputdev->id.bustype = BUS_HOST;
486 ehotk->inputdev->getkeycode = eeepc_getkeycode;
487 ehotk->inputdev->setkeycode = eeepc_setkeycode;
488
489 for (key = eeepc_keymap; key->type != KE_END; key++) {
490 switch (key->type) {
491 case KE_KEY:
492 set_bit(EV_KEY, ehotk->inputdev->evbit);
493 set_bit(key->keycode, ehotk->inputdev->keybit);
494 break;
495 }
496 }
497 result = input_register_device(ehotk->inputdev);
498 if (result) {
499 printk(EEEPC_INFO "Unable to register input device\n");
500 input_free_device(ehotk->inputdev);
501 return 0;
502 }
359 } else { 503 } else {
360 printk(EEEPC_ERR "Hotkey device not present, aborting\n"); 504 printk(EEEPC_ERR "Hotkey device not present, aborting\n");
361 return -EINVAL; 505 return -EINVAL;
@@ -363,21 +507,6 @@ static int eeepc_hotk_check(void)
363 return 0; 507 return 0;
364} 508}
365 509
366static void notify_wlan(u32 *event)
367{
368 /* if DISABLE_ASL_WLAN is set, the notify code for fn+f2
369 will always be 0x10 */
370 if (ehotk->cm_supported & (0x1 << CM_ASL_WLAN)) {
371 const char *method = cm_getv[CM_ASL_WLAN];
372 int value;
373 if (read_acpi_int(ehotk->handle, method, &value))
374 printk(EEEPC_WARNING "Error reading %s\n",
375 method);
376 else if (value == 1)
377 *event = 0x11;
378 }
379}
380
381static void notify_brn(void) 510static void notify_brn(void)
382{ 511{
383 struct backlight_device *bd = eeepc_backlight_device; 512 struct backlight_device *bd = eeepc_backlight_device;
@@ -386,14 +515,28 @@ static void notify_brn(void)
386 515
387static void eeepc_hotk_notify(acpi_handle handle, u32 event, void *data) 516static void eeepc_hotk_notify(acpi_handle handle, u32 event, void *data)
388{ 517{
518 static struct key_entry *key;
389 if (!ehotk) 519 if (!ehotk)
390 return; 520 return;
391 if (event == NOTIFY_WLAN_ON && (DISABLE_ASL_WLAN & ehotk->init_flag))
392 notify_wlan(&event);
393 if (event >= NOTIFY_BRN_MIN && event <= NOTIFY_BRN_MAX) 521 if (event >= NOTIFY_BRN_MIN && event <= NOTIFY_BRN_MAX)
394 notify_brn(); 522 notify_brn();
395 acpi_bus_generate_proc_event(ehotk->device, event, 523 acpi_bus_generate_proc_event(ehotk->device, event,
396 ehotk->event_count[event % 128]++); 524 ehotk->event_count[event % 128]++);
525 if (ehotk->inputdev) {
526 key = eepc_get_entry_by_scancode(event);
527 if (key) {
528 switch (key->type) {
529 case KE_KEY:
530 input_report_key(ehotk->inputdev, key->keycode,
531 1);
532 input_sync(ehotk->inputdev);
533 input_report_key(ehotk->inputdev, key->keycode,
534 0);
535 input_sync(ehotk->inputdev);
536 break;
537 }
538 }
539 }
397} 540}
398 541
399static int eeepc_hotk_add(struct acpi_device *device) 542static int eeepc_hotk_add(struct acpi_device *device)
@@ -411,7 +554,7 @@ static int eeepc_hotk_add(struct acpi_device *device)
411 ehotk->handle = device->handle; 554 ehotk->handle = device->handle;
412 strcpy(acpi_device_name(device), EEEPC_HOTK_DEVICE_NAME); 555 strcpy(acpi_device_name(device), EEEPC_HOTK_DEVICE_NAME);
413 strcpy(acpi_device_class(device), EEEPC_HOTK_CLASS); 556 strcpy(acpi_device_class(device), EEEPC_HOTK_CLASS);
414 acpi_driver_data(device) = ehotk; 557 device->driver_data = ehotk;
415 ehotk->device = device; 558 ehotk->device = device;
416 result = eeepc_hotk_check(); 559 result = eeepc_hotk_check();
417 if (result) 560 if (result)
@@ -420,6 +563,47 @@ static int eeepc_hotk_add(struct acpi_device *device)
420 eeepc_hotk_notify, ehotk); 563 eeepc_hotk_notify, ehotk);
421 if (ACPI_FAILURE(status)) 564 if (ACPI_FAILURE(status))
422 printk(EEEPC_ERR "Error installing notify handler\n"); 565 printk(EEEPC_ERR "Error installing notify handler\n");
566
567 if (get_acpi(CM_ASL_WLAN) != -1) {
568 ehotk->eeepc_wlan_rfkill = rfkill_allocate(&device->dev,
569 RFKILL_TYPE_WLAN);
570
571 if (!ehotk->eeepc_wlan_rfkill)
572 goto end;
573
574 ehotk->eeepc_wlan_rfkill->name = "eeepc-wlan";
575 ehotk->eeepc_wlan_rfkill->toggle_radio = eeepc_wlan_rfkill_set;
576 ehotk->eeepc_wlan_rfkill->get_state = eeepc_wlan_rfkill_state;
577 if (get_acpi(CM_ASL_WLAN) == 1)
578 ehotk->eeepc_wlan_rfkill->state =
579 RFKILL_STATE_UNBLOCKED;
580 else
581 ehotk->eeepc_wlan_rfkill->state =
582 RFKILL_STATE_SOFT_BLOCKED;
583 rfkill_register(ehotk->eeepc_wlan_rfkill);
584 }
585
586 if (get_acpi(CM_ASL_BLUETOOTH) != -1) {
587 ehotk->eeepc_bluetooth_rfkill =
588 rfkill_allocate(&device->dev, RFKILL_TYPE_BLUETOOTH);
589
590 if (!ehotk->eeepc_bluetooth_rfkill)
591 goto end;
592
593 ehotk->eeepc_bluetooth_rfkill->name = "eeepc-bluetooth";
594 ehotk->eeepc_bluetooth_rfkill->toggle_radio =
595 eeepc_bluetooth_rfkill_set;
596 ehotk->eeepc_bluetooth_rfkill->get_state =
597 eeepc_bluetooth_rfkill_state;
598 if (get_acpi(CM_ASL_BLUETOOTH) == 1)
599 ehotk->eeepc_bluetooth_rfkill->state =
600 RFKILL_STATE_UNBLOCKED;
601 else
602 ehotk->eeepc_bluetooth_rfkill->state =
603 RFKILL_STATE_SOFT_BLOCKED;
604 rfkill_register(ehotk->eeepc_bluetooth_rfkill);
605 }
606
423 end: 607 end:
424 if (result) { 608 if (result) {
425 kfree(ehotk); 609 kfree(ehotk);
@@ -553,6 +737,12 @@ static void eeepc_backlight_exit(void)
553{ 737{
554 if (eeepc_backlight_device) 738 if (eeepc_backlight_device)
555 backlight_device_unregister(eeepc_backlight_device); 739 backlight_device_unregister(eeepc_backlight_device);
740 if (ehotk->inputdev)
741 input_unregister_device(ehotk->inputdev);
742 if (ehotk->eeepc_wlan_rfkill)
743 rfkill_unregister(ehotk->eeepc_wlan_rfkill);
744 if (ehotk->eeepc_bluetooth_rfkill)
745 rfkill_unregister(ehotk->eeepc_bluetooth_rfkill);
556 eeepc_backlight_device = NULL; 746 eeepc_backlight_device = NULL;
557} 747}
558 748
@@ -635,9 +825,15 @@ static int __init eeepc_laptop_init(void)
635 return -ENODEV; 825 return -ENODEV;
636 } 826 }
637 dev = acpi_get_physical_device(ehotk->device->handle); 827 dev = acpi_get_physical_device(ehotk->device->handle);
638 result = eeepc_backlight_init(dev); 828
639 if (result) 829 if (!acpi_video_backlight_support()) {
640 goto fail_backlight; 830 result = eeepc_backlight_init(dev);
831 if (result)
832 goto fail_backlight;
833 } else
834 printk(EEEPC_INFO "Backlight controlled by ACPI video "
835 "driver\n");
836
641 result = eeepc_hwmon_init(dev); 837 result = eeepc_hwmon_init(dev);
642 if (result) 838 if (result)
643 goto fail_hwmon; 839 goto fail_hwmon;
diff --git a/drivers/misc/fujitsu-laptop.c b/drivers/misc/fujitsu-laptop.c
index 3e56203e4947..a7dd3e9fb79d 100644
--- a/drivers/misc/fujitsu-laptop.c
+++ b/drivers/misc/fujitsu-laptop.c
@@ -44,8 +44,9 @@
44 * Hotkeys present on certain Fujitsu laptops (eg: the S6xxx series) are 44 * Hotkeys present on certain Fujitsu laptops (eg: the S6xxx series) are
45 * also supported by this driver. 45 * also supported by this driver.
46 * 46 *
47 * This driver has been tested on a Fujitsu Lifebook S6410 and S7020. It 47 * This driver has been tested on a Fujitsu Lifebook S6410, S7020 and
48 * should work on most P-series and S-series Lifebooks, but YMMV. 48 * P8010. It should work on most P-series and S-series Lifebooks, but
49 * YMMV.
49 * 50 *
50 * The module parameter use_alt_lcd_levels switches between different ACPI 51 * The module parameter use_alt_lcd_levels switches between different ACPI
51 * brightness controls which are used by different Fujitsu laptops. In most 52 * brightness controls which are used by different Fujitsu laptops. In most
@@ -65,7 +66,7 @@
65#include <linux/video_output.h> 66#include <linux/video_output.h>
66#include <linux/platform_device.h> 67#include <linux/platform_device.h>
67 68
68#define FUJITSU_DRIVER_VERSION "0.4.2" 69#define FUJITSU_DRIVER_VERSION "0.4.3"
69 70
70#define FUJITSU_LCD_N_LEVELS 8 71#define FUJITSU_LCD_N_LEVELS 8
71 72
@@ -83,10 +84,10 @@
83#define ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS 0x87 84#define ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS 0x87
84 85
85/* Hotkey details */ 86/* Hotkey details */
86#define LOCK_KEY 0x410 /* codes for the keys in the GIRB register */ 87#define KEY1_CODE 0x410 /* codes for the keys in the GIRB register */
87#define DISPLAY_KEY 0x411 /* keys are mapped to KEY_SCREENLOCK (the key with the key symbol) */ 88#define KEY2_CODE 0x411
88#define ENERGY_KEY 0x412 /* KEY_MEDIA (the key with the laptop symbol, KEY_EMAIL (E key)) */ 89#define KEY3_CODE 0x412
89#define REST_KEY 0x413 /* KEY_SUSPEND (R key) */ 90#define KEY4_CODE 0x413
90 91
91#define MAX_HOTKEY_RINGBUFFER_SIZE 100 92#define MAX_HOTKEY_RINGBUFFER_SIZE 100
92#define RINGBUFFERSIZE 40 93#define RINGBUFFERSIZE 40
@@ -123,6 +124,7 @@ struct fujitsu_t {
123 char phys[32]; 124 char phys[32];
124 struct backlight_device *bl_device; 125 struct backlight_device *bl_device;
125 struct platform_device *pf_device; 126 struct platform_device *pf_device;
127 int keycode1, keycode2, keycode3, keycode4;
126 128
127 unsigned int max_brightness; 129 unsigned int max_brightness;
128 unsigned int brightness_changed; 130 unsigned int brightness_changed;
@@ -224,7 +226,7 @@ static int set_lcd_level_alt(int level)
224 226
225static int get_lcd_level(void) 227static int get_lcd_level(void)
226{ 228{
227 unsigned long state = 0; 229 unsigned long long state = 0;
228 acpi_status status = AE_OK; 230 acpi_status status = AE_OK;
229 231
230 vdbg_printk(FUJLAPTOP_DBG_TRACE, "get lcd level via GBLL\n"); 232 vdbg_printk(FUJLAPTOP_DBG_TRACE, "get lcd level via GBLL\n");
@@ -246,7 +248,7 @@ static int get_lcd_level(void)
246 248
247static int get_max_brightness(void) 249static int get_max_brightness(void)
248{ 250{
249 unsigned long state = 0; 251 unsigned long long state = 0;
250 acpi_status status = AE_OK; 252 acpi_status status = AE_OK;
251 253
252 vdbg_printk(FUJLAPTOP_DBG_TRACE, "get max lcd level via RBLL\n"); 254 vdbg_printk(FUJLAPTOP_DBG_TRACE, "get max lcd level via RBLL\n");
@@ -263,7 +265,7 @@ static int get_max_brightness(void)
263 265
264static int get_lcd_level_alt(void) 266static int get_lcd_level_alt(void)
265{ 267{
266 unsigned long state = 0; 268 unsigned long long state = 0;
267 acpi_status status = AE_OK; 269 acpi_status status = AE_OK;
268 270
269 vdbg_printk(FUJLAPTOP_DBG_TRACE, "get lcd level via GBLS\n"); 271 vdbg_printk(FUJLAPTOP_DBG_TRACE, "get lcd level via GBLS\n");
@@ -384,7 +386,7 @@ static ssize_t store_lcd_level(struct device *dev,
384 386
385static int get_irb(void) 387static int get_irb(void)
386{ 388{
387 unsigned long state = 0; 389 unsigned long long state = 0;
388 acpi_status status = AE_OK; 390 acpi_status status = AE_OK;
389 391
390 vdbg_printk(FUJLAPTOP_DBG_TRACE, "Get irb\n"); 392 vdbg_printk(FUJLAPTOP_DBG_TRACE, "Get irb\n");
@@ -430,7 +432,7 @@ static struct platform_driver fujitsupf_driver = {
430 } 432 }
431}; 433};
432 434
433static int dmi_check_cb_s6410(const struct dmi_system_id *id) 435static void dmi_check_cb_common(const struct dmi_system_id *id)
434{ 436{
435 acpi_handle handle; 437 acpi_handle handle;
436 int have_blnf; 438 int have_blnf;
@@ -452,24 +454,55 @@ static int dmi_check_cb_s6410(const struct dmi_system_id *id)
452 "auto-detecting disable_adjust\n"); 454 "auto-detecting disable_adjust\n");
453 disable_brightness_adjust = have_blnf ? 0 : 1; 455 disable_brightness_adjust = have_blnf ? 0 : 1;
454 } 456 }
457}
458
459static int dmi_check_cb_s6410(const struct dmi_system_id *id)
460{
461 dmi_check_cb_common(id);
462 fujitsu->keycode1 = KEY_SCREENLOCK; /* "Lock" */
463 fujitsu->keycode2 = KEY_HELP; /* "Mobility Center" */
464 return 0;
465}
466
467static int dmi_check_cb_s6420(const struct dmi_system_id *id)
468{
469 dmi_check_cb_common(id);
470 fujitsu->keycode1 = KEY_SCREENLOCK; /* "Lock" */
471 fujitsu->keycode2 = KEY_HELP; /* "Mobility Center" */
472 return 0;
473}
474
475static int dmi_check_cb_p8010(const struct dmi_system_id *id)
476{
477 dmi_check_cb_common(id);
478 fujitsu->keycode1 = KEY_HELP; /* "Support" */
479 fujitsu->keycode3 = KEY_SWITCHVIDEOMODE; /* "Presentation" */
480 fujitsu->keycode4 = KEY_WWW; /* "Internet" */
455 return 0; 481 return 0;
456} 482}
457 483
458static struct dmi_system_id __initdata fujitsu_dmi_table[] = { 484static struct dmi_system_id fujitsu_dmi_table[] = {
459 { 485 {
460 .ident = "Fujitsu Siemens", 486 .ident = "Fujitsu Siemens S6410",
461 .matches = { 487 .matches = {
462 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), 488 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
463 DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK S6410"), 489 DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK S6410"),
464 }, 490 },
465 .callback = dmi_check_cb_s6410}, 491 .callback = dmi_check_cb_s6410},
466 { 492 {
467 .ident = "FUJITSU LifeBook P8010", 493 .ident = "Fujitsu Siemens S6420",
494 .matches = {
495 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
496 DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK S6420"),
497 },
498 .callback = dmi_check_cb_s6420},
499 {
500 .ident = "Fujitsu LifeBook P8010",
468 .matches = { 501 .matches = {
469 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), 502 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
470 DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook P8010"), 503 DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook P8010"),
471 }, 504 },
472 .callback = dmi_check_cb_s6410}, 505 .callback = dmi_check_cb_p8010},
473 {} 506 {}
474}; 507};
475 508
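The DMI table above is now model-specific. Purely as a hypothetical illustration of the pattern (no such entry exists in this patch), wiring up one more model would mean another callback that reuses dmi_check_cb_common() and overrides the default keycodes, plus a matching entry before the terminating {}:

/* Hypothetical example only; the S7220 is not handled by this patch. */
static int dmi_check_cb_s7220(const struct dmi_system_id *id)
{
	dmi_check_cb_common(id);
	fujitsu->keycode1 = KEY_SCREENLOCK;	/* assumed "Lock" key */
	fujitsu->keycode2 = KEY_HELP;		/* assumed "Mobility Center" key */
	return 0;
}

	{
	 .ident = "Fujitsu Siemens S7220",
	 .matches = {
		     DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
		     DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK S7220"),
		    },
	 .callback = dmi_check_cb_s7220},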
@@ -490,7 +523,7 @@ static int acpi_fujitsu_add(struct acpi_device *device)
490 fujitsu->acpi_handle = device->handle; 523 fujitsu->acpi_handle = device->handle;
491 sprintf(acpi_device_name(device), "%s", ACPI_FUJITSU_DEVICE_NAME); 524 sprintf(acpi_device_name(device), "%s", ACPI_FUJITSU_DEVICE_NAME);
492 sprintf(acpi_device_class(device), "%s", ACPI_FUJITSU_CLASS); 525 sprintf(acpi_device_class(device), "%s", ACPI_FUJITSU_CLASS);
493 acpi_driver_data(device) = fujitsu; 526 device->driver_data = fujitsu;
494 527
495 status = acpi_install_notify_handler(device->handle, 528 status = acpi_install_notify_handler(device->handle,
496 ACPI_DEVICE_NOTIFY, 529 ACPI_DEVICE_NOTIFY,
@@ -547,7 +580,6 @@ static int acpi_fujitsu_add(struct acpi_device *device)
547 } 580 }
548 581
549 /* do config (detect defaults) */ 582 /* do config (detect defaults) */
550 dmi_check_system(fujitsu_dmi_table);
551 use_alt_lcd_levels = use_alt_lcd_levels == 1 ? 1 : 0; 583 use_alt_lcd_levels = use_alt_lcd_levels == 1 ? 1 : 0;
552 disable_brightness_keys = disable_brightness_keys == 1 ? 1 : 0; 584 disable_brightness_keys = disable_brightness_keys == 1 ? 1 : 0;
553 disable_brightness_adjust = disable_brightness_adjust == 1 ? 1 : 0; 585 disable_brightness_adjust = disable_brightness_adjust == 1 ? 1 : 0;
@@ -623,17 +655,17 @@ static void acpi_fujitsu_notify(acpi_handle handle, u32 event, void *data)
623 keycode = 0; 655 keycode = 0;
624 if (disable_brightness_keys != 1) { 656 if (disable_brightness_keys != 1) {
625 if (oldb == 0) { 657 if (oldb == 0) {
626 acpi_bus_generate_proc_event(fujitsu-> 658 acpi_bus_generate_proc_event
627 dev, 659 (fujitsu->dev,
628 ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS, 660 ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS,
629 0); 661 0);
630 keycode = KEY_BRIGHTNESSDOWN; 662 keycode = KEY_BRIGHTNESSDOWN;
631 } else if (oldb == 663 } else if (oldb ==
632 (fujitsu->max_brightness) - 1) { 664 (fujitsu->max_brightness) - 1) {
633 acpi_bus_generate_proc_event(fujitsu-> 665 acpi_bus_generate_proc_event
634 dev, 666 (fujitsu->dev,
635 ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS, 667 ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS,
636 0); 668 0);
637 keycode = KEY_BRIGHTNESSUP; 669 keycode = KEY_BRIGHTNESSUP;
638 } 670 }
639 } 671 }
@@ -646,8 +678,7 @@ static void acpi_fujitsu_notify(acpi_handle handle, u32 event, void *data)
646 } 678 }
647 if (disable_brightness_keys != 1) { 679 if (disable_brightness_keys != 1) {
648 acpi_bus_generate_proc_event(fujitsu->dev, 680 acpi_bus_generate_proc_event(fujitsu->dev,
649 ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS, 681 ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS, 0);
650 0);
651 keycode = KEY_BRIGHTNESSUP; 682 keycode = KEY_BRIGHTNESSUP;
652 } 683 }
653 } else if (oldb > newb) { 684 } else if (oldb > newb) {
@@ -659,8 +690,7 @@ static void acpi_fujitsu_notify(acpi_handle handle, u32 event, void *data)
659 } 690 }
660 if (disable_brightness_keys != 1) { 691 if (disable_brightness_keys != 1) {
661 acpi_bus_generate_proc_event(fujitsu->dev, 692 acpi_bus_generate_proc_event(fujitsu->dev,
662 ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS, 693 ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS, 0);
663 0);
664 keycode = KEY_BRIGHTNESSDOWN; 694 keycode = KEY_BRIGHTNESSDOWN;
665 } 695 }
666 } else { 696 } else {
@@ -703,7 +733,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
703 sprintf(acpi_device_name(device), "%s", 733 sprintf(acpi_device_name(device), "%s",
704 ACPI_FUJITSU_HOTKEY_DEVICE_NAME); 734 ACPI_FUJITSU_HOTKEY_DEVICE_NAME);
705 sprintf(acpi_device_class(device), "%s", ACPI_FUJITSU_CLASS); 735 sprintf(acpi_device_class(device), "%s", ACPI_FUJITSU_CLASS);
706 acpi_driver_data(device) = fujitsu_hotkey; 736 device->driver_data = fujitsu_hotkey;
707 737
708 status = acpi_install_notify_handler(device->handle, 738 status = acpi_install_notify_handler(device->handle,
709 ACPI_DEVICE_NOTIFY, 739 ACPI_DEVICE_NOTIFY,
@@ -742,10 +772,10 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
742 input->id.product = 0x06; 772 input->id.product = 0x06;
743 input->dev.parent = &device->dev; 773 input->dev.parent = &device->dev;
744 input->evbit[0] = BIT(EV_KEY); 774 input->evbit[0] = BIT(EV_KEY);
745 set_bit(KEY_SCREENLOCK, input->keybit); 775 set_bit(fujitsu->keycode1, input->keybit);
746 set_bit(KEY_MEDIA, input->keybit); 776 set_bit(fujitsu->keycode2, input->keybit);
747 set_bit(KEY_EMAIL, input->keybit); 777 set_bit(fujitsu->keycode3, input->keybit);
748 set_bit(KEY_SUSPEND, input->keybit); 778 set_bit(fujitsu->keycode4, input->keybit);
749 set_bit(KEY_UNKNOWN, input->keybit); 779 set_bit(KEY_UNKNOWN, input->keybit);
750 780
751 error = input_register_device(input); 781 error = input_register_device(input);
@@ -833,24 +863,24 @@ static void acpi_fujitsu_hotkey_notify(acpi_handle handle, u32 event,
833 irb); 863 irb);
834 864
835 switch (irb & 0x4ff) { 865 switch (irb & 0x4ff) {
836 case LOCK_KEY: 866 case KEY1_CODE:
837 keycode = KEY_SCREENLOCK; 867 keycode = fujitsu->keycode1;
838 break; 868 break;
839 case DISPLAY_KEY: 869 case KEY2_CODE:
840 keycode = KEY_MEDIA; 870 keycode = fujitsu->keycode2;
841 break; 871 break;
842 case ENERGY_KEY: 872 case KEY3_CODE:
843 keycode = KEY_EMAIL; 873 keycode = fujitsu->keycode3;
844 break; 874 break;
845 case REST_KEY: 875 case KEY4_CODE:
846 keycode = KEY_SUSPEND; 876 keycode = fujitsu->keycode4;
847 break; 877 break;
848 case 0: 878 case 0:
849 keycode = 0; 879 keycode = 0;
850 break; 880 break;
851 default: 881 default:
852 vdbg_printk(FUJLAPTOP_DBG_WARN, 882 vdbg_printk(FUJLAPTOP_DBG_WARN,
853 "Unknown GIRB result [%x]\n", irb); 883 "Unknown GIRB result [%x]\n", irb);
854 keycode = -1; 884 keycode = -1;
855 break; 885 break;
856 } 886 }
@@ -859,12 +889,12 @@ static void acpi_fujitsu_hotkey_notify(acpi_handle handle, u32 event,
859 "Push keycode into ringbuffer [%d]\n", 889 "Push keycode into ringbuffer [%d]\n",
860 keycode); 890 keycode);
861 status = kfifo_put(fujitsu_hotkey->fifo, 891 status = kfifo_put(fujitsu_hotkey->fifo,
862 (unsigned char *)&keycode, 892 (unsigned char *)&keycode,
863 sizeof(keycode)); 893 sizeof(keycode));
864 if (status != sizeof(keycode)) { 894 if (status != sizeof(keycode)) {
865 vdbg_printk(FUJLAPTOP_DBG_WARN, 895 vdbg_printk(FUJLAPTOP_DBG_WARN,
866 "Could not push keycode [0x%x]\n", 896 "Could not push keycode [0x%x]\n",
867 keycode); 897 keycode);
868 } else { 898 } else {
869 input_report_key(input, keycode, 1); 899 input_report_key(input, keycode, 1);
870 input_sync(input); 900 input_sync(input);
@@ -879,8 +909,8 @@ static void acpi_fujitsu_hotkey_notify(acpi_handle handle, u32 event,
879 input_report_key(input, keycode_r, 0); 909 input_report_key(input, keycode_r, 0);
880 input_sync(input); 910 input_sync(input);
881 vdbg_printk(FUJLAPTOP_DBG_TRACE, 911 vdbg_printk(FUJLAPTOP_DBG_TRACE,
882 "Pop keycode from ringbuffer [%d]\n", 912 "Pop keycode from ringbuffer [%d]\n",
883 keycode_r); 913 keycode_r);
884 } 914 }
885 } 915 }
886 } 916 }
@@ -943,6 +973,11 @@ static int __init fujitsu_init(void)
943 if (!fujitsu) 973 if (!fujitsu)
944 return -ENOMEM; 974 return -ENOMEM;
945 memset(fujitsu, 0, sizeof(struct fujitsu_t)); 975 memset(fujitsu, 0, sizeof(struct fujitsu_t));
976 fujitsu->keycode1 = KEY_PROG1;
977 fujitsu->keycode2 = KEY_PROG2;
978 fujitsu->keycode3 = KEY_PROG3;
979 fujitsu->keycode4 = KEY_PROG4;
980 dmi_check_system(fujitsu_dmi_table);
946 981
947 result = acpi_bus_register_driver(&acpi_fujitsu_driver); 982 result = acpi_bus_register_driver(&acpi_fujitsu_driver);
948 if (result < 0) { 983 if (result < 0) {
@@ -970,16 +1005,16 @@ static int __init fujitsu_init(void)
970 1005
971 /* Register backlight stuff */ 1006 /* Register backlight stuff */
972 1007
973 fujitsu->bl_device = 1008 if (!acpi_video_backlight_support()) {
974 backlight_device_register("fujitsu-laptop", NULL, NULL, 1009 fujitsu->bl_device =
975 &fujitsubl_ops); 1010 backlight_device_register("fujitsu-laptop", NULL, NULL,
976 if (IS_ERR(fujitsu->bl_device)) 1011 &fujitsubl_ops);
977 return PTR_ERR(fujitsu->bl_device); 1012 if (IS_ERR(fujitsu->bl_device))
978 1013 return PTR_ERR(fujitsu->bl_device);
979 max_brightness = fujitsu->max_brightness; 1014 max_brightness = fujitsu->max_brightness;
980 1015 fujitsu->bl_device->props.max_brightness = max_brightness - 1;
981 fujitsu->bl_device->props.max_brightness = max_brightness - 1; 1016 fujitsu->bl_device->props.brightness = fujitsu->brightness_level;
982 fujitsu->bl_device->props.brightness = fujitsu->brightness_level; 1017 }
983 1018
984 ret = platform_driver_register(&fujitsupf_driver); 1019 ret = platform_driver_register(&fujitsupf_driver);
985 if (ret) 1020 if (ret)
@@ -1015,7 +1050,8 @@ fail_hotkey:
1015 1050
1016fail_backlight: 1051fail_backlight:
1017 1052
1018 backlight_device_unregister(fujitsu->bl_device); 1053 if (fujitsu->bl_device)
1054 backlight_device_unregister(fujitsu->bl_device);
1019 1055
1020fail_platform_device2: 1056fail_platform_device2:
1021 1057
@@ -1042,7 +1078,8 @@ static void __exit fujitsu_cleanup(void)
1042 &fujitsupf_attribute_group); 1078 &fujitsupf_attribute_group);
1043 platform_device_unregister(fujitsu->pf_device); 1079 platform_device_unregister(fujitsu->pf_device);
1044 platform_driver_unregister(&fujitsupf_driver); 1080 platform_driver_unregister(&fujitsupf_driver);
1045 backlight_device_unregister(fujitsu->bl_device); 1081 if (fujitsu->bl_device)
1082 backlight_device_unregister(fujitsu->bl_device);
1046 1083
1047 acpi_bus_unregister_driver(&acpi_fujitsu_driver); 1084 acpi_bus_unregister_driver(&acpi_fujitsu_driver);
1048 1085
@@ -1076,15 +1113,14 @@ MODULE_DESCRIPTION("Fujitsu laptop extras support");
1076MODULE_VERSION(FUJITSU_DRIVER_VERSION); 1113MODULE_VERSION(FUJITSU_DRIVER_VERSION);
1077MODULE_LICENSE("GPL"); 1114MODULE_LICENSE("GPL");
1078 1115
1079MODULE_ALIAS 1116MODULE_ALIAS("dmi:*:svnFUJITSUSIEMENS:*:pvr:rvnFUJITSU:rnFJNB1D3:*:cvrS6410:*");
1080 ("dmi:*:svnFUJITSUSIEMENS:*:pvr:rvnFUJITSU:rnFJNB1D3:*:cvrS6410:*"); 1117MODULE_ALIAS("dmi:*:svnFUJITSU:*:pvr:rvnFUJITSU:rnFJNB19C:*:cvrS7020:*");
1081MODULE_ALIAS
1082 ("dmi:*:svnFUJITSU:*:pvr:rvnFUJITSU:rnFJNB19C:*:cvrS7020:*");
1083 1118
1084static struct pnp_device_id pnp_ids[] = { 1119static struct pnp_device_id pnp_ids[] = {
1085 { .id = "FUJ02bf" }, 1120 {.id = "FUJ02bf"},
1086 { .id = "FUJ02B1" }, 1121 {.id = "FUJ02B1"},
1087 { .id = "FUJ02E3" }, 1122 {.id = "FUJ02E3"},
1088 { .id = "" } 1123 {.id = ""}
1089}; 1124};
1125
1090MODULE_DEVICE_TABLE(pnp, pnp_ids); 1126MODULE_DEVICE_TABLE(pnp, pnp_ids);
diff --git a/drivers/misc/hdpuftrs/hdpu_nexus.c b/drivers/misc/hdpuftrs/hdpu_nexus.c
index 08e26beefe64..ce39fa54949b 100644
--- a/drivers/misc/hdpuftrs/hdpu_nexus.c
+++ b/drivers/misc/hdpuftrs/hdpu_nexus.c
@@ -113,7 +113,6 @@ static int hdpu_nexus_probe(struct platform_device *pdev)
113 if (!hdpu_chassis_id) 113 if (!hdpu_chassis_id)
114 printk(KERN_WARNING "sky_nexus: " 114 printk(KERN_WARNING "sky_nexus: "
115 "Unable to create proc dir entry: sky_chassis_id\n"); 115 "Unable to create proc dir entry: sky_chassis_id\n");
116 }
117 116
118 return 0; 117 return 0;
119} 118}
diff --git a/drivers/misc/ics932s401.c b/drivers/misc/ics932s401.c
new file mode 100644
index 000000000000..6e43ab4231ae
--- /dev/null
+++ b/drivers/misc/ics932s401.c
@@ -0,0 +1,515 @@
1/*
2 * A driver for the Integrated Circuits ICS932S401
3 * Copyright (C) 2008 IBM
4 *
5 * Author: Darrick J. Wong <djwong@us.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22#include <linux/module.h>
23#include <linux/jiffies.h>
24#include <linux/i2c.h>
25#include <linux/err.h>
26#include <linux/mutex.h>
27#include <linux/delay.h>
28#include <linux/log2.h>
29
30/* Addresses to scan */
31static const unsigned short normal_i2c[] = { 0x69, I2C_CLIENT_END };
32
33/* Insmod parameters */
34I2C_CLIENT_INSMOD_1(ics932s401);
35
36/* ICS932S401 registers */
37#define ICS932S401_REG_CFG2 0x01
38#define ICS932S401_CFG1_SPREAD 0x01
39#define ICS932S401_REG_CFG7 0x06
40#define ICS932S401_FS_MASK 0x07
41#define ICS932S401_REG_VENDOR_REV 0x07
42#define ICS932S401_VENDOR 1
43#define ICS932S401_VENDOR_MASK 0x0F
44#define ICS932S401_REV 4
45#define ICS932S401_REV_SHIFT 4
46#define ICS932S401_REG_DEVICE 0x09
47#define ICS932S401_DEVICE 11
48#define ICS932S401_REG_CTRL 0x0A
49#define ICS932S401_MN_ENABLED 0x80
50#define ICS932S401_CPU_ALT 0x04
51#define ICS932S401_SRC_ALT 0x08
52#define ICS932S401_REG_CPU_M_CTRL 0x0B
53#define ICS932S401_M_MASK 0x3F
54#define ICS932S401_REG_CPU_N_CTRL 0x0C
55#define ICS932S401_REG_CPU_SPREAD1 0x0D
56#define ICS932S401_REG_CPU_SPREAD2 0x0E
57#define ICS932S401_SPREAD_MASK 0x7FFF
58#define ICS932S401_REG_SRC_M_CTRL 0x0F
59#define ICS932S401_REG_SRC_N_CTRL 0x10
60#define ICS932S401_REG_SRC_SPREAD1 0x11
61#define ICS932S401_REG_SRC_SPREAD2 0x12
62#define ICS932S401_REG_CPU_DIVISOR 0x13
63#define ICS932S401_CPU_DIVISOR_SHIFT 4
64#define ICS932S401_REG_PCISRC_DIVISOR 0x14
65#define ICS932S401_SRC_DIVISOR_MASK 0x0F
66#define ICS932S401_PCI_DIVISOR_SHIFT 4
67
68/* Base clock is 14.318MHz */
69#define BASE_CLOCK 14318
70
71#define NUM_REGS 21
72#define NUM_MIRRORED_REGS 15
73
74static int regs_to_copy[NUM_MIRRORED_REGS] = {
75 ICS932S401_REG_CFG2,
76 ICS932S401_REG_CFG7,
77 ICS932S401_REG_VENDOR_REV,
78 ICS932S401_REG_DEVICE,
79 ICS932S401_REG_CTRL,
80 ICS932S401_REG_CPU_M_CTRL,
81 ICS932S401_REG_CPU_N_CTRL,
82 ICS932S401_REG_CPU_SPREAD1,
83 ICS932S401_REG_CPU_SPREAD2,
84 ICS932S401_REG_SRC_M_CTRL,
85 ICS932S401_REG_SRC_N_CTRL,
86 ICS932S401_REG_SRC_SPREAD1,
87 ICS932S401_REG_SRC_SPREAD2,
88 ICS932S401_REG_CPU_DIVISOR,
89 ICS932S401_REG_PCISRC_DIVISOR,
90};
91
 92/* How often do we reread sensor values? (In jiffies) */
93#define SENSOR_REFRESH_INTERVAL (2 * HZ)
94
95/* How often do we reread sensor limit values? (In jiffies) */
96#define LIMIT_REFRESH_INTERVAL (60 * HZ)
97
98struct ics932s401_data {
99 struct attribute_group attrs;
100 struct mutex lock;
101 char sensors_valid;
102 unsigned long sensors_last_updated; /* In jiffies */
103
104 u8 regs[NUM_REGS];
105};
106
107static int ics932s401_probe(struct i2c_client *client,
108 const struct i2c_device_id *id);
109static int ics932s401_detect(struct i2c_client *client, int kind,
110 struct i2c_board_info *info);
111static int ics932s401_remove(struct i2c_client *client);
112
113static const struct i2c_device_id ics932s401_id[] = {
114 { "ics932s401", ics932s401 },
115 { }
116};
117MODULE_DEVICE_TABLE(i2c, ics932s401_id);
118
119static struct i2c_driver ics932s401_driver = {
120 .class = I2C_CLASS_HWMON,
121 .driver = {
122 .name = "ics932s401",
123 },
124 .probe = ics932s401_probe,
125 .remove = ics932s401_remove,
126 .id_table = ics932s401_id,
127 .detect = ics932s401_detect,
128 .address_data = &addr_data,
129};
130
131static struct ics932s401_data *ics932s401_update_device(struct device *dev)
132{
133 struct i2c_client *client = to_i2c_client(dev);
134 struct ics932s401_data *data = i2c_get_clientdata(client);
135 unsigned long local_jiffies = jiffies;
136 int i, temp;
137
138 mutex_lock(&data->lock);
139 if (time_before(local_jiffies, data->sensors_last_updated +
140 SENSOR_REFRESH_INTERVAL)
141 && data->sensors_valid)
142 goto out;
143
144 /*
145 * Each register must be read as a word and then right shifted 8 bits.
146 * Not really sure why this is; setting the "byte count programming"
147 * register to 1 does not fix this problem.
148 */
149 for (i = 0; i < NUM_MIRRORED_REGS; i++) {
150 temp = i2c_smbus_read_word_data(client, regs_to_copy[i]);
151 data->regs[regs_to_copy[i]] = temp >> 8;
152 }
153
154 data->sensors_last_updated = local_jiffies;
155 data->sensors_valid = 1;
156
157out:
158 mutex_unlock(&data->lock);
159 return data;
160}
161
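For orientation only, not part of the driver: a minimal stand-alone sketch of the register-mirroring quirk described in ics932s401_update_device() above, where the useful byte of each word read ends up in the upper byte, so the driver keeps (word >> 8). The word value below is invented.

#include <stdio.h>

int main(void)
{
	int word = 0x4B00;		/* hypothetical i2c_smbus_read_word_data() result */
	unsigned char reg = word >> 8;	/* mirrored register value: 0x4B */

	printf("0x%02X\n", reg);
	return 0;
}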
162static ssize_t show_spread_enabled(struct device *dev,
163 struct device_attribute *devattr,
164 char *buf)
165{
166 struct ics932s401_data *data = ics932s401_update_device(dev);
167
168 if (data->regs[ICS932S401_REG_CFG2] & ICS932S401_CFG1_SPREAD)
169 return sprintf(buf, "1\n");
170
171 return sprintf(buf, "0\n");
172}
173
174/* FS field value to CPU kHz map */
175static const int fs_speeds[] = {
176 266666,
177 133333,
178 200000,
179 166666,
180 333333,
181 100000,
182 400000,
183 0,
184};
185
186/* clock divisor map */
187static const int divisors[] = {2, 3, 5, 15, 4, 6, 10, 30, 8, 12, 20, 60, 16,
188 24, 40, 120};
189
190/* Calculate CPU frequency from the M/N registers. */
191static int calculate_cpu_freq(struct ics932s401_data *data)
192{
193 int m, n, freq;
194
195 m = data->regs[ICS932S401_REG_CPU_M_CTRL] & ICS932S401_M_MASK;
196 n = data->regs[ICS932S401_REG_CPU_N_CTRL];
197
198 /* Pull in bits 8 & 9 from the M register */
199 n |= ((int)data->regs[ICS932S401_REG_CPU_M_CTRL] & 0x80) << 1;
200 n |= ((int)data->regs[ICS932S401_REG_CPU_M_CTRL] & 0x40) << 3;
201
202 freq = BASE_CLOCK * (n + 8) / (m + 2);
203 freq /= divisors[data->regs[ICS932S401_REG_CPU_DIVISOR] >>
204 ICS932S401_CPU_DIVISOR_SHIFT];
205
206 return freq;
207}
208
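As a quick sanity check of the M/N arithmetic above (a sketch with made-up register values, not part of the driver), the formula freq = 14318 kHz * (N + 8) / (M + 2) / divisor can be exercised in user space:

#include <stdio.h>

int main(void)
{
	const int base_clock = 14318;			/* kHz, as in the driver */
	const int divisors[] = {2, 3, 5, 15, 4, 6, 10, 30,
				8, 12, 20, 60, 16, 24, 40, 120};
	int m = 3, n = 92, div_idx = 0;			/* hypothetical register values */
	int freq = base_clock * (n + 8) / (m + 2);	/* 14318 * 100 / 5 = 286360 kHz */

	freq /= divisors[div_idx];			/* divisor 2 -> 143180 kHz */
	printf("%d kHz\n", freq);
	return 0;
}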
209static ssize_t show_cpu_clock(struct device *dev,
210 struct device_attribute *devattr,
211 char *buf)
212{
213 struct ics932s401_data *data = ics932s401_update_device(dev);
214
215 return sprintf(buf, "%d\n", calculate_cpu_freq(data));
216}
217
218static ssize_t show_cpu_clock_sel(struct device *dev,
219 struct device_attribute *devattr,
220 char *buf)
221{
222 struct ics932s401_data *data = ics932s401_update_device(dev);
223 int freq;
224
225 if (data->regs[ICS932S401_REG_CTRL] & ICS932S401_MN_ENABLED)
226 freq = calculate_cpu_freq(data);
227 else {
228 /* Freq is neatly wrapped up for us */
229 int fid = data->regs[ICS932S401_REG_CFG7] & ICS932S401_FS_MASK;
230 freq = fs_speeds[fid];
231 if (data->regs[ICS932S401_REG_CTRL] & ICS932S401_CPU_ALT) {
232 switch (freq) {
233 case 166666:
234 freq = 160000;
235 break;
236 case 333333:
237 freq = 320000;
238 break;
239 }
240 }
241 }
242
243 return sprintf(buf, "%d\n", freq);
244}
245
246/* Calculate SRC frequency from the M/N registers. */
247static int calculate_src_freq(struct ics932s401_data *data)
248{
249 int m, n, freq;
250
251 m = data->regs[ICS932S401_REG_SRC_M_CTRL] & ICS932S401_M_MASK;
252 n = data->regs[ICS932S401_REG_SRC_N_CTRL];
253
254 /* Pull in bits 8 & 9 from the M register */
255 n |= ((int)data->regs[ICS932S401_REG_SRC_M_CTRL] & 0x80) << 1;
256 n |= ((int)data->regs[ICS932S401_REG_SRC_M_CTRL] & 0x40) << 3;
257
258 freq = BASE_CLOCK * (n + 8) / (m + 2);
259 freq /= divisors[data->regs[ICS932S401_REG_PCISRC_DIVISOR] &
260 ICS932S401_SRC_DIVISOR_MASK];
261
262 return freq;
263}
264
265static ssize_t show_src_clock(struct device *dev,
266 struct device_attribute *devattr,
267 char *buf)
268{
269 struct ics932s401_data *data = ics932s401_update_device(dev);
270
271 return sprintf(buf, "%d\n", calculate_src_freq(data));
272}
273
274static ssize_t show_src_clock_sel(struct device *dev,
275 struct device_attribute *devattr,
276 char *buf)
277{
278 struct ics932s401_data *data = ics932s401_update_device(dev);
279 int freq;
280
281 if (data->regs[ICS932S401_REG_CTRL] & ICS932S401_MN_ENABLED)
282 freq = calculate_src_freq(data);
283 else
284 /* Freq is neatly wrapped up for us */
285 if (data->regs[ICS932S401_REG_CTRL] & ICS932S401_CPU_ALT &&
286 data->regs[ICS932S401_REG_CTRL] & ICS932S401_SRC_ALT)
287 freq = 96000;
288 else
289 freq = 100000;
290
291 return sprintf(buf, "%d\n", freq);
292}
293
294/* Calculate PCI frequency from the SRC M/N registers. */
295static int calculate_pci_freq(struct ics932s401_data *data)
296{
297 int m, n, freq;
298
299 m = data->regs[ICS932S401_REG_SRC_M_CTRL] & ICS932S401_M_MASK;
300 n = data->regs[ICS932S401_REG_SRC_N_CTRL];
301
302 /* Pull in bits 8 & 9 from the M register */
303 n |= ((int)data->regs[ICS932S401_REG_SRC_M_CTRL] & 0x80) << 1;
304 n |= ((int)data->regs[ICS932S401_REG_SRC_M_CTRL] & 0x40) << 3;
305
306 freq = BASE_CLOCK * (n + 8) / (m + 2);
307 freq /= divisors[data->regs[ICS932S401_REG_PCISRC_DIVISOR] >>
308 ICS932S401_PCI_DIVISOR_SHIFT];
309
310 return freq;
311}
312
313static ssize_t show_pci_clock(struct device *dev,
314 struct device_attribute *devattr,
315 char *buf)
316{
317 struct ics932s401_data *data = ics932s401_update_device(dev);
318
319 return sprintf(buf, "%d\n", calculate_pci_freq(data));
320}
321
322static ssize_t show_pci_clock_sel(struct device *dev,
323 struct device_attribute *devattr,
324 char *buf)
325{
326 struct ics932s401_data *data = ics932s401_update_device(dev);
327 int freq;
328
329 if (data->regs[ICS932S401_REG_CTRL] & ICS932S401_MN_ENABLED)
330 freq = calculate_pci_freq(data);
331 else
332 freq = 33333;
333
334 return sprintf(buf, "%d\n", freq);
335}
336
337static ssize_t show_value(struct device *dev,
338 struct device_attribute *devattr,
339 char *buf);
340
341static ssize_t show_spread(struct device *dev,
342 struct device_attribute *devattr,
343 char *buf);
344
345static DEVICE_ATTR(spread_enabled, S_IRUGO, show_spread_enabled, NULL);
346static DEVICE_ATTR(cpu_clock_selection, S_IRUGO, show_cpu_clock_sel, NULL);
347static DEVICE_ATTR(cpu_clock, S_IRUGO, show_cpu_clock, NULL);
348static DEVICE_ATTR(src_clock_selection, S_IRUGO, show_src_clock_sel, NULL);
349static DEVICE_ATTR(src_clock, S_IRUGO, show_src_clock, NULL);
350static DEVICE_ATTR(pci_clock_selection, S_IRUGO, show_pci_clock_sel, NULL);
351static DEVICE_ATTR(pci_clock, S_IRUGO, show_pci_clock, NULL);
352static DEVICE_ATTR(usb_clock, S_IRUGO, show_value, NULL);
353static DEVICE_ATTR(ref_clock, S_IRUGO, show_value, NULL);
354static DEVICE_ATTR(cpu_spread, S_IRUGO, show_spread, NULL);
355static DEVICE_ATTR(src_spread, S_IRUGO, show_spread, NULL);
356
357static struct attribute *ics932s401_attr[] =
358{
359 &dev_attr_spread_enabled.attr,
360 &dev_attr_cpu_clock_selection.attr,
361 &dev_attr_cpu_clock.attr,
362 &dev_attr_src_clock_selection.attr,
363 &dev_attr_src_clock.attr,
364 &dev_attr_pci_clock_selection.attr,
365 &dev_attr_pci_clock.attr,
366 &dev_attr_usb_clock.attr,
367 &dev_attr_ref_clock.attr,
368 &dev_attr_cpu_spread.attr,
369 &dev_attr_src_spread.attr,
370 NULL
371};
372
373static ssize_t show_value(struct device *dev,
374 struct device_attribute *devattr,
375 char *buf)
376{
377 int x;
378
379 if (devattr == &dev_attr_usb_clock)
380 x = 48000;
381 else if (devattr == &dev_attr_ref_clock)
382 x = BASE_CLOCK;
383 else
384 BUG();
385
386 return sprintf(buf, "%d\n", x);
387}
388
389static ssize_t show_spread(struct device *dev,
390 struct device_attribute *devattr,
391 char *buf)
392{
393 struct ics932s401_data *data = ics932s401_update_device(dev);
394 int reg;
395 unsigned long val;
396
397 if (!(data->regs[ICS932S401_REG_CFG2] & ICS932S401_CFG1_SPREAD))
398 return sprintf(buf, "0%%\n");
399
400 if (devattr == &dev_attr_src_spread)
401 reg = ICS932S401_REG_SRC_SPREAD1;
402 else if (devattr == &dev_attr_cpu_spread)
403 reg = ICS932S401_REG_CPU_SPREAD1;
404 else
405 BUG();
406
407 val = data->regs[reg] | (data->regs[reg + 1] << 8);
408 val &= ICS932S401_SPREAD_MASK;
409
410	/* Scale 0..2^14 to 0..0.5%, reported as a negative (down) spread. */
411 val = 500000 * val / 16384;
412 return sprintf(buf, "-0.%lu%%\n", val);
413}
414
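A worked example of the spread math above, with an invented raw register value (a sketch, not part of the driver), showing how the 15-bit spread field becomes the "-0.xxxxxx%" string:

#include <stdio.h>

int main(void)
{
	unsigned long raw = 8192;		/* hypothetical SPREAD1/2 contents */
	unsigned long val = raw & 0x7FFF;	/* ICS932S401_SPREAD_MASK */

	val = 500000 * val / 16384;		/* scale 0..2^14 to 0..500000 */
	printf("-0.%lu%%\n", val);		/* prints "-0.250000%", i.e. -0.25% */
	return 0;
}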
415/* Return 0 if detection is successful, -ENODEV otherwise */
416static int ics932s401_detect(struct i2c_client *client, int kind,
417 struct i2c_board_info *info)
418{
419 struct i2c_adapter *adapter = client->adapter;
420
421 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
422 return -ENODEV;
423
424 if (kind <= 0) {
425 int vendor, device, revision;
426
427 vendor = i2c_smbus_read_word_data(client,
428 ICS932S401_REG_VENDOR_REV);
429 vendor >>= 8;
430 revision = vendor >> ICS932S401_REV_SHIFT;
431 vendor &= ICS932S401_VENDOR_MASK;
432 if (vendor != ICS932S401_VENDOR)
433 return -ENODEV;
434
435 device = i2c_smbus_read_word_data(client,
436 ICS932S401_REG_DEVICE);
437 device >>= 8;
438 if (device != ICS932S401_DEVICE)
439 return -ENODEV;
440
441 if (revision != ICS932S401_REV)
442 dev_info(&adapter->dev, "Unknown revision %d\n",
443 revision);
444 } else
445 dev_dbg(&adapter->dev, "detection forced\n");
446
447 strlcpy(info->type, "ics932s401", I2C_NAME_SIZE);
448
449 return 0;
450}
451
452static int ics932s401_probe(struct i2c_client *client,
453 const struct i2c_device_id *id)
454{
455 struct ics932s401_data *data;
456 int err;
457
458 data = kzalloc(sizeof(struct ics932s401_data), GFP_KERNEL);
459 if (!data) {
460 err = -ENOMEM;
461 goto exit;
462 }
463
464 i2c_set_clientdata(client, data);
465 mutex_init(&data->lock);
466
467 dev_info(&client->dev, "%s chip found\n", client->name);
468
469 /* Register sysfs hooks */
470 data->attrs.attrs = ics932s401_attr;
471 err = sysfs_create_group(&client->dev.kobj, &data->attrs);
472 if (err)
473 goto exit_free;
474
475 return 0;
476
477exit_free:
478 kfree(data);
479exit:
480 return err;
481}
482
483static int ics932s401_remove(struct i2c_client *client)
484{
485 struct ics932s401_data *data = i2c_get_clientdata(client);
486
487 sysfs_remove_group(&client->dev.kobj, &data->attrs);
488 kfree(data);
489 return 0;
490}
491
492static int __init ics932s401_init(void)
493{
494 return i2c_add_driver(&ics932s401_driver);
495}
496
497static void __exit ics932s401_exit(void)
498{
499 i2c_del_driver(&ics932s401_driver);
500}
501
502MODULE_AUTHOR("Darrick J. Wong <djwong@us.ibm.com>");
503MODULE_DESCRIPTION("ICS932S401 driver");
504MODULE_LICENSE("GPL");
505
506module_init(ics932s401_init);
507module_exit(ics932s401_exit);
508
509/* IBM IntelliStation Z30 */
510MODULE_ALIAS("dmi:bvnIBM:*:rn9228:*");
511MODULE_ALIAS("dmi:bvnIBM:*:rn9232:*");
512
513/* IBM x3650/x3550 */
514MODULE_ALIAS("dmi:bvnIBM:*:pnIBMSystemx3650*");
515MODULE_ALIAS("dmi:bvnIBM:*:pnIBMSystemx3550*");
diff --git a/drivers/misc/intel_menlow.c b/drivers/misc/intel_menlow.c
index 80a136352408..27b7662955bb 100644
--- a/drivers/misc/intel_menlow.c
+++ b/drivers/misc/intel_menlow.c
@@ -52,12 +52,17 @@ MODULE_LICENSE("GPL");
52#define MEMORY_ARG_CUR_BANDWIDTH 1 52#define MEMORY_ARG_CUR_BANDWIDTH 1
53#define MEMORY_ARG_MAX_BANDWIDTH 0 53#define MEMORY_ARG_MAX_BANDWIDTH 0
54 54
55/*
56 * GTHS returning 'n' would mean that [0,n-1] states are supported
56 * In that case max_state would be n-1
58 * GTHS returning '0' would mean that no bandwidth control states are supported
59 */
55static int memory_get_int_max_bandwidth(struct thermal_cooling_device *cdev, 60static int memory_get_int_max_bandwidth(struct thermal_cooling_device *cdev,
56 unsigned long *max_state) 61 unsigned long *max_state)
57{ 62{
58 struct acpi_device *device = cdev->devdata; 63 struct acpi_device *device = cdev->devdata;
59 acpi_handle handle = device->handle; 64 acpi_handle handle = device->handle;
60 unsigned long value; 65 unsigned long long value;
61 struct acpi_object_list arg_list; 66 struct acpi_object_list arg_list;
62 union acpi_object arg; 67 union acpi_object arg;
63 acpi_status status = AE_OK; 68 acpi_status status = AE_OK;
@@ -71,6 +76,9 @@ static int memory_get_int_max_bandwidth(struct thermal_cooling_device *cdev,
71 if (ACPI_FAILURE(status)) 76 if (ACPI_FAILURE(status))
72 return -EFAULT; 77 return -EFAULT;
73 78
79 if (!value)
80 return -EINVAL;
81
74 *max_state = value - 1; 82 *max_state = value - 1;
75 return 0; 83 return 0;
76} 84}
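Purely as an illustration of the GTHS convention documented above (values invented, not from this patch): a return of n exposes states 0..n-1, so max_state is n-1, and a return of 0 means no bandwidth throttling states at all, which is what the new -EINVAL check rejects.

#include <stdio.h>

int main(void)
{
	unsigned long long gths = 4;	/* hypothetical GTHS result */

	if (!gths)
		printf("no bandwidth control states\n");
	else
		printf("valid states 0..%llu, max_state = %llu\n",
		       gths - 1, gths - 1);
	return 0;
}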
@@ -90,7 +98,7 @@ static int memory_get_cur_bandwidth(struct thermal_cooling_device *cdev,
90{ 98{
91 struct acpi_device *device = cdev->devdata; 99 struct acpi_device *device = cdev->devdata;
92 acpi_handle handle = device->handle; 100 acpi_handle handle = device->handle;
93 unsigned long value; 101 unsigned long long value;
94 struct acpi_object_list arg_list; 102 struct acpi_object_list arg_list;
95 union acpi_object arg; 103 union acpi_object arg;
96 acpi_status status = AE_OK; 104 acpi_status status = AE_OK;
@@ -104,7 +112,7 @@ static int memory_get_cur_bandwidth(struct thermal_cooling_device *cdev,
104 if (ACPI_FAILURE(status)) 112 if (ACPI_FAILURE(status))
105 return -EFAULT; 113 return -EFAULT;
106 114
107 return sprintf(buf, "%ld\n", value); 115 return sprintf(buf, "%llu\n", value);
108} 116}
109 117
110static int memory_set_cur_bandwidth(struct thermal_cooling_device *cdev, 118static int memory_set_cur_bandwidth(struct thermal_cooling_device *cdev,
@@ -115,13 +123,13 @@ static int memory_set_cur_bandwidth(struct thermal_cooling_device *cdev,
115 struct acpi_object_list arg_list; 123 struct acpi_object_list arg_list;
116 union acpi_object arg; 124 union acpi_object arg;
117 acpi_status status; 125 acpi_status status;
118 int temp; 126 unsigned long long temp;
119 unsigned long max_state; 127 unsigned long max_state;
120 128
121 if (memory_get_int_max_bandwidth(cdev, &max_state)) 129 if (memory_get_int_max_bandwidth(cdev, &max_state))
122 return -EFAULT; 130 return -EFAULT;
123 131
124 if (max_state < 0 || state > max_state) 132 if (state > max_state)
125 return -EINVAL; 133 return -EINVAL;
126 134
127 arg_list.count = 1; 135 arg_list.count = 1;
@@ -131,7 +139,7 @@ static int memory_set_cur_bandwidth(struct thermal_cooling_device *cdev,
131 139
132 status = 140 status =
133 acpi_evaluate_integer(handle, MEMORY_SET_BANDWIDTH, &arg_list, 141 acpi_evaluate_integer(handle, MEMORY_SET_BANDWIDTH, &arg_list,
134 (unsigned long *)&temp); 142 &temp);
135 143
136 printk(KERN_INFO 144 printk(KERN_INFO
137 "Bandwidth value was %d: status is %d\n", state, status); 145 "Bandwidth value was %d: status is %d\n", state, status);
@@ -175,7 +183,7 @@ static int intel_menlow_memory_add(struct acpi_device *device)
175 goto end; 183 goto end;
176 } 184 }
177 185
178 acpi_driver_data(device) = cdev; 186 device->driver_data = cdev;
179 result = sysfs_create_link(&device->dev.kobj, 187 result = sysfs_create_link(&device->dev.kobj,
180 &cdev->device.kobj, "thermal_cooling"); 188 &cdev->device.kobj, "thermal_cooling");
181 if (result) 189 if (result)
@@ -252,7 +260,8 @@ static DEFINE_MUTEX(intel_menlow_attr_lock);
252 * @auxtype : AUX0/AUX1 260 * @auxtype : AUX0/AUX1
253 * @buf: sysfs buffer 261 * @buf: sysfs buffer
254 */ 262 */
255static int sensor_get_auxtrip(acpi_handle handle, int index, int *value) 263static int sensor_get_auxtrip(acpi_handle handle, int index,
264 unsigned long long *value)
256{ 265{
257 acpi_status status; 266 acpi_status status;
258 267
@@ -260,7 +269,7 @@ static int sensor_get_auxtrip(acpi_handle handle, int index, int *value)
260 return -EINVAL; 269 return -EINVAL;
261 270
262 status = acpi_evaluate_integer(handle, index ? GET_AUX1 : GET_AUX0, 271 status = acpi_evaluate_integer(handle, index ? GET_AUX1 : GET_AUX0,
263 NULL, (unsigned long *)value); 272 NULL, value);
264 if (ACPI_FAILURE(status)) 273 if (ACPI_FAILURE(status))
265 return -EIO; 274 return -EIO;
266 275
@@ -282,13 +291,13 @@ static int sensor_set_auxtrip(acpi_handle handle, int index, int value)
282 struct acpi_object_list args = { 291 struct acpi_object_list args = {
283 1, &arg 292 1, &arg
284 }; 293 };
285 int temp; 294 unsigned long long temp;
286 295
287 if (index != 0 && index != 1) 296 if (index != 0 && index != 1)
288 return -EINVAL; 297 return -EINVAL;
289 298
290 status = acpi_evaluate_integer(handle, index ? GET_AUX0 : GET_AUX1, 299 status = acpi_evaluate_integer(handle, index ? GET_AUX0 : GET_AUX1,
291 NULL, (unsigned long *)&temp); 300 NULL, &temp);
292 if (ACPI_FAILURE(status)) 301 if (ACPI_FAILURE(status))
293 return -EIO; 302 return -EIO;
294 if ((index && value < temp) || (!index && value > temp)) 303 if ((index && value < temp) || (!index && value > temp))
@@ -296,7 +305,7 @@ static int sensor_set_auxtrip(acpi_handle handle, int index, int value)
296 305
297 arg.integer.value = value; 306 arg.integer.value = value;
298 status = acpi_evaluate_integer(handle, index ? SET_AUX1 : SET_AUX0, 307 status = acpi_evaluate_integer(handle, index ? SET_AUX1 : SET_AUX0,
299 &args, (unsigned long *)&temp); 308 &args, &temp);
300 if (ACPI_FAILURE(status)) 309 if (ACPI_FAILURE(status))
301 return -EIO; 310 return -EIO;
302 311
@@ -312,7 +321,7 @@ static ssize_t aux0_show(struct device *dev,
312 struct device_attribute *dev_attr, char *buf) 321 struct device_attribute *dev_attr, char *buf)
313{ 322{
314 struct intel_menlow_attribute *attr = to_intel_menlow_attr(dev_attr); 323 struct intel_menlow_attribute *attr = to_intel_menlow_attr(dev_attr);
315 int value; 324 unsigned long long value;
316 int result; 325 int result;
317 326
318 result = sensor_get_auxtrip(attr->handle, 0, &value); 327 result = sensor_get_auxtrip(attr->handle, 0, &value);
@@ -324,7 +333,7 @@ static ssize_t aux1_show(struct device *dev,
324 struct device_attribute *dev_attr, char *buf) 333 struct device_attribute *dev_attr, char *buf)
325{ 334{
326 struct intel_menlow_attribute *attr = to_intel_menlow_attr(dev_attr); 335 struct intel_menlow_attribute *attr = to_intel_menlow_attr(dev_attr);
327 int value; 336 unsigned long long value;
328 int result; 337 int result;
329 338
330 result = sensor_get_auxtrip(attr->handle, 1, &value); 339 result = sensor_get_auxtrip(attr->handle, 1, &value);
@@ -376,7 +385,7 @@ static ssize_t bios_enabled_show(struct device *dev,
376 struct device_attribute *attr, char *buf) 385 struct device_attribute *attr, char *buf)
377{ 386{
378 acpi_status status; 387 acpi_status status;
379 unsigned long bios_enabled; 388 unsigned long long bios_enabled;
380 389
381 status = acpi_evaluate_integer(NULL, BIOS_ENABLED, NULL, &bios_enabled); 390 status = acpi_evaluate_integer(NULL, BIOS_ENABLED, NULL, &bios_enabled);
382 if (ACPI_FAILURE(status)) 391 if (ACPI_FAILURE(status))
@@ -492,7 +501,7 @@ static int __init intel_menlow_module_init(void)
492{ 501{
493 int result = -ENODEV; 502 int result = -ENODEV;
494 acpi_status status; 503 acpi_status status;
495 unsigned long enable; 504 unsigned long long enable;
496 505
497 if (acpi_disabled) 506 if (acpi_disabled)
498 return result; 507 return result;
diff --git a/drivers/misc/msi-laptop.c b/drivers/misc/msi-laptop.c
index de898c6938f3..759763d18e4c 100644
--- a/drivers/misc/msi-laptop.c
+++ b/drivers/misc/msi-laptop.c
@@ -347,12 +347,16 @@ static int __init msi_init(void)
347 347
348 /* Register backlight stuff */ 348 /* Register backlight stuff */
349 349
350 msibl_device = backlight_device_register("msi-laptop-bl", NULL, NULL, 350 if (acpi_video_backlight_support()) {
351 &msibl_ops); 351 printk(KERN_INFO "MSI: Brightness ignored, must be controlled "
352 if (IS_ERR(msibl_device)) 352 "by ACPI video driver\n");
353 return PTR_ERR(msibl_device); 353 } else {
354 354 msibl_device = backlight_device_register("msi-laptop-bl", NULL,
355 msibl_device->props.max_brightness = MSI_LCD_LEVEL_MAX-1; 355 NULL, &msibl_ops);
356 if (IS_ERR(msibl_device))
357 return PTR_ERR(msibl_device);
358 msibl_device->props.max_brightness = MSI_LCD_LEVEL_MAX-1;
359 }
356 360
357 ret = platform_driver_register(&msipf_driver); 361 ret = platform_driver_register(&msipf_driver);
358 if (ret) 362 if (ret)
diff --git a/drivers/misc/panasonic-laptop.c b/drivers/misc/panasonic-laptop.c
new file mode 100644
index 000000000000..4a1bc64485d5
--- /dev/null
+++ b/drivers/misc/panasonic-laptop.c
@@ -0,0 +1,766 @@
1/*
2 * Panasonic HotKey and LCD brightness control driver
3 * (C) 2004 Hiroshi Miura <miura@da-cha.org>
4 * (C) 2004 NTT DATA Intellilink Co. http://www.intellilink.co.jp/
5 * (C) YOKOTA Hiroshi <yokota (at) netlab. is. tsukuba. ac. jp>
6 * (C) 2004 David Bronaugh <dbronaugh>
7 * (C) 2006-2008 Harald Welte <laforge@gnumonks.org>
8 *
9 * derived from toshiba_acpi.c, Copyright (C) 2002-2004 John Belmonte
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
 13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 *
24 *---------------------------------------------------------------------------
25 *
26 * ChangeLog:
27 * Sep.23, 2008 Harald Welte <laforge@gnumonks.org>
28 * -v0.95 rename driver from drivers/acpi/pcc_acpi.c to
29 * drivers/misc/panasonic-laptop.c
30 *
31 * Jul.04, 2008 Harald Welte <laforge@gnumonks.org>
32 * -v0.94 replace /proc interface with device attributes
 33 * support {set,get}keycode on the input device
34 *
35 * Jun.27, 2008 Harald Welte <laforge@gnumonks.org>
36 * -v0.92 merge with 2.6.26-rc6 input API changes
37 * remove broken <= 2.6.15 kernel support
38 * resolve all compiler warnings
39 * various coding style fixes (checkpatch.pl)
40 * add support for backlight api
41 * major code restructuring
42 *
 43 * Dec.28, 2007 Harald Welte <laforge@gnumonks.org>
44 * -v0.91 merge with 2.6.24-rc6 ACPI changes
45 *
46 * Nov.04, 2006 Hiroshi Miura <miura@da-cha.org>
47 * -v0.9 remove warning about section reference.
48 * remove acpi_os_free
49 * add /proc/acpi/pcc/brightness interface for HAL access
50 * merge dbronaugh's enhancement
51 * Aug.17, 2004 David Bronaugh (dbronaugh)
52 * - Added screen brightness setting interface
53 * Thanks to FreeBSD crew (acpi_panasonic.c)
54 * for the ideas I needed to accomplish it
55 *
56 * May.29, 2006 Hiroshi Miura <miura@da-cha.org>
57 * -v0.8.4 follow to change keyinput structure
58 * thanks Fabian Yamaguchi <fabs@cs.tu-berlin.de>,
59 * Jacob Bower <jacob.bower@ic.ac.uk> and
60 * Hiroshi Yokota for providing solutions.
61 *
62 * Oct.02, 2004 Hiroshi Miura <miura@da-cha.org>
63 * -v0.8.2 merge code of YOKOTA Hiroshi
64 * <yokota@netlab.is.tsukuba.ac.jp>.
65 * Add sticky key mode interface.
66 * Refactoring acpi_pcc_generate_keyinput().
67 *
68 * Sep.15, 2004 Hiroshi Miura <miura@da-cha.org>
69 * -v0.8 Generate key input event on input subsystem.
70 * This is based on yet another driver written by
71 * Ryuta Nakanishi.
72 *
73 * Sep.10, 2004 Hiroshi Miura <miura@da-cha.org>
74 * -v0.7 Change proc interface functions using seq_file
75 * facility as same as other ACPI drivers.
76 *
77 * Aug.28, 2004 Hiroshi Miura <miura@da-cha.org>
78 * -v0.6.4 Fix a silly error with status checking
79 *
80 * Aug.25, 2004 Hiroshi Miura <miura@da-cha.org>
81 * -v0.6.3 replace read_acpi_int by standard function
82 * acpi_evaluate_integer
83 * some clean up and make smart copyright notice.
84 * fix return value of pcc_acpi_get_key()
85 * fix checking return value of acpi_bus_register_driver()
86 *
87 * Aug.22, 2004 David Bronaugh <dbronaugh@linuxboxen.org>
88 * -v0.6.2 Add check on ACPI data (num_sifr)
89 * Coding style cleanups, better error messages/handling
90 * Fixed an off-by-one error in memory allocation
91 *
92 * Aug.21, 2004 David Bronaugh <dbronaugh@linuxboxen.org>
93 * -v0.6.1 Fix a silly error with status checking
94 *
95 * Aug.20, 2004 David Bronaugh <dbronaugh@linuxboxen.org>
96 * - v0.6 Correct brightness controls to reflect reality
97 * based on information gleaned by Hiroshi Miura
98 * and discussions with Hiroshi Miura
99 *
100 * Aug.10, 2004 Hiroshi Miura <miura@da-cha.org>
101 * - v0.5 support LCD brightness control
102 * based on the disclosed information by MEI.
103 *
104 * Jul.25, 2004 Hiroshi Miura <miura@da-cha.org>
105 * - v0.4 first post version
106 * add function to retrieve SIFR
107 *
108 * Jul.24, 2004 Hiroshi Miura <miura@da-cha.org>
109 * - v0.3 get proper status of hotkey
110 *
111 * Jul.22, 2004 Hiroshi Miura <miura@da-cha.org>
112 * - v0.2 add HotKey handler
113 *
114 * Jul.17, 2004 Hiroshi Miura <miura@da-cha.org>
115 * - v0.1 start from toshiba_acpi driver written by John Belmonte
116 *
117 */
118
119#include <linux/kernel.h>
120#include <linux/module.h>
121#include <linux/init.h>
122#include <linux/types.h>
123#include <linux/backlight.h>
124#include <linux/ctype.h>
125#include <linux/seq_file.h>
126#include <linux/uaccess.h>
127#include <acpi/acpi_bus.h>
128#include <acpi/acpi_drivers.h>
129#include <linux/input.h>
130
131
132#ifndef ACPI_HOTKEY_COMPONENT
133#define ACPI_HOTKEY_COMPONENT 0x10000000
134#endif
135
136#define _COMPONENT ACPI_HOTKEY_COMPONENT
137
138MODULE_AUTHOR("Hiroshi Miura, David Bronaugh and Harald Welte");
139MODULE_DESCRIPTION("ACPI HotKey driver for Panasonic Let's Note laptops");
140MODULE_LICENSE("GPL");
141
142#define LOGPREFIX "pcc_acpi: "
143
144/* Define ACPI PATHs */
145/* Let's Note hotkeys */
146#define METHOD_HKEY_QUERY "HINF"
147#define METHOD_HKEY_SQTY "SQTY"
148#define METHOD_HKEY_SINF "SINF"
149#define METHOD_HKEY_SSET "SSET"
150#define HKEY_NOTIFY 0x80
151
152#define ACPI_PCC_DRIVER_NAME "Panasonic Laptop Support"
153#define ACPI_PCC_DEVICE_NAME "Hotkey"
154#define ACPI_PCC_CLASS "pcc"
155
156#define ACPI_PCC_INPUT_PHYS "panasonic/hkey0"
157
158/* LCD_TYPEs: 0 = Normal, 1 = Semi-transparent
159 ENV_STATEs: Normal temp=0x01, High temp=0x81, N/A=0x00
160*/
161enum SINF_BITS { SINF_NUM_BATTERIES = 0,
162 SINF_LCD_TYPE,
163 SINF_AC_MAX_BRIGHT,
164 SINF_AC_MIN_BRIGHT,
165 SINF_AC_CUR_BRIGHT,
166 SINF_DC_MAX_BRIGHT,
167 SINF_DC_MIN_BRIGHT,
168 SINF_DC_CUR_BRIGHT,
169 SINF_MUTE,
170 SINF_RESERVED,
171 SINF_ENV_STATE,
172 SINF_STICKY_KEY = 0x80,
173 };
174/* R1 handles SINF_AC_CUR_BRIGHT as SINF_CUR_BRIGHT, doesn't know AC state */
175
176static int acpi_pcc_hotkey_add(struct acpi_device *device);
177static int acpi_pcc_hotkey_remove(struct acpi_device *device, int type);
178static int acpi_pcc_hotkey_resume(struct acpi_device *device);
179
180static const struct acpi_device_id pcc_device_ids[] = {
181 { "MAT0012", 0},
182 { "MAT0013", 0},
183 { "MAT0018", 0},
184 { "MAT0019", 0},
185 { "", 0},
186};
187
188static struct acpi_driver acpi_pcc_driver = {
189 .name = ACPI_PCC_DRIVER_NAME,
190 .class = ACPI_PCC_CLASS,
191 .ids = pcc_device_ids,
192 .ops = {
193 .add = acpi_pcc_hotkey_add,
194 .remove = acpi_pcc_hotkey_remove,
195 .resume = acpi_pcc_hotkey_resume,
196 },
197};
198
199#define KEYMAP_SIZE 11
200static const int initial_keymap[KEYMAP_SIZE] = {
201 /* 0 */ KEY_RESERVED,
202 /* 1 */ KEY_BRIGHTNESSDOWN,
203 /* 2 */ KEY_BRIGHTNESSUP,
204 /* 3 */ KEY_DISPLAYTOGGLE,
205 /* 4 */ KEY_MUTE,
206 /* 5 */ KEY_VOLUMEDOWN,
207 /* 6 */ KEY_VOLUMEUP,
208 /* 7 */ KEY_SLEEP,
209 /* 8 */ KEY_PROG1, /* Change CPU boost */
210 /* 9 */ KEY_BATTERY,
211 /* 10 */ KEY_SUSPEND,
212};
213
214struct pcc_acpi {
215 acpi_handle handle;
216 unsigned long num_sifr;
217 int sticky_mode;
218 u32 *sinf;
219 struct acpi_device *device;
220 struct input_dev *input_dev;
221 struct backlight_device *backlight;
222 int keymap[KEYMAP_SIZE];
223};
224
225struct pcc_keyinput {
226 struct acpi_hotkey *hotkey;
227};
228
229/* method access functions */
230static int acpi_pcc_write_sset(struct pcc_acpi *pcc, int func, int val)
231{
232 union acpi_object in_objs[] = {
233 { .integer.type = ACPI_TYPE_INTEGER,
234 .integer.value = func, },
235 { .integer.type = ACPI_TYPE_INTEGER,
236 .integer.value = val, },
237 };
238 struct acpi_object_list params = {
239 .count = ARRAY_SIZE(in_objs),
240 .pointer = in_objs,
241 };
242 acpi_status status = AE_OK;
243
244 ACPI_FUNCTION_TRACE("acpi_pcc_write_sset");
245
246 status = acpi_evaluate_object(pcc->handle, METHOD_HKEY_SSET,
247 &params, NULL);
248
249 return status == AE_OK;
250}
251
252static inline int acpi_pcc_get_sqty(struct acpi_device *device)
253{
254 unsigned long long s;
255 acpi_status status;
256
257 ACPI_FUNCTION_TRACE("acpi_pcc_get_sqty");
258
259 status = acpi_evaluate_integer(device->handle, METHOD_HKEY_SQTY,
260 NULL, &s);
261 if (ACPI_SUCCESS(status))
262 return s;
263 else {
264 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
265 "evaluation error HKEY.SQTY\n"));
266 return -EINVAL;
267 }
268}
269
270static int acpi_pcc_retrieve_biosdata(struct pcc_acpi *pcc, u32 *sinf)
271{
272 acpi_status status;
273 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
274 union acpi_object *hkey = NULL;
275 int i;
276
277 ACPI_FUNCTION_TRACE("acpi_pcc_retrieve_biosdata");
278
279 status = acpi_evaluate_object(pcc->handle, METHOD_HKEY_SINF, 0,
280 &buffer);
281 if (ACPI_FAILURE(status)) {
282 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
283 "evaluation error HKEY.SINF\n"));
284 return 0;
285 }
286
287 hkey = buffer.pointer;
288 if (!hkey || (hkey->type != ACPI_TYPE_PACKAGE)) {
289 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid HKEY.SINF\n"));
290 goto end;
291 }
292
293 if (pcc->num_sifr < hkey->package.count) {
294 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
295 "SQTY reports bad SINF length\n"));
296 status = AE_ERROR;
297 goto end;
298 }
299
300 for (i = 0; i < hkey->package.count; i++) {
301 union acpi_object *element = &(hkey->package.elements[i]);
302 if (likely(element->type == ACPI_TYPE_INTEGER)) {
303 sinf[i] = element->integer.value;
304 } else
305 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
306 "Invalid HKEY.SINF data\n"));
307 }
308 sinf[hkey->package.count] = -1;
309
310end:
311 kfree(buffer.pointer);
312 return status == AE_OK;
313}
314
315/* backlight API interface functions */
316
317/* This driver currently treats AC and DC brightness identically,
318 * since we don't need to invent an interface to the core ACPI
319 * logic to receive events in case a power supply is plugged in
320 * or removed */
321
322static int bl_get(struct backlight_device *bd)
323{
324 struct pcc_acpi *pcc = bl_get_data(bd);
325
326 if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf))
327 return -EIO;
328
329 return pcc->sinf[SINF_AC_CUR_BRIGHT];
330}
331
332static int bl_set_status(struct backlight_device *bd)
333{
334 struct pcc_acpi *pcc = bl_get_data(bd);
335 int bright = bd->props.brightness;
336 int rc;
337
338 if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf))
339 return -EIO;
340
341 if (bright < pcc->sinf[SINF_AC_MIN_BRIGHT])
342 bright = pcc->sinf[SINF_AC_MIN_BRIGHT];
343
344 if (bright < pcc->sinf[SINF_DC_MIN_BRIGHT])
345 bright = pcc->sinf[SINF_DC_MIN_BRIGHT];
346
347 if (bright < pcc->sinf[SINF_AC_MIN_BRIGHT] ||
348 bright > pcc->sinf[SINF_AC_MAX_BRIGHT])
349 return -EINVAL;
350
351 rc = acpi_pcc_write_sset(pcc, SINF_AC_CUR_BRIGHT, bright);
352 if (rc < 0)
353 return rc;
354
355 return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright);
356}
357
358static struct backlight_ops pcc_backlight_ops = {
359 .get_brightness = bl_get,
360 .update_status = bl_set_status,
361};
362
363
364/* sysfs user interface functions */
365
366static ssize_t show_numbatt(struct device *dev, struct device_attribute *attr,
367 char *buf)
368{
369 struct acpi_device *acpi = to_acpi_device(dev);
370 struct pcc_acpi *pcc = acpi_driver_data(acpi);
371
372 if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf))
373 return -EIO;
374
375 return sprintf(buf, "%u\n", pcc->sinf[SINF_NUM_BATTERIES]);
376}
377
378static ssize_t show_lcdtype(struct device *dev, struct device_attribute *attr,
379 char *buf)
380{
381 struct acpi_device *acpi = to_acpi_device(dev);
382 struct pcc_acpi *pcc = acpi_driver_data(acpi);
383
384 if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf))
385 return -EIO;
386
387 return sprintf(buf, "%u\n", pcc->sinf[SINF_LCD_TYPE]);
388}
389
390static ssize_t show_mute(struct device *dev, struct device_attribute *attr,
391 char *buf)
392{
393 struct acpi_device *acpi = to_acpi_device(dev);
394 struct pcc_acpi *pcc = acpi_driver_data(acpi);
395
396 if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf))
397 return -EIO;
398
399 return sprintf(buf, "%u\n", pcc->sinf[SINF_MUTE]);
400}
401
402static ssize_t show_sticky(struct device *dev, struct device_attribute *attr,
403 char *buf)
404{
405 struct acpi_device *acpi = to_acpi_device(dev);
406 struct pcc_acpi *pcc = acpi_driver_data(acpi);
407
408 if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf))
409 return -EIO;
410
411 return sprintf(buf, "%u\n", pcc->sinf[SINF_STICKY_KEY]);
412}
413
414static ssize_t set_sticky(struct device *dev, struct device_attribute *attr,
415 const char *buf, size_t count)
416{
417 struct acpi_device *acpi = to_acpi_device(dev);
418 struct pcc_acpi *pcc = acpi_driver_data(acpi);
419 int val;
420
421 if (count && sscanf(buf, "%i", &val) == 1 &&
422 (val == 0 || val == 1)) {
423 acpi_pcc_write_sset(pcc, SINF_STICKY_KEY, val);
424 pcc->sticky_mode = val;
425 }
426
427 return count;
428}
429
430static DEVICE_ATTR(numbatt, S_IRUGO, show_numbatt, NULL);
431static DEVICE_ATTR(lcdtype, S_IRUGO, show_lcdtype, NULL);
432static DEVICE_ATTR(mute, S_IRUGO, show_mute, NULL);
433static DEVICE_ATTR(sticky_key, S_IRUGO | S_IWUSR, show_sticky, set_sticky);
434
435static struct attribute *pcc_sysfs_entries[] = {
436 &dev_attr_numbatt.attr,
437 &dev_attr_lcdtype.attr,
438 &dev_attr_mute.attr,
439 &dev_attr_sticky_key.attr,
440 NULL,
441};
442
443static struct attribute_group pcc_attr_group = {
444 .name = NULL, /* put in device directory */
445 .attrs = pcc_sysfs_entries,
446};
447
448
449/* hotkey input device driver */
450
451static int pcc_getkeycode(struct input_dev *dev, int scancode, int *keycode)
452{
453 struct pcc_acpi *pcc = input_get_drvdata(dev);
454
455 if (scancode >= ARRAY_SIZE(pcc->keymap))
456 return -EINVAL;
457
458 *keycode = pcc->keymap[scancode];
459
460 return 0;
461}
462
463static int keymap_get_by_keycode(struct pcc_acpi *pcc, int keycode)
464{
465 int i;
466
467 for (i = 0; i < ARRAY_SIZE(pcc->keymap); i++) {
468 if (pcc->keymap[i] == keycode)
469 return i+1;
470 }
471
472 return 0;
473}
474
475static int pcc_setkeycode(struct input_dev *dev, int scancode, int keycode)
476{
477 struct pcc_acpi *pcc = input_get_drvdata(dev);
478 int oldkeycode;
479
480 if (scancode >= ARRAY_SIZE(pcc->keymap))
481 return -EINVAL;
482
483 if (keycode < 0 || keycode > KEY_MAX)
484 return -EINVAL;
485
486 oldkeycode = pcc->keymap[scancode];
487 pcc->keymap[scancode] = keycode;
488
489 set_bit(keycode, dev->keybit);
490
491 if (!keymap_get_by_keycode(pcc, oldkeycode))
492 clear_bit(oldkeycode, dev->keybit);
493
494 return 0;
495}
496
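For context only: the getkeycode/setkeycode hooks above are what the evdev EVIOCGKEYCODE/EVIOCSKEYCODE ioctls end up invoking, so remapping one hotkey from user space could look roughly like the hypothetical sketch below (the event node path and the chosen key are assumptions, not part of this patch).

#include <fcntl.h>
#include <linux/input.h>
#include <stdio.h>
#include <sys/ioctl.h>

int main(void)
{
	int codes[2] = { 8, KEY_PROG2 };		/* scancode 8 -> KEY_PROG2 */
	int fd = open("/dev/input/event5", O_RDWR);	/* hypothetical event node */

	if (fd < 0)
		return 1;
	if (ioctl(fd, EVIOCSKEYCODE, codes))		/* handled via pcc_setkeycode() */
		perror("EVIOCSKEYCODE");
	return 0;
}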
497static void acpi_pcc_generate_keyinput(struct pcc_acpi *pcc)
498{
499 struct input_dev *hotk_input_dev = pcc->input_dev;
500 int rc;
501 int key_code, hkey_num;
502 unsigned long long result;
503
504 ACPI_FUNCTION_TRACE("acpi_pcc_generate_keyinput");
505
506 rc = acpi_evaluate_integer(pcc->handle, METHOD_HKEY_QUERY,
507 NULL, &result);
508 if (!ACPI_SUCCESS(rc)) {
509 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
510 "error getting hotkey status\n"));
511 return;
512 }
513
514 acpi_bus_generate_proc_event(pcc->device, HKEY_NOTIFY, result);
515
516 hkey_num = result & 0xf;
517
518	if (hkey_num < 0 || hkey_num >= ARRAY_SIZE(pcc->keymap)) {
519 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
520 "hotkey number out of range: %d\n",
521 hkey_num));
522 return;
523 }
524
525 key_code = pcc->keymap[hkey_num];
526
527 if (key_code != KEY_RESERVED) {
528 int pushed = (result & 0x80) ? TRUE : FALSE;
529
530 input_report_key(hotk_input_dev, key_code, pushed);
531 input_sync(hotk_input_dev);
532 }
533
534 return;
535}
536
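A small sketch of how acpi_pcc_generate_keyinput() above decodes the HINF result (the sample value 0x86 is invented, not from this patch): the low nibble selects the keymap slot and bit 7 is the press/release flag.

#include <stdio.h>

int main(void)
{
	unsigned long long result = 0x86;	/* hypothetical HINF value */
	int hkey_num = result & 0xf;		/* keymap slot 6 (KEY_VOLUMEUP by default) */
	int pushed = (result & 0x80) ? 1 : 0;	/* 1 = pressed, 0 = released */

	printf("slot %d, %s\n", hkey_num, pushed ? "pressed" : "released");
	return 0;
}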
537static void acpi_pcc_hotkey_notify(acpi_handle handle, u32 event, void *data)
538{
539 struct pcc_acpi *pcc = (struct pcc_acpi *) data;
540
541 ACPI_FUNCTION_TRACE("acpi_pcc_hotkey_notify");
542
543 switch (event) {
544 case HKEY_NOTIFY:
545 acpi_pcc_generate_keyinput(pcc);
546 break;
547 default:
548 /* nothing to do */
549 break;
550 }
551}
552
553static int acpi_pcc_init_input(struct pcc_acpi *pcc)
554{
555 int i, rc;
556
557 ACPI_FUNCTION_TRACE("acpi_pcc_init_input");
558
559 pcc->input_dev = input_allocate_device();
560 if (!pcc->input_dev) {
561 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
562 "Couldn't allocate input device for hotkey"));
563 return -ENOMEM;
564 }
565
566 pcc->input_dev->evbit[0] = BIT(EV_KEY);
567
568 pcc->input_dev->name = ACPI_PCC_DRIVER_NAME;
569 pcc->input_dev->phys = ACPI_PCC_INPUT_PHYS;
570 pcc->input_dev->id.bustype = BUS_HOST;
571 pcc->input_dev->id.vendor = 0x0001;
572 pcc->input_dev->id.product = 0x0001;
573 pcc->input_dev->id.version = 0x0100;
574 pcc->input_dev->getkeycode = pcc_getkeycode;
575 pcc->input_dev->setkeycode = pcc_setkeycode;
576
577 /* load initial keymap */
578 memcpy(pcc->keymap, initial_keymap, sizeof(pcc->keymap));
579
580 for (i = 0; i < ARRAY_SIZE(pcc->keymap); i++)
581 __set_bit(pcc->keymap[i], pcc->input_dev->keybit);
582 __clear_bit(KEY_RESERVED, pcc->input_dev->keybit);
583
584 input_set_drvdata(pcc->input_dev, pcc);
585
586 rc = input_register_device(pcc->input_dev);
587 if (rc < 0)
588 input_free_device(pcc->input_dev);
589
590 return rc;
591}
592
593/* kernel module interface */
594
595static int acpi_pcc_hotkey_resume(struct acpi_device *device)
596{
597 struct pcc_acpi *pcc = acpi_driver_data(device);
598 acpi_status status = AE_OK;
599
600 ACPI_FUNCTION_TRACE("acpi_pcc_hotkey_resume");
601
602 if (device == NULL || pcc == NULL)
603 return -EINVAL;
604
605 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Sticky mode restore: %d\n",
606 pcc->sticky_mode));
607
608 status = acpi_pcc_write_sset(pcc, SINF_STICKY_KEY, pcc->sticky_mode);
609
610 return status == AE_OK ? 0 : -EINVAL;
611}
612
613static int acpi_pcc_hotkey_add(struct acpi_device *device)
614{
615 acpi_status status;
616 struct pcc_acpi *pcc;
617 int num_sifr, result;
618
619 ACPI_FUNCTION_TRACE("acpi_pcc_hotkey_add");
620
621 if (!device)
622 return -EINVAL;
623
624 num_sifr = acpi_pcc_get_sqty(device);
625
626	if (num_sifr < 0 || num_sifr > 255) {
627		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "num_sifr out of range"));
628 return -ENODEV;
629 }
630
631 pcc = kzalloc(sizeof(struct pcc_acpi), GFP_KERNEL);
632 if (!pcc) {
633 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
634 "Couldn't allocate mem for pcc"));
635 return -ENOMEM;
636 }
637
638 pcc->sinf = kzalloc(sizeof(u32) * (num_sifr + 1), GFP_KERNEL);
639 if (!pcc->sinf) {
640 result = -ENOMEM;
641 goto out_hotkey;
642 }
643
644 pcc->device = device;
645 pcc->handle = device->handle;
646 pcc->num_sifr = num_sifr;
647 device->driver_data = pcc;
648 strcpy(acpi_device_name(device), ACPI_PCC_DEVICE_NAME);
649 strcpy(acpi_device_class(device), ACPI_PCC_CLASS);
650
651 result = acpi_pcc_init_input(pcc);
652 if (result) {
653 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
654 "Error installing keyinput handler\n"));
655 goto out_sinf;
656 }
657
658 /* initialize hotkey input device */
659 status = acpi_install_notify_handler(pcc->handle, ACPI_DEVICE_NOTIFY,
660 acpi_pcc_hotkey_notify, pcc);
661
662 if (ACPI_FAILURE(status)) {
663 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
664 "Error installing notify handler\n"));
665 result = -ENODEV;
666 goto out_input;
667 }
668
669 /* initialize backlight */
670 pcc->backlight = backlight_device_register("panasonic", NULL, pcc,
671 &pcc_backlight_ops);
672 if (IS_ERR(pcc->backlight))
673 goto out_notify;
674
675 if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf)) {
676 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
677 "Couldn't retrieve BIOS data\n"));
678 goto out_backlight;
679 }
680
681 /* read the initial brightness setting from the hardware */
682 pcc->backlight->props.max_brightness =
683 pcc->sinf[SINF_AC_MAX_BRIGHT];
684 pcc->backlight->props.brightness = pcc->sinf[SINF_AC_CUR_BRIGHT];
685
686 /* read the initial sticky key mode from the hardware */
687 pcc->sticky_mode = pcc->sinf[SINF_STICKY_KEY];
688
689 /* add sysfs attributes */
690 result = sysfs_create_group(&device->dev.kobj, &pcc_attr_group);
691 if (result)
692 goto out_backlight;
693
694 return 0;
695
696out_backlight:
697 backlight_device_unregister(pcc->backlight);
698out_notify:
699 acpi_remove_notify_handler(pcc->handle, ACPI_DEVICE_NOTIFY,
700 acpi_pcc_hotkey_notify);
701out_input:
702 input_unregister_device(pcc->input_dev);
703	/* no need to input_free_device() since the core input API refcounts
704	 * and frees the device */
705out_sinf:
706 kfree(pcc->sinf);
707out_hotkey:
708 kfree(pcc);
709
710 return result;
711}
712
713static int __init acpi_pcc_init(void)
714{
715 int result = 0;
716
717 ACPI_FUNCTION_TRACE("acpi_pcc_init");
718
719 if (acpi_disabled)
720 return -ENODEV;
721
722 result = acpi_bus_register_driver(&acpi_pcc_driver);
723 if (result < 0) {
724 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
725 "Error registering hotkey driver\n"));
726 return -ENODEV;
727 }
728
729 return 0;
730}
731
732static int acpi_pcc_hotkey_remove(struct acpi_device *device, int type)
733{
734 struct pcc_acpi *pcc = acpi_driver_data(device);
735
736 ACPI_FUNCTION_TRACE("acpi_pcc_hotkey_remove");
737
738 if (!device || !pcc)
739 return -EINVAL;
740
741 sysfs_remove_group(&device->dev.kobj, &pcc_attr_group);
742
743 backlight_device_unregister(pcc->backlight);
744
745 acpi_remove_notify_handler(pcc->handle, ACPI_DEVICE_NOTIFY,
746 acpi_pcc_hotkey_notify);
747
748 input_unregister_device(pcc->input_dev);
749	/* no need to input_free_device() since the core input API refcounts
750	 * and frees the device */
751
752 kfree(pcc->sinf);
753 kfree(pcc);
754
755 return 0;
756}
757
758static void __exit acpi_pcc_exit(void)
759{
760 ACPI_FUNCTION_TRACE("acpi_pcc_exit");
761
762 acpi_bus_unregister_driver(&acpi_pcc_driver);
763}
764
765module_init(acpi_pcc_init);
766module_exit(acpi_pcc_exit);
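The error path of acpi_pcc_hotkey_add() above unwinds partial initialization with a ladder of goto labels, each label releasing exactly the resources acquired before the failing step, in reverse order. A minimal sketch of the same idiom, using hypothetical demo_* names rather than the driver's real symbols:

#include <linux/slab.h>
#include <linux/errno.h>

struct demo_ctx {
	void *buf;
};

static int demo_register(struct demo_ctx *ctx)
{
	return 0;		/* stand-in for a registration step */
}

static int demo_probe(void)
{
	struct demo_ctx *ctx;
	int result;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->buf = kzalloc(64, GFP_KERNEL);
	if (!ctx->buf) {
		result = -ENOMEM;
		goto out_ctx;
	}

	result = demo_register(ctx);
	if (result)
		goto out_buf;

	return 0;

out_buf:
	kfree(ctx->buf);	/* undo the second step */
out_ctx:
	kfree(ctx);		/* undo the first step */
	return result;
}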
diff --git a/drivers/misc/sgi-gru/Makefile b/drivers/misc/sgi-gru/Makefile
index d03597a521b0..9e9170b3599a 100644
--- a/drivers/misc/sgi-gru/Makefile
+++ b/drivers/misc/sgi-gru/Makefile
@@ -1,3 +1,7 @@
1ifdef CONFIG_SGI_GRU_DEBUG
2 EXTRA_CFLAGS += -DDEBUG
3endif
4
1obj-$(CONFIG_SGI_GRU) := gru.o 5obj-$(CONFIG_SGI_GRU) := gru.o
2gru-y := grufile.o grumain.o grufault.o grutlbpurge.o gruprocfs.o grukservices.o 6gru-y := grufile.o grumain.o grufault.o grutlbpurge.o gruprocfs.o grukservices.o
3 7
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
index 8c389d606c30..3ee698ad8599 100644
--- a/drivers/misc/sgi-gru/grufault.c
+++ b/drivers/misc/sgi-gru/grufault.c
@@ -254,7 +254,11 @@ static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr,
254 return 1; 254 return 1;
255 255
256 *paddr = pte_pfn(pte) << PAGE_SHIFT; 256 *paddr = pte_pfn(pte) << PAGE_SHIFT;
257#ifdef CONFIG_HUGETLB_PAGE
257 *pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT; 258 *pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
259#else
260 *pageshift = PAGE_SHIFT;
261#endif
258 return 0; 262 return 0;
259 263
260err: 264err:
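The atomic_pte_lookup() change guards the HPAGE_SHIFT reference with CONFIG_HUGETLB_PAGE, since that constant may only be available when hugetlb support is configured in. A minimal sketch of the same guard, with a hypothetical helper name:

#include <linux/mm.h>
#include <linux/hugetlb.h>

static unsigned int demo_pageshift(struct vm_area_struct *vma)
{
#ifdef CONFIG_HUGETLB_PAGE
	/* HPAGE_SHIFT is usable only when hugetlb pages are built in */
	return is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
#else
	return PAGE_SHIFT;
#endif
}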
diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c
index 5c027b6b4e5a..650983806392 100644
--- a/drivers/misc/sgi-gru/grufile.c
+++ b/drivers/misc/sgi-gru/grufile.c
@@ -481,7 +481,7 @@ struct vm_operations_struct gru_vm_ops = {
481 .fault = gru_fault, 481 .fault = gru_fault,
482}; 482};
483 483
484module_init(gru_init); 484fs_initcall(gru_init);
485module_exit(gru_exit); 485module_exit(gru_exit);
486 486
487module_param(gru_options, ulong, 0644); 487module_param(gru_options, ulong, 0644);
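grufile.c now registers gru_init() with fs_initcall() instead of module_init(). When the driver is built into the kernel, the initcall level controls ordering, and fs_initcall() runs at an earlier level than the device/module level, presumably so the GRU services are available before code that depends on them initializes. A minimal sketch of the two registration forms, with a hypothetical my_init():

#include <linux/init.h>
#include <linux/module.h>

static int __init my_init(void)
{
	return 0;	/* driver setup would go here */
}

static void __exit my_exit(void)
{
}

/* module_init(my_init);   -- device-level initcall when built in */
fs_initcall(my_init);	/* runs earlier, at the fs initcall level */
module_exit(my_exit);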
diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
index 533923f83f1a..73b0ca061bb5 100644
--- a/drivers/misc/sgi-gru/gruprocfs.c
+++ b/drivers/misc/sgi-gru/gruprocfs.c
@@ -317,7 +317,6 @@ int gru_proc_init(void)
317{ 317{
318 struct proc_entry *p; 318 struct proc_entry *p;
319 319
320 proc_mkdir("sgi_uv", NULL);
321 proc_gru = proc_mkdir("sgi_uv/gru", NULL); 320 proc_gru = proc_mkdir("sgi_uv/gru", NULL);
322 321
323 for (p = proc_files; p->name; p++) 322 for (p = proc_files; p->name; p++)
diff --git a/drivers/misc/sgi-xp/Makefile b/drivers/misc/sgi-xp/Makefile
index 35ce28578075..4fc40d8e1bcc 100644
--- a/drivers/misc/sgi-xp/Makefile
+++ b/drivers/misc/sgi-xp/Makefile
@@ -5,14 +5,14 @@
5obj-$(CONFIG_SGI_XP) += xp.o 5obj-$(CONFIG_SGI_XP) += xp.o
6xp-y := xp_main.o 6xp-y := xp_main.o
7xp-$(CONFIG_IA64_SGI_SN2) += xp_sn2.o xp_nofault.o 7xp-$(CONFIG_IA64_SGI_SN2) += xp_sn2.o xp_nofault.o
8xp-$(CONFIG_IA64_GENERIC) += xp_sn2.o xp_nofault.o xp_uv.o 8xp-$(CONFIG_IA64_GENERIC) += xp_sn2.o xp_nofault.o
9xp-$(CONFIG_IA64_SGI_UV) += xp_uv.o 9xp-$(CONFIG_IA64_SGI_UV) += xp_uv.o
10xp-$(CONFIG_X86_64) += xp_uv.o 10xp-$(CONFIG_X86_64) += xp_uv.o
11 11
12obj-$(CONFIG_SGI_XP) += xpc.o 12obj-$(CONFIG_SGI_XP) += xpc.o
13xpc-y := xpc_main.o xpc_channel.o xpc_partition.o 13xpc-y := xpc_main.o xpc_channel.o xpc_partition.o
14xpc-$(CONFIG_IA64_SGI_SN2) += xpc_sn2.o 14xpc-$(CONFIG_IA64_SGI_SN2) += xpc_sn2.o
15xpc-$(CONFIG_IA64_GENERIC) += xpc_sn2.o xpc_uv.o 15xpc-$(CONFIG_IA64_GENERIC) += xpc_sn2.o
16xpc-$(CONFIG_IA64_SGI_UV) += xpc_uv.o 16xpc-$(CONFIG_IA64_SGI_UV) += xpc_uv.o
17xpc-$(CONFIG_X86_64) += xpc_uv.o 17xpc-$(CONFIG_X86_64) += xpc_uv.o
18 18
diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
index 859a5281c61b..7b4cbd5e03e9 100644
--- a/drivers/misc/sgi-xp/xp.h
+++ b/drivers/misc/sgi-xp/xp.h
@@ -19,7 +19,11 @@
19#include <asm/system.h> 19#include <asm/system.h>
20#include <asm/sn/arch.h> /* defines is_shub1() and is_shub2() */ 20#include <asm/sn/arch.h> /* defines is_shub1() and is_shub2() */
21#define is_shub() ia64_platform_is("sn2") 21#define is_shub() ia64_platform_is("sn2")
22#ifdef CONFIG_IA64_SGI_UV
22#define is_uv() ia64_platform_is("uv") 23#define is_uv() ia64_platform_is("uv")
24#else
25#define is_uv() 0
26#endif
23#endif 27#endif
24#ifdef CONFIG_X86_64 28#ifdef CONFIG_X86_64
25#include <asm/genapic.h> 29#include <asm/genapic.h>
@@ -190,9 +194,10 @@ enum xp_retval {
190 xpGruSendMqError, /* 59: gru send message queue related error */ 194 xpGruSendMqError, /* 59: gru send message queue related error */
191 195
192 xpBadChannelNumber, /* 60: invalid channel number */ 196 xpBadChannelNumber, /* 60: invalid channel number */
193 xpBadMsgType, /* 60: invalid message type */ 197 xpBadMsgType, /* 61: invalid message type */
198 xpBiosError, /* 62: BIOS error */
194 199
195 xpUnknownReason /* 61: unknown reason - must be last in enum */ 200 xpUnknownReason /* 63: unknown reason - must be last in enum */
196}; 201};
197 202
198/* 203/*
@@ -341,6 +346,8 @@ extern unsigned long (*xp_pa) (void *);
341extern enum xp_retval (*xp_remote_memcpy) (unsigned long, const unsigned long, 346extern enum xp_retval (*xp_remote_memcpy) (unsigned long, const unsigned long,
342 size_t); 347 size_t);
343extern int (*xp_cpu_to_nasid) (int); 348extern int (*xp_cpu_to_nasid) (int);
349extern enum xp_retval (*xp_expand_memprotect) (unsigned long, unsigned long);
350extern enum xp_retval (*xp_restrict_memprotect) (unsigned long, unsigned long);
344 351
345extern u64 xp_nofault_PIOR_target; 352extern u64 xp_nofault_PIOR_target;
346extern int xp_nofault_PIOR(void *); 353extern int xp_nofault_PIOR(void *);
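xp.h now publishes the memory-protection operations as function pointers that the platform-specific init code (xp_init_sn2() or xp_init_uv()) fills in, so callers never need to know which backend is active. A minimal sketch of that dispatch pattern with hypothetical demo_* names:

#include <linux/kernel.h>

enum demo_retval { demoSuccess, demoError };

/* shared hook, assigned once by whichever backend initializes */
static enum demo_retval (*demo_expand_memprotect)(unsigned long pa,
						  unsigned long size);

static enum demo_retval demo_expand_sn2(unsigned long pa, unsigned long size)
{
	/* the SN2 build would call its SAL service here */
	return demoSuccess;
}

static void demo_init_sn2(void)
{
	demo_expand_memprotect = demo_expand_sn2;
}

static enum demo_retval demo_allow_ops(unsigned long pa, unsigned long size)
{
	return demo_expand_memprotect(pa, size);
}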
diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
index 66a1d19e08ad..9a2e77172d94 100644
--- a/drivers/misc/sgi-xp/xp_main.c
+++ b/drivers/misc/sgi-xp/xp_main.c
@@ -51,6 +51,13 @@ EXPORT_SYMBOL_GPL(xp_remote_memcpy);
51int (*xp_cpu_to_nasid) (int cpuid); 51int (*xp_cpu_to_nasid) (int cpuid);
52EXPORT_SYMBOL_GPL(xp_cpu_to_nasid); 52EXPORT_SYMBOL_GPL(xp_cpu_to_nasid);
53 53
54enum xp_retval (*xp_expand_memprotect) (unsigned long phys_addr,
55 unsigned long size);
56EXPORT_SYMBOL_GPL(xp_expand_memprotect);
57enum xp_retval (*xp_restrict_memprotect) (unsigned long phys_addr,
58 unsigned long size);
59EXPORT_SYMBOL_GPL(xp_restrict_memprotect);
60
54/* 61/*
55 * xpc_registrations[] keeps track of xpc_connect()'s done by the kernel-level 62 * xpc_registrations[] keeps track of xpc_connect()'s done by the kernel-level
56 * users of XPC. 63 * users of XPC.
diff --git a/drivers/misc/sgi-xp/xp_sn2.c b/drivers/misc/sgi-xp/xp_sn2.c
index 1440134caf31..fb3ec9d735a9 100644
--- a/drivers/misc/sgi-xp/xp_sn2.c
+++ b/drivers/misc/sgi-xp/xp_sn2.c
@@ -120,6 +120,38 @@ xp_cpu_to_nasid_sn2(int cpuid)
120 return cpuid_to_nasid(cpuid); 120 return cpuid_to_nasid(cpuid);
121} 121}
122 122
123static enum xp_retval
124xp_expand_memprotect_sn2(unsigned long phys_addr, unsigned long size)
125{
126 u64 nasid_array = 0;
127 int ret;
128
129 ret = sn_change_memprotect(phys_addr, size, SN_MEMPROT_ACCESS_CLASS_1,
130 &nasid_array);
131 if (ret != 0) {
132 dev_err(xp, "sn_change_memprotect(,, "
133 "SN_MEMPROT_ACCESS_CLASS_1,) failed ret=%d\n", ret);
134 return xpSalError;
135 }
136 return xpSuccess;
137}
138
139static enum xp_retval
140xp_restrict_memprotect_sn2(unsigned long phys_addr, unsigned long size)
141{
142 u64 nasid_array = 0;
143 int ret;
144
145 ret = sn_change_memprotect(phys_addr, size, SN_MEMPROT_ACCESS_CLASS_0,
146 &nasid_array);
147 if (ret != 0) {
148 dev_err(xp, "sn_change_memprotect(,, "
149 "SN_MEMPROT_ACCESS_CLASS_0,) failed ret=%d\n", ret);
150 return xpSalError;
151 }
152 return xpSuccess;
153}
154
123enum xp_retval 155enum xp_retval
124xp_init_sn2(void) 156xp_init_sn2(void)
125{ 157{
@@ -132,6 +164,8 @@ xp_init_sn2(void)
132 xp_pa = xp_pa_sn2; 164 xp_pa = xp_pa_sn2;
133 xp_remote_memcpy = xp_remote_memcpy_sn2; 165 xp_remote_memcpy = xp_remote_memcpy_sn2;
134 xp_cpu_to_nasid = xp_cpu_to_nasid_sn2; 166 xp_cpu_to_nasid = xp_cpu_to_nasid_sn2;
167 xp_expand_memprotect = xp_expand_memprotect_sn2;
168 xp_restrict_memprotect = xp_restrict_memprotect_sn2;
135 169
136 return xp_register_nofault_code_sn2(); 170 return xp_register_nofault_code_sn2();
137} 171}
diff --git a/drivers/misc/sgi-xp/xp_uv.c b/drivers/misc/sgi-xp/xp_uv.c
index d9f7ce2510bc..d238576b26fa 100644
--- a/drivers/misc/sgi-xp/xp_uv.c
+++ b/drivers/misc/sgi-xp/xp_uv.c
@@ -15,6 +15,11 @@
15 15
16#include <linux/device.h> 16#include <linux/device.h>
17#include <asm/uv/uv_hub.h> 17#include <asm/uv/uv_hub.h>
18#if defined CONFIG_X86_64
19#include <asm/uv/bios.h>
20#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
21#include <asm/sn/sn_sal.h>
22#endif
18#include "../sgi-gru/grukservices.h" 23#include "../sgi-gru/grukservices.h"
19#include "xp.h" 24#include "xp.h"
20 25
@@ -49,18 +54,79 @@ xp_cpu_to_nasid_uv(int cpuid)
49 return UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpuid)); 54 return UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpuid));
50} 55}
51 56
57static enum xp_retval
58xp_expand_memprotect_uv(unsigned long phys_addr, unsigned long size)
59{
60 int ret;
61
62#if defined CONFIG_X86_64
63 ret = uv_bios_change_memprotect(phys_addr, size, UV_MEMPROT_ALLOW_RW);
64 if (ret != BIOS_STATUS_SUCCESS) {
65 dev_err(xp, "uv_bios_change_memprotect(,, "
66 "UV_MEMPROT_ALLOW_RW) failed, ret=%d\n", ret);
67 return xpBiosError;
68 }
69
70#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
71 u64 nasid_array;
72
73 ret = sn_change_memprotect(phys_addr, size, SN_MEMPROT_ACCESS_CLASS_1,
74 &nasid_array);
75 if (ret != 0) {
76 dev_err(xp, "sn_change_memprotect(,, "
77 "SN_MEMPROT_ACCESS_CLASS_1,) failed ret=%d\n", ret);
78 return xpSalError;
79 }
80#else
81 #error not a supported configuration
82#endif
83 return xpSuccess;
84}
85
86static enum xp_retval
87xp_restrict_memprotect_uv(unsigned long phys_addr, unsigned long size)
88{
89 int ret;
90
91#if defined CONFIG_X86_64
92 ret = uv_bios_change_memprotect(phys_addr, size,
93 UV_MEMPROT_RESTRICT_ACCESS);
94 if (ret != BIOS_STATUS_SUCCESS) {
95 dev_err(xp, "uv_bios_change_memprotect(,, "
96 "UV_MEMPROT_RESTRICT_ACCESS) failed, ret=%d\n", ret);
97 return xpBiosError;
98 }
99
100#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
101 u64 nasid_array;
102
103 ret = sn_change_memprotect(phys_addr, size, SN_MEMPROT_ACCESS_CLASS_0,
104 &nasid_array);
105 if (ret != 0) {
106 dev_err(xp, "sn_change_memprotect(,, "
107 "SN_MEMPROT_ACCESS_CLASS_0,) failed ret=%d\n", ret);
108 return xpSalError;
109 }
110#else
111 #error not a supported configuration
112#endif
113 return xpSuccess;
114}
115
52enum xp_retval 116enum xp_retval
53xp_init_uv(void) 117xp_init_uv(void)
54{ 118{
55 BUG_ON(!is_uv()); 119 BUG_ON(!is_uv());
56 120
57 xp_max_npartitions = XP_MAX_NPARTITIONS_UV; 121 xp_max_npartitions = XP_MAX_NPARTITIONS_UV;
58 xp_partition_id = 0; /* !!! not correct value */ 122 xp_partition_id = sn_partition_id;
59 xp_region_size = 0; /* !!! not correct value */ 123 xp_region_size = sn_region_size;
60 124
61 xp_pa = xp_pa_uv; 125 xp_pa = xp_pa_uv;
62 xp_remote_memcpy = xp_remote_memcpy_uv; 126 xp_remote_memcpy = xp_remote_memcpy_uv;
63 xp_cpu_to_nasid = xp_cpu_to_nasid_uv; 127 xp_cpu_to_nasid = xp_cpu_to_nasid_uv;
128 xp_expand_memprotect = xp_expand_memprotect_uv;
129 xp_restrict_memprotect = xp_restrict_memprotect_uv;
64 130
65 return xpSuccess; 131 return xpSuccess;
66} 132}
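xp_uv.c selects the firmware interface at compile time: x86_64 goes through the UV BIOS calls, the ia64 configurations go through SAL, and any other configuration fails the build with #error. A minimal sketch of that shape, with comments standing in for the real BIOS/SAL calls:

#include <linux/errno.h>

static int demo_change_memprotect(unsigned long phys_addr, unsigned long size)
{
#if defined CONFIG_X86_64
	/* would call the UV BIOS memory-protection service here */
	return 0;
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	/* would call the SN SAL memory-protection service here */
	return 0;
#else
	#error not a supported configuration
#endif
}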
diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
index 619208d61862..a5bd658c2e83 100644
--- a/drivers/misc/sgi-xp/xpc.h
+++ b/drivers/misc/sgi-xp/xpc.h
@@ -181,6 +181,18 @@ struct xpc_vars_part_sn2 {
181 xpc_nasid_mask_nlongs)) 181 xpc_nasid_mask_nlongs))
182 182
183/* 183/*
184 * Info pertinent to a GRU message queue using a watch list for irq generation.
185 */
186struct xpc_gru_mq_uv {
187 void *address; /* address of GRU message queue */
188 unsigned int order; /* size of GRU message queue as a power of 2 */
189 int irq; /* irq raised when message is received in mq */
190 int mmr_blade; /* blade where watchlist was allocated from */
191 unsigned long mmr_offset; /* offset of irq mmr located on mmr_blade */
192	int watchlist_num; /* watchlist number allocated by BIOS */
193};
194
195/*
184 * The activate_mq is used to send/receive GRU messages that affect XPC's 196 * The activate_mq is used to send/receive GRU messages that affect XPC's
185 * heartbeat, partition active state, and channel state. This is UV only. 197 * heartbeat, partition active state, and channel state. This is UV only.
186 */ 198 */
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
index 46325fc84811..e8d5cfbd32c2 100644
--- a/drivers/misc/sgi-xp/xpc_main.c
+++ b/drivers/misc/sgi-xp/xpc_main.c
@@ -1104,7 +1104,7 @@ xpc_do_exit(enum xp_retval reason)
1104 1104
1105 if (is_shub()) 1105 if (is_shub())
1106 xpc_exit_sn2(); 1106 xpc_exit_sn2();
1107 else 1107 else if (is_uv())
1108 xpc_exit_uv(); 1108 xpc_exit_uv();
1109} 1109}
1110 1110
@@ -1363,7 +1363,7 @@ out_2:
1363out_1: 1363out_1:
1364 if (is_shub()) 1364 if (is_shub())
1365 xpc_exit_sn2(); 1365 xpc_exit_sn2();
1366 else 1366 else if (is_uv())
1367 xpc_exit_uv(); 1367 xpc_exit_uv();
1368 return ret; 1368 return ret;
1369} 1369}
diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c
index b4882ccf6344..73b7fb8de47a 100644
--- a/drivers/misc/sgi-xp/xpc_sn2.c
+++ b/drivers/misc/sgi-xp/xpc_sn2.c
@@ -553,22 +553,17 @@ static u64 xpc_prot_vec_sn2[MAX_NUMNODES];
553static enum xp_retval 553static enum xp_retval
554xpc_allow_amo_ops_sn2(struct amo *amos_page) 554xpc_allow_amo_ops_sn2(struct amo *amos_page)
555{ 555{
556 u64 nasid_array = 0; 556 enum xp_retval ret = xpSuccess;
557 int ret;
558 557
559 /* 558 /*
560 * On SHUB 1.1, we cannot call sn_change_memprotect() since the BIST 559 * On SHUB 1.1, we cannot call sn_change_memprotect() since the BIST
561 * collides with memory operations. On those systems we call 560 * collides with memory operations. On those systems we call
562 * xpc_allow_amo_ops_shub_wars_1_1_sn2() instead. 561 * xpc_allow_amo_ops_shub_wars_1_1_sn2() instead.
563 */ 562 */
564 if (!enable_shub_wars_1_1()) { 563 if (!enable_shub_wars_1_1())
565 ret = sn_change_memprotect(ia64_tpa((u64)amos_page), PAGE_SIZE, 564 ret = xp_expand_memprotect(ia64_tpa((u64)amos_page), PAGE_SIZE);
566 SN_MEMPROT_ACCESS_CLASS_1, 565
567 &nasid_array); 566 return ret;
568 if (ret != 0)
569 return xpSalError;
570 }
571 return xpSuccess;
572} 567}
573 568
574/* 569/*
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index 1ac694c01623..91a55b1b1037 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -18,7 +18,15 @@
18#include <linux/interrupt.h> 18#include <linux/interrupt.h>
19#include <linux/delay.h> 19#include <linux/delay.h>
20#include <linux/device.h> 20#include <linux/device.h>
21#include <linux/err.h>
21#include <asm/uv/uv_hub.h> 22#include <asm/uv/uv_hub.h>
23#if defined CONFIG_X86_64
24#include <asm/uv/bios.h>
25#include <asm/uv/uv_irq.h>
26#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
27#include <asm/sn/intr.h>
28#include <asm/sn/sn_sal.h>
29#endif
22#include "../sgi-gru/gru.h" 30#include "../sgi-gru/gru.h"
23#include "../sgi-gru/grukservices.h" 31#include "../sgi-gru/grukservices.h"
24#include "xpc.h" 32#include "xpc.h"
@@ -27,15 +35,17 @@ static atomic64_t xpc_heartbeat_uv;
27static DECLARE_BITMAP(xpc_heartbeating_to_mask_uv, XP_MAX_NPARTITIONS_UV); 35static DECLARE_BITMAP(xpc_heartbeating_to_mask_uv, XP_MAX_NPARTITIONS_UV);
28 36
29#define XPC_ACTIVATE_MSG_SIZE_UV (1 * GRU_CACHE_LINE_BYTES) 37#define XPC_ACTIVATE_MSG_SIZE_UV (1 * GRU_CACHE_LINE_BYTES)
30#define XPC_NOTIFY_MSG_SIZE_UV (2 * GRU_CACHE_LINE_BYTES) 38#define XPC_ACTIVATE_MQ_SIZE_UV (4 * XP_MAX_NPARTITIONS_UV * \
39 XPC_ACTIVATE_MSG_SIZE_UV)
40#define XPC_ACTIVATE_IRQ_NAME "xpc_activate"
31 41
32#define XPC_ACTIVATE_MQ_SIZE_UV (4 * XP_MAX_NPARTITIONS_UV * \ 42#define XPC_NOTIFY_MSG_SIZE_UV (2 * GRU_CACHE_LINE_BYTES)
33 XPC_ACTIVATE_MSG_SIZE_UV) 43#define XPC_NOTIFY_MQ_SIZE_UV (4 * XP_MAX_NPARTITIONS_UV * \
34#define XPC_NOTIFY_MQ_SIZE_UV (4 * XP_MAX_NPARTITIONS_UV * \ 44 XPC_NOTIFY_MSG_SIZE_UV)
35 XPC_NOTIFY_MSG_SIZE_UV) 45#define XPC_NOTIFY_IRQ_NAME "xpc_notify"
36 46
37static void *xpc_activate_mq_uv; 47static struct xpc_gru_mq_uv *xpc_activate_mq_uv;
38static void *xpc_notify_mq_uv; 48static struct xpc_gru_mq_uv *xpc_notify_mq_uv;
39 49
40static int 50static int
41xpc_setup_partitions_sn_uv(void) 51xpc_setup_partitions_sn_uv(void)
@@ -52,62 +62,209 @@ xpc_setup_partitions_sn_uv(void)
52 return 0; 62 return 0;
53} 63}
54 64
55static void * 65static int
56xpc_create_gru_mq_uv(unsigned int mq_size, int cpuid, unsigned int irq, 66xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
67{
68#if defined CONFIG_X86_64
69 mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset);
70 if (mq->irq < 0) {
71 dev_err(xpc_part, "uv_setup_irq() returned error=%d\n",
72 mq->irq);
73 }
74
75#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
76 int mmr_pnode;
77 unsigned long mmr_value;
78
79 if (strcmp(irq_name, XPC_ACTIVATE_IRQ_NAME) == 0)
80 mq->irq = SGI_XPC_ACTIVATE;
81 else if (strcmp(irq_name, XPC_NOTIFY_IRQ_NAME) == 0)
82 mq->irq = SGI_XPC_NOTIFY;
83 else
84 return -EINVAL;
85
86 mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
87 mmr_value = (unsigned long)cpu_physical_id(cpu) << 32 | mq->irq;
88
89 uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value);
90#else
91 #error not a supported configuration
92#endif
93
94 return 0;
95}
96
97static void
98xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq)
99{
100#if defined CONFIG_X86_64
101 uv_teardown_irq(mq->irq, mq->mmr_blade, mq->mmr_offset);
102
103#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
104 int mmr_pnode;
105 unsigned long mmr_value;
106
107 mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
108 mmr_value = 1UL << 16;
109
110 uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value);
111#else
112 #error not a supported configuration
113#endif
114}
115
116static int
117xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq)
118{
119 int ret;
120
121#if defined CONFIG_X86_64
122 ret = uv_bios_mq_watchlist_alloc(mq->mmr_blade, uv_gpa(mq->address),
123 mq->order, &mq->mmr_offset);
124 if (ret < 0) {
125 dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, "
126 "ret=%d\n", ret);
127 return ret;
128 }
129#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
130 ret = sn_mq_watchlist_alloc(mq->mmr_blade, uv_gpa(mq->address),
131 mq->order, &mq->mmr_offset);
132 if (ret < 0) {
133 dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n",
134 ret);
135 return -EBUSY;
136 }
137#else
138 #error not a supported configuration
139#endif
140
141 mq->watchlist_num = ret;
142 return 0;
143}
144
145static void
146xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq)
147{
148 int ret;
149
150#if defined CONFIG_X86_64
151 ret = uv_bios_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num);
152 BUG_ON(ret != BIOS_STATUS_SUCCESS);
153#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
154 ret = sn_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num);
155 BUG_ON(ret != SALRET_OK);
156#else
157 #error not a supported configuration
158#endif
159}
160
161static struct xpc_gru_mq_uv *
162xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
57 irq_handler_t irq_handler) 163 irq_handler_t irq_handler)
58{ 164{
165 enum xp_retval xp_ret;
59 int ret; 166 int ret;
60 int nid; 167 int nid;
61 int mq_order; 168 int pg_order;
62 struct page *page; 169 struct page *page;
63 void *mq; 170 struct xpc_gru_mq_uv *mq;
171
172 mq = kmalloc(sizeof(struct xpc_gru_mq_uv), GFP_KERNEL);
173 if (mq == NULL) {
174 dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
175 "a xpc_gru_mq_uv structure\n");
176 ret = -ENOMEM;
177 goto out_1;
178 }
179
180 pg_order = get_order(mq_size);
181 mq->order = pg_order + PAGE_SHIFT;
182 mq_size = 1UL << mq->order;
183
184 mq->mmr_blade = uv_cpu_to_blade_id(cpu);
64 185
65 nid = cpu_to_node(cpuid); 186 nid = cpu_to_node(cpu);
66 mq_order = get_order(mq_size);
67 page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, 187 page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
68 mq_order); 188 pg_order);
69 if (page == NULL) { 189 if (page == NULL) {
70 dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d " 190 dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
71 "bytes of memory on nid=%d for GRU mq\n", mq_size, nid); 191 "bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
72 return NULL; 192 ret = -ENOMEM;
193 goto out_2;
73 } 194 }
195 mq->address = page_address(page);
74 196
75 mq = page_address(page); 197 ret = gru_create_message_queue(mq->address, mq_size);
76 ret = gru_create_message_queue(mq, mq_size);
77 if (ret != 0) { 198 if (ret != 0) {
78 dev_err(xpc_part, "gru_create_message_queue() returned " 199 dev_err(xpc_part, "gru_create_message_queue() returned "
79 "error=%d\n", ret); 200 "error=%d\n", ret);
80 free_pages((unsigned long)mq, mq_order); 201 ret = -EINVAL;
81 return NULL; 202 goto out_3;
82 } 203 }
83 204
84 /* !!! Need to do some other things to set up IRQ */ 205 /* enable generation of irq when GRU mq operation occurs to this mq */
206 ret = xpc_gru_mq_watchlist_alloc_uv(mq);
207 if (ret != 0)
208 goto out_3;
85 209
86 ret = request_irq(irq, irq_handler, 0, "xpc", NULL); 210 ret = xpc_get_gru_mq_irq_uv(mq, cpu, irq_name);
211 if (ret != 0)
212 goto out_4;
213
214 ret = request_irq(mq->irq, irq_handler, 0, irq_name, NULL);
87 if (ret != 0) { 215 if (ret != 0) {
88 dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n", 216 dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n",
89 irq, ret); 217 mq->irq, ret);
90 free_pages((unsigned long)mq, mq_order); 218 goto out_5;
91 return NULL;
92 } 219 }
93 220
94 /* !!! enable generation of irq when GRU mq op occurs to this mq */ 221 /* allow other partitions to access this GRU mq */
95 222 xp_ret = xp_expand_memprotect(xp_pa(mq->address), mq_size);
96 /* ??? allow other partitions to access GRU mq? */ 223 if (xp_ret != xpSuccess) {
224 ret = -EACCES;
225 goto out_6;
226 }
97 227
98 return mq; 228 return mq;
229
230 /* something went wrong */
231out_6:
232 free_irq(mq->irq, NULL);
233out_5:
234 xpc_release_gru_mq_irq_uv(mq);
235out_4:
236 xpc_gru_mq_watchlist_free_uv(mq);
237out_3:
238 free_pages((unsigned long)mq->address, pg_order);
239out_2:
240 kfree(mq);
241out_1:
242 return ERR_PTR(ret);
99} 243}
100 244
101static void 245static void
102xpc_destroy_gru_mq_uv(void *mq, unsigned int mq_size, unsigned int irq) 246xpc_destroy_gru_mq_uv(struct xpc_gru_mq_uv *mq)
103{ 247{
104 /* ??? disallow other partitions to access GRU mq? */ 248 unsigned int mq_size;
249 int pg_order;
250 int ret;
251
252 /* disallow other partitions to access GRU mq */
253 mq_size = 1UL << mq->order;
254 ret = xp_restrict_memprotect(xp_pa(mq->address), mq_size);
255 BUG_ON(ret != xpSuccess);
105 256
106 /* !!! disable generation of irq when GRU mq op occurs to this mq */ 257 /* unregister irq handler and release mq irq/vector mapping */
258 free_irq(mq->irq, NULL);
259 xpc_release_gru_mq_irq_uv(mq);
107 260
108 free_irq(irq, NULL); 261 /* disable generation of irq when GRU mq op occurs to this mq */
262 xpc_gru_mq_watchlist_free_uv(mq);
109 263
110 free_pages((unsigned long)mq, get_order(mq_size)); 264 pg_order = mq->order - PAGE_SHIFT;
265 free_pages((unsigned long)mq->address, pg_order);
266
267 kfree(mq);
111} 268}
112 269
113static enum xp_retval 270static enum xp_retval
@@ -402,7 +559,10 @@ xpc_handle_activate_IRQ_uv(int irq, void *dev_id)
402 struct xpc_partition *part; 559 struct xpc_partition *part;
403 int wakeup_hb_checker = 0; 560 int wakeup_hb_checker = 0;
404 561
405 while ((msg_hdr = gru_get_next_message(xpc_activate_mq_uv)) != NULL) { 562 while (1) {
563 msg_hdr = gru_get_next_message(xpc_activate_mq_uv->address);
564 if (msg_hdr == NULL)
565 break;
406 566
407 partid = msg_hdr->partid; 567 partid = msg_hdr->partid;
408 if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) { 568 if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
@@ -418,7 +578,7 @@ xpc_handle_activate_IRQ_uv(int irq, void *dev_id)
418 } 578 }
419 } 579 }
420 580
421 gru_free_message(xpc_activate_mq_uv, msg_hdr); 581 gru_free_message(xpc_activate_mq_uv->address, msg_hdr);
422 } 582 }
423 583
424 if (wakeup_hb_checker) 584 if (wakeup_hb_checker)
@@ -482,7 +642,7 @@ xpc_send_local_activate_IRQ_uv(struct xpc_partition *part, int act_state_req)
482 struct xpc_partition_uv *part_uv = &part->sn.uv; 642 struct xpc_partition_uv *part_uv = &part->sn.uv;
483 643
484 /* 644 /*
485 * !!! Make our side think that the remote parition sent an activate 645 * !!! Make our side think that the remote partition sent an activate
486 * !!! message our way by doing what the activate IRQ handler would 646 * !!! message our way by doing what the activate IRQ handler would
487 * !!! do had one really been sent. 647 * !!! do had one really been sent.
488 */ 648 */
@@ -500,14 +660,39 @@ static enum xp_retval
500xpc_get_partition_rsvd_page_pa_uv(void *buf, u64 *cookie, unsigned long *rp_pa, 660xpc_get_partition_rsvd_page_pa_uv(void *buf, u64 *cookie, unsigned long *rp_pa,
501 size_t *len) 661 size_t *len)
502{ 662{
503 /* !!! call the UV version of sn_partition_reserved_page_pa() */ 663 s64 status;
504 return xpUnsupported; 664 enum xp_retval ret;
665
666#if defined CONFIG_X86_64
667 status = uv_bios_reserved_page_pa((u64)buf, cookie, (u64 *)rp_pa,
668 (u64 *)len);
669 if (status == BIOS_STATUS_SUCCESS)
670 ret = xpSuccess;
671 else if (status == BIOS_STATUS_MORE_PASSES)
672 ret = xpNeedMoreInfo;
673 else
674 ret = xpBiosError;
675
676#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
677 status = sn_partition_reserved_page_pa((u64)buf, cookie, rp_pa, len);
678 if (status == SALRET_OK)
679 ret = xpSuccess;
680 else if (status == SALRET_MORE_PASSES)
681 ret = xpNeedMoreInfo;
682 else
683 ret = xpSalError;
684
685#else
686 #error not a supported configuration
687#endif
688
689 return ret;
505} 690}
506 691
507static int 692static int
508xpc_setup_rsvd_page_sn_uv(struct xpc_rsvd_page *rp) 693xpc_setup_rsvd_page_sn_uv(struct xpc_rsvd_page *rp)
509{ 694{
510 rp->sn.activate_mq_gpa = uv_gpa(xpc_activate_mq_uv); 695 rp->sn.activate_mq_gpa = uv_gpa(xpc_activate_mq_uv->address);
511 return 0; 696 return 0;
512} 697}
513 698
@@ -1411,22 +1596,18 @@ xpc_init_uv(void)
1411 return -E2BIG; 1596 return -E2BIG;
1412 } 1597 }
1413 1598
1414 /* ??? The cpuid argument's value is 0, is that what we want? */ 1599 xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0,
1415 /* !!! The irq argument's value isn't correct. */ 1600 XPC_ACTIVATE_IRQ_NAME,
1416 xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0, 0,
1417 xpc_handle_activate_IRQ_uv); 1601 xpc_handle_activate_IRQ_uv);
1418 if (xpc_activate_mq_uv == NULL) 1602 if (IS_ERR(xpc_activate_mq_uv))
1419 return -ENOMEM; 1603 return PTR_ERR(xpc_activate_mq_uv);
1420 1604
1421 /* ??? The cpuid argument's value is 0, is that what we want? */ 1605 xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0,
1422 /* !!! The irq argument's value isn't correct. */ 1606 XPC_NOTIFY_IRQ_NAME,
1423 xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0, 0,
1424 xpc_handle_notify_IRQ_uv); 1607 xpc_handle_notify_IRQ_uv);
1425 if (xpc_notify_mq_uv == NULL) { 1608 if (IS_ERR(xpc_notify_mq_uv)) {
1426 /* !!! The irq argument's value isn't correct. */ 1609 xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
1427 xpc_destroy_gru_mq_uv(xpc_activate_mq_uv, 1610 return PTR_ERR(xpc_notify_mq_uv);
1428 XPC_ACTIVATE_MQ_SIZE_UV, 0);
1429 return -ENOMEM;
1430 } 1611 }
1431 1612
1432 return 0; 1613 return 0;
@@ -1435,9 +1616,6 @@ xpc_init_uv(void)
1435void 1616void
1436xpc_exit_uv(void) 1617xpc_exit_uv(void)
1437{ 1618{
1438 /* !!! The irq argument's value isn't correct. */ 1619 xpc_destroy_gru_mq_uv(xpc_notify_mq_uv);
1439 xpc_destroy_gru_mq_uv(xpc_notify_mq_uv, XPC_NOTIFY_MQ_SIZE_UV, 0); 1620 xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
1440
1441 /* !!! The irq argument's value isn't correct. */
1442 xpc_destroy_gru_mq_uv(xpc_activate_mq_uv, XPC_ACTIVATE_MQ_SIZE_UV, 0);
1443} 1621}
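xpc_create_gru_mq_uv() now reports the reason for a failure by returning ERR_PTR(-errno) instead of a bare NULL, and xpc_init_uv() recovers the code with PTR_ERR() after an IS_ERR() check. A minimal sketch of that convention, with a hypothetical demo_obj type:

#include <linux/err.h>
#include <linux/slab.h>

struct demo_obj {
	int id;
};

static struct demo_obj *demo_create(void)
{
	struct demo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return ERR_PTR(-ENOMEM);	/* encode the errno in the pointer */
	return obj;
}

static int demo_init(void)
{
	struct demo_obj *obj = demo_create();

	if (IS_ERR(obj))
		return PTR_ERR(obj);		/* hand the errno to the caller */

	kfree(obj);
	return 0;
}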
diff --git a/drivers/misc/sony-laptop.c b/drivers/misc/sony-laptop.c
index 60775be22822..571b211608d1 100644
--- a/drivers/misc/sony-laptop.c
+++ b/drivers/misc/sony-laptop.c
@@ -970,7 +970,7 @@ static int sony_nc_resume(struct acpi_device *device)
970 /* set the last requested brightness level */ 970 /* set the last requested brightness level */
971 if (sony_backlight_device && 971 if (sony_backlight_device &&
972 !sony_backlight_update_status(sony_backlight_device)) 972 !sony_backlight_update_status(sony_backlight_device))
973 printk(KERN_WARNING DRV_PFX "unable to restore brightness level"); 973 printk(KERN_WARNING DRV_PFX "unable to restore brightness level\n");
974 974
975 /* re-initialize models with specific requirements */ 975 /* re-initialize models with specific requirements */
976 dmi_check_system(sony_nc_ids); 976 dmi_check_system(sony_nc_ids);
@@ -1038,7 +1038,11 @@ static int sony_nc_add(struct acpi_device *device)
1038 goto outinput; 1038 goto outinput;
1039 } 1039 }
1040 1040
1041 if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "GBRT", &handle))) { 1041 if (acpi_video_backlight_support()) {
1042 printk(KERN_INFO DRV_PFX "brightness ignored, must be "
1043 "controlled by ACPI video driver\n");
1044 } else if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "GBRT",
1045 &handle))) {
1042 sony_backlight_device = backlight_device_register("sony", NULL, 1046 sony_backlight_device = backlight_device_register("sony", NULL,
1043 NULL, 1047 NULL,
1044 &sony_backlight_ops); 1048 &sony_backlight_ops);
@@ -1920,7 +1924,6 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
1920 1924
1921static int sonypi_misc_release(struct inode *inode, struct file *file) 1925static int sonypi_misc_release(struct inode *inode, struct file *file)
1922{ 1926{
1923 sonypi_misc_fasync(-1, file, 0);
1924 atomic_dec(&sonypi_compat.open_count); 1927 atomic_dec(&sonypi_compat.open_count);
1925 return 0; 1928 return 0;
1926} 1929}
@@ -2315,8 +2318,10 @@ end:
2315 */ 2318 */
2316static int sony_pic_disable(struct acpi_device *device) 2319static int sony_pic_disable(struct acpi_device *device)
2317{ 2320{
2318 if (ACPI_FAILURE(acpi_evaluate_object(device->handle, 2321 acpi_status ret = acpi_evaluate_object(device->handle, "_DIS", NULL,
2319 "_DIS", NULL, NULL))) 2322 NULL);
2323
2324 if (ACPI_FAILURE(ret) && ret != AE_NOT_FOUND)
2320 return -ENXIO; 2325 return -ENXIO;
2321 2326
2322 dprintk("Device disabled\n"); 2327 dprintk("Device disabled\n");
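sony_pic_disable() still checks the evaluation status but now tolerates AE_NOT_FOUND, since a machine without a _DIS control method simply has nothing to disable. A minimal sketch of calling an optional ACPI method that way, with a hypothetical handle:

#include <linux/errno.h>
#include <linux/acpi.h>

static int demo_call_optional_dis(acpi_handle handle)
{
	acpi_status status;

	/* _DIS may legitimately be absent from this device's namespace */
	status = acpi_evaluate_object(handle, "_DIS", NULL, NULL);
	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND)
		return -ENXIO;	/* a real evaluation error */

	return 0;		/* evaluated, or nothing to do */
}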
diff --git a/drivers/misc/thinkpad_acpi.c b/drivers/misc/thinkpad_acpi.c
index 6b9300779a43..899766e16fa8 100644
--- a/drivers/misc/thinkpad_acpi.c
+++ b/drivers/misc/thinkpad_acpi.c
@@ -159,7 +159,6 @@ enum {
159#define TPACPI_DEBUG KERN_DEBUG TPACPI_LOG 159#define TPACPI_DEBUG KERN_DEBUG TPACPI_LOG
160 160
161#define TPACPI_DBG_ALL 0xffff 161#define TPACPI_DBG_ALL 0xffff
162#define TPACPI_DBG_ALL 0xffff
163#define TPACPI_DBG_INIT 0x0001 162#define TPACPI_DBG_INIT 0x0001
164#define TPACPI_DBG_EXIT 0x0002 163#define TPACPI_DBG_EXIT 0x0002
165#define dbg_printk(a_dbg_level, format, arg...) \ 164#define dbg_printk(a_dbg_level, format, arg...) \
@@ -543,7 +542,7 @@ static int __init setup_acpi_notify(struct ibm_struct *ibm)
543 return -ENODEV; 542 return -ENODEV;
544 } 543 }
545 544
546 acpi_driver_data(ibm->acpi->device) = ibm; 545 ibm->acpi->device->driver_data = ibm;
547 sprintf(acpi_device_class(ibm->acpi->device), "%s/%s", 546 sprintf(acpi_device_class(ibm->acpi->device), "%s/%s",
548 TPACPI_ACPI_EVENT_PREFIX, 547 TPACPI_ACPI_EVENT_PREFIX,
549 ibm->name); 548 ibm->name);
@@ -582,7 +581,8 @@ static int __init register_tpacpi_subdriver(struct ibm_struct *ibm)
582 581
583 ibm->acpi->driver = kzalloc(sizeof(struct acpi_driver), GFP_KERNEL); 582 ibm->acpi->driver = kzalloc(sizeof(struct acpi_driver), GFP_KERNEL);
584 if (!ibm->acpi->driver) { 583 if (!ibm->acpi->driver) {
585 printk(TPACPI_ERR "kzalloc(ibm->driver) failed\n"); 584 printk(TPACPI_ERR
585 "failed to allocate memory for ibm->acpi->driver\n");
586 return -ENOMEM; 586 return -ENOMEM;
587 } 587 }
588 588
@@ -838,6 +838,13 @@ static int parse_strtoul(const char *buf,
838 return 0; 838 return 0;
839} 839}
840 840
841static void tpacpi_disable_brightness_delay(void)
842{
843 if (acpi_evalf(hkey_handle, NULL, "PWMS", "qvd", 0))
844 printk(TPACPI_NOTICE
845 "ACPI backlight control delay disabled\n");
846}
847
841static int __init tpacpi_query_bcl_levels(acpi_handle handle) 848static int __init tpacpi_query_bcl_levels(acpi_handle handle)
842{ 849{
843 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 850 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -2139,6 +2146,8 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
2139 if (!tp_features.hotkey) 2146 if (!tp_features.hotkey)
2140 return 1; 2147 return 1;
2141 2148
2149 tpacpi_disable_brightness_delay();
2150
2142 hotkey_dev_attributes = create_attr_set(13, NULL); 2151 hotkey_dev_attributes = create_attr_set(13, NULL);
2143 if (!hotkey_dev_attributes) 2152 if (!hotkey_dev_attributes)
2144 return -ENOMEM; 2153 return -ENOMEM;
@@ -2512,6 +2521,8 @@ static void hotkey_suspend(pm_message_t state)
2512 2521
2513static void hotkey_resume(void) 2522static void hotkey_resume(void)
2514{ 2523{
2524 tpacpi_disable_brightness_delay();
2525
2515 if (hotkey_mask_get()) 2526 if (hotkey_mask_get())
2516 printk(TPACPI_ERR 2527 printk(TPACPI_ERR
2517 "error while trying to read hot key mask " 2528 "error while trying to read hot key mask "
@@ -4921,16 +4932,25 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
4921 */ 4932 */
4922 b = tpacpi_check_std_acpi_brightness_support(); 4933 b = tpacpi_check_std_acpi_brightness_support();
4923 if (b > 0) { 4934 if (b > 0) {
4924 if (thinkpad_id.vendor == PCI_VENDOR_ID_LENOVO) { 4935
4925 printk(TPACPI_NOTICE 4936 if (acpi_video_backlight_support()) {
4926 "Lenovo BIOS switched to ACPI backlight " 4937 if (brightness_enable > 1) {
4927 "control mode\n"); 4938 printk(TPACPI_NOTICE
4928 } 4939 "Standard ACPI backlight interface "
4929 if (brightness_enable > 1) { 4940 "available, not loading native one.\n");
4930 printk(TPACPI_NOTICE 4941 return 1;
4931 "standard ACPI backlight interface " 4942 } else if (brightness_enable == 1) {
4932 "available, not loading native one...\n"); 4943 printk(TPACPI_NOTICE
4933 return 1; 4944 "Backlight control force enabled, even if standard "
4945 "ACPI backlight interface is available\n");
4946 }
4947 } else {
4948 if (brightness_enable > 1) {
4949 printk(TPACPI_NOTICE
4950 "Standard ACPI backlight interface not "
4951 "available, thinkpad_acpi native "
4952 "brightness control enabled\n");
4953 }
4934 } 4954 }
4935 } 4955 }
4936 4956
@@ -5298,6 +5318,7 @@ static enum fan_control_commands fan_control_commands;
5298 5318
5299static u8 fan_control_initial_status; 5319static u8 fan_control_initial_status;
5300static u8 fan_control_desired_level; 5320static u8 fan_control_desired_level;
5321static u8 fan_control_resume_level;
5301static int fan_watchdog_maxinterval; 5322static int fan_watchdog_maxinterval;
5302 5323
5303static struct mutex fan_mutex; 5324static struct mutex fan_mutex;
@@ -5420,8 +5441,8 @@ static int fan_set_level(int level)
5420 5441
5421 case TPACPI_FAN_WR_ACPI_FANS: 5442 case TPACPI_FAN_WR_ACPI_FANS:
5422 case TPACPI_FAN_WR_TPEC: 5443 case TPACPI_FAN_WR_TPEC:
5423 if ((level != TP_EC_FAN_AUTO) && 5444 if (!(level & TP_EC_FAN_AUTO) &&
5424 (level != TP_EC_FAN_FULLSPEED) && 5445 !(level & TP_EC_FAN_FULLSPEED) &&
5425 ((level < 0) || (level > 7))) 5446 ((level < 0) || (level > 7)))
5426 return -EINVAL; 5447 return -EINVAL;
5427 5448
@@ -5983,6 +6004,84 @@ static void fan_exit(void)
5983 flush_workqueue(tpacpi_wq); 6004 flush_workqueue(tpacpi_wq);
5984} 6005}
5985 6006
6007static void fan_suspend(pm_message_t state)
6008{
6009 int rc;
6010
6011 if (!fan_control_allowed)
6012 return;
6013
6014 /* Store fan status in cache */
6015 fan_control_resume_level = 0;
6016 rc = fan_get_status_safe(&fan_control_resume_level);
6017 if (rc < 0)
6018 printk(TPACPI_NOTICE
6019 "failed to read fan level for later "
6020 "restore during resume: %d\n", rc);
6021
6022 /* if it is undefined, don't attempt to restore it.
6023 * KEEP THIS LAST */
6024 if (tp_features.fan_ctrl_status_undef)
6025 fan_control_resume_level = 0;
6026}
6027
6028static void fan_resume(void)
6029{
6030 u8 current_level = 7;
6031 bool do_set = false;
6032 int rc;
6033
6034 /* DSDT *always* updates status on resume */
6035 tp_features.fan_ctrl_status_undef = 0;
6036
6037 if (!fan_control_allowed ||
6038 !fan_control_resume_level ||
6039 (fan_get_status_safe(&current_level) < 0))
6040 return;
6041
6042 switch (fan_control_access_mode) {
6043 case TPACPI_FAN_WR_ACPI_SFAN:
6044 /* never decrease fan level */
6045 do_set = (fan_control_resume_level > current_level);
6046 break;
6047 case TPACPI_FAN_WR_ACPI_FANS:
6048 case TPACPI_FAN_WR_TPEC:
6049 /* never decrease fan level, scale is:
6050 * TP_EC_FAN_FULLSPEED > 7 >= TP_EC_FAN_AUTO
6051 *
6052 * We expect the firmware to set either 7 or AUTO, but we
6053 * handle FULLSPEED out of paranoia.
6054 *
6055 * So, we can safely only restore FULLSPEED or 7, anything
6056 * else could slow the fan. Restoring AUTO is useless, at
6057 * best that's exactly what the DSDT already set (it is the
6058	 * slowest level it uses).
6059 *
6060 * Always keep in mind that the DSDT *will* have set the
6061 * fans to what the vendor supposes is the best level. We
6062 * muck with it only to speed the fan up.
6063 */
6064 if (fan_control_resume_level != 7 &&
6065 !(fan_control_resume_level & TP_EC_FAN_FULLSPEED))
6066 return;
6067 else
6068 do_set = !(current_level & TP_EC_FAN_FULLSPEED) &&
6069 (current_level != fan_control_resume_level);
6070 break;
6071 default:
6072 return;
6073 }
6074 if (do_set) {
6075 printk(TPACPI_NOTICE
6076 "restoring fan level to 0x%02x\n",
6077 fan_control_resume_level);
6078 rc = fan_set_level_safe(fan_control_resume_level);
6079 if (rc < 0)
6080 printk(TPACPI_NOTICE
6081 "failed to restore fan level: %d\n", rc);
6082 }
6083}
6084
5986static int fan_read(char *p) 6085static int fan_read(char *p)
5987{ 6086{
5988 int len = 0; 6087 int len = 0;
@@ -6174,6 +6273,8 @@ static struct ibm_struct fan_driver_data = {
6174 .read = fan_read, 6273 .read = fan_read,
6175 .write = fan_write, 6274 .write = fan_write,
6176 .exit = fan_exit, 6275 .exit = fan_exit,
6276 .suspend = fan_suspend,
6277 .resume = fan_resume,
6177}; 6278};
6178 6279
6179/**************************************************************************** 6280/****************************************************************************
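The fan_set_level() hunk changes the validity test from equality against TP_EC_FAN_AUTO / TP_EC_FAN_FULLSPEED to bitwise tests, treating those values as flag bits that may be combined with a numeric level rather than as standalone levels. A minimal sketch of validating such a value; the flag values below are examples, not the driver's real constants:

#include <linux/errno.h>

#define DEMO_FAN_FULLSPEED	0x40	/* example flag bit */
#define DEMO_FAN_AUTO		0x80	/* example flag bit */

static int demo_validate_fan_level(int level)
{
	/* accept either flag bit, or a plain numeric level 0..7 */
	if (!(level & DEMO_FAN_AUTO) &&
	    !(level & DEMO_FAN_FULLSPEED) &&
	    (level < 0 || level > 7))
		return -EINVAL;

	return 0;
}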
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 24c97d3d16bb..3d067c35185d 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -92,18 +92,17 @@ static void mmc_blk_put(struct mmc_blk_data *md)
92 mutex_unlock(&open_lock); 92 mutex_unlock(&open_lock);
93} 93}
94 94
95static int mmc_blk_open(struct inode *inode, struct file *filp) 95static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
96{ 96{
97 struct mmc_blk_data *md; 97 struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
98 int ret = -ENXIO; 98 int ret = -ENXIO;
99 99
100 md = mmc_blk_get(inode->i_bdev->bd_disk);
101 if (md) { 100 if (md) {
102 if (md->usage == 2) 101 if (md->usage == 2)
103 check_disk_change(inode->i_bdev); 102 check_disk_change(bdev);
104 ret = 0; 103 ret = 0;
105 104
106 if ((filp->f_mode & FMODE_WRITE) && md->read_only) { 105 if ((mode & FMODE_WRITE) && md->read_only) {
107 mmc_blk_put(md); 106 mmc_blk_put(md);
108 ret = -EROFS; 107 ret = -EROFS;
109 } 108 }
@@ -112,9 +111,9 @@ static int mmc_blk_open(struct inode *inode, struct file *filp)
112 return ret; 111 return ret;
113} 112}
114 113
115static int mmc_blk_release(struct inode *inode, struct file *filp) 114static int mmc_blk_release(struct gendisk *disk, fmode_t mode)
116{ 115{
117 struct mmc_blk_data *md = inode->i_bdev->bd_disk->private_data; 116 struct mmc_blk_data *md = disk->private_data;
118 117
119 mmc_blk_put(md); 118 mmc_blk_put(md);
120 return 0; 119 return 0;
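mmc_blk_open() and mmc_blk_release() are converted to the newer block-layer prototypes, which pass a struct block_device plus an fmode_t rather than inode/file pairs. A minimal sketch of open/release handlers in that style; the demo_* names are hypothetical and the prototypes follow the signatures used in the hunk above:

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>

static bool demo_read_only;

static int demo_open(struct block_device *bdev, fmode_t mode)
{
	/* the disk's private_data usually carries the driver context */
	void *ctx = bdev->bd_disk->private_data;

	if (!ctx)
		return -ENXIO;
	if ((mode & FMODE_WRITE) && demo_read_only)
		return -EROFS;

	return 0;
}

static int demo_release(struct gendisk *disk, fmode_t mode)
{
	/* drop whatever reference demo_open() took */
	return 0;
}

static struct block_device_operations demo_ops = {
	.owner	 = THIS_MODULE,
	.open	 = demo_open,
	.release = demo_release,
};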
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 0d9b2d6f9ebf..f210a8ee6861 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -216,8 +216,7 @@ int mmc_add_card(struct mmc_card *card)
216 int ret; 216 int ret;
217 const char *type; 217 const char *type;
218 218
219 snprintf(card->dev.bus_id, sizeof(card->dev.bus_id), 219 dev_set_name(&card->dev, "%s:%04x", mmc_hostname(card->host), card->rca);
220 "%s:%04x", mmc_hostname(card->host), card->rca);
221 220
222 switch (card->type) { 221 switch (card->type) {
223 case MMC_TYPE_MMC: 222 case MMC_TYPE_MMC:
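mmc_add_card() (like the host, SDIO, and SDHCI hunks that follow) stops writing dev.bus_id directly and instead uses dev_set_name(), letting the driver core own the device name; dev_name() is the matching accessor used wherever the name is read back. A minimal sketch of the pair, with a hypothetical device and values:

#include <linux/device.h>
#include <linux/kernel.h>

static int demo_name_device(struct device *dev, int host_index, unsigned int rca)
{
	int ret;

	/* the driver core stores and manages the formatted name */
	ret = dev_set_name(dev, "demo%d:%04x", host_index, rca);
	if (ret)
		return ret;

	pr_info("registered %s\n", dev_name(dev));
	return 0;
}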
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 044d84eeed7c..f7284b905eb3 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -280,7 +280,11 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
280 (card->host->ios.clock / 1000); 280 (card->host->ios.clock / 1000);
281 281
282 if (data->flags & MMC_DATA_WRITE) 282 if (data->flags & MMC_DATA_WRITE)
283 limit_us = 250000; 283 /*
284 * The limit is really 250 ms, but that is
285 * insufficient for some crappy cards.
286 */
287 limit_us = 300000;
284 else 288 else
285 limit_us = 100000; 289 limit_us = 100000;
286 290
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 6da80fd4d974..5e945e64ead7 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -73,8 +73,7 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
73 if (err) 73 if (err)
74 goto free; 74 goto free;
75 75
76 snprintf(host->class_dev.bus_id, BUS_ID_SIZE, 76 dev_set_name(&host->class_dev, "mmc%d", host->index);
77 "mmc%d", host->index);
78 77
79 host->parent = dev; 78 host->parent = dev;
80 host->class_dev.parent = dev; 79 host->class_dev.parent = dev;
@@ -121,7 +120,7 @@ int mmc_add_host(struct mmc_host *host)
121 WARN_ON((host->caps & MMC_CAP_SDIO_IRQ) && 120 WARN_ON((host->caps & MMC_CAP_SDIO_IRQ) &&
122 !host->ops->enable_sdio_irq); 121 !host->ops->enable_sdio_irq);
123 122
124 led_trigger_register_simple(host->class_dev.bus_id, &host->led); 123 led_trigger_register_simple(dev_name(&host->class_dev), &host->led);
125 124
126 err = device_add(&host->class_dev); 125 err = device_add(&host->class_dev);
127 if (err) 126 if (err)
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index 233d0f9b3c4b..46284b527397 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -239,8 +239,7 @@ int sdio_add_func(struct sdio_func *func)
239{ 239{
240 int ret; 240 int ret;
241 241
242 snprintf(func->dev.bus_id, sizeof(func->dev.bus_id), 242 dev_set_name(&func->dev, "%s:%d", mmc_card_id(func->card), func->num);
243 "%s:%d", mmc_card_id(func->card), func->num);
244 243
245 ret = device_add(&func->dev); 244 ret = device_add(&func->dev);
246 if (ret == 0) 245 if (ret == 0)
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 07faf5412a1f..ad00e1632317 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1348,7 +1348,7 @@ static int mmc_spi_probe(struct spi_device *spi)
1348 goto fail_add_host; 1348 goto fail_add_host;
1349 1349
1350 dev_info(&spi->dev, "SD/MMC host %s%s%s%s%s\n", 1350 dev_info(&spi->dev, "SD/MMC host %s%s%s%s%s\n",
1351 mmc->class_dev.bus_id, 1351 dev_name(&mmc->class_dev),
1352 host->dma_dev ? "" : ", no DMA", 1352 host->dma_dev ? "" : ", no DMA",
1353 (host->pdata && host->pdata->get_ro) 1353 (host->pdata && host->pdata->get_ro)
1354 ? "" : ", no WP", 1354 ? "" : ", no WP",
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 696cf3647ceb..2fadf323c696 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -391,6 +391,7 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
391static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq) 391static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
392{ 392{
393 struct mmci_host *host = mmc_priv(mmc); 393 struct mmci_host *host = mmc_priv(mmc);
394 unsigned long flags;
394 395
395 WARN_ON(host->mrq != NULL); 396 WARN_ON(host->mrq != NULL);
396 397
@@ -402,7 +403,7 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
402 return; 403 return;
403 } 404 }
404 405
405 spin_lock_irq(&host->lock); 406 spin_lock_irqsave(&host->lock, flags);
406 407
407 host->mrq = mrq; 408 host->mrq = mrq;
408 409
@@ -411,7 +412,7 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
411 412
412 mmci_start_command(host, mrq->cmd, 0); 413 mmci_start_command(host, mrq->cmd, 0);
413 414
414 spin_unlock_irq(&host->lock); 415 spin_unlock_irqrestore(&host->lock, flags);
415} 416}
416 417
417static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 418static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
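mmci_request() switches from spin_lock_irq() to spin_lock_irqsave(), which records the caller's interrupt state instead of assuming interrupts were enabled, so unlocking restores exactly the state the caller had. A minimal sketch of the pattern, with a hypothetical lock and counter:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);
static unsigned long demo_count;

static void demo_update(void)
{
	unsigned long flags;

	/* save the current IRQ state rather than unconditionally
	 * re-enabling interrupts on unlock */
	spin_lock_irqsave(&demo_lock, flags);
	demo_count++;
	spin_unlock_irqrestore(&demo_lock, flags);
}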
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 30f64b1f2354..4d010a984bed 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1733,7 +1733,7 @@ int sdhci_add_host(struct sdhci_host *host)
1733 mmc_add_host(mmc); 1733 mmc_add_host(mmc);
1734 1734
1735 printk(KERN_INFO "%s: SDHCI controller on %s [%s] using %s%s\n", 1735 printk(KERN_INFO "%s: SDHCI controller on %s [%s] using %s%s\n",
1736 mmc_hostname(mmc), host->hw_name, mmc_dev(mmc)->bus_id, 1736 mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
1737 (host->flags & SDHCI_USE_ADMA)?"A":"", 1737 (host->flags & SDHCI_USE_ADMA)?"A":"",
1738 (host->flags & SDHCI_USE_DMA)?"DMA":"PIO"); 1738 (host->flags & SDHCI_USE_DMA)?"DMA":"PIO");
1739 1739
diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c
index 13844843e8de..82554ddec6b3 100644
--- a/drivers/mmc/host/tifm_sd.c
+++ b/drivers/mmc/host/tifm_sd.c
@@ -632,7 +632,7 @@ static void tifm_sd_request(struct mmc_host *mmc, struct mmc_request *mrq)
632 632
633 if (host->req) { 633 if (host->req) {
634 printk(KERN_ERR "%s : unfinished request detected\n", 634 printk(KERN_ERR "%s : unfinished request detected\n",
635 sock->dev.bus_id); 635 dev_name(&sock->dev));
636 mrq->cmd->error = -ETIMEDOUT; 636 mrq->cmd->error = -ETIMEDOUT;
637 goto err_out; 637 goto err_out;
638 } 638 }
@@ -672,7 +672,7 @@ static void tifm_sd_request(struct mmc_host *mmc, struct mmc_request *mrq)
672 ? PCI_DMA_TODEVICE 672 ? PCI_DMA_TODEVICE
673 : PCI_DMA_FROMDEVICE)) { 673 : PCI_DMA_FROMDEVICE)) {
674 printk(KERN_ERR "%s : scatterlist map failed\n", 674 printk(KERN_ERR "%s : scatterlist map failed\n",
675 sock->dev.bus_id); 675 dev_name(&sock->dev));
676 mrq->cmd->error = -ENOMEM; 676 mrq->cmd->error = -ENOMEM;
677 goto err_out; 677 goto err_out;
678 } 678 }
@@ -684,7 +684,7 @@ static void tifm_sd_request(struct mmc_host *mmc, struct mmc_request *mrq)
684 : PCI_DMA_FROMDEVICE); 684 : PCI_DMA_FROMDEVICE);
685 if (host->sg_len < 1) { 685 if (host->sg_len < 1) {
686 printk(KERN_ERR "%s : scatterlist map failed\n", 686 printk(KERN_ERR "%s : scatterlist map failed\n",
687 sock->dev.bus_id); 687 dev_name(&sock->dev));
688 tifm_unmap_sg(sock, &host->bounce_buf, 1, 688 tifm_unmap_sg(sock, &host->bounce_buf, 1,
689 r_data->flags & MMC_DATA_WRITE 689 r_data->flags & MMC_DATA_WRITE
690 ? PCI_DMA_TODEVICE 690 ? PCI_DMA_TODEVICE
@@ -748,7 +748,7 @@ static void tifm_sd_end_cmd(unsigned long data)
748 748
749 if (!mrq) { 749 if (!mrq) {
750 printk(KERN_ERR " %s : no request to complete?\n", 750 printk(KERN_ERR " %s : no request to complete?\n",
751 sock->dev.bus_id); 751 dev_name(&sock->dev));
752 spin_unlock_irqrestore(&sock->lock, flags); 752 spin_unlock_irqrestore(&sock->lock, flags);
753 return; 753 return;
754 } 754 }
@@ -789,7 +789,7 @@ static void tifm_sd_abort(unsigned long data)
789 printk(KERN_ERR 789 printk(KERN_ERR
790 "%s : card failed to respond for a long period of time " 790 "%s : card failed to respond for a long period of time "
791 "(%x, %x)\n", 791 "(%x, %x)\n",
792 host->dev->dev.bus_id, host->req->cmd->opcode, host->cmd_flags); 792 dev_name(&host->dev->dev), host->req->cmd->opcode, host->cmd_flags);
793 793
794 tifm_eject(host->dev); 794 tifm_eject(host->dev);
795} 795}
@@ -906,7 +906,7 @@ static int tifm_sd_initialize_host(struct tifm_sd *host)
906 906
907 if (rc) { 907 if (rc) {
908 printk(KERN_ERR "%s : controller failed to reset\n", 908 printk(KERN_ERR "%s : controller failed to reset\n",
909 sock->dev.bus_id); 909 dev_name(&sock->dev));
910 return -ENODEV; 910 return -ENODEV;
911 } 911 }
912 912
@@ -933,7 +933,7 @@ static int tifm_sd_initialize_host(struct tifm_sd *host)
933 if (rc) { 933 if (rc) {
934 printk(KERN_ERR 934 printk(KERN_ERR
935 "%s : card not ready - probe failed on initialization\n", 935 "%s : card not ready - probe failed on initialization\n",
936 sock->dev.bus_id); 936 dev_name(&sock->dev));
937 return -ENODEV; 937 return -ENODEV;
938 } 938 }
939 939
@@ -954,7 +954,7 @@ static int tifm_sd_probe(struct tifm_dev *sock)
954 if (!(TIFM_SOCK_STATE_OCCUPIED 954 if (!(TIFM_SOCK_STATE_OCCUPIED
955 & readl(sock->addr + SOCK_PRESENT_STATE))) { 955 & readl(sock->addr + SOCK_PRESENT_STATE))) {
956 printk(KERN_WARNING "%s : card gone, unexpectedly\n", 956 printk(KERN_WARNING "%s : card gone, unexpectedly\n",
957 sock->dev.bus_id); 957 dev_name(&sock->dev));
958 return rc; 958 return rc;
959 } 959 }
960 960
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 3e6f5d8609e8..d74ec46aa032 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -406,19 +406,6 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
406 /* Set the default CFI lock/unlock addresses */ 406 /* Set the default CFI lock/unlock addresses */
407 cfi->addr_unlock1 = 0x555; 407 cfi->addr_unlock1 = 0x555;
408 cfi->addr_unlock2 = 0x2aa; 408 cfi->addr_unlock2 = 0x2aa;
409 /* Modify the unlock address if we are in compatibility mode */
410 if ( /* x16 in x8 mode */
411 ((cfi->device_type == CFI_DEVICETYPE_X8) &&
412 (cfi->cfiq->InterfaceDesc ==
413 CFI_INTERFACE_X8_BY_X16_ASYNC)) ||
414 /* x32 in x16 mode */
415 ((cfi->device_type == CFI_DEVICETYPE_X16) &&
416 (cfi->cfiq->InterfaceDesc ==
417 CFI_INTERFACE_X16_BY_X32_ASYNC)))
418 {
419 cfi->addr_unlock1 = 0xaaa;
420 cfi->addr_unlock2 = 0x555;
421 }
422 409
423 } /* CFI mode */ 410 } /* CFI mode */
424 else if (cfi->cfi_mode == CFI_MODE_JEDEC) { 411 else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
index f84ab6182148..2f3f2f719ba4 100644
--- a/drivers/mtd/chips/jedec_probe.c
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -1808,9 +1808,7 @@ static inline u32 jedec_read_mfr(struct map_info *map, uint32_t base,
1808 * several first banks can contain 0x7f instead of actual ID 1808 * several first banks can contain 0x7f instead of actual ID
1809 */ 1809 */
1810 do { 1810 do {
1811 uint32_t ofs = cfi_build_cmd_addr(0 + (bank << 8), 1811 uint32_t ofs = cfi_build_cmd_addr(0 + (bank << 8), map, cfi);
1812 cfi_interleave(cfi),
1813 cfi->device_type);
1814 mask = (1 << (cfi->device_type * 8)) - 1; 1812 mask = (1 << (cfi->device_type * 8)) - 1;
1815 result = map_read(map, base + ofs); 1813 result = map_read(map, base + ofs);
1816 bank++; 1814 bank++;
@@ -1824,7 +1822,7 @@ static inline u32 jedec_read_id(struct map_info *map, uint32_t base,
1824{ 1822{
1825 map_word result; 1823 map_word result;
1826 unsigned long mask; 1824 unsigned long mask;
1827 u32 ofs = cfi_build_cmd_addr(1, cfi_interleave(cfi), cfi->device_type); 1825 u32 ofs = cfi_build_cmd_addr(1, map, cfi);
1828 mask = (1 << (cfi->device_type * 8)) -1; 1826 mask = (1 << (cfi->device_type * 8)) -1;
1829 result = map_read(map, base + ofs); 1827 result = map_read(map, base + ofs);
1830 return result.x[0] & mask; 1828 return result.x[0] & mask;
@@ -2067,8 +2065,8 @@ static int jedec_probe_chip(struct map_info *map, __u32 base,
2067 2065
2068 } 2066 }
2069 /* Ensure the unlock addresses we try stay inside the map */ 2067 /* Ensure the unlock addresses we try stay inside the map */
2070 probe_offset1 = cfi_build_cmd_addr(cfi->addr_unlock1, cfi_interleave(cfi), cfi->device_type); 2068 probe_offset1 = cfi_build_cmd_addr(cfi->addr_unlock1, map, cfi);
2071 probe_offset2 = cfi_build_cmd_addr(cfi->addr_unlock2, cfi_interleave(cfi), cfi->device_type); 2069 probe_offset2 = cfi_build_cmd_addr(cfi->addr_unlock2, map, cfi);
2072 if ( ((base + probe_offset1 + map_bankwidth(map)) >= map->size) || 2070 if ( ((base + probe_offset1 + map_bankwidth(map)) >= map->size) ||
2073 ((base + probe_offset2 + map_bankwidth(map)) >= map->size)) 2071 ((base + probe_offset2 + map_bankwidth(map)) >= map->size))
2074 goto retry; 2072 goto retry;
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index 91fbba767635..8c295f40d2ac 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -224,7 +224,7 @@ static void block2mtd_free_device(struct block2mtd_dev *dev)
224 if (dev->blkdev) { 224 if (dev->blkdev) {
225 invalidate_mapping_pages(dev->blkdev->bd_inode->i_mapping, 225 invalidate_mapping_pages(dev->blkdev->bd_inode->i_mapping,
226 0, -1); 226 0, -1);
227 close_bdev_excl(dev->blkdev); 227 close_bdev_exclusive(dev->blkdev, FMODE_READ|FMODE_WRITE);
228 } 228 }
229 229
230 kfree(dev); 230 kfree(dev);
@@ -246,7 +246,7 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
246 return NULL; 246 return NULL;
247 247
248 /* Get a handle on the device */ 248 /* Get a handle on the device */
249 bdev = open_bdev_excl(devname, O_RDWR, NULL); 249 bdev = open_bdev_exclusive(devname, FMODE_READ|FMODE_WRITE, NULL);
250#ifndef MODULE 250#ifndef MODULE
251 if (IS_ERR(bdev)) { 251 if (IS_ERR(bdev)) {
252 252
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 76a76751da36..6659b2275c0c 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -37,9 +37,9 @@
37#define OPCODE_NORM_READ 0x03 /* Read data bytes (low frequency) */ 37#define OPCODE_NORM_READ 0x03 /* Read data bytes (low frequency) */
38#define OPCODE_FAST_READ 0x0b /* Read data bytes (high frequency) */ 38#define OPCODE_FAST_READ 0x0b /* Read data bytes (high frequency) */
39#define OPCODE_PP 0x02 /* Page program (up to 256 bytes) */ 39#define OPCODE_PP 0x02 /* Page program (up to 256 bytes) */
40#define OPCODE_BE_4K 0x20 /* Erase 4KiB block */ 40#define OPCODE_BE_4K 0x20 /* Erase 4KiB block */
41#define OPCODE_BE_32K 0x52 /* Erase 32KiB block */ 41#define OPCODE_BE_32K 0x52 /* Erase 32KiB block */
42#define OPCODE_BE 0xc7 /* Erase whole flash block */ 42#define OPCODE_CHIP_ERASE 0xc7 /* Erase whole flash chip */
43#define OPCODE_SE 0xd8 /* Sector erase (usually 64KiB) */ 43#define OPCODE_SE 0xd8 /* Sector erase (usually 64KiB) */
44#define OPCODE_RDID 0x9f /* Read JEDEC ID */ 44#define OPCODE_RDID 0x9f /* Read JEDEC ID */
45 45
@@ -167,7 +167,7 @@ static int wait_till_ready(struct m25p *flash)
167 * 167 *
168 * Returns 0 if successful, non-zero otherwise. 168 * Returns 0 if successful, non-zero otherwise.
169 */ 169 */
170static int erase_block(struct m25p *flash) 170static int erase_chip(struct m25p *flash)
171{ 171{
172 DEBUG(MTD_DEBUG_LEVEL3, "%s: %s %dKiB\n", 172 DEBUG(MTD_DEBUG_LEVEL3, "%s: %s %dKiB\n",
173 flash->spi->dev.bus_id, __func__, 173 flash->spi->dev.bus_id, __func__,
@@ -181,7 +181,7 @@ static int erase_block(struct m25p *flash)
181 write_enable(flash); 181 write_enable(flash);
182 182
183 /* Set up command buffer. */ 183 /* Set up command buffer. */
184 flash->command[0] = OPCODE_BE; 184 flash->command[0] = OPCODE_CHIP_ERASE;
185 185
186 spi_write(flash->spi, flash->command, 1); 186 spi_write(flash->spi, flash->command, 1);
187 187
@@ -250,15 +250,18 @@ static int m25p80_erase(struct mtd_info *mtd, struct erase_info *instr)
250 250
251 mutex_lock(&flash->lock); 251 mutex_lock(&flash->lock);
252 252
253 /* REVISIT in some cases we could speed up erasing large regions 253 /* whole-chip erase? */
254 * by using OPCODE_SE instead of OPCODE_BE_4K 254 if (len == flash->mtd.size && erase_chip(flash)) {
255 */
256
257 /* now erase those sectors */
258 if (len == flash->mtd.size && erase_block(flash)) {
259 instr->state = MTD_ERASE_FAILED; 255 instr->state = MTD_ERASE_FAILED;
260 mutex_unlock(&flash->lock); 256 mutex_unlock(&flash->lock);
261 return -EIO; 257 return -EIO;
258
259 /* REVISIT in some cases we could speed up erasing large regions
260 * by using OPCODE_SE instead of OPCODE_BE_4K. We may have set up
261 * to use "small sector erase", but that's not always optimal.
262 */
263
264 /* "sector"-at-a-time erase */
262 } else { 265 } else {
263 while (len) { 266 while (len) {
264 if (erase_sector(flash, addr)) { 267 if (erase_sector(flash, addr)) {
@@ -574,10 +577,11 @@ static struct flash_info *__devinit jedec_probe(struct spi_device *spi)
574 for (tmp = 0, info = m25p_data; 577 for (tmp = 0, info = m25p_data;
575 tmp < ARRAY_SIZE(m25p_data); 578 tmp < ARRAY_SIZE(m25p_data);
576 tmp++, info++) { 579 tmp++, info++) {
577 if (info->jedec_id == jedec) 580 if (info->jedec_id == jedec) {
578 if (ext_jedec != 0 && info->ext_id != ext_jedec) 581 if (info->ext_id != 0 && info->ext_id != ext_jedec)
579 continue; 582 continue;
580 return info; 583 return info;
584 }
581 } 585 }
582 dev_err(&spi->dev, "unrecognized JEDEC id %06x\n", jedec); 586 dev_err(&spi->dev, "unrecognized JEDEC id %06x\n", jedec);
583 return NULL; 587 return NULL;
diff --git a/drivers/mtd/maps/cdb89712.c b/drivers/mtd/maps/cdb89712.c
index e5059aa3c724..8d92d8db9a98 100644
--- a/drivers/mtd/maps/cdb89712.c
+++ b/drivers/mtd/maps/cdb89712.c
@@ -14,7 +14,18 @@
14#include <linux/mtd/map.h> 14#include <linux/mtd/map.h>
15#include <linux/mtd/partitions.h> 15#include <linux/mtd/partitions.h>
16 16
17 17/* dynamic ioremap() areas */
18#define FLASH_START 0x00000000
19#define FLASH_SIZE 0x800000
20#define FLASH_WIDTH 4
21
22#define SRAM_START 0x60000000
23#define SRAM_SIZE 0xc000
24#define SRAM_WIDTH 4
25
26#define BOOTROM_START 0x70000000
27#define BOOTROM_SIZE 0x80
28#define BOOTROM_WIDTH 4
18 29
19 30
20static struct mtd_info *flash_mtd; 31static struct mtd_info *flash_mtd;
diff --git a/drivers/mtd/maps/h720x-flash.c b/drivers/mtd/maps/h720x-flash.c
index 35fef655ccc4..3b959fad1c4e 100644
--- a/drivers/mtd/maps/h720x-flash.c
+++ b/drivers/mtd/maps/h720x-flash.c
@@ -24,8 +24,8 @@ static struct mtd_info *mymtd;
24static struct map_info h720x_map = { 24static struct map_info h720x_map = {
25 .name = "H720X", 25 .name = "H720X",
26 .bankwidth = 4, 26 .bankwidth = 4,
27 .size = FLASH_SIZE, 27 .size = H720X_FLASH_SIZE,
28 .phys = FLASH_PHYS, 28 .phys = H720X_FLASH_PHYS,
29}; 29};
30 30
31static struct mtd_partition h720x_partitions[] = { 31static struct mtd_partition h720x_partitions[] = {
@@ -70,7 +70,7 @@ int __init h720x_mtd_init(void)
70 70
71 char *part_type = NULL; 71 char *part_type = NULL;
72 72
73 h720x_map.virt = ioremap(FLASH_PHYS, FLASH_SIZE); 73 h720x_map.virt = ioremap(h720x_map.phys, h720x_map.size);
74 74
75 if (!h720x_map.virt) { 75 if (!h720x_map.virt) {
76 printk(KERN_ERR "H720x-MTD: ioremap failed\n"); 76 printk(KERN_ERR "H720x-MTD: ioremap failed\n");
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index 42d844f8f6bf..dfbf3f270cea 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -19,7 +19,7 @@
19#include <linux/mtd/partitions.h> 19#include <linux/mtd/partitions.h>
20#include <linux/mtd/physmap.h> 20#include <linux/mtd/physmap.h>
21#include <linux/mtd/concat.h> 21#include <linux/mtd/concat.h>
22#include <asm/io.h> 22#include <linux/io.h>
23 23
24#define MAX_RESOURCES 4 24#define MAX_RESOURCES 4
25 25
@@ -27,7 +27,6 @@ struct physmap_flash_info {
27 struct mtd_info *mtd[MAX_RESOURCES]; 27 struct mtd_info *mtd[MAX_RESOURCES];
28 struct mtd_info *cmtd; 28 struct mtd_info *cmtd;
29 struct map_info map[MAX_RESOURCES]; 29 struct map_info map[MAX_RESOURCES];
30 struct resource *res;
31#ifdef CONFIG_MTD_PARTITIONS 30#ifdef CONFIG_MTD_PARTITIONS
32 int nr_parts; 31 int nr_parts;
33 struct mtd_partition *parts; 32 struct mtd_partition *parts;
@@ -70,16 +69,7 @@ static int physmap_flash_remove(struct platform_device *dev)
70#endif 69#endif
71 map_destroy(info->mtd[i]); 70 map_destroy(info->mtd[i]);
72 } 71 }
73
74 if (info->map[i].virt != NULL)
75 iounmap(info->map[i].virt);
76 }
77
78 if (info->res != NULL) {
79 release_resource(info->res);
80 kfree(info->res);
81 } 72 }
82
83 return 0; 73 return 0;
84} 74}
85 75
@@ -101,7 +91,8 @@ static int physmap_flash_probe(struct platform_device *dev)
101 if (physmap_data == NULL) 91 if (physmap_data == NULL)
102 return -ENODEV; 92 return -ENODEV;
103 93
104 info = kzalloc(sizeof(struct physmap_flash_info), GFP_KERNEL); 94 info = devm_kzalloc(&dev->dev, sizeof(struct physmap_flash_info),
95 GFP_KERNEL);
105 if (info == NULL) { 96 if (info == NULL) {
106 err = -ENOMEM; 97 err = -ENOMEM;
107 goto err_out; 98 goto err_out;
@@ -114,10 +105,10 @@ static int physmap_flash_probe(struct platform_device *dev)
114 (unsigned long long)(dev->resource[i].end - dev->resource[i].start + 1), 105 (unsigned long long)(dev->resource[i].end - dev->resource[i].start + 1),
115 (unsigned long long)dev->resource[i].start); 106 (unsigned long long)dev->resource[i].start);
116 107
117 info->res = request_mem_region(dev->resource[i].start, 108 if (!devm_request_mem_region(&dev->dev,
118 dev->resource[i].end - dev->resource[i].start + 1, 109 dev->resource[i].start,
119 dev->dev.bus_id); 110 dev->resource[i].end - dev->resource[i].start + 1,
120 if (info->res == NULL) { 111 dev->dev.bus_id)) {
121 dev_err(&dev->dev, "Could not reserve memory region\n"); 112 dev_err(&dev->dev, "Could not reserve memory region\n");
122 err = -ENOMEM; 113 err = -ENOMEM;
123 goto err_out; 114 goto err_out;
@@ -129,7 +120,8 @@ static int physmap_flash_probe(struct platform_device *dev)
129 info->map[i].bankwidth = physmap_data->width; 120 info->map[i].bankwidth = physmap_data->width;
130 info->map[i].set_vpp = physmap_data->set_vpp; 121 info->map[i].set_vpp = physmap_data->set_vpp;
131 122
132 info->map[i].virt = ioremap(info->map[i].phys, info->map[i].size); 123 info->map[i].virt = devm_ioremap(&dev->dev, info->map[i].phys,
124 info->map[i].size);
133 if (info->map[i].virt == NULL) { 125 if (info->map[i].virt == NULL) {
134 dev_err(&dev->dev, "Failed to ioremap flash region\n"); 126 dev_err(&dev->dev, "Failed to ioremap flash region\n");
135 err = EIO; 127 err = EIO;
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 681d5aca2af4..1409f01406f6 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -133,15 +133,12 @@ static void mtd_blktrans_request(struct request_queue *rq)
133} 133}
134 134
135 135
136static int blktrans_open(struct inode *i, struct file *f) 136static int blktrans_open(struct block_device *bdev, fmode_t mode)
137{ 137{
138 struct mtd_blktrans_dev *dev; 138 struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;
139 struct mtd_blktrans_ops *tr; 139 struct mtd_blktrans_ops *tr = dev->tr;
140 int ret = -ENODEV; 140 int ret = -ENODEV;
141 141
142 dev = i->i_bdev->bd_disk->private_data;
143 tr = dev->tr;
144
145 if (!try_module_get(dev->mtd->owner)) 142 if (!try_module_get(dev->mtd->owner))
146 goto out; 143 goto out;
147 144
@@ -164,15 +161,12 @@ static int blktrans_open(struct inode *i, struct file *f)
164 return ret; 161 return ret;
165} 162}
166 163
167static int blktrans_release(struct inode *i, struct file *f) 164static int blktrans_release(struct gendisk *disk, fmode_t mode)
168{ 165{
169 struct mtd_blktrans_dev *dev; 166 struct mtd_blktrans_dev *dev = disk->private_data;
170 struct mtd_blktrans_ops *tr; 167 struct mtd_blktrans_ops *tr = dev->tr;
171 int ret = 0; 168 int ret = 0;
172 169
173 dev = i->i_bdev->bd_disk->private_data;
174 tr = dev->tr;
175
176 if (tr->release) 170 if (tr->release)
177 ret = tr->release(dev); 171 ret = tr->release(dev);
178 172
@@ -194,10 +188,10 @@ static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
194 return -ENOTTY; 188 return -ENOTTY;
195} 189}
196 190
197static int blktrans_ioctl(struct inode *inode, struct file *file, 191static int blktrans_ioctl(struct block_device *bdev, fmode_t mode,
198 unsigned int cmd, unsigned long arg) 192 unsigned int cmd, unsigned long arg)
199{ 193{
200 struct mtd_blktrans_dev *dev = inode->i_bdev->bd_disk->private_data; 194 struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;
201 struct mtd_blktrans_ops *tr = dev->tr; 195 struct mtd_blktrans_ops *tr = dev->tr;
202 196
203 switch (cmd) { 197 switch (cmd) {
@@ -215,7 +209,7 @@ static struct block_device_operations mtd_blktrans_ops = {
215 .owner = THIS_MODULE, 209 .owner = THIS_MODULE,
216 .open = blktrans_open, 210 .open = blktrans_open,
217 .release = blktrans_release, 211 .release = blktrans_release,
218 .ioctl = blktrans_ioctl, 212 .locked_ioctl = blktrans_ioctl,
219 .getgeo = blktrans_getgeo, 213 .getgeo = blktrans_getgeo,
220}; 214};
221 215
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 963840e9b5bf..bcffeda2df3d 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -96,7 +96,7 @@ static int mtd_open(struct inode *inode, struct file *file)
96 return -ENODEV; 96 return -ENODEV;
97 97
98 /* You can't open the RO devices RW */ 98 /* You can't open the RO devices RW */
99 if ((file->f_mode & 2) && (minor & 1)) 99 if ((file->f_mode & FMODE_WRITE) && (minor & 1))
100 return -EACCES; 100 return -EACCES;
101 101
102 lock_kernel(); 102 lock_kernel();
@@ -114,7 +114,7 @@ static int mtd_open(struct inode *inode, struct file *file)
114 } 114 }
115 115
116 /* You can't open it RW if it's not a writeable device */ 116 /* You can't open it RW if it's not a writeable device */
117 if ((file->f_mode & 2) && !(mtd->flags & MTD_WRITEABLE)) { 117 if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
118 put_mtd_device(mtd); 118 put_mtd_device(mtd);
119 ret = -EACCES; 119 ret = -EACCES;
120 goto out; 120 goto out;
@@ -144,7 +144,7 @@ static int mtd_close(struct inode *inode, struct file *file)
144 DEBUG(MTD_DEBUG_LEVEL0, "MTD_close\n"); 144 DEBUG(MTD_DEBUG_LEVEL0, "MTD_close\n");
145 145
146 /* Only sync if opened RW */ 146 /* Only sync if opened RW */
147 if ((file->f_mode & 2) && mtd->sync) 147 if ((file->f_mode & FMODE_WRITE) && mtd->sync)
148 mtd->sync(mtd); 148 mtd->sync(mtd);
149 149
150 put_mtd_device(mtd); 150 put_mtd_device(mtd);
@@ -443,7 +443,7 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
443 { 443 {
444 struct erase_info *erase; 444 struct erase_info *erase;
445 445
446 if(!(file->f_mode & 2)) 446 if(!(file->f_mode & FMODE_WRITE))
447 return -EPERM; 447 return -EPERM;
448 448
449 erase=kzalloc(sizeof(struct erase_info),GFP_KERNEL); 449 erase=kzalloc(sizeof(struct erase_info),GFP_KERNEL);
@@ -497,7 +497,7 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
497 struct mtd_oob_buf __user *user_buf = argp; 497 struct mtd_oob_buf __user *user_buf = argp;
498 uint32_t retlen; 498 uint32_t retlen;
499 499
500 if(!(file->f_mode & 2)) 500 if(!(file->f_mode & FMODE_WRITE))
501 return -EPERM; 501 return -EPERM;
502 502
503 if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf))) 503 if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
index 024e3fffd4bb..a83192f80eba 100644
--- a/drivers/mtd/nand/fsl_upm.c
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -163,9 +163,11 @@ static int __devinit fun_chip_init(struct fsl_upm_nand *fun,
163 ret = parse_mtd_partitions(&fun->mtd, part_types, &fun->parts, 0); 163 ret = parse_mtd_partitions(&fun->mtd, part_types, &fun->parts, 0);
164 164
165#ifdef CONFIG_MTD_OF_PARTS 165#ifdef CONFIG_MTD_OF_PARTS
166 if (ret == 0) 166 if (ret == 0) {
167 ret = of_mtd_parse_partitions(fun->dev, &fun->mtd, 167 ret = of_mtd_parse_partitions(fun->dev, flash_np, &fun->parts);
168 flash_np, &fun->parts); 168 if (ret < 0)
169 goto err;
170 }
169#endif 171#endif
170 if (ret > 0) 172 if (ret > 0)
171 ret = add_mtd_partitions(&fun->mtd, fun->parts, ret); 173 ret = add_mtd_partitions(&fun->mtd, fun->parts, ret);
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index 75c899039023..9bd6c9ac8443 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -141,6 +141,7 @@ static int __devinit pasemi_nand_probe(struct of_device *ofdev,
141 } 141 }
142 142
143 lpcctl = pci_resource_start(pdev, 0); 143 lpcctl = pci_resource_start(pdev, 0);
144 pci_dev_put(pdev);
144 145
145 if (!request_region(lpcctl, 4, driver_name)) { 146 if (!request_region(lpcctl, 4, driver_name)) {
146 err = -EBUSY; 147 err = -EBUSY;
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index c0fa9c9edf08..15f0a26730ae 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -269,6 +269,7 @@ static struct pxa3xx_nand_timing stm2GbX16_timing = {
269 269
270static struct pxa3xx_nand_flash stm2GbX16 = { 270static struct pxa3xx_nand_flash stm2GbX16 = {
271 .timing = &stm2GbX16_timing, 271 .timing = &stm2GbX16_timing,
272 .cmdset = &largepage_cmdset,
272 .page_per_block = 64, 273 .page_per_block = 64,
273 .page_size = 2048, 274 .page_size = 2048,
274 .flash_width = 16, 275 .flash_width = 16,
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index 8387e05daae2..a7e4d985f5ef 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -32,20 +32,18 @@
32#include <linux/platform_device.h> 32#include <linux/platform_device.h>
33#include <linux/interrupt.h> 33#include <linux/interrupt.h>
34#include <linux/delay.h> 34#include <linux/delay.h>
35#include <linux/dma-mapping.h>
36#include <linux/io.h>
35 37
36#include <asm/io.h>
37#include <asm/mach/flash.h> 38#include <asm/mach/flash.h>
38#include <asm/arch/gpmc.h> 39#include <mach/gpmc.h>
39#include <asm/arch/onenand.h> 40#include <mach/onenand.h>
40#include <asm/arch/gpio.h> 41#include <mach/gpio.h>
41#include <asm/arch/gpmc.h> 42#include <mach/pm.h>
42#include <asm/arch/pm.h>
43 43
44#include <linux/dma-mapping.h> 44#include <mach/dma.h>
45#include <asm/dma-mapping.h>
46#include <asm/arch/dma.h>
47 45
48#include <asm/arch/board.h> 46#include <mach/board.h>
49 47
50#define DRIVER_NAME "omap2-onenand" 48#define DRIVER_NAME "omap2-onenand"
51 49
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index e04bcf1dff87..d8966bae0e0b 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -1022,7 +1022,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1022 } 1022 }
1023 1023
1024 /* 1024 /*
1025 * OK, now the LEB is locked and we can safely start moving iy. Since 1025 * OK, now the LEB is locked and we can safely start moving it. Since
1026 * this function utilizes the @ubi->peb1_buf buffer which is shared 1026 * this function utilizes the @ubi->peb1_buf buffer which is shared
1027 * with some other functions, so lock the buffer by taking the 1027 * with some other functions, so lock the buffer by taking the
1028 * @ubi->buf_mutex. 1028 * @ubi->buf_mutex.
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
index 4f2daa5bbecf..41d47e1cf15c 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/scan.c
@@ -320,7 +320,7 @@ static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb,
320 } 320 }
321 321
322 err = ubi_io_read_data(ubi, buf, pnum, 0, len); 322 err = ubi_io_read_data(ubi, buf, pnum, 0, len);
323 if (err && err != UBI_IO_BITFLIPS) 323 if (err && err != UBI_IO_BITFLIPS && err != -EBADMSG)
324 goto out_free_buf; 324 goto out_free_buf;
325 325
326 data_crc = be32_to_cpu(vid_hdr->data_crc); 326 data_crc = be32_to_cpu(vid_hdr->data_crc);
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 05d70937b543..dcb6dac1dc54 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1396,7 +1396,8 @@ int ubi_thread(void *u)
1396 ubi_msg("%s: %d consecutive failures", 1396 ubi_msg("%s: %d consecutive failures",
1397 ubi->bgt_name, WL_MAX_FAILURES); 1397 ubi->bgt_name, WL_MAX_FAILURES);
1398 ubi_ro_mode(ubi); 1398 ubi_ro_mode(ubi);
1399 break; 1399 ubi->thread_enabled = 0;
1400 continue;
1400 } 1401 }
1401 } else 1402 } else
1402 failures = 0; 1403 failures = 0;
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c
index 3a7bc524af33..c7a4f3bcc2bc 100644
--- a/drivers/net/3c509.c
+++ b/drivers/net/3c509.c
@@ -94,7 +94,7 @@
94#include <asm/io.h> 94#include <asm/io.h>
95#include <asm/irq.h> 95#include <asm/irq.h>
96 96
97static char version[] __initdata = DRV_NAME ".c:" DRV_VERSION " " DRV_RELDATE " becker@scyld.com\n"; 97static char version[] __devinitdata = DRV_NAME ".c:" DRV_VERSION " " DRV_RELDATE " becker@scyld.com\n";
98 98
99#ifdef EL3_DEBUG 99#ifdef EL3_DEBUG
100static int el3_debug = EL3_DEBUG; 100static int el3_debug = EL3_DEBUG;
@@ -186,7 +186,7 @@ static int max_interrupt_work = 10;
186static int nopnp; 186static int nopnp;
187#endif 187#endif
188 188
189static int __init el3_common_init(struct net_device *dev); 189static int __devinit el3_common_init(struct net_device *dev);
190static void el3_common_remove(struct net_device *dev); 190static void el3_common_remove(struct net_device *dev);
191static ushort id_read_eeprom(int index); 191static ushort id_read_eeprom(int index);
192static ushort read_eeprom(int ioaddr, int index); 192static ushort read_eeprom(int ioaddr, int index);
@@ -537,7 +537,7 @@ static struct mca_driver el3_mca_driver = {
537static int mca_registered; 537static int mca_registered;
538#endif /* CONFIG_MCA */ 538#endif /* CONFIG_MCA */
539 539
540static int __init el3_common_init(struct net_device *dev) 540static int __devinit el3_common_init(struct net_device *dev)
541{ 541{
542 struct el3_private *lp = netdev_priv(dev); 542 struct el3_private *lp = netdev_priv(dev);
543 int err; 543 int err;
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 85fa40a0a667..9ba1f0b46429 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -1836,10 +1836,9 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1836 1836
1837 if (pdev->vendor == PCI_VENDOR_ID_REALTEK && 1837 if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
1838 pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision < 0x20) { 1838 pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision < 0x20) {
1839 dev_err(&pdev->dev, 1839 dev_info(&pdev->dev,
1840 "This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip\n", 1840 "This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip, use 8139too\n",
1841 pdev->vendor, pdev->device, pdev->revision); 1841 pdev->vendor, pdev->device, pdev->revision);
1842 dev_err(&pdev->dev, "Try the \"8139too\" driver instead.\n");
1843 return -ENODEV; 1842 return -ENODEV;
1844 } 1843 }
1845 1844
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index 0daf8c15e381..63f906b04899 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -946,10 +946,9 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
946 if (pdev->vendor == PCI_VENDOR_ID_REALTEK && 946 if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
947 pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision >= 0x20) { 947 pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision >= 0x20) {
948 dev_info(&pdev->dev, 948 dev_info(&pdev->dev,
949 "This (id %04x:%04x rev %02x) is an enhanced 8139C+ chip\n", 949 "This (id %04x:%04x rev %02x) is an enhanced 8139C+ chip, use 8139cp\n",
950 pdev->vendor, pdev->device, pdev->revision); 950 pdev->vendor, pdev->device, pdev->revision);
951 dev_info(&pdev->dev, 951 return -ENODEV;
952 "Use the \"8139cp\" driver for improved performance and stability.\n");
953 } 952 }
954 953
955 if (pdev->vendor == PCI_VENDOR_ID_REALTEK && 954 if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index ad301ace6085..231eeaf1d552 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -464,6 +464,12 @@ config MIPS_JAZZ_SONIC
464 This is the driver for the onboard card of MIPS Magnum 4000, 464 This is the driver for the onboard card of MIPS Magnum 4000,
465 Acer PICA, Olivetti M700-10 and a few other identical OEM systems. 465 Acer PICA, Olivetti M700-10 and a few other identical OEM systems.
466 466
467config XTENSA_XT2000_SONIC
468 tristate "Xtensa XT2000 onboard SONIC Ethernet support"
469 depends on XTENSA_PLATFORM_XT2000
470 help
471 This is the driver for the onboard card of the Xtensa XT2000 board.
472
467config MIPS_AU1X00_ENET 473config MIPS_AU1X00_ENET
468 bool "MIPS AU1000 Ethernet support" 474 bool "MIPS AU1000 Ethernet support"
469 depends on SOC_AU1X00 475 depends on SOC_AU1X00
@@ -888,7 +894,7 @@ config SMC91X
888 select CRC32 894 select CRC32
889 select MII 895 select MII
890 depends on ARM || REDWOOD_5 || REDWOOD_6 || M32R || SUPERH || \ 896 depends on ARM || REDWOOD_5 || REDWOOD_6 || M32R || SUPERH || \
891 SOC_AU1X00 || BLACKFIN || MN10300 897 MIPS || BLACKFIN || MN10300
892 help 898 help
893 This is a driver for SMC's 91x series of Ethernet chipsets, 899 This is a driver for SMC's 91x series of Ethernet chipsets,
894 including the SMC91C94 and the SMC91C111. Say Y if you want it 900 including the SMC91C94 and the SMC91C111. Say Y if you want it
@@ -960,7 +966,7 @@ config SMC911X
960 tristate "SMSC LAN911[5678] support" 966 tristate "SMSC LAN911[5678] support"
961 select CRC32 967 select CRC32
962 select MII 968 select MII
963 depends on ARCH_PXA || SUPERH 969 depends on ARM || SUPERH
964 help 970 help
965 This is a driver for SMSC's LAN911x series of Ethernet chipsets 971 This is a driver for SMSC's LAN911x series of Ethernet chipsets
966 including the new LAN9115, LAN9116, LAN9117, and LAN9118. 972 including the new LAN9115, LAN9116, LAN9117, and LAN9118.
@@ -1819,9 +1825,10 @@ config FEC2
1819 1825
1820config FEC_MPC52xx 1826config FEC_MPC52xx
1821 tristate "MPC52xx FEC driver" 1827 tristate "MPC52xx FEC driver"
1822 depends on PPC_MPC52xx && PPC_BESTCOMM_FEC 1828 depends on PPC_MPC52xx && PPC_BESTCOMM
1823 select CRC32 1829 select CRC32
1824 select PHYLIB 1830 select PHYLIB
1831 select PPC_BESTCOMM_FEC
1825 ---help--- 1832 ---help---
1826 This option enables support for the MPC5200's on-chip 1833 This option enables support for the MPC5200's on-chip
1827 Fast Ethernet Controller 1834 Fast Ethernet Controller
@@ -2003,6 +2010,15 @@ config IGB_LRO
2003 2010
2004 If in doubt, say N. 2011 If in doubt, say N.
2005 2012
2013config IGB_DCA
2014 bool "Direct Cache Access (DCA) Support"
2015 default y
2016 depends on IGB && DCA && !(IGB=y && DCA=m)
2017 ---help---
2018 Say Y here if you want to use Direct Cache Access (DCA) in the
2019 driver. DCA is a method for warming the CPU cache before data
2020 is used, with the intent of lessening the impact of cache misses.
2021
2006source "drivers/net/ixp2000/Kconfig" 2022source "drivers/net/ixp2000/Kconfig"
2007 2023
2008config MYRI_SBUS 2024config MYRI_SBUS
@@ -2426,9 +2442,13 @@ config IXGBE
2426 will be called ixgbe. 2442 will be called ixgbe.
2427 2443
2428config IXGBE_DCA 2444config IXGBE_DCA
2429 bool 2445 bool "Direct Cache Access (DCA) Support"
2430 default y 2446 default y
2431 depends on IXGBE && DCA && !(IXGBE=y && DCA=m) 2447 depends on IXGBE && DCA && !(IXGBE=y && DCA=m)
2448 ---help---
2449 Say Y here if you want to use Direct Cache Access (DCA) in the
2450 driver. DCA is a method for warming the CPU cache before data
2451 is used, with the intent of lessening the impact of cache misses.
2432 2452
2433config IXGB 2453config IXGB
2434 tristate "Intel(R) PRO/10GbE support" 2454 tristate "Intel(R) PRO/10GbE support"
@@ -2478,9 +2498,13 @@ config MYRI10GE
2478 will be called myri10ge. 2498 will be called myri10ge.
2479 2499
2480config MYRI10GE_DCA 2500config MYRI10GE_DCA
2481 bool 2501 bool "Direct Cache Access (DCA) Support"
2482 default y 2502 default y
2483 depends on MYRI10GE && DCA && !(MYRI10GE=y && DCA=m) 2503 depends on MYRI10GE && DCA && !(MYRI10GE=y && DCA=m)
2504 ---help---
2505 Say Y here if you want to use Direct Cache Access (DCA) in the
2506 driver. DCA is a method for warming the CPU cache before data
2507 is used, with the intent of lessening the impact of cache misses.
2484 2508
2485config NETXEN_NIC 2509config NETXEN_NIC
2486 tristate "NetXen Multi port (1/10) Gigabit Ethernet NIC" 2510 tristate "NetXen Multi port (1/10) Gigabit Ethernet NIC"
@@ -2504,6 +2528,15 @@ config PASEMI_MAC
2504 This driver supports the on-chip 1/10Gbit Ethernet controller on 2528 This driver supports the on-chip 1/10Gbit Ethernet controller on
2505 PA Semi's PWRficient line of chips. 2529 PA Semi's PWRficient line of chips.
2506 2530
2531config MLX4_EN
2532 tristate "Mellanox Technologies 10Gbit Ethernet support"
2533 depends on PCI && INET
2534 select MLX4_CORE
2535 select INET_LRO
2536 help
2537 This driver supports Mellanox Technologies ConnectX Ethernet
2538 devices.
2539
2507config MLX4_CORE 2540config MLX4_CORE
2508 tristate 2541 tristate
2509 depends on PCI 2542 depends on PCI
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index fa2510b2e609..017383ad5ec6 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -114,7 +114,7 @@ obj-$(CONFIG_EL2) += 3c503.o 8390p.o
114obj-$(CONFIG_NE2000) += ne.o 8390p.o 114obj-$(CONFIG_NE2000) += ne.o 8390p.o
115obj-$(CONFIG_NE2_MCA) += ne2.o 8390p.o 115obj-$(CONFIG_NE2_MCA) += ne2.o 8390p.o
116obj-$(CONFIG_HPLAN) += hp.o 8390p.o 116obj-$(CONFIG_HPLAN) += hp.o 8390p.o
117obj-$(CONFIG_HPLAN_PLUS) += hp-plus.o 8390.o 117obj-$(CONFIG_HPLAN_PLUS) += hp-plus.o 8390p.o
118obj-$(CONFIG_ULTRA) += smc-ultra.o 8390.o 118obj-$(CONFIG_ULTRA) += smc-ultra.o 8390.o
119obj-$(CONFIG_ULTRAMCA) += smc-mca.o 8390.o 119obj-$(CONFIG_ULTRAMCA) += smc-mca.o 8390.o
120obj-$(CONFIG_ULTRA32) += smc-ultra32.o 8390.o 120obj-$(CONFIG_ULTRA32) += smc-ultra32.o 8390.o
@@ -227,6 +227,8 @@ pasemi_mac_driver-objs := pasemi_mac.o pasemi_mac_ethtool.o
227obj-$(CONFIG_MLX4_CORE) += mlx4/ 227obj-$(CONFIG_MLX4_CORE) += mlx4/
228obj-$(CONFIG_ENC28J60) += enc28j60.o 228obj-$(CONFIG_ENC28J60) += enc28j60.o
229 229
230obj-$(CONFIG_XTENSA_XT2000_SONIC) += xtsonic.o
231
230obj-$(CONFIG_MACB) += macb.o 232obj-$(CONFIG_MACB) += macb.o
231 233
232obj-$(CONFIG_ARM) += arm/ 234obj-$(CONFIG_ARM) += arm/
diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c
index c54967f7942a..07a6697e3635 100644
--- a/drivers/net/amd8111e.c
+++ b/drivers/net/amd8111e.c
@@ -644,10 +644,6 @@ This function frees the transmitter and receiver descriptor rings.
644*/ 644*/
645static void amd8111e_free_ring(struct amd8111e_priv* lp) 645static void amd8111e_free_ring(struct amd8111e_priv* lp)
646{ 646{
647
648 /* Free transmit and receive skbs */
649 amd8111e_free_skbs(lp->amd8111e_net_dev);
650
651 /* Free transmit and receive descriptor rings */ 647 /* Free transmit and receive descriptor rings */
652 if(lp->rx_ring){ 648 if(lp->rx_ring){
653 pci_free_consistent(lp->pci_dev, 649 pci_free_consistent(lp->pci_dev,
@@ -833,12 +829,14 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
833 829
834 } while(intr0 & RINT0); 830 } while(intr0 & RINT0);
835 831
836 /* Receive descriptor is empty now */ 832 if (rx_pkt_limit > 0) {
837 spin_lock_irqsave(&lp->lock, flags); 833 /* Receive descriptor is empty now */
838 __netif_rx_complete(dev, napi); 834 spin_lock_irqsave(&lp->lock, flags);
839 writel(VAL0|RINTEN0, mmio + INTEN0); 835 __netif_rx_complete(dev, napi);
840 writel(VAL2 | RDMD0, mmio + CMD0); 836 writel(VAL0|RINTEN0, mmio + INTEN0);
841 spin_unlock_irqrestore(&lp->lock, flags); 837 writel(VAL2 | RDMD0, mmio + CMD0);
838 spin_unlock_irqrestore(&lp->lock, flags);
839 }
842 840
843rx_not_empty: 841rx_not_empty:
844 return num_rx_pkt; 842 return num_rx_pkt;
@@ -1231,7 +1229,9 @@ static int amd8111e_close(struct net_device * dev)
1231 1229
1232 amd8111e_disable_interrupt(lp); 1230 amd8111e_disable_interrupt(lp);
1233 amd8111e_stop_chip(lp); 1231 amd8111e_stop_chip(lp);
1234 amd8111e_free_ring(lp); 1232
1233 /* Free transmit and receive skbs */
1234 amd8111e_free_skbs(lp->amd8111e_net_dev);
1235 1235
1236 netif_carrier_off(lp->amd8111e_net_dev); 1236 netif_carrier_off(lp->amd8111e_net_dev);
1237 1237
@@ -1241,6 +1241,7 @@ static int amd8111e_close(struct net_device * dev)
1241 1241
1242 spin_unlock_irq(&lp->lock); 1242 spin_unlock_irq(&lp->lock);
1243 free_irq(dev->irq, dev); 1243 free_irq(dev->irq, dev);
1244 amd8111e_free_ring(lp);
1244 1245
1245 /* Update the statistics before closing */ 1246 /* Update the statistics before closing */
1246 amd8111e_get_stats(dev); 1247 amd8111e_get_stats(dev);
diff --git a/drivers/net/arm/at91_ether.c b/drivers/net/arm/at91_ether.c
index 0fa53464efb2..6f431a887e7e 100644
--- a/drivers/net/arm/at91_ether.c
+++ b/drivers/net/arm/at91_ether.c
@@ -1080,7 +1080,8 @@ static int __init at91ether_setup(unsigned long phy_type, unsigned short phy_add
1080 init_timer(&lp->check_timer); 1080 init_timer(&lp->check_timer);
1081 lp->check_timer.data = (unsigned long)dev; 1081 lp->check_timer.data = (unsigned long)dev;
1082 lp->check_timer.function = at91ether_check_link; 1082 lp->check_timer.function = at91ether_check_link;
1083 } 1083 } else if (lp->board_data.phy_irq_pin >= 32)
1084 gpio_request(lp->board_data.phy_irq_pin, "ethernet_phy");
1084 1085
1085 /* Display ethernet banner */ 1086 /* Display ethernet banner */
1086 printk(KERN_INFO "%s: AT91 ethernet at 0x%08x int=%d %s%s (%s)\n", 1087 printk(KERN_INFO "%s: AT91 ethernet at 0x%08x int=%d %s%s (%s)\n",
@@ -1167,6 +1168,9 @@ static int __devexit at91ether_remove(struct platform_device *pdev)
1167 struct net_device *dev = platform_get_drvdata(pdev); 1168 struct net_device *dev = platform_get_drvdata(pdev);
1168 struct at91_private *lp = netdev_priv(dev); 1169 struct at91_private *lp = netdev_priv(dev);
1169 1170
1171 if (lp->board_data.phy_irq_pin >= 32)
1172 gpio_free(lp->board_data.phy_irq_pin);
1173
1170 unregister_netdev(dev); 1174 unregister_netdev(dev);
1171 free_irq(dev->irq, dev); 1175 free_irq(dev->irq, dev);
1172 dma_free_coherent(NULL, sizeof(struct recv_desc_bufs), lp->dlist, (dma_addr_t)lp->dlist_phys); 1176 dma_free_coherent(NULL, sizeof(struct recv_desc_bufs), lp->dlist, (dma_addr_t)lp->dlist_phys);
diff --git a/drivers/net/atl1e/atl1e.h b/drivers/net/atl1e/atl1e.h
index b645fa0f3f64..c49550d507a0 100644
--- a/drivers/net/atl1e/atl1e.h
+++ b/drivers/net/atl1e/atl1e.h
@@ -46,7 +46,6 @@
46#include <linux/vmalloc.h> 46#include <linux/vmalloc.h>
47#include <linux/pagemap.h> 47#include <linux/pagemap.h>
48#include <linux/tcp.h> 48#include <linux/tcp.h>
49#include <linux/mii.h>
50#include <linux/ethtool.h> 49#include <linux/ethtool.h>
51#include <linux/if_vlan.h> 50#include <linux/if_vlan.h>
52#include <linux/workqueue.h> 51#include <linux/workqueue.h>
diff --git a/drivers/net/atl1e/atl1e_hw.c b/drivers/net/atl1e/atl1e_hw.c
index 8cbc1b59bd62..4a7700620119 100644
--- a/drivers/net/atl1e/atl1e_hw.c
+++ b/drivers/net/atl1e/atl1e_hw.c
@@ -163,9 +163,6 @@ int atl1e_read_mac_addr(struct atl1e_hw *hw)
163 * atl1e_hash_mc_addr 163 * atl1e_hash_mc_addr
164 * purpose 164 * purpose
165 * set hash value for a multicast address 165 * set hash value for a multicast address
166 * hash calcu processing :
167 * 1. calcu 32bit CRC for multicast address
168 * 2. reverse crc with MSB to LSB
169 */ 166 */
170u32 atl1e_hash_mc_addr(struct atl1e_hw *hw, u8 *mc_addr) 167u32 atl1e_hash_mc_addr(struct atl1e_hw *hw, u8 *mc_addr)
171{ 168{
@@ -174,7 +171,6 @@ u32 atl1e_hash_mc_addr(struct atl1e_hw *hw, u8 *mc_addr)
174 int i; 171 int i;
175 172
176 crc32 = ether_crc_le(6, mc_addr); 173 crc32 = ether_crc_le(6, mc_addr);
177 crc32 = ~crc32;
178 for (i = 0; i < 32; i++) 174 for (i = 0; i < 32; i++)
179 value |= (((crc32 >> i) & 1) << (31 - i)); 175 value |= (((crc32 >> i) & 1) << (31 - i));
180 176
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 3cf59a7f5a1c..aef403d299ee 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -2310,7 +2310,8 @@ static void atl1_tx_queue(struct atl1_adapter *adapter, u16 count,
2310 if (tpd != ptpd) 2310 if (tpd != ptpd)
2311 memcpy(tpd, ptpd, sizeof(struct tx_packet_desc)); 2311 memcpy(tpd, ptpd, sizeof(struct tx_packet_desc));
2312 tpd->buffer_addr = cpu_to_le64(buffer_info->dma); 2312 tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
2313 tpd->word2 = (cpu_to_le16(buffer_info->length) & 2313 tpd->word2 &= ~(TPD_BUFLEN_MASK << TPD_BUFLEN_SHIFT);
2314 tpd->word2 |= (cpu_to_le16(buffer_info->length) &
2314 TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT; 2315 TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT;
2315 2316
2316 /* 2317 /*
@@ -2409,8 +2410,8 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2409 vlan_tag = (vlan_tag << 4) | (vlan_tag >> 13) | 2410 vlan_tag = (vlan_tag << 4) | (vlan_tag >> 13) |
2410 ((vlan_tag >> 9) & 0x8); 2411 ((vlan_tag >> 9) & 0x8);
2411 ptpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT; 2412 ptpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT;
2412 ptpd->word3 |= (vlan_tag & TPD_VL_TAGGED_MASK) << 2413 ptpd->word2 |= (vlan_tag & TPD_VLANTAG_MASK) <<
2413 TPD_VL_TAGGED_SHIFT; 2414 TPD_VLANTAG_SHIFT;
2414 } 2415 }
2415 2416
2416 tso = atl1_tso(adapter, skb, ptpd); 2417 tso = atl1_tso(adapter, skb, ptpd);
@@ -3403,14 +3404,8 @@ static void atl1_get_wol(struct net_device *netdev,
3403{ 3404{
3404 struct atl1_adapter *adapter = netdev_priv(netdev); 3405 struct atl1_adapter *adapter = netdev_priv(netdev);
3405 3406
3406 wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC; 3407 wol->supported = WAKE_MAGIC;
3407 wol->wolopts = 0; 3408 wol->wolopts = 0;
3408 if (adapter->wol & ATLX_WUFC_EX)
3409 wol->wolopts |= WAKE_UCAST;
3410 if (adapter->wol & ATLX_WUFC_MC)
3411 wol->wolopts |= WAKE_MCAST;
3412 if (adapter->wol & ATLX_WUFC_BC)
3413 wol->wolopts |= WAKE_BCAST;
3414 if (adapter->wol & ATLX_WUFC_MAG) 3409 if (adapter->wol & ATLX_WUFC_MAG)
3415 wol->wolopts |= WAKE_MAGIC; 3410 wol->wolopts |= WAKE_MAGIC;
3416 return; 3411 return;
@@ -3421,15 +3416,10 @@ static int atl1_set_wol(struct net_device *netdev,
3421{ 3416{
3422 struct atl1_adapter *adapter = netdev_priv(netdev); 3417 struct atl1_adapter *adapter = netdev_priv(netdev);
3423 3418
3424 if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) 3419 if (wol->wolopts & (WAKE_PHY | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST |
3420 WAKE_ARP | WAKE_MAGICSECURE))
3425 return -EOPNOTSUPP; 3421 return -EOPNOTSUPP;
3426 adapter->wol = 0; 3422 adapter->wol = 0;
3427 if (wol->wolopts & WAKE_UCAST)
3428 adapter->wol |= ATLX_WUFC_EX;
3429 if (wol->wolopts & WAKE_MCAST)
3430 adapter->wol |= ATLX_WUFC_MC;
3431 if (wol->wolopts & WAKE_BCAST)
3432 adapter->wol |= ATLX_WUFC_BC;
3433 if (wol->wolopts & WAKE_MAGIC) 3423 if (wol->wolopts & WAKE_MAGIC)
3434 adapter->wol |= ATLX_WUFC_MAG; 3424 adapter->wol |= ATLX_WUFC_MAG;
3435 return 0; 3425 return 0;
diff --git a/drivers/net/atlx/atl1.h b/drivers/net/atlx/atl1.h
index a5015b14a429..ffa73fc8d95e 100644
--- a/drivers/net/atlx/atl1.h
+++ b/drivers/net/atlx/atl1.h
@@ -504,7 +504,7 @@ struct rx_free_desc {
504#define TPD_PKTNT_MASK 0x0001 504#define TPD_PKTNT_MASK 0x0001
505#define TPD_PKTINT_SHIFT 15 505#define TPD_PKTINT_SHIFT 15
506#define TPD_VLANTAG_MASK 0xFFFF 506#define TPD_VLANTAG_MASK 0xFFFF
507#define TPD_VLAN_SHIFT 16 507#define TPD_VLANTAG_SHIFT 16
508 508
509/* tpd word 3 bits 0:13 */ 509/* tpd word 3 bits 0:13 */
510#define TPD_EOP_MASK 0x0001 510#define TPD_EOP_MASK 0x0001
diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
index f5bdc92c1a65..8571e8c0bc67 100644
--- a/drivers/net/atlx/atl2.c
+++ b/drivers/net/atlx/atl2.c
@@ -1690,9 +1690,11 @@ static int atl2_resume(struct pci_dev *pdev)
1690 1690
1691 ATL2_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0); 1691 ATL2_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0);
1692 1692
1693 err = atl2_request_irq(adapter); 1693 if (netif_running(netdev)) {
1694 if (netif_running(netdev) && err) 1694 err = atl2_request_irq(adapter);
1695 return err; 1695 if (err)
1696 return err;
1697 }
1696 1698
1697 atl2_reset_hw(&adapter->hw); 1699 atl2_reset_hw(&adapter->hw);
1698 1700
diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c
index 4207d6efddc0..9a314d88e7b6 100644
--- a/drivers/net/ax88796.c
+++ b/drivers/net/ax88796.c
@@ -838,12 +838,12 @@ static int ax_probe(struct platform_device *pdev)
838 838
839 /* find the platform resources */ 839 /* find the platform resources */
840 840
841 dev->irq = platform_get_irq(pdev, 0); 841 ret = platform_get_irq(pdev, 0);
842 if (dev->irq < 0) { 842 if (ret < 0) {
843 dev_err(&pdev->dev, "no IRQ specified\n"); 843 dev_err(&pdev->dev, "no IRQ specified\n");
844 ret = -ENXIO;
845 goto exit_mem; 844 goto exit_mem;
846 } 845 }
846 dev->irq = ret;
847 847
848 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 848 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
849 if (res == NULL) { 849 if (res == NULL) {
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 430d430bce29..9e8222f9e90e 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -543,9 +543,9 @@ bnx2_free_rx_mem(struct bnx2 *bp)
543 for (j = 0; j < bp->rx_max_pg_ring; j++) { 543 for (j = 0; j < bp->rx_max_pg_ring; j++) {
544 if (rxr->rx_pg_desc_ring[j]) 544 if (rxr->rx_pg_desc_ring[j])
545 pci_free_consistent(bp->pdev, RXBD_RING_SIZE, 545 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
546 rxr->rx_pg_desc_ring[i], 546 rxr->rx_pg_desc_ring[j],
547 rxr->rx_pg_desc_mapping[i]); 547 rxr->rx_pg_desc_mapping[j]);
548 rxr->rx_pg_desc_ring[i] = NULL; 548 rxr->rx_pg_desc_ring[j] = NULL;
549 } 549 }
550 if (rxr->rx_pg_ring) 550 if (rxr->rx_pg_ring)
551 vfree(rxr->rx_pg_ring); 551 vfree(rxr->rx_pg_ring);
@@ -3144,6 +3144,28 @@ bnx2_has_work(struct bnx2_napi *bnapi)
3144 return 0; 3144 return 0;
3145} 3145}
3146 3146
3147static void
3148bnx2_chk_missed_msi(struct bnx2 *bp)
3149{
3150 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3151 u32 msi_ctrl;
3152
3153 if (bnx2_has_work(bnapi)) {
3154 msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3155 if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3156 return;
3157
3158 if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
3159 REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3160 ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3161 REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3162 bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3163 }
3164 }
3165
3166 bp->idle_chk_status_idx = bnapi->last_status_idx;
3167}
3168
3147static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi) 3169static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3148{ 3170{
3149 struct status_block *sblk = bnapi->status_blk.msi; 3171 struct status_block *sblk = bnapi->status_blk.msi;
@@ -3218,14 +3240,15 @@ static int bnx2_poll(struct napi_struct *napi, int budget)
3218 3240
3219 work_done = bnx2_poll_work(bp, bnapi, work_done, budget); 3241 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3220 3242
3221 if (unlikely(work_done >= budget))
3222 break;
3223
3224 /* bnapi->last_status_idx is used below to tell the hw how 3243 /* bnapi->last_status_idx is used below to tell the hw how
3225 * much work has been processed, so we must read it before 3244 * much work has been processed, so we must read it before
3226 * checking for more work. 3245 * checking for more work.
3227 */ 3246 */
3228 bnapi->last_status_idx = sblk->status_idx; 3247 bnapi->last_status_idx = sblk->status_idx;
3248
3249 if (unlikely(work_done >= budget))
3250 break;
3251
3229 rmb(); 3252 rmb();
3230 if (likely(!bnx2_has_work(bnapi))) { 3253 if (likely(!bnx2_has_work(bnapi))) {
3231 netif_rx_complete(bp->dev, napi); 3254 netif_rx_complete(bp->dev, napi);
@@ -4570,6 +4593,8 @@ bnx2_init_chip(struct bnx2 *bp)
4570 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) 4593 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4571 bp->bnx2_napi[i].last_status_idx = 0; 4594 bp->bnx2_napi[i].last_status_idx = 0;
4572 4595
4596 bp->idle_chk_status_idx = 0xffff;
4597
4573 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE; 4598 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4574 4599
4575 /* Set up how to generate a link change interrupt. */ 4600 /* Set up how to generate a link change interrupt. */
@@ -5718,6 +5743,10 @@ bnx2_timer(unsigned long data)
5718 if (atomic_read(&bp->intr_sem) != 0) 5743 if (atomic_read(&bp->intr_sem) != 0)
5719 goto bnx2_restart_timer; 5744 goto bnx2_restart_timer;
5720 5745
5746 if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
5747 BNX2_FLAG_USING_MSI)
5748 bnx2_chk_missed_msi(bp);
5749
5721 bnx2_send_heart_beat(bp); 5750 bnx2_send_heart_beat(bp);
5722 5751
5723 bp->stats_blk->stat_FwRxDrop = 5752 bp->stats_blk->stat_FwRxDrop =
@@ -7204,10 +7233,13 @@ static void
7204poll_bnx2(struct net_device *dev) 7233poll_bnx2(struct net_device *dev)
7205{ 7234{
7206 struct bnx2 *bp = netdev_priv(dev); 7235 struct bnx2 *bp = netdev_priv(dev);
7236 int i;
7207 7237
7208 disable_irq(bp->pdev->irq); 7238 for (i = 0; i < bp->irq_nvecs; i++) {
7209 bnx2_interrupt(bp->pdev->irq, dev); 7239 disable_irq(bp->irq_tbl[i].vector);
7210 enable_irq(bp->pdev->irq); 7240 bnx2_interrupt(bp->irq_tbl[i].vector, &bp->bnx2_napi[i]);
7241 enable_irq(bp->irq_tbl[i].vector);
7242 }
7211} 7243}
7212#endif 7244#endif
7213 7245
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 617d95340160..0b032c3c7b61 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -378,6 +378,9 @@ struct l2_fhdr {
378 * pci_config_l definition 378 * pci_config_l definition
379 * offset: 0000 379 * offset: 0000
380 */ 380 */
381#define BNX2_PCICFG_MSI_CONTROL 0x00000058
382#define BNX2_PCICFG_MSI_CONTROL_ENABLE (1L<<16)
383
381#define BNX2_PCICFG_MISC_CONFIG 0x00000068 384#define BNX2_PCICFG_MISC_CONFIG 0x00000068
382#define BNX2_PCICFG_MISC_CONFIG_TARGET_BYTE_SWAP (1L<<2) 385#define BNX2_PCICFG_MISC_CONFIG_TARGET_BYTE_SWAP (1L<<2)
383#define BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP (1L<<3) 386#define BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP (1L<<3)
@@ -6863,6 +6866,9 @@ struct bnx2 {
6863 6866
6864 u8 num_tx_rings; 6867 u8 num_tx_rings;
6865 u8 num_rx_rings; 6868 u8 num_rx_rings;
6869
6870 u32 idle_chk_status_idx;
6871
6866}; 6872};
6867 6873
6868#define REG_RD(bp, offset) \ 6874#define REG_RD(bp, offset) \
diff --git a/drivers/net/bnx2x_init.h b/drivers/net/bnx2x_init.h
index 130927cfc75b..a6c0b3abba29 100644
--- a/drivers/net/bnx2x_init.h
+++ b/drivers/net/bnx2x_init.h
@@ -564,14 +564,15 @@ static const struct arb_line write_arb_addr[NUM_WR_Q-1] = {
564 564
565static void bnx2x_init_pxp(struct bnx2x *bp) 565static void bnx2x_init_pxp(struct bnx2x *bp)
566{ 566{
567 u16 devctl;
567 int r_order, w_order; 568 int r_order, w_order;
568 u32 val, i; 569 u32 val, i;
569 570
570 pci_read_config_word(bp->pdev, 571 pci_read_config_word(bp->pdev,
571 bp->pcie_cap + PCI_EXP_DEVCTL, (u16 *)&val); 572 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
572 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", (u16)val); 573 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
573 w_order = ((val & PCI_EXP_DEVCTL_PAYLOAD) >> 5); 574 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
574 r_order = ((val & PCI_EXP_DEVCTL_READRQ) >> 12); 575 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
575 576
576 if (r_order > MAX_RD_ORD) { 577 if (r_order > MAX_RD_ORD) {
577 DP(NETIF_MSG_HW, "read order of %d order adjusted to %d\n", 578 DP(NETIF_MSG_HW, "read order of %d order adjusted to %d\n",
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index fce745148ff9..600210d7eff9 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -59,8 +59,8 @@
59#include "bnx2x.h" 59#include "bnx2x.h"
60#include "bnx2x_init.h" 60#include "bnx2x_init.h"
61 61
62#define DRV_MODULE_VERSION "1.45.22" 62#define DRV_MODULE_VERSION "1.45.23"
63#define DRV_MODULE_RELDATE "2008/09/09" 63#define DRV_MODULE_RELDATE "2008/11/03"
64#define BNX2X_BC_VER 0x040200 64#define BNX2X_BC_VER 0x040200
65 65
66/* Time in jiffies before concluding the transmitter is hung */ 66/* Time in jiffies before concluding the transmitter is hung */
@@ -6481,6 +6481,7 @@ load_int_disable:
6481 bnx2x_free_irq(bp); 6481 bnx2x_free_irq(bp);
6482load_error: 6482load_error:
6483 bnx2x_free_mem(bp); 6483 bnx2x_free_mem(bp);
6484 bp->port.pmf = 0;
6484 6485
6485 /* TBD we really need to reset the chip 6486 /* TBD we really need to reset the chip
6486 if we want to recover from this */ 6487 if we want to recover from this */
@@ -6791,6 +6792,7 @@ unload_error:
6791 /* Report UNLOAD_DONE to MCP */ 6792 /* Report UNLOAD_DONE to MCP */
6792 if (!BP_NOMCP(bp)) 6793 if (!BP_NOMCP(bp))
6793 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE); 6794 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6795 bp->port.pmf = 0;
6794 6796
6795 /* Free SKBs, SGEs, TPA pool and driver internals */ 6797 /* Free SKBs, SGEs, TPA pool and driver internals */
6796 bnx2x_free_skbs(bp); 6798 bnx2x_free_skbs(bp);
@@ -10204,8 +10206,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10204 return -ENOMEM; 10206 return -ENOMEM;
10205 } 10207 }
10206 10208
10207 netif_carrier_off(dev);
10208
10209 bp = netdev_priv(dev); 10209 bp = netdev_priv(dev);
10210 bp->msglevel = debug; 10210 bp->msglevel = debug;
10211 10211
@@ -10229,6 +10229,8 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10229 goto init_one_exit; 10229 goto init_one_exit;
10230 } 10230 }
10231 10231
10232 netif_carrier_off(dev);
10233
10232 bp->common.name = board_info[ent->driver_data].name; 10234 bp->common.name = board_info[ent->driver_data].name;
10233 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx," 10235 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10234 " IRQ %d, ", dev->name, bp->common.name, 10236 " IRQ %d, ", dev->name, bp->common.name,
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index ade5f3f6693b..87437c788476 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -169,11 +169,14 @@ static void tlb_clear_slave(struct bonding *bond, struct slave *slave, int save_
169 /* clear slave from tx_hashtbl */ 169 /* clear slave from tx_hashtbl */
170 tx_hash_table = BOND_ALB_INFO(bond).tx_hashtbl; 170 tx_hash_table = BOND_ALB_INFO(bond).tx_hashtbl;
171 171
172 index = SLAVE_TLB_INFO(slave).head; 172 /* skip this if we've already freed the tx hash table */
173 while (index != TLB_NULL_INDEX) { 173 if (tx_hash_table) {
174 u32 next_index = tx_hash_table[index].next; 174 index = SLAVE_TLB_INFO(slave).head;
175 tlb_init_table_entry(&tx_hash_table[index], save_load); 175 while (index != TLB_NULL_INDEX) {
176 index = next_index; 176 u32 next_index = tx_hash_table[index].next;
177 tlb_init_table_entry(&tx_hash_table[index], save_load);
178 index = next_index;
179 }
177 } 180 }
178 181
179 tlb_init_slave(slave); 182 tlb_init_slave(slave);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 8e2be24f3fe4..a3efba59eee9 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1341,18 +1341,24 @@ static int bond_compute_features(struct bonding *bond)
1341 int i; 1341 int i;
1342 1342
1343 features &= ~(NETIF_F_ALL_CSUM | BOND_VLAN_FEATURES); 1343 features &= ~(NETIF_F_ALL_CSUM | BOND_VLAN_FEATURES);
1344 features |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | 1344 features |= NETIF_F_GSO_MASK | NETIF_F_NO_CSUM;
1345 NETIF_F_GSO_MASK | NETIF_F_NO_CSUM; 1345
1346 if (!bond->first_slave)
1347 goto done;
1348
1349 features &= ~NETIF_F_ONE_FOR_ALL;
1346 1350
1347 bond_for_each_slave(bond, slave, i) { 1351 bond_for_each_slave(bond, slave, i) {
1348 features = netdev_compute_features(features, 1352 features = netdev_increment_features(features,
1349 slave->dev->features); 1353 slave->dev->features,
1354 NETIF_F_ONE_FOR_ALL);
1350 if (slave->dev->hard_header_len > max_hard_header_len) 1355 if (slave->dev->hard_header_len > max_hard_header_len)
1351 max_hard_header_len = slave->dev->hard_header_len; 1356 max_hard_header_len = slave->dev->hard_header_len;
1352 } 1357 }
1353 1358
1359done:
1354 features |= (bond_dev->features & BOND_VLAN_FEATURES); 1360 features |= (bond_dev->features & BOND_VLAN_FEATURES);
1355 bond_dev->features = features; 1361 bond_dev->features = netdev_fix_features(features, NULL);
1356 bond_dev->hard_header_len = max_hard_header_len; 1362 bond_dev->hard_header_len = max_hard_header_len;
1357 1363
1358 return 0; 1364 return 0;
@@ -1973,6 +1979,20 @@ void bond_destroy(struct bonding *bond)
1973 unregister_netdevice(bond->dev); 1979 unregister_netdevice(bond->dev);
1974} 1980}
1975 1981
1982static void bond_destructor(struct net_device *bond_dev)
1983{
1984 struct bonding *bond = bond_dev->priv;
1985
1986 if (bond->wq)
1987 destroy_workqueue(bond->wq);
1988
1989 netif_addr_lock_bh(bond_dev);
1990 bond_mc_list_destroy(bond);
1991 netif_addr_unlock_bh(bond_dev);
1992
1993 free_netdev(bond_dev);
1994}
1995
1976/* 1996/*
1977* First release a slave and then destroy the bond if no more slaves are left. 1997* First release a slave and then destroy the bond if no more slaves are left.
1978* Must be under rtnl_lock when this function is called. 1998* Must be under rtnl_lock when this function is called.
@@ -2370,6 +2390,9 @@ static void bond_miimon_commit(struct bonding *bond)
2370 continue; 2390 continue;
2371 2391
2372 case BOND_LINK_DOWN: 2392 case BOND_LINK_DOWN:
2393 if (slave->link_failure_count < UINT_MAX)
2394 slave->link_failure_count++;
2395
2373 slave->link = BOND_LINK_DOWN; 2396 slave->link = BOND_LINK_DOWN;
2374 2397
2375 if (bond->params.mode == BOND_MODE_ACTIVEBACKUP || 2398 if (bond->params.mode == BOND_MODE_ACTIVEBACKUP ||
@@ -4544,7 +4567,7 @@ static int bond_init(struct net_device *bond_dev, struct bond_params *params)
4544 4567
4545 bond_set_mode_ops(bond, bond->params.mode); 4568 bond_set_mode_ops(bond, bond->params.mode);
4546 4569
4547 bond_dev->destructor = free_netdev; 4570 bond_dev->destructor = bond_destructor;
4548 4571
4549 /* Initialize the device options */ 4572 /* Initialize the device options */
4550 bond_dev->tx_queue_len = 0; 4573 bond_dev->tx_queue_len = 0;
@@ -4583,20 +4606,6 @@ static int bond_init(struct net_device *bond_dev, struct bond_params *params)
4583 return 0; 4606 return 0;
4584} 4607}
4585 4608
4586/* De-initialize device specific data.
4587 * Caller must hold rtnl_lock.
4588 */
4589static void bond_deinit(struct net_device *bond_dev)
4590{
4591 struct bonding *bond = bond_dev->priv;
4592
4593 list_del(&bond->bond_list);
4594
4595#ifdef CONFIG_PROC_FS
4596 bond_remove_proc_entry(bond);
4597#endif
4598}
4599
4600static void bond_work_cancel_all(struct bonding *bond) 4609static void bond_work_cancel_all(struct bonding *bond)
4601{ 4610{
4602 write_lock_bh(&bond->lock); 4611 write_lock_bh(&bond->lock);
@@ -4618,6 +4627,22 @@ static void bond_work_cancel_all(struct bonding *bond)
4618 cancel_delayed_work(&bond->ad_work); 4627 cancel_delayed_work(&bond->ad_work);
4619} 4628}
4620 4629
4630/* De-initialize device specific data.
4631 * Caller must hold rtnl_lock.
4632 */
4633static void bond_deinit(struct net_device *bond_dev)
4634{
4635 struct bonding *bond = bond_dev->priv;
4636
4637 list_del(&bond->bond_list);
4638
4639 bond_work_cancel_all(bond);
4640
4641#ifdef CONFIG_PROC_FS
4642 bond_remove_proc_entry(bond);
4643#endif
4644}
4645
4621/* Unregister and free all bond devices. 4646/* Unregister and free all bond devices.
4622 * Caller must hold rtnl_lock. 4647 * Caller must hold rtnl_lock.
4623 */ 4648 */
@@ -4629,9 +4654,6 @@ static void bond_free_all(void)
4629 struct net_device *bond_dev = bond->dev; 4654 struct net_device *bond_dev = bond->dev;
4630 4655
4631 bond_work_cancel_all(bond); 4656 bond_work_cancel_all(bond);
4632 netif_addr_lock_bh(bond_dev);
4633 bond_mc_list_destroy(bond);
4634 netif_addr_unlock_bh(bond_dev);
4635 /* Release the bonded slaves */ 4657 /* Release the bonded slaves */
4636 bond_release_all(bond_dev); 4658 bond_release_all(bond_dev);
4637 bond_destroy(bond); 4659 bond_destroy(bond);
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index d6c7d2aa761b..7092df50ff78 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -1035,10 +1035,6 @@ MODULE_PARM_DESC(copybreak, "Receive copy threshold");
1035 * @pdev: the PCI device that received the packet 1035 * @pdev: the PCI device that received the packet
1036 * @fl: the SGE free list holding the packet 1036 * @fl: the SGE free list holding the packet
1037 * @len: the actual packet length, excluding any SGE padding 1037 * @len: the actual packet length, excluding any SGE padding
1038 * @dma_pad: padding at beginning of buffer left by SGE DMA
1039 * @skb_pad: padding to be used if the packet is copied
1040 * @copy_thres: length threshold under which a packet should be copied
1041 * @drop_thres: # of remaining buffers before we start dropping packets
1042 * 1038 *
1043 * Get the next packet from a free list and complete setup of the 1039 * Get the next packet from a free list and complete setup of the
1044 * sk_buff. If the packet is small we make a copy and recycle the 1040 * sk_buff. If the packet is small we make a copy and recycle the
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index 65d0a9103297..7e8a63106bdf 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -32,14 +32,14 @@
32#include <linux/skbuff.h> 32#include <linux/skbuff.h>
33#include <linux/ethtool.h> 33#include <linux/ethtool.h>
34 34
35#include <asm/arch/svinto.h>/* DMA and register descriptions */ 35#include <arch/svinto.h>/* DMA and register descriptions */
36#include <asm/io.h> /* CRIS_LED_* I/O functions */ 36#include <asm/io.h> /* CRIS_LED_* I/O functions */
37#include <asm/irq.h> 37#include <asm/irq.h>
38#include <asm/dma.h> 38#include <asm/dma.h>
39#include <asm/system.h> 39#include <asm/system.h>
40#include <asm/ethernet.h> 40#include <asm/ethernet.h>
41#include <asm/cache.h> 41#include <asm/cache.h>
42#include <asm/arch/io_interface_mux.h> 42#include <arch/io_interface_mux.h>
43 43
44//#define ETHDEBUG 44//#define ETHDEBUG
45#define D(x) 45#define D(x)
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 1ace41a13ac3..2c341f83d327 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -1307,8 +1307,10 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1307 u32 fw_vers = 0; 1307 u32 fw_vers = 0;
1308 u32 tp_vers = 0; 1308 u32 tp_vers = 0;
1309 1309
1310 spin_lock(&adapter->stats_lock);
1310 t3_get_fw_version(adapter, &fw_vers); 1311 t3_get_fw_version(adapter, &fw_vers);
1311 t3_get_tp_version(adapter, &tp_vers); 1312 t3_get_tp_version(adapter, &tp_vers);
1313 spin_unlock(&adapter->stats_lock);
1312 1314
1313 strcpy(info->driver, DRV_NAME); 1315 strcpy(info->driver, DRV_NAME);
1314 strcpy(info->version, DRV_VERSION); 1316 strcpy(info->version, DRV_VERSION);
@@ -2699,7 +2701,7 @@ static void set_nqsets(struct adapter *adap)
2699 int hwports = adap->params.nports; 2701 int hwports = adap->params.nports;
2700 int nqsets = SGE_QSETS; 2702 int nqsets = SGE_QSETS;
2701 2703
2702 if (adap->params.rev > 0) { 2704 if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
2703 if (hwports == 2 && 2705 if (hwports == 2 &&
2704 (hwports * nqsets > SGE_QSETS || 2706 (hwports * nqsets > SGE_QSETS ||
2705 num_cpus >= nqsets / hwports)) 2707 num_cpus >= nqsets / hwports))
diff --git a/drivers/net/cxgb3/l2t.c b/drivers/net/cxgb3/l2t.c
index 4407ac9bb555..ff1611f90e7a 100644
--- a/drivers/net/cxgb3/l2t.c
+++ b/drivers/net/cxgb3/l2t.c
@@ -431,6 +431,7 @@ struct l2t_data *t3_init_l2t(unsigned int l2t_capacity)
431 for (i = 0; i < l2t_capacity; ++i) { 431 for (i = 0; i < l2t_capacity; ++i) {
432 d->l2tab[i].idx = i; 432 d->l2tab[i].idx = i;
433 d->l2tab[i].state = L2T_STATE_UNUSED; 433 d->l2tab[i].state = L2T_STATE_UNUSED;
434 __skb_queue_head_init(&d->l2tab[i].arpq);
434 spin_lock_init(&d->l2tab[i].lock); 435 spin_lock_init(&d->l2tab[i].lock);
435 atomic_set(&d->l2tab[i].refcnt, 0); 436 atomic_set(&d->l2tab[i].refcnt, 0);
436 } 437 }
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index 968f64be3743..9a0898b0dbce 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -572,7 +572,7 @@ struct t3_vpd {
572 u32 pad; /* for multiple-of-4 sizing and alignment */ 572 u32 pad; /* for multiple-of-4 sizing and alignment */
573}; 573};
574 574
575#define EEPROM_MAX_POLL 4 575#define EEPROM_MAX_POLL 40
576#define EEPROM_STAT_ADDR 0x4000 576#define EEPROM_STAT_ADDR 0x4000
577#define VPD_BASE 0xc00 577#define VPD_BASE 0xc00
578 578
@@ -3690,6 +3690,12 @@ int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
3690 ; 3690 ;
3691 3691
3692 pti = &port_types[adapter->params.vpd.port_type[j]]; 3692 pti = &port_types[adapter->params.vpd.port_type[j]];
3693 if (!pti->phy_prep) {
3694 CH_ALERT(adapter, "Invalid port type index %d\n",
3695 adapter->params.vpd.port_type[j]);
3696 return -EINVAL;
3697 }
3698
3693 ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j, 3699 ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
3694 ai->mdio_ops); 3700 ai->mdio_ops);
3695 if (ret) 3701 if (ret)
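
The t3_prep_adapter() hunk rejects a VPD port type whose table entry has no phy_prep hook instead of calling through a NULL function pointer. A self-contained sketch of that validate-before-dispatch pattern, with an invented ops table standing in for port_types[]:

#include <stdio.h>
#include <errno.h>

/* Hypothetical port-type table; mirrors the shape, not the driver's data. */
struct port_type_info {
	const char *name;
	int (*phy_prep)(int addr);	/* may be NULL for unsupported types */
};

static int dummy_prep(int addr)
{
	printf("preparing PHY at address %d\n", addr);
	return 0;
}

static const struct port_type_info port_types[] = {
	{ "none", NULL },
	{ "demo-phy", dummy_prep },
};

static int prep_port(unsigned int type_idx, int addr)
{
	const struct port_type_info *pti;

	if (type_idx >= sizeof(port_types) / sizeof(port_types[0]))
		return -EINVAL;
	pti = &port_types[type_idx];
	if (!pti->phy_prep)		/* reject before dereferencing */
		return -EINVAL;
	return pti->phy_prep(addr);
}

int main(void)
{
	printf("type 1 -> %d\n", prep_port(1, 4));
	printf("type 0 -> %d\n", prep_port(0, 4));	/* rejected, no NULL call */
	return 0;
}
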
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index f42c23f42652..5a9083e3f443 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -47,15 +47,6 @@
47#define CARDNAME "dm9000" 47#define CARDNAME "dm9000"
48#define DRV_VERSION "1.31" 48#define DRV_VERSION "1.31"
49 49
50#ifdef CONFIG_BLACKFIN
51#define readsb insb
52#define readsw insw
53#define readsl insl
54#define writesb outsb
55#define writesw outsw
56#define writesl outsl
57#endif
58
59/* 50/*
60 * Transmit timeout, default 5 seconds. 51 * Transmit timeout, default 5 seconds.
61 */ 52 */
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 3d69fae781cf..e8bfcce6b319 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -166,7 +166,7 @@
166 166
167#define DRV_NAME "e100" 167#define DRV_NAME "e100"
168#define DRV_EXT "-NAPI" 168#define DRV_EXT "-NAPI"
169#define DRV_VERSION "3.5.23-k4"DRV_EXT 169#define DRV_VERSION "3.5.23-k6"DRV_EXT
170#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver" 170#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver"
171#define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation" 171#define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation"
172#define PFX DRV_NAME ": " 172#define PFX DRV_NAME ": "
@@ -1804,7 +1804,7 @@ static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
1804 struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data; 1804 struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
1805 put_unaligned_le32(rx->dma_addr, &prev_rfd->link); 1805 put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
1806 pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr, 1806 pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
1807 sizeof(struct rfd), PCI_DMA_TODEVICE); 1807 sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
1808 } 1808 }
1809 1809
1810 return 0; 1810 return 0;
@@ -1823,7 +1823,7 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx,
1823 1823
1824 /* Need to sync before taking a peek at cb_complete bit */ 1824 /* Need to sync before taking a peek at cb_complete bit */
1825 pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr, 1825 pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
1826 sizeof(struct rfd), PCI_DMA_FROMDEVICE); 1826 sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
1827 rfd_status = le16_to_cpu(rfd->status); 1827 rfd_status = le16_to_cpu(rfd->status);
1828 1828
1829 DPRINTK(RX_STATUS, DEBUG, "status=0x%04X\n", rfd_status); 1829 DPRINTK(RX_STATUS, DEBUG, "status=0x%04X\n", rfd_status);
@@ -1850,7 +1850,7 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx,
1850 1850
1851 /* Get data */ 1851 /* Get data */
1852 pci_unmap_single(nic->pdev, rx->dma_addr, 1852 pci_unmap_single(nic->pdev, rx->dma_addr,
1853 RFD_BUF_LEN, PCI_DMA_FROMDEVICE); 1853 RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
1854 1854
1855 /* If this buffer has the el bit, but we think the receiver 1855 /* If this buffer has the el bit, but we think the receiver
1856 * is still running, check to see if it really stopped while 1856 * is still running, check to see if it really stopped while
@@ -1943,7 +1943,7 @@ static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
1943 new_before_last_rfd->command |= cpu_to_le16(cb_el); 1943 new_before_last_rfd->command |= cpu_to_le16(cb_el);
1944 pci_dma_sync_single_for_device(nic->pdev, 1944 pci_dma_sync_single_for_device(nic->pdev,
1945 new_before_last_rx->dma_addr, sizeof(struct rfd), 1945 new_before_last_rx->dma_addr, sizeof(struct rfd),
1946 PCI_DMA_TODEVICE); 1946 PCI_DMA_BIDIRECTIONAL);
1947 1947
1948 /* Now that we have a new stopping point, we can clear the old 1948 /* Now that we have a new stopping point, we can clear the old
1949 * stopping point. We must sync twice to get the proper 1949 * stopping point. We must sync twice to get the proper
@@ -1951,11 +1951,11 @@ static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
1951 old_before_last_rfd->command &= ~cpu_to_le16(cb_el); 1951 old_before_last_rfd->command &= ~cpu_to_le16(cb_el);
1952 pci_dma_sync_single_for_device(nic->pdev, 1952 pci_dma_sync_single_for_device(nic->pdev,
1953 old_before_last_rx->dma_addr, sizeof(struct rfd), 1953 old_before_last_rx->dma_addr, sizeof(struct rfd),
1954 PCI_DMA_TODEVICE); 1954 PCI_DMA_BIDIRECTIONAL);
1955 old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN); 1955 old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN);
1956 pci_dma_sync_single_for_device(nic->pdev, 1956 pci_dma_sync_single_for_device(nic->pdev,
1957 old_before_last_rx->dma_addr, sizeof(struct rfd), 1957 old_before_last_rx->dma_addr, sizeof(struct rfd),
1958 PCI_DMA_TODEVICE); 1958 PCI_DMA_BIDIRECTIONAL);
1959 } 1959 }
1960 1960
1961 if(restart_required) { 1961 if(restart_required) {
@@ -1978,7 +1978,7 @@ static void e100_rx_clean_list(struct nic *nic)
1978 for(rx = nic->rxs, i = 0; i < count; rx++, i++) { 1978 for(rx = nic->rxs, i = 0; i < count; rx++, i++) {
1979 if(rx->skb) { 1979 if(rx->skb) {
1980 pci_unmap_single(nic->pdev, rx->dma_addr, 1980 pci_unmap_single(nic->pdev, rx->dma_addr,
1981 RFD_BUF_LEN, PCI_DMA_FROMDEVICE); 1981 RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
1982 dev_kfree_skb(rx->skb); 1982 dev_kfree_skb(rx->skb);
1983 } 1983 }
1984 } 1984 }
@@ -2021,7 +2021,7 @@ static int e100_rx_alloc_list(struct nic *nic)
2021 before_last->command |= cpu_to_le16(cb_el); 2021 before_last->command |= cpu_to_le16(cb_el);
2022 before_last->size = 0; 2022 before_last->size = 0;
2023 pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr, 2023 pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
2024 sizeof(struct rfd), PCI_DMA_TODEVICE); 2024 sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
2025 2025
2026 nic->rx_to_use = nic->rx_to_clean = nic->rxs; 2026 nic->rx_to_use = nic->rx_to_clean = nic->rxs;
2027 nic->ru_running = RU_SUSPENDED; 2027 nic->ru_running = RU_SUSPENDED;
@@ -2222,7 +2222,7 @@ static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
2222 msleep(10); 2222 msleep(10);
2223 2223
2224 pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr, 2224 pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr,
2225 RFD_BUF_LEN, PCI_DMA_FROMDEVICE); 2225 RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
2226 2226
2227 if(memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd), 2227 if(memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
2228 skb->data, ETH_DATA_LEN)) 2228 skb->data, ETH_DATA_LEN))
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index 6a3893acfe04..c854c96f5ab3 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -1774,7 +1774,8 @@ static void e1000_get_wol(struct net_device *netdev,
1774 1774
1775 /* this function will set ->supported = 0 and return 1 if wol is not 1775 /* this function will set ->supported = 0 and return 1 if wol is not
1776 * supported by this hardware */ 1776 * supported by this hardware */
1777 if (e1000_wol_exclusion(adapter, wol)) 1777 if (e1000_wol_exclusion(adapter, wol) ||
1778 !device_can_wakeup(&adapter->pdev->dev))
1778 return; 1779 return;
1779 1780
1780 /* apply any specific unsupported masks here */ 1781 /* apply any specific unsupported masks here */
@@ -1811,7 +1812,8 @@ static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1811 if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) 1812 if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
1812 return -EOPNOTSUPP; 1813 return -EOPNOTSUPP;
1813 1814
1814 if (e1000_wol_exclusion(adapter, wol)) 1815 if (e1000_wol_exclusion(adapter, wol) ||
1816 !device_can_wakeup(&adapter->pdev->dev))
1815 return wol->wolopts ? -EOPNOTSUPP : 0; 1817 return wol->wolopts ? -EOPNOTSUPP : 0;
1816 1818
1817 switch (hw->device_id) { 1819 switch (hw->device_id) {
@@ -1838,6 +1840,8 @@ static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1838 if (wol->wolopts & WAKE_MAGIC) 1840 if (wol->wolopts & WAKE_MAGIC)
1839 adapter->wol |= E1000_WUFC_MAG; 1841 adapter->wol |= E1000_WUFC_MAG;
1840 1842
1843 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1844
1841 return 0; 1845 return 0;
1842} 1846}
1843 1847
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index fac82152e4c8..872799b746f5 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -1179,6 +1179,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
1179 1179
1180 /* initialize the wol settings based on the eeprom settings */ 1180 /* initialize the wol settings based on the eeprom settings */
1181 adapter->wol = adapter->eeprom_wol; 1181 adapter->wol = adapter->eeprom_wol;
1182 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1182 1183
1183 /* print bus type/speed/width info */ 1184 /* print bus type/speed/width info */
1184 DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ", 1185 DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ",
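
The e1000 WoL hunks gate user requests on device_can_wakeup() and then record the choice with device_set_wakeup_enable(). A loose user-space analogue of the capability gate, with toy types and constants that are not the kernel API:

#include <stdbool.h>
#include <stdio.h>

#define TOY_WAKE_MAGIC	0x1	/* illustrative bits, not the kernel's WAKE_* values */
#define TOY_WAKE_UCAST	0x2

/* Toy device model; can_wakeup stands in for device_can_wakeup(). */
struct toy_dev {
	bool can_wakeup;
	unsigned int wol;	/* currently armed wake sources */
};

/* Reject any request on hardware that cannot wake the system at all,
 * otherwise record the selection (the driver additionally mirrors it
 * with device_set_wakeup_enable()). */
static int toy_set_wol(struct toy_dev *d, unsigned int wolopts)
{
	if (!d->can_wakeup)
		return wolopts ? -1 : 0;	/* -EOPNOTSUPP analogue */
	d->wol = wolopts;
	return 0;
}

int main(void)
{
	struct toy_dev nowake = { .can_wakeup = false };
	struct toy_dev ok = { .can_wakeup = true };

	printf("%d\n", toy_set_wol(&nowake, TOY_WAKE_MAGIC));		/* -1 */
	printf("%d\n", toy_set_wol(&ok, TOY_WAKE_MAGIC | TOY_WAKE_UCAST));	/* 0 */
	printf("armed=%#x\n", ok.wol);
	return 0;
}
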
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index c55de1c027af..c55fd6fdb91c 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -299,6 +299,7 @@ struct e1000_adapter {
299 unsigned long led_status; 299 unsigned long led_status;
300 300
301 unsigned int flags; 301 unsigned int flags;
302 unsigned int flags2;
302 struct work_struct downshift_task; 303 struct work_struct downshift_task;
303 struct work_struct update_phy_task; 304 struct work_struct update_phy_task;
304}; 305};
@@ -306,6 +307,7 @@ struct e1000_adapter {
306struct e1000_info { 307struct e1000_info {
307 enum e1000_mac_type mac; 308 enum e1000_mac_type mac;
308 unsigned int flags; 309 unsigned int flags;
310 unsigned int flags2;
309 u32 pba; 311 u32 pba;
310 s32 (*get_variants)(struct e1000_adapter *); 312 s32 (*get_variants)(struct e1000_adapter *);
311 struct e1000_mac_operations *mac_ops; 313 struct e1000_mac_operations *mac_ops;
@@ -347,6 +349,9 @@ struct e1000_info {
347#define FLAG_RX_RESTART_NOW (1 << 30) 349#define FLAG_RX_RESTART_NOW (1 << 30)
348#define FLAG_MSI_TEST_FAILED (1 << 31) 350#define FLAG_MSI_TEST_FAILED (1 << 31)
349 351
352/* CRC Stripping defines */
353#define FLAG2_CRC_STRIPPING (1 << 0)
354
350#define E1000_RX_DESC_PS(R, i) \ 355#define E1000_RX_DESC_PS(R, i) \
351 (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) 356 (&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
352#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i])) 357#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i]))
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index 70c11c811a08..62421ce96311 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -1713,7 +1713,8 @@ static void e1000_get_wol(struct net_device *netdev,
1713 wol->supported = 0; 1713 wol->supported = 0;
1714 wol->wolopts = 0; 1714 wol->wolopts = 0;
1715 1715
1716 if (!(adapter->flags & FLAG_HAS_WOL)) 1716 if (!(adapter->flags & FLAG_HAS_WOL) ||
1717 !device_can_wakeup(&adapter->pdev->dev))
1717 return; 1718 return;
1718 1719
1719 wol->supported = WAKE_UCAST | WAKE_MCAST | 1720 wol->supported = WAKE_UCAST | WAKE_MCAST |
@@ -1751,7 +1752,8 @@ static int e1000_set_wol(struct net_device *netdev,
1751 if (wol->wolopts & WAKE_MAGICSECURE) 1752 if (wol->wolopts & WAKE_MAGICSECURE)
1752 return -EOPNOTSUPP; 1753 return -EOPNOTSUPP;
1753 1754
1754 if (!(adapter->flags & FLAG_HAS_WOL)) 1755 if (!(adapter->flags & FLAG_HAS_WOL) ||
1756 !device_can_wakeup(&adapter->pdev->dev))
1755 return wol->wolopts ? -EOPNOTSUPP : 0; 1757 return wol->wolopts ? -EOPNOTSUPP : 0;
1756 1758
1757 /* these settings will always override what we currently have */ 1759 /* these settings will always override what we currently have */
@@ -1770,6 +1772,8 @@ static int e1000_set_wol(struct net_device *netdev,
1770 if (wol->wolopts & WAKE_ARP) 1772 if (wol->wolopts & WAKE_ARP)
1771 adapter->wol |= E1000_WUFC_ARP; 1773 adapter->wol |= E1000_WUFC_ARP;
1772 1774
1775 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1776
1773 return 0; 1777 return 0;
1774} 1778}
1775 1779
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 523b9716a543..d115a6d30f29 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -1893,12 +1893,17 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
1893 ctrl |= E1000_CTRL_PHY_RST; 1893 ctrl |= E1000_CTRL_PHY_RST;
1894 } 1894 }
1895 ret_val = e1000_acquire_swflag_ich8lan(hw); 1895 ret_val = e1000_acquire_swflag_ich8lan(hw);
1896 /* Whether or not the swflag was acquired, we need to reset the part */
1896 hw_dbg(hw, "Issuing a global reset to ich8lan"); 1897 hw_dbg(hw, "Issuing a global reset to ich8lan");
1897 ew32(CTRL, (ctrl | E1000_CTRL_RST)); 1898 ew32(CTRL, (ctrl | E1000_CTRL_RST));
1898 msleep(20); 1899 msleep(20);
1899 1900
1900 /* release the swflag because it is not reset by hardware reset */ 1901 if (!ret_val) {
1901 e1000_release_swflag_ich8lan(hw); 1902 /* release the swflag because it is not reset by
1903 * hardware reset
1904 */
1905 e1000_release_swflag_ich8lan(hw);
1906 }
1902 1907
1903 ret_val = e1000e_get_auto_rd_done(hw); 1908 ret_val = e1000e_get_auto_rd_done(hw);
1904 if (ret_val) { 1909 if (ret_val) {
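
The ich8lan hunk issues the global reset whether or not the software flag was obtained, and releases the flag only if the acquire succeeded. A minimal sketch of that acquire-may-fail pattern, using a pthread mutex as a stand-in for the flag:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t swflag = PTHREAD_MUTEX_INITIALIZER;

/* Issue the reset in either case, but only release the flag if the
 * earlier acquire actually succeeded. */
static void reset_part(void)
{
	int acquired = (pthread_mutex_trylock(&swflag) == 0);

	puts("issuing global reset");	/* happens whether or not we hold the flag */

	if (acquired)
		pthread_mutex_unlock(&swflag);	/* never release what we don't own */
}

int main(void)
{
	reset_part();
	return 0;
}
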
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index abd492b7336d..122539a0e1fe 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -345,7 +345,6 @@ no_buffers:
345/** 345/**
346 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers 346 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
347 * @adapter: address of board private structure 347 * @adapter: address of board private structure
348 * @rx_ring: pointer to receive ring structure
349 * @cleaned_count: number of buffers to allocate this pass 348 * @cleaned_count: number of buffers to allocate this pass
350 **/ 349 **/
351 350
@@ -499,6 +498,10 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
499 goto next_desc; 498 goto next_desc;
500 } 499 }
501 500
501 /* adjust length to remove Ethernet CRC */
502 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
503 length -= 4;
504
502 total_rx_bytes += length; 505 total_rx_bytes += length;
503 total_rx_packets++; 506 total_rx_packets++;
504 507
@@ -804,6 +807,10 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
804 pci_dma_sync_single_for_device(pdev, ps_page->dma, 807 pci_dma_sync_single_for_device(pdev, ps_page->dma,
805 PAGE_SIZE, PCI_DMA_FROMDEVICE); 808 PAGE_SIZE, PCI_DMA_FROMDEVICE);
806 809
810 /* remove the CRC */
811 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
812 l1 -= 4;
813
807 skb_put(skb, l1); 814 skb_put(skb, l1);
808 goto copydone; 815 goto copydone;
809 } /* if */ 816 } /* if */
@@ -825,6 +832,12 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
825 skb->truesize += length; 832 skb->truesize += length;
826 } 833 }
827 834
835 /* strip the ethernet crc, problem is we're using pages now so
836 * this whole operation can get a little cpu intensive
837 */
838 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
839 pskb_trim(skb, skb->len - 4);
840
828copydone: 841copydone:
829 total_rx_bytes += skb->len; 842 total_rx_bytes += skb->len;
830 total_rx_packets++; 843 total_rx_packets++;
@@ -2301,8 +2314,12 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
2301 else 2314 else
2302 rctl |= E1000_RCTL_LPE; 2315 rctl |= E1000_RCTL_LPE;
2303 2316
2304 /* Enable hardware CRC frame stripping */ 2317 /* Some systems expect that the CRC is included in SMBUS traffic. The
2305 rctl |= E1000_RCTL_SECRC; 2318 * hardware strips the CRC before sending to both SMBUS (BMC) and to
2319 * host memory when this is enabled
2320 */
2321 if (adapter->flags2 & FLAG2_CRC_STRIPPING)
2322 rctl |= E1000_RCTL_SECRC;
2306 2323
2307 /* Setup buffer sizes */ 2324 /* Setup buffer sizes */
2308 rctl &= ~E1000_RCTL_SZ_4096; 2325 rctl &= ~E1000_RCTL_SZ_4096;
@@ -4766,6 +4783,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
4766 adapter->ei = ei; 4783 adapter->ei = ei;
4767 adapter->pba = ei->pba; 4784 adapter->pba = ei->pba;
4768 adapter->flags = ei->flags; 4785 adapter->flags = ei->flags;
4786 adapter->flags2 = ei->flags2;
4769 adapter->hw.adapter = adapter; 4787 adapter->hw.adapter = adapter;
4770 adapter->hw.mac.type = ei->mac; 4788 adapter->hw.mac.type = ei->mac;
4771 adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1; 4789 adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;
@@ -4970,6 +4988,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
4970 4988
4971 /* initialize the wol settings based on the eeprom settings */ 4989 /* initialize the wol settings based on the eeprom settings */
4972 adapter->wol = adapter->eeprom_wol; 4990 adapter->wol = adapter->eeprom_wol;
4991 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
4973 4992
4974 /* reset the hardware with the new settings */ 4993 /* reset the hardware with the new settings */
4975 e1000e_reset(adapter); 4994 e1000e_reset(adapter);
@@ -5008,6 +5027,7 @@ err_hw_init:
5008err_sw_init: 5027err_sw_init:
5009 if (adapter->hw.flash_address) 5028 if (adapter->hw.flash_address)
5010 iounmap(adapter->hw.flash_address); 5029 iounmap(adapter->hw.flash_address);
5030 e1000e_reset_interrupt_capability(adapter);
5011err_flashmap: 5031err_flashmap:
5012 iounmap(adapter->hw.hw_addr); 5032 iounmap(adapter->hw.hw_addr);
5013err_ioremap: 5033err_ioremap:
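
The netdev.c hunks subtract the 4-byte Ethernet FCS from the received length whenever FLAG2_CRC_STRIPPING is clear, because the hardware then leaves the CRC on the frame. A small stand-alone sketch of that length adjustment (not the driver's RX path):

#include <stdio.h>

#define FLAG2_CRC_STRIPPING	(1 << 0)	/* same bit the patch introduces */
#define FCS_LEN			4		/* Ethernet frame check sequence */

/* If the hardware did not strip the CRC, the last four bytes of the DMA'd
 * frame are the FCS and must not be counted as payload. */
static unsigned int rx_payload_len(unsigned int flags2, unsigned int dma_len)
{
	if (!(flags2 & FLAG2_CRC_STRIPPING))
		dma_len -= FCS_LEN;
	return dma_len;
}

int main(void)
{
	printf("%u\n", rx_payload_len(FLAG2_CRC_STRIPPING, 64));	/* 64 */
	printf("%u\n", rx_payload_len(0, 64));				/* 60 */
	return 0;
}
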
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c
index 77a3d7207a5f..e909f96698e8 100644
--- a/drivers/net/e1000e/param.c
+++ b/drivers/net/e1000e/param.c
@@ -151,6 +151,16 @@ E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround");
151 */ 151 */
152E1000_PARAM(WriteProtectNVM, "Write-protect NVM [WARNING: disabling this can lead to corrupted NVM]"); 152E1000_PARAM(WriteProtectNVM, "Write-protect NVM [WARNING: disabling this can lead to corrupted NVM]");
153 153
154/*
155 * Enable CRC Stripping
156 *
157 * Valid Range: 0, 1
158 *
159 * Default Value: 1 (enabled)
160 */
161E1000_PARAM(CrcStripping, "Enable CRC Stripping, disable if your BMC needs " \
162 "the CRC");
163
154struct e1000_option { 164struct e1000_option {
155 enum { enable_option, range_option, list_option } type; 165 enum { enable_option, range_option, list_option } type;
156 const char *name; 166 const char *name;
@@ -404,6 +414,21 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
404 adapter->flags |= FLAG_SMART_POWER_DOWN; 414 adapter->flags |= FLAG_SMART_POWER_DOWN;
405 } 415 }
406 } 416 }
417 { /* CRC Stripping */
418 const struct e1000_option opt = {
419 .type = enable_option,
420 .name = "CRC Stripping",
421 .err = "defaulting to enabled",
422 .def = OPTION_ENABLED
423 };
424
425 if (num_CrcStripping > bd) {
426 unsigned int crc_stripping = CrcStripping[bd];
427 e1000_validate_option(&crc_stripping, &opt, adapter);
428 if (crc_stripping == OPTION_ENABLED)
429 adapter->flags2 |= FLAG2_CRC_STRIPPING;
430 }
431 }
407 { /* Kumeran Lock Loss Workaround */ 432 { /* Kumeran Lock Loss Workaround */
408 const struct e1000_option opt = { 433 const struct e1000_option opt = {
409 .type = enable_option, 434 .type = enable_option,
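
The param.c hunk reads the per-board CrcStripping value only when the user actually supplied one (num_CrcStripping > bd) and folds the result into flags2. A tiny sketch of that per-board option guard, with invented option storage; the driver's real default comes from the part's flags2, not from this check:

#include <stdio.h>

#define FLAG2_CRC_STRIPPING	(1 << 0)

/* Values supplied on the (hypothetical) module command line, one entry per
 * board; num_opts says how many the user actually provided. */
static const int crc_stripping_opt[] = { 1, 0 };
static const int num_opts = 2;

static unsigned int apply_board_option(int bd, unsigned int flags2)
{
	/* Only consult the array if the user gave a value for this board;
	 * otherwise leave flags2 as inherited from the per-part defaults. */
	if (bd < num_opts && crc_stripping_opt[bd])
		flags2 |= FLAG2_CRC_STRIPPING;
	return flags2;
}

int main(void)
{
	printf("board 0 flags2=%#x\n", apply_board_option(0, 0));	/* 0x1 */
	printf("board 1 flags2=%#x\n", apply_board_option(1, 0));	/* 0x0 */
	printf("board 2 flags2=%#x\n", apply_board_option(2, 0));	/* untouched */
	return 0;
}
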
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index 5524271eedca..002d918fb4c7 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -40,7 +40,7 @@
40#include <asm/io.h> 40#include <asm/io.h>
41 41
42#define DRV_NAME "ehea" 42#define DRV_NAME "ehea"
43#define DRV_VERSION "EHEA_0093" 43#define DRV_VERSION "EHEA_0095"
44 44
45/* eHEA capability flags */ 45/* eHEA capability flags */
46#define DLPAR_PORT_ADD_REM 1 46#define DLPAR_PORT_ADD_REM 1
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index b70c5314f537..422fcb93e2c3 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -2863,7 +2863,7 @@ static void ehea_rereg_mrs(struct work_struct *work)
2863 struct ehea_adapter *adapter; 2863 struct ehea_adapter *adapter;
2864 2864
2865 mutex_lock(&dlpar_mem_lock); 2865 mutex_lock(&dlpar_mem_lock);
2866 ehea_info("LPAR memory enlarged - re-initializing driver"); 2866 ehea_info("LPAR memory changed - re-initializing driver");
2867 2867
2868 list_for_each_entry(adapter, &adapter_list, list) 2868 list_for_each_entry(adapter, &adapter_list, list)
2869 if (adapter->active_ports) { 2869 if (adapter->active_ports) {
@@ -2900,13 +2900,6 @@ static void ehea_rereg_mrs(struct work_struct *work)
2900 } 2900 }
2901 } 2901 }
2902 2902
2903 ehea_destroy_busmap();
2904 ret = ehea_create_busmap();
2905 if (ret) {
2906 ehea_error("creating ehea busmap failed");
2907 goto out;
2908 }
2909
2910 clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags); 2903 clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
2911 2904
2912 list_for_each_entry(adapter, &adapter_list, list) 2905 list_for_each_entry(adapter, &adapter_list, list)
@@ -3519,9 +3512,21 @@ void ehea_crash_handler(void)
3519static int ehea_mem_notifier(struct notifier_block *nb, 3512static int ehea_mem_notifier(struct notifier_block *nb,
3520 unsigned long action, void *data) 3513 unsigned long action, void *data)
3521{ 3514{
3515 struct memory_notify *arg = data;
3522 switch (action) { 3516 switch (action) {
3523 case MEM_OFFLINE: 3517 case MEM_CANCEL_OFFLINE:
3524 ehea_info("memory has been removed"); 3518 ehea_info("memory offlining canceled");
3519 /* Readd canceled memory block */
3520 case MEM_ONLINE:
3521 ehea_info("memory is going online");
3522 if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
3523 return NOTIFY_BAD;
3524 ehea_rereg_mrs(NULL);
3525 break;
3526 case MEM_GOING_OFFLINE:
3527 ehea_info("memory is going offline");
3528 if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
3529 return NOTIFY_BAD;
3525 ehea_rereg_mrs(NULL); 3530 ehea_rereg_mrs(NULL);
3526 break; 3531 break;
3527 default: 3532 default:
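
In the ehea_main.c notifier above, MEM_CANCEL_OFFLINE deliberately falls through to the online handling so a cancelled offline puts the section back into the bitmap. A compact illustration of that intentional fall-through, using an invented event enum rather than the kernel's memory-hotplug constants:

#include <stdio.h>

enum toy_mem_event { EV_GOING_OFFLINE, EV_CANCEL_OFFLINE, EV_ONLINE };

/* A cancelled offline must be handled exactly like a fresh online: the
 * section has to go back into the bitmap, so the case falls through. */
static const char *handle_event(enum toy_mem_event ev)
{
	switch (ev) {
	case EV_CANCEL_OFFLINE:
		/* fall through: re-add the section that was about to go away */
	case EV_ONLINE:
		return "add section to bitmap, re-register MRs";
	case EV_GOING_OFFLINE:
		return "remove section from bitmap, re-register MRs";
	}
	return "ignored";
}

int main(void)
{
	printf("%s\n", handle_event(EV_CANCEL_OFFLINE));
	printf("%s\n", handle_event(EV_GOING_OFFLINE));
	return 0;
}
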
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index db8a9257e680..9d006878f045 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -567,7 +567,7 @@ static inline int ehea_calc_index(unsigned long i, unsigned long s)
567static inline int ehea_init_top_bmap(struct ehea_top_bmap *ehea_top_bmap, 567static inline int ehea_init_top_bmap(struct ehea_top_bmap *ehea_top_bmap,
568 int dir) 568 int dir)
569{ 569{
570 if(!ehea_top_bmap->dir[dir]) { 570 if (!ehea_top_bmap->dir[dir]) {
571 ehea_top_bmap->dir[dir] = 571 ehea_top_bmap->dir[dir] =
572 kzalloc(sizeof(struct ehea_dir_bmap), GFP_KERNEL); 572 kzalloc(sizeof(struct ehea_dir_bmap), GFP_KERNEL);
573 if (!ehea_top_bmap->dir[dir]) 573 if (!ehea_top_bmap->dir[dir])
@@ -578,7 +578,7 @@ static inline int ehea_init_top_bmap(struct ehea_top_bmap *ehea_top_bmap,
578 578
579static inline int ehea_init_bmap(struct ehea_bmap *ehea_bmap, int top, int dir) 579static inline int ehea_init_bmap(struct ehea_bmap *ehea_bmap, int top, int dir)
580{ 580{
581 if(!ehea_bmap->top[top]) { 581 if (!ehea_bmap->top[top]) {
582 ehea_bmap->top[top] = 582 ehea_bmap->top[top] =
583 kzalloc(sizeof(struct ehea_top_bmap), GFP_KERNEL); 583 kzalloc(sizeof(struct ehea_top_bmap), GFP_KERNEL);
584 if (!ehea_bmap->top[top]) 584 if (!ehea_bmap->top[top])
@@ -587,53 +587,171 @@ static inline int ehea_init_bmap(struct ehea_bmap *ehea_bmap, int top, int dir)
587 return ehea_init_top_bmap(ehea_bmap->top[top], dir); 587 return ehea_init_top_bmap(ehea_bmap->top[top], dir);
588} 588}
589 589
590static int ehea_create_busmap_callback(unsigned long pfn, 590static DEFINE_MUTEX(ehea_busmap_mutex);
591 unsigned long nr_pages, void *arg) 591static unsigned long ehea_mr_len;
592
593#define EHEA_BUSMAP_ADD_SECT 1
594#define EHEA_BUSMAP_REM_SECT 0
595
596static void ehea_rebuild_busmap(void)
592{ 597{
593 unsigned long i, mr_len, start_section, end_section; 598 u64 vaddr = EHEA_BUSMAP_START;
594 start_section = (pfn * PAGE_SIZE) / EHEA_SECTSIZE; 599 int top, dir, idx;
595 end_section = start_section + ((nr_pages * PAGE_SIZE) / EHEA_SECTSIZE);
596 mr_len = *(unsigned long *)arg;
597 600
598 if (!ehea_bmap) 601 for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
599 ehea_bmap = kzalloc(sizeof(struct ehea_bmap), GFP_KERNEL); 602 struct ehea_top_bmap *ehea_top;
600 if (!ehea_bmap) 603 int valid_dir_entries = 0;
601 return -ENOMEM;
602 604
603 for (i = start_section; i < end_section; i++) { 605 if (!ehea_bmap->top[top])
604 int ret; 606 continue;
605 int top, dir, idx; 607 ehea_top = ehea_bmap->top[top];
606 u64 vaddr; 608 for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
609 struct ehea_dir_bmap *ehea_dir;
610 int valid_entries = 0;
607 611
608 top = ehea_calc_index(i, EHEA_TOP_INDEX_SHIFT); 612 if (!ehea_top->dir[dir])
609 dir = ehea_calc_index(i, EHEA_DIR_INDEX_SHIFT); 613 continue;
614 valid_dir_entries++;
615 ehea_dir = ehea_top->dir[dir];
616 for (idx = 0; idx < EHEA_MAP_ENTRIES; idx++) {
617 if (!ehea_dir->ent[idx])
618 continue;
619 valid_entries++;
620 ehea_dir->ent[idx] = vaddr;
621 vaddr += EHEA_SECTSIZE;
622 }
623 if (!valid_entries) {
624 ehea_top->dir[dir] = NULL;
625 kfree(ehea_dir);
626 }
627 }
628 if (!valid_dir_entries) {
629 ehea_bmap->top[top] = NULL;
630 kfree(ehea_top);
631 }
632 }
633}
610 634
611 ret = ehea_init_bmap(ehea_bmap, top, dir); 635static int ehea_update_busmap(unsigned long pfn, unsigned long nr_pages, int add)
612 if(ret) 636{
613 return ret; 637 unsigned long i, start_section, end_section;
614 638
615 idx = i & EHEA_INDEX_MASK; 639 if (!nr_pages)
616 vaddr = EHEA_BUSMAP_START + mr_len + i * EHEA_SECTSIZE; 640 return 0;
617 641
618 ehea_bmap->top[top]->dir[dir]->ent[idx] = vaddr; 642 if (!ehea_bmap) {
643 ehea_bmap = kzalloc(sizeof(struct ehea_bmap), GFP_KERNEL);
644 if (!ehea_bmap)
645 return -ENOMEM;
619 } 646 }
620 647
621 mr_len += nr_pages * PAGE_SIZE; 648 start_section = (pfn * PAGE_SIZE) / EHEA_SECTSIZE;
622 *(unsigned long *)arg = mr_len; 649 end_section = start_section + ((nr_pages * PAGE_SIZE) / EHEA_SECTSIZE);
650 /* Mark entries as valid or invalid only; address is assigned later */
651 for (i = start_section; i < end_section; i++) {
652 u64 flag;
653 int top = ehea_calc_index(i, EHEA_TOP_INDEX_SHIFT);
654 int dir = ehea_calc_index(i, EHEA_DIR_INDEX_SHIFT);
655 int idx = i & EHEA_INDEX_MASK;
656
657 if (add) {
658 int ret = ehea_init_bmap(ehea_bmap, top, dir);
659 if (ret)
660 return ret;
661 flag = 1; /* valid */
662 ehea_mr_len += EHEA_SECTSIZE;
663 } else {
664 if (!ehea_bmap->top[top])
665 continue;
666 if (!ehea_bmap->top[top]->dir[dir])
667 continue;
668 flag = 0; /* invalid */
669 ehea_mr_len -= EHEA_SECTSIZE;
670 }
623 671
672 ehea_bmap->top[top]->dir[dir]->ent[idx] = flag;
673 }
674 ehea_rebuild_busmap(); /* Assign contiguous addresses for mr */
624 return 0; 675 return 0;
625} 676}
626 677
627static unsigned long ehea_mr_len; 678int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages)
679{
680 int ret;
628 681
629static DEFINE_MUTEX(ehea_busmap_mutex); 682 mutex_lock(&ehea_busmap_mutex);
683 ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);
684 mutex_unlock(&ehea_busmap_mutex);
685 return ret;
686}
687
688int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages)
689{
690 int ret;
691
692 mutex_lock(&ehea_busmap_mutex);
693 ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_REM_SECT);
694 mutex_unlock(&ehea_busmap_mutex);
695 return ret;
696}
697
698static int ehea_is_hugepage(unsigned long pfn)
699{
700 int page_order;
701
702 if (pfn & EHEA_HUGEPAGE_PFN_MASK)
703 return 0;
704
705 page_order = compound_order(pfn_to_page(pfn));
706 if (page_order + PAGE_SHIFT != EHEA_HUGEPAGESHIFT)
707 return 0;
708
709 return 1;
710}
711
712static int ehea_create_busmap_callback(unsigned long initial_pfn,
713 unsigned long total_nr_pages, void *arg)
714{
715 int ret;
716 unsigned long pfn, start_pfn, end_pfn, nr_pages;
717
718 if ((total_nr_pages * PAGE_SIZE) < EHEA_HUGEPAGE_SIZE)
719 return ehea_update_busmap(initial_pfn, total_nr_pages,
720 EHEA_BUSMAP_ADD_SECT);
721
722 /* Given chunk is >= 16GB -> check for hugepages */
723 start_pfn = initial_pfn;
724 end_pfn = initial_pfn + total_nr_pages;
725 pfn = start_pfn;
726
727 while (pfn < end_pfn) {
728 if (ehea_is_hugepage(pfn)) {
729 /* Add mem found in front of the hugepage */
730 nr_pages = pfn - start_pfn;
731 ret = ehea_update_busmap(start_pfn, nr_pages,
732 EHEA_BUSMAP_ADD_SECT);
733 if (ret)
734 return ret;
735
736 /* Skip the hugepage */
737 pfn += (EHEA_HUGEPAGE_SIZE / PAGE_SIZE);
738 start_pfn = pfn;
739 } else
740 pfn += (EHEA_SECTSIZE / PAGE_SIZE);
741 }
742
743 /* Add mem found behind the hugepage(s) */
744 nr_pages = pfn - start_pfn;
745 return ehea_update_busmap(start_pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);
746}
630 747
631int ehea_create_busmap(void) 748int ehea_create_busmap(void)
632{ 749{
633 int ret; 750 int ret;
751
634 mutex_lock(&ehea_busmap_mutex); 752 mutex_lock(&ehea_busmap_mutex);
635 ehea_mr_len = 0; 753 ehea_mr_len = 0;
636 ret = walk_memory_resource(0, 1ULL << MAX_PHYSMEM_BITS, &ehea_mr_len, 754 ret = walk_memory_resource(0, 1ULL << MAX_PHYSMEM_BITS, NULL,
637 ehea_create_busmap_callback); 755 ehea_create_busmap_callback);
638 mutex_unlock(&ehea_busmap_mutex); 756 mutex_unlock(&ehea_busmap_mutex);
639 return ret; 757 return ret;
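
ehea_is_hugepage() first checks that the PFN is 16 GiB aligned by masking with EHEA_HUGEPAGE_PFN_MASK (defined in the ehea_qmr.h hunk below). A stand-alone sketch of that alignment test, assuming 4 KiB pages for the demo; the additional compound_order() check has no user-space equivalent:

#include <stdio.h>

#define PAGE_SHIFT		12	/* assumed 4 KiB pages for this demo */
#define EHEA_HUGEPAGESHIFT	34	/* 16 GiB, as in the patch */
#define EHEA_HUGEPAGE_SIZE	(1ULL << EHEA_HUGEPAGESHIFT)
#define EHEA_HUGEPAGE_PFN_MASK	((EHEA_HUGEPAGE_SIZE - 1) >> PAGE_SHIFT)

/* A PFN can only start a 16 GiB hugepage if it is 16 GiB aligned, i.e. all
 * the low bits covered by the mask are zero. */
static int pfn_is_hugepage_aligned(unsigned long long pfn)
{
	return !(pfn & EHEA_HUGEPAGE_PFN_MASK);
}

int main(void)
{
	unsigned long long aligned = EHEA_HUGEPAGE_SIZE >> PAGE_SHIFT;

	printf("mask=%#llx\n", EHEA_HUGEPAGE_PFN_MASK);
	printf("pfn %#llx aligned: %d\n", aligned, pfn_is_hugepage_aligned(aligned));
	printf("pfn %#llx aligned: %d\n", aligned + 1,
	       pfn_is_hugepage_aligned(aligned + 1));
	return 0;
}
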
diff --git a/drivers/net/ehea/ehea_qmr.h b/drivers/net/ehea/ehea_qmr.h
index 0bb6f92fa2f8..0817c1e74a19 100644
--- a/drivers/net/ehea/ehea_qmr.h
+++ b/drivers/net/ehea/ehea_qmr.h
@@ -40,6 +40,9 @@
40#define EHEA_PAGESIZE (1UL << EHEA_PAGESHIFT) 40#define EHEA_PAGESIZE (1UL << EHEA_PAGESHIFT)
41#define EHEA_SECTSIZE (1UL << 24) 41#define EHEA_SECTSIZE (1UL << 24)
42#define EHEA_PAGES_PER_SECTION (EHEA_SECTSIZE >> EHEA_PAGESHIFT) 42#define EHEA_PAGES_PER_SECTION (EHEA_SECTSIZE >> EHEA_PAGESHIFT)
43#define EHEA_HUGEPAGESHIFT 34
44#define EHEA_HUGEPAGE_SIZE (1UL << EHEA_HUGEPAGESHIFT)
45#define EHEA_HUGEPAGE_PFN_MASK ((EHEA_HUGEPAGE_SIZE - 1) >> PAGE_SHIFT)
43 46
44#if ((1UL << SECTION_SIZE_BITS) < EHEA_SECTSIZE) 47#if ((1UL << SECTION_SIZE_BITS) < EHEA_SECTSIZE)
45#error eHEA module cannot work if kernel sectionsize < ehea sectionsize 48#error eHEA module cannot work if kernel sectionsize < ehea sectionsize
@@ -378,6 +381,8 @@ int ehea_rem_mr(struct ehea_mr *mr);
378 381
379void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle); 382void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle);
380 383
384int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages);
385int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages);
381int ehea_create_busmap(void); 386int ehea_create_busmap(void);
382void ehea_destroy_busmap(void); 387void ehea_destroy_busmap(void);
383u64 ehea_map_vaddr(void *caddr); 388u64 ehea_map_vaddr(void *caddr);
diff --git a/drivers/net/enc28j60.c b/drivers/net/enc28j60.c
index e1b441effbbe..36cb6e95b465 100644
--- a/drivers/net/enc28j60.c
+++ b/drivers/net/enc28j60.c
@@ -568,6 +568,17 @@ static u16 erxrdpt_workaround(u16 next_packet_ptr, u16 start, u16 end)
568 return erxrdpt; 568 return erxrdpt;
569} 569}
570 570
571/*
572 * Calculate wrap around when reading beyond the end of the RX buffer
573 */
574static u16 rx_packet_start(u16 ptr)
575{
576 if (ptr + RSV_SIZE > RXEND_INIT)
577 return (ptr + RSV_SIZE) - (RXEND_INIT - RXSTART_INIT + 1);
578 else
579 return ptr + RSV_SIZE;
580}
581
571static void nolock_rxfifo_init(struct enc28j60_net *priv, u16 start, u16 end) 582static void nolock_rxfifo_init(struct enc28j60_net *priv, u16 start, u16 end)
572{ 583{
573 u16 erxrdpt; 584 u16 erxrdpt;
@@ -938,8 +949,9 @@ static void enc28j60_hw_rx(struct net_device *ndev)
938 skb->dev = ndev; 949 skb->dev = ndev;
939 skb_reserve(skb, NET_IP_ALIGN); 950 skb_reserve(skb, NET_IP_ALIGN);
940 /* copy the packet from the receive buffer */ 951 /* copy the packet from the receive buffer */
941 enc28j60_mem_read(priv, priv->next_pk_ptr + sizeof(rsv), 952 enc28j60_mem_read(priv,
942 len, skb_put(skb, len)); 953 rx_packet_start(priv->next_pk_ptr),
954 len, skb_put(skb, len));
943 if (netif_msg_pktdata(priv)) 955 if (netif_msg_pktdata(priv))
944 dump_packet(__func__, skb->len, skb->data); 956 dump_packet(__func__, skb->len, skb->data);
945 skb->protocol = eth_type_trans(skb, ndev); 957 skb->protocol = eth_type_trans(skb, ndev);
@@ -947,7 +959,7 @@ static void enc28j60_hw_rx(struct net_device *ndev)
947 ndev->stats.rx_packets++; 959 ndev->stats.rx_packets++;
948 ndev->stats.rx_bytes += len; 960 ndev->stats.rx_bytes += len;
949 ndev->last_rx = jiffies; 961 ndev->last_rx = jiffies;
950 netif_rx(skb); 962 netif_rx_ni(skb);
951 } 963 }
952 } 964 }
953 /* 965 /*
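
rx_packet_start() skips the receive status vector and wraps back to the start of the RX area when the pointer would run past RXEND_INIT. A self-contained version of that wrap arithmetic with illustrative buffer bounds (the real values live in the chip header):

#include <stdio.h>

/* Demo values; the real RXSTART_INIT/RXEND_INIT/RSV_SIZE depend on how the
 * enc28j60's 8 KiB SRAM is split between RX and TX. */
#define RXSTART_INIT	0x0000
#define RXEND_INIT	0x19FF
#define RSV_SIZE	6	/* receive status vector preceding the frame */

/* Skip the status vector in front of the frame; if that steps past the end
 * of the circular RX area, wrap back to its start. */
static unsigned int rx_packet_start(unsigned int ptr)
{
	if (ptr + RSV_SIZE > RXEND_INIT)
		return (ptr + RSV_SIZE) - (RXEND_INIT - RXSTART_INIT + 1);
	return ptr + RSV_SIZE;
}

int main(void)
{
	printf("%#x\n", rx_packet_start(0x0100));	/* 0x0106, no wrap */
	printf("%#x\n", rx_packet_start(0x19FC));	/* wraps to 0x0002 */
	return 0;
}
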
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index 4e4f68304e82..aec3b97e794d 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -401,6 +401,21 @@ static int mpc52xx_fec_hard_start_xmit(struct sk_buff *skb, struct net_device *d
401 return 0; 401 return 0;
402} 402}
403 403
404#ifdef CONFIG_NET_POLL_CONTROLLER
405static void mpc52xx_fec_poll_controller(struct net_device *dev)
406{
407 struct mpc52xx_fec_priv *priv = netdev_priv(dev);
408
409 disable_irq(priv->t_irq);
410 mpc52xx_fec_tx_interrupt(priv->t_irq, dev);
411 enable_irq(priv->t_irq);
412 disable_irq(priv->r_irq);
413 mpc52xx_fec_rx_interrupt(priv->r_irq, dev);
414 enable_irq(priv->r_irq);
415}
416#endif
417
418
404/* This handles BestComm transmit task interrupts 419/* This handles BestComm transmit task interrupts
405 */ 420 */
406static irqreturn_t mpc52xx_fec_tx_interrupt(int irq, void *dev_id) 421static irqreturn_t mpc52xx_fec_tx_interrupt(int irq, void *dev_id)
@@ -926,6 +941,9 @@ mpc52xx_fec_probe(struct of_device *op, const struct of_device_id *match)
926 ndev->tx_timeout = mpc52xx_fec_tx_timeout; 941 ndev->tx_timeout = mpc52xx_fec_tx_timeout;
927 ndev->watchdog_timeo = FEC_WATCHDOG_TIMEOUT; 942 ndev->watchdog_timeo = FEC_WATCHDOG_TIMEOUT;
928 ndev->base_addr = mem.start; 943 ndev->base_addr = mem.start;
944#ifdef CONFIG_NET_POLL_CONTROLLER
945 ndev->poll_controller = mpc52xx_fec_poll_controller;
946#endif
929 947
930 priv->t_irq = priv->r_irq = ndev->irq = NO_IRQ; /* IRQ are free for now */ 948 priv->t_irq = priv->r_irq = ndev->irq = NO_IRQ; /* IRQ are free for now */
931 949
diff --git a/drivers/net/fec_mpc52xx_phy.c b/drivers/net/fec_mpc52xx_phy.c
index 08e18bcb970f..45dd9bdc5d62 100644
--- a/drivers/net/fec_mpc52xx_phy.c
+++ b/drivers/net/fec_mpc52xx_phy.c
@@ -2,6 +2,7 @@
2 * Driver for the MPC5200 Fast Ethernet Controller - MDIO bus driver 2 * Driver for the MPC5200 Fast Ethernet Controller - MDIO bus driver
3 * 3 *
4 * Copyright (C) 2007 Domen Puncer, Telargo, Inc. 4 * Copyright (C) 2007 Domen Puncer, Telargo, Inc.
5 * Copyright (C) 2008 Wolfram Sang, Pengutronix
5 * 6 *
6 * This file is licensed under the terms of the GNU General Public License 7 * This file is licensed under the terms of the GNU General Public License
7 * version 2. This program is licensed "as is" without any warranty of any 8 * version 2. This program is licensed "as is" without any warranty of any
@@ -21,58 +22,45 @@ struct mpc52xx_fec_mdio_priv {
21 struct mpc52xx_fec __iomem *regs; 22 struct mpc52xx_fec __iomem *regs;
22}; 23};
23 24
24static int mpc52xx_fec_mdio_read(struct mii_bus *bus, int phy_id, int reg) 25static int mpc52xx_fec_mdio_transfer(struct mii_bus *bus, int phy_id,
26 int reg, u32 value)
25{ 27{
26 struct mpc52xx_fec_mdio_priv *priv = bus->priv; 28 struct mpc52xx_fec_mdio_priv *priv = bus->priv;
27 struct mpc52xx_fec __iomem *fec; 29 struct mpc52xx_fec __iomem *fec;
28 int tries = 100; 30 int tries = 100;
29 u32 request = FEC_MII_READ_FRAME; 31
32 value |= (phy_id << FEC_MII_DATA_PA_SHIFT) & FEC_MII_DATA_PA_MSK;
33 value |= (reg << FEC_MII_DATA_RA_SHIFT) & FEC_MII_DATA_RA_MSK;
30 34
31 fec = priv->regs; 35 fec = priv->regs;
32 out_be32(&fec->ievent, FEC_IEVENT_MII); 36 out_be32(&fec->ievent, FEC_IEVENT_MII);
33 37 out_be32(&priv->regs->mii_data, value);
34 request |= (phy_id << FEC_MII_DATA_PA_SHIFT) & FEC_MII_DATA_PA_MSK;
35 request |= (reg << FEC_MII_DATA_RA_SHIFT) & FEC_MII_DATA_RA_MSK;
36
37 out_be32(&priv->regs->mii_data, request);
38 38
39 /* wait for it to finish, this takes about 23 us on lite5200b */ 39 /* wait for it to finish, this takes about 23 us on lite5200b */
40 while (!(in_be32(&fec->ievent) & FEC_IEVENT_MII) && --tries) 40 while (!(in_be32(&fec->ievent) & FEC_IEVENT_MII) && --tries)
41 udelay(5); 41 udelay(5);
42 42
43 if (tries == 0) 43 if (!tries)
44 return -ETIMEDOUT; 44 return -ETIMEDOUT;
45 45
46 return in_be32(&priv->regs->mii_data) & FEC_MII_DATA_DATAMSK; 46 return value & FEC_MII_DATA_OP_RD ?
47 in_be32(&priv->regs->mii_data) & FEC_MII_DATA_DATAMSK : 0;
47} 48}
48 49
49static int mpc52xx_fec_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 data) 50static int mpc52xx_fec_mdio_read(struct mii_bus *bus, int phy_id, int reg)
50{ 51{
51 struct mpc52xx_fec_mdio_priv *priv = bus->priv; 52 return mpc52xx_fec_mdio_transfer(bus, phy_id, reg, FEC_MII_READ_FRAME);
52 struct mpc52xx_fec __iomem *fec; 53}
53 u32 value = data;
54 int tries = 100;
55
56 fec = priv->regs;
57 out_be32(&fec->ievent, FEC_IEVENT_MII);
58
59 value |= FEC_MII_WRITE_FRAME;
60 value |= (phy_id << FEC_MII_DATA_PA_SHIFT) & FEC_MII_DATA_PA_MSK;
61 value |= (reg << FEC_MII_DATA_RA_SHIFT) & FEC_MII_DATA_RA_MSK;
62
63 out_be32(&priv->regs->mii_data, value);
64
65 /* wait for request to finish */
66 while (!(in_be32(&fec->ievent) & FEC_IEVENT_MII) && --tries)
67 udelay(5);
68
69 if (tries == 0)
70 return -ETIMEDOUT;
71 54
72 return 0; 55static int mpc52xx_fec_mdio_write(struct mii_bus *bus, int phy_id, int reg,
56 u16 data)
57{
58 return mpc52xx_fec_mdio_transfer(bus, phy_id, reg,
59 data | FEC_MII_WRITE_FRAME);
73} 60}
74 61
75static int mpc52xx_fec_mdio_probe(struct of_device *of, const struct of_device_id *match) 62static int mpc52xx_fec_mdio_probe(struct of_device *of,
63 const struct of_device_id *match)
76{ 64{
77 struct device *dev = &of->dev; 65 struct device *dev = &of->dev;
78 struct device_node *np = of->node; 66 struct device_node *np = of->node;
@@ -131,7 +119,8 @@ static int mpc52xx_fec_mdio_probe(struct of_device *of, const struct of_device_i
131 dev_set_drvdata(dev, bus); 119 dev_set_drvdata(dev, bus);
132 120
133 /* set MII speed */ 121 /* set MII speed */
134 out_be32(&priv->regs->mii_speed, ((mpc52xx_find_ipb_freq(of->node) >> 20) / 5) << 1); 122 out_be32(&priv->regs->mii_speed,
123 ((mpc52xx_find_ipb_freq(of->node) >> 20) / 5) << 1);
135 124
136 /* enable MII interrupt */ 125 /* enable MII interrupt */
137 out_be32(&priv->regs->imask, in_be32(&priv->regs->imask) | FEC_IMASK_MII); 126 out_be32(&priv->regs->imask, in_be32(&priv->regs->imask) | FEC_IMASK_MII);
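
The consolidated mpc52xx_fec_mdio_transfer() builds one MII management frame for both reads and writes, OR-ing the PHY and register addresses into the opcode word. A stand-alone sketch of that frame composition, assuming the standard MII management-frame bit layout; the shift and mask names here are restated for the demo, not the driver's macros:

#include <stdio.h>

#define MII_DATA_ST		(0x1u << 30)	/* start of frame */
#define MII_DATA_OP_RD		(0x2u << 28)	/* opcode: read */
#define MII_DATA_OP_WR		(0x1u << 28)	/* opcode: write */
#define MII_DATA_PA_SHIFT	23		/* PHY address, 5 bits */
#define MII_DATA_PA_MSK		(0x1fu << MII_DATA_PA_SHIFT)
#define MII_DATA_RA_SHIFT	18		/* register address, 5 bits */
#define MII_DATA_RA_MSK		(0x1fu << MII_DATA_RA_SHIFT)
#define MII_DATA_TA		(0x2u << 16)	/* turnaround */

#define MII_READ_FRAME		(MII_DATA_ST | MII_DATA_OP_RD | MII_DATA_TA)
#define MII_WRITE_FRAME		(MII_DATA_ST | MII_DATA_OP_WR | MII_DATA_TA)

/* One helper composes both frame types; read and write differ only in the
 * opcode/data bits the caller folds into "value", like the patch's
 * mpc52xx_fec_mdio_transfer(). */
static unsigned int build_frame(unsigned int value, int phy_id, int reg)
{
	value |= ((unsigned int)phy_id << MII_DATA_PA_SHIFT) & MII_DATA_PA_MSK;
	value |= ((unsigned int)reg << MII_DATA_RA_SHIFT) & MII_DATA_RA_MSK;
	return value;
}

int main(void)
{
	printf("read  phy 1 reg 2:           %#010x\n",
	       build_frame(MII_READ_FRAME, 1, 2));
	printf("write phy 1 reg 0 data 0x1140: %#010x\n",
	       build_frame(MII_WRITE_FRAME | 0x1140, 1, 0));
	return 0;
}
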
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index cb51c1fb0338..a6f49d025787 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -1099,7 +1099,9 @@ static int __devinit fs_enet_probe(struct of_device *ofdev,
1099 ndev->stop = fs_enet_close; 1099 ndev->stop = fs_enet_close;
1100 ndev->get_stats = fs_enet_get_stats; 1100 ndev->get_stats = fs_enet_get_stats;
1101 ndev->set_multicast_list = fs_set_multicast_list; 1101 ndev->set_multicast_list = fs_set_multicast_list;
1102 1102#ifdef CONFIG_NET_POLL_CONTROLLER
1103 ndev->poll_controller = fs_enet_netpoll;
1104#endif
1103 if (fpi->use_napi) 1105 if (fpi->use_napi)
1104 netif_napi_add(ndev, &fep->napi, fs_enet_rx_napi, 1106 netif_napi_add(ndev, &fep->napi, fs_enet_rx_napi,
1105 fpi->napi_weight); 1107 fpi->napi_weight);
@@ -1209,7 +1211,7 @@ static void __exit fs_cleanup(void)
1209static void fs_enet_netpoll(struct net_device *dev) 1211static void fs_enet_netpoll(struct net_device *dev)
1210{ 1212{
1211 disable_irq(dev->irq); 1213 disable_irq(dev->irq);
1212 fs_enet_interrupt(dev->irq, dev, NULL); 1214 fs_enet_interrupt(dev->irq, dev);
1213 enable_irq(dev->irq); 1215 enable_irq(dev->irq);
1214} 1216}
1215#endif 1217#endif
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index b5bb7ae2817f..c4af949bf860 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -161,7 +161,7 @@ static int gfar_probe(struct platform_device *pdev)
161 struct gfar_private *priv = NULL; 161 struct gfar_private *priv = NULL;
162 struct gianfar_platform_data *einfo; 162 struct gianfar_platform_data *einfo;
163 struct resource *r; 163 struct resource *r;
164 int err = 0; 164 int err = 0, irq;
165 DECLARE_MAC_BUF(mac); 165 DECLARE_MAC_BUF(mac);
166 166
167 einfo = (struct gianfar_platform_data *) pdev->dev.platform_data; 167 einfo = (struct gianfar_platform_data *) pdev->dev.platform_data;
@@ -187,15 +187,25 @@ static int gfar_probe(struct platform_device *pdev)
187 187
188 /* fill out IRQ fields */ 188 /* fill out IRQ fields */
189 if (einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 189 if (einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
190 priv->interruptTransmit = platform_get_irq_byname(pdev, "tx"); 190 irq = platform_get_irq_byname(pdev, "tx");
191 priv->interruptReceive = platform_get_irq_byname(pdev, "rx"); 191 if (irq < 0)
192 priv->interruptError = platform_get_irq_byname(pdev, "error"); 192 goto regs_fail;
193 if (priv->interruptTransmit < 0 || priv->interruptReceive < 0 || priv->interruptError < 0) 193 priv->interruptTransmit = irq;
194
195 irq = platform_get_irq_byname(pdev, "rx");
196 if (irq < 0)
197 goto regs_fail;
198 priv->interruptReceive = irq;
199
200 irq = platform_get_irq_byname(pdev, "error");
201 if (irq < 0)
194 goto regs_fail; 202 goto regs_fail;
203 priv->interruptError = irq;
195 } else { 204 } else {
196 priv->interruptTransmit = platform_get_irq(pdev, 0); 205 irq = platform_get_irq(pdev, 0);
197 if (priv->interruptTransmit < 0) 206 if (irq < 0)
198 goto regs_fail; 207 goto regs_fail;
208 priv->interruptTransmit = irq;
199 } 209 }
200 210
201 /* get a pointer to the register memory */ 211 /* get a pointer to the register memory */
@@ -576,6 +586,18 @@ static void gfar_configure_serdes(struct net_device *dev)
576 struct gfar_mii __iomem *regs = 586 struct gfar_mii __iomem *regs =
577 (void __iomem *)&priv->regs->gfar_mii_regs; 587 (void __iomem *)&priv->regs->gfar_mii_regs;
578 int tbipa = gfar_read(&priv->regs->tbipa); 588 int tbipa = gfar_read(&priv->regs->tbipa);
589 struct mii_bus *bus = gfar_get_miibus(priv);
590
591 if (bus)
592 mutex_lock(&bus->mdio_lock);
593
594 /* If the link is already up, we must already be ok, and don't need to
595 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
596 * everything for us? Resetting it takes the link down and requires
597 * several seconds for it to come back.
598 */
599 if (gfar_local_mdio_read(regs, tbipa, MII_BMSR) & BMSR_LSTATUS)
600 goto done;
579 601
580 /* Single clk mode, mii mode off(for serdes communication) */ 602 /* Single clk mode, mii mode off(for serdes communication) */
581 gfar_local_mdio_write(regs, tbipa, MII_TBICON, TBICON_CLK_SELECT); 603 gfar_local_mdio_write(regs, tbipa, MII_TBICON, TBICON_CLK_SELECT);
@@ -586,6 +608,10 @@ static void gfar_configure_serdes(struct net_device *dev)
586 608
587 gfar_local_mdio_write(regs, tbipa, MII_BMCR, BMCR_ANENABLE | 609 gfar_local_mdio_write(regs, tbipa, MII_BMCR, BMCR_ANENABLE |
588 BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000); 610 BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
611
612 done:
613 if (bus)
614 mutex_unlock(&bus->mdio_lock);
589} 615}
590 616
591static void init_registers(struct net_device *dev) 617static void init_registers(struct net_device *dev)
@@ -1381,6 +1407,10 @@ static int gfar_clean_tx_ring(struct net_device *dev)
1381 if (bdp->status & TXBD_DEF) 1407 if (bdp->status & TXBD_DEF)
1382 dev->stats.collisions++; 1408 dev->stats.collisions++;
1383 1409
1410 /* Unmap the DMA memory */
1411 dma_unmap_single(&priv->dev->dev, bdp->bufPtr,
1412 bdp->length, DMA_TO_DEVICE);
1413
1384 /* Free the sk buffer associated with this TxBD */ 1414 /* Free the sk buffer associated with this TxBD */
1385 dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]); 1415 dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]);
1386 1416
@@ -1640,6 +1670,9 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
1640 1670
1641 skb = priv->rx_skbuff[priv->skb_currx]; 1671 skb = priv->rx_skbuff[priv->skb_currx];
1642 1672
1673 dma_unmap_single(&priv->dev->dev, bdp->bufPtr,
1674 priv->rx_buffer_size, DMA_FROM_DEVICE);
1675
1643 /* We drop the frame if we failed to allocate a new buffer */ 1676 /* We drop the frame if we failed to allocate a new buffer */
1644 if (unlikely(!newskb || !(bdp->status & RXBD_LAST) || 1677 if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
1645 bdp->status & RXBD_ERR)) { 1678 bdp->status & RXBD_ERR)) {
@@ -1648,14 +1681,8 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
1648 if (unlikely(!newskb)) 1681 if (unlikely(!newskb))
1649 newskb = skb; 1682 newskb = skb;
1650 1683
1651 if (skb) { 1684 if (skb)
1652 dma_unmap_single(&priv->dev->dev,
1653 bdp->bufPtr,
1654 priv->rx_buffer_size,
1655 DMA_FROM_DEVICE);
1656
1657 dev_kfree_skb_any(skb); 1685 dev_kfree_skb_any(skb);
1658 }
1659 } else { 1686 } else {
1660 /* Increment the number of packets */ 1687 /* Increment the number of packets */
1661 dev->stats.rx_packets++; 1688 dev->stats.rx_packets++;
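
The gfar_probe() hunk checks each platform_get_irq*() return in a signed local before storing it, because a negative errno written straight into an unsigned field can never test as less than zero afterwards. A short demonstration of the trap and of the fixed pattern (a compiler will typically warn that the first comparison is always false, which is exactly the point):

#include <stdio.h>

/* Toy private struct with an unsigned IRQ field, like the driver's. */
struct toy_priv {
	unsigned int tx_irq;
};

static int platform_get_irq_stub(void)
{
	return -6;	/* pretend the IRQ lookup failed (-ENXIO) */
}

int main(void)
{
	struct toy_priv priv;
	int irq;

	/* Broken pattern: the check compares an unsigned value, prints 0. */
	priv.tx_irq = platform_get_irq_stub();
	printf("unsigned check sees failure: %d\n", priv.tx_irq < 0 ? 1 : 0);

	/* Fixed pattern from the patch: test the signed return first. */
	irq = platform_get_irq_stub();
	if (irq < 0) {
		printf("signed check sees failure: 1\n");
		return 1;
	}
	priv.tx_irq = irq;
	return 0;
}
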
diff --git a/drivers/net/gianfar_mii.c b/drivers/net/gianfar_mii.c
index bf73eea98010..0e2595d24933 100644
--- a/drivers/net/gianfar_mii.c
+++ b/drivers/net/gianfar_mii.c
@@ -269,6 +269,27 @@ static struct device_driver gianfar_mdio_driver = {
269 .remove = gfar_mdio_remove, 269 .remove = gfar_mdio_remove,
270}; 270};
271 271
272static int match_mdio_bus(struct device *dev, void *data)
273{
274 const struct gfar_private *priv = data;
275 const struct platform_device *pdev = to_platform_device(dev);
276
277 return !strcmp(pdev->name, gianfar_mdio_driver.name) &&
278 pdev->id == priv->einfo->mdio_bus;
279}
280
281/* Given a gfar_priv structure, find the mii_bus controlled by this device (not
282 * necessarily the same as the bus the gfar's PHY is on), if one exists.
283 * Normally only the first gianfar controls a mii_bus. */
284struct mii_bus *gfar_get_miibus(const struct gfar_private *priv)
285{
286 /*const*/ struct device *d;
287
288 d = bus_find_device(gianfar_mdio_driver.bus, NULL, (void *)priv,
289 match_mdio_bus);
290 return d ? dev_get_drvdata(d) : NULL;
291}
292
272int __init gfar_mdio_init(void) 293int __init gfar_mdio_init(void)
273{ 294{
274 return driver_register(&gianfar_mdio_driver); 295 return driver_register(&gianfar_mdio_driver);
diff --git a/drivers/net/gianfar_mii.h b/drivers/net/gianfar_mii.h
index 2af28b16a0e2..02dc970ca1ff 100644
--- a/drivers/net/gianfar_mii.h
+++ b/drivers/net/gianfar_mii.h
@@ -18,6 +18,8 @@
18#ifndef __GIANFAR_MII_H 18#ifndef __GIANFAR_MII_H
19#define __GIANFAR_MII_H 19#define __GIANFAR_MII_H
20 20
21struct gfar_private; /* forward ref */
22
21#define MIIMIND_BUSY 0x00000001 23#define MIIMIND_BUSY 0x00000001
22#define MIIMIND_NOTVALID 0x00000004 24#define MIIMIND_NOTVALID 0x00000004
23 25
@@ -44,6 +46,7 @@ int gfar_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value);
44int gfar_local_mdio_write(struct gfar_mii __iomem *regs, int mii_id, 46int gfar_local_mdio_write(struct gfar_mii __iomem *regs, int mii_id,
45 int regnum, u16 value); 47 int regnum, u16 value);
46int gfar_local_mdio_read(struct gfar_mii __iomem *regs, int mii_id, int regnum); 48int gfar_local_mdio_read(struct gfar_mii __iomem *regs, int mii_id, int regnum);
49struct mii_bus *gfar_get_miibus(const struct gfar_private *priv);
47int __init gfar_mdio_init(void); 50int __init gfar_mdio_init(void);
48void gfar_mdio_exit(void); 51void gfar_mdio_exit(void);
49#endif /* GIANFAR_PHY_H */ 52#endif /* GIANFAR_PHY_H */
diff --git a/drivers/net/hp-plus.c b/drivers/net/hp-plus.c
index fbbd3e660c27..c01e290d09d2 100644
--- a/drivers/net/hp-plus.c
+++ b/drivers/net/hp-plus.c
@@ -230,7 +230,7 @@ static int __init hpp_probe1(struct net_device *dev, int ioaddr)
230 dev->open = &hpp_open; 230 dev->open = &hpp_open;
231 dev->stop = &hpp_close; 231 dev->stop = &hpp_close;
232#ifdef CONFIG_NET_POLL_CONTROLLER 232#ifdef CONFIG_NET_POLL_CONTROLLER
233 dev->poll_controller = ei_poll; 233 dev->poll_controller = eip_poll;
234#endif 234#endif
235 235
236 ei_status.name = name; 236 ei_status.name = name;
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index efcf21c9f5c7..901212aa37cb 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -2604,8 +2604,16 @@ static int __devinit emac_init_config(struct emac_instance *dev)
2604 if (of_device_is_compatible(np, "ibm,emac-440ep") || 2604 if (of_device_is_compatible(np, "ibm,emac-440ep") ||
2605 of_device_is_compatible(np, "ibm,emac-440gr")) 2605 of_device_is_compatible(np, "ibm,emac-440gr"))
2606 dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX; 2606 dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
2607 if (of_device_is_compatible(np, "ibm,emac-405ez")) 2607 if (of_device_is_compatible(np, "ibm,emac-405ez")) {
2608#ifdef CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL
2608 dev->features |= EMAC_FTR_NO_FLOW_CONTROL_40x; 2609 dev->features |= EMAC_FTR_NO_FLOW_CONTROL_40x;
2610#else
2611 printk(KERN_ERR "%s: Flow control not disabled!\n",
2612 np->full_name);
2613 return -ENXIO;
2614#endif
2615 }
2616
2609 } 2617 }
2610 2618
2611 /* Fixup some feature bits based on the device tree */ 2619 /* Fixup some feature bits based on the device tree */
diff --git a/drivers/net/ibm_newemac/mal.c b/drivers/net/ibm_newemac/mal.c
index 1839d3f154a3..ecf9798987fa 100644
--- a/drivers/net/ibm_newemac/mal.c
+++ b/drivers/net/ibm_newemac/mal.c
@@ -280,9 +280,11 @@ static irqreturn_t mal_txeob(int irq, void *dev_instance)
280 mal_schedule_poll(mal); 280 mal_schedule_poll(mal);
281 set_mal_dcrn(mal, MAL_TXEOBISR, r); 281 set_mal_dcrn(mal, MAL_TXEOBISR, r);
282 282
283#ifdef CONFIG_PPC_DCR_NATIVE
283 if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT)) 284 if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT))
284 mtdcri(SDR0, DCRN_SDR_ICINTSTAT, 285 mtdcri(SDR0, DCRN_SDR_ICINTSTAT,
285 (mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICTX)); 286 (mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICTX));
287#endif
286 288
287 return IRQ_HANDLED; 289 return IRQ_HANDLED;
288} 290}
@@ -298,9 +300,11 @@ static irqreturn_t mal_rxeob(int irq, void *dev_instance)
298 mal_schedule_poll(mal); 300 mal_schedule_poll(mal);
299 set_mal_dcrn(mal, MAL_RXEOBISR, r); 301 set_mal_dcrn(mal, MAL_RXEOBISR, r);
300 302
303#ifdef CONFIG_PPC_DCR_NATIVE
301 if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT)) 304 if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT))
302 mtdcri(SDR0, DCRN_SDR_ICINTSTAT, 305 mtdcri(SDR0, DCRN_SDR_ICINTSTAT,
303 (mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICRX)); 306 (mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICRX));
307#endif
304 308
305 return IRQ_HANDLED; 309 return IRQ_HANDLED;
306} 310}
@@ -572,9 +576,18 @@ static int __devinit mal_probe(struct of_device *ofdev,
572 goto fail; 576 goto fail;
573 } 577 }
574 578
575 if (of_device_is_compatible(ofdev->node, "ibm,mcmal-405ez")) 579 if (of_device_is_compatible(ofdev->node, "ibm,mcmal-405ez")) {
580#if defined(CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT) && \
581 defined(CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR)
576 mal->features |= (MAL_FTR_CLEAR_ICINTSTAT | 582 mal->features |= (MAL_FTR_CLEAR_ICINTSTAT |
577 MAL_FTR_COMMON_ERR_INT); 583 MAL_FTR_COMMON_ERR_INT);
584#else
585 printk(KERN_ERR "%s: Support for 405EZ not enabled!\n",
586 ofdev->node->full_name);
587 err = -ENODEV;
588 goto fail;
589#endif
590 }
578 591
579 mal->txeob_irq = irq_of_parse_and_map(ofdev->node, 0); 592 mal->txeob_irq = irq_of_parse_and_map(ofdev->node, 0);
580 mal->rxeob_irq = irq_of_parse_and_map(ofdev->node, 1); 593 mal->rxeob_irq = irq_of_parse_and_map(ofdev->node, 1);
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index 58906c984be9..89964fa739a0 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -1776,7 +1776,8 @@ static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1776 1776
1777 /* this function will set ->supported = 0 and return 1 if wol is not 1777 /* this function will set ->supported = 0 and return 1 if wol is not
1778 * supported by this hardware */ 1778 * supported by this hardware */
1779 if (igb_wol_exclusion(adapter, wol)) 1779 if (igb_wol_exclusion(adapter, wol) ||
1780 !device_can_wakeup(&adapter->pdev->dev))
1780 return; 1781 return;
1781 1782
1782 /* apply any specific unsupported masks here */ 1783 /* apply any specific unsupported masks here */
@@ -1805,7 +1806,8 @@ static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1805 if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) 1806 if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
1806 return -EOPNOTSUPP; 1807 return -EOPNOTSUPP;
1807 1808
1808 if (igb_wol_exclusion(adapter, wol)) 1809 if (igb_wol_exclusion(adapter, wol) ||
1810 !device_can_wakeup(&adapter->pdev->dev))
1809 return wol->wolopts ? -EOPNOTSUPP : 0; 1811 return wol->wolopts ? -EOPNOTSUPP : 0;
1810 1812
1811 switch (hw->device_id) { 1813 switch (hw->device_id) {
@@ -1825,6 +1827,8 @@ static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1825 if (wol->wolopts & WAKE_MAGIC) 1827 if (wol->wolopts & WAKE_MAGIC)
1826 adapter->wol |= E1000_WUFC_MAG; 1828 adapter->wol |= E1000_WUFC_MAG;
1827 1829
1830 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1831
1828 return 0; 1832 return 0;
1829} 1833}
1830 1834
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 93d02efa9a0a..20d27e622ec1 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -38,10 +38,11 @@
38#include <linux/ethtool.h> 38#include <linux/ethtool.h>
39#include <linux/if_vlan.h> 39#include <linux/if_vlan.h>
40#include <linux/pci.h> 40#include <linux/pci.h>
41#include <linux/pci-aspm.h>
41#include <linux/delay.h> 42#include <linux/delay.h>
42#include <linux/interrupt.h> 43#include <linux/interrupt.h>
43#include <linux/if_ether.h> 44#include <linux/if_ether.h>
44#ifdef CONFIG_DCA 45#ifdef CONFIG_IGB_DCA
45#include <linux/dca.h> 46#include <linux/dca.h>
46#endif 47#endif
47#include "igb.h" 48#include "igb.h"
@@ -106,11 +107,11 @@ static irqreturn_t igb_msix_other(int irq, void *);
106static irqreturn_t igb_msix_rx(int irq, void *); 107static irqreturn_t igb_msix_rx(int irq, void *);
107static irqreturn_t igb_msix_tx(int irq, void *); 108static irqreturn_t igb_msix_tx(int irq, void *);
108static int igb_clean_rx_ring_msix(struct napi_struct *, int); 109static int igb_clean_rx_ring_msix(struct napi_struct *, int);
109#ifdef CONFIG_DCA 110#ifdef CONFIG_IGB_DCA
110static void igb_update_rx_dca(struct igb_ring *); 111static void igb_update_rx_dca(struct igb_ring *);
111static void igb_update_tx_dca(struct igb_ring *); 112static void igb_update_tx_dca(struct igb_ring *);
112static void igb_setup_dca(struct igb_adapter *); 113static void igb_setup_dca(struct igb_adapter *);
113#endif /* CONFIG_DCA */ 114#endif /* CONFIG_IGB_DCA */
114static bool igb_clean_tx_irq(struct igb_ring *); 115static bool igb_clean_tx_irq(struct igb_ring *);
115static int igb_poll(struct napi_struct *, int); 116static int igb_poll(struct napi_struct *, int);
116static bool igb_clean_rx_irq_adv(struct igb_ring *, int *, int); 117static bool igb_clean_rx_irq_adv(struct igb_ring *, int *, int);
@@ -131,7 +132,7 @@ static int igb_suspend(struct pci_dev *, pm_message_t);
131static int igb_resume(struct pci_dev *); 132static int igb_resume(struct pci_dev *);
132#endif 133#endif
133static void igb_shutdown(struct pci_dev *); 134static void igb_shutdown(struct pci_dev *);
134#ifdef CONFIG_DCA 135#ifdef CONFIG_IGB_DCA
135static int igb_notify_dca(struct notifier_block *, unsigned long, void *); 136static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
136static struct notifier_block dca_notifier = { 137static struct notifier_block dca_notifier = {
137 .notifier_call = igb_notify_dca, 138 .notifier_call = igb_notify_dca,
@@ -207,7 +208,7 @@ static int __init igb_init_module(void)
207 global_quad_port_a = 0; 208 global_quad_port_a = 0;
208 209
209 ret = pci_register_driver(&igb_driver); 210 ret = pci_register_driver(&igb_driver);
210#ifdef CONFIG_DCA 211#ifdef CONFIG_IGB_DCA
211 dca_register_notify(&dca_notifier); 212 dca_register_notify(&dca_notifier);
212#endif 213#endif
213 return ret; 214 return ret;
@@ -223,7 +224,7 @@ module_init(igb_init_module);
223 **/ 224 **/
224static void __exit igb_exit_module(void) 225static void __exit igb_exit_module(void)
225{ 226{
226#ifdef CONFIG_DCA 227#ifdef CONFIG_IGB_DCA
227 dca_unregister_notify(&dca_notifier); 228 dca_unregister_notify(&dca_notifier);
228#endif 229#endif
229 pci_unregister_driver(&igb_driver); 230 pci_unregister_driver(&igb_driver);
@@ -966,10 +967,11 @@ static int __devinit igb_probe(struct pci_dev *pdev,
966 struct net_device *netdev; 967 struct net_device *netdev;
967 struct igb_adapter *adapter; 968 struct igb_adapter *adapter;
968 struct e1000_hw *hw; 969 struct e1000_hw *hw;
970 struct pci_dev *us_dev;
969 const struct e1000_info *ei = igb_info_tbl[ent->driver_data]; 971 const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
970 unsigned long mmio_start, mmio_len; 972 unsigned long mmio_start, mmio_len;
971 int i, err, pci_using_dac; 973 int i, err, pci_using_dac, pos;
972 u16 eeprom_data = 0; 974 u16 eeprom_data = 0, state = 0;
973 u16 eeprom_apme_mask = IGB_EEPROM_APME; 975 u16 eeprom_apme_mask = IGB_EEPROM_APME;
974 u32 part_num; 976 u32 part_num;
975 int bars, need_ioport; 977 int bars, need_ioport;
@@ -1004,6 +1006,27 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1004 } 1006 }
1005 } 1007 }
1006 1008
1009 /* 82575 requires that the pci-e link partner disable the L0s state */
1010 switch (pdev->device) {
1011 case E1000_DEV_ID_82575EB_COPPER:
1012 case E1000_DEV_ID_82575EB_FIBER_SERDES:
1013 case E1000_DEV_ID_82575GB_QUAD_COPPER:
1014 us_dev = pdev->bus->self;
1015 pos = pci_find_capability(us_dev, PCI_CAP_ID_EXP);
1016 if (pos) {
1017 pci_read_config_word(us_dev, pos + PCI_EXP_LNKCTL,
1018 &state);
1019 state &= ~PCIE_LINK_STATE_L0S;
1020 pci_write_config_word(us_dev, pos + PCI_EXP_LNKCTL,
1021 state);
1022 dev_info(&pdev->dev,
1023 "Disabling ASPM L0s upstream switch port %s\n",
1024 pci_name(us_dev));
1025 }
1026 default:
1027 break;
1028 }
1029
1007 err = pci_request_selected_regions(pdev, bars, igb_driver_name); 1030 err = pci_request_selected_regions(pdev, bars, igb_driver_name);
1008 if (err) 1031 if (err)
1009 goto err_pci_reg; 1032 goto err_pci_reg;
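The hunk above works around the 82575 requirement by clearing the L0s enable bit in the upstream switch port's PCIe Link Control register by hand. The newly included <linux/pci-aspm.h> also exposes pci_disable_link_state(), which asks the ASPM core to keep a state disabled on a device's link; whether that would cover this particular upstream-port case is an assumption, but for reference the call looks like this:

    #include <linux/pci.h>
    #include <linux/pci-aspm.h>

    /* Illustrative only: request that ASPM keep L0s disabled on this device's
     * link, rather than clearing the Link Control bit directly as the hunk
     * above does for the upstream switch port. */
    static void foo_disable_l0s(struct pci_dev *pdev)
    {
            pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);
    }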
@@ -1220,6 +1243,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1220 1243
1221 /* initialize the wol settings based on the eeprom settings */ 1244 /* initialize the wol settings based on the eeprom settings */
1222 adapter->wol = adapter->eeprom_wol; 1245 adapter->wol = adapter->eeprom_wol;
1246 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1223 1247
1224 /* reset the hardware with the new settings */ 1248 /* reset the hardware with the new settings */
1225 igb_reset(adapter); 1249 igb_reset(adapter);
@@ -1237,7 +1261,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1237 if (err) 1261 if (err)
1238 goto err_register; 1262 goto err_register;
1239 1263
1240#ifdef CONFIG_DCA 1264#ifdef CONFIG_IGB_DCA
1241 if ((adapter->flags & IGB_FLAG_HAS_DCA) && 1265 if ((adapter->flags & IGB_FLAG_HAS_DCA) &&
1242 (dca_add_requester(&pdev->dev) == 0)) { 1266 (dca_add_requester(&pdev->dev) == 0)) {
1243 adapter->flags |= IGB_FLAG_DCA_ENABLED; 1267 adapter->flags |= IGB_FLAG_DCA_ENABLED;
@@ -1311,7 +1335,7 @@ static void __devexit igb_remove(struct pci_dev *pdev)
1311{ 1335{
1312 struct net_device *netdev = pci_get_drvdata(pdev); 1336 struct net_device *netdev = pci_get_drvdata(pdev);
1313 struct igb_adapter *adapter = netdev_priv(netdev); 1337 struct igb_adapter *adapter = netdev_priv(netdev);
1314#ifdef CONFIG_DCA 1338#ifdef CONFIG_IGB_DCA
1315 struct e1000_hw *hw = &adapter->hw; 1339 struct e1000_hw *hw = &adapter->hw;
1316#endif 1340#endif
1317 1341
@@ -1323,7 +1347,7 @@ static void __devexit igb_remove(struct pci_dev *pdev)
1323 1347
1324 flush_scheduled_work(); 1348 flush_scheduled_work();
1325 1349
1326#ifdef CONFIG_DCA 1350#ifdef CONFIG_IGB_DCA
1327 if (adapter->flags & IGB_FLAG_DCA_ENABLED) { 1351 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
1328 dev_info(&pdev->dev, "DCA disabled\n"); 1352 dev_info(&pdev->dev, "DCA disabled\n");
1329 dca_remove_requester(&pdev->dev); 1353 dca_remove_requester(&pdev->dev);
@@ -1956,7 +1980,6 @@ static void igb_configure_rx(struct igb_adapter *adapter)
1956 1980
1957/** 1981/**
1958 * igb_free_tx_resources - Free Tx Resources per Queue 1982 * igb_free_tx_resources - Free Tx Resources per Queue
1959 * @adapter: board private structure
1960 * @tx_ring: Tx descriptor ring for a specific queue 1983 * @tx_ring: Tx descriptor ring for a specific queue
1961 * 1984 *
1962 * Free all transmit software resources 1985 * Free all transmit software resources
@@ -2009,7 +2032,6 @@ static void igb_unmap_and_free_tx_resource(struct igb_adapter *adapter,
2009 2032
2010/** 2033/**
2011 * igb_clean_tx_ring - Free Tx Buffers 2034 * igb_clean_tx_ring - Free Tx Buffers
2012 * @adapter: board private structure
2013 * @tx_ring: ring to be cleaned 2035 * @tx_ring: ring to be cleaned
2014 **/ 2036 **/
2015static void igb_clean_tx_ring(struct igb_ring *tx_ring) 2037static void igb_clean_tx_ring(struct igb_ring *tx_ring)
@@ -2056,7 +2078,6 @@ static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
2056 2078
2057/** 2079/**
2058 * igb_free_rx_resources - Free Rx Resources 2080 * igb_free_rx_resources - Free Rx Resources
2059 * @adapter: board private structure
2060 * @rx_ring: ring to clean the resources from 2081 * @rx_ring: ring to clean the resources from
2061 * 2082 *
2062 * Free all receive software resources 2083 * Free all receive software resources
@@ -2096,7 +2117,6 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter)
2096 2117
2097/** 2118/**
2098 * igb_clean_rx_ring - Free Rx Buffers per Queue 2119 * igb_clean_rx_ring - Free Rx Buffers per Queue
2099 * @adapter: board private structure
2100 * @rx_ring: ring to free buffers from 2120 * @rx_ring: ring to free buffers from
2101 **/ 2121 **/
2102static void igb_clean_rx_ring(struct igb_ring *rx_ring) 2122static void igb_clean_rx_ring(struct igb_ring *rx_ring)
@@ -3271,7 +3291,7 @@ static irqreturn_t igb_msix_tx(int irq, void *data)
3271 struct igb_adapter *adapter = tx_ring->adapter; 3291 struct igb_adapter *adapter = tx_ring->adapter;
3272 struct e1000_hw *hw = &adapter->hw; 3292 struct e1000_hw *hw = &adapter->hw;
3273 3293
3274#ifdef CONFIG_DCA 3294#ifdef CONFIG_IGB_DCA
3275 if (adapter->flags & IGB_FLAG_DCA_ENABLED) 3295 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
3276 igb_update_tx_dca(tx_ring); 3296 igb_update_tx_dca(tx_ring);
3277#endif 3297#endif
@@ -3323,14 +3343,14 @@ static irqreturn_t igb_msix_rx(int irq, void *data)
3323 if (netif_rx_schedule_prep(adapter->netdev, &rx_ring->napi)) 3343 if (netif_rx_schedule_prep(adapter->netdev, &rx_ring->napi))
3324 __netif_rx_schedule(adapter->netdev, &rx_ring->napi); 3344 __netif_rx_schedule(adapter->netdev, &rx_ring->napi);
3325 3345
3326#ifdef CONFIG_DCA 3346#ifdef CONFIG_IGB_DCA
3327 if (adapter->flags & IGB_FLAG_DCA_ENABLED) 3347 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
3328 igb_update_rx_dca(rx_ring); 3348 igb_update_rx_dca(rx_ring);
3329#endif 3349#endif
3330 return IRQ_HANDLED; 3350 return IRQ_HANDLED;
3331} 3351}
3332 3352
3333#ifdef CONFIG_DCA 3353#ifdef CONFIG_IGB_DCA
3334static void igb_update_rx_dca(struct igb_ring *rx_ring) 3354static void igb_update_rx_dca(struct igb_ring *rx_ring)
3335{ 3355{
3336 u32 dca_rxctrl; 3356 u32 dca_rxctrl;
@@ -3450,7 +3470,7 @@ static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
3450 3470
3451 return ret_val ? NOTIFY_BAD : NOTIFY_DONE; 3471 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
3452} 3472}
3453#endif /* CONFIG_DCA */ 3473#endif /* CONFIG_IGB_DCA */
3454 3474
3455/** 3475/**
3456 * igb_intr_msi - Interrupt Handler 3476 * igb_intr_msi - Interrupt Handler
@@ -3529,13 +3549,13 @@ static int igb_poll(struct napi_struct *napi, int budget)
3529 int tx_clean_complete, work_done = 0; 3549 int tx_clean_complete, work_done = 0;
3530 3550
3531 /* this poll routine only supports one tx and one rx queue */ 3551 /* this poll routine only supports one tx and one rx queue */
3532#ifdef CONFIG_DCA 3552#ifdef CONFIG_IGB_DCA
3533 if (adapter->flags & IGB_FLAG_DCA_ENABLED) 3553 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
3534 igb_update_tx_dca(&adapter->tx_ring[0]); 3554 igb_update_tx_dca(&adapter->tx_ring[0]);
3535#endif 3555#endif
3536 tx_clean_complete = igb_clean_tx_irq(&adapter->tx_ring[0]); 3556 tx_clean_complete = igb_clean_tx_irq(&adapter->tx_ring[0]);
3537 3557
3538#ifdef CONFIG_DCA 3558#ifdef CONFIG_IGB_DCA
3539 if (adapter->flags & IGB_FLAG_DCA_ENABLED) 3559 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
3540 igb_update_rx_dca(&adapter->rx_ring[0]); 3560 igb_update_rx_dca(&adapter->rx_ring[0]);
3541#endif 3561#endif
@@ -3563,7 +3583,7 @@ static int igb_clean_rx_ring_msix(struct napi_struct *napi, int budget)
3563 struct net_device *netdev = adapter->netdev; 3583 struct net_device *netdev = adapter->netdev;
3564 int work_done = 0; 3584 int work_done = 0;
3565 3585
3566#ifdef CONFIG_DCA 3586#ifdef CONFIG_IGB_DCA
3567 if (adapter->flags & IGB_FLAG_DCA_ENABLED) 3587 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
3568 igb_update_rx_dca(rx_ring); 3588 igb_update_rx_dca(rx_ring);
3569#endif 3589#endif
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c
index 7373dafbb3f7..059369885be1 100644
--- a/drivers/net/ipg.c
+++ b/drivers/net/ipg.c
@@ -1112,7 +1112,7 @@ static void ipg_nic_rx_free_skb(struct net_device *dev)
1112 struct ipg_rx *rxfd = sp->rxd + entry; 1112 struct ipg_rx *rxfd = sp->rxd + entry;
1113 1113
1114 pci_unmap_single(sp->pdev, 1114 pci_unmap_single(sp->pdev,
1115 le64_to_cpu(rxfd->frag_info & ~IPG_RFI_FRAGLEN), 1115 le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
1116 sp->rx_buf_sz, PCI_DMA_FROMDEVICE); 1116 sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1117 dev_kfree_skb_irq(sp->rx_buff[entry]); 1117 dev_kfree_skb_irq(sp->rx_buff[entry]);
1118 sp->rx_buff[entry] = NULL; 1118 sp->rx_buff[entry] = NULL;
@@ -1179,7 +1179,7 @@ static int ipg_nic_rx_check_error(struct net_device *dev)
1179 */ 1179 */
1180 if (sp->rx_buff[entry]) { 1180 if (sp->rx_buff[entry]) {
1181 pci_unmap_single(sp->pdev, 1181 pci_unmap_single(sp->pdev,
1182 le64_to_cpu(rxfd->frag_info & ~IPG_RFI_FRAGLEN), 1182 le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
1183 sp->rx_buf_sz, PCI_DMA_FROMDEVICE); 1183 sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1184 1184
1185 dev_kfree_skb_irq(sp->rx_buff[entry]); 1185 dev_kfree_skb_irq(sp->rx_buff[entry]);
@@ -1246,7 +1246,7 @@ static void ipg_nic_rx_with_start(struct net_device *dev,
1246 if (jumbo->found_start) 1246 if (jumbo->found_start)
1247 dev_kfree_skb_irq(jumbo->skb); 1247 dev_kfree_skb_irq(jumbo->skb);
1248 1248
1249 pci_unmap_single(pdev, le64_to_cpu(rxfd->frag_info & ~IPG_RFI_FRAGLEN), 1249 pci_unmap_single(pdev, le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
1250 sp->rx_buf_sz, PCI_DMA_FROMDEVICE); 1250 sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1251 1251
1252 skb_put(skb, sp->rxfrag_size); 1252 skb_put(skb, sp->rxfrag_size);
@@ -1349,7 +1349,7 @@ static int ipg_nic_rx_jumbo(struct net_device *dev)
1349 unsigned int entry = curr % IPG_RFDLIST_LENGTH; 1349 unsigned int entry = curr % IPG_RFDLIST_LENGTH;
1350 struct ipg_rx *rxfd = sp->rxd + entry; 1350 struct ipg_rx *rxfd = sp->rxd + entry;
1351 1351
1352 if (!(rxfd->rfs & le64_to_cpu(IPG_RFS_RFDDONE))) 1352 if (!(rxfd->rfs & cpu_to_le64(IPG_RFS_RFDDONE)))
1353 break; 1353 break;
1354 1354
1355 switch (ipg_nic_rx_check_frame_type(dev)) { 1355 switch (ipg_nic_rx_check_frame_type(dev)) {
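The ipg.c hunks fix an endianness bug: rxfd->frag_info is a little-endian descriptor field while IPG_RFI_FRAGLEN is a CPU-order constant, so the mask must be applied after le64_to_cpu(), not before. On little-endian hosts both forms happen to produce the same bits, which is how the bug stayed hidden; on big-endian the masked-then-swapped value corrupts the DMA address handed to pci_unmap_single(). A small sketch of the two forms, with a hypothetical FRAG_LEN_MASK standing in for IPG_RFI_FRAGLEN:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    #define FRAG_LEN_MASK   0xffffULL               /* hypothetical length bits */

    /* Buggy: masks the raw little-endian bytes with a CPU-order constant,
     * then swaps the already-damaged result (wrong on big-endian hosts). */
    static inline u64 frag_addr_buggy(__le64 frag_info)
    {
            return le64_to_cpu(frag_info & ~FRAG_LEN_MASK);
    }

    /* Fixed: swap first, then mask in CPU byte order. */
    static inline u64 frag_addr_fixed(__le64 frag_info)
    {
            return le64_to_cpu(frag_info) & ~FRAG_LEN_MASK;
    }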
diff --git a/drivers/net/irda/ks959-sir.c b/drivers/net/irda/ks959-sir.c
index 2482d61662a2..2e67ae015d91 100644
--- a/drivers/net/irda/ks959-sir.c
+++ b/drivers/net/irda/ks959-sir.c
@@ -118,7 +118,6 @@
118#include <linux/errno.h> 118#include <linux/errno.h>
119#include <linux/init.h> 119#include <linux/init.h>
120#include <linux/slab.h> 120#include <linux/slab.h>
121#include <linux/module.h>
122#include <linux/kref.h> 121#include <linux/kref.h>
123#include <linux/usb.h> 122#include <linux/usb.h>
124#include <linux/device.h> 123#include <linux/device.h>
diff --git a/drivers/net/irda/ksdazzle-sir.c b/drivers/net/irda/ksdazzle-sir.c
index 1e0de93fd618..3843b5faba8b 100644
--- a/drivers/net/irda/ksdazzle-sir.c
+++ b/drivers/net/irda/ksdazzle-sir.c
@@ -82,7 +82,6 @@
82#include <linux/errno.h> 82#include <linux/errno.h>
83#include <linux/init.h> 83#include <linux/init.h>
84#include <linux/slab.h> 84#include <linux/slab.h>
85#include <linux/module.h>
86#include <linux/kref.h> 85#include <linux/kref.h>
87#include <linux/usb.h> 86#include <linux/usb.h>
88#include <linux/device.h> 87#include <linux/device.h>
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 7548fb7360d9..5236f633ee36 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -1287,13 +1287,39 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
1287 return; 1287 return;
1288} 1288}
1289 1289
1290static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter); 1290/**
1291 * ixgbe_irq_disable - Mask off interrupt generation on the NIC
1292 * @adapter: board private structure
1293 **/
1294static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
1295{
1296 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
1297 IXGBE_WRITE_FLUSH(&adapter->hw);
1298 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
1299 int i;
1300 for (i = 0; i < adapter->num_msix_vectors; i++)
1301 synchronize_irq(adapter->msix_entries[i].vector);
1302 } else {
1303 synchronize_irq(adapter->pdev->irq);
1304 }
1305}
1306
1307/**
1308 * ixgbe_irq_enable - Enable default interrupt generation settings
1309 * @adapter: board private structure
1310 **/
1311static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
1312{
1313 u32 mask;
1314 mask = IXGBE_EIMS_ENABLE_MASK;
1315 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
1316 IXGBE_WRITE_FLUSH(&adapter->hw);
1317}
1291 1318
1292/** 1319/**
1293 * ixgbe_intr - legacy mode Interrupt Handler 1320 * ixgbe_intr - legacy mode Interrupt Handler
1294 * @irq: interrupt number 1321 * @irq: interrupt number
1295 * @data: pointer to a network interface device structure 1322 * @data: pointer to a network interface device structure
1296 * @pt_regs: CPU registers structure
1297 **/ 1323 **/
1298static irqreturn_t ixgbe_intr(int irq, void *data) 1324static irqreturn_t ixgbe_intr(int irq, void *data)
1299{ 1325{
@@ -1394,35 +1420,6 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
1394} 1420}
1395 1421
1396/** 1422/**
1397 * ixgbe_irq_disable - Mask off interrupt generation on the NIC
1398 * @adapter: board private structure
1399 **/
1400static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
1401{
1402 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
1403 IXGBE_WRITE_FLUSH(&adapter->hw);
1404 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
1405 int i;
1406 for (i = 0; i < adapter->num_msix_vectors; i++)
1407 synchronize_irq(adapter->msix_entries[i].vector);
1408 } else {
1409 synchronize_irq(adapter->pdev->irq);
1410 }
1411}
1412
1413/**
1414 * ixgbe_irq_enable - Enable default interrupt generation settings
1415 * @adapter: board private structure
1416 **/
1417static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
1418{
1419 u32 mask;
1420 mask = IXGBE_EIMS_ENABLE_MASK;
1421 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
1422 IXGBE_WRITE_FLUSH(&adapter->hw);
1423}
1424
1425/**
1426 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts 1423 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
1427 * 1424 *
1428 **/ 1425 **/
@@ -2332,7 +2329,7 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
2332 * Once we know the feature-set enabled for the device, we'll cache 2329 * Once we know the feature-set enabled for the device, we'll cache
2333 * the register offset the descriptor ring is assigned to. 2330 * the register offset the descriptor ring is assigned to.
2334 **/ 2331 **/
2335static void __devinit ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) 2332static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
2336{ 2333{
2337 int feature_mask = 0, rss_i; 2334 int feature_mask = 0, rss_i;
2338 int i, txr_idx, rxr_idx; 2335 int i, txr_idx, rxr_idx;
@@ -2369,7 +2366,7 @@ static void __devinit ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
2369 * number of queues at compile-time. The polling_netdev array is 2366 * number of queues at compile-time. The polling_netdev array is
2370 * intended for Multiqueue, but should work fine with a single queue. 2367 * intended for Multiqueue, but should work fine with a single queue.
2371 **/ 2368 **/
2372static int __devinit ixgbe_alloc_queues(struct ixgbe_adapter *adapter) 2369static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
2373{ 2370{
2374 int i; 2371 int i;
2375 2372
@@ -2410,8 +2407,7 @@ err_tx_ring_allocation:
2410 * Attempt to configure the interrupts using the best available 2407 * Attempt to configure the interrupts using the best available
2411 * capabilities of the hardware and the kernel. 2408 * capabilities of the hardware and the kernel.
2412 **/ 2409 **/
2413static int __devinit ixgbe_set_interrupt_capability(struct ixgbe_adapter 2410static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
2414 *adapter)
2415{ 2411{
2416 int err = 0; 2412 int err = 0;
2417 int vector, v_budget; 2413 int vector, v_budget;
@@ -2503,7 +2499,7 @@ static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
2503 * - Hardware queue count (num_*_queues) 2499 * - Hardware queue count (num_*_queues)
2504 * - defined by miscellaneous hardware support/features (RSS, etc.) 2500 * - defined by miscellaneous hardware support/features (RSS, etc.)
2505 **/ 2501 **/
2506static int __devinit ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter) 2502static int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
2507{ 2503{
2508 int err; 2504 int err;
2509 2505
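Two things happen in the ixgbe hunks: ixgbe_irq_disable()/ixgbe_irq_enable() are defined before their first user so the forward declaration can go away, and __devinit is dropped from the interrupt and queue setup helpers. The latter matters because __devinit code may be discarded once initialization is over (when hotplug support is configured out), so a function that can also be reached from a runtime path has to live in regular .text, or modpost flags a section mismatch. A contrived sketch of the hazard, with hypothetical names:

    #include <linux/init.h>

    /* Illustration only: a __devinit function can be discarded after init,
     * so calling it from a runtime path is a section mismatch. */
    static int __devinit foo_setup_queues(void)     /* lives in .devinit.text */
    {
            return 0;
    }

    static int foo_reconfigure(void)                /* runtime path, e.g. ethtool */
    {
            return foo_setup_queues();              /* modpost: section mismatch */
    }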
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index 81c6cdc3851f..665e70d620fc 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -912,23 +912,23 @@ jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
912 skb_put(skb, framesize); 912 skb_put(skb, framesize);
913 skb->protocol = eth_type_trans(skb, jme->dev); 913 skb->protocol = eth_type_trans(skb, jme->dev);
914 914
915 if (jme_rxsum_ok(jme, rxdesc->descwb.flags)) 915 if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags)))
916 skb->ip_summed = CHECKSUM_UNNECESSARY; 916 skb->ip_summed = CHECKSUM_UNNECESSARY;
917 else 917 else
918 skb->ip_summed = CHECKSUM_NONE; 918 skb->ip_summed = CHECKSUM_NONE;
919 919
920 if (rxdesc->descwb.flags & RXWBFLAG_TAGON) { 920 if (rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_TAGON)) {
921 if (jme->vlgrp) { 921 if (jme->vlgrp) {
922 jme->jme_vlan_rx(skb, jme->vlgrp, 922 jme->jme_vlan_rx(skb, jme->vlgrp,
923 le32_to_cpu(rxdesc->descwb.vlan)); 923 le16_to_cpu(rxdesc->descwb.vlan));
924 NET_STAT(jme).rx_bytes += 4; 924 NET_STAT(jme).rx_bytes += 4;
925 } 925 }
926 } else { 926 } else {
927 jme->jme_rx(skb); 927 jme->jme_rx(skb);
928 } 928 }
929 929
930 if ((le16_to_cpu(rxdesc->descwb.flags) & RXWBFLAG_DEST) == 930 if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_DEST)) ==
931 RXWBFLAG_DEST_MUL) 931 cpu_to_le16(RXWBFLAG_DEST_MUL))
932 ++(NET_STAT(jme).multicast); 932 ++(NET_STAT(jme).multicast);
933 933
934 jme->dev->last_rx = jiffies; 934 jme->dev->last_rx = jiffies;
@@ -961,7 +961,7 @@ jme_process_receive(struct jme_adapter *jme, int limit)
961 rxdesc = rxring->desc; 961 rxdesc = rxring->desc;
962 rxdesc += i; 962 rxdesc += i;
963 963
964 if ((rxdesc->descwb.flags & RXWBFLAG_OWN) || 964 if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_OWN)) ||
965 !(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL)) 965 !(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL))
966 goto out; 966 goto out;
967 967
@@ -1763,10 +1763,9 @@ jme_expand_header(struct jme_adapter *jme, struct sk_buff *skb)
1763} 1763}
1764 1764
1765static int 1765static int
1766jme_tx_tso(struct sk_buff *skb, 1766jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags)
1767 u16 *mss, u8 *flags)
1768{ 1767{
1769 *mss = skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT; 1768 *mss = cpu_to_le16(skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT);
1770 if (*mss) { 1769 if (*mss) {
1771 *flags |= TXFLAG_LSEN; 1770 *flags |= TXFLAG_LSEN;
1772 1771
@@ -1826,11 +1825,11 @@ jme_tx_csum(struct jme_adapter *jme, struct sk_buff *skb, u8 *flags)
1826} 1825}
1827 1826
1828static inline void 1827static inline void
1829jme_tx_vlan(struct sk_buff *skb, u16 *vlan, u8 *flags) 1828jme_tx_vlan(struct sk_buff *skb, __le16 *vlan, u8 *flags)
1830{ 1829{
1831 if (vlan_tx_tag_present(skb)) { 1830 if (vlan_tx_tag_present(skb)) {
1832 *flags |= TXFLAG_TAGON; 1831 *flags |= TXFLAG_TAGON;
1833 *vlan = vlan_tx_tag_get(skb); 1832 *vlan = cpu_to_le16(vlan_tx_tag_get(skb));
1834 } 1833 }
1835} 1834}
1836 1835
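The jme.c hunks make the descriptor handling endian-clean: on-wire __le16 fields are now tested against cpu_to_le16() constants (the swap on a constant folds away at compile time) instead of byte-swapping the live field on every access, and jme_tx_tso()/jme_tx_vlan() take __le16 output pointers so the stored values are converted exactly once. A short sketch of the flag-test idiom, using a hypothetical RX_FLAG_TAGON bit:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    #define RX_FLAG_TAGON   0x0100                  /* hypothetical flag bit */

    struct rx_writeback {
            __le16 flags;                           /* descriptor is little-endian */
    };

    static inline bool rx_has_tag(const struct rx_writeback *wb)
    {
            /* Swap the constant, not the field: cpu_to_le16() on a constant
             * folds at compile time and keeps sparse happy about __le16. */
            return wb->flags & cpu_to_le16(RX_FLAG_TAGON);
    }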
diff --git a/drivers/net/jme.h b/drivers/net/jme.h
index f863aee6648b..3f5d91543246 100644
--- a/drivers/net/jme.h
+++ b/drivers/net/jme.h
@@ -22,7 +22,7 @@
22 */ 22 */
23 23
24#ifndef __JME_H_INCLUDED__ 24#ifndef __JME_H_INCLUDED__
25#define __JME_H_INCLUDEE__ 25#define __JME_H_INCLUDED__
26 26
27#define DRV_NAME "jme" 27#define DRV_NAME "jme"
28#define DRV_VERSION "1.0.3" 28#define DRV_VERSION "1.0.3"
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 3b43bfd85a0f..b1ac63ab8c16 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -76,15 +76,6 @@ static int loopback_xmit(struct sk_buff *skb, struct net_device *dev)
76 76
77 skb->protocol = eth_type_trans(skb,dev); 77 skb->protocol = eth_type_trans(skb,dev);
78 78
79#ifdef LOOPBACK_TSO
80 if (skb_is_gso(skb)) {
81 BUG_ON(skb->protocol != htons(ETH_P_IP));
82 BUG_ON(ip_hdr(skb)->protocol != IPPROTO_TCP);
83
84 emulate_large_send_offload(skb);
85 return 0;
86 }
87#endif
88 dev->last_rx = jiffies; 79 dev->last_rx = jiffies;
89 80
90 /* it's OK to use per_cpu_ptr() because BHs are off */ 81 /* it's OK to use per_cpu_ptr() because BHs are off */
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 42394505bb50..590039cbb146 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -70,6 +70,9 @@ static void macvlan_broadcast(struct sk_buff *skb,
70 struct sk_buff *nskb; 70 struct sk_buff *nskb;
71 unsigned int i; 71 unsigned int i;
72 72
73 if (skb->protocol == htons(ETH_P_PAUSE))
74 return;
75
73 for (i = 0; i < MACVLAN_HASH_SIZE; i++) { 76 for (i = 0; i < MACVLAN_HASH_SIZE; i++) {
74 hlist_for_each_entry_rcu(vlan, n, &port->vlan_hash[i], hlist) { 77 hlist_for_each_entry_rcu(vlan, n, &port->vlan_hash[i], hlist) {
75 dev = vlan->dev; 78 dev = vlan->dev;
diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
index 0952a6528f58..a7a97bf998f8 100644
--- a/drivers/net/mlx4/Makefile
+++ b/drivers/net/mlx4/Makefile
@@ -1,4 +1,9 @@
1obj-$(CONFIG_MLX4_CORE) += mlx4_core.o 1obj-$(CONFIG_MLX4_CORE) += mlx4_core.o
2 2
3mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \ 3mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
4 mr.o pd.o profile.o qp.o reset.o srq.o 4 mr.o pd.o port.o profile.o qp.o reset.o srq.o
5
6obj-$(CONFIG_MLX4_EN) += mlx4_en.o
7
8mlx4_en-y := en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o \
9 en_resources.o en_netdev.o
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index b411b79d72ad..ad95d5f7b630 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/mlx4/alloc.c
@@ -48,13 +48,16 @@ u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
48 48
49 obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last); 49 obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last);
50 if (obj >= bitmap->max) { 50 if (obj >= bitmap->max) {
51 bitmap->top = (bitmap->top + bitmap->max) & bitmap->mask; 51 bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
52 & bitmap->mask;
52 obj = find_first_zero_bit(bitmap->table, bitmap->max); 53 obj = find_first_zero_bit(bitmap->table, bitmap->max);
53 } 54 }
54 55
55 if (obj < bitmap->max) { 56 if (obj < bitmap->max) {
56 set_bit(obj, bitmap->table); 57 set_bit(obj, bitmap->table);
57 bitmap->last = (obj + 1) & (bitmap->max - 1); 58 bitmap->last = (obj + 1);
59 if (bitmap->last == bitmap->max)
60 bitmap->last = 0;
58 obj |= bitmap->top; 61 obj |= bitmap->top;
59 } else 62 } else
60 obj = -1; 63 obj = -1;
@@ -66,16 +69,90 @@ u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
66 69
67void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj) 70void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj)
68{ 71{
69 obj &= bitmap->max - 1; 72 mlx4_bitmap_free_range(bitmap, obj, 1);
73}
74
75static unsigned long find_aligned_range(unsigned long *bitmap,
76 u32 start, u32 nbits,
77 int len, int align)
78{
79 unsigned long end, i;
80
81again:
82 start = ALIGN(start, align);
83
84 while ((start < nbits) && test_bit(start, bitmap))
85 start += align;
86
87 if (start >= nbits)
88 return -1;
89
90 end = start+len;
91 if (end > nbits)
92 return -1;
93
94 for (i = start + 1; i < end; i++) {
95 if (test_bit(i, bitmap)) {
96 start = i + 1;
97 goto again;
98 }
99 }
100
101 return start;
102}
103
104u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
105{
106 u32 obj, i;
107
108 if (likely(cnt == 1 && align == 1))
109 return mlx4_bitmap_alloc(bitmap);
110
111 spin_lock(&bitmap->lock);
112
113 obj = find_aligned_range(bitmap->table, bitmap->last,
114 bitmap->max, cnt, align);
115 if (obj >= bitmap->max) {
116 bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
117 & bitmap->mask;
118 obj = find_aligned_range(bitmap->table, 0, bitmap->max,
119 cnt, align);
120 }
121
122 if (obj < bitmap->max) {
123 for (i = 0; i < cnt; i++)
124 set_bit(obj + i, bitmap->table);
125 if (obj == bitmap->last) {
126 bitmap->last = (obj + cnt);
127 if (bitmap->last >= bitmap->max)
128 bitmap->last = 0;
129 }
130 obj |= bitmap->top;
131 } else
132 obj = -1;
133
134 spin_unlock(&bitmap->lock);
135
136 return obj;
137}
138
139void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt)
140{
141 u32 i;
142
143 obj &= bitmap->max + bitmap->reserved_top - 1;
70 144
71 spin_lock(&bitmap->lock); 145 spin_lock(&bitmap->lock);
72 clear_bit(obj, bitmap->table); 146 for (i = 0; i < cnt; i++)
147 clear_bit(obj + i, bitmap->table);
73 bitmap->last = min(bitmap->last, obj); 148 bitmap->last = min(bitmap->last, obj);
74 bitmap->top = (bitmap->top + bitmap->max) & bitmap->mask; 149 bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
150 & bitmap->mask;
75 spin_unlock(&bitmap->lock); 151 spin_unlock(&bitmap->lock);
76} 152}
77 153
78int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, u32 reserved) 154int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
155 u32 reserved_bot, u32 reserved_top)
79{ 156{
80 int i; 157 int i;
81 158
@@ -85,14 +162,16 @@ int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, u32 reserved
85 162
86 bitmap->last = 0; 163 bitmap->last = 0;
87 bitmap->top = 0; 164 bitmap->top = 0;
88 bitmap->max = num; 165 bitmap->max = num - reserved_top;
89 bitmap->mask = mask; 166 bitmap->mask = mask;
167 bitmap->reserved_top = reserved_top;
90 spin_lock_init(&bitmap->lock); 168 spin_lock_init(&bitmap->lock);
91 bitmap->table = kzalloc(BITS_TO_LONGS(num) * sizeof (long), GFP_KERNEL); 169 bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) *
170 sizeof (long), GFP_KERNEL);
92 if (!bitmap->table) 171 if (!bitmap->table)
93 return -ENOMEM; 172 return -ENOMEM;
94 173
95 for (i = 0; i < reserved; ++i) 174 for (i = 0; i < reserved_bot; ++i)
96 set_bit(i, bitmap->table); 175 set_bit(i, bitmap->table);
97 176
98 return 0; 177 return 0;
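The alloc.c changes generalize the mlx4 bitmap allocator: mlx4_bitmap_init() now reserves entries at both ends (reserved_bot/reserved_top), and mlx4_bitmap_alloc_range()/mlx4_bitmap_free_range() hand out naturally aligned blocks of consecutive objects, falling back to the single-object fast path when cnt == 1 && align == 1. A usage sketch with purely hypothetical sizes, assuming the mlx4_core internal header:

    #include <linux/errno.h>
    #include "mlx4.h"       /* mlx4_core internal header */

    static int example_bitmap_usage(struct mlx4_bitmap *bm)
    {
            u32 base;
            int err;

            err = mlx4_bitmap_init(bm, 1 << 16,        /* num (power of two)  */
                                   (1 << 16) - 1,      /* mask = num - 1      */
                                   64,                 /* reserved_bot        */
                                   128);               /* reserved_top        */
            if (err)
                    return err;

            /* Eight consecutive objects, naturally aligned to eight. */
            base = mlx4_bitmap_alloc_range(bm, 8, 8);
            if (base == (u32) -1)
                    return -ENOMEM;

            mlx4_bitmap_free_range(bm, base, 8);
            return 0;
    }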
diff --git a/drivers/net/mlx4/cq.c b/drivers/net/mlx4/cq.c
index 9bb50e3f8974..b7ad2829d67e 100644
--- a/drivers/net/mlx4/cq.c
+++ b/drivers/net/mlx4/cq.c
@@ -300,7 +300,7 @@ int mlx4_init_cq_table(struct mlx4_dev *dev)
300 INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC); 300 INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
301 301
302 err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs, 302 err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs,
303 dev->caps.num_cqs - 1, dev->caps.reserved_cqs); 303 dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0);
304 if (err) 304 if (err)
305 return err; 305 return err;
306 306
diff --git a/drivers/net/mlx4/en_cq.c b/drivers/net/mlx4/en_cq.c
new file mode 100644
index 000000000000..1368a8010af4
--- /dev/null
+++ b/drivers/net/mlx4/en_cq.c
@@ -0,0 +1,146 @@
1/*
2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#include <linux/mlx4/cq.h>
35#include <linux/mlx4/qp.h>
36#include <linux/mlx4/cmd.h>
37
38#include "mlx4_en.h"
39
40static void mlx4_en_cq_event(struct mlx4_cq *cq, enum mlx4_event event)
41{
42 return;
43}
44
45
46int mlx4_en_create_cq(struct mlx4_en_priv *priv,
47 struct mlx4_en_cq *cq,
48 int entries, int ring, enum cq_type mode)
49{
50 struct mlx4_en_dev *mdev = priv->mdev;
51 int err;
52
53 cq->size = entries;
54 if (mode == RX)
55 cq->buf_size = cq->size * sizeof(struct mlx4_cqe);
56 else
57 cq->buf_size = sizeof(struct mlx4_cqe);
58
59 cq->ring = ring;
60 cq->is_tx = mode;
61 spin_lock_init(&cq->lock);
62
63 err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres,
64 cq->buf_size, 2 * PAGE_SIZE);
65 if (err)
66 return err;
67
68 err = mlx4_en_map_buffer(&cq->wqres.buf);
69 if (err)
70 mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
71
72 return err;
73}
74
75int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
76{
77 struct mlx4_en_dev *mdev = priv->mdev;
78 int err;
79
80 cq->dev = mdev->pndev[priv->port];
81 cq->mcq.set_ci_db = cq->wqres.db.db;
82 cq->mcq.arm_db = cq->wqres.db.db + 1;
83 *cq->mcq.set_ci_db = 0;
84 *cq->mcq.arm_db = 0;
85 cq->buf = (struct mlx4_cqe *) cq->wqres.buf.direct.buf;
86 memset(cq->buf, 0, cq->buf_size);
87
88 err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt, &mdev->priv_uar,
89 cq->wqres.db.dma, &cq->mcq, cq->is_tx);
90 if (err)
91 return err;
92
93 cq->mcq.comp = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
94 cq->mcq.event = mlx4_en_cq_event;
95
96 if (cq->is_tx) {
97 init_timer(&cq->timer);
98 cq->timer.function = mlx4_en_poll_tx_cq;
99 cq->timer.data = (unsigned long) cq;
100 } else {
101 netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
102 napi_enable(&cq->napi);
103 }
104
105 return 0;
106}
107
108void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
109{
110 struct mlx4_en_dev *mdev = priv->mdev;
111
112 mlx4_en_unmap_buffer(&cq->wqres.buf);
113 mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
114 cq->buf_size = 0;
115 cq->buf = NULL;
116}
117
118void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
119{
120 struct mlx4_en_dev *mdev = priv->mdev;
121
122 if (cq->is_tx)
123 del_timer(&cq->timer);
124 else
125 napi_disable(&cq->napi);
126
127 mlx4_cq_free(mdev->dev, &cq->mcq);
128}
129
130/* Set rx cq moderation parameters */
131int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
132{
133 return mlx4_cq_modify(priv->mdev->dev, &cq->mcq,
134 cq->moder_cnt, cq->moder_time);
135}
136
137int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
138{
139 cq->armed = 1;
140 mlx4_cq_arm(&cq->mcq, MLX4_CQ_DB_REQ_NOT, priv->mdev->uar_map,
141 &priv->mdev->uar_lock);
142
143 return 0;
144}
145
146
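en_cq.c splits the completion-queue lifecycle into create (allocate and map the HW queue resources), activate (set up doorbells, call mlx4_cq_alloc(), attach NAPI for RX or a poll timer for TX), moderation/arm, and the mirror-image deactivate/destroy. A sketch of the expected ordering for one RX ring, roughly what mlx4_en_start_port() further below does per ring; the error unwinding here is illustrative, not the driver's exact code:

    #include "mlx4_en.h"

    static int example_bring_up_rx_cq(struct mlx4_en_priv *priv,
                                      struct mlx4_en_cq *cq, int entries, int ring)
    {
            int err;

            err = mlx4_en_create_cq(priv, cq, entries, ring, RX);
            if (err)
                    return err;
            err = mlx4_en_activate_cq(priv, cq);    /* doorbells, mlx4_cq_alloc(), NAPI */
            if (err)
                    goto out_destroy;
            err = mlx4_en_set_cq_moder(priv, cq);   /* push moder_cnt / moder_time */
            if (err)
                    goto out_deactivate;
            mlx4_en_arm_cq(priv, cq);               /* request the next completion event */
            return 0;

    out_deactivate:
            mlx4_en_deactivate_cq(priv, cq);
    out_destroy:
            mlx4_en_destroy_cq(priv, cq);
            return err;
    }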
diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/mlx4/en_main.c
new file mode 100644
index 000000000000..4b9794e97a79
--- /dev/null
+++ b/drivers/net/mlx4/en_main.c
@@ -0,0 +1,253 @@
1/*
2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#include <linux/cpumask.h>
35#include <linux/module.h>
36#include <linux/delay.h>
37#include <linux/netdevice.h>
38
39#include <linux/mlx4/driver.h>
40#include <linux/mlx4/device.h>
41#include <linux/mlx4/cmd.h>
42
43#include "mlx4_en.h"
44
45MODULE_AUTHOR("Liran Liss, Yevgeny Petrilin");
46MODULE_DESCRIPTION("Mellanox ConnectX HCA Ethernet driver");
47MODULE_LICENSE("Dual BSD/GPL");
48MODULE_VERSION(DRV_VERSION " ("DRV_RELDATE")");
49
50static const char mlx4_en_version[] =
51 DRV_NAME ": Mellanox ConnectX HCA Ethernet driver v"
52 DRV_VERSION " (" DRV_RELDATE ")\n";
53
54static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
55 enum mlx4_dev_event event, int port)
56{
57 struct mlx4_en_dev *mdev = (struct mlx4_en_dev *) endev_ptr;
58 struct mlx4_en_priv *priv;
59
60 if (!mdev->pndev[port])
61 return;
62
63 priv = netdev_priv(mdev->pndev[port]);
64 switch (event) {
65 case MLX4_DEV_EVENT_PORT_UP:
66 case MLX4_DEV_EVENT_PORT_DOWN:
67 /* To prevent races, we poll the link state in a separate
68 task rather than changing it here */
69 priv->link_state = event;
70 queue_work(mdev->workqueue, &priv->linkstate_task);
71 break;
72
73 case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
74 mlx4_err(mdev, "Internal error detected, restarting device\n");
75 break;
76
77 default:
78 mlx4_warn(mdev, "Unhandled event: %d\n", event);
79 }
80}
81
82static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr)
83{
84 struct mlx4_en_dev *mdev = endev_ptr;
85 int i;
86
87 mutex_lock(&mdev->state_lock);
88 mdev->device_up = false;
89 mutex_unlock(&mdev->state_lock);
90
91 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
92 if (mdev->pndev[i])
93 mlx4_en_destroy_netdev(mdev->pndev[i]);
94
95 flush_workqueue(mdev->workqueue);
96 destroy_workqueue(mdev->workqueue);
97 mlx4_mr_free(dev, &mdev->mr);
98 mlx4_uar_free(dev, &mdev->priv_uar);
99 mlx4_pd_free(dev, mdev->priv_pdn);
100 kfree(mdev);
101}
102
103static void *mlx4_en_add(struct mlx4_dev *dev)
104{
105 static int mlx4_en_version_printed;
106 struct mlx4_en_dev *mdev;
107 int i;
108 int err;
109
110 if (!mlx4_en_version_printed) {
111 printk(KERN_INFO "%s", mlx4_en_version);
112 mlx4_en_version_printed++;
113 }
114
115 mdev = kzalloc(sizeof *mdev, GFP_KERNEL);
116 if (!mdev) {
117 dev_err(&dev->pdev->dev, "Device struct alloc failed, "
118 "aborting.\n");
119 err = -ENOMEM;
120 goto err_free_res;
121 }
122
123 if (mlx4_pd_alloc(dev, &mdev->priv_pdn))
124 goto err_free_dev;
125
126 if (mlx4_uar_alloc(dev, &mdev->priv_uar))
127 goto err_pd;
128
129 mdev->uar_map = ioremap(mdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
130 if (!mdev->uar_map)
131 goto err_uar;
132 spin_lock_init(&mdev->uar_lock);
133
134 mdev->dev = dev;
135 mdev->dma_device = &(dev->pdev->dev);
136 mdev->pdev = dev->pdev;
137 mdev->device_up = false;
138
139 mdev->LSO_support = !!(dev->caps.flags & (1 << 15));
140 if (!mdev->LSO_support)
141 mlx4_warn(mdev, "LSO not supported, please upgrade to later "
142 "FW version to enable LSO\n");
143
144 if (mlx4_mr_alloc(mdev->dev, mdev->priv_pdn, 0, ~0ull,
145 MLX4_PERM_LOCAL_WRITE | MLX4_PERM_LOCAL_READ,
146 0, 0, &mdev->mr)) {
147 mlx4_err(mdev, "Failed allocating memory region\n");
148 goto err_uar;
149 }
150 if (mlx4_mr_enable(mdev->dev, &mdev->mr)) {
151 mlx4_err(mdev, "Failed enabling memory region\n");
152 goto err_mr;
153 }
154
155 /* Build device profile according to supplied module parameters */
156 err = mlx4_en_get_profile(mdev);
157 if (err) {
158 mlx4_err(mdev, "Bad module parameters, aborting.\n");
159 goto err_mr;
160 }
161
 162 /* Configure which ports to start according to module parameters */
163 mdev->port_cnt = 0;
164 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
165 mdev->port_cnt++;
166
167 /* If we did not receive an explicit number of Rx rings, default to
168 * the number of completion vectors populated by the mlx4_core */
169 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
170 mlx4_info(mdev, "Using %d tx rings for port:%d\n",
171 mdev->profile.prof[i].tx_ring_num, i);
172 if (!mdev->profile.prof[i].rx_ring_num) {
173 mdev->profile.prof[i].rx_ring_num = 1;
174 mlx4_info(mdev, "Defaulting to %d rx rings for port:%d\n",
175 1, i);
176 } else
177 mlx4_info(mdev, "Using %d rx rings for port:%d\n",
178 mdev->profile.prof[i].rx_ring_num, i);
179 }
180
181 /* Create our own workqueue for reset/multicast tasks
182 * Note: we cannot use the shared workqueue because of deadlocks caused
183 * by the rtnl lock */
184 mdev->workqueue = create_singlethread_workqueue("mlx4_en");
185 if (!mdev->workqueue) {
186 err = -ENOMEM;
187 goto err_close_nic;
188 }
189
190 /* At this stage all non-port specific tasks are complete:
191 * mark the card state as up */
192 mutex_init(&mdev->state_lock);
193 mdev->device_up = true;
194
195 /* Setup ports */
196
197 /* Create a netdev for each port */
198 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
199 mlx4_info(mdev, "Activating port:%d\n", i);
200 if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i])) {
201 mdev->pndev[i] = NULL;
202 goto err_free_netdev;
203 }
204 }
205 return mdev;
206
207
208err_free_netdev:
209 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
210 if (mdev->pndev[i])
211 mlx4_en_destroy_netdev(mdev->pndev[i]);
212 }
213
214 mutex_lock(&mdev->state_lock);
215 mdev->device_up = false;
216 mutex_unlock(&mdev->state_lock);
217 flush_workqueue(mdev->workqueue);
218
219 /* Stop event queue before we drop down to release shared SW state */
220
221err_close_nic:
222 destroy_workqueue(mdev->workqueue);
223err_mr:
224 mlx4_mr_free(dev, &mdev->mr);
225err_uar:
226 mlx4_uar_free(dev, &mdev->priv_uar);
227err_pd:
228 mlx4_pd_free(dev, mdev->priv_pdn);
229err_free_dev:
230 kfree(mdev);
231err_free_res:
232 return NULL;
233}
234
235static struct mlx4_interface mlx4_en_interface = {
236 .add = mlx4_en_add,
237 .remove = mlx4_en_remove,
238 .event = mlx4_en_event,
239};
240
241static int __init mlx4_en_init(void)
242{
243 return mlx4_register_interface(&mlx4_en_interface);
244}
245
246static void __exit mlx4_en_cleanup(void)
247{
248 mlx4_unregister_interface(&mlx4_en_interface);
249}
250
251module_init(mlx4_en_init);
252module_exit(mlx4_en_cleanup);
253
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
new file mode 100644
index 000000000000..96e709d6440a
--- /dev/null
+++ b/drivers/net/mlx4/en_netdev.c
@@ -0,0 +1,1088 @@
1/*
2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#include <linux/etherdevice.h>
35#include <linux/tcp.h>
36#include <linux/if_vlan.h>
37#include <linux/delay.h>
38
39#include <linux/mlx4/driver.h>
40#include <linux/mlx4/device.h>
41#include <linux/mlx4/cmd.h>
42#include <linux/mlx4/cq.h>
43
44#include "mlx4_en.h"
45#include "en_port.h"
46
47
48static void mlx4_en_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
49{
50 struct mlx4_en_priv *priv = netdev_priv(dev);
51 struct mlx4_en_dev *mdev = priv->mdev;
52 int err;
53
54 mlx4_dbg(HW, priv, "Registering VLAN group:%p\n", grp);
55 priv->vlgrp = grp;
56
57 mutex_lock(&mdev->state_lock);
58 if (mdev->device_up && priv->port_up) {
59 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, grp);
60 if (err)
61 mlx4_err(mdev, "Failed configuring VLAN filter\n");
62 }
63 mutex_unlock(&mdev->state_lock);
64}
65
66static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
67{
68 struct mlx4_en_priv *priv = netdev_priv(dev);
69 struct mlx4_en_dev *mdev = priv->mdev;
70 int err;
71
72 if (!priv->vlgrp)
73 return;
74
75 mlx4_dbg(HW, priv, "adding VLAN:%d (vlgrp entry:%p)\n",
76 vid, vlan_group_get_device(priv->vlgrp, vid));
77
78 /* Add VID to port VLAN filter */
79 mutex_lock(&mdev->state_lock);
80 if (mdev->device_up && priv->port_up) {
81 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
82 if (err)
83 mlx4_err(mdev, "Failed configuring VLAN filter\n");
84 }
85 mutex_unlock(&mdev->state_lock);
86}
87
88static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
89{
90 struct mlx4_en_priv *priv = netdev_priv(dev);
91 struct mlx4_en_dev *mdev = priv->mdev;
92 int err;
93
94 if (!priv->vlgrp)
95 return;
96
97 mlx4_dbg(HW, priv, "Killing VID:%d (vlgrp:%p vlgrp "
98 "entry:%p)\n", vid, priv->vlgrp,
99 vlan_group_get_device(priv->vlgrp, vid));
100 vlan_group_set_device(priv->vlgrp, vid, NULL);
101
102 /* Remove VID from port VLAN filter */
103 mutex_lock(&mdev->state_lock);
104 if (mdev->device_up && priv->port_up) {
105 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
106 if (err)
107 mlx4_err(mdev, "Failed configuring VLAN filter\n");
108 }
109 mutex_unlock(&mdev->state_lock);
110}
111
112static u64 mlx4_en_mac_to_u64(u8 *addr)
113{
114 u64 mac = 0;
115 int i;
116
117 for (i = 0; i < ETH_ALEN; i++) {
118 mac <<= 8;
119 mac |= addr[i];
120 }
121 return mac;
122}
123
124static int mlx4_en_set_mac(struct net_device *dev, void *addr)
125{
126 struct mlx4_en_priv *priv = netdev_priv(dev);
127 struct mlx4_en_dev *mdev = priv->mdev;
128 struct sockaddr *saddr = addr;
129
130 if (!is_valid_ether_addr(saddr->sa_data))
131 return -EADDRNOTAVAIL;
132
133 memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
134 priv->mac = mlx4_en_mac_to_u64(dev->dev_addr);
135 queue_work(mdev->workqueue, &priv->mac_task);
136 return 0;
137}
138
139static void mlx4_en_do_set_mac(struct work_struct *work)
140{
141 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
142 mac_task);
143 struct mlx4_en_dev *mdev = priv->mdev;
144 int err = 0;
145
146 mutex_lock(&mdev->state_lock);
147 if (priv->port_up) {
148 /* Remove old MAC and insert the new one */
149 mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);
150 err = mlx4_register_mac(mdev->dev, priv->port,
151 priv->mac, &priv->mac_index);
152 if (err)
153 mlx4_err(mdev, "Failed changing HW MAC address\n");
154 } else
155 mlx4_dbg(HW, priv, "Port is down, exiting...\n");
156
157 mutex_unlock(&mdev->state_lock);
158}
159
160static void mlx4_en_clear_list(struct net_device *dev)
161{
162 struct mlx4_en_priv *priv = netdev_priv(dev);
163 struct dev_mc_list *plist = priv->mc_list;
164 struct dev_mc_list *next;
165
166 while (plist) {
167 next = plist->next;
168 kfree(plist);
169 plist = next;
170 }
171 priv->mc_list = NULL;
172}
173
174static void mlx4_en_cache_mclist(struct net_device *dev)
175{
176 struct mlx4_en_priv *priv = netdev_priv(dev);
177 struct mlx4_en_dev *mdev = priv->mdev;
178 struct dev_mc_list *mclist;
179 struct dev_mc_list *tmp;
180 struct dev_mc_list *plist = NULL;
181
182 for (mclist = dev->mc_list; mclist; mclist = mclist->next) {
183 tmp = kmalloc(sizeof(struct dev_mc_list), GFP_ATOMIC);
184 if (!tmp) {
185 mlx4_err(mdev, "failed to allocate multicast list\n");
186 mlx4_en_clear_list(dev);
187 return;
188 }
189 memcpy(tmp, mclist, sizeof(struct dev_mc_list));
190 tmp->next = NULL;
191 if (plist)
192 plist->next = tmp;
193 else
194 priv->mc_list = tmp;
195 plist = tmp;
196 }
197}
198
199
200static void mlx4_en_set_multicast(struct net_device *dev)
201{
202 struct mlx4_en_priv *priv = netdev_priv(dev);
203
204 if (!priv->port_up)
205 return;
206
207 queue_work(priv->mdev->workqueue, &priv->mcast_task);
208}
209
210static void mlx4_en_do_set_multicast(struct work_struct *work)
211{
212 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
213 mcast_task);
214 struct mlx4_en_dev *mdev = priv->mdev;
215 struct net_device *dev = priv->dev;
216 struct dev_mc_list *mclist;
217 u64 mcast_addr = 0;
218 int err;
219
220 mutex_lock(&mdev->state_lock);
221 if (!mdev->device_up) {
222 mlx4_dbg(HW, priv, "Card is not up, ignoring "
223 "multicast change.\n");
224 goto out;
225 }
226 if (!priv->port_up) {
227 mlx4_dbg(HW, priv, "Port is down, ignoring "
228 "multicast change.\n");
229 goto out;
230 }
231
232 /*
 233 * Promiscuous mode: disable all filters
234 */
235
236 if (dev->flags & IFF_PROMISC) {
237 if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
238 if (netif_msg_rx_status(priv))
239 mlx4_warn(mdev, "Port:%d entering promiscuous mode\n",
240 priv->port);
241 priv->flags |= MLX4_EN_FLAG_PROMISC;
242
 243 /* Enable promiscuous mode */
244 err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
245 priv->base_qpn, 1);
246 if (err)
247 mlx4_err(mdev, "Failed enabling "
248 "promiscous mode\n");
249
250 /* Disable port multicast filter (unconditionally) */
251 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
252 0, MLX4_MCAST_DISABLE);
253 if (err)
254 mlx4_err(mdev, "Failed disabling "
255 "multicast filter\n");
256
257 /* Disable port VLAN filter */
258 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, NULL);
259 if (err)
260 mlx4_err(mdev, "Failed disabling "
261 "VLAN filter\n");
262 }
263 goto out;
264 }
265
266 /*
 267 * Not in promiscuous mode
268 */
269
270 if (priv->flags & MLX4_EN_FLAG_PROMISC) {
271 if (netif_msg_rx_status(priv))
272 mlx4_warn(mdev, "Port:%d leaving promiscuous mode\n",
273 priv->port);
274 priv->flags &= ~MLX4_EN_FLAG_PROMISC;
275
 276 /* Disable promiscuous mode */
277 err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
278 priv->base_qpn, 0);
279 if (err)
280 mlx4_err(mdev, "Failed disabling promiscous mode\n");
281
282 /* Enable port VLAN filter */
283 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
284 if (err)
285 mlx4_err(mdev, "Failed enabling VLAN filter\n");
286 }
287
288 /* Enable/disable the multicast filter according to IFF_ALLMULTI */
289 if (dev->flags & IFF_ALLMULTI) {
290 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
291 0, MLX4_MCAST_DISABLE);
292 if (err)
293 mlx4_err(mdev, "Failed disabling multicast filter\n");
294 } else {
295 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
296 0, MLX4_MCAST_DISABLE);
297 if (err)
298 mlx4_err(mdev, "Failed disabling multicast filter\n");
299
300 /* Flush mcast filter and init it with broadcast address */
301 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
302 1, MLX4_MCAST_CONFIG);
303
304 /* Update multicast list - we cache all addresses so they won't
 305 * change while HW is updated holding the command semaphore */
306 netif_tx_lock_bh(dev);
307 mlx4_en_cache_mclist(dev);
308 netif_tx_unlock_bh(dev);
309 for (mclist = priv->mc_list; mclist; mclist = mclist->next) {
310 mcast_addr = mlx4_en_mac_to_u64(mclist->dmi_addr);
311 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
312 mcast_addr, 0, MLX4_MCAST_CONFIG);
313 }
314 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
315 0, MLX4_MCAST_ENABLE);
316 if (err)
317 mlx4_err(mdev, "Failed enabling multicast filter\n");
318
319 mlx4_en_clear_list(dev);
320 }
321out:
322 mutex_unlock(&mdev->state_lock);
323}
324
325#ifdef CONFIG_NET_POLL_CONTROLLER
326static void mlx4_en_netpoll(struct net_device *dev)
327{
328 struct mlx4_en_priv *priv = netdev_priv(dev);
329 struct mlx4_en_cq *cq;
330 unsigned long flags;
331 int i;
332
333 for (i = 0; i < priv->rx_ring_num; i++) {
334 cq = &priv->rx_cq[i];
335 spin_lock_irqsave(&cq->lock, flags);
336 napi_synchronize(&cq->napi);
337 mlx4_en_process_rx_cq(dev, cq, 0);
338 spin_unlock_irqrestore(&cq->lock, flags);
339 }
340}
341#endif
342
343static void mlx4_en_tx_timeout(struct net_device *dev)
344{
345 struct mlx4_en_priv *priv = netdev_priv(dev);
346 struct mlx4_en_dev *mdev = priv->mdev;
347
348 if (netif_msg_timer(priv))
349 mlx4_warn(mdev, "Tx timeout called on port:%d\n", priv->port);
350
351 if (netif_carrier_ok(dev)) {
352 priv->port_stats.tx_timeout++;
353 mlx4_dbg(DRV, priv, "Scheduling watchdog\n");
354 queue_work(mdev->workqueue, &priv->watchdog_task);
355 }
356}
357
358
359static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev)
360{
361 struct mlx4_en_priv *priv = netdev_priv(dev);
362
363 spin_lock_bh(&priv->stats_lock);
364 memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats));
365 spin_unlock_bh(&priv->stats_lock);
366
367 return &priv->ret_stats;
368}
369
370static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
371{
372 struct mlx4_en_dev *mdev = priv->mdev;
373 struct mlx4_en_cq *cq;
374 int i;
375
376 /* If we haven't received a specific coalescing setting
 377 * (module param), we set the moderation parameters as follows:
378 * - moder_cnt is set to the number of mtu sized packets to
 379 * satisfy our coalescing target.
380 * - moder_time is set to a fixed value.
381 */
382 priv->rx_frames = (mdev->profile.rx_moder_cnt ==
383 MLX4_EN_AUTO_CONF) ?
384 MLX4_EN_RX_COAL_TARGET /
385 priv->dev->mtu + 1 :
386 mdev->profile.rx_moder_cnt;
387 priv->rx_usecs = (mdev->profile.rx_moder_time ==
388 MLX4_EN_AUTO_CONF) ?
389 MLX4_EN_RX_COAL_TIME :
390 mdev->profile.rx_moder_time;
391 mlx4_dbg(INTR, priv, "Default coalesing params for mtu:%d - "
392 "rx_frames:%d rx_usecs:%d\n",
393 priv->dev->mtu, priv->rx_frames, priv->rx_usecs);
394
395 /* Setup cq moderation params */
396 for (i = 0; i < priv->rx_ring_num; i++) {
397 cq = &priv->rx_cq[i];
398 cq->moder_cnt = priv->rx_frames;
399 cq->moder_time = priv->rx_usecs;
400 }
401
402 for (i = 0; i < priv->tx_ring_num; i++) {
403 cq = &priv->tx_cq[i];
404 cq->moder_cnt = MLX4_EN_TX_COAL_PKTS;
405 cq->moder_time = MLX4_EN_TX_COAL_TIME;
406 }
407
408 /* Reset auto-moderation params */
409 priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
410 priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
411 priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
412 priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
413 priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
414 priv->adaptive_rx_coal = mdev->profile.auto_moder;
415 priv->last_moder_time = MLX4_EN_AUTO_CONF;
416 priv->last_moder_jiffies = 0;
417 priv->last_moder_packets = 0;
418 priv->last_moder_tx_packets = 0;
419 priv->last_moder_bytes = 0;
420}
421
422static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
423{
424 unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
425 struct mlx4_en_dev *mdev = priv->mdev;
426 struct mlx4_en_cq *cq;
427 unsigned long packets;
428 unsigned long rate;
429 unsigned long avg_pkt_size;
430 unsigned long rx_packets;
431 unsigned long rx_bytes;
432 unsigned long tx_packets;
433 unsigned long tx_pkt_diff;
434 unsigned long rx_pkt_diff;
435 int moder_time;
436 int i, err;
437
438 if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
439 return;
440
441 spin_lock_bh(&priv->stats_lock);
442 rx_packets = priv->stats.rx_packets;
443 rx_bytes = priv->stats.rx_bytes;
444 tx_packets = priv->stats.tx_packets;
445 spin_unlock_bh(&priv->stats_lock);
446
447 if (!priv->last_moder_jiffies || !period)
448 goto out;
449
450 tx_pkt_diff = ((unsigned long) (tx_packets -
451 priv->last_moder_tx_packets));
452 rx_pkt_diff = ((unsigned long) (rx_packets -
453 priv->last_moder_packets));
454 packets = max(tx_pkt_diff, rx_pkt_diff);
455 rate = packets * HZ / period;
456 avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
457 priv->last_moder_bytes)) / packets : 0;
458
459 /* Apply auto-moderation only when packet rate exceeds a rate that
460 * it matters */
461 if (rate > MLX4_EN_RX_RATE_THRESH) {
462 /* If tx and rx packet rates are not balanced, assume that
463 * traffic is mainly BW bound and apply maximum moderation.
464 * Otherwise, moderate according to packet rate */
465 if (2 * tx_pkt_diff > 3 * rx_pkt_diff ||
466 2 * rx_pkt_diff > 3 * tx_pkt_diff) {
467 moder_time = priv->rx_usecs_high;
468 } else {
469 if (rate < priv->pkt_rate_low)
470 moder_time = priv->rx_usecs_low;
471 else if (rate > priv->pkt_rate_high)
472 moder_time = priv->rx_usecs_high;
473 else
474 moder_time = (rate - priv->pkt_rate_low) *
475 (priv->rx_usecs_high - priv->rx_usecs_low) /
476 (priv->pkt_rate_high - priv->pkt_rate_low) +
477 priv->rx_usecs_low;
478 }
479 } else {
480 /* When packet rate is low, use default moderation rather than
481 * 0 to prevent interrupt storms if traffic suddenly increases */
482 moder_time = priv->rx_usecs;
483 }
484
485 mlx4_dbg(INTR, priv, "tx rate:%lu rx_rate:%lu\n",
486 tx_pkt_diff * HZ / period, rx_pkt_diff * HZ / period);
487
488 mlx4_dbg(INTR, priv, "Rx moder_time changed from:%d to %d period:%lu "
489 "[jiff] packets:%lu avg_pkt_size:%lu rate:%lu [p/s])\n",
490 priv->last_moder_time, moder_time, period, packets,
491 avg_pkt_size, rate);
492
493 if (moder_time != priv->last_moder_time) {
494 priv->last_moder_time = moder_time;
495 for (i = 0; i < priv->rx_ring_num; i++) {
496 cq = &priv->rx_cq[i];
497 cq->moder_time = moder_time;
498 err = mlx4_en_set_cq_moder(priv, cq);
499 if (err) {
500 mlx4_err(mdev, "Failed modifying moderation for cq:%d "
501 "on port:%d\n", i, priv->port);
502 break;
503 }
504 }
505 }
506
507out:
508 priv->last_moder_packets = rx_packets;
509 priv->last_moder_tx_packets = tx_packets;
510 priv->last_moder_bytes = rx_bytes;
511 priv->last_moder_jiffies = jiffies;
512}
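
The interpolation in the branch above maps the measured packet rate linearly from the [pkt_rate_low, pkt_rate_high] band onto [rx_usecs_low, rx_usecs_high]. A minimal standalone sketch of that mapping follows; the thresholds and microsecond bounds used in main() are illustrative values, not the driver's MLX4_EN_RX_RATE_* / MLX4_EN_RX_COAL_TIME_* defaults.

/* Standalone sketch of the moder_time interpolation above (illustrative
 * values only, not the driver's MLX4_EN_* defaults). */
#include <stdio.h>

static int interp_moder_time(unsigned long rate,
			     unsigned long rate_low, unsigned long rate_high,
			     int usecs_low, int usecs_high)
{
	if (rate < rate_low)
		return usecs_low;
	if (rate > rate_high)
		return usecs_high;
	return (rate - rate_low) * (usecs_high - usecs_low) /
	       (rate_high - rate_low) + usecs_low;
}

int main(void)
{
	/* e.g. 550 kpps between a 100 kpps low and a 1 Mpps high threshold:
	 * (550000 - 100000) * (128 - 16) / 900000 + 16 == 72 usec */
	printf("%d\n", interp_moder_time(550000, 100000, 1000000, 16, 128));
	return 0;
}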
513
514static void mlx4_en_do_get_stats(struct work_struct *work)
515{
516 struct delayed_work *delay = container_of(work, struct delayed_work, work);
517 struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
518 stats_task);
519 struct mlx4_en_dev *mdev = priv->mdev;
520 int err;
521
522 err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
523 if (err)
524 mlx4_dbg(HW, priv, "Could not update stats for "
525 "port:%d\n", priv->port);
526
527 mutex_lock(&mdev->state_lock);
528 if (mdev->device_up) {
529 if (priv->port_up)
530 mlx4_en_auto_moderation(priv);
531
532 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
533 }
534 mutex_unlock(&mdev->state_lock);
535}
536
537static void mlx4_en_linkstate(struct work_struct *work)
538{
539 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
540 linkstate_task);
541 struct mlx4_en_dev *mdev = priv->mdev;
542 int linkstate = priv->link_state;
543
544 mutex_lock(&mdev->state_lock);
545	/* If the observable port state changed, set the carrier state and
546	 * report it to the system log */
547 if (priv->last_link_state != linkstate) {
548 if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
549 if (netif_msg_link(priv))
550 mlx4_info(mdev, "Port %d - link down\n", priv->port);
551 netif_carrier_off(priv->dev);
552 } else {
553 if (netif_msg_link(priv))
554 mlx4_info(mdev, "Port %d - link up\n", priv->port);
555 netif_carrier_on(priv->dev);
556 }
557 }
558 priv->last_link_state = linkstate;
559 mutex_unlock(&mdev->state_lock);
560}
561
562
563static int mlx4_en_start_port(struct net_device *dev)
564{
565 struct mlx4_en_priv *priv = netdev_priv(dev);
566 struct mlx4_en_dev *mdev = priv->mdev;
567 struct mlx4_en_cq *cq;
568 struct mlx4_en_tx_ring *tx_ring;
569 struct mlx4_en_rx_ring *rx_ring;
570 int rx_index = 0;
571 int tx_index = 0;
572 u16 stride;
573 int err = 0;
574 int i;
575 int j;
576
577 if (priv->port_up) {
578 mlx4_dbg(DRV, priv, "start port called while port already up\n");
579 return 0;
580 }
581
582 /* Calculate Rx buf size */
583 dev->mtu = min(dev->mtu, priv->max_mtu);
584 mlx4_en_calc_rx_buf(dev);
585 mlx4_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);
586 stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
587 DS_SIZE * priv->num_frags);
588 /* Configure rx cq's and rings */
589 for (i = 0; i < priv->rx_ring_num; i++) {
590 cq = &priv->rx_cq[i];
591 rx_ring = &priv->rx_ring[i];
592
593 err = mlx4_en_activate_cq(priv, cq);
594 if (err) {
595 mlx4_err(mdev, "Failed activating Rx CQ\n");
596 goto rx_err;
597 }
598 for (j = 0; j < cq->size; j++)
599 cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
600 err = mlx4_en_set_cq_moder(priv, cq);
601 if (err) {
602 mlx4_err(mdev, "Failed setting cq moderation parameters");
603 mlx4_en_deactivate_cq(priv, cq);
604 goto cq_err;
605 }
606 mlx4_en_arm_cq(priv, cq);
607
608 ++rx_index;
609 }
610
611 err = mlx4_en_activate_rx_rings(priv);
612 if (err) {
613 mlx4_err(mdev, "Failed to activate RX rings\n");
614 goto cq_err;
615 }
616
617 err = mlx4_en_config_rss_steer(priv);
618 if (err) {
619 mlx4_err(mdev, "Failed configuring rss steering\n");
620 goto rx_err;
621 }
622
623 /* Configure tx cq's and rings */
624 for (i = 0; i < priv->tx_ring_num; i++) {
625 /* Configure cq */
626 cq = &priv->tx_cq[i];
627 err = mlx4_en_activate_cq(priv, cq);
628 if (err) {
629 mlx4_err(mdev, "Failed allocating Tx CQ\n");
630 goto tx_err;
631 }
632 err = mlx4_en_set_cq_moder(priv, cq);
633 if (err) {
634 mlx4_err(mdev, "Failed setting cq moderation parameters");
635 mlx4_en_deactivate_cq(priv, cq);
636 goto tx_err;
637 }
638 mlx4_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
639 cq->buf->wqe_index = cpu_to_be16(0xffff);
640
641 /* Configure ring */
642 tx_ring = &priv->tx_ring[i];
643 err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
644 priv->rx_ring[0].srq.srqn);
645 if (err) {
646 mlx4_err(mdev, "Failed allocating Tx ring\n");
647 mlx4_en_deactivate_cq(priv, cq);
648 goto tx_err;
649 }
650 /* Set initial ownership of all Tx TXBBs to SW (1) */
651 for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
652 *((u32 *) (tx_ring->buf + j)) = 0xffffffff;
653 ++tx_index;
654 }
655
656 /* Configure port */
657 err = mlx4_SET_PORT_general(mdev->dev, priv->port,
658 priv->rx_skb_size + ETH_FCS_LEN,
659 priv->prof->tx_pause,
660 priv->prof->tx_ppp,
661 priv->prof->rx_pause,
662 priv->prof->rx_ppp);
663 if (err) {
664 mlx4_err(mdev, "Failed setting port general configurations"
665 " for port %d, with error %d\n", priv->port, err);
666 goto tx_err;
667 }
668 /* Set default qp number */
669 err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
670 if (err) {
671 mlx4_err(mdev, "Failed setting default qp numbers\n");
672 goto tx_err;
673 }
674 /* Set port mac number */
675 mlx4_dbg(DRV, priv, "Setting mac for port %d\n", priv->port);
676 err = mlx4_register_mac(mdev->dev, priv->port,
677 priv->mac, &priv->mac_index);
678 if (err) {
679 mlx4_err(mdev, "Failed setting port mac\n");
680 goto tx_err;
681 }
682
683 /* Init port */
684 mlx4_dbg(HW, priv, "Initializing port\n");
685 err = mlx4_INIT_PORT(mdev->dev, priv->port);
686 if (err) {
687 mlx4_err(mdev, "Failed Initializing port\n");
688 goto mac_err;
689 }
690
691 /* Schedule multicast task to populate multicast list */
692 queue_work(mdev->workqueue, &priv->mcast_task);
693
694 priv->port_up = true;
695 netif_start_queue(dev);
696 return 0;
697
698mac_err:
699 mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);
700tx_err:
701 while (tx_index--) {
702 mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]);
703 mlx4_en_deactivate_cq(priv, &priv->tx_cq[tx_index]);
704 }
705
706 mlx4_en_release_rss_steer(priv);
707rx_err:
708 for (i = 0; i < priv->rx_ring_num; i++)
709 mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
710cq_err:
711 while (rx_index--)
712 mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
713
714 return err; /* need to close devices */
715}
716
717
718static void mlx4_en_stop_port(struct net_device *dev)
719{
720 struct mlx4_en_priv *priv = netdev_priv(dev);
721 struct mlx4_en_dev *mdev = priv->mdev;
722 int i;
723
724 if (!priv->port_up) {
725 mlx4_dbg(DRV, priv, "stop port (%d) called while port already down\n",
726 priv->port);
727 return;
728 }
729 netif_stop_queue(dev);
730
731 /* Synchronize with tx routine */
732 netif_tx_lock_bh(dev);
733 priv->port_up = false;
734 netif_tx_unlock_bh(dev);
735
736	/* Close port */
737 mlx4_CLOSE_PORT(mdev->dev, priv->port);
738
739 /* Unregister Mac address for the port */
740 mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);
741
742 /* Free TX Rings */
743 for (i = 0; i < priv->tx_ring_num; i++) {
744 mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
745 mlx4_en_deactivate_cq(priv, &priv->tx_cq[i]);
746 }
747 msleep(10);
748
749 for (i = 0; i < priv->tx_ring_num; i++)
750 mlx4_en_free_tx_buf(dev, &priv->tx_ring[i]);
751
752 /* Free RSS qps */
753 mlx4_en_release_rss_steer(priv);
754
755 /* Free RX Rings */
756 for (i = 0; i < priv->rx_ring_num; i++) {
757 mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
758 while (test_bit(NAPI_STATE_SCHED, &priv->rx_cq[i].napi.state))
759 msleep(1);
760 mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
761 }
762}
763
764static void mlx4_en_restart(struct work_struct *work)
765{
766 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
767 watchdog_task);
768 struct mlx4_en_dev *mdev = priv->mdev;
769 struct net_device *dev = priv->dev;
770
771 mlx4_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
772 mlx4_en_stop_port(dev);
773 if (mlx4_en_start_port(dev))
774 mlx4_err(mdev, "Failed restarting port %d\n", priv->port);
775}
776
777
778static int mlx4_en_open(struct net_device *dev)
779{
780 struct mlx4_en_priv *priv = netdev_priv(dev);
781 struct mlx4_en_dev *mdev = priv->mdev;
782 int i;
783 int err = 0;
784
785 mutex_lock(&mdev->state_lock);
786
787 if (!mdev->device_up) {
788 mlx4_err(mdev, "Cannot open - device down/disabled\n");
789 err = -EBUSY;
790 goto out;
791 }
792
793 /* Reset HW statistics and performance counters */
794 if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
795 mlx4_dbg(HW, priv, "Failed dumping statistics\n");
796
797 memset(&priv->stats, 0, sizeof(priv->stats));
798 memset(&priv->pstats, 0, sizeof(priv->pstats));
799
800 for (i = 0; i < priv->tx_ring_num; i++) {
801 priv->tx_ring[i].bytes = 0;
802 priv->tx_ring[i].packets = 0;
803 }
804 for (i = 0; i < priv->rx_ring_num; i++) {
805 priv->rx_ring[i].bytes = 0;
806 priv->rx_ring[i].packets = 0;
807 }
808
809 mlx4_en_set_default_moderation(priv);
810 err = mlx4_en_start_port(dev);
811 if (err)
812 mlx4_err(mdev, "Failed starting port:%d\n", priv->port);
813
814out:
815 mutex_unlock(&mdev->state_lock);
816 return err;
817}
818
819
820static int mlx4_en_close(struct net_device *dev)
821{
822 struct mlx4_en_priv *priv = netdev_priv(dev);
823 struct mlx4_en_dev *mdev = priv->mdev;
824
825 if (netif_msg_ifdown(priv))
826 mlx4_info(mdev, "Close called for port:%d\n", priv->port);
827
828 mutex_lock(&mdev->state_lock);
829
830 mlx4_en_stop_port(dev);
831 netif_carrier_off(dev);
832
833 mutex_unlock(&mdev->state_lock);
834 return 0;
835}
836
837static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
838{
839 int i;
840
841 for (i = 0; i < priv->tx_ring_num; i++) {
842 if (priv->tx_ring[i].tx_info)
843 mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
844 if (priv->tx_cq[i].buf)
845 mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
846 }
847
848 for (i = 0; i < priv->rx_ring_num; i++) {
849 if (priv->rx_ring[i].rx_info)
850 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i]);
851 if (priv->rx_cq[i].buf)
852 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
853 }
854}
855
856static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
857{
858 struct mlx4_en_dev *mdev = priv->mdev;
859 struct mlx4_en_port_profile *prof = priv->prof;
860 int i;
861
862 /* Create tx Rings */
863 for (i = 0; i < priv->tx_ring_num; i++) {
864 if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
865 prof->tx_ring_size, i, TX))
866 goto err;
867
868 if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
869 prof->tx_ring_size, TXBB_SIZE))
870 goto err;
871 }
872
873 /* Create rx Rings */
874 for (i = 0; i < priv->rx_ring_num; i++) {
875 if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
876 prof->rx_ring_size, i, RX))
877 goto err;
878
879 if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
880 prof->rx_ring_size, priv->stride))
881 goto err;
882 }
883
884 return 0;
885
886err:
887 mlx4_err(mdev, "Failed to allocate NIC resources\n");
888 return -ENOMEM;
889}
890
891
892void mlx4_en_destroy_netdev(struct net_device *dev)
893{
894 struct mlx4_en_priv *priv = netdev_priv(dev);
895 struct mlx4_en_dev *mdev = priv->mdev;
896
897 mlx4_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
898
899 /* Unregister device - this will close the port if it was up */
900 if (priv->registered)
901 unregister_netdev(dev);
902
903 if (priv->allocated)
904 mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);
905
906 cancel_delayed_work(&priv->stats_task);
907 cancel_delayed_work(&priv->refill_task);
908 /* flush any pending task for this netdev */
909 flush_workqueue(mdev->workqueue);
910
911	/* Detach the netdev so that tasks do not attempt to access it */
912 mutex_lock(&mdev->state_lock);
913 mdev->pndev[priv->port] = NULL;
914 mutex_unlock(&mdev->state_lock);
915
916 mlx4_en_free_resources(priv);
917 free_netdev(dev);
918}
919
920static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
921{
922 struct mlx4_en_priv *priv = netdev_priv(dev);
923 struct mlx4_en_dev *mdev = priv->mdev;
924 int err = 0;
925
926 mlx4_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
927 dev->mtu, new_mtu);
928
929 if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
930 mlx4_err(mdev, "Bad MTU size:%d.\n", new_mtu);
931 return -EPERM;
932 }
933 dev->mtu = new_mtu;
934
935 if (netif_running(dev)) {
936 mutex_lock(&mdev->state_lock);
937 if (!mdev->device_up) {
938 /* NIC is probably restarting - let watchdog task reset
939 * the port */
940 mlx4_dbg(DRV, priv, "Change MTU called with card down!?\n");
941 } else {
942 mlx4_en_stop_port(dev);
943 mlx4_en_set_default_moderation(priv);
944 err = mlx4_en_start_port(dev);
945 if (err) {
946 mlx4_err(mdev, "Failed restarting port:%d\n",
947 priv->port);
948 queue_work(mdev->workqueue, &priv->watchdog_task);
949 }
950 }
951 mutex_unlock(&mdev->state_lock);
952 }
953 return 0;
954}
955
956int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
957 struct mlx4_en_port_profile *prof)
958{
959 struct net_device *dev;
960 struct mlx4_en_priv *priv;
961 int i;
962 int err;
963
964 dev = alloc_etherdev(sizeof(struct mlx4_en_priv));
965 if (dev == NULL) {
966 mlx4_err(mdev, "Net device allocation failed\n");
967 return -ENOMEM;
968 }
969
970 SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);
971
972 /*
973 * Initialize driver private data
974 */
975
976 priv = netdev_priv(dev);
977 memset(priv, 0, sizeof(struct mlx4_en_priv));
978 priv->dev = dev;
979 priv->mdev = mdev;
980 priv->prof = prof;
981 priv->port = port;
982 priv->port_up = false;
983 priv->rx_csum = 1;
984 priv->flags = prof->flags;
985 priv->tx_ring_num = prof->tx_ring_num;
986 priv->rx_ring_num = prof->rx_ring_num;
987 priv->mc_list = NULL;
988 priv->mac_index = -1;
989 priv->msg_enable = MLX4_EN_MSG_LEVEL;
990 spin_lock_init(&priv->stats_lock);
991 INIT_WORK(&priv->mcast_task, mlx4_en_do_set_multicast);
992 INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac);
993 INIT_DELAYED_WORK(&priv->refill_task, mlx4_en_rx_refill);
994 INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
995 INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
996 INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
997
998 /* Query for default mac and max mtu */
999 priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
1000 priv->mac = mdev->dev->caps.def_mac[priv->port];
1001 if (ILLEGAL_MAC(priv->mac)) {
1002 mlx4_err(mdev, "Port: %d, invalid mac burned: 0x%llx, quiting\n",
1003 priv->port, priv->mac);
1004 err = -EINVAL;
1005 goto out;
1006 }
1007
1008 priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
1009 DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
1010 err = mlx4_en_alloc_resources(priv);
1011 if (err)
1012 goto out;
1013
1014 /* Populate Rx default RSS mappings */
1015 mlx4_en_set_default_rss_map(priv, &priv->rss_map, priv->rx_ring_num *
1016 RSS_FACTOR, priv->rx_ring_num);
1017 /* Allocate page for receive rings */
1018 err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
1019 MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
1020 if (err) {
1021 mlx4_err(mdev, "Failed to allocate page for rx qps\n");
1022 goto out;
1023 }
1024 priv->allocated = 1;
1025
1026 /* Populate Tx priority mappings */
1027 mlx4_en_set_prio_map(priv, priv->tx_prio_map, prof->tx_ring_num);
1028
1029 /*
1030 * Initialize netdev entry points
1031 */
1032
1033 dev->open = &mlx4_en_open;
1034 dev->stop = &mlx4_en_close;
1035 dev->hard_start_xmit = &mlx4_en_xmit;
1036 dev->get_stats = &mlx4_en_get_stats;
1037 dev->set_multicast_list = &mlx4_en_set_multicast;
1038 dev->set_mac_address = &mlx4_en_set_mac;
1039 dev->change_mtu = &mlx4_en_change_mtu;
1040 dev->tx_timeout = &mlx4_en_tx_timeout;
1041 dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
1042 dev->vlan_rx_register = mlx4_en_vlan_rx_register;
1043 dev->vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid;
1044 dev->vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid;
1045#ifdef CONFIG_NET_POLL_CONTROLLER
1046 dev->poll_controller = mlx4_en_netpoll;
1047#endif
1048 SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);
1049
1050	/* Set default MAC */
1051 dev->addr_len = ETH_ALEN;
1052 for (i = 0; i < ETH_ALEN; i++)
1053 dev->dev_addr[ETH_ALEN - 1 - i] =
1054 (u8) (priv->mac >> (8 * i));
1055
1056 /*
1057 * Set driver features
1058 */
1059 dev->features |= NETIF_F_SG;
1060 dev->features |= NETIF_F_HW_CSUM;
1061 dev->features |= NETIF_F_HIGHDMA;
1062 dev->features |= NETIF_F_HW_VLAN_TX |
1063 NETIF_F_HW_VLAN_RX |
1064 NETIF_F_HW_VLAN_FILTER;
1065 if (mdev->profile.num_lro)
1066 dev->features |= NETIF_F_LRO;
1067 if (mdev->LSO_support) {
1068 dev->features |= NETIF_F_TSO;
1069 dev->features |= NETIF_F_TSO6;
1070 }
1071
1072 mdev->pndev[port] = dev;
1073
1074 netif_carrier_off(dev);
1075 err = register_netdev(dev);
1076 if (err) {
1077 mlx4_err(mdev, "Netdev registration failed\n");
1078 goto out;
1079 }
1080 priv->registered = 1;
1081 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
1082 return 0;
1083
1084out:
1085 mlx4_en_destroy_netdev(dev);
1086 return err;
1087}
1088
diff --git a/drivers/net/mlx4/en_params.c b/drivers/net/mlx4/en_params.c
new file mode 100644
index 000000000000..95706ee1c019
--- /dev/null
+++ b/drivers/net/mlx4/en_params.c
@@ -0,0 +1,482 @@
1/*
2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#include <linux/kernel.h>
35#include <linux/ethtool.h>
36#include <linux/netdevice.h>
37
38#include "mlx4_en.h"
39#include "en_port.h"
40
41#define MLX4_EN_PARM_INT(X, def_val, desc) \
42 static unsigned int X = def_val;\
43 module_param(X , uint, 0444); \
44	MODULE_PARM_DESC(X, desc);
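
For readability, here is what one MLX4_EN_PARM_INT() instance expands to, written out by hand; the sysfs path and modprobe invocation below are illustrative, assuming the driver is loaded as the mlx4_en module.

/* Hand-expanded form of MLX4_EN_PARM_INT(rss_xor, 0, "Use XOR hash function for RSS") */
static unsigned int rss_xor = 0;
module_param(rss_xor, uint, 0444);	/* 0444: read-only when exposed via sysfs */
MODULE_PARM_DESC(rss_xor, "Use XOR hash function for RSS");

Each such parameter then appears under /sys/module/mlx4_en/parameters/<name> and can be set at load time, e.g. modprobe mlx4_en rx_ring_num1=8 num_lro=0 (hypothetical values).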
45
46
47/*
48 * Device scope module parameters
49 */
50
51
52/* Use an XOR rather than a Toeplitz hash function for RSS */
53MLX4_EN_PARM_INT(rss_xor, 0, "Use XOR hash function for RSS");
54
55/* RSS hash type mask - default to <saddr, daddr, sport, dport> */
56MLX4_EN_PARM_INT(rss_mask, 0xf, "RSS hash type bitmask");
57
58/* Number of LRO sessions per Rx ring (rounded up to a power of two) */
59MLX4_EN_PARM_INT(num_lro, MLX4_EN_MAX_LRO_DESCRIPTORS,
60 "Number of LRO sessions per ring or disabled (0)");
61
62/* Priority pausing */
63MLX4_EN_PARM_INT(pptx, MLX4_EN_DEF_TX_PAUSE,
64 "Pause policy on TX: 0 never generate pause frames "
65 "1 generate pause frames according to RX buffer threshold");
66MLX4_EN_PARM_INT(pprx, MLX4_EN_DEF_RX_PAUSE,
67 "Pause policy on RX: 0 ignore received pause frames "
68 "1 respect received pause frames");
69MLX4_EN_PARM_INT(pfctx, 0, "Priority-based Flow Control policy on TX[7:0]."
70			   " Per-priority bit mask");
71MLX4_EN_PARM_INT(pfcrx, 0, "Priority-based Flow Control policy on RX[7:0]."
72			   " Per-priority bit mask");
73
74/* Interrupt moderation tuning */
75MLX4_EN_PARM_INT(rx_moder_cnt, MLX4_EN_AUTO_CONF,
76 "Max coalesced descriptors for Rx interrupt moderation");
77MLX4_EN_PARM_INT(rx_moder_time, MLX4_EN_AUTO_CONF,
78 "Timeout following last packet for Rx interrupt moderation");
79MLX4_EN_PARM_INT(auto_moder, 1, "Enable dynamic interrupt moderation");
80
81MLX4_EN_PARM_INT(rx_ring_num1, 0, "Number of Rx rings for port 1 (0 = #cores)");
82MLX4_EN_PARM_INT(rx_ring_num2, 0, "Number of Rx rings for port 2 (0 = #cores)");
83
84MLX4_EN_PARM_INT(tx_ring_size1, MLX4_EN_AUTO_CONF, "Tx ring size for port 1");
85MLX4_EN_PARM_INT(tx_ring_size2, MLX4_EN_AUTO_CONF, "Tx ring size for port 2");
86MLX4_EN_PARM_INT(rx_ring_size1, MLX4_EN_AUTO_CONF, "Rx ring size for port 1");
87MLX4_EN_PARM_INT(rx_ring_size2, MLX4_EN_AUTO_CONF, "Rx ring size for port 2");
88
89
90int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
91{
92 struct mlx4_en_profile *params = &mdev->profile;
93 int i;
94
95 params->rx_moder_cnt = min_t(int, rx_moder_cnt, MLX4_EN_AUTO_CONF);
96 params->rx_moder_time = min_t(int, rx_moder_time, MLX4_EN_AUTO_CONF);
97 params->auto_moder = auto_moder;
98 params->rss_xor = (rss_xor != 0);
99 params->rss_mask = rss_mask & 0x1f;
100 params->num_lro = min_t(int, num_lro , MLX4_EN_MAX_LRO_DESCRIPTORS);
101 for (i = 1; i <= MLX4_MAX_PORTS; i++) {
102 params->prof[i].rx_pause = pprx;
103 params->prof[i].rx_ppp = pfcrx;
104 params->prof[i].tx_pause = pptx;
105 params->prof[i].tx_ppp = pfctx;
106 }
107 if (pfcrx || pfctx) {
108 params->prof[1].tx_ring_num = MLX4_EN_TX_RING_NUM;
109 params->prof[2].tx_ring_num = MLX4_EN_TX_RING_NUM;
110 } else {
111 params->prof[1].tx_ring_num = 1;
112 params->prof[2].tx_ring_num = 1;
113 }
114 params->prof[1].rx_ring_num = min_t(int, rx_ring_num1, MAX_RX_RINGS);
115 params->prof[2].rx_ring_num = min_t(int, rx_ring_num2, MAX_RX_RINGS);
116
117 if (tx_ring_size1 == MLX4_EN_AUTO_CONF)
118 tx_ring_size1 = MLX4_EN_DEF_TX_RING_SIZE;
119 params->prof[1].tx_ring_size =
120 (tx_ring_size1 < MLX4_EN_MIN_TX_SIZE) ?
121 MLX4_EN_MIN_TX_SIZE : roundup_pow_of_two(tx_ring_size1);
122
123 if (tx_ring_size2 == MLX4_EN_AUTO_CONF)
124 tx_ring_size2 = MLX4_EN_DEF_TX_RING_SIZE;
125 params->prof[2].tx_ring_size =
126 (tx_ring_size2 < MLX4_EN_MIN_TX_SIZE) ?
127 MLX4_EN_MIN_TX_SIZE : roundup_pow_of_two(tx_ring_size2);
128
129 if (rx_ring_size1 == MLX4_EN_AUTO_CONF)
130 rx_ring_size1 = MLX4_EN_DEF_RX_RING_SIZE;
131 params->prof[1].rx_ring_size =
132 (rx_ring_size1 < MLX4_EN_MIN_RX_SIZE) ?
133 MLX4_EN_MIN_RX_SIZE : roundup_pow_of_two(rx_ring_size1);
134
135 if (rx_ring_size2 == MLX4_EN_AUTO_CONF)
136 rx_ring_size2 = MLX4_EN_DEF_RX_RING_SIZE;
137 params->prof[2].rx_ring_size =
138 (rx_ring_size2 < MLX4_EN_MIN_RX_SIZE) ?
139 MLX4_EN_MIN_RX_SIZE : roundup_pow_of_two(rx_ring_size2);
140 return 0;
141}
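
The ring-size handling above clamps each requested size to a minimum and otherwise rounds it up to the next power of two. A small standalone illustration follows; the minimum of 64 is an example value, not necessarily MLX4_EN_MIN_TX_SIZE or MLX4_EN_MIN_RX_SIZE.

/* Illustration of the ring-size normalization above; 64 is an example
 * minimum, not necessarily the driver's MLX4_EN_MIN_*_SIZE value. */
static unsigned int roundup_pow_of_two_u(unsigned int v)
{
	unsigned int p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

static unsigned int normalize_ring_size(unsigned int requested, unsigned int min)
{
	return requested < min ? min : roundup_pow_of_two_u(requested);
}

/* normalize_ring_size(1000, 64) == 1024, normalize_ring_size(10, 64) == 64 */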
142
143
144/*
145 * Ethtool support
146 */
147
148static void mlx4_en_update_lro_stats(struct mlx4_en_priv *priv)
149{
150 int i;
151
152 priv->port_stats.lro_aggregated = 0;
153 priv->port_stats.lro_flushed = 0;
154 priv->port_stats.lro_no_desc = 0;
155
156 for (i = 0; i < priv->rx_ring_num; i++) {
157 priv->port_stats.lro_aggregated += priv->rx_ring[i].lro.stats.aggregated;
158 priv->port_stats.lro_flushed += priv->rx_ring[i].lro.stats.flushed;
159 priv->port_stats.lro_no_desc += priv->rx_ring[i].lro.stats.no_desc;
160 }
161}
162
163static void
164mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
165{
166 struct mlx4_en_priv *priv = netdev_priv(dev);
167 struct mlx4_en_dev *mdev = priv->mdev;
168
169 sprintf(drvinfo->driver, DRV_NAME " (%s)", mdev->dev->board_id);
170 strncpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")", 32);
171 sprintf(drvinfo->fw_version, "%d.%d.%d",
172 (u16) (mdev->dev->caps.fw_ver >> 32),
173 (u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff),
174 (u16) (mdev->dev->caps.fw_ver & 0xffff));
175 strncpy(drvinfo->bus_info, pci_name(mdev->dev->pdev), 32);
176 drvinfo->n_stats = 0;
177 drvinfo->regdump_len = 0;
178 drvinfo->eedump_len = 0;
179}
180
181static u32 mlx4_en_get_tso(struct net_device *dev)
182{
183 return (dev->features & NETIF_F_TSO) != 0;
184}
185
186static int mlx4_en_set_tso(struct net_device *dev, u32 data)
187{
188 struct mlx4_en_priv *priv = netdev_priv(dev);
189
190 if (data) {
191 if (!priv->mdev->LSO_support)
192 return -EPERM;
193 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
194 } else
195 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
196 return 0;
197}
198
199static u32 mlx4_en_get_rx_csum(struct net_device *dev)
200{
201 struct mlx4_en_priv *priv = netdev_priv(dev);
202 return priv->rx_csum;
203}
204
205static int mlx4_en_set_rx_csum(struct net_device *dev, u32 data)
206{
207 struct mlx4_en_priv *priv = netdev_priv(dev);
208 priv->rx_csum = (data != 0);
209 return 0;
210}
211
212static const char main_strings[][ETH_GSTRING_LEN] = {
213 "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
214 "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
215 "rx_length_errors", "rx_over_errors", "rx_crc_errors",
216 "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
217 "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
218 "tx_heartbeat_errors", "tx_window_errors",
219
220 /* port statistics */
221 "lro_aggregated", "lro_flushed", "lro_no_desc", "tso_packets",
222 "queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed",
223 "rx_csum_good", "rx_csum_none", "tx_chksum_offload",
224
225 /* packet statistics */
226 "broadcast", "rx_prio_0", "rx_prio_1", "rx_prio_2", "rx_prio_3",
227 "rx_prio_4", "rx_prio_5", "rx_prio_6", "rx_prio_7", "tx_prio_0",
228 "tx_prio_1", "tx_prio_2", "tx_prio_3", "tx_prio_4", "tx_prio_5",
229 "tx_prio_6", "tx_prio_7",
230};
231#define NUM_MAIN_STATS 21
232#define NUM_ALL_STATS (NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + NUM_PERF_STATS)
233
234static u32 mlx4_en_get_msglevel(struct net_device *dev)
235{
236 return ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable;
237}
238
239static void mlx4_en_set_msglevel(struct net_device *dev, u32 val)
240{
241 ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable = val;
242}
243
244static void mlx4_en_get_wol(struct net_device *netdev,
245 struct ethtool_wolinfo *wol)
246{
247 wol->supported = 0;
248 wol->wolopts = 0;
249
250 return;
251}
252
253static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
254{
255 struct mlx4_en_priv *priv = netdev_priv(dev);
256
257 if (sset != ETH_SS_STATS)
258 return -EOPNOTSUPP;
259
260 return NUM_ALL_STATS + (priv->tx_ring_num + priv->rx_ring_num) * 2;
261}
262
263static void mlx4_en_get_ethtool_stats(struct net_device *dev,
264 struct ethtool_stats *stats, uint64_t *data)
265{
266 struct mlx4_en_priv *priv = netdev_priv(dev);
267 int index = 0;
268 int i;
269
270 spin_lock_bh(&priv->stats_lock);
271
272 mlx4_en_update_lro_stats(priv);
273
274 for (i = 0; i < NUM_MAIN_STATS; i++)
275 data[index++] = ((unsigned long *) &priv->stats)[i];
276 for (i = 0; i < NUM_PORT_STATS; i++)
277 data[index++] = ((unsigned long *) &priv->port_stats)[i];
278 for (i = 0; i < priv->tx_ring_num; i++) {
279 data[index++] = priv->tx_ring[i].packets;
280 data[index++] = priv->tx_ring[i].bytes;
281 }
282 for (i = 0; i < priv->rx_ring_num; i++) {
283 data[index++] = priv->rx_ring[i].packets;
284 data[index++] = priv->rx_ring[i].bytes;
285 }
286 for (i = 0; i < NUM_PKT_STATS; i++)
287 data[index++] = ((unsigned long *) &priv->pkstats)[i];
288 spin_unlock_bh(&priv->stats_lock);
289
290}
291
292static void mlx4_en_get_strings(struct net_device *dev,
293 uint32_t stringset, uint8_t *data)
294{
295 struct mlx4_en_priv *priv = netdev_priv(dev);
296 int index = 0;
297 int i;
298
299 if (stringset != ETH_SS_STATS)
300 return;
301
302 /* Add main counters */
303 for (i = 0; i < NUM_MAIN_STATS; i++)
304 strcpy(data + (index++) * ETH_GSTRING_LEN, main_strings[i]);
305 for (i = 0; i < NUM_PORT_STATS; i++)
306 strcpy(data + (index++) * ETH_GSTRING_LEN,
307 main_strings[i + NUM_MAIN_STATS]);
308 for (i = 0; i < priv->tx_ring_num; i++) {
309 sprintf(data + (index++) * ETH_GSTRING_LEN,
310 "tx%d_packets", i);
311 sprintf(data + (index++) * ETH_GSTRING_LEN,
312 "tx%d_bytes", i);
313 }
314 for (i = 0; i < priv->rx_ring_num; i++) {
315 sprintf(data + (index++) * ETH_GSTRING_LEN,
316 "rx%d_packets", i);
317 sprintf(data + (index++) * ETH_GSTRING_LEN,
318 "rx%d_bytes", i);
319 }
320 for (i = 0; i < NUM_PKT_STATS; i++)
321 strcpy(data + (index++) * ETH_GSTRING_LEN,
322 main_strings[i + NUM_MAIN_STATS + NUM_PORT_STATS]);
323}
324
325static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
326{
327 cmd->autoneg = AUTONEG_DISABLE;
328 cmd->supported = SUPPORTED_10000baseT_Full;
329 cmd->advertising = SUPPORTED_10000baseT_Full;
330 if (netif_carrier_ok(dev)) {
331 cmd->speed = SPEED_10000;
332 cmd->duplex = DUPLEX_FULL;
333 } else {
334 cmd->speed = -1;
335 cmd->duplex = -1;
336 }
337 return 0;
338}
339
340static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
341{
342 if ((cmd->autoneg == AUTONEG_ENABLE) ||
343 (cmd->speed != SPEED_10000) || (cmd->duplex != DUPLEX_FULL))
344 return -EINVAL;
345
346 /* Nothing to change */
347 return 0;
348}
349
350static int mlx4_en_get_coalesce(struct net_device *dev,
351 struct ethtool_coalesce *coal)
352{
353 struct mlx4_en_priv *priv = netdev_priv(dev);
354
355 coal->tx_coalesce_usecs = 0;
356 coal->tx_max_coalesced_frames = 0;
357 coal->rx_coalesce_usecs = priv->rx_usecs;
358 coal->rx_max_coalesced_frames = priv->rx_frames;
359
360 coal->pkt_rate_low = priv->pkt_rate_low;
361 coal->rx_coalesce_usecs_low = priv->rx_usecs_low;
362 coal->pkt_rate_high = priv->pkt_rate_high;
363 coal->rx_coalesce_usecs_high = priv->rx_usecs_high;
364 coal->rate_sample_interval = priv->sample_interval;
365 coal->use_adaptive_rx_coalesce = priv->adaptive_rx_coal;
366 return 0;
367}
368
369static int mlx4_en_set_coalesce(struct net_device *dev,
370 struct ethtool_coalesce *coal)
371{
372 struct mlx4_en_priv *priv = netdev_priv(dev);
373 int err, i;
374
375 priv->rx_frames = (coal->rx_max_coalesced_frames ==
376 MLX4_EN_AUTO_CONF) ?
377 MLX4_EN_RX_COAL_TARGET /
378 priv->dev->mtu + 1 :
379 coal->rx_max_coalesced_frames;
380 priv->rx_usecs = (coal->rx_coalesce_usecs ==
381 MLX4_EN_AUTO_CONF) ?
382 MLX4_EN_RX_COAL_TIME :
383 coal->rx_coalesce_usecs;
384
385 /* Set adaptive coalescing params */
386 priv->pkt_rate_low = coal->pkt_rate_low;
387 priv->rx_usecs_low = coal->rx_coalesce_usecs_low;
388 priv->pkt_rate_high = coal->pkt_rate_high;
389 priv->rx_usecs_high = coal->rx_coalesce_usecs_high;
390 priv->sample_interval = coal->rate_sample_interval;
391 priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce;
392 priv->last_moder_time = MLX4_EN_AUTO_CONF;
393 if (priv->adaptive_rx_coal)
394 return 0;
395
396 for (i = 0; i < priv->rx_ring_num; i++) {
397 priv->rx_cq[i].moder_cnt = priv->rx_frames;
398 priv->rx_cq[i].moder_time = priv->rx_usecs;
399 err = mlx4_en_set_cq_moder(priv, &priv->rx_cq[i]);
400 if (err)
401 return err;
402 }
403 return 0;
404}
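
When rx_max_coalesced_frames is left at MLX4_EN_AUTO_CONF, the frame budget above is derived from a byte target divided by the MTU. As a purely illustrative computation (MLX4_EN_RX_COAL_TARGET is defined in mlx4_en.h and its actual value is not shown in this hunk): with a hypothetical target of 0x10000 bytes and a 1500-byte MTU, 65536 / 1500 + 1 == 44 frames.

/* Illustrative only: 0x10000 stands in for MLX4_EN_RX_COAL_TARGET, whose
 * actual value is defined in mlx4_en.h and not shown in this hunk. */
static unsigned int auto_rx_frames(unsigned int coal_target_bytes, unsigned int mtu)
{
	return coal_target_bytes / mtu + 1;	/* 65536 / 1500 + 1 == 44 */
}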
405
406static int mlx4_en_set_pauseparam(struct net_device *dev,
407 struct ethtool_pauseparam *pause)
408{
409 struct mlx4_en_priv *priv = netdev_priv(dev);
410 struct mlx4_en_dev *mdev = priv->mdev;
411 int err;
412
413 priv->prof->tx_pause = pause->tx_pause != 0;
414 priv->prof->rx_pause = pause->rx_pause != 0;
415 err = mlx4_SET_PORT_general(mdev->dev, priv->port,
416 priv->rx_skb_size + ETH_FCS_LEN,
417 priv->prof->tx_pause,
418 priv->prof->tx_ppp,
419 priv->prof->rx_pause,
420 priv->prof->rx_ppp);
421 if (err)
422 mlx4_err(mdev, "Failed setting pause params to\n");
423
424 return err;
425}
426
427static void mlx4_en_get_pauseparam(struct net_device *dev,
428 struct ethtool_pauseparam *pause)
429{
430 struct mlx4_en_priv *priv = netdev_priv(dev);
431
432 pause->tx_pause = priv->prof->tx_pause;
433 pause->rx_pause = priv->prof->rx_pause;
434}
435
436static void mlx4_en_get_ringparam(struct net_device *dev,
437 struct ethtool_ringparam *param)
438{
439 struct mlx4_en_priv *priv = netdev_priv(dev);
440 struct mlx4_en_dev *mdev = priv->mdev;
441
442 memset(param, 0, sizeof(*param));
443 param->rx_max_pending = mdev->dev->caps.max_rq_sg;
444 param->tx_max_pending = mdev->dev->caps.max_sq_sg;
445 param->rx_pending = mdev->profile.prof[priv->port].rx_ring_size;
446 param->tx_pending = mdev->profile.prof[priv->port].tx_ring_size;
447}
448
449const struct ethtool_ops mlx4_en_ethtool_ops = {
450 .get_drvinfo = mlx4_en_get_drvinfo,
451 .get_settings = mlx4_en_get_settings,
452 .set_settings = mlx4_en_set_settings,
453#ifdef NETIF_F_TSO
454 .get_tso = mlx4_en_get_tso,
455 .set_tso = mlx4_en_set_tso,
456#endif
457 .get_sg = ethtool_op_get_sg,
458 .set_sg = ethtool_op_set_sg,
459 .get_link = ethtool_op_get_link,
460 .get_rx_csum = mlx4_en_get_rx_csum,
461 .set_rx_csum = mlx4_en_set_rx_csum,
462 .get_tx_csum = ethtool_op_get_tx_csum,
463 .set_tx_csum = ethtool_op_set_tx_ipv6_csum,
464 .get_strings = mlx4_en_get_strings,
465 .get_sset_count = mlx4_en_get_sset_count,
466 .get_ethtool_stats = mlx4_en_get_ethtool_stats,
467 .get_wol = mlx4_en_get_wol,
468 .get_msglevel = mlx4_en_get_msglevel,
469 .set_msglevel = mlx4_en_set_msglevel,
470 .get_coalesce = mlx4_en_get_coalesce,
471 .set_coalesce = mlx4_en_set_coalesce,
472 .get_pauseparam = mlx4_en_get_pauseparam,
473 .set_pauseparam = mlx4_en_set_pauseparam,
474 .get_ringparam = mlx4_en_get_ringparam,
475 .get_flags = ethtool_op_get_flags,
476 .set_flags = ethtool_op_set_flags,
477};
478
479
480
481
482
diff --git a/drivers/net/mlx4/en_port.c b/drivers/net/mlx4/en_port.c
new file mode 100644
index 000000000000..c5a4c0389752
--- /dev/null
+++ b/drivers/net/mlx4/en_port.c
@@ -0,0 +1,261 @@
1/*
2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34
35#include <linux/if_vlan.h>
36
37#include <linux/mlx4/device.h>
38#include <linux/mlx4/cmd.h>
39
40#include "en_port.h"
41#include "mlx4_en.h"
42
43
44int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
45 u64 mac, u64 clear, u8 mode)
46{
47 return mlx4_cmd(dev, (mac | (clear << 63)), port, mode,
48 MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B);
49}
50
51int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, u8 port, struct vlan_group *grp)
52{
53 struct mlx4_cmd_mailbox *mailbox;
54 struct mlx4_set_vlan_fltr_mbox *filter;
55 int i;
56 int j;
57 int index = 0;
58 u32 entry;
59 int err = 0;
60
61 mailbox = mlx4_alloc_cmd_mailbox(dev);
62 if (IS_ERR(mailbox))
63 return PTR_ERR(mailbox);
64
65 filter = mailbox->buf;
66 if (grp) {
67 memset(filter, 0, sizeof *filter);
68 for (i = VLAN_FLTR_SIZE - 1; i >= 0; i--) {
69 entry = 0;
70 for (j = 0; j < 32; j++)
71 if (vlan_group_get_device(grp, index++))
72 entry |= 1 << j;
73 filter->entry[i] = cpu_to_be32(entry);
74 }
75 } else {
76		/* When no VLANs are configured, block all VLANs */
77 memset(filter, 0, sizeof(*filter));
78 }
79 err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_VLAN_FLTR,
80 MLX4_CMD_TIME_CLASS_B);
81 mlx4_free_cmd_mailbox(dev, mailbox);
82 return err;
83}
84
85
86int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
87 u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx)
88{
89 struct mlx4_cmd_mailbox *mailbox;
90 struct mlx4_set_port_general_context *context;
91 int err;
92 u32 in_mod;
93
94 mailbox = mlx4_alloc_cmd_mailbox(dev);
95 if (IS_ERR(mailbox))
96 return PTR_ERR(mailbox);
97 context = mailbox->buf;
98 memset(context, 0, sizeof *context);
99
100 context->flags = SET_PORT_GEN_ALL_VALID;
101 context->mtu = cpu_to_be16(mtu);
102 context->pptx = (pptx * (!pfctx)) << 7;
103 context->pfctx = pfctx;
104 context->pprx = (pprx * (!pfcrx)) << 7;
105 context->pfcrx = pfcrx;
106
107 in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
108 err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
109 MLX4_CMD_TIME_CLASS_B);
110
111 mlx4_free_cmd_mailbox(dev, mailbox);
112 return err;
113}
114
115int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
116 u8 promisc)
117{
118 struct mlx4_cmd_mailbox *mailbox;
119 struct mlx4_set_port_rqp_calc_context *context;
120 int err;
121 u32 in_mod;
122
123 mailbox = mlx4_alloc_cmd_mailbox(dev);
124 if (IS_ERR(mailbox))
125 return PTR_ERR(mailbox);
126 context = mailbox->buf;
127 memset(context, 0, sizeof *context);
128
129 context->base_qpn = cpu_to_be32(base_qpn);
130 context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT | base_qpn);
131 context->mcast = cpu_to_be32(1 << SET_PORT_PROMISC_SHIFT | base_qpn);
132 context->intra_no_vlan = 0;
133 context->no_vlan = MLX4_NO_VLAN_IDX;
134 context->intra_vlan_miss = 0;
135 context->vlan_miss = MLX4_VLAN_MISS_IDX;
136
137 in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
138 err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
139 MLX4_CMD_TIME_CLASS_B);
140
141 mlx4_free_cmd_mailbox(dev, mailbox);
142 return err;
143}
144
145
146int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
147{
148 struct mlx4_en_stat_out_mbox *mlx4_en_stats;
149 struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]);
150 struct net_device_stats *stats = &priv->stats;
151 struct mlx4_cmd_mailbox *mailbox;
152 u64 in_mod = reset << 8 | port;
153 int err;
154
155 mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
156 if (IS_ERR(mailbox))
157 return PTR_ERR(mailbox);
158 memset(mailbox->buf, 0, sizeof(*mlx4_en_stats));
159 err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
160 MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B);
161 if (err)
162 goto out;
163
164 mlx4_en_stats = mailbox->buf;
165
166 spin_lock_bh(&priv->stats_lock);
167
168 stats->rx_packets = be32_to_cpu(mlx4_en_stats->RTOTFRMS) -
169 be32_to_cpu(mlx4_en_stats->RDROP);
170 stats->tx_packets = be64_to_cpu(mlx4_en_stats->TTOT_prio_0) +
171 be64_to_cpu(mlx4_en_stats->TTOT_prio_1) +
172 be64_to_cpu(mlx4_en_stats->TTOT_prio_2) +
173 be64_to_cpu(mlx4_en_stats->TTOT_prio_3) +
174 be64_to_cpu(mlx4_en_stats->TTOT_prio_4) +
175 be64_to_cpu(mlx4_en_stats->TTOT_prio_5) +
176 be64_to_cpu(mlx4_en_stats->TTOT_prio_6) +
177 be64_to_cpu(mlx4_en_stats->TTOT_prio_7) +
178 be64_to_cpu(mlx4_en_stats->TTOT_novlan) +
179 be64_to_cpu(mlx4_en_stats->TTOT_loopbk);
180 stats->rx_bytes = be64_to_cpu(mlx4_en_stats->ROCT_prio_0) +
181 be64_to_cpu(mlx4_en_stats->ROCT_prio_1) +
182 be64_to_cpu(mlx4_en_stats->ROCT_prio_2) +
183 be64_to_cpu(mlx4_en_stats->ROCT_prio_3) +
184 be64_to_cpu(mlx4_en_stats->ROCT_prio_4) +
185 be64_to_cpu(mlx4_en_stats->ROCT_prio_5) +
186 be64_to_cpu(mlx4_en_stats->ROCT_prio_6) +
187 be64_to_cpu(mlx4_en_stats->ROCT_prio_7) +
188 be64_to_cpu(mlx4_en_stats->ROCT_novlan);
189
190 stats->tx_bytes = be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_0) +
191 be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_1) +
192 be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_2) +
193 be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_3) +
194 be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_4) +
195 be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_5) +
196 be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_6) +
197 be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_7) +
198 be64_to_cpu(mlx4_en_stats->TTTLOCT_novlan) +
199 be64_to_cpu(mlx4_en_stats->TTTLOCT_loopbk);
200
201 stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) +
202 be32_to_cpu(mlx4_en_stats->RdropLength) +
203 be32_to_cpu(mlx4_en_stats->RJBBR) +
204 be32_to_cpu(mlx4_en_stats->RCRC) +
205 be32_to_cpu(mlx4_en_stats->RRUNT);
206 stats->tx_errors = be32_to_cpu(mlx4_en_stats->TDROP);
207 stats->multicast = be64_to_cpu(mlx4_en_stats->MCAST_prio_0) +
208 be64_to_cpu(mlx4_en_stats->MCAST_prio_1) +
209 be64_to_cpu(mlx4_en_stats->MCAST_prio_2) +
210 be64_to_cpu(mlx4_en_stats->MCAST_prio_3) +
211 be64_to_cpu(mlx4_en_stats->MCAST_prio_4) +
212 be64_to_cpu(mlx4_en_stats->MCAST_prio_5) +
213 be64_to_cpu(mlx4_en_stats->MCAST_prio_6) +
214 be64_to_cpu(mlx4_en_stats->MCAST_prio_7) +
215 be64_to_cpu(mlx4_en_stats->MCAST_novlan);
216 stats->collisions = 0;
217 stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
218 stats->rx_over_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
219 stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
220 stats->rx_frame_errors = 0;
221 stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
222 stats->rx_missed_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
223 stats->tx_aborted_errors = 0;
224 stats->tx_carrier_errors = 0;
225 stats->tx_fifo_errors = 0;
226 stats->tx_heartbeat_errors = 0;
227 stats->tx_window_errors = 0;
228
229 priv->pkstats.broadcast =
230 be64_to_cpu(mlx4_en_stats->RBCAST_prio_0) +
231 be64_to_cpu(mlx4_en_stats->RBCAST_prio_1) +
232 be64_to_cpu(mlx4_en_stats->RBCAST_prio_2) +
233 be64_to_cpu(mlx4_en_stats->RBCAST_prio_3) +
234 be64_to_cpu(mlx4_en_stats->RBCAST_prio_4) +
235 be64_to_cpu(mlx4_en_stats->RBCAST_prio_5) +
236 be64_to_cpu(mlx4_en_stats->RBCAST_prio_6) +
237 be64_to_cpu(mlx4_en_stats->RBCAST_prio_7) +
238 be64_to_cpu(mlx4_en_stats->RBCAST_novlan);
239 priv->pkstats.rx_prio[0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_0);
240 priv->pkstats.rx_prio[1] = be64_to_cpu(mlx4_en_stats->RTOT_prio_1);
241 priv->pkstats.rx_prio[2] = be64_to_cpu(mlx4_en_stats->RTOT_prio_2);
242 priv->pkstats.rx_prio[3] = be64_to_cpu(mlx4_en_stats->RTOT_prio_3);
243 priv->pkstats.rx_prio[4] = be64_to_cpu(mlx4_en_stats->RTOT_prio_4);
244 priv->pkstats.rx_prio[5] = be64_to_cpu(mlx4_en_stats->RTOT_prio_5);
245 priv->pkstats.rx_prio[6] = be64_to_cpu(mlx4_en_stats->RTOT_prio_6);
246 priv->pkstats.rx_prio[7] = be64_to_cpu(mlx4_en_stats->RTOT_prio_7);
247 priv->pkstats.tx_prio[0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_0);
248 priv->pkstats.tx_prio[1] = be64_to_cpu(mlx4_en_stats->TTOT_prio_1);
249 priv->pkstats.tx_prio[2] = be64_to_cpu(mlx4_en_stats->TTOT_prio_2);
250 priv->pkstats.tx_prio[3] = be64_to_cpu(mlx4_en_stats->TTOT_prio_3);
251 priv->pkstats.tx_prio[4] = be64_to_cpu(mlx4_en_stats->TTOT_prio_4);
252 priv->pkstats.tx_prio[5] = be64_to_cpu(mlx4_en_stats->TTOT_prio_5);
253 priv->pkstats.tx_prio[6] = be64_to_cpu(mlx4_en_stats->TTOT_prio_6);
254 priv->pkstats.tx_prio[7] = be64_to_cpu(mlx4_en_stats->TTOT_prio_7);
255 spin_unlock_bh(&priv->stats_lock);
256
257out:
258 mlx4_free_cmd_mailbox(mdev->dev, mailbox);
259 return err;
260}
261
diff --git a/drivers/net/mlx4/en_port.h b/drivers/net/mlx4/en_port.h
new file mode 100644
index 000000000000..e6477f12beb5
--- /dev/null
+++ b/drivers/net/mlx4/en_port.h
@@ -0,0 +1,570 @@
1/*
2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#ifndef _MLX4_EN_PORT_H_
35#define _MLX4_EN_PORT_H_
36
37
38#define SET_PORT_GEN_ALL_VALID 0x7
39#define SET_PORT_PROMISC_SHIFT 31
40
41enum {
42 MLX4_CMD_SET_VLAN_FLTR = 0x47,
43 MLX4_CMD_SET_MCAST_FLTR = 0x48,
44 MLX4_CMD_DUMP_ETH_STATS = 0x49,
45};
46
47struct mlx4_set_port_general_context {
48 u8 reserved[3];
49 u8 flags;
50 u16 reserved2;
51 __be16 mtu;
52 u8 pptx;
53 u8 pfctx;
54 u16 reserved3;
55 u8 pprx;
56 u8 pfcrx;
57 u16 reserved4;
58};
59
60struct mlx4_set_port_rqp_calc_context {
61 __be32 base_qpn;
62 __be32 flags;
63 u8 reserved[3];
64 u8 mac_miss;
65 u8 intra_no_vlan;
66 u8 no_vlan;
67 u8 intra_vlan_miss;
68 u8 vlan_miss;
69 u8 reserved2[3];
70 u8 no_vlan_prio;
71 __be32 promisc;
72 __be32 mcast;
73};
74
75#define VLAN_FLTR_SIZE 128
76struct mlx4_set_vlan_fltr_mbox {
77 __be32 entry[VLAN_FLTR_SIZE];
78};
79
80
81enum {
82 MLX4_MCAST_CONFIG = 0,
83 MLX4_MCAST_DISABLE = 1,
84 MLX4_MCAST_ENABLE = 2,
85};
86
87
88struct mlx4_en_stat_out_mbox {
89 /* Received frames with a length of 64 octets */
90 __be64 R64_prio_0;
91 __be64 R64_prio_1;
92 __be64 R64_prio_2;
93 __be64 R64_prio_3;
94 __be64 R64_prio_4;
95 __be64 R64_prio_5;
96 __be64 R64_prio_6;
97 __be64 R64_prio_7;
98 __be64 R64_novlan;
99 /* Received frames with a length of 127 octets */
100 __be64 R127_prio_0;
101 __be64 R127_prio_1;
102 __be64 R127_prio_2;
103 __be64 R127_prio_3;
104 __be64 R127_prio_4;
105 __be64 R127_prio_5;
106 __be64 R127_prio_6;
107 __be64 R127_prio_7;
108 __be64 R127_novlan;
109 /* Received frames with a length of 255 octets */
110 __be64 R255_prio_0;
111 __be64 R255_prio_1;
112 __be64 R255_prio_2;
113 __be64 R255_prio_3;
114 __be64 R255_prio_4;
115 __be64 R255_prio_5;
116 __be64 R255_prio_6;
117 __be64 R255_prio_7;
118 __be64 R255_novlan;
119 /* Received frames with a length of 511 octets */
120 __be64 R511_prio_0;
121 __be64 R511_prio_1;
122 __be64 R511_prio_2;
123 __be64 R511_prio_3;
124 __be64 R511_prio_4;
125 __be64 R511_prio_5;
126 __be64 R511_prio_6;
127 __be64 R511_prio_7;
128 __be64 R511_novlan;
129 /* Received frames with a length of 1023 octets */
130 __be64 R1023_prio_0;
131 __be64 R1023_prio_1;
132 __be64 R1023_prio_2;
133 __be64 R1023_prio_3;
134 __be64 R1023_prio_4;
135 __be64 R1023_prio_5;
136 __be64 R1023_prio_6;
137 __be64 R1023_prio_7;
138 __be64 R1023_novlan;
139 /* Received frames with a length of 1518 octets */
140 __be64 R1518_prio_0;
141 __be64 R1518_prio_1;
142 __be64 R1518_prio_2;
143 __be64 R1518_prio_3;
144 __be64 R1518_prio_4;
145 __be64 R1518_prio_5;
146 __be64 R1518_prio_6;
147 __be64 R1518_prio_7;
148 __be64 R1518_novlan;
149 /* Received frames with a length of 1522 octets */
150 __be64 R1522_prio_0;
151 __be64 R1522_prio_1;
152 __be64 R1522_prio_2;
153 __be64 R1522_prio_3;
154 __be64 R1522_prio_4;
155 __be64 R1522_prio_5;
156 __be64 R1522_prio_6;
157 __be64 R1522_prio_7;
158 __be64 R1522_novlan;
159 /* Received frames with a length of 1548 octets */
160 __be64 R1548_prio_0;
161 __be64 R1548_prio_1;
162 __be64 R1548_prio_2;
163 __be64 R1548_prio_3;
164 __be64 R1548_prio_4;
165 __be64 R1548_prio_5;
166 __be64 R1548_prio_6;
167 __be64 R1548_prio_7;
168 __be64 R1548_novlan;
169 /* Received frames with a length of 1548 < octets < MTU */
170 __be64 R2MTU_prio_0;
171 __be64 R2MTU_prio_1;
172 __be64 R2MTU_prio_2;
173 __be64 R2MTU_prio_3;
174 __be64 R2MTU_prio_4;
175 __be64 R2MTU_prio_5;
176 __be64 R2MTU_prio_6;
177 __be64 R2MTU_prio_7;
178 __be64 R2MTU_novlan;
179	/* Received frames with a length of MTU < octets and a good CRC */
180 __be64 RGIANT_prio_0;
181 __be64 RGIANT_prio_1;
182 __be64 RGIANT_prio_2;
183 __be64 RGIANT_prio_3;
184 __be64 RGIANT_prio_4;
185 __be64 RGIANT_prio_5;
186 __be64 RGIANT_prio_6;
187 __be64 RGIANT_prio_7;
188 __be64 RGIANT_novlan;
189 /* Received broadcast frames with good CRC */
190 __be64 RBCAST_prio_0;
191 __be64 RBCAST_prio_1;
192 __be64 RBCAST_prio_2;
193 __be64 RBCAST_prio_3;
194 __be64 RBCAST_prio_4;
195 __be64 RBCAST_prio_5;
196 __be64 RBCAST_prio_6;
197 __be64 RBCAST_prio_7;
198 __be64 RBCAST_novlan;
199 /* Received multicast frames with good CRC */
200 __be64 MCAST_prio_0;
201 __be64 MCAST_prio_1;
202 __be64 MCAST_prio_2;
203 __be64 MCAST_prio_3;
204 __be64 MCAST_prio_4;
205 __be64 MCAST_prio_5;
206 __be64 MCAST_prio_6;
207 __be64 MCAST_prio_7;
208 __be64 MCAST_novlan;
209 /* Received unicast not short or GIANT frames with good CRC */
210 __be64 RTOTG_prio_0;
211 __be64 RTOTG_prio_1;
212 __be64 RTOTG_prio_2;
213 __be64 RTOTG_prio_3;
214 __be64 RTOTG_prio_4;
215 __be64 RTOTG_prio_5;
216 __be64 RTOTG_prio_6;
217 __be64 RTOTG_prio_7;
218 __be64 RTOTG_novlan;
219
220 /* Count of total octets of received frames, includes framing characters */
221 __be64 RTTLOCT_prio_0;
222 /* Count of total octets of received frames, not including framing
223 characters */
224 __be64 RTTLOCT_NOFRM_prio_0;
225 /* Count of Total number of octets received
226 (only for frames without errors) */
227 __be64 ROCT_prio_0;
228
229 __be64 RTTLOCT_prio_1;
230 __be64 RTTLOCT_NOFRM_prio_1;
231 __be64 ROCT_prio_1;
232
233 __be64 RTTLOCT_prio_2;
234 __be64 RTTLOCT_NOFRM_prio_2;
235 __be64 ROCT_prio_2;
236
237 __be64 RTTLOCT_prio_3;
238 __be64 RTTLOCT_NOFRM_prio_3;
239 __be64 ROCT_prio_3;
240
241 __be64 RTTLOCT_prio_4;
242 __be64 RTTLOCT_NOFRM_prio_4;
243 __be64 ROCT_prio_4;
244
245 __be64 RTTLOCT_prio_5;
246 __be64 RTTLOCT_NOFRM_prio_5;
247 __be64 ROCT_prio_5;
248
249 __be64 RTTLOCT_prio_6;
250 __be64 RTTLOCT_NOFRM_prio_6;
251 __be64 ROCT_prio_6;
252
253 __be64 RTTLOCT_prio_7;
254 __be64 RTTLOCT_NOFRM_prio_7;
255 __be64 ROCT_prio_7;
256
257 __be64 RTTLOCT_novlan;
258 __be64 RTTLOCT_NOFRM_novlan;
259 __be64 ROCT_novlan;
260
261 /* Count of Total received frames including bad frames */
262 __be64 RTOT_prio_0;
263 /* Count of Total number of received frames with 802.1Q encapsulation */
264 __be64 R1Q_prio_0;
265 __be64 reserved1;
266
267 __be64 RTOT_prio_1;
268 __be64 R1Q_prio_1;
269 __be64 reserved2;
270
271 __be64 RTOT_prio_2;
272 __be64 R1Q_prio_2;
273 __be64 reserved3;
274
275 __be64 RTOT_prio_3;
276 __be64 R1Q_prio_3;
277 __be64 reserved4;
278
279 __be64 RTOT_prio_4;
280 __be64 R1Q_prio_4;
281 __be64 reserved5;
282
283 __be64 RTOT_prio_5;
284 __be64 R1Q_prio_5;
285 __be64 reserved6;
286
287 __be64 RTOT_prio_6;
288 __be64 R1Q_prio_6;
289 __be64 reserved7;
290
291 __be64 RTOT_prio_7;
292 __be64 R1Q_prio_7;
293 __be64 reserved8;
294
295 __be64 RTOT_novlan;
296 __be64 R1Q_novlan;
297 __be64 reserved9;
298
299 /* Total number of Successfully Received Control Frames */
300 __be64 RCNTL;
301 __be64 reserved10;
302 __be64 reserved11;
303 __be64 reserved12;
304 /* Count of received frames with a length/type field value between 46
305	   (42 for VLAN-tagged frames) and 1500 (also 1500 for VLAN-tagged frames),
306 inclusive */
307 __be64 RInRangeLengthErr;
308 /* Count of received frames with length/type field between 1501 and 1535
309 decimal, inclusive */
310 __be64 ROutRangeLengthErr;
311 /* Count of received frames that are longer than max allowed size for
312 802.3 frames (1518/1522) */
313 __be64 RFrmTooLong;
314 /* Count frames received with PCS error */
315 __be64 PCS;
316
317 /* Transmit frames with a length of 64 octets */
318 __be64 T64_prio_0;
319 __be64 T64_prio_1;
320 __be64 T64_prio_2;
321 __be64 T64_prio_3;
322 __be64 T64_prio_4;
323 __be64 T64_prio_5;
324 __be64 T64_prio_6;
325 __be64 T64_prio_7;
326 __be64 T64_novlan;
327 __be64 T64_loopbk;
328 /* Transmit frames with a length of 65 to 127 octets. */
329 __be64 T127_prio_0;
330 __be64 T127_prio_1;
331 __be64 T127_prio_2;
332 __be64 T127_prio_3;
333 __be64 T127_prio_4;
334 __be64 T127_prio_5;
335 __be64 T127_prio_6;
336 __be64 T127_prio_7;
337 __be64 T127_novlan;
338 __be64 T127_loopbk;
339 /* Transmit frames with a length of 128 to 255 octets */
340 __be64 T255_prio_0;
341 __be64 T255_prio_1;
342 __be64 T255_prio_2;
343 __be64 T255_prio_3;
344 __be64 T255_prio_4;
345 __be64 T255_prio_5;
346 __be64 T255_prio_6;
347 __be64 T255_prio_7;
348 __be64 T255_novlan;
349 __be64 T255_loopbk;
350 /* Transmit frames with a length of 256 to 511 octets */
351 __be64 T511_prio_0;
352 __be64 T511_prio_1;
353 __be64 T511_prio_2;
354 __be64 T511_prio_3;
355 __be64 T511_prio_4;
356 __be64 T511_prio_5;
357 __be64 T511_prio_6;
358 __be64 T511_prio_7;
359 __be64 T511_novlan;
360 __be64 T511_loopbk;
361 /* Transmit frames with a length of 512 to 1023 octets */
362 __be64 T1023_prio_0;
363 __be64 T1023_prio_1;
364 __be64 T1023_prio_2;
365 __be64 T1023_prio_3;
366 __be64 T1023_prio_4;
367 __be64 T1023_prio_5;
368 __be64 T1023_prio_6;
369 __be64 T1023_prio_7;
370 __be64 T1023_novlan;
371 __be64 T1023_loopbk;
372 /* Transmit frames with a length of 1024 to 1518 octets */
373 __be64 T1518_prio_0;
374 __be64 T1518_prio_1;
375 __be64 T1518_prio_2;
376 __be64 T1518_prio_3;
377 __be64 T1518_prio_4;
378 __be64 T1518_prio_5;
379 __be64 T1518_prio_6;
380 __be64 T1518_prio_7;
381 __be64 T1518_novlan;
382 __be64 T1518_loopbk;
383 /* Counts transmit frames with a length of 1519 to 1522 bytes */
384 __be64 T1522_prio_0;
385 __be64 T1522_prio_1;
386 __be64 T1522_prio_2;
387 __be64 T1522_prio_3;
388 __be64 T1522_prio_4;
389 __be64 T1522_prio_5;
390 __be64 T1522_prio_6;
391 __be64 T1522_prio_7;
392 __be64 T1522_novlan;
393 __be64 T1522_loopbk;
394 /* Transmit frames with a length of 1523 to 1548 octets */
395 __be64 T1548_prio_0;
396 __be64 T1548_prio_1;
397 __be64 T1548_prio_2;
398 __be64 T1548_prio_3;
399 __be64 T1548_prio_4;
400 __be64 T1548_prio_5;
401 __be64 T1548_prio_6;
402 __be64 T1548_prio_7;
403 __be64 T1548_novlan;
404 __be64 T1548_loopbk;
405 /* Counts transmit frames with a length of 1549 to MTU bytes */
406 __be64 T2MTU_prio_0;
407 __be64 T2MTU_prio_1;
408 __be64 T2MTU_prio_2;
409 __be64 T2MTU_prio_3;
410 __be64 T2MTU_prio_4;
411 __be64 T2MTU_prio_5;
412 __be64 T2MTU_prio_6;
413 __be64 T2MTU_prio_7;
414 __be64 T2MTU_novlan;
415 __be64 T2MTU_loopbk;
416 /* Transmit frames with a length greater than MTU octets and a good CRC. */
417 __be64 TGIANT_prio_0;
418 __be64 TGIANT_prio_1;
419 __be64 TGIANT_prio_2;
420 __be64 TGIANT_prio_3;
421 __be64 TGIANT_prio_4;
422 __be64 TGIANT_prio_5;
423 __be64 TGIANT_prio_6;
424 __be64 TGIANT_prio_7;
425 __be64 TGIANT_novlan;
426 __be64 TGIANT_loopbk;
427 /* Transmit broadcast frames with a good CRC */
428 __be64 TBCAST_prio_0;
429 __be64 TBCAST_prio_1;
430 __be64 TBCAST_prio_2;
431 __be64 TBCAST_prio_3;
432 __be64 TBCAST_prio_4;
433 __be64 TBCAST_prio_5;
434 __be64 TBCAST_prio_6;
435 __be64 TBCAST_prio_7;
436 __be64 TBCAST_novlan;
437 __be64 TBCAST_loopbk;
438 /* Transmit multicast frames with a good CRC */
439 __be64 TMCAST_prio_0;
440 __be64 TMCAST_prio_1;
441 __be64 TMCAST_prio_2;
442 __be64 TMCAST_prio_3;
443 __be64 TMCAST_prio_4;
444 __be64 TMCAST_prio_5;
445 __be64 TMCAST_prio_6;
446 __be64 TMCAST_prio_7;
447 __be64 TMCAST_novlan;
448 __be64 TMCAST_loopbk;
449 /* Transmit good frames that are neither broadcast nor multicast */
450 __be64 TTOTG_prio_0;
451 __be64 TTOTG_prio_1;
452 __be64 TTOTG_prio_2;
453 __be64 TTOTG_prio_3;
454 __be64 TTOTG_prio_4;
455 __be64 TTOTG_prio_5;
456 __be64 TTOTG_prio_6;
457 __be64 TTOTG_prio_7;
458 __be64 TTOTG_novlan;
459 __be64 TTOTG_loopbk;
460
461 /* total octets of transmitted frames, including framing characters */
462 __be64 TTTLOCT_prio_0;
463 /* total octets of transmitted frames, not including framing characters */
464 __be64 TTTLOCT_NOFRM_prio_0;
465 /* ifOutOctets */
466 __be64 TOCT_prio_0;
467
468 __be64 TTTLOCT_prio_1;
469 __be64 TTTLOCT_NOFRM_prio_1;
470 __be64 TOCT_prio_1;
471
472 __be64 TTTLOCT_prio_2;
473 __be64 TTTLOCT_NOFRM_prio_2;
474 __be64 TOCT_prio_2;
475
476 __be64 TTTLOCT_prio_3;
477 __be64 TTTLOCT_NOFRM_prio_3;
478 __be64 TOCT_prio_3;
479
480 __be64 TTTLOCT_prio_4;
481 __be64 TTTLOCT_NOFRM_prio_4;
482 __be64 TOCT_prio_4;
483
484 __be64 TTTLOCT_prio_5;
485 __be64 TTTLOCT_NOFRM_prio_5;
486 __be64 TOCT_prio_5;
487
488 __be64 TTTLOCT_prio_6;
489 __be64 TTTLOCT_NOFRM_prio_6;
490 __be64 TOCT_prio_6;
491
492 __be64 TTTLOCT_prio_7;
493 __be64 TTTLOCT_NOFRM_prio_7;
494 __be64 TOCT_prio_7;
495
496 __be64 TTTLOCT_novlan;
497 __be64 TTTLOCT_NOFRM_novlan;
498 __be64 TOCT_novlan;
499
500 __be64 TTTLOCT_loopbk;
501 __be64 TTTLOCT_NOFRM_loopbk;
502 __be64 TOCT_loopbk;
503
504 /* Total frames transmitted with a good CRC that are not aborted */
505 __be64 TTOT_prio_0;
506 /* Total number of frames transmitted with 802.1Q encapsulation */
507 __be64 T1Q_prio_0;
508 __be64 reserved13;
509
510 __be64 TTOT_prio_1;
511 __be64 T1Q_prio_1;
512 __be64 reserved14;
513
514 __be64 TTOT_prio_2;
515 __be64 T1Q_prio_2;
516 __be64 reserved15;
517
518 __be64 TTOT_prio_3;
519 __be64 T1Q_prio_3;
520 __be64 reserved16;
521
522 __be64 TTOT_prio_4;
523 __be64 T1Q_prio_4;
524 __be64 reserved17;
525
526 __be64 TTOT_prio_5;
527 __be64 T1Q_prio_5;
528 __be64 reserved18;
529
530 __be64 TTOT_prio_6;
531 __be64 T1Q_prio_6;
532 __be64 reserved19;
533
534 __be64 TTOT_prio_7;
535 __be64 T1Q_prio_7;
536 __be64 reserved20;
537
538 __be64 TTOT_novlan;
539 __be64 T1Q_novlan;
540 __be64 reserved21;
541
542 __be64 TTOT_loopbk;
543 __be64 T1Q_loopbk;
544 __be64 reserved22;
545
546 /* Received frames with a length greater than MTU octets and a bad CRC */
547 __be32 RJBBR;
548 /* Received frames with a bad CRC that are not runts, jabbers,
549 or alignment errors */
550 __be32 RCRC;
551 /* Received frames with SFD with a length of less than 64 octets and a
552 bad CRC */
553 __be32 RRUNT;
554 /* Received frames with a length less than 64 octets and a good CRC */
555 __be32 RSHORT;
556 /* Total Number of Received Packets Dropped */
557 __be32 RDROP;
558 /* Drop due to overflow */
559 __be32 RdropOvflw;
560 /* Drop due to length error */
561 __be32 RdropLength;
562 /* Total of good frames. Does not include frames received with
563 frame-too-long, FCS, or length errors */
564 __be32 RTOTFRMS;
565 /* Total dropped Xmited packets */
566 __be32 TDROP;
567};
568
569
570#endif
diff --git a/drivers/net/mlx4/en_resources.c b/drivers/net/mlx4/en_resources.c
new file mode 100644
index 000000000000..a0545209e507
--- /dev/null
+++ b/drivers/net/mlx4/en_resources.c
@@ -0,0 +1,96 @@
1/*
2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#include <linux/vmalloc.h>
35#include <linux/mlx4/qp.h>
36
37#include "mlx4_en.h"
38
39void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
40 int is_tx, int rss, int qpn, int cqn, int srqn,
41 struct mlx4_qp_context *context)
42{
43 struct mlx4_en_dev *mdev = priv->mdev;
44
45 memset(context, 0, sizeof *context);
46 context->flags = cpu_to_be32(7 << 16 | rss << 13);
47 context->pd = cpu_to_be32(mdev->priv_pdn);
48 context->mtu_msgmax = 0xff;
49 context->rq_size_stride = 0;
50 if (is_tx)
51 context->sq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);
52 else
53 context->sq_size_stride = 1;
54 context->usr_page = cpu_to_be32(mdev->priv_uar.index);
55 context->local_qpn = cpu_to_be32(qpn);
56 context->pri_path.ackto = 1 & 0x07;
57 context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6;
58 context->pri_path.counter_index = 0xff;
59 context->cqn_send = cpu_to_be32(cqn);
60 context->cqn_recv = cpu_to_be32(cqn);
61 context->db_rec_addr = cpu_to_be64(priv->res.db.dma << 2);
62 if (!rss)
63 context->srqn = cpu_to_be32(MLX4_EN_USE_SRQ | srqn);
64}
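/*
 * Worked example (values assumed for illustration): a Tx ring created with
 * size = 1024 entries and stride = 64 bytes would be encoded above as
 * sq_size_stride = (ilog2(1024) << 3) | (ilog2(64) - 4)
 *                = (10 << 3) | 2 = 0x52.
 */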
65
66
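/*
 * The two helpers below only do real work on 32-bit systems: when the HW
 * queue was allocated as a single chunk (or BITS_PER_LONG == 64) the buffer
 * is already virtually contiguous and mapping is a no-op; otherwise the
 * per-chunk pages are stitched together with vmap() so the driver can
 * address the queue as one linear buffer.
 */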
67int mlx4_en_map_buffer(struct mlx4_buf *buf)
68{
69 struct page **pages;
70 int i;
71
72 if (BITS_PER_LONG == 64 || buf->nbufs == 1)
73 return 0;
74
75 pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL);
76 if (!pages)
77 return -ENOMEM;
78
79 for (i = 0; i < buf->nbufs; ++i)
80 pages[i] = virt_to_page(buf->page_list[i].buf);
81
82 buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
83 kfree(pages);
84 if (!buf->direct.buf)
85 return -ENOMEM;
86
87 return 0;
88}
89
90void mlx4_en_unmap_buffer(struct mlx4_buf *buf)
91{
92 if (BITS_PER_LONG == 64 || buf->nbufs == 1)
93 return;
94
95 vunmap(buf->direct.buf);
96}
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
new file mode 100644
index 000000000000..6232227f56c3
--- /dev/null
+++ b/drivers/net/mlx4/en_rx.c
@@ -0,0 +1,1080 @@
1/*
2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#include <linux/mlx4/cq.h>
35#include <linux/mlx4/qp.h>
36#include <linux/skbuff.h>
37#include <linux/if_ether.h>
38#include <linux/if_vlan.h>
39#include <linux/vmalloc.h>
40
41#include "mlx4_en.h"
42
43static void *get_wqe(struct mlx4_en_rx_ring *ring, int n)
44{
45 int offset = n << ring->srq.wqe_shift;
46 return ring->buf + offset;
47}
48
49static void mlx4_en_srq_event(struct mlx4_srq *srq, enum mlx4_event type)
50{
51 return;
52}
53
54static int mlx4_en_get_frag_header(struct skb_frag_struct *frags, void **mac_hdr,
55 void **ip_hdr, void **tcpudp_hdr,
56 u64 *hdr_flags, void *priv)
57{
58 *mac_hdr = page_address(frags->page) + frags->page_offset;
59 *ip_hdr = *mac_hdr + ETH_HLEN;
60 *tcpudp_hdr = (struct tcphdr *)(*ip_hdr + sizeof(struct iphdr));
61 *hdr_flags = LRO_IPV4 | LRO_TCP;
62
63 return 0;
64}
65
66static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
67 struct mlx4_en_rx_desc *rx_desc,
68 struct skb_frag_struct *skb_frags,
69 struct mlx4_en_rx_alloc *ring_alloc,
70 int i)
71{
72 struct mlx4_en_dev *mdev = priv->mdev;
73 struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
74 struct mlx4_en_rx_alloc *page_alloc = &ring_alloc[i];
75 struct page *page;
76 dma_addr_t dma;
77
78 if (page_alloc->offset == frag_info->last_offset) {
79 /* Allocate new page */
80 page = alloc_pages(GFP_ATOMIC | __GFP_COMP, MLX4_EN_ALLOC_ORDER);
81 if (!page)
82 return -ENOMEM;
83
84 skb_frags[i].page = page_alloc->page;
85 skb_frags[i].page_offset = page_alloc->offset;
86 page_alloc->page = page;
87 page_alloc->offset = frag_info->frag_align;
88 } else {
89 page = page_alloc->page;
90 get_page(page);
91
92 skb_frags[i].page = page;
93 skb_frags[i].page_offset = page_alloc->offset;
94 page_alloc->offset += frag_info->frag_stride;
95 }
96 dma = pci_map_single(mdev->pdev, page_address(skb_frags[i].page) +
97 skb_frags[i].page_offset, frag_info->frag_size,
98 PCI_DMA_FROMDEVICE);
99 rx_desc->data[i].addr = cpu_to_be64(dma);
100 return 0;
101}
102
103static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
104 struct mlx4_en_rx_ring *ring)
105{
106 struct mlx4_en_rx_alloc *page_alloc;
107 int i;
108
109 for (i = 0; i < priv->num_frags; i++) {
110 page_alloc = &ring->page_alloc[i];
111 page_alloc->page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
112 MLX4_EN_ALLOC_ORDER);
113 if (!page_alloc->page)
114 goto out;
115
116 page_alloc->offset = priv->frag_info[i].frag_align;
117 mlx4_dbg(DRV, priv, "Initialized allocator:%d with page:%p\n",
118 i, page_alloc->page);
119 }
120 return 0;
121
122out:
123 while (i--) {
124 page_alloc = &ring->page_alloc[i];
125 put_page(page_alloc->page);
126 page_alloc->page = NULL;
127 }
128 return -ENOMEM;
129}
130
131static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
132 struct mlx4_en_rx_ring *ring)
133{
134 struct mlx4_en_rx_alloc *page_alloc;
135 int i;
136
137 for (i = 0; i < priv->num_frags; i++) {
138 page_alloc = &ring->page_alloc[i];
139 mlx4_dbg(DRV, priv, "Freeing allocator:%d count:%d\n",
140 i, page_count(page_alloc->page));
141
142 put_page(page_alloc->page);
143 page_alloc->page = NULL;
144 }
145}
146
147
148static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
149 struct mlx4_en_rx_ring *ring, int index)
150{
151 struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index;
152 struct skb_frag_struct *skb_frags = ring->rx_info +
153 (index << priv->log_rx_info);
154 int possible_frags;
155 int i;
156
157 /* Pre-link descriptor */
158 rx_desc->next.next_wqe_index = cpu_to_be16((index + 1) & ring->size_mask);
159
160 /* Set size and memtype fields */
161 for (i = 0; i < priv->num_frags; i++) {
162 skb_frags[i].size = priv->frag_info[i].frag_size;
163 rx_desc->data[i].byte_count =
164 cpu_to_be32(priv->frag_info[i].frag_size);
165 rx_desc->data[i].lkey = cpu_to_be32(priv->mdev->mr.key);
166 }
167
168 /* If the number of used fragments does not fill up the ring stride,
169 * remaining (unused) fragments must be padded with null address/size
170 * and a special memory key */
171 possible_frags = (ring->stride - sizeof(struct mlx4_en_rx_desc)) / DS_SIZE;
172 for (i = priv->num_frags; i < possible_frags; i++) {
173 rx_desc->data[i].byte_count = 0;
174 rx_desc->data[i].lkey = cpu_to_be32(MLX4_EN_MEMTYPE_PAD);
175 rx_desc->data[i].addr = 0;
176 }
177}
178
179
180static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
181 struct mlx4_en_rx_ring *ring, int index)
182{
183 struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride);
184 struct skb_frag_struct *skb_frags = ring->rx_info +
185 (index << priv->log_rx_info);
186 int i;
187
188 for (i = 0; i < priv->num_frags; i++)
189 if (mlx4_en_alloc_frag(priv, rx_desc, skb_frags, ring->page_alloc, i))
190 goto err;
191
192 return 0;
193
194err:
195 while (i--)
196 put_page(skb_frags[i].page);
197 return -ENOMEM;
198}
199
200static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
201{
202 *ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
203}
204
205static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
206{
207 struct mlx4_en_dev *mdev = priv->mdev;
208 struct mlx4_en_rx_ring *ring;
209 int ring_ind;
210 int buf_ind;
211
212 for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
213 for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
214 ring = &priv->rx_ring[ring_ind];
215
216 if (mlx4_en_prepare_rx_desc(priv, ring,
217 ring->actual_size)) {
218 if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
219 mlx4_err(mdev, "Failed to allocate "
220 "enough rx buffers\n");
221 return -ENOMEM;
222 } else {
223 if (netif_msg_rx_err(priv))
224 mlx4_warn(mdev,
225 "Only %d buffers allocated\n",
226 ring->actual_size);
227 goto out;
228 }
229 }
230 ring->actual_size++;
231 ring->prod++;
232 }
233 }
234out:
235 return 0;
236}
237
238static int mlx4_en_fill_rx_buf(struct net_device *dev,
239 struct mlx4_en_rx_ring *ring)
240{
241 struct mlx4_en_priv *priv = netdev_priv(dev);
242 int num = 0;
243 int err;
244
245 while ((u32) (ring->prod - ring->cons) < ring->actual_size) {
246 err = mlx4_en_prepare_rx_desc(priv, ring, ring->prod &
247 ring->size_mask);
248 if (err) {
249 if (netif_msg_rx_err(priv))
250 mlx4_warn(priv->mdev,
251 "Failed preparing rx descriptor\n");
252 priv->port_stats.rx_alloc_failed++;
253 break;
254 }
255 ++num;
256 ++ring->prod;
257 }
258 if ((u32) (ring->prod - ring->cons) == ring->size)
259 ring->full = 1;
260
261 return num;
262}
263
264static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
265 struct mlx4_en_rx_ring *ring)
266{
267 struct mlx4_en_dev *mdev = priv->mdev;
268 struct skb_frag_struct *skb_frags;
269 struct mlx4_en_rx_desc *rx_desc;
270 dma_addr_t dma;
271 int index;
272 int nr;
273
274 mlx4_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
275 ring->cons, ring->prod);
276
277 /* Unmap and free Rx buffers */
278 BUG_ON((u32) (ring->prod - ring->cons) > ring->size);
279 while (ring->cons != ring->prod) {
280 index = ring->cons & ring->size_mask;
281 rx_desc = ring->buf + (index << ring->log_stride);
282 skb_frags = ring->rx_info + (index << priv->log_rx_info);
283 mlx4_dbg(DRV, priv, "Processing descriptor:%d\n", index);
284
285 for (nr = 0; nr < priv->num_frags; nr++) {
286 mlx4_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
287 dma = be64_to_cpu(rx_desc->data[nr].addr);
288
289 mlx4_dbg(DRV, priv, "Unmapping buffer at dma:0x%llx\n", (u64) dma);
290 pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size,
291 PCI_DMA_FROMDEVICE);
292 put_page(skb_frags[nr].page);
293 }
294 ++ring->cons;
295 }
296}
297
298
299void mlx4_en_rx_refill(struct work_struct *work)
300{
301 struct delayed_work *delay = container_of(work, struct delayed_work, work);
302 struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
303 refill_task);
304 struct mlx4_en_dev *mdev = priv->mdev;
305 struct net_device *dev = priv->dev;
306 struct mlx4_en_rx_ring *ring;
307 int need_refill = 0;
308 int i;
309
310 mutex_lock(&mdev->state_lock);
311 if (!mdev->device_up || !priv->port_up)
312 goto out;
313
314 /* We only get here if there are no receive buffers, so we can't race
315 * with Rx interrupts while filling buffers */
316 for (i = 0; i < priv->rx_ring_num; i++) {
317 ring = &priv->rx_ring[i];
318 if (ring->need_refill) {
319 if (mlx4_en_fill_rx_buf(dev, ring)) {
320 ring->need_refill = 0;
321 mlx4_en_update_rx_prod_db(ring);
322 } else
323 need_refill = 1;
324 }
325 }
326 if (need_refill)
327 queue_delayed_work(mdev->workqueue, &priv->refill_task, HZ);
328
329out:
330 mutex_unlock(&mdev->state_lock);
331}
332
333
334int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
335 struct mlx4_en_rx_ring *ring, u32 size, u16 stride)
336{
337 struct mlx4_en_dev *mdev = priv->mdev;
338 int err;
339 int tmp;
340
341 /* Sanity check SRQ size before proceeding */
342 if (size >= mdev->dev->caps.max_srq_wqes)
343 return -EINVAL;
344
345 ring->prod = 0;
346 ring->cons = 0;
347 ring->size = size;
348 ring->size_mask = size - 1;
349 ring->stride = stride;
350 ring->log_stride = ffs(ring->stride) - 1;
351 ring->buf_size = ring->size * ring->stride;
352
353 tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
354 sizeof(struct skb_frag_struct));
355 ring->rx_info = vmalloc(tmp);
356 if (!ring->rx_info) {
357 mlx4_err(mdev, "Failed allocating rx_info ring\n");
358 return -ENOMEM;
359 }
360 mlx4_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
361 ring->rx_info, tmp);
362
363 err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
364 ring->buf_size, 2 * PAGE_SIZE);
365 if (err)
366 goto err_ring;
367
368 err = mlx4_en_map_buffer(&ring->wqres.buf);
369 if (err) {
370 mlx4_err(mdev, "Failed to map RX buffer\n");
371 goto err_hwq;
372 }
373 ring->buf = ring->wqres.buf.direct.buf;
374
375 /* Configure lro mngr */
376 memset(&ring->lro, 0, sizeof(struct net_lro_mgr));
377 ring->lro.dev = priv->dev;
378 ring->lro.features = LRO_F_NAPI;
379 ring->lro.frag_align_pad = NET_IP_ALIGN;
380 ring->lro.ip_summed = CHECKSUM_UNNECESSARY;
381 ring->lro.ip_summed_aggr = CHECKSUM_UNNECESSARY;
382 ring->lro.max_desc = mdev->profile.num_lro;
383 ring->lro.max_aggr = MAX_SKB_FRAGS;
384 ring->lro.lro_arr = kzalloc(mdev->profile.num_lro *
385 sizeof(struct net_lro_desc),
386 GFP_KERNEL);
387 if (!ring->lro.lro_arr) {
388 mlx4_err(mdev, "Failed to allocate lro array\n");
389 goto err_map;
390 }
391 ring->lro.get_frag_header = mlx4_en_get_frag_header;
392
393 return 0;
394
395err_map:
396 mlx4_en_unmap_buffer(&ring->wqres.buf);
397err_hwq:
398 mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
399err_ring:
400 vfree(ring->rx_info);
401 ring->rx_info = NULL;
402 return err;
403}
404
405int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
406{
407 struct mlx4_en_dev *mdev = priv->mdev;
408 struct mlx4_wqe_srq_next_seg *next;
409 struct mlx4_en_rx_ring *ring;
410 int i;
411 int ring_ind;
412 int err;
413 int stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
414 DS_SIZE * priv->num_frags);
415 int max_gs = (stride - sizeof(struct mlx4_wqe_srq_next_seg)) / DS_SIZE;
416
417 for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
418 ring = &priv->rx_ring[ring_ind];
419
420 ring->prod = 0;
421 ring->cons = 0;
422 ring->actual_size = 0;
423 ring->cqn = priv->rx_cq[ring_ind].mcq.cqn;
424
425 ring->stride = stride;
426 ring->log_stride = ffs(ring->stride) - 1;
427 ring->buf_size = ring->size * ring->stride;
428
429 memset(ring->buf, 0, ring->buf_size);
430 mlx4_en_update_rx_prod_db(ring);
431
432 /* Initialize all descriptors */
433 for (i = 0; i < ring->size; i++)
434 mlx4_en_init_rx_desc(priv, ring, i);
435
436 /* Initialize page allocators */
437 err = mlx4_en_init_allocator(priv, ring);
438 if (err) {
439 mlx4_err(mdev, "Failed initializing ring allocator\n");
440 goto err_allocator;
441 }
442
443 /* Fill Rx buffers */
444 ring->full = 0;
445 }
446 if (mlx4_en_fill_rx_buffers(priv))
447 goto err_buffers;
448
449 for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
450 ring = &priv->rx_ring[ring_ind];
451
452 mlx4_en_update_rx_prod_db(ring);
453
454 /* Configure SRQ representing the ring */
455 ring->srq.max = ring->size;
456 ring->srq.max_gs = max_gs;
457 ring->srq.wqe_shift = ilog2(ring->stride);
458
459 for (i = 0; i < ring->srq.max; ++i) {
460 next = get_wqe(ring, i);
461 next->next_wqe_index =
462 cpu_to_be16((i + 1) & (ring->srq.max - 1));
463 }
464
465 err = mlx4_srq_alloc(mdev->dev, mdev->priv_pdn, &ring->wqres.mtt,
466 ring->wqres.db.dma, &ring->srq);
467 if (err) {
468 mlx4_err(mdev, "Failed to allocate srq\n");
469 goto err_srq;
470 }
471 ring->srq.event = mlx4_en_srq_event;
472 }
473
474 return 0;
475
476err_srq:
477 while (ring_ind >= 0) {
478 ring = &priv->rx_ring[ring_ind];
479 mlx4_srq_free(mdev->dev, &ring->srq);
480 ring_ind--;
481 }
482
483err_buffers:
484 for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
485 mlx4_en_free_rx_buf(priv, &priv->rx_ring[ring_ind]);
486
487 ring_ind = priv->rx_ring_num - 1;
488err_allocator:
489 while (ring_ind >= 0) {
490 mlx4_en_destroy_allocator(priv, &priv->rx_ring[ring_ind]);
491 ring_ind--;
492 }
493 return err;
494}
495
496void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
497 struct mlx4_en_rx_ring *ring)
498{
499 struct mlx4_en_dev *mdev = priv->mdev;
500
501 kfree(ring->lro.lro_arr);
502 mlx4_en_unmap_buffer(&ring->wqres.buf);
503 mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
504 vfree(ring->rx_info);
505 ring->rx_info = NULL;
506}
507
508void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
509 struct mlx4_en_rx_ring *ring)
510{
511 struct mlx4_en_dev *mdev = priv->mdev;
512
513 mlx4_srq_free(mdev->dev, &ring->srq);
514 mlx4_en_free_rx_buf(priv, ring);
515 mlx4_en_destroy_allocator(priv, ring);
516}
517
518
519/* Unmap a completed descriptor and free unused pages */
520static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
521 struct mlx4_en_rx_desc *rx_desc,
522 struct skb_frag_struct *skb_frags,
523 struct skb_frag_struct *skb_frags_rx,
524 struct mlx4_en_rx_alloc *page_alloc,
525 int length)
526{
527 struct mlx4_en_dev *mdev = priv->mdev;
528 struct mlx4_en_frag_info *frag_info;
529 int nr;
530 dma_addr_t dma;
531
532 /* Collect used fragments while replacing them in the HW descriptors */
533 for (nr = 0; nr < priv->num_frags; nr++) {
534 frag_info = &priv->frag_info[nr];
535 if (length <= frag_info->frag_prefix_size)
536 break;
537
538 /* Save page reference in skb */
539 skb_frags_rx[nr].page = skb_frags[nr].page;
540 skb_frags_rx[nr].size = skb_frags[nr].size;
541 skb_frags_rx[nr].page_offset = skb_frags[nr].page_offset;
542 dma = be64_to_cpu(rx_desc->data[nr].addr);
543
544 /* Allocate a replacement page */
545 if (mlx4_en_alloc_frag(priv, rx_desc, skb_frags, page_alloc, nr))
546 goto fail;
547
548 /* Unmap buffer */
549 pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size,
550 PCI_DMA_FROMDEVICE);
551 }
552 /* Adjust size of last fragment to match actual length */
553 skb_frags_rx[nr - 1].size = length -
554 priv->frag_info[nr - 1].frag_prefix_size;
555 return nr;
556
557fail:
558 /* Drop all accumulated fragments (which have already been replaced in
559 * the descriptor) of this packet; remaining fragments are reused... */
560 while (nr > 0) {
561 nr--;
562 put_page(skb_frags_rx[nr].page);
563 }
564 return 0;
565}
566
567
568static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
569 struct mlx4_en_rx_desc *rx_desc,
570 struct skb_frag_struct *skb_frags,
571 struct mlx4_en_rx_alloc *page_alloc,
572 unsigned int length)
573{
574 struct mlx4_en_dev *mdev = priv->mdev;
575 struct sk_buff *skb;
576 void *va;
577 int used_frags;
578 dma_addr_t dma;
579
580 skb = dev_alloc_skb(SMALL_PACKET_SIZE + NET_IP_ALIGN);
581 if (!skb) {
582 mlx4_dbg(RX_ERR, priv, "Failed allocating skb\n");
583 return NULL;
584 }
585 skb->dev = priv->dev;
586 skb_reserve(skb, NET_IP_ALIGN);
587 skb->len = length;
588 skb->truesize = length + sizeof(struct sk_buff);
589
590 /* Get pointer to first fragment so we can copy the headers into the
591 * (linear part of the) skb */
592 va = page_address(skb_frags[0].page) + skb_frags[0].page_offset;
593
594 if (length <= SMALL_PACKET_SIZE) {
595 /* We are copying all relevant data to the skb - temporarily
596 * synch buffers for the copy */
597 dma = be64_to_cpu(rx_desc->data[0].addr);
598 dma_sync_single_range_for_cpu(&mdev->pdev->dev, dma, 0,
599 length, DMA_FROM_DEVICE);
600 skb_copy_to_linear_data(skb, va, length);
601 dma_sync_single_range_for_device(&mdev->pdev->dev, dma, 0,
602 length, DMA_FROM_DEVICE);
603 skb->tail += length;
604 } else {
605
606 /* Move relevant fragments to skb */
607 used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags,
608 skb_shinfo(skb)->frags,
609 page_alloc, length);
610 skb_shinfo(skb)->nr_frags = used_frags;
611
612 /* Copy headers into the skb linear buffer */
613 memcpy(skb->data, va, HEADER_COPY_SIZE);
614 skb->tail += HEADER_COPY_SIZE;
615
616 /* Skip headers in first fragment */
617 skb_shinfo(skb)->frags[0].page_offset += HEADER_COPY_SIZE;
618
619 /* Adjust size of first fragment */
620 skb_shinfo(skb)->frags[0].size -= HEADER_COPY_SIZE;
621 skb->data_len = length - HEADER_COPY_SIZE;
622 }
623 return skb;
624}
625
626static void mlx4_en_copy_desc(struct mlx4_en_priv *priv,
627 struct mlx4_en_rx_ring *ring,
628 int from, int to, int num)
629{
630 struct skb_frag_struct *skb_frags_from;
631 struct skb_frag_struct *skb_frags_to;
632 struct mlx4_en_rx_desc *rx_desc_from;
633 struct mlx4_en_rx_desc *rx_desc_to;
634 int from_index, to_index;
635 int nr, i;
636
637 for (i = 0; i < num; i++) {
638 from_index = (from + i) & ring->size_mask;
639 to_index = (to + i) & ring->size_mask;
640 skb_frags_from = ring->rx_info + (from_index << priv->log_rx_info);
641 skb_frags_to = ring->rx_info + (to_index << priv->log_rx_info);
642 rx_desc_from = ring->buf + (from_index << ring->log_stride);
643 rx_desc_to = ring->buf + (to_index << ring->log_stride);
644
645 for (nr = 0; nr < priv->num_frags; nr++) {
646 skb_frags_to[nr].page = skb_frags_from[nr].page;
647 skb_frags_to[nr].page_offset = skb_frags_from[nr].page_offset;
648 rx_desc_to->data[nr].addr = rx_desc_from->data[nr].addr;
649 }
650 }
651}
652
653
654int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
655{
656 struct mlx4_en_priv *priv = netdev_priv(dev);
657 struct mlx4_en_dev *mdev = priv->mdev;
658 struct mlx4_cqe *cqe;
659 struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
660 struct skb_frag_struct *skb_frags;
661 struct skb_frag_struct lro_frags[MLX4_EN_MAX_RX_FRAGS];
662 struct mlx4_en_rx_desc *rx_desc;
663 struct sk_buff *skb;
664 int index;
665 int nr;
666 unsigned int length;
667 int polled = 0;
668 int ip_summed;
669
670 if (!priv->port_up)
671 return 0;
672
673 /* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
674 * descriptor offset can be deduced from the CQE index instead of
675 * reading 'cqe->index' */
676 index = cq->mcq.cons_index & ring->size_mask;
677 cqe = &cq->buf[index];
678
679 /* Process all completed CQEs */
680 while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
681 cq->mcq.cons_index & cq->size)) {
682
683 skb_frags = ring->rx_info + (index << priv->log_rx_info);
684 rx_desc = ring->buf + (index << ring->log_stride);
685
686 /*
687 * make sure we read the CQE after we read the ownership bit
688 */
689 rmb();
690
691 /* Drop packet on bad receive or bad checksum */
692 if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
693 MLX4_CQE_OPCODE_ERROR)) {
694 mlx4_err(mdev, "CQE completed in error - vendor "
695 "syndrom:%d syndrom:%d\n",
696 ((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome,
697 ((struct mlx4_err_cqe *) cqe)->syndrome);
698 goto next;
699 }
700 if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
701 mlx4_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
702 goto next;
703 }
704
705 /*
706 * Packet is OK - process it.
707 */
708 length = be32_to_cpu(cqe->byte_cnt);
709 ring->bytes += length;
710 ring->packets++;
711
712 if (likely(priv->rx_csum)) {
713 if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
714 (cqe->checksum == cpu_to_be16(0xffff))) {
715 priv->port_stats.rx_chksum_good++;
716 /* This packet is eligible for LRO if it is:
717 * - DIX Ethernet (type interpretation)
718 * - TCP/IP (v4)
719 * - without IP options
720 * - not an IP fragment */
721 if (mlx4_en_can_lro(cqe->status) &&
722 dev->features & NETIF_F_LRO) {
723
724 nr = mlx4_en_complete_rx_desc(
725 priv, rx_desc,
726 skb_frags, lro_frags,
727 ring->page_alloc, length);
728 if (!nr)
729 goto next;
730
731 if (priv->vlgrp && (cqe->vlan_my_qpn &
732 cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK))) {
733 lro_vlan_hwaccel_receive_frags(
734 &ring->lro, lro_frags,
735 length, length,
736 priv->vlgrp,
737 be16_to_cpu(cqe->sl_vid),
738 NULL, 0);
739 } else
740 lro_receive_frags(&ring->lro,
741 lro_frags,
742 length,
743 length,
744 NULL, 0);
745
746 goto next;
747 }
748
749 /* LRO not possible, complete processing here */
750 ip_summed = CHECKSUM_UNNECESSARY;
751 INC_PERF_COUNTER(priv->pstats.lro_misses);
752 } else {
753 ip_summed = CHECKSUM_NONE;
754 priv->port_stats.rx_chksum_none++;
755 }
756 } else {
757 ip_summed = CHECKSUM_NONE;
758 priv->port_stats.rx_chksum_none++;
759 }
760
761 skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags,
762 ring->page_alloc, length);
763 if (!skb) {
764 priv->stats.rx_dropped++;
765 goto next;
766 }
767
768 skb->ip_summed = ip_summed;
769 skb->protocol = eth_type_trans(skb, dev);
770
771 /* Push it up the stack */
772 if (priv->vlgrp && (be32_to_cpu(cqe->vlan_my_qpn) &
773 MLX4_CQE_VLAN_PRESENT_MASK)) {
774 vlan_hwaccel_receive_skb(skb, priv->vlgrp,
775 be16_to_cpu(cqe->sl_vid));
776 } else
777 netif_receive_skb(skb);
778
779 dev->last_rx = jiffies;
780
781next:
782 ++cq->mcq.cons_index;
783 index = (cq->mcq.cons_index) & ring->size_mask;
784 cqe = &cq->buf[index];
785 if (++polled == budget) {
786 /* We are here because we reached the NAPI budget -
787 * flush only pending LRO sessions */
788 lro_flush_all(&ring->lro);
789 goto out;
790 }
791 }
792
793 /* If CQ is empty flush all LRO sessions unconditionally */
794 lro_flush_all(&ring->lro);
795
796out:
797 AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
798 mlx4_cq_set_ci(&cq->mcq);
799 wmb(); /* ensure HW sees CQ consumer before we post new buffers */
800 ring->cons = cq->mcq.cons_index;
801 ring->prod += polled; /* Polled descriptors were reallocated in place */
802 if (unlikely(!ring->full)) {
803 mlx4_en_copy_desc(priv, ring, ring->cons - polled,
804 ring->prod - polled, polled);
805 mlx4_en_fill_rx_buf(dev, ring);
806 }
807 mlx4_en_update_rx_prod_db(ring);
808 return polled;
809}
810
811
812void mlx4_en_rx_irq(struct mlx4_cq *mcq)
813{
814 struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
815 struct mlx4_en_priv *priv = netdev_priv(cq->dev);
816
817 if (priv->port_up)
818 netif_rx_schedule(cq->dev, &cq->napi);
819 else
820 mlx4_en_arm_cq(priv, cq);
821}
822
823/* Rx CQ polling - called by NAPI */
824int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
825{
826 struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
827 struct net_device *dev = cq->dev;
828 struct mlx4_en_priv *priv = netdev_priv(dev);
829 int done;
830
831 done = mlx4_en_process_rx_cq(dev, cq, budget);
832
833 /* If we used up all the quota - we're probably not done yet... */
834 if (done == budget)
835 INC_PERF_COUNTER(priv->pstats.napi_quota);
836 else {
837 /* Done for now */
838 netif_rx_complete(dev, napi);
839 mlx4_en_arm_cq(priv, cq);
840 }
841 return done;
842}
843
844
845/* Calculate the last offset position that accommodates a full fragment
846 * (assuming fragment size = stride - align) */
847static int mlx4_en_last_alloc_offset(struct mlx4_en_priv *priv, u16 stride, u16 align)
848{
849 u16 res = MLX4_EN_ALLOC_SIZE % stride;
850 u16 offset = MLX4_EN_ALLOC_SIZE - stride - res + align;
851
852 mlx4_dbg(DRV, priv, "Calculated last offset for stride:%d align:%d "
853 "res:%d offset:%d\n", stride, align, res, offset);
854 return offset;
855}
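/*
 * Worked example (numbers assumed for illustration): if MLX4_EN_ALLOC_SIZE
 * were 8192 and a fragment used stride = 2048 with align = 0, then
 * res = 8192 % 2048 = 0 and offset = 8192 - 2048 - 0 + 0 = 6144, i.e. the
 * highest offset from which a full fragment still fits inside the allocation.
 */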
856
857
858static int frag_sizes[] = {
859 FRAG_SZ0,
860 FRAG_SZ1,
861 FRAG_SZ2,
862 FRAG_SZ3
863};
864
865void mlx4_en_calc_rx_buf(struct net_device *dev)
866{
867 struct mlx4_en_priv *priv = netdev_priv(dev);
868 int eff_mtu = dev->mtu + ETH_HLEN + VLAN_HLEN + ETH_LLC_SNAP_SIZE;
869 int buf_size = 0;
870 int i = 0;
871
872 while (buf_size < eff_mtu) {
873 priv->frag_info[i].frag_size =
874 (eff_mtu > buf_size + frag_sizes[i]) ?
875 frag_sizes[i] : eff_mtu - buf_size;
876 priv->frag_info[i].frag_prefix_size = buf_size;
877 if (!i) {
878 priv->frag_info[i].frag_align = NET_IP_ALIGN;
879 priv->frag_info[i].frag_stride =
880 ALIGN(frag_sizes[i] + NET_IP_ALIGN, SMP_CACHE_BYTES);
881 } else {
882 priv->frag_info[i].frag_align = 0;
883 priv->frag_info[i].frag_stride =
884 ALIGN(frag_sizes[i], SMP_CACHE_BYTES);
885 }
886 priv->frag_info[i].last_offset = mlx4_en_last_alloc_offset(
887 priv, priv->frag_info[i].frag_stride,
888 priv->frag_info[i].frag_align);
889 buf_size += priv->frag_info[i].frag_size;
890 i++;
891 }
892
893 priv->num_frags = i;
894 priv->rx_skb_size = eff_mtu;
895 priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct skb_frag_struct));
896
897 mlx4_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d "
898 "num_frags:%d):\n", eff_mtu, priv->num_frags);
899 for (i = 0; i < priv->num_frags; i++) {
900 mlx4_dbg(DRV, priv, " frag:%d - size:%d prefix:%d align:%d "
901 "stride:%d last_offset:%d\n", i,
902 priv->frag_info[i].frag_size,
903 priv->frag_info[i].frag_prefix_size,
904 priv->frag_info[i].frag_align,
905 priv->frag_info[i].frag_stride,
906 priv->frag_info[i].last_offset);
907 }
908}
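/*
 * Worked example (FRAG_SZ* and LLC/SNAP sizes assumed for illustration): with
 * an MTU of 1500, eff_mtu = 1500 + ETH_HLEN (14) + VLAN_HLEN (4) +
 * ETH_LLC_SNAP_SIZE (assumed 8) = 1526.  If FRAG_SZ0 were 512 and FRAG_SZ1
 * were 1024, the loop above would build two fragments: frag 0 of 512 bytes
 * (prefix 0) and frag 1 of 1014 bytes (prefix 512), giving num_frags = 2 and
 * rx_skb_size = 1526.
 */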
909
910/* RSS related functions */
911
912/* Calculate rss size and map each entry in rss table to rx ring */
913void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
914 struct mlx4_en_rss_map *rss_map,
915 int num_entries, int num_rings)
916{
917 int i;
918
919 rss_map->size = roundup_pow_of_two(num_entries);
920 mlx4_dbg(DRV, priv, "Setting default RSS map of %d entries\n",
921 rss_map->size);
922
923 for (i = 0; i < rss_map->size; i++) {
924 rss_map->map[i] = i % num_rings;
925 mlx4_dbg(DRV, priv, "Entry %d ---> ring %d\n", i, rss_map->map[i]);
926 }
927}
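/*
 * Example (numbers for illustration only): with num_entries = 10 and
 * num_rings = 4 the table is rounded up to size = 16 and entry i is mapped
 * to ring (i % 4), i.e. rings 0, 1, 2, 3, 0, 1, 2, 3, ...
 */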
928
929static void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event)
930{
931 return;
932}
933
934
935static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv,
936 int qpn, int srqn, int cqn,
937 enum mlx4_qp_state *state,
938 struct mlx4_qp *qp)
939{
940 struct mlx4_en_dev *mdev = priv->mdev;
941 struct mlx4_qp_context *context;
942 int err = 0;
943
944 context = kmalloc(sizeof *context , GFP_KERNEL);
945 if (!context) {
946 mlx4_err(mdev, "Failed to allocate qp context\n");
947 return -ENOMEM;
948 }
949
950 err = mlx4_qp_alloc(mdev->dev, qpn, qp);
951 if (err) {
952 mlx4_err(mdev, "Failed to allocate qp #%d\n", qpn);
953 goto out;
955 }
956 qp->event = mlx4_en_sqp_event;
957
958 memset(context, 0, sizeof *context);
959 mlx4_en_fill_qp_context(priv, 0, 0, 0, 0, qpn, cqn, srqn, context);
960
961 err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, context, qp, state);
962 if (err) {
963 mlx4_qp_remove(mdev->dev, qp);
964 mlx4_qp_free(mdev->dev, qp);
965 }
966out:
967 kfree(context);
968 return err;
969}
970
971/* Allocate rx qp's and configure them according to rss map */
972int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
973{
974 struct mlx4_en_dev *mdev = priv->mdev;
975 struct mlx4_en_rss_map *rss_map = &priv->rss_map;
976 struct mlx4_qp_context context;
977 struct mlx4_en_rss_context *rss_context;
978 void *ptr;
979 int rss_xor = mdev->profile.rss_xor;
980 u8 rss_mask = mdev->profile.rss_mask;
981 int i, srqn, qpn, cqn;
982 int err = 0;
983 int good_qps = 0;
984
985 mlx4_dbg(DRV, priv, "Configuring rss steering for port %u\n", priv->port);
986 err = mlx4_qp_reserve_range(mdev->dev, rss_map->size,
987 rss_map->size, &rss_map->base_qpn);
988 if (err) {
989 mlx4_err(mdev, "Failed reserving %d qps for port %u\n",
990 rss_map->size, priv->port);
991 return err;
992 }
993
994 for (i = 0; i < rss_map->size; i++) {
995 cqn = priv->rx_ring[rss_map->map[i]].cqn;
996 srqn = priv->rx_ring[rss_map->map[i]].srq.srqn;
997 qpn = rss_map->base_qpn + i;
998 err = mlx4_en_config_rss_qp(priv, qpn, srqn, cqn,
999 &rss_map->state[i],
1000 &rss_map->qps[i]);
1001 if (err)
1002 goto rss_err;
1003
1004 ++good_qps;
1005 }
1006
1007 /* Configure RSS indirection qp */
1008 err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &priv->base_qpn);
1009 if (err) {
1010 mlx4_err(mdev, "Failed to reserve range for RSS "
1011 "indirection qp\n");
1012 goto rss_err;
1013 }
1014 err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp);
1015 if (err) {
1016 mlx4_err(mdev, "Failed to allocate RSS indirection QP\n");
1017 goto reserve_err;
1018 }
1019 rss_map->indir_qp.event = mlx4_en_sqp_event;
1020 mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
1021 priv->rx_ring[0].cqn, 0, &context);
1022
1023 ptr = ((void *) &context) + 0x3c;
1024 rss_context = (struct mlx4_en_rss_context *) ptr;
1025 rss_context->base_qpn = cpu_to_be32(ilog2(rss_map->size) << 24 |
1026 (rss_map->base_qpn));
1027 rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
1028 rss_context->hash_fn = rss_xor & 0x3;
1029 rss_context->flags = rss_mask << 2;
1030
1031 err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
1032 &rss_map->indir_qp, &rss_map->indir_state);
1033 if (err)
1034 goto indir_err;
1035
1036 return 0;
1037
1038indir_err:
1039 mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
1040 MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
1041 mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
1042 mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
1043reserve_err:
1044 mlx4_qp_release_range(mdev->dev, priv->base_qpn, 1);
1045rss_err:
1046 for (i = 0; i < good_qps; i++) {
1047 mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
1048 MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
1049 mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
1050 mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
1051 }
1052 mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, rss_map->size);
1053 return err;
1054}
1055
1056void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
1057{
1058 struct mlx4_en_dev *mdev = priv->mdev;
1059 struct mlx4_en_rss_map *rss_map = &priv->rss_map;
1060 int i;
1061
1062 mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
1063 MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
1064 mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
1065 mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
1066 mlx4_qp_release_range(mdev->dev, priv->base_qpn, 1);
1067
1068 for (i = 0; i < rss_map->size; i++) {
1069 mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
1070 MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
1071 mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
1072 mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
1073 }
1074 mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, rss_map->size);
1075}
1076
1077
1078
1079
1080
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
new file mode 100644
index 000000000000..8592f8fb8475
--- /dev/null
+++ b/drivers/net/mlx4/en_tx.c
@@ -0,0 +1,820 @@
1/*
2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#include <asm/page.h>
35#include <linux/mlx4/cq.h>
36#include <linux/mlx4/qp.h>
37#include <linux/skbuff.h>
38#include <linux/if_vlan.h>
39#include <linux/vmalloc.h>
40
41#include "mlx4_en.h"
42
43enum {
44 MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
45};
46
47static int inline_thold __read_mostly = MAX_INLINE;
48
49module_param_named(inline_thold, inline_thold, int, 0444);
50MODULE_PARM_DESC(inline_thold, "threshold for using inline data");
51
52int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
53 struct mlx4_en_tx_ring *ring, u32 size,
54 u16 stride)
55{
56 struct mlx4_en_dev *mdev = priv->mdev;
57 int tmp;
58 int err;
59
60 ring->size = size;
61 ring->size_mask = size - 1;
62 ring->stride = stride;
63
64 inline_thold = min(inline_thold, MAX_INLINE);
65
66 spin_lock_init(&ring->comp_lock);
67
68 tmp = size * sizeof(struct mlx4_en_tx_info);
69 ring->tx_info = vmalloc(tmp);
70 if (!ring->tx_info) {
71 mlx4_err(mdev, "Failed allocating tx_info ring\n");
72 return -ENOMEM;
73 }
74 mlx4_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
75 ring->tx_info, tmp);
76
77 ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
78 if (!ring->bounce_buf) {
79 mlx4_err(mdev, "Failed allocating bounce buffer\n");
80 err = -ENOMEM;
81 goto err_tx;
82 }
83 ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);
84
85 err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
86 2 * PAGE_SIZE);
87 if (err) {
88 mlx4_err(mdev, "Failed allocating hwq resources\n");
89 goto err_bounce;
90 }
91
92 err = mlx4_en_map_buffer(&ring->wqres.buf);
93 if (err) {
94 mlx4_err(mdev, "Failed to map TX buffer\n");
95 goto err_hwq_res;
96 }
97
98 ring->buf = ring->wqres.buf.direct.buf;
99
100 mlx4_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
101 "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
102 ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);
103
104 err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn);
105 if (err) {
106 mlx4_err(mdev, "Failed reserving qp for tx ring.\n");
107 goto err_map;
108 }
109
110 err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
111 if (err) {
112 mlx4_err(mdev, "Failed allocating qp %d\n", ring->qpn);
113 goto err_reserve;
114 }
115
116 return 0;
117
118err_reserve:
119 mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
120err_map:
121 mlx4_en_unmap_buffer(&ring->wqres.buf);
122err_hwq_res:
123 mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
124err_bounce:
125 kfree(ring->bounce_buf);
126 ring->bounce_buf = NULL;
127err_tx:
128 vfree(ring->tx_info);
129 ring->tx_info = NULL;
130 return err;
131}
132
133void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
134 struct mlx4_en_tx_ring *ring)
135{
136 struct mlx4_en_dev *mdev = priv->mdev;
137 mlx4_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);
138
139 mlx4_qp_remove(mdev->dev, &ring->qp);
140 mlx4_qp_free(mdev->dev, &ring->qp);
141 mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
142 mlx4_en_unmap_buffer(&ring->wqres.buf);
143 mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
144 kfree(ring->bounce_buf);
145 ring->bounce_buf = NULL;
146 vfree(ring->tx_info);
147 ring->tx_info = NULL;
148}
149
150int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
151 struct mlx4_en_tx_ring *ring,
152 int cq, int srqn)
153{
154 struct mlx4_en_dev *mdev = priv->mdev;
155 int err;
156
157 ring->cqn = cq;
158 ring->prod = 0;
159 ring->cons = 0xffffffff;
160 ring->last_nr_txbb = 1;
161 ring->poll_cnt = 0;
162 ring->blocked = 0;
163 memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info));
164 memset(ring->buf, 0, ring->buf_size);
165
166 ring->qp_state = MLX4_QP_STATE_RST;
167 ring->doorbell_qpn = swab32(ring->qp.qpn << 8);
168
169 mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
170 ring->cqn, srqn, &ring->context);
171
172 err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
173 &ring->qp, &ring->qp_state);
174
175 return err;
176}
177
178void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
179 struct mlx4_en_tx_ring *ring)
180{
181 struct mlx4_en_dev *mdev = priv->mdev;
182
183 mlx4_qp_modify(mdev->dev, NULL, ring->qp_state,
184 MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp);
185}
186
187
188static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
189 struct mlx4_en_tx_ring *ring,
190 int index, u8 owner)
191{
192 struct mlx4_en_dev *mdev = priv->mdev;
193 struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
194 struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
195 struct mlx4_wqe_data_seg *data = (void *) tx_desc + tx_info->data_offset;
196 struct sk_buff *skb = tx_info->skb;
197 struct skb_frag_struct *frag;
198 void *end = ring->buf + ring->buf_size;
199 int frags = skb_shinfo(skb)->nr_frags;
200 int i;
201 __be32 *ptr = (__be32 *)tx_desc;
202 __be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT));
203
204 /* Optimize the common case when there are no wraparounds */
205 if (likely((void *) tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) {
206 if (tx_info->linear) {
207 pci_unmap_single(mdev->pdev,
208 (dma_addr_t) be64_to_cpu(data->addr),
209 be32_to_cpu(data->byte_count),
210 PCI_DMA_TODEVICE);
211 ++data;
212 }
213
214 for (i = 0; i < frags; i++) {
215 frag = &skb_shinfo(skb)->frags[i];
216 pci_unmap_page(mdev->pdev,
217 (dma_addr_t) be64_to_cpu(data[i].addr),
218 frag->size, PCI_DMA_TODEVICE);
219 }
220 /* Stamp the freed descriptor */
221 for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
222 *ptr = stamp;
223 ptr += STAMP_DWORDS;
224 }
225
226 } else {
227 if ((void *) data >= end) {
228 data = (struct mlx4_wqe_data_seg *)
229 (ring->buf + ((void *) data - end));
230 }
231
232 if (tx_info->linear) {
233 pci_unmap_single(mdev->pdev,
234 (dma_addr_t) be64_to_cpu(data->addr),
235 be32_to_cpu(data->byte_count),
236 PCI_DMA_TODEVICE);
237 ++data;
238 }
239
240 for (i = 0; i < frags; i++) {
241 /* Check for wraparound before unmapping */
242 if ((void *) data >= end)
243 data = (struct mlx4_wqe_data_seg *) ring->buf;
244 frag = &skb_shinfo(skb)->frags[i];
245 pci_unmap_page(mdev->pdev,
246 (dma_addr_t) be64_to_cpu(data->addr),
247 frag->size, PCI_DMA_TODEVICE);
248 }
249 /* Stamp the freed descriptor */
250 for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
251 *ptr = stamp;
252 ptr += STAMP_DWORDS;
253 if ((void *) ptr >= end) {
254 ptr = ring->buf;
255 stamp ^= cpu_to_be32(0x80000000);
256 }
257 }
258
259 }
260 dev_kfree_skb_any(skb);
261 return tx_info->nr_txbb;
262}
263
264
265int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
266{
267 struct mlx4_en_priv *priv = netdev_priv(dev);
268 int cnt = 0;
269
270 /* Skip last polled descriptor */
271 ring->cons += ring->last_nr_txbb;
272 mlx4_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n",
273 ring->cons, ring->prod);
274
275 if ((u32) (ring->prod - ring->cons) > ring->size) {
276 if (netif_msg_tx_err(priv))
277 mlx4_warn(priv->mdev, "Tx consumer passed producer!\n");
278 return 0;
279 }
280
281 while (ring->cons != ring->prod) {
282 ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring,
283 ring->cons & ring->size_mask,
284 !!(ring->cons & ring->size));
285 ring->cons += ring->last_nr_txbb;
286 cnt++;
287 }
288
289 if (cnt)
290 mlx4_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);
291
292 return cnt;
293}
294
295void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num)
296{
297 int block = 8 / ring_num;
298 int extra = 8 - (block * ring_num);
299 int num = 0;
300 u16 ring = 1;
301 int prio;
302
303 if (ring_num == 1) {
304 for (prio = 0; prio < 8; prio++)
305 prio_map[prio] = 0;
306 return;
307 }
308
309 for (prio = 0; prio < 8; prio++) {
310 if (extra && (num == block + 1)) {
311 ring++;
312 num = 0;
313 extra--;
314 } else if (!extra && (num == block)) {
315 ring++;
316 num = 0;
317 }
318 prio_map[prio] = ring;
319 mlx4_dbg(DRV, priv, " prio:%d --> ring:%d\n", prio, ring);
320 num++;
321 }
322}
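/*
 * Worked example (ring_num assumed for illustration): with ring_num = 3 the
 * loop above computes block = 2 and extra = 2, yielding the priority-to-ring
 * map {0,1,2} -> 1, {3,4,5} -> 2, {6,7} -> 3.  Ring 0 is not assigned here;
 * it is the default ring for untagged traffic (see get_vlan_info() below).
 */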
323
324static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
325{
326 struct mlx4_en_priv *priv = netdev_priv(dev);
327 struct mlx4_cq *mcq = &cq->mcq;
328 struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
329 struct mlx4_cqe *cqe = cq->buf;
330 u16 index;
331 u16 new_index;
332 u32 txbbs_skipped = 0;
333 u32 cq_last_sav;
334
335 /* index always points to the first TXBB of the last polled descriptor */
336 index = ring->cons & ring->size_mask;
337 new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
338 if (index == new_index)
339 return;
340
341 if (!priv->port_up)
342 return;
343
344 /*
345 * We use a two-stage loop:
346 * - the first samples the HW-updated CQE
347 * - the second frees TXBBs until the last sample
348 * This lets us amortize CQE cache misses, while still polling the CQ
349 * until it is quiescent.
350 */
351 cq_last_sav = mcq->cons_index;
352 do {
353 do {
354 /* Skip over last polled CQE */
355 index = (index + ring->last_nr_txbb) & ring->size_mask;
356 txbbs_skipped += ring->last_nr_txbb;
357
358 /* Poll next CQE */
359 ring->last_nr_txbb = mlx4_en_free_tx_desc(
360 priv, ring, index,
361 !!((ring->cons + txbbs_skipped) &
362 ring->size));
363 ++mcq->cons_index;
364
365 } while (index != new_index);
366
367 new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
368 } while (index != new_index);
369 AVG_PERF_COUNTER(priv->pstats.tx_coal_avg,
370 (u32) (mcq->cons_index - cq_last_sav));
371
372 /*
373 * To prevent CQ overflow we first update CQ consumer and only then
374 * the ring consumer.
375 */
376 mlx4_cq_set_ci(mcq);
377 wmb();
378 ring->cons += txbbs_skipped;
379
380 /* Wakeup Tx queue if this ring stopped it */
381 if (unlikely(ring->blocked)) {
382 if (((u32) (ring->prod - ring->cons) <=
383 ring->size - HEADROOM - MAX_DESC_TXBBS) && !cq->armed) {
384
385 /* TODO: support multiqueue netdevs. Currently, we block
386 * when *any* ring is full. Note that:
387 * - 2 Tx rings can unblock at the same time and call
388 * netif_wake_queue(), which is OK since this
389 * operation is idempotent.
390 * - We might wake the queue just after another ring
391 * stopped it. This is no big deal because the next
392 * transmission on that ring would stop the queue.
393 */
394 ring->blocked = 0;
395 netif_wake_queue(dev);
396 priv->port_stats.wake_queue++;
397 }
398 }
399}
400
401void mlx4_en_tx_irq(struct mlx4_cq *mcq)
402{
403 struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
404 struct mlx4_en_priv *priv = netdev_priv(cq->dev);
405 struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
406
407 spin_lock_irq(&ring->comp_lock);
408 cq->armed = 0;
409 mlx4_en_process_tx_cq(cq->dev, cq);
410 if (ring->blocked)
411 mlx4_en_arm_cq(priv, cq);
412 else
413 mod_timer(&cq->timer, jiffies + 1);
414 spin_unlock_irq(&ring->comp_lock);
415}
416
417
418void mlx4_en_poll_tx_cq(unsigned long data)
419{
420 struct mlx4_en_cq *cq = (struct mlx4_en_cq *) data;
421 struct mlx4_en_priv *priv = netdev_priv(cq->dev);
422 struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
423 u32 inflight;
424
425 INC_PERF_COUNTER(priv->pstats.tx_poll);
426
427 netif_tx_lock(priv->dev);
428 spin_lock_irq(&ring->comp_lock);
429 mlx4_en_process_tx_cq(cq->dev, cq);
430 inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb);
431
432 /* If there are still packets in flight and the timer has not already
433 * been scheduled by the Tx routine then schedule it here to guarantee
434 * completion processing of these packets */
435 if (inflight && priv->port_up)
436 mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
437
438 spin_unlock_irq(&ring->comp_lock);
439 netif_tx_unlock(priv->dev);
440}
441
442static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
443 struct mlx4_en_tx_ring *ring,
444 u32 index,
445 unsigned int desc_size)
446{
447 u32 copy = (ring->size - index) * TXBB_SIZE;
448 int i;
449
450 for (i = desc_size - copy - 4; i >= 0; i -= 4) {
451 if ((i & (TXBB_SIZE - 1)) == 0)
452 wmb();
453
454 *((u32 *) (ring->buf + i)) =
455 *((u32 *) (ring->bounce_buf + copy + i));
456 }
457
458 for (i = copy - 4; i >= 4 ; i -= 4) {
459 if ((i & (TXBB_SIZE - 1)) == 0)
460 wmb();
461
462 *((u32 *) (ring->buf + index * TXBB_SIZE + i)) =
463 *((u32 *) (ring->bounce_buf + i));
464 }
465
466 /* Return real descriptor location */
467 return ring->buf + index * TXBB_SIZE;
468}
469
470static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
471{
472 struct mlx4_en_cq *cq = &priv->tx_cq[tx_ind];
473 struct mlx4_en_tx_ring *ring = &priv->tx_ring[tx_ind];
474
475 /* If we don't have a pending timer, set one up to catch our recent
476 post in case the interface becomes idle */
477 if (!timer_pending(&cq->timer))
478 mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
479
480 /* Poll the CQ every MLX4_EN_TX_POLL_MODER packets */
481 if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
482 mlx4_en_process_tx_cq(priv->dev, cq);
483}
484
485static void *get_frag_ptr(struct sk_buff *skb)
486{
487 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
488 struct page *page = frag->page;
489 void *ptr;
490
491 ptr = page_address(page);
492 if (unlikely(!ptr))
493 return NULL;
494
495 return ptr + frag->page_offset;
496}
497
498static int is_inline(struct sk_buff *skb, void **pfrag)
499{
500 void *ptr;
501
502 if (inline_thold && !skb_is_gso(skb) && skb->len <= inline_thold) {
503 if (skb_shinfo(skb)->nr_frags == 1) {
504 ptr = get_frag_ptr(skb);
505 if (unlikely(!ptr))
506 return 0;
507
508 if (pfrag)
509 *pfrag = ptr;
510
511 return 1;
512 } else if (unlikely(skb_shinfo(skb)->nr_frags))
513 return 0;
514 else
515 return 1;
516 }
517
518 return 0;
519}
520
521static int inline_size(struct sk_buff *skb)
522{
523 if (skb->len + CTRL_SIZE + sizeof(struct mlx4_wqe_inline_seg)
524 <= MLX4_INLINE_ALIGN)
525 return ALIGN(skb->len + CTRL_SIZE +
526 sizeof(struct mlx4_wqe_inline_seg), 16);
527 else
528 return ALIGN(skb->len + CTRL_SIZE + 2 *
529 sizeof(struct mlx4_wqe_inline_seg), 16);
530}
531
532static int get_real_size(struct sk_buff *skb, struct net_device *dev,
533 int *lso_header_size)
534{
535 struct mlx4_en_priv *priv = netdev_priv(dev);
536 struct mlx4_en_dev *mdev = priv->mdev;
537 int real_size;
538
539 if (skb_is_gso(skb)) {
540 *lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb);
541 real_size = CTRL_SIZE + skb_shinfo(skb)->nr_frags * DS_SIZE +
542 ALIGN(*lso_header_size + 4, DS_SIZE);
543 if (unlikely(*lso_header_size != skb_headlen(skb))) {
544 /* We add a segment for the skb linear buffer only if
545 * it contains data */
546 if (*lso_header_size < skb_headlen(skb))
547 real_size += DS_SIZE;
548 else {
549 if (netif_msg_tx_err(priv))
550 mlx4_warn(mdev, "Non-linear headers\n");
551 dev_kfree_skb_any(skb);
552 return 0;
553 }
554 }
555 if (unlikely(*lso_header_size > MAX_LSO_HDR_SIZE)) {
556 if (netif_msg_tx_err(priv))
557 mlx4_warn(mdev, "LSO header size too big\n");
558 dev_kfree_skb_any(skb);
559 return 0;
560 }
561 } else {
562 *lso_header_size = 0;
563 if (!is_inline(skb, NULL))
564 real_size = CTRL_SIZE + (skb_shinfo(skb)->nr_frags + 1) * DS_SIZE;
565 else
566 real_size = inline_size(skb);
567 }
568
569 return real_size;
570}
571
572static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *skb,
573 int real_size, u16 *vlan_tag, int tx_ind, void *fragptr)
574{
575 struct mlx4_wqe_inline_seg *inl = &tx_desc->inl;
576 int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl;
577
578 if (skb->len <= spc) {
579 inl->byte_count = cpu_to_be32(1 << 31 | skb->len);
580 skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
581 if (skb_shinfo(skb)->nr_frags)
582 memcpy(((void *)(inl + 1)) + skb_headlen(skb), fragptr,
583 skb_shinfo(skb)->frags[0].size);
584
585 } else {
586 inl->byte_count = cpu_to_be32(1 << 31 | spc);
587 if (skb_headlen(skb) <= spc) {
588 skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
589 if (skb_headlen(skb) < spc) {
590 memcpy(((void *)(inl + 1)) + skb_headlen(skb),
591 fragptr, spc - skb_headlen(skb));
592 fragptr += spc - skb_headlen(skb);
593 }
594 inl = (void *) (inl + 1) + spc;
595 memcpy(((void *)(inl + 1)), fragptr, skb->len - spc);
596 } else {
597 skb_copy_from_linear_data(skb, inl + 1, spc);
598 inl = (void *) (inl + 1) + spc;
599 skb_copy_from_linear_data_offset(skb, spc, inl + 1,
600 skb_headlen(skb) - spc);
601 if (skb_shinfo(skb)->nr_frags)
602 memcpy(((void *)(inl + 1)) + skb_headlen(skb) - spc,
603 fragptr, skb_shinfo(skb)->frags[0].size);
604 }
605
606 wmb();
607 inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc));
608 }
609 tx_desc->ctrl.vlan_tag = cpu_to_be16(*vlan_tag);
610 tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!(*vlan_tag);
611 tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
612}
613
614static int get_vlan_info(struct mlx4_en_priv *priv, struct sk_buff *skb,
615 u16 *vlan_tag)
616{
617 int tx_ind;
618
619 /* Obtain VLAN information if present */
620 if (priv->vlgrp && vlan_tx_tag_present(skb)) {
621 *vlan_tag = vlan_tx_tag_get(skb);
622 /* Set the Tx ring to use according to vlan priority */
623 tx_ind = priv->tx_prio_map[*vlan_tag >> 13];
624 } else {
625 *vlan_tag = 0;
626 tx_ind = 0;
627 }
628 return tx_ind;
629}
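/*
 * Example (illustrative): for a VLAN TCI of 0x6005 (priority 3, VID 5),
 * *vlan_tag >> 13 == 3, so the skb is steered to the ring recorded in
 * priv->tx_prio_map[3]; untagged traffic always uses ring 0.
 */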
630
631int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
632{
633 struct mlx4_en_priv *priv = netdev_priv(dev);
634 struct mlx4_en_dev *mdev = priv->mdev;
635 struct mlx4_en_tx_ring *ring;
636 struct mlx4_en_cq *cq;
637 struct mlx4_en_tx_desc *tx_desc;
638 struct mlx4_wqe_data_seg *data;
639 struct skb_frag_struct *frag;
640 struct mlx4_en_tx_info *tx_info;
641 int tx_ind = 0;
642 int nr_txbb;
643 int desc_size;
644 int real_size;
645 dma_addr_t dma;
646 u32 index;
647 __be32 op_own;
648 u16 vlan_tag;
649 int i;
650 int lso_header_size;
651 void *fragptr;
652
653 if (unlikely(!skb->len)) {
654 dev_kfree_skb_any(skb);
655 return NETDEV_TX_OK;
656 }
657 real_size = get_real_size(skb, dev, &lso_header_size);
658 if (unlikely(!real_size))
659 return NETDEV_TX_OK;
660
661	/* Align descriptor to TXBB size */
662 desc_size = ALIGN(real_size, TXBB_SIZE);
663 nr_txbb = desc_size / TXBB_SIZE;
664 if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
665 if (netif_msg_tx_err(priv))
666 mlx4_warn(mdev, "Oversized header or SG list\n");
667 dev_kfree_skb_any(skb);
668 return NETDEV_TX_OK;
669 }
670
671 tx_ind = get_vlan_info(priv, skb, &vlan_tag);
672 ring = &priv->tx_ring[tx_ind];
673
674	/* Check available TXBBs and 2K spare for prefetch */
675 if (unlikely(((int)(ring->prod - ring->cons)) >
676 ring->size - HEADROOM - MAX_DESC_TXBBS)) {
677		/* Every full Tx ring stops the queue.
678 * TODO: implement multi-queue support (per-queue stop) */
679 netif_stop_queue(dev);
680 ring->blocked = 1;
681 priv->port_stats.queue_stopped++;
682
683 /* Use interrupts to find out when queue opened */
684 cq = &priv->tx_cq[tx_ind];
685 mlx4_en_arm_cq(priv, cq);
686 return NETDEV_TX_BUSY;
687 }
688
689	/* Now that we know which Tx ring to use, check that the port is up */
690 if (unlikely(!priv->port_up)) {
691 if (netif_msg_tx_err(priv))
692 mlx4_warn(mdev, "xmit: port down!\n");
693 dev_kfree_skb_any(skb);
694 return NETDEV_TX_OK;
695 }
696
697 /* Track current inflight packets for performance analysis */
698 AVG_PERF_COUNTER(priv->pstats.inflight_avg,
699 (u32) (ring->prod - ring->cons - 1));
700
701 /* Packet is good - grab an index and transmit it */
702 index = ring->prod & ring->size_mask;
703
704 /* See if we have enough space for whole descriptor TXBB for setting
705 * SW ownership on next descriptor; if not, use a bounce buffer. */
706 if (likely(index + nr_txbb <= ring->size))
707 tx_desc = ring->buf + index * TXBB_SIZE;
708 else
709 tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf;
710
711 /* Save skb in tx_info ring */
712 tx_info = &ring->tx_info[index];
713 tx_info->skb = skb;
714 tx_info->nr_txbb = nr_txbb;
715
716	/* Prepare ctrl segment apart from opcode+ownership, which depends on
717 * whether LSO is used */
718 tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
719 tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!vlan_tag;
720 tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
721 tx_desc->ctrl.srcrb_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
722 MLX4_WQE_CTRL_SOLICITED);
723 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
724 tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
725 MLX4_WQE_CTRL_TCP_UDP_CSUM);
726 priv->port_stats.tx_chksum_offload++;
727 }
728
729 /* Handle LSO (TSO) packets */
730 if (lso_header_size) {
731 /* Mark opcode as LSO */
732 op_own = cpu_to_be32(MLX4_OPCODE_LSO | (1 << 6)) |
733 ((ring->prod & ring->size) ?
734 cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
735
736 /* Fill in the LSO prefix */
737 tx_desc->lso.mss_hdr_size = cpu_to_be32(
738 skb_shinfo(skb)->gso_size << 16 | lso_header_size);
739
740 /* Copy headers;
741 * note that we already verified that it is linear */
742 memcpy(tx_desc->lso.header, skb->data, lso_header_size);
743 data = ((void *) &tx_desc->lso +
744 ALIGN(lso_header_size + 4, DS_SIZE));
745
746 priv->port_stats.tso_packets++;
747 i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) +
748 !!((skb->len - lso_header_size) % skb_shinfo(skb)->gso_size);
749 ring->bytes += skb->len + (i - 1) * lso_header_size;
750 ring->packets += i;
751 } else {
752 /* Normal (Non LSO) packet */
753 op_own = cpu_to_be32(MLX4_OPCODE_SEND) |
754 ((ring->prod & ring->size) ?
755 cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
756 data = &tx_desc->data;
757 ring->bytes += max(skb->len, (unsigned int) ETH_ZLEN);
758 ring->packets++;
759
760 }
761 AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);
762
763
764	/* valid only for non-inline segments */
765 tx_info->data_offset = (void *) data - (void *) tx_desc;
766
767 tx_info->linear = (lso_header_size < skb_headlen(skb) && !is_inline(skb, NULL)) ? 1 : 0;
768 data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1;
769
770 if (!is_inline(skb, &fragptr)) {
771 /* Map fragments */
772 for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) {
773 frag = &skb_shinfo(skb)->frags[i];
774 dma = pci_map_page(mdev->dev->pdev, frag->page, frag->page_offset,
775 frag->size, PCI_DMA_TODEVICE);
776 data->addr = cpu_to_be64(dma);
777 data->lkey = cpu_to_be32(mdev->mr.key);
778 wmb();
779 data->byte_count = cpu_to_be32(frag->size);
780 --data;
781 }
782
783 /* Map linear part */
784 if (tx_info->linear) {
785 dma = pci_map_single(mdev->dev->pdev, skb->data + lso_header_size,
786 skb_headlen(skb) - lso_header_size, PCI_DMA_TODEVICE);
787 data->addr = cpu_to_be64(dma);
788 data->lkey = cpu_to_be32(mdev->mr.key);
789 wmb();
790 data->byte_count = cpu_to_be32(skb_headlen(skb) - lso_header_size);
791 }
792 } else
793 build_inline_wqe(tx_desc, skb, real_size, &vlan_tag, tx_ind, fragptr);
794
795 ring->prod += nr_txbb;
796
797 /* If we used a bounce buffer then copy descriptor back into place */
798 if (tx_desc == (struct mlx4_en_tx_desc *) ring->bounce_buf)
799 tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size);
800
801 /* Run destructor before passing skb to HW */
802 if (likely(!skb_shared(skb)))
803 skb_orphan(skb);
804
805	/* Ensure new descriptor hits memory
806 * before setting ownership of this descriptor to HW */
807 wmb();
808 tx_desc->ctrl.owner_opcode = op_own;
809
810 /* Ring doorbell! */
811 wmb();
812 writel(ring->doorbell_qpn, mdev->uar_map + MLX4_SEND_DOORBELL);
813 dev->trans_start = jiffies;
814
815 /* Poll CQ here */
816 mlx4_en_xmit_poll(priv, tx_ind);
817
818	return NETDEV_TX_OK;
819}
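/*
 * Ordering summary for the transmit path above (descriptive only): the data
 * and control segments are written first, a wmb() publishes them before the
 * ownership/opcode word hands the descriptor to the HCA, and a second wmb()
 * orders that store before the doorbell write to the UAR page, which tells
 * the hardware to fetch the new WQEs.
 */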
820
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index 8a8b56135a58..de169338cd90 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -558,7 +558,7 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
558 int i; 558 int i;
559 559
560 err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs, 560 err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs,
561 dev->caps.num_eqs - 1, dev->caps.reserved_eqs); 561 dev->caps.num_eqs - 1, dev->caps.reserved_eqs, 0);
562 if (err) 562 if (err)
563 return err; 563 return err;
564 564
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index 7e32955da982..cee199ceba2f 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -88,6 +88,7 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u32 flags)
88 [ 8] = "P_Key violation counter", 88 [ 8] = "P_Key violation counter",
89 [ 9] = "Q_Key violation counter", 89 [ 9] = "Q_Key violation counter",
90 [10] = "VMM", 90 [10] = "VMM",
91 [12] = "DPDP",
91 [16] = "MW support", 92 [16] = "MW support",
92 [17] = "APM support", 93 [17] = "APM support",
93 [18] = "Atomic ops support", 94 [18] = "Atomic ops support",
@@ -346,7 +347,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
346 MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET); 347 MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
347 dev_cap->max_vl[i] = field >> 4; 348 dev_cap->max_vl[i] = field >> 4;
348 MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET); 349 MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET);
349 dev_cap->max_mtu[i] = field >> 4; 350 dev_cap->ib_mtu[i] = field >> 4;
350 dev_cap->max_port_width[i] = field & 0xf; 351 dev_cap->max_port_width[i] = field & 0xf;
351 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET); 352 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET);
352 dev_cap->max_gids[i] = 1 << (field & 0xf); 353 dev_cap->max_gids[i] = 1 << (field & 0xf);
@@ -354,10 +355,14 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
354 dev_cap->max_pkeys[i] = 1 << (field & 0xf); 355 dev_cap->max_pkeys[i] = 1 << (field & 0xf);
355 } 356 }
356 } else { 357 } else {
358#define QUERY_PORT_SUPPORTED_TYPE_OFFSET 0x00
357#define QUERY_PORT_MTU_OFFSET 0x01 359#define QUERY_PORT_MTU_OFFSET 0x01
360#define QUERY_PORT_ETH_MTU_OFFSET 0x02
358#define QUERY_PORT_WIDTH_OFFSET 0x06 361#define QUERY_PORT_WIDTH_OFFSET 0x06
359#define QUERY_PORT_MAX_GID_PKEY_OFFSET 0x07 362#define QUERY_PORT_MAX_GID_PKEY_OFFSET 0x07
363#define QUERY_PORT_MAX_MACVLAN_OFFSET 0x0a
360#define QUERY_PORT_MAX_VL_OFFSET 0x0b 364#define QUERY_PORT_MAX_VL_OFFSET 0x0b
365#define QUERY_PORT_MAC_OFFSET 0x10
361 366
362 for (i = 1; i <= dev_cap->num_ports; ++i) { 367 for (i = 1; i <= dev_cap->num_ports; ++i) {
363 err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT, 368 err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT,
@@ -365,8 +370,10 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
365 if (err) 370 if (err)
366 goto out; 371 goto out;
367 372
373 MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET);
374 dev_cap->supported_port_types[i] = field & 3;
368 MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET); 375 MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET);
369 dev_cap->max_mtu[i] = field & 0xf; 376 dev_cap->ib_mtu[i] = field & 0xf;
370 MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET); 377 MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET);
371 dev_cap->max_port_width[i] = field & 0xf; 378 dev_cap->max_port_width[i] = field & 0xf;
372 MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET); 379 MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET);
@@ -374,6 +381,11 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
374 dev_cap->max_pkeys[i] = 1 << (field & 0xf); 381 dev_cap->max_pkeys[i] = 1 << (field & 0xf);
375 MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET); 382 MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET);
376 dev_cap->max_vl[i] = field & 0xf; 383 dev_cap->max_vl[i] = field & 0xf;
384 MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET);
385 dev_cap->log_max_macs[i] = field & 0xf;
386 dev_cap->log_max_vlans[i] = field >> 4;
387 MLX4_GET(dev_cap->eth_mtu[i], outbox, QUERY_PORT_ETH_MTU_OFFSET);
388 MLX4_GET(dev_cap->def_mac[i], outbox, QUERY_PORT_MAC_OFFSET);
377 } 389 }
378 } 390 }
379 391
@@ -407,7 +419,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
407 mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n", 419 mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
408 dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz); 420 dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz);
409 mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n", 421 mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n",
410 dev_cap->local_ca_ack_delay, 128 << dev_cap->max_mtu[1], 422 dev_cap->local_ca_ack_delay, 128 << dev_cap->ib_mtu[1],
411 dev_cap->max_port_width[1]); 423 dev_cap->max_port_width[1]);
412 mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n", 424 mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n",
413 dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg); 425 dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg);
@@ -819,7 +831,7 @@ int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
819 flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT; 831 flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT;
820 MLX4_PUT(inbox, flags, INIT_PORT_FLAGS_OFFSET); 832 MLX4_PUT(inbox, flags, INIT_PORT_FLAGS_OFFSET);
821 833
822 field = 128 << dev->caps.mtu_cap[port]; 834 field = 128 << dev->caps.ib_mtu_cap[port];
823 MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET); 835 MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET);
824 field = dev->caps.gid_table_len[port]; 836 field = dev->caps.gid_table_len[port];
825 MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET); 837 MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET);
diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h
index decbb5c2ad41..526d7f30c041 100644
--- a/drivers/net/mlx4/fw.h
+++ b/drivers/net/mlx4/fw.h
@@ -66,11 +66,13 @@ struct mlx4_dev_cap {
66 int local_ca_ack_delay; 66 int local_ca_ack_delay;
67 int num_ports; 67 int num_ports;
68 u32 max_msg_sz; 68 u32 max_msg_sz;
69 int max_mtu[MLX4_MAX_PORTS + 1]; 69 int ib_mtu[MLX4_MAX_PORTS + 1];
70 int max_port_width[MLX4_MAX_PORTS + 1]; 70 int max_port_width[MLX4_MAX_PORTS + 1];
71 int max_vl[MLX4_MAX_PORTS + 1]; 71 int max_vl[MLX4_MAX_PORTS + 1];
72 int max_gids[MLX4_MAX_PORTS + 1]; 72 int max_gids[MLX4_MAX_PORTS + 1];
73 int max_pkeys[MLX4_MAX_PORTS + 1]; 73 int max_pkeys[MLX4_MAX_PORTS + 1];
74 u64 def_mac[MLX4_MAX_PORTS + 1];
75 u16 eth_mtu[MLX4_MAX_PORTS + 1];
74 u16 stat_rate_support; 76 u16 stat_rate_support;
75 u32 flags; 77 u32 flags;
76 int reserved_uars; 78 int reserved_uars;
@@ -102,6 +104,9 @@ struct mlx4_dev_cap {
102 u32 reserved_lkey; 104 u32 reserved_lkey;
103 u64 max_icm_sz; 105 u64 max_icm_sz;
104 int max_gso_sz; 106 int max_gso_sz;
107 u8 supported_port_types[MLX4_MAX_PORTS + 1];
108 u8 log_max_macs[MLX4_MAX_PORTS + 1];
109 u8 log_max_vlans[MLX4_MAX_PORTS + 1];
105}; 110};
106 111
107struct mlx4_adapter { 112struct mlx4_adapter {
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 1252a919de2e..90a0281d15ea 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -85,6 +85,57 @@ static struct mlx4_profile default_profile = {
85 .num_mtt = 1 << 20, 85 .num_mtt = 1 << 20,
86}; 86};
87 87
88static int log_num_mac = 2;
89module_param_named(log_num_mac, log_num_mac, int, 0444);
90MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");
91
92static int log_num_vlan;
93module_param_named(log_num_vlan, log_num_vlan, int, 0444);
94MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
95
96static int use_prio;
97module_param_named(use_prio, use_prio, bool, 0444);
98MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
99 "(0/1, default 0)");
100
101static int mlx4_check_port_params(struct mlx4_dev *dev,
102 enum mlx4_port_type *port_type)
103{
104 int i;
105
106 for (i = 0; i < dev->caps.num_ports - 1; i++) {
107 if (port_type[i] != port_type[i+1] &&
108 !(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
109 mlx4_err(dev, "Only same port types supported "
110 "on this HCA, aborting.\n");
111 return -EINVAL;
112 }
113 }
114 if ((port_type[0] == MLX4_PORT_TYPE_ETH) &&
115 (port_type[1] == MLX4_PORT_TYPE_IB)) {
116 mlx4_err(dev, "eth-ib configuration is not supported.\n");
117 return -EINVAL;
118 }
119
120 for (i = 0; i < dev->caps.num_ports; i++) {
121 if (!(port_type[i] & dev->caps.supported_type[i+1])) {
122 mlx4_err(dev, "Requested port type for port %d is not "
123 "supported on this HCA\n", i + 1);
124 return -EINVAL;
125 }
126 }
127 return 0;
128}
129
130static void mlx4_set_port_mask(struct mlx4_dev *dev)
131{
132 int i;
133
134 dev->caps.port_mask = 0;
135 for (i = 1; i <= dev->caps.num_ports; ++i)
136 if (dev->caps.port_type[i] == MLX4_PORT_TYPE_IB)
137 dev->caps.port_mask |= 1 << (i - 1);
138}
88static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) 139static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
89{ 140{
90 int err; 141 int err;
@@ -120,10 +171,13 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
120 dev->caps.num_ports = dev_cap->num_ports; 171 dev->caps.num_ports = dev_cap->num_ports;
121 for (i = 1; i <= dev->caps.num_ports; ++i) { 172 for (i = 1; i <= dev->caps.num_ports; ++i) {
122 dev->caps.vl_cap[i] = dev_cap->max_vl[i]; 173 dev->caps.vl_cap[i] = dev_cap->max_vl[i];
123 dev->caps.mtu_cap[i] = dev_cap->max_mtu[i]; 174 dev->caps.ib_mtu_cap[i] = dev_cap->ib_mtu[i];
124 dev->caps.gid_table_len[i] = dev_cap->max_gids[i]; 175 dev->caps.gid_table_len[i] = dev_cap->max_gids[i];
125 dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i]; 176 dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i];
126 dev->caps.port_width_cap[i] = dev_cap->max_port_width[i]; 177 dev->caps.port_width_cap[i] = dev_cap->max_port_width[i];
178 dev->caps.eth_mtu_cap[i] = dev_cap->eth_mtu[i];
179 dev->caps.def_mac[i] = dev_cap->def_mac[i];
180 dev->caps.supported_type[i] = dev_cap->supported_port_types[i];
127 } 181 }
128 182
129 dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE; 183 dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE;
@@ -134,7 +188,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
134 dev->caps.max_rq_sg = dev_cap->max_rq_sg; 188 dev->caps.max_rq_sg = dev_cap->max_rq_sg;
135 dev->caps.max_wqes = dev_cap->max_qp_sz; 189 dev->caps.max_wqes = dev_cap->max_qp_sz;
136 dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp; 190 dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp;
137 dev->caps.reserved_qps = dev_cap->reserved_qps;
138 dev->caps.max_srq_wqes = dev_cap->max_srq_sz; 191 dev->caps.max_srq_wqes = dev_cap->max_srq_sz;
139 dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1; 192 dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1;
140 dev->caps.reserved_srqs = dev_cap->reserved_srqs; 193 dev->caps.reserved_srqs = dev_cap->reserved_srqs;
@@ -163,9 +216,138 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
163 dev->caps.stat_rate_support = dev_cap->stat_rate_support; 216 dev->caps.stat_rate_support = dev_cap->stat_rate_support;
164 dev->caps.max_gso_sz = dev_cap->max_gso_sz; 217 dev->caps.max_gso_sz = dev_cap->max_gso_sz;
165 218
219 dev->caps.log_num_macs = log_num_mac;
220 dev->caps.log_num_vlans = log_num_vlan;
221 dev->caps.log_num_prios = use_prio ? 3 : 0;
222
223 for (i = 1; i <= dev->caps.num_ports; ++i) {
224 if (dev->caps.supported_type[i] != MLX4_PORT_TYPE_ETH)
225 dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
226 else
227 dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
228
229 if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
230 dev->caps.log_num_macs = dev_cap->log_max_macs[i];
231 mlx4_warn(dev, "Requested number of MACs is too much "
232 "for port %d, reducing to %d.\n",
233 i, 1 << dev->caps.log_num_macs);
234 }
235 if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) {
236 dev->caps.log_num_vlans = dev_cap->log_max_vlans[i];
237 mlx4_warn(dev, "Requested number of VLANs is too much "
238 "for port %d, reducing to %d.\n",
239 i, 1 << dev->caps.log_num_vlans);
240 }
241 }
242
243 mlx4_set_port_mask(dev);
244
245 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
246 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
247 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
248 (1 << dev->caps.log_num_macs) *
249 (1 << dev->caps.log_num_vlans) *
250 (1 << dev->caps.log_num_prios) *
251 dev->caps.num_ports;
252 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;
253
254 dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
255 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
256 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
257 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];
258
166 return 0; 259 return 0;
167} 260}
168 261
262/*
263 * Change the port configuration of the device.
264 * Every user of this function must hold the port mutex.
265 */
266static int mlx4_change_port_types(struct mlx4_dev *dev,
267 enum mlx4_port_type *port_types)
268{
269 int err = 0;
270 int change = 0;
271 int port;
272
273 for (port = 0; port < dev->caps.num_ports; port++) {
274 if (port_types[port] != dev->caps.port_type[port + 1]) {
275 change = 1;
276 dev->caps.port_type[port + 1] = port_types[port];
277 }
278 }
279 if (change) {
280 mlx4_unregister_device(dev);
281 for (port = 1; port <= dev->caps.num_ports; port++) {
282 mlx4_CLOSE_PORT(dev, port);
283 err = mlx4_SET_PORT(dev, port);
284 if (err) {
285 mlx4_err(dev, "Failed to set port %d, "
286 "aborting\n", port);
287 goto out;
288 }
289 }
290 mlx4_set_port_mask(dev);
291 err = mlx4_register_device(dev);
292 }
293
294out:
295 return err;
296}
297
298static ssize_t show_port_type(struct device *dev,
299 struct device_attribute *attr,
300 char *buf)
301{
302 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
303 port_attr);
304 struct mlx4_dev *mdev = info->dev;
305
306 return sprintf(buf, "%s\n",
307 mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB ?
308 "ib" : "eth");
309}
310
311static ssize_t set_port_type(struct device *dev,
312 struct device_attribute *attr,
313 const char *buf, size_t count)
314{
315 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
316 port_attr);
317 struct mlx4_dev *mdev = info->dev;
318 struct mlx4_priv *priv = mlx4_priv(mdev);
319 enum mlx4_port_type types[MLX4_MAX_PORTS];
320 int i;
321 int err = 0;
322
323 if (!strcmp(buf, "ib\n"))
324 info->tmp_type = MLX4_PORT_TYPE_IB;
325 else if (!strcmp(buf, "eth\n"))
326 info->tmp_type = MLX4_PORT_TYPE_ETH;
327 else {
 328 mlx4_err(mdev, "%s is not a supported port type\n", buf);
329 return -EINVAL;
330 }
331
332 mutex_lock(&priv->port_mutex);
333 for (i = 0; i < mdev->caps.num_ports; i++)
334 types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
335 mdev->caps.port_type[i+1];
336
337 err = mlx4_check_port_params(mdev, types);
338 if (err)
339 goto out;
340
341 for (i = 1; i <= mdev->caps.num_ports; i++)
342 priv->port[i].tmp_type = 0;
343
344 err = mlx4_change_port_types(mdev, types);
345
346out:
347 mutex_unlock(&priv->port_mutex);
348 return err ? err : count;
349}
350
169static int mlx4_load_fw(struct mlx4_dev *dev) 351static int mlx4_load_fw(struct mlx4_dev *dev)
170{ 352{
171 struct mlx4_priv *priv = mlx4_priv(dev); 353 struct mlx4_priv *priv = mlx4_priv(dev);
@@ -211,7 +393,8 @@ static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
211 ((u64) (MLX4_CMPT_TYPE_QP * 393 ((u64) (MLX4_CMPT_TYPE_QP *
212 cmpt_entry_sz) << MLX4_CMPT_SHIFT), 394 cmpt_entry_sz) << MLX4_CMPT_SHIFT),
213 cmpt_entry_sz, dev->caps.num_qps, 395 cmpt_entry_sz, dev->caps.num_qps,
214 dev->caps.reserved_qps, 0, 0); 396 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
397 0, 0);
215 if (err) 398 if (err)
216 goto err; 399 goto err;
217 400
@@ -336,7 +519,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
336 init_hca->qpc_base, 519 init_hca->qpc_base,
337 dev_cap->qpc_entry_sz, 520 dev_cap->qpc_entry_sz,
338 dev->caps.num_qps, 521 dev->caps.num_qps,
339 dev->caps.reserved_qps, 0, 0); 522 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
523 0, 0);
340 if (err) { 524 if (err) {
341 mlx4_err(dev, "Failed to map QP context memory, aborting.\n"); 525 mlx4_err(dev, "Failed to map QP context memory, aborting.\n");
342 goto err_unmap_dmpt; 526 goto err_unmap_dmpt;
@@ -346,7 +530,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
346 init_hca->auxc_base, 530 init_hca->auxc_base,
347 dev_cap->aux_entry_sz, 531 dev_cap->aux_entry_sz,
348 dev->caps.num_qps, 532 dev->caps.num_qps,
349 dev->caps.reserved_qps, 0, 0); 533 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
534 0, 0);
350 if (err) { 535 if (err) {
351 mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n"); 536 mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n");
352 goto err_unmap_qp; 537 goto err_unmap_qp;
@@ -356,7 +541,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
356 init_hca->altc_base, 541 init_hca->altc_base,
357 dev_cap->altc_entry_sz, 542 dev_cap->altc_entry_sz,
358 dev->caps.num_qps, 543 dev->caps.num_qps,
359 dev->caps.reserved_qps, 0, 0); 544 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
545 0, 0);
360 if (err) { 546 if (err) {
361 mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n"); 547 mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n");
362 goto err_unmap_auxc; 548 goto err_unmap_auxc;
@@ -366,7 +552,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
366 init_hca->rdmarc_base, 552 init_hca->rdmarc_base,
367 dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift, 553 dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
368 dev->caps.num_qps, 554 dev->caps.num_qps,
369 dev->caps.reserved_qps, 0, 0); 555 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
556 0, 0);
370 if (err) { 557 if (err) {
371 mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n"); 558 mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
372 goto err_unmap_altc; 559 goto err_unmap_altc;
@@ -565,6 +752,8 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
565{ 752{
566 struct mlx4_priv *priv = mlx4_priv(dev); 753 struct mlx4_priv *priv = mlx4_priv(dev);
567 int err; 754 int err;
755 int port;
756 __be32 ib_port_default_caps;
568 757
569 err = mlx4_init_uar_table(dev); 758 err = mlx4_init_uar_table(dev);
570 if (err) { 759 if (err) {
@@ -663,8 +852,27 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
663 goto err_qp_table_free; 852 goto err_qp_table_free;
664 } 853 }
665 854
855 for (port = 1; port <= dev->caps.num_ports; port++) {
856 ib_port_default_caps = 0;
857 err = mlx4_get_port_ib_caps(dev, port, &ib_port_default_caps);
858 if (err)
859 mlx4_warn(dev, "failed to get port %d default "
860 "ib capabilities (%d). Continuing with "
861 "caps = 0\n", port, err);
862 dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
863 err = mlx4_SET_PORT(dev, port);
864 if (err) {
865 mlx4_err(dev, "Failed to set port %d, aborting\n",
866 port);
867 goto err_mcg_table_free;
868 }
869 }
870
666 return 0; 871 return 0;
667 872
873err_mcg_table_free:
874 mlx4_cleanup_mcg_table(dev);
875
668err_qp_table_free: 876err_qp_table_free:
669 mlx4_cleanup_qp_table(dev); 877 mlx4_cleanup_qp_table(dev);
670 878
@@ -728,11 +936,45 @@ no_msi:
728 priv->eq_table.eq[i].irq = dev->pdev->irq; 936 priv->eq_table.eq[i].irq = dev->pdev->irq;
729} 937}
730 938
939static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
940{
941 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
942 int err = 0;
943
944 info->dev = dev;
945 info->port = port;
946 mlx4_init_mac_table(dev, &info->mac_table);
947 mlx4_init_vlan_table(dev, &info->vlan_table);
948
949 sprintf(info->dev_name, "mlx4_port%d", port);
950 info->port_attr.attr.name = info->dev_name;
951 info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
952 info->port_attr.show = show_port_type;
953 info->port_attr.store = set_port_type;
954
955 err = device_create_file(&dev->pdev->dev, &info->port_attr);
956 if (err) {
957 mlx4_err(dev, "Failed to create file for port %d\n", port);
958 info->port = -1;
959 }
960
961 return err;
962}
963
964static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
965{
966 if (info->port < 0)
967 return;
968
969 device_remove_file(&info->dev->pdev->dev, &info->port_attr);
970}
971
731static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) 972static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
732{ 973{
733 struct mlx4_priv *priv; 974 struct mlx4_priv *priv;
734 struct mlx4_dev *dev; 975 struct mlx4_dev *dev;
735 int err; 976 int err;
977 int port;
736 978
737 printk(KERN_INFO PFX "Initializing %s\n", 979 printk(KERN_INFO PFX "Initializing %s\n",
738 pci_name(pdev)); 980 pci_name(pdev));
@@ -807,6 +1049,8 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
807 INIT_LIST_HEAD(&priv->ctx_list); 1049 INIT_LIST_HEAD(&priv->ctx_list);
808 spin_lock_init(&priv->ctx_lock); 1050 spin_lock_init(&priv->ctx_lock);
809 1051
1052 mutex_init(&priv->port_mutex);
1053
810 INIT_LIST_HEAD(&priv->pgdir_list); 1054 INIT_LIST_HEAD(&priv->pgdir_list);
811 mutex_init(&priv->pgdir_mutex); 1055 mutex_init(&priv->pgdir_mutex);
812 1056
@@ -842,15 +1086,24 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
842 if (err) 1086 if (err)
843 goto err_close; 1087 goto err_close;
844 1088
1089 for (port = 1; port <= dev->caps.num_ports; port++) {
1090 err = mlx4_init_port_info(dev, port);
1091 if (err)
1092 goto err_port;
1093 }
1094
845 err = mlx4_register_device(dev); 1095 err = mlx4_register_device(dev);
846 if (err) 1096 if (err)
847 goto err_cleanup; 1097 goto err_port;
848 1098
849 pci_set_drvdata(pdev, dev); 1099 pci_set_drvdata(pdev, dev);
850 1100
851 return 0; 1101 return 0;
852 1102
853err_cleanup: 1103err_port:
1104 for (port = 1; port <= dev->caps.num_ports; port++)
1105 mlx4_cleanup_port_info(&priv->port[port]);
1106
854 mlx4_cleanup_mcg_table(dev); 1107 mlx4_cleanup_mcg_table(dev);
855 mlx4_cleanup_qp_table(dev); 1108 mlx4_cleanup_qp_table(dev);
856 mlx4_cleanup_srq_table(dev); 1109 mlx4_cleanup_srq_table(dev);
@@ -907,8 +1160,10 @@ static void mlx4_remove_one(struct pci_dev *pdev)
907 if (dev) { 1160 if (dev) {
908 mlx4_unregister_device(dev); 1161 mlx4_unregister_device(dev);
909 1162
910 for (p = 1; p <= dev->caps.num_ports; ++p) 1163 for (p = 1; p <= dev->caps.num_ports; p++) {
1164 mlx4_cleanup_port_info(&priv->port[p]);
911 mlx4_CLOSE_PORT(dev, p); 1165 mlx4_CLOSE_PORT(dev, p);
1166 }
912 1167
913 mlx4_cleanup_mcg_table(dev); 1168 mlx4_cleanup_mcg_table(dev);
914 mlx4_cleanup_qp_table(dev); 1169 mlx4_cleanup_qp_table(dev);
@@ -948,6 +1203,8 @@ static struct pci_device_id mlx4_pci_table[] = {
948 { PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */ 1203 { PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */
949 { PCI_VDEVICE(MELLANOX, 0x6732) }, /* MT25408 "Hermon" DDR PCIe gen2 */ 1204 { PCI_VDEVICE(MELLANOX, 0x6732) }, /* MT25408 "Hermon" DDR PCIe gen2 */
950 { PCI_VDEVICE(MELLANOX, 0x673c) }, /* MT25408 "Hermon" QDR PCIe gen2 */ 1205 { PCI_VDEVICE(MELLANOX, 0x673c) }, /* MT25408 "Hermon" QDR PCIe gen2 */
1206 { PCI_VDEVICE(MELLANOX, 0x6368) }, /* MT25408 "Hermon" EN 10GigE */
1207 { PCI_VDEVICE(MELLANOX, 0x6750) }, /* MT25408 "Hermon" EN 10GigE PCIe gen2 */
951 { 0, } 1208 { 0, }
952}; 1209};
953 1210
@@ -960,10 +1217,28 @@ static struct pci_driver mlx4_driver = {
960 .remove = __devexit_p(mlx4_remove_one) 1217 .remove = __devexit_p(mlx4_remove_one)
961}; 1218};
962 1219
1220static int __init mlx4_verify_params(void)
1221{
1222 if ((log_num_mac < 0) || (log_num_mac > 7)) {
1223 printk(KERN_WARNING "mlx4_core: bad num_mac: %d\n", log_num_mac);
1224 return -1;
1225 }
1226
1227 if ((log_num_vlan < 0) || (log_num_vlan > 7)) {
1228 printk(KERN_WARNING "mlx4_core: bad num_vlan: %d\n", log_num_vlan);
1229 return -1;
1230 }
1231
1232 return 0;
1233}
1234
963static int __init mlx4_init(void) 1235static int __init mlx4_init(void)
964{ 1236{
965 int ret; 1237 int ret;
966 1238
1239 if (mlx4_verify_params())
1240 return -EINVAL;
1241
967 ret = mlx4_catas_init(); 1242 ret = mlx4_catas_init();
968 if (ret) 1243 if (ret)
969 return ret; 1244 return ret;
diff --git a/drivers/net/mlx4/mcg.c b/drivers/net/mlx4/mcg.c
index c83f88ce0736..592c01ae2c5d 100644
--- a/drivers/net/mlx4/mcg.c
+++ b/drivers/net/mlx4/mcg.c
@@ -368,8 +368,8 @@ int mlx4_init_mcg_table(struct mlx4_dev *dev)
368 struct mlx4_priv *priv = mlx4_priv(dev); 368 struct mlx4_priv *priv = mlx4_priv(dev);
369 int err; 369 int err;
370 370
371 err = mlx4_bitmap_init(&priv->mcg_table.bitmap, 371 err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms,
372 dev->caps.num_amgms, dev->caps.num_amgms - 1, 0); 372 dev->caps.num_amgms - 1, 0, 0);
373 if (err) 373 if (err)
374 return err; 374 return err;
375 375
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index 5337e3ac3e78..34c909deaff3 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -87,6 +87,9 @@ enum {
87 87
88#ifdef CONFIG_MLX4_DEBUG 88#ifdef CONFIG_MLX4_DEBUG
89extern int mlx4_debug_level; 89extern int mlx4_debug_level;
90#else /* CONFIG_MLX4_DEBUG */
91#define mlx4_debug_level (0)
92#endif /* CONFIG_MLX4_DEBUG */
90 93
91#define mlx4_dbg(mdev, format, arg...) \ 94#define mlx4_dbg(mdev, format, arg...) \
92 do { \ 95 do { \
@@ -94,12 +97,6 @@ extern int mlx4_debug_level;
94 dev_printk(KERN_DEBUG, &mdev->pdev->dev, format, ## arg); \ 97 dev_printk(KERN_DEBUG, &mdev->pdev->dev, format, ## arg); \
95 } while (0) 98 } while (0)
96 99
97#else /* CONFIG_MLX4_DEBUG */
98
99#define mlx4_dbg(mdev, format, arg...) do { (void) mdev; } while (0)
100
101#endif /* CONFIG_MLX4_DEBUG */
102
103#define mlx4_err(mdev, format, arg...) \ 100#define mlx4_err(mdev, format, arg...) \
104 dev_err(&mdev->pdev->dev, format, ## arg) 101 dev_err(&mdev->pdev->dev, format, ## arg)
105#define mlx4_info(mdev, format, arg...) \ 102#define mlx4_info(mdev, format, arg...) \
@@ -111,6 +108,7 @@ struct mlx4_bitmap {
111 u32 last; 108 u32 last;
112 u32 top; 109 u32 top;
113 u32 max; 110 u32 max;
111 u32 reserved_top;
114 u32 mask; 112 u32 mask;
115 spinlock_t lock; 113 spinlock_t lock;
116 unsigned long *table; 114 unsigned long *table;
@@ -251,6 +249,38 @@ struct mlx4_catas_err {
251 struct list_head list; 249 struct list_head list;
252}; 250};
253 251
252#define MLX4_MAX_MAC_NUM 128
253#define MLX4_MAC_TABLE_SIZE (MLX4_MAX_MAC_NUM << 3)
254
255struct mlx4_mac_table {
256 __be64 entries[MLX4_MAX_MAC_NUM];
257 int refs[MLX4_MAX_MAC_NUM];
258 struct mutex mutex;
259 int total;
260 int max;
261};
262
263#define MLX4_MAX_VLAN_NUM 128
264#define MLX4_VLAN_TABLE_SIZE (MLX4_MAX_VLAN_NUM << 2)
265
266struct mlx4_vlan_table {
267 __be32 entries[MLX4_MAX_VLAN_NUM];
268 int refs[MLX4_MAX_VLAN_NUM];
269 struct mutex mutex;
270 int total;
271 int max;
272};
273
274struct mlx4_port_info {
275 struct mlx4_dev *dev;
276 int port;
277 char dev_name[16];
278 struct device_attribute port_attr;
279 enum mlx4_port_type tmp_type;
280 struct mlx4_mac_table mac_table;
281 struct mlx4_vlan_table vlan_table;
282};
283
254struct mlx4_priv { 284struct mlx4_priv {
255 struct mlx4_dev dev; 285 struct mlx4_dev dev;
256 286
@@ -279,6 +309,8 @@ struct mlx4_priv {
279 309
280 struct mlx4_uar driver_uar; 310 struct mlx4_uar driver_uar;
281 void __iomem *kar; 311 void __iomem *kar;
312 struct mlx4_port_info port[MLX4_MAX_PORTS + 1];
313 struct mutex port_mutex;
282}; 314};
283 315
284static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev) 316static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
@@ -288,7 +320,10 @@ static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
288 320
289u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap); 321u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap);
290void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj); 322void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj);
291int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, u32 reserved); 323u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align);
324void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt);
325int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
 326 u32 reserved_bot, u32 reserved_top);
292void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap); 327void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap);
293 328
294int mlx4_reset(struct mlx4_dev *dev); 329int mlx4_reset(struct mlx4_dev *dev);
@@ -346,4 +381,10 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
346 381
347void mlx4_handle_catas_err(struct mlx4_dev *dev); 382void mlx4_handle_catas_err(struct mlx4_dev *dev);
348 383
384void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table);
385void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
386
387int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port);
388int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps);
389
349#endif /* MLX4_H */ 390#endif /* MLX4_H */
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
new file mode 100644
index 000000000000..98ddc0811f93
--- /dev/null
+++ b/drivers/net/mlx4/mlx4_en.h
@@ -0,0 +1,561 @@
1/*
2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#ifndef _MLX4_EN_H_
35#define _MLX4_EN_H_
36
37#include <linux/compiler.h>
38#include <linux/list.h>
39#include <linux/mutex.h>
40#include <linux/netdevice.h>
41#include <linux/inet_lro.h>
42
43#include <linux/mlx4/device.h>
44#include <linux/mlx4/qp.h>
45#include <linux/mlx4/cq.h>
46#include <linux/mlx4/srq.h>
47#include <linux/mlx4/doorbell.h>
48
49#include "en_port.h"
50
51#define DRV_NAME "mlx4_en"
52#define DRV_VERSION "1.4.0"
53#define DRV_RELDATE "Sep 2008"
54
55
56#define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN)
57
58#define mlx4_dbg(mlevel, priv, format, arg...) \
59 if (NETIF_MSG_##mlevel & priv->msg_enable) \
60 printk(KERN_DEBUG "%s %s: " format , DRV_NAME ,\
61 (&priv->mdev->pdev->dev)->bus_id , ## arg)
62
63#define mlx4_err(mdev, format, arg...) \
64 printk(KERN_ERR "%s %s: " format , DRV_NAME ,\
65 (&mdev->pdev->dev)->bus_id , ## arg)
66#define mlx4_info(mdev, format, arg...) \
67 printk(KERN_INFO "%s %s: " format , DRV_NAME ,\
68 (&mdev->pdev->dev)->bus_id , ## arg)
69#define mlx4_warn(mdev, format, arg...) \
70 printk(KERN_WARNING "%s %s: " format , DRV_NAME ,\
71 (&mdev->pdev->dev)->bus_id , ## arg)
72
73/*
74 * Device constants
75 */
76
77
78#define MLX4_EN_PAGE_SHIFT 12
79#define MLX4_EN_PAGE_SIZE (1 << MLX4_EN_PAGE_SHIFT)
80#define MAX_TX_RINGS 16
81#define MAX_RX_RINGS 16
82#define MAX_RSS_MAP_SIZE 64
83#define RSS_FACTOR 2
84#define TXBB_SIZE 64
85#define HEADROOM (2048 / TXBB_SIZE + 1)
86#define MAX_LSO_HDR_SIZE 92
87#define STAMP_STRIDE 64
88#define STAMP_DWORDS (STAMP_STRIDE / 4)
89#define STAMP_SHIFT 31
90#define STAMP_VAL 0x7fffffff
91#define STATS_DELAY (HZ / 4)
92
93/* Typical TSO descriptor with 16 gather entries is 352 bytes... */
94#define MAX_DESC_SIZE 512
95#define MAX_DESC_TXBBS (MAX_DESC_SIZE / TXBB_SIZE)
96
97/*
98 * OS related constants and tunables
99 */
100
101#define MLX4_EN_WATCHDOG_TIMEOUT (15 * HZ)
102
103#define MLX4_EN_ALLOC_ORDER 2
104#define MLX4_EN_ALLOC_SIZE (PAGE_SIZE << MLX4_EN_ALLOC_ORDER)
105
106#define MLX4_EN_MAX_LRO_DESCRIPTORS 32
107
108/* Receive fragment sizes; we use at most 4 fragments (for 9600 byte MTU
109 * and 4K allocations) */
110enum {
111 FRAG_SZ0 = 512 - NET_IP_ALIGN,
112 FRAG_SZ1 = 1024,
113 FRAG_SZ2 = 4096,
114 FRAG_SZ3 = MLX4_EN_ALLOC_SIZE
115};
116#define MLX4_EN_MAX_RX_FRAGS 4
117
118/* Minimum ring size for our page-allocation scheme to work */
119#define MLX4_EN_MIN_RX_SIZE (MLX4_EN_ALLOC_SIZE / SMP_CACHE_BYTES)
120#define MLX4_EN_MIN_TX_SIZE (4096 / TXBB_SIZE)
121
122#define MLX4_EN_TX_RING_NUM 9
123#define MLX4_EN_DEF_TX_RING_SIZE 1024
124#define MLX4_EN_DEF_RX_RING_SIZE 1024
125
126/* Target number of bytes to coalesce with interrupt moderation */
127#define MLX4_EN_RX_COAL_TARGET 0x20000
128#define MLX4_EN_RX_COAL_TIME 0x10
129
130#define MLX4_EN_TX_COAL_PKTS 5
131#define MLX4_EN_TX_COAL_TIME 0x80
132
133#define MLX4_EN_RX_RATE_LOW 400000
134#define MLX4_EN_RX_COAL_TIME_LOW 0
135#define MLX4_EN_RX_RATE_HIGH 450000
136#define MLX4_EN_RX_COAL_TIME_HIGH 128
137#define MLX4_EN_RX_SIZE_THRESH 1024
138#define MLX4_EN_RX_RATE_THRESH (1000000 / MLX4_EN_RX_COAL_TIME_HIGH)
139#define MLX4_EN_SAMPLE_INTERVAL 0
140
141#define MLX4_EN_AUTO_CONF 0xffff
142
143#define MLX4_EN_DEF_RX_PAUSE 1
144#define MLX4_EN_DEF_TX_PAUSE 1
145
146/* Interval between successive polls in the Tx routine when polling is used
147   instead of interrupts (in per-core Tx rings) - should be a power of 2 */
148#define MLX4_EN_TX_POLL_MODER 16
149#define MLX4_EN_TX_POLL_TIMEOUT (HZ / 4)
150
151#define ETH_LLC_SNAP_SIZE 8
152
153#define SMALL_PACKET_SIZE (256 - NET_IP_ALIGN)
154#define HEADER_COPY_SIZE (128 - NET_IP_ALIGN)
155
156#define MLX4_EN_MIN_MTU 46
157#define ETH_BCAST 0xffffffffffffULL
158
159#ifdef MLX4_EN_PERF_STAT
160/* Number of samples to 'average' */
161#define AVG_SIZE 128
162#define AVG_FACTOR 1024
163#define NUM_PERF_STATS NUM_PERF_COUNTERS
164
165#define INC_PERF_COUNTER(cnt) (++(cnt))
166#define ADD_PERF_COUNTER(cnt, add) ((cnt) += (add))
167#define AVG_PERF_COUNTER(cnt, sample) \
168 ((cnt) = ((cnt) * (AVG_SIZE - 1) + (sample) * AVG_FACTOR) / AVG_SIZE)
169#define GET_PERF_COUNTER(cnt) (cnt)
170#define GET_AVG_PERF_COUNTER(cnt) ((cnt) / AVG_FACTOR)
171
172#else
173
174#define NUM_PERF_STATS 0
175#define INC_PERF_COUNTER(cnt) do {} while (0)
176#define ADD_PERF_COUNTER(cnt, add) do {} while (0)
177#define AVG_PERF_COUNTER(cnt, sample) do {} while (0)
178#define GET_PERF_COUNTER(cnt) (0)
179#define GET_AVG_PERF_COUNTER(cnt) (0)
180#endif /* MLX4_EN_PERF_STAT */
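/*
 * AVG_PERF_COUNTER above keeps an exponential moving average in fixed point:
 * each sample is scaled by AVG_FACTOR (1024) and blended in with weight
 * 1/AVG_SIZE (1/128); GET_AVG_PERF_COUNTER() divides the accumulator by
 * AVG_FACTOR to recover the average. For example (illustrative), a steady
 * sample value of 100 makes the counter converge to 100 * 1024 = 102400.
 */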
181
182/*
183 * Configurables
184 */
185
186enum cq_type {
187 RX = 0,
188 TX = 1,
189};
190
191
192/*
193 * Useful macros
194 */
195#define ROUNDUP_LOG2(x) ilog2(roundup_pow_of_two(x))
196#define XNOR(x, y) (!(x) == !(y))
197#define ILLEGAL_MAC(addr) (addr == 0xffffffffffffULL || addr == 0x0)
198
199
200struct mlx4_en_tx_info {
201 struct sk_buff *skb;
202 u32 nr_txbb;
203 u8 linear;
204 u8 data_offset;
205};
206
207
208#define MLX4_EN_BIT_DESC_OWN 0x80000000
209#define CTRL_SIZE sizeof(struct mlx4_wqe_ctrl_seg)
210#define MLX4_EN_MEMTYPE_PAD 0x100
211#define DS_SIZE sizeof(struct mlx4_wqe_data_seg)
212
213
214struct mlx4_en_tx_desc {
215 struct mlx4_wqe_ctrl_seg ctrl;
216 union {
217 struct mlx4_wqe_data_seg data; /* at least one data segment */
218 struct mlx4_wqe_lso_seg lso;
219 struct mlx4_wqe_inline_seg inl;
220 };
221};
222
223#define MLX4_EN_USE_SRQ 0x01000000
224
225struct mlx4_en_rx_alloc {
226 struct page *page;
227 u16 offset;
228};
229
230struct mlx4_en_tx_ring {
231 struct mlx4_hwq_resources wqres;
232 u32 size ; /* number of TXBBs */
233 u32 size_mask;
234 u16 stride;
235 u16 cqn; /* index of port CQ associated with this ring */
236 u32 prod;
237 u32 cons;
238 u32 buf_size;
239 u32 doorbell_qpn;
240 void *buf;
241 u16 poll_cnt;
242 int blocked;
243 struct mlx4_en_tx_info *tx_info;
244 u8 *bounce_buf;
245 u32 last_nr_txbb;
246 struct mlx4_qp qp;
247 struct mlx4_qp_context context;
248 int qpn;
249 enum mlx4_qp_state qp_state;
250 struct mlx4_srq dummy;
251 unsigned long bytes;
252 unsigned long packets;
253 spinlock_t comp_lock;
254};
255
256struct mlx4_en_rx_desc {
257 struct mlx4_wqe_srq_next_seg next;
258 /* actual number of entries depends on rx ring stride */
259 struct mlx4_wqe_data_seg data[0];
260};
261
262struct mlx4_en_rx_ring {
263 struct mlx4_srq srq;
264 struct mlx4_hwq_resources wqres;
265 struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
266 struct net_lro_mgr lro;
267 u32 size ; /* number of Rx descs*/
268 u32 actual_size;
269 u32 size_mask;
270 u16 stride;
271 u16 log_stride;
272 u16 cqn; /* index of port CQ associated with this ring */
273 u32 prod;
274 u32 cons;
275 u32 buf_size;
276 int need_refill;
277 int full;
278 void *buf;
279 void *rx_info;
280 unsigned long bytes;
281 unsigned long packets;
282};
283
284
285static inline int mlx4_en_can_lro(__be16 status)
286{
287 return (status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
288 MLX4_CQE_STATUS_IPV4F |
289 MLX4_CQE_STATUS_IPV6 |
290 MLX4_CQE_STATUS_IPV4OPT |
291 MLX4_CQE_STATUS_TCP |
292 MLX4_CQE_STATUS_UDP |
293 MLX4_CQE_STATUS_IPOK)) ==
294 cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
295 MLX4_CQE_STATUS_IPOK |
296 MLX4_CQE_STATUS_TCP);
297}
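/*
 * In other words (descriptive): after masking, the CQE status must be exactly
 * IPv4 + IP checksum OK + TCP -- fragmented packets, IPv4 options, IPv6 and
 * UDP set additional bits under the mask, fail the comparison, and take the
 * regular (non-LRO) receive path.
 */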
298
299struct mlx4_en_cq {
300 struct mlx4_cq mcq;
301 struct mlx4_hwq_resources wqres;
302 int ring;
303 spinlock_t lock;
304 struct net_device *dev;
305 struct napi_struct napi;
306 /* Per-core Tx cq processing support */
307 struct timer_list timer;
308 int size;
309 int buf_size;
310 unsigned vector;
311 enum cq_type is_tx;
312 u16 moder_time;
313 u16 moder_cnt;
314 int armed;
315 struct mlx4_cqe *buf;
316#define MLX4_EN_OPCODE_ERROR 0x1e
317};
318
319struct mlx4_en_port_profile {
320 u32 flags;
321 u32 tx_ring_num;
322 u32 rx_ring_num;
323 u32 tx_ring_size;
324 u32 rx_ring_size;
325 u8 rx_pause;
326 u8 rx_ppp;
327 u8 tx_pause;
328 u8 tx_ppp;
329};
330
331struct mlx4_en_profile {
332 int rss_xor;
333 int num_lro;
334 u8 rss_mask;
335 u32 active_ports;
336 u32 small_pkt_int;
337 int rx_moder_cnt;
338 int rx_moder_time;
339 int auto_moder;
340 u8 no_reset;
341 struct mlx4_en_port_profile prof[MLX4_MAX_PORTS + 1];
342};
343
344struct mlx4_en_dev {
345 struct mlx4_dev *dev;
346 struct pci_dev *pdev;
347 struct mutex state_lock;
348 struct net_device *pndev[MLX4_MAX_PORTS + 1];
349 u32 port_cnt;
350 bool device_up;
351 struct mlx4_en_profile profile;
352 u32 LSO_support;
353 struct workqueue_struct *workqueue;
354 struct device *dma_device;
355 void __iomem *uar_map;
356 struct mlx4_uar priv_uar;
357 struct mlx4_mr mr;
358 u32 priv_pdn;
359 spinlock_t uar_lock;
360};
361
362
363struct mlx4_en_rss_map {
364 int size;
365 int base_qpn;
366 u16 map[MAX_RSS_MAP_SIZE];
367 struct mlx4_qp qps[MAX_RSS_MAP_SIZE];
368 enum mlx4_qp_state state[MAX_RSS_MAP_SIZE];
369 struct mlx4_qp indir_qp;
370 enum mlx4_qp_state indir_state;
371};
372
373struct mlx4_en_rss_context {
374 __be32 base_qpn;
375 __be32 default_qpn;
376 u16 reserved;
377 u8 hash_fn;
378 u8 flags;
379 __be32 rss_key[10];
380};
381
382struct mlx4_en_pkt_stats {
383 unsigned long broadcast;
384 unsigned long rx_prio[8];
385 unsigned long tx_prio[8];
386#define NUM_PKT_STATS 17
387};
388
389struct mlx4_en_port_stats {
390 unsigned long lro_aggregated;
391 unsigned long lro_flushed;
392 unsigned long lro_no_desc;
393 unsigned long tso_packets;
394 unsigned long queue_stopped;
395 unsigned long wake_queue;
396 unsigned long tx_timeout;
397 unsigned long rx_alloc_failed;
398 unsigned long rx_chksum_good;
399 unsigned long rx_chksum_none;
400 unsigned long tx_chksum_offload;
401#define NUM_PORT_STATS 11
402};
403
404struct mlx4_en_perf_stats {
405 u32 tx_poll;
406 u64 tx_pktsz_avg;
407 u32 inflight_avg;
408 u16 tx_coal_avg;
409 u16 rx_coal_avg;
410 u32 napi_quota;
411#define NUM_PERF_COUNTERS 6
412};
413
414struct mlx4_en_frag_info {
415 u16 frag_size;
416 u16 frag_prefix_size;
417 u16 frag_stride;
418 u16 frag_align;
419 u16 last_offset;
420
421};
422
423struct mlx4_en_priv {
424 struct mlx4_en_dev *mdev;
425 struct mlx4_en_port_profile *prof;
426 struct net_device *dev;
427 struct vlan_group *vlgrp;
428 struct net_device_stats stats;
429 struct net_device_stats ret_stats;
430 spinlock_t stats_lock;
431
432 unsigned long last_moder_packets;
433 unsigned long last_moder_tx_packets;
434 unsigned long last_moder_bytes;
435 unsigned long last_moder_jiffies;
436 int last_moder_time;
437 u16 rx_usecs;
438 u16 rx_frames;
439 u16 tx_usecs;
440 u16 tx_frames;
441 u32 pkt_rate_low;
442 u16 rx_usecs_low;
443 u32 pkt_rate_high;
444 u16 rx_usecs_high;
445 u16 sample_interval;
446 u16 adaptive_rx_coal;
447 u32 msg_enable;
448
449 struct mlx4_hwq_resources res;
450 int link_state;
451 int last_link_state;
452 bool port_up;
453 int port;
454 int registered;
455 int allocated;
456 int stride;
457 int rx_csum;
458 u64 mac;
459 int mac_index;
460 unsigned max_mtu;
461 int base_qpn;
462
463 struct mlx4_en_rss_map rss_map;
464 u16 tx_prio_map[8];
465 u32 flags;
466#define MLX4_EN_FLAG_PROMISC 0x1
467 u32 tx_ring_num;
468 u32 rx_ring_num;
469 u32 rx_skb_size;
470 struct mlx4_en_frag_info frag_info[MLX4_EN_MAX_RX_FRAGS];
471 u16 num_frags;
472 u16 log_rx_info;
473
474 struct mlx4_en_tx_ring tx_ring[MAX_TX_RINGS];
475 struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS];
476 struct mlx4_en_cq tx_cq[MAX_TX_RINGS];
477 struct mlx4_en_cq rx_cq[MAX_RX_RINGS];
478 struct work_struct mcast_task;
479 struct work_struct mac_task;
480 struct delayed_work refill_task;
481 struct work_struct watchdog_task;
482 struct work_struct linkstate_task;
483 struct delayed_work stats_task;
484 struct mlx4_en_perf_stats pstats;
485 struct mlx4_en_pkt_stats pkstats;
486 struct mlx4_en_port_stats port_stats;
487 struct dev_mc_list *mc_list;
488 struct mlx4_en_stat_out_mbox hw_stats;
489};
490
491
492void mlx4_en_destroy_netdev(struct net_device *dev);
493int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
494 struct mlx4_en_port_profile *prof);
495
496int mlx4_en_get_profile(struct mlx4_en_dev *mdev);
497
498int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
499 int entries, int ring, enum cq_type mode);
500void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
501int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
502void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
503int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
504int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
505
506void mlx4_en_poll_tx_cq(unsigned long data);
507void mlx4_en_tx_irq(struct mlx4_cq *mcq);
508int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
509
510int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
511 u32 size, u16 stride);
512void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring);
513int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
514 struct mlx4_en_tx_ring *ring,
515 int cq, int srqn);
516void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
517 struct mlx4_en_tx_ring *ring);
518
519int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
520 struct mlx4_en_rx_ring *ring,
521 u32 size, u16 stride);
522void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
523 struct mlx4_en_rx_ring *ring);
524int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv);
525void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
526 struct mlx4_en_rx_ring *ring);
527int mlx4_en_process_rx_cq(struct net_device *dev,
528 struct mlx4_en_cq *cq,
529 int budget);
530int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
531void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
532 int is_tx, int rss, int qpn, int cqn, int srqn,
533 struct mlx4_qp_context *context);
534int mlx4_en_map_buffer(struct mlx4_buf *buf);
535void mlx4_en_unmap_buffer(struct mlx4_buf *buf);
536
537void mlx4_en_calc_rx_buf(struct net_device *dev);
538void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
539 struct mlx4_en_rss_map *rss_map,
540 int num_entries, int num_rings);
541void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num);
542int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
543void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
544int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring);
545void mlx4_en_rx_refill(struct work_struct *work);
546void mlx4_en_rx_irq(struct mlx4_cq *mcq);
547
548int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode);
549int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, u8 port, struct vlan_group *grp);
550int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
551 u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx);
552int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
553 u8 promisc);
554
555int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset);
556
557/*
558 * Globals
559 */
560extern const struct ethtool_ops mlx4_en_ethtool_ops;
561#endif
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
index d1dd5b48dbd1..0caf74cae8bc 100644
--- a/drivers/net/mlx4/mr.c
+++ b/drivers/net/mlx4/mr.c
@@ -461,7 +461,7 @@ int mlx4_init_mr_table(struct mlx4_dev *dev)
461 int err; 461 int err;
462 462
463 err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts, 463 err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
464 ~0, dev->caps.reserved_mrws); 464 ~0, dev->caps.reserved_mrws, 0);
465 if (err) 465 if (err)
466 return err; 466 return err;
467 467
diff --git a/drivers/net/mlx4/pd.c b/drivers/net/mlx4/pd.c
index aa616892d09c..26d1a7a9e375 100644
--- a/drivers/net/mlx4/pd.c
+++ b/drivers/net/mlx4/pd.c
@@ -62,7 +62,7 @@ int mlx4_init_pd_table(struct mlx4_dev *dev)
62 struct mlx4_priv *priv = mlx4_priv(dev); 62 struct mlx4_priv *priv = mlx4_priv(dev);
63 63
64 return mlx4_bitmap_init(&priv->pd_bitmap, dev->caps.num_pds, 64 return mlx4_bitmap_init(&priv->pd_bitmap, dev->caps.num_pds,
65 (1 << 24) - 1, dev->caps.reserved_pds); 65 (1 << 24) - 1, dev->caps.reserved_pds, 0);
66} 66}
67 67
68void mlx4_cleanup_pd_table(struct mlx4_dev *dev) 68void mlx4_cleanup_pd_table(struct mlx4_dev *dev)
@@ -100,7 +100,7 @@ int mlx4_init_uar_table(struct mlx4_dev *dev)
100 100
101 return mlx4_bitmap_init(&mlx4_priv(dev)->uar_table.bitmap, 101 return mlx4_bitmap_init(&mlx4_priv(dev)->uar_table.bitmap,
102 dev->caps.num_uars, dev->caps.num_uars - 1, 102 dev->caps.num_uars, dev->caps.num_uars - 1,
103 max(128, dev->caps.reserved_uars)); 103 max(128, dev->caps.reserved_uars), 0);
104} 104}
105 105
106void mlx4_cleanup_uar_table(struct mlx4_dev *dev) 106void mlx4_cleanup_uar_table(struct mlx4_dev *dev)
diff --git a/drivers/net/mlx4/port.c b/drivers/net/mlx4/port.c
new file mode 100644
index 000000000000..0a057e5dc63b
--- /dev/null
+++ b/drivers/net/mlx4/port.c
@@ -0,0 +1,319 @@
1/*
2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/errno.h>
34#include <linux/if_ether.h>
35
36#include <linux/mlx4/cmd.h>
37
38#include "mlx4.h"
39
40#define MLX4_MAC_VALID (1ull << 63)
41#define MLX4_MAC_MASK 0xffffffffffffULL
42
43#define MLX4_VLAN_VALID (1u << 31)
44#define MLX4_VLAN_MASK 0xfff
45
46void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
47{
48 int i;
49
50 mutex_init(&table->mutex);
51 for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
52 table->entries[i] = 0;
53 table->refs[i] = 0;
54 }
55 table->max = 1 << dev->caps.log_num_macs;
56 table->total = 0;
57}
58
59void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
60{
61 int i;
62
63 mutex_init(&table->mutex);
64 for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
65 table->entries[i] = 0;
66 table->refs[i] = 0;
67 }
68 table->max = 1 << dev->caps.log_num_vlans;
69 table->total = 0;
70}
71
72static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
73 __be64 *entries)
74{
75 struct mlx4_cmd_mailbox *mailbox;
76 u32 in_mod;
77 int err;
78
79 mailbox = mlx4_alloc_cmd_mailbox(dev);
80 if (IS_ERR(mailbox))
81 return PTR_ERR(mailbox);
82
83 memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE);
84
85 in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port;
86 err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
87 MLX4_CMD_TIME_CLASS_B);
88
89 mlx4_free_cmd_mailbox(dev, mailbox);
90 return err;
91}
92
93int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index)
94{
95 struct mlx4_mac_table *table = &mlx4_priv(dev)->port[port].mac_table;
96 int i, err = 0;
97 int free = -1;
98
99 mlx4_dbg(dev, "Registering MAC: 0x%llx\n", (unsigned long long) mac);
100 mutex_lock(&table->mutex);
101 for (i = 0; i < MLX4_MAX_MAC_NUM - 1; i++) {
102 if (free < 0 && !table->refs[i]) {
103 free = i;
104 continue;
105 }
106
107 if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
108			/* MAC already registered, increase reference count */
109 *index = i;
110 ++table->refs[i];
111 goto out;
112 }
113 }
114 mlx4_dbg(dev, "Free MAC index is %d\n", free);
115
116 if (table->total == table->max) {
117 /* No free mac entries */
118 err = -ENOSPC;
119 goto out;
120 }
121
122 /* Register new MAC */
123 table->refs[free] = 1;
124 table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);
125
126 err = mlx4_set_port_mac_table(dev, port, table->entries);
127 if (unlikely(err)) {
128 mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) mac);
129 table->refs[free] = 0;
130 table->entries[free] = 0;
131 goto out;
132 }
133
134 *index = free;
135 ++table->total;
136out:
137 mutex_unlock(&table->mutex);
138 return err;
139}
140EXPORT_SYMBOL_GPL(mlx4_register_mac);
141
142void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int index)
143{
144 struct mlx4_mac_table *table = &mlx4_priv(dev)->port[port].mac_table;
145
146 mutex_lock(&table->mutex);
147 if (!table->refs[index]) {
148 mlx4_warn(dev, "No MAC entry for index %d\n", index);
149 goto out;
150 }
151 if (--table->refs[index]) {
152		mlx4_warn(dev, "Have more references for index %d, "
153 "no need to modify MAC table\n", index);
154 goto out;
155 }
156 table->entries[index] = 0;
157 mlx4_set_port_mac_table(dev, port, table->entries);
158 --table->total;
159out:
160 mutex_unlock(&table->mutex);
161}
162EXPORT_SYMBOL_GPL(mlx4_unregister_mac);
163
164static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
165 __be32 *entries)
166{
167 struct mlx4_cmd_mailbox *mailbox;
168 u32 in_mod;
169 int err;
170
171 mailbox = mlx4_alloc_cmd_mailbox(dev);
172 if (IS_ERR(mailbox))
173 return PTR_ERR(mailbox);
174
175 memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE);
176 in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port;
177 err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
178 MLX4_CMD_TIME_CLASS_B);
179
180 mlx4_free_cmd_mailbox(dev, mailbox);
181
182 return err;
183}
184
185int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
186{
187 struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
188 int i, err = 0;
189 int free = -1;
190
191 mutex_lock(&table->mutex);
192 for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) {
193 if (free < 0 && (table->refs[i] == 0)) {
194 free = i;
195 continue;
196 }
197
198 if (table->refs[i] &&
199 (vlan == (MLX4_VLAN_MASK &
200 be32_to_cpu(table->entries[i])))) {
201			/* Vlan already registered, increase reference count */
202 *index = i;
203 ++table->refs[i];
204 goto out;
205 }
206 }
207
208 if (table->total == table->max) {
209 /* No free vlan entries */
210 err = -ENOSPC;
211 goto out;
212 }
213
214	/* Register new VLAN */
215 table->refs[free] = 1;
216 table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);
217
218 err = mlx4_set_port_vlan_table(dev, port, table->entries);
219 if (unlikely(err)) {
220 mlx4_warn(dev, "Failed adding vlan: %u\n", vlan);
221 table->refs[free] = 0;
222 table->entries[free] = 0;
223 goto out;
224 }
225
226 *index = free;
227 ++table->total;
228out:
229 mutex_unlock(&table->mutex);
230 return err;
231}
232EXPORT_SYMBOL_GPL(mlx4_register_vlan);
233
234void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
235{
236 struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
237
238 if (index < MLX4_VLAN_REGULAR) {
239 mlx4_warn(dev, "Trying to free special vlan index %d\n", index);
240 return;
241 }
242
243 mutex_lock(&table->mutex);
244 if (!table->refs[index]) {
245 mlx4_warn(dev, "No vlan entry for index %d\n", index);
246 goto out;
247 }
248 if (--table->refs[index]) {
249		mlx4_dbg(dev, "Have more references for index %d, "
250 "no need to modify vlan table\n", index);
251 goto out;
252 }
253 table->entries[index] = 0;
254 mlx4_set_port_vlan_table(dev, port, table->entries);
255 --table->total;
256out:
257 mutex_unlock(&table->mutex);
258}
259EXPORT_SYMBOL_GPL(mlx4_unregister_vlan);
260
261int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
262{
263 struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
264 u8 *inbuf, *outbuf;
265 int err;
266
267 inmailbox = mlx4_alloc_cmd_mailbox(dev);
268 if (IS_ERR(inmailbox))
269 return PTR_ERR(inmailbox);
270
271 outmailbox = mlx4_alloc_cmd_mailbox(dev);
272 if (IS_ERR(outmailbox)) {
273 mlx4_free_cmd_mailbox(dev, inmailbox);
274 return PTR_ERR(outmailbox);
275 }
276
277 inbuf = inmailbox->buf;
278 outbuf = outmailbox->buf;
279 memset(inbuf, 0, 256);
280 memset(outbuf, 0, 256);
281 inbuf[0] = 1;
282 inbuf[1] = 1;
283 inbuf[2] = 1;
284 inbuf[3] = 1;
285 *(__be16 *) (&inbuf[16]) = cpu_to_be16(0x0015);
286 *(__be32 *) (&inbuf[20]) = cpu_to_be32(port);
287
288 err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3,
289 MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C);
290 if (!err)
291 *caps = *(__be32 *) (outbuf + 84);
292 mlx4_free_cmd_mailbox(dev, inmailbox);
293 mlx4_free_cmd_mailbox(dev, outmailbox);
294 return err;
295}
296
297int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port)
298{
299 struct mlx4_cmd_mailbox *mailbox;
300 int err;
301 u8 is_eth = dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
302
303 mailbox = mlx4_alloc_cmd_mailbox(dev);
304 if (IS_ERR(mailbox))
305 return PTR_ERR(mailbox);
306
307 memset(mailbox->buf, 0, 256);
308 if (is_eth) {
309 ((u8 *) mailbox->buf)[3] = 6;
310 ((__be16 *) mailbox->buf)[4] = cpu_to_be16(1 << 15);
311 ((__be16 *) mailbox->buf)[6] = cpu_to_be16(1 << 15);
312 } else
313 ((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];
314 err = mlx4_cmd(dev, mailbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
315 MLX4_CMD_TIME_CLASS_B);
316
317 mlx4_free_cmd_mailbox(dev, mailbox);
318 return err;
319}
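To make the reference counting in the new port.c concrete, here is a minimal, hypothetical caller of the exported helpers; only mlx4_register_mac() and mlx4_unregister_mac() come from this patch, everything around them is illustrative:

/* Hypothetical consumer: claim a unicast MAC on port 1, then release it. */
static int example_set_mac(struct mlx4_dev *dev, u64 mac, int *mac_index)
{
	int err;

	err = mlx4_register_mac(dev, 1, mac, mac_index);
	if (err)
		return err;	/* -ENOSPC once table->total == table->max */

	/* ... program *mac_index into the receive/steering context ... */
	return 0;
}

static void example_clear_mac(struct mlx4_dev *dev, int mac_index)
{
	mlx4_unregister_mac(dev, 1, mac_index);
}

Registering the same MAC twice simply bumps the reference count and returns the existing index, so paired register/unregister calls stay balanced even when several users share an address.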
diff --git a/drivers/net/mlx4/qp.c b/drivers/net/mlx4/qp.c
index c49a86044bf7..1c565ef8d179 100644
--- a/drivers/net/mlx4/qp.c
+++ b/drivers/net/mlx4/qp.c
@@ -147,19 +147,42 @@ int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
147} 147}
148EXPORT_SYMBOL_GPL(mlx4_qp_modify); 148EXPORT_SYMBOL_GPL(mlx4_qp_modify);
149 149
150int mlx4_qp_alloc(struct mlx4_dev *dev, int sqpn, struct mlx4_qp *qp) 150int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base)
151{
152 struct mlx4_priv *priv = mlx4_priv(dev);
153 struct mlx4_qp_table *qp_table = &priv->qp_table;
154 int qpn;
155
156 qpn = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align);
157 if (qpn == -1)
158 return -ENOMEM;
159
160 *base = qpn;
161 return 0;
162}
163EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range);
164
165void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
166{
167 struct mlx4_priv *priv = mlx4_priv(dev);
168 struct mlx4_qp_table *qp_table = &priv->qp_table;
169 if (base_qpn < dev->caps.sqp_start + 8)
170 return;
171
172 mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt);
173}
174EXPORT_SYMBOL_GPL(mlx4_qp_release_range);
175
176int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
151{ 177{
152 struct mlx4_priv *priv = mlx4_priv(dev); 178 struct mlx4_priv *priv = mlx4_priv(dev);
153 struct mlx4_qp_table *qp_table = &priv->qp_table; 179 struct mlx4_qp_table *qp_table = &priv->qp_table;
154 int err; 180 int err;
155 181
156 if (sqpn) 182 if (!qpn)
157 qp->qpn = sqpn; 183 return -EINVAL;
158 else { 184
159 qp->qpn = mlx4_bitmap_alloc(&qp_table->bitmap); 185 qp->qpn = qpn;
160 if (qp->qpn == -1)
161 return -ENOMEM;
162 }
163 186
164 err = mlx4_table_get(dev, &qp_table->qp_table, qp->qpn); 187 err = mlx4_table_get(dev, &qp_table->qp_table, qp->qpn);
165 if (err) 188 if (err)
@@ -208,9 +231,6 @@ err_put_qp:
208 mlx4_table_put(dev, &qp_table->qp_table, qp->qpn); 231 mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);
209 232
210err_out: 233err_out:
211 if (!sqpn)
212 mlx4_bitmap_free(&qp_table->bitmap, qp->qpn);
213
214 return err; 234 return err;
215} 235}
216EXPORT_SYMBOL_GPL(mlx4_qp_alloc); 236EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
@@ -239,9 +259,6 @@ void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp)
239 mlx4_table_put(dev, &qp_table->altc_table, qp->qpn); 259 mlx4_table_put(dev, &qp_table->altc_table, qp->qpn);
240 mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn); 260 mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn);
241 mlx4_table_put(dev, &qp_table->qp_table, qp->qpn); 261 mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);
242
243 if (qp->qpn >= dev->caps.sqp_start + 8)
244 mlx4_bitmap_free(&qp_table->bitmap, qp->qpn);
245} 262}
246EXPORT_SYMBOL_GPL(mlx4_qp_free); 263EXPORT_SYMBOL_GPL(mlx4_qp_free);
247 264
@@ -255,6 +272,7 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
255{ 272{
256 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; 273 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
257 int err; 274 int err;
275 int reserved_from_top = 0;
258 276
259 spin_lock_init(&qp_table->lock); 277 spin_lock_init(&qp_table->lock);
260 INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC); 278 INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC);
@@ -264,9 +282,40 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
264 * block of special QPs must be aligned to a multiple of 8, so 282 * block of special QPs must be aligned to a multiple of 8, so
265 * round up. 283 * round up.
266 */ 284 */
267 dev->caps.sqp_start = ALIGN(dev->caps.reserved_qps, 8); 285 dev->caps.sqp_start =
286 ALIGN(dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 8);
287
288 {
289 int sort[MLX4_NUM_QP_REGION];
290 int i, j, tmp;
291 int last_base = dev->caps.num_qps;
292
293 for (i = 1; i < MLX4_NUM_QP_REGION; ++i)
294 sort[i] = i;
295
296 for (i = MLX4_NUM_QP_REGION; i > 0; --i) {
297 for (j = 2; j < i; ++j) {
298 if (dev->caps.reserved_qps_cnt[sort[j]] >
299 dev->caps.reserved_qps_cnt[sort[j - 1]]) {
300 tmp = sort[j];
301 sort[j] = sort[j - 1];
302 sort[j - 1] = tmp;
303 }
304 }
305 }
306
307 for (i = 1; i < MLX4_NUM_QP_REGION; ++i) {
308 last_base -= dev->caps.reserved_qps_cnt[sort[i]];
309 dev->caps.reserved_qps_base[sort[i]] = last_base;
310 reserved_from_top +=
311 dev->caps.reserved_qps_cnt[sort[i]];
312 }
313
314 }
315
268 err = mlx4_bitmap_init(&qp_table->bitmap, dev->caps.num_qps, 316 err = mlx4_bitmap_init(&qp_table->bitmap, dev->caps.num_qps,
269 (1 << 24) - 1, dev->caps.sqp_start + 8); 317 (1 << 23) - 1, dev->caps.sqp_start + 8,
318 reserved_from_top);
270 if (err) 319 if (err)
271 return err; 320 return err;
272 321
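After this change a QP number has to come from mlx4_qp_reserve_range() before mlx4_qp_alloc() will accept it (passing 0 now returns -EINVAL). A minimal sketch of the new calling sequence; the surrounding error handling is illustrative rather than taken from a real consumer:

int err, base_qpn;
struct mlx4_qp qp;

/* Reserve a block of 8 QP numbers, naturally aligned to 8. */
err = mlx4_qp_reserve_range(dev, 8, 8, &base_qpn);
if (err)
	return err;

err = mlx4_qp_alloc(dev, base_qpn, &qp);
if (err) {
	mlx4_qp_release_range(dev, base_qpn, 8);
	return err;
}

/* ... use the QP ... */

mlx4_qp_free(dev, &qp);
mlx4_qp_release_range(dev, base_qpn, 8);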
diff --git a/drivers/net/mlx4/srq.c b/drivers/net/mlx4/srq.c
index 533eb6db24b3..fe9f218691f5 100644
--- a/drivers/net/mlx4/srq.c
+++ b/drivers/net/mlx4/srq.c
@@ -245,7 +245,7 @@ int mlx4_init_srq_table(struct mlx4_dev *dev)
245 INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC); 245 INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC);
246 246
247 err = mlx4_bitmap_init(&srq_table->bitmap, dev->caps.num_srqs, 247 err = mlx4_bitmap_init(&srq_table->bitmap, dev->caps.num_srqs,
248 dev->caps.num_srqs - 1, dev->caps.reserved_srqs); 248 dev->caps.num_srqs - 1, dev->caps.reserved_srqs, 0);
249 if (err) 249 if (err)
250 return err; 250 return err;
251 251
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index a9c8c08044b1..e513f76f2a9f 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -899,7 +899,8 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
899 if (skb != NULL) { 899 if (skb != NULL) {
900 if (skb_queue_len(&mp->rx_recycle) < 900 if (skb_queue_len(&mp->rx_recycle) <
901 mp->default_rx_ring_size && 901 mp->default_rx_ring_size &&
902 skb_recycle_check(skb, mp->skb_size)) 902 skb_recycle_check(skb, mp->skb_size +
903 dma_get_cache_alignment() - 1))
903 __skb_queue_head(&mp->rx_recycle, skb); 904 __skb_queue_head(&mp->rx_recycle, skb);
904 else 905 else
905 dev_kfree_skb(skb); 906 dev_kfree_skb(skb);
@@ -1066,9 +1067,12 @@ static int smi_wait_ready(struct mv643xx_eth_shared_private *msp)
1066 return 0; 1067 return 0;
1067 } 1068 }
1068 1069
1069 if (!wait_event_timeout(msp->smi_busy_wait, smi_is_done(msp), 1070 if (!smi_is_done(msp)) {
1070 msecs_to_jiffies(100))) 1071 wait_event_timeout(msp->smi_busy_wait, smi_is_done(msp),
1071 return -ETIMEDOUT; 1072 msecs_to_jiffies(100));
1073 if (!smi_is_done(msp))
1074 return -ETIMEDOUT;
1075 }
1072 1076
1073 return 0; 1077 return 0;
1074} 1078}
@@ -2432,8 +2436,8 @@ static int mv643xx_eth_shared_remove(struct platform_device *pdev)
2432 struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data; 2436 struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;
2433 2437
2434 if (pd == NULL || pd->shared_smi == NULL) { 2438 if (pd == NULL || pd->shared_smi == NULL) {
2435 mdiobus_free(msp->smi_bus);
2436 mdiobus_unregister(msp->smi_bus); 2439 mdiobus_unregister(msp->smi_bus);
2440 mdiobus_free(msp->smi_bus);
2437 } 2441 }
2438 if (msp->err_interrupt != NO_IRQ) 2442 if (msp->err_interrupt != NO_IRQ)
2439 free_irq(msp->err_interrupt, msp); 2443 free_irq(msp->err_interrupt, msp);
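The smi_wait_ready() hunk above guards against wait_event_timeout() reporting a timeout even though the condition became true at the last moment: the condition is re-checked directly after the wait before giving up. The same pattern in isolation, with condition_is_done() standing in as a hypothetical predicate:

if (!condition_is_done(dev)) {
	wait_event_timeout(dev->wq, condition_is_done(dev),
			   msecs_to_jiffies(100));
	if (!condition_is_done(dev))
		return -ETIMEDOUT;
}
return 0;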
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index a9aebad52652..b37867097308 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -75,7 +75,7 @@
75#include "myri10ge_mcp.h" 75#include "myri10ge_mcp.h"
76#include "myri10ge_mcp_gen_header.h" 76#include "myri10ge_mcp_gen_header.h"
77 77
78#define MYRI10GE_VERSION_STR "1.4.3-1.369" 78#define MYRI10GE_VERSION_STR "1.4.3-1.378"
79 79
80MODULE_DESCRIPTION("Myricom 10G driver (10GbE)"); 80MODULE_DESCRIPTION("Myricom 10G driver (10GbE)");
81MODULE_AUTHOR("Maintainer: help@myri.com"); 81MODULE_AUTHOR("Maintainer: help@myri.com");
@@ -1393,6 +1393,8 @@ myri10ge_tx_done(struct myri10ge_slice_state *ss, int mcp_index)
1393 if (tx->req == tx->done) { 1393 if (tx->req == tx->done) {
1394 tx->queue_active = 0; 1394 tx->queue_active = 0;
1395 put_be32(htonl(1), tx->send_stop); 1395 put_be32(htonl(1), tx->send_stop);
1396 mb();
1397 mmiowb();
1396 } 1398 }
1397 __netif_tx_unlock(dev_queue); 1399 __netif_tx_unlock(dev_queue);
1398 } 1400 }
@@ -2497,6 +2499,10 @@ static int myri10ge_open(struct net_device *dev)
2497 return 0; 2499 return 0;
2498 2500
2499abort_with_rings: 2501abort_with_rings:
2502 while (slice) {
2503 slice--;
2504 napi_disable(&mgp->ss[slice].napi);
2505 }
2500 for (i = 0; i < mgp->num_slices; i++) 2506 for (i = 0; i < mgp->num_slices; i++)
2501 myri10ge_free_rings(&mgp->ss[i]); 2507 myri10ge_free_rings(&mgp->ss[i]);
2502 2508
@@ -2860,6 +2866,8 @@ again:
2860 if ((mgp->dev->real_num_tx_queues > 1) && tx->queue_active == 0) { 2866 if ((mgp->dev->real_num_tx_queues > 1) && tx->queue_active == 0) {
2861 tx->queue_active = 1; 2867 tx->queue_active = 1;
2862 put_be32(htonl(1), tx->send_go); 2868 put_be32(htonl(1), tx->send_go);
2869 mb();
2870 mmiowb();
2863 } 2871 }
2864 tx->pkt_start++; 2872 tx->pkt_start++;
2865 if ((avail - count) < MXGEFW_MAX_SEND_DESC) { 2873 if ((avail - count) < MXGEFW_MAX_SEND_DESC) {
diff --git a/drivers/net/netx-eth.c b/drivers/net/netx-eth.c
index b9bed82e1d21..b289a0a2b945 100644
--- a/drivers/net/netx-eth.c
+++ b/drivers/net/netx-eth.c
@@ -401,6 +401,8 @@ static int netx_eth_drv_probe(struct platform_device *pdev)
401 priv->xmac_base = priv->xc->xmac_base; 401 priv->xmac_base = priv->xc->xmac_base;
402 priv->sram_base = priv->xc->sram_base; 402 priv->sram_base = priv->xc->sram_base;
403 403
404 spin_lock_init(&priv->lock);
405
404 ret = pfifo_request(PFIFO_MASK(priv->id)); 406 ret = pfifo_request(PFIFO_MASK(priv->id));
405 if (ret) { 407 if (ret) {
406 printk("unable to request PFIFO\n"); 408 printk("unable to request PFIFO\n");
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index ebc812702903..1b6f548c4411 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -33,8 +33,8 @@
33 33
34#define DRV_MODULE_NAME "niu" 34#define DRV_MODULE_NAME "niu"
35#define PFX DRV_MODULE_NAME ": " 35#define PFX DRV_MODULE_NAME ": "
36#define DRV_MODULE_VERSION "0.9" 36#define DRV_MODULE_VERSION "1.0"
37#define DRV_MODULE_RELDATE "May 4, 2008" 37#define DRV_MODULE_RELDATE "Nov 14, 2008"
38 38
39static char version[] __devinitdata = 39static char version[] __devinitdata =
40 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; 40 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
@@ -51,8 +51,7 @@ MODULE_VERSION(DRV_MODULE_VERSION);
51#ifndef readq 51#ifndef readq
52static u64 readq(void __iomem *reg) 52static u64 readq(void __iomem *reg)
53{ 53{
54 return (((u64)readl(reg + 0x4UL) << 32) | 54 return ((u64) readl(reg)) | (((u64) readl(reg + 4UL)) << 32);
55 (u64)readl(reg));
56} 55}
57 56
58static void writeq(u64 val, void __iomem *reg) 57static void writeq(u64 val, void __iomem *reg)
@@ -407,7 +406,7 @@ static int esr2_set_rx_cfg(struct niu *np, unsigned long channel, u32 val)
407} 406}
408 407
409/* Mode is always 10G fiber. */ 408/* Mode is always 10G fiber. */
410static int serdes_init_niu(struct niu *np) 409static int serdes_init_niu_10g_fiber(struct niu *np)
411{ 410{
412 struct niu_link_config *lp = &np->link_config; 411 struct niu_link_config *lp = &np->link_config;
413 u32 tx_cfg, rx_cfg; 412 u32 tx_cfg, rx_cfg;
@@ -444,6 +443,223 @@ static int serdes_init_niu(struct niu *np)
444 return 0; 443 return 0;
445} 444}
446 445
446static int serdes_init_niu_1g_serdes(struct niu *np)
447{
448 struct niu_link_config *lp = &np->link_config;
449 u16 pll_cfg, pll_sts;
450 int max_retry = 100;
451 u64 sig, mask, val;
452 u32 tx_cfg, rx_cfg;
453 unsigned long i;
454 int err;
455
456 tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV |
457 PLL_TX_CFG_RATE_HALF);
458 rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
459 PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
460 PLL_RX_CFG_RATE_HALF);
461
462 if (np->port == 0)
463 rx_cfg |= PLL_RX_CFG_EQ_LP_ADAPTIVE;
464
465 if (lp->loopback_mode == LOOPBACK_PHY) {
466 u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;
467
468 mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
469 ESR2_TI_PLL_TEST_CFG_L, test_cfg);
470
471 tx_cfg |= PLL_TX_CFG_ENTEST;
472 rx_cfg |= PLL_RX_CFG_ENTEST;
473 }
474
475 /* Initialize PLL for 1G */
476 pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_8X);
477
478 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
479 ESR2_TI_PLL_CFG_L, pll_cfg);
480 if (err) {
481 dev_err(np->device, PFX "NIU Port %d "
482 "serdes_init_niu_1g_serdes: "
483 "mdio write to ESR2_TI_PLL_CFG_L failed", np->port);
484 return err;
485 }
486
487 pll_sts = PLL_CFG_ENPLL;
488
489 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
490 ESR2_TI_PLL_STS_L, pll_sts);
491 if (err) {
492 dev_err(np->device, PFX "NIU Port %d "
493 "serdes_init_niu_1g_serdes: "
494 "mdio write to ESR2_TI_PLL_STS_L failed", np->port);
495 return err;
496 }
497
498 udelay(200);
499
500 /* Initialize all 4 lanes of the SERDES. */
501 for (i = 0; i < 4; i++) {
502 err = esr2_set_tx_cfg(np, i, tx_cfg);
503 if (err)
504 return err;
505 }
506
507 for (i = 0; i < 4; i++) {
508 err = esr2_set_rx_cfg(np, i, rx_cfg);
509 if (err)
510 return err;
511 }
512
513 switch (np->port) {
514 case 0:
515 val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
516 mask = val;
517 break;
518
519 case 1:
520 val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);
521 mask = val;
522 break;
523
524 default:
525 return -EINVAL;
526 }
527
528 while (max_retry--) {
529 sig = nr64(ESR_INT_SIGNALS);
530 if ((sig & mask) == val)
531 break;
532
533 mdelay(500);
534 }
535
536 if ((sig & mask) != val) {
537 dev_err(np->device, PFX "Port %u signal bits [%08x] are not "
538 "[%08x]\n", np->port, (int) (sig & mask), (int) val);
539 return -ENODEV;
540 }
541
542 return 0;
543}
544
545static int serdes_init_niu_10g_serdes(struct niu *np)
546{
547 struct niu_link_config *lp = &np->link_config;
548 u32 tx_cfg, rx_cfg, pll_cfg, pll_sts;
549 int max_retry = 100;
550 u64 sig, mask, val;
551 unsigned long i;
552 int err;
553
554 tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
555 rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
556 PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
557 PLL_RX_CFG_EQ_LP_ADAPTIVE);
558
559 if (lp->loopback_mode == LOOPBACK_PHY) {
560 u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;
561
562 mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
563 ESR2_TI_PLL_TEST_CFG_L, test_cfg);
564
565 tx_cfg |= PLL_TX_CFG_ENTEST;
566 rx_cfg |= PLL_RX_CFG_ENTEST;
567 }
568
569 /* Initialize PLL for 10G */
570 pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_10X);
571
572 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
573 ESR2_TI_PLL_CFG_L, pll_cfg & 0xffff);
574 if (err) {
575 dev_err(np->device, PFX "NIU Port %d "
576 "serdes_init_niu_10g_serdes: "
577 "mdio write to ESR2_TI_PLL_CFG_L failed", np->port);
578 return err;
579 }
580
581 pll_sts = PLL_CFG_ENPLL;
582
583 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
584 ESR2_TI_PLL_STS_L, pll_sts & 0xffff);
585 if (err) {
586 dev_err(np->device, PFX "NIU Port %d "
587 "serdes_init_niu_10g_serdes: "
588 "mdio write to ESR2_TI_PLL_STS_L failed", np->port);
589 return err;
590 }
591
592 udelay(200);
593
594 /* Initialize all 4 lanes of the SERDES. */
595 for (i = 0; i < 4; i++) {
596 err = esr2_set_tx_cfg(np, i, tx_cfg);
597 if (err)
598 return err;
599 }
600
601 for (i = 0; i < 4; i++) {
602 err = esr2_set_rx_cfg(np, i, rx_cfg);
603 if (err)
604 return err;
605 }
606
607 /* check if serdes is ready */
608
609 switch (np->port) {
610 case 0:
611 mask = ESR_INT_SIGNALS_P0_BITS;
612 val = (ESR_INT_SRDY0_P0 |
613 ESR_INT_DET0_P0 |
614 ESR_INT_XSRDY_P0 |
615 ESR_INT_XDP_P0_CH3 |
616 ESR_INT_XDP_P0_CH2 |
617 ESR_INT_XDP_P0_CH1 |
618 ESR_INT_XDP_P0_CH0);
619 break;
620
621 case 1:
622 mask = ESR_INT_SIGNALS_P1_BITS;
623 val = (ESR_INT_SRDY0_P1 |
624 ESR_INT_DET0_P1 |
625 ESR_INT_XSRDY_P1 |
626 ESR_INT_XDP_P1_CH3 |
627 ESR_INT_XDP_P1_CH2 |
628 ESR_INT_XDP_P1_CH1 |
629 ESR_INT_XDP_P1_CH0);
630 break;
631
632 default:
633 return -EINVAL;
634 }
635
636 while (max_retry--) {
637 sig = nr64(ESR_INT_SIGNALS);
638 if ((sig & mask) == val)
639 break;
640
641 mdelay(500);
642 }
643
644 if ((sig & mask) != val) {
645 pr_info(PFX "NIU Port %u signal bits [%08x] are not "
646 "[%08x] for 10G...trying 1G\n",
647 np->port, (int) (sig & mask), (int) val);
648
649 /* 10G failed, try initializing at 1G */
650 err = serdes_init_niu_1g_serdes(np);
651 if (!err) {
652 np->flags &= ~NIU_FLAGS_10G;
653 np->mac_xcvr = MAC_XCVR_PCS;
654 } else {
655 dev_err(np->device, PFX "Port %u 10G/1G SERDES "
656 "Link Failed \n", np->port);
657 return -ENODEV;
658 }
659 }
660 return 0;
661}
662
447static int esr_read_rxtx_ctrl(struct niu *np, unsigned long chan, u32 *val) 663static int esr_read_rxtx_ctrl(struct niu *np, unsigned long chan, u32 *val)
448{ 664{
449 int err; 665 int err;
@@ -1955,13 +2171,23 @@ static const struct niu_phy_ops phy_ops_10g_serdes = {
1955 .link_status = link_status_10g_serdes, 2171 .link_status = link_status_10g_serdes,
1956}; 2172};
1957 2173
2174static const struct niu_phy_ops phy_ops_10g_serdes_niu = {
2175 .serdes_init = serdes_init_niu_10g_serdes,
2176 .link_status = link_status_10g_serdes,
2177};
2178
2179static const struct niu_phy_ops phy_ops_1g_serdes_niu = {
2180 .serdes_init = serdes_init_niu_1g_serdes,
2181 .link_status = link_status_1g_serdes,
2182};
2183
1958static const struct niu_phy_ops phy_ops_1g_rgmii = { 2184static const struct niu_phy_ops phy_ops_1g_rgmii = {
1959 .xcvr_init = xcvr_init_1g_rgmii, 2185 .xcvr_init = xcvr_init_1g_rgmii,
1960 .link_status = link_status_1g_rgmii, 2186 .link_status = link_status_1g_rgmii,
1961}; 2187};
1962 2188
1963static const struct niu_phy_ops phy_ops_10g_fiber_niu = { 2189static const struct niu_phy_ops phy_ops_10g_fiber_niu = {
1964 .serdes_init = serdes_init_niu, 2190 .serdes_init = serdes_init_niu_10g_fiber,
1965 .xcvr_init = xcvr_init_10g, 2191 .xcvr_init = xcvr_init_10g,
1966 .link_status = link_status_10g, 2192 .link_status = link_status_10g,
1967}; 2193};
@@ -1999,11 +2225,21 @@ struct niu_phy_template {
1999 u32 phy_addr_base; 2225 u32 phy_addr_base;
2000}; 2226};
2001 2227
2002static const struct niu_phy_template phy_template_niu = { 2228static const struct niu_phy_template phy_template_niu_10g_fiber = {
2003 .ops = &phy_ops_10g_fiber_niu, 2229 .ops = &phy_ops_10g_fiber_niu,
2004 .phy_addr_base = 16, 2230 .phy_addr_base = 16,
2005}; 2231};
2006 2232
2233static const struct niu_phy_template phy_template_niu_10g_serdes = {
2234 .ops = &phy_ops_10g_serdes_niu,
2235 .phy_addr_base = 0,
2236};
2237
2238static const struct niu_phy_template phy_template_niu_1g_serdes = {
2239 .ops = &phy_ops_1g_serdes_niu,
2240 .phy_addr_base = 0,
2241};
2242
2007static const struct niu_phy_template phy_template_10g_fiber = { 2243static const struct niu_phy_template phy_template_10g_fiber = {
2008 .ops = &phy_ops_10g_fiber, 2244 .ops = &phy_ops_10g_fiber,
2009 .phy_addr_base = 8, 2245 .phy_addr_base = 8,
@@ -2183,8 +2419,25 @@ static int niu_determine_phy_disposition(struct niu *np)
2183 u32 phy_addr_off = 0; 2419 u32 phy_addr_off = 0;
2184 2420
2185 if (plat_type == PLAT_TYPE_NIU) { 2421 if (plat_type == PLAT_TYPE_NIU) {
2186 tp = &phy_template_niu; 2422 switch (np->flags &
2187 phy_addr_off += np->port; 2423 (NIU_FLAGS_10G |
2424 NIU_FLAGS_FIBER |
2425 NIU_FLAGS_XCVR_SERDES)) {
2426 case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
2427 /* 10G Serdes */
2428 tp = &phy_template_niu_10g_serdes;
2429 break;
2430 case NIU_FLAGS_XCVR_SERDES:
2431 /* 1G Serdes */
2432 tp = &phy_template_niu_1g_serdes;
2433 break;
2434 case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
2435 /* 10G Fiber */
2436 default:
2437 tp = &phy_template_niu_10g_fiber;
2438 phy_addr_off += np->port;
2439 break;
2440 }
2188 } else { 2441 } else {
2189 switch (np->flags & 2442 switch (np->flags &
2190 (NIU_FLAGS_10G | 2443 (NIU_FLAGS_10G |
@@ -7214,6 +7467,12 @@ static int __devinit niu_phy_type_prop_decode(struct niu *np,
7214 np->flags |= NIU_FLAGS_10G; 7467 np->flags |= NIU_FLAGS_10G;
7215 np->flags &= ~NIU_FLAGS_FIBER; 7468 np->flags &= ~NIU_FLAGS_FIBER;
7216 np->mac_xcvr = MAC_XCVR_XPCS; 7469 np->mac_xcvr = MAC_XCVR_XPCS;
7470 } else if (!strcmp(phy_prop, "xgsd") || !strcmp(phy_prop, "gsd")) {
7471 /* 10G Serdes or 1G Serdes, default to 10G */
7472 np->flags |= NIU_FLAGS_10G;
7473 np->flags &= ~NIU_FLAGS_FIBER;
7474 np->flags |= NIU_FLAGS_XCVR_SERDES;
7475 np->mac_xcvr = MAC_XCVR_XPCS;
7217 } else { 7476 } else {
7218 return -EINVAL; 7477 return -EINVAL;
7219 } 7478 }
@@ -7742,6 +8001,8 @@ static int __devinit walk_phys(struct niu *np, struct niu_parent *parent)
7742 u32 val; 8001 u32 val;
7743 int err; 8002 int err;
7744 8003
8004 num_10g = num_1g = 0;
8005
7745 if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) || 8006 if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
7746 !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) { 8007 !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
7747 num_10g = 0; 8008 num_10g = 0;
@@ -7758,6 +8019,16 @@ static int __devinit walk_phys(struct niu *np, struct niu_parent *parent)
7758 parent->num_ports = 2; 8019 parent->num_ports = 2;
7759 val = (phy_encode(PORT_TYPE_10G, 0) | 8020 val = (phy_encode(PORT_TYPE_10G, 0) |
7760 phy_encode(PORT_TYPE_10G, 1)); 8021 phy_encode(PORT_TYPE_10G, 1));
8022 } else if ((np->flags & NIU_FLAGS_XCVR_SERDES) &&
8023 (parent->plat_type == PLAT_TYPE_NIU)) {
8024 /* this is the Monza case */
8025 if (np->flags & NIU_FLAGS_10G) {
8026 val = (phy_encode(PORT_TYPE_10G, 0) |
8027 phy_encode(PORT_TYPE_10G, 1));
8028 } else {
8029 val = (phy_encode(PORT_TYPE_1G, 0) |
8030 phy_encode(PORT_TYPE_1G, 1));
8031 }
7761 } else { 8032 } else {
7762 err = fill_phy_probe_info(np, parent, info); 8033 err = fill_phy_probe_info(np, parent, info);
7763 if (err) 8034 if (err)
@@ -8657,7 +8928,9 @@ static void __devinit niu_device_announce(struct niu *np)
8657 dev->name, 8928 dev->name,
8658 (np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"), 8929 (np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
8659 (np->flags & NIU_FLAGS_10G ? "10G" : "1G"), 8930 (np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
8660 (np->flags & NIU_FLAGS_FIBER ? "FIBER" : "COPPER"), 8931 (np->flags & NIU_FLAGS_FIBER ? "FIBER" :
8932 (np->flags & NIU_FLAGS_XCVR_SERDES ? "SERDES" :
8933 "COPPER")),
8661 (np->mac_xcvr == MAC_XCVR_MII ? "MII" : 8934 (np->mac_xcvr == MAC_XCVR_MII ? "MII" :
8662 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")), 8935 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
8663 np->vpd.phy_type); 8936 np->vpd.phy_type);
@@ -8667,7 +8940,6 @@ static void __devinit niu_device_announce(struct niu *np)
8667static int __devinit niu_pci_init_one(struct pci_dev *pdev, 8940static int __devinit niu_pci_init_one(struct pci_dev *pdev,
8668 const struct pci_device_id *ent) 8941 const struct pci_device_id *ent)
8669{ 8942{
8670 unsigned long niureg_base, niureg_len;
8671 union niu_parent_id parent_id; 8943 union niu_parent_id parent_id;
8672 struct net_device *dev; 8944 struct net_device *dev;
8673 struct niu *np; 8945 struct niu *np;
@@ -8758,10 +9030,7 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev,
8758 9030
8759 dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM); 9031 dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM);
8760 9032
8761 niureg_base = pci_resource_start(pdev, 0); 9033 np->regs = pci_ioremap_bar(pdev, 0);
8762 niureg_len = pci_resource_len(pdev, 0);
8763
8764 np->regs = ioremap_nocache(niureg_base, niureg_len);
8765 if (!np->regs) { 9034 if (!np->regs) {
8766 dev_err(&pdev->dev, PFX "Cannot map device registers, " 9035 dev_err(&pdev->dev, PFX "Cannot map device registers, "
8767 "aborting.\n"); 9036 "aborting.\n");
diff --git a/drivers/net/niu.h b/drivers/net/niu.h
index c6fa883daa22..180ca8ae93de 100644
--- a/drivers/net/niu.h
+++ b/drivers/net/niu.h
@@ -1048,6 +1048,13 @@
1048#define PLL_CFG_LD_SHIFT 8 1048#define PLL_CFG_LD_SHIFT 8
1049#define PLL_CFG_MPY 0x0000001e 1049#define PLL_CFG_MPY 0x0000001e
1050#define PLL_CFG_MPY_SHIFT 1 1050#define PLL_CFG_MPY_SHIFT 1
1051#define PLL_CFG_MPY_4X 0x0
1052#define PLL_CFG_MPY_5X 0x00000002
1053#define PLL_CFG_MPY_6X 0x00000004
1054#define PLL_CFG_MPY_8X 0x00000008
1055#define PLL_CFG_MPY_10X 0x0000000a
1056#define PLL_CFG_MPY_12X 0x0000000c
1057#define PLL_CFG_MPY_12P5X 0x0000000e
1051#define PLL_CFG_ENPLL 0x00000001 1058#define PLL_CFG_ENPLL 0x00000001
1052 1059
1053#define ESR2_TI_PLL_STS_L (ESR2_BASE + 0x002) 1060#define ESR2_TI_PLL_STS_L (ESR2_BASE + 0x002)
@@ -1093,6 +1100,9 @@
1093#define PLL_TX_CFG_INVPAIR 0x00000080 1100#define PLL_TX_CFG_INVPAIR 0x00000080
1094#define PLL_TX_CFG_RATE 0x00000060 1101#define PLL_TX_CFG_RATE 0x00000060
1095#define PLL_TX_CFG_RATE_SHIFT 5 1102#define PLL_TX_CFG_RATE_SHIFT 5
1103#define PLL_TX_CFG_RATE_FULL 0x0
1104#define PLL_TX_CFG_RATE_HALF 0x20
1105#define PLL_TX_CFG_RATE_QUAD 0x40
1096#define PLL_TX_CFG_BUSWIDTH 0x0000001c 1106#define PLL_TX_CFG_BUSWIDTH 0x0000001c
1097#define PLL_TX_CFG_BUSWIDTH_SHIFT 2 1107#define PLL_TX_CFG_BUSWIDTH_SHIFT 2
1098#define PLL_TX_CFG_ENTEST 0x00000002 1108#define PLL_TX_CFG_ENTEST 0x00000002
@@ -1132,6 +1142,9 @@
1132#define PLL_RX_CFG_INVPAIR 0x00000080 1142#define PLL_RX_CFG_INVPAIR 0x00000080
1133#define PLL_RX_CFG_RATE 0x00000060 1143#define PLL_RX_CFG_RATE 0x00000060
1134#define PLL_RX_CFG_RATE_SHIFT 5 1144#define PLL_RX_CFG_RATE_SHIFT 5
1145#define PLL_RX_CFG_RATE_FULL 0x0
1146#define PLL_RX_CFG_RATE_HALF 0x20
1147#define PLL_RX_CFG_RATE_QUAD 0x40
1135#define PLL_RX_CFG_BUSWIDTH 0x0000001c 1148#define PLL_RX_CFG_BUSWIDTH 0x0000001c
1136#define PLL_RX_CFG_BUSWIDTH_SHIFT 2 1149#define PLL_RX_CFG_BUSWIDTH_SHIFT 2
1137#define PLL_RX_CFG_ENTEST 0x00000002 1150#define PLL_RX_CFG_ENTEST 0x00000002
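These new multiplier and rate definitions pair up with the SERDES init paths added in niu.c above: 1G mode runs the PLL at 8x with the lanes at half rate, while 10G mode uses 10x with the lanes left at full rate (the 0x0 default). A condensed restatement for orientation only; the actual register writes live in serdes_init_niu_1g_serdes() and serdes_init_niu_10g_serdes():

/* 1G SERDES */
pll_cfg = PLL_CFG_ENPLL | PLL_CFG_MPY_8X;
tx_cfg  = PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV | PLL_TX_CFG_RATE_HALF;

/* 10G SERDES (PLL_TX_CFG_RATE_FULL is 0, so no rate bits are set) */
pll_cfg = PLL_CFG_ENPLL | PLL_CFG_MPY_10X;
tx_cfg  = PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV;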
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index b37a498939ae..0418045166c3 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -779,6 +779,7 @@ static struct pcmcia_device_id axnet_ids[] = {
779 PCMCIA_DEVICE_PROD_ID12("IO DATA", "ETXPCM", 0x547e66dc, 0x233adac2), 779 PCMCIA_DEVICE_PROD_ID12("IO DATA", "ETXPCM", 0x547e66dc, 0x233adac2),
780 PCMCIA_DEVICE_PROD_ID12("Linksys", "EtherFast 10/100 PC Card (PCMPC100 V3)", 0x0733cc81, 0x232019a8), 780 PCMCIA_DEVICE_PROD_ID12("Linksys", "EtherFast 10/100 PC Card (PCMPC100 V3)", 0x0733cc81, 0x232019a8),
781 PCMCIA_DEVICE_PROD_ID12("MELCO", "LPC3-TX", 0x481e0094, 0xf91af609), 781 PCMCIA_DEVICE_PROD_ID12("MELCO", "LPC3-TX", 0x481e0094, 0xf91af609),
782 PCMCIA_DEVICE_PROD_ID12("NETGEAR", "FA411", 0x9aa79dc3, 0x40fad875),
782 PCMCIA_DEVICE_PROD_ID12("PCMCIA", "100BASE", 0x281f1c5d, 0x7c2add04), 783 PCMCIA_DEVICE_PROD_ID12("PCMCIA", "100BASE", 0x281f1c5d, 0x7c2add04),
783 PCMCIA_DEVICE_PROD_ID12("PCMCIA", "FastEtherCard", 0x281f1c5d, 0x7ef26116), 784 PCMCIA_DEVICE_PROD_ID12("PCMCIA", "FastEtherCard", 0x281f1c5d, 0x7ef26116),
784 PCMCIA_DEVICE_PROD_ID12("PCMCIA", "FEP501", 0x281f1c5d, 0x2e272058), 785 PCMCIA_DEVICE_PROD_ID12("PCMCIA", "FEP501", 0x281f1c5d, 0x2e272058),
@@ -1174,7 +1175,6 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
1174 * ax_interrupt - handle the interrupts from an 8390 1175 * ax_interrupt - handle the interrupts from an 8390
1175 * @irq: interrupt number 1176 * @irq: interrupt number
1176 * @dev_id: a pointer to the net_device 1177 * @dev_id: a pointer to the net_device
1177 * @regs: unused
1178 * 1178 *
1179 * Handle the ether interface interrupts. We pull packets from 1179 * Handle the ether interface interrupts. We pull packets from
1180 * the 8390 via the card specific functions and fire them at the networking 1180 * the 8390 via the card specific functions and fire them at the networking
diff --git a/drivers/net/pcmcia/ibmtr_cs.c b/drivers/net/pcmcia/ibmtr_cs.c
index cf3cca4642f2..f51944b28cfa 100644
--- a/drivers/net/pcmcia/ibmtr_cs.c
+++ b/drivers/net/pcmcia/ibmtr_cs.c
@@ -349,7 +349,7 @@ static int ibmtr_suspend(struct pcmcia_device *link)
349 return 0; 349 return 0;
350} 350}
351 351
352static int ibmtr_resume(struct pcmcia_device *link) 352static int __devinit ibmtr_resume(struct pcmcia_device *link)
353{ 353{
354 ibmtr_dev_t *info = link->priv; 354 ibmtr_dev_t *info = link->priv;
355 struct net_device *dev = info->dev; 355 struct net_device *dev = info->dev;
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index e40d6301aa7a..ce486f094492 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -1693,7 +1693,6 @@ static struct pcmcia_device_id pcnet_ids[] = {
1693 PCMCIA_DEVICE_PROD_ID12("National Semiconductor", "InfoMover NE4100", 0x36e1191f, 0xa6617ec8), 1693 PCMCIA_DEVICE_PROD_ID12("National Semiconductor", "InfoMover NE4100", 0x36e1191f, 0xa6617ec8),
1694 PCMCIA_DEVICE_PROD_ID12("NEC", "PC-9801N-J12", 0x18df0ba0, 0xbc912d76), 1694 PCMCIA_DEVICE_PROD_ID12("NEC", "PC-9801N-J12", 0x18df0ba0, 0xbc912d76),
1695 PCMCIA_DEVICE_PROD_ID12("NETGEAR", "FA410TX", 0x9aa79dc3, 0x60e5bc0e), 1695 PCMCIA_DEVICE_PROD_ID12("NETGEAR", "FA410TX", 0x9aa79dc3, 0x60e5bc0e),
1696 PCMCIA_DEVICE_PROD_ID12("NETGEAR", "FA411", 0x9aa79dc3, 0x40fad875),
1697 PCMCIA_DEVICE_PROD_ID12("Network Everywhere", "Fast Ethernet 10/100 PC Card", 0x820a67b6, 0x31ed1a5f), 1696 PCMCIA_DEVICE_PROD_ID12("Network Everywhere", "Fast Ethernet 10/100 PC Card", 0x820a67b6, 0x31ed1a5f),
1698 PCMCIA_DEVICE_PROD_ID12("NextCom K.K.", "Next Hawk", 0xaedaec74, 0xad050ef1), 1697 PCMCIA_DEVICE_PROD_ID12("NextCom K.K.", "Next Hawk", 0xaedaec74, 0xad050ef1),
1699 PCMCIA_DEVICE_PROD_ID12("PCMCIA", "10/100Mbps Ethernet Card", 0x281f1c5d, 0x6e41773b), 1698 PCMCIA_DEVICE_PROD_ID12("PCMCIA", "10/100Mbps Ethernet Card", 0x281f1c5d, 0x6e41773b),
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 4aa547947040..eb6411c4694f 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -227,6 +227,59 @@ static int m88e1111_config_init(struct phy_device *phydev)
227 return 0; 227 return 0;
228} 228}
229 229
230static int m88e1118_config_aneg(struct phy_device *phydev)
231{
232 int err;
233
234 err = phy_write(phydev, MII_BMCR, BMCR_RESET);
235 if (err < 0)
236 return err;
237
238 err = phy_write(phydev, MII_M1011_PHY_SCR,
239 MII_M1011_PHY_SCR_AUTO_CROSS);
240 if (err < 0)
241 return err;
242
243 err = genphy_config_aneg(phydev);
244	return err;
245}
246
247static int m88e1118_config_init(struct phy_device *phydev)
248{
249 int err;
250
251 /* Change address */
252 err = phy_write(phydev, 0x16, 0x0002);
253 if (err < 0)
254 return err;
255
256 /* Enable 1000 Mbit */
257 err = phy_write(phydev, 0x15, 0x1070);
258 if (err < 0)
259 return err;
260
261 /* Change address */
262 err = phy_write(phydev, 0x16, 0x0003);
263 if (err < 0)
264 return err;
265
266 /* Adjust LED Control */
267 err = phy_write(phydev, 0x10, 0x021e);
268 if (err < 0)
269 return err;
270
271 /* Reset address */
272 err = phy_write(phydev, 0x16, 0x0);
273 if (err < 0)
274 return err;
275
276 err = phy_write(phydev, MII_BMCR, BMCR_RESET);
277 if (err < 0)
278 return err;
279
280 return 0;
281}
282
230static int m88e1145_config_init(struct phy_device *phydev) 283static int m88e1145_config_init(struct phy_device *phydev)
231{ 284{
232 int err; 285 int err;
@@ -416,6 +469,19 @@ static struct phy_driver marvell_drivers[] = {
416 .driver = { .owner = THIS_MODULE }, 469 .driver = { .owner = THIS_MODULE },
417 }, 470 },
418 { 471 {
472 .phy_id = 0x01410e10,
473 .phy_id_mask = 0xfffffff0,
474 .name = "Marvell 88E1118",
475 .features = PHY_GBIT_FEATURES,
476 .flags = PHY_HAS_INTERRUPT,
477 .config_init = &m88e1118_config_init,
478 .config_aneg = &m88e1118_config_aneg,
479 .read_status = &genphy_read_status,
480 .ack_interrupt = &marvell_ack_interrupt,
481 .config_intr = &marvell_config_intr,
482 .driver = {.owner = THIS_MODULE,},
483 },
484 {
419 .phy_id = 0x01410cd0, 485 .phy_id = 0x01410cd0,
420 .phy_id_mask = 0xfffffff0, 486 .phy_id_mask = 0xfffffff0,
421 .name = "Marvell 88E1145", 487 .name = "Marvell 88E1145",
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index d0ed1ef284a8..289fc267edf3 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -105,8 +105,6 @@ int mdiobus_register(struct mii_bus *bus)
105 return -EINVAL; 105 return -EINVAL;
106 } 106 }
107 107
108 bus->state = MDIOBUS_REGISTERED;
109
110 mutex_init(&bus->mdio_lock); 108 mutex_init(&bus->mdio_lock);
111 109
112 if (bus->reset) 110 if (bus->reset)
@@ -123,6 +121,9 @@ int mdiobus_register(struct mii_bus *bus)
123 } 121 }
124 } 122 }
125 123
124 if (!err)
125 bus->state = MDIOBUS_REGISTERED;
126
126 pr_info("%s: probed\n", bus->name); 127 pr_info("%s: probed\n", bus->name);
127 128
128 return err; 129 return err;
@@ -136,7 +137,7 @@ void mdiobus_unregister(struct mii_bus *bus)
136 BUG_ON(bus->state != MDIOBUS_REGISTERED); 137 BUG_ON(bus->state != MDIOBUS_REGISTERED);
137 bus->state = MDIOBUS_UNREGISTERED; 138 bus->state = MDIOBUS_UNREGISTERED;
138 139
139 device_unregister(&bus->dev); 140 device_del(&bus->dev);
140 for (i = 0; i < PHY_MAX_ADDR; i++) { 141 for (i = 0; i < PHY_MAX_ADDR; i++) {
141 if (bus->phy_map[i]) 142 if (bus->phy_map[i])
142 device_unregister(&bus->phy_map[i]->dev); 143 device_unregister(&bus->phy_map[i]->dev);
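Since the bus now reaches MDIOBUS_REGISTERED only when probing succeeded, a caller must not unregister a bus whose registration failed (the BUG_ON above would trigger). A hedged sketch of the caller-side handling; the surrounding driver code is hypothetical:

err = mdiobus_register(bus);
if (err) {
	/* Never became REGISTERED: free it, do not unregister it. */
	mdiobus_free(bus);
	return err;
}

/* ... on teardown, unregister before freeing (see the mv643xx_eth fix above) ... */
mdiobus_unregister(bus);
mdiobus_free(bus);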
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index e11b03b2b25a..25acbbde4a60 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -227,8 +227,17 @@ struct phy_device * get_phy_device(struct mii_bus *bus, int addr)
227 if (r) 227 if (r)
228 return ERR_PTR(r); 228 return ERR_PTR(r);
229 229
230 /* If the phy_id is all Fs, there is no device there */ 230 /* If the phy_id is mostly Fs, there is no device there */
231 if (0xffffffff == phy_id) 231 if ((phy_id & 0x1fffffff) == 0x1fffffff)
232 return NULL;
233
234 /*
235 * Broken hardware is sometimes missing the pull down resistor on the
236 * MDIO line, which results in reads to non-existent devices returning
237 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
238 * device as well.
239 */
240 if (phy_id == 0)
232 return NULL; 241 return NULL;
233 242
234 dev = phy_device_create(bus, addr, phy_id); 243 dev = phy_device_create(bus, addr, phy_id);
@@ -564,20 +573,32 @@ EXPORT_SYMBOL(genphy_restart_aneg);
564 */ 573 */
565int genphy_config_aneg(struct phy_device *phydev) 574int genphy_config_aneg(struct phy_device *phydev)
566{ 575{
567 int result = 0; 576 int result;
568 577
569 if (AUTONEG_ENABLE == phydev->autoneg) { 578 if (AUTONEG_ENABLE != phydev->autoneg)
570 int result = genphy_config_advert(phydev); 579 return genphy_setup_forced(phydev);
571 580
572 if (result < 0) /* error */ 581 result = genphy_config_advert(phydev);
573 return result; 582
583 if (result < 0) /* error */
584 return result;
585
586 if (result == 0) {
587	/* Advertisement hasn't changed, but maybe aneg was never on to
588 * begin with? Or maybe phy was isolated? */
589 int ctl = phy_read(phydev, MII_BMCR);
590
591 if (ctl < 0)
592 return ctl;
593
594 if (!(ctl & BMCR_ANENABLE) || (ctl & BMCR_ISOLATE))
595 result = 1; /* do restart aneg */
596 }
574 597
575 /* Only restart aneg if we are advertising something different 598 /* Only restart aneg if we are advertising something different
576 * than we were before. */ 599 * than we were before. */
577 if (result > 0) 600 if (result > 0)
578 result = genphy_restart_aneg(phydev); 601 result = genphy_restart_aneg(phydev);
579 } else
580 result = genphy_setup_forced(phydev);
581 602
582 return result; 603 return result;
583} 604}
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 8874497b6bbf..dd3b2447e85a 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -34,6 +34,8 @@
34#define MII_VSC8244_IMASK_DUPLEX 0x1000 34#define MII_VSC8244_IMASK_DUPLEX 0x1000
35#define MII_VSC8244_IMASK_MASK 0xf000 35#define MII_VSC8244_IMASK_MASK 0xf000
36 36
37#define MII_VSC8221_IMASK_MASK 0xa000
38
37/* Vitesse Interrupt Status Register */ 39/* Vitesse Interrupt Status Register */
38#define MII_VSC8244_ISTAT 0x1a 40#define MII_VSC8244_ISTAT 0x1a
39#define MII_VSC8244_ISTAT_STATUS 0x8000 41#define MII_VSC8244_ISTAT_STATUS 0x8000
@@ -49,6 +51,12 @@
49#define MII_VSC8244_AUXCONSTAT_GBIT 0x0010 51#define MII_VSC8244_AUXCONSTAT_GBIT 0x0010
50#define MII_VSC8244_AUXCONSTAT_100 0x0008 52#define MII_VSC8244_AUXCONSTAT_100 0x0008
51 53
54#define MII_VSC8221_AUXCONSTAT_INIT 0x0004 /* need to set this bit? */
55#define MII_VSC8221_AUXCONSTAT_RESERVED 0x0004
56
57#define PHY_ID_VSC8244 0x000fc6c0
58#define PHY_ID_VSC8221 0x000fc550
59
52MODULE_DESCRIPTION("Vitesse PHY driver"); 60MODULE_DESCRIPTION("Vitesse PHY driver");
53MODULE_AUTHOR("Kriston Carson"); 61MODULE_AUTHOR("Kriston Carson");
54MODULE_LICENSE("GPL"); 62MODULE_LICENSE("GPL");
@@ -95,13 +103,15 @@ static int vsc824x_ack_interrupt(struct phy_device *phydev)
95 return (err < 0) ? err : 0; 103 return (err < 0) ? err : 0;
96} 104}
97 105
98static int vsc824x_config_intr(struct phy_device *phydev) 106static int vsc82xx_config_intr(struct phy_device *phydev)
99{ 107{
100 int err; 108 int err;
101 109
102 if (phydev->interrupts == PHY_INTERRUPT_ENABLED) 110 if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
103 err = phy_write(phydev, MII_VSC8244_IMASK, 111 err = phy_write(phydev, MII_VSC8244_IMASK,
104 MII_VSC8244_IMASK_MASK); 112 phydev->drv->phy_id == PHY_ID_VSC8244 ?
113 MII_VSC8244_IMASK_MASK :
114 MII_VSC8221_IMASK_MASK);
105 else { 115 else {
106 /* 116 /*
107 * The Vitesse PHY cannot clear the interrupt 117 * The Vitesse PHY cannot clear the interrupt
@@ -120,7 +130,7 @@ static int vsc824x_config_intr(struct phy_device *phydev)
120 130
121/* Vitesse 824x */ 131/* Vitesse 824x */
122static struct phy_driver vsc8244_driver = { 132static struct phy_driver vsc8244_driver = {
123 .phy_id = 0x000fc6c0, 133 .phy_id = PHY_ID_VSC8244,
124 .name = "Vitesse VSC8244", 134 .name = "Vitesse VSC8244",
125 .phy_id_mask = 0x000fffc0, 135 .phy_id_mask = 0x000fffc0,
126 .features = PHY_GBIT_FEATURES, 136 .features = PHY_GBIT_FEATURES,
@@ -129,19 +139,55 @@ static struct phy_driver vsc8244_driver = {
129 .config_aneg = &genphy_config_aneg, 139 .config_aneg = &genphy_config_aneg,
130 .read_status = &genphy_read_status, 140 .read_status = &genphy_read_status,
131 .ack_interrupt = &vsc824x_ack_interrupt, 141 .ack_interrupt = &vsc824x_ack_interrupt,
132 .config_intr = &vsc824x_config_intr, 142 .config_intr = &vsc82xx_config_intr,
133 .driver = { .owner = THIS_MODULE,}, 143 .driver = { .owner = THIS_MODULE,},
134}; 144};
135 145
136static int __init vsc8244_init(void) 146static int vsc8221_config_init(struct phy_device *phydev)
137{ 147{
138 return phy_driver_register(&vsc8244_driver); 148 int err;
149
150 err = phy_write(phydev, MII_VSC8244_AUX_CONSTAT,
151 MII_VSC8221_AUXCONSTAT_INIT);
152 return err;
153
154 /* Perhaps we should set EXT_CON1 based on the interface?
155 Options are 802.3Z SerDes or SGMII */
156}
157
158/* Vitesse 8221 */
159static struct phy_driver vsc8221_driver = {
160 .phy_id = PHY_ID_VSC8221,
161 .phy_id_mask = 0x000ffff0,
162 .name = "Vitesse VSC8221",
163 .features = PHY_GBIT_FEATURES,
164 .flags = PHY_HAS_INTERRUPT,
165 .config_init = &vsc8221_config_init,
166 .config_aneg = &genphy_config_aneg,
167 .read_status = &genphy_read_status,
168 .ack_interrupt = &vsc824x_ack_interrupt,
169 .config_intr = &vsc82xx_config_intr,
170 .driver = { .owner = THIS_MODULE,},
171};
172
173static int __init vsc82xx_init(void)
174{
175 int err;
176
177 err = phy_driver_register(&vsc8244_driver);
178 if (err < 0)
179 return err;
180 err = phy_driver_register(&vsc8221_driver);
181 if (err < 0)
182 phy_driver_unregister(&vsc8244_driver);
183 return err;
139} 184}
140 185
141static void __exit vsc8244_exit(void) 186static void __exit vsc82xx_exit(void)
142{ 187{
143 phy_driver_unregister(&vsc8244_driver); 188 phy_driver_unregister(&vsc8244_driver);
189 phy_driver_unregister(&vsc8221_driver);
144} 190}
145 191
146module_init(vsc8244_init); 192module_init(vsc82xx_init);
147module_exit(vsc8244_exit); 193module_exit(vsc82xx_exit);
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index fc6f4b8c64b3..b646e92134dc 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -399,11 +399,11 @@ static int pppoe_rcv(struct sk_buff *skb,
399 if (skb->len < len) 399 if (skb->len < len)
400 goto drop; 400 goto drop;
401 401
402 po = get_item(ph->sid, eth_hdr(skb)->h_source, dev->ifindex); 402 if (pskb_trim_rcsum(skb, len))
403 if (!po)
404 goto drop; 403 goto drop;
405 404
406 if (pskb_trim_rcsum(skb, len)) 405 po = get_item(ph->sid, eth_hdr(skb)->h_source, dev->ifindex);
406 if (!po)
407 goto drop; 407 goto drop;
408 408
409 return sk_receive_skb(sk_pppox(po), skb, 0); 409 return sk_receive_skb(sk_pppox(po), skb, 0);
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
index 185b1dff10a8..e98d9773158d 100644
--- a/drivers/net/pppol2tp.c
+++ b/drivers/net/pppol2tp.c
@@ -1353,6 +1353,7 @@ static int pppol2tp_release(struct socket *sock)
1353 kfree_skb(skb); 1353 kfree_skb(skb);
1354 sock_put(sk); 1354 sock_put(sk);
1355 } 1355 }
1356 sock_put(sk);
1356 } 1357 }
1357 1358
1358 release_sock(sk); 1359 release_sock(sk);
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 3cdd07c45b6d..508452c02151 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -1515,9 +1515,6 @@ static u32 ql_get_link_state(struct ql3_adapter *qdev)
1515 linkState = LS_UP; 1515 linkState = LS_UP;
1516 } else { 1516 } else {
1517 linkState = LS_DOWN; 1517 linkState = LS_DOWN;
1518 if (netif_msg_link(qdev))
1519 printk(KERN_WARNING PFX
1520 "%s: Link is down.\n", qdev->ndev->name);
1521 } 1518 }
1522 return linkState; 1519 return linkState;
1523} 1520}
@@ -1581,10 +1578,6 @@ static int ql_finish_auto_neg(struct ql3_adapter *qdev)
1581 ql_mac_enable(qdev, 1); 1578 ql_mac_enable(qdev, 1);
1582 } 1579 }
1583 1580
1584 if (netif_msg_link(qdev))
1585 printk(KERN_DEBUG PFX
1586 "%s: Change port_link_state LS_DOWN to LS_UP.\n",
1587 qdev->ndev->name);
1588 qdev->port_link_state = LS_UP; 1581 qdev->port_link_state = LS_UP;
1589 netif_start_queue(qdev->ndev); 1582 netif_start_queue(qdev->ndev);
1590 netif_carrier_on(qdev->ndev); 1583 netif_carrier_on(qdev->ndev);
@@ -1655,14 +1648,9 @@ static void ql_link_state_machine_work(struct work_struct *work)
1655 /* Fall Through */ 1648 /* Fall Through */
1656 1649
1657 case LS_DOWN: 1650 case LS_DOWN:
1658 if (netif_msg_link(qdev))
1659 printk(KERN_DEBUG PFX
1660 "%s: port_link_state = LS_DOWN.\n",
1661 qdev->ndev->name);
1662 if (curr_link_state == LS_UP) { 1651 if (curr_link_state == LS_UP) {
1663 if (netif_msg_link(qdev)) 1652 if (netif_msg_link(qdev))
1664 printk(KERN_DEBUG PFX 1653 printk(KERN_INFO PFX "%s: Link is up.\n",
1665 "%s: curr_link_state = LS_UP.\n",
1666 qdev->ndev->name); 1654 qdev->ndev->name);
1667 if (ql_is_auto_neg_complete(qdev)) 1655 if (ql_is_auto_neg_complete(qdev))
1668 ql_finish_auto_neg(qdev); 1656 ql_finish_auto_neg(qdev);
@@ -1670,6 +1658,7 @@ static void ql_link_state_machine_work(struct work_struct *work)
1670 if (qdev->port_link_state == LS_UP) 1658 if (qdev->port_link_state == LS_UP)
1671 ql_link_down_detect_clear(qdev); 1659 ql_link_down_detect_clear(qdev);
1672 1660
1661 qdev->port_link_state = LS_UP;
1673 } 1662 }
1674 break; 1663 break;
1675 1664
@@ -1678,12 +1667,14 @@ static void ql_link_state_machine_work(struct work_struct *work)
1678 * See if the link is currently down or went down and came 1667 * See if the link is currently down or went down and came
1679 * back up 1668 * back up
1680 */ 1669 */
1681 if ((curr_link_state == LS_DOWN) || ql_link_down_detect(qdev)) { 1670 if (curr_link_state == LS_DOWN) {
1682 if (netif_msg_link(qdev)) 1671 if (netif_msg_link(qdev))
1683 printk(KERN_INFO PFX "%s: Link is down.\n", 1672 printk(KERN_INFO PFX "%s: Link is down.\n",
1684 qdev->ndev->name); 1673 qdev->ndev->name);
1685 qdev->port_link_state = LS_DOWN; 1674 qdev->port_link_state = LS_DOWN;
1686 } 1675 }
1676 if (ql_link_down_detect(qdev))
1677 qdev->port_link_state = LS_DOWN;
1687 break; 1678 break;
1688 } 1679 }
1689 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 1680 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 38116f9d4163..ba2e1c5b6bcf 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -1375,7 +1375,6 @@ struct ql_adapter {
1375 spinlock_t adapter_lock; 1375 spinlock_t adapter_lock;
1376 spinlock_t hw_lock; 1376 spinlock_t hw_lock;
1377 spinlock_t stats_lock; 1377 spinlock_t stats_lock;
1378 spinlock_t legacy_lock; /* used for maintaining legacy intr sync */
1379 1378
1380 /* PCI Bus Relative Register Addresses */ 1379 /* PCI Bus Relative Register Addresses */
1381 void __iomem *reg_base; 1380 void __iomem *reg_base;
@@ -1399,8 +1398,6 @@ struct ql_adapter {
1399 struct msix_entry *msi_x_entry; 1398 struct msix_entry *msi_x_entry;
1400 struct intr_context intr_context[MAX_RX_RINGS]; 1399 struct intr_context intr_context[MAX_RX_RINGS];
1401 1400
1402 int (*legacy_check) (struct ql_adapter *);
1403
1404 int tx_ring_count; /* One per online CPU. */ 1401 int tx_ring_count; /* One per online CPU. */
1405 u32 rss_ring_first_cq_id;/* index of first inbound (rss) rx_ring */ 1402 u32 rss_ring_first_cq_id;/* index of first inbound (rss) rx_ring */
1406 u32 rss_ring_count; /* One per online CPU. */ 1403 u32 rss_ring_count; /* One per online CPU. */
@@ -1502,7 +1499,7 @@ void ql_mpi_work(struct work_struct *work);
1502void ql_mpi_reset_work(struct work_struct *work); 1499void ql_mpi_reset_work(struct work_struct *work);
1503int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 ebit); 1500int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 ebit);
1504void ql_queue_asic_error(struct ql_adapter *qdev); 1501void ql_queue_asic_error(struct ql_adapter *qdev);
1505void ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr); 1502u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr);
1506void ql_set_ethtool_ops(struct net_device *ndev); 1503void ql_set_ethtool_ops(struct net_device *ndev);
1507int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data); 1504int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data);
1508 1505
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 4b2caa6b7ac5..b83a9c9b6a97 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -577,41 +577,53 @@ static void ql_disable_interrupts(struct ql_adapter *qdev)
577 * incremented every time we queue a worker and decremented every time 577 * incremented every time we queue a worker and decremented every time
578 * a worker finishes. Once it hits zero we enable the interrupt. 578 * a worker finishes. Once it hits zero we enable the interrupt.
579 */ 579 */
580void ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr) 580u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
581{ 581{
582 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) 582 u32 var = 0;
583 unsigned long hw_flags = 0;
584 struct intr_context *ctx = qdev->intr_context + intr;
585
586 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
587 /* Always enable if we're MSIX multi interrupts and
588 * it's not the default (zeroeth) interrupt.
589 */
583 ql_write32(qdev, INTR_EN, 590 ql_write32(qdev, INTR_EN,
584 qdev->intr_context[intr].intr_en_mask); 591 ctx->intr_en_mask);
585 else { 592 var = ql_read32(qdev, STS);
586 if (qdev->legacy_check) 593 return var;
587 spin_lock(&qdev->legacy_lock);
588 if (atomic_dec_and_test(&qdev->intr_context[intr].irq_cnt)) {
589 QPRINTK(qdev, INTR, ERR, "Enabling interrupt %d.\n",
590 intr);
591 ql_write32(qdev, INTR_EN,
592 qdev->intr_context[intr].intr_en_mask);
593 } else {
594 QPRINTK(qdev, INTR, ERR,
595 "Skip enable, other queue(s) are active.\n");
596 }
597 if (qdev->legacy_check)
598 spin_unlock(&qdev->legacy_lock);
599 } 594 }
595
596 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
597 if (atomic_dec_and_test(&ctx->irq_cnt)) {
598 ql_write32(qdev, INTR_EN,
599 ctx->intr_en_mask);
600 var = ql_read32(qdev, STS);
601 }
602 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
603 return var;
600} 604}
601 605
602static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr) 606static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
603{ 607{
604 u32 var = 0; 608 u32 var = 0;
609 unsigned long hw_flags;
610 struct intr_context *ctx;
605 611
606 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) 612 /* HW disables for us if we're MSIX multi interrupts and
607 goto exit; 613 * it's not the default (zeroeth) interrupt.
608 else if (!atomic_read(&qdev->intr_context[intr].irq_cnt)) { 614 */
615 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
616 return 0;
617
618 ctx = qdev->intr_context + intr;
619 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
620 if (!atomic_read(&ctx->irq_cnt)) {
609 ql_write32(qdev, INTR_EN, 621 ql_write32(qdev, INTR_EN,
610 qdev->intr_context[intr].intr_dis_mask); 622 ctx->intr_dis_mask);
611 var = ql_read32(qdev, STS); 623 var = ql_read32(qdev, STS);
612 } 624 }
613 atomic_inc(&qdev->intr_context[intr].irq_cnt); 625 atomic_inc(&ctx->irq_cnt);
614exit: 626 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
615 return var; 627 return var;
616} 628}
617 629
@@ -623,7 +635,9 @@ static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
623 * and enables only if the result is zero. 635 * and enables only if the result is zero.
624 * So we precharge it here. 636 * So we precharge it here.
625 */ 637 */
626 atomic_set(&qdev->intr_context[i].irq_cnt, 1); 638 if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
639 i == 0))
640 atomic_set(&qdev->intr_context[i].irq_cnt, 1);
627 ql_enable_completion_interrupt(qdev, i); 641 ql_enable_completion_interrupt(qdev, i);
628 } 642 }
629 643
@@ -1725,19 +1739,6 @@ static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
1725 return IRQ_HANDLED; 1739 return IRQ_HANDLED;
1726} 1740}
1727 1741
1728/* We check here to see if we're already handling a legacy
1729 * interrupt. If we are, then it must belong to another
1730 * chip with which we're sharing the interrupt line.
1731 */
1732int ql_legacy_check(struct ql_adapter *qdev)
1733{
1734 int err;
1735 spin_lock(&qdev->legacy_lock);
1736 err = atomic_read(&qdev->intr_context[0].irq_cnt);
1737 spin_unlock(&qdev->legacy_lock);
1738 return err;
1739}
1740
1741/* This handles a fatal error, MPI activity, and the default 1742/* This handles a fatal error, MPI activity, and the default
1742 * rx_ring in an MSI-X multiple vector environment. 1743 * rx_ring in an MSI-X multiple vector environment.
1743 * In MSI/Legacy environment it also processes the rest of 1744
@@ -1752,12 +1753,15 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
1752 int i; 1753 int i;
1753 int work_done = 0; 1754 int work_done = 0;
1754 1755
1755 if (qdev->legacy_check && qdev->legacy_check(qdev)) { 1756 spin_lock(&qdev->hw_lock);
1756 QPRINTK(qdev, INTR, INFO, "Already busy, not our interrupt.\n"); 1757 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
1757 return IRQ_NONE; /* Not our interrupt */ 1758 QPRINTK(qdev, INTR, DEBUG, "Shared Interrupt, Not ours!\n");
1759 spin_unlock(&qdev->hw_lock);
1760 return IRQ_NONE;
1758 } 1761 }
1762 spin_unlock(&qdev->hw_lock);
1759 1763
1760 var = ql_read32(qdev, STS); 1764 var = ql_disable_completion_interrupt(qdev, intr_context->intr);
1761 1765
1762 /* 1766 /*
1763 * Check for fatal error. 1767 * Check for fatal error.
@@ -1823,6 +1827,7 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
1823 } 1827 }
1824 } 1828 }
1825 } 1829 }
1830 ql_enable_completion_interrupt(qdev, intr_context->intr);
1826 return work_done ? IRQ_HANDLED : IRQ_NONE; 1831 return work_done ? IRQ_HANDLED : IRQ_NONE;
1827} 1832}
1828 1833
@@ -2701,8 +2706,6 @@ msi:
2701 } 2706 }
2702 } 2707 }
2703 irq_type = LEG_IRQ; 2708 irq_type = LEG_IRQ;
2704 spin_lock_init(&qdev->legacy_lock);
2705 qdev->legacy_check = ql_legacy_check;
2706 QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n"); 2709 QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n");
2707} 2710}
2708 2711
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index c821da21d8eb..4b7cb389dc49 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -81,6 +81,10 @@ static const int multicast_filter_limit = 32;
81#define RTL8169_TX_TIMEOUT (6*HZ) 81#define RTL8169_TX_TIMEOUT (6*HZ)
82#define RTL8169_PHY_TIMEOUT (10*HZ) 82#define RTL8169_PHY_TIMEOUT (10*HZ)
83 83
84#define RTL_EEPROM_SIG cpu_to_le32(0x8129)
85#define RTL_EEPROM_SIG_MASK cpu_to_le32(0xffff)
86#define RTL_EEPROM_SIG_ADDR 0x0000
87
84/* write/read MMIO register */ 88/* write/read MMIO register */
85#define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg)) 89#define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg))
86#define RTL_W16(reg, val16) writew ((val16), ioaddr + (reg)) 90#define RTL_W16(reg, val16) writew ((val16), ioaddr + (reg))
@@ -1911,74 +1915,6 @@ static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp)
1911 } 1915 }
1912} 1916}
1913 1917
1914static int rtl_eeprom_read(struct pci_dev *pdev, int cap, int addr, __le32 *val)
1915{
1916 int ret, count = 100;
1917 u16 status = 0;
1918 u32 value;
1919
1920 ret = pci_write_config_word(pdev, cap + PCI_VPD_ADDR, addr);
1921 if (ret < 0)
1922 return ret;
1923
1924 do {
1925 udelay(10);
1926 ret = pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &status);
1927 if (ret < 0)
1928 return ret;
1929 } while (!(status & PCI_VPD_ADDR_F) && --count);
1930
1931 if (!(status & PCI_VPD_ADDR_F))
1932 return -ETIMEDOUT;
1933
1934 ret = pci_read_config_dword(pdev, cap + PCI_VPD_DATA, &value);
1935 if (ret < 0)
1936 return ret;
1937
1938 *val = cpu_to_le32(value);
1939
1940 return 0;
1941}
1942
1943static void rtl_init_mac_address(struct rtl8169_private *tp,
1944 void __iomem *ioaddr)
1945{
1946 struct pci_dev *pdev = tp->pci_dev;
1947 u8 cfg1;
1948 int vpd_cap;
1949 u8 mac[8];
1950 DECLARE_MAC_BUF(buf);
1951
1952 cfg1 = RTL_R8(Config1);
1953 if (!(cfg1 & VPD)) {
1954 dprintk("VPD access not enabled, enabling\n");
1955 RTL_W8(Cfg9346, Cfg9346_Unlock);
1956 RTL_W8(Config1, cfg1 | VPD);
1957 RTL_W8(Cfg9346, Cfg9346_Lock);
1958 }
1959
1960 vpd_cap = pci_find_capability(pdev, PCI_CAP_ID_VPD);
1961 if (!vpd_cap)
1962 return;
1963
1964 /* MAC address is stored in EEPROM at offset 0x0e
1965 * Realtek says: "The VPD address does not have to be a DWORD-aligned
1966 * address as defined in the PCI 2.2 Specifications, but the VPD data
1967 * is always consecutive 4-byte data starting from the VPD address
1968 * specified."
1969 */
1970 if (rtl_eeprom_read(pdev, vpd_cap, 0x000e, (__le32*)&mac[0]) < 0 ||
1971 rtl_eeprom_read(pdev, vpd_cap, 0x0012, (__le32*)&mac[4]) < 0) {
1972 dprintk("Reading MAC address from EEPROM failed\n");
1973 return;
1974 }
1975
1976 dprintk("MAC address found in EEPROM: %s\n", print_mac(buf, mac));
1977
1978 /* Write MAC address */
1979 rtl_rar_set(tp, mac);
1980}
1981
1982static int __devinit 1918static int __devinit
1983rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 1919rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1984{ 1920{
@@ -2156,8 +2092,6 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2156 2092
2157 tp->mmio_addr = ioaddr; 2093 tp->mmio_addr = ioaddr;
2158 2094
2159 rtl_init_mac_address(tp, ioaddr);
2160
2161 /* Get MAC address */ 2095 /* Get MAC address */
2162 for (i = 0; i < MAC_ADDR_LEN; i++) 2096 for (i = 0; i < MAC_ADDR_LEN; i++)
2163 dev->dev_addr[i] = RTL_R8(MAC0 + i); 2097 dev->dev_addr[i] = RTL_R8(MAC0 + i);
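The removed rtl_eeprom_read()/rtl_init_mac_address() pair read the MAC address through the PCI VPD capability: write the word address to PCI_VPD_ADDR, poll until the device sets PCI_VPD_ADDR_F, then fetch 32 bits from PCI_VPD_DATA. As a reference for what was dropped, here is a generic, driver-agnostic sketch of that polling sequence (the function name is made up; later kernels provide pci_read_vpd() for this job):

#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/errno.h>

/* Read one 32-bit word through the PCI VPD capability. Sketch only. */
static int vpd_read_dword(struct pci_dev *pdev, int cap, int addr, u32 *val)
{
	u16 status;
	int i, ret;

	ret = pci_write_config_word(pdev, cap + PCI_VPD_ADDR, addr);
	if (ret)
		return ret;

	/* The device sets PCI_VPD_ADDR_F once the requested dword is ready. */
	for (i = 0; i < 100; i++) {
		udelay(10);
		ret = pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &status);
		if (ret)
			return ret;
		if (status & PCI_VPD_ADDR_F)
			return pci_read_config_dword(pdev, cap + PCI_VPD_DATA, val);
	}

	return -ETIMEDOUT;
}

The capability offset would come from pci_find_capability(pdev, PCI_CAP_ID_VPD), exactly as in the deleted code.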
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index fa98af58223e..cd0d0873d978 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -174,8 +174,8 @@ static struct efx_ethtool_stat efx_ethtool_stats[] = {
174 174
175/* EEPROM range with gPXE configuration */ 175/* EEPROM range with gPXE configuration */
176#define EFX_ETHTOOL_EEPROM_MAGIC 0xEFAB 176#define EFX_ETHTOOL_EEPROM_MAGIC 0xEFAB
177#define EFX_ETHTOOL_EEPROM_MIN 0x100U 177#define EFX_ETHTOOL_EEPROM_MIN 0x800U
178#define EFX_ETHTOOL_EEPROM_MAX 0x400U 178#define EFX_ETHTOOL_EEPROM_MAX 0x1800U
179 179
180/************************************************************************** 180/**************************************************************************
181 * 181 *
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index a24bb68887ab..59f242a67714 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -927,7 +927,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
927 struct sh_eth_private *mdp = netdev_priv(ndev); 927 struct sh_eth_private *mdp = netdev_priv(ndev);
928 struct sh_eth_txdesc *txdesc; 928 struct sh_eth_txdesc *txdesc;
929 u32 entry; 929 u32 entry;
930 int flags; 930 unsigned long flags;
931 931
932 spin_lock_irqsave(&mdp->lock, flags); 932 spin_lock_irqsave(&mdp->lock, flags);
933 if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) { 933 if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
@@ -1141,7 +1141,7 @@ static int sh_mdio_init(struct net_device *ndev, int id)
1141 /* Hook up MII support for ethtool */ 1141 /* Hook up MII support for ethtool */
1142 mdp->mii_bus->name = "sh_mii"; 1142 mdp->mii_bus->name = "sh_mii";
1143 mdp->mii_bus->parent = &ndev->dev; 1143 mdp->mii_bus->parent = &ndev->dev;
1144 mdp->mii_bus->id[0] = id; 1144 snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%x", id);
1145 1145
1146 /* PHY IRQ */ 1146 /* PHY IRQ */
1147 mdp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); 1147 mdp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
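The first sh_eth change is the usual spin_lock_irqsave() fix: the flags argument must be an unsigned long because the macro stashes the full saved interrupt state there, and an int silently truncates it on 64-bit builds. In miniature (the lock and body are placeholders):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

static void demo_touch_shared_state(void)
{
	unsigned long flags;	/* must be unsigned long, never int */

	spin_lock_irqsave(&demo_lock, flags);
	/* ... update state also touched from interrupt context ... */
	spin_unlock_irqrestore(&demo_lock, flags);
}

The second hunk is unrelated: mii_bus->id became a fixed-size string, so it is filled with snprintf() instead of an array element assignment.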
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index 3fe01763760e..e6e3bf58a569 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -317,6 +317,7 @@ static struct mii_chip_info {
317 unsigned int type; 317 unsigned int type;
318 u32 feature; 318 u32 feature;
319} mii_chip_table[] = { 319} mii_chip_table[] = {
320 { "Atheros PHY AR8012", { 0x004d, 0xd020 }, LAN, 0 },
320 { "Broadcom PHY BCM5461", { 0x0020, 0x60c0 }, LAN, F_PHY_BCM5461 }, 321 { "Broadcom PHY BCM5461", { 0x0020, 0x60c0 }, LAN, F_PHY_BCM5461 },
321 { "Broadcom PHY AC131", { 0x0143, 0xbc70 }, LAN, 0 }, 322 { "Broadcom PHY AC131", { 0x0143, 0xbc70 }, LAN, 0 },
322 { "Agere PHY ET1101B", { 0x0282, 0xf010 }, LAN, 0 }, 323 { "Agere PHY ET1101B", { 0x0282, 0xf010 }, LAN, 0 },
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index fa3a460f8e2f..8e8337e8b072 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -1630,7 +1630,6 @@ sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
1630 * sis900_interrupt - sis900 interrupt handler 1630 * sis900_interrupt - sis900 interrupt handler
1631 * @irq: the irq number 1631 * @irq: the irq number
1632 * @dev_instance: the client data object 1632 * @dev_instance: the client data object
1633 * @regs: snapshot of processor context
1634 * 1633 *
1635 * The interrupt handler does all of the Rx thread work, 1634 * The interrupt handler does all of the Rx thread work,
1636 * and cleans up after the Tx thread 1635 * and cleans up after the Tx thread
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c
index 8aa7460ef0e3..9a16a79b67d0 100644
--- a/drivers/net/smc911x.c
+++ b/drivers/net/smc911x.c
@@ -155,23 +155,17 @@ static void PRINT_PKT(u_char *buf, int length)
155/* this enables an interrupt in the interrupt mask register */ 155/* this enables an interrupt in the interrupt mask register */
156#define SMC_ENABLE_INT(lp, x) do { \ 156#define SMC_ENABLE_INT(lp, x) do { \
157 unsigned int __mask; \ 157 unsigned int __mask; \
158 unsigned long __flags; \
159 spin_lock_irqsave(&lp->lock, __flags); \
160 __mask = SMC_GET_INT_EN((lp)); \ 158 __mask = SMC_GET_INT_EN((lp)); \
161 __mask |= (x); \ 159 __mask |= (x); \
162 SMC_SET_INT_EN((lp), __mask); \ 160 SMC_SET_INT_EN((lp), __mask); \
163 spin_unlock_irqrestore(&lp->lock, __flags); \
164} while (0) 161} while (0)
165 162
166/* this disables an interrupt from the interrupt mask register */ 163/* this disables an interrupt from the interrupt mask register */
167#define SMC_DISABLE_INT(lp, x) do { \ 164#define SMC_DISABLE_INT(lp, x) do { \
168 unsigned int __mask; \ 165 unsigned int __mask; \
169 unsigned long __flags; \
170 spin_lock_irqsave(&lp->lock, __flags); \
171 __mask = SMC_GET_INT_EN((lp)); \ 166 __mask = SMC_GET_INT_EN((lp)); \
172 __mask &= ~(x); \ 167 __mask &= ~(x); \
173 SMC_SET_INT_EN((lp), __mask); \ 168 SMC_SET_INT_EN((lp), __mask); \
174 spin_unlock_irqrestore(&lp->lock, __flags); \
175} while (0) 169} while (0)
176 170
177/* 171/*
@@ -180,7 +174,7 @@ static void PRINT_PKT(u_char *buf, int length)
180static void smc911x_reset(struct net_device *dev) 174static void smc911x_reset(struct net_device *dev)
181{ 175{
182 struct smc911x_local *lp = netdev_priv(dev); 176 struct smc911x_local *lp = netdev_priv(dev);
183 unsigned int reg, timeout=0, resets=1; 177 unsigned int reg, timeout=0, resets=1, irq_cfg;
184 unsigned long flags; 178 unsigned long flags;
185 179
186 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); 180 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
@@ -252,7 +246,12 @@ static void smc911x_reset(struct net_device *dev)
252 * Deassert IRQ for 1*10us for edge type interrupts 246 * Deassert IRQ for 1*10us for edge type interrupts
253 * and drive IRQ pin push-pull 247 * and drive IRQ pin push-pull
254 */ 248 */
255 SMC_SET_IRQ_CFG(lp, (1 << 24) | INT_CFG_IRQ_EN_ | INT_CFG_IRQ_TYPE_); 249 irq_cfg = (1 << 24) | INT_CFG_IRQ_EN_ | INT_CFG_IRQ_TYPE_;
250#ifdef SMC_DYNAMIC_BUS_CONFIG
251 if (lp->cfg.irq_polarity)
252 irq_cfg |= INT_CFG_IRQ_POL_;
253#endif
254 SMC_SET_IRQ_CFG(lp, irq_cfg);
256 255
257 /* clear anything saved */ 256 /* clear anything saved */
258 if (lp->pending_tx_skb != NULL) { 257 if (lp->pending_tx_skb != NULL) {
@@ -274,6 +273,8 @@ static void smc911x_enable(struct net_device *dev)
274 273
275 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); 274 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
276 275
276 spin_lock_irqsave(&lp->lock, flags);
277
277 SMC_SET_MAC_ADDR(lp, dev->dev_addr); 278 SMC_SET_MAC_ADDR(lp, dev->dev_addr);
278 279
279 /* Enable TX */ 280 /* Enable TX */
@@ -286,12 +287,10 @@ static void smc911x_enable(struct net_device *dev)
286 SMC_SET_FIFO_TSL(lp, 64); 287 SMC_SET_FIFO_TSL(lp, 64);
287 SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000); 288 SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000);
288 289
289 spin_lock_irqsave(&lp->lock, flags);
290 SMC_GET_MAC_CR(lp, cr); 290 SMC_GET_MAC_CR(lp, cr);
291 cr |= MAC_CR_TXEN_ | MAC_CR_HBDIS_; 291 cr |= MAC_CR_TXEN_ | MAC_CR_HBDIS_;
292 SMC_SET_MAC_CR(lp, cr); 292 SMC_SET_MAC_CR(lp, cr);
293 SMC_SET_TX_CFG(lp, TX_CFG_TX_ON_); 293 SMC_SET_TX_CFG(lp, TX_CFG_TX_ON_);
294 spin_unlock_irqrestore(&lp->lock, flags);
295 294
296 /* Add 2 byte padding to start of packets */ 295 /* Add 2 byte padding to start of packets */
297 SMC_SET_RX_CFG(lp, (2<<8) & RX_CFG_RXDOFF_); 296 SMC_SET_RX_CFG(lp, (2<<8) & RX_CFG_RXDOFF_);
@@ -300,9 +299,7 @@ static void smc911x_enable(struct net_device *dev)
300 if (cr & MAC_CR_RXEN_) 299 if (cr & MAC_CR_RXEN_)
301 DBG(SMC_DEBUG_RX, "%s: Receiver already enabled\n", dev->name); 300 DBG(SMC_DEBUG_RX, "%s: Receiver already enabled\n", dev->name);
302 301
303 spin_lock_irqsave(&lp->lock, flags);
304 SMC_SET_MAC_CR(lp, cr | MAC_CR_RXEN_); 302 SMC_SET_MAC_CR(lp, cr | MAC_CR_RXEN_);
305 spin_unlock_irqrestore(&lp->lock, flags);
306 303
307 /* Interrupt on every received packet */ 304 /* Interrupt on every received packet */
308 SMC_SET_FIFO_RSA(lp, 0x01); 305 SMC_SET_FIFO_RSA(lp, 0x01);
@@ -318,6 +315,8 @@ static void smc911x_enable(struct net_device *dev)
318 mask|=INT_EN_RDFO_EN_; 315 mask|=INT_EN_RDFO_EN_;
319 } 316 }
320 SMC_ENABLE_INT(lp, mask); 317 SMC_ENABLE_INT(lp, mask);
318
319 spin_unlock_irqrestore(&lp->lock, flags);
321} 320}
322 321
323/* 322/*
@@ -458,7 +457,6 @@ static void smc911x_hardware_send_pkt(struct net_device *dev)
458 struct sk_buff *skb; 457 struct sk_buff *skb;
459 unsigned int cmdA, cmdB, len; 458 unsigned int cmdA, cmdB, len;
460 unsigned char *buf; 459 unsigned char *buf;
461 unsigned long flags;
462 460
463 DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n", dev->name, __func__); 461 DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n", dev->name, __func__);
464 BUG_ON(lp->pending_tx_skb == NULL); 462 BUG_ON(lp->pending_tx_skb == NULL);
@@ -501,13 +499,11 @@ static void smc911x_hardware_send_pkt(struct net_device *dev)
501#else 499#else
502 SMC_PUSH_DATA(lp, buf, len); 500 SMC_PUSH_DATA(lp, buf, len);
503 dev->trans_start = jiffies; 501 dev->trans_start = jiffies;
504 dev_kfree_skb(skb); 502 dev_kfree_skb_irq(skb);
505#endif 503#endif
506 spin_lock_irqsave(&lp->lock, flags);
507 if (!lp->tx_throttle) { 504 if (!lp->tx_throttle) {
508 netif_wake_queue(dev); 505 netif_wake_queue(dev);
509 } 506 }
510 spin_unlock_irqrestore(&lp->lock, flags);
511 SMC_ENABLE_INT(lp, INT_EN_TDFA_EN_ | INT_EN_TSFL_EN_); 507 SMC_ENABLE_INT(lp, INT_EN_TDFA_EN_ | INT_EN_TSFL_EN_);
512} 508}
513 509
@@ -526,6 +522,8 @@ static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
526 DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n", 522 DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n",
527 dev->name, __func__); 523 dev->name, __func__);
528 524
525 spin_lock_irqsave(&lp->lock, flags);
526
529 BUG_ON(lp->pending_tx_skb != NULL); 527 BUG_ON(lp->pending_tx_skb != NULL);
530 528
531 free = SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TDFREE_; 529 free = SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TDFREE_;
@@ -535,12 +533,10 @@ static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
535 if (free <= SMC911X_TX_FIFO_LOW_THRESHOLD) { 533 if (free <= SMC911X_TX_FIFO_LOW_THRESHOLD) {
536 DBG(SMC_DEBUG_TX, "%s: Disabling data flow due to low FIFO space (%d)\n", 534 DBG(SMC_DEBUG_TX, "%s: Disabling data flow due to low FIFO space (%d)\n",
537 dev->name, free); 535 dev->name, free);
538 spin_lock_irqsave(&lp->lock, flags);
539 /* Reenable when at least 1 packet of size MTU present */ 536 /* Reenable when at least 1 packet of size MTU present */
540 SMC_SET_FIFO_TDA(lp, (SMC911X_TX_FIFO_LOW_THRESHOLD)/64); 537 SMC_SET_FIFO_TDA(lp, (SMC911X_TX_FIFO_LOW_THRESHOLD)/64);
541 lp->tx_throttle = 1; 538 lp->tx_throttle = 1;
542 netif_stop_queue(dev); 539 netif_stop_queue(dev);
543 spin_unlock_irqrestore(&lp->lock, flags);
544 } 540 }
545 541
546 /* Drop packets when we run out of space in TX FIFO 542 /* Drop packets when we run out of space in TX FIFO
@@ -556,6 +552,7 @@ static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
556 lp->pending_tx_skb = NULL; 552 lp->pending_tx_skb = NULL;
557 dev->stats.tx_errors++; 553 dev->stats.tx_errors++;
558 dev->stats.tx_dropped++; 554 dev->stats.tx_dropped++;
555 spin_unlock_irqrestore(&lp->lock, flags);
559 dev_kfree_skb(skb); 556 dev_kfree_skb(skb);
560 return 0; 557 return 0;
561 } 558 }
@@ -565,7 +562,6 @@ static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
565 /* If the DMA is already running then defer this packet Tx until 562 /* If the DMA is already running then defer this packet Tx until
566 * the DMA IRQ starts it 563 * the DMA IRQ starts it
567 */ 564 */
568 spin_lock_irqsave(&lp->lock, flags);
569 if (lp->txdma_active) { 565 if (lp->txdma_active) {
570 DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: Tx DMA running, deferring packet\n", dev->name); 566 DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: Tx DMA running, deferring packet\n", dev->name);
571 lp->pending_tx_skb = skb; 567 lp->pending_tx_skb = skb;
@@ -576,11 +572,11 @@ static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
576 DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: Activating Tx DMA\n", dev->name); 572 DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: Activating Tx DMA\n", dev->name);
577 lp->txdma_active = 1; 573 lp->txdma_active = 1;
578 } 574 }
579 spin_unlock_irqrestore(&lp->lock, flags);
580 } 575 }
581#endif 576#endif
582 lp->pending_tx_skb = skb; 577 lp->pending_tx_skb = skb;
583 smc911x_hardware_send_pkt(dev); 578 smc911x_hardware_send_pkt(dev);
579 spin_unlock_irqrestore(&lp->lock, flags);
584 580
585 return 0; 581 return 0;
586} 582}
@@ -1242,7 +1238,7 @@ smc911x_rx_dma_irq(int dma, void *data)
1242 netif_rx(skb); 1238 netif_rx(skb);
1243 1239
1244 spin_lock_irqsave(&lp->lock, flags); 1240 spin_lock_irqsave(&lp->lock, flags);
1245 pkts = (SMC_GET_RX_FIFO_INF() & RX_FIFO_INF_RXSUSED_) >> 16; 1241 pkts = (SMC_GET_RX_FIFO_INF(lp) & RX_FIFO_INF_RXSUSED_) >> 16;
1246 if (pkts != 0) { 1242 if (pkts != 0) {
1247 smc911x_rcv(dev); 1243 smc911x_rcv(dev);
1248 }else { 1244 }else {
@@ -1739,7 +1735,7 @@ static const struct ethtool_ops smc911x_ethtool_ops = {
1739 * This routine has a simple purpose -- make the SMC chip generate an 1735 * This routine has a simple purpose -- make the SMC chip generate an
1740 * interrupt, so an auto-detect routine can detect it, and find the IRQ, 1736 * interrupt, so an auto-detect routine can detect it, and find the IRQ,
1741 */ 1737 */
1742static int __init smc911x_findirq(struct net_device *dev) 1738static int __devinit smc911x_findirq(struct net_device *dev)
1743{ 1739{
1744 struct smc911x_local *lp = netdev_priv(dev); 1740 struct smc911x_local *lp = netdev_priv(dev);
1745 int timeout = 20; 1741 int timeout = 20;
@@ -1803,7 +1799,7 @@ static int __init smc911x_findirq(struct net_device *dev)
1803 * o actually GRAB the irq. 1799 * o actually GRAB the irq.
1804 * o GRAB the region 1800 * o GRAB the region
1805 */ 1801 */
1806static int __init smc911x_probe(struct net_device *dev) 1802static int __devinit smc911x_probe(struct net_device *dev)
1807{ 1803{
1808 struct smc911x_local *lp = netdev_priv(dev); 1804 struct smc911x_local *lp = netdev_priv(dev);
1809 int i, retval; 1805 int i, retval;
@@ -1817,7 +1813,7 @@ static int __init smc911x_probe(struct net_device *dev)
1817 val = SMC_GET_BYTE_TEST(lp); 1813 val = SMC_GET_BYTE_TEST(lp);
1818 DBG(SMC_DEBUG_MISC, "%s: endian probe returned 0x%04x\n", CARDNAME, val); 1814 DBG(SMC_DEBUG_MISC, "%s: endian probe returned 0x%04x\n", CARDNAME, val);
1819 if (val != 0x87654321) { 1815 if (val != 0x87654321) {
1820 printk(KERN_ERR "Invalid chip endian 0x08%x\n",val); 1816 printk(KERN_ERR "Invalid chip endian 0x%08x\n",val);
1821 retval = -ENODEV; 1817 retval = -ENODEV;
1822 goto err_out; 1818 goto err_out;
1823 } 1819 }
@@ -2052,9 +2048,11 @@ err_out:
2052 * 0 --> there is a device 2048 * 0 --> there is a device
2053 * anything else, error 2049 * anything else, error
2054 */ 2050 */
2055static int smc911x_drv_probe(struct platform_device *pdev) 2051static int __devinit smc911x_drv_probe(struct platform_device *pdev)
2056{ 2052{
2057 struct smc91x_platdata *pd = pdev->dev.platform_data; 2053#ifdef SMC_DYNAMIC_BUS_CONFIG
2054 struct smc911x_platdata *pd = pdev->dev.platform_data;
2055#endif
2058 struct net_device *ndev; 2056 struct net_device *ndev;
2059 struct resource *res; 2057 struct resource *res;
2060 struct smc911x_local *lp; 2058 struct smc911x_local *lp;
@@ -2126,7 +2124,7 @@ out:
2126 return ret; 2124 return ret;
2127} 2125}
2128 2126
2129static int smc911x_drv_remove(struct platform_device *pdev) 2127static int __devexit smc911x_drv_remove(struct platform_device *pdev)
2130{ 2128{
2131 struct net_device *ndev = platform_get_drvdata(pdev); 2129 struct net_device *ndev = platform_get_drvdata(pdev);
2132 struct smc911x_local *lp = netdev_priv(ndev); 2130 struct smc911x_local *lp = netdev_priv(ndev);
@@ -2186,9 +2184,9 @@ static int smc911x_drv_resume(struct platform_device *dev)
2186 2184
2187 if (netif_running(ndev)) { 2185 if (netif_running(ndev)) {
2188 smc911x_reset(ndev); 2186 smc911x_reset(ndev);
2189 smc911x_enable(ndev);
2190 if (lp->phy_type != 0) 2187 if (lp->phy_type != 0)
2191 smc911x_phy_configure(&lp->phy_configure); 2188 smc911x_phy_configure(&lp->phy_configure);
2189 smc911x_enable(ndev);
2192 netif_device_attach(ndev); 2190 netif_device_attach(ndev);
2193 } 2191 }
2194 } 2192 }
@@ -2197,7 +2195,7 @@ static int smc911x_drv_resume(struct platform_device *dev)
2197 2195
2198static struct platform_driver smc911x_driver = { 2196static struct platform_driver smc911x_driver = {
2199 .probe = smc911x_drv_probe, 2197 .probe = smc911x_drv_probe,
2200 .remove = smc911x_drv_remove, 2198 .remove = __devexit_p(smc911x_drv_remove),
2201 .suspend = smc911x_drv_suspend, 2199 .suspend = smc911x_drv_suspend,
2202 .resume = smc911x_drv_resume, 2200 .resume = smc911x_drv_resume,
2203 .driver = { 2201 .driver = {
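The smc911x locking rework above replaces many short lock/unlock pairs with a single spin_lock_irqsave() held across the whole enable and transmit paths; because the skb can now be freed with the lock held, dev_kfree_skb() becomes dev_kfree_skb_irq(). Roughly, the transmit path ends up shaped like this sketch (demo_priv and the FIFO helper are placeholders, not the smc911x API):

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

struct demo_priv {
	spinlock_t lock;
	int tx_throttle;
};

static void demo_push_to_fifo(struct net_device *dev, struct sk_buff *skb)
{
	/* MMIO writes into the TX data FIFO would go here. */
}

static int demo_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct demo_priv *lp = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&lp->lock, flags);

	demo_push_to_fifo(dev, skb);
	dev_kfree_skb_irq(skb);		/* atomic context: not dev_kfree_skb() */

	if (!lp->tx_throttle)
		netif_wake_queue(dev);

	spin_unlock_irqrestore(&lp->lock, flags);
	return NETDEV_TX_OK;
}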
diff --git a/drivers/net/smc911x.h b/drivers/net/smc911x.h
index bf6240f23f5d..cc7d85bdfb3e 100644
--- a/drivers/net/smc911x.h
+++ b/drivers/net/smc911x.h
@@ -50,6 +50,10 @@
50#define SMC_DYNAMIC_BUS_CONFIG 50#define SMC_DYNAMIC_BUS_CONFIG
51#endif 51#endif
52 52
53#ifdef SMC_USE_PXA_DMA
54#define SMC_USE_DMA
55#endif
56
53/* store this information for the driver.. */ 57/* store this information for the driver.. */
54struct smc911x_local { 58struct smc911x_local {
55 /* 59 /*
@@ -196,8 +200,6 @@ static inline void SMC_outsl(struct smc911x_local *lp, int reg,
196 200
197 201
198#ifdef SMC_USE_PXA_DMA 202#ifdef SMC_USE_PXA_DMA
199#define SMC_USE_DMA
200
201/* 203/*
202 * Define the request and free functions 204 * Define the request and free functions
203 * These are unfortunately architecture specific as no generic allocation 205 * These are unfortunately architecture specific as no generic allocation
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index c70870e0fd61..35c56abf4113 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -1696,7 +1696,7 @@ static const struct ethtool_ops smc_ethtool_ops = {
1696 * I just deleted auto_irq.c, since it was never built... 1696 * I just deleted auto_irq.c, since it was never built...
1697 * --jgarzik 1697 * --jgarzik
1698 */ 1698 */
1699static int __init smc_findirq(struct smc_local *lp) 1699static int __devinit smc_findirq(struct smc_local *lp)
1700{ 1700{
1701 void __iomem *ioaddr = lp->base; 1701 void __iomem *ioaddr = lp->base;
1702 int timeout = 20; 1702 int timeout = 20;
@@ -1770,7 +1770,7 @@ static int __init smc_findirq(struct smc_local *lp)
1770 * o actually GRAB the irq. 1770 * o actually GRAB the irq.
1771 * o GRAB the region 1771 * o GRAB the region
1772 */ 1772 */
1773static int __init smc_probe(struct net_device *dev, void __iomem *ioaddr, 1773static int __devinit smc_probe(struct net_device *dev, void __iomem *ioaddr,
1774 unsigned long irq_flags) 1774 unsigned long irq_flags)
1775{ 1775{
1776 struct smc_local *lp = netdev_priv(dev); 1776 struct smc_local *lp = netdev_priv(dev);
@@ -2060,7 +2060,7 @@ static int smc_request_attrib(struct platform_device *pdev,
2060 struct net_device *ndev) 2060 struct net_device *ndev)
2061{ 2061{
2062 struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib"); 2062 struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib");
2063 struct smc_local *lp = netdev_priv(ndev); 2063 struct smc_local *lp __maybe_unused = netdev_priv(ndev);
2064 2064
2065 if (!res) 2065 if (!res)
2066 return 0; 2066 return 0;
@@ -2075,7 +2075,7 @@ static void smc_release_attrib(struct platform_device *pdev,
2075 struct net_device *ndev) 2075 struct net_device *ndev)
2076{ 2076{
2077 struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib"); 2077 struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib");
2078 struct smc_local *lp = netdev_priv(ndev); 2078 struct smc_local *lp __maybe_unused = netdev_priv(ndev);
2079 2079
2080 if (res) 2080 if (res)
2081 release_mem_region(res->start, ATTRIB_SIZE); 2081 release_mem_region(res->start, ATTRIB_SIZE);
@@ -2126,7 +2126,7 @@ static void smc_release_datacs(struct platform_device *pdev, struct net_device *
2126 * 0 --> there is a device 2126 * 0 --> there is a device
2127 * anything else, error 2127 * anything else, error
2128 */ 2128 */
2129static int smc_drv_probe(struct platform_device *pdev) 2129static int __devinit smc_drv_probe(struct platform_device *pdev)
2130{ 2130{
2131 struct smc91x_platdata *pd = pdev->dev.platform_data; 2131 struct smc91x_platdata *pd = pdev->dev.platform_data;
2132 struct smc_local *lp; 2132 struct smc_local *lp;
@@ -2240,7 +2240,7 @@ static int smc_drv_probe(struct platform_device *pdev)
2240 return ret; 2240 return ret;
2241} 2241}
2242 2242
2243static int smc_drv_remove(struct platform_device *pdev) 2243static int __devexit smc_drv_remove(struct platform_device *pdev)
2244{ 2244{
2245 struct net_device *ndev = platform_get_drvdata(pdev); 2245 struct net_device *ndev = platform_get_drvdata(pdev);
2246 struct smc_local *lp = netdev_priv(ndev); 2246 struct smc_local *lp = netdev_priv(ndev);
@@ -2305,7 +2305,7 @@ static int smc_drv_resume(struct platform_device *dev)
2305 2305
2306static struct platform_driver smc_driver = { 2306static struct platform_driver smc_driver = {
2307 .probe = smc_drv_probe, 2307 .probe = smc_drv_probe,
2308 .remove = smc_drv_remove, 2308 .remove = __devexit_p(smc_drv_remove),
2309 .suspend = smc_drv_suspend, 2309 .suspend = smc_drv_suspend,
2310 .resume = smc_drv_resume, 2310 .resume = smc_drv_resume,
2311 .driver = { 2311 .driver = {
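The smc91x and smc911x hunks also add the hotplug section annotations that were missing: probe-path functions become __devinit, remove paths __devexit, and the remove pointer is wrapped in __devexit_p() so it compiles to NULL when the __devexit section is discarded. The standard skeleton at the time looked like this (driver name invented; these annotations were removed from later kernels):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int __devinit demo_drv_probe(struct platform_device *pdev)
{
	/* map resources, allocate and register the net_device ... */
	return 0;
}

static int __devexit demo_drv_remove(struct platform_device *pdev)
{
	/* unregister the net_device and release resources ... */
	return 0;
}

static struct platform_driver demo_driver = {
	.probe		= demo_drv_probe,
	/* __devexit_p() evaluates to NULL when __devexit code is dropped */
	.remove		= __devexit_p(demo_drv_remove),
	.driver		= {
		.name	= "demo-eth",
		.owner	= THIS_MODULE,
	},
};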
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index b6435d0d71f9..07599b492359 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -672,7 +672,6 @@ write_hash:
672/** 672/**
673 * spider_net_prepare_tx_descr - fill tx descriptor with skb data 673 * spider_net_prepare_tx_descr - fill tx descriptor with skb data
674 * @card: card structure 674 * @card: card structure
675 * @descr: descriptor structure to fill out
676 * @skb: packet to use 675 * @skb: packet to use
677 * 676 *
678 * returns 0 on success, <0 on failure. 677 * returns 0 on success, <0 on failure.
@@ -867,7 +866,6 @@ spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
867/** 866/**
868 * spider_net_kick_tx_dma - enables TX DMA processing 867 * spider_net_kick_tx_dma - enables TX DMA processing
869 * @card: card structure 868 * @card: card structure
870 * @descr: descriptor address to enable TX processing at
871 * 869 *
872 * This routine will start the transmit DMA running if 870 * This routine will start the transmit DMA running if
873 * it is not already running. This routine need only be 871
@@ -1637,7 +1635,6 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg,
1637 * spider_net_interrupt - interrupt handler for spider_net 1635 * spider_net_interrupt - interrupt handler for spider_net
1638 * @irq: interrupt number 1636 * @irq: interrupt number
1639 * @ptr: pointer to net_device 1637 * @ptr: pointer to net_device
1640 * @regs: PU registers
1641 * 1638 *
1642 * returns IRQ_HANDLED, if interrupt was for driver, or IRQ_NONE, if no 1639 * returns IRQ_HANDLED, if interrupt was for driver, or IRQ_NONE, if no
1643 * interrupt found raised by card. 1640 * interrupt found raised by card.
@@ -2419,7 +2416,6 @@ spider_net_undo_pci_setup(struct spider_net_card *card)
2419 2416
2420/** 2417/**
2421 * spider_net_setup_pci_dev - sets up the device in terms of PCI operations 2418 * spider_net_setup_pci_dev - sets up the device in terms of PCI operations
2422 * @card: card structure
2423 * @pdev: PCI device 2419 * @pdev: PCI device
2424 * 2420 *
2425 * Returns the card structure or NULL if any errors occur 2421 * Returns the card structure or NULL if any errors occur
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index 1d2ef8f47780..5a40f2d78beb 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -1509,6 +1509,11 @@ static int __netdev_rx(struct net_device *dev, int *quota)
1509 desc->status = 0; 1509 desc->status = 0;
1510 np->rx_done = (np->rx_done + 1) % DONE_Q_SIZE; 1510 np->rx_done = (np->rx_done + 1) % DONE_Q_SIZE;
1511 } 1511 }
1512
1513 if (*quota == 0) { /* out of rx quota */
1514 retcode = 1;
1515 goto out;
1516 }
1512 writew(np->rx_done, np->base + CompletionQConsumerIdx); 1517 writew(np->rx_done, np->base + CompletionQConsumerIdx);
1513 1518
1514 out: 1519 out:
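The starfire fix makes __netdev_rx() honour its quota: once the budget is exhausted it returns early with retcode = 1 and, importantly, skips the completion-queue consumer-index write, so the remaining descriptors are still outstanding when the next poll runs. The general shape of a budget-respecting receive pass (all names are illustrative stand-ins):

#include <linux/netdevice.h>

static bool demo_desc_ready(struct net_device *dev) { return false; /* stub */ }
static void demo_rx_one(struct net_device *dev) { /* pull one frame, hand it up */ }
static void demo_write_consumer_index(struct net_device *dev) { /* MMIO write */ }

static int demo_netdev_rx(struct net_device *dev, int *quota)
{
	int retcode = 0;

	while (demo_desc_ready(dev)) {
		if (*quota == 0) {	/* out of rx quota */
			retcode = 1;	/* more work left for the next poll */
			goto out;
		}
		demo_rx_one(dev);
		(*quota)--;
	}

	/* Only tell the hardware about completions we actually consumed. */
	demo_write_consumer_index(dev);
out:
	return retcode;
}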
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 4291458955ef..fed7eba65ead 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -1142,6 +1142,70 @@ static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev)
1142 return NETDEV_TX_OK; 1142 return NETDEV_TX_OK;
1143} 1143}
1144 1144
1145static void gem_pcs_reset(struct gem *gp)
1146{
1147 int limit;
1148 u32 val;
1149
1150 /* Reset PCS unit. */
1151 val = readl(gp->regs + PCS_MIICTRL);
1152 val |= PCS_MIICTRL_RST;
1153 writel(val, gp->regs + PCS_MIICTRL);
1154
1155 limit = 32;
1156 while (readl(gp->regs + PCS_MIICTRL) & PCS_MIICTRL_RST) {
1157 udelay(100);
1158 if (limit-- <= 0)
1159 break;
1160 }
1161 if (limit <= 0)
1162 printk(KERN_WARNING "%s: PCS reset bit would not clear.\n",
1163 gp->dev->name);
1164}
1165
1166static void gem_pcs_reinit_adv(struct gem *gp)
1167{
1168 u32 val;
1169
1170 /* Make sure PCS is disabled while changing advertisement
1171 * configuration.
1172 */
1173 val = readl(gp->regs + PCS_CFG);
1174 val &= ~(PCS_CFG_ENABLE | PCS_CFG_TO);
1175 writel(val, gp->regs + PCS_CFG);
1176
1177 /* Advertise all capabilities except asymmetric
1178 * pause.
1179 */
1180 val = readl(gp->regs + PCS_MIIADV);
1181 val |= (PCS_MIIADV_FD | PCS_MIIADV_HD |
1182 PCS_MIIADV_SP | PCS_MIIADV_AP);
1183 writel(val, gp->regs + PCS_MIIADV);
1184
1185 /* Enable and restart auto-negotiation, disable wrapback/loopback,
1186 * and re-enable PCS.
1187 */
1188 val = readl(gp->regs + PCS_MIICTRL);
1189 val |= (PCS_MIICTRL_RAN | PCS_MIICTRL_ANE);
1190 val &= ~PCS_MIICTRL_WB;
1191 writel(val, gp->regs + PCS_MIICTRL);
1192
1193 val = readl(gp->regs + PCS_CFG);
1194 val |= PCS_CFG_ENABLE;
1195 writel(val, gp->regs + PCS_CFG);
1196
1197 /* Make sure serialink loopback is off. The meaning
1198 * of this bit is logically inverted based upon whether
1199 * you are in Serialink or SERDES mode.
1200 */
1201 val = readl(gp->regs + PCS_SCTRL);
1202 if (gp->phy_type == phy_serialink)
1203 val &= ~PCS_SCTRL_LOOP;
1204 else
1205 val |= PCS_SCTRL_LOOP;
1206 writel(val, gp->regs + PCS_SCTRL);
1207}
1208
1145#define STOP_TRIES 32 1209#define STOP_TRIES 32
1146 1210
1147/* Must be invoked under gp->lock and gp->tx_lock. */ 1211/* Must be invoked under gp->lock and gp->tx_lock. */
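gem_pcs_reset() above is the stock pattern for a self-clearing reset bit: set the bit, poll it with a bounded retry count and a short delay, and warn if it never clears (factoring it out also fixes the old code's writeb() on a 32-bit register, now a writel()). A generic sketch of the idiom against an arbitrary MMIO register (the register layout and bit are hypothetical):

#include <linux/io.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/errno.h>

#define DEMO_CTRL_RST	0x00000001	/* hypothetical self-clearing reset bit */

/* Returns 0 once the reset bit has cleared, -ETIMEDOUT otherwise. */
static int demo_reset_block(void __iomem *reg)
{
	int limit = 32;
	u32 val;

	val = readl(reg);
	writel(val | DEMO_CTRL_RST, reg);

	while (readl(reg) & DEMO_CTRL_RST) {
		udelay(100);
		if (--limit <= 0) {
			printk(KERN_WARNING "demo: reset bit would not clear\n");
			return -ETIMEDOUT;
		}
	}

	return 0;
}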
@@ -1168,6 +1232,9 @@ static void gem_reset(struct gem *gp)
1168 1232
1169 if (limit <= 0) 1233 if (limit <= 0)
1170 printk(KERN_ERR "%s: SW reset is ghetto.\n", gp->dev->name); 1234 printk(KERN_ERR "%s: SW reset is ghetto.\n", gp->dev->name);
1235
1236 if (gp->phy_type == phy_serialink || gp->phy_type == phy_serdes)
1237 gem_pcs_reinit_adv(gp);
1171} 1238}
1172 1239
1173/* Must be invoked under gp->lock and gp->tx_lock. */ 1240/* Must be invoked under gp->lock and gp->tx_lock. */
@@ -1324,7 +1391,7 @@ static int gem_set_link_modes(struct gem *gp)
1324 gp->phy_type == phy_serdes) { 1391 gp->phy_type == phy_serdes) {
1325 u32 pcs_lpa = readl(gp->regs + PCS_MIILP); 1392 u32 pcs_lpa = readl(gp->regs + PCS_MIILP);
1326 1393
1327 if (pcs_lpa & PCS_MIIADV_FD) 1394 if ((pcs_lpa & PCS_MIIADV_FD) || gp->phy_type == phy_serdes)
1328 full_duplex = 1; 1395 full_duplex = 1;
1329 speed = SPEED_1000; 1396 speed = SPEED_1000;
1330 } 1397 }
@@ -1488,6 +1555,9 @@ static void gem_link_timer(unsigned long data)
1488 val = readl(gp->regs + PCS_MIISTAT); 1555 val = readl(gp->regs + PCS_MIISTAT);
1489 1556
1490 if ((val & PCS_MIISTAT_LS) != 0) { 1557 if ((val & PCS_MIISTAT_LS) != 0) {
1558 if (gp->lstate == link_up)
1559 goto restart;
1560
1491 gp->lstate = link_up; 1561 gp->lstate = link_up;
1492 netif_carrier_on(gp->dev); 1562 netif_carrier_on(gp->dev);
1493 (void)gem_set_link_modes(gp); 1563 (void)gem_set_link_modes(gp);
@@ -1708,61 +1778,8 @@ static void gem_init_phy(struct gem *gp)
1708 if (gp->phy_mii.def && gp->phy_mii.def->ops->init) 1778 if (gp->phy_mii.def && gp->phy_mii.def->ops->init)
1709 gp->phy_mii.def->ops->init(&gp->phy_mii); 1779 gp->phy_mii.def->ops->init(&gp->phy_mii);
1710 } else { 1780 } else {
1711 u32 val; 1781 gem_pcs_reset(gp);
1712 int limit; 1782 gem_pcs_reinit_adv(gp);
1713
1714 /* Reset PCS unit. */
1715 val = readl(gp->regs + PCS_MIICTRL);
1716 val |= PCS_MIICTRL_RST;
1717 writeb(val, gp->regs + PCS_MIICTRL);
1718
1719 limit = 32;
1720 while (readl(gp->regs + PCS_MIICTRL) & PCS_MIICTRL_RST) {
1721 udelay(100);
1722 if (limit-- <= 0)
1723 break;
1724 }
1725 if (limit <= 0)
1726 printk(KERN_WARNING "%s: PCS reset bit would not clear.\n",
1727 gp->dev->name);
1728
1729 /* Make sure PCS is disabled while changing advertisement
1730 * configuration.
1731 */
1732 val = readl(gp->regs + PCS_CFG);
1733 val &= ~(PCS_CFG_ENABLE | PCS_CFG_TO);
1734 writel(val, gp->regs + PCS_CFG);
1735
1736 /* Advertise all capabilities except asymmetric
1737 * pause.
1738 */
1739 val = readl(gp->regs + PCS_MIIADV);
1740 val |= (PCS_MIIADV_FD | PCS_MIIADV_HD |
1741 PCS_MIIADV_SP | PCS_MIIADV_AP);
1742 writel(val, gp->regs + PCS_MIIADV);
1743
1744 /* Enable and restart auto-negotiation, disable wrapback/loopback,
1745 * and re-enable PCS.
1746 */
1747 val = readl(gp->regs + PCS_MIICTRL);
1748 val |= (PCS_MIICTRL_RAN | PCS_MIICTRL_ANE);
1749 val &= ~PCS_MIICTRL_WB;
1750 writel(val, gp->regs + PCS_MIICTRL);
1751
1752 val = readl(gp->regs + PCS_CFG);
1753 val |= PCS_CFG_ENABLE;
1754 writel(val, gp->regs + PCS_CFG);
1755
1756 /* Make sure serialink loopback is off. The meaning
1757 * of this bit is logically inverted based upon whether
1758 * you are in Serialink or SERDES mode.
1759 */
1760 val = readl(gp->regs + PCS_SCTRL);
1761 if (gp->phy_type == phy_serialink)
1762 val &= ~PCS_SCTRL_LOOP;
1763 else
1764 val |= PCS_SCTRL_LOOP;
1765 writel(val, gp->regs + PCS_SCTRL);
1766 } 1783 }
1767 1784
1768 /* Default aneg parameters */ 1785 /* Default aneg parameters */
@@ -2680,6 +2697,21 @@ static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2680 cmd->speed = 0; 2697 cmd->speed = 0;
2681 cmd->duplex = cmd->port = cmd->phy_address = 2698 cmd->duplex = cmd->port = cmd->phy_address =
2682 cmd->transceiver = cmd->autoneg = 0; 2699 cmd->transceiver = cmd->autoneg = 0;
2700
2701 /* serdes means usually a Fibre connector, with most fixed */
2702 if (gp->phy_type == phy_serdes) {
2703 cmd->port = PORT_FIBRE;
2704 cmd->supported = (SUPPORTED_1000baseT_Half |
2705 SUPPORTED_1000baseT_Full |
2706 SUPPORTED_FIBRE | SUPPORTED_Autoneg |
2707 SUPPORTED_Pause | SUPPORTED_Asym_Pause);
2708 cmd->advertising = cmd->supported;
2709 cmd->transceiver = XCVR_INTERNAL;
2710 if (gp->lstate == link_up)
2711 cmd->speed = SPEED_1000;
2712 cmd->duplex = DUPLEX_FULL;
2713 cmd->autoneg = 1;
2714 }
2683 } 2715 }
2684 cmd->maxtxpkt = cmd->maxrxpkt = 0; 2716 cmd->maxtxpkt = cmd->maxrxpkt = 0;
2685 2717
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index c41d68761364..e60498232b94 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -1098,6 +1098,7 @@ static int TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
1098 dma_addr_t tail_list_phys; 1098 dma_addr_t tail_list_phys;
1099 u8 *tail_buffer; 1099 u8 *tail_buffer;
1100 unsigned long flags; 1100 unsigned long flags;
1101 unsigned int txlen;
1101 1102
1102 if ( ! priv->phyOnline ) { 1103 if ( ! priv->phyOnline ) {
1103 TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: %s PHY is not ready\n", 1104 TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: %s PHY is not ready\n",
@@ -1108,6 +1109,7 @@ static int TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
1108 1109
1109 if (skb_padto(skb, TLAN_MIN_FRAME_SIZE)) 1110 if (skb_padto(skb, TLAN_MIN_FRAME_SIZE))
1110 return 0; 1111 return 0;
1112 txlen = max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE);
1111 1113
1112 tail_list = priv->txList + priv->txTail; 1114 tail_list = priv->txList + priv->txTail;
1113 tail_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txTail; 1115 tail_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txTail;
@@ -1125,16 +1127,16 @@ static int TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
1125 1127
1126 if ( bbuf ) { 1128 if ( bbuf ) {
1127 tail_buffer = priv->txBuffer + ( priv->txTail * TLAN_MAX_FRAME_SIZE ); 1129 tail_buffer = priv->txBuffer + ( priv->txTail * TLAN_MAX_FRAME_SIZE );
1128 skb_copy_from_linear_data(skb, tail_buffer, skb->len); 1130 skb_copy_from_linear_data(skb, tail_buffer, txlen);
1129 } else { 1131 } else {
1130 tail_list->buffer[0].address = pci_map_single(priv->pciDev, 1132 tail_list->buffer[0].address = pci_map_single(priv->pciDev,
1131 skb->data, skb->len, 1133 skb->data, txlen,
1132 PCI_DMA_TODEVICE); 1134 PCI_DMA_TODEVICE);
1133 TLan_StoreSKB(tail_list, skb); 1135 TLan_StoreSKB(tail_list, skb);
1134 } 1136 }
1135 1137
1136 tail_list->frameSize = (u16) skb->len; 1138 tail_list->frameSize = (u16) txlen;
1137 tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) skb->len; 1139 tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) txlen;
1138 tail_list->buffer[1].count = 0; 1140 tail_list->buffer[1].count = 0;
1139 tail_list->buffer[1].address = 0; 1141 tail_list->buffer[1].address = 0;
1140 1142
@@ -1431,7 +1433,9 @@ static u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int )
1431 if ( ! bbuf ) { 1433 if ( ! bbuf ) {
1432 struct sk_buff *skb = TLan_GetSKB(head_list); 1434 struct sk_buff *skb = TLan_GetSKB(head_list);
1433 pci_unmap_single(priv->pciDev, head_list->buffer[0].address, 1435 pci_unmap_single(priv->pciDev, head_list->buffer[0].address,
1434 skb->len, PCI_DMA_TODEVICE); 1436 max(skb->len,
1437 (unsigned int)TLAN_MIN_FRAME_SIZE),
1438 PCI_DMA_TODEVICE);
1435 dev_kfree_skb_any(skb); 1439 dev_kfree_skb_any(skb);
1436 head_list->buffer[8].address = 0; 1440 head_list->buffer[8].address = 0;
1437 head_list->buffer[9].address = 0; 1441 head_list->buffer[9].address = 0;
@@ -2055,9 +2059,12 @@ static void TLan_FreeLists( struct net_device *dev )
2055 list = priv->txList + i; 2059 list = priv->txList + i;
2056 skb = TLan_GetSKB(list); 2060 skb = TLan_GetSKB(list);
2057 if ( skb ) { 2061 if ( skb ) {
2058 pci_unmap_single(priv->pciDev, 2062 pci_unmap_single(
2059 list->buffer[0].address, skb->len, 2063 priv->pciDev,
2060 PCI_DMA_TODEVICE); 2064 list->buffer[0].address,
2065 max(skb->len,
2066 (unsigned int)TLAN_MIN_FRAME_SIZE),
2067 PCI_DMA_TODEVICE);
2061 dev_kfree_skb_any( skb ); 2068 dev_kfree_skb_any( skb );
2062 list->buffer[8].address = 0; 2069 list->buffer[8].address = 0;
2063 list->buffer[9].address = 0; 2070 list->buffer[9].address = 0;
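The tlan hunks fix a length mismatch: frames are padded up to TLAN_MIN_FRAME_SIZE by skb_padto(), so the padded length (txlen) is what must be DMA-mapped and written into the descriptor, and the same max() is needed again at TX-complete and in TLan_FreeLists() so pci_unmap_single() sees the size that was actually mapped. A condensed sketch of the same idea, here simply remembering the mapped length in an invented descriptor:

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

#define DEMO_MIN_FRAME_SIZE	64U

struct demo_tx_slot {
	dma_addr_t addr;
	unsigned int len;
	struct sk_buff *skb;
};

/* Map side: pad first, then map exactly the padded length. */
static int demo_map_tx(struct pci_dev *pdev, struct demo_tx_slot *slot,
		       struct sk_buff *skb)
{
	unsigned int txlen;

	if (skb_padto(skb, DEMO_MIN_FRAME_SIZE))
		return -ENOMEM;
	txlen = max(skb->len, DEMO_MIN_FRAME_SIZE);

	slot->addr = pci_map_single(pdev, skb->data, txlen, PCI_DMA_TODEVICE);
	slot->len = txlen;
	slot->skb = skb;
	return 0;
}

/* Unmap side: use the padded length that was mapped, not skb->len alone. */
static void demo_unmap_tx(struct pci_dev *pdev, struct demo_tx_slot *slot)
{
	pci_unmap_single(pdev, slot->addr, slot->len, PCI_DMA_TODEVICE);
	dev_kfree_skb_any(slot->skb);
	slot->skb = NULL;
}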
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index 8e46a513a252..c91852f49a48 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -420,9 +420,13 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
420 /* Allocate Tx/Rx descriptor memory */ 420 /* Allocate Tx/Rx descriptor memory */
421 db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) * 421 db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) *
422 DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr); 422 DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
423 if (!db->desc_pool_ptr)
424 goto err_out_res;
423 425
424 db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC * 426 db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC *
425 TX_DESC_CNT + 4, &db->buf_pool_dma_ptr); 427 TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
428 if (!db->buf_pool_ptr)
429 goto err_out_free_desc;
426 430
427 db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr; 431 db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
428 db->first_tx_desc_dma = db->desc_pool_dma_ptr; 432 db->first_tx_desc_dma = db->desc_pool_dma_ptr;
@@ -469,7 +473,7 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
469 473
470 err = register_netdev (dev); 474 err = register_netdev (dev);
471 if (err) 475 if (err)
472 goto err_out_res; 476 goto err_out_free_buf;
473 477
474 printk(KERN_INFO "%s: Davicom DM%04lx at pci%s, " 478 printk(KERN_INFO "%s: Davicom DM%04lx at pci%s, "
475 "%s, irq %d.\n", 479 "%s, irq %d.\n",
@@ -483,6 +487,12 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
483 487
484 return 0; 488 return 0;
485 489
490err_out_free_buf:
491 pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
492 db->buf_pool_ptr, db->buf_pool_dma_ptr);
493err_out_free_desc:
494 pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
495 db->desc_pool_ptr, db->desc_pool_dma_ptr);
486err_out_res: 496err_out_res:
487 pci_release_regions(pdev); 497 pci_release_regions(pdev);
488err_out_disable: 498err_out_disable:
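The dmfe fix adds the NULL checks that pci_alloc_consistent() was missing and unwinds through labelled error paths in reverse allocation order: a failed buffer-pool allocation frees the descriptor pool, and a failed register_netdev() frees both before falling through to the existing region/disable cleanup. The canonical shape of that goto ladder (sizes and names are placeholders):

#include <linux/pci.h>

#define DEMO_DESC_BYTES	4096
#define DEMO_BUF_BYTES	8192

static int demo_probe_register(struct pci_dev *pdev)
{
	return 0;	/* stand-in for register_netdev() */
}

static int demo_alloc_rings(struct pci_dev *pdev)
{
	void *desc, *buf;
	dma_addr_t desc_dma, buf_dma;
	int err = -ENOMEM;

	desc = pci_alloc_consistent(pdev, DEMO_DESC_BYTES, &desc_dma);
	if (!desc)
		goto err_out;

	buf = pci_alloc_consistent(pdev, DEMO_BUF_BYTES, &buf_dma);
	if (!buf)
		goto err_free_desc;

	err = demo_probe_register(pdev);
	if (err)
		goto err_free_buf;

	return 0;

err_free_buf:
	pci_free_consistent(pdev, DEMO_BUF_BYTES, buf, buf_dma);
err_free_desc:
	pci_free_consistent(pdev, DEMO_DESC_BYTES, desc, desc_dma);
err_out:
	return err;
}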
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 6daea0c91862..33b6d1b122fb 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1070,8 +1070,6 @@ static int tun_chr_close(struct inode *inode, struct file *file)
1070 1070
1071 DBG(KERN_INFO "%s: tun_chr_close\n", tun->dev->name); 1071 DBG(KERN_INFO "%s: tun_chr_close\n", tun->dev->name);
1072 1072
1073 tun_chr_fasync(-1, file, 0);
1074
1075 rtnl_lock(); 1073 rtnl_lock();
1076 1074
1077 /* Detach from net device */ 1075 /* Detach from net device */
diff --git a/drivers/net/ucc_geth_ethtool.c b/drivers/net/ucc_geth_ethtool.c
index cfbbfee55836..68a7f5414133 100644
--- a/drivers/net/ucc_geth_ethtool.c
+++ b/drivers/net/ucc_geth_ethtool.c
@@ -37,7 +37,6 @@
37#include <asm/irq.h> 37#include <asm/irq.h>
38#include <asm/uaccess.h> 38#include <asm/uaccess.h>
39#include <asm/types.h> 39#include <asm/types.h>
40#include <asm/uaccess.h>
41 40
42#include "ucc_geth.h" 41#include "ucc_geth.h"
43#include "ucc_geth_mii.h" 42#include "ucc_geth_mii.h"
@@ -324,17 +323,17 @@ static void uec_get_ethtool_stats(struct net_device *netdev,
324 if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE) { 323 if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE) {
325 base = (u32 __iomem *)&ugeth->ug_regs->tx64; 324 base = (u32 __iomem *)&ugeth->ug_regs->tx64;
326 for (i = 0; i < UEC_HW_STATS_LEN; i++) 325 for (i = 0; i < UEC_HW_STATS_LEN; i++)
327 data[j++] = (u64)in_be32(&base[i]); 326 data[j++] = in_be32(&base[i]);
328 } 327 }
329 if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) { 328 if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) {
330 base = (u32 __iomem *)ugeth->p_tx_fw_statistics_pram; 329 base = (u32 __iomem *)ugeth->p_tx_fw_statistics_pram;
331 for (i = 0; i < UEC_TX_FW_STATS_LEN; i++) 330 for (i = 0; i < UEC_TX_FW_STATS_LEN; i++)
332 data[j++] = (u64)in_be32(&base[i]); 331 data[j++] = base ? in_be32(&base[i]) : 0;
333 } 332 }
334 if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) { 333 if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) {
335 base = (u32 __iomem *)ugeth->p_rx_fw_statistics_pram; 334 base = (u32 __iomem *)ugeth->p_rx_fw_statistics_pram;
336 for (i = 0; i < UEC_RX_FW_STATS_LEN; i++) 335 for (i = 0; i < UEC_RX_FW_STATS_LEN; i++)
337 data[j++] = (u64)in_be32(&base[i]); 336 data[j++] = base ? in_be32(&base[i]) : 0;
338 } 337 }
339} 338}
340 339
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index 37ecf845edfe..de57490103fc 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -1102,12 +1102,14 @@ static int ax88178_link_reset(struct usbnet *dev)
1102 mode = AX88178_MEDIUM_DEFAULT; 1102 mode = AX88178_MEDIUM_DEFAULT;
1103 1103
1104 if (ecmd.speed == SPEED_1000) 1104 if (ecmd.speed == SPEED_1000)
1105 mode |= AX_MEDIUM_GM | AX_MEDIUM_ENCK; 1105 mode |= AX_MEDIUM_GM;
1106 else if (ecmd.speed == SPEED_100) 1106 else if (ecmd.speed == SPEED_100)
1107 mode |= AX_MEDIUM_PS; 1107 mode |= AX_MEDIUM_PS;
1108 else 1108 else
1109 mode &= ~(AX_MEDIUM_PS | AX_MEDIUM_GM); 1109 mode &= ~(AX_MEDIUM_PS | AX_MEDIUM_GM);
1110 1110
1111 mode |= AX_MEDIUM_ENCK;
1112
1111 if (ecmd.duplex == DUPLEX_FULL) 1113 if (ecmd.duplex == DUPLEX_FULL)
1112 mode |= AX_MEDIUM_FD; 1114 mode |= AX_MEDIUM_FD;
1113 else 1115 else
@@ -1444,6 +1446,10 @@ static const struct usb_device_id products [] = {
1444 // Apple USB Ethernet Adapter 1446 // Apple USB Ethernet Adapter
1445 USB_DEVICE(0x05ac, 0x1402), 1447 USB_DEVICE(0x05ac, 0x1402),
1446 .driver_info = (unsigned long) &ax88772_info, 1448 .driver_info = (unsigned long) &ax88772_info,
1449}, {
1450 // Cables-to-Go USB Ethernet Adapter
1451 USB_DEVICE(0x0b95, 0x772a),
1452 .driver_info = (unsigned long) &ax88772_info,
1447}, 1453},
1448 { }, // END 1454 { }, // END
1449}; 1455};
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 78df2be8a728..db3377dae9d5 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -396,6 +396,20 @@ static void dm9601_set_multicast(struct net_device *net)
396 dm_write_reg_async(dev, DM_RX_CTRL, rx_ctl); 396 dm_write_reg_async(dev, DM_RX_CTRL, rx_ctl);
397} 397}
398 398
399static int dm9601_set_mac_address(struct net_device *net, void *p)
400{
401 struct sockaddr *addr = p;
402 struct usbnet *dev = netdev_priv(net);
403
404 if (!is_valid_ether_addr(addr->sa_data))
405 return -EINVAL;
406
407 memcpy(net->dev_addr, addr->sa_data, net->addr_len);
408 dm_write_async(dev, DM_PHY_ADDR, net->addr_len, net->dev_addr);
409
410 return 0;
411}
412
399static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf) 413static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf)
400{ 414{
401 int ret; 415 int ret;
@@ -406,6 +420,7 @@ static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf)
406 420
407 dev->net->do_ioctl = dm9601_ioctl; 421 dev->net->do_ioctl = dm9601_ioctl;
408 dev->net->set_multicast_list = dm9601_set_multicast; 422 dev->net->set_multicast_list = dm9601_set_multicast;
423 dev->net->set_mac_address = dm9601_set_mac_address;
409 dev->net->ethtool_ops = &dm9601_ethtool_ops; 424 dev->net->ethtool_ops = &dm9601_ethtool_ops;
410 dev->net->hard_header_len += DM_TX_OVERHEAD; 425 dev->net->hard_header_len += DM_TX_OVERHEAD;
411 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; 426 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 1164c52e2c0a..8e90891f0e42 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2184,19 +2184,20 @@ static void hso_create_rfkill(struct hso_device *hso_dev,
2184 struct usb_interface *interface) 2184 struct usb_interface *interface)
2185{ 2185{
2186 struct hso_net *hso_net = dev2net(hso_dev); 2186 struct hso_net *hso_net = dev2net(hso_dev);
2187 struct device *dev = hso_dev->dev; 2187 struct device *dev = &hso_net->net->dev;
2188 char *rfkn; 2188 char *rfkn;
2189 2189
2190 hso_net->rfkill = rfkill_allocate(&interface_to_usbdev(interface)->dev, 2190 hso_net->rfkill = rfkill_allocate(&interface_to_usbdev(interface)->dev,
2191 RFKILL_TYPE_WLAN); 2191 RFKILL_TYPE_WWAN);
2192 if (!hso_net->rfkill) { 2192 if (!hso_net->rfkill) {
2193 dev_err(dev, "%s - Out of memory", __func__); 2193 dev_err(dev, "%s - Out of memory\n", __func__);
2194 return; 2194 return;
2195 } 2195 }
2196 rfkn = kzalloc(20, GFP_KERNEL); 2196 rfkn = kzalloc(20, GFP_KERNEL);
2197 if (!rfkn) { 2197 if (!rfkn) {
2198 rfkill_free(hso_net->rfkill); 2198 rfkill_free(hso_net->rfkill);
2199 dev_err(dev, "%s - Out of memory", __func__); 2199 hso_net->rfkill = NULL;
2200 dev_err(dev, "%s - Out of memory\n", __func__);
2200 return; 2201 return;
2201 } 2202 }
2202 snprintf(rfkn, 20, "hso-%d", 2203 snprintf(rfkn, 20, "hso-%d",
@@ -2209,7 +2210,8 @@ static void hso_create_rfkill(struct hso_device *hso_dev,
2209 kfree(rfkn); 2210 kfree(rfkn);
2210 hso_net->rfkill->name = NULL; 2211 hso_net->rfkill->name = NULL;
2211 rfkill_free(hso_net->rfkill); 2212 rfkill_free(hso_net->rfkill);
2212 dev_err(dev, "%s - Failed to register rfkill", __func__); 2213 hso_net->rfkill = NULL;
2214 dev_err(dev, "%s - Failed to register rfkill\n", __func__);
2213 return; 2215 return;
2214 } 2216 }
2215} 2217}
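Both hso error paths now set hso_net->rfkill back to NULL after rfkill_free(); without that, later teardown code that tests the pointer would unregister or free an already-freed object. The pattern in miniature (the structures and helpers below are invented, standing in for the rfkill calls):

#include <linux/slab.h>

struct demo_obj {
	int dummy;
};

struct demo_dev {
	struct demo_obj *obj;
};

static int demo_obj_register(struct demo_obj *obj)
{
	return 0;	/* stub: pretend registration succeeded */
}

static void demo_obj_unregister(struct demo_obj *obj) { }

static void demo_setup(struct demo_dev *d)
{
	d->obj = kzalloc(sizeof(*d->obj), GFP_KERNEL);
	if (!d->obj)
		return;

	if (demo_obj_register(d->obj) < 0) {
		kfree(d->obj);
		d->obj = NULL;	/* so later teardown cannot double-free it */
	}
}

static void demo_teardown(struct demo_dev *d)
{
	if (d->obj) {		/* safe: every error path cleared the pointer */
		demo_obj_unregister(d->obj);
		kfree(d->obj);
		d->obj = NULL;
	}
}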
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 2dced383bcfb..11cb3e504e1c 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -521,7 +521,7 @@ static void __devexit velocity_remove1(struct pci_dev *pdev)
521 * we don't duplicate code for each option. 521 * we don't duplicate code for each option.
522 */ 522 */
523 523
524static void __devinit velocity_set_int_opt(int *opt, int val, int min, int max, int def, char *name, char *devname) 524static void __devinit velocity_set_int_opt(int *opt, int val, int min, int max, int def, char *name, const char *devname)
525{ 525{
526 if (val == -1) 526 if (val == -1)
527 *opt = def; 527 *opt = def;
@@ -550,7 +550,7 @@ static void __devinit velocity_set_int_opt(int *opt, int val, int min, int max,
550 * we don't duplicate code for each option. 550 * we don't duplicate code for each option.
551 */ 551 */
552 552
553static void __devinit velocity_set_bool_opt(u32 * opt, int val, int def, u32 flag, char *name, char *devname) 553static void __devinit velocity_set_bool_opt(u32 * opt, int val, int def, u32 flag, char *name, const char *devname)
554{ 554{
555 (*opt) &= (~flag); 555 (*opt) &= (~flag);
556 if (val == -1) 556 if (val == -1)
@@ -576,7 +576,7 @@ static void __devinit velocity_set_bool_opt(u32 * opt, int val, int def, u32 fla
576 * for the current device 576 * for the current device
577 */ 577 */
578 578
579static void __devinit velocity_get_options(struct velocity_opt *opts, int index, char *devname) 579static void __devinit velocity_get_options(struct velocity_opt *opts, int index, const char *devname)
580{ 580{
581 581
582 velocity_set_int_opt(&opts->rx_thresh, rx_thresh[index], RX_THRESH_MIN, RX_THRESH_MAX, RX_THRESH_DEF, "rx_thresh", devname); 582 velocity_set_int_opt(&opts->rx_thresh, rx_thresh[index], RX_THRESH_MIN, RX_THRESH_MAX, RX_THRESH_DEF, "rx_thresh", devname);
@@ -863,6 +863,7 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
863 static int first = 1; 863 static int first = 1;
864 struct net_device *dev; 864 struct net_device *dev;
865 int i; 865 int i;
866 const char *drv_string;
866 const struct velocity_info_tbl *info = &chip_info_table[ent->driver_data]; 867 const struct velocity_info_tbl *info = &chip_info_table[ent->driver_data];
867 struct velocity_info *vptr; 868 struct velocity_info *vptr;
868 struct mac_regs __iomem * regs; 869 struct mac_regs __iomem * regs;
@@ -935,7 +936,9 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
935 dev->dev_addr[i] = readb(&regs->PAR[i]); 936 dev->dev_addr[i] = readb(&regs->PAR[i]);
936 937
937 938
938 velocity_get_options(&vptr->options, velocity_nics, dev->name); 939 drv_string = dev_driver_string(&pdev->dev);
940
941 velocity_get_options(&vptr->options, velocity_nics, drv_string);
939 942
940 /* 943 /*
941 * Mask out the options cannot be set to the chip 944 * Mask out the options cannot be set to the chip
@@ -2293,7 +2296,7 @@ static void velocity_set_multi(struct net_device *dev)
2293 } 2296 }
2294 2297
2295 mac_set_cam_mask(regs, vptr->mCAMmask); 2298 mac_set_cam_mask(regs, vptr->mCAMmask);
2296 rx_mode = (RCR_AM | RCR_AB); 2299 rx_mode = RCR_AM | RCR_AB | RCR_AP;
2297 } 2300 }
2298 if (dev->mtu > 1500) 2301 if (dev->mtu > 1500)
2299 rx_mode |= RCR_AL; 2302 rx_mode |= RCR_AL;
diff --git a/drivers/net/wan/syncppp.c b/drivers/net/wan/syncppp.c
index 327d58589e12..6e92f7b44b1a 100644
--- a/drivers/net/wan/syncppp.c
+++ b/drivers/net/wan/syncppp.c
@@ -756,10 +756,11 @@ static void sppp_cisco_input (struct sppp *sp, struct sk_buff *skb)
756 case CISCO_ADDR_REQ: 756 case CISCO_ADDR_REQ:
757 /* Stolen from net/ipv4/devinet.c -- SIOCGIFADDR ioctl */ 757 /* Stolen from net/ipv4/devinet.c -- SIOCGIFADDR ioctl */
758 { 758 {
759 struct in_device *in_dev;
760 struct in_ifaddr *ifa;
761 __be32 addr = 0, mask = htonl(~0U); /* FIXME: is the mask correct? */ 759 __be32 addr = 0, mask = htonl(~0U); /* FIXME: is the mask correct? */
762#ifdef CONFIG_INET 760#ifdef CONFIG_INET
761 struct in_device *in_dev;
762 struct in_ifaddr *ifa;
763
763 rcu_read_lock(); 764 rcu_read_lock();
764 if ((in_dev = __in_dev_get_rcu(dev)) != NULL) 765 if ((in_dev = __in_dev_get_rcu(dev)) != NULL)
765 { 766 {
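The syncppp hunk moves the in_device/in_ifaddr declarations inside the #ifdef CONFIG_INET block that is their only user, which avoids "unused variable" warnings on configurations without IPv4. The same idiom in isolation (only the config symbol is taken from the patch; everything else is illustrative):

#include <linux/kernel.h>
#include <linux/netdevice.h>
#ifdef CONFIG_INET
#include <linux/inetdevice.h>
#endif

static void demo_report_addr(struct net_device *dev)
{
	u32 addr = 0;			/* used on every configuration */
#ifdef CONFIG_INET
	struct in_device *in_dev;	/* only referenced under CONFIG_INET, */
	struct in_ifaddr *ifa;		/* so only declared here */

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(dev);
	if (in_dev) {
		ifa = in_dev->ifa_list;	/* first configured address, if any */
		if (ifa)
			addr = ntohl(ifa->ifa_local);
	}
	rcu_read_unlock();
#endif
	printk(KERN_INFO "%s: local address %u.%u.%u.%u\n", dev->name,
	       (addr >> 24) & 0xff, (addr >> 16) & 0xff,
	       (addr >> 8) & 0xff, addr & 0xff);
}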
diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
index ccd9cd35ecbe..5bf7e01ef0e9 100644
--- a/drivers/net/wan/z85230.c
+++ b/drivers/net/wan/z85230.c
@@ -695,7 +695,6 @@ EXPORT_SYMBOL(z8530_nop);
695 * z8530_interrupt - Handle an interrupt from a Z8530 695 * z8530_interrupt - Handle an interrupt from a Z8530
696 * @irq: Interrupt number 696 * @irq: Interrupt number
697 * @dev_id: The Z8530 device that is interrupting. 697 * @dev_id: The Z8530 device that is interrupting.
698 * @regs: unused
699 * 698 *
700 * A Z85[2]30 device has stuck its hand in the air for attention. 699 * A Z85[2]30 device has stuck its hand in the air for attention.
701 * We scan both the channels on the chip for events and then call 700 * We scan both the channels on the chip for events and then call
diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c
index 9b95c4049b31..2d14255eb103 100644
--- a/drivers/net/wireless/ath5k/base.c
+++ b/drivers/net/wireless/ath5k/base.c
@@ -240,6 +240,10 @@ static u64 ath5k_get_tsf(struct ieee80211_hw *hw);
240static void ath5k_reset_tsf(struct ieee80211_hw *hw); 240static void ath5k_reset_tsf(struct ieee80211_hw *hw);
241static int ath5k_beacon_update(struct ieee80211_hw *hw, 241static int ath5k_beacon_update(struct ieee80211_hw *hw,
242 struct sk_buff *skb); 242 struct sk_buff *skb);
243static void ath5k_bss_info_changed(struct ieee80211_hw *hw,
244 struct ieee80211_vif *vif,
245 struct ieee80211_bss_conf *bss_conf,
246 u32 changes);
243 247
244static struct ieee80211_ops ath5k_hw_ops = { 248static struct ieee80211_ops ath5k_hw_ops = {
245 .tx = ath5k_tx, 249 .tx = ath5k_tx,
@@ -256,6 +260,7 @@ static struct ieee80211_ops ath5k_hw_ops = {
256 .get_tx_stats = ath5k_get_tx_stats, 260 .get_tx_stats = ath5k_get_tx_stats,
257 .get_tsf = ath5k_get_tsf, 261 .get_tsf = ath5k_get_tsf,
258 .reset_tsf = ath5k_reset_tsf, 262 .reset_tsf = ath5k_reset_tsf,
263 .bss_info_changed = ath5k_bss_info_changed,
259}; 264};
260 265
261/* 266/*
@@ -340,9 +345,9 @@ static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
340} 345}
341 346
342/* Interrupt handling */ 347/* Interrupt handling */
343static int ath5k_init(struct ath5k_softc *sc); 348static int ath5k_init(struct ath5k_softc *sc, bool is_resume);
344static int ath5k_stop_locked(struct ath5k_softc *sc); 349static int ath5k_stop_locked(struct ath5k_softc *sc);
345static int ath5k_stop_hw(struct ath5k_softc *sc); 350static int ath5k_stop_hw(struct ath5k_softc *sc, bool is_suspend);
346static irqreturn_t ath5k_intr(int irq, void *dev_id); 351static irqreturn_t ath5k_intr(int irq, void *dev_id);
347static void ath5k_tasklet_reset(unsigned long data); 352static void ath5k_tasklet_reset(unsigned long data);
348 353
@@ -646,7 +651,7 @@ ath5k_pci_suspend(struct pci_dev *pdev, pm_message_t state)
646 651
647 ath5k_led_off(sc); 652 ath5k_led_off(sc);
648 653
649 ath5k_stop_hw(sc); 654 ath5k_stop_hw(sc, true);
650 655
651 free_irq(pdev->irq, sc); 656 free_irq(pdev->irq, sc);
652 pci_save_state(pdev); 657 pci_save_state(pdev);
@@ -661,8 +666,7 @@ ath5k_pci_resume(struct pci_dev *pdev)
661{ 666{
662 struct ieee80211_hw *hw = pci_get_drvdata(pdev); 667 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
663 struct ath5k_softc *sc = hw->priv; 668 struct ath5k_softc *sc = hw->priv;
664 struct ath5k_hw *ah = sc->ah; 669 int err;
665 int i, err;
666 670
667 pci_restore_state(pdev); 671 pci_restore_state(pdev);
668 672
@@ -683,21 +687,11 @@ ath5k_pci_resume(struct pci_dev *pdev)
683 goto err_no_irq; 687 goto err_no_irq;
684 } 688 }
685 689
686 err = ath5k_init(sc); 690 err = ath5k_init(sc, true);
687 if (err) 691 if (err)
688 goto err_irq; 692 goto err_irq;
689 ath5k_led_enable(sc); 693 ath5k_led_enable(sc);
690 694
691 /*
692 * Reset the key cache since some parts do not
693 * reset the contents on initial power up or resume.
694 *
695 * FIXME: This may need to be revisited when mac80211 becomes
696 * aware of suspend/resume.
697 */
698 for (i = 0; i < AR5K_KEYTABLE_SIZE; i++)
699 ath5k_hw_reset_key(ah, i);
700
701 return 0; 695 return 0;
702err_irq: 696err_irq:
703 free_irq(pdev->irq, sc); 697 free_irq(pdev->irq, sc);
@@ -718,7 +712,6 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
718 struct ath5k_softc *sc = hw->priv; 712 struct ath5k_softc *sc = hw->priv;
719 struct ath5k_hw *ah = sc->ah; 713 struct ath5k_hw *ah = sc->ah;
720 u8 mac[ETH_ALEN]; 714 u8 mac[ETH_ALEN];
721 unsigned int i;
722 int ret; 715 int ret;
723 716
724 ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "devid 0x%x\n", pdev->device); 717 ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "devid 0x%x\n", pdev->device);
@@ -737,13 +730,6 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
737 __set_bit(ATH_STAT_MRRETRY, sc->status); 730 __set_bit(ATH_STAT_MRRETRY, sc->status);
738 731
739 /* 732 /*
740 * Reset the key cache since some parts do not
741 * reset the contents on initial power up.
742 */
743 for (i = 0; i < AR5K_KEYTABLE_SIZE; i++)
744 ath5k_hw_reset_key(ah, i);
745
746 /*
747 * Collect the channel list. The 802.11 layer 733 * Collect the channel list. The 802.11 layer
748 * is resposible for filtering this list based 734 * is resposible for filtering this list based
749 * on settings like the phy mode and regulatory 735 * on settings like the phy mode and regulatory
@@ -2200,12 +2186,18 @@ ath5k_beacon_config(struct ath5k_softc *sc)
2200\********************/ 2186\********************/
2201 2187
2202static int 2188static int
2203ath5k_init(struct ath5k_softc *sc) 2189ath5k_init(struct ath5k_softc *sc, bool is_resume)
2204{ 2190{
2205 int ret; 2191 struct ath5k_hw *ah = sc->ah;
2192 int ret, i;
2206 2193
2207 mutex_lock(&sc->lock); 2194 mutex_lock(&sc->lock);
2208 2195
2196 if (is_resume && !test_bit(ATH_STAT_STARTED, sc->status))
2197 goto out_ok;
2198
2199 __clear_bit(ATH_STAT_STARTED, sc->status);
2200
2209 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "mode %d\n", sc->opmode); 2201 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "mode %d\n", sc->opmode);
2210 2202
2211 /* 2203 /*
@@ -2230,12 +2222,22 @@ ath5k_init(struct ath5k_softc *sc)
2230 if (ret) 2222 if (ret)
2231 goto done; 2223 goto done;
2232 2224
2225 /*
2226 * Reset the key cache since some parts do not reset the
2227 * contents on initial power up or resume from suspend.
2228 */
2229 for (i = 0; i < AR5K_KEYTABLE_SIZE; i++)
2230 ath5k_hw_reset_key(ah, i);
2231
2232 __set_bit(ATH_STAT_STARTED, sc->status);
2233
2233 /* Set ack to be sent at low bit-rates */ 2234 /* Set ack to be sent at low bit-rates */
2234 ath5k_hw_set_ack_bitrate_high(sc->ah, false); 2235 ath5k_hw_set_ack_bitrate_high(ah, false);
2235 2236
2236 mod_timer(&sc->calib_tim, round_jiffies(jiffies + 2237 mod_timer(&sc->calib_tim, round_jiffies(jiffies +
2237 msecs_to_jiffies(ath5k_calinterval * 1000))); 2238 msecs_to_jiffies(ath5k_calinterval * 1000)));
2238 2239
2240out_ok:
2239 ret = 0; 2241 ret = 0;
2240done: 2242done:
2241 mmiowb(); 2243 mmiowb();
@@ -2290,7 +2292,7 @@ ath5k_stop_locked(struct ath5k_softc *sc)
2290 * stop is preempted). 2292 * stop is preempted).
2291 */ 2293 */
2292static int 2294static int
2293ath5k_stop_hw(struct ath5k_softc *sc) 2295ath5k_stop_hw(struct ath5k_softc *sc, bool is_suspend)
2294{ 2296{
2295 int ret; 2297 int ret;
2296 2298
@@ -2321,6 +2323,9 @@ ath5k_stop_hw(struct ath5k_softc *sc)
2321 } 2323 }
2322 } 2324 }
2323 ath5k_txbuf_free(sc, sc->bbuf); 2325 ath5k_txbuf_free(sc, sc->bbuf);
2326 if (!is_suspend)
2327 __clear_bit(ATH_STAT_STARTED, sc->status);
2328
2324 mmiowb(); 2329 mmiowb();
2325 mutex_unlock(&sc->lock); 2330 mutex_unlock(&sc->lock);
2326 2331
@@ -2718,12 +2723,12 @@ ath5k_reset_wake(struct ath5k_softc *sc)
2718 2723
2719static int ath5k_start(struct ieee80211_hw *hw) 2724static int ath5k_start(struct ieee80211_hw *hw)
2720{ 2725{
2721 return ath5k_init(hw->priv); 2726 return ath5k_init(hw->priv, false);
2722} 2727}
2723 2728
2724static void ath5k_stop(struct ieee80211_hw *hw) 2729static void ath5k_stop(struct ieee80211_hw *hw)
2725{ 2730{
2726 ath5k_stop_hw(hw->priv); 2731 ath5k_stop_hw(hw->priv, false);
2727} 2732}
2728 2733
2729static int ath5k_add_interface(struct ieee80211_hw *hw, 2734static int ath5k_add_interface(struct ieee80211_hw *hw,
@@ -2942,7 +2947,7 @@ static void ath5k_configure_filter(struct ieee80211_hw *hw,
2942 sc->opmode != NL80211_IFTYPE_MESH_POINT && 2947 sc->opmode != NL80211_IFTYPE_MESH_POINT &&
2943 test_bit(ATH_STAT_PROMISC, sc->status)) 2948 test_bit(ATH_STAT_PROMISC, sc->status))
2944 rfilt |= AR5K_RX_FILTER_PROM; 2949 rfilt |= AR5K_RX_FILTER_PROM;
2945 if (sc->opmode == NL80211_IFTYPE_STATION || 2950 if ((sc->opmode == NL80211_IFTYPE_STATION && sc->assoc) ||
2946 sc->opmode == NL80211_IFTYPE_ADHOC) { 2951 sc->opmode == NL80211_IFTYPE_ADHOC) {
2947 rfilt |= AR5K_RX_FILTER_BEACON; 2952 rfilt |= AR5K_RX_FILTER_BEACON;
2948 } 2953 }
@@ -3083,4 +3088,32 @@ ath5k_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
3083end: 3088end:
3084 return ret; 3089 return ret;
3085} 3090}
3091static void
3092set_beacon_filter(struct ieee80211_hw *hw, bool enable)
3093{
3094 struct ath5k_softc *sc = hw->priv;
3095 struct ath5k_hw *ah = sc->ah;
3096 u32 rfilt;
3097 rfilt = ath5k_hw_get_rx_filter(ah);
3098 if (enable)
3099 rfilt |= AR5K_RX_FILTER_BEACON;
3100 else
3101 rfilt &= ~AR5K_RX_FILTER_BEACON;
3102 ath5k_hw_set_rx_filter(ah, rfilt);
3103 sc->filter_flags = rfilt;
3104}
3086 3105
3106static void ath5k_bss_info_changed(struct ieee80211_hw *hw,
3107 struct ieee80211_vif *vif,
3108 struct ieee80211_bss_conf *bss_conf,
3109 u32 changes)
3110{
3111 struct ath5k_softc *sc = hw->priv;
3112 if (changes & BSS_CHANGED_ASSOC) {
3113 mutex_lock(&sc->lock);
3114 sc->assoc = bss_conf->assoc;
3115 if (sc->opmode == NL80211_IFTYPE_STATION)
3116 set_beacon_filter(hw, sc->assoc);
3117 mutex_unlock(&sc->lock);
3118 }
3119}
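The base.c changes above add an ath5k_bss_info_changed() callback that tracks association state and only lets beacons through the RX filter while a station interface is actually associated. A minimal user-space sketch of that toggle follows; the filter bit value and the plain variable standing in for the hardware register are assumptions, since the real driver goes through ath5k_hw_get_rx_filter()/ath5k_hw_set_rx_filter() under sc->lock.

/* Illustrative sketch only: models the beacon-filter toggle added by
 * ath5k_bss_info_changed(). The fake "register" and the bit value are
 * hypothetical; the driver uses its own filter constants and helpers. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RX_FILTER_BEACON 0x10u      /* hypothetical filter bit */

static uint32_t hw_rx_filter;       /* stands in for the hardware register */

static void set_beacon_filter(bool enable)
{
    uint32_t rfilt = hw_rx_filter;

    if (enable)
        rfilt |= RX_FILTER_BEACON;
    else
        rfilt &= ~RX_FILTER_BEACON;
    hw_rx_filter = rfilt;
}

int main(void)
{
    bool assoc = false;

    set_beacon_filter(assoc);       /* not associated: beacons filtered out */
    printf("filter=0x%x\n", (unsigned)hw_rx_filter);

    assoc = true;                   /* BSS_CHANGED_ASSOC: now pass beacons */
    set_beacon_filter(assoc);
    printf("filter=0x%x\n", (unsigned)hw_rx_filter);
    return 0;
}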
diff --git a/drivers/net/wireless/ath5k/base.h b/drivers/net/wireless/ath5k/base.h
index 9d0b728928e3..facc60ddada2 100644
--- a/drivers/net/wireless/ath5k/base.h
+++ b/drivers/net/wireless/ath5k/base.h
@@ -128,11 +128,12 @@ struct ath5k_softc {
128 size_t desc_len; /* size of TX/RX descriptors */ 128 size_t desc_len; /* size of TX/RX descriptors */
129 u16 cachelsz; /* cache line size */ 129 u16 cachelsz; /* cache line size */
130 130
131 DECLARE_BITMAP(status, 4); 131 DECLARE_BITMAP(status, 5);
132#define ATH_STAT_INVALID 0 /* disable hardware accesses */ 132#define ATH_STAT_INVALID 0 /* disable hardware accesses */
133#define ATH_STAT_MRRETRY 1 /* multi-rate retry support */ 133#define ATH_STAT_MRRETRY 1 /* multi-rate retry support */
134#define ATH_STAT_PROMISC 2 134#define ATH_STAT_PROMISC 2
135#define ATH_STAT_LEDSOFT 3 /* enable LED gpio status */ 135#define ATH_STAT_LEDSOFT 3 /* enable LED gpio status */
136#define ATH_STAT_STARTED 4 /* opened & irqs enabled */
136 137
137 unsigned int filter_flags; /* HW flags, AR5K_RX_FILTER_* */ 138 unsigned int filter_flags; /* HW flags, AR5K_RX_FILTER_* */
138 unsigned int curmode; /* current phy mode */ 139 unsigned int curmode; /* current phy mode */
@@ -178,6 +179,7 @@ struct ath5k_softc {
178 179
179 struct timer_list calib_tim; /* calibration timer */ 180 struct timer_list calib_tim; /* calibration timer */
180 int power_level; /* Requested tx power in dbm */ 181 int power_level; /* Requested tx power in dbm */
 182	bool			assoc;		/* association state */
181}; 183};
182 184
183#define ath5k_hw_hasbssidmask(_ah) \ 185#define ath5k_hw_hasbssidmask(_ah) \
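The header change grows DECLARE_BITMAP(status, 4) to 5 because the new ATH_STAT_STARTED flag is bit index 4, and the macro's second argument is a bit count, so it must exceed the highest index used. A stand-alone sketch of that sizing rule, mirroring the kernel macro definitions in plain C, is shown below; it is an illustration, not driver code.

/* Sketch of the sizing rule behind DECLARE_BITMAP(status, 5): the count
 * must be one more than the highest bit index (ATH_STAT_STARTED == 4). */
#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG      (CHAR_BIT * sizeof(long))
#define BITS_TO_LONGS(n)   (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define DECLARE_BITMAP(name, bits) unsigned long name[BITS_TO_LONGS(bits)]

#define ATH_STAT_STARTED 4

int main(void)
{
    DECLARE_BITMAP(status, 5) = { 0 };

    /* set_bit()/test_bit() reduced to plain C for the sketch */
    status[ATH_STAT_STARTED / BITS_PER_LONG] |=
        1UL << (ATH_STAT_STARTED % BITS_PER_LONG);

    printf("words allocated: %zu, bit 4 set: %lu\n",
           sizeof(status) / sizeof(status[0]),
           (status[0] >> ATH_STAT_STARTED) & 1UL);
    return 0;
}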
diff --git a/drivers/net/wireless/ath5k/debug.c b/drivers/net/wireless/ath5k/debug.c
index 8f92d670f614..ccaeb5c219d2 100644
--- a/drivers/net/wireless/ath5k/debug.c
+++ b/drivers/net/wireless/ath5k/debug.c
@@ -339,7 +339,7 @@ static struct {
339 { ATH5K_DEBUG_BEACON, "beacon", "beacon handling" }, 339 { ATH5K_DEBUG_BEACON, "beacon", "beacon handling" },
340 { ATH5K_DEBUG_CALIBRATE, "calib", "periodic calibration" }, 340 { ATH5K_DEBUG_CALIBRATE, "calib", "periodic calibration" },
341 { ATH5K_DEBUG_TXPOWER, "txpower", "transmit power setting" }, 341 { ATH5K_DEBUG_TXPOWER, "txpower", "transmit power setting" },
342 { ATH5K_DEBUG_LED, "led", "LED mamagement" }, 342 { ATH5K_DEBUG_LED, "led", "LED management" },
343 { ATH5K_DEBUG_DUMP_RX, "dumprx", "print received skb content" }, 343 { ATH5K_DEBUG_DUMP_RX, "dumprx", "print received skb content" },
344 { ATH5K_DEBUG_DUMP_TX, "dumptx", "print transmit skb content" }, 344 { ATH5K_DEBUG_DUMP_TX, "dumptx", "print transmit skb content" },
345 { ATH5K_DEBUG_DUMPBANDS, "dumpbands", "dump bands" }, 345 { ATH5K_DEBUG_DUMPBANDS, "dumpbands", "dump bands" },
@@ -417,19 +417,19 @@ ath5k_debug_init_device(struct ath5k_softc *sc)
417 sc->debug.debugfs_phydir = debugfs_create_dir(wiphy_name(sc->hw->wiphy), 417 sc->debug.debugfs_phydir = debugfs_create_dir(wiphy_name(sc->hw->wiphy),
418 ath5k_global_debugfs); 418 ath5k_global_debugfs);
419 419
420 sc->debug.debugfs_debug = debugfs_create_file("debug", 0666, 420 sc->debug.debugfs_debug = debugfs_create_file("debug", S_IWUSR | S_IRUGO,
421 sc->debug.debugfs_phydir, sc, &fops_debug); 421 sc->debug.debugfs_phydir, sc, &fops_debug);
422 422
423 sc->debug.debugfs_registers = debugfs_create_file("registers", 0444, 423 sc->debug.debugfs_registers = debugfs_create_file("registers", S_IRUGO,
424 sc->debug.debugfs_phydir, sc, &fops_registers); 424 sc->debug.debugfs_phydir, sc, &fops_registers);
425 425
426 sc->debug.debugfs_tsf = debugfs_create_file("tsf", 0666, 426 sc->debug.debugfs_tsf = debugfs_create_file("tsf", S_IWUSR | S_IRUGO,
427 sc->debug.debugfs_phydir, sc, &fops_tsf); 427 sc->debug.debugfs_phydir, sc, &fops_tsf);
428 428
429 sc->debug.debugfs_beacon = debugfs_create_file("beacon", 0666, 429 sc->debug.debugfs_beacon = debugfs_create_file("beacon", S_IWUSR | S_IRUGO,
430 sc->debug.debugfs_phydir, sc, &fops_beacon); 430 sc->debug.debugfs_phydir, sc, &fops_beacon);
431 431
432 sc->debug.debugfs_reset = debugfs_create_file("reset", 0222, 432 sc->debug.debugfs_reset = debugfs_create_file("reset", S_IWUSR,
433 sc->debug.debugfs_phydir, sc, &fops_reset); 433 sc->debug.debugfs_phydir, sc, &fops_reset);
434} 434}
435 435
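The debugfs hunk swaps raw octal modes for symbolic S_* macros and, in doing so, also drops group/other write permission: 0666 becomes 0644 and 0222 becomes 0200. The short program below just prints the resulting octal values; S_IRUGO is a kernel convenience macro, so it is spelled out here from the per-class POSIX bits.

/* Octal equivalents of the symbolic modes used above. */
#include <stdio.h>
#include <sys/stat.h>

#ifndef S_IRUGO
#define S_IRUGO (S_IRUSR | S_IRGRP | S_IROTH)   /* kernel definition: 0444 */
#endif

int main(void)
{
    printf("S_IWUSR | S_IRUGO = %04o (was 0666)\n",
           (unsigned)(S_IWUSR | S_IRUGO));
    printf("S_IRUGO           = %04o (was 0444)\n", (unsigned)S_IRUGO);
    printf("S_IWUSR           = %04o (was 0222)\n", (unsigned)S_IWUSR);
    return 0;
}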
diff --git a/drivers/net/wireless/ath5k/desc.c b/drivers/net/wireless/ath5k/desc.c
index dd1374052ba9..5e362a7a3620 100644
--- a/drivers/net/wireless/ath5k/desc.c
+++ b/drivers/net/wireless/ath5k/desc.c
@@ -531,10 +531,10 @@ static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
531 AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL); 531 AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL);
532 rs->rs_rate = AR5K_REG_MS(rx_status->rx_status_0, 532 rs->rs_rate = AR5K_REG_MS(rx_status->rx_status_0,
533 AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE); 533 AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE);
534 rs->rs_antenna = rx_status->rx_status_0 & 534 rs->rs_antenna = AR5K_REG_MS(rx_status->rx_status_0,
535 AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANTENNA; 535 AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANTENNA);
536 rs->rs_more = rx_status->rx_status_0 & 536 rs->rs_more = !!(rx_status->rx_status_0 &
537 AR5K_5210_RX_DESC_STATUS0_MORE; 537 AR5K_5210_RX_DESC_STATUS0_MORE);
538 /* TODO: this timestamp is 13 bit, later on we assume 15 bit */ 538 /* TODO: this timestamp is 13 bit, later on we assume 15 bit */
539 rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1, 539 rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1,
540 AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP); 540 AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP);
@@ -607,10 +607,10 @@ static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
607 AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL); 607 AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL);
608 rs->rs_rate = AR5K_REG_MS(rx_status->rx_status_0, 608 rs->rs_rate = AR5K_REG_MS(rx_status->rx_status_0,
609 AR5K_5212_RX_DESC_STATUS0_RECEIVE_RATE); 609 AR5K_5212_RX_DESC_STATUS0_RECEIVE_RATE);
610 rs->rs_antenna = rx_status->rx_status_0 & 610 rs->rs_antenna = AR5K_REG_MS(rx_status->rx_status_0,
611 AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA; 611 AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA);
612 rs->rs_more = rx_status->rx_status_0 & 612 rs->rs_more = !!(rx_status->rx_status_0 &
613 AR5K_5212_RX_DESC_STATUS0_MORE; 613 AR5K_5212_RX_DESC_STATUS0_MORE);
614 rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1, 614 rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1,
615 AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP); 615 AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP);
616 rs->rs_status = 0; 616 rs->rs_status = 0;
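In the desc.c hunks, rs_antenna previously kept the raw masked register value; it is now extracted with AR5K_REG_MS(), which masks and then shifts the field down to bit 0, and rs_more is coerced to 0/1 with the double negation. A generic sketch of both idioms follows; the field masks, shift, and sample value are made up for the example, and the driver's real macro derives the shift from a companion <MASK>_S constant.

/* Mask-and-shift field extraction, the pattern behind AR5K_REG_MS(). */
#include <stdint.h>
#include <stdio.h>

#define FIELD_GET(val, mask, shift)  (((val) & (mask)) >> (shift))

#define RX_STATUS0_ANTENNA        0x00f00000u   /* hypothetical 4-bit field */
#define RX_STATUS0_ANTENNA_SHIFT  20
#define RX_STATUS0_MORE           0x00001000u   /* hypothetical flag bit */

int main(void)
{
    uint32_t rx_status_0 = 0x00301000u;

    /* before the fix: antenna = rx_status_0 & MASK -> 0x00300000, not 3 */
    unsigned antenna = FIELD_GET(rx_status_0, RX_STATUS0_ANTENNA,
                                 RX_STATUS0_ANTENNA_SHIFT);

    /* !! turns any non-zero masked value into exactly 0 or 1 */
    unsigned more = !!(rx_status_0 & RX_STATUS0_MORE);

    printf("antenna=%u more=%u\n", antenna, more);
    return 0;
}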
diff --git a/drivers/net/wireless/ath5k/initvals.c b/drivers/net/wireless/ath5k/initvals.c
index ea2e1a20b499..ceaa6c475c06 100644
--- a/drivers/net/wireless/ath5k/initvals.c
+++ b/drivers/net/wireless/ath5k/initvals.c
@@ -806,6 +806,8 @@ static const struct ath5k_ini_mode ar5212_rf5111_ini_mode_end[] = {
806 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 806 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
807 { AR5K_PHY(642), 807 { AR5K_PHY(642),
808 { 0xd03e6788, 0xd03e6788, 0xd03e6788, 0xd03e6788, 0xd03e6788 } }, 808 { 0xd03e6788, 0xd03e6788, 0xd03e6788, 0xd03e6788, 0xd03e6788 } },
809 { 0xa228,
810 { 0x000001b5, 0x000001b5, 0x000001b5, 0x000001b5, 0x000001b5 } },
809 { 0xa23c, 811 { 0xa23c,
810 { 0x13c889af, 0x13c889af, 0x13c889af, 0x13c889af, 0x13c889af } }, 812 { 0x13c889af, 0x13c889af, 0x13c889af, 0x13c889af, 0x13c889af } },
811}; 813};
diff --git a/drivers/net/wireless/ath5k/reset.c b/drivers/net/wireless/ath5k/reset.c
index 8f1886834e61..1b6d45b6772d 100644
--- a/drivers/net/wireless/ath5k/reset.c
+++ b/drivers/net/wireless/ath5k/reset.c
@@ -537,9 +537,10 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
537 mdelay(1); 537 mdelay(1);
538 538
539 /* 539 /*
540 * Write some more initial register settings 540 * Write some more initial register settings for revised chips
541 */ 541 */
542 if (ah->ah_version == AR5K_AR5212) { 542 if (ah->ah_version == AR5K_AR5212 &&
543 ah->ah_phy_revision > 0x41) {
543 ath5k_hw_reg_write(ah, 0x0002a002, 0x982c); 544 ath5k_hw_reg_write(ah, 0x0002a002, 0x982c);
544 545
545 if (channel->hw_value == CHANNEL_G) 546 if (channel->hw_value == CHANNEL_G)
@@ -558,19 +559,10 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
558 else 559 else
559 ath5k_hw_reg_write(ah, 0x00000000, 0x994c); 560 ath5k_hw_reg_write(ah, 0x00000000, 0x994c);
560 561
561 /* Some bits are disabled here, we know nothing about 562 /* Got this from legacy-hal */
562 * register 0xa228 yet, most of the times this ends up 563 AR5K_REG_DISABLE_BITS(ah, 0xa228, 0x200);
563 * with a value 0x9b5 -haven't seen any dump with 564
564 * a different value- */ 565 AR5K_REG_MASKED_BITS(ah, 0xa228, 0x800, 0xfffe03ff);
565 /* Got this from decompiling binary HAL */
566 data = ath5k_hw_reg_read(ah, 0xa228);
567 data &= 0xfffffdff;
568 ath5k_hw_reg_write(ah, data, 0xa228);
569
570 data = ath5k_hw_reg_read(ah, 0xa228);
571 data &= 0xfffe03ff;
572 ath5k_hw_reg_write(ah, data, 0xa228);
573 data = 0;
574 566
575 /* Just write 0x9b5 ? */ 567 /* Just write 0x9b5 ? */
576 /* ath5k_hw_reg_write(ah, 0x000009b5, 0xa228); */ 568 /* ath5k_hw_reg_write(ah, 0x000009b5, 0xa228); */
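The reset.c hunk replaces the open-coded read/mask/write sequences on register 0xa228 with AR5K_REG_DISABLE_BITS() and AR5K_REG_MASKED_BITS(). Their exact definitions are not shown in this hunk; the sketch below assumes the usual semantics of "clear the given bits" and "keep only the masked bits, then OR in new ones", with a plain variable standing in for the MMIO register.

/* Read-modify-write sketch matching the 0xa228 sequence above. */
#include <stdint.h>
#include <stdio.h>

static uint32_t reg_a228 = 0x000009b5;   /* value commonly seen in dumps */

static uint32_t reg_read(void)        { return reg_a228; }
static void     reg_write(uint32_t v) { reg_a228 = v; }

static void reg_disable_bits(uint32_t bits)
{
    reg_write(reg_read() & ~bits);
}

static void reg_masked_bits(uint32_t bits, uint32_t mask)
{
    reg_write((reg_read() & mask) | bits);
}

int main(void)
{
    reg_disable_bits(0x200);                 /* clear bit 9 */
    reg_masked_bits(0x800, 0xfffe03ff);      /* keep masked bits, set bit 11 */
    printf("0xa228 = 0x%08x\n", (unsigned)reg_a228);
    return 0;
}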
diff --git a/drivers/net/wireless/ath9k/beacon.c b/drivers/net/wireless/ath9k/beacon.c
index 9e15c30bbc06..4dd1c1bda0fb 100644
--- a/drivers/net/wireless/ath9k/beacon.c
+++ b/drivers/net/wireless/ath9k/beacon.c
@@ -170,7 +170,7 @@ static struct ath_buf *ath_beacon_generate(struct ath_softc *sc, int if_id)
170 skb = (struct sk_buff *)bf->bf_mpdu; 170 skb = (struct sk_buff *)bf->bf_mpdu;
171 if (skb) { 171 if (skb) {
172 pci_unmap_single(sc->pdev, bf->bf_dmacontext, 172 pci_unmap_single(sc->pdev, bf->bf_dmacontext,
173 skb_end_pointer(skb) - skb->head, 173 skb->len,
174 PCI_DMA_TODEVICE); 174 PCI_DMA_TODEVICE);
175 } 175 }
176 176
@@ -193,7 +193,7 @@ static struct ath_buf *ath_beacon_generate(struct ath_softc *sc, int if_id)
193 193
194 bf->bf_buf_addr = bf->bf_dmacontext = 194 bf->bf_buf_addr = bf->bf_dmacontext =
195 pci_map_single(sc->pdev, skb->data, 195 pci_map_single(sc->pdev, skb->data,
196 skb_end_pointer(skb) - skb->head, 196 skb->len,
197 PCI_DMA_TODEVICE); 197 PCI_DMA_TODEVICE);
198 198
199 skb = ieee80211_get_buffered_bc(sc->hw, avp->av_if_data); 199 skb = ieee80211_get_buffered_bc(sc->hw, avp->av_if_data);
@@ -352,7 +352,7 @@ int ath_beacon_alloc(struct ath_softc *sc, int if_id)
352 if (bf->bf_mpdu != NULL) { 352 if (bf->bf_mpdu != NULL) {
353 skb = (struct sk_buff *)bf->bf_mpdu; 353 skb = (struct sk_buff *)bf->bf_mpdu;
354 pci_unmap_single(sc->pdev, bf->bf_dmacontext, 354 pci_unmap_single(sc->pdev, bf->bf_dmacontext,
355 skb_end_pointer(skb) - skb->head, 355 skb->len,
356 PCI_DMA_TODEVICE); 356 PCI_DMA_TODEVICE);
357 dev_kfree_skb_any(skb); 357 dev_kfree_skb_any(skb);
358 bf->bf_mpdu = NULL; 358 bf->bf_mpdu = NULL;
@@ -412,7 +412,7 @@ int ath_beacon_alloc(struct ath_softc *sc, int if_id)
412 412
413 bf->bf_buf_addr = bf->bf_dmacontext = 413 bf->bf_buf_addr = bf->bf_dmacontext =
414 pci_map_single(sc->pdev, skb->data, 414 pci_map_single(sc->pdev, skb->data,
415 skb_end_pointer(skb) - skb->head, 415 skb->len,
416 PCI_DMA_TODEVICE); 416 PCI_DMA_TODEVICE);
417 bf->bf_mpdu = skb; 417 bf->bf_mpdu = skb;
418 418
@@ -439,7 +439,7 @@ void ath_beacon_return(struct ath_softc *sc, struct ath_vap *avp)
439 if (bf->bf_mpdu != NULL) { 439 if (bf->bf_mpdu != NULL) {
440 struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu; 440 struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
441 pci_unmap_single(sc->pdev, bf->bf_dmacontext, 441 pci_unmap_single(sc->pdev, bf->bf_dmacontext,
442 skb_end_pointer(skb) - skb->head, 442 skb->len,
443 PCI_DMA_TODEVICE); 443 PCI_DMA_TODEVICE);
444 dev_kfree_skb_any(skb); 444 dev_kfree_skb_any(skb);
445 bf->bf_mpdu = NULL; 445 bf->bf_mpdu = NULL;
diff --git a/drivers/net/wireless/ath9k/recv.c b/drivers/net/wireless/ath9k/recv.c
index 4983402af559..504a0444d89f 100644
--- a/drivers/net/wireless/ath9k/recv.c
+++ b/drivers/net/wireless/ath9k/recv.c
@@ -49,10 +49,12 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
49 ASSERT(skb != NULL); 49 ASSERT(skb != NULL);
50 ds->ds_vdata = skb->data; 50 ds->ds_vdata = skb->data;
51 51
 52	/* setup rx descriptors */	 52	/* setup rx descriptors. The sc_rxbufsize here tells the hardware
53 * how much data it can DMA to us and that we are prepared
54 * to process */
53 ath9k_hw_setuprxdesc(ah, 55 ath9k_hw_setuprxdesc(ah,
54 ds, 56 ds,
55 skb_tailroom(skb), /* buffer size */ 57 sc->sc_rxbufsize,
56 0); 58 0);
57 59
58 if (sc->sc_rxlink == NULL) 60 if (sc->sc_rxlink == NULL)
@@ -398,6 +400,13 @@ static struct sk_buff *ath_rxbuf_alloc(struct ath_softc *sc,
398 * in rx'd frames. 400 * in rx'd frames.
399 */ 401 */
400 402
403 /* Note: the kernel can allocate a value greater than
404 * what we ask it to give us. We really only need 4 KB as that
405 * is this hardware supports and in fact we need at least 3849
406 * as that is the MAX AMSDU size this hardware supports.
407 * Unfortunately this means we may get 8 KB here from the
408 * kernel... and that is actually what is observed on some
409 * systems :( */
401 skb = dev_alloc_skb(len + sc->sc_cachelsz - 1); 410 skb = dev_alloc_skb(len + sc->sc_cachelsz - 1);
402 if (skb != NULL) { 411 if (skb != NULL) {
403 off = ((unsigned long) skb->data) % sc->sc_cachelsz; 412 off = ((unsigned long) skb->data) % sc->sc_cachelsz;
@@ -456,7 +465,7 @@ static int ath_rx_indicate(struct ath_softc *sc,
456 if (nskb != NULL) { 465 if (nskb != NULL) {
457 bf->bf_mpdu = nskb; 466 bf->bf_mpdu = nskb;
458 bf->bf_buf_addr = pci_map_single(sc->pdev, nskb->data, 467 bf->bf_buf_addr = pci_map_single(sc->pdev, nskb->data,
459 skb_end_pointer(nskb) - nskb->head, 468 sc->sc_rxbufsize,
460 PCI_DMA_FROMDEVICE); 469 PCI_DMA_FROMDEVICE);
461 bf->bf_dmacontext = bf->bf_buf_addr; 470 bf->bf_dmacontext = bf->bf_buf_addr;
462 ATH_RX_CONTEXT(nskb)->ctx_rxbuf = bf; 471 ATH_RX_CONTEXT(nskb)->ctx_rxbuf = bf;
@@ -542,7 +551,7 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
542 551
543 bf->bf_mpdu = skb; 552 bf->bf_mpdu = skb;
544 bf->bf_buf_addr = pci_map_single(sc->pdev, skb->data, 553 bf->bf_buf_addr = pci_map_single(sc->pdev, skb->data,
545 skb_end_pointer(skb) - skb->head, 554 sc->sc_rxbufsize,
546 PCI_DMA_FROMDEVICE); 555 PCI_DMA_FROMDEVICE);
547 bf->bf_dmacontext = bf->bf_buf_addr; 556 bf->bf_dmacontext = bf->bf_buf_addr;
548 ATH_RX_CONTEXT(skb)->ctx_rxbuf = bf; 557 ATH_RX_CONTEXT(skb)->ctx_rxbuf = bf;
@@ -1007,7 +1016,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
1007 1016
1008 pci_dma_sync_single_for_cpu(sc->pdev, 1017 pci_dma_sync_single_for_cpu(sc->pdev,
1009 bf->bf_buf_addr, 1018 bf->bf_buf_addr,
1010 skb_tailroom(skb), 1019 sc->sc_rxbufsize,
1011 PCI_DMA_FROMDEVICE); 1020 PCI_DMA_FROMDEVICE);
1012 pci_unmap_single(sc->pdev, 1021 pci_unmap_single(sc->pdev,
1013 bf->bf_buf_addr, 1022 bf->bf_buf_addr,
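The recv.c changes make the RX descriptor and DMA-map lengths use the fixed sc_rxbufsize instead of deriving a size from skb internals, and the allocation over-asks by cachelsz - 1 bytes so the data pointer can be pushed to a cache-line boundary. The sketch below shows only that alignment arithmetic; malloc() stands in for dev_alloc_skb() plus the skb_reserve() step, and the sizes are illustrative.

/* Cache-line alignment trick from ath_rxbuf_alloc(), in user space. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    size_t cachelsz = 32;       /* cache line size reported by the bus */
    size_t len = 3849;          /* max A-MSDU this hardware handles */

    unsigned char *buf = malloc(len + cachelsz - 1);
    if (!buf)
        return 1;

    size_t off = (uintptr_t)buf % cachelsz;
    unsigned char *data = off ? buf + (cachelsz - off) : buf;

    printf("raw=%p aligned=%p (skipped %zu bytes)\n",
           (void *)buf, (void *)data, (size_t)(data - buf));
    free(buf);
    return 0;
}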
diff --git a/drivers/net/wireless/hostap/hostap_wlan.h b/drivers/net/wireless/hostap/hostap_wlan.h
index ffdf4876121b..a68f97c39359 100644
--- a/drivers/net/wireless/hostap/hostap_wlan.h
+++ b/drivers/net/wireless/hostap/hostap_wlan.h
@@ -918,9 +918,12 @@ struct hostap_interface {
918 918
919/* 919/*
920 * TX meta data - stored in skb->cb buffer, so this must not be increased over 920 * TX meta data - stored in skb->cb buffer, so this must not be increased over
921 * the 40-byte limit 921 * the 48-byte limit.
922 * THE PADDING THIS STARTS WITH IS A HORRIBLE HACK THAT SHOULD NOT LIVE
923 * TO SEE THE DAY.
922 */ 924 */
923struct hostap_skb_tx_data { 925struct hostap_skb_tx_data {
926 unsigned int __padding_for_default_qdiscs;
924 u32 magic; /* HOSTAP_SKB_TX_DATA_MAGIC */ 927 u32 magic; /* HOSTAP_SKB_TX_DATA_MAGIC */
925 u8 rate; /* transmit rate */ 928 u8 rate; /* transmit rate */
926#define HOSTAP_TX_FLAGS_WDS BIT(0) 929#define HOSTAP_TX_FLAGS_WDS BIT(0)
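The hostap hunk pads hostap_skb_tx_data and updates the comment to the 48-byte skb->cb limit; the hard constraint is that the whole struct must fit in that control buffer. A compile-time guard like the sketch below keeps such a limit honest; the struct here is abbreviated and purely illustrative.

/* Compile-time guard for the skb->cb size constraint described above. */
#include <stdint.h>
#include <stdio.h>

#define SKB_CB_SIZE 48

struct hostap_skb_tx_data_sketch {
    unsigned int __padding_for_default_qdiscs;
    uint32_t magic;
    uint8_t  rate;
    uint8_t  flags;
    /* ... remaining fields elided ... */
};

_Static_assert(sizeof(struct hostap_skb_tx_data_sketch) <= SKB_CB_SIZE,
               "TX metadata must fit inside skb->cb");

int main(void)
{
    printf("sketch size: %zu bytes (limit %d)\n",
           sizeof(struct hostap_skb_tx_data_sketch), SKB_CB_SIZE);
    return 0;
}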
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index dcce3542d5a7..7a9f901d4ff6 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -3897,6 +3897,7 @@ static int ipw_disassociate(void *data)
3897 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) 3897 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
3898 return 0; 3898 return 0;
3899 ipw_send_disassociate(data, 0); 3899 ipw_send_disassociate(data, 0);
3900 netif_carrier_off(priv->net_dev);
3900 return 1; 3901 return 1;
3901} 3902}
3902 3903
@@ -10190,6 +10191,9 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
10190 u16 remaining_bytes; 10191 u16 remaining_bytes;
10191 int fc; 10192 int fc;
10192 10193
10194 if (!(priv->status & STATUS_ASSOCIATED))
10195 goto drop;
10196
10193 hdr_len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl)); 10197 hdr_len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
10194 switch (priv->ieee->iw_mode) { 10198 switch (priv->ieee->iw_mode) {
10195 case IW_MODE_ADHOC: 10199 case IW_MODE_ADHOC:
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 24a1aeb6448f..c4c0371c763b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -1384,9 +1384,11 @@ void iwl_rx_handle(struct iwl_priv *priv)
1384 1384
1385 rxq->queue[i] = NULL; 1385 rxq->queue[i] = NULL;
1386 1386
1387 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr, 1387 dma_sync_single_range_for_cpu(
1388 priv->hw_params.rx_buf_size, 1388 &priv->pci_dev->dev, rxb->real_dma_addr,
1389 PCI_DMA_FROMDEVICE); 1389 rxb->aligned_dma_addr - rxb->real_dma_addr,
1390 priv->hw_params.rx_buf_size,
1391 PCI_DMA_FROMDEVICE);
1390 pkt = (struct iwl_rx_packet *)rxb->skb->data; 1392 pkt = (struct iwl_rx_packet *)rxb->skb->data;
1391 1393
1392 /* Reclaim a command buffer only if this packet is a response 1394 /* Reclaim a command buffer only if this packet is a response
@@ -1436,8 +1438,8 @@ void iwl_rx_handle(struct iwl_priv *priv)
1436 rxb->skb = NULL; 1438 rxb->skb = NULL;
1437 } 1439 }
1438 1440
1439 pci_unmap_single(priv->pci_dev, rxb->dma_addr, 1441 pci_unmap_single(priv->pci_dev, rxb->real_dma_addr,
1440 priv->hw_params.rx_buf_size, 1442 priv->hw_params.rx_buf_size + 256,
1441 PCI_DMA_FROMDEVICE); 1443 PCI_DMA_FROMDEVICE);
1442 spin_lock_irqsave(&rxq->lock, flags); 1444 spin_lock_irqsave(&rxq->lock, flags);
1443 list_add_tail(&rxb->list, &priv->rxq.rx_used); 1445 list_add_tail(&rxb->list, &priv->rxq.rx_used);
@@ -2090,7 +2092,6 @@ static void iwl_alive_start(struct iwl_priv *priv)
2090 iwl4965_error_recovery(priv); 2092 iwl4965_error_recovery(priv);
2091 2093
2092 iwl_power_update_mode(priv, 1); 2094 iwl_power_update_mode(priv, 1);
2093 ieee80211_notify_mac(priv->hw, IEEE80211_NOTIFY_RE_ASSOC);
2094 2095
2095 if (test_and_clear_bit(STATUS_MODE_PENDING, &priv->status)) 2096 if (test_and_clear_bit(STATUS_MODE_PENDING, &priv->status))
2096 iwl4965_set_mode(priv, priv->iw_mode); 2097 iwl4965_set_mode(priv, priv->iw_mode);
@@ -3252,7 +3253,11 @@ static void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw,
3252 return; 3253 return;
3253 } 3254 }
3254 3255
3255 iwl_scan_cancel_timeout(priv, 100); 3256 if (iwl_scan_cancel(priv)) {
3257 /* cancel scan failed, just live w/ bad key and rely
3258 briefly on SW decryption */
3259 return;
3260 }
3256 3261
3257 key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK); 3262 key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
3258 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); 3263 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 4c312c55f90c..01a845851338 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -290,6 +290,9 @@ void iwl_clear_stations_table(struct iwl_priv *priv)
290 priv->num_stations = 0; 290 priv->num_stations = 0;
291 memset(priv->stations, 0, sizeof(priv->stations)); 291 memset(priv->stations, 0, sizeof(priv->stations));
292 292
293 /* clean ucode key table bit map */
294 priv->ucode_key_table = 0;
295
293 spin_unlock_irqrestore(&priv->sta_lock, flags); 296 spin_unlock_irqrestore(&priv->sta_lock, flags);
294} 297}
295EXPORT_SYMBOL(iwl_clear_stations_table); 298EXPORT_SYMBOL(iwl_clear_stations_table);
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index c018121085e9..9966d4e384ce 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -89,7 +89,8 @@ extern struct iwl_cfg iwl5100_abg_cfg;
89#define DEFAULT_LONG_RETRY_LIMIT 4U 89#define DEFAULT_LONG_RETRY_LIMIT 4U
90 90
91struct iwl_rx_mem_buffer { 91struct iwl_rx_mem_buffer {
92 dma_addr_t dma_addr; 92 dma_addr_t real_dma_addr;
93 dma_addr_t aligned_dma_addr;
93 struct sk_buff *skb; 94 struct sk_buff *skb;
94 struct list_head list; 95 struct list_head list;
95}; 96};
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index 7cde9d76ff5d..0509c16dbe75 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -204,7 +204,7 @@ int iwl_rx_queue_restock(struct iwl_priv *priv)
204 list_del(element); 204 list_del(element);
205 205
206 /* Point to Rx buffer via next RBD in circular buffer */ 206 /* Point to Rx buffer via next RBD in circular buffer */
207 rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->dma_addr); 207 rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->aligned_dma_addr);
208 rxq->queue[rxq->write] = rxb; 208 rxq->queue[rxq->write] = rxb;
209 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; 209 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
210 rxq->free_count--; 210 rxq->free_count--;
@@ -251,7 +251,7 @@ void iwl_rx_allocate(struct iwl_priv *priv)
251 rxb = list_entry(element, struct iwl_rx_mem_buffer, list); 251 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
252 252
253 /* Alloc a new receive buffer */ 253 /* Alloc a new receive buffer */
254 rxb->skb = alloc_skb(priv->hw_params.rx_buf_size, 254 rxb->skb = alloc_skb(priv->hw_params.rx_buf_size + 256,
255 __GFP_NOWARN | GFP_ATOMIC); 255 __GFP_NOWARN | GFP_ATOMIC);
256 if (!rxb->skb) { 256 if (!rxb->skb) {
257 if (net_ratelimit()) 257 if (net_ratelimit())
@@ -266,9 +266,17 @@ void iwl_rx_allocate(struct iwl_priv *priv)
266 list_del(element); 266 list_del(element);
267 267
268 /* Get physical address of RB/SKB */ 268 /* Get physical address of RB/SKB */
269 rxb->dma_addr = 269 rxb->real_dma_addr = pci_map_single(
270 pci_map_single(priv->pci_dev, rxb->skb->data, 270 priv->pci_dev,
271 priv->hw_params.rx_buf_size, PCI_DMA_FROMDEVICE); 271 rxb->skb->data,
272 priv->hw_params.rx_buf_size + 256,
273 PCI_DMA_FROMDEVICE);
274 /* dma address must be no more than 36 bits */
275 BUG_ON(rxb->real_dma_addr & ~DMA_BIT_MASK(36));
276 /* and also 256 byte aligned! */
277 rxb->aligned_dma_addr = ALIGN(rxb->real_dma_addr, 256);
278 skb_reserve(rxb->skb, rxb->aligned_dma_addr - rxb->real_dma_addr);
279
272 list_add_tail(&rxb->list, &rxq->rx_free); 280 list_add_tail(&rxb->list, &rxq->rx_free);
273 rxq->free_count++; 281 rxq->free_count++;
274 } 282 }
@@ -300,8 +308,8 @@ void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
300 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { 308 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
301 if (rxq->pool[i].skb != NULL) { 309 if (rxq->pool[i].skb != NULL) {
302 pci_unmap_single(priv->pci_dev, 310 pci_unmap_single(priv->pci_dev,
303 rxq->pool[i].dma_addr, 311 rxq->pool[i].real_dma_addr,
304 priv->hw_params.rx_buf_size, 312 priv->hw_params.rx_buf_size + 256,
305 PCI_DMA_FROMDEVICE); 313 PCI_DMA_FROMDEVICE);
306 dev_kfree_skb(rxq->pool[i].skb); 314 dev_kfree_skb(rxq->pool[i].skb);
307 } 315 }
@@ -354,8 +362,8 @@ void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
354 * to an SKB, so we need to unmap and free potential storage */ 362 * to an SKB, so we need to unmap and free potential storage */
355 if (rxq->pool[i].skb != NULL) { 363 if (rxq->pool[i].skb != NULL) {
356 pci_unmap_single(priv->pci_dev, 364 pci_unmap_single(priv->pci_dev,
357 rxq->pool[i].dma_addr, 365 rxq->pool[i].real_dma_addr,
358 priv->hw_params.rx_buf_size, 366 priv->hw_params.rx_buf_size + 256,
359 PCI_DMA_FROMDEVICE); 367 PCI_DMA_FROMDEVICE);
360 priv->alloc_rxb_skb--; 368 priv->alloc_rxb_skb--;
361 dev_kfree_skb(rxq->pool[i].skb); 369 dev_kfree_skb(rxq->pool[i].skb);
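The iwlwifi RX hunks split the single dma_addr into real_dma_addr (what was mapped and must later be unmapped) and aligned_dma_addr (rounded up to 256 bytes, the address handed to the RBD ring), over-allocating and over-mapping by 256 bytes to leave room for the round-up and advancing skb->data by the same delta with skb_reserve(). The arithmetic is sketched below with plain integers standing in for dma_addr_t and the skb.

/* 256-byte alignment bookkeeping, as in iwl_rx_allocate(). */
#include <stdint.h>
#include <stdio.h>

#define ALIGN(x, a)  (((x) + ((a) - 1)) & ~((uint64_t)(a) - 1))

int main(void)
{
    uint64_t real_dma_addr = 0x12345678a10ULL;   /* example mapping result */
    uint64_t aligned_dma_addr = ALIGN(real_dma_addr, 256);
    uint64_t reserve = aligned_dma_addr - real_dma_addr;

    /* the buffer was allocated and mapped rx_buf_size + 256 bytes long,
     * so rx_buf_size usable bytes still start at the aligned address */
    printf("real=0x%llx aligned=0x%llx skb_reserve=%llu\n",
           (unsigned long long)real_dma_addr,
           (unsigned long long)aligned_dma_addr,
           (unsigned long long)reserve);
    return 0;
}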
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index 3b0bee331a33..c89365e2ca58 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -896,6 +896,13 @@ static void iwl_bg_request_scan(struct work_struct *data)
896 return; 896 return;
897 897
898 done: 898 done:
899 /* Cannot perform scan. Make sure we clear scanning
900 * bits from status so next scan request can be performed.
901	 * If we don't clear the scanning status bit here, every
902	 * subsequent scan will fail
903 */
904 clear_bit(STATUS_SCAN_HW, &priv->status);
905 clear_bit(STATUS_SCANNING, &priv->status);
899 /* inform mac80211 scan aborted */ 906 /* inform mac80211 scan aborted */
900 queue_work(priv->workqueue, &priv->scan_completed); 907 queue_work(priv->workqueue, &priv->scan_completed);
901 mutex_unlock(&priv->mutex); 908 mutex_unlock(&priv->mutex);
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index 61797f3f8d5c..26f7084d3011 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -475,7 +475,7 @@ static int iwl_get_free_ucode_key_index(struct iwl_priv *priv)
475 if (!test_and_set_bit(i, &priv->ucode_key_table)) 475 if (!test_and_set_bit(i, &priv->ucode_key_table))
476 return i; 476 return i;
477 477
478 return -1; 478 return WEP_INVALID_OFFSET;
479} 479}
480 480
481int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty) 481int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty)
@@ -620,6 +620,9 @@ static int iwl_set_wep_dynamic_key_info(struct iwl_priv *priv,
620 /* else, we are overriding an existing key => no need to allocated room 620 /* else, we are overriding an existing key => no need to allocated room
621 * in uCode. */ 621 * in uCode. */
622 622
623 WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
624	     "no space for new key");
625
623 priv->stations[sta_id].sta.key.key_flags = key_flags; 626 priv->stations[sta_id].sta.key.key_flags = key_flags;
624 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; 627 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
625 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 628 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
@@ -637,6 +640,7 @@ static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
637{ 640{
638 unsigned long flags; 641 unsigned long flags;
639 __le16 key_flags = 0; 642 __le16 key_flags = 0;
643 int ret;
640 644
641 key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK); 645 key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
642 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); 646 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
@@ -664,14 +668,18 @@ static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
664 /* else, we are overriding an existing key => no need to allocated room 668 /* else, we are overriding an existing key => no need to allocated room
665 * in uCode. */ 669 * in uCode. */
666 670
671 WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
672	     "no space for new key");
673
667 priv->stations[sta_id].sta.key.key_flags = key_flags; 674 priv->stations[sta_id].sta.key.key_flags = key_flags;
668 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; 675 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
669 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 676 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
670 677
678 ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
679
671 spin_unlock_irqrestore(&priv->sta_lock, flags); 680 spin_unlock_irqrestore(&priv->sta_lock, flags);
672 681
673 IWL_DEBUG_INFO("hwcrypto: modify ucode station key info\n"); 682 return ret;
674 return iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
675} 683}
676 684
677static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv, 685static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
@@ -696,6 +704,9 @@ static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
696 /* else, we are overriding an existing key => no need to allocated room 704 /* else, we are overriding an existing key => no need to allocated room
697 * in uCode. */ 705 * in uCode. */
698 706
707 WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
708	     "no space for new key");
709
699 /* This copy is acutally not needed: we get the key with each TX */ 710 /* This copy is acutally not needed: we get the key with each TX */
700 memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16); 711 memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16);
701 712
@@ -734,6 +745,13 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
734 return 0; 745 return 0;
735 } 746 }
736 747
748 if (priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
749 IWL_WARNING("Removing wrong key %d 0x%x\n",
750 keyconf->keyidx, key_flags);
751 spin_unlock_irqrestore(&priv->sta_lock, flags);
752 return 0;
753 }
754
737 if (!test_and_clear_bit(priv->stations[sta_id].sta.key.key_offset, 755 if (!test_and_clear_bit(priv->stations[sta_id].sta.key.key_offset,
738 &priv->ucode_key_table)) 756 &priv->ucode_key_table))
739 IWL_ERROR("index %d not used in uCode key table.\n", 757 IWL_ERROR("index %d not used in uCode key table.\n",
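The iwl-sta.c changes make the free-slot search over priv->ucode_key_table return the WEP_INVALID_OFFSET sentinel instead of -1 when the table is full, have the key-install paths WARN on that value, and bail out of key removal when a station never got a valid offset. The sketch below models that allocator over a single unsigned long bitmap; the table size and sentinel value are illustrative, and the bit test-and-set is reduced to plain C.

/* Free-slot allocation with a "no room" sentinel, as in
 * iwl_get_free_ucode_key_index(). */
#include <stdio.h>

#define KEY_TABLE_SIZE   8        /* hypothetical number of ucode key slots */
#define INVALID_OFFSET   255      /* stands in for WEP_INVALID_OFFSET */

static unsigned long ucode_key_table;

static unsigned get_free_key_index(void)
{
    for (unsigned i = 0; i < KEY_TABLE_SIZE; i++) {
        if (!(ucode_key_table & (1UL << i))) {
            ucode_key_table |= 1UL << i;     /* test_and_set_bit() */
            return i;
        }
    }
    return INVALID_OFFSET;        /* callers must check before using it */
}

int main(void)
{
    for (int n = 0; n < KEY_TABLE_SIZE + 1; n++) {
        unsigned idx = get_free_key_index();
        if (idx == INVALID_OFFSET)
            printf("no space for new key\n");
        else
            printf("allocated slot %u\n", idx);
    }
    return 0;
}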
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index d15a2c997954..45a6b0c35695 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -5768,7 +5768,6 @@ static void iwl3945_alive_start(struct iwl3945_priv *priv)
5768 if (priv->error_recovering) 5768 if (priv->error_recovering)
5769 iwl3945_error_recovery(priv); 5769 iwl3945_error_recovery(priv);
5770 5770
5771 ieee80211_notify_mac(priv->hw, IEEE80211_NOTIFY_RE_ASSOC);
5772 return; 5771 return;
5773 5772
5774 restart: 5773 restart:
@@ -6256,6 +6255,11 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
6256 n_probes, 6255 n_probes,
6257 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]); 6256 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
6258 6257
6258 if (scan->channel_count == 0) {
6259 IWL_DEBUG_SCAN("channel count %d\n", scan->channel_count);
6260 goto done;
6261 }
6262
6259 cmd.len += le16_to_cpu(scan->tx_cmd.len) + 6263 cmd.len += le16_to_cpu(scan->tx_cmd.len) +
6260 scan->channel_count * sizeof(struct iwl3945_scan_channel); 6264 scan->channel_count * sizeof(struct iwl3945_scan_channel);
6261 cmd.data = scan; 6265 cmd.data = scan;
@@ -6273,6 +6277,14 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
6273 return; 6277 return;
6274 6278
6275 done: 6279 done:
6280	/* Cannot perform scan. Make sure we clear scanning
6281	 * bits from status so the next scan request can be performed.
6282	 * If we don't clear the scanning status bit here, every
6283	 * subsequent scan will fail
6284 */
6285 clear_bit(STATUS_SCAN_HW, &priv->status);
6286 clear_bit(STATUS_SCANNING, &priv->status);
6287
6276 /* inform mac80211 scan aborted */ 6288 /* inform mac80211 scan aborted */
6277 queue_work(priv->workqueue, &priv->scan_completed); 6289 queue_work(priv->workqueue, &priv->scan_completed);
6278 mutex_unlock(&priv->mutex); 6290 mutex_unlock(&priv->mutex);
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index 297696de2da0..8265c7d25edc 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -605,9 +605,9 @@ int lbs_get_tx_power(struct lbs_private *priv, s16 *curlevel, s16 *minlevel,
605 if (ret == 0) { 605 if (ret == 0) {
606 *curlevel = le16_to_cpu(cmd.curlevel); 606 *curlevel = le16_to_cpu(cmd.curlevel);
607 if (minlevel) 607 if (minlevel)
608 *minlevel = le16_to_cpu(cmd.minlevel); 608 *minlevel = cmd.minlevel;
609 if (maxlevel) 609 if (maxlevel)
610 *maxlevel = le16_to_cpu(cmd.maxlevel); 610 *maxlevel = cmd.maxlevel;
611 } 611 }
612 612
613 lbs_deb_leave(LBS_DEB_CMD); 613 lbs_deb_leave(LBS_DEB_CMD);
diff --git a/drivers/net/wireless/libertas/rx.c b/drivers/net/wireless/libertas/rx.c
index 5749f22b296f..079e6aa874dc 100644
--- a/drivers/net/wireless/libertas/rx.c
+++ b/drivers/net/wireless/libertas/rx.c
@@ -328,7 +328,7 @@ static int process_rxed_802_11_packet(struct lbs_private *priv,
328 lbs_deb_rx("rx err: frame received with bad length\n"); 328 lbs_deb_rx("rx err: frame received with bad length\n");
329 priv->stats.rx_length_errors++; 329 priv->stats.rx_length_errors++;
330 ret = -EINVAL; 330 ret = -EINVAL;
331 kfree(skb); 331 kfree_skb(skb);
332 goto done; 332 goto done;
333 } 333 }
334 334
diff --git a/drivers/net/wireless/libertas/scan.c b/drivers/net/wireless/libertas/scan.c
index 8f66903641b9..22c4c6110521 100644
--- a/drivers/net/wireless/libertas/scan.c
+++ b/drivers/net/wireless/libertas/scan.c
@@ -598,8 +598,8 @@ static int lbs_process_bss(struct bss_descriptor *bss,
598 598
599 switch (elem->id) { 599 switch (elem->id) {
600 case MFIE_TYPE_SSID: 600 case MFIE_TYPE_SSID:
601 bss->ssid_len = elem->len; 601 bss->ssid_len = min_t(int, 32, elem->len);
602 memcpy(bss->ssid, elem->data, elem->len); 602 memcpy(bss->ssid, elem->data, bss->ssid_len);
603 lbs_deb_scan("got SSID IE: '%s', len %u\n", 603 lbs_deb_scan("got SSID IE: '%s', len %u\n",
604 escape_essid(bss->ssid, bss->ssid_len), 604 escape_essid(bss->ssid, bss->ssid_len),
605 bss->ssid_len); 605 bss->ssid_len);
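The scan.c fix clamps the claimed IE length to the 32-byte SSID field before the memcpy, so a malformed or hostile beacon can no longer overflow bss->ssid. The clamp is sketched below with the struct and the min_t() step reduced to plain C.

/* Bounds-checked SSID IE copy, as in the lbs_process_bss() fix. */
#include <stdio.h>
#include <string.h>

#define IEEE80211_MAX_SSID_LEN 32

struct bss_sketch {
    char ssid[IEEE80211_MAX_SSID_LEN];
    int  ssid_len;
};

static void copy_ssid_ie(struct bss_sketch *bss,
                         const unsigned char *data, unsigned elem_len)
{
    bss->ssid_len = elem_len < IEEE80211_MAX_SSID_LEN
                    ? (int)elem_len : IEEE80211_MAX_SSID_LEN;
    memcpy(bss->ssid, data, bss->ssid_len);
}

int main(void)
{
    struct bss_sketch bss = { { 0 }, 0 };
    unsigned char bogus_ie[64];

    memset(bogus_ie, 'A', sizeof(bogus_ie));
    copy_ssid_ie(&bss, bogus_ie, sizeof(bogus_ie));   /* IE claims 64 bytes */
    printf("stored ssid_len=%d\n", bss.ssid_len);     /* clamped to 32 */
    return 0;
}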
diff --git a/drivers/net/wireless/libertas_tf/if_usb.c b/drivers/net/wireless/libertas_tf/if_usb.c
index 1cc03a8dd67a..59634c33b1f9 100644
--- a/drivers/net/wireless/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/libertas_tf/if_usb.c
@@ -331,7 +331,7 @@ static int __if_usb_submit_rx_urb(struct if_usb_card *cardp,
331 /* Fill the receive configuration URB and initialise the Rx call back */ 331 /* Fill the receive configuration URB and initialise the Rx call back */
332 usb_fill_bulk_urb(cardp->rx_urb, cardp->udev, 332 usb_fill_bulk_urb(cardp->rx_urb, cardp->udev,
333 usb_rcvbulkpipe(cardp->udev, cardp->ep_in), 333 usb_rcvbulkpipe(cardp->udev, cardp->ep_in),
334 (void *) (skb->tail), 334 skb_tail_pointer(skb),
335 MRVDRV_ETH_RX_PACKET_BUFFER_SIZE, callbackfn, cardp); 335 MRVDRV_ETH_RX_PACKET_BUFFER_SIZE, callbackfn, cardp);
336 336
337 cardp->rx_urb->transfer_flags |= URB_ZERO_PACKET; 337 cardp->rx_urb->transfer_flags |= URB_ZERO_PACKET;
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
index 50904771f291..e0512e49d6d3 100644
--- a/drivers/net/wireless/orinoco.c
+++ b/drivers/net/wireless/orinoco.c
@@ -433,7 +433,7 @@ struct fw_info {
433const static struct fw_info orinoco_fw[] = { 433const static struct fw_info orinoco_fw[] = {
434 { "", "agere_sta_fw.bin", "agere_ap_fw.bin", 0x00390000, 1000 }, 434 { "", "agere_sta_fw.bin", "agere_ap_fw.bin", 0x00390000, 1000 },
435 { "", "prism_sta_fw.bin", "prism_ap_fw.bin", 0, 1024 }, 435 { "", "prism_sta_fw.bin", "prism_ap_fw.bin", 0, 1024 },
436 { "symbol_sp24t_prim_fw", "symbol_sp24t_sec_fw", "", 0x00003100, 0x100 } 436 { "symbol_sp24t_prim_fw", "symbol_sp24t_sec_fw", "", 0x00003100, 512 }
437}; 437};
438 438
439/* Structure used to access fields in FW 439/* Structure used to access fields in FW
@@ -458,7 +458,7 @@ orinoco_dl_firmware(struct orinoco_private *priv,
458 int ap) 458 int ap)
459{ 459{
460 /* Plug Data Area (PDA) */ 460 /* Plug Data Area (PDA) */
461 __le16 pda[512] = { 0 }; 461 __le16 *pda;
462 462
463 hermes_t *hw = &priv->hw; 463 hermes_t *hw = &priv->hw;
464 const struct firmware *fw_entry; 464 const struct firmware *fw_entry;
@@ -467,7 +467,11 @@ orinoco_dl_firmware(struct orinoco_private *priv,
467 const unsigned char *end; 467 const unsigned char *end;
468 const char *firmware; 468 const char *firmware;
469 struct net_device *dev = priv->ndev; 469 struct net_device *dev = priv->ndev;
470 int err; 470 int err = 0;
471
472 pda = kzalloc(fw->pda_size, GFP_KERNEL);
473 if (!pda)
474 return -ENOMEM;
471 475
472 if (ap) 476 if (ap)
473 firmware = fw->ap_fw; 477 firmware = fw->ap_fw;
@@ -478,17 +482,17 @@ orinoco_dl_firmware(struct orinoco_private *priv,
478 dev->name, firmware); 482 dev->name, firmware);
479 483
480 /* Read current plug data */ 484 /* Read current plug data */
481 err = hermes_read_pda(hw, pda, fw->pda_addr, 485 err = hermes_read_pda(hw, pda, fw->pda_addr, fw->pda_size, 0);
482 min_t(u16, fw->pda_size, sizeof(pda)), 0);
483 printk(KERN_DEBUG "%s: Read PDA returned %d\n", dev->name, err); 486 printk(KERN_DEBUG "%s: Read PDA returned %d\n", dev->name, err);
484 if (err) 487 if (err)
485 return err; 488 goto free;
486 489
487 err = request_firmware(&fw_entry, firmware, priv->dev); 490 err = request_firmware(&fw_entry, firmware, priv->dev);
488 if (err) { 491 if (err) {
489 printk(KERN_ERR "%s: Cannot find firmware %s\n", 492 printk(KERN_ERR "%s: Cannot find firmware %s\n",
490 dev->name, firmware); 493 dev->name, firmware);
491 return -ENOENT; 494 err = -ENOENT;
495 goto free;
492 } 496 }
493 497
494 hdr = (const struct orinoco_fw_header *) fw_entry->data; 498 hdr = (const struct orinoco_fw_header *) fw_entry->data;
@@ -532,6 +536,9 @@ orinoco_dl_firmware(struct orinoco_private *priv,
532 536
533abort: 537abort:
534 release_firmware(fw_entry); 538 release_firmware(fw_entry);
539
540free:
541 kfree(pda);
535 return err; 542 return err;
536} 543}
537 544
@@ -549,12 +556,12 @@ symbol_dl_image(struct orinoco_private *priv, const struct fw_info *fw,
549 int secondary) 556 int secondary)
550{ 557{
551 hermes_t *hw = &priv->hw; 558 hermes_t *hw = &priv->hw;
552 int ret; 559 int ret = 0;
553 const unsigned char *ptr; 560 const unsigned char *ptr;
554 const unsigned char *first_block; 561 const unsigned char *first_block;
555 562
556 /* Plug Data Area (PDA) */ 563 /* Plug Data Area (PDA) */
557 __le16 pda[256]; 564 __le16 *pda = NULL;
558 565
559 /* Binary block begins after the 0x1A marker */ 566 /* Binary block begins after the 0x1A marker */
560 ptr = image; 567 ptr = image;
@@ -563,28 +570,33 @@ symbol_dl_image(struct orinoco_private *priv, const struct fw_info *fw,
563 570
564 /* Read the PDA from EEPROM */ 571 /* Read the PDA from EEPROM */
565 if (secondary) { 572 if (secondary) {
566 ret = hermes_read_pda(hw, pda, fw->pda_addr, sizeof(pda), 1); 573 pda = kzalloc(fw->pda_size, GFP_KERNEL);
574 if (!pda)
575 return -ENOMEM;
576
577 ret = hermes_read_pda(hw, pda, fw->pda_addr, fw->pda_size, 1);
567 if (ret) 578 if (ret)
568 return ret; 579 goto free;
569 } 580 }
570 581
571 /* Stop the firmware, so that it can be safely rewritten */ 582 /* Stop the firmware, so that it can be safely rewritten */
572 if (priv->stop_fw) { 583 if (priv->stop_fw) {
573 ret = priv->stop_fw(priv, 1); 584 ret = priv->stop_fw(priv, 1);
574 if (ret) 585 if (ret)
575 return ret; 586 goto free;
576 } 587 }
577 588
578 /* Program the adapter with new firmware */ 589 /* Program the adapter with new firmware */
579 ret = hermes_program(hw, first_block, end); 590 ret = hermes_program(hw, first_block, end);
580 if (ret) 591 if (ret)
581 return ret; 592 goto free;
582 593
583 /* Write the PDA to the adapter */ 594 /* Write the PDA to the adapter */
584 if (secondary) { 595 if (secondary) {
585 size_t len = hermes_blocks_length(first_block); 596 size_t len = hermes_blocks_length(first_block);
586 ptr = first_block + len; 597 ptr = first_block + len;
587 ret = hermes_apply_pda(hw, ptr, pda); 598 ret = hermes_apply_pda(hw, ptr, pda);
599 kfree(pda);
588 if (ret) 600 if (ret)
589 return ret; 601 return ret;
590 } 602 }
@@ -608,6 +620,10 @@ symbol_dl_image(struct orinoco_private *priv, const struct fw_info *fw,
608 return -ENODEV; 620 return -ENODEV;
609 621
610 return 0; 622 return 0;
623
624free:
625 kfree(pda);
626 return ret;
611} 627}
612 628
613 629
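Both orinoco download paths move the PDA from a large on-stack array to a heap allocation sized by fw->pda_size, and every failure now funnels through a single free label. That allocate-then-goto-free shape is sketched below; the helper functions are stubs standing in for hermes_read_pda() and the programming steps, and the -12 return value is just the numeric -ENOMEM.

/* Single-cleanup-label error handling, as introduced above. */
#include <stdio.h>
#include <stdlib.h>

static int read_pda(void *pda, size_t size)  { (void)pda; (void)size; return 0; }
static int program_adapter(void)             { return 0; }

static int download_firmware(size_t pda_size)
{
    int err = 0;
    void *pda = calloc(1, pda_size);          /* kzalloc(fw->pda_size, ...) */

    if (!pda)
        return -12;                           /* -ENOMEM */

    err = read_pda(pda, pda_size);
    if (err)
        goto free;

    err = program_adapter();
    if (err)
        goto free;

free:
    free(pda);                                /* single cleanup point */
    return err;
}

int main(void)
{
    printf("download: %d\n", download_firmware(1024));
    return 0;
}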
diff --git a/drivers/net/wireless/p54/p54common.c b/drivers/net/wireless/p54/p54common.c
index 117c7d3a52b0..827ca0384a4c 100644
--- a/drivers/net/wireless/p54/p54common.c
+++ b/drivers/net/wireless/p54/p54common.c
@@ -306,8 +306,8 @@ static int p54_convert_rev1(struct ieee80211_hw *dev,
306 return 0; 306 return 0;
307} 307}
308 308
309static const char *p54_rf_chips[] = { "NULL", "Indigo?", "Duette", 309static const char *p54_rf_chips[] = { "NULL", "Duette3", "Duette2",
310 "Frisbee", "Xbow", "Longbow" }; 310 "Frisbee", "Xbow", "Longbow", "NULL", "NULL" };
311static int p54_init_xbow_synth(struct ieee80211_hw *dev); 311static int p54_init_xbow_synth(struct ieee80211_hw *dev);
312 312
313static int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len) 313static int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
@@ -319,6 +319,7 @@ static int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
319 void *tmp; 319 void *tmp;
320 int err; 320 int err;
321 u8 *end = (u8 *)eeprom + len; 321 u8 *end = (u8 *)eeprom + len;
322 u16 synth = 0;
322 DECLARE_MAC_BUF(mac); 323 DECLARE_MAC_BUF(mac);
323 324
324 wrap = (struct eeprom_pda_wrap *) eeprom; 325 wrap = (struct eeprom_pda_wrap *) eeprom;
@@ -400,8 +401,8 @@ static int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
400 tmp = entry->data; 401 tmp = entry->data;
401 while ((u8 *)tmp < entry->data + data_len) { 402 while ((u8 *)tmp < entry->data + data_len) {
402 struct bootrec_exp_if *exp_if = tmp; 403 struct bootrec_exp_if *exp_if = tmp;
403 if (le16_to_cpu(exp_if->if_id) == 0xF) 404 if (le16_to_cpu(exp_if->if_id) == 0xf)
404 priv->rxhw = le16_to_cpu(exp_if->variant) & 0x07; 405 synth = le16_to_cpu(exp_if->variant);
405 tmp += sizeof(struct bootrec_exp_if); 406 tmp += sizeof(struct bootrec_exp_if);
406 } 407 }
407 break; 408 break;
@@ -421,28 +422,20 @@ static int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
421 entry = (void *)entry + (entry_len + 1)*2; 422 entry = (void *)entry + (entry_len + 1)*2;
422 } 423 }
423 424
424 if (!priv->iq_autocal || !priv->output_limit || !priv->curve_data) { 425 if (!synth || !priv->iq_autocal || !priv->output_limit ||
426 !priv->curve_data) {
425 printk(KERN_ERR "p54: not all required entries found in eeprom!\n"); 427 printk(KERN_ERR "p54: not all required entries found in eeprom!\n");
426 err = -EINVAL; 428 err = -EINVAL;
427 goto err; 429 goto err;
428 } 430 }
429 431
430 switch (priv->rxhw) { 432 priv->rxhw = synth & 0x07;
431 case 4: /* XBow */ 433 if (priv->rxhw == 4)
432 p54_init_xbow_synth(dev); 434 p54_init_xbow_synth(dev);
433 case 1: /* Indigo? */ 435 if (!(synth & 0x40))
434 case 2: /* Duette */
435 dev->wiphy->bands[IEEE80211_BAND_5GHZ] = &band_5GHz;
436 case 3: /* Frisbee */
437 case 5: /* Longbow */
438 dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &band_2GHz; 436 dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &band_2GHz;
439 break; 437 if (!(synth & 0x80))
440 default: 438 dev->wiphy->bands[IEEE80211_BAND_5GHZ] = &band_5GHz;
441 printk(KERN_ERR "%s: unsupported RF-Chip\n",
442 wiphy_name(dev->wiphy));
443 err = -EINVAL;
444 goto err;
445 }
446 439
447 if (!is_valid_ether_addr(dev->wiphy->perm_addr)) { 440 if (!is_valid_ether_addr(dev->wiphy->perm_addr)) {
448 u8 perm_addr[ETH_ALEN]; 441 u8 perm_addr[ETH_ALEN];
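p54_parse_eeprom() now records the whole exp_if variant word and decodes it after the EEPROM walk: the low three bits select the RF chip (rxhw), a clear 0x40 bit means the 2 GHz band is available, a clear 0x80 bit means 5 GHz is, and a zero word means the entry was missing entirely. The chip-name table is widened to eight entries so rxhw can index it safely. The decode is sketched below with a made-up EEPROM value.

/* Synth word decode, following the logic added above. */
#include <stdio.h>

static const char *p54_rf_chips[] = { "NULL", "Duette3", "Duette2",
    "Frisbee", "Xbow", "Longbow", "NULL", "NULL" };

int main(void)
{
    unsigned synth = 0x0084;          /* hypothetical EEPROM value */
    unsigned rxhw = synth & 0x07;

    printf("rf chip: %s\n", p54_rf_chips[rxhw]);
    printf("2 GHz: %s\n", (synth & 0x40) ? "no" : "yes");
    printf("5 GHz: %s\n", (synth & 0x80) ? "no" : "yes");
    return 0;
}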
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index 1c2a02a741af..88b3cad8b65e 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -346,68 +346,6 @@ static void p54p_tx(struct ieee80211_hw *dev, struct p54_control_hdr *data,
346 printk(KERN_INFO "%s: tx overflow.\n", wiphy_name(dev->wiphy)); 346 printk(KERN_INFO "%s: tx overflow.\n", wiphy_name(dev->wiphy));
347} 347}
348 348
349static int p54p_open(struct ieee80211_hw *dev)
350{
351 struct p54p_priv *priv = dev->priv;
352 int err;
353
354 init_completion(&priv->boot_comp);
355 err = request_irq(priv->pdev->irq, &p54p_interrupt,
356 IRQF_SHARED, "p54pci", dev);
357 if (err) {
358 printk(KERN_ERR "%s: failed to register IRQ handler\n",
359 wiphy_name(dev->wiphy));
360 return err;
361 }
362
363 memset(priv->ring_control, 0, sizeof(*priv->ring_control));
364 err = p54p_upload_firmware(dev);
365 if (err) {
366 free_irq(priv->pdev->irq, dev);
367 return err;
368 }
369 priv->rx_idx_data = priv->tx_idx_data = 0;
370 priv->rx_idx_mgmt = priv->tx_idx_mgmt = 0;
371
372 p54p_refill_rx_ring(dev, 0, priv->ring_control->rx_data,
373 ARRAY_SIZE(priv->ring_control->rx_data), priv->rx_buf_data);
374
375 p54p_refill_rx_ring(dev, 2, priv->ring_control->rx_mgmt,
376 ARRAY_SIZE(priv->ring_control->rx_mgmt), priv->rx_buf_mgmt);
377
378 P54P_WRITE(ring_control_base, cpu_to_le32(priv->ring_control_dma));
379 P54P_READ(ring_control_base);
380 wmb();
381 udelay(10);
382
383 P54P_WRITE(int_enable, cpu_to_le32(ISL38XX_INT_IDENT_INIT));
384 P54P_READ(int_enable);
385 wmb();
386 udelay(10);
387
388 P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET));
389 P54P_READ(dev_int);
390
391 if (!wait_for_completion_interruptible_timeout(&priv->boot_comp, HZ)) {
392 printk(KERN_ERR "%s: Cannot boot firmware!\n",
393 wiphy_name(dev->wiphy));
394 free_irq(priv->pdev->irq, dev);
395 return -ETIMEDOUT;
396 }
397
398 P54P_WRITE(int_enable, cpu_to_le32(ISL38XX_INT_IDENT_UPDATE));
399 P54P_READ(int_enable);
400 wmb();
401 udelay(10);
402
403 P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE));
404 P54P_READ(dev_int);
405 wmb();
406 udelay(10);
407
408 return 0;
409}
410
411static void p54p_stop(struct ieee80211_hw *dev) 349static void p54p_stop(struct ieee80211_hw *dev)
412{ 350{
413 struct p54p_priv *priv = dev->priv; 351 struct p54p_priv *priv = dev->priv;
@@ -474,6 +412,68 @@ static void p54p_stop(struct ieee80211_hw *dev)
474 memset(ring_control, 0, sizeof(*ring_control)); 412 memset(ring_control, 0, sizeof(*ring_control));
475} 413}
476 414
415static int p54p_open(struct ieee80211_hw *dev)
416{
417 struct p54p_priv *priv = dev->priv;
418 int err;
419
420 init_completion(&priv->boot_comp);
421 err = request_irq(priv->pdev->irq, &p54p_interrupt,
422 IRQF_SHARED, "p54pci", dev);
423 if (err) {
424 printk(KERN_ERR "%s: failed to register IRQ handler\n",
425 wiphy_name(dev->wiphy));
426 return err;
427 }
428
429 memset(priv->ring_control, 0, sizeof(*priv->ring_control));
430 err = p54p_upload_firmware(dev);
431 if (err) {
432 free_irq(priv->pdev->irq, dev);
433 return err;
434 }
435 priv->rx_idx_data = priv->tx_idx_data = 0;
436 priv->rx_idx_mgmt = priv->tx_idx_mgmt = 0;
437
438 p54p_refill_rx_ring(dev, 0, priv->ring_control->rx_data,
439 ARRAY_SIZE(priv->ring_control->rx_data), priv->rx_buf_data);
440
441 p54p_refill_rx_ring(dev, 2, priv->ring_control->rx_mgmt,
442 ARRAY_SIZE(priv->ring_control->rx_mgmt), priv->rx_buf_mgmt);
443
444 P54P_WRITE(ring_control_base, cpu_to_le32(priv->ring_control_dma));
445 P54P_READ(ring_control_base);
446 wmb();
447 udelay(10);
448
449 P54P_WRITE(int_enable, cpu_to_le32(ISL38XX_INT_IDENT_INIT));
450 P54P_READ(int_enable);
451 wmb();
452 udelay(10);
453
454 P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET));
455 P54P_READ(dev_int);
456
457 if (!wait_for_completion_interruptible_timeout(&priv->boot_comp, HZ)) {
458 printk(KERN_ERR "%s: Cannot boot firmware!\n",
459 wiphy_name(dev->wiphy));
460 p54p_stop(dev);
461 return -ETIMEDOUT;
462 }
463
464 P54P_WRITE(int_enable, cpu_to_le32(ISL38XX_INT_IDENT_UPDATE));
465 P54P_READ(int_enable);
466 wmb();
467 udelay(10);
468
469 P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE));
470 P54P_READ(dev_int);
471 wmb();
472 udelay(10);
473
474 return 0;
475}
476
477static int __devinit p54p_probe(struct pci_dev *pdev, 477static int __devinit p54p_probe(struct pci_dev *pdev,
478 const struct pci_device_id *id) 478 const struct pci_device_id *id)
479{ 479{
@@ -556,11 +556,13 @@ static int __devinit p54p_probe(struct pci_dev *pdev,
556 spin_lock_init(&priv->lock); 556 spin_lock_init(&priv->lock);
557 tasklet_init(&priv->rx_tasklet, p54p_rx_tasklet, (unsigned long)dev); 557 tasklet_init(&priv->rx_tasklet, p54p_rx_tasklet, (unsigned long)dev);
558 558
559 p54p_open(dev); 559 err = p54p_open(dev);
560 if (err)
561 goto err_free_common;
560 err = p54_read_eeprom(dev); 562 err = p54_read_eeprom(dev);
561 p54p_stop(dev); 563 p54p_stop(dev);
562 if (err) 564 if (err)
563 goto err_free_desc; 565 goto err_free_common;
564 566
565 err = ieee80211_register_hw(dev); 567 err = ieee80211_register_hw(dev);
566 if (err) { 568 if (err) {
@@ -573,8 +575,6 @@ static int __devinit p54p_probe(struct pci_dev *pdev,
573 575
574 err_free_common: 576 err_free_common:
575 p54_free_common(dev); 577 p54_free_common(dev);
576
577 err_free_desc:
578 pci_free_consistent(pdev, sizeof(*priv->ring_control), 578 pci_free_consistent(pdev, sizeof(*priv->ring_control),
579 priv->ring_control, priv->ring_control_dma); 579 priv->ring_control, priv->ring_control_dma);
580 580
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index f839ce044afd..95511ac22470 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -1,5 +1,5 @@
1menuconfig RT2X00 1menuconfig RT2X00
2 bool "Ralink driver support" 2 tristate "Ralink driver support"
3 depends on MAC80211 && WLAN_80211 && EXPERIMENTAL 3 depends on MAC80211 && WLAN_80211 && EXPERIMENTAL
4 ---help--- 4 ---help---
5 This will enable the experimental support for the Ralink drivers, 5 This will enable the experimental support for the Ralink drivers,
diff --git a/drivers/net/wireless/rtl8187_dev.c b/drivers/net/wireless/rtl8187_dev.c
index 431e3c78bf27..69eb0132593b 100644
--- a/drivers/net/wireless/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl8187_dev.c
@@ -48,6 +48,9 @@ static struct usb_device_id rtl8187_table[] __devinitdata = {
48 {USB_DEVICE(0x03f0, 0xca02), .driver_info = DEVICE_RTL8187}, 48 {USB_DEVICE(0x03f0, 0xca02), .driver_info = DEVICE_RTL8187},
49 /* Sitecom */ 49 /* Sitecom */
50 {USB_DEVICE(0x0df6, 0x000d), .driver_info = DEVICE_RTL8187}, 50 {USB_DEVICE(0x0df6, 0x000d), .driver_info = DEVICE_RTL8187},
51 {USB_DEVICE(0x0df6, 0x0028), .driver_info = DEVICE_RTL8187B},
52 /* Abocom */
53 {USB_DEVICE(0x13d1, 0xabe6), .driver_info = DEVICE_RTL8187},
51 {} 54 {}
52}; 55};
53 56
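
The rtl8187 hunk simply extends the usb_device_id table; each entry's .driver_info carries a per-device constant (DEVICE_RTL8187 vs DEVICE_RTL8187B) that the probe path can read back to pick the right variant handling. A compact stand-alone C analogue of such an id-table lookup (struct and names invented for illustration):

#include <stdio.h>

enum { RTL8187 = 1, RTL8187B = 2 };

struct usb_id { unsigned short vendor, product; int info; };

static const struct usb_id table[] = {
        { 0x0df6, 0x000d, RTL8187  },
        { 0x0df6, 0x0028, RTL8187B },
        { 0x13d1, 0xabe6, RTL8187  },
        { 0 }                            /* terminator, like the bare {} entry */
};

static int match(unsigned short v, unsigned short p)
{
        const struct usb_id *id;

        for (id = table; id->vendor; id++)
                if (id->vendor == v && id->product == p)
                        return id->info;
        return 0;
}

int main(void)
{
        printf("0df6:0028 -> %s\n",
               match(0x0df6, 0x0028) == RTL8187B ? "RTL8187B" : "other");
        return 0;
}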
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index fe1867b25ff7..cac732f4047f 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -615,7 +615,7 @@ static int filter_ack(struct ieee80211_hw *hw, struct ieee80211_hdr *rx_hdr,
615 struct ieee80211_hdr *tx_hdr; 615 struct ieee80211_hdr *tx_hdr;
616 616
617 tx_hdr = (struct ieee80211_hdr *)skb->data; 617 tx_hdr = (struct ieee80211_hdr *)skb->data;
618 if (likely(!compare_ether_addr(tx_hdr->addr2, rx_hdr->addr1))) 618 if (likely(!memcmp(tx_hdr->addr2, rx_hdr->addr1, ETH_ALEN)))
619 { 619 {
620 __skb_unlink(skb, q); 620 __skb_unlink(skb, q);
621 tx_status(hw, skb, IEEE80211_TX_STAT_ACK, stats->signal, 1); 621 tx_status(hw, skb, IEEE80211_TX_STAT_ACK, stats->signal, 1);
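
The zd_mac change swaps compare_ether_addr() for a plain memcmp() over ETH_ALEN bytes when matching an incoming ACK against a pending TX frame; both forms return zero when the two 6-byte addresses are identical. A stand-alone sketch of that comparison (constants redeclared so it builds outside the kernel):

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

int main(void)
{
        unsigned char tx_addr2[ETH_ALEN] = { 0x00, 0x1c, 0x42, 0x00, 0x00, 0x01 };
        unsigned char rx_addr1[ETH_ALEN] = { 0x00, 0x1c, 0x42, 0x00, 0x00, 0x01 };

        /* zero means "same station": the frame we sent is being ACKed */
        if (!memcmp(tx_addr2, rx_addr1, ETH_ALEN))
                puts("ACK matches a pending TX frame");
        return 0;
}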
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index a60ae86bd5c9..a3ccd8c1c716 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -61,6 +61,7 @@ static struct usb_device_id usb_ids[] = {
61 { USB_DEVICE(0x0105, 0x145f), .driver_info = DEVICE_ZD1211 }, 61 { USB_DEVICE(0x0105, 0x145f), .driver_info = DEVICE_ZD1211 },
62 /* ZD1211B */ 62 /* ZD1211B */
63 { USB_DEVICE(0x0ace, 0x1215), .driver_info = DEVICE_ZD1211B }, 63 { USB_DEVICE(0x0ace, 0x1215), .driver_info = DEVICE_ZD1211B },
64 { USB_DEVICE(0x0ace, 0xb215), .driver_info = DEVICE_ZD1211B },
64 { USB_DEVICE(0x157e, 0x300d), .driver_info = DEVICE_ZD1211B }, 65 { USB_DEVICE(0x157e, 0x300d), .driver_info = DEVICE_ZD1211B },
65 { USB_DEVICE(0x079b, 0x0062), .driver_info = DEVICE_ZD1211B }, 66 { USB_DEVICE(0x079b, 0x0062), .driver_info = DEVICE_ZD1211B },
66 { USB_DEVICE(0x1582, 0x6003), .driver_info = DEVICE_ZD1211B }, 67 { USB_DEVICE(0x1582, 0x6003), .driver_info = DEVICE_ZD1211B },
@@ -82,6 +83,7 @@ static struct usb_device_id usb_ids[] = {
82 { USB_DEVICE(0x0cde, 0x001a), .driver_info = DEVICE_ZD1211B }, 83 { USB_DEVICE(0x0cde, 0x001a), .driver_info = DEVICE_ZD1211B },
83 { USB_DEVICE(0x0586, 0x340a), .driver_info = DEVICE_ZD1211B }, 84 { USB_DEVICE(0x0586, 0x340a), .driver_info = DEVICE_ZD1211B },
84 { USB_DEVICE(0x0471, 0x1237), .driver_info = DEVICE_ZD1211B }, 85 { USB_DEVICE(0x0471, 0x1237), .driver_info = DEVICE_ZD1211B },
86 { USB_DEVICE(0x07fa, 0x1196), .driver_info = DEVICE_ZD1211B },
85 /* "Driverless" devices that need ejecting */ 87 /* "Driverless" devices that need ejecting */
86 { USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER }, 88 { USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER },
87 { USB_DEVICE(0x0ace, 0x20ff), .driver_info = DEVICE_INSTALLER }, 89 { USB_DEVICE(0x0ace, 0x20ff), .driver_info = DEVICE_INSTALLER },
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index c6948d8f53f6..6d017adc914a 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1785,7 +1785,7 @@ static int __devexit xennet_remove(struct xenbus_device *dev)
1785 return 0; 1785 return 0;
1786} 1786}
1787 1787
1788static struct xenbus_driver netfront = { 1788static struct xenbus_driver netfront_driver = {
1789 .name = "vif", 1789 .name = "vif",
1790 .owner = THIS_MODULE, 1790 .owner = THIS_MODULE,
1791 .ids = netfront_ids, 1791 .ids = netfront_ids,
@@ -1805,7 +1805,7 @@ static int __init netif_init(void)
1805 1805
1806 printk(KERN_INFO "Initialising Xen virtual ethernet driver.\n"); 1806 printk(KERN_INFO "Initialising Xen virtual ethernet driver.\n");
1807 1807
1808 return xenbus_register_frontend(&netfront); 1808 return xenbus_register_frontend(&netfront_driver);
1809} 1809}
1810module_init(netif_init); 1810module_init(netif_init);
1811 1811
@@ -1815,7 +1815,7 @@ static void __exit netif_exit(void)
1815 if (xen_initial_domain()) 1815 if (xen_initial_domain())
1816 return; 1816 return;
1817 1817
1818 xenbus_unregister_driver(&netfront); 1818 xenbus_unregister_driver(&netfront_driver);
1819} 1819}
1820module_exit(netif_exit); 1820module_exit(netif_exit);
1821 1821
diff --git a/drivers/net/xtsonic.c b/drivers/net/xtsonic.c
new file mode 100644
index 000000000000..da42aa06a3ba
--- /dev/null
+++ b/drivers/net/xtsonic.c
@@ -0,0 +1,319 @@
1/*
2 * xtsonic.c
3 *
4 * (C) 2001 - 2007 Tensilica Inc.
5 * Kevin Chea <kchea@yahoo.com>
6 * Marc Gauthier <marc@linux-xtensa.org>
7 * Chris Zankel <chris@zankel.net>
8 *
9 * (C) 1996,1998 by Thomas Bogendoerfer (tsbogend@alpha.franken.de)
10 *
11 * This driver is based on work from Andreas Busse, but most of
12 * the code is rewritten.
13 *
14 * (C) 1995 by Andreas Busse (andy@waldorf-gmbh.de)
15 *
16 * A driver for the onboard Sonic ethernet controller on the XT2000.
17 */
18
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/types.h>
22#include <linux/fcntl.h>
23#include <linux/interrupt.h>
24#include <linux/init.h>
25#include <linux/ioport.h>
26#include <linux/in.h>
27#include <linux/slab.h>
28#include <linux/string.h>
29#include <linux/delay.h>
30#include <linux/errno.h>
31#include <linux/netdevice.h>
32#include <linux/etherdevice.h>
33#include <linux/skbuff.h>
34#include <linux/platform_device.h>
35#include <linux/dma-mapping.h>
36
37#include <asm/io.h>
38#include <asm/pgtable.h>
39#include <asm/dma.h>
40
41static char xtsonic_string[] = "xtsonic";
42
43extern unsigned xtboard_nvram_valid(void);
44extern void xtboard_get_ether_addr(unsigned char *buf);
45
46#include "sonic.h"
47
48/*
49 * According to the documentation for the Sonic ethernet controller,
50 * EOBC should be 760 words (1520 bytes) for 32-bit applications, and,
51 * as such, 2 words less than the buffer size. The value for RBSIZE
52 * defined in sonic.h, however is only 1520.
53 *
54 * (Note that in 16-bit configurations, EOBC is 759 words (1518 bytes) and
55 * RBSIZE 1520 bytes)
56 */
57#undef SONIC_RBSIZE
58#define SONIC_RBSIZE 1524
59
60/*
61 * The chip provides 256 byte register space.
62 */
63#define SONIC_MEM_SIZE 0x100
64
65/*
66 * Macros to access SONIC registers
67 */
68#define SONIC_READ(reg) \
69 (0xffff & *((volatile unsigned int *)dev->base_addr+reg))
70
71#define SONIC_WRITE(reg,val) \
72 *((volatile unsigned int *)dev->base_addr+reg) = val
73
74
75/* Use 0 for production, 1 for verification, and >2 for debug */
76#ifdef SONIC_DEBUG
77static unsigned int sonic_debug = SONIC_DEBUG;
78#else
79static unsigned int sonic_debug = 1;
80#endif
81
82/*
83 * We cannot use station (ethernet) address prefixes to detect the
 84 * sonic controller since these are board manufacturer dependent.
85 * So we check for known Silicon Revision IDs instead.
86 */
87static unsigned short known_revisions[] =
88{
89 0x101, /* SONIC 83934 */
90 0xffff /* end of list */
91};
92
93static int xtsonic_open(struct net_device *dev)
94{
95 if (request_irq(dev->irq,&sonic_interrupt,IRQF_DISABLED,"sonic",dev)) {
96 printk(KERN_ERR "%s: unable to get IRQ %d.\n",
97 dev->name, dev->irq);
98 return -EAGAIN;
99 }
100 return sonic_open(dev);
101}
102
103static int xtsonic_close(struct net_device *dev)
104{
105 int err;
106 err = sonic_close(dev);
107 free_irq(dev->irq, dev);
108 return err;
109}
110
111static int __init sonic_probe1(struct net_device *dev)
112{
113 static unsigned version_printed = 0;
114 unsigned int silicon_revision;
115 struct sonic_local *lp = netdev_priv(dev);
116 unsigned int base_addr = dev->base_addr;
117 int i;
118 int err = 0;
119
120 if (!request_mem_region(base_addr, 0x100, xtsonic_string))
121 return -EBUSY;
122
123 /*
124 * get the Silicon Revision ID. If this is one of the known
125 * ones, assume that we found a SONIC ethernet controller at
126 * the expected location.
127 */
128 silicon_revision = SONIC_READ(SONIC_SR);
129 if (sonic_debug > 1)
130 printk("SONIC Silicon Revision = 0x%04x\n",silicon_revision);
131
132 i = 0;
133 while ((known_revisions[i] != 0xffff) &&
134 (known_revisions[i] != silicon_revision))
135 i++;
136
137 if (known_revisions[i] == 0xffff) {
138 printk("SONIC ethernet controller not found (0x%4x)\n",
139 silicon_revision);
140 return -ENODEV;
141 }
142
143 if (sonic_debug && version_printed++ == 0)
144 printk(version);
145
146 /*
147 * Put the sonic into software reset, then retrieve ethernet address.
148 * Note: we are assuming that the boot-loader has initialized the cam.
149 */
150 SONIC_WRITE(SONIC_CMD,SONIC_CR_RST);
151 SONIC_WRITE(SONIC_DCR,
152 SONIC_DCR_WC0|SONIC_DCR_DW|SONIC_DCR_LBR|SONIC_DCR_SBUS);
153 SONIC_WRITE(SONIC_CEP,0);
154 SONIC_WRITE(SONIC_IMR,0);
155
156 SONIC_WRITE(SONIC_CMD,SONIC_CR_RST);
157 SONIC_WRITE(SONIC_CEP,0);
158
159 for (i=0; i<3; i++) {
160 unsigned int val = SONIC_READ(SONIC_CAP0-i);
161 dev->dev_addr[i*2] = val;
162 dev->dev_addr[i*2+1] = val >> 8;
163 }
164
165 /* Initialize the device structure. */
166
167 lp->dma_bitmode = SONIC_BITMODE32;
168
169 /*
170 * Allocate local private descriptor areas in uncached space.
171 * The entire structure must be located within the same 64kb segment.
172 * A simple way to ensure this is to allocate twice the
173 * size of the structure -- given that the structure is
174 * much less than 64 kB, at least one of the halves of
175 * the allocated area will be contained entirely in 64 kB.
176 * We also allocate extra space for a pointer to allow freeing
177 * this structure later on (in xtsonic_cleanup_module()).
178 */
179 lp->descriptors =
180 dma_alloc_coherent(lp->device,
181 SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
182 &lp->descriptors_laddr, GFP_KERNEL);
183
184 if (lp->descriptors == NULL) {
185 printk(KERN_ERR "%s: couldn't alloc DMA memory for "
186 " descriptors.\n", lp->device->bus_id);
187 goto out;
188 }
189
190 lp->cda = lp->descriptors;
191 lp->tda = lp->cda + (SIZEOF_SONIC_CDA
192 * SONIC_BUS_SCALE(lp->dma_bitmode));
193 lp->rda = lp->tda + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
194 * SONIC_BUS_SCALE(lp->dma_bitmode));
195 lp->rra = lp->rda + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
196 * SONIC_BUS_SCALE(lp->dma_bitmode));
197
198 /* get the virtual dma address */
199
200 lp->cda_laddr = lp->descriptors_laddr;
201 lp->tda_laddr = lp->cda_laddr + (SIZEOF_SONIC_CDA
202 * SONIC_BUS_SCALE(lp->dma_bitmode));
203 lp->rda_laddr = lp->tda_laddr + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
204 * SONIC_BUS_SCALE(lp->dma_bitmode));
205 lp->rra_laddr = lp->rda_laddr + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
206 * SONIC_BUS_SCALE(lp->dma_bitmode));
207
208 dev->open = xtsonic_open;
209 dev->stop = xtsonic_close;
210 dev->hard_start_xmit = sonic_send_packet;
211 dev->get_stats = sonic_get_stats;
212 dev->set_multicast_list = &sonic_multicast_list;
213 dev->tx_timeout = sonic_tx_timeout;
214 dev->watchdog_timeo = TX_TIMEOUT;
215
216 /*
217 * clear tally counter
218 */
219 SONIC_WRITE(SONIC_CRCT,0xffff);
220 SONIC_WRITE(SONIC_FAET,0xffff);
221 SONIC_WRITE(SONIC_MPT,0xffff);
222
223 return 0;
224out:
225 release_region(dev->base_addr, SONIC_MEM_SIZE);
226 return err;
227}
228
229
230/*
231 * Probe for a SONIC ethernet controller on an XT2000 board.
232 * Actually probing is superfluous but we're paranoid.
233 */
234
235int __init xtsonic_probe(struct platform_device *pdev)
236{
237 struct net_device *dev;
238 struct sonic_local *lp;
239 struct resource *resmem, *resirq;
240 int err = 0;
241
242 DECLARE_MAC_BUF(mac);
243
244 if ((resmem = platform_get_resource(pdev, IORESOURCE_MEM, 0)) == NULL)
245 return -ENODEV;
246
247 if ((resirq = platform_get_resource(pdev, IORESOURCE_IRQ, 0)) == NULL)
248 return -ENODEV;
249
250 if ((dev = alloc_etherdev(sizeof(struct sonic_local))) == NULL)
251 return -ENOMEM;
252
253 lp = netdev_priv(dev);
254 lp->device = &pdev->dev;
255 SET_NETDEV_DEV(dev, &pdev->dev);
256 netdev_boot_setup_check(dev);
257
258 dev->base_addr = resmem->start;
259 dev->irq = resirq->start;
260
261 if ((err = sonic_probe1(dev)))
262 goto out;
263 if ((err = register_netdev(dev)))
264 goto out1;
265
266 printk("%s: SONIC ethernet @%08lx, MAC %s, IRQ %d\n", dev->name,
267 dev->base_addr, print_mac(mac, dev->dev_addr), dev->irq);
268
269 return 0;
270
271out1:
272 release_region(dev->base_addr, SONIC_MEM_SIZE);
273out:
274 free_netdev(dev);
275
276 return err;
277}
278
279MODULE_DESCRIPTION("Xtensa XT2000 SONIC ethernet driver");
280module_param(sonic_debug, int, 0);
281MODULE_PARM_DESC(sonic_debug, "xtsonic debug level (1-4)");
282
283#include "sonic.c"
284
285static int __devexit xtsonic_device_remove (struct platform_device *pdev)
286{
287 struct net_device *dev = platform_get_drvdata(pdev);
288 struct sonic_local *lp = netdev_priv(dev);
289
290 unregister_netdev(dev);
291 dma_free_coherent(lp->device,
292 SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
293 lp->descriptors, lp->descriptors_laddr);
294 release_region (dev->base_addr, SONIC_MEM_SIZE);
295 free_netdev(dev);
296
297 return 0;
298}
299
300static struct platform_driver xtsonic_driver = {
301 .probe = xtsonic_probe,
302 .remove = __devexit_p(xtsonic_device_remove),
303 .driver = {
304 .name = xtsonic_string,
305 },
306};
307
308static int __init xtsonic_init(void)
309{
310 return platform_driver_register(&xtsonic_driver);
311}
312
313static void __exit xtsonic_cleanup(void)
314{
315 platform_driver_unregister(&xtsonic_driver);
316}
317
318module_init(xtsonic_init);
319module_exit(xtsonic_cleanup);
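
The comment around the descriptor allocation in the new xtsonic driver describes the classic trick for keeping a DMA area inside one 64 KB segment: allocate twice the required size and use whichever half does not cross a boundary. A user-space sketch of that address arithmetic, assuming the area is no larger than one segment (all names here are illustrative):

#include <stdio.h>
#include <stdint.h>

#define SEG_SIZE 0x10000UL   /* 64 KiB segment */

/* Given a buffer of 2*len bytes starting at base, return a start address
 * such that [start, start+len) stays within a single 64 KiB segment.
 * Assumes len <= SEG_SIZE. */
static uintptr_t pick_half(uintptr_t base, size_t len)
{
        uintptr_t next_boundary = (base | (SEG_SIZE - 1)) + 1;

        if (base + len <= next_boundary)
                return base;             /* first half already fits */
        return next_boundary;            /* second half starts on the boundary */
}

int main(void)
{
        uintptr_t base = 0x2fff0;        /* deliberately close to a boundary */
        size_t len = 0x40;
        uintptr_t start = pick_half(base, len);

        printf("use %#lx..%#lx\n",
               (unsigned long)start, (unsigned long)(start + len));
        return 0;
}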
diff --git a/drivers/of/device.c b/drivers/of/device.c
index 51e5214071da..224ae6bc67b6 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -105,7 +105,16 @@ EXPORT_SYMBOL(of_release_dev);
105int of_device_register(struct of_device *ofdev) 105int of_device_register(struct of_device *ofdev)
106{ 106{
107 BUG_ON(ofdev->node == NULL); 107 BUG_ON(ofdev->node == NULL);
108 return device_register(&ofdev->dev); 108
109 device_initialize(&ofdev->dev);
110
111 /* device_add will assume that this device is on the same node as
112 * the parent. If there is no parent defined, set the node
113 * explicitly */
114 if (!ofdev->dev.parent)
115 set_dev_node(&ofdev->dev, of_node_to_nid(ofdev->node));
116
117 return device_add(&ofdev->dev);
109} 118}
110EXPORT_SYMBOL(of_device_register); 119EXPORT_SYMBOL(of_device_register);
111 120
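
device_register() is essentially device_initialize() followed by device_add(); the of/device.c hunk opens that sequence up so the NUMA node can be set in between when there is no parent to inherit it from. A minimal user-space model of the same split-and-adjust pattern (struct and helpers invented for illustration):

#include <stdio.h>

struct device { int numa_node; int registered; };

static void device_initialize(struct device *d) { d->numa_node = -1; }  /* "no node" */
static int  device_add(struct device *d)        { d->registered = 1; return 0; }

/* register = initialize + add; splitting it lets us patch fields in between */
static int of_device_register(struct device *d, int node_from_fdt, int has_parent)
{
        device_initialize(d);

        if (!has_parent)
                d->numa_node = node_from_fdt;    /* what set_dev_node() does */

        return device_add(d);
}

int main(void)
{
        struct device d;

        of_device_register(&d, 1, 0);
        printf("node=%d registered=%d\n", d.numa_node, d.registered);
        return 0;
}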
diff --git a/drivers/of/of_i2c.c b/drivers/of/of_i2c.c
index 6a98dc8aa30b..24bbef777c19 100644
--- a/drivers/of/of_i2c.c
+++ b/drivers/of/of_i2c.c
@@ -41,7 +41,7 @@ void of_register_i2c_devices(struct i2c_adapter *adap,
41 41
42 info.addr = *addr; 42 info.addr = *addr;
43 43
44 request_module(info.type); 44 request_module("%s", info.type);
45 45
46 result = i2c_new_device(adap, &info); 46 result = i2c_new_device(adap, &info);
47 if (result == NULL) { 47 if (result == NULL) {
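
The of_i2c.c one-liner is a format-string hardening fix: request_module(info.type) would interpret any '%' in a device-tree supplied string as a conversion specifier, while request_module("%s", info.type) treats it purely as data. The same distinction in a tiny user-space example:

#include <stdio.h>

int main(void)
{
        const char *untrusted = "weird%smodule";   /* imagine this came from firmware */

        /* printf(untrusted) would misparse the %s; this is the safe form */
        printf("%s\n", untrusted);
        return 0;
}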
diff --git a/drivers/of/of_spi.c b/drivers/of/of_spi.c
index b01eec026f68..bed0ed6dcdc1 100644
--- a/drivers/of/of_spi.c
+++ b/drivers/of/of_spi.c
@@ -61,6 +61,8 @@ void of_register_spi_devices(struct spi_master *master, struct device_node *np)
61 spi->mode |= SPI_CPHA; 61 spi->mode |= SPI_CPHA;
62 if (of_find_property(nc, "spi-cpol", NULL)) 62 if (of_find_property(nc, "spi-cpol", NULL))
63 spi->mode |= SPI_CPOL; 63 spi->mode |= SPI_CPOL;
64 if (of_find_property(nc, "spi-cs-high", NULL))
65 spi->mode |= SPI_CS_HIGH;
64 66
65 /* Device speed */ 67 /* Device speed */
66 prop = of_get_property(nc, "spi-max-frequency", &len); 68 prop = of_get_property(nc, "spi-max-frequency", &len);
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index ed982273fb8b..b55cd23ffdef 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -41,7 +41,6 @@ static cpumask_t marked_cpus = CPU_MASK_NONE;
41static DEFINE_SPINLOCK(task_mortuary); 41static DEFINE_SPINLOCK(task_mortuary);
42static void process_task_mortuary(void); 42static void process_task_mortuary(void);
43 43
44
45/* Take ownership of the task struct and place it on the 44/* Take ownership of the task struct and place it on the
46 * list for processing. Only after two full buffer syncs 45 * list for processing. Only after two full buffer syncs
47 * does the task eventually get freed, because by then 46 * does the task eventually get freed, because by then
@@ -341,7 +340,7 @@ static void add_trace_begin(void)
341 * Add IBS fetch and op entries to event buffer 340 * Add IBS fetch and op entries to event buffer
342 */ 341 */
343static void add_ibs_begin(struct oprofile_cpu_buffer *cpu_buf, int code, 342static void add_ibs_begin(struct oprofile_cpu_buffer *cpu_buf, int code,
344 int in_kernel, struct mm_struct *mm) 343 struct mm_struct *mm)
345{ 344{
346 unsigned long rip; 345 unsigned long rip;
347 int i, count; 346 int i, count;
@@ -565,9 +564,11 @@ void sync_buffer(int cpu)
565 struct task_struct *new; 564 struct task_struct *new;
566 unsigned long cookie = 0; 565 unsigned long cookie = 0;
567 int in_kernel = 1; 566 int in_kernel = 1;
568 unsigned int i;
569 sync_buffer_state state = sb_buffer_start; 567 sync_buffer_state state = sb_buffer_start;
568#ifndef CONFIG_OPROFILE_IBS
569 unsigned int i;
570 unsigned long available; 570 unsigned long available;
571#endif
571 572
572 mutex_lock(&buffer_mutex); 573 mutex_lock(&buffer_mutex);
573 574
@@ -575,9 +576,13 @@ void sync_buffer(int cpu)
575 576
576 /* Remember, only we can modify tail_pos */ 577 /* Remember, only we can modify tail_pos */
577 578
579#ifndef CONFIG_OPROFILE_IBS
578 available = get_slots(cpu_buf); 580 available = get_slots(cpu_buf);
579 581
580 for (i = 0; i < available; ++i) { 582 for (i = 0; i < available; ++i) {
583#else
584 while (get_slots(cpu_buf)) {
585#endif
581 struct op_sample *s = &cpu_buf->buffer[cpu_buf->tail_pos]; 586 struct op_sample *s = &cpu_buf->buffer[cpu_buf->tail_pos];
582 587
583 if (is_code(s->eip)) { 588 if (is_code(s->eip)) {
@@ -593,12 +598,10 @@ void sync_buffer(int cpu)
593#ifdef CONFIG_OPROFILE_IBS 598#ifdef CONFIG_OPROFILE_IBS
594 } else if (s->event == IBS_FETCH_BEGIN) { 599 } else if (s->event == IBS_FETCH_BEGIN) {
595 state = sb_bt_start; 600 state = sb_bt_start;
596 add_ibs_begin(cpu_buf, 601 add_ibs_begin(cpu_buf, IBS_FETCH_CODE, mm);
597 IBS_FETCH_CODE, in_kernel, mm);
598 } else if (s->event == IBS_OP_BEGIN) { 602 } else if (s->event == IBS_OP_BEGIN) {
599 state = sb_bt_start; 603 state = sb_bt_start;
600 add_ibs_begin(cpu_buf, 604 add_ibs_begin(cpu_buf, IBS_OP_CODE, mm);
601 IBS_OP_CODE, in_kernel, mm);
602#endif 605#endif
603 } else { 606 } else {
604 struct mm_struct *oldmm = mm; 607 struct mm_struct *oldmm = mm;
@@ -628,3 +631,27 @@ void sync_buffer(int cpu)
628 631
629 mutex_unlock(&buffer_mutex); 632 mutex_unlock(&buffer_mutex);
630} 633}
634
635/* The function can be used to add a buffer worth of data directly to
636 * the kernel buffer. The buffer is assumed to be a circular buffer.
637 * Take the entries from index start and end at index stop, wrapping
638 * at index max.
639 */
640void oprofile_put_buff(unsigned long *buf, unsigned int start,
641 unsigned int stop, unsigned int max)
642{
643 int i;
644
645 i = start;
646
647 mutex_lock(&buffer_mutex);
648 while (i != stop) {
649 add_event_entry(buf[i++]);
650
651 if (i >= max)
652 i = 0;
653 }
654
655 mutex_unlock(&buffer_mutex);
656}
657
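
The new oprofile_put_buff() drains a caller-supplied circular buffer: it walks from start towards stop, wrapping the index back to 0 whenever it reaches max, and hands each word to the event buffer while holding buffer_mutex. A minimal user-space model of that wrap-around walk:

#include <stdio.h>

static void put_buff(const unsigned long *buf, unsigned int start,
                     unsigned int stop, unsigned int max)
{
        unsigned int i = start;

        while (i != stop) {
                printf("%lu ", buf[i++]);   /* stands in for add_event_entry() */
                if (i >= max)
                        i = 0;              /* wrap around the ring */
        }
        printf("\n");
}

int main(void)
{
        unsigned long ring[4] = { 10, 11, 12, 13 };

        put_buff(ring, 2, 1, 4);            /* prints: 12 13 10 */
        return 0;
}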
diff --git a/drivers/oprofile/buffer_sync.h b/drivers/oprofile/buffer_sync.h
index 08866f6a96a3..3110732c1835 100644
--- a/drivers/oprofile/buffer_sync.h
+++ b/drivers/oprofile/buffer_sync.h
@@ -9,13 +9,13 @@
9 9
10#ifndef OPROFILE_BUFFER_SYNC_H 10#ifndef OPROFILE_BUFFER_SYNC_H
11#define OPROFILE_BUFFER_SYNC_H 11#define OPROFILE_BUFFER_SYNC_H
12 12
13/* add the necessary profiling hooks */ 13/* add the necessary profiling hooks */
14int sync_start(void); 14int sync_start(void);
15 15
16/* remove the hooks */ 16/* remove the hooks */
17void sync_stop(void); 17void sync_stop(void);
18 18
19/* sync the given CPU's buffer */ 19/* sync the given CPU's buffer */
20void sync_buffer(int cpu); 20void sync_buffer(int cpu);
21 21
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index e1bd5a937f6c..01d38e78cde1 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -22,7 +22,7 @@
22#include <linux/oprofile.h> 22#include <linux/oprofile.h>
23#include <linux/vmalloc.h> 23#include <linux/vmalloc.h>
24#include <linux/errno.h> 24#include <linux/errno.h>
25 25
26#include "event_buffer.h" 26#include "event_buffer.h"
27#include "cpu_buffer.h" 27#include "cpu_buffer.h"
28#include "buffer_sync.h" 28#include "buffer_sync.h"
@@ -38,27 +38,40 @@ static int work_enabled;
38void free_cpu_buffers(void) 38void free_cpu_buffers(void)
39{ 39{
40 int i; 40 int i;
41 41
42 for_each_online_cpu(i) { 42 for_each_possible_cpu(i) {
43 vfree(per_cpu(cpu_buffer, i).buffer); 43 vfree(per_cpu(cpu_buffer, i).buffer);
44 per_cpu(cpu_buffer, i).buffer = NULL; 44 per_cpu(cpu_buffer, i).buffer = NULL;
45 } 45 }
46} 46}
47 47
48unsigned long oprofile_get_cpu_buffer_size(void)
49{
50 return fs_cpu_buffer_size;
51}
52
53void oprofile_cpu_buffer_inc_smpl_lost(void)
54{
55 struct oprofile_cpu_buffer *cpu_buf
56 = &__get_cpu_var(cpu_buffer);
57
58 cpu_buf->sample_lost_overflow++;
59}
60
48int alloc_cpu_buffers(void) 61int alloc_cpu_buffers(void)
49{ 62{
50 int i; 63 int i;
51 64
52 unsigned long buffer_size = fs_cpu_buffer_size; 65 unsigned long buffer_size = fs_cpu_buffer_size;
53 66
54 for_each_online_cpu(i) { 67 for_each_possible_cpu(i) {
55 struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i); 68 struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);
56 69
57 b->buffer = vmalloc_node(sizeof(struct op_sample) * buffer_size, 70 b->buffer = vmalloc_node(sizeof(struct op_sample) * buffer_size,
58 cpu_to_node(i)); 71 cpu_to_node(i));
59 if (!b->buffer) 72 if (!b->buffer)
60 goto fail; 73 goto fail;
61 74
62 b->last_task = NULL; 75 b->last_task = NULL;
63 b->last_is_kernel = -1; 76 b->last_is_kernel = -1;
64 b->tracing = 0; 77 b->tracing = 0;
@@ -112,7 +125,7 @@ void end_cpu_work(void)
112} 125}
113 126
114/* Resets the cpu buffer to a sane state. */ 127/* Resets the cpu buffer to a sane state. */
115void cpu_buffer_reset(struct oprofile_cpu_buffer * cpu_buf) 128void cpu_buffer_reset(struct oprofile_cpu_buffer *cpu_buf)
116{ 129{
117 /* reset these to invalid values; the next sample 130 /* reset these to invalid values; the next sample
118 * collected will populate the buffer with proper 131 * collected will populate the buffer with proper
@@ -123,7 +136,7 @@ void cpu_buffer_reset(struct oprofile_cpu_buffer * cpu_buf)
123} 136}
124 137
125/* compute number of available slots in cpu_buffer queue */ 138/* compute number of available slots in cpu_buffer queue */
126static unsigned long nr_available_slots(struct oprofile_cpu_buffer const * b) 139static unsigned long nr_available_slots(struct oprofile_cpu_buffer const *b)
127{ 140{
128 unsigned long head = b->head_pos; 141 unsigned long head = b->head_pos;
129 unsigned long tail = b->tail_pos; 142 unsigned long tail = b->tail_pos;
@@ -134,7 +147,7 @@ static unsigned long nr_available_slots(struct oprofile_cpu_buffer const * b)
134 return tail + (b->buffer_size - head) - 1; 147 return tail + (b->buffer_size - head) - 1;
135} 148}
136 149
137static void increment_head(struct oprofile_cpu_buffer * b) 150static void increment_head(struct oprofile_cpu_buffer *b)
138{ 151{
139 unsigned long new_head = b->head_pos + 1; 152 unsigned long new_head = b->head_pos + 1;
140 153
@@ -149,17 +162,17 @@ static void increment_head(struct oprofile_cpu_buffer * b)
149} 162}
150 163
151static inline void 164static inline void
152add_sample(struct oprofile_cpu_buffer * cpu_buf, 165add_sample(struct oprofile_cpu_buffer *cpu_buf,
153 unsigned long pc, unsigned long event) 166 unsigned long pc, unsigned long event)
154{ 167{
155 struct op_sample * entry = &cpu_buf->buffer[cpu_buf->head_pos]; 168 struct op_sample *entry = &cpu_buf->buffer[cpu_buf->head_pos];
156 entry->eip = pc; 169 entry->eip = pc;
157 entry->event = event; 170 entry->event = event;
158 increment_head(cpu_buf); 171 increment_head(cpu_buf);
159} 172}
160 173
161static inline void 174static inline void
162add_code(struct oprofile_cpu_buffer * buffer, unsigned long value) 175add_code(struct oprofile_cpu_buffer *buffer, unsigned long value)
163{ 176{
164 add_sample(buffer, ESCAPE_CODE, value); 177 add_sample(buffer, ESCAPE_CODE, value);
165} 178}
@@ -173,10 +186,10 @@ add_code(struct oprofile_cpu_buffer * buffer, unsigned long value)
173 * pc. We tag this in the buffer by generating kernel enter/exit 186 * pc. We tag this in the buffer by generating kernel enter/exit
174 * events whenever is_kernel changes 187 * events whenever is_kernel changes
175 */ 188 */
176static int log_sample(struct oprofile_cpu_buffer * cpu_buf, unsigned long pc, 189static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
177 int is_kernel, unsigned long event) 190 int is_kernel, unsigned long event)
178{ 191{
179 struct task_struct * task; 192 struct task_struct *task;
180 193
181 cpu_buf->sample_received++; 194 cpu_buf->sample_received++;
182 195
@@ -205,7 +218,7 @@ static int log_sample(struct oprofile_cpu_buffer * cpu_buf, unsigned long pc,
205 cpu_buf->last_task = task; 218 cpu_buf->last_task = task;
206 add_code(cpu_buf, (unsigned long)task); 219 add_code(cpu_buf, (unsigned long)task);
207 } 220 }
208 221
209 add_sample(cpu_buf, pc, event); 222 add_sample(cpu_buf, pc, event);
210 return 1; 223 return 1;
211} 224}
@@ -222,7 +235,7 @@ static int oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
222 return 1; 235 return 1;
223} 236}
224 237
225static void oprofile_end_trace(struct oprofile_cpu_buffer * cpu_buf) 238static void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
226{ 239{
227 cpu_buf->tracing = 0; 240 cpu_buf->tracing = 0;
228} 241}
@@ -257,21 +270,23 @@ void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
257 270
258#ifdef CONFIG_OPROFILE_IBS 271#ifdef CONFIG_OPROFILE_IBS
259 272
260#define MAX_IBS_SAMPLE_SIZE 14 273#define MAX_IBS_SAMPLE_SIZE 14
261static int log_ibs_sample(struct oprofile_cpu_buffer *cpu_buf, 274
262 unsigned long pc, int is_kernel, unsigned int *ibs, int ibs_code) 275void oprofile_add_ibs_sample(struct pt_regs *const regs,
276 unsigned int *const ibs_sample, int ibs_code)
263{ 277{
278 int is_kernel = !user_mode(regs);
279 struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
264 struct task_struct *task; 280 struct task_struct *task;
265 281
266 cpu_buf->sample_received++; 282 cpu_buf->sample_received++;
267 283
268 if (nr_available_slots(cpu_buf) < MAX_IBS_SAMPLE_SIZE) { 284 if (nr_available_slots(cpu_buf) < MAX_IBS_SAMPLE_SIZE) {
285 /* we can't backtrace since we lost the source of this event */
269 cpu_buf->sample_lost_overflow++; 286 cpu_buf->sample_lost_overflow++;
270 return 0; 287 return;
271 } 288 }
272 289
273 is_kernel = !!is_kernel;
274
275 /* notice a switch from user->kernel or vice versa */ 290 /* notice a switch from user->kernel or vice versa */
276 if (cpu_buf->last_is_kernel != is_kernel) { 291 if (cpu_buf->last_is_kernel != is_kernel) {
277 cpu_buf->last_is_kernel = is_kernel; 292 cpu_buf->last_is_kernel = is_kernel;
@@ -281,7 +296,6 @@ static int log_ibs_sample(struct oprofile_cpu_buffer *cpu_buf,
281 /* notice a task switch */ 296 /* notice a task switch */
282 if (!is_kernel) { 297 if (!is_kernel) {
283 task = current; 298 task = current;
284
285 if (cpu_buf->last_task != task) { 299 if (cpu_buf->last_task != task) {
286 cpu_buf->last_task = task; 300 cpu_buf->last_task = task;
287 add_code(cpu_buf, (unsigned long)task); 301 add_code(cpu_buf, (unsigned long)task);
@@ -289,36 +303,17 @@ static int log_ibs_sample(struct oprofile_cpu_buffer *cpu_buf,
289 } 303 }
290 304
291 add_code(cpu_buf, ibs_code); 305 add_code(cpu_buf, ibs_code);
292 add_sample(cpu_buf, ibs[0], ibs[1]); 306 add_sample(cpu_buf, ibs_sample[0], ibs_sample[1]);
293 add_sample(cpu_buf, ibs[2], ibs[3]); 307 add_sample(cpu_buf, ibs_sample[2], ibs_sample[3]);
294 add_sample(cpu_buf, ibs[4], ibs[5]); 308 add_sample(cpu_buf, ibs_sample[4], ibs_sample[5]);
295 309
296 if (ibs_code == IBS_OP_BEGIN) { 310 if (ibs_code == IBS_OP_BEGIN) {
297 add_sample(cpu_buf, ibs[6], ibs[7]); 311 add_sample(cpu_buf, ibs_sample[6], ibs_sample[7]);
298 add_sample(cpu_buf, ibs[8], ibs[9]); 312 add_sample(cpu_buf, ibs_sample[8], ibs_sample[9]);
299 add_sample(cpu_buf, ibs[10], ibs[11]); 313 add_sample(cpu_buf, ibs_sample[10], ibs_sample[11]);
300 }
301
302 return 1;
303}
304
305void oprofile_add_ibs_sample(struct pt_regs *const regs,
306 unsigned int * const ibs_sample, u8 code)
307{
308 int is_kernel = !user_mode(regs);
309 unsigned long pc = profile_pc(regs);
310
311 struct oprofile_cpu_buffer *cpu_buf =
312 &per_cpu(cpu_buffer, smp_processor_id());
313
314 if (!backtrace_depth) {
315 log_ibs_sample(cpu_buf, pc, is_kernel, ibs_sample, code);
316 return;
317 } 314 }
318 315
319 /* if log_sample() fails we can't backtrace since we lost the source 316 if (backtrace_depth)
320 * of this event */
321 if (log_ibs_sample(cpu_buf, pc, is_kernel, ibs_sample, code))
322 oprofile_ops.backtrace(regs, backtrace_depth); 317 oprofile_ops.backtrace(regs, backtrace_depth);
323} 318}
324 319
@@ -363,11 +358,16 @@ void oprofile_add_trace(unsigned long pc)
363 */ 358 */
364static void wq_sync_buffer(struct work_struct *work) 359static void wq_sync_buffer(struct work_struct *work)
365{ 360{
366 struct oprofile_cpu_buffer * b = 361 struct oprofile_cpu_buffer *b =
367 container_of(work, struct oprofile_cpu_buffer, work.work); 362 container_of(work, struct oprofile_cpu_buffer, work.work);
368 if (b->cpu != smp_processor_id()) { 363 if (b->cpu != smp_processor_id()) {
369 printk(KERN_DEBUG "WQ on CPU%d, prefer CPU%d\n", 364 printk(KERN_DEBUG "WQ on CPU%d, prefer CPU%d\n",
370 smp_processor_id(), b->cpu); 365 smp_processor_id(), b->cpu);
366
367 if (!cpu_online(b->cpu)) {
368 cancel_delayed_work(&b->work);
369 return;
370 }
371 } 371 }
372 sync_buffer(b->cpu); 372 sync_buffer(b->cpu);
373 373
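
The cpu_buffer.c hunks keep the usual single-producer ring convention: one slot is always left empty so that head == tail unambiguously means "empty", and when head has wrapped past tail the free space is tail + (size - head) - 1. A small stand-alone check of that arithmetic (the non-wrapped branch is not visible in this hunk and is assumed here to be the conventional tail - head - 1):

#include <stdio.h>

static unsigned long nr_free(unsigned long head, unsigned long tail,
                             unsigned long size)
{
        if (head >= tail)
                return tail + (size - head) - 1;   /* matches the hunk above */
        return tail - head - 1;                    /* assumed for the other case */
}

int main(void)
{
        printf("%lu\n", nr_free(0, 0, 8));   /* empty ring: 7 usable slots */
        printf("%lu\n", nr_free(5, 2, 8));   /* 3 entries queued: 4 slots free */
        return 0;
}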
diff --git a/drivers/oprofile/cpu_buffer.h b/drivers/oprofile/cpu_buffer.h
index 9c44d004da69..d3cc26264db5 100644
--- a/drivers/oprofile/cpu_buffer.h
+++ b/drivers/oprofile/cpu_buffer.h
@@ -15,9 +15,9 @@
15#include <linux/workqueue.h> 15#include <linux/workqueue.h>
16#include <linux/cache.h> 16#include <linux/cache.h>
17#include <linux/sched.h> 17#include <linux/sched.h>
18 18
19struct task_struct; 19struct task_struct;
20 20
21int alloc_cpu_buffers(void); 21int alloc_cpu_buffers(void);
22void free_cpu_buffers(void); 22void free_cpu_buffers(void);
23 23
@@ -31,15 +31,15 @@ struct op_sample {
31 unsigned long eip; 31 unsigned long eip;
32 unsigned long event; 32 unsigned long event;
33}; 33};
34 34
35struct oprofile_cpu_buffer { 35struct oprofile_cpu_buffer {
36 volatile unsigned long head_pos; 36 volatile unsigned long head_pos;
37 volatile unsigned long tail_pos; 37 volatile unsigned long tail_pos;
38 unsigned long buffer_size; 38 unsigned long buffer_size;
39 struct task_struct * last_task; 39 struct task_struct *last_task;
40 int last_is_kernel; 40 int last_is_kernel;
41 int tracing; 41 int tracing;
42 struct op_sample * buffer; 42 struct op_sample *buffer;
43 unsigned long sample_received; 43 unsigned long sample_received;
44 unsigned long sample_lost_overflow; 44 unsigned long sample_lost_overflow;
45 unsigned long backtrace_aborted; 45 unsigned long backtrace_aborted;
@@ -50,7 +50,7 @@ struct oprofile_cpu_buffer {
50 50
51DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer); 51DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
52 52
53void cpu_buffer_reset(struct oprofile_cpu_buffer * cpu_buf); 53void cpu_buffer_reset(struct oprofile_cpu_buffer *cpu_buf);
54 54
55/* transient events for the CPU buffer -> event buffer */ 55/* transient events for the CPU buffer -> event buffer */
56#define CPU_IS_KERNEL 1 56#define CPU_IS_KERNEL 1
diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
index 8d692a5c8e73..191a3202cecc 100644
--- a/drivers/oprofile/event_buffer.c
+++ b/drivers/oprofile/event_buffer.c
@@ -19,16 +19,16 @@
19#include <linux/dcookies.h> 19#include <linux/dcookies.h>
20#include <linux/fs.h> 20#include <linux/fs.h>
21#include <asm/uaccess.h> 21#include <asm/uaccess.h>
22 22
23#include "oprof.h" 23#include "oprof.h"
24#include "event_buffer.h" 24#include "event_buffer.h"
25#include "oprofile_stats.h" 25#include "oprofile_stats.h"
26 26
27DEFINE_MUTEX(buffer_mutex); 27DEFINE_MUTEX(buffer_mutex);
28 28
29static unsigned long buffer_opened; 29static unsigned long buffer_opened;
30static DECLARE_WAIT_QUEUE_HEAD(buffer_wait); 30static DECLARE_WAIT_QUEUE_HEAD(buffer_wait);
31static unsigned long * event_buffer; 31static unsigned long *event_buffer;
32static unsigned long buffer_size; 32static unsigned long buffer_size;
33static unsigned long buffer_watershed; 33static unsigned long buffer_watershed;
34static size_t buffer_pos; 34static size_t buffer_pos;
@@ -66,7 +66,7 @@ void wake_up_buffer_waiter(void)
66 mutex_unlock(&buffer_mutex); 66 mutex_unlock(&buffer_mutex);
67} 67}
68 68
69 69
70int alloc_event_buffer(void) 70int alloc_event_buffer(void)
71{ 71{
72 int err = -ENOMEM; 72 int err = -ENOMEM;
@@ -76,13 +76,13 @@ int alloc_event_buffer(void)
76 buffer_size = fs_buffer_size; 76 buffer_size = fs_buffer_size;
77 buffer_watershed = fs_buffer_watershed; 77 buffer_watershed = fs_buffer_watershed;
78 spin_unlock_irqrestore(&oprofilefs_lock, flags); 78 spin_unlock_irqrestore(&oprofilefs_lock, flags);
79 79
80 if (buffer_watershed >= buffer_size) 80 if (buffer_watershed >= buffer_size)
81 return -EINVAL; 81 return -EINVAL;
82 82
83 event_buffer = vmalloc(sizeof(unsigned long) * buffer_size); 83 event_buffer = vmalloc(sizeof(unsigned long) * buffer_size);
84 if (!event_buffer) 84 if (!event_buffer)
85 goto out; 85 goto out;
86 86
87 err = 0; 87 err = 0;
88out: 88out:
@@ -97,15 +97,15 @@ void free_event_buffer(void)
97 event_buffer = NULL; 97 event_buffer = NULL;
98} 98}
99 99
100 100
101static int event_buffer_open(struct inode * inode, struct file * file) 101static int event_buffer_open(struct inode *inode, struct file *file)
102{ 102{
103 int err = -EPERM; 103 int err = -EPERM;
104 104
105 if (!capable(CAP_SYS_ADMIN)) 105 if (!capable(CAP_SYS_ADMIN))
106 return -EPERM; 106 return -EPERM;
107 107
108 if (test_and_set_bit(0, &buffer_opened)) 108 if (test_and_set_bit_lock(0, &buffer_opened))
109 return -EBUSY; 109 return -EBUSY;
110 110
111 /* Register as a user of dcookies 111 /* Register as a user of dcookies
@@ -116,38 +116,38 @@ static int event_buffer_open(struct inode * inode, struct file * file)
116 file->private_data = dcookie_register(); 116 file->private_data = dcookie_register();
117 if (!file->private_data) 117 if (!file->private_data)
118 goto out; 118 goto out;
119 119
120 if ((err = oprofile_setup())) 120 if ((err = oprofile_setup()))
121 goto fail; 121 goto fail;
122 122
123 /* NB: the actual start happens from userspace 123 /* NB: the actual start happens from userspace
124 * echo 1 >/dev/oprofile/enable 124 * echo 1 >/dev/oprofile/enable
125 */ 125 */
126 126
127 return 0; 127 return 0;
128 128
129fail: 129fail:
130 dcookie_unregister(file->private_data); 130 dcookie_unregister(file->private_data);
131out: 131out:
132 clear_bit(0, &buffer_opened); 132 __clear_bit_unlock(0, &buffer_opened);
133 return err; 133 return err;
134} 134}
135 135
136 136
137static int event_buffer_release(struct inode * inode, struct file * file) 137static int event_buffer_release(struct inode *inode, struct file *file)
138{ 138{
139 oprofile_stop(); 139 oprofile_stop();
140 oprofile_shutdown(); 140 oprofile_shutdown();
141 dcookie_unregister(file->private_data); 141 dcookie_unregister(file->private_data);
142 buffer_pos = 0; 142 buffer_pos = 0;
143 atomic_set(&buffer_ready, 0); 143 atomic_set(&buffer_ready, 0);
144 clear_bit(0, &buffer_opened); 144 __clear_bit_unlock(0, &buffer_opened);
145 return 0; 145 return 0;
146} 146}
147 147
148 148
149static ssize_t event_buffer_read(struct file * file, char __user * buf, 149static ssize_t event_buffer_read(struct file *file, char __user *buf,
150 size_t count, loff_t * offset) 150 size_t count, loff_t *offset)
151{ 151{
152 int retval = -EINVAL; 152 int retval = -EINVAL;
153 size_t const max = buffer_size * sizeof(unsigned long); 153 size_t const max = buffer_size * sizeof(unsigned long);
@@ -172,18 +172,18 @@ static ssize_t event_buffer_read(struct file * file, char __user * buf,
172 retval = -EFAULT; 172 retval = -EFAULT;
173 173
174 count = buffer_pos * sizeof(unsigned long); 174 count = buffer_pos * sizeof(unsigned long);
175 175
176 if (copy_to_user(buf, event_buffer, count)) 176 if (copy_to_user(buf, event_buffer, count))
177 goto out; 177 goto out;
178 178
179 retval = count; 179 retval = count;
180 buffer_pos = 0; 180 buffer_pos = 0;
181 181
182out: 182out:
183 mutex_unlock(&buffer_mutex); 183 mutex_unlock(&buffer_mutex);
184 return retval; 184 return retval;
185} 185}
186 186
187const struct file_operations event_buffer_fops = { 187const struct file_operations event_buffer_fops = {
188 .open = event_buffer_open, 188 .open = event_buffer_open,
189 .release = event_buffer_release, 189 .release = event_buffer_release,
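
Switching the single-opener flag to test_and_set_bit_lock()/__clear_bit_unlock() gives it acquire/release semantics: only one opener gets through, and the opener's prior stores are ordered before the flag is dropped. A C11 user-space analogue of the same exclusive-open gate (this is an illustration, not the kernel bitops API):

#include <stdio.h>
#include <stdatomic.h>

static atomic_flag buffer_opened = ATOMIC_FLAG_INIT;

static int event_buffer_open(void)
{
        /* acquire: only one caller wins; losers see "busy" */
        if (atomic_flag_test_and_set_explicit(&buffer_opened, memory_order_acquire))
                return -1;      /* -EBUSY in the kernel */
        return 0;
}

static void event_buffer_release(void)
{
        /* release: earlier stores are visible before the flag clears */
        atomic_flag_clear_explicit(&buffer_opened, memory_order_release);
}

int main(void)
{
        printf("first open:  %d\n", event_buffer_open());
        printf("second open: %d\n", event_buffer_open());
        event_buffer_release();
        printf("after close: %d\n", event_buffer_open());
        return 0;
}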
diff --git a/drivers/oprofile/event_buffer.h b/drivers/oprofile/event_buffer.h
index 5076ed1ebd8f..4e70749f8d16 100644
--- a/drivers/oprofile/event_buffer.h
+++ b/drivers/oprofile/event_buffer.h
@@ -10,13 +10,20 @@
10#ifndef EVENT_BUFFER_H 10#ifndef EVENT_BUFFER_H
11#define EVENT_BUFFER_H 11#define EVENT_BUFFER_H
12 12
13#include <linux/types.h> 13#include <linux/types.h>
14#include <asm/mutex.h> 14#include <asm/mutex.h>
15 15
16int alloc_event_buffer(void); 16int alloc_event_buffer(void);
17 17
18void free_event_buffer(void); 18void free_event_buffer(void);
19 19
20/**
21 * Add data to the event buffer.
22 * The data passed is free-form, but typically consists of
23 * file offsets, dcookies, context information, and ESCAPE codes.
24 */
25void add_event_entry(unsigned long data);
26
20/* wake up the process sleeping on the event file */ 27/* wake up the process sleeping on the event file */
21void wake_up_buffer_waiter(void); 28void wake_up_buffer_waiter(void);
22 29
@@ -24,10 +31,10 @@ void wake_up_buffer_waiter(void);
24#define NO_COOKIE 0UL 31#define NO_COOKIE 0UL
25 32
26extern const struct file_operations event_buffer_fops; 33extern const struct file_operations event_buffer_fops;
27 34
28/* mutex between sync_cpu_buffers() and the 35/* mutex between sync_cpu_buffers() and the
29 * file reading code. 36 * file reading code.
30 */ 37 */
31extern struct mutex buffer_mutex; 38extern struct mutex buffer_mutex;
32 39
33#endif /* EVENT_BUFFER_H */ 40#endif /* EVENT_BUFFER_H */
diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
index 2c645170f06e..cd375907f26f 100644
--- a/drivers/oprofile/oprof.c
+++ b/drivers/oprofile/oprof.c
@@ -19,7 +19,7 @@
19#include "cpu_buffer.h" 19#include "cpu_buffer.h"
20#include "buffer_sync.h" 20#include "buffer_sync.h"
21#include "oprofile_stats.h" 21#include "oprofile_stats.h"
22 22
23struct oprofile_operations oprofile_ops; 23struct oprofile_operations oprofile_ops;
24 24
25unsigned long oprofile_started; 25unsigned long oprofile_started;
@@ -36,7 +36,7 @@ static int timer = 0;
36int oprofile_setup(void) 36int oprofile_setup(void)
37{ 37{
38 int err; 38 int err;
39 39
40 mutex_lock(&start_mutex); 40 mutex_lock(&start_mutex);
41 41
42 if ((err = alloc_cpu_buffers())) 42 if ((err = alloc_cpu_buffers()))
@@ -44,10 +44,10 @@ int oprofile_setup(void)
44 44
45 if ((err = alloc_event_buffer())) 45 if ((err = alloc_event_buffer()))
46 goto out1; 46 goto out1;
47 47
48 if (oprofile_ops.setup && (err = oprofile_ops.setup())) 48 if (oprofile_ops.setup && (err = oprofile_ops.setup()))
49 goto out2; 49 goto out2;
50 50
51 /* Note even though this starts part of the 51 /* Note even though this starts part of the
52 * profiling overhead, it's necessary to prevent 52 * profiling overhead, it's necessary to prevent
53 * us missing task deaths and eventually oopsing 53 * us missing task deaths and eventually oopsing
@@ -74,7 +74,7 @@ post_sync:
74 is_setup = 1; 74 is_setup = 1;
75 mutex_unlock(&start_mutex); 75 mutex_unlock(&start_mutex);
76 return 0; 76 return 0;
77 77
78out3: 78out3:
79 if (oprofile_ops.shutdown) 79 if (oprofile_ops.shutdown)
80 oprofile_ops.shutdown(); 80 oprofile_ops.shutdown();
@@ -92,17 +92,17 @@ out:
92int oprofile_start(void) 92int oprofile_start(void)
93{ 93{
94 int err = -EINVAL; 94 int err = -EINVAL;
95 95
96 mutex_lock(&start_mutex); 96 mutex_lock(&start_mutex);
97 97
98 if (!is_setup) 98 if (!is_setup)
99 goto out; 99 goto out;
100 100
101 err = 0; 101 err = 0;
102 102
103 if (oprofile_started) 103 if (oprofile_started)
104 goto out; 104 goto out;
105 105
106 oprofile_reset_stats(); 106 oprofile_reset_stats();
107 107
108 if ((err = oprofile_ops.start())) 108 if ((err = oprofile_ops.start()))
@@ -114,7 +114,7 @@ out:
114 return err; 114 return err;
115} 115}
116 116
117 117
118/* echo 0>/dev/oprofile/enable */ 118/* echo 0>/dev/oprofile/enable */
119void oprofile_stop(void) 119void oprofile_stop(void)
120{ 120{
@@ -204,13 +204,13 @@ static void __exit oprofile_exit(void)
204 oprofile_arch_exit(); 204 oprofile_arch_exit();
205} 205}
206 206
207 207
208module_init(oprofile_init); 208module_init(oprofile_init);
209module_exit(oprofile_exit); 209module_exit(oprofile_exit);
210 210
211module_param_named(timer, timer, int, 0644); 211module_param_named(timer, timer, int, 0644);
212MODULE_PARM_DESC(timer, "force use of timer interrupt"); 212MODULE_PARM_DESC(timer, "force use of timer interrupt");
213 213
214MODULE_LICENSE("GPL"); 214MODULE_LICENSE("GPL");
215MODULE_AUTHOR("John Levon <levon@movementarian.org>"); 215MODULE_AUTHOR("John Levon <levon@movementarian.org>");
216MODULE_DESCRIPTION("OProfile system profiler"); 216MODULE_DESCRIPTION("OProfile system profiler");
diff --git a/drivers/oprofile/oprof.h b/drivers/oprofile/oprof.h
index 18323650806e..5df0c21a608f 100644
--- a/drivers/oprofile/oprof.h
+++ b/drivers/oprofile/oprof.h
@@ -11,7 +11,7 @@
11#define OPROF_H 11#define OPROF_H
12 12
13int oprofile_setup(void); 13int oprofile_setup(void);
14void oprofile_shutdown(void); 14void oprofile_shutdown(void);
15 15
16int oprofilefs_register(void); 16int oprofilefs_register(void);
17void oprofilefs_unregister(void); 17void oprofilefs_unregister(void);
@@ -20,20 +20,20 @@ int oprofile_start(void);
20void oprofile_stop(void); 20void oprofile_stop(void);
21 21
22struct oprofile_operations; 22struct oprofile_operations;
23 23
24extern unsigned long fs_buffer_size; 24extern unsigned long fs_buffer_size;
25extern unsigned long fs_cpu_buffer_size; 25extern unsigned long fs_cpu_buffer_size;
26extern unsigned long fs_buffer_watershed; 26extern unsigned long fs_buffer_watershed;
27extern struct oprofile_operations oprofile_ops; 27extern struct oprofile_operations oprofile_ops;
28extern unsigned long oprofile_started; 28extern unsigned long oprofile_started;
29extern unsigned long backtrace_depth; 29extern unsigned long backtrace_depth;
30 30
31struct super_block; 31struct super_block;
32struct dentry; 32struct dentry;
33 33
34void oprofile_create_files(struct super_block * sb, struct dentry * root); 34void oprofile_create_files(struct super_block *sb, struct dentry *root);
35void oprofile_timer_init(struct oprofile_operations * ops); 35void oprofile_timer_init(struct oprofile_operations *ops);
36 36
37int oprofile_set_backtrace(unsigned long depth); 37int oprofile_set_backtrace(unsigned long depth);
38 38
39#endif /* OPROF_H */ 39#endif /* OPROF_H */
diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
index ef953ba5ab6b..cc106d503ace 100644
--- a/drivers/oprofile/oprofile_files.c
+++ b/drivers/oprofile/oprofile_files.c
@@ -13,18 +13,18 @@
13#include "event_buffer.h" 13#include "event_buffer.h"
14#include "oprofile_stats.h" 14#include "oprofile_stats.h"
15#include "oprof.h" 15#include "oprof.h"
16 16
17unsigned long fs_buffer_size = 131072; 17unsigned long fs_buffer_size = 131072;
18unsigned long fs_cpu_buffer_size = 8192; 18unsigned long fs_cpu_buffer_size = 8192;
19unsigned long fs_buffer_watershed = 32768; /* FIXME: tune */ 19unsigned long fs_buffer_watershed = 32768; /* FIXME: tune */
20 20
21static ssize_t depth_read(struct file * file, char __user * buf, size_t count, loff_t * offset) 21static ssize_t depth_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
22{ 22{
23 return oprofilefs_ulong_to_user(backtrace_depth, buf, count, offset); 23 return oprofilefs_ulong_to_user(backtrace_depth, buf, count, offset);
24} 24}
25 25
26 26
27static ssize_t depth_write(struct file * file, char const __user * buf, size_t count, loff_t * offset) 27static ssize_t depth_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
28{ 28{
29 unsigned long val; 29 unsigned long val;
30 int retval; 30 int retval;
@@ -49,8 +49,8 @@ static const struct file_operations depth_fops = {
49 .write = depth_write 49 .write = depth_write
50}; 50};
51 51
52 52
53static ssize_t pointer_size_read(struct file * file, char __user * buf, size_t count, loff_t * offset) 53static ssize_t pointer_size_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
54{ 54{
55 return oprofilefs_ulong_to_user(sizeof(void *), buf, count, offset); 55 return oprofilefs_ulong_to_user(sizeof(void *), buf, count, offset);
56} 56}
@@ -61,24 +61,24 @@ static const struct file_operations pointer_size_fops = {
61}; 61};
62 62
63 63
64static ssize_t cpu_type_read(struct file * file, char __user * buf, size_t count, loff_t * offset) 64static ssize_t cpu_type_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
65{ 65{
66 return oprofilefs_str_to_user(oprofile_ops.cpu_type, buf, count, offset); 66 return oprofilefs_str_to_user(oprofile_ops.cpu_type, buf, count, offset);
67} 67}
68 68
69 69
70static const struct file_operations cpu_type_fops = { 70static const struct file_operations cpu_type_fops = {
71 .read = cpu_type_read, 71 .read = cpu_type_read,
72}; 72};
73 73
74 74
75static ssize_t enable_read(struct file * file, char __user * buf, size_t count, loff_t * offset) 75static ssize_t enable_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
76{ 76{
77 return oprofilefs_ulong_to_user(oprofile_started, buf, count, offset); 77 return oprofilefs_ulong_to_user(oprofile_started, buf, count, offset);
78} 78}
79 79
80 80
81static ssize_t enable_write(struct file * file, char const __user * buf, size_t count, loff_t * offset) 81static ssize_t enable_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
82{ 82{
83 unsigned long val; 83 unsigned long val;
84 int retval; 84 int retval;
@@ -89,7 +89,7 @@ static ssize_t enable_write(struct file * file, char const __user * buf, size_t
89 retval = oprofilefs_ulong_from_user(&val, buf, count); 89 retval = oprofilefs_ulong_from_user(&val, buf, count);
90 if (retval) 90 if (retval)
91 return retval; 91 return retval;
92 92
93 if (val) 93 if (val)
94 retval = oprofile_start(); 94 retval = oprofile_start();
95 else 95 else
@@ -100,14 +100,14 @@ static ssize_t enable_write(struct file * file, char const __user * buf, size_t
100 return count; 100 return count;
101} 101}
102 102
103 103
104static const struct file_operations enable_fops = { 104static const struct file_operations enable_fops = {
105 .read = enable_read, 105 .read = enable_read,
106 .write = enable_write, 106 .write = enable_write,
107}; 107};
108 108
109 109
110static ssize_t dump_write(struct file * file, char const __user * buf, size_t count, loff_t * offset) 110static ssize_t dump_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
111{ 111{
112 wake_up_buffer_waiter(); 112 wake_up_buffer_waiter();
113 return count; 113 return count;
@@ -117,8 +117,8 @@ static ssize_t dump_write(struct file * file, char const __user * buf, size_t co
117static const struct file_operations dump_fops = { 117static const struct file_operations dump_fops = {
118 .write = dump_write, 118 .write = dump_write,
119}; 119};
120 120
121void oprofile_create_files(struct super_block * sb, struct dentry * root) 121void oprofile_create_files(struct super_block *sb, struct dentry *root)
122{ 122{
123 oprofilefs_create_file(sb, root, "enable", &enable_fops); 123 oprofilefs_create_file(sb, root, "enable", &enable_fops);
124 oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666); 124 oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666);
@@ -126,7 +126,7 @@ void oprofile_create_files(struct super_block * sb, struct dentry * root)
126 oprofilefs_create_ulong(sb, root, "buffer_size", &fs_buffer_size); 126 oprofilefs_create_ulong(sb, root, "buffer_size", &fs_buffer_size);
127 oprofilefs_create_ulong(sb, root, "buffer_watershed", &fs_buffer_watershed); 127 oprofilefs_create_ulong(sb, root, "buffer_watershed", &fs_buffer_watershed);
128 oprofilefs_create_ulong(sb, root, "cpu_buffer_size", &fs_cpu_buffer_size); 128 oprofilefs_create_ulong(sb, root, "cpu_buffer_size", &fs_cpu_buffer_size);
129 oprofilefs_create_file(sb, root, "cpu_type", &cpu_type_fops); 129 oprofilefs_create_file(sb, root, "cpu_type", &cpu_type_fops);
130 oprofilefs_create_file(sb, root, "backtrace_depth", &depth_fops); 130 oprofilefs_create_file(sb, root, "backtrace_depth", &depth_fops);
131 oprofilefs_create_file(sb, root, "pointer_size", &pointer_size_fops); 131 oprofilefs_create_file(sb, root, "pointer_size", &pointer_size_fops);
132 oprofile_create_stats_files(sb, root); 132 oprofile_create_stats_files(sb, root);
diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
index f99b28e7b79a..e1f6ce03705e 100644
--- a/drivers/oprofile/oprofile_stats.c
+++ b/drivers/oprofile/oprofile_stats.c
@@ -11,17 +11,17 @@
11#include <linux/smp.h> 11#include <linux/smp.h>
12#include <linux/cpumask.h> 12#include <linux/cpumask.h>
13#include <linux/threads.h> 13#include <linux/threads.h>
14 14
15#include "oprofile_stats.h" 15#include "oprofile_stats.h"
16#include "cpu_buffer.h" 16#include "cpu_buffer.h"
17 17
18struct oprofile_stat_struct oprofile_stats; 18struct oprofile_stat_struct oprofile_stats;
19 19
20void oprofile_reset_stats(void) 20void oprofile_reset_stats(void)
21{ 21{
22 struct oprofile_cpu_buffer * cpu_buf; 22 struct oprofile_cpu_buffer *cpu_buf;
23 int i; 23 int i;
24 24
25 for_each_possible_cpu(i) { 25 for_each_possible_cpu(i) {
26 cpu_buf = &per_cpu(cpu_buffer, i); 26 cpu_buf = &per_cpu(cpu_buffer, i);
27 cpu_buf->sample_received = 0; 27 cpu_buf->sample_received = 0;
@@ -29,18 +29,18 @@ void oprofile_reset_stats(void)
29 cpu_buf->backtrace_aborted = 0; 29 cpu_buf->backtrace_aborted = 0;
30 cpu_buf->sample_invalid_eip = 0; 30 cpu_buf->sample_invalid_eip = 0;
31 } 31 }
32 32
33 atomic_set(&oprofile_stats.sample_lost_no_mm, 0); 33 atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
34 atomic_set(&oprofile_stats.sample_lost_no_mapping, 0); 34 atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
35 atomic_set(&oprofile_stats.event_lost_overflow, 0); 35 atomic_set(&oprofile_stats.event_lost_overflow, 0);
36} 36}
37 37
38 38
39void oprofile_create_stats_files(struct super_block * sb, struct dentry * root) 39void oprofile_create_stats_files(struct super_block *sb, struct dentry *root)
40{ 40{
41 struct oprofile_cpu_buffer * cpu_buf; 41 struct oprofile_cpu_buffer *cpu_buf;
42 struct dentry * cpudir; 42 struct dentry *cpudir;
43 struct dentry * dir; 43 struct dentry *dir;
44 char buf[10]; 44 char buf[10];
45 int i; 45 int i;
46 46
@@ -52,7 +52,7 @@ void oprofile_create_stats_files(struct super_block * sb, struct dentry * root)
52 cpu_buf = &per_cpu(cpu_buffer, i); 52 cpu_buf = &per_cpu(cpu_buffer, i);
53 snprintf(buf, 10, "cpu%d", i); 53 snprintf(buf, 10, "cpu%d", i);
54 cpudir = oprofilefs_mkdir(sb, dir, buf); 54 cpudir = oprofilefs_mkdir(sb, dir, buf);
55 55
56 /* Strictly speaking access to these ulongs is racy, 56 /* Strictly speaking access to these ulongs is racy,
57 * but we can't simply lock them, and they are 57 * but we can't simply lock them, and they are
58 * informational only. 58 * informational only.
@@ -66,7 +66,7 @@ void oprofile_create_stats_files(struct super_block * sb, struct dentry * root)
66 oprofilefs_create_ro_ulong(sb, cpudir, "sample_invalid_eip", 66 oprofilefs_create_ro_ulong(sb, cpudir, "sample_invalid_eip",
67 &cpu_buf->sample_invalid_eip); 67 &cpu_buf->sample_invalid_eip);
68 } 68 }
69 69
70 oprofilefs_create_ro_atomic(sb, dir, "sample_lost_no_mm", 70 oprofilefs_create_ro_atomic(sb, dir, "sample_lost_no_mm",
71 &oprofile_stats.sample_lost_no_mm); 71 &oprofile_stats.sample_lost_no_mm);
72 oprofilefs_create_ro_atomic(sb, dir, "sample_lost_no_mapping", 72 oprofilefs_create_ro_atomic(sb, dir, "sample_lost_no_mapping",
diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
index 6d755a633f15..3da0d08dc1f9 100644
--- a/drivers/oprofile/oprofile_stats.h
+++ b/drivers/oprofile/oprofile_stats.h
@@ -11,7 +11,7 @@
11#define OPROFILE_STATS_H 11#define OPROFILE_STATS_H
12 12
13#include <asm/atomic.h> 13#include <asm/atomic.h>
14 14
15struct oprofile_stat_struct { 15struct oprofile_stat_struct {
16 atomic_t sample_lost_no_mm; 16 atomic_t sample_lost_no_mm;
17 atomic_t sample_lost_no_mapping; 17 atomic_t sample_lost_no_mapping;
@@ -20,14 +20,14 @@ struct oprofile_stat_struct {
20}; 20};
21 21
22extern struct oprofile_stat_struct oprofile_stats; 22extern struct oprofile_stat_struct oprofile_stats;
23 23
24/* reset all stats to zero */ 24/* reset all stats to zero */
25void oprofile_reset_stats(void); 25void oprofile_reset_stats(void);
26 26
27struct super_block; 27struct super_block;
28struct dentry; 28struct dentry;
29 29
30/* create the stats/ dir */ 30/* create the stats/ dir */
31void oprofile_create_stats_files(struct super_block * sb, struct dentry * root); 31void oprofile_create_stats_files(struct super_block *sb, struct dentry *root);
32 32
33#endif /* OPROFILE_STATS_H */ 33#endif /* OPROFILE_STATS_H */
diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
index 8543cb26cf34..ddc4c59f02dc 100644
--- a/drivers/oprofile/oprofilefs.c
+++ b/drivers/oprofile/oprofilefs.c
@@ -23,9 +23,9 @@
23 23
24DEFINE_SPINLOCK(oprofilefs_lock); 24DEFINE_SPINLOCK(oprofilefs_lock);
25 25
26static struct inode * oprofilefs_get_inode(struct super_block * sb, int mode) 26static struct inode *oprofilefs_get_inode(struct super_block *sb, int mode)
27{ 27{
28 struct inode * inode = new_inode(sb); 28 struct inode *inode = new_inode(sb);
29 29
30 if (inode) { 30 if (inode) {
31 inode->i_mode = mode; 31 inode->i_mode = mode;
@@ -44,7 +44,7 @@ static struct super_operations s_ops = {
44}; 44};
45 45
46 46
47ssize_t oprofilefs_str_to_user(char const * str, char __user * buf, size_t count, loff_t * offset) 47ssize_t oprofilefs_str_to_user(char const *str, char __user *buf, size_t count, loff_t *offset)
48{ 48{
49 return simple_read_from_buffer(buf, count, offset, str, strlen(str)); 49 return simple_read_from_buffer(buf, count, offset, str, strlen(str));
50} 50}
@@ -52,7 +52,7 @@ ssize_t oprofilefs_str_to_user(char const * str, char __user * buf, size_t count
52 52
53#define TMPBUFSIZE 50 53#define TMPBUFSIZE 50
54 54
55ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user * buf, size_t count, loff_t * offset) 55ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user *buf, size_t count, loff_t *offset)
56{ 56{
57 char tmpbuf[TMPBUFSIZE]; 57 char tmpbuf[TMPBUFSIZE];
58 size_t maxlen = snprintf(tmpbuf, TMPBUFSIZE, "%lu\n", val); 58 size_t maxlen = snprintf(tmpbuf, TMPBUFSIZE, "%lu\n", val);
@@ -62,7 +62,7 @@ ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user * buf, size_t co
62} 62}
63 63
64 64
65int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, size_t count) 65int oprofilefs_ulong_from_user(unsigned long *val, char const __user *buf, size_t count)
66{ 66{
67 char tmpbuf[TMPBUFSIZE]; 67 char tmpbuf[TMPBUFSIZE];
68 unsigned long flags; 68 unsigned long flags;
@@ -85,16 +85,16 @@ int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, siz
85} 85}
86 86
87 87
88static ssize_t ulong_read_file(struct file * file, char __user * buf, size_t count, loff_t * offset) 88static ssize_t ulong_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
89{ 89{
90 unsigned long * val = file->private_data; 90 unsigned long *val = file->private_data;
91 return oprofilefs_ulong_to_user(*val, buf, count, offset); 91 return oprofilefs_ulong_to_user(*val, buf, count, offset);
92} 92}
93 93
94 94
95static ssize_t ulong_write_file(struct file * file, char const __user * buf, size_t count, loff_t * offset) 95static ssize_t ulong_write_file(struct file *file, char const __user *buf, size_t count, loff_t *offset)
96{ 96{
97 unsigned long * value = file->private_data; 97 unsigned long *value = file->private_data;
98 int retval; 98 int retval;
99 99
100 if (*offset) 100 if (*offset)
@@ -108,7 +108,7 @@ static ssize_t ulong_write_file(struct file * file, char const __user * buf, siz
108} 108}
109 109
110 110
111static int default_open(struct inode * inode, struct file * filp) 111static int default_open(struct inode *inode, struct file *filp)
112{ 112{
113 if (inode->i_private) 113 if (inode->i_private)
114 filp->private_data = inode->i_private; 114 filp->private_data = inode->i_private;
@@ -129,12 +129,12 @@ static const struct file_operations ulong_ro_fops = {
129}; 129};
130 130
131 131
132static struct dentry * __oprofilefs_create_file(struct super_block * sb, 132static struct dentry *__oprofilefs_create_file(struct super_block *sb,
133 struct dentry * root, char const * name, const struct file_operations * fops, 133 struct dentry *root, char const *name, const struct file_operations *fops,
134 int perm) 134 int perm)
135{ 135{
136 struct dentry * dentry; 136 struct dentry *dentry;
137 struct inode * inode; 137 struct inode *inode;
138 138
139 dentry = d_alloc_name(root, name); 139 dentry = d_alloc_name(root, name);
140 if (!dentry) 140 if (!dentry)
@@ -150,10 +150,10 @@ static struct dentry * __oprofilefs_create_file(struct super_block * sb,
150} 150}
151 151
152 152
153int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root, 153int oprofilefs_create_ulong(struct super_block *sb, struct dentry *root,
154 char const * name, unsigned long * val) 154 char const *name, unsigned long *val)
155{ 155{
156 struct dentry * d = __oprofilefs_create_file(sb, root, name, 156 struct dentry *d = __oprofilefs_create_file(sb, root, name,
157 &ulong_fops, 0644); 157 &ulong_fops, 0644);
158 if (!d) 158 if (!d)
159 return -EFAULT; 159 return -EFAULT;
@@ -163,10 +163,10 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
163} 163}
164 164
165 165
166int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root, 166int oprofilefs_create_ro_ulong(struct super_block *sb, struct dentry *root,
167 char const * name, unsigned long * val) 167 char const *name, unsigned long *val)
168{ 168{
169 struct dentry * d = __oprofilefs_create_file(sb, root, name, 169 struct dentry *d = __oprofilefs_create_file(sb, root, name,
170 &ulong_ro_fops, 0444); 170 &ulong_ro_fops, 0444);
171 if (!d) 171 if (!d)
172 return -EFAULT; 172 return -EFAULT;
@@ -176,23 +176,23 @@ int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
176} 176}
177 177
178 178
179static ssize_t atomic_read_file(struct file * file, char __user * buf, size_t count, loff_t * offset) 179static ssize_t atomic_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
180{ 180{
181 atomic_t * val = file->private_data; 181 atomic_t *val = file->private_data;
182 return oprofilefs_ulong_to_user(atomic_read(val), buf, count, offset); 182 return oprofilefs_ulong_to_user(atomic_read(val), buf, count, offset);
183} 183}
184 184
185 185
186static const struct file_operations atomic_ro_fops = { 186static const struct file_operations atomic_ro_fops = {
187 .read = atomic_read_file, 187 .read = atomic_read_file,
188 .open = default_open, 188 .open = default_open,
189}; 189};
190 190
191 191
192int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root, 192int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
193 char const * name, atomic_t * val) 193 char const *name, atomic_t *val)
194{ 194{
195 struct dentry * d = __oprofilefs_create_file(sb, root, name, 195 struct dentry *d = __oprofilefs_create_file(sb, root, name,
196 &atomic_ro_fops, 0444); 196 &atomic_ro_fops, 0444);
197 if (!d) 197 if (!d)
198 return -EFAULT; 198 return -EFAULT;
@@ -201,9 +201,9 @@ int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
201 return 0; 201 return 0;
202} 202}
203 203
204 204
205int oprofilefs_create_file(struct super_block * sb, struct dentry * root, 205int oprofilefs_create_file(struct super_block *sb, struct dentry *root,
206 char const * name, const struct file_operations * fops) 206 char const *name, const struct file_operations *fops)
207{ 207{
208 if (!__oprofilefs_create_file(sb, root, name, fops, 0644)) 208 if (!__oprofilefs_create_file(sb, root, name, fops, 0644))
209 return -EFAULT; 209 return -EFAULT;
@@ -211,8 +211,8 @@ int oprofilefs_create_file(struct super_block * sb, struct dentry * root,
211} 211}
212 212
213 213
214int oprofilefs_create_file_perm(struct super_block * sb, struct dentry * root, 214int oprofilefs_create_file_perm(struct super_block *sb, struct dentry *root,
215 char const * name, const struct file_operations * fops, int perm) 215 char const *name, const struct file_operations *fops, int perm)
216{ 216{
217 if (!__oprofilefs_create_file(sb, root, name, fops, perm)) 217 if (!__oprofilefs_create_file(sb, root, name, fops, perm))
218 return -EFAULT; 218 return -EFAULT;
@@ -220,11 +220,11 @@ int oprofilefs_create_file_perm(struct super_block * sb, struct dentry * root,
220} 220}
221 221
222 222
223struct dentry * oprofilefs_mkdir(struct super_block * sb, 223struct dentry *oprofilefs_mkdir(struct super_block *sb,
224 struct dentry * root, char const * name) 224 struct dentry *root, char const *name)
225{ 225{
226 struct dentry * dentry; 226 struct dentry *dentry;
227 struct inode * inode; 227 struct inode *inode;
228 228
229 dentry = d_alloc_name(root, name); 229 dentry = d_alloc_name(root, name);
230 if (!dentry) 230 if (!dentry)
@@ -241,10 +241,10 @@ struct dentry * oprofilefs_mkdir(struct super_block * sb,
241} 241}
242 242
243 243
244static int oprofilefs_fill_super(struct super_block * sb, void * data, int silent) 244static int oprofilefs_fill_super(struct super_block *sb, void *data, int silent)
245{ 245{
246 struct inode * root_inode; 246 struct inode *root_inode;
247 struct dentry * root_dentry; 247 struct dentry *root_dentry;
248 248
249 sb->s_blocksize = PAGE_CACHE_SIZE; 249 sb->s_blocksize = PAGE_CACHE_SIZE;
250 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 250 sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
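
For reference, a minimal sketch of how a create_files-style callback consumes the helpers whose signatures are tidied above; everything prefixed my_ is a hypothetical name, not part of the patch.

	#include <linux/errno.h>
	#include <linux/oprofile.h>
	#include <asm/atomic.h>

	static unsigned long my_backtrace_depth;		/* hypothetical tunable */
	static atomic_t my_samples_dropped = ATOMIC_INIT(0);	/* hypothetical counter */

	static int my_create_files(struct super_block *sb, struct dentry *root)
	{
		struct dentry *dir = oprofilefs_mkdir(sb, root, "my_stats");

		if (!dir)
			return -ENOMEM;

		/* 0644 file backed by an unsigned long, read and write */
		oprofilefs_create_ulong(sb, root, "backtrace_depth",
					&my_backtrace_depth);
		/* 0444 file backed by an atomic_t, read-only */
		oprofilefs_create_ro_atomic(sb, dir, "samples_dropped",
					    &my_samples_dropped);
		return 0;
	}
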
diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
index 710a45f0d734..333f915568c7 100644
--- a/drivers/oprofile/timer_int.c
+++ b/drivers/oprofile/timer_int.c
@@ -19,7 +19,7 @@
19 19
20static int timer_notify(struct pt_regs *regs) 20static int timer_notify(struct pt_regs *regs)
21{ 21{
22 oprofile_add_sample(regs, 0); 22 oprofile_add_sample(regs, 0);
23 return 0; 23 return 0;
24} 24}
25 25
@@ -35,7 +35,7 @@ static void timer_stop(void)
35} 35}
36 36
37 37
38void __init oprofile_timer_init(struct oprofile_operations * ops) 38void __init oprofile_timer_init(struct oprofile_operations *ops)
39{ 39{
40 ops->create_files = NULL; 40 ops->create_files = NULL;
41 ops->setup = NULL; 41 ops->setup = NULL;
diff --git a/drivers/parisc/eisa_eeprom.c b/drivers/parisc/eisa_eeprom.c
index 5ac207932fd7..685d94e69d44 100644
--- a/drivers/parisc/eisa_eeprom.c
+++ b/drivers/parisc/eisa_eeprom.c
@@ -86,7 +86,7 @@ static int eisa_eeprom_open(struct inode *inode, struct file *file)
86{ 86{
87 cycle_kernel_lock(); 87 cycle_kernel_lock();
88 88
89 if (file->f_mode & 2) 89 if (file->f_mode & FMODE_WRITE)
90 return -EINVAL; 90 return -EINVAL;
91 91
92 return 0; 92 return 0;
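
The hunk above swaps the magic constant 2 for the named FMODE_WRITE bit. A minimal sketch of the same read-only open check; my_readonly_open() is a hypothetical name.

	#include <linux/errno.h>
	#include <linux/fs.h>

	static int my_readonly_open(struct inode *inode, struct file *file)
	{
		if (file->f_mode & FMODE_WRITE)	/* the device is read-only */
			return -EINVAL;
		return 0;
	}
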
diff --git a/drivers/parport/Kconfig b/drivers/parport/Kconfig
index 209b4a464bcf..855f389eea40 100644
--- a/drivers/parport/Kconfig
+++ b/drivers/parport/Kconfig
@@ -36,7 +36,7 @@ if PARPORT
36config PARPORT_PC 36config PARPORT_PC
37 tristate "PC-style hardware" 37 tristate "PC-style hardware"
38 depends on (!SPARC64 || PCI) && !SPARC32 && !M32R && !FRV && \ 38 depends on (!SPARC64 || PCI) && !SPARC32 && !M32R && !FRV && \
39 (!M68K || ISA) && !MN10300 && !AVR32 39 (!M68K || ISA) && !MN10300 && !AVR32 && !BLACKFIN
40 ---help--- 40 ---help---
41 You should say Y here if you have a PC-style parallel port. All 41 You should say Y here if you have a PC-style parallel port. All
42 IBM PC compatible computers and some Alphas have PC-style 42 IBM PC compatible computers and some Alphas have PC-style
diff --git a/drivers/parport/parport_cs.c b/drivers/parport/parport_cs.c
index b1899e9c1f65..0cd5fbc7f2c2 100644
--- a/drivers/parport/parport_cs.c
+++ b/drivers/parport/parport_cs.c
@@ -112,7 +112,7 @@ static int parport_probe(struct pcmcia_device *link)
112 112
113 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 113 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
114 link->io.Attributes2 = IO_DATA_PATH_WIDTH_8; 114 link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
115 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; 115 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
116 link->irq.IRQInfo1 = IRQ_LEVEL_ID; 116 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
117 link->conf.Attributes = CONF_ENABLE_IRQ; 117 link->conf.Attributes = CONF_ENABLE_IRQ;
118 link->conf.IntType = INT_MEMORY_AND_IO; 118 link->conf.IntType = INT_MEMORY_AND_IO;
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 8a846adf1dcf..96f3bdf0ec4b 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -2791,6 +2791,7 @@ enum parport_pc_pci_cards {
2791 oxsemi_952, 2791 oxsemi_952,
2792 oxsemi_954, 2792 oxsemi_954,
2793 oxsemi_840, 2793 oxsemi_840,
2794 oxsemi_pcie_pport,
2794 aks_0100, 2795 aks_0100,
2795 mobility_pp, 2796 mobility_pp,
2796 netmos_9705, 2797 netmos_9705,
@@ -2868,6 +2869,7 @@ static struct parport_pc_pci {
2868 /* oxsemi_952 */ { 1, { { 0, 1 }, } }, 2869 /* oxsemi_952 */ { 1, { { 0, 1 }, } },
2869 /* oxsemi_954 */ { 1, { { 0, -1 }, } }, 2870 /* oxsemi_954 */ { 1, { { 0, -1 }, } },
2870 /* oxsemi_840 */ { 1, { { 0, 1 }, } }, 2871 /* oxsemi_840 */ { 1, { { 0, 1 }, } },
2872 /* oxsemi_pcie_pport */ { 1, { { 0, 1 }, } },
2871 /* aks_0100 */ { 1, { { 0, -1 }, } }, 2873 /* aks_0100 */ { 1, { { 0, -1 }, } },
2872 /* mobility_pp */ { 1, { { 0, 1 }, } }, 2874 /* mobility_pp */ { 1, { { 0, 1 }, } },
2873 /* netmos_9705 */ { 1, { { 0, -1 }, } }, /* untested */ 2875 /* netmos_9705 */ { 1, { { 0, -1 }, } }, /* untested */
@@ -2928,7 +2930,6 @@ static const struct pci_device_id parport_pc_pci_tbl[] = {
2928 { 0x1409, 0x7268, 0x1409, 0x0103, 0, 0, timedia_4008a }, 2930 { 0x1409, 0x7268, 0x1409, 0x0103, 0, 0, timedia_4008a },
2929 { 0x1409, 0x7268, 0x1409, 0x0104, 0, 0, timedia_4018 }, 2931 { 0x1409, 0x7268, 0x1409, 0x0104, 0, 0, timedia_4018 },
2930 { 0x1409, 0x7268, 0x1409, 0x9018, 0, 0, timedia_9018a }, 2932 { 0x1409, 0x7268, 0x1409, 0x9018, 0, 0, timedia_9018a },
2931 { 0x14f2, 0x0121, PCI_ANY_ID, PCI_ANY_ID, 0, 0, mobility_pp },
2932 { PCI_VENDOR_ID_SYBA, PCI_DEVICE_ID_SYBA_2P_EPP, 2933 { PCI_VENDOR_ID_SYBA, PCI_DEVICE_ID_SYBA_2P_EPP,
2933 PCI_ANY_ID, PCI_ANY_ID, 0, 0, syba_2p_epp }, 2934 PCI_ANY_ID, PCI_ANY_ID, 0, 0, syba_2p_epp },
2934 { PCI_VENDOR_ID_SYBA, PCI_DEVICE_ID_SYBA_1P_ECP, 2935 { PCI_VENDOR_ID_SYBA, PCI_DEVICE_ID_SYBA_1P_ECP,
@@ -2946,8 +2947,25 @@ static const struct pci_device_id parport_pc_pci_tbl[] = {
2946 PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_954 }, 2947 PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_954 },
2947 { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_12PCI840, 2948 { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_12PCI840,
2948 PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_840 }, 2949 PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_840 },
2950 { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe840,
2951 PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
2952 { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe840_G,
2953 PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
2954 { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe952_0,
2955 PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
2956 { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe952_0_G,
2957 PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
2958 { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe952_1,
2959 PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
2960 { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe952_1_G,
2961 PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
2962 { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe952_1_U,
2963 PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
2964 { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe952_1_GU,
2965 PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
2949 { PCI_VENDOR_ID_AKS, PCI_DEVICE_ID_AKS_ALADDINCARD, 2966 { PCI_VENDOR_ID_AKS, PCI_DEVICE_ID_AKS_ALADDINCARD,
2950 PCI_ANY_ID, PCI_ANY_ID, 0, 0, aks_0100 }, 2967 PCI_ANY_ID, PCI_ANY_ID, 0, 0, aks_0100 },
2968 { 0x14f2, 0x0121, PCI_ANY_ID, PCI_ANY_ID, 0, 0, mobility_pp },
2951 /* NetMos communication controllers */ 2969 /* NetMos communication controllers */
2952 { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9705, 2970 { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9705,
2953 PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9705 }, 2971 PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9705 },
diff --git a/drivers/parport/parport_serial.c b/drivers/parport/parport_serial.c
index e2e95b36a603..101ed49a2d15 100644
--- a/drivers/parport/parport_serial.c
+++ b/drivers/parport/parport_serial.c
@@ -70,6 +70,8 @@ static int __devinit netmos_parallel_init(struct pci_dev *dev, struct parport_pc
70 * parallel ports and <S> is the number of serial ports. 70 * parallel ports and <S> is the number of serial ports.
71 */ 71 */
72 card->numports = (dev->subsystem_device & 0xf0) >> 4; 72 card->numports = (dev->subsystem_device & 0xf0) >> 4;
73 if (card->numports > ARRAY_SIZE(card->addr))
74 card->numports = ARRAY_SIZE(card->addr);
73 return 0; 75 return 0;
74} 76}
75 77
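
The two added lines above clamp a port count decoded from the subsystem ID to the size of the address array it will index. A minimal sketch of the same guard; struct my_card and my_clamp_ports() are hypothetical stand-ins for the driver's types.

	#include <linux/kernel.h>	/* ARRAY_SIZE(), min_t() */

	struct my_card {
		int numports;
		unsigned long addr[4];
	};

	static void my_clamp_ports(struct my_card *card, unsigned int raw)
	{
		/* never trust a count read from config space beyond the array bound */
		card->numports = min_t(unsigned int, raw, ARRAY_SIZE(card->addr));
	}
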
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 4b47f4ece5b7..af3bfe22847b 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -3,7 +3,8 @@
3# 3#
4 4
5obj-y += access.o bus.o probe.o remove.o pci.o quirks.o slot.o \ 5obj-y += access.o bus.o probe.o remove.o pci.o quirks.o slot.o \
6 pci-driver.o search.o pci-sysfs.o rom.o setup-res.o 6 pci-driver.o search.o pci-sysfs.o rom.o setup-res.o \
7 irq.o
7obj-$(CONFIG_PROC_FS) += proc.o 8obj-$(CONFIG_PROC_FS) += proc.o
8 9
9# Build PCI Express stuff if needed 10# Build PCI Express stuff if needed
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 8b29c307f1a1..691b3adeb870 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -188,8 +188,7 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header)
188 return 0; 188 return 0;
189} 189}
190 190
191static int __init 191static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
192dmar_parse_dev(struct dmar_drhd_unit *dmaru)
193{ 192{
194 struct acpi_dmar_hardware_unit *drhd; 193 struct acpi_dmar_hardware_unit *drhd;
195 static int include_all; 194 static int include_all;
@@ -277,14 +276,15 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
277 drhd = (struct acpi_dmar_hardware_unit *)header; 276 drhd = (struct acpi_dmar_hardware_unit *)header;
278 printk (KERN_INFO PREFIX 277 printk (KERN_INFO PREFIX
279 "DRHD (flags: 0x%08x)base: 0x%016Lx\n", 278 "DRHD (flags: 0x%08x)base: 0x%016Lx\n",
280 drhd->flags, drhd->address); 279 drhd->flags, (unsigned long long)drhd->address);
281 break; 280 break;
282 case ACPI_DMAR_TYPE_RESERVED_MEMORY: 281 case ACPI_DMAR_TYPE_RESERVED_MEMORY:
283 rmrr = (struct acpi_dmar_reserved_memory *)header; 282 rmrr = (struct acpi_dmar_reserved_memory *)header;
284 283
285 printk (KERN_INFO PREFIX 284 printk (KERN_INFO PREFIX
286 "RMRR base: 0x%016Lx end: 0x%016Lx\n", 285 "RMRR base: 0x%016Lx end: 0x%016Lx\n",
287 rmrr->base_address, rmrr->end_address); 286 (unsigned long long)rmrr->base_address,
287 (unsigned long long)rmrr->end_address);
288 break; 288 break;
289 } 289 }
290} 290}
@@ -328,7 +328,7 @@ parse_dmar_table(void)
328 if (!dmar) 328 if (!dmar)
329 return -ENODEV; 329 return -ENODEV;
330 330
331 if (dmar->width < PAGE_SHIFT_4K - 1) { 331 if (dmar->width < PAGE_SHIFT - 1) {
332 printk(KERN_WARNING PREFIX "Invalid DMAR haw\n"); 332 printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
333 return -EINVAL; 333 return -EINVAL;
334 } 334 }
@@ -460,8 +460,8 @@ void __init detect_intel_iommu(void)
460 460
461 ret = dmar_table_detect(); 461 ret = dmar_table_detect();
462 462
463#ifdef CONFIG_DMAR
464 { 463 {
464#ifdef CONFIG_INTR_REMAP
465 struct acpi_table_dmar *dmar; 465 struct acpi_table_dmar *dmar;
466 /* 466 /*
467 * for now we will disable dma-remapping when interrupt 467 * for now we will disable dma-remapping when interrupt
@@ -470,29 +470,17 @@ void __init detect_intel_iommu(void)
470 * is added, we will not need this any more. 470 * is added, we will not need this any more.
471 */ 471 */
472 dmar = (struct acpi_table_dmar *) dmar_tbl; 472 dmar = (struct acpi_table_dmar *) dmar_tbl;
473 if (ret && cpu_has_x2apic && dmar->flags & 0x1) { 473 if (ret && cpu_has_x2apic && dmar->flags & 0x1)
474 printk(KERN_INFO 474 printk(KERN_INFO
475 "Queued invalidation will be enabled to support " 475 "Queued invalidation will be enabled to support "
476 "x2apic and Intr-remapping.\n"); 476 "x2apic and Intr-remapping.\n");
477 printk(KERN_INFO 477#endif
478 "Disabling IOMMU detection, because of missing " 478#ifdef CONFIG_DMAR
479 "queued invalidation support for IOTLB "
480 "invalidation\n");
481 printk(KERN_INFO
482 "Use \"nox2apic\", if you want to use Intel "
483 " IOMMU for DMA-remapping and don't care about "
484 " x2apic support\n");
485
486 dmar_disabled = 1;
487 goto end;
488 }
489
490 if (ret && !no_iommu && !iommu_detected && !swiotlb && 479 if (ret && !no_iommu && !iommu_detected && !swiotlb &&
491 !dmar_disabled) 480 !dmar_disabled)
492 iommu_detected = 1; 481 iommu_detected = 1;
493 }
494end:
495#endif 482#endif
483 }
496 dmar_tbl = NULL; 484 dmar_tbl = NULL;
497} 485}
498 486
@@ -510,7 +498,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
510 498
511 iommu->seq_id = iommu_allocated++; 499 iommu->seq_id = iommu_allocated++;
512 500
513 iommu->reg = ioremap(drhd->reg_base_addr, PAGE_SIZE_4K); 501 iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
514 if (!iommu->reg) { 502 if (!iommu->reg) {
515 printk(KERN_ERR "IOMMU: can't map the region\n"); 503 printk(KERN_ERR "IOMMU: can't map the region\n");
516 goto error; 504 goto error;
@@ -521,8 +509,8 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
521 /* the registers might be more than one page */ 509 /* the registers might be more than one page */
522 map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap), 510 map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
523 cap_max_fault_reg_offset(iommu->cap)); 511 cap_max_fault_reg_offset(iommu->cap));
524 map_size = PAGE_ALIGN_4K(map_size); 512 map_size = VTD_PAGE_ALIGN(map_size);
525 if (map_size > PAGE_SIZE_4K) { 513 if (map_size > VTD_PAGE_SIZE) {
526 iounmap(iommu->reg); 514 iounmap(iommu->reg);
527 iommu->reg = ioremap(drhd->reg_base_addr, map_size); 515 iommu->reg = ioremap(drhd->reg_base_addr, map_size);
528 if (!iommu->reg) { 516 if (!iommu->reg) {
@@ -533,8 +521,10 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
533 521
534 ver = readl(iommu->reg + DMAR_VER_REG); 522 ver = readl(iommu->reg + DMAR_VER_REG);
535 pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n", 523 pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
536 drhd->reg_base_addr, DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver), 524 (unsigned long long)drhd->reg_base_addr,
537 iommu->cap, iommu->ecap); 525 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
526 (unsigned long long)iommu->cap,
527 (unsigned long long)iommu->ecap);
538 528
539 spin_lock_init(&iommu->register_lock); 529 spin_lock_init(&iommu->register_lock);
540 530
@@ -587,11 +577,11 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
587 577
588 hw = qi->desc; 578 hw = qi->desc;
589 579
590 spin_lock(&qi->q_lock); 580 spin_lock_irqsave(&qi->q_lock, flags);
591 while (qi->free_cnt < 3) { 581 while (qi->free_cnt < 3) {
592 spin_unlock(&qi->q_lock); 582 spin_unlock_irqrestore(&qi->q_lock, flags);
593 cpu_relax(); 583 cpu_relax();
594 spin_lock(&qi->q_lock); 584 spin_lock_irqsave(&qi->q_lock, flags);
595 } 585 }
596 586
597 index = qi->free_head; 587 index = qi->free_head;
@@ -612,15 +602,22 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
612 qi->free_head = (qi->free_head + 2) % QI_LENGTH; 602 qi->free_head = (qi->free_head + 2) % QI_LENGTH;
613 qi->free_cnt -= 2; 603 qi->free_cnt -= 2;
614 604
615 spin_lock_irqsave(&iommu->register_lock, flags); 605 spin_lock(&iommu->register_lock);
616 /* 606 /*
617 * update the HW tail register indicating the presence of 607 * update the HW tail register indicating the presence of
618 * new descriptors. 608 * new descriptors.
619 */ 609 */
620 writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG); 610 writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);
621 spin_unlock_irqrestore(&iommu->register_lock, flags); 611 spin_unlock(&iommu->register_lock);
622 612
623 while (qi->desc_status[wait_index] != QI_DONE) { 613 while (qi->desc_status[wait_index] != QI_DONE) {
 614 /*
 615 * We leave interrupts disabled to prevent the interrupt context
 616 * from queueing another cmd while a cmd is already submitted
 617 * and waiting for completion on this cpu. This avoids
 618 * a deadlock where the interrupt context could wait indefinitely
 619 * for free slots in the queue.
 620 */
624 spin_unlock(&qi->q_lock); 621 spin_unlock(&qi->q_lock);
625 cpu_relax(); 622 cpu_relax();
626 spin_lock(&qi->q_lock); 623 spin_lock(&qi->q_lock);
@@ -629,7 +626,7 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
629 qi->desc_status[index] = QI_DONE; 626 qi->desc_status[index] = QI_DONE;
630 627
631 reclaim_free_desc(qi); 628 reclaim_free_desc(qi);
632 spin_unlock(&qi->q_lock); 629 spin_unlock_irqrestore(&qi->q_lock, flags);
633} 630}
634 631
635/* 632/*
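
The reworked locking above assumes the queue lock can also be taken from interrupt context, so q_lock is now acquired with spin_lock_irqsave() and interrupts stay off across the whole submit-and-wait, while the brief register update keeps a plain spin_lock() on the separate register_lock. A minimal, self-contained sketch of that shape; the my_ names are hypothetical.

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(my_q_lock);

	static void my_submit_and_wait(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&my_q_lock, flags);	/* blocks local IRQs too */
		/* ... queue a descriptor and busy-wait for its completion ... */
		spin_unlock_irqrestore(&my_q_lock, flags);
	}
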
@@ -645,6 +642,62 @@ void qi_global_iec(struct intel_iommu *iommu)
645 qi_submit_sync(&desc, iommu); 642 qi_submit_sync(&desc, iommu);
646} 643}
647 644
645int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
646 u64 type, int non_present_entry_flush)
647{
648
649 struct qi_desc desc;
650
651 if (non_present_entry_flush) {
652 if (!cap_caching_mode(iommu->cap))
653 return 1;
654 else
655 did = 0;
656 }
657
658 desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
659 | QI_CC_GRAN(type) | QI_CC_TYPE;
660 desc.high = 0;
661
662 qi_submit_sync(&desc, iommu);
663
664 return 0;
665
666}
667
668int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
669 unsigned int size_order, u64 type,
670 int non_present_entry_flush)
671{
672 u8 dw = 0, dr = 0;
673
674 struct qi_desc desc;
675 int ih = 0;
676
677 if (non_present_entry_flush) {
678 if (!cap_caching_mode(iommu->cap))
679 return 1;
680 else
681 did = 0;
682 }
683
684 if (cap_write_drain(iommu->cap))
685 dw = 1;
686
687 if (cap_read_drain(iommu->cap))
688 dr = 1;
689
690 desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
691 | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
692 desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
693 | QI_IOTLB_AM(size_order);
694
695 qi_submit_sync(&desc, iommu);
696
697 return 0;
698
699}
700
648/* 701/*
649 * Enable Queued Invalidation interface. This is a must to support 702 * Enable Queued Invalidation interface. This is a must to support
650 * interrupt-remapping. Also used by DMA-remapping, which replaces 703 * interrupt-remapping. Also used by DMA-remapping, which replaces
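
Several hunks in this file add (unsigned long long) casts before printing. A minimal sketch of why: u64 is unsigned long on some 64-bit architectures and unsigned long long elsewhere, so a value printed with %llx/%Lx is cast explicitly to keep the format portable. my_print_base() is a hypothetical helper.

	#include <linux/kernel.h>
	#include <linux/types.h>

	static void my_print_base(u64 base)
	{
		printk(KERN_INFO "base: 0x%016Lx\n", (unsigned long long)base);
	}
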
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
index 5a58b075dd8d..9bcb6cbd5aa9 100644
--- a/drivers/pci/hotplug/acpiphp.h
+++ b/drivers/pci/hotplug/acpiphp.h
@@ -50,9 +50,6 @@
50#define info(format, arg...) printk(KERN_INFO "%s: " format, MY_NAME , ## arg) 50#define info(format, arg...) printk(KERN_INFO "%s: " format, MY_NAME , ## arg)
51#define warn(format, arg...) printk(KERN_WARNING "%s: " format, MY_NAME , ## arg) 51#define warn(format, arg...) printk(KERN_WARNING "%s: " format, MY_NAME , ## arg)
52 52
53/* name size which is used for entries in pcihpfs */
54#define SLOT_NAME_SIZE 20 /* {_SUN} */
55
56struct acpiphp_bridge; 53struct acpiphp_bridge;
57struct acpiphp_slot; 54struct acpiphp_slot;
58 55
@@ -63,9 +60,13 @@ struct slot {
63 struct hotplug_slot *hotplug_slot; 60 struct hotplug_slot *hotplug_slot;
64 struct acpiphp_slot *acpi_slot; 61 struct acpiphp_slot *acpi_slot;
65 struct hotplug_slot_info info; 62 struct hotplug_slot_info info;
66 char name[SLOT_NAME_SIZE];
67}; 63};
68 64
65static inline const char *slot_name(struct slot *slot)
66{
67 return hotplug_slot_name(slot->hotplug_slot);
68}
69
69/* 70/*
70 * struct acpiphp_bridge - PCI bridge information 71 * struct acpiphp_bridge - PCI bridge information
71 * 72 *
@@ -112,7 +113,7 @@ struct acpiphp_slot {
112 113
113 u8 device; /* pci device# */ 114 u8 device; /* pci device# */
114 115
115 u32 sun; /* ACPI _SUN (slot unique number) */ 116 unsigned long long sun; /* ACPI _SUN (slot unique number) */
116 u32 flags; /* see below */ 117 u32 flags; /* see below */
117}; 118};
118 119
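
The header change above drops the driver-private name buffer and reads the name back from the hotplug core instead. A short sketch of how call sites use the new accessor, assuming only the slot_name() inline and struct slot declared in this header; my_report() is a hypothetical helper.

	#include <linux/kernel.h>

	static void my_report(struct slot *slot)
	{
		printk(KERN_INFO "acpiphp: operating on slot %s\n", slot_name(slot));
	}
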
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c
index 0e496e866a84..43c10bd261b4 100644
--- a/drivers/pci/hotplug/acpiphp_core.c
+++ b/drivers/pci/hotplug/acpiphp_core.c
@@ -44,6 +44,9 @@
44 44
45#define MY_NAME "acpiphp" 45#define MY_NAME "acpiphp"
46 46
47/* name size which is used for entries in pcihpfs */
48#define SLOT_NAME_SIZE 21 /* {_SUN} */
49
47static int debug; 50static int debug;
48int acpiphp_debug; 51int acpiphp_debug;
49 52
@@ -84,7 +87,6 @@ static struct hotplug_slot_ops acpi_hotplug_slot_ops = {
84 .get_adapter_status = get_adapter_status, 87 .get_adapter_status = get_adapter_status,
85}; 88};
86 89
87
88/** 90/**
89 * acpiphp_register_attention - set attention LED callback 91 * acpiphp_register_attention - set attention LED callback
90 * @info: must be completely filled with LED callbacks 92 * @info: must be completely filled with LED callbacks
@@ -136,7 +138,7 @@ static int enable_slot(struct hotplug_slot *hotplug_slot)
136{ 138{
137 struct slot *slot = hotplug_slot->private; 139 struct slot *slot = hotplug_slot->private;
138 140
139 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 141 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
140 142
141 /* enable the specified slot */ 143 /* enable the specified slot */
142 return acpiphp_enable_slot(slot->acpi_slot); 144 return acpiphp_enable_slot(slot->acpi_slot);
@@ -154,7 +156,7 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
154 struct slot *slot = hotplug_slot->private; 156 struct slot *slot = hotplug_slot->private;
155 int retval; 157 int retval;
156 158
157 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 159 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
158 160
159 /* disable the specified slot */ 161 /* disable the specified slot */
160 retval = acpiphp_disable_slot(slot->acpi_slot); 162 retval = acpiphp_disable_slot(slot->acpi_slot);
@@ -177,7 +179,7 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
177 { 179 {
178 int retval = -ENODEV; 180 int retval = -ENODEV;
179 181
180 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 182 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot_name(hotplug_slot));
181 183
182 if (attention_info && try_module_get(attention_info->owner)) { 184 if (attention_info && try_module_get(attention_info->owner)) {
183 retval = attention_info->set_attn(hotplug_slot, status); 185 retval = attention_info->set_attn(hotplug_slot, status);
@@ -200,7 +202,7 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
200{ 202{
201 struct slot *slot = hotplug_slot->private; 203 struct slot *slot = hotplug_slot->private;
202 204
203 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 205 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
204 206
205 *value = acpiphp_get_power_status(slot->acpi_slot); 207 *value = acpiphp_get_power_status(slot->acpi_slot);
206 208
@@ -222,7 +224,7 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
222{ 224{
223 int retval = -EINVAL; 225 int retval = -EINVAL;
224 226
225 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 227 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot_name(hotplug_slot));
226 228
227 if (attention_info && try_module_get(attention_info->owner)) { 229 if (attention_info && try_module_get(attention_info->owner)) {
228 retval = attention_info->get_attn(hotplug_slot, value); 230 retval = attention_info->get_attn(hotplug_slot, value);
@@ -245,7 +247,7 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
245{ 247{
246 struct slot *slot = hotplug_slot->private; 248 struct slot *slot = hotplug_slot->private;
247 249
248 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 250 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
249 251
250 *value = acpiphp_get_latch_status(slot->acpi_slot); 252 *value = acpiphp_get_latch_status(slot->acpi_slot);
251 253
@@ -265,7 +267,7 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
265{ 267{
266 struct slot *slot = hotplug_slot->private; 268 struct slot *slot = hotplug_slot->private;
267 269
268 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 270 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
269 271
270 *value = acpiphp_get_adapter_status(slot->acpi_slot); 272 *value = acpiphp_get_adapter_status(slot->acpi_slot);
271 273
@@ -299,7 +301,7 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
299{ 301{
300 struct slot *slot = hotplug_slot->private; 302 struct slot *slot = hotplug_slot->private;
301 303
302 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 304 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
303 305
304 kfree(slot->hotplug_slot); 306 kfree(slot->hotplug_slot);
305 kfree(slot); 307 kfree(slot);
@@ -310,6 +312,7 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot)
310{ 312{
311 struct slot *slot; 313 struct slot *slot;
312 int retval = -ENOMEM; 314 int retval = -ENOMEM;
315 char name[SLOT_NAME_SIZE];
313 316
314 slot = kzalloc(sizeof(*slot), GFP_KERNEL); 317 slot = kzalloc(sizeof(*slot), GFP_KERNEL);
315 if (!slot) 318 if (!slot)
@@ -321,8 +324,6 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot)
321 324
322 slot->hotplug_slot->info = &slot->info; 325 slot->hotplug_slot->info = &slot->info;
323 326
324 slot->hotplug_slot->name = slot->name;
325
326 slot->hotplug_slot->private = slot; 327 slot->hotplug_slot->private = slot;
327 slot->hotplug_slot->release = &release_slot; 328 slot->hotplug_slot->release = &release_slot;
328 slot->hotplug_slot->ops = &acpi_hotplug_slot_ops; 329 slot->hotplug_slot->ops = &acpi_hotplug_slot_ops;
@@ -336,11 +337,12 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot)
336 slot->hotplug_slot->info->cur_bus_speed = PCI_SPEED_UNKNOWN; 337 slot->hotplug_slot->info->cur_bus_speed = PCI_SPEED_UNKNOWN;
337 338
338 acpiphp_slot->slot = slot; 339 acpiphp_slot->slot = slot;
339 snprintf(slot->name, sizeof(slot->name), "%u", slot->acpi_slot->sun); 340 snprintf(name, SLOT_NAME_SIZE, "%llu", slot->acpi_slot->sun);
340 341
341 retval = pci_hp_register(slot->hotplug_slot, 342 retval = pci_hp_register(slot->hotplug_slot,
342 acpiphp_slot->bridge->pci_bus, 343 acpiphp_slot->bridge->pci_bus,
343 acpiphp_slot->device); 344 acpiphp_slot->device,
345 name);
344 if (retval == -EBUSY) 346 if (retval == -EBUSY)
345 goto error_hpslot; 347 goto error_hpslot;
346 if (retval) { 348 if (retval) {
@@ -348,7 +350,7 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot)
348 goto error_hpslot; 350 goto error_hpslot;
349 } 351 }
350 352
351 info("Slot [%s] registered\n", slot->hotplug_slot->name); 353 info("Slot [%s] registered\n", slot_name(slot));
352 354
353 return 0; 355 return 0;
354error_hpslot: 356error_hpslot:
@@ -365,7 +367,7 @@ void acpiphp_unregister_hotplug_slot(struct acpiphp_slot *acpiphp_slot)
365 struct slot *slot = acpiphp_slot->slot; 367 struct slot *slot = acpiphp_slot->slot;
366 int retval = 0; 368 int retval = 0;
367 369
368 info ("Slot [%s] unregistered\n", slot->hotplug_slot->name); 370 info("Slot [%s] unregistered\n", slot_name(slot));
369 371
370 retval = pci_hp_deregister(slot->hotplug_slot); 372 retval = pci_hp_deregister(slot->hotplug_slot);
371 if (retval) 373 if (retval)
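
A note on the numbers above: _SUN is now read into an unsigned long long, so the name is formatted with %llu, and SLOT_NAME_SIZE grows to 21 because the largest 64-bit decimal value has 20 digits plus the terminating NUL. A minimal sketch of that formatting step; MY_SLOT_NAME_SIZE and my_format_sun() are hypothetical.

	#include <linux/kernel.h>

	#define MY_SLOT_NAME_SIZE 21	/* 20 decimal digits of a u64 + '\0' */

	static void my_format_sun(char *buf, unsigned long long sun)
	{
		snprintf(buf, MY_SLOT_NAME_SIZE, "%llu", sun);
	}
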
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index a3e4705dd8f0..3affc6472e65 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -169,7 +169,9 @@ static int post_dock_fixups(struct notifier_block *nb, unsigned long val,
169} 169}
170 170
171 171
172 172static struct acpi_dock_ops acpiphp_dock_ops = {
173 .handler = handle_hotplug_event_func,
174};
173 175
174/* callback routine to register each ACPI PCI slot object */ 176/* callback routine to register each ACPI PCI slot object */
175static acpi_status 177static acpi_status
@@ -180,7 +182,7 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
180 struct acpiphp_func *newfunc; 182 struct acpiphp_func *newfunc;
181 acpi_handle tmp; 183 acpi_handle tmp;
182 acpi_status status = AE_OK; 184 acpi_status status = AE_OK;
183 unsigned long adr, sun; 185 unsigned long long adr, sun;
184 int device, function, retval; 186 int device, function, retval;
185 187
186 status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr); 188 status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
@@ -253,13 +255,13 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
253 255
254 bridge->nr_slots++; 256 bridge->nr_slots++;
255 257
256 dbg("found ACPI PCI Hotplug slot %d at PCI %04x:%02x:%02x\n", 258 dbg("found ACPI PCI Hotplug slot %llu at PCI %04x:%02x:%02x\n",
257 slot->sun, pci_domain_nr(bridge->pci_bus), 259 slot->sun, pci_domain_nr(bridge->pci_bus),
258 bridge->pci_bus->number, slot->device); 260 bridge->pci_bus->number, slot->device);
259 retval = acpiphp_register_hotplug_slot(slot); 261 retval = acpiphp_register_hotplug_slot(slot);
260 if (retval) { 262 if (retval) {
261 if (retval == -EBUSY) 263 if (retval == -EBUSY)
262 warn("Slot %d already registered by another " 264 warn("Slot %llu already registered by another "
263 "hotplug driver\n", slot->sun); 265 "hotplug driver\n", slot->sun);
264 else 266 else
265 warn("acpiphp_register_hotplug_slot failed " 267 warn("acpiphp_register_hotplug_slot failed "
@@ -285,7 +287,7 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
285 */ 287 */
286 newfunc->flags &= ~FUNC_HAS_EJ0; 288 newfunc->flags &= ~FUNC_HAS_EJ0;
287 if (register_hotplug_dock_device(handle, 289 if (register_hotplug_dock_device(handle,
288 handle_hotplug_event_func, newfunc)) 290 &acpiphp_dock_ops, newfunc))
289 dbg("failed to register dock device\n"); 291 dbg("failed to register dock device\n");
290 292
291 /* we need to be notified when dock events happen 293 /* we need to be notified when dock events happen
@@ -528,7 +530,7 @@ find_p2p_bridge(acpi_handle handle, u32 lvl, void *context, void **rv)
528{ 530{
529 acpi_status status; 531 acpi_status status;
530 acpi_handle dummy_handle; 532 acpi_handle dummy_handle;
531 unsigned long tmp; 533 unsigned long long tmp;
532 int device, function; 534 int device, function;
533 struct pci_dev *dev; 535 struct pci_dev *dev;
534 struct pci_bus *pci_bus = context; 536 struct pci_bus *pci_bus = context;
@@ -573,7 +575,7 @@ find_p2p_bridge(acpi_handle handle, u32 lvl, void *context, void **rv)
573static int add_bridge(acpi_handle handle) 575static int add_bridge(acpi_handle handle)
574{ 576{
575 acpi_status status; 577 acpi_status status;
576 unsigned long tmp; 578 unsigned long long tmp;
577 int seg, bus; 579 int seg, bus;
578 acpi_handle dummy_handle; 580 acpi_handle dummy_handle;
579 struct pci_bus *pci_bus; 581 struct pci_bus *pci_bus;
@@ -767,7 +769,7 @@ static int get_gsi_base(acpi_handle handle, u32 *gsi_base)
767{ 769{
768 acpi_status status; 770 acpi_status status;
769 int result = -1; 771 int result = -1;
770 unsigned long gsb; 772 unsigned long long gsb;
771 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; 773 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
772 union acpi_object *obj; 774 union acpi_object *obj;
773 void *table; 775 void *table;
@@ -808,7 +810,7 @@ static acpi_status
808ioapic_add(acpi_handle handle, u32 lvl, void *context, void **rv) 810ioapic_add(acpi_handle handle, u32 lvl, void *context, void **rv)
809{ 811{
810 acpi_status status; 812 acpi_status status;
811 unsigned long sta; 813 unsigned long long sta;
812 acpi_handle tmp; 814 acpi_handle tmp;
813 struct pci_dev *pdev; 815 struct pci_dev *pdev;
814 u32 gsi_base; 816 u32 gsi_base;
@@ -872,7 +874,7 @@ static acpi_status
872ioapic_remove(acpi_handle handle, u32 lvl, void *context, void **rv) 874ioapic_remove(acpi_handle handle, u32 lvl, void *context, void **rv)
873{ 875{
874 acpi_status status; 876 acpi_status status;
875 unsigned long sta; 877 unsigned long long sta;
876 acpi_handle tmp; 878 acpi_handle tmp;
877 u32 gsi_base; 879 u32 gsi_base;
878 struct acpiphp_ioapic *pos, *n, *ioapic = NULL; 880 struct acpiphp_ioapic *pos, *n, *ioapic = NULL;
@@ -1264,7 +1266,7 @@ static int disable_device(struct acpiphp_slot *slot)
1264static unsigned int get_slot_status(struct acpiphp_slot *slot) 1266static unsigned int get_slot_status(struct acpiphp_slot *slot)
1265{ 1267{
1266 acpi_status status; 1268 acpi_status status;
1267 unsigned long sta = 0; 1269 unsigned long long sta = 0;
1268 u32 dvid; 1270 u32 dvid;
1269 struct list_head *l; 1271 struct list_head *l;
1270 struct acpiphp_func *func; 1272 struct acpiphp_func *func;
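
The type changes above follow from acpi_evaluate_integer(), which returns the ACPI integer (64-bit) through an unsigned long long pointer, so every local receiving _ADR, _SUN, _STA or _GSB is widened to match. A minimal sketch of one such evaluation; my_read_sta() is a hypothetical helper.

	#include <linux/acpi.h>
	#include <linux/errno.h>

	static int my_read_sta(acpi_handle handle, unsigned long long *sta)
	{
		acpi_status status;

		status = acpi_evaluate_integer(handle, "_STA", NULL, sta);
		return ACPI_SUCCESS(status) ? 0 : -ENODEV;
	}
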
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
index 2b7c45e39370..881fdd2b7313 100644
--- a/drivers/pci/hotplug/acpiphp_ibm.c
+++ b/drivers/pci/hotplug/acpiphp_ibm.c
@@ -183,7 +183,7 @@ static int ibm_set_attention_status(struct hotplug_slot *slot, u8 status)
183 union acpi_object args[2]; 183 union acpi_object args[2];
184 struct acpi_object_list params = { .pointer = args, .count = 2 }; 184 struct acpi_object_list params = { .pointer = args, .count = 2 };
185 acpi_status stat; 185 acpi_status stat;
186 unsigned long rc; 186 unsigned long long rc;
187 union apci_descriptor *ibm_slot; 187 union apci_descriptor *ibm_slot;
188 188
189 ibm_slot = ibm_slot_from_id(hpslot_to_sun(slot)); 189 ibm_slot = ibm_slot_from_id(hpslot_to_sun(slot));
@@ -204,7 +204,7 @@ static int ibm_set_attention_status(struct hotplug_slot *slot, u8 status)
204 err("APLS evaluation failed: 0x%08x\n", stat); 204 err("APLS evaluation failed: 0x%08x\n", stat);
205 return -ENODEV; 205 return -ENODEV;
206 } else if (!rc) { 206 } else if (!rc) {
207 err("APLS method failed: 0x%08lx\n", rc); 207 err("APLS method failed: 0x%08llx\n", rc);
208 return -ERANGE; 208 return -ERANGE;
209 } 209 }
210 return 0; 210 return 0;
diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
index d9769b30be9a..9fff878cf026 100644
--- a/drivers/pci/hotplug/cpci_hotplug.h
+++ b/drivers/pci/hotplug/cpci_hotplug.h
@@ -30,6 +30,7 @@
30 30
31#include <linux/types.h> 31#include <linux/types.h>
32#include <linux/pci.h> 32#include <linux/pci.h>
33#include <linux/pci_hotplug.h>
33 34
34/* PICMG 2.1 R2.0 HS CSR bits: */ 35/* PICMG 2.1 R2.0 HS CSR bits: */
35#define HS_CSR_INS 0x0080 36#define HS_CSR_INS 0x0080
@@ -69,6 +70,11 @@ struct cpci_hp_controller {
69 struct cpci_hp_controller_ops *ops; 70 struct cpci_hp_controller_ops *ops;
70}; 71};
71 72
73static inline const char *slot_name(struct slot *slot)
74{
75 return hotplug_slot_name(slot->hotplug_slot);
76}
77
72extern int cpci_hp_register_controller(struct cpci_hp_controller *controller); 78extern int cpci_hp_register_controller(struct cpci_hp_controller *controller);
73extern int cpci_hp_unregister_controller(struct cpci_hp_controller *controller); 79extern int cpci_hp_unregister_controller(struct cpci_hp_controller *controller);
74extern int cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last); 80extern int cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last);
diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c
index 935947991dc9..de94f4feef8c 100644
--- a/drivers/pci/hotplug/cpci_hotplug_core.c
+++ b/drivers/pci/hotplug/cpci_hotplug_core.c
@@ -108,7 +108,7 @@ enable_slot(struct hotplug_slot *hotplug_slot)
108 struct slot *slot = hotplug_slot->private; 108 struct slot *slot = hotplug_slot->private;
109 int retval = 0; 109 int retval = 0;
110 110
111 dbg("%s - physical_slot = %s", __func__, hotplug_slot->name); 111 dbg("%s - physical_slot = %s", __func__, slot_name(slot));
112 112
113 if (controller->ops->set_power) 113 if (controller->ops->set_power)
114 retval = controller->ops->set_power(slot, 1); 114 retval = controller->ops->set_power(slot, 1);
@@ -121,25 +121,23 @@ disable_slot(struct hotplug_slot *hotplug_slot)
121 struct slot *slot = hotplug_slot->private; 121 struct slot *slot = hotplug_slot->private;
122 int retval = 0; 122 int retval = 0;
123 123
124 dbg("%s - physical_slot = %s", __func__, hotplug_slot->name); 124 dbg("%s - physical_slot = %s", __func__, slot_name(slot));
125 125
126 down_write(&list_rwsem); 126 down_write(&list_rwsem);
127 127
128 /* Unconfigure device */ 128 /* Unconfigure device */
129 dbg("%s - unconfiguring slot %s", 129 dbg("%s - unconfiguring slot %s", __func__, slot_name(slot));
130 __func__, slot->hotplug_slot->name);
131 if ((retval = cpci_unconfigure_slot(slot))) { 130 if ((retval = cpci_unconfigure_slot(slot))) {
132 err("%s - could not unconfigure slot %s", 131 err("%s - could not unconfigure slot %s",
133 __func__, slot->hotplug_slot->name); 132 __func__, slot_name(slot));
134 goto disable_error; 133 goto disable_error;
135 } 134 }
136 dbg("%s - finished unconfiguring slot %s", 135 dbg("%s - finished unconfiguring slot %s", __func__, slot_name(slot));
137 __func__, slot->hotplug_slot->name);
138 136
139 /* Clear EXT (by setting it) */ 137 /* Clear EXT (by setting it) */
140 if (cpci_clear_ext(slot)) { 138 if (cpci_clear_ext(slot)) {
141 err("%s - could not clear EXT for slot %s", 139 err("%s - could not clear EXT for slot %s",
142 __func__, slot->hotplug_slot->name); 140 __func__, slot_name(slot));
143 retval = -ENODEV; 141 retval = -ENODEV;
144 goto disable_error; 142 goto disable_error;
145 } 143 }
@@ -214,7 +212,6 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
214 struct slot *slot = hotplug_slot->private; 212 struct slot *slot = hotplug_slot->private;
215 213
216 kfree(slot->hotplug_slot->info); 214 kfree(slot->hotplug_slot->info);
217 kfree(slot->hotplug_slot->name);
218 kfree(slot->hotplug_slot); 215 kfree(slot->hotplug_slot);
219 if (slot->dev) 216 if (slot->dev)
220 pci_dev_put(slot->dev); 217 pci_dev_put(slot->dev);
@@ -222,12 +219,6 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
222} 219}
223 220
224#define SLOT_NAME_SIZE 6 221#define SLOT_NAME_SIZE 6
225static void
226make_slot_name(struct slot *slot)
227{
228 snprintf(slot->hotplug_slot->name,
229 SLOT_NAME_SIZE, "%02x:%02x", slot->bus->number, slot->number);
230}
231 222
232int 223int
233cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last) 224cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last)
@@ -235,7 +226,7 @@ cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last)
235 struct slot *slot; 226 struct slot *slot;
236 struct hotplug_slot *hotplug_slot; 227 struct hotplug_slot *hotplug_slot;
237 struct hotplug_slot_info *info; 228 struct hotplug_slot_info *info;
238 char *name; 229 char name[SLOT_NAME_SIZE];
239 int status = -ENOMEM; 230 int status = -ENOMEM;
240 int i; 231 int i;
241 232
@@ -262,34 +253,31 @@ cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last)
262 goto error_hpslot; 253 goto error_hpslot;
263 hotplug_slot->info = info; 254 hotplug_slot->info = info;
264 255
265 name = kmalloc(SLOT_NAME_SIZE, GFP_KERNEL);
266 if (!name)
267 goto error_info;
268 hotplug_slot->name = name;
269
270 slot->bus = bus; 256 slot->bus = bus;
271 slot->number = i; 257 slot->number = i;
272 slot->devfn = PCI_DEVFN(i, 0); 258 slot->devfn = PCI_DEVFN(i, 0);
273 259
260 snprintf(name, SLOT_NAME_SIZE, "%02x:%02x", bus->number, i);
261
274 hotplug_slot->private = slot; 262 hotplug_slot->private = slot;
275 hotplug_slot->release = &release_slot; 263 hotplug_slot->release = &release_slot;
276 make_slot_name(slot);
277 hotplug_slot->ops = &cpci_hotplug_slot_ops; 264 hotplug_slot->ops = &cpci_hotplug_slot_ops;
278 265
279 /* 266 /*
280 * Initialize the slot info structure with some known 267 * Initialize the slot info structure with some known
281 * good values. 268 * good values.
282 */ 269 */
283 dbg("initializing slot %s", slot->hotplug_slot->name); 270 dbg("initializing slot %s", name);
284 info->power_status = cpci_get_power_status(slot); 271 info->power_status = cpci_get_power_status(slot);
285 info->attention_status = cpci_get_attention_status(slot); 272 info->attention_status = cpci_get_attention_status(slot);
286 273
287 dbg("registering slot %s", slot->hotplug_slot->name); 274 dbg("registering slot %s", name);
288 status = pci_hp_register(slot->hotplug_slot, bus, i); 275 status = pci_hp_register(slot->hotplug_slot, bus, i, name);
289 if (status) { 276 if (status) {
290 err("pci_hp_register failed with error %d", status); 277 err("pci_hp_register failed with error %d", status);
291 goto error_name; 278 goto error_info;
292 } 279 }
280 dbg("slot registered with name: %s", slot_name(slot));
293 281
294 /* Add slot to our internal list */ 282 /* Add slot to our internal list */
295 down_write(&list_rwsem); 283 down_write(&list_rwsem);
@@ -298,8 +286,6 @@ cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last)
298 up_write(&list_rwsem); 286 up_write(&list_rwsem);
299 } 287 }
300 return 0; 288 return 0;
301error_name:
302 kfree(name);
303error_info: 289error_info:
304 kfree(info); 290 kfree(info);
305error_hpslot: 291error_hpslot:
@@ -327,7 +313,7 @@ cpci_hp_unregister_bus(struct pci_bus *bus)
327 list_del(&slot->slot_list); 313 list_del(&slot->slot_list);
328 slots--; 314 slots--;
329 315
330 dbg("deregistering slot %s", slot->hotplug_slot->name); 316 dbg("deregistering slot %s", slot_name(slot));
331 status = pci_hp_deregister(slot->hotplug_slot); 317 status = pci_hp_deregister(slot->hotplug_slot);
332 if (status) { 318 if (status) {
333 err("pci_hp_deregister failed with error %d", 319 err("pci_hp_deregister failed with error %d",
@@ -379,11 +365,10 @@ init_slots(int clear_ins)
379 return -1; 365 return -1;
380 } 366 }
381 list_for_each_entry(slot, &slot_list, slot_list) { 367 list_for_each_entry(slot, &slot_list, slot_list) {
382 dbg("%s - looking at slot %s", 368 dbg("%s - looking at slot %s", __func__, slot_name(slot));
383 __func__, slot->hotplug_slot->name);
384 if (clear_ins && cpci_check_and_clear_ins(slot)) 369 if (clear_ins && cpci_check_and_clear_ins(slot))
385 dbg("%s - cleared INS for slot %s", 370 dbg("%s - cleared INS for slot %s",
386 __func__, slot->hotplug_slot->name); 371 __func__, slot_name(slot));
387 dev = pci_get_slot(slot->bus, PCI_DEVFN(slot->number, 0)); 372 dev = pci_get_slot(slot->bus, PCI_DEVFN(slot->number, 0));
388 if (dev) { 373 if (dev) {
389 if (update_adapter_status(slot->hotplug_slot, 1)) 374 if (update_adapter_status(slot->hotplug_slot, 1))
@@ -414,8 +399,7 @@ check_slots(void)
414 } 399 }
415 extracted = inserted = 0; 400 extracted = inserted = 0;
416 list_for_each_entry(slot, &slot_list, slot_list) { 401 list_for_each_entry(slot, &slot_list, slot_list) {
417 dbg("%s - looking at slot %s", 402 dbg("%s - looking at slot %s", __func__, slot_name(slot));
418 __func__, slot->hotplug_slot->name);
419 if (cpci_check_and_clear_ins(slot)) { 403 if (cpci_check_and_clear_ins(slot)) {
420 /* 404 /*
421 * Some broken hardware (e.g. PLX 9054AB) asserts 405 * Some broken hardware (e.g. PLX 9054AB) asserts
@@ -423,35 +407,34 @@ check_slots(void)
423 */ 407 */
424 if (slot->dev) { 408 if (slot->dev) {
425 warn("slot %s already inserted", 409 warn("slot %s already inserted",
426 slot->hotplug_slot->name); 410 slot_name(slot));
427 inserted++; 411 inserted++;
428 continue; 412 continue;
429 } 413 }
430 414
431 /* Process insertion */ 415 /* Process insertion */
432 dbg("%s - slot %s inserted", 416 dbg("%s - slot %s inserted", __func__, slot_name(slot));
433 __func__, slot->hotplug_slot->name);
434 417
435 /* GSM, debug */ 418 /* GSM, debug */
436 hs_csr = cpci_get_hs_csr(slot); 419 hs_csr = cpci_get_hs_csr(slot);
437 dbg("%s - slot %s HS_CSR (1) = %04x", 420 dbg("%s - slot %s HS_CSR (1) = %04x",
438 __func__, slot->hotplug_slot->name, hs_csr); 421 __func__, slot_name(slot), hs_csr);
439 422
440 /* Configure device */ 423 /* Configure device */
441 dbg("%s - configuring slot %s", 424 dbg("%s - configuring slot %s",
442 __func__, slot->hotplug_slot->name); 425 __func__, slot_name(slot));
443 if (cpci_configure_slot(slot)) { 426 if (cpci_configure_slot(slot)) {
444 err("%s - could not configure slot %s", 427 err("%s - could not configure slot %s",
445 __func__, slot->hotplug_slot->name); 428 __func__, slot_name(slot));
446 continue; 429 continue;
447 } 430 }
448 dbg("%s - finished configuring slot %s", 431 dbg("%s - finished configuring slot %s",
449 __func__, slot->hotplug_slot->name); 432 __func__, slot_name(slot));
450 433
451 /* GSM, debug */ 434 /* GSM, debug */
452 hs_csr = cpci_get_hs_csr(slot); 435 hs_csr = cpci_get_hs_csr(slot);
453 dbg("%s - slot %s HS_CSR (2) = %04x", 436 dbg("%s - slot %s HS_CSR (2) = %04x",
454 __func__, slot->hotplug_slot->name, hs_csr); 437 __func__, slot_name(slot), hs_csr);
455 438
456 if (update_latch_status(slot->hotplug_slot, 1)) 439 if (update_latch_status(slot->hotplug_slot, 1))
457 warn("failure to update latch file"); 440 warn("failure to update latch file");
@@ -464,18 +447,18 @@ check_slots(void)
464 /* GSM, debug */ 447 /* GSM, debug */
465 hs_csr = cpci_get_hs_csr(slot); 448 hs_csr = cpci_get_hs_csr(slot);
466 dbg("%s - slot %s HS_CSR (3) = %04x", 449 dbg("%s - slot %s HS_CSR (3) = %04x",
467 __func__, slot->hotplug_slot->name, hs_csr); 450 __func__, slot_name(slot), hs_csr);
468 451
469 inserted++; 452 inserted++;
470 } else if (cpci_check_ext(slot)) { 453 } else if (cpci_check_ext(slot)) {
471 /* Process extraction request */ 454 /* Process extraction request */
472 dbg("%s - slot %s extracted", 455 dbg("%s - slot %s extracted",
473 __func__, slot->hotplug_slot->name); 456 __func__, slot_name(slot));
474 457
475 /* GSM, debug */ 458 /* GSM, debug */
476 hs_csr = cpci_get_hs_csr(slot); 459 hs_csr = cpci_get_hs_csr(slot);
477 dbg("%s - slot %s HS_CSR = %04x", 460 dbg("%s - slot %s HS_CSR = %04x",
478 __func__, slot->hotplug_slot->name, hs_csr); 461 __func__, slot_name(slot), hs_csr);
479 462
480 if (!slot->extracting) { 463 if (!slot->extracting) {
481 if (update_latch_status(slot->hotplug_slot, 0)) { 464 if (update_latch_status(slot->hotplug_slot, 0)) {
@@ -493,7 +476,7 @@ check_slots(void)
493 * bother trying to tell the driver or not? 476 * bother trying to tell the driver or not?
494 */ 477 */
495 err("card in slot %s was improperly removed", 478 err("card in slot %s was improperly removed",
496 slot->hotplug_slot->name); 479 slot_name(slot));
497 if (update_adapter_status(slot->hotplug_slot, 0)) 480 if (update_adapter_status(slot->hotplug_slot, 0))
498 warn("failure to update adapter file"); 481 warn("failure to update adapter file");
499 slot->extracting = 0; 482 slot->extracting = 0;
diff --git a/drivers/pci/hotplug/cpci_hotplug_pci.c b/drivers/pci/hotplug/cpci_hotplug_pci.c
index df82b95e2874..829c327cfb5e 100644
--- a/drivers/pci/hotplug/cpci_hotplug_pci.c
+++ b/drivers/pci/hotplug/cpci_hotplug_pci.c
@@ -209,7 +209,7 @@ int cpci_led_on(struct slot* slot)
209 hs_cap + 2, 209 hs_cap + 2,
210 hs_csr)) { 210 hs_csr)) {
211 err("Could not set LOO for slot %s", 211 err("Could not set LOO for slot %s",
212 slot->hotplug_slot->name); 212 hotplug_slot_name(slot->hotplug_slot));
213 return -ENODEV; 213 return -ENODEV;
214 } 214 }
215 } 215 }
@@ -238,7 +238,7 @@ int cpci_led_off(struct slot* slot)
238 hs_cap + 2, 238 hs_cap + 2,
239 hs_csr)) { 239 hs_csr)) {
240 err("Could not clear LOO for slot %s", 240 err("Could not clear LOO for slot %s",
241 slot->hotplug_slot->name); 241 hotplug_slot_name(slot->hotplug_slot));
242 return -ENODEV; 242 return -ENODEV;
243 } 243 }
244 } 244 }
diff --git a/drivers/pci/hotplug/cpqphp.h b/drivers/pci/hotplug/cpqphp.h
index b1decfa88b7a..afaf8f69f73e 100644
--- a/drivers/pci/hotplug/cpqphp.h
+++ b/drivers/pci/hotplug/cpqphp.h
@@ -449,6 +449,11 @@ extern u8 cpqhp_disk_irq;
449 449
450/* inline functions */ 450/* inline functions */
451 451
452static inline char *slot_name(struct slot *slot)
453{
454 return hotplug_slot_name(slot->hotplug_slot);
455}
456
452/* 457/*
453 * return_resource 458 * return_resource
454 * 459 *
@@ -696,14 +701,6 @@ static inline int get_presence_status(struct controller *ctrl, struct slot *slot
696 return presence_save; 701 return presence_save;
697} 702}
698 703
699#define SLOT_NAME_SIZE 10
700
701static inline void make_slot_name(char *buffer, int buffer_size, struct slot *slot)
702{
703 snprintf(buffer, buffer_size, "%d", slot->number);
704}
705
706
707static inline int wait_for_ctrl_irq(struct controller *ctrl) 704static inline int wait_for_ctrl_irq(struct controller *ctrl)
708{ 705{
709 DECLARE_WAITQUEUE(wait, current); 706 DECLARE_WAITQUEUE(wait, current);
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index 54defec51d08..8514c3a1746a 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -315,14 +315,15 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
315{ 315{
316 struct slot *slot = hotplug_slot->private; 316 struct slot *slot = hotplug_slot->private;
317 317
318 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 318 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
319 319
320 kfree(slot->hotplug_slot->info); 320 kfree(slot->hotplug_slot->info);
321 kfree(slot->hotplug_slot->name);
322 kfree(slot->hotplug_slot); 321 kfree(slot->hotplug_slot);
323 kfree(slot); 322 kfree(slot);
324} 323}
325 324
325#define SLOT_NAME_SIZE 10
326
326static int ctrl_slot_setup(struct controller *ctrl, 327static int ctrl_slot_setup(struct controller *ctrl,
327 void __iomem *smbios_start, 328 void __iomem *smbios_start,
328 void __iomem *smbios_table) 329 void __iomem *smbios_table)
@@ -335,6 +336,7 @@ static int ctrl_slot_setup(struct controller *ctrl,
335 u8 slot_number; 336 u8 slot_number;
336 u8 ctrl_slot; 337 u8 ctrl_slot;
337 u32 tempdword; 338 u32 tempdword;
339 char name[SLOT_NAME_SIZE];
338 void __iomem *slot_entry= NULL; 340 void __iomem *slot_entry= NULL;
339 int result = -ENOMEM; 341 int result = -ENOMEM;
340 342
@@ -363,16 +365,12 @@ static int ctrl_slot_setup(struct controller *ctrl,
363 if (!hotplug_slot->info) 365 if (!hotplug_slot->info)
364 goto error_hpslot; 366 goto error_hpslot;
365 hotplug_slot_info = hotplug_slot->info; 367 hotplug_slot_info = hotplug_slot->info;
366 hotplug_slot->name = kmalloc(SLOT_NAME_SIZE, GFP_KERNEL);
367
368 if (!hotplug_slot->name)
369 goto error_info;
370 368
371 slot->ctrl = ctrl; 369 slot->ctrl = ctrl;
372 slot->bus = ctrl->bus; 370 slot->bus = ctrl->bus;
373 slot->device = slot_device; 371 slot->device = slot_device;
374 slot->number = slot_number; 372 slot->number = slot_number;
375 dbg("slot->number = %d\n", slot->number); 373 dbg("slot->number = %u\n", slot->number);
376 374
377 slot_entry = get_SMBIOS_entry(smbios_start, smbios_table, 9, 375 slot_entry = get_SMBIOS_entry(smbios_start, smbios_table, 9,
378 slot_entry); 376 slot_entry);
@@ -418,9 +416,9 @@ static int ctrl_slot_setup(struct controller *ctrl,
418 /* register this slot with the hotplug pci core */ 416 /* register this slot with the hotplug pci core */
419 hotplug_slot->release = &release_slot; 417 hotplug_slot->release = &release_slot;
420 hotplug_slot->private = slot; 418 hotplug_slot->private = slot;
421 make_slot_name(hotplug_slot->name, SLOT_NAME_SIZE, slot); 419 snprintf(name, SLOT_NAME_SIZE, "%u", slot->number);
422 hotplug_slot->ops = &cpqphp_hotplug_slot_ops; 420 hotplug_slot->ops = &cpqphp_hotplug_slot_ops;
423 421
424 hotplug_slot_info->power_status = get_slot_enabled(ctrl, slot); 422 hotplug_slot_info->power_status = get_slot_enabled(ctrl, slot);
425 hotplug_slot_info->attention_status = 423 hotplug_slot_info->attention_status =
426 cpq_get_attention_status(ctrl, slot); 424 cpq_get_attention_status(ctrl, slot);
@@ -435,11 +433,12 @@ static int ctrl_slot_setup(struct controller *ctrl,
435 slot->number, ctrl->slot_device_offset, 433 slot->number, ctrl->slot_device_offset,
436 slot_number); 434 slot_number);
437 result = pci_hp_register(hotplug_slot, 435 result = pci_hp_register(hotplug_slot,
438 ctrl->pci_dev->subordinate, 436 ctrl->pci_dev->bus,
439 slot->device); 437 slot->device,
438 name);
440 if (result) { 439 if (result) {
441 err("pci_hp_register failed with error %d\n", result); 440 err("pci_hp_register failed with error %d\n", result);
442 goto error_name; 441 goto error_info;
443 } 442 }
444 443
445 slot->next = ctrl->slot; 444 slot->next = ctrl->slot;
@@ -451,8 +450,6 @@ static int ctrl_slot_setup(struct controller *ctrl,
451 } 450 }
452 451
453 return 0; 452 return 0;
454error_name:
455 kfree(hotplug_slot->name);
456error_info: 453error_info:
457 kfree(hotplug_slot_info); 454 kfree(hotplug_slot_info);
458error_hpslot: 455error_hpslot:
@@ -638,7 +635,7 @@ static int set_attention_status (struct hotplug_slot *hotplug_slot, u8 status)
638 u8 device; 635 u8 device;
639 u8 function; 636 u8 function;
640 637
641 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 638 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
642 639
643 if (cpqhp_get_bus_dev(ctrl, &bus, &devfn, slot->number) == -1) 640 if (cpqhp_get_bus_dev(ctrl, &bus, &devfn, slot->number) == -1)
644 return -ENODEV; 641 return -ENODEV;
@@ -665,7 +662,7 @@ static int process_SI(struct hotplug_slot *hotplug_slot)
665 u8 device; 662 u8 device;
666 u8 function; 663 u8 function;
667 664
668 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 665 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
669 666
670 if (cpqhp_get_bus_dev(ctrl, &bus, &devfn, slot->number) == -1) 667 if (cpqhp_get_bus_dev(ctrl, &bus, &devfn, slot->number) == -1)
671 return -ENODEV; 668 return -ENODEV;
@@ -697,7 +694,7 @@ static int process_SS(struct hotplug_slot *hotplug_slot)
697 u8 device; 694 u8 device;
698 u8 function; 695 u8 function;
699 696
700 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 697 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
701 698
702 if (cpqhp_get_bus_dev(ctrl, &bus, &devfn, slot->number) == -1) 699 if (cpqhp_get_bus_dev(ctrl, &bus, &devfn, slot->number) == -1)
703 return -ENODEV; 700 return -ENODEV;
@@ -720,7 +717,7 @@ static int hardware_test(struct hotplug_slot *hotplug_slot, u32 value)
720 struct slot *slot = hotplug_slot->private; 717 struct slot *slot = hotplug_slot->private;
721 struct controller *ctrl = slot->ctrl; 718 struct controller *ctrl = slot->ctrl;
722 719
723 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 720 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
724 721
725 return cpqhp_hardware_test(ctrl, value); 722 return cpqhp_hardware_test(ctrl, value);
726} 723}
@@ -731,7 +728,7 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
731 struct slot *slot = hotplug_slot->private; 728 struct slot *slot = hotplug_slot->private;
732 struct controller *ctrl = slot->ctrl; 729 struct controller *ctrl = slot->ctrl;
733 730
734 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 731 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
735 732
736 *value = get_slot_enabled(ctrl, slot); 733 *value = get_slot_enabled(ctrl, slot);
737 return 0; 734 return 0;
@@ -742,7 +739,7 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
742 struct slot *slot = hotplug_slot->private; 739 struct slot *slot = hotplug_slot->private;
743 struct controller *ctrl = slot->ctrl; 740 struct controller *ctrl = slot->ctrl;
744 741
745 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 742 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
746 743
747 *value = cpq_get_attention_status(ctrl, slot); 744 *value = cpq_get_attention_status(ctrl, slot);
748 return 0; 745 return 0;
@@ -753,7 +750,7 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
753 struct slot *slot = hotplug_slot->private; 750 struct slot *slot = hotplug_slot->private;
754 struct controller *ctrl = slot->ctrl; 751 struct controller *ctrl = slot->ctrl;
755 752
756 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 753 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
757 754
758 *value = cpq_get_latch_status(ctrl, slot); 755 *value = cpq_get_latch_status(ctrl, slot);
759 756
@@ -765,7 +762,7 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
765 struct slot *slot = hotplug_slot->private; 762 struct slot *slot = hotplug_slot->private;
766 struct controller *ctrl = slot->ctrl; 763 struct controller *ctrl = slot->ctrl;
767 764
768 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 765 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
769 766
770 *value = get_presence_status(ctrl, slot); 767 *value = get_presence_status(ctrl, slot);
771 768
@@ -777,7 +774,7 @@ static int get_max_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_sp
777 struct slot *slot = hotplug_slot->private; 774 struct slot *slot = hotplug_slot->private;
778 struct controller *ctrl = slot->ctrl; 775 struct controller *ctrl = slot->ctrl;
779 776
780 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 777 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
781 778
782 *value = ctrl->speed_capability; 779 *value = ctrl->speed_capability;
783 780
@@ -789,7 +786,7 @@ static int get_cur_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_sp
789 struct slot *slot = hotplug_slot->private; 786 struct slot *slot = hotplug_slot->private;
790 struct controller *ctrl = slot->ctrl; 787 struct controller *ctrl = slot->ctrl;
791 788
792 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 789 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
793 790
794 *value = ctrl->speed; 791 *value = ctrl->speed;
795 792
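
[Editorial sketch] In the cpqphp_core.c hunks, ctrl_slot_setup() no longer kmallocs hotplug_slot->name; it formats the name into a stack buffer and hands it to the four-argument pci_hp_register(), which copies it when creating the pci_slot, so the error path no longer needs a separate kfree of the name. A rough userspace model of that calling convention is sketched below; demo_hp_register() is an invented stand-in for pci_hp_register() and simply copies the name, which is the property the driver now relies on.

    #include <stdio.h>
    #include <string.h>

    #define SLOT_NAME_SIZE 10                      /* mirrors the driver's definition */

    struct demo_slot {
            unsigned int number;
            char registered_name[SLOT_NAME_SIZE];  /* owned by the "core" after registration */
    };

    /* stand-in for pci_hp_register(slot, bus, slot_nr, name): the callee copies
     * the name, so the caller may pass a short-lived stack buffer */
    static int demo_hp_register(struct demo_slot *slot, const char *name)
    {
            snprintf(slot->registered_name, sizeof(slot->registered_name), "%s", name);
            return 0;
    }

    int main(void)
    {
            struct demo_slot slot = { .number = 4 };
            char name[SLOT_NAME_SIZE];

            snprintf(name, SLOT_NAME_SIZE, "%u", slot.number);  /* "%u": number is unsigned */
            if (demo_hp_register(&slot, name))
                    fprintf(stderr, "registration failed\n");
            printf("registered as %s\n", slot.registered_name);
            return 0;
    }
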
diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c
index ef041ca91c27..a60a25290995 100644
--- a/drivers/pci/hotplug/cpqphp_ctrl.c
+++ b/drivers/pci/hotplug/cpqphp_ctrl.c
@@ -1139,7 +1139,7 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_
1139 for(slot = ctrl->slot; slot; slot = slot->next) { 1139 for(slot = ctrl->slot; slot; slot = slot->next) {
1140 if (slot->device == (hp_slot + ctrl->slot_device_offset)) 1140 if (slot->device == (hp_slot + ctrl->slot_device_offset))
1141 continue; 1141 continue;
1142 if (!slot->hotplug_slot && !slot->hotplug_slot->info) 1142 if (!slot->hotplug_slot || !slot->hotplug_slot->info)
1143 continue; 1143 continue;
1144 if (slot->hotplug_slot->info->adapter_status == 0) 1144 if (slot->hotplug_slot->info->adapter_status == 0)
1145 continue; 1145 continue;
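
[Editorial sketch] The one-line cpqphp_ctrl.c change above is a genuine bug fix: with '&&', the right-hand operand !slot->hotplug_slot->info was evaluated exactly when slot->hotplug_slot was NULL, so the guard dereferenced the pointer it was meant to protect, and a slot with a valid hotplug_slot but missing info was never skipped. The short standalone program below demonstrates the short-circuit behaviour of the corrected '||' form (the demo types are invented for illustration).

    #include <stdio.h>
    #include <stddef.h>

    struct demo_info { int adapter_status; };
    struct demo_hotplug_slot { struct demo_info *info; };

    static int should_skip(const struct demo_hotplug_slot *hs)
    {
            /* '||' short-circuits: when hs is NULL the right-hand side is never
             * evaluated, so there is no NULL dereference, and a non-NULL hs
             * with a missing info is correctly skipped as well. */
            return !hs || !hs->info;
    }

    int main(void)
    {
            struct demo_info info = { .adapter_status = 1 };
            struct demo_hotplug_slot hs = { .info = &info };
            struct demo_hotplug_slot bare = { .info = NULL };

            printf("%d %d %d\n", should_skip(NULL), should_skip(&bare), should_skip(&hs));
            return 0;   /* prints: 1 1 0 */
    }
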
diff --git a/drivers/pci/hotplug/fakephp.c b/drivers/pci/hotplug/fakephp.c
index 146ca9cd1567..3a2637a00934 100644
--- a/drivers/pci/hotplug/fakephp.c
+++ b/drivers/pci/hotplug/fakephp.c
@@ -66,10 +66,10 @@ struct dummy_slot {
66 struct pci_dev *dev; 66 struct pci_dev *dev;
67 struct work_struct remove_work; 67 struct work_struct remove_work;
68 unsigned long removed; 68 unsigned long removed;
69 char name[8];
70}; 69};
71 70
72static int debug; 71static int debug;
72static int dup_slots;
73static LIST_HEAD(slot_list); 73static LIST_HEAD(slot_list);
74static struct workqueue_struct *dummyphp_wq; 74static struct workqueue_struct *dummyphp_wq;
75 75
@@ -96,10 +96,13 @@ static void dummy_release(struct hotplug_slot *slot)
96 kfree(dslot); 96 kfree(dslot);
97} 97}
98 98
99#define SLOT_NAME_SIZE 8
100
99static int add_slot(struct pci_dev *dev) 101static int add_slot(struct pci_dev *dev)
100{ 102{
101 struct dummy_slot *dslot; 103 struct dummy_slot *dslot;
102 struct hotplug_slot *slot; 104 struct hotplug_slot *slot;
105 char name[SLOT_NAME_SIZE];
103 int retval = -ENOMEM; 106 int retval = -ENOMEM;
104 static int count = 1; 107 static int count = 1;
105 108
@@ -119,19 +122,22 @@ static int add_slot(struct pci_dev *dev)
119 if (!dslot) 122 if (!dslot)
120 goto error_info; 123 goto error_info;
121 124
122 slot->name = dslot->name; 125 if (dup_slots)
123 snprintf(slot->name, sizeof(dslot->name), "fake%d", count++); 126 snprintf(name, SLOT_NAME_SIZE, "fake");
124 dbg("slot->name = %s\n", slot->name); 127 else
128 snprintf(name, SLOT_NAME_SIZE, "fake%d", count++);
129 dbg("slot->name = %s\n", name);
125 slot->ops = &dummy_hotplug_slot_ops; 130 slot->ops = &dummy_hotplug_slot_ops;
126 slot->release = &dummy_release; 131 slot->release = &dummy_release;
127 slot->private = dslot; 132 slot->private = dslot;
128 133
129 retval = pci_hp_register(slot, dev->bus, PCI_SLOT(dev->devfn)); 134 retval = pci_hp_register(slot, dev->bus, PCI_SLOT(dev->devfn), name);
130 if (retval) { 135 if (retval) {
131 err("pci_hp_register failed with error %d\n", retval); 136 err("pci_hp_register failed with error %d\n", retval);
132 goto error_dslot; 137 goto error_dslot;
133 } 138 }
134 139
140 dbg("slot->name = %s\n", hotplug_slot_name(slot));
135 dslot->slot = slot; 141 dslot->slot = slot;
136 dslot->dev = pci_dev_get(dev); 142 dslot->dev = pci_dev_get(dev);
137 list_add (&dslot->node, &slot_list); 143 list_add (&dslot->node, &slot_list);
@@ -167,10 +173,11 @@ static void remove_slot(struct dummy_slot *dslot)
167{ 173{
168 int retval; 174 int retval;
169 175
170 dbg("removing slot %s\n", dslot->slot->name); 176 dbg("removing slot %s\n", hotplug_slot_name(dslot->slot));
171 retval = pci_hp_deregister(dslot->slot); 177 retval = pci_hp_deregister(dslot->slot);
172 if (retval) 178 if (retval)
173 err("Problem unregistering a slot %s\n", dslot->slot->name); 179 err("Problem unregistering a slot %s\n",
180 hotplug_slot_name(dslot->slot));
174} 181}
175 182
176/* called from the single-threaded workqueue handler to remove a slot */ 183/* called from the single-threaded workqueue handler to remove a slot */
@@ -308,7 +315,7 @@ static int disable_slot(struct hotplug_slot *slot)
308 return -ENODEV; 315 return -ENODEV;
309 dslot = slot->private; 316 dslot = slot->private;
310 317
311 dbg("%s - physical_slot = %s\n", __func__, slot->name); 318 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot_name(slot));
312 319
313 for (func = 7; func >= 0; func--) { 320 for (func = 7; func >= 0; func--) {
314 dev = pci_get_slot(dslot->dev->bus, dslot->dev->devfn + func); 321 dev = pci_get_slot(dslot->dev->bus, dslot->dev->devfn + func);
@@ -373,4 +380,5 @@ MODULE_DESCRIPTION(DRIVER_DESC);
373MODULE_LICENSE("GPL"); 380MODULE_LICENSE("GPL");
374module_param(debug, bool, S_IRUGO | S_IWUSR); 381module_param(debug, bool, S_IRUGO | S_IWUSR);
375MODULE_PARM_DESC(debug, "Debugging mode enabled or not"); 382MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
376 383module_param(dup_slots, bool, S_IRUGO | S_IWUSR);
384MODULE_PARM_DESC(dup_slots, "Force duplicate slot names for debugging");
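
[Editorial sketch] fakephp gains a dup_slots module parameter whose only purpose is to register every fake slot under the same name, "fake", so the name-collision handling in the hotplug core can be exercised on demand. A tiny userspace sketch of that naming decision follows; it is illustrative only and not a kernel module.

    #include <stdio.h>

    #define SLOT_NAME_SIZE 8

    static int dup_slots;           /* module_param(dup_slots, bool, ...) in the driver */

    static void make_fake_name(char *name, int *count)
    {
            if (dup_slots)
                    snprintf(name, SLOT_NAME_SIZE, "fake");          /* every slot collides */
            else
                    snprintf(name, SLOT_NAME_SIZE, "fake%d", (*count)++);
    }

    int main(void)
    {
            char a[SLOT_NAME_SIZE], b[SLOT_NAME_SIZE];
            int count = 1;

            dup_slots = 1;
            make_fake_name(a, &count);
            make_fake_name(b, &count);
            printf("%s %s\n", a, b);    /* "fake fake": deliberate duplicate */
            return 0;
    }
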
diff --git a/drivers/pci/hotplug/ibmphp.h b/drivers/pci/hotplug/ibmphp.h
index 612d96301509..a8d391a4957d 100644
--- a/drivers/pci/hotplug/ibmphp.h
+++ b/drivers/pci/hotplug/ibmphp.h
@@ -707,17 +707,16 @@ struct slot {
707 u8 device; 707 u8 device;
708 u8 number; 708 u8 number;
709 u8 real_physical_slot_num; 709 u8 real_physical_slot_num;
710 char name[100];
711 u32 capabilities; 710 u32 capabilities;
712 u8 supported_speed; 711 u8 supported_speed;
713 u8 supported_bus_mode; 712 u8 supported_bus_mode;
713 u8 flag; /* this is for disable slot and polling */
714 u8 ctlr_index;
714 struct hotplug_slot *hotplug_slot; 715 struct hotplug_slot *hotplug_slot;
715 struct controller *ctrl; 716 struct controller *ctrl;
716 struct pci_func *func; 717 struct pci_func *func;
717 u8 irq[4]; 718 u8 irq[4];
718 u8 flag; /* this is for disable slot and polling */
719 int bit_mode; /* 0 = 32, 1 = 64 */ 719 int bit_mode; /* 0 = 32, 1 = 64 */
720 u8 ctlr_index;
721 struct bus_info *bus_on; 720 struct bus_info *bus_on;
722 struct list_head ibm_slot_list; 721 struct list_head ibm_slot_list;
723 u8 status; 722 u8 status;
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c
index c892daae74d6..633e743442ac 100644
--- a/drivers/pci/hotplug/ibmphp_core.c
+++ b/drivers/pci/hotplug/ibmphp_core.c
@@ -1402,10 +1402,6 @@ static int __init ibmphp_init(void)
1402 goto error; 1402 goto error;
1403 } 1403 }
1404 1404
1405 /* lock ourselves into memory with a module
1406 * count of -1 so that no one can unload us. */
1407 module_put(THIS_MODULE);
1408
1409exit: 1405exit:
1410 return rc; 1406 return rc;
1411 1407
@@ -1423,4 +1419,3 @@ static void __exit ibmphp_exit(void)
1423} 1419}
1424 1420
1425module_init(ibmphp_init); 1421module_init(ibmphp_init);
1426module_exit(ibmphp_exit);
diff --git a/drivers/pci/hotplug/ibmphp_ebda.c b/drivers/pci/hotplug/ibmphp_ebda.c
index 8cfd1c4926c8..c1abac8ab5c3 100644
--- a/drivers/pci/hotplug/ibmphp_ebda.c
+++ b/drivers/pci/hotplug/ibmphp_ebda.c
@@ -587,11 +587,14 @@ static u8 calculate_first_slot (u8 slot_num)
587 return first_slot + 1; 587 return first_slot + 1;
588 588
589} 589}
590
591#define SLOT_NAME_SIZE 30
592
590static char *create_file_name (struct slot * slot_cur) 593static char *create_file_name (struct slot * slot_cur)
591{ 594{
592 struct opt_rio *opt_vg_ptr = NULL; 595 struct opt_rio *opt_vg_ptr = NULL;
593 struct opt_rio_lo *opt_lo_ptr = NULL; 596 struct opt_rio_lo *opt_lo_ptr = NULL;
594 static char str[30]; 597 static char str[SLOT_NAME_SIZE];
595 int which = 0; /* rxe = 1, chassis = 0 */ 598 int which = 0; /* rxe = 1, chassis = 0 */
596 u8 number = 1; /* either chassis or rxe # */ 599 u8 number = 1; /* either chassis or rxe # */
597 u8 first_slot = 1; 600 u8 first_slot = 1;
@@ -703,7 +706,6 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
703 706
704 slot = hotplug_slot->private; 707 slot = hotplug_slot->private;
705 kfree(slot->hotplug_slot->info); 708 kfree(slot->hotplug_slot->info);
706 kfree(slot->hotplug_slot->name);
707 kfree(slot->hotplug_slot); 709 kfree(slot->hotplug_slot);
708 slot->ctrl = NULL; 710 slot->ctrl = NULL;
709 slot->bus_on = NULL; 711 slot->bus_on = NULL;
@@ -734,6 +736,7 @@ static int __init ebda_rsrc_controller (void)
734 struct bus_info *bus_info_ptr1, *bus_info_ptr2; 736 struct bus_info *bus_info_ptr1, *bus_info_ptr2;
735 int rc; 737 int rc;
736 struct slot *tmp_slot; 738 struct slot *tmp_slot;
739 char name[SLOT_NAME_SIZE];
737 740
738 addr = hpc_list_ptr->phys_addr; 741 addr = hpc_list_ptr->phys_addr;
739 for (ctlr = 0; ctlr < hpc_list_ptr->num_ctlrs; ctlr++) { 742 for (ctlr = 0; ctlr < hpc_list_ptr->num_ctlrs; ctlr++) {
@@ -897,12 +900,6 @@ static int __init ebda_rsrc_controller (void)
897 goto error_no_hp_info; 900 goto error_no_hp_info;
898 } 901 }
899 902
900 hp_slot_ptr->name = kmalloc(30, GFP_KERNEL);
901 if (!hp_slot_ptr->name) {
902 rc = -ENOMEM;
903 goto error_no_hp_name;
904 }
905
906 tmp_slot = kzalloc(sizeof(*tmp_slot), GFP_KERNEL); 903 tmp_slot = kzalloc(sizeof(*tmp_slot), GFP_KERNEL);
907 if (!tmp_slot) { 904 if (!tmp_slot) {
908 rc = -ENOMEM; 905 rc = -ENOMEM;
@@ -964,9 +961,9 @@ static int __init ebda_rsrc_controller (void)
964 } /* each hpc */ 961 } /* each hpc */
965 962
966 list_for_each_entry(tmp_slot, &ibmphp_slot_head, ibm_slot_list) { 963 list_for_each_entry(tmp_slot, &ibmphp_slot_head, ibm_slot_list) {
967 snprintf (tmp_slot->hotplug_slot->name, 30, "%s", create_file_name (tmp_slot)); 964 snprintf(name, SLOT_NAME_SIZE, "%s", create_file_name(tmp_slot));
968 pci_hp_register(tmp_slot->hotplug_slot, 965 pci_hp_register(tmp_slot->hotplug_slot,
969 pci_find_bus(0, tmp_slot->bus), tmp_slot->device); 966 pci_find_bus(0, tmp_slot->bus), tmp_slot->device, name);
970 } 967 }
971 968
972 print_ebda_hpc (); 969 print_ebda_hpc ();
@@ -976,8 +973,6 @@ static int __init ebda_rsrc_controller (void)
976error: 973error:
977 kfree (hp_slot_ptr->private); 974 kfree (hp_slot_ptr->private);
978error_no_slot: 975error_no_slot:
979 kfree (hp_slot_ptr->name);
980error_no_hp_name:
981 kfree (hp_slot_ptr->info); 976 kfree (hp_slot_ptr->info);
982error_no_hp_info: 977error_no_hp_info:
983 kfree (hp_slot_ptr); 978 kfree (hp_slot_ptr);
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index 2e6c4474644e..535fce0f07f9 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -37,6 +37,7 @@
37#include <linux/init.h> 37#include <linux/init.h>
38#include <linux/mount.h> 38#include <linux/mount.h>
39#include <linux/namei.h> 39#include <linux/namei.h>
40#include <linux/mutex.h>
40#include <linux/pci.h> 41#include <linux/pci.h>
41#include <linux/pci_hotplug.h> 42#include <linux/pci_hotplug.h>
42#include <asm/uaccess.h> 43#include <asm/uaccess.h>
@@ -61,7 +62,7 @@ static int debug;
61////////////////////////////////////////////////////////////////// 62//////////////////////////////////////////////////////////////////
62 63
63static LIST_HEAD(pci_hotplug_slot_list); 64static LIST_HEAD(pci_hotplug_slot_list);
64static DEFINE_SPINLOCK(pci_hotplug_slot_list_lock); 65static DEFINE_MUTEX(pci_hp_mutex);
65 66
66/* these strings match up with the values in pci_bus_speed */ 67/* these strings match up with the values in pci_bus_speed */
67static char *pci_bus_speed_strings[] = { 68static char *pci_bus_speed_strings[] = {
@@ -530,16 +531,12 @@ static struct hotplug_slot *get_slot_from_name (const char *name)
530 struct hotplug_slot *slot; 531 struct hotplug_slot *slot;
531 struct list_head *tmp; 532 struct list_head *tmp;
532 533
533 spin_lock(&pci_hotplug_slot_list_lock);
534 list_for_each (tmp, &pci_hotplug_slot_list) { 534 list_for_each (tmp, &pci_hotplug_slot_list) {
535 slot = list_entry (tmp, struct hotplug_slot, slot_list); 535 slot = list_entry (tmp, struct hotplug_slot, slot_list);
536 if (strcmp(slot->name, name) == 0) 536 if (strcmp(hotplug_slot_name(slot), name) == 0)
537 goto out; 537 return slot;
538 } 538 }
539 slot = NULL; 539 return NULL;
540out:
541 spin_unlock(&pci_hotplug_slot_list_lock);
542 return slot;
543} 540}
544 541
545/** 542/**
@@ -547,13 +544,15 @@ out:
547 * @bus: bus this slot is on 544 * @bus: bus this slot is on
548 * @slot: pointer to the &struct hotplug_slot to register 545 * @slot: pointer to the &struct hotplug_slot to register
549 * @slot_nr: slot number 546 * @slot_nr: slot number
547 * @name: name registered with kobject core
550 * 548 *
551 * Registers a hotplug slot with the pci hotplug subsystem, which will allow 549 * Registers a hotplug slot with the pci hotplug subsystem, which will allow
552 * userspace interaction to the slot. 550 * userspace interaction to the slot.
553 * 551 *
554 * Returns 0 if successful, anything else for an error. 552 * Returns 0 if successful, anything else for an error.
555 */ 553 */
556int pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus, int slot_nr) 554int pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus, int slot_nr,
555 const char *name)
557{ 556{
558 int result; 557 int result;
559 struct pci_slot *pci_slot; 558 struct pci_slot *pci_slot;
@@ -568,48 +567,29 @@ int pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus, int slot_nr)
568 return -EINVAL; 567 return -EINVAL;
569 } 568 }
570 569
571 /* Check if we have already registered a slot with the same name. */ 570 mutex_lock(&pci_hp_mutex);
572 if (get_slot_from_name(slot->name))
573 return -EEXIST;
574 571
575 /* 572 /*
576 * No problems if we call this interface from both ACPI_PCI_SLOT 573 * No problems if we call this interface from both ACPI_PCI_SLOT
577 * driver and call it here again. If we've already created the 574 * driver and call it here again. If we've already created the
578 * pci_slot, the interface will simply bump the refcount. 575 * pci_slot, the interface will simply bump the refcount.
579 */ 576 */
580 pci_slot = pci_create_slot(bus, slot_nr, slot->name); 577 pci_slot = pci_create_slot(bus, slot_nr, name, slot);
581 if (IS_ERR(pci_slot)) 578 if (IS_ERR(pci_slot)) {
582 return PTR_ERR(pci_slot); 579 result = PTR_ERR(pci_slot);
583 580 goto out;
584 if (pci_slot->hotplug) {
585 dbg("%s: already claimed\n", __func__);
586 pci_destroy_slot(pci_slot);
587 return -EBUSY;
588 } 581 }
589 582
590 slot->pci_slot = pci_slot; 583 slot->pci_slot = pci_slot;
591 pci_slot->hotplug = slot; 584 pci_slot->hotplug = slot;
592 585
593 /*
594 * Allow pcihp drivers to override the ACPI_PCI_SLOT name.
595 */
596 if (strcmp(kobject_name(&pci_slot->kobj), slot->name)) {
597 result = kobject_rename(&pci_slot->kobj, slot->name);
598 if (result) {
599 pci_destroy_slot(pci_slot);
600 return result;
601 }
602 }
603
604 spin_lock(&pci_hotplug_slot_list_lock);
605 list_add(&slot->slot_list, &pci_hotplug_slot_list); 586 list_add(&slot->slot_list, &pci_hotplug_slot_list);
606 spin_unlock(&pci_hotplug_slot_list_lock);
607 587
608 result = fs_add_slot(pci_slot); 588 result = fs_add_slot(pci_slot);
609 kobject_uevent(&pci_slot->kobj, KOBJ_ADD); 589 kobject_uevent(&pci_slot->kobj, KOBJ_ADD);
610 dbg("Added slot %s to the list\n", slot->name); 590 dbg("Added slot %s to the list\n", name);
611 591out:
612 592 mutex_unlock(&pci_hp_mutex);
613 return result; 593 return result;
614} 594}
615 595
@@ -630,21 +610,23 @@ int pci_hp_deregister(struct hotplug_slot *hotplug)
630 if (!hotplug) 610 if (!hotplug)
631 return -ENODEV; 611 return -ENODEV;
632 612
633 temp = get_slot_from_name(hotplug->name); 613 mutex_lock(&pci_hp_mutex);
634 if (temp != hotplug) 614 temp = get_slot_from_name(hotplug_slot_name(hotplug));
615 if (temp != hotplug) {
616 mutex_unlock(&pci_hp_mutex);
635 return -ENODEV; 617 return -ENODEV;
618 }
636 619
637 spin_lock(&pci_hotplug_slot_list_lock);
638 list_del(&hotplug->slot_list); 620 list_del(&hotplug->slot_list);
639 spin_unlock(&pci_hotplug_slot_list_lock);
640 621
641 slot = hotplug->pci_slot; 622 slot = hotplug->pci_slot;
642 fs_remove_slot(slot); 623 fs_remove_slot(slot);
643 dbg("Removed slot %s from the list\n", hotplug->name); 624 dbg("Removed slot %s from the list\n", hotplug_slot_name(hotplug));
644 625
645 hotplug->release(hotplug); 626 hotplug->release(hotplug);
646 slot->hotplug = NULL; 627 slot->hotplug = NULL;
647 pci_destroy_slot(slot); 628 pci_destroy_slot(slot);
629 mutex_unlock(&pci_hp_mutex);
648 630
649 return 0; 631 return 0;
650} 632}
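
[Editorial sketch] pci_hotplug_core.c replaces the pci_hotplug_slot_list_lock spinlock with a single pci_hp_mutex that covers the whole register/deregister paths, including fs_add_slot()/fs_remove_slot(), which create and remove sysfs files and may sleep — something that cannot be done while holding a spinlock. The pthread-based sketch below models that design choice in userspace (the names are invented; it is not kernel code): one sleepable lock serializes both the list manipulation and the blocking filesystem work.

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t hp_mutex = PTHREAD_MUTEX_INITIALIZER;
    static int nslots;

    /* stand-in for fs_add_slot(): creating sysfs entries can block */
    static void add_sysfs_entries(void)
    {
            usleep(1000);
    }

    static int register_slot(void)
    {
            pthread_mutex_lock(&hp_mutex);
            nslots++;                   /* list_add(&slot->slot_list, ...) in the driver */
            add_sysfs_entries();        /* blocking while holding a mutex is fine */
            pthread_mutex_unlock(&hp_mutex);
            return 0;
    }

    int main(void)
    {
            register_slot();
            printf("slots registered: %d\n", nslots);
            return 0;
    }
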
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index c367978bd7fe..b2801a7ee37f 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -74,15 +74,13 @@ extern struct workqueue_struct *pciehp_wq;
74struct slot { 74struct slot {
75 u8 bus; 75 u8 bus;
76 u8 device; 76 u8 device;
77 u32 number;
78 u8 state; 77 u8 state;
79 struct timer_list task_event;
80 u8 hp_slot; 78 u8 hp_slot;
79 u32 number;
81 struct controller *ctrl; 80 struct controller *ctrl;
82 struct hpc_ops *hpc_ops; 81 struct hpc_ops *hpc_ops;
83 struct hotplug_slot *hotplug_slot; 82 struct hotplug_slot *hotplug_slot;
84 struct list_head slot_list; 83 struct list_head slot_list;
85 char name[SLOT_NAME_SIZE];
86 unsigned long last_emi_toggle; 84 unsigned long last_emi_toggle;
87 struct delayed_work work; /* work for button event */ 85 struct delayed_work work; /* work for button event */
88 struct mutex lock; 86 struct mutex lock;
@@ -112,6 +110,7 @@ struct controller {
112 struct timer_list poll_timer; 110 struct timer_list poll_timer;
113 int cmd_busy; 111 int cmd_busy;
114 unsigned int no_cmd_complete:1; 112 unsigned int no_cmd_complete:1;
113 unsigned int link_active_reporting:1;
115}; 114};
116 115
117#define INT_BUTTON_IGNORE 0 116#define INT_BUTTON_IGNORE 0
@@ -175,6 +174,11 @@ int pciehp_enable_slot(struct slot *p_slot);
175int pciehp_disable_slot(struct slot *p_slot); 174int pciehp_disable_slot(struct slot *p_slot);
176int pcie_enable_notification(struct controller *ctrl); 175int pcie_enable_notification(struct controller *ctrl);
177 176
177static inline const char *slot_name(struct slot *slot)
178{
179 return hotplug_slot_name(slot->hotplug_slot);
180}
181
178static inline struct slot *pciehp_find_slot(struct controller *ctrl, u8 device) 182static inline struct slot *pciehp_find_slot(struct controller *ctrl, u8 device)
179{ 183{
180 struct slot *slot; 184 struct slot *slot;
@@ -184,7 +188,7 @@ static inline struct slot *pciehp_find_slot(struct controller *ctrl, u8 device)
184 return slot; 188 return slot;
185 } 189 }
186 190
187 ctrl_err(ctrl, "%s: slot (device=0x%x) not found\n", __func__, device); 191 ctrl_err(ctrl, "Slot (device=0x%02x) not found\n", device);
188 return NULL; 192 return NULL;
189} 193}
190 194
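
[Editorial sketch] pciehp's new slot_name() differs from the cpqphp version in one small detail: it returns const char *, making it explicit that the name now lives in the core-owned pci_slot kobject and is read-only to the driver. A short illustration of that accessor style (illustrative only, invented names):

    #include <stdio.h>

    struct demo_slot { char name[8]; };

    /* read-only view of a buffer the caller does not own */
    static inline const char *demo_slot_name(const struct demo_slot *slot)
    {
            return slot->name;
    }

    int main(void)
    {
            struct demo_slot s = { .name = "7" };

            printf("%s\n", demo_slot_name(&s));
            return 0;
    }
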
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index c748a19db89d..39cf248d24e3 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -184,8 +184,8 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
184{ 184{
185 struct slot *slot = hotplug_slot->private; 185 struct slot *slot = hotplug_slot->private;
186 186
187 ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n", 187 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
188 __func__, hotplug_slot->name); 188 __func__, hotplug_slot_name(hotplug_slot));
189 189
190 kfree(hotplug_slot->info); 190 kfree(hotplug_slot->info);
191 kfree(hotplug_slot); 191 kfree(hotplug_slot);
@@ -196,7 +196,7 @@ static int init_slots(struct controller *ctrl)
196 struct slot *slot; 196 struct slot *slot;
197 struct hotplug_slot *hotplug_slot; 197 struct hotplug_slot *hotplug_slot;
198 struct hotplug_slot_info *info; 198 struct hotplug_slot_info *info;
199 int len, dup = 1; 199 char name[SLOT_NAME_SIZE];
200 int retval = -ENOMEM; 200 int retval = -ENOMEM;
201 201
202 list_for_each_entry(slot, &ctrl->slot_list, slot_list) { 202 list_for_each_entry(slot, &ctrl->slot_list, slot_list) {
@@ -210,48 +210,37 @@ static int init_slots(struct controller *ctrl)
210 210
211 /* register this slot with the hotplug pci core */ 211 /* register this slot with the hotplug pci core */
212 hotplug_slot->info = info; 212 hotplug_slot->info = info;
213 hotplug_slot->name = slot->name;
214 hotplug_slot->private = slot; 213 hotplug_slot->private = slot;
215 hotplug_slot->release = &release_slot; 214 hotplug_slot->release = &release_slot;
216 hotplug_slot->ops = &pciehp_hotplug_slot_ops; 215 hotplug_slot->ops = &pciehp_hotplug_slot_ops;
217 get_power_status(hotplug_slot, &info->power_status);
218 get_attention_status(hotplug_slot, &info->attention_status);
219 get_latch_status(hotplug_slot, &info->latch_status);
220 get_adapter_status(hotplug_slot, &info->adapter_status);
221 slot->hotplug_slot = hotplug_slot; 216 slot->hotplug_slot = hotplug_slot;
217 snprintf(name, SLOT_NAME_SIZE, "%u", slot->number);
222 218
223 ctrl_dbg(ctrl, "Registering bus=%x dev=%x hp_slot=%x sun=%x " 219 ctrl_dbg(ctrl, "Registering domain:bus:dev=%04x:%02x:%02x "
224 "slot_device_offset=%x\n", slot->bus, slot->device, 220 "hp_slot=%x sun=%x slot_device_offset=%x\n",
225 slot->hp_slot, slot->number, ctrl->slot_device_offset); 221 pci_domain_nr(ctrl->pci_dev->subordinate),
226duplicate_name: 222 slot->bus, slot->device, slot->hp_slot, slot->number,
223 ctrl->slot_device_offset);
227 retval = pci_hp_register(hotplug_slot, 224 retval = pci_hp_register(hotplug_slot,
228 ctrl->pci_dev->subordinate, 225 ctrl->pci_dev->subordinate,
229 slot->device); 226 slot->device,
227 name);
230 if (retval) { 228 if (retval) {
231 /*
232 * If slot N already exists, we'll try to create
233 * slot N-1, N-2 ... N-M, until we overflow.
234 */
235 if (retval == -EEXIST) {
236 len = snprintf(slot->name, SLOT_NAME_SIZE,
237 "%d-%d", slot->number, dup++);
238 if (len < SLOT_NAME_SIZE)
239 goto duplicate_name;
240 else
241 ctrl_err(ctrl, "duplicate slot name "
242 "overflow\n");
243 }
244 ctrl_err(ctrl, "pci_hp_register failed with error %d\n", 229 ctrl_err(ctrl, "pci_hp_register failed with error %d\n",
245 retval); 230 retval);
246 goto error_info; 231 goto error_info;
247 } 232 }
233 get_power_status(hotplug_slot, &info->power_status);
234 get_attention_status(hotplug_slot, &info->attention_status);
235 get_latch_status(hotplug_slot, &info->latch_status);
236 get_adapter_status(hotplug_slot, &info->adapter_status);
248 /* create additional sysfs entries */ 237 /* create additional sysfs entries */
249 if (EMI(ctrl)) { 238 if (EMI(ctrl)) {
250 retval = sysfs_create_file(&hotplug_slot->pci_slot->kobj, 239 retval = sysfs_create_file(&hotplug_slot->pci_slot->kobj,
251 &hotplug_slot_attr_lock.attr); 240 &hotplug_slot_attr_lock.attr);
252 if (retval) { 241 if (retval) {
253 pci_hp_deregister(hotplug_slot); 242 pci_hp_deregister(hotplug_slot);
254 ctrl_err(ctrl, "cannot create additional sysfs " 243 ctrl_err(ctrl, "Cannot create additional sysfs "
255 "entries\n"); 244 "entries\n");
256 goto error_info; 245 goto error_info;
257 } 246 }
@@ -286,8 +275,8 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
286{ 275{
287 struct slot *slot = hotplug_slot->private; 276 struct slot *slot = hotplug_slot->private;
288 277
289 ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n", 278 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
290 __func__, hotplug_slot->name); 279 __func__, slot_name(slot));
291 280
292 hotplug_slot->info->attention_status = status; 281 hotplug_slot->info->attention_status = status;
293 282
@@ -302,8 +291,8 @@ static int enable_slot(struct hotplug_slot *hotplug_slot)
302{ 291{
303 struct slot *slot = hotplug_slot->private; 292 struct slot *slot = hotplug_slot->private;
304 293
305 ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n", 294 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
306 __func__, hotplug_slot->name); 295 __func__, slot_name(slot));
307 296
308 return pciehp_sysfs_enable_slot(slot); 297 return pciehp_sysfs_enable_slot(slot);
309} 298}
@@ -313,8 +302,8 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
313{ 302{
314 struct slot *slot = hotplug_slot->private; 303 struct slot *slot = hotplug_slot->private;
315 304
316 ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n", 305 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
317 __func__, hotplug_slot->name); 306 __func__, slot_name(slot));
318 307
319 return pciehp_sysfs_disable_slot(slot); 308 return pciehp_sysfs_disable_slot(slot);
320} 309}
@@ -324,8 +313,8 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
324 struct slot *slot = hotplug_slot->private; 313 struct slot *slot = hotplug_slot->private;
325 int retval; 314 int retval;
326 315
327 ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n", 316 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
328 __func__, hotplug_slot->name); 317 __func__, slot_name(slot));
329 318
330 retval = slot->hpc_ops->get_power_status(slot, value); 319 retval = slot->hpc_ops->get_power_status(slot, value);
331 if (retval < 0) 320 if (retval < 0)
@@ -339,8 +328,8 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
339 struct slot *slot = hotplug_slot->private; 328 struct slot *slot = hotplug_slot->private;
340 int retval; 329 int retval;
341 330
342 ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n", 331 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
343 __func__, hotplug_slot->name); 332 __func__, slot_name(slot));
344 333
345 retval = slot->hpc_ops->get_attention_status(slot, value); 334 retval = slot->hpc_ops->get_attention_status(slot, value);
346 if (retval < 0) 335 if (retval < 0)
@@ -354,8 +343,8 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
354 struct slot *slot = hotplug_slot->private; 343 struct slot *slot = hotplug_slot->private;
355 int retval; 344 int retval;
356 345
357 ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n", 346 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
358 __func__, hotplug_slot->name); 347 __func__, slot_name(slot));
359 348
360 retval = slot->hpc_ops->get_latch_status(slot, value); 349 retval = slot->hpc_ops->get_latch_status(slot, value);
361 if (retval < 0) 350 if (retval < 0)
@@ -369,8 +358,8 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
369 struct slot *slot = hotplug_slot->private; 358 struct slot *slot = hotplug_slot->private;
370 int retval; 359 int retval;
371 360
372 ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n", 361 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
373 __func__, hotplug_slot->name); 362 __func__, slot_name(slot));
374 363
375 retval = slot->hpc_ops->get_adapter_status(slot, value); 364 retval = slot->hpc_ops->get_adapter_status(slot, value);
376 if (retval < 0) 365 if (retval < 0)
@@ -385,8 +374,8 @@ static int get_max_bus_speed(struct hotplug_slot *hotplug_slot,
385 struct slot *slot = hotplug_slot->private; 374 struct slot *slot = hotplug_slot->private;
386 int retval; 375 int retval;
387 376
388 ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n", 377 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
389 __func__, hotplug_slot->name); 378 __func__, slot_name(slot));
390 379
391 retval = slot->hpc_ops->get_max_bus_speed(slot, value); 380 retval = slot->hpc_ops->get_max_bus_speed(slot, value);
392 if (retval < 0) 381 if (retval < 0)
@@ -400,8 +389,8 @@ static int get_cur_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_spe
400 struct slot *slot = hotplug_slot->private; 389 struct slot *slot = hotplug_slot->private;
401 int retval; 390 int retval;
402 391
403 ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n", 392 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
404 __func__, hotplug_slot->name); 393 __func__, slot_name(slot));
405 394
406 retval = slot->hpc_ops->get_cur_bus_speed(slot, value); 395 retval = slot->hpc_ops->get_cur_bus_speed(slot, value);
407 if (retval < 0) 396 if (retval < 0)
@@ -427,7 +416,7 @@ static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_
427 416
428 ctrl = pcie_init(dev); 417 ctrl = pcie_init(dev);
429 if (!ctrl) { 418 if (!ctrl) {
430 dev_err(&dev->device, "controller initialization failed\n"); 419 dev_err(&dev->device, "Controller initialization failed\n");
431 goto err_out_none; 420 goto err_out_none;
432 } 421 }
433 set_service_data(dev, ctrl); 422 set_service_data(dev, ctrl);
@@ -436,25 +425,26 @@ static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_
436 rc = init_slots(ctrl); 425 rc = init_slots(ctrl);
437 if (rc) { 426 if (rc) {
438 if (rc == -EBUSY) 427 if (rc == -EBUSY)
439 ctrl_warn(ctrl, "slot already registered by another " 428 ctrl_warn(ctrl, "Slot already registered by another "
440 "hotplug driver\n"); 429 "hotplug driver\n");
441 else 430 else
442 ctrl_err(ctrl, "slot initialization failed\n"); 431 ctrl_err(ctrl, "Slot initialization failed\n");
443 goto err_out_release_ctlr; 432 goto err_out_release_ctlr;
444 } 433 }
445 434
435 /* Check if slot is occupied */
446 t_slot = pciehp_find_slot(ctrl, ctrl->slot_device_offset); 436 t_slot = pciehp_find_slot(ctrl, ctrl->slot_device_offset);
447 437 t_slot->hpc_ops->get_adapter_status(t_slot, &value);
448 t_slot->hpc_ops->get_adapter_status(t_slot, &value); /* Check if slot is occupied */ 438 if (value) {
449 if (value && pciehp_force) { 439 if (pciehp_force)
450 rc = pciehp_enable_slot(t_slot); 440 pciehp_enable_slot(t_slot);
451 if (rc) /* -ENODEV: shouldn't happen, but deal with it */ 441 } else {
452 value = 0; 442 /* Power off slot if not occupied */
453 } 443 if (POWER_CTRL(ctrl)) {
454 if ((POWER_CTRL(ctrl)) && !value) { 444 rc = t_slot->hpc_ops->power_off_slot(t_slot);
455 rc = t_slot->hpc_ops->power_off_slot(t_slot); /* Power off slot if not occupied*/ 445 if (rc)
456 if (rc) 446 goto err_out_free_ctrl_slot;
457 goto err_out_free_ctrl_slot; 447 }
458 } 448 }
459 449
460 return 0; 450 return 0;
@@ -536,7 +526,7 @@ static int __init pcied_init(void)
536 dbg("pcie_port_service_register = %d\n", retval); 526 dbg("pcie_port_service_register = %d\n", retval);
537 info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); 527 info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
538 if (retval) 528 if (retval)
539 dbg("%s: Failure to register service\n", __func__); 529 dbg("Failure to register service\n");
540 return retval; 530 return retval;
541} 531}
542 532
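
[Editorial sketch] The pciehp_core.c hunks simplify two things: init_slots() drops the "try N-1, N-2, ..." retry loop for duplicate names and registers each slot once under its raw slot number, and pciehp_probe() now reads the adapter status a single time and branches — enable the slot if a card is present (and pciehp_force is set), otherwise power the empty slot off when a power controller exists. A compact model of that probe decision is sketched below; it is plain illustrative C, not the driver code.

    #include <stdio.h>
    #include <stdbool.h>

    static bool pciehp_force = true;    /* module parameter in the real driver */
    static bool has_power_ctrl = true;  /* slot capability bit in the real driver */

    static void probe_slot(bool occupied)
    {
            if (occupied) {
                    if (pciehp_force)
                            printf("enable slot\n");          /* pciehp_enable_slot() */
            } else if (has_power_ctrl) {
                    printf("power off empty slot\n");         /* hpc_ops->power_off_slot() */
            }
    }

    int main(void)
    {
            probe_slot(true);
            probe_slot(false);
            return 0;
    }
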
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index acb7f9efd182..fead63c6b49e 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -61,12 +61,12 @@ u8 pciehp_handle_attention_button(struct slot *p_slot)
61 struct controller *ctrl = p_slot->ctrl; 61 struct controller *ctrl = p_slot->ctrl;
62 62
63 /* Attention Button Change */ 63 /* Attention Button Change */
64 ctrl_dbg(ctrl, "Attention button interrupt received.\n"); 64 ctrl_dbg(ctrl, "Attention button interrupt received\n");
65 65
66 /* 66 /*
67 * Button pressed - See if need to TAKE ACTION!!! 67 * Button pressed - See if need to TAKE ACTION!!!
68 */ 68 */
69 ctrl_info(ctrl, "Button pressed on Slot(%s)\n", p_slot->name); 69 ctrl_info(ctrl, "Button pressed on Slot(%s)\n", slot_name(p_slot));
70 event_type = INT_BUTTON_PRESS; 70 event_type = INT_BUTTON_PRESS;
71 71
72 queue_interrupt_event(p_slot, event_type); 72 queue_interrupt_event(p_slot, event_type);
@@ -81,20 +81,20 @@ u8 pciehp_handle_switch_change(struct slot *p_slot)
81 struct controller *ctrl = p_slot->ctrl; 81 struct controller *ctrl = p_slot->ctrl;
82 82
83 /* Switch Change */ 83 /* Switch Change */
84 ctrl_dbg(ctrl, "Switch interrupt received.\n"); 84 ctrl_dbg(ctrl, "Switch interrupt received\n");
85 85
86 p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); 86 p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
87 if (getstatus) { 87 if (getstatus) {
88 /* 88 /*
89 * Switch opened 89 * Switch opened
90 */ 90 */
91 ctrl_info(ctrl, "Latch open on Slot(%s)\n", p_slot->name); 91 ctrl_info(ctrl, "Latch open on Slot(%s)\n", slot_name(p_slot));
92 event_type = INT_SWITCH_OPEN; 92 event_type = INT_SWITCH_OPEN;
93 } else { 93 } else {
94 /* 94 /*
95 * Switch closed 95 * Switch closed
96 */ 96 */
97 ctrl_info(ctrl, "Latch close on Slot(%s)\n", p_slot->name); 97 ctrl_info(ctrl, "Latch close on Slot(%s)\n", slot_name(p_slot));
98 event_type = INT_SWITCH_CLOSE; 98 event_type = INT_SWITCH_CLOSE;
99 } 99 }
100 100
@@ -110,7 +110,7 @@ u8 pciehp_handle_presence_change(struct slot *p_slot)
110 struct controller *ctrl = p_slot->ctrl; 110 struct controller *ctrl = p_slot->ctrl;
111 111
112 /* Presence Change */ 112 /* Presence Change */
113 ctrl_dbg(ctrl, "Presence/Notify input change.\n"); 113 ctrl_dbg(ctrl, "Presence/Notify input change\n");
114 114
115 /* Switch is open, assume a presence change 115 /* Switch is open, assume a presence change
116 * Save the presence state 116 * Save the presence state
@@ -120,13 +120,14 @@ u8 pciehp_handle_presence_change(struct slot *p_slot)
120 /* 120 /*
121 * Card Present 121 * Card Present
122 */ 122 */
123 ctrl_info(ctrl, "Card present on Slot(%s)\n", p_slot->name); 123 ctrl_info(ctrl, "Card present on Slot(%s)\n", slot_name(p_slot));
124 event_type = INT_PRESENCE_ON; 124 event_type = INT_PRESENCE_ON;
125 } else { 125 } else {
126 /* 126 /*
127 * Not Present 127 * Not Present
128 */ 128 */
129 ctrl_info(ctrl, "Card not present on Slot(%s)\n", p_slot->name); 129 ctrl_info(ctrl, "Card not present on Slot(%s)\n",
130 slot_name(p_slot));
130 event_type = INT_PRESENCE_OFF; 131 event_type = INT_PRESENCE_OFF;
131 } 132 }
132 133
@@ -141,22 +142,22 @@ u8 pciehp_handle_power_fault(struct slot *p_slot)
141 struct controller *ctrl = p_slot->ctrl; 142 struct controller *ctrl = p_slot->ctrl;
142 143
143 /* power fault */ 144 /* power fault */
144 ctrl_dbg(ctrl, "Power fault interrupt received.\n"); 145 ctrl_dbg(ctrl, "Power fault interrupt received\n");
145 146
146 if ( !(p_slot->hpc_ops->query_power_fault(p_slot))) { 147 if ( !(p_slot->hpc_ops->query_power_fault(p_slot))) {
147 /* 148 /*
148 * power fault Cleared 149 * power fault Cleared
149 */ 150 */
150 ctrl_info(ctrl, "Power fault cleared on Slot(%s)\n", 151 ctrl_info(ctrl, "Power fault cleared on Slot(%s)\n",
151 p_slot->name); 152 slot_name(p_slot));
152 event_type = INT_POWER_FAULT_CLEAR; 153 event_type = INT_POWER_FAULT_CLEAR;
153 } else { 154 } else {
154 /* 155 /*
155 * power fault 156 * power fault
156 */ 157 */
157 ctrl_info(ctrl, "Power fault on Slot(%s)\n", p_slot->name); 158 ctrl_info(ctrl, "Power fault on Slot(%s)\n", slot_name(p_slot));
158 event_type = INT_POWER_FAULT; 159 event_type = INT_POWER_FAULT;
159 ctrl_info(ctrl, "power fault bit %x set\n", 0); 160 ctrl_info(ctrl, "Power fault bit %x set\n", 0);
160 } 161 }
161 162
162 queue_interrupt_event(p_slot, event_type); 163 queue_interrupt_event(p_slot, event_type);
@@ -174,8 +175,7 @@ static void set_slot_off(struct controller *ctrl, struct slot * pslot)
174 if (POWER_CTRL(ctrl)) { 175 if (POWER_CTRL(ctrl)) {
175 if (pslot->hpc_ops->power_off_slot(pslot)) { 176 if (pslot->hpc_ops->power_off_slot(pslot)) {
176 ctrl_err(ctrl, 177 ctrl_err(ctrl,
177 "%s: Issue of Slot Power Off command failed\n", 178 "Issue of Slot Power Off command failed\n");
178 __func__);
179 return; 179 return;
180 } 180 }
181 } 181 }
@@ -192,8 +192,8 @@ static void set_slot_off(struct controller *ctrl, struct slot * pslot)
192 192
193 if (ATTN_LED(ctrl)) { 193 if (ATTN_LED(ctrl)) {
194 if (pslot->hpc_ops->set_attention_status(pslot, 1)) { 194 if (pslot->hpc_ops->set_attention_status(pslot, 1)) {
195 ctrl_err(ctrl, "%s: Issue of Set Attention " 195 ctrl_err(ctrl,
196 "Led command failed\n", __func__); 196 "Issue of Set Attention Led command failed\n");
197 return; 197 return;
198 } 198 }
199 } 199 }
@@ -210,8 +210,9 @@ static int board_added(struct slot *p_slot)
210{ 210{
211 int retval = 0; 211 int retval = 0;
212 struct controller *ctrl = p_slot->ctrl; 212 struct controller *ctrl = p_slot->ctrl;
213 struct pci_bus *parent = ctrl->pci_dev->subordinate;
213 214
214 ctrl_dbg(ctrl, "%s: slot device, slot offset, hp slot = %d, %d ,%d\n", 215 ctrl_dbg(ctrl, "%s: slot device, slot offset, hp slot = %d, %d, %d\n",
215 __func__, p_slot->device, ctrl->slot_device_offset, 216 __func__, p_slot->device, ctrl->slot_device_offset,
216 p_slot->hp_slot); 217 p_slot->hp_slot);
217 218
@@ -225,28 +226,25 @@ static int board_added(struct slot *p_slot)
225 if (PWR_LED(ctrl)) 226 if (PWR_LED(ctrl))
226 p_slot->hpc_ops->green_led_blink(p_slot); 227 p_slot->hpc_ops->green_led_blink(p_slot);
227 228
228 /* Wait for ~1 second */
229 msleep(1000);
230
231 /* Check link training status */ 229 /* Check link training status */
232 retval = p_slot->hpc_ops->check_lnk_status(ctrl); 230 retval = p_slot->hpc_ops->check_lnk_status(ctrl);
233 if (retval) { 231 if (retval) {
234 ctrl_err(ctrl, "%s: Failed to check link status\n", __func__); 232 ctrl_err(ctrl, "Failed to check link status\n");
235 set_slot_off(ctrl, p_slot); 233 set_slot_off(ctrl, p_slot);
236 return retval; 234 return retval;
237 } 235 }
238 236
239 /* Check for a power fault */ 237 /* Check for a power fault */
240 if (p_slot->hpc_ops->query_power_fault(p_slot)) { 238 if (p_slot->hpc_ops->query_power_fault(p_slot)) {
241 ctrl_dbg(ctrl, "%s: power fault detected\n", __func__); 239 ctrl_dbg(ctrl, "Power fault detected\n");
242 retval = POWER_FAILURE; 240 retval = POWER_FAILURE;
243 goto err_exit; 241 goto err_exit;
244 } 242 }
245 243
246 retval = pciehp_configure_device(p_slot); 244 retval = pciehp_configure_device(p_slot);
247 if (retval) { 245 if (retval) {
248 ctrl_err(ctrl, "Cannot add device 0x%x:%x\n", 246 ctrl_err(ctrl, "Cannot add device at %04x:%02x:%02x\n",
249 p_slot->bus, p_slot->device); 247 pci_domain_nr(parent), p_slot->bus, p_slot->device);
250 goto err_exit; 248 goto err_exit;
251 } 249 }
252 250
@@ -278,14 +276,14 @@ static int remove_board(struct slot *p_slot)
278 if (retval) 276 if (retval)
279 return retval; 277 return retval;
280 278
281 ctrl_dbg(ctrl, "In %s, hp_slot = %d\n", __func__, p_slot->hp_slot); 279 ctrl_dbg(ctrl, "%s: hp_slot = %d\n", __func__, p_slot->hp_slot);
282 280
283 if (POWER_CTRL(ctrl)) { 281 if (POWER_CTRL(ctrl)) {
284 /* power off slot */ 282 /* power off slot */
285 retval = p_slot->hpc_ops->power_off_slot(p_slot); 283 retval = p_slot->hpc_ops->power_off_slot(p_slot);
286 if (retval) { 284 if (retval) {
287 ctrl_err(ctrl, "%s: Issue of Slot Disable command " 285 ctrl_err(ctrl,
288 "failed\n", __func__); 286 "Issue of Slot Disable command failed\n");
289 return retval; 287 return retval;
290 } 288 }
291 } 289 }
@@ -326,8 +324,10 @@ static void pciehp_power_thread(struct work_struct *work)
326 switch (p_slot->state) { 324 switch (p_slot->state) {
327 case POWEROFF_STATE: 325 case POWEROFF_STATE:
328 mutex_unlock(&p_slot->lock); 326 mutex_unlock(&p_slot->lock);
329 ctrl_dbg(p_slot->ctrl, "%s: disabling bus:device(%x:%x)\n", 327 ctrl_dbg(p_slot->ctrl,
330 __func__, p_slot->bus, p_slot->device); 328 "Disabling domain:bus:device=%04x:%02x:%02x\n",
329 pci_domain_nr(p_slot->ctrl->pci_dev->subordinate),
330 p_slot->bus, p_slot->device);
331 pciehp_disable_slot(p_slot); 331 pciehp_disable_slot(p_slot);
332 mutex_lock(&p_slot->lock); 332 mutex_lock(&p_slot->lock);
333 p_slot->state = STATIC_STATE; 333 p_slot->state = STATIC_STATE;
@@ -412,12 +412,12 @@ static void handle_button_press_event(struct slot *p_slot)
412 p_slot->state = BLINKINGOFF_STATE; 412 p_slot->state = BLINKINGOFF_STATE;
413 ctrl_info(ctrl, 413 ctrl_info(ctrl,
414 "PCI slot #%s - powering off due to button " 414 "PCI slot #%s - powering off due to button "
415 "press.\n", p_slot->name); 415 "press.\n", slot_name(p_slot));
416 } else { 416 } else {
417 p_slot->state = BLINKINGON_STATE; 417 p_slot->state = BLINKINGON_STATE;
418 ctrl_info(ctrl, 418 ctrl_info(ctrl,
419 "PCI slot #%s - powering on due to button " 419 "PCI slot #%s - powering on due to button "
420 "press.\n", p_slot->name); 420 "press.\n", slot_name(p_slot));
421 } 421 }
422 /* blink green LED and turn off amber */ 422 /* blink green LED and turn off amber */
423 if (PWR_LED(ctrl)) 423 if (PWR_LED(ctrl))
@@ -434,8 +434,7 @@ static void handle_button_press_event(struct slot *p_slot)
434 * press the attention again before the 5 sec. limit 434 * press the attention again before the 5 sec. limit
435 * expires to cancel hot-add or hot-remove 435 * expires to cancel hot-add or hot-remove
436 */ 436 */
437 ctrl_info(ctrl, "Button cancel on Slot(%s)\n", p_slot->name); 437 ctrl_info(ctrl, "Button cancel on Slot(%s)\n", slot_name(p_slot));
438 ctrl_dbg(ctrl, "%s: button cancel\n", __func__);
439 cancel_delayed_work(&p_slot->work); 438 cancel_delayed_work(&p_slot->work);
440 if (p_slot->state == BLINKINGOFF_STATE) { 439 if (p_slot->state == BLINKINGOFF_STATE) {
441 if (PWR_LED(ctrl)) 440 if (PWR_LED(ctrl))
@@ -447,7 +446,7 @@ static void handle_button_press_event(struct slot *p_slot)
447 if (ATTN_LED(ctrl)) 446 if (ATTN_LED(ctrl))
448 p_slot->hpc_ops->set_attention_status(p_slot, 0); 447 p_slot->hpc_ops->set_attention_status(p_slot, 0);
449 ctrl_info(ctrl, "PCI slot #%s - action canceled " 448 ctrl_info(ctrl, "PCI slot #%s - action canceled "
450 "due to button press\n", p_slot->name); 449 "due to button press\n", slot_name(p_slot));
451 p_slot->state = STATIC_STATE; 450 p_slot->state = STATIC_STATE;
452 break; 451 break;
453 case POWEROFF_STATE: 452 case POWEROFF_STATE:
@@ -457,7 +456,7 @@ static void handle_button_press_event(struct slot *p_slot)
457 * this means that the previous attention button action 456 * this means that the previous attention button action
458 * to hot-add or hot-remove is undergoing 457 * to hot-add or hot-remove is undergoing
459 */ 458 */
460 ctrl_info(ctrl, "Button ignore on Slot(%s)\n", p_slot->name); 459 ctrl_info(ctrl, "Button ignore on Slot(%s)\n", slot_name(p_slot));
461 update_slot_info(p_slot); 460 update_slot_info(p_slot);
462 break; 461 break;
463 default: 462 default:
@@ -539,16 +538,15 @@ int pciehp_enable_slot(struct slot *p_slot)
539 538
540 rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus); 539 rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus);
541 if (rc || !getstatus) { 540 if (rc || !getstatus) {
542 ctrl_info(ctrl, "%s: no adapter on slot(%s)\n", 541 ctrl_info(ctrl, "No adapter on slot(%s)\n", slot_name(p_slot));
543 __func__, p_slot->name);
544 mutex_unlock(&p_slot->ctrl->crit_sect); 542 mutex_unlock(&p_slot->ctrl->crit_sect);
545 return -ENODEV; 543 return -ENODEV;
546 } 544 }
547 if (MRL_SENS(p_slot->ctrl)) { 545 if (MRL_SENS(p_slot->ctrl)) {
548 rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); 546 rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
549 if (rc || getstatus) { 547 if (rc || getstatus) {
550 ctrl_info(ctrl, "%s: latch open on slot(%s)\n", 548 ctrl_info(ctrl, "Latch open on slot(%s)\n",
551 __func__, p_slot->name); 549 slot_name(p_slot));
552 mutex_unlock(&p_slot->ctrl->crit_sect); 550 mutex_unlock(&p_slot->ctrl->crit_sect);
553 return -ENODEV; 551 return -ENODEV;
554 } 552 }
@@ -557,8 +555,8 @@ int pciehp_enable_slot(struct slot *p_slot)
557 if (POWER_CTRL(p_slot->ctrl)) { 555 if (POWER_CTRL(p_slot->ctrl)) {
558 rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus); 556 rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
559 if (rc || getstatus) { 557 if (rc || getstatus) {
560 ctrl_info(ctrl, "%s: already enabled on slot(%s)\n", 558 ctrl_info(ctrl, "Already enabled on slot(%s)\n",
561 __func__, p_slot->name); 559 slot_name(p_slot));
562 mutex_unlock(&p_slot->ctrl->crit_sect); 560 mutex_unlock(&p_slot->ctrl->crit_sect);
563 return -EINVAL; 561 return -EINVAL;
564 } 562 }
@@ -593,8 +591,8 @@ int pciehp_disable_slot(struct slot *p_slot)
593 if (!HP_SUPR_RM(p_slot->ctrl)) { 591 if (!HP_SUPR_RM(p_slot->ctrl)) {
594 ret = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus); 592 ret = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus);
595 if (ret || !getstatus) { 593 if (ret || !getstatus) {
596 ctrl_info(ctrl, "%s: no adapter on slot(%s)\n", 594 ctrl_info(ctrl, "No adapter on slot(%s)\n",
597 __func__, p_slot->name); 595 slot_name(p_slot));
598 mutex_unlock(&p_slot->ctrl->crit_sect); 596 mutex_unlock(&p_slot->ctrl->crit_sect);
599 return -ENODEV; 597 return -ENODEV;
600 } 598 }
@@ -603,8 +601,8 @@ int pciehp_disable_slot(struct slot *p_slot)
603 if (MRL_SENS(p_slot->ctrl)) { 601 if (MRL_SENS(p_slot->ctrl)) {
604 ret = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); 602 ret = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
605 if (ret || getstatus) { 603 if (ret || getstatus) {
606 ctrl_info(ctrl, "%s: latch open on slot(%s)\n", 604 ctrl_info(ctrl, "Latch open on slot(%s)\n",
607 __func__, p_slot->name); 605 slot_name(p_slot));
608 mutex_unlock(&p_slot->ctrl->crit_sect); 606 mutex_unlock(&p_slot->ctrl->crit_sect);
609 return -ENODEV; 607 return -ENODEV;
610 } 608 }
@@ -613,8 +611,8 @@ int pciehp_disable_slot(struct slot *p_slot)
613 if (POWER_CTRL(p_slot->ctrl)) { 611 if (POWER_CTRL(p_slot->ctrl)) {
614 ret = p_slot->hpc_ops->get_power_status(p_slot, &getstatus); 612 ret = p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
615 if (ret || !getstatus) { 613 if (ret || !getstatus) {
616 ctrl_info(ctrl, "%s: already disabled slot(%s)\n", 614 ctrl_info(ctrl, "Already disabled on slot(%s)\n",
617 __func__, p_slot->name); 615 slot_name(p_slot));
618 mutex_unlock(&p_slot->ctrl->crit_sect); 616 mutex_unlock(&p_slot->ctrl->crit_sect);
619 return -EINVAL; 617 return -EINVAL;
620 } 618 }
@@ -645,14 +643,16 @@ int pciehp_sysfs_enable_slot(struct slot *p_slot)
645 break; 643 break;
646 case POWERON_STATE: 644 case POWERON_STATE:
647 ctrl_info(ctrl, "Slot %s is already in powering on state\n", 645 ctrl_info(ctrl, "Slot %s is already in powering on state\n",
648 p_slot->name); 646 slot_name(p_slot));
649 break; 647 break;
650 case BLINKINGOFF_STATE: 648 case BLINKINGOFF_STATE:
651 case POWEROFF_STATE: 649 case POWEROFF_STATE:
652 ctrl_info(ctrl, "Already enabled on slot %s\n", p_slot->name); 650 ctrl_info(ctrl, "Already enabled on slot %s\n",
651 slot_name(p_slot));
653 break; 652 break;
654 default: 653 default:
655 ctrl_err(ctrl, "Not a valid state on slot %s\n", p_slot->name); 654 ctrl_err(ctrl, "Not a valid state on slot %s\n",
655 slot_name(p_slot));
656 break; 656 break;
657 } 657 }
658 mutex_unlock(&p_slot->lock); 658 mutex_unlock(&p_slot->lock);
@@ -678,14 +678,16 @@ int pciehp_sysfs_disable_slot(struct slot *p_slot)
678 break; 678 break;
679 case POWEROFF_STATE: 679 case POWEROFF_STATE:
680 ctrl_info(ctrl, "Slot %s is already in powering off state\n", 680 ctrl_info(ctrl, "Slot %s is already in powering off state\n",
681 p_slot->name); 681 slot_name(p_slot));
682 break; 682 break;
683 case BLINKINGON_STATE: 683 case BLINKINGON_STATE:
684 case POWERON_STATE: 684 case POWERON_STATE:
685 ctrl_info(ctrl, "Already disabled on slot %s\n", p_slot->name); 685 ctrl_info(ctrl, "Already disabled on slot %s\n",
686 slot_name(p_slot));
686 break; 687 break;
687 default: 688 default:
688 ctrl_err(ctrl, "Not a valid state on slot %s\n", p_slot->name); 689 ctrl_err(ctrl, "Not a valid state on slot %s\n",
690 slot_name(p_slot));
689 break; 691 break;
690 } 692 }
691 mutex_unlock(&p_slot->lock); 693 mutex_unlock(&p_slot->lock);
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 8e9530c4c36d..b643ca13e4f1 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -125,6 +125,7 @@ static inline int pciehp_writel(struct controller *ctrl, int reg, u32 value)
125/* Field definitions in Link Capabilities Register */ 125/* Field definitions in Link Capabilities Register */
126#define MAX_LNK_SPEED 0x000F 126#define MAX_LNK_SPEED 0x000F
127#define MAX_LNK_WIDTH 0x03F0 127#define MAX_LNK_WIDTH 0x03F0
128#define LINK_ACTIVE_REPORTING 0x00100000
128 129
129/* Link Width Encoding */ 130/* Link Width Encoding */
130#define LNK_X1 0x01 131#define LNK_X1 0x01
@@ -141,6 +142,7 @@ static inline int pciehp_writel(struct controller *ctrl, int reg, u32 value)
141#define LNK_TRN_ERR 0x0400 142#define LNK_TRN_ERR 0x0400
142#define LNK_TRN 0x0800 143#define LNK_TRN 0x0800
143#define SLOT_CLK_CONF 0x1000 144#define SLOT_CLK_CONF 0x1000
145#define LINK_ACTIVE 0x2000
144 146
145/* Field definitions in Slot Capabilities Register */ 147/* Field definitions in Slot Capabilities Register */
146#define ATTN_BUTTN_PRSN 0x00000001 148#define ATTN_BUTTN_PRSN 0x00000001
@@ -314,22 +316,19 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
314 * proceed forward to issue the next command according 316 * proceed forward to issue the next command according
315 * to spec. Just print out the error message. 317 * to spec. Just print out the error message.
316 */ 318 */
317 ctrl_dbg(ctrl, 319 ctrl_dbg(ctrl, "CMD_COMPLETED not clear after 1 sec\n");
318 "%s: CMD_COMPLETED not clear after 1 sec.\n",
319 __func__);
320 } else if (!NO_CMD_CMPL(ctrl)) { 320 } else if (!NO_CMD_CMPL(ctrl)) {
321 /* 321 /*
322 * This controller semms to notify of command completed 322 * This controller semms to notify of command completed
323 * event even though it supports none of power 323 * event even though it supports none of power
324 * controller, attention led, power led and EMI. 324 * controller, attention led, power led and EMI.
325 */ 325 */
326 ctrl_dbg(ctrl, "%s: Unexpected CMD_COMPLETED. Need to " 326 ctrl_dbg(ctrl, "Unexpected CMD_COMPLETED. Need to "
327 "wait for command completed event.\n", 327 "wait for command completed event.\n");
328 __func__);
329 ctrl->no_cmd_complete = 0; 328 ctrl->no_cmd_complete = 0;
330 } else { 329 } else {
331 ctrl_dbg(ctrl, "%s: Unexpected CMD_COMPLETED. Maybe " 330 ctrl_dbg(ctrl, "Unexpected CMD_COMPLETED. Maybe "
332 "the controller is broken.\n", __func__); 331 "the controller is broken.\n");
333 } 332 }
334 } 333 }
335 334
@@ -345,8 +344,7 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
345 smp_mb(); 344 smp_mb();
346 retval = pciehp_writew(ctrl, SLOTCTRL, slot_ctrl); 345 retval = pciehp_writew(ctrl, SLOTCTRL, slot_ctrl);
347 if (retval) 346 if (retval)
348 ctrl_err(ctrl, "%s: Cannot write to SLOTCTRL register\n", 347 ctrl_err(ctrl, "Cannot write to SLOTCTRL register\n");
349 __func__);
350 348
351 /* 349 /*
352 * Wait for command completion. 350 * Wait for command completion.
@@ -368,22 +366,62 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
368 return retval; 366 return retval;
369} 367}
370 368
369static inline int check_link_active(struct controller *ctrl)
370{
371 u16 link_status;
372
373 if (pciehp_readw(ctrl, LNKSTATUS, &link_status))
374 return 0;
375 return !!(link_status & LINK_ACTIVE);
376}
377
378static void pcie_wait_link_active(struct controller *ctrl)
379{
380 int timeout = 1000;
381
382 if (check_link_active(ctrl))
383 return;
384 while (timeout > 0) {
385 msleep(10);
386 timeout -= 10;
387 if (check_link_active(ctrl))
388 return;
389 }
390 ctrl_dbg(ctrl, "Data Link Layer Link Active not set in 1000 msec\n");
391}
392
371static int hpc_check_lnk_status(struct controller *ctrl) 393static int hpc_check_lnk_status(struct controller *ctrl)
372{ 394{
373 u16 lnk_status; 395 u16 lnk_status;
374 int retval = 0; 396 int retval = 0;
375 397
398 /*
399 * Data Link Layer Link Active Reporting must be capable for
400 * hot-plug capable downstream port. But old controller might
401 * not implement it. In this case, we wait for 1000 ms.
402 */
403 if (ctrl->link_active_reporting){
404 /* Wait for Data Link Layer Link Active bit to be set */
405 pcie_wait_link_active(ctrl);
406 /*
407 * We must wait for 100 ms after the Data Link Layer
408 * Link Active bit reads 1b before initiating a
409 * configuration access to the hot added device.
410 */
411 msleep(100);
412 } else
413 msleep(1000);
414
376 retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status); 415 retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status);
377 if (retval) { 416 if (retval) {
378 ctrl_err(ctrl, "%s: Cannot read LNKSTATUS register\n", 417 ctrl_err(ctrl, "Cannot read LNKSTATUS register\n");
379 __func__);
380 return retval; 418 return retval;
381 } 419 }
382 420
383 ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status); 421 ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
384 if ( (lnk_status & LNK_TRN) || (lnk_status & LNK_TRN_ERR) || 422 if ( (lnk_status & LNK_TRN) || (lnk_status & LNK_TRN_ERR) ||
385 !(lnk_status & NEG_LINK_WD)) { 423 !(lnk_status & NEG_LINK_WD)) {
386 ctrl_err(ctrl, "%s : Link Training Error occurs \n", __func__); 424 ctrl_err(ctrl, "Link Training Error occurs \n");
387 retval = -1; 425 retval = -1;
388 return retval; 426 return retval;
389 } 427 }
@@ -508,7 +546,7 @@ static int hpc_query_power_fault(struct slot *slot)
508 546
509 retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); 547 retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
510 if (retval) { 548 if (retval) {
511 ctrl_err(ctrl, "%s: Cannot check for power fault\n", __func__); 549 ctrl_err(ctrl, "Cannot check for power fault\n");
512 return retval; 550 return retval;
513 } 551 }
514 pwr_fault = (u8)((slot_status & PWR_FAULT_DETECTED) >> 1); 552 pwr_fault = (u8)((slot_status & PWR_FAULT_DETECTED) >> 1);
@@ -524,7 +562,7 @@ static int hpc_get_emi_status(struct slot *slot, u8 *status)
524 562
525 retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); 563 retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
526 if (retval) { 564 if (retval) {
527 ctrl_err(ctrl, "%s : Cannot check EMI status\n", __func__); 565 ctrl_err(ctrl, "Cannot check EMI status\n");
528 return retval; 566 return retval;
529 } 567 }
530 *status = (slot_status & EMI_STATE) >> EMI_STATUS_BIT; 568 *status = (slot_status & EMI_STATE) >> EMI_STATUS_BIT;
@@ -654,8 +692,7 @@ static int hpc_power_on_slot(struct slot * slot)
654 retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask); 692 retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
655 693
656 if (retval) { 694 if (retval) {
657 ctrl_err(ctrl, "%s: Write %x command failed!\n", 695 ctrl_err(ctrl, "Write %x command failed!\n", slot_cmd);
658 __func__, slot_cmd);
659 return -1; 696 return -1;
660 } 697 }
661 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", 698 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n",
@@ -733,7 +770,7 @@ static int hpc_power_off_slot(struct slot * slot)
733 770
734 retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask); 771 retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
735 if (retval) { 772 if (retval) {
736 ctrl_err(ctrl, "%s: Write command failed!\n", __func__); 773 ctrl_err(ctrl, "Write command failed!\n");
737 retval = -1; 774 retval = -1;
738 goto out; 775 goto out;
739 } 776 }
@@ -1013,8 +1050,7 @@ int pcie_enable_notification(struct controller *ctrl)
1013 PWR_FAULT_DETECT_ENABLE | HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE; 1050 PWR_FAULT_DETECT_ENABLE | HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE;
1014 1051
1015 if (pcie_write_cmd(ctrl, cmd, mask)) { 1052 if (pcie_write_cmd(ctrl, cmd, mask)) {
1016 ctrl_err(ctrl, "%s: Cannot enable software notification\n", 1053 ctrl_err(ctrl, "Cannot enable software notification\n");
1017 __func__);
1018 return -1; 1054 return -1;
1019 } 1055 }
1020 return 0; 1056 return 0;
@@ -1026,8 +1062,7 @@ static void pcie_disable_notification(struct controller *ctrl)
1026 mask = PRSN_DETECT_ENABLE | ATTN_BUTTN_ENABLE | MRL_DETECT_ENABLE | 1062 mask = PRSN_DETECT_ENABLE | ATTN_BUTTN_ENABLE | MRL_DETECT_ENABLE |
1027 PWR_FAULT_DETECT_ENABLE | HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE; 1063 PWR_FAULT_DETECT_ENABLE | HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE;
1028 if (pcie_write_cmd(ctrl, 0, mask)) 1064 if (pcie_write_cmd(ctrl, 0, mask))
1029 ctrl_warn(ctrl, "%s: Cannot disable software notification\n", 1065 ctrl_warn(ctrl, "Cannot disable software notification\n");
1030 __func__);
1031} 1066}
1032 1067
1033static int pcie_init_notification(struct controller *ctrl) 1068static int pcie_init_notification(struct controller *ctrl)
@@ -1061,7 +1096,6 @@ static int pcie_init_slot(struct controller *ctrl)
1061 slot->device = ctrl->slot_device_offset + slot->hp_slot; 1096 slot->device = ctrl->slot_device_offset + slot->hp_slot;
1062 slot->hpc_ops = ctrl->hpc_ops; 1097 slot->hpc_ops = ctrl->hpc_ops;
1063 slot->number = ctrl->first_slot; 1098 slot->number = ctrl->first_slot;
1064 snprintf(slot->name, SLOT_NAME_SIZE, "%d", slot->number);
1065 mutex_init(&slot->lock); 1099 mutex_init(&slot->lock);
1066 INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work); 1100 INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work);
1067 list_add(&slot->slot_list, &ctrl->slot_list); 1101 list_add(&slot->slot_list, &ctrl->slot_list);
@@ -1132,12 +1166,12 @@ static inline void dbg_ctrl(struct controller *ctrl)
1132struct controller *pcie_init(struct pcie_device *dev) 1166struct controller *pcie_init(struct pcie_device *dev)
1133{ 1167{
1134 struct controller *ctrl; 1168 struct controller *ctrl;
1135 u32 slot_cap; 1169 u32 slot_cap, link_cap;
1136 struct pci_dev *pdev = dev->port; 1170 struct pci_dev *pdev = dev->port;
1137 1171
1138 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); 1172 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
1139 if (!ctrl) { 1173 if (!ctrl) {
1140 dev_err(&dev->device, "%s : out of memory\n", __func__); 1174 dev_err(&dev->device, "%s: Out of memory\n", __func__);
1141 goto abort; 1175 goto abort;
1142 } 1176 }
1143 INIT_LIST_HEAD(&ctrl->slot_list); 1177 INIT_LIST_HEAD(&ctrl->slot_list);
@@ -1146,13 +1180,12 @@ struct controller *pcie_init(struct pcie_device *dev)
1146 ctrl->pci_dev = pdev; 1180 ctrl->pci_dev = pdev;
1147 ctrl->cap_base = pci_find_capability(pdev, PCI_CAP_ID_EXP); 1181 ctrl->cap_base = pci_find_capability(pdev, PCI_CAP_ID_EXP);
1148 if (!ctrl->cap_base) { 1182 if (!ctrl->cap_base) {
1149 ctrl_err(ctrl, "%s: Cannot find PCI Express capability\n", 1183 ctrl_err(ctrl, "Cannot find PCI Express capability\n");
1150 __func__); 1184 goto abort_ctrl;
1151 goto abort;
1152 } 1185 }
1153 if (pciehp_readl(ctrl, SLOTCAP, &slot_cap)) { 1186 if (pciehp_readl(ctrl, SLOTCAP, &slot_cap)) {
1154 ctrl_err(ctrl, "%s: Cannot read SLOTCAP register\n", __func__); 1187 ctrl_err(ctrl, "Cannot read SLOTCAP register\n");
1155 goto abort; 1188 goto abort_ctrl;
1156 } 1189 }
1157 1190
1158 ctrl->slot_cap = slot_cap; 1191 ctrl->slot_cap = slot_cap;
@@ -1174,6 +1207,16 @@ struct controller *pcie_init(struct pcie_device *dev)
1174 !(POWER_CTRL(ctrl) | ATTN_LED(ctrl) | PWR_LED(ctrl) | EMI(ctrl))) 1207 !(POWER_CTRL(ctrl) | ATTN_LED(ctrl) | PWR_LED(ctrl) | EMI(ctrl)))
1175 ctrl->no_cmd_complete = 1; 1208 ctrl->no_cmd_complete = 1;
1176 1209
1210 /* Check if Data Link Layer Link Active Reporting is implemented */
1211 if (pciehp_readl(ctrl, LNKCAP, &link_cap)) {
1212 ctrl_err(ctrl, "%s: Cannot read LNKCAP register\n", __func__);
1213 goto abort_ctrl;
1214 }
1215 if (link_cap & LINK_ACTIVE_REPORTING) {
1216 ctrl_dbg(ctrl, "Link Active Reporting supported\n");
1217 ctrl->link_active_reporting = 1;
1218 }
1219
1177 /* Clear all remaining event bits in Slot Status register */ 1220 /* Clear all remaining event bits in Slot Status register */
1178 if (pciehp_writew(ctrl, SLOTSTATUS, 0x1f)) 1221 if (pciehp_writew(ctrl, SLOTSTATUS, 0x1f))
1179 goto abort_ctrl; 1222 goto abort_ctrl;
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index ffd11148fbe2..10f9566cceeb 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -39,8 +39,7 @@ static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
39 u16 pci_cmd, pci_bctl; 39 u16 pci_cmd, pci_bctl;
40 40
41 if (hpp->revision > 1) { 41 if (hpp->revision > 1) {
42 printk(KERN_WARNING "%s: Rev.%d type0 record not supported\n", 42 warn("Rev.%d type0 record not supported\n", hpp->revision);
43 __func__, hpp->revision);
44 return; 43 return;
45 } 44 }
46 45
@@ -81,8 +80,7 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
81 u32 reg32; 80 u32 reg32;
82 81
83 if (hpp->revision > 1) { 82 if (hpp->revision > 1) {
84 printk(KERN_WARNING "%s: Rev.%d type2 record not supported\n", 83 warn("Rev.%d type2 record not supported\n", hpp->revision);
85 __func__, hpp->revision);
86 return; 84 return;
87 } 85 }
88 86
@@ -149,8 +147,7 @@ static void program_fw_provided_values(struct pci_dev *dev)
149 return; 147 return;
150 148
151 if (pciehp_get_hp_params_from_firmware(dev, &hpp)) { 149 if (pciehp_get_hp_params_from_firmware(dev, &hpp)) {
152 printk(KERN_WARNING "%s: Could not get hotplug parameters\n", 150 warn("Could not get hotplug parameters\n");
153 __func__);
154 return; 151 return;
155 } 152 }
156 153
@@ -202,9 +199,9 @@ int pciehp_configure_device(struct slot *p_slot)
202 199
203 dev = pci_get_slot(parent, PCI_DEVFN(p_slot->device, 0)); 200 dev = pci_get_slot(parent, PCI_DEVFN(p_slot->device, 0));
204 if (dev) { 201 if (dev) {
205 ctrl_err(ctrl, 202 ctrl_err(ctrl, "Device %s already exists "
206 "Device %s already exists at %x:%x, cannot hot-add\n", 203 "at %04x:%02x:%02x, cannot hot-add\n", pci_name(dev),
207 pci_name(dev), p_slot->bus, p_slot->device); 204 pci_domain_nr(parent), p_slot->bus, p_slot->device);
208 pci_dev_put(dev); 205 pci_dev_put(dev);
209 return -EINVAL; 206 return -EINVAL;
210 } 207 }
@@ -248,8 +245,8 @@ int pciehp_unconfigure_device(struct slot *p_slot)
248 u16 command; 245 u16 command;
249 struct controller *ctrl = p_slot->ctrl; 246 struct controller *ctrl = p_slot->ctrl;
250 247
251 ctrl_dbg(ctrl, "%s: bus/dev = %x/%x\n", __func__, 248 ctrl_dbg(ctrl, "%s: domain:bus:dev = %04x:%02x:%02x\n",
252 p_slot->bus, p_slot->device); 249 __func__, pci_domain_nr(parent), p_slot->bus, p_slot->device);
253 ret = p_slot->hpc_ops->get_adapter_status(p_slot, &presence); 250 ret = p_slot->hpc_ops->get_adapter_status(p_slot, &presence);
254 if (ret) 251 if (ret)
255 presence = 0; 252 presence = 0;
diff --git a/drivers/pci/hotplug/rpaphp_slot.c b/drivers/pci/hotplug/rpaphp_slot.c
index 50884507b8be..2ea9cf1a8d02 100644
--- a/drivers/pci/hotplug/rpaphp_slot.c
+++ b/drivers/pci/hotplug/rpaphp_slot.c
@@ -43,7 +43,7 @@ static void rpaphp_release_slot(struct hotplug_slot *hotplug_slot)
43void dealloc_slot_struct(struct slot *slot) 43void dealloc_slot_struct(struct slot *slot)
44{ 44{
45 kfree(slot->hotplug_slot->info); 45 kfree(slot->hotplug_slot->info);
46 kfree(slot->hotplug_slot->name); 46 kfree(slot->name);
47 kfree(slot->hotplug_slot); 47 kfree(slot->hotplug_slot);
48 kfree(slot); 48 kfree(slot);
49} 49}
@@ -63,11 +63,9 @@ struct slot *alloc_slot_struct(struct device_node *dn,
63 GFP_KERNEL); 63 GFP_KERNEL);
64 if (!slot->hotplug_slot->info) 64 if (!slot->hotplug_slot->info)
65 goto error_hpslot; 65 goto error_hpslot;
66 slot->hotplug_slot->name = kmalloc(strlen(drc_name) + 1, GFP_KERNEL); 66 slot->name = kstrdup(drc_name, GFP_KERNEL);
67 if (!slot->hotplug_slot->name) 67 if (!slot->name)
68 goto error_info; 68 goto error_info;
69 slot->name = slot->hotplug_slot->name;
70 strcpy(slot->name, drc_name);
71 slot->dn = dn; 69 slot->dn = dn;
72 slot->index = drc_index; 70 slot->index = drc_index;
73 slot->power_domain = power_domain; 71 slot->power_domain = power_domain;
@@ -137,7 +135,7 @@ int rpaphp_register_slot(struct slot *slot)
137 slotno = PCI_SLOT(PCI_DN(slot->dn->child)->devfn); 135 slotno = PCI_SLOT(PCI_DN(slot->dn->child)->devfn);
138 else 136 else
139 slotno = -1; 137 slotno = -1;
140 retval = pci_hp_register(php_slot, slot->bus, slotno); 138 retval = pci_hp_register(php_slot, slot->bus, slotno, slot->name);
141 if (retval) { 139 if (retval) {
142 err("pci_hp_register failed with error %d\n", retval); 140 err("pci_hp_register failed with error %d\n", retval);
143 return retval; 141 return retval;
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c
index 410fe0394a8e..3eee70928d45 100644
--- a/drivers/pci/hotplug/sgi_hotplug.c
+++ b/drivers/pci/hotplug/sgi_hotplug.c
@@ -161,7 +161,8 @@ static int sn_pci_bus_valid(struct pci_bus *pci_bus)
161} 161}
162 162
163static int sn_hp_slot_private_alloc(struct hotplug_slot *bss_hotplug_slot, 163static int sn_hp_slot_private_alloc(struct hotplug_slot *bss_hotplug_slot,
164 struct pci_bus *pci_bus, int device) 164 struct pci_bus *pci_bus, int device,
165 char *name)
165{ 166{
166 struct pcibus_info *pcibus_info; 167 struct pcibus_info *pcibus_info;
167 struct slot *slot; 168 struct slot *slot;
@@ -173,15 +174,9 @@ static int sn_hp_slot_private_alloc(struct hotplug_slot *bss_hotplug_slot,
173 return -ENOMEM; 174 return -ENOMEM;
174 bss_hotplug_slot->private = slot; 175 bss_hotplug_slot->private = slot;
175 176
176 bss_hotplug_slot->name = kmalloc(SN_SLOT_NAME_SIZE, GFP_KERNEL);
177 if (!bss_hotplug_slot->name) {
178 kfree(bss_hotplug_slot->private);
179 return -ENOMEM;
180 }
181
182 slot->device_num = device; 177 slot->device_num = device;
183 slot->pci_bus = pci_bus; 178 slot->pci_bus = pci_bus;
184 sprintf(bss_hotplug_slot->name, "%04x:%02x:%02x", 179 sprintf(name, "%04x:%02x:%02x",
185 pci_domain_nr(pci_bus), 180 pci_domain_nr(pci_bus),
186 ((u16)pcibus_info->pbi_buscommon.bs_persist_busnum), 181 ((u16)pcibus_info->pbi_buscommon.bs_persist_busnum),
187 device + 1); 182 device + 1);
@@ -418,7 +413,7 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
418 /* 413 /*
419 * Add the slot's devices to the ACPI infrastructure */ 414 * Add the slot's devices to the ACPI infrastructure */
420 if (SN_ACPI_BASE_SUPPORT() && ssdt) { 415 if (SN_ACPI_BASE_SUPPORT() && ssdt) {
421 unsigned long adr; 416 unsigned long long adr;
422 struct acpi_device *pdevice; 417 struct acpi_device *pdevice;
423 struct acpi_device *device; 418 struct acpi_device *device;
424 acpi_handle phandle; 419 acpi_handle phandle;
@@ -510,7 +505,7 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
510 /* free the ACPI resources for the slot */ 505 /* free the ACPI resources for the slot */
511 if (SN_ACPI_BASE_SUPPORT() && 506 if (SN_ACPI_BASE_SUPPORT() &&
512 PCI_CONTROLLER(slot->pci_bus)->acpi_handle) { 507 PCI_CONTROLLER(slot->pci_bus)->acpi_handle) {
513 unsigned long adr; 508 unsigned long long adr;
514 struct acpi_device *device; 509 struct acpi_device *device;
515 acpi_handle phandle; 510 acpi_handle phandle;
516 acpi_handle chandle = NULL; 511 acpi_handle chandle = NULL;
@@ -608,7 +603,6 @@ static inline int get_power_status(struct hotplug_slot *bss_hotplug_slot,
608static void sn_release_slot(struct hotplug_slot *bss_hotplug_slot) 603static void sn_release_slot(struct hotplug_slot *bss_hotplug_slot)
609{ 604{
610 kfree(bss_hotplug_slot->info); 605 kfree(bss_hotplug_slot->info);
611 kfree(bss_hotplug_slot->name);
612 kfree(bss_hotplug_slot->private); 606 kfree(bss_hotplug_slot->private);
613 kfree(bss_hotplug_slot); 607 kfree(bss_hotplug_slot);
614} 608}
@@ -618,6 +612,7 @@ static int sn_hotplug_slot_register(struct pci_bus *pci_bus)
618 int device; 612 int device;
619 struct pci_slot *pci_slot; 613 struct pci_slot *pci_slot;
620 struct hotplug_slot *bss_hotplug_slot; 614 struct hotplug_slot *bss_hotplug_slot;
615 char name[SN_SLOT_NAME_SIZE];
621 int rc = 0; 616 int rc = 0;
622 617
623 /* 618 /*
@@ -645,15 +640,14 @@ static int sn_hotplug_slot_register(struct pci_bus *pci_bus)
645 } 640 }
646 641
647 if (sn_hp_slot_private_alloc(bss_hotplug_slot, 642 if (sn_hp_slot_private_alloc(bss_hotplug_slot,
648 pci_bus, device)) { 643 pci_bus, device, name)) {
649 rc = -ENOMEM; 644 rc = -ENOMEM;
650 goto alloc_err; 645 goto alloc_err;
651 } 646 }
652
653 bss_hotplug_slot->ops = &sn_hotplug_slot_ops; 647 bss_hotplug_slot->ops = &sn_hotplug_slot_ops;
654 bss_hotplug_slot->release = &sn_release_slot; 648 bss_hotplug_slot->release = &sn_release_slot;
655 649
656 rc = pci_hp_register(bss_hotplug_slot, pci_bus, device); 650 rc = pci_hp_register(bss_hotplug_slot, pci_bus, device, name);
657 if (rc) 651 if (rc)
658 goto register_err; 652 goto register_err;
659 653
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index 8a026f750deb..6aba0b6cf2e0 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -59,6 +59,20 @@ extern struct workqueue_struct *shpchp_wq;
59#define warn(format, arg...) \ 59#define warn(format, arg...) \
60 printk(KERN_WARNING "%s: " format, MY_NAME , ## arg) 60 printk(KERN_WARNING "%s: " format, MY_NAME , ## arg)
61 61
62#define ctrl_dbg(ctrl, format, arg...) \
63 do { \
64 if (shpchp_debug) \
 65 dev_printk(KERN_DEBUG, &ctrl->pci_dev->dev, \
66 format, ## arg); \
67 } while (0)
68#define ctrl_err(ctrl, format, arg...) \
69 dev_err(&ctrl->pci_dev->dev, format, ## arg)
70#define ctrl_info(ctrl, format, arg...) \
71 dev_info(&ctrl->pci_dev->dev, format, ## arg)
72#define ctrl_warn(ctrl, format, arg...) \
73 dev_warn(&ctrl->pci_dev->dev, format, ## arg)
74
75
62#define SLOT_NAME_SIZE 10 76#define SLOT_NAME_SIZE 10
63struct slot { 77struct slot {
64 u8 bus; 78 u8 bus;
@@ -69,15 +83,13 @@ struct slot {
69 u8 state; 83 u8 state;
70 u8 presence_save; 84 u8 presence_save;
71 u8 pwr_save; 85 u8 pwr_save;
72 struct timer_list task_event;
73 u8 hp_slot;
74 struct controller *ctrl; 86 struct controller *ctrl;
75 struct hpc_ops *hpc_ops; 87 struct hpc_ops *hpc_ops;
76 struct hotplug_slot *hotplug_slot; 88 struct hotplug_slot *hotplug_slot;
77 struct list_head slot_list; 89 struct list_head slot_list;
78 char name[SLOT_NAME_SIZE];
79 struct delayed_work work; /* work for button event */ 90 struct delayed_work work; /* work for button event */
80 struct mutex lock; 91 struct mutex lock;
92 u8 hp_slot;
81}; 93};
82 94
83struct event_info { 95struct event_info {
@@ -169,6 +181,11 @@ extern void cleanup_slots(struct controller *ctrl);
169extern void shpchp_queue_pushbutton_work(struct work_struct *work); 181extern void shpchp_queue_pushbutton_work(struct work_struct *work);
170extern int shpc_init( struct controller *ctrl, struct pci_dev *pdev); 182extern int shpc_init( struct controller *ctrl, struct pci_dev *pdev);
171 183
184static inline const char *slot_name(struct slot *slot)
185{
186 return hotplug_slot_name(slot->hotplug_slot);
187}
188
172#ifdef CONFIG_ACPI 189#ifdef CONFIG_ACPI
173#include <linux/pci-acpi.h> 190#include <linux/pci-acpi.h>
174static inline int get_hp_params_from_firmware(struct pci_dev *dev, 191static inline int get_hp_params_from_firmware(struct pci_dev *dev,
@@ -236,7 +253,7 @@ static inline struct slot *shpchp_find_slot(struct controller *ctrl, u8 device)
236 return slot; 253 return slot;
237 } 254 }
238 255
239 err("%s: slot (device=0x%x) not found\n", __func__, device); 256 ctrl_err(ctrl, "Slot (device=0x%02x) not found\n", device);
240 return NULL; 257 return NULL;
241} 258}
242 259
@@ -270,7 +287,9 @@ static inline void amd_pogo_errata_restore_misc_reg(struct slot *p_slot)
270 pci_read_config_dword(p_slot->ctrl->pci_dev, PCIX_MISC_BRIDGE_ERRORS_OFFSET, &pcix_bridge_errors_reg); 287 pci_read_config_dword(p_slot->ctrl->pci_dev, PCIX_MISC_BRIDGE_ERRORS_OFFSET, &pcix_bridge_errors_reg);
271 perr_set = pcix_bridge_errors_reg & PERR_OBSERVED_MASK; 288 perr_set = pcix_bridge_errors_reg & PERR_OBSERVED_MASK;
272 if (perr_set) { 289 if (perr_set) {
273 dbg ("%s W1C: Bridge_Errors[ PERR_OBSERVED = %08X]\n",__func__ , perr_set); 290 ctrl_dbg(p_slot->ctrl,
291 "Bridge_Errors[ PERR_OBSERVED = %08X] (W1C)\n",
292 perr_set);
274 293
275 pci_write_config_dword(p_slot->ctrl->pci_dev, PCIX_MISC_BRIDGE_ERRORS_OFFSET, perr_set); 294 pci_write_config_dword(p_slot->ctrl->pci_dev, PCIX_MISC_BRIDGE_ERRORS_OFFSET, perr_set);
276 } 295 }
@@ -279,7 +298,7 @@ static inline void amd_pogo_errata_restore_misc_reg(struct slot *p_slot)
279 pci_read_config_dword(p_slot->ctrl->pci_dev, PCIX_MEM_BASE_LIMIT_OFFSET, &pcix_mem_base_reg); 298 pci_read_config_dword(p_slot->ctrl->pci_dev, PCIX_MEM_BASE_LIMIT_OFFSET, &pcix_mem_base_reg);
280 rse_set = pcix_mem_base_reg & RSE_MASK; 299 rse_set = pcix_mem_base_reg & RSE_MASK;
281 if (rse_set) { 300 if (rse_set) {
282 dbg ("%s W1C: Memory_Base_Limit[ RSE ]\n",__func__ ); 301 ctrl_dbg(p_slot->ctrl, "Memory_Base_Limit[ RSE ] (W1C)\n");
283 302
284 pci_write_config_dword(p_slot->ctrl->pci_dev, PCIX_MEM_BASE_LIMIT_OFFSET, rse_set); 303 pci_write_config_dword(p_slot->ctrl->pci_dev, PCIX_MEM_BASE_LIMIT_OFFSET, rse_set);
285 } 304 }
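The shpchp changes below follow the same slot-naming convention as pciehp, rpaphp and sgi_hotplug above: the name is no longer stored in struct slot, it is built locally, handed to pci_hp_register(), and later retrieved through hotplug_slot_name() via the slot_name() inline added in this header. A minimal sketch of that pattern, assuming the ctrl/slot/retval variables from init_slots() below; since the name buffer lives on the stack here, the hotplug core necessarily keeps its own copy of the string.

        char name[SLOT_NAME_SIZE];

        /* Build the name locally; it is only needed at registration time */
        snprintf(name, SLOT_NAME_SIZE, "%d", slot->number);
        retval = pci_hp_register(slot->hotplug_slot,
                                 ctrl->pci_dev->subordinate,
                                 slot->device, name);
        if (retval)
                ctrl_err(ctrl, "pci_hp_register failed with error %d\n",
                         retval);

        /* Later messages refer to the registered name via slot_name() */
        ctrl_info(ctrl, "Button pressed on Slot(%s)\n", slot_name(slot));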
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index cc38615395f1..fe8d149c2293 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -89,7 +89,8 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
89{ 89{
90 struct slot *slot = hotplug_slot->private; 90 struct slot *slot = hotplug_slot->private;
91 91
92 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 92 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
93 __func__, slot_name(slot));
93 94
94 kfree(slot->hotplug_slot->info); 95 kfree(slot->hotplug_slot->info);
95 kfree(slot->hotplug_slot); 96 kfree(slot->hotplug_slot);
@@ -101,8 +102,9 @@ static int init_slots(struct controller *ctrl)
101 struct slot *slot; 102 struct slot *slot;
102 struct hotplug_slot *hotplug_slot; 103 struct hotplug_slot *hotplug_slot;
103 struct hotplug_slot_info *info; 104 struct hotplug_slot_info *info;
105 char name[SLOT_NAME_SIZE];
104 int retval = -ENOMEM; 106 int retval = -ENOMEM;
105 int i, len, dup = 1; 107 int i;
106 108
107 for (i = 0; i < ctrl->num_slots; i++) { 109 for (i = 0; i < ctrl->num_slots; i++) {
108 slot = kzalloc(sizeof(*slot), GFP_KERNEL); 110 slot = kzalloc(sizeof(*slot), GFP_KERNEL);
@@ -119,8 +121,6 @@ static int init_slots(struct controller *ctrl)
119 goto error_hpslot; 121 goto error_hpslot;
120 hotplug_slot->info = info; 122 hotplug_slot->info = info;
121 123
122 hotplug_slot->name = slot->name;
123
124 slot->hp_slot = i; 124 slot->hp_slot = i;
125 slot->ctrl = ctrl; 125 slot->ctrl = ctrl;
126 slot->bus = ctrl->pci_dev->subordinate->number; 126 slot->bus = ctrl->pci_dev->subordinate->number;
@@ -133,37 +133,27 @@ static int init_slots(struct controller *ctrl)
133 /* register this slot with the hotplug pci core */ 133 /* register this slot with the hotplug pci core */
134 hotplug_slot->private = slot; 134 hotplug_slot->private = slot;
135 hotplug_slot->release = &release_slot; 135 hotplug_slot->release = &release_slot;
136 snprintf(slot->name, SLOT_NAME_SIZE, "%d", slot->number); 136 snprintf(name, SLOT_NAME_SIZE, "%d", slot->number);
137 hotplug_slot->ops = &shpchp_hotplug_slot_ops; 137 hotplug_slot->ops = &shpchp_hotplug_slot_ops;
138 138
139 get_power_status(hotplug_slot, &info->power_status); 139 ctrl_dbg(ctrl, "Registering domain:bus:dev=%04x:%02x:%02x "
140 get_attention_status(hotplug_slot, &info->attention_status); 140 "hp_slot=%x sun=%x slot_device_offset=%x\n",
141 get_latch_status(hotplug_slot, &info->latch_status); 141 pci_domain_nr(ctrl->pci_dev->subordinate),
142 get_adapter_status(hotplug_slot, &info->adapter_status); 142 slot->bus, slot->device, slot->hp_slot, slot->number,
143 143 ctrl->slot_device_offset);
144 dbg("Registering bus=%x dev=%x hp_slot=%x sun=%x "
145 "slot_device_offset=%x\n", slot->bus, slot->device,
146 slot->hp_slot, slot->number, ctrl->slot_device_offset);
147duplicate_name:
148 retval = pci_hp_register(slot->hotplug_slot, 144 retval = pci_hp_register(slot->hotplug_slot,
149 ctrl->pci_dev->subordinate, slot->device); 145 ctrl->pci_dev->subordinate, slot->device, name);
150 if (retval) { 146 if (retval) {
151 /* 147 ctrl_err(ctrl, "pci_hp_register failed with error %d\n",
152 * If slot N already exists, we'll try to create 148 retval);
153 * slot N-1, N-2 ... N-M, until we overflow.
154 */
155 if (retval == -EEXIST) {
156 len = snprintf(slot->name, SLOT_NAME_SIZE,
157 "%d-%d", slot->number, dup++);
158 if (len < SLOT_NAME_SIZE)
159 goto duplicate_name;
160 else
161 err("duplicate slot name overflow\n");
162 }
163 err("pci_hp_register failed with error %d\n", retval);
164 goto error_info; 149 goto error_info;
165 } 150 }
166 151
152 get_power_status(hotplug_slot, &info->power_status);
153 get_attention_status(hotplug_slot, &info->attention_status);
154 get_latch_status(hotplug_slot, &info->latch_status);
155 get_adapter_status(hotplug_slot, &info->adapter_status);
156
167 list_add(&slot->slot_list, &ctrl->slot_list); 157 list_add(&slot->slot_list, &ctrl->slot_list);
168 } 158 }
169 159
@@ -201,7 +191,8 @@ static int set_attention_status (struct hotplug_slot *hotplug_slot, u8 status)
201{ 191{
202 struct slot *slot = get_slot(hotplug_slot); 192 struct slot *slot = get_slot(hotplug_slot);
203 193
204 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 194 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
195 __func__, slot_name(slot));
205 196
206 hotplug_slot->info->attention_status = status; 197 hotplug_slot->info->attention_status = status;
207 slot->hpc_ops->set_attention_status(slot, status); 198 slot->hpc_ops->set_attention_status(slot, status);
@@ -213,7 +204,8 @@ static int enable_slot (struct hotplug_slot *hotplug_slot)
213{ 204{
214 struct slot *slot = get_slot(hotplug_slot); 205 struct slot *slot = get_slot(hotplug_slot);
215 206
216 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 207 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
208 __func__, slot_name(slot));
217 209
218 return shpchp_sysfs_enable_slot(slot); 210 return shpchp_sysfs_enable_slot(slot);
219} 211}
@@ -222,7 +214,8 @@ static int disable_slot (struct hotplug_slot *hotplug_slot)
222{ 214{
223 struct slot *slot = get_slot(hotplug_slot); 215 struct slot *slot = get_slot(hotplug_slot);
224 216
225 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 217 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
218 __func__, slot_name(slot));
226 219
227 return shpchp_sysfs_disable_slot(slot); 220 return shpchp_sysfs_disable_slot(slot);
228} 221}
@@ -232,7 +225,8 @@ static int get_power_status (struct hotplug_slot *hotplug_slot, u8 *value)
232 struct slot *slot = get_slot(hotplug_slot); 225 struct slot *slot = get_slot(hotplug_slot);
233 int retval; 226 int retval;
234 227
235 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 228 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
229 __func__, slot_name(slot));
236 230
237 retval = slot->hpc_ops->get_power_status(slot, value); 231 retval = slot->hpc_ops->get_power_status(slot, value);
238 if (retval < 0) 232 if (retval < 0)
@@ -246,7 +240,8 @@ static int get_attention_status (struct hotplug_slot *hotplug_slot, u8 *value)
246 struct slot *slot = get_slot(hotplug_slot); 240 struct slot *slot = get_slot(hotplug_slot);
247 int retval; 241 int retval;
248 242
249 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 243 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
244 __func__, slot_name(slot));
250 245
251 retval = slot->hpc_ops->get_attention_status(slot, value); 246 retval = slot->hpc_ops->get_attention_status(slot, value);
252 if (retval < 0) 247 if (retval < 0)
@@ -260,7 +255,8 @@ static int get_latch_status (struct hotplug_slot *hotplug_slot, u8 *value)
260 struct slot *slot = get_slot(hotplug_slot); 255 struct slot *slot = get_slot(hotplug_slot);
261 int retval; 256 int retval;
262 257
263 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 258 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
259 __func__, slot_name(slot));
264 260
265 retval = slot->hpc_ops->get_latch_status(slot, value); 261 retval = slot->hpc_ops->get_latch_status(slot, value);
266 if (retval < 0) 262 if (retval < 0)
@@ -274,7 +270,8 @@ static int get_adapter_status (struct hotplug_slot *hotplug_slot, u8 *value)
274 struct slot *slot = get_slot(hotplug_slot); 270 struct slot *slot = get_slot(hotplug_slot);
275 int retval; 271 int retval;
276 272
277 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 273 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
274 __func__, slot_name(slot));
278 275
279 retval = slot->hpc_ops->get_adapter_status(slot, value); 276 retval = slot->hpc_ops->get_adapter_status(slot, value);
280 if (retval < 0) 277 if (retval < 0)
@@ -289,7 +286,8 @@ static int get_max_bus_speed(struct hotplug_slot *hotplug_slot,
289 struct slot *slot = get_slot(hotplug_slot); 286 struct slot *slot = get_slot(hotplug_slot);
290 int retval; 287 int retval;
291 288
292 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 289 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
290 __func__, slot_name(slot));
293 291
294 retval = slot->hpc_ops->get_max_bus_speed(slot, value); 292 retval = slot->hpc_ops->get_max_bus_speed(slot, value);
295 if (retval < 0) 293 if (retval < 0)
@@ -303,7 +301,8 @@ static int get_cur_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_sp
303 struct slot *slot = get_slot(hotplug_slot); 301 struct slot *slot = get_slot(hotplug_slot);
304 int retval; 302 int retval;
305 303
306 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 304 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
305 __func__, slot_name(slot));
307 306
308 retval = slot->hpc_ops->get_cur_bus_speed(slot, value); 307 retval = slot->hpc_ops->get_cur_bus_speed(slot, value);
309 if (retval < 0) 308 if (retval < 0)
@@ -334,15 +333,14 @@ static int shpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
334 333
335 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); 334 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
336 if (!ctrl) { 335 if (!ctrl) {
337 err("%s : out of memory\n", __func__); 336 dev_err(&pdev->dev, "%s: Out of memory\n", __func__);
338 goto err_out_none; 337 goto err_out_none;
339 } 338 }
340 INIT_LIST_HEAD(&ctrl->slot_list); 339 INIT_LIST_HEAD(&ctrl->slot_list);
341 340
342 rc = shpc_init(ctrl, pdev); 341 rc = shpc_init(ctrl, pdev);
343 if (rc) { 342 if (rc) {
344 dbg("%s: controller initialization failed\n", 343 ctrl_dbg(ctrl, "Controller initialization failed\n");
345 SHPC_MODULE_NAME);
346 goto err_out_free_ctrl; 344 goto err_out_free_ctrl;
347 } 345 }
348 346
@@ -351,7 +349,7 @@ static int shpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
351 /* Setup the slot information structures */ 349 /* Setup the slot information structures */
352 rc = init_slots(ctrl); 350 rc = init_slots(ctrl);
353 if (rc) { 351 if (rc) {
354 err("%s: slot initialization failed\n", SHPC_MODULE_NAME); 352 ctrl_err(ctrl, "Slot initialization failed\n");
355 goto err_out_release_ctlr; 353 goto err_out_release_ctlr;
356 } 354 }
357 355
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
index dfb53932dfbc..b8ab2796e66a 100644
--- a/drivers/pci/hotplug/shpchp_ctrl.c
+++ b/drivers/pci/hotplug/shpchp_ctrl.c
@@ -62,7 +62,7 @@ u8 shpchp_handle_attention_button(u8 hp_slot, struct controller *ctrl)
62 u32 event_type; 62 u32 event_type;
63 63
64 /* Attention Button Change */ 64 /* Attention Button Change */
65 dbg("shpchp: Attention button interrupt received.\n"); 65 ctrl_dbg(ctrl, "Attention button interrupt received\n");
66 66
67 p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); 67 p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
68 p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save)); 68 p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save));
@@ -70,7 +70,7 @@ u8 shpchp_handle_attention_button(u8 hp_slot, struct controller *ctrl)
70 /* 70 /*
71 * Button pressed - See if need to TAKE ACTION!!! 71 * Button pressed - See if need to TAKE ACTION!!!
72 */ 72 */
73 info("Button pressed on Slot(%s)\n", p_slot->name); 73 ctrl_info(ctrl, "Button pressed on Slot(%s)\n", slot_name(p_slot));
74 event_type = INT_BUTTON_PRESS; 74 event_type = INT_BUTTON_PRESS;
75 75
76 queue_interrupt_event(p_slot, event_type); 76 queue_interrupt_event(p_slot, event_type);
@@ -86,29 +86,29 @@ u8 shpchp_handle_switch_change(u8 hp_slot, struct controller *ctrl)
86 u32 event_type; 86 u32 event_type;
87 87
88 /* Switch Change */ 88 /* Switch Change */
89 dbg("shpchp: Switch interrupt received.\n"); 89 ctrl_dbg(ctrl, "Switch interrupt received\n");
90 90
91 p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); 91 p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
92 p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save)); 92 p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save));
93 p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); 93 p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
94 dbg("%s: Card present %x Power status %x\n", __func__, 94 ctrl_dbg(ctrl, "Card present %x Power status %x\n",
95 p_slot->presence_save, p_slot->pwr_save); 95 p_slot->presence_save, p_slot->pwr_save);
96 96
97 if (getstatus) { 97 if (getstatus) {
98 /* 98 /*
99 * Switch opened 99 * Switch opened
100 */ 100 */
101 info("Latch open on Slot(%s)\n", p_slot->name); 101 ctrl_info(ctrl, "Latch open on Slot(%s)\n", slot_name(p_slot));
102 event_type = INT_SWITCH_OPEN; 102 event_type = INT_SWITCH_OPEN;
103 if (p_slot->pwr_save && p_slot->presence_save) { 103 if (p_slot->pwr_save && p_slot->presence_save) {
104 event_type = INT_POWER_FAULT; 104 event_type = INT_POWER_FAULT;
105 err("Surprise Removal of card\n"); 105 ctrl_err(ctrl, "Surprise Removal of card\n");
106 } 106 }
107 } else { 107 } else {
108 /* 108 /*
109 * Switch closed 109 * Switch closed
110 */ 110 */
111 info("Latch close on Slot(%s)\n", p_slot->name); 111 ctrl_info(ctrl, "Latch close on Slot(%s)\n", slot_name(p_slot));
112 event_type = INT_SWITCH_CLOSE; 112 event_type = INT_SWITCH_CLOSE;
113 } 113 }
114 114
@@ -123,7 +123,7 @@ u8 shpchp_handle_presence_change(u8 hp_slot, struct controller *ctrl)
123 u32 event_type; 123 u32 event_type;
124 124
125 /* Presence Change */ 125 /* Presence Change */
126 dbg("shpchp: Presence/Notify input change.\n"); 126 ctrl_dbg(ctrl, "Presence/Notify input change\n");
127 127
128 p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); 128 p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
129 129
@@ -135,13 +135,15 @@ u8 shpchp_handle_presence_change(u8 hp_slot, struct controller *ctrl)
135 /* 135 /*
136 * Card Present 136 * Card Present
137 */ 137 */
138 info("Card present on Slot(%s)\n", p_slot->name); 138 ctrl_info(ctrl, "Card present on Slot(%s)\n",
139 slot_name(p_slot));
139 event_type = INT_PRESENCE_ON; 140 event_type = INT_PRESENCE_ON;
140 } else { 141 } else {
141 /* 142 /*
142 * Not Present 143 * Not Present
143 */ 144 */
144 info("Card not present on Slot(%s)\n", p_slot->name); 145 ctrl_info(ctrl, "Card not present on Slot(%s)\n",
146 slot_name(p_slot));
145 event_type = INT_PRESENCE_OFF; 147 event_type = INT_PRESENCE_OFF;
146 } 148 }
147 149
@@ -156,7 +158,7 @@ u8 shpchp_handle_power_fault(u8 hp_slot, struct controller *ctrl)
156 u32 event_type; 158 u32 event_type;
157 159
158 /* Power fault */ 160 /* Power fault */
159 dbg("shpchp: Power fault interrupt received.\n"); 161 ctrl_dbg(ctrl, "Power fault interrupt received\n");
160 162
161 p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); 163 p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
162 164
@@ -164,18 +166,19 @@ u8 shpchp_handle_power_fault(u8 hp_slot, struct controller *ctrl)
164 /* 166 /*
165 * Power fault Cleared 167 * Power fault Cleared
166 */ 168 */
167 info("Power fault cleared on Slot(%s)\n", p_slot->name); 169 ctrl_info(ctrl, "Power fault cleared on Slot(%s)\n",
170 slot_name(p_slot));
168 p_slot->status = 0x00; 171 p_slot->status = 0x00;
169 event_type = INT_POWER_FAULT_CLEAR; 172 event_type = INT_POWER_FAULT_CLEAR;
170 } else { 173 } else {
171 /* 174 /*
172 * Power fault 175 * Power fault
173 */ 176 */
174 info("Power fault on Slot(%s)\n", p_slot->name); 177 ctrl_info(ctrl, "Power fault on Slot(%s)\n", slot_name(p_slot));
175 event_type = INT_POWER_FAULT; 178 event_type = INT_POWER_FAULT;
176 /* set power fault status for this board */ 179 /* set power fault status for this board */
177 p_slot->status = 0xFF; 180 p_slot->status = 0xFF;
178 info("power fault bit %x set\n", hp_slot); 181 ctrl_info(ctrl, "Power fault bit %x set\n", hp_slot);
179 } 182 }
180 183
181 queue_interrupt_event(p_slot, event_type); 184 queue_interrupt_event(p_slot, event_type);
@@ -191,10 +194,10 @@ static int change_bus_speed(struct controller *ctrl, struct slot *p_slot,
191{ 194{
192 int rc = 0; 195 int rc = 0;
193 196
194 dbg("%s: change to speed %d\n", __func__, speed); 197 ctrl_dbg(ctrl, "Change speed to %d\n", speed);
195 if ((rc = p_slot->hpc_ops->set_bus_speed_mode(p_slot, speed))) { 198 if ((rc = p_slot->hpc_ops->set_bus_speed_mode(p_slot, speed))) {
196 err("%s: Issue of set bus speed mode command failed\n", 199 ctrl_err(ctrl, "%s: Issue of set bus speed mode command "
197 __func__); 200 "failed\n", __func__);
198 return WRONG_BUS_FREQUENCY; 201 return WRONG_BUS_FREQUENCY;
199 } 202 }
200 return rc; 203 return rc;
@@ -212,8 +215,8 @@ static int fix_bus_speed(struct controller *ctrl, struct slot *pslot,
212 */ 215 */
213 if (flag) { 216 if (flag) {
214 if (asp < bsp) { 217 if (asp < bsp) {
215 err("%s: speed of bus %x and adapter %x mismatch\n", 218 ctrl_err(ctrl, "Speed of bus %x and adapter %x "
216 __func__, bsp, asp); 219 "mismatch\n", bsp, asp);
217 rc = WRONG_BUS_FREQUENCY; 220 rc = WRONG_BUS_FREQUENCY;
218 } 221 }
219 return rc; 222 return rc;
@@ -243,17 +246,18 @@ static int board_added(struct slot *p_slot)
243 int rc = 0; 246 int rc = 0;
244 enum pci_bus_speed asp, bsp, msp; 247 enum pci_bus_speed asp, bsp, msp;
245 struct controller *ctrl = p_slot->ctrl; 248 struct controller *ctrl = p_slot->ctrl;
249 struct pci_bus *parent = ctrl->pci_dev->subordinate;
246 250
247 hp_slot = p_slot->device - ctrl->slot_device_offset; 251 hp_slot = p_slot->device - ctrl->slot_device_offset;
248 252
249 dbg("%s: p_slot->device, slot_offset, hp_slot = %d, %d ,%d\n", 253 ctrl_dbg(ctrl,
250 __func__, p_slot->device, 254 "%s: p_slot->device, slot_offset, hp_slot = %d, %d ,%d\n",
251 ctrl->slot_device_offset, hp_slot); 255 __func__, p_slot->device, ctrl->slot_device_offset, hp_slot);
252 256
253 /* Power on slot without connecting to bus */ 257 /* Power on slot without connecting to bus */
254 rc = p_slot->hpc_ops->power_on_slot(p_slot); 258 rc = p_slot->hpc_ops->power_on_slot(p_slot);
255 if (rc) { 259 if (rc) {
256 err("%s: Failed to power on slot\n", __func__); 260 ctrl_err(ctrl, "Failed to power on slot\n");
257 return -1; 261 return -1;
258 } 262 }
259 263
@@ -262,33 +266,34 @@ static int board_added(struct slot *p_slot)
262 return WRONG_BUS_FREQUENCY; 266 return WRONG_BUS_FREQUENCY;
263 267
264 if ((rc = p_slot->hpc_ops->set_bus_speed_mode(p_slot, PCI_SPEED_33MHz))) { 268 if ((rc = p_slot->hpc_ops->set_bus_speed_mode(p_slot, PCI_SPEED_33MHz))) {
265 err("%s: Issue of set bus speed mode command failed\n", __func__); 269 ctrl_err(ctrl, "%s: Issue of set bus speed mode command"
270 " failed\n", __func__);
266 return WRONG_BUS_FREQUENCY; 271 return WRONG_BUS_FREQUENCY;
267 } 272 }
268 273
269 /* turn on board, blink green LED, turn off Amber LED */ 274 /* turn on board, blink green LED, turn off Amber LED */
270 if ((rc = p_slot->hpc_ops->slot_enable(p_slot))) { 275 if ((rc = p_slot->hpc_ops->slot_enable(p_slot))) {
271 err("%s: Issue of Slot Enable command failed\n", __func__); 276 ctrl_err(ctrl, "Issue of Slot Enable command failed\n");
272 return rc; 277 return rc;
273 } 278 }
274 } 279 }
275 280
276 rc = p_slot->hpc_ops->get_adapter_speed(p_slot, &asp); 281 rc = p_slot->hpc_ops->get_adapter_speed(p_slot, &asp);
277 if (rc) { 282 if (rc) {
278 err("%s: Can't get adapter speed or bus mode mismatch\n", 283 ctrl_err(ctrl, "Can't get adapter speed or "
279 __func__); 284 "bus mode mismatch\n");
280 return WRONG_BUS_FREQUENCY; 285 return WRONG_BUS_FREQUENCY;
281 } 286 }
282 287
283 rc = p_slot->hpc_ops->get_cur_bus_speed(p_slot, &bsp); 288 rc = p_slot->hpc_ops->get_cur_bus_speed(p_slot, &bsp);
284 if (rc) { 289 if (rc) {
285 err("%s: Can't get bus operation speed\n", __func__); 290 ctrl_err(ctrl, "Can't get bus operation speed\n");
286 return WRONG_BUS_FREQUENCY; 291 return WRONG_BUS_FREQUENCY;
287 } 292 }
288 293
289 rc = p_slot->hpc_ops->get_max_bus_speed(p_slot, &msp); 294 rc = p_slot->hpc_ops->get_max_bus_speed(p_slot, &msp);
290 if (rc) { 295 if (rc) {
291 err("%s: Can't get max bus operation speed\n", __func__); 296 ctrl_err(ctrl, "Can't get max bus operation speed\n");
292 msp = bsp; 297 msp = bsp;
293 } 298 }
294 299
@@ -296,9 +301,9 @@ static int board_added(struct slot *p_slot)
296 if (!list_empty(&ctrl->pci_dev->subordinate->devices)) 301 if (!list_empty(&ctrl->pci_dev->subordinate->devices))
297 slots_not_empty = 1; 302 slots_not_empty = 1;
298 303
299 dbg("%s: slots_not_empty %d, adapter_speed %d, bus_speed %d, " 304 ctrl_dbg(ctrl, "%s: slots_not_empty %d, adapter_speed %d, bus_speed %d,"
300 "max_bus_speed %d\n", __func__, slots_not_empty, asp, 305 " max_bus_speed %d\n", __func__, slots_not_empty, asp,
301 bsp, msp); 306 bsp, msp);
302 307
303 rc = fix_bus_speed(ctrl, p_slot, slots_not_empty, asp, bsp, msp); 308 rc = fix_bus_speed(ctrl, p_slot, slots_not_empty, asp, bsp, msp);
304 if (rc) 309 if (rc)
@@ -306,26 +311,26 @@ static int board_added(struct slot *p_slot)
306 311
307 /* turn on board, blink green LED, turn off Amber LED */ 312 /* turn on board, blink green LED, turn off Amber LED */
308 if ((rc = p_slot->hpc_ops->slot_enable(p_slot))) { 313 if ((rc = p_slot->hpc_ops->slot_enable(p_slot))) {
309 err("%s: Issue of Slot Enable command failed\n", __func__); 314 ctrl_err(ctrl, "Issue of Slot Enable command failed\n");
310 return rc; 315 return rc;
311 } 316 }
312 317
313 /* Wait for ~1 second */ 318 /* Wait for ~1 second */
314 msleep(1000); 319 msleep(1000);
315 320
316 dbg("%s: slot status = %x\n", __func__, p_slot->status); 321 ctrl_dbg(ctrl, "%s: slot status = %x\n", __func__, p_slot->status);
317 /* Check for a power fault */ 322 /* Check for a power fault */
318 if (p_slot->status == 0xFF) { 323 if (p_slot->status == 0xFF) {
319 /* power fault occurred, but it was benign */ 324 /* power fault occurred, but it was benign */
320 dbg("%s: power fault\n", __func__); 325 ctrl_dbg(ctrl, "%s: Power fault\n", __func__);
321 rc = POWER_FAILURE; 326 rc = POWER_FAILURE;
322 p_slot->status = 0; 327 p_slot->status = 0;
323 goto err_exit; 328 goto err_exit;
324 } 329 }
325 330
326 if (shpchp_configure_device(p_slot)) { 331 if (shpchp_configure_device(p_slot)) {
327 err("Cannot add device at 0x%x:0x%x\n", p_slot->bus, 332 ctrl_err(ctrl, "Cannot add device at %04x:%02x:%02x\n",
328 p_slot->device); 333 pci_domain_nr(parent), p_slot->bus, p_slot->device);
329 goto err_exit; 334 goto err_exit;
330 } 335 }
331 336
@@ -341,7 +346,8 @@ err_exit:
341 /* turn off slot, turn on Amber LED, turn off Green LED */ 346 /* turn off slot, turn on Amber LED, turn off Green LED */
342 rc = p_slot->hpc_ops->slot_disable(p_slot); 347 rc = p_slot->hpc_ops->slot_disable(p_slot);
343 if (rc) { 348 if (rc) {
344 err("%s: Issue of Slot Disable command failed\n", __func__); 349 ctrl_err(ctrl, "%s: Issue of Slot Disable command failed\n",
350 __func__);
345 return rc; 351 return rc;
346 } 352 }
347 353
@@ -365,7 +371,7 @@ static int remove_board(struct slot *p_slot)
365 hp_slot = p_slot->device - ctrl->slot_device_offset; 371 hp_slot = p_slot->device - ctrl->slot_device_offset;
366 p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); 372 p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
367 373
368 dbg("In %s, hp_slot = %d\n", __func__, hp_slot); 374 ctrl_dbg(ctrl, "%s: hp_slot = %d\n", __func__, hp_slot);
369 375
370 /* Change status to shutdown */ 376 /* Change status to shutdown */
371 if (p_slot->is_a_board) 377 if (p_slot->is_a_board)
@@ -374,13 +380,14 @@ static int remove_board(struct slot *p_slot)
374 /* turn off slot, turn on Amber LED, turn off Green LED */ 380 /* turn off slot, turn on Amber LED, turn off Green LED */
375 rc = p_slot->hpc_ops->slot_disable(p_slot); 381 rc = p_slot->hpc_ops->slot_disable(p_slot);
376 if (rc) { 382 if (rc) {
377 err("%s: Issue of Slot Disable command failed\n", __func__); 383 ctrl_err(ctrl, "%s: Issue of Slot Disable command failed\n",
384 __func__);
378 return rc; 385 return rc;
379 } 386 }
380 387
381 rc = p_slot->hpc_ops->set_attention_status(p_slot, 0); 388 rc = p_slot->hpc_ops->set_attention_status(p_slot, 0);
382 if (rc) { 389 if (rc) {
383 err("%s: Issue of Set Attention command failed\n", __func__); 390 ctrl_err(ctrl, "Issue of Set Attention command failed\n");
384 return rc; 391 return rc;
385 } 392 }
386 393
@@ -439,7 +446,8 @@ void shpchp_queue_pushbutton_work(struct work_struct *work)
439 446
440 info = kmalloc(sizeof(*info), GFP_KERNEL); 447 info = kmalloc(sizeof(*info), GFP_KERNEL);
441 if (!info) { 448 if (!info) {
442 err("%s: Cannot allocate memory\n", __func__); 449 ctrl_err(p_slot->ctrl, "%s: Cannot allocate memory\n",
450 __func__);
443 return; 451 return;
444 } 452 }
445 info->p_slot = p_slot; 453 info->p_slot = p_slot;
@@ -486,18 +494,19 @@ static int update_slot_info (struct slot *slot)
486static void handle_button_press_event(struct slot *p_slot) 494static void handle_button_press_event(struct slot *p_slot)
487{ 495{
488 u8 getstatus; 496 u8 getstatus;
497 struct controller *ctrl = p_slot->ctrl;
489 498
490 switch (p_slot->state) { 499 switch (p_slot->state) {
491 case STATIC_STATE: 500 case STATIC_STATE:
492 p_slot->hpc_ops->get_power_status(p_slot, &getstatus); 501 p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
493 if (getstatus) { 502 if (getstatus) {
494 p_slot->state = BLINKINGOFF_STATE; 503 p_slot->state = BLINKINGOFF_STATE;
495 info("PCI slot #%s - powering off due to button " 504 ctrl_info(ctrl, "PCI slot #%s - powering off due to "
496 "press.\n", p_slot->name); 505 "button press.\n", slot_name(p_slot));
497 } else { 506 } else {
498 p_slot->state = BLINKINGON_STATE; 507 p_slot->state = BLINKINGON_STATE;
499 info("PCI slot #%s - powering on due to button " 508 ctrl_info(ctrl, "PCI slot #%s - powering on due to "
500 "press.\n", p_slot->name); 509 "button press.\n", slot_name(p_slot));
501 } 510 }
502 /* blink green LED and turn off amber */ 511 /* blink green LED and turn off amber */
503 p_slot->hpc_ops->green_led_blink(p_slot); 512 p_slot->hpc_ops->green_led_blink(p_slot);
@@ -512,16 +521,16 @@ static void handle_button_press_event(struct slot *p_slot)
512 * press the attention again before the 5 sec. limit 521 * press the attention again before the 5 sec. limit
513 * expires to cancel hot-add or hot-remove 522 * expires to cancel hot-add or hot-remove
514 */ 523 */
515 info("Button cancel on Slot(%s)\n", p_slot->name); 524 ctrl_info(ctrl, "Button cancel on Slot(%s)\n",
516 dbg("%s: button cancel\n", __func__); 525 slot_name(p_slot));
517 cancel_delayed_work(&p_slot->work); 526 cancel_delayed_work(&p_slot->work);
518 if (p_slot->state == BLINKINGOFF_STATE) 527 if (p_slot->state == BLINKINGOFF_STATE)
519 p_slot->hpc_ops->green_led_on(p_slot); 528 p_slot->hpc_ops->green_led_on(p_slot);
520 else 529 else
521 p_slot->hpc_ops->green_led_off(p_slot); 530 p_slot->hpc_ops->green_led_off(p_slot);
522 p_slot->hpc_ops->set_attention_status(p_slot, 0); 531 p_slot->hpc_ops->set_attention_status(p_slot, 0);
523 info("PCI slot #%s - action canceled due to button press\n", 532 ctrl_info(ctrl, "PCI slot #%s - action canceled due to "
524 p_slot->name); 533 "button press\n", slot_name(p_slot));
525 p_slot->state = STATIC_STATE; 534 p_slot->state = STATIC_STATE;
526 break; 535 break;
527 case POWEROFF_STATE: 536 case POWEROFF_STATE:
@@ -531,11 +540,12 @@ static void handle_button_press_event(struct slot *p_slot)
531 * this means that the previous attention button action 540 * this means that the previous attention button action
532 * to hot-add or hot-remove is undergoing 541 * to hot-add or hot-remove is undergoing
533 */ 542 */
534 info("Button ignore on Slot(%s)\n", p_slot->name); 543 ctrl_info(ctrl, "Button ignore on Slot(%s)\n",
544 slot_name(p_slot));
535 update_slot_info(p_slot); 545 update_slot_info(p_slot);
536 break; 546 break;
537 default: 547 default:
538 warn("Not a valid state\n"); 548 ctrl_warn(ctrl, "Not a valid state\n");
539 break; 549 break;
540 } 550 }
541} 551}
@@ -551,7 +561,7 @@ static void interrupt_event_handler(struct work_struct *work)
551 handle_button_press_event(p_slot); 561 handle_button_press_event(p_slot);
552 break; 562 break;
553 case INT_POWER_FAULT: 563 case INT_POWER_FAULT:
554 dbg("%s: power fault\n", __func__); 564 ctrl_dbg(p_slot->ctrl, "%s: Power fault\n", __func__);
555 p_slot->hpc_ops->set_attention_status(p_slot, 1); 565 p_slot->hpc_ops->set_attention_status(p_slot, 1);
556 p_slot->hpc_ops->green_led_off(p_slot); 566 p_slot->hpc_ops->green_led_off(p_slot);
557 break; 567 break;
@@ -569,22 +579,24 @@ static int shpchp_enable_slot (struct slot *p_slot)
569{ 579{
570 u8 getstatus = 0; 580 u8 getstatus = 0;
571 int rc, retval = -ENODEV; 581 int rc, retval = -ENODEV;
582 struct controller *ctrl = p_slot->ctrl;
572 583
573 /* Check to see if (latch closed, card present, power off) */ 584 /* Check to see if (latch closed, card present, power off) */
574 mutex_lock(&p_slot->ctrl->crit_sect); 585 mutex_lock(&p_slot->ctrl->crit_sect);
575 rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus); 586 rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus);
576 if (rc || !getstatus) { 587 if (rc || !getstatus) {
577 info("No adapter on slot(%s)\n", p_slot->name); 588 ctrl_info(ctrl, "No adapter on slot(%s)\n", slot_name(p_slot));
578 goto out; 589 goto out;
579 } 590 }
580 rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); 591 rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
581 if (rc || getstatus) { 592 if (rc || getstatus) {
582 info("Latch open on slot(%s)\n", p_slot->name); 593 ctrl_info(ctrl, "Latch open on slot(%s)\n", slot_name(p_slot));
583 goto out; 594 goto out;
584 } 595 }
585 rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus); 596 rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
586 if (rc || getstatus) { 597 if (rc || getstatus) {
587 info("Already enabled on slot(%s)\n", p_slot->name); 598 ctrl_info(ctrl, "Already enabled on slot(%s)\n",
599 slot_name(p_slot));
588 goto out; 600 goto out;
589 } 601 }
590 602
@@ -593,7 +605,7 @@ static int shpchp_enable_slot (struct slot *p_slot)
593 /* We have to save the presence info for these slots */ 605 /* We have to save the presence info for these slots */
594 p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save)); 606 p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save));
595 p_slot->hpc_ops->get_power_status(p_slot, &(p_slot->pwr_save)); 607 p_slot->hpc_ops->get_power_status(p_slot, &(p_slot->pwr_save));
596 dbg("%s: p_slot->pwr_save %x\n", __func__, p_slot->pwr_save); 608 ctrl_dbg(ctrl, "%s: p_slot->pwr_save %x\n", __func__, p_slot->pwr_save);
597 p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); 609 p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
598 610
599 if(((p_slot->ctrl->pci_dev->vendor == PCI_VENDOR_ID_AMD) || 611 if(((p_slot->ctrl->pci_dev->vendor == PCI_VENDOR_ID_AMD) ||
@@ -624,6 +636,7 @@ static int shpchp_disable_slot (struct slot *p_slot)
624{ 636{
625 u8 getstatus = 0; 637 u8 getstatus = 0;
626 int rc, retval = -ENODEV; 638 int rc, retval = -ENODEV;
639 struct controller *ctrl = p_slot->ctrl;
627 640
628 if (!p_slot->ctrl) 641 if (!p_slot->ctrl)
629 return -ENODEV; 642 return -ENODEV;
@@ -633,17 +646,18 @@ static int shpchp_disable_slot (struct slot *p_slot)
633 646
634 rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus); 647 rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus);
635 if (rc || !getstatus) { 648 if (rc || !getstatus) {
636 info("No adapter on slot(%s)\n", p_slot->name); 649 ctrl_info(ctrl, "No adapter on slot(%s)\n", slot_name(p_slot));
637 goto out; 650 goto out;
638 } 651 }
639 rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); 652 rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
640 if (rc || getstatus) { 653 if (rc || getstatus) {
641 info("Latch open on slot(%s)\n", p_slot->name); 654 ctrl_info(ctrl, "Latch open on slot(%s)\n", slot_name(p_slot));
642 goto out; 655 goto out;
643 } 656 }
644 rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus); 657 rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
645 if (rc || !getstatus) { 658 if (rc || !getstatus) {
646 info("Already disabled slot(%s)\n", p_slot->name); 659 ctrl_info(ctrl, "Already disabled on slot(%s)\n",
660 slot_name(p_slot));
647 goto out; 661 goto out;
648 } 662 }
649 663
@@ -657,6 +671,7 @@ static int shpchp_disable_slot (struct slot *p_slot)
657int shpchp_sysfs_enable_slot(struct slot *p_slot) 671int shpchp_sysfs_enable_slot(struct slot *p_slot)
658{ 672{
659 int retval = -ENODEV; 673 int retval = -ENODEV;
674 struct controller *ctrl = p_slot->ctrl;
660 675
661 mutex_lock(&p_slot->lock); 676 mutex_lock(&p_slot->lock);
662 switch (p_slot->state) { 677 switch (p_slot->state) {
@@ -670,15 +685,17 @@ int shpchp_sysfs_enable_slot(struct slot *p_slot)
670 p_slot->state = STATIC_STATE; 685 p_slot->state = STATIC_STATE;
671 break; 686 break;
672 case POWERON_STATE: 687 case POWERON_STATE:
673 info("Slot %s is already in powering on state\n", 688 ctrl_info(ctrl, "Slot %s is already in powering on state\n",
674 p_slot->name); 689 slot_name(p_slot));
675 break; 690 break;
676 case BLINKINGOFF_STATE: 691 case BLINKINGOFF_STATE:
677 case POWEROFF_STATE: 692 case POWEROFF_STATE:
678 info("Already enabled on slot %s\n", p_slot->name); 693 ctrl_info(ctrl, "Already enabled on slot %s\n",
694 slot_name(p_slot));
679 break; 695 break;
680 default: 696 default:
681 err("Not a valid state on slot %s\n", p_slot->name); 697 ctrl_err(ctrl, "Not a valid state on slot %s\n",
698 slot_name(p_slot));
682 break; 699 break;
683 } 700 }
684 mutex_unlock(&p_slot->lock); 701 mutex_unlock(&p_slot->lock);
@@ -689,6 +706,7 @@ int shpchp_sysfs_enable_slot(struct slot *p_slot)
689int shpchp_sysfs_disable_slot(struct slot *p_slot) 706int shpchp_sysfs_disable_slot(struct slot *p_slot)
690{ 707{
691 int retval = -ENODEV; 708 int retval = -ENODEV;
709 struct controller *ctrl = p_slot->ctrl;
692 710
693 mutex_lock(&p_slot->lock); 711 mutex_lock(&p_slot->lock);
694 switch (p_slot->state) { 712 switch (p_slot->state) {
@@ -702,15 +720,17 @@ int shpchp_sysfs_disable_slot(struct slot *p_slot)
702 p_slot->state = STATIC_STATE; 720 p_slot->state = STATIC_STATE;
703 break; 721 break;
704 case POWEROFF_STATE: 722 case POWEROFF_STATE:
705 info("Slot %s is already in powering off state\n", 723 ctrl_info(ctrl, "Slot %s is already in powering off state\n",
706 p_slot->name); 724 slot_name(p_slot));
707 break; 725 break;
708 case BLINKINGON_STATE: 726 case BLINKINGON_STATE:
709 case POWERON_STATE: 727 case POWERON_STATE:
710 info("Already disabled on slot %s\n", p_slot->name); 728 ctrl_info(ctrl, "Already disabled on slot %s\n",
729 slot_name(p_slot));
711 break; 730 break;
712 default: 731 default:
713 err("Not a valid state on slot %s\n", p_slot->name); 732 ctrl_err(ctrl, "Not a valid state on slot %s\n",
733 slot_name(p_slot));
714 break; 734 break;
715 } 735 }
716 mutex_unlock(&p_slot->lock); 736 mutex_unlock(&p_slot->lock);
diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c
index 7a0bff364cd4..86dc39847769 100644
--- a/drivers/pci/hotplug/shpchp_hpc.c
+++ b/drivers/pci/hotplug/shpchp_hpc.c
@@ -300,10 +300,10 @@ static inline int shpc_wait_cmd(struct controller *ctrl)
300 !is_ctrl_busy(ctrl), timeout); 300 !is_ctrl_busy(ctrl), timeout);
301 if (!rc && is_ctrl_busy(ctrl)) { 301 if (!rc && is_ctrl_busy(ctrl)) {
302 retval = -EIO; 302 retval = -EIO;
303 err("Command not completed in 1000 msec\n"); 303 ctrl_err(ctrl, "Command not completed in 1000 msec\n");
304 } else if (rc < 0) { 304 } else if (rc < 0) {
305 retval = -EINTR; 305 retval = -EINTR;
306 info("Command was interrupted by a signal\n"); 306 ctrl_info(ctrl, "Command was interrupted by a signal\n");
307 } 307 }
308 308
309 return retval; 309 return retval;
@@ -320,15 +320,14 @@ static int shpc_write_cmd(struct slot *slot, u8 t_slot, u8 cmd)
320 320
321 if (!shpc_poll_ctrl_busy(ctrl)) { 321 if (!shpc_poll_ctrl_busy(ctrl)) {
322 /* After 1 sec and and the controller is still busy */ 322 /* After 1 sec and and the controller is still busy */
323 err("%s : Controller is still busy after 1 sec.\n", 323 ctrl_err(ctrl, "Controller is still busy after 1 sec\n");
324 __func__);
325 retval = -EBUSY; 324 retval = -EBUSY;
326 goto out; 325 goto out;
327 } 326 }
328 327
329 ++t_slot; 328 ++t_slot;
330 temp_word = (t_slot << 8) | (cmd & 0xFF); 329 temp_word = (t_slot << 8) | (cmd & 0xFF);
331 dbg("%s: t_slot %x cmd %x\n", __func__, t_slot, cmd); 330 ctrl_dbg(ctrl, "%s: t_slot %x cmd %x\n", __func__, t_slot, cmd);
332 331
333 /* To make sure the Controller Busy bit is 0 before we send out the 332 /* To make sure the Controller Busy bit is 0 before we send out the
334 * command. 333 * command.
@@ -344,8 +343,9 @@ static int shpc_write_cmd(struct slot *slot, u8 t_slot, u8 cmd)
344 343
345 cmd_status = hpc_check_cmd_status(slot->ctrl); 344 cmd_status = hpc_check_cmd_status(slot->ctrl);
346 if (cmd_status) { 345 if (cmd_status) {
347 err("%s: Failed to issued command 0x%x (error code = %d)\n", 346 ctrl_err(ctrl,
348 __func__, cmd, cmd_status); 347 "Failed to issued command 0x%x (error code = %d)\n",
348 cmd, cmd_status);
349 retval = -EIO; 349 retval = -EIO;
350 } 350 }
351 out: 351 out:
@@ -364,15 +364,15 @@ static int hpc_check_cmd_status(struct controller *ctrl)
364 break; 364 break;
365 case 1: 365 case 1:
366 retval = SWITCH_OPEN; 366 retval = SWITCH_OPEN;
367 err("%s: Switch opened!\n", __func__); 367 ctrl_err(ctrl, "Switch opened!\n");
368 break; 368 break;
369 case 2: 369 case 2:
370 retval = INVALID_CMD; 370 retval = INVALID_CMD;
371 err("%s: Invalid HPC command!\n", __func__); 371 ctrl_err(ctrl, "Invalid HPC command!\n");
372 break; 372 break;
373 case 4: 373 case 4:
374 retval = INVALID_SPEED_MODE; 374 retval = INVALID_SPEED_MODE;
375 err("%s: Invalid bus speed/mode!\n", __func__); 375 ctrl_err(ctrl, "Invalid bus speed/mode!\n");
376 break; 376 break;
377 default: 377 default:
378 retval = cmd_status; 378 retval = cmd_status;
@@ -483,8 +483,8 @@ static int hpc_get_adapter_speed(struct slot *slot, enum pci_bus_speed *value)
483 return -ENODEV; 483 return -ENODEV;
484 } 484 }
485 485
486 dbg("%s: slot_reg = %x, pcix_cap = %x, m66_cap = %x\n", 486 ctrl_dbg(ctrl, "%s: slot_reg = %x, pcix_cap = %x, m66_cap = %x\n",
487 __func__, slot_reg, pcix_cap, m66_cap); 487 __func__, slot_reg, pcix_cap, m66_cap);
488 488
489 switch (pcix_cap) { 489 switch (pcix_cap) {
490 case 0x0: 490 case 0x0:
@@ -509,7 +509,7 @@ static int hpc_get_adapter_speed(struct slot *slot, enum pci_bus_speed *value)
509 break; 509 break;
510 } 510 }
511 511
512 dbg("Adapter speed = %d\n", *value); 512 ctrl_dbg(ctrl, "Adapter speed = %d\n", *value);
513 return retval; 513 return retval;
514} 514}
515 515
@@ -526,7 +526,7 @@ static int hpc_get_mode1_ECC_cap(struct slot *slot, u8 *mode)
526 retval = -1; 526 retval = -1;
527 } 527 }
528 528
529 dbg("Mode 1 ECC cap = %d\n", *mode); 529 ctrl_dbg(ctrl, "Mode 1 ECC cap = %d\n", *mode);
530 return retval; 530 return retval;
531} 531}
532 532
@@ -629,7 +629,7 @@ static int hpc_power_on_slot(struct slot * slot)
629 629
630 retval = shpc_write_cmd(slot, slot->hp_slot, SET_SLOT_PWR); 630 retval = shpc_write_cmd(slot, slot->hp_slot, SET_SLOT_PWR);
631 if (retval) 631 if (retval)
632 err("%s: Write command failed!\n", __func__); 632 ctrl_err(slot->ctrl, "%s: Write command failed!\n", __func__);
633 633
634 return retval; 634 return retval;
635} 635}
@@ -642,7 +642,7 @@ static int hpc_slot_enable(struct slot * slot)
642 retval = shpc_write_cmd(slot, slot->hp_slot, 642 retval = shpc_write_cmd(slot, slot->hp_slot,
643 SET_SLOT_ENABLE | SET_PWR_BLINK | SET_ATTN_OFF); 643 SET_SLOT_ENABLE | SET_PWR_BLINK | SET_ATTN_OFF);
644 if (retval) 644 if (retval)
645 err("%s: Write command failed!\n", __func__); 645 ctrl_err(slot->ctrl, "%s: Write command failed!\n", __func__);
646 646
647 return retval; 647 return retval;
648} 648}
@@ -655,7 +655,7 @@ static int hpc_slot_disable(struct slot * slot)
655 retval = shpc_write_cmd(slot, slot->hp_slot, 655 retval = shpc_write_cmd(slot, slot->hp_slot,
656 SET_SLOT_DISABLE | SET_PWR_OFF | SET_ATTN_ON); 656 SET_SLOT_DISABLE | SET_PWR_OFF | SET_ATTN_ON);
657 if (retval) 657 if (retval)
658 err("%s: Write command failed!\n", __func__); 658 ctrl_err(slot->ctrl, "%s: Write command failed!\n", __func__);
659 659
660 return retval; 660 return retval;
661} 661}
@@ -719,7 +719,7 @@ static int hpc_set_bus_speed_mode(struct slot * slot, enum pci_bus_speed value)
719 719
720 retval = shpc_write_cmd(slot, 0, cmd); 720 retval = shpc_write_cmd(slot, 0, cmd);
721 if (retval) 721 if (retval)
722 err("%s: Write command failed!\n", __func__); 722 ctrl_err(ctrl, "%s: Write command failed!\n", __func__);
723 723
724 return retval; 724 return retval;
725} 725}
@@ -735,7 +735,7 @@ static irqreturn_t shpc_isr(int irq, void *dev_id)
735 if (!intr_loc) 735 if (!intr_loc)
736 return IRQ_NONE; 736 return IRQ_NONE;
737 737
738 dbg("%s: intr_loc = %x\n",__func__, intr_loc); 738 ctrl_dbg(ctrl, "%s: intr_loc = %x\n", __func__, intr_loc);
739 739
740 if(!shpchp_poll_mode) { 740 if(!shpchp_poll_mode) {
741 /* 741 /*
@@ -748,7 +748,7 @@ static irqreturn_t shpc_isr(int irq, void *dev_id)
748 shpc_writel(ctrl, SERR_INTR_ENABLE, serr_int); 748 shpc_writel(ctrl, SERR_INTR_ENABLE, serr_int);
749 749
750 intr_loc2 = shpc_readl(ctrl, INTR_LOC); 750 intr_loc2 = shpc_readl(ctrl, INTR_LOC);
751 dbg("%s: intr_loc2 = %x\n",__func__, intr_loc2); 751 ctrl_dbg(ctrl, "%s: intr_loc2 = %x\n", __func__, intr_loc2);
752 } 752 }
753 753
754 if (intr_loc & CMD_INTR_PENDING) { 754 if (intr_loc & CMD_INTR_PENDING) {
@@ -773,8 +773,8 @@ static irqreturn_t shpc_isr(int irq, void *dev_id)
773 continue; 773 continue;
774 774
775 slot_reg = shpc_readl(ctrl, SLOT_REG(hp_slot)); 775 slot_reg = shpc_readl(ctrl, SLOT_REG(hp_slot));
776 dbg("%s: Slot %x with intr, slot register = %x\n", 776 ctrl_dbg(ctrl, "Slot %x with intr, slot register = %x\n",
777 __func__, hp_slot, slot_reg); 777 hp_slot, slot_reg);
778 778
779 if (slot_reg & MRL_CHANGE_DETECTED) 779 if (slot_reg & MRL_CHANGE_DETECTED)
780 shpchp_handle_switch_change(hp_slot, ctrl); 780 shpchp_handle_switch_change(hp_slot, ctrl);
@@ -843,7 +843,7 @@ static int hpc_get_max_bus_speed (struct slot *slot, enum pci_bus_speed *value)
843 } 843 }
844 844
845 *value = bus_speed; 845 *value = bus_speed;
846 dbg("Max bus speed = %d\n", bus_speed); 846 ctrl_dbg(ctrl, "Max bus speed = %d\n", bus_speed);
847 847
848 return retval; 848 return retval;
849} 849}
@@ -911,7 +911,7 @@ static int hpc_get_cur_bus_speed (struct slot *slot, enum pci_bus_speed *value)
911 break; 911 break;
912 } 912 }
913 913
914 dbg("Current bus speed = %d\n", bus_speed); 914 ctrl_dbg(ctrl, "Current bus speed = %d\n", bus_speed);
915 return retval; 915 return retval;
916} 916}
917 917
@@ -949,6 +949,7 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
949 u8 i; 949 u8 i;
950 950
951 ctrl->pci_dev = pdev; /* pci_dev of the P2P bridge */ 951 ctrl->pci_dev = pdev; /* pci_dev of the P2P bridge */
952 ctrl_dbg(ctrl, "Hotplug Controller:\n");
952 953
953 if ((pdev->vendor == PCI_VENDOR_ID_AMD) || (pdev->device == 954 if ((pdev->vendor == PCI_VENDOR_ID_AMD) || (pdev->device ==
954 PCI_DEVICE_ID_AMD_GOLAM_7450)) { 955 PCI_DEVICE_ID_AMD_GOLAM_7450)) {
@@ -958,34 +959,33 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
958 } else { 959 } else {
959 ctrl->cap_offset = pci_find_capability(pdev, PCI_CAP_ID_SHPC); 960 ctrl->cap_offset = pci_find_capability(pdev, PCI_CAP_ID_SHPC);
960 if (!ctrl->cap_offset) { 961 if (!ctrl->cap_offset) {
961 err("%s : cap_offset == 0\n", __func__); 962 ctrl_err(ctrl, "Cannot find PCI capability\n");
962 goto abort; 963 goto abort;
963 } 964 }
964 dbg("%s: cap_offset = %x\n", __func__, ctrl->cap_offset); 965 ctrl_dbg(ctrl, " cap_offset = %x\n", ctrl->cap_offset);
965 966
966 rc = shpc_indirect_read(ctrl, 0, &shpc_base_offset); 967 rc = shpc_indirect_read(ctrl, 0, &shpc_base_offset);
967 if (rc) { 968 if (rc) {
968 err("%s: cannot read base_offset\n", __func__); 969 ctrl_err(ctrl, "Cannot read base_offset\n");
969 goto abort; 970 goto abort;
970 } 971 }
971 972
972 rc = shpc_indirect_read(ctrl, 3, &tempdword); 973 rc = shpc_indirect_read(ctrl, 3, &tempdword);
973 if (rc) { 974 if (rc) {
974 err("%s: cannot read slot config\n", __func__); 975 ctrl_err(ctrl, "Cannot read slot config\n");
975 goto abort; 976 goto abort;
976 } 977 }
977 num_slots = tempdword & SLOT_NUM; 978 num_slots = tempdword & SLOT_NUM;
978 dbg("%s: num_slots (indirect) %x\n", __func__, num_slots); 979 ctrl_dbg(ctrl, " num_slots (indirect) %x\n", num_slots);
979 980
980 for (i = 0; i < 9 + num_slots; i++) { 981 for (i = 0; i < 9 + num_slots; i++) {
981 rc = shpc_indirect_read(ctrl, i, &tempdword); 982 rc = shpc_indirect_read(ctrl, i, &tempdword);
982 if (rc) { 983 if (rc) {
983 err("%s: cannot read creg (index = %d)\n", 984 ctrl_err(ctrl,
984 __func__, i); 985 "Cannot read creg (index = %d)\n", i);
985 goto abort; 986 goto abort;
986 } 987 }
987 dbg("%s: offset %d: value %x\n", __func__,i, 988 ctrl_dbg(ctrl, " offset %d: value %x\n", i, tempdword);
988 tempdword);
989 } 989 }
990 990
991 ctrl->mmio_base = 991 ctrl->mmio_base =
@@ -993,30 +993,31 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
993 ctrl->mmio_size = 0x24 + 0x4 * num_slots; 993 ctrl->mmio_size = 0x24 + 0x4 * num_slots;
994 } 994 }
995 995
996 info("HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n", pdev->vendor, pdev->device, pdev->subsystem_vendor, 996 ctrl_info(ctrl, "HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n",
997 pdev->subsystem_device); 997 pdev->vendor, pdev->device, pdev->subsystem_vendor,
998 pdev->subsystem_device);
998 999
999 rc = pci_enable_device(pdev); 1000 rc = pci_enable_device(pdev);
1000 if (rc) { 1001 if (rc) {
1001 err("%s: pci_enable_device failed\n", __func__); 1002 ctrl_err(ctrl, "pci_enable_device failed\n");
1002 goto abort; 1003 goto abort;
1003 } 1004 }
1004 1005
1005 if (!request_mem_region(ctrl->mmio_base, ctrl->mmio_size, MY_NAME)) { 1006 if (!request_mem_region(ctrl->mmio_base, ctrl->mmio_size, MY_NAME)) {
1006 err("%s: cannot reserve MMIO region\n", __func__); 1007 ctrl_err(ctrl, "Cannot reserve MMIO region\n");
1007 rc = -1; 1008 rc = -1;
1008 goto abort; 1009 goto abort;
1009 } 1010 }
1010 1011
1011 ctrl->creg = ioremap(ctrl->mmio_base, ctrl->mmio_size); 1012 ctrl->creg = ioremap(ctrl->mmio_base, ctrl->mmio_size);
1012 if (!ctrl->creg) { 1013 if (!ctrl->creg) {
1013 err("%s: cannot remap MMIO region %lx @ %lx\n", __func__, 1014 ctrl_err(ctrl, "Cannot remap MMIO region %lx @ %lx\n",
1014 ctrl->mmio_size, ctrl->mmio_base); 1015 ctrl->mmio_size, ctrl->mmio_base);
1015 release_mem_region(ctrl->mmio_base, ctrl->mmio_size); 1016 release_mem_region(ctrl->mmio_base, ctrl->mmio_size);
1016 rc = -1; 1017 rc = -1;
1017 goto abort; 1018 goto abort;
1018 } 1019 }
1019 dbg("%s: ctrl->creg %p\n", __func__, ctrl->creg); 1020 ctrl_dbg(ctrl, "ctrl->creg %p\n", ctrl->creg);
1020 1021
1021 mutex_init(&ctrl->crit_sect); 1022 mutex_init(&ctrl->crit_sect);
1022 mutex_init(&ctrl->cmd_lock); 1023 mutex_init(&ctrl->cmd_lock);
@@ -1035,21 +1036,21 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
1035 1036
1036 /* Mask Global Interrupt Mask & Command Complete Interrupt Mask */ 1037 /* Mask Global Interrupt Mask & Command Complete Interrupt Mask */
1037 tempdword = shpc_readl(ctrl, SERR_INTR_ENABLE); 1038 tempdword = shpc_readl(ctrl, SERR_INTR_ENABLE);
1038 dbg("%s: SERR_INTR_ENABLE = %x\n", __func__, tempdword); 1039 ctrl_dbg(ctrl, "SERR_INTR_ENABLE = %x\n", tempdword);
1039 tempdword |= (GLOBAL_INTR_MASK | GLOBAL_SERR_MASK | 1040 tempdword |= (GLOBAL_INTR_MASK | GLOBAL_SERR_MASK |
1040 COMMAND_INTR_MASK | ARBITER_SERR_MASK); 1041 COMMAND_INTR_MASK | ARBITER_SERR_MASK);
1041 tempdword &= ~SERR_INTR_RSVDZ_MASK; 1042 tempdword &= ~SERR_INTR_RSVDZ_MASK;
1042 shpc_writel(ctrl, SERR_INTR_ENABLE, tempdword); 1043 shpc_writel(ctrl, SERR_INTR_ENABLE, tempdword);
1043 tempdword = shpc_readl(ctrl, SERR_INTR_ENABLE); 1044 tempdword = shpc_readl(ctrl, SERR_INTR_ENABLE);
1044 dbg("%s: SERR_INTR_ENABLE = %x\n", __func__, tempdword); 1045 ctrl_dbg(ctrl, "SERR_INTR_ENABLE = %x\n", tempdword);
1045 1046
1046 /* Mask the MRL sensor SERR Mask of individual slot in 1047 /* Mask the MRL sensor SERR Mask of individual slot in
1047 * Slot SERR-INT Mask & clear all the existing event if any 1048 * Slot SERR-INT Mask & clear all the existing event if any
1048 */ 1049 */
1049 for (hp_slot = 0; hp_slot < ctrl->num_slots; hp_slot++) { 1050 for (hp_slot = 0; hp_slot < ctrl->num_slots; hp_slot++) {
1050 slot_reg = shpc_readl(ctrl, SLOT_REG(hp_slot)); 1051 slot_reg = shpc_readl(ctrl, SLOT_REG(hp_slot));
1051 dbg("%s: Default Logical Slot Register %d value %x\n", __func__, 1052 ctrl_dbg(ctrl, "Default Logical Slot Register %d value %x\n",
1052 hp_slot, slot_reg); 1053 hp_slot, slot_reg);
1053 slot_reg |= (PRSNT_CHANGE_INTR_MASK | ISO_PFAULT_INTR_MASK | 1054 slot_reg |= (PRSNT_CHANGE_INTR_MASK | ISO_PFAULT_INTR_MASK |
1054 BUTTON_PRESS_INTR_MASK | MRL_CHANGE_INTR_MASK | 1055 BUTTON_PRESS_INTR_MASK | MRL_CHANGE_INTR_MASK |
1055 CON_PFAULT_INTR_MASK | MRL_CHANGE_SERR_MASK | 1056 CON_PFAULT_INTR_MASK | MRL_CHANGE_SERR_MASK |
@@ -1066,24 +1067,24 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
1066 /* Installs the interrupt handler */ 1067 /* Installs the interrupt handler */
1067 rc = pci_enable_msi(pdev); 1068 rc = pci_enable_msi(pdev);
1068 if (rc) { 1069 if (rc) {
1069 info("Can't get msi for the hotplug controller\n"); 1070 ctrl_info(ctrl,
1070 info("Use INTx for the hotplug controller\n"); 1071 "Can't get msi for the hotplug controller\n");
1072 ctrl_info(ctrl,
1073 "Use INTx for the hotplug controller\n");
1071 } 1074 }
1072 1075
1073 rc = request_irq(ctrl->pci_dev->irq, shpc_isr, IRQF_SHARED, 1076 rc = request_irq(ctrl->pci_dev->irq, shpc_isr, IRQF_SHARED,
1074 MY_NAME, (void *)ctrl); 1077 MY_NAME, (void *)ctrl);
1075 dbg("%s: request_irq %d for hpc%d (returns %d)\n", 1078 ctrl_dbg(ctrl, "request_irq %d for hpc%d (returns %d)\n",
1076 __func__, ctrl->pci_dev->irq, 1079 ctrl->pci_dev->irq,
1077 atomic_read(&shpchp_num_controllers), rc); 1080 atomic_read(&shpchp_num_controllers), rc);
1078 if (rc) { 1081 if (rc) {
1079 err("Can't get irq %d for the hotplug controller\n", 1082 ctrl_err(ctrl, "Can't get irq %d for the hotplug "
1080 ctrl->pci_dev->irq); 1083 "controller\n", ctrl->pci_dev->irq);
1081 goto abort_iounmap; 1084 goto abort_iounmap;
1082 } 1085 }
1083 } 1086 }
1084 dbg("%s: HPC at b:d:f:irq=0x%x:%x:%x:%x\n", __func__, 1087 ctrl_dbg(ctrl, "HPC at %s irq=%x\n", pci_name(pdev), pdev->irq);
1085 pdev->bus->number, PCI_SLOT(pdev->devfn),
1086 PCI_FUNC(pdev->devfn), pdev->irq);
1087 1088
1088 /* 1089 /*
1089 * If this is the first controller to be initialized, 1090 * If this is the first controller to be initialized,
@@ -1102,8 +1103,8 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
1102 */ 1103 */
1103 for (hp_slot = 0; hp_slot < ctrl->num_slots; hp_slot++) { 1104 for (hp_slot = 0; hp_slot < ctrl->num_slots; hp_slot++) {
1104 slot_reg = shpc_readl(ctrl, SLOT_REG(hp_slot)); 1105 slot_reg = shpc_readl(ctrl, SLOT_REG(hp_slot));
1105 dbg("%s: Default Logical Slot Register %d value %x\n", __func__, 1106 ctrl_dbg(ctrl, "Default Logical Slot Register %d value %x\n",
1106 hp_slot, slot_reg); 1107 hp_slot, slot_reg);
1107 slot_reg &= ~(PRSNT_CHANGE_INTR_MASK | ISO_PFAULT_INTR_MASK | 1108 slot_reg &= ~(PRSNT_CHANGE_INTR_MASK | ISO_PFAULT_INTR_MASK |
1108 BUTTON_PRESS_INTR_MASK | MRL_CHANGE_INTR_MASK | 1109 BUTTON_PRESS_INTR_MASK | MRL_CHANGE_INTR_MASK |
1109 CON_PFAULT_INTR_MASK | SLOT_REG_RSVDZ_MASK); 1110 CON_PFAULT_INTR_MASK | SLOT_REG_RSVDZ_MASK);
@@ -1116,7 +1117,7 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
1116 SERR_INTR_RSVDZ_MASK); 1117 SERR_INTR_RSVDZ_MASK);
1117 shpc_writel(ctrl, SERR_INTR_ENABLE, tempdword); 1118 shpc_writel(ctrl, SERR_INTR_ENABLE, tempdword);
1118 tempdword = shpc_readl(ctrl, SERR_INTR_ENABLE); 1119 tempdword = shpc_readl(ctrl, SERR_INTR_ENABLE);
1119 dbg("%s: SERR_INTR_ENABLE = %x\n", __func__, tempdword); 1120 ctrl_dbg(ctrl, "SERR_INTR_ENABLE = %x\n", tempdword);
1120 } 1121 }
1121 1122
1122 return 0; 1123 return 0;
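
Throughout shpchp_hpc.c the converted messages dump values read from the controller's memory-mapped working register set: ctrl->creg is the ioremap()ed base set up in shpc_init() above, and SLOT_REG(i) is taken to be the byte offset of logical slot i's register. A hedged sketch of the accessors those hunks rely on (the real helpers may differ in detail):

static inline u32 shpc_readl(struct controller *ctrl, int reg)
{
	return readl(ctrl->creg + reg);
}

static inline void shpc_writel(struct controller *ctrl, int reg, u32 val)
{
	writel(val, ctrl->creg + reg);
}

With that in place, the ctrl_dbg() lines for SERR_INTR_ENABLE and the per-slot registers are plain readl()s wrapped in the new per-controller logging.
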
diff --git a/drivers/pci/hotplug/shpchp_pci.c b/drivers/pci/hotplug/shpchp_pci.c
index 3fc4ec0eea0b..138f161becc0 100644
--- a/drivers/pci/hotplug/shpchp_pci.c
+++ b/drivers/pci/hotplug/shpchp_pci.c
@@ -49,9 +49,7 @@ static void program_fw_provided_values(struct pci_dev *dev)
49 /* use default values if we can't get them from firmware */ 49 /* use default values if we can't get them from firmware */
50 if (get_hp_params_from_firmware(dev, &hpp) || 50 if (get_hp_params_from_firmware(dev, &hpp) ||
51 !hpp.t0 || (hpp.t0->revision > 1)) { 51 !hpp.t0 || (hpp.t0->revision > 1)) {
52 printk(KERN_WARNING 52 warn("Could not get hotplug parameters. Use defaults\n");
53 "%s: Could not get hotplug parameters. Use defaults\n",
54 __func__);
55 hpp.t0 = &hpp.type0_data; 53 hpp.t0 = &hpp.type0_data;
56 hpp.t0->revision = 0; 54 hpp.t0->revision = 0;
57 hpp.t0->cache_line_size = 8; 55 hpp.t0->cache_line_size = 8;
@@ -101,18 +99,20 @@ int __ref shpchp_configure_device(struct slot *p_slot)
101 struct pci_dev *dev; 99 struct pci_dev *dev;
102 struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate; 100 struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate;
103 int num, fn; 101 int num, fn;
102 struct controller *ctrl = p_slot->ctrl;
104 103
105 dev = pci_get_slot(parent, PCI_DEVFN(p_slot->device, 0)); 104 dev = pci_get_slot(parent, PCI_DEVFN(p_slot->device, 0));
106 if (dev) { 105 if (dev) {
107 err("Device %s already exists at %x:%x, cannot hot-add\n", 106 ctrl_err(ctrl, "Device %s already exists "
108 pci_name(dev), p_slot->bus, p_slot->device); 107 "at %04x:%02x:%02x, cannot hot-add\n", pci_name(dev),
108 pci_domain_nr(parent), p_slot->bus, p_slot->device);
109 pci_dev_put(dev); 109 pci_dev_put(dev);
110 return -EINVAL; 110 return -EINVAL;
111 } 111 }
112 112
113 num = pci_scan_slot(parent, PCI_DEVFN(p_slot->device, 0)); 113 num = pci_scan_slot(parent, PCI_DEVFN(p_slot->device, 0));
114 if (num == 0) { 114 if (num == 0) {
115 err("No new device found\n"); 115 ctrl_err(ctrl, "No new device found\n");
116 return -ENODEV; 116 return -ENODEV;
117 } 117 }
118 118
@@ -121,8 +121,8 @@ int __ref shpchp_configure_device(struct slot *p_slot)
121 if (!dev) 121 if (!dev)
122 continue; 122 continue;
123 if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY) { 123 if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
124 err("Cannot hot-add display device %s\n", 124 ctrl_err(ctrl, "Cannot hot-add display device %s\n",
125 pci_name(dev)); 125 pci_name(dev));
126 pci_dev_put(dev); 126 pci_dev_put(dev);
127 continue; 127 continue;
128 } 128 }
@@ -138,14 +138,15 @@ int __ref shpchp_configure_device(struct slot *p_slot)
138 break; 138 break;
139 } 139 }
140 if (busnr >= end) { 140 if (busnr >= end) {
141 err("No free bus for hot-added bridge\n"); 141 ctrl_err(ctrl,
142 "No free bus for hot-added bridge\n");
142 pci_dev_put(dev); 143 pci_dev_put(dev);
143 continue; 144 continue;
144 } 145 }
145 child = pci_add_new_bus(parent, dev, busnr); 146 child = pci_add_new_bus(parent, dev, busnr);
146 if (!child) { 147 if (!child) {
147 err("Cannot add new bus for %s\n", 148 ctrl_err(ctrl, "Cannot add new bus for %s\n",
148 pci_name(dev)); 149 pci_name(dev));
149 pci_dev_put(dev); 150 pci_dev_put(dev);
150 continue; 151 continue;
151 } 152 }
@@ -168,8 +169,10 @@ int shpchp_unconfigure_device(struct slot *p_slot)
168 int j; 169 int j;
169 u8 bctl = 0; 170 u8 bctl = 0;
170 struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate; 171 struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate;
172 struct controller *ctrl = p_slot->ctrl;
171 173
172 dbg("%s: bus/dev = %x/%x\n", __func__, p_slot->bus, p_slot->device); 174 ctrl_dbg(ctrl, "%s: domain:bus:dev = %04x:%02x:%02x\n",
175 __func__, pci_domain_nr(parent), p_slot->bus, p_slot->device);
173 176
174 for (j=0; j<8 ; j++) { 177 for (j=0; j<8 ; j++) {
175 struct pci_dev* temp = pci_get_slot(parent, 178 struct pci_dev* temp = pci_get_slot(parent,
@@ -177,16 +180,17 @@ int shpchp_unconfigure_device(struct slot *p_slot)
177 if (!temp) 180 if (!temp)
178 continue; 181 continue;
179 if ((temp->class >> 16) == PCI_BASE_CLASS_DISPLAY) { 182 if ((temp->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
180 err("Cannot remove display device %s\n", 183 ctrl_err(ctrl, "Cannot remove display device %s\n",
181 pci_name(temp)); 184 pci_name(temp));
182 pci_dev_put(temp); 185 pci_dev_put(temp);
183 continue; 186 continue;
184 } 187 }
185 if (temp->hdr_type == PCI_HEADER_TYPE_BRIDGE) { 188 if (temp->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
186 pci_read_config_byte(temp, PCI_BRIDGE_CONTROL, &bctl); 189 pci_read_config_byte(temp, PCI_BRIDGE_CONTROL, &bctl);
187 if (bctl & PCI_BRIDGE_CTL_VGA) { 190 if (bctl & PCI_BRIDGE_CTL_VGA) {
188 err("Cannot remove display device %s\n", 191 ctrl_err(ctrl,
189 pci_name(temp)); 192 "Cannot remove display device %s\n",
193 pci_name(temp));
190 pci_dev_put(temp); 194 pci_dev_put(temp);
191 continue; 195 continue;
192 } 196 }
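
Both the configure and unconfigure paths in shpchp_pci.c refuse to touch display devices, and the unconfigure path additionally refuses bridges that currently own VGA resources. A hedged helper equivalent to the open-coded checks above (illustrative only; the driver keeps these tests inline):

static bool shpchp_is_display(struct pci_dev *dev)
{
	u8 bctl;

	if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
		return true;
	/* A bridge with VGA routing enabled counts as a display device too. */
	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
		pci_read_config_byte(dev, PCI_BRIDGE_CONTROL, &bctl);
		if (bctl & PCI_BRIDGE_CTL_VGA)
			return true;
	}
	return false;
}
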
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 8b51e10b7783..5c8baa43ac9c 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -18,6 +18,7 @@
18 * Author: Ashok Raj <ashok.raj@intel.com> 18 * Author: Ashok Raj <ashok.raj@intel.com>
19 * Author: Shaohua Li <shaohua.li@intel.com> 19 * Author: Shaohua Li <shaohua.li@intel.com>
20 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> 20 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
21 * Author: Fenghua Yu <fenghua.yu@intel.com>
21 */ 22 */
22 23
23#include <linux/init.h> 24#include <linux/init.h>
@@ -35,11 +36,13 @@
35#include <linux/timer.h> 36#include <linux/timer.h>
36#include <linux/iova.h> 37#include <linux/iova.h>
37#include <linux/intel-iommu.h> 38#include <linux/intel-iommu.h>
38#include <asm/proto.h> /* force_iommu in this header in x86-64*/
39#include <asm/cacheflush.h> 39#include <asm/cacheflush.h>
40#include <asm/iommu.h> 40#include <asm/iommu.h>
41#include "pci.h" 41#include "pci.h"
42 42
43#define ROOT_SIZE VTD_PAGE_SIZE
44#define CONTEXT_SIZE VTD_PAGE_SIZE
45
43#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY) 46#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
44#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) 47#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
45 48
@@ -199,7 +202,7 @@ static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
199 spin_unlock_irqrestore(&iommu->lock, flags); 202 spin_unlock_irqrestore(&iommu->lock, flags);
200 return NULL; 203 return NULL;
201 } 204 }
202 __iommu_flush_cache(iommu, (void *)context, PAGE_SIZE_4K); 205 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
203 phy_addr = virt_to_phys((void *)context); 206 phy_addr = virt_to_phys((void *)context);
204 set_root_value(root, phy_addr); 207 set_root_value(root, phy_addr);
205 set_root_present(root); 208 set_root_present(root);
@@ -345,7 +348,7 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
345 return NULL; 348 return NULL;
346 } 349 }
347 __iommu_flush_cache(domain->iommu, tmp_page, 350 __iommu_flush_cache(domain->iommu, tmp_page,
348 PAGE_SIZE_4K); 351 PAGE_SIZE);
349 dma_set_pte_addr(*pte, virt_to_phys(tmp_page)); 352 dma_set_pte_addr(*pte, virt_to_phys(tmp_page));
350 /* 353 /*
351 * high level table always sets r/w, last level page 354 * high level table always sets r/w, last level page
@@ -408,13 +411,13 @@ static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end)
408 start &= (((u64)1) << addr_width) - 1; 411 start &= (((u64)1) << addr_width) - 1;
409 end &= (((u64)1) << addr_width) - 1; 412 end &= (((u64)1) << addr_width) - 1;
410 /* in case it's partial page */ 413 /* in case it's partial page */
411 start = PAGE_ALIGN_4K(start); 414 start = PAGE_ALIGN(start);
412 end &= PAGE_MASK_4K; 415 end &= PAGE_MASK;
413 416
414 /* we don't need lock here, nobody else touches the iova range */ 417 /* we don't need lock here, nobody else touches the iova range */
415 while (start < end) { 418 while (start < end) {
416 dma_pte_clear_one(domain, start); 419 dma_pte_clear_one(domain, start);
417 start += PAGE_SIZE_4K; 420 start += VTD_PAGE_SIZE;
418 } 421 }
419} 422}
420 423
@@ -468,7 +471,7 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
468 if (!root) 471 if (!root)
469 return -ENOMEM; 472 return -ENOMEM;
470 473
471 __iommu_flush_cache(iommu, root, PAGE_SIZE_4K); 474 __iommu_flush_cache(iommu, root, ROOT_SIZE);
472 475
473 spin_lock_irqsave(&iommu->lock, flags); 476 spin_lock_irqsave(&iommu->lock, flags);
474 iommu->root_entry = root; 477 iommu->root_entry = root;
@@ -567,27 +570,6 @@ static int __iommu_flush_context(struct intel_iommu *iommu,
567 return 0; 570 return 0;
568} 571}
569 572
570static int inline iommu_flush_context_global(struct intel_iommu *iommu,
571 int non_present_entry_flush)
572{
573 return __iommu_flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
574 non_present_entry_flush);
575}
576
577static int inline iommu_flush_context_domain(struct intel_iommu *iommu, u16 did,
578 int non_present_entry_flush)
579{
580 return __iommu_flush_context(iommu, did, 0, 0, DMA_CCMD_DOMAIN_INVL,
581 non_present_entry_flush);
582}
583
584static int inline iommu_flush_context_device(struct intel_iommu *iommu,
585 u16 did, u16 source_id, u8 function_mask, int non_present_entry_flush)
586{
587 return __iommu_flush_context(iommu, did, source_id, function_mask,
588 DMA_CCMD_DEVICE_INVL, non_present_entry_flush);
589}
590
591/* return value determine if we need a write buffer flush */ 573/* return value determine if we need a write buffer flush */
592static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, 574static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
593 u64 addr, unsigned int size_order, u64 type, 575 u64 addr, unsigned int size_order, u64 type,
@@ -655,37 +637,25 @@ static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
655 printk(KERN_ERR"IOMMU: flush IOTLB failed\n"); 637 printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
656 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type)) 638 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
657 pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n", 639 pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
658 DMA_TLB_IIRG(type), DMA_TLB_IAIG(val)); 640 (unsigned long long)DMA_TLB_IIRG(type),
641 (unsigned long long)DMA_TLB_IAIG(val));
659 /* flush iotlb entry will implicitly flush write buffer */ 642 /* flush iotlb entry will implicitly flush write buffer */
660 return 0; 643 return 0;
661} 644}
662 645
663static int inline iommu_flush_iotlb_global(struct intel_iommu *iommu,
664 int non_present_entry_flush)
665{
666 return __iommu_flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
667 non_present_entry_flush);
668}
669
670static int inline iommu_flush_iotlb_dsi(struct intel_iommu *iommu, u16 did,
671 int non_present_entry_flush)
672{
673 return __iommu_flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH,
674 non_present_entry_flush);
675}
676
677static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did, 646static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
678 u64 addr, unsigned int pages, int non_present_entry_flush) 647 u64 addr, unsigned int pages, int non_present_entry_flush)
679{ 648{
680 unsigned int mask; 649 unsigned int mask;
681 650
682 BUG_ON(addr & (~PAGE_MASK_4K)); 651 BUG_ON(addr & (~VTD_PAGE_MASK));
683 BUG_ON(pages == 0); 652 BUG_ON(pages == 0);
684 653
685 /* Fallback to domain selective flush if no PSI support */ 654 /* Fallback to domain selective flush if no PSI support */
686 if (!cap_pgsel_inv(iommu->cap)) 655 if (!cap_pgsel_inv(iommu->cap))
687 return iommu_flush_iotlb_dsi(iommu, did, 656 return iommu->flush.flush_iotlb(iommu, did, 0, 0,
688 non_present_entry_flush); 657 DMA_TLB_DSI_FLUSH,
658 non_present_entry_flush);
689 659
690 /* 660 /*
691 * PSI requires page size to be 2 ^ x, and the base address is naturally 661 * PSI requires page size to be 2 ^ x, and the base address is naturally
@@ -694,11 +664,12 @@ static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
694 mask = ilog2(__roundup_pow_of_two(pages)); 664 mask = ilog2(__roundup_pow_of_two(pages));
695 /* Fallback to domain selective flush if size is too big */ 665 /* Fallback to domain selective flush if size is too big */
696 if (mask > cap_max_amask_val(iommu->cap)) 666 if (mask > cap_max_amask_val(iommu->cap))
697 return iommu_flush_iotlb_dsi(iommu, did, 667 return iommu->flush.flush_iotlb(iommu, did, 0, 0,
698 non_present_entry_flush); 668 DMA_TLB_DSI_FLUSH, non_present_entry_flush);
699 669
700 return __iommu_flush_iotlb(iommu, did, addr, mask, 670 return iommu->flush.flush_iotlb(iommu, did, addr, mask,
701 DMA_TLB_PSI_FLUSH, non_present_entry_flush); 671 DMA_TLB_PSI_FLUSH,
672 non_present_entry_flush);
702} 673}
703 674
704static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu) 675static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
@@ -831,7 +802,7 @@ void dmar_msi_read(int irq, struct msi_msg *msg)
831} 802}
832 803
833static int iommu_page_fault_do_one(struct intel_iommu *iommu, int type, 804static int iommu_page_fault_do_one(struct intel_iommu *iommu, int type,
834 u8 fault_reason, u16 source_id, u64 addr) 805 u8 fault_reason, u16 source_id, unsigned long long addr)
835{ 806{
836 const char *reason; 807 const char *reason;
837 808
@@ -1084,9 +1055,9 @@ static void dmar_init_reserved_ranges(void)
1084 if (!r->flags || !(r->flags & IORESOURCE_MEM)) 1055 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1085 continue; 1056 continue;
1086 addr = r->start; 1057 addr = r->start;
1087 addr &= PAGE_MASK_4K; 1058 addr &= PAGE_MASK;
1088 size = r->end - addr; 1059 size = r->end - addr;
1089 size = PAGE_ALIGN_4K(size); 1060 size = PAGE_ALIGN(size);
1090 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr), 1061 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr),
1091 IOVA_PFN(size + addr) - 1); 1062 IOVA_PFN(size + addr) - 1);
1092 if (!iova) 1063 if (!iova)
@@ -1148,7 +1119,7 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
1148 domain->pgd = (struct dma_pte *)alloc_pgtable_page(); 1119 domain->pgd = (struct dma_pte *)alloc_pgtable_page();
1149 if (!domain->pgd) 1120 if (!domain->pgd)
1150 return -ENOMEM; 1121 return -ENOMEM;
1151 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE_4K); 1122 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
1152 return 0; 1123 return 0;
1153} 1124}
1154 1125
@@ -1164,7 +1135,7 @@ static void domain_exit(struct dmar_domain *domain)
1164 /* destroy iovas */ 1135 /* destroy iovas */
1165 put_iova_domain(&domain->iovad); 1136 put_iova_domain(&domain->iovad);
1166 end = DOMAIN_MAX_ADDR(domain->gaw); 1137 end = DOMAIN_MAX_ADDR(domain->gaw);
1167 end = end & (~PAGE_MASK_4K); 1138 end = end & (~PAGE_MASK);
1168 1139
1169 /* clear ptes */ 1140 /* clear ptes */
1170 dma_pte_clear_range(domain, 0, end); 1141 dma_pte_clear_range(domain, 0, end);
@@ -1204,11 +1175,13 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
1204 __iommu_flush_cache(iommu, context, sizeof(*context)); 1175 __iommu_flush_cache(iommu, context, sizeof(*context));
1205 1176
1206 /* it's a non-present to present mapping */ 1177 /* it's a non-present to present mapping */
1207 if (iommu_flush_context_device(iommu, domain->id, 1178 if (iommu->flush.flush_context(iommu, domain->id,
1208 (((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT, 1)) 1179 (((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT,
1180 DMA_CCMD_DEVICE_INVL, 1))
1209 iommu_flush_write_buffer(iommu); 1181 iommu_flush_write_buffer(iommu);
1210 else 1182 else
1211 iommu_flush_iotlb_dsi(iommu, 0, 0); 1183 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH, 0);
1184
1212 spin_unlock_irqrestore(&iommu->lock, flags); 1185 spin_unlock_irqrestore(&iommu->lock, flags);
1213 return 0; 1186 return 0;
1214} 1187}
@@ -1283,22 +1256,25 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
1283 u64 start_pfn, end_pfn; 1256 u64 start_pfn, end_pfn;
1284 struct dma_pte *pte; 1257 struct dma_pte *pte;
1285 int index; 1258 int index;
1259 int addr_width = agaw_to_width(domain->agaw);
1260
1261 hpa &= (((u64)1) << addr_width) - 1;
1286 1262
1287 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0) 1263 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1288 return -EINVAL; 1264 return -EINVAL;
1289 iova &= PAGE_MASK_4K; 1265 iova &= PAGE_MASK;
1290 start_pfn = ((u64)hpa) >> PAGE_SHIFT_4K; 1266 start_pfn = ((u64)hpa) >> VTD_PAGE_SHIFT;
1291 end_pfn = (PAGE_ALIGN_4K(((u64)hpa) + size)) >> PAGE_SHIFT_4K; 1267 end_pfn = (VTD_PAGE_ALIGN(((u64)hpa) + size)) >> VTD_PAGE_SHIFT;
1292 index = 0; 1268 index = 0;
1293 while (start_pfn < end_pfn) { 1269 while (start_pfn < end_pfn) {
1294 pte = addr_to_dma_pte(domain, iova + PAGE_SIZE_4K * index); 1270 pte = addr_to_dma_pte(domain, iova + VTD_PAGE_SIZE * index);
1295 if (!pte) 1271 if (!pte)
1296 return -ENOMEM; 1272 return -ENOMEM;
1297 /* We don't need lock here, nobody else 1273 /* We don't need lock here, nobody else
1298 * touches the iova range 1274 * touches the iova range
1299 */ 1275 */
1300 BUG_ON(dma_pte_addr(*pte)); 1276 BUG_ON(dma_pte_addr(*pte));
1301 dma_set_pte_addr(*pte, start_pfn << PAGE_SHIFT_4K); 1277 dma_set_pte_addr(*pte, start_pfn << VTD_PAGE_SHIFT);
1302 dma_set_pte_prot(*pte, prot); 1278 dma_set_pte_prot(*pte, prot);
1303 __iommu_flush_cache(domain->iommu, pte, sizeof(*pte)); 1279 __iommu_flush_cache(domain->iommu, pte, sizeof(*pte));
1304 start_pfn++; 1280 start_pfn++;
@@ -1310,8 +1286,10 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
1310static void detach_domain_for_dev(struct dmar_domain *domain, u8 bus, u8 devfn) 1286static void detach_domain_for_dev(struct dmar_domain *domain, u8 bus, u8 devfn)
1311{ 1287{
1312 clear_context_table(domain->iommu, bus, devfn); 1288 clear_context_table(domain->iommu, bus, devfn);
1313 iommu_flush_context_global(domain->iommu, 0); 1289 domain->iommu->flush.flush_context(domain->iommu, 0, 0, 0,
1314 iommu_flush_iotlb_global(domain->iommu, 0); 1290 DMA_CCMD_GLOBAL_INVL, 0);
1291 domain->iommu->flush.flush_iotlb(domain->iommu, 0, 0, 0,
1292 DMA_TLB_GLOBAL_FLUSH, 0);
1315} 1293}
1316 1294
1317static void domain_remove_dev_info(struct dmar_domain *domain) 1295static void domain_remove_dev_info(struct dmar_domain *domain)
@@ -1474,11 +1452,13 @@ error:
1474 return find_domain(pdev); 1452 return find_domain(pdev);
1475} 1453}
1476 1454
1477static int iommu_prepare_identity_map(struct pci_dev *pdev, u64 start, u64 end) 1455static int iommu_prepare_identity_map(struct pci_dev *pdev,
1456 unsigned long long start,
1457 unsigned long long end)
1478{ 1458{
1479 struct dmar_domain *domain; 1459 struct dmar_domain *domain;
1480 unsigned long size; 1460 unsigned long size;
1481 u64 base; 1461 unsigned long long base;
1482 int ret; 1462 int ret;
1483 1463
1484 printk(KERN_INFO 1464 printk(KERN_INFO
@@ -1490,9 +1470,9 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev, u64 start, u64 end)
1490 return -ENOMEM; 1470 return -ENOMEM;
1491 1471
1492 /* The address might not be aligned */ 1472 /* The address might not be aligned */
1493 base = start & PAGE_MASK_4K; 1473 base = start & PAGE_MASK;
1494 size = end - base; 1474 size = end - base;
1495 size = PAGE_ALIGN_4K(size); 1475 size = PAGE_ALIGN(size);
1496 if (!reserve_iova(&domain->iovad, IOVA_PFN(base), 1476 if (!reserve_iova(&domain->iovad, IOVA_PFN(base),
1497 IOVA_PFN(base + size) - 1)) { 1477 IOVA_PFN(base + size) - 1)) {
1498 printk(KERN_ERR "IOMMU: reserve iova failed\n"); 1478 printk(KERN_ERR "IOMMU: reserve iova failed\n");
@@ -1662,6 +1642,30 @@ int __init init_dmars(void)
1662 } 1642 }
1663 } 1643 }
1664 1644
1645 for_each_drhd_unit(drhd) {
1646 if (drhd->ignored)
1647 continue;
1648
1649 iommu = drhd->iommu;
1650 if (dmar_enable_qi(iommu)) {
1651 /*
1652 * Queued Invalidate not enabled, use Register Based
1653 * Invalidate
1654 */
1655 iommu->flush.flush_context = __iommu_flush_context;
1656 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
1657 printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
1658 "invalidation\n",
1659 (unsigned long long)drhd->reg_base_addr);
1660 } else {
1661 iommu->flush.flush_context = qi_flush_context;
1662 iommu->flush.flush_iotlb = qi_flush_iotlb;
1663 printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
1664 "invalidation\n",
1665 (unsigned long long)drhd->reg_base_addr);
1666 }
1667 }
1668
1665 /* 1669 /*
1666 * For each rmrr 1670 * For each rmrr
1667 * for each dev attached to rmrr 1671 * for each dev attached to rmrr
@@ -1714,9 +1718,10 @@ int __init init_dmars(void)
1714 1718
1715 iommu_set_root_entry(iommu); 1719 iommu_set_root_entry(iommu);
1716 1720
1717 iommu_flush_context_global(iommu, 0); 1721 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
1718 iommu_flush_iotlb_global(iommu, 0); 1722 0);
1719 1723 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
1724 0);
1720 iommu_disable_protect_mem_regions(iommu); 1725 iommu_disable_protect_mem_regions(iommu);
1721 1726
1722 ret = iommu_enable_translation(iommu); 1727 ret = iommu_enable_translation(iommu);
@@ -1738,8 +1743,8 @@ error:
1738static inline u64 aligned_size(u64 host_addr, size_t size) 1743static inline u64 aligned_size(u64 host_addr, size_t size)
1739{ 1744{
1740 u64 addr; 1745 u64 addr;
1741 addr = (host_addr & (~PAGE_MASK_4K)) + size; 1746 addr = (host_addr & (~PAGE_MASK)) + size;
1742 return PAGE_ALIGN_4K(addr); 1747 return PAGE_ALIGN(addr);
1743} 1748}
1744 1749
1745struct iova * 1750struct iova *
@@ -1753,20 +1758,20 @@ iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end)
1753 return NULL; 1758 return NULL;
1754 1759
1755 piova = alloc_iova(&domain->iovad, 1760 piova = alloc_iova(&domain->iovad,
1756 size >> PAGE_SHIFT_4K, IOVA_PFN(end), 1); 1761 size >> PAGE_SHIFT, IOVA_PFN(end), 1);
1757 return piova; 1762 return piova;
1758} 1763}
1759 1764
1760static struct iova * 1765static struct iova *
1761__intel_alloc_iova(struct device *dev, struct dmar_domain *domain, 1766__intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
1762 size_t size) 1767 size_t size, u64 dma_mask)
1763{ 1768{
1764 struct pci_dev *pdev = to_pci_dev(dev); 1769 struct pci_dev *pdev = to_pci_dev(dev);
1765 struct iova *iova = NULL; 1770 struct iova *iova = NULL;
1766 1771
1767 if ((pdev->dma_mask <= DMA_32BIT_MASK) || (dmar_forcedac)) { 1772 if (dma_mask <= DMA_32BIT_MASK || dmar_forcedac)
1768 iova = iommu_alloc_iova(domain, size, pdev->dma_mask); 1773 iova = iommu_alloc_iova(domain, size, dma_mask);
1769 } else { 1774 else {
1770 /* 1775 /*
1771 * First try to allocate an io virtual address in 1776 * First try to allocate an io virtual address in
1772 * DMA_32BIT_MASK and if that fails then try allocating 1777 * DMA_32BIT_MASK and if that fails then try allocating
@@ -1774,7 +1779,7 @@ __intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
1774 */ 1779 */
1775 iova = iommu_alloc_iova(domain, size, DMA_32BIT_MASK); 1780 iova = iommu_alloc_iova(domain, size, DMA_32BIT_MASK);
1776 if (!iova) 1781 if (!iova)
1777 iova = iommu_alloc_iova(domain, size, pdev->dma_mask); 1782 iova = iommu_alloc_iova(domain, size, dma_mask);
1778 } 1783 }
1779 1784
1780 if (!iova) { 1785 if (!iova) {
@@ -1813,12 +1818,12 @@ get_valid_domain_for_dev(struct pci_dev *pdev)
1813 return domain; 1818 return domain;
1814} 1819}
1815 1820
1816static dma_addr_t 1821static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
1817intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir) 1822 size_t size, int dir, u64 dma_mask)
1818{ 1823{
1819 struct pci_dev *pdev = to_pci_dev(hwdev); 1824 struct pci_dev *pdev = to_pci_dev(hwdev);
1820 struct dmar_domain *domain; 1825 struct dmar_domain *domain;
1821 unsigned long start_paddr; 1826 phys_addr_t start_paddr;
1822 struct iova *iova; 1827 struct iova *iova;
1823 int prot = 0; 1828 int prot = 0;
1824 int ret; 1829 int ret;
@@ -1833,11 +1838,11 @@ intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir)
1833 1838
1834 size = aligned_size((u64)paddr, size); 1839 size = aligned_size((u64)paddr, size);
1835 1840
1836 iova = __intel_alloc_iova(hwdev, domain, size); 1841 iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
1837 if (!iova) 1842 if (!iova)
1838 goto error; 1843 goto error;
1839 1844
1840 start_paddr = iova->pfn_lo << PAGE_SHIFT_4K; 1845 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
1841 1846
1842 /* 1847 /*
1843 * Check if DMAR supports zero-length reads on write only 1848 * Check if DMAR supports zero-length reads on write only
@@ -1855,30 +1860,33 @@ intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir)
1855 * is not a big problem 1860 * is not a big problem
1856 */ 1861 */
1857 ret = domain_page_mapping(domain, start_paddr, 1862 ret = domain_page_mapping(domain, start_paddr,
1858 ((u64)paddr) & PAGE_MASK_4K, size, prot); 1863 ((u64)paddr) & PAGE_MASK, size, prot);
1859 if (ret) 1864 if (ret)
1860 goto error; 1865 goto error;
1861 1866
1862 pr_debug("Device %s request: %lx@%llx mapping: %lx@%llx, dir %d\n",
1863 pci_name(pdev), size, (u64)paddr,
1864 size, (u64)start_paddr, dir);
1865
1866 /* it's a non-present to present mapping */ 1867 /* it's a non-present to present mapping */
1867 ret = iommu_flush_iotlb_psi(domain->iommu, domain->id, 1868 ret = iommu_flush_iotlb_psi(domain->iommu, domain->id,
1868 start_paddr, size >> PAGE_SHIFT_4K, 1); 1869 start_paddr, size >> VTD_PAGE_SHIFT, 1);
1869 if (ret) 1870 if (ret)
1870 iommu_flush_write_buffer(domain->iommu); 1871 iommu_flush_write_buffer(domain->iommu);
1871 1872
1872 return (start_paddr + ((u64)paddr & (~PAGE_MASK_4K))); 1873 return start_paddr + ((u64)paddr & (~PAGE_MASK));
1873 1874
1874error: 1875error:
1875 if (iova) 1876 if (iova)
1876 __free_iova(&domain->iovad, iova); 1877 __free_iova(&domain->iovad, iova);
1877 printk(KERN_ERR"Device %s request: %lx@%llx dir %d --- failed\n", 1878 printk(KERN_ERR"Device %s request: %lx@%llx dir %d --- failed\n",
1878 pci_name(pdev), size, (u64)paddr, dir); 1879 pci_name(pdev), size, (unsigned long long)paddr, dir);
1879 return 0; 1880 return 0;
1880} 1881}
1881 1882
1883dma_addr_t intel_map_single(struct device *hwdev, phys_addr_t paddr,
1884 size_t size, int dir)
1885{
1886 return __intel_map_single(hwdev, paddr, size, dir,
1887 to_pci_dev(hwdev)->dma_mask);
1888}
1889
1882static void flush_unmaps(void) 1890static void flush_unmaps(void)
1883{ 1891{
1884 int i, j; 1892 int i, j;
@@ -1891,7 +1899,8 @@ static void flush_unmaps(void)
1891 struct intel_iommu *iommu = 1899 struct intel_iommu *iommu =
1892 deferred_flush[i].domain[0]->iommu; 1900 deferred_flush[i].domain[0]->iommu;
1893 1901
1894 iommu_flush_iotlb_global(iommu, 0); 1902 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
1903 DMA_TLB_GLOBAL_FLUSH, 0);
1895 for (j = 0; j < deferred_flush[i].next; j++) { 1904 for (j = 0; j < deferred_flush[i].next; j++) {
1896 __free_iova(&deferred_flush[i].domain[j]->iovad, 1905 __free_iova(&deferred_flush[i].domain[j]->iovad,
1897 deferred_flush[i].iova[j]); 1906 deferred_flush[i].iova[j]);
@@ -1936,8 +1945,8 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova)
1936 spin_unlock_irqrestore(&async_umap_flush_lock, flags); 1945 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
1937} 1946}
1938 1947
1939static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, 1948void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
1940 size_t size, int dir) 1949 int dir)
1941{ 1950{
1942 struct pci_dev *pdev = to_pci_dev(dev); 1951 struct pci_dev *pdev = to_pci_dev(dev);
1943 struct dmar_domain *domain; 1952 struct dmar_domain *domain;
@@ -1953,11 +1962,11 @@ static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr,
1953 if (!iova) 1962 if (!iova)
1954 return; 1963 return;
1955 1964
1956 start_addr = iova->pfn_lo << PAGE_SHIFT_4K; 1965 start_addr = iova->pfn_lo << PAGE_SHIFT;
1957 size = aligned_size((u64)dev_addr, size); 1966 size = aligned_size((u64)dev_addr, size);
1958 1967
1959 pr_debug("Device %s unmapping: %lx@%llx\n", 1968 pr_debug("Device %s unmapping: %lx@%llx\n",
1960 pci_name(pdev), size, (u64)start_addr); 1969 pci_name(pdev), size, (unsigned long long)start_addr);
1961 1970
1962 /* clear the whole page */ 1971 /* clear the whole page */
1963 dma_pte_clear_range(domain, start_addr, start_addr + size); 1972 dma_pte_clear_range(domain, start_addr, start_addr + size);
@@ -1965,7 +1974,7 @@ static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr,
1965 dma_pte_free_pagetable(domain, start_addr, start_addr + size); 1974 dma_pte_free_pagetable(domain, start_addr, start_addr + size);
1966 if (intel_iommu_strict) { 1975 if (intel_iommu_strict) {
1967 if (iommu_flush_iotlb_psi(domain->iommu, 1976 if (iommu_flush_iotlb_psi(domain->iommu,
1968 domain->id, start_addr, size >> PAGE_SHIFT_4K, 0)) 1977 domain->id, start_addr, size >> VTD_PAGE_SHIFT, 0))
1969 iommu_flush_write_buffer(domain->iommu); 1978 iommu_flush_write_buffer(domain->iommu);
1970 /* free iova */ 1979 /* free iova */
1971 __free_iova(&domain->iovad, iova); 1980 __free_iova(&domain->iovad, iova);
@@ -1978,13 +1987,13 @@ static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr,
1978 } 1987 }
1979} 1988}
1980 1989
1981static void * intel_alloc_coherent(struct device *hwdev, size_t size, 1990void *intel_alloc_coherent(struct device *hwdev, size_t size,
1982 dma_addr_t *dma_handle, gfp_t flags) 1991 dma_addr_t *dma_handle, gfp_t flags)
1983{ 1992{
1984 void *vaddr; 1993 void *vaddr;
1985 int order; 1994 int order;
1986 1995
1987 size = PAGE_ALIGN_4K(size); 1996 size = PAGE_ALIGN(size);
1988 order = get_order(size); 1997 order = get_order(size);
1989 flags &= ~(GFP_DMA | GFP_DMA32); 1998 flags &= ~(GFP_DMA | GFP_DMA32);
1990 1999
@@ -1993,19 +2002,21 @@ static void * intel_alloc_coherent(struct device *hwdev, size_t size,
1993 return NULL; 2002 return NULL;
1994 memset(vaddr, 0, size); 2003 memset(vaddr, 0, size);
1995 2004
1996 *dma_handle = intel_map_single(hwdev, virt_to_bus(vaddr), size, DMA_BIDIRECTIONAL); 2005 *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
2006 DMA_BIDIRECTIONAL,
2007 hwdev->coherent_dma_mask);
1997 if (*dma_handle) 2008 if (*dma_handle)
1998 return vaddr; 2009 return vaddr;
1999 free_pages((unsigned long)vaddr, order); 2010 free_pages((unsigned long)vaddr, order);
2000 return NULL; 2011 return NULL;
2001} 2012}
2002 2013
2003static void intel_free_coherent(struct device *hwdev, size_t size, 2014void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
2004 void *vaddr, dma_addr_t dma_handle) 2015 dma_addr_t dma_handle)
2005{ 2016{
2006 int order; 2017 int order;
2007 2018
2008 size = PAGE_ALIGN_4K(size); 2019 size = PAGE_ALIGN(size);
2009 order = get_order(size); 2020 order = get_order(size);
2010 2021
2011 intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL); 2022 intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
@@ -2013,8 +2024,9 @@ static void intel_free_coherent(struct device *hwdev, size_t size,
2013} 2024}
2014 2025
2015#define SG_ENT_VIRT_ADDRESS(sg) (sg_virt((sg))) 2026#define SG_ENT_VIRT_ADDRESS(sg) (sg_virt((sg)))
2016static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, 2027
2017 int nelems, int dir) 2028void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2029 int nelems, int dir)
2018{ 2030{
2019 int i; 2031 int i;
2020 struct pci_dev *pdev = to_pci_dev(hwdev); 2032 struct pci_dev *pdev = to_pci_dev(hwdev);
@@ -2038,7 +2050,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2038 size += aligned_size((u64)addr, sg->length); 2050 size += aligned_size((u64)addr, sg->length);
2039 } 2051 }
2040 2052
2041 start_addr = iova->pfn_lo << PAGE_SHIFT_4K; 2053 start_addr = iova->pfn_lo << PAGE_SHIFT;
2042 2054
2043 /* clear the whole page */ 2055 /* clear the whole page */
2044 dma_pte_clear_range(domain, start_addr, start_addr + size); 2056 dma_pte_clear_range(domain, start_addr, start_addr + size);
@@ -2046,7 +2058,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2046 dma_pte_free_pagetable(domain, start_addr, start_addr + size); 2058 dma_pte_free_pagetable(domain, start_addr, start_addr + size);
2047 2059
2048 if (iommu_flush_iotlb_psi(domain->iommu, domain->id, start_addr, 2060 if (iommu_flush_iotlb_psi(domain->iommu, domain->id, start_addr,
2049 size >> PAGE_SHIFT_4K, 0)) 2061 size >> VTD_PAGE_SHIFT, 0))
2050 iommu_flush_write_buffer(domain->iommu); 2062 iommu_flush_write_buffer(domain->iommu);
2051 2063
2052 /* free iova */ 2064 /* free iova */
@@ -2067,8 +2079,8 @@ static int intel_nontranslate_map_sg(struct device *hddev,
2067 return nelems; 2079 return nelems;
2068} 2080}
2069 2081
2070static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, 2082int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
2071 int nelems, int dir) 2083 int dir)
2072{ 2084{
2073 void *addr; 2085 void *addr;
2074 int i; 2086 int i;
@@ -2096,7 +2108,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist,
2096 size += aligned_size((u64)addr, sg->length); 2108 size += aligned_size((u64)addr, sg->length);
2097 } 2109 }
2098 2110
2099 iova = __intel_alloc_iova(hwdev, domain, size); 2111 iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
2100 if (!iova) { 2112 if (!iova) {
2101 sglist->dma_length = 0; 2113 sglist->dma_length = 0;
2102 return 0; 2114 return 0;
@@ -2112,14 +2124,14 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist,
2112 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) 2124 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2113 prot |= DMA_PTE_WRITE; 2125 prot |= DMA_PTE_WRITE;
2114 2126
2115 start_addr = iova->pfn_lo << PAGE_SHIFT_4K; 2127 start_addr = iova->pfn_lo << PAGE_SHIFT;
2116 offset = 0; 2128 offset = 0;
2117 for_each_sg(sglist, sg, nelems, i) { 2129 for_each_sg(sglist, sg, nelems, i) {
2118 addr = SG_ENT_VIRT_ADDRESS(sg); 2130 addr = SG_ENT_VIRT_ADDRESS(sg);
2119 addr = (void *)virt_to_phys(addr); 2131 addr = (void *)virt_to_phys(addr);
2120 size = aligned_size((u64)addr, sg->length); 2132 size = aligned_size((u64)addr, sg->length);
2121 ret = domain_page_mapping(domain, start_addr + offset, 2133 ret = domain_page_mapping(domain, start_addr + offset,
2122 ((u64)addr) & PAGE_MASK_4K, 2134 ((u64)addr) & PAGE_MASK,
2123 size, prot); 2135 size, prot);
2124 if (ret) { 2136 if (ret) {
2125 /* clear the page */ 2137 /* clear the page */
@@ -2133,14 +2145,14 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist,
2133 return 0; 2145 return 0;
2134 } 2146 }
2135 sg->dma_address = start_addr + offset + 2147 sg->dma_address = start_addr + offset +
2136 ((u64)addr & (~PAGE_MASK_4K)); 2148 ((u64)addr & (~PAGE_MASK));
2137 sg->dma_length = sg->length; 2149 sg->dma_length = sg->length;
2138 offset += size; 2150 offset += size;
2139 } 2151 }
2140 2152
2141 /* it's a non-present to present mapping */ 2153 /* it's a non-present to present mapping */
2142 if (iommu_flush_iotlb_psi(domain->iommu, domain->id, 2154 if (iommu_flush_iotlb_psi(domain->iommu, domain->id,
2143 start_addr, offset >> PAGE_SHIFT_4K, 1)) 2155 start_addr, offset >> VTD_PAGE_SHIFT, 1))
2144 iommu_flush_write_buffer(domain->iommu); 2156 iommu_flush_write_buffer(domain->iommu);
2145 return nelems; 2157 return nelems;
2146} 2158}
@@ -2180,7 +2192,6 @@ static inline int iommu_devinfo_cache_init(void)
2180 sizeof(struct device_domain_info), 2192 sizeof(struct device_domain_info),
2181 0, 2193 0,
2182 SLAB_HWCACHE_ALIGN, 2194 SLAB_HWCACHE_ALIGN,
2183
2184 NULL); 2195 NULL);
2185 if (!iommu_devinfo_cache) { 2196 if (!iommu_devinfo_cache) {
2186 printk(KERN_ERR "Couldn't create devinfo cache\n"); 2197 printk(KERN_ERR "Couldn't create devinfo cache\n");
@@ -2198,7 +2209,6 @@ static inline int iommu_iova_cache_init(void)
2198 sizeof(struct iova), 2209 sizeof(struct iova),
2199 0, 2210 0,
2200 SLAB_HWCACHE_ALIGN, 2211 SLAB_HWCACHE_ALIGN,
2201
2202 NULL); 2212 NULL);
2203 if (!iommu_iova_cache) { 2213 if (!iommu_iova_cache) {
2204 printk(KERN_ERR "Couldn't create iova cache\n"); 2214 printk(KERN_ERR "Couldn't create iova cache\n");
@@ -2327,7 +2337,7 @@ void intel_iommu_domain_exit(struct dmar_domain *domain)
2327 return; 2337 return;
2328 2338
2329 end = DOMAIN_MAX_ADDR(domain->gaw); 2339 end = DOMAIN_MAX_ADDR(domain->gaw);
2330 end = end & (~PAGE_MASK_4K); 2340 end = end & (~VTD_PAGE_MASK);
2331 2341
2332 /* clear ptes */ 2342 /* clear ptes */
2333 dma_pte_clear_range(domain, 0, end); 2343 dma_pte_clear_range(domain, 0, end);
@@ -2423,6 +2433,6 @@ u64 intel_iommu_iova_to_pfn(struct dmar_domain *domain, u64 iova)
2423 if (pte) 2433 if (pte)
2424 pfn = dma_pte_addr(*pte); 2434 pfn = dma_pte_addr(*pte);
2425 2435
2426 return pfn >> PAGE_SHIFT_4K; 2436 return pfn >> VTD_PAGE_SHIFT;
2427} 2437}
2428EXPORT_SYMBOL_GPL(intel_iommu_iova_to_pfn); 2438EXPORT_SYMBOL_GPL(intel_iommu_iova_to_pfn);
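
The intel-iommu.c changes above follow two threads. First, the driver-private PAGE_*_4K helpers are split: structures the VT-d hardware always expects in 4 KiB units (root/context tables, IOTLB flush addresses) move to VTD_PAGE_* macros, while CPU-page-sized allocations use the generic PAGE_SIZE/PAGE_MASK/PAGE_ALIGN. Second, context-cache and IOTLB invalidation go through per-IOMMU function pointers, so init_dmars() can install either register-based invalidation (__iommu_flush_context/__iommu_flush_iotlb) or queued invalidation (qi_flush_context/qi_flush_iotlb) per unit. A hedged sketch of the header side, assuming definitions along these lines (the exact intel-iommu.h layout may differ):

#define VTD_PAGE_SHIFT		(12)
#define VTD_PAGE_SIZE		(1UL << VTD_PAGE_SHIFT)
#define VTD_PAGE_MASK		(((u64)-1) << VTD_PAGE_SHIFT)
#define VTD_PAGE_ALIGN(addr)	(((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)

/* One set of invalidation callbacks per IOMMU, filled in by init_dmars(). */
struct iommu_flush {
	int (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid,
			     u8 fm, u64 type, int non_present_entry_flush);
	int (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
			   unsigned int size_order, u64 type,
			   int non_present_entry_flush);
};

Callers then invalidate through iommu->flush.flush_iotlb(...), as in the hunks above, and the same call sites work whether or not the hardware supports queued invalidation.
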
diff --git a/drivers/pci/irq.c b/drivers/pci/irq.c
new file mode 100644
index 000000000000..6441dfa969a3
--- /dev/null
+++ b/drivers/pci/irq.c
@@ -0,0 +1,60 @@
1/*
2 * PCI IRQ failure handing code
3 *
4 * Copyright (c) 2008 James Bottomley <James.Bottomley@HansenPartnership.com>
5 */
6
7#include <linux/acpi.h>
8#include <linux/device.h>
9#include <linux/kernel.h>
10#include <linux/pci.h>
11
12static void pci_note_irq_problem(struct pci_dev *pdev, const char *reason)
13{
14 struct pci_dev *parent = to_pci_dev(pdev->dev.parent);
15
16 dev_printk(KERN_ERR, &pdev->dev,
17 "Potentially misrouted IRQ (Bridge %s %04x:%04x)\n",
18 parent->dev.bus_id, parent->vendor, parent->device);
19 dev_printk(KERN_ERR, &pdev->dev, "%s\n", reason);
20 dev_printk(KERN_ERR, &pdev->dev, "Please report to linux-kernel@vger.kernel.org\n");
21 WARN_ON(1);
22}
23
24/**
25 * pci_lost_interrupt - reports a lost PCI interrupt
26 * @pdev: device whose interrupt is lost
27 *
28 * The primary function of this routine is to report a lost interrupt
29 * in a standard way which users can recognise (instead of blaming the
30 * driver).
31 *
32 * Returns:
33 * a suggestion for fixing it (although the driver is not required to
34 * act on this).
35 */
36enum pci_lost_interrupt_reason pci_lost_interrupt(struct pci_dev *pdev)
37{
38 if (pdev->msi_enabled || pdev->msix_enabled) {
39 enum pci_lost_interrupt_reason ret;
40
41 if (pdev->msix_enabled) {
42 pci_note_irq_problem(pdev, "MSIX routing failure");
43 ret = PCI_LOST_IRQ_DISABLE_MSIX;
44 } else {
45 pci_note_irq_problem(pdev, "MSI routing failure");
46 ret = PCI_LOST_IRQ_DISABLE_MSI;
47 }
48 return ret;
49 }
50#ifdef CONFIG_ACPI
51 if (!(acpi_disabled || acpi_noirq)) {
52 pci_note_irq_problem(pdev, "Potential ACPI misrouting please reboot with acpi=noirq");
53 /* currently no way to fix acpi on the fly */
54 return PCI_LOST_IRQ_DISABLE_ACPI;
55 }
56#endif
57 pci_note_irq_problem(pdev, "unknown cause (not MSI or ACPI)");
58 return PCI_LOST_IRQ_NO_INFORMATION;
59}
60EXPORT_SYMBOL(pci_lost_interrupt);
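
The new drivers/pci/irq.c gives drivers a uniform way to report an interrupt that apparently never arrived, together with a hint about how to recover. A hypothetical caller, sketched under the assumption that the driver falls back to polling when nothing can be fixed at runtime (struct foo_dev, its fields, and foo_handle_cmd_timeout() are made up for illustration):

#include <linux/pci.h>

struct foo_dev {
	struct pci_dev *pdev;
	bool polling;		/* fall back to polling for completions */
};

static void foo_handle_cmd_timeout(struct foo_dev *fd)
{
	switch (pci_lost_interrupt(fd->pdev)) {
	case PCI_LOST_IRQ_DISABLE_MSI:
		pci_disable_msi(fd->pdev);	/* driver re-requests INTx irq */
		break;
	case PCI_LOST_IRQ_DISABLE_MSIX:
		pci_disable_msix(fd->pdev);	/* driver re-requests INTx irq */
		break;
	case PCI_LOST_IRQ_DISABLE_ACPI:
	case PCI_LOST_IRQ_NO_INFORMATION:
	default:
		/* Nothing to change at runtime; poll for completions instead. */
		fd->polling = true;
		break;
	}
}

The driver is not obliged to act on the suggestion; the main point of pci_lost_interrupt() is the standardized "Potentially misrouted IRQ" report, so users blame the routing rather than the driver.
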
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index d2812013fd22..74801f7df9c9 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -759,3 +759,24 @@ void pci_msi_init_pci_dev(struct pci_dev *dev)
759{ 759{
760 INIT_LIST_HEAD(&dev->msi_list); 760 INIT_LIST_HEAD(&dev->msi_list);
761} 761}
762
763#ifdef CONFIG_ACPI
764#include <linux/acpi.h>
765#include <linux/pci-acpi.h>
766static void __devinit msi_acpi_init(void)
767{
768 if (acpi_pci_disabled)
769 return;
770 pci_osc_support_set(OSC_MSI_SUPPORT);
771 pcie_osc_support_set(OSC_MSI_SUPPORT);
772}
773#else
774static inline void msi_acpi_init(void) { }
775#endif /* CONFIG_ACPI */
776
777void __devinit msi_init(void)
778{
779 if (!pci_msi_enable)
780 return;
781 msi_acpi_init();
782}
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 89a2f0fa10f9..ae5ec76dca77 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -24,17 +24,17 @@ struct acpi_osc_data {
24 acpi_handle handle; 24 acpi_handle handle;
25 u32 support_set; 25 u32 support_set;
26 u32 control_set; 26 u32 control_set;
27 int is_queried;
28 u32 query_result;
29 struct list_head sibiling; 27 struct list_head sibiling;
30}; 28};
31static LIST_HEAD(acpi_osc_data_list); 29static LIST_HEAD(acpi_osc_data_list);
32 30
33struct acpi_osc_args { 31struct acpi_osc_args {
34 u32 capbuf[3]; 32 u32 capbuf[3];
35 u32 query_result; 33 u32 ctrl_result;
36}; 34};
37 35
36static DEFINE_MUTEX(pci_acpi_lock);
37
38static struct acpi_osc_data *acpi_get_osc_data(acpi_handle handle) 38static struct acpi_osc_data *acpi_get_osc_data(acpi_handle handle)
39{ 39{
40 struct acpi_osc_data *data; 40 struct acpi_osc_data *data;
@@ -63,7 +63,7 @@ static acpi_status acpi_run_osc(acpi_handle handle,
63 union acpi_object in_params[4]; 63 union acpi_object in_params[4];
64 struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL}; 64 struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
65 union acpi_object *out_obj; 65 union acpi_object *out_obj;
66 u32 osc_dw0, flags = osc_args->capbuf[OSC_QUERY_TYPE]; 66 u32 errors, flags = osc_args->capbuf[OSC_QUERY_TYPE];
67 67
68 /* Setting up input parameters */ 68 /* Setting up input parameters */
69 input.count = 4; 69 input.count = 4;
@@ -83,21 +83,25 @@ static acpi_status acpi_run_osc(acpi_handle handle,
83 if (ACPI_FAILURE(status)) 83 if (ACPI_FAILURE(status))
84 return status; 84 return status;
85 85
86 if (!output.length)
87 return AE_NULL_OBJECT;
88
86 out_obj = output.pointer; 89 out_obj = output.pointer;
87 if (out_obj->type != ACPI_TYPE_BUFFER) { 90 if (out_obj->type != ACPI_TYPE_BUFFER) {
88 printk(KERN_DEBUG "Evaluate _OSC returns wrong type\n"); 91 printk(KERN_DEBUG "Evaluate _OSC returns wrong type\n");
89 status = AE_TYPE; 92 status = AE_TYPE;
90 goto out_kfree; 93 goto out_kfree;
91 } 94 }
92 osc_dw0 = *((u32 *)out_obj->buffer.pointer); 95 /* Need to ignore the bit0 in result code */
93 if (osc_dw0) { 96 errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0);
94 if (osc_dw0 & OSC_REQUEST_ERROR) 97 if (errors) {
98 if (errors & OSC_REQUEST_ERROR)
95 printk(KERN_DEBUG "_OSC request fails\n"); 99 printk(KERN_DEBUG "_OSC request fails\n");
96 if (osc_dw0 & OSC_INVALID_UUID_ERROR) 100 if (errors & OSC_INVALID_UUID_ERROR)
97 printk(KERN_DEBUG "_OSC invalid UUID\n"); 101 printk(KERN_DEBUG "_OSC invalid UUID\n");
98 if (osc_dw0 & OSC_INVALID_REVISION_ERROR) 102 if (errors & OSC_INVALID_REVISION_ERROR)
99 printk(KERN_DEBUG "_OSC invalid revision\n"); 103 printk(KERN_DEBUG "_OSC invalid revision\n");
100 if (osc_dw0 & OSC_CAPABILITIES_MASK_ERROR) { 104 if (errors & OSC_CAPABILITIES_MASK_ERROR) {
101 if (flags & OSC_QUERY_ENABLE) 105 if (flags & OSC_QUERY_ENABLE)
102 goto out_success; 106 goto out_success;
103 printk(KERN_DEBUG "_OSC FW not grant req. control\n"); 107 printk(KERN_DEBUG "_OSC FW not grant req. control\n");
@@ -108,9 +112,8 @@ static acpi_status acpi_run_osc(acpi_handle handle,
108 goto out_kfree; 112 goto out_kfree;
109 } 113 }
110out_success: 114out_success:
111 if (flags & OSC_QUERY_ENABLE) 115 osc_args->ctrl_result =
112 osc_args->query_result = 116 *((u32 *)(out_obj->buffer.pointer + 8));
113 *((u32 *)(out_obj->buffer.pointer + 8));
114 status = AE_OK; 117 status = AE_OK;
115 118
116out_kfree: 119out_kfree:
@@ -118,41 +121,53 @@ out_kfree:
118 return status; 121 return status;
119} 122}
120 123
121static acpi_status acpi_query_osc(acpi_handle handle, 124static acpi_status __acpi_query_osc(u32 flags, struct acpi_osc_data *osc_data,
122 u32 level, void *context, void **retval) 125 u32 *result)
123{ 126{
124 acpi_status status; 127 acpi_status status;
125 struct acpi_osc_data *osc_data; 128 u32 support_set;
126 u32 flags = (unsigned long)context, support_set;
127 acpi_handle tmp;
128 struct acpi_osc_args osc_args; 129 struct acpi_osc_args osc_args;
129 130
130 status = acpi_get_handle(handle, "_OSC", &tmp);
131 if (ACPI_FAILURE(status))
132 return status;
133
134 osc_data = acpi_get_osc_data(handle);
135 if (!osc_data) {
136 printk(KERN_ERR "acpi osc data array is full\n");
137 return AE_ERROR;
138 }
139
140 /* do _OSC query for all possible controls */ 131 /* do _OSC query for all possible controls */
141 support_set = osc_data->support_set | (flags & OSC_SUPPORT_MASKS); 132 support_set = osc_data->support_set | (flags & OSC_SUPPORT_MASKS);
142 osc_args.capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE; 133 osc_args.capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
143 osc_args.capbuf[OSC_SUPPORT_TYPE] = support_set; 134 osc_args.capbuf[OSC_SUPPORT_TYPE] = support_set;
144 osc_args.capbuf[OSC_CONTROL_TYPE] = OSC_CONTROL_MASKS; 135 osc_args.capbuf[OSC_CONTROL_TYPE] = OSC_CONTROL_MASKS;
145 136
146 status = acpi_run_osc(handle, &osc_args); 137 status = acpi_run_osc(osc_data->handle, &osc_args);
147 if (ACPI_SUCCESS(status)) { 138 if (ACPI_SUCCESS(status)) {
148 osc_data->support_set = support_set; 139 osc_data->support_set = support_set;
149 osc_data->query_result = osc_args.query_result; 140 *result = osc_args.ctrl_result;
150 osc_data->is_queried = 1;
151 } 141 }
152 142
153 return status; 143 return status;
154} 144}
155 145
146static acpi_status acpi_query_osc(acpi_handle handle,
147 u32 level, void *context, void **retval)
148{
149 acpi_status status;
150 struct acpi_osc_data *osc_data;
151 u32 flags = (unsigned long)context, dummy;
152 acpi_handle tmp;
153
154 status = acpi_get_handle(handle, "_OSC", &tmp);
155 if (ACPI_FAILURE(status))
156 return AE_OK;
157
158 mutex_lock(&pci_acpi_lock);
159 osc_data = acpi_get_osc_data(handle);
160 if (!osc_data) {
161 printk(KERN_ERR "acpi osc data array is full\n");
162 goto out;
163 }
164
165 __acpi_query_osc(flags, osc_data, &dummy);
166out:
167 mutex_unlock(&pci_acpi_lock);
168 return AE_OK;
169}
170
156/** 171/**
157 * __pci_osc_support_set - register OS support to Firmware 172 * __pci_osc_support_set - register OS support to Firmware
158 * @flags: OS support bits 173 * @flags: OS support bits
@@ -181,7 +196,7 @@ acpi_status __pci_osc_support_set(u32 flags, const char *hid)
181acpi_status pci_osc_control_set(acpi_handle handle, u32 flags) 196acpi_status pci_osc_control_set(acpi_handle handle, u32 flags)
182{ 197{
183 acpi_status status; 198 acpi_status status;
184 u32 ctrlset, control_set; 199 u32 ctrlset, control_set, result;
185 acpi_handle tmp; 200 acpi_handle tmp;
186 struct acpi_osc_data *osc_data; 201 struct acpi_osc_data *osc_data;
187 struct acpi_osc_args osc_args; 202 struct acpi_osc_args osc_args;
@@ -190,19 +205,28 @@ acpi_status pci_osc_control_set(acpi_handle handle, u32 flags)
190 if (ACPI_FAILURE(status)) 205 if (ACPI_FAILURE(status))
191 return status; 206 return status;
192 207
208 mutex_lock(&pci_acpi_lock);
193 osc_data = acpi_get_osc_data(handle); 209 osc_data = acpi_get_osc_data(handle);
194 if (!osc_data) { 210 if (!osc_data) {
195 printk(KERN_ERR "acpi osc data array is full\n"); 211 printk(KERN_ERR "acpi osc data array is full\n");
196 return AE_ERROR; 212 status = AE_ERROR;
213 goto out;
197 } 214 }
198 215
199 ctrlset = (flags & OSC_CONTROL_MASKS); 216 ctrlset = (flags & OSC_CONTROL_MASKS);
200 if (!ctrlset) 217 if (!ctrlset) {
201 return AE_TYPE; 218 status = AE_TYPE;
219 goto out;
220 }
202 221
203 if (osc_data->is_queried && 222 status = __acpi_query_osc(osc_data->support_set, osc_data, &result);
204 ((osc_data->query_result & ctrlset) != ctrlset)) 223 if (ACPI_FAILURE(status))
205 return AE_SUPPORT; 224 goto out;
225
226 if ((result & ctrlset) != ctrlset) {
227 status = AE_SUPPORT;
228 goto out;
229 }
206 230
207 control_set = osc_data->control_set | ctrlset; 231 control_set = osc_data->control_set | ctrlset;
208 osc_args.capbuf[OSC_QUERY_TYPE] = 0; 232 osc_args.capbuf[OSC_QUERY_TYPE] = 0;
@@ -211,7 +235,8 @@ acpi_status pci_osc_control_set(acpi_handle handle, u32 flags)
211 status = acpi_run_osc(handle, &osc_args); 235 status = acpi_run_osc(handle, &osc_args);
212 if (ACPI_SUCCESS(status)) 236 if (ACPI_SUCCESS(status))
213 osc_data->control_set = control_set; 237 osc_data->control_set = control_set;
214 238out:
239 mutex_unlock(&pci_acpi_lock);
215 return status; 240 return status;
216} 241}
217EXPORT_SYMBOL(pci_osc_control_set); 242EXPORT_SYMBOL(pci_osc_control_set);
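A hedged sketch of the consumer side: a driver asking firmware for native PCIe hotplug control over a root bridge. OSC_PCI_EXPRESS_NATIVE_HP_CONTROL is assumed to be one of the OSC_CONTROL_MASKS bits declared in <linux/pci-acpi.h>; foo_request_native_hotplug() is hypothetical.

static int foo_request_native_hotplug(acpi_handle root_bridge)
{
	acpi_status status;

	/* With this patch the query step is performed internally before the
	 * control request, so a single call is enough. */
	status = pci_osc_control_set(root_bridge,
				     OSC_PCI_EXPRESS_NATIVE_HP_CONTROL);
	if (ACPI_FAILURE(status))
		return -ENODEV;		/* firmware keeps control */
	return 0;
}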
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 110022d78689..5d72866897a8 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -575,7 +575,7 @@ static int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct
575 575
576 nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; 576 nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
577 start = vma->vm_pgoff; 577 start = vma->vm_pgoff;
578 size = pci_resource_len(pdev, resno) >> PAGE_SHIFT; 578 size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1;
579 if (start < size && size - start >= nr) 579 if (start < size && size - start >= nr)
580 return 1; 580 return 1;
581 WARN(1, "process \"%s\" tried to map 0x%08lx-0x%08lx on %s BAR %d (size 0x%08lx)\n", 581 WARN(1, "process \"%s\" tried to map 0x%08lx-0x%08lx on %s BAR %d (size 0x%08lx)\n",
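The new size computation rounds the BAR up to whole pages instead of truncating; a quick worked example, assuming PAGE_SHIFT == 12:

	/*
	 * A 2 KiB BAR (len = 0x800):
	 *   old: size = 0x800 >> 12            = 0 pages -> every mmap rejected
	 *   new: size = ((0x800 - 1) >> 12) + 1 = 1 page  -> a one-page mapping fits
	 */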
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 4db261e13e69..061d1ee0046a 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -18,6 +18,7 @@
18#include <linux/log2.h> 18#include <linux/log2.h>
19#include <linux/pci-aspm.h> 19#include <linux/pci-aspm.h>
20#include <linux/pm_wakeup.h> 20#include <linux/pm_wakeup.h>
21#include <linux/interrupt.h>
21#include <asm/dma.h> /* isa_dma_bridge_buggy */ 22#include <asm/dma.h> /* isa_dma_bridge_buggy */
22#include "pci.h" 23#include "pci.h"
23 24
@@ -1308,27 +1309,32 @@ void pci_enable_ari(struct pci_dev *dev)
1308 int pos; 1309 int pos;
1309 u32 cap; 1310 u32 cap;
1310 u16 ctrl; 1311 u16 ctrl;
1312 struct pci_dev *bridge;
1311 1313
1312 if (!dev->is_pcie) 1314 if (!dev->is_pcie || dev->devfn)
1313 return; 1315 return;
1314 1316
1315 if (dev->pcie_type != PCI_EXP_TYPE_ROOT_PORT && 1317 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
1316 dev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) 1318 if (!pos)
1317 return; 1319 return;
1318 1320
1319 pos = pci_find_capability(dev, PCI_CAP_ID_EXP); 1321 bridge = dev->bus->self;
1322 if (!bridge || !bridge->is_pcie)
1323 return;
1324
1325 pos = pci_find_capability(bridge, PCI_CAP_ID_EXP);
1320 if (!pos) 1326 if (!pos)
1321 return; 1327 return;
1322 1328
1323 pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap); 1329 pci_read_config_dword(bridge, pos + PCI_EXP_DEVCAP2, &cap);
1324 if (!(cap & PCI_EXP_DEVCAP2_ARI)) 1330 if (!(cap & PCI_EXP_DEVCAP2_ARI))
1325 return; 1331 return;
1326 1332
1327 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl); 1333 pci_read_config_word(bridge, pos + PCI_EXP_DEVCTL2, &ctrl);
1328 ctrl |= PCI_EXP_DEVCTL2_ARI; 1334 ctrl |= PCI_EXP_DEVCTL2_ARI;
1329 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl); 1335 pci_write_config_word(bridge, pos + PCI_EXP_DEVCTL2, ctrl);
1330 1336
1331 dev->ari_enabled = 1; 1337 bridge->ari_enabled = 1;
1332} 1338}
1333 1339
1334int 1340int
@@ -1746,6 +1752,103 @@ EXPORT_SYMBOL(pci_set_dma_seg_boundary);
1746#endif 1752#endif
1747 1753
1748/** 1754/**
1755 * pci_execute_reset_function() - Reset a PCI device function
1756 * @dev: Device function to reset
1757 *
1758 * Some devices allow an individual function to be reset without affecting
1759 * other functions in the same device. The PCI device must be responsive
1760 * to PCI config space in order to use this function.
1761 *
1762 * The device function is presumed to be unused when this function is called.
1763 * Resetting the device will make the contents of PCI configuration space
1764 * random, so any caller of this must be prepared to reinitialise the
1765 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
1766 * etc.
1767 *
1768 * Returns 0 if the device function was successfully reset or -ENOTTY if the
1769 * device doesn't support resetting a single function.
1770 */
1771int pci_execute_reset_function(struct pci_dev *dev)
1772{
1773 u16 status;
1774 u32 cap;
1775 int exppos = pci_find_capability(dev, PCI_CAP_ID_EXP);
1776
1777 if (!exppos)
1778 return -ENOTTY;
1779 pci_read_config_dword(dev, exppos + PCI_EXP_DEVCAP, &cap);
1780 if (!(cap & PCI_EXP_DEVCAP_FLR))
1781 return -ENOTTY;
1782
1783 pci_block_user_cfg_access(dev);
1784
1785	/* Wait for Transaction Pending bit clear */
1786 msleep(100);
1787 pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status);
1788 if (status & PCI_EXP_DEVSTA_TRPND) {
1789 dev_info(&dev->dev, "Busy after 100ms while trying to reset; "
1790 "sleeping for 1 second\n");
1791 ssleep(1);
1792 pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status);
1793 if (status & PCI_EXP_DEVSTA_TRPND)
1794 dev_info(&dev->dev, "Still busy after 1s; "
1795 "proceeding with reset anyway\n");
1796 }
1797
1798 pci_write_config_word(dev, exppos + PCI_EXP_DEVCTL,
1799 PCI_EXP_DEVCTL_BCR_FLR);
1800 mdelay(100);
1801
1802 pci_unblock_user_cfg_access(dev);
1803 return 0;
1804}
1805EXPORT_SYMBOL_GPL(pci_execute_reset_function);
1806
1807/**
1808 * pci_reset_function() - quiesce and reset a PCI device function
1809 * @dev: Device function to reset
1810 *
1811 * Some devices allow an individual function to be reset without affecting
1812 * other functions in the same device. The PCI device must be responsive
1813 * to PCI config space in order to use this function.
1814 *
1815 * This function does not just reset the PCI portion of a device, but
1816 * clears all the state associated with the device. This function differs
1817 * from pci_execute_reset_function in that it saves and restores device state
1818 * over the reset.
1819 *
1820 * Returns 0 if the device function was successfully reset or -ENOTTY if the
1821 * device doesn't support resetting a single function.
1822 */
1823int pci_reset_function(struct pci_dev *dev)
1824{
1825 u32 cap;
1826 int exppos = pci_find_capability(dev, PCI_CAP_ID_EXP);
1827 int r;
1828
1829 if (!exppos)
1830 return -ENOTTY;
1831 pci_read_config_dword(dev, exppos + PCI_EXP_DEVCAP, &cap);
1832 if (!(cap & PCI_EXP_DEVCAP_FLR))
1833 return -ENOTTY;
1834
1835 if (!dev->msi_enabled && !dev->msix_enabled && dev->irq != 0)
1836 disable_irq(dev->irq);
1837 pci_save_state(dev);
1838
1839 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
1840
1841 r = pci_execute_reset_function(dev);
1842
1843 pci_restore_state(dev);
1844 if (!dev->msi_enabled && !dev->msix_enabled && dev->irq != 0)
1845 enable_irq(dev->irq);
1846
1847 return r;
1848}
1849EXPORT_SYMBOL_GPL(pci_reset_function);
1850
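A brief usage sketch (not from the patch) for a hypothetical driver that wants a known-clean device before re-initialising it; foo_reinit_hw() is a placeholder.

static int foo_recover_device(struct pci_dev *pdev)
{
	int rc;

	rc = pci_reset_function(pdev);	/* quiesce, save state, FLR, restore */
	if (rc == -ENOTTY)
		return rc;		/* no Function Level Reset capability */

	/* Standard config space comes back from the saved state; any
	 * device-specific registers must be reprogrammed here. */
	return foo_reinit_hw(pdev);
}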
1851/**
1749 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count 1852 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
1750 * @dev: PCI device to query 1853 * @dev: PCI device to query
1751 * 1854 *
@@ -1933,10 +2036,13 @@ static int __devinit pci_init(void)
1933 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { 2036 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
1934 pci_fixup_device(pci_fixup_final, dev); 2037 pci_fixup_device(pci_fixup_final, dev);
1935 } 2038 }
2039
2040 msi_init();
2041
1936 return 0; 2042 return 0;
1937} 2043}
1938 2044
1939static int __devinit pci_setup(char *str) 2045static int __init pci_setup(char *str)
1940{ 2046{
1941 while (str) { 2047 while (str) {
1942 char *k = strchr(str, ','); 2048 char *k = strchr(str, ',');
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index b205ab866a1d..9de87e9f98f5 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -98,9 +98,11 @@ extern unsigned int pci_pm_d3_delay;
98#ifdef CONFIG_PCI_MSI 98#ifdef CONFIG_PCI_MSI
99void pci_no_msi(void); 99void pci_no_msi(void);
100extern void pci_msi_init_pci_dev(struct pci_dev *dev); 100extern void pci_msi_init_pci_dev(struct pci_dev *dev);
101extern void __devinit msi_init(void);
101#else 102#else
102static inline void pci_no_msi(void) { } 103static inline void pci_no_msi(void) { }
103static inline void pci_msi_init_pci_dev(struct pci_dev *dev) { } 104static inline void pci_msi_init_pci_dev(struct pci_dev *dev) { }
105static inline void msi_init(void) { }
104#endif 106#endif
105 107
106#ifdef CONFIG_PCIEAER 108#ifdef CONFIG_PCIEAER
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index dfc63d01f20a..aac7006949f1 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -252,7 +252,7 @@ static void report_resume(struct pci_dev *dev, void *data)
252 252
253 if (!dev->driver || 253 if (!dev->driver ||
254 !dev->driver->err_handler || 254 !dev->driver->err_handler ||
255 !dev->driver->err_handler->slot_reset) 255 !dev->driver->err_handler->resume)
256 return; 256 return;
257 257
258 err_handler = dev->driver->err_handler; 258 err_handler = dev->driver->err_handler;
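For reference, a hedged sketch of the driver side that this check now keys on; everything prefixed foo_ is hypothetical.

static void foo_error_resume(struct pci_dev *pdev)
{
	/* AER recovery finished; restart I/O that was quiesced earlier */
}

static struct pci_error_handlers foo_err_handler = {
	.resume = foo_error_resume,
};

static struct pci_driver foo_driver = {
	.name		= "foo",
	.err_handler	= &foo_err_handler,
	/* .id_table, .probe, .remove omitted from this sketch */
};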
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 8f63f4c6b85f..9aad608bcf3f 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -16,6 +16,7 @@
16#include <linux/pm.h> 16#include <linux/pm.h>
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/jiffies.h>
19#include <linux/pci-aspm.h> 20#include <linux/pci-aspm.h>
20#include "../pci.h" 21#include "../pci.h"
21 22
@@ -161,11 +162,12 @@ static void pcie_check_clock_pm(struct pci_dev *pdev)
161 */ 162 */
162static void pcie_aspm_configure_common_clock(struct pci_dev *pdev) 163static void pcie_aspm_configure_common_clock(struct pci_dev *pdev)
163{ 164{
164 int pos, child_pos; 165 int pos, child_pos, i = 0;
165 u16 reg16 = 0; 166 u16 reg16 = 0;
166 struct pci_dev *child_dev; 167 struct pci_dev *child_dev;
167 int same_clock = 1; 168 int same_clock = 1;
168 169 unsigned long start_jiffies;
170 u16 child_regs[8], parent_reg;
169 /* 171 /*
170 * all functions of a slot should have the same Slot Clock 172 * all functions of a slot should have the same Slot Clock
171 * Configuration, so just check one function 173 * Configuration, so just check one function
@@ -191,16 +193,19 @@ static void pcie_aspm_configure_common_clock(struct pci_dev *pdev)
191 child_pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP); 193 child_pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP);
192 pci_read_config_word(child_dev, child_pos + PCI_EXP_LNKCTL, 194 pci_read_config_word(child_dev, child_pos + PCI_EXP_LNKCTL,
193 &reg16); 195 &reg16);
196 child_regs[i] = reg16;
194 if (same_clock) 197 if (same_clock)
195 reg16 |= PCI_EXP_LNKCTL_CCC; 198 reg16 |= PCI_EXP_LNKCTL_CCC;
196 else 199 else
197 reg16 &= ~PCI_EXP_LNKCTL_CCC; 200 reg16 &= ~PCI_EXP_LNKCTL_CCC;
198 pci_write_config_word(child_dev, child_pos + PCI_EXP_LNKCTL, 201 pci_write_config_word(child_dev, child_pos + PCI_EXP_LNKCTL,
199 reg16); 202 reg16);
203 i++;
200 } 204 }
201 205
202 /* Configure upstream component */ 206 /* Configure upstream component */
203 pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16); 207 pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
208 parent_reg = reg16;
204 if (same_clock) 209 if (same_clock)
205 reg16 |= PCI_EXP_LNKCTL_CCC; 210 reg16 |= PCI_EXP_LNKCTL_CCC;
206 else 211 else
@@ -212,12 +217,30 @@ static void pcie_aspm_configure_common_clock(struct pci_dev *pdev)
212 pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16); 217 pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
213 218
214 /* Wait for link training end */ 219 /* Wait for link training end */
215 while (1) { 220 /* break out after waiting for 1 second */
221 start_jiffies = jiffies;
222 while ((jiffies - start_jiffies) < HZ) {
216 pci_read_config_word(pdev, pos + PCI_EXP_LNKSTA, &reg16); 223 pci_read_config_word(pdev, pos + PCI_EXP_LNKSTA, &reg16);
217 if (!(reg16 & PCI_EXP_LNKSTA_LT)) 224 if (!(reg16 & PCI_EXP_LNKSTA_LT))
218 break; 225 break;
219 cpu_relax(); 226 cpu_relax();
220 } 227 }
228 /* training failed -> recover */
229 if ((jiffies - start_jiffies) >= HZ) {
230 dev_printk (KERN_ERR, &pdev->dev, "ASPM: Could not configure"
231 " common clock\n");
232 i = 0;
233 list_for_each_entry(child_dev, &pdev->subordinate->devices,
234 bus_list) {
235 child_pos = pci_find_capability(child_dev,
236 PCI_CAP_ID_EXP);
237 pci_write_config_word(child_dev,
238 child_pos + PCI_EXP_LNKCTL,
239 child_regs[i]);
240 i++;
241 }
242 pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, parent_reg);
243 }
221} 244}
222 245
223/* 246/*
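The unbounded polling loop is replaced by a one-second cap. The same jiffies-based pattern, written as a self-contained helper below (illustrative only; foo_wait_link_trained() is not in the tree), is the idiomatic way to bound a busy-wait on a config-space status bit:

static int foo_wait_link_trained(struct pci_dev *pdev, int pos,
				 unsigned long timeout_jiffies)
{
	unsigned long deadline = jiffies + timeout_jiffies;
	u16 reg16;

	for (;;) {
		pci_read_config_word(pdev, pos + PCI_EXP_LNKSTA, &reg16);
		if (!(reg16 & PCI_EXP_LNKSTA_LT))
			return 0;		/* link training finished */
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;	/* caller rolls back its changes */
		cpu_relax();
	}
}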
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index aaaf0a1fed22..003a9b3c293f 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -298,9 +298,6 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
298 child->resource[i] = child->parent->resource[i - 3]; 298 child->resource[i] = child->parent->resource[i - 3];
299 } 299 }
300 300
301 for(i=0; i<3; i++)
302 child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];
303
304 res = child->resource[0]; 301 res = child->resource[0];
305 pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo); 302 pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
306 pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo); 303 pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
@@ -480,19 +477,27 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
480 int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS); 477 int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
481 u32 buses, i, j = 0; 478 u32 buses, i, j = 0;
482 u16 bctl; 479 u16 bctl;
480 int broken = 0;
483 481
484 pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses); 482 pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
485 483
486 dev_dbg(&dev->dev, "scanning behind bridge, config %06x, pass %d\n", 484 dev_dbg(&dev->dev, "scanning behind bridge, config %06x, pass %d\n",
487 buses & 0xffffff, pass); 485 buses & 0xffffff, pass);
488 486
487 /* Check if setup is sensible at all */
488 if (!pass &&
489 ((buses & 0xff) != bus->number || ((buses >> 8) & 0xff) <= bus->number)) {
490 dev_dbg(&dev->dev, "bus configuration invalid, reconfiguring\n");
491 broken = 1;
492 }
493
489 /* Disable MasterAbortMode during probing to avoid reporting 494 /* Disable MasterAbortMode during probing to avoid reporting
490 of bus errors (in some architectures) */ 495 of bus errors (in some architectures) */
491 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl); 496 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
492 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, 497 pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
493 bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT); 498 bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);
494 499
495 if ((buses & 0xffff00) && !pcibios_assign_all_busses() && !is_cardbus) { 500 if ((buses & 0xffff00) && !pcibios_assign_all_busses() && !is_cardbus && !broken) {
496 unsigned int cmax, busnr; 501 unsigned int cmax, busnr;
497 /* 502 /*
498 * Bus already configured by firmware, process it in the first 503 * Bus already configured by firmware, process it in the first
@@ -530,7 +535,7 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
530 * do in the second pass. 535 * do in the second pass.
531 */ 536 */
532 if (!pass) { 537 if (!pass) {
533 if (pcibios_assign_all_busses()) 538 if (pcibios_assign_all_busses() || broken)
534 /* Temporarily disable forwarding of the 539 /* Temporarily disable forwarding of the
535 configuration cycles on all bridges in 540 configuration cycles on all bridges in
536 this bus segment to avoid possible 541 this bus segment to avoid possible
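For readers unfamiliar with the register layout, the sanity test decodes the PCI_PRIMARY_BUS dword like this (a restatement of the check above, using the same variables as pci_scan_bridge()):

	u8 primary   = buses & 0xff;		/* bits  7:0, primary bus number   */
	u8 secondary = (buses >> 8) & 0xff;	/* bits 15:8, secondary bus number */

	/* Firmware setup is bogus if the bridge does not claim to sit on the
	 * bus it was found on, or if its secondary bus is not behind it. */
	if (primary != bus->number || secondary <= bus->number)
		broken = 1;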
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index bbf66ea8fd87..ce0985615133 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -22,6 +22,7 @@
22#include <linux/delay.h> 22#include <linux/delay.h>
23#include <linux/acpi.h> 23#include <linux/acpi.h>
24#include <linux/kallsyms.h> 24#include <linux/kallsyms.h>
25#include <linux/dmi.h>
25#include "pci.h" 26#include "pci.h"
26 27
27int isa_dma_bridge_buggy; 28int isa_dma_bridge_buggy;
@@ -605,27 +606,6 @@ static void __init quirk_ioapic_rmw(struct pci_dev *dev)
605 sis_apic_bug = 1; 606 sis_apic_bug = 1;
606} 607}
607DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_ANY_ID, quirk_ioapic_rmw); 608DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_ANY_ID, quirk_ioapic_rmw);
608
609#define AMD8131_revA0 0x01
610#define AMD8131_revB0 0x11
611#define AMD8131_MISC 0x40
612#define AMD8131_NIOAMODE_BIT 0
613static void quirk_amd_8131_ioapic(struct pci_dev *dev)
614{
615 unsigned char tmp;
616
617 if (nr_ioapics == 0)
618 return;
619
620 if (dev->revision == AMD8131_revA0 || dev->revision == AMD8131_revB0) {
621 dev_info(&dev->dev, "Fixing up AMD8131 IOAPIC mode\n");
622 pci_read_config_byte( dev, AMD8131_MISC, &tmp);
623 tmp &= ~(1 << AMD8131_NIOAMODE_BIT);
624 pci_write_config_byte( dev, AMD8131_MISC, tmp);
625 }
626}
627DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_ioapic);
628DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_ioapic);
629#endif /* CONFIG_X86_IO_APIC */ 609#endif /* CONFIG_X86_IO_APIC */
630 610
631/* 611/*
@@ -1422,6 +1402,155 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2609, quirk_intel_pcie_pm);
1422DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260a, quirk_intel_pcie_pm); 1402DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260a, quirk_intel_pcie_pm);
1423DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260b, quirk_intel_pcie_pm); 1403DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260b, quirk_intel_pcie_pm);
1424 1404
1405#ifdef CONFIG_X86_IO_APIC
1406/*
1407 * Boot interrupts on some chipsets cannot be turned off. For these chipsets,
1408 * remap the original interrupt in the linux kernel to the boot interrupt, so
1409 * that a PCI device's interrupt handler is installed on the boot interrupt
1410 * line instead.
1411 */
1412static void quirk_reroute_to_boot_interrupts_intel(struct pci_dev *dev)
1413{
1414 if (noioapicquirk || noioapicreroute)
1415 return;
1416
1417 dev->irq_reroute_variant = INTEL_IRQ_REROUTE_VARIANT;
1418
1419 printk(KERN_INFO "PCI quirk: reroute interrupts for 0x%04x:0x%04x\n",
1420 dev->vendor, dev->device);
1421 return;
1422}
1423DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_0, quirk_reroute_to_boot_interrupts_intel);
1424DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_1, quirk_reroute_to_boot_interrupts_intel);
1425DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0, quirk_reroute_to_boot_interrupts_intel);
1426DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_reroute_to_boot_interrupts_intel);
1427DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_reroute_to_boot_interrupts_intel);
1428DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_reroute_to_boot_interrupts_intel);
1429DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_0, quirk_reroute_to_boot_interrupts_intel);
1430DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_1, quirk_reroute_to_boot_interrupts_intel);
1431DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_0, quirk_reroute_to_boot_interrupts_intel);
1432DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_1, quirk_reroute_to_boot_interrupts_intel);
1433DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0, quirk_reroute_to_boot_interrupts_intel);
1434DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_reroute_to_boot_interrupts_intel);
1435DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_reroute_to_boot_interrupts_intel);
1436DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_reroute_to_boot_interrupts_intel);
1437DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_0, quirk_reroute_to_boot_interrupts_intel);
1438DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_1, quirk_reroute_to_boot_interrupts_intel);
1439
1440/*
1441 * On some chipsets we can disable the generation of legacy INTx boot
1442 * interrupts.
1443 */
1444
1445/*
1446 * IO-APIC1 on 6300ESB generates boot interrupts, see intel order no
1447 * 300641-004US, section 5.7.3.
1448 */
1449#define INTEL_6300_IOAPIC_ABAR 0x40
1450#define INTEL_6300_DISABLE_BOOT_IRQ (1<<14)
1451
1452static void quirk_disable_intel_boot_interrupt(struct pci_dev *dev)
1453{
1454 u16 pci_config_word;
1455
1456 if (noioapicquirk)
1457 return;
1458
1459 pci_read_config_word(dev, INTEL_6300_IOAPIC_ABAR, &pci_config_word);
1460 pci_config_word |= INTEL_6300_DISABLE_BOOT_IRQ;
1461 pci_write_config_word(dev, INTEL_6300_IOAPIC_ABAR, pci_config_word);
1462
1463 printk(KERN_INFO "disabled boot interrupt on device 0x%04x:0x%04x\n",
1464 dev->vendor, dev->device);
1465}
1466DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt);
1467DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt);
1468
1469/*
1470 * disable boot interrupts on HT-1000
1471 */
1472#define BC_HT1000_FEATURE_REG 0x64
1473#define BC_HT1000_PIC_REGS_ENABLE (1<<0)
1474#define BC_HT1000_MAP_IDX 0xC00
1475#define BC_HT1000_MAP_DATA 0xC01
1476
1477static void quirk_disable_broadcom_boot_interrupt(struct pci_dev *dev)
1478{
1479 u32 pci_config_dword;
1480 u8 irq;
1481
1482 if (noioapicquirk)
1483 return;
1484
1485 pci_read_config_dword(dev, BC_HT1000_FEATURE_REG, &pci_config_dword);
1486 pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword |
1487 BC_HT1000_PIC_REGS_ENABLE);
1488
1489 for (irq = 0x10; irq < 0x10 + 32; irq++) {
1490 outb(irq, BC_HT1000_MAP_IDX);
1491 outb(0x00, BC_HT1000_MAP_DATA);
1492 }
1493
1494 pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword);
1495
1496	printk(KERN_INFO "disabled boot interrupts on PCI device "
1497 "0x%04x:0x%04x\n", dev->vendor, dev->device);
1498}
1499DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt);
1500DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt);
1501
1502/*
1503 * disable boot interrupts on AMD and ATI chipsets
1504 */
1505/*
1506 * NOIOAMODE needs to be disabled to disable "boot interrupts". For AMD 8131
1507 * rev. A0 and B0, NOIOAMODE needs to be disabled anyway to fix IO-APIC mode
1508 * (due to an erratum).
1509 */
1510#define AMD_813X_MISC 0x40
1511#define AMD_813X_NOIOAMODE (1<<0)
1512
1513static void quirk_disable_amd_813x_boot_interrupt(struct pci_dev *dev)
1514{
1515 u32 pci_config_dword;
1516
1517 if (noioapicquirk)
1518 return;
1519
1520 pci_read_config_dword(dev, AMD_813X_MISC, &pci_config_dword);
1521 pci_config_dword &= ~AMD_813X_NOIOAMODE;
1522 pci_write_config_dword(dev, AMD_813X_MISC, pci_config_dword);
1523
1524 printk(KERN_INFO "disabled boot interrupts on PCI device "
1525 "0x%04x:0x%04x\n", dev->vendor, dev->device);
1526}
1527DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
1528DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
1529
1530#define AMD_8111_PCI_IRQ_ROUTING 0x56
1531
1532static void quirk_disable_amd_8111_boot_interrupt(struct pci_dev *dev)
1533{
1534 u16 pci_config_word;
1535
1536 if (noioapicquirk)
1537 return;
1538
1539 pci_read_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, &pci_config_word);
1540 if (!pci_config_word) {
1541 printk(KERN_INFO "boot interrupts on PCI device 0x%04x:0x%04x "
1542 "already disabled\n",
1543 dev->vendor, dev->device);
1544 return;
1545 }
1546 pci_write_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, 0);
1547 printk(KERN_INFO "disabled boot interrupts on PCI device "
1548 "0x%04x:0x%04x\n", dev->vendor, dev->device);
1549}
1550DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt);
1551DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt);
1552#endif /* CONFIG_X86_IO_APIC */
1553
1425/* 1554/*
1426 * Toshiba TC86C001 IDE controller reports the standard 8-byte BAR0 size 1555 * Toshiba TC86C001 IDE controller reports the standard 8-byte BAR0 size
1427 * but the PIO transfers won't work if BAR0 falls at the odd 8 bytes. 1556 * but the PIO transfers won't work if BAR0 falls at the odd 8 bytes.
@@ -1692,24 +1821,24 @@ static void __devinit quirk_brcm_570x_limit_vpd(struct pci_dev *dev)
1692 } 1821 }
1693} 1822}
1694 1823
1695DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 1824DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
1696 PCI_DEVICE_ID_NX2_5706, 1825 PCI_DEVICE_ID_NX2_5706,
1697 quirk_brcm_570x_limit_vpd); 1826 quirk_brcm_570x_limit_vpd);
1698DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 1827DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
1699 PCI_DEVICE_ID_NX2_5706S, 1828 PCI_DEVICE_ID_NX2_5706S,
1700 quirk_brcm_570x_limit_vpd); 1829 quirk_brcm_570x_limit_vpd);
1701DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 1830DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
1702 PCI_DEVICE_ID_NX2_5708, 1831 PCI_DEVICE_ID_NX2_5708,
1703 quirk_brcm_570x_limit_vpd); 1832 quirk_brcm_570x_limit_vpd);
1704DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 1833DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
1705 PCI_DEVICE_ID_NX2_5708S, 1834 PCI_DEVICE_ID_NX2_5708S,
1706 quirk_brcm_570x_limit_vpd); 1835 quirk_brcm_570x_limit_vpd);
1707DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 1836DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
1708 PCI_DEVICE_ID_NX2_5709, 1837 PCI_DEVICE_ID_NX2_5709,
1709 quirk_brcm_570x_limit_vpd); 1838 quirk_brcm_570x_limit_vpd);
1710DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 1839DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
1711 PCI_DEVICE_ID_NX2_5709S, 1840 PCI_DEVICE_ID_NX2_5709S,
1712 quirk_brcm_570x_limit_vpd); 1841 quirk_brcm_570x_limit_vpd);
1713 1842
1714#ifdef CONFIG_PCI_MSI 1843#ifdef CONFIG_PCI_MSI
1715/* Some chipsets do not support MSI. We cannot easily rely on setting 1844/* Some chipsets do not support MSI. We cannot easily rely on setting
@@ -1828,6 +1957,22 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS,
1828 PCI_DEVICE_ID_SERVERWORKS_HT1000_PXB, 1957 PCI_DEVICE_ID_SERVERWORKS_HT1000_PXB,
1829 ht_enable_msi_mapping); 1958 ht_enable_msi_mapping);
1830 1959
1960/* The P5N32-SLI Premium motherboard from Asus has a problem with msi
1961 * for the MCP55 NIC. It is not yet determined whether the msi problem
1962 * also affects other devices. As for now, turn off msi for this device.
1963 */
1964static void __devinit nvenet_msi_disable(struct pci_dev *dev)
1965{
1966 if (dmi_name_in_vendors("P5N32-SLI PREMIUM")) {
1967 dev_info(&dev->dev,
1968 "Disabling msi for MCP55 NIC on P5N32-SLI Premium\n");
1969 dev->no_msi = 1;
1970 }
1971}
1972DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
1973 PCI_DEVICE_ID_NVIDIA_NVENET_15,
1974 nvenet_msi_disable);
1975
1831static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev) 1976static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev)
1832{ 1977{
1833 struct pci_dev *host_bridge; 1978 struct pci_dev *host_bridge;
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
index 1f5f6143f35c..132a78159b60 100644
--- a/drivers/pci/rom.c
+++ b/drivers/pci/rom.c
@@ -100,7 +100,8 @@ size_t pci_get_rom_size(void __iomem *rom, size_t size)
100 * pci_map_rom - map a PCI ROM to kernel space 100 * pci_map_rom - map a PCI ROM to kernel space
101 * @pdev: pointer to pci device struct 101 * @pdev: pointer to pci device struct
102 * @size: pointer to receive size of pci window over ROM 102 * @size: pointer to receive size of pci window over ROM
103 * @return: kernel virtual pointer to image of ROM 103 *
104 * Return: kernel virtual pointer to image of ROM
104 * 105 *
105 * Map a PCI ROM into kernel space. If ROM is boot video ROM, 106 * Map a PCI ROM into kernel space. If ROM is boot video ROM,
106 * the shadow BIOS copy will be returned instead of the 107 * the shadow BIOS copy will be returned instead of the
@@ -167,7 +168,8 @@ void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size)
167 * pci_map_rom_copy - map a PCI ROM to kernel space, create a copy 168 * pci_map_rom_copy - map a PCI ROM to kernel space, create a copy
168 * @pdev: pointer to pci device struct 169 * @pdev: pointer to pci device struct
169 * @size: pointer to receive size of pci window over ROM 170 * @size: pointer to receive size of pci window over ROM
170 * @return: kernel virtual pointer to image of ROM 171 *
172 * Return: kernel virtual pointer to image of ROM
171 * 173 *
172 * Map a PCI ROM into kernel space. If ROM is boot video ROM, 174 * Map a PCI ROM into kernel space. If ROM is boot video ROM,
173 * the shadow BIOS copy will be returned instead of the 175 * the shadow BIOS copy will be returned instead of the
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index 4edfc4731bd4..5af8bd538149 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -166,6 +166,7 @@ struct pci_dev *pci_find_device(unsigned int vendor, unsigned int device,
166{ 166{
167 struct pci_dev *pdev; 167 struct pci_dev *pdev;
168 168
169 pci_dev_get(from);
169 pdev = pci_get_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from); 170 pdev = pci_get_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from);
170 pci_dev_put(pdev); 171 pci_dev_put(pdev);
171 return pdev; 172 return pdev;
@@ -270,12 +271,8 @@ static struct pci_dev *pci_get_dev_by_id(const struct pci_device_id *id,
270 struct pci_dev *pdev = NULL; 271 struct pci_dev *pdev = NULL;
271 272
272 WARN_ON(in_interrupt()); 273 WARN_ON(in_interrupt());
273 if (from) { 274 if (from)
274 /* FIXME 275 dev_start = &from->dev;
275 * take the cast off, when bus_find_device is made const.
276 */
277 dev_start = (struct device *)&from->dev;
278 }
279 dev = bus_find_device(&pci_bus_type, dev_start, (void *)id, 276 dev = bus_find_device(&pci_bus_type, dev_start, (void *)id,
280 match_pci_dev_by_id); 277 match_pci_dev_by_id);
281 if (dev) 278 if (dev)
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index 0c6db03698ea..5a8ccb4f604d 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -78,18 +78,100 @@ static struct kobj_type pci_slot_ktype = {
78 .default_attrs = pci_slot_default_attrs, 78 .default_attrs = pci_slot_default_attrs,
79}; 79};
80 80
81static char *make_slot_name(const char *name)
82{
83 char *new_name;
84 int len, max, dup;
85
86 new_name = kstrdup(name, GFP_KERNEL);
87 if (!new_name)
88 return NULL;
89
90 /*
91 * Make sure we hit the realloc case the first time through the
92 * loop. 'len' will be strlen(name) + 3 at that point which is
93 * enough space for "name-X" and the trailing NUL.
94 */
95 len = strlen(name) + 2;
96 max = 1;
97 dup = 1;
98
99 for (;;) {
100 struct kobject *dup_slot;
101 dup_slot = kset_find_obj(pci_slots_kset, new_name);
102 if (!dup_slot)
103 break;
104 kobject_put(dup_slot);
105 if (dup == max) {
106 len++;
107 max *= 10;
108 kfree(new_name);
109 new_name = kmalloc(len, GFP_KERNEL);
110 if (!new_name)
111 break;
112 }
113 sprintf(new_name, "%s-%d", name, dup++);
114 }
115
116 return new_name;
117}
118
119static int rename_slot(struct pci_slot *slot, const char *name)
120{
121 int result = 0;
122 char *slot_name;
123
124 if (strcmp(pci_slot_name(slot), name) == 0)
125 return result;
126
127 slot_name = make_slot_name(name);
128 if (!slot_name)
129 return -ENOMEM;
130
131 result = kobject_rename(&slot->kobj, slot_name);
132 kfree(slot_name);
133
134 return result;
135}
136
137static struct pci_slot *get_slot(struct pci_bus *parent, int slot_nr)
138{
139 struct pci_slot *slot;
140 /*
141 * We already hold pci_bus_sem so don't worry
142 */
143 list_for_each_entry(slot, &parent->slots, list)
144 if (slot->number == slot_nr) {
145 kobject_get(&slot->kobj);
146 return slot;
147 }
148
149 return NULL;
150}
151
81/** 152/**
82 * pci_create_slot - create or increment refcount for physical PCI slot 153 * pci_create_slot - create or increment refcount for physical PCI slot
83 * @parent: struct pci_bus of parent bridge 154 * @parent: struct pci_bus of parent bridge
84 * @slot_nr: PCI_SLOT(pci_dev->devfn) or -1 for placeholder 155 * @slot_nr: PCI_SLOT(pci_dev->devfn) or -1 for placeholder
85 * @name: user visible string presented in /sys/bus/pci/slots/<name> 156 * @name: user visible string presented in /sys/bus/pci/slots/<name>
157 * @hotplug: set if caller is hotplug driver, NULL otherwise
86 * 158 *
87 * PCI slots have first class attributes such as address, speed, width, 159 * PCI slots have first class attributes such as address, speed, width,
88 * and a &struct pci_slot is used to manage them. This interface will 160 * and a &struct pci_slot is used to manage them. This interface will
89 * either return a new &struct pci_slot to the caller, or if the pci_slot 161 * either return a new &struct pci_slot to the caller, or if the pci_slot
90 * already exists, its refcount will be incremented. 162 * already exists, its refcount will be incremented.
91 * 163 *
92 * Slots are uniquely identified by a @pci_bus, @slot_nr, @name tuple. 164 * Slots are uniquely identified by a @pci_bus, @slot_nr tuple.
165 *
166 * There are known platforms with broken firmware that assign the same
167 * name to multiple slots. Workaround these broken platforms by renaming
168 * the slots on behalf of the caller. If firmware assigns name N to
169 * multiple slots:
170 *
171 * The first slot is assigned N
172 * The second slot is assigned N-1
173 * The third slot is assigned N-2
174 * etc.
93 * 175 *
94 * Placeholder slots: 176 * Placeholder slots:
95 * In most cases, @pci_bus, @slot_nr will be sufficient to uniquely identify 177 * In most cases, @pci_bus, @slot_nr will be sufficient to uniquely identify
@@ -98,61 +180,67 @@ static struct kobj_type pci_slot_ktype = {
98 * the slot. In this scenario, the caller may pass -1 for @slot_nr. 180 * the slot. In this scenario, the caller may pass -1 for @slot_nr.
99 * 181 *
100 * The following semantics are imposed when the caller passes @slot_nr == 182 * The following semantics are imposed when the caller passes @slot_nr ==
101 * -1. First, the check for existing %struct pci_slot is skipped, as the 183 * -1. First, we no longer check for an existing %struct pci_slot, as there
102 * caller may know about several unpopulated slots on a given %struct 184 * may be many slots with @slot_nr of -1. The other change in semantics is
103 * pci_bus, and each slot would have a @slot_nr of -1. Uniqueness for
104 * these slots is then determined by the @name parameter. We expect
105 * kobject_init_and_add() to warn us if the caller attempts to create
106 * multiple slots with the same name. The other change in semantics is
107 * user-visible, which is the 'address' parameter presented in sysfs will 185 * user-visible, which is the 'address' parameter presented in sysfs will
108 * consist solely of a dddd:bb tuple, where dddd is the PCI domain of the 186 * consist solely of a dddd:bb tuple, where dddd is the PCI domain of the
109 * %struct pci_bus and bb is the bus number. In other words, the devfn of 187 * %struct pci_bus and bb is the bus number. In other words, the devfn of
110 * the 'placeholder' slot will not be displayed. 188 * the 'placeholder' slot will not be displayed.
111 */ 189 */
112
113struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr, 190struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
114 const char *name) 191 const char *name,
192 struct hotplug_slot *hotplug)
115{ 193{
116 struct pci_dev *dev; 194 struct pci_dev *dev;
117 struct pci_slot *slot; 195 struct pci_slot *slot;
118 int err; 196 int err = 0;
197 char *slot_name = NULL;
119 198
120 down_write(&pci_bus_sem); 199 down_write(&pci_bus_sem);
121 200
122 if (slot_nr == -1) 201 if (slot_nr == -1)
123 goto placeholder; 202 goto placeholder;
124 203
125 /* If we've already created this slot, bump refcount and return. */ 204 /*
126 list_for_each_entry(slot, &parent->slots, list) { 205 * Hotplug drivers are allowed to rename an existing slot,
127 if (slot->number == slot_nr) { 206 * but only if not already claimed.
128 kobject_get(&slot->kobj); 207 */
129 pr_debug("%s: inc refcount to %d on %04x:%02x:%02x\n", 208 slot = get_slot(parent, slot_nr);
130 __func__, 209 if (slot) {
131 atomic_read(&slot->kobj.kref.refcount), 210 if (hotplug) {
132 pci_domain_nr(parent), parent->number, 211 if ((err = slot->hotplug ? -EBUSY : 0)
133 slot_nr); 212 || (err = rename_slot(slot, name))) {
134 goto out; 213 kobject_put(&slot->kobj);
214 slot = NULL;
215 goto err;
216 }
135 } 217 }
218 goto out;
136 } 219 }
137 220
138placeholder: 221placeholder:
139 slot = kzalloc(sizeof(*slot), GFP_KERNEL); 222 slot = kzalloc(sizeof(*slot), GFP_KERNEL);
140 if (!slot) { 223 if (!slot) {
141 slot = ERR_PTR(-ENOMEM); 224 err = -ENOMEM;
142 goto out; 225 goto err;
143 } 226 }
144 227
145 slot->bus = parent; 228 slot->bus = parent;
146 slot->number = slot_nr; 229 slot->number = slot_nr;
147 230
148 slot->kobj.kset = pci_slots_kset; 231 slot->kobj.kset = pci_slots_kset;
149 err = kobject_init_and_add(&slot->kobj, &pci_slot_ktype, NULL, 232
150 "%s", name); 233 slot_name = make_slot_name(name);
151 if (err) { 234 if (!slot_name) {
152 printk(KERN_ERR "Unable to register kobject %s\n", name); 235 err = -ENOMEM;
153 goto err; 236 goto err;
154 } 237 }
155 238
239 err = kobject_init_and_add(&slot->kobj, &pci_slot_ktype, NULL,
240 "%s", slot_name);
241 if (err)
242 goto err;
243
156 INIT_LIST_HEAD(&slot->list); 244 INIT_LIST_HEAD(&slot->list);
157 list_add(&slot->list, &parent->slots); 245 list_add(&slot->list, &parent->slots);
158 246
@@ -164,10 +252,11 @@ placeholder:
164 pr_debug("%s: created pci_slot on %04x:%02x:%02x\n", 252 pr_debug("%s: created pci_slot on %04x:%02x:%02x\n",
165 __func__, pci_domain_nr(parent), parent->number, slot_nr); 253 __func__, pci_domain_nr(parent), parent->number, slot_nr);
166 254
167 out: 255out:
256 kfree(slot_name);
168 up_write(&pci_bus_sem); 257 up_write(&pci_bus_sem);
169 return slot; 258 return slot;
170 err: 259err:
171 kfree(slot); 260 kfree(slot);
172 slot = ERR_PTR(err); 261 slot = ERR_PTR(err);
173 goto out; 262 goto out;
@@ -175,7 +264,7 @@ placeholder:
175EXPORT_SYMBOL_GPL(pci_create_slot); 264EXPORT_SYMBOL_GPL(pci_create_slot);
176 265
177/** 266/**
178 * pci_update_slot_number - update %struct pci_slot -> number 267 * pci_renumber_slot - update %struct pci_slot -> number
179 * @slot - %struct pci_slot to update 268 * @slot - %struct pci_slot to update
180 * @slot_nr - new number for slot 269 * @slot_nr - new number for slot
181 * 270 *
@@ -183,27 +272,22 @@ EXPORT_SYMBOL_GPL(pci_create_slot);
183 * created a placeholder slot in pci_create_slot() by passing a -1 as 272 * created a placeholder slot in pci_create_slot() by passing a -1 as
184 * slot_nr, to update their %struct pci_slot with the correct @slot_nr. 273 * slot_nr, to update their %struct pci_slot with the correct @slot_nr.
185 */ 274 */
186 275void pci_renumber_slot(struct pci_slot *slot, int slot_nr)
187void pci_update_slot_number(struct pci_slot *slot, int slot_nr)
188{ 276{
189 int name_count = 0;
190 struct pci_slot *tmp; 277 struct pci_slot *tmp;
191 278
192 down_write(&pci_bus_sem); 279 down_write(&pci_bus_sem);
193 280
194 list_for_each_entry(tmp, &slot->bus->slots, list) { 281 list_for_each_entry(tmp, &slot->bus->slots, list) {
195 WARN_ON(tmp->number == slot_nr); 282 WARN_ON(tmp->number == slot_nr);
196 if (!strcmp(kobject_name(&tmp->kobj), kobject_name(&slot->kobj))) 283 goto out;
197 name_count++;
198 } 284 }
199 285
200 if (name_count > 1)
201 printk(KERN_WARNING "pci_update_slot_number found %d slots with the same name: %s\n", name_count, kobject_name(&slot->kobj));
202
203 slot->number = slot_nr; 286 slot->number = slot_nr;
287out:
204 up_write(&pci_bus_sem); 288 up_write(&pci_bus_sem);
205} 289}
206EXPORT_SYMBOL_GPL(pci_update_slot_number); 290EXPORT_SYMBOL_GPL(pci_renumber_slot);
207 291
208/** 292/**
209 * pci_destroy_slot - decrement refcount for physical PCI slot 293 * pci_destroy_slot - decrement refcount for physical PCI slot
@@ -213,7 +297,6 @@ EXPORT_SYMBOL_GPL(pci_update_slot_number);
213 * just call kobject_put on its kobj and let our release methods do the 297 * just call kobject_put on its kobj and let our release methods do the
214 * rest. 298 * rest.
215 */ 299 */
216
217void pci_destroy_slot(struct pci_slot *slot) 300void pci_destroy_slot(struct pci_slot *slot)
218{ 301{
219 pr_debug("%s: dec refcount to %d on %04x:%02x:%02x\n", __func__, 302 pr_debug("%s: dec refcount to %d on %04x:%02x:%02x\n", __func__,
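A hedged sketch of the hotplug-driver side of the reworked interface (foo_ names are hypothetical): the new @hotplug argument lets a hotplug driver claim, and if necessary rename, a slot that firmware already created, while placeholder slots created with slot_nr == -1 are fixed up later via pci_renumber_slot().

static struct pci_slot *foo_claim_slot(struct pci_bus *parent,
				       struct hotplug_slot *hp)
{
	struct pci_slot *slot;

	/* Claim slot 3 on @parent; if firmware already registered a slot
	 * with this number, it is renamed to "foo-3" (or "foo-3-1", ...
	 * on a name collision). */
	slot = pci_create_slot(parent, 3, "foo-3", hp);
	if (IS_ERR(slot))
		return slot;	/* e.g. -EBUSY if another hotplug driver owns it */

	/* A placeholder created with slot_nr == -1 would instead be updated
	 * later with pci_renumber_slot(slot, real_nr); drop the reference
	 * with pci_destroy_slot(slot) on teardown. */
	return slot;
}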
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index f57eeae3830a..222904411a13 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -188,10 +188,6 @@ config PCMCIA_M8XX
188 188
189 This driver is also available as a module called m8xx_pcmcia. 189 This driver is also available as a module called m8xx_pcmcia.
190 190
191config HD64465_PCMCIA
192 tristate "HD64465 host bridge support"
193 depends on HD64465 && PCMCIA
194
195config PCMCIA_AU1X00 191config PCMCIA_AU1X00
196 tristate "Au1x00 pcmcia support" 192 tristate "Au1x00 pcmcia support"
197 depends on SOC_AU1X00 && PCMCIA 193 depends on SOC_AU1X00 && PCMCIA
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index b46c60b72708..238629ad7f7c 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -22,7 +22,6 @@ obj-$(CONFIG_I82365) += i82365.o
22obj-$(CONFIG_I82092) += i82092.o 22obj-$(CONFIG_I82092) += i82092.o
23obj-$(CONFIG_TCIC) += tcic.o 23obj-$(CONFIG_TCIC) += tcic.o
24obj-$(CONFIG_PCMCIA_M8XX) += m8xx_pcmcia.o 24obj-$(CONFIG_PCMCIA_M8XX) += m8xx_pcmcia.o
25obj-$(CONFIG_HD64465_PCMCIA) += hd64465_ss.o
26obj-$(CONFIG_PCMCIA_SA1100) += sa11xx_core.o sa1100_cs.o 25obj-$(CONFIG_PCMCIA_SA1100) += sa11xx_core.o sa1100_cs.o
27obj-$(CONFIG_PCMCIA_SA1111) += sa11xx_core.o sa1111_cs.o 26obj-$(CONFIG_PCMCIA_SA1111) += sa11xx_core.o sa1111_cs.o
28obj-$(CONFIG_M32R_PCC) += m32r_pcc.o 27obj-$(CONFIG_M32R_PCC) += m32r_pcc.o
@@ -70,7 +69,7 @@ pxa2xx-obj-$(CONFIG_MACH_MAINSTONE) += pxa2xx_mainstone.o
70pxa2xx-obj-$(CONFIG_PXA_SHARPSL) += pxa2xx_sharpsl.o 69pxa2xx-obj-$(CONFIG_PXA_SHARPSL) += pxa2xx_sharpsl.o
71pxa2xx-obj-$(CONFIG_MACH_ARMCORE) += pxa2xx_cm_x2xx_cs.o 70pxa2xx-obj-$(CONFIG_MACH_ARMCORE) += pxa2xx_cm_x2xx_cs.o
72pxa2xx-obj-$(CONFIG_ARCH_VIPER) += pxa2xx_viper.o 71pxa2xx-obj-$(CONFIG_ARCH_VIPER) += pxa2xx_viper.o
73pxa2xx-obj-$(CONFIG_TRIZEPS_PCMCIA) += pxa2xx_trizeps.o 72pxa2xx-obj-$(CONFIG_TRIZEPS_PCMCIA) += pxa2xx_trizeps4.o
74pxa2xx-obj-$(CONFIG_MACH_PALMTX) += pxa2xx_palmtx.o 73pxa2xx-obj-$(CONFIG_MACH_PALMTX) += pxa2xx_palmtx.o
75pxa2xx-obj-$(CONFIG_MACH_PALMLD) += pxa2xx_palmld.o 74pxa2xx-obj-$(CONFIG_MACH_PALMLD) += pxa2xx_palmld.o
76 75
diff --git a/drivers/pcmcia/bfin_cf_pcmcia.c b/drivers/pcmcia/bfin_cf_pcmcia.c
index bb7338863fb9..b59d4115d20f 100644
--- a/drivers/pcmcia/bfin_cf_pcmcia.c
+++ b/drivers/pcmcia/bfin_cf_pcmcia.c
@@ -334,6 +334,6 @@ static void __exit bfin_cf_exit(void)
334module_init(bfin_cf_init); 334module_init(bfin_cf_init);
335module_exit(bfin_cf_exit); 335module_exit(bfin_cf_exit);
336 336
337MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>") 337MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
338MODULE_DESCRIPTION("BFIN CF/PCMCIA Driver"); 338MODULE_DESCRIPTION("BFIN CF/PCMCIA Driver");
339MODULE_LICENSE("GPL"); 339MODULE_LICENSE("GPL");
diff --git a/drivers/pcmcia/cistpl.c b/drivers/pcmcia/cistpl.c
index dcce9f5d8465..4a110b7b2673 100644
--- a/drivers/pcmcia/cistpl.c
+++ b/drivers/pcmcia/cistpl.c
@@ -351,10 +351,11 @@ int verify_cis_cache(struct pcmcia_socket *s)
351 char *buf; 351 char *buf;
352 352
353 buf = kmalloc(256, GFP_KERNEL); 353 buf = kmalloc(256, GFP_KERNEL);
354 if (buf == NULL) 354 if (buf == NULL) {
355 dev_printk(KERN_WARNING, &s->dev, 355 dev_printk(KERN_WARNING, &s->dev,
356 "no memory for verifying CIS\n"); 356 "no memory for verifying CIS\n");
357 return -ENOMEM; 357 return -ENOMEM;
358 }
358 list_for_each_entry(cis, &s->cis_cache, node) { 359 list_for_each_entry(cis, &s->cis_cache, node) {
359 int len = cis->len; 360 int len = cis->len;
360 361
diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c
index c68c5d338285..0660ad182589 100644
--- a/drivers/pcmcia/cs.c
+++ b/drivers/pcmcia/cs.c
@@ -186,12 +186,6 @@ int pcmcia_register_socket(struct pcmcia_socket *socket)
186 186
187 spin_lock_init(&socket->lock); 187 spin_lock_init(&socket->lock);
188 188
189 if (socket->resource_ops->init) {
190 ret = socket->resource_ops->init(socket);
191 if (ret)
192 return (ret);
193 }
194
195 /* try to obtain a socket number [yes, it gets ugly if we 189 /* try to obtain a socket number [yes, it gets ugly if we
196 * register more than 2^sizeof(unsigned int) pcmcia 190 * register more than 2^sizeof(unsigned int) pcmcia
197 * sockets... but the socket number is deprecated 191 * sockets... but the socket number is deprecated
@@ -226,7 +220,7 @@ int pcmcia_register_socket(struct pcmcia_socket *socket)
226 /* set proper values in socket->dev */ 220 /* set proper values in socket->dev */
227 dev_set_drvdata(&socket->dev, socket); 221 dev_set_drvdata(&socket->dev, socket);
228 socket->dev.class = &pcmcia_socket_class; 222 socket->dev.class = &pcmcia_socket_class;
229 snprintf(socket->dev.bus_id, BUS_ID_SIZE, "pcmcia_socket%u", socket->sock); 223 dev_set_name(&socket->dev, "pcmcia_socket%u", socket->sock);
230 224
231 /* base address = 0, map = 0 */ 225 /* base address = 0, map = 0 */
232 socket->cis_mem.flags = 0; 226 socket->cis_mem.flags = 0;
@@ -239,6 +233,12 @@ int pcmcia_register_socket(struct pcmcia_socket *socket)
239 mutex_init(&socket->skt_mutex); 233 mutex_init(&socket->skt_mutex);
240 spin_lock_init(&socket->thread_lock); 234 spin_lock_init(&socket->thread_lock);
241 235
236 if (socket->resource_ops->init) {
237 ret = socket->resource_ops->init(socket);
238 if (ret)
239 goto err;
240 }
241
242 tsk = kthread_run(pccardd, socket, "pccardd"); 242 tsk = kthread_run(pccardd, socket, "pccardd");
243 if (IS_ERR(tsk)) { 243 if (IS_ERR(tsk)) {
244 ret = PTR_ERR(tsk); 244 ret = PTR_ERR(tsk);
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index 795660255490..47cab31ff6e4 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -622,7 +622,6 @@ struct pcmcia_device * pcmcia_device_add(struct pcmcia_socket *s, unsigned int f
622{ 622{
623 struct pcmcia_device *p_dev, *tmp_dev; 623 struct pcmcia_device *p_dev, *tmp_dev;
624 unsigned long flags; 624 unsigned long flags;
625 int bus_id_len;
626 625
627 s = pcmcia_get_socket(s); 626 s = pcmcia_get_socket(s);
628 if (!s) 627 if (!s)
@@ -650,12 +649,12 @@ struct pcmcia_device * pcmcia_device_add(struct pcmcia_socket *s, unsigned int f
650 /* by default don't allow DMA */ 649 /* by default don't allow DMA */
651 p_dev->dma_mask = DMA_MASK_NONE; 650 p_dev->dma_mask = DMA_MASK_NONE;
652 p_dev->dev.dma_mask = &p_dev->dma_mask; 651 p_dev->dev.dma_mask = &p_dev->dma_mask;
653 bus_id_len = sprintf (p_dev->dev.bus_id, "%d.%d", p_dev->socket->sock, p_dev->device_no); 652 dev_set_name(&p_dev->dev, "%d.%d", p_dev->socket->sock, p_dev->device_no);
654 653 if (!dev_name(&p_dev->dev))
655 p_dev->devname = kmalloc(6 + bus_id_len + 1, GFP_KERNEL); 654 goto err_free;
655 p_dev->devname = kasprintf(GFP_KERNEL, "pcmcia%s", dev_name(&p_dev->dev));
656 if (!p_dev->devname) 656 if (!p_dev->devname)
657 goto err_free; 657 goto err_free;
658 sprintf (p_dev->devname, "pcmcia%s", p_dev->dev.bus_id);
659 ds_dev_dbg(3, &p_dev->dev, "devname is %s\n", p_dev->devname); 658 ds_dev_dbg(3, &p_dev->dev, "devname is %s\n", p_dev->devname);
660 659
661 spin_lock_irqsave(&pcmcia_dev_list_lock, flags); 660 spin_lock_irqsave(&pcmcia_dev_list_lock, flags);
@@ -668,6 +667,8 @@ struct pcmcia_device * pcmcia_device_add(struct pcmcia_socket *s, unsigned int f
668 list_for_each_entry(tmp_dev, &s->devices_list, socket_device_list) 667 list_for_each_entry(tmp_dev, &s->devices_list, socket_device_list)
669 if (p_dev->func == tmp_dev->func) { 668 if (p_dev->func == tmp_dev->func) {
670 p_dev->function_config = tmp_dev->function_config; 669 p_dev->function_config = tmp_dev->function_config;
670 p_dev->io = tmp_dev->io;
671 p_dev->irq = tmp_dev->irq;
671 kref_get(&p_dev->function_config->ref); 672 kref_get(&p_dev->function_config->ref);
672 } 673 }
673 674
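The hunks above are part of the tree-wide move away from the fixed-size struct device bus_id buffer. A minimal sketch of the replacement pattern, not taken from this patch (the function name, device pointer and name format are placeholders), assuming dev_set_name()/dev_name() behave as in this kernel series:

#include <linux/device.h>
#include <linux/kernel.h>

static int example_name_socket_device(struct device *dev, unsigned int sock)
{
	int ret;

	/* was: snprintf(dev->bus_id, BUS_ID_SIZE, "pcmcia_socket%u", sock); */
	ret = dev_set_name(dev, "pcmcia_socket%u", sock);
	if (ret)
		return ret;	/* dev_set_name() can fail, so propagate */

	/* read the name back through the accessor instead of poking bus_id */
	pr_debug("registered %s\n", dev_name(dev));
	return 0;
}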
diff --git a/drivers/pcmcia/hd64465_ss.c b/drivers/pcmcia/hd64465_ss.c
deleted file mode 100644
index 9ef69cdb3183..000000000000
--- a/drivers/pcmcia/hd64465_ss.c
+++ /dev/null
@@ -1,939 +0,0 @@
1/*
2 * Device driver for the PCMCIA controller module of the
3 * Hitachi HD64465 handheld companion chip.
4 *
5 * Note that the HD64465 provides a very thin PCMCIA host bridge
6 * layer, requiring a lot of the work of supporting cards to be
7 * performed by the processor. For example: mapping of card
8 * interrupts to processor IRQs is done by IRQ demuxing software;
9 * IO and memory mappings are fixed; setting voltages according
10 * to card Voltage Select pins etc is done in software.
11 *
12 * Note also that this driver uses only the simple, fixed,
13 * 16MB, 16-bit wide mappings to PCMCIA spaces defined by the
14 * HD64465. Larger mappings, smaller mappings, or mappings of
15 * different width to the same socket, are all possible only by
16 * involving the SH7750's MMU, which is considered unnecessary here.
17 * The downside is that it may be possible for some drivers to
18 * break because they need or expect 8-bit mappings.
19 *
20 * This driver currently supports only the following configuration:
21 * SH7750 CPU, HD64465, TPS2206 voltage control chip.
22 *
23 * by Greg Banks <gbanks@pocketpenguins.com>
24 * (c) 2000 PocketPenguins Inc
25 */
26
27#include <linux/types.h>
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/string.h>
31#include <linux/kernel.h>
32#include <linux/ioport.h>
33#include <linux/mm.h>
34#include <linux/vmalloc.h>
35#include <asm/errno.h>
36#include <linux/irq.h>
37#include <linux/interrupt.h>
38#include <linux/platform_device.h>
39
40#include <asm/io.h>
41#include <asm/hd64465/hd64465.h>
42#include <asm/hd64465/io.h>
43
44#include <pcmcia/cs_types.h>
45#include <pcmcia/cs.h>
46#include <pcmcia/cistpl.h>
47#include <pcmcia/ds.h>
48#include <pcmcia/ss.h>
49
50#define MODNAME "hd64465_ss"
51
52/* #define HD64465_DEBUG 1 */
53
54#if HD64465_DEBUG
55#define DPRINTK(args...) printk(MODNAME ": " args)
56#else
57#define DPRINTK(args...)
58#endif
59
60extern int hd64465_io_debug;
61extern void * p3_ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags);
62extern void p3_iounmap(void *addr);
63
64/*============================================================*/
65
66#define HS_IO_MAP_SIZE (64*1024)
67
68typedef struct hs_socket_t
69{
70 unsigned int number;
71 u_int irq;
72 u_long mem_base;
73 void *io_base;
74 u_long mem_length;
75 u_int ctrl_base;
76 socket_state_t state;
77 pccard_io_map io_maps[MAX_IO_WIN];
78 pccard_mem_map mem_maps[MAX_WIN];
79 struct pcmcia_socket socket;
80} hs_socket_t;
81
82
83
84#define HS_MAX_SOCKETS 2
85static hs_socket_t hs_sockets[HS_MAX_SOCKETS];
86
87#define hs_in(sp, r) inb((sp)->ctrl_base + (r))
88#define hs_out(sp, v, r) outb(v, (sp)->ctrl_base + (r))
89
90
91/* translate a boolean value to a bit in a register */
92#define bool_to_regbit(sp, r, bi, bo) \
93 do { \
94 unsigned short v = hs_in(sp, r); \
95 if (bo) \
96 v |= (bi); \
97 else \
98 v &= ~(bi); \
99 hs_out(sp, v, r); \
100 } while(0)
101
102/* register offsets from HD64465_REG_PCC[01]ISR */
103#define ISR 0x0
104#define GCR 0x2
105#define CSCR 0x4
106#define CSCIER 0x6
107#define SCR 0x8
108
109
110/* Mask and values for CSCIER register */
111#define IER_MASK 0x80
112#define IER_ON 0x3f /* interrupts on */
113#define IER_OFF 0x00 /* interrupts off */
114
115/*============================================================*/
116
117#if HD64465_DEBUG > 10
118
119static void cis_hex_dump(const unsigned char *x, int len)
120{
121 int i;
122
123 for (i=0 ; i<len ; i++)
124 {
125 if (!(i & 0xf))
126 printk("\n%08x", (unsigned)(x + i));
127 printk(" %02x", *(volatile unsigned short*)x);
128 x += 2;
129 }
130 printk("\n");
131}
132
133#endif
134/*============================================================*/
135
136/*
137 * This code helps create the illusion that the IREQ line from
138 * the PC card is mapped to one of the CPU's IRQ lines by the
139 * host bridge hardware (which is how every host bridge *except*
140 * the HD64465 works). In particular, it supports enabling
141 * and disabling the IREQ line by code which knows nothing
142 * about the host bridge (e.g. device drivers, IDE code) using
143 * the request_irq(), free_irq(), probe_irq_on() and probe_irq_off()
144 * functions. Also, it supports sharing the mapped IRQ with
145 * real hardware IRQs from the -IRL0-3 lines.
146 */
147
148#define HS_NUM_MAPPED_IRQS 16 /* Limitation of the PCMCIA code */
149static struct
150{
151 /* index is mapped irq number */
152 hs_socket_t *sock;
153 hw_irq_controller *old_handler;
154} hs_mapped_irq[HS_NUM_MAPPED_IRQS];
155
156static void hs_socket_enable_ireq(hs_socket_t *sp)
157{
158 unsigned short cscier;
159
160 DPRINTK("hs_socket_enable_ireq(sock=%d)\n", sp->number);
161
162 cscier = hs_in(sp, CSCIER);
163 cscier &= ~HD64465_PCCCSCIER_PIREQE_MASK;
164 cscier |= HD64465_PCCCSCIER_PIREQE_LEVEL;
165 hs_out(sp, cscier, CSCIER);
166}
167
168static void hs_socket_disable_ireq(hs_socket_t *sp)
169{
170 unsigned short cscier;
171
172 DPRINTK("hs_socket_disable_ireq(sock=%d)\n", sp->number);
173
174 cscier = hs_in(sp, CSCIER);
175 cscier &= ~HD64465_PCCCSCIER_PIREQE_MASK;
176 hs_out(sp, cscier, CSCIER);
177}
178
179static unsigned int hs_startup_irq(unsigned int irq)
180{
181 hs_socket_enable_ireq(hs_mapped_irq[irq].sock);
182 hs_mapped_irq[irq].old_handler->startup(irq);
183 return 0;
184}
185
186static void hs_shutdown_irq(unsigned int irq)
187{
188 hs_socket_disable_ireq(hs_mapped_irq[irq].sock);
189 hs_mapped_irq[irq].old_handler->shutdown(irq);
190}
191
192static void hs_enable_irq(unsigned int irq)
193{
194 hs_socket_enable_ireq(hs_mapped_irq[irq].sock);
195 hs_mapped_irq[irq].old_handler->enable(irq);
196}
197
198static void hs_disable_irq(unsigned int irq)
199{
200 hs_socket_disable_ireq(hs_mapped_irq[irq].sock);
201 hs_mapped_irq[irq].old_handler->disable(irq);
202}
203
204extern struct hw_interrupt_type no_irq_type;
205
206static void hs_mask_and_ack_irq(unsigned int irq)
207{
208 hs_socket_disable_ireq(hs_mapped_irq[irq].sock);
209 /* ack_none() spuriously complains about an unexpected IRQ */
210 if (hs_mapped_irq[irq].old_handler != &no_irq_type)
211 hs_mapped_irq[irq].old_handler->ack(irq);
212}
213
214static void hs_end_irq(unsigned int irq)
215{
216 hs_socket_enable_ireq(hs_mapped_irq[irq].sock);
217 hs_mapped_irq[irq].old_handler->end(irq);
218}
219
220
221static struct hw_interrupt_type hd64465_ss_irq_type = {
222 .typename = "PCMCIA-IRQ",
223 .startup = hs_startup_irq,
224 .shutdown = hs_shutdown_irq,
225 .enable = hs_enable_irq,
226 .disable = hs_disable_irq,
227 .ack = hs_mask_and_ack_irq,
228 .end = hs_end_irq
229};
230
231/*
232 * This function should only ever be called with interrupts disabled.
233 */
234static void hs_map_irq(hs_socket_t *sp, unsigned int irq)
235{
236 struct irq_desc *desc;
237
238 DPRINTK("hs_map_irq(sock=%d irq=%d)\n", sp->number, irq);
239
240 if (irq >= HS_NUM_MAPPED_IRQS)
241 return;
242
243 desc = irq_to_desc(irq);
244 hs_mapped_irq[irq].sock = sp;
245 /* insert ourselves as the irq controller */
246 hs_mapped_irq[irq].old_handler = desc->chip;
247 desc->chip = &hd64465_ss_irq_type;
248}
249
250
251/*
252 * This function should only ever be called with interrupts disabled.
253 */
254static void hs_unmap_irq(hs_socket_t *sp, unsigned int irq)
255{
256 struct irq_desc *desc;
257
258 DPRINTK("hs_unmap_irq(sock=%d irq=%d)\n", sp->number, irq);
259
260 if (irq >= HS_NUM_MAPPED_IRQS)
261 return;
262
263 desc = irq_to_desc(irq);
264 /* restore the original irq controller */
265 desc->chip = hs_mapped_irq[irq].old_handler;
266}
267
268/*============================================================*/
269
270
271/*
272 * Set Vpp and Vcc (in tenths of a Volt). Does not
273 * support the hi-Z state.
274 *
275 * Note, this assumes the board uses a TPS2206 chip to control
276 * the Vcc and Vpp voltages to the hs_sockets. If your board
277 * uses the MIC2563 (also supported by the HD64465) then you
278 * will have to modify this function.
279 */
280 /* 0V 3.3V 5.5V */
281static const u_char hs_tps2206_avcc[3] = { 0x00, 0x04, 0x08 };
282static const u_char hs_tps2206_bvcc[3] = { 0x00, 0x80, 0x40 };
283
284static int hs_set_voltages(hs_socket_t *sp, int Vcc, int Vpp)
285{
286 u_int psr;
287 u_int vcci = 0;
288 u_int sock = sp->number;
289
290 DPRINTK("hs_set_voltage(%d, %d, %d)\n", sock, Vcc, Vpp);
291
292 switch (Vcc)
293 {
294 case 0: vcci = 0; break;
295 case 33: vcci = 1; break;
296 case 50: vcci = 2; break;
297 default: return 0;
298 }
299
300 /* Note: Vpp = 120 not supported -- Greg Banks */
301 if (Vpp != 0 && Vpp != Vcc)
302 return 0;
303
304 /* The PSR register holds 8 of the 9 bits which control
305 * the TPS2206 via its serial interface.
306 */
307 psr = inw(HD64465_REG_PCCPSR);
308 switch (sock)
309 {
310 case 0:
311 psr &= 0x0f;
312 psr |= hs_tps2206_avcc[vcci];
313 psr |= (Vpp == 0 ? 0x00 : 0x02);
314 break;
315 case 1:
316 psr &= 0xf0;
317 psr |= hs_tps2206_bvcc[vcci];
318 psr |= (Vpp == 0 ? 0x00 : 0x20);
319 break;
320 };
321 outw(psr, HD64465_REG_PCCPSR);
322
323 return 1;
324}
325
326
327/*============================================================*/
328
329/*
330 * Drive the RESET line to the card.
331 */
332static void hs_reset_socket(hs_socket_t *sp, int on)
333{
334 unsigned short v;
335
336 v = hs_in(sp, GCR);
337 if (on)
338 v |= HD64465_PCCGCR_PCCR;
339 else
340 v &= ~HD64465_PCCGCR_PCCR;
341 hs_out(sp, v, GCR);
342}
343
344/*============================================================*/
345
346static int hs_init(struct pcmcia_socket *s)
347{
348 hs_socket_t *sp = container_of(s, struct hs_socket_t, socket);
349
350 DPRINTK("hs_init(%d)\n", sp->number);
351
352 return 0;
353}
354
355/*============================================================*/
356
357
358static int hs_get_status(struct pcmcia_socket *s, u_int *value)
359{
360 hs_socket_t *sp = container_of(s, struct hs_socket_t, socket);
361 unsigned int isr;
362 u_int status = 0;
363
364
365 isr = hs_in(sp, ISR);
366
367 /* Card is seated and powered when *both* CD pins are low */
368 if ((isr & HD64465_PCCISR_PCD_MASK) == 0)
369 {
370 status |= SS_DETECT; /* card present */
371
372 switch (isr & HD64465_PCCISR_PBVD_MASK)
373 {
374 case HD64465_PCCISR_PBVD_BATGOOD:
375 break;
376 case HD64465_PCCISR_PBVD_BATWARN:
377 status |= SS_BATWARN;
378 break;
379 default:
380 status |= SS_BATDEAD;
381 break;
382 }
383
384 if (isr & HD64465_PCCISR_PREADY)
385 status |= SS_READY;
386
387 if (isr & HD64465_PCCISR_PMWP)
388 status |= SS_WRPROT;
389
390 /* Voltage Select pins interpreted as per Table 4-5 of the std.
391 * Assuming we have the TPS2206, the socket is a "Low Voltage
392 * key, 3.3V and 5V available, no X.XV available".
393 */
394 switch (isr & (HD64465_PCCISR_PVS2|HD64465_PCCISR_PVS1))
395 {
396 case HD64465_PCCISR_PVS1:
397 printk(KERN_NOTICE MODNAME ": cannot handle X.XV card, ignored\n");
398 status = 0;
399 break;
400 case 0:
401 case HD64465_PCCISR_PVS2:
402 /* 3.3V */
403 status |= SS_3VCARD;
404 break;
405 case HD64465_PCCISR_PVS2|HD64465_PCCISR_PVS1:
406 /* 5V */
407 break;
408 }
409
410 /* TODO: SS_POWERON */
411 /* TODO: SS_STSCHG */
412 }
413
414 DPRINTK("hs_get_status(%d) = %x\n", sock, status);
415
416 *value = status;
417 return 0;
418}
419
420/*============================================================*/
421
422static int hs_set_socket(struct pcmcia_socket *s, socket_state_t *state)
423{
424 hs_socket_t *sp = container_of(s, struct hs_socket_t, socket);
425 u_long flags;
426 u_int changed;
427 unsigned short cscier;
428
429 DPRINTK("hs_set_socket(sock=%d, flags=%x, csc_mask=%x, Vcc=%d, Vpp=%d, io_irq=%d)\n",
430 sock, state->flags, state->csc_mask, state->Vcc, state->Vpp, state->io_irq);
431
432 local_irq_save(flags); /* Don't want interrupts happening here */
433
434 if (state->Vpp != sp->state.Vpp ||
435 state->Vcc != sp->state.Vcc) {
436 if (!hs_set_voltages(sp, state->Vcc, state->Vpp)) {
437 local_irq_restore(flags);
438 return -EINVAL;
439 }
440 }
441
442/* hd64465_io_debug = 1; */
443 /*
444 * Handle changes in the Card Status Change mask,
445 * by propagating to the CSCR register
446 */
447 changed = sp->state.csc_mask ^ state->csc_mask;
448 cscier = hs_in(sp, CSCIER);
449
450 if (changed & SS_DETECT) {
451 if (state->csc_mask & SS_DETECT)
452 cscier |= HD64465_PCCCSCIER_PCDE;
453 else
454 cscier &= ~HD64465_PCCCSCIER_PCDE;
455 }
456
457 if (changed & SS_READY) {
458 if (state->csc_mask & SS_READY)
459 cscier |= HD64465_PCCCSCIER_PRE;
460 else
461 cscier &= ~HD64465_PCCCSCIER_PRE;
462 }
463
464 if (changed & SS_BATDEAD) {
465 if (state->csc_mask & SS_BATDEAD)
466 cscier |= HD64465_PCCCSCIER_PBDE;
467 else
468 cscier &= ~HD64465_PCCCSCIER_PBDE;
469 }
470
471 if (changed & SS_BATWARN) {
472 if (state->csc_mask & SS_BATWARN)
473 cscier |= HD64465_PCCCSCIER_PBWE;
474 else
475 cscier &= ~HD64465_PCCCSCIER_PBWE;
476 }
477
478 if (changed & SS_STSCHG) {
479 if (state->csc_mask & SS_STSCHG)
480 cscier |= HD64465_PCCCSCIER_PSCE;
481 else
482 cscier &= ~HD64465_PCCCSCIER_PSCE;
483 }
484
485 hs_out(sp, cscier, CSCIER);
486
487 if (sp->state.io_irq && !state->io_irq)
488 hs_unmap_irq(sp, sp->state.io_irq);
489 else if (!sp->state.io_irq && state->io_irq)
490 hs_map_irq(sp, state->io_irq);
491
492
493 /*
494 * Handle changes in the flags field,
495 * by propagating to config registers.
496 */
497 changed = sp->state.flags ^ state->flags;
498
499 if (changed & SS_IOCARD) {
500 DPRINTK("card type: %s\n",
501 (state->flags & SS_IOCARD ? "i/o" : "memory" ));
502 bool_to_regbit(sp, GCR, HD64465_PCCGCR_PCCT,
503 state->flags & SS_IOCARD);
504 }
505
506 if (changed & SS_RESET) {
507 DPRINTK("%s reset card\n",
508 (state->flags & SS_RESET ? "start" : "stop"));
509 bool_to_regbit(sp, GCR, HD64465_PCCGCR_PCCR,
510 state->flags & SS_RESET);
511 }
512
513 if (changed & SS_OUTPUT_ENA) {
514 DPRINTK("%sabling card output\n",
515 (state->flags & SS_OUTPUT_ENA ? "en" : "dis"));
516 bool_to_regbit(sp, GCR, HD64465_PCCGCR_PDRV,
517 state->flags & SS_OUTPUT_ENA);
518 }
519
520 /* TODO: SS_SPKR_ENA */
521
522/* hd64465_io_debug = 0; */
523 sp->state = *state;
524
525 local_irq_restore(flags);
526
527#if HD64465_DEBUG > 10
528 if (state->flags & SS_OUTPUT_ENA)
529 cis_hex_dump((const unsigned char*)sp->mem_base, 0x100);
530#endif
531 return 0;
532}
533
534/*============================================================*/
535
536static int hs_set_io_map(struct pcmcia_socket *s, struct pccard_io_map *io)
537{
538 hs_socket_t *sp = container_of(s, struct hs_socket_t, socket);
539 int map = io->map;
540 int sock = sp->number;
541 struct pccard_io_map *sio;
542 pgprot_t prot;
543
544 DPRINTK("hs_set_io_map(sock=%d, map=%d, flags=0x%x, speed=%dns, start=%#lx, stop=%#lx)\n",
545 sock, map, io->flags, io->speed, io->start, io->stop);
546 if (map >= MAX_IO_WIN)
547 return -EINVAL;
548 sio = &sp->io_maps[map];
549
550 /* check for null changes */
551 if (io->flags == sio->flags &&
552 io->start == sio->start &&
553 io->stop == sio->stop)
554 return 0;
555
556 if (io->flags & MAP_AUTOSZ)
557 prot = PAGE_KERNEL_PCC(sock, _PAGE_PCC_IODYN);
558 else if (io->flags & MAP_16BIT)
559 prot = PAGE_KERNEL_PCC(sock, _PAGE_PCC_IO16);
560 else
561 prot = PAGE_KERNEL_PCC(sock, _PAGE_PCC_IO8);
562
563 /* TODO: handle MAP_USE_WAIT */
564 if (io->flags & MAP_USE_WAIT)
565 printk(KERN_INFO MODNAME ": MAP_USE_WAIT unimplemented\n");
566 /* TODO: handle MAP_PREFETCH */
567 if (io->flags & MAP_PREFETCH)
568 printk(KERN_INFO MODNAME ": MAP_PREFETCH unimplemented\n");
569 /* TODO: handle MAP_WRPROT */
570 if (io->flags & MAP_WRPROT)
571 printk(KERN_INFO MODNAME ": MAP_WRPROT unimplemented\n");
572 /* TODO: handle MAP_0WS */
573 if (io->flags & MAP_0WS)
574 printk(KERN_INFO MODNAME ": MAP_0WS unimplemented\n");
575
576 if (io->flags & MAP_ACTIVE) {
577 unsigned long pstart, psize, paddrbase;
578
579 paddrbase = virt_to_phys((void*)(sp->mem_base + 2 * HD64465_PCC_WINDOW));
580 pstart = io->start & PAGE_MASK;
581 psize = ((io->stop + PAGE_SIZE) & PAGE_MASK) - pstart;
582
583 /*
584 * Change PTEs in only that portion of the mapping requested
585 * by the caller. This means that most of the time, most of
586 * the PTEs in the io_vma will be unmapped and only the bottom
587 * page will be mapped. But the code allows for weird cards
588 * that might want IO ports > 4K.
589 */
590 sp->io_base = p3_ioremap(paddrbase + pstart, psize, pgprot_val(prot));
591
592 /*
593 * Change the mapping used by inb() outb() etc
594 */
595 hd64465_port_map(io->start,
596 io->stop - io->start + 1,
597 (unsigned long)sp->io_base + io->start, 0);
598 } else {
599 hd64465_port_unmap(sio->start, sio->stop - sio->start + 1);
600 p3_iounmap(sp->io_base);
601 }
602
603 *sio = *io;
604 return 0;
605}
606
607/*============================================================*/
608
609static int hs_set_mem_map(struct pcmcia_socket *s, struct pccard_mem_map *mem)
610{
611 hs_socket_t *sp = container_of(s, struct hs_socket_t, socket);
612 struct pccard_mem_map *smem;
613 int map = mem->map;
614 unsigned long paddr;
615
616#if 0
617 DPRINTK("hs_set_mem_map(sock=%d, map=%d, flags=0x%x, card_start=0x%08x)\n",
618 sock, map, mem->flags, mem->card_start);
619#endif
620
621 if (map >= MAX_WIN)
622 return -EINVAL;
623 smem = &sp->mem_maps[map];
624
625 paddr = sp->mem_base; /* base of Attribute mapping */
626 if (!(mem->flags & MAP_ATTRIB))
627 paddr += HD64465_PCC_WINDOW; /* base of Common mapping */
628 paddr += mem->card_start;
629
630 /* Because we specified SS_CAP_STATIC_MAP, we are obliged
631 * at this time to report the system address corresponding
632 * to the card address requested. This is how Socket Services
633 * queries our fixed mapping. I wish this fact had been
634 * documented - Greg Banks.
635 */
636 mem->static_start = paddr;
637
638 *smem = *mem;
639
640 return 0;
641}
642
643/* TODO: do we need to use the MMU to access Common memory ??? */
644
645/*============================================================*/
646
647/*
648 * This function is registered with the HD64465 glue code to do a
649 * secondary demux step on the PCMCIA interrupts. It handles
650 * mapping the IREQ request from the card to a standard Linux
651 * IRQ, as requested by SocketServices.
652 */
653static int hs_irq_demux(int irq, void *dev)
654{
655 hs_socket_t *sp = dev;
656 u_int cscr;
657
658 DPRINTK("hs_irq_demux(irq=%d)\n", irq);
659
660 if (sp->state.io_irq &&
661 (cscr = hs_in(sp, CSCR)) & HD64465_PCCCSCR_PIREQ) {
662 cscr &= ~HD64465_PCCCSCR_PIREQ;
663 hs_out(sp, cscr, CSCR);
664 return sp->state.io_irq;
665 }
666
667 return irq;
668}
669
670/*============================================================*/
671
672/*
673 * Interrupt handling routine.
674 */
675
676static irqreturn_t hs_interrupt(int irq, void *dev)
677{
678 hs_socket_t *sp = dev;
679 u_int events = 0;
680 u_int cscr;
681
682 cscr = hs_in(sp, CSCR);
683
684 DPRINTK("hs_interrupt, cscr=%04x\n", cscr);
685
686 /* check for bus-related changes to be reported to Socket Services */
687 if (cscr & HD64465_PCCCSCR_PCDC) {
688 /* double-check for a 16-bit card, as we don't support CardBus */
689 if ((hs_in(sp, ISR) & HD64465_PCCISR_PCD_MASK) != 0) {
690 printk(KERN_NOTICE MODNAME
691 ": socket %d, card not a supported card type or not inserted correctly\n",
692 sp->number);
693 /* Don't do the rest unless a card is present */
694 cscr &= ~(HD64465_PCCCSCR_PCDC|
695 HD64465_PCCCSCR_PRC|
696 HD64465_PCCCSCR_PBW|
697 HD64465_PCCCSCR_PBD|
698 HD64465_PCCCSCR_PSC);
699 } else {
700 cscr &= ~HD64465_PCCCSCR_PCDC;
701 events |= SS_DETECT; /* card insertion or removal */
702 }
703 }
704 if (cscr & HD64465_PCCCSCR_PRC) {
705 cscr &= ~HD64465_PCCCSCR_PRC;
706 events |= SS_READY; /* ready signal changed */
707 }
708 if (cscr & HD64465_PCCCSCR_PBW) {
709 cscr &= ~HD64465_PCCCSCR_PSC;
710 events |= SS_BATWARN; /* battery warning */
711 }
712 if (cscr & HD64465_PCCCSCR_PBD) {
713 cscr &= ~HD64465_PCCCSCR_PSC;
714 events |= SS_BATDEAD; /* battery dead */
715 }
716 if (cscr & HD64465_PCCCSCR_PSC) {
717 cscr &= ~HD64465_PCCCSCR_PSC;
718 events |= SS_STSCHG; /* STSCHG (status changed) signal */
719 }
720
721 if (cscr & HD64465_PCCCSCR_PIREQ) {
722 cscr &= ~HD64465_PCCCSCR_PIREQ;
723
724 /* This should have been dealt with during irq demux */
725 printk(KERN_NOTICE MODNAME ": unexpected IREQ from card\n");
726 }
727
728 hs_out(sp, cscr, CSCR);
729
730 if (events)
731 pcmcia_parse_events(&sp->socket, events);
732
733 return IRQ_HANDLED;
734}
735
736/*============================================================*/
737
738static struct pccard_operations hs_operations = {
739 .init = hs_init,
740 .get_status = hs_get_status,
741 .set_socket = hs_set_socket,
742 .set_io_map = hs_set_io_map,
743 .set_mem_map = hs_set_mem_map,
744};
745
746static int hs_init_socket(hs_socket_t *sp, int irq, unsigned long mem_base,
747 unsigned int ctrl_base)
748{
749 unsigned short v;
750 int i, err;
751
752 memset(sp, 0, sizeof(*sp));
753 sp->irq = irq;
754 sp->mem_base = mem_base;
755 sp->mem_length = 4*HD64465_PCC_WINDOW; /* 16MB */
756 sp->ctrl_base = ctrl_base;
757
758 for (i=0 ; i<MAX_IO_WIN ; i++)
759 sp->io_maps[i].map = i;
760 for (i=0 ; i<MAX_WIN ; i++)
761 sp->mem_maps[i].map = i;
762
763 hd64465_register_irq_demux(sp->irq, hs_irq_demux, sp);
764
765 if ((err = request_irq(sp->irq, hs_interrupt, IRQF_DISABLED, MODNAME, sp)) < 0)
766 return err;
767 if (request_mem_region(sp->mem_base, sp->mem_length, MODNAME) == 0) {
768 sp->mem_base = 0;
769 return -ENOMEM;
770 }
771
772
773 /* According to section 3.2 of the PCMCIA standard, low-voltage
774 * capable cards must implement cold insertion, i.e. Vpp and
775 * Vcc set to 0 before card is inserted.
776 */
777 /*hs_set_voltages(sp, 0, 0);*/
778
779 /* hi-Z the outputs to the card and set 16MB map mode */
780 v = hs_in(sp, GCR);
781 v &= ~HD64465_PCCGCR_PCCT; /* memory-only card */
782 hs_out(sp, v, GCR);
783
784 v = hs_in(sp, GCR);
785 v |= HD64465_PCCGCR_PDRV; /* enable outputs to card */
786 hs_out(sp, v, GCR);
787
788 v = hs_in(sp, GCR);
789 v |= HD64465_PCCGCR_PMMOD; /* 16MB mapping mode */
790 hs_out(sp, v, GCR);
791
792 v = hs_in(sp, GCR);
793 /* lowest 16MB of Common */
794 v &= ~(HD64465_PCCGCR_PPA25|HD64465_PCCGCR_PPA24);
795 hs_out(sp, v, GCR);
796
797 hs_reset_socket(sp, 1);
798
799 printk(KERN_INFO "HD64465 PCMCIA bridge socket %d at 0x%08lx irq %d\n",
800 i, sp->mem_base, sp->irq);
801
802 return 0;
803}
804
805static void hs_exit_socket(hs_socket_t *sp)
806{
807 unsigned short cscier, gcr;
808 unsigned long flags;
809
810 local_irq_save(flags);
811
812 /* turn off interrupts in hardware */
813 cscier = hs_in(sp, CSCIER);
814 cscier = (cscier & IER_MASK) | IER_OFF;
815 hs_out(sp, cscier, CSCIER);
816
817 /* hi-Z the outputs to the card */
818 gcr = hs_in(sp, GCR);
819 gcr &= HD64465_PCCGCR_PDRV;
820 hs_out(sp, gcr, GCR);
821
822 /* power the card down */
823 hs_set_voltages(sp, 0, 0);
824
825 if (sp->mem_base != 0)
826 release_mem_region(sp->mem_base, sp->mem_length);
827 if (sp->irq != 0) {
828 free_irq(sp->irq, hs_interrupt);
829 hd64465_unregister_irq_demux(sp->irq);
830 }
831
832 local_irq_restore(flags);
833}
834
835static struct device_driver hd64465_driver = {
836 .name = "hd64465-pcmcia",
837 .bus = &platform_bus_type,
838 .suspend = pcmcia_socket_dev_suspend,
839 .resume = pcmcia_socket_dev_resume,
840};
841
842static struct platform_device hd64465_device = {
843 .name = "hd64465-pcmcia",
844 .id = 0,
845};
846
847static int __init init_hs(void)
848{
849 int i;
850 unsigned short v;
851
852/* hd64465_io_debug = 1; */
853 if (driver_register(&hd64465_driver))
854 return -EINVAL;
855
856 /* Wake both sockets out of STANDBY mode */
857 /* TODO: wait 15ms */
858 v = inw(HD64465_REG_SMSCR);
859 v &= ~(HD64465_SMSCR_PC0ST|HD64465_SMSCR_PC1ST);
860 outw(v, HD64465_REG_SMSCR);
861
862 /* keep power controller out of shutdown mode */
863 v = inb(HD64465_REG_PCC0SCR);
864 v |= HD64465_PCCSCR_SHDN;
865 outb(v, HD64465_REG_PCC0SCR);
866
867 /* use serial (TPS2206) power controller */
868 v = inb(HD64465_REG_PCC0CSCR);
869 v |= HD64465_PCCCSCR_PSWSEL;
870 outb(v, HD64465_REG_PCC0CSCR);
871
872 /*
873 * Setup hs_sockets[] structures and request system resources.
874 * TODO: on memory allocation failure, power down the socket
875 * before quitting.
876 */
877 for (i=0; i<HS_MAX_SOCKETS; i++) {
878 hs_set_voltages(&hs_sockets[i], 0, 0);
879
880 hs_sockets[i].socket.features |= SS_CAP_PCCARD | SS_CAP_STATIC_MAP; /* mappings are fixed in host memory */
881 hs_sockets[i].socket.resource_ops = &pccard_static_ops;
882 hs_sockets[i].socket.irq_mask = 0xffde;/*0xffff*/ /* IRQs mapped in s/w so can do any, really */
883 hs_sockets[i].socket.map_size = HD64465_PCC_WINDOW; /* 16MB fixed window size */
884
885 hs_sockets[i].socket.owner = THIS_MODULE;
886 hs_sockets[i].socket.ss_entry = &hs_operations;
887 }
888
889 i = hs_init_socket(&hs_sockets[0],
890 HD64465_IRQ_PCMCIA0,
891 HD64465_PCC0_BASE,
892 HD64465_REG_PCC0ISR);
893 if (i < 0) {
894 unregister_driver(&hd64465_driver);
895 return i;
896 }
897 i = hs_init_socket(&hs_sockets[1],
898 HD64465_IRQ_PCMCIA1,
899 HD64465_PCC1_BASE,
900 HD64465_REG_PCC1ISR);
901 if (i < 0) {
902 unregister_driver(&hd64465_driver);
903 return i;
904 }
905
906/* hd64465_io_debug = 0; */
907
908 platform_device_register(&hd64465_device);
909
910 for (i=0; i<HS_MAX_SOCKETS; i++) {
911 unsigned int ret;
912 hs_sockets[i].socket.dev.parent = &hd64465_device.dev;
913 hs_sockets[i].number = i;
914 ret = pcmcia_register_socket(&hs_sockets[i].socket);
915 if (ret && i)
916 pcmcia_unregister_socket(&hs_sockets[0].socket);
917 }
918
919 return 0;
920}
921
922static void __exit exit_hs(void)
923{
924 int i;
925
926 for (i=0 ; i<HS_MAX_SOCKETS ; i++) {
927 pcmcia_unregister_socket(&hs_sockets[i].socket);
928 hs_exit_socket(&hs_sockets[i]);
929 }
930
931 platform_device_unregister(&hd64465_device);
932 unregister_driver(&hd64465_driver);
933}
934
935module_init(init_hs);
936module_exit(exit_hs);
937
938/*============================================================*/
939/*END*/
diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c
index afea2b2558b5..f5d0ba8e22d5 100644
--- a/drivers/pcmcia/pcmcia_resource.c
+++ b/drivers/pcmcia/pcmcia_resource.c
@@ -302,9 +302,10 @@ int pcmcia_modify_configuration(struct pcmcia_device *p_dev,
302 /* We only allow changing Vpp1 and Vpp2 to the same value */ 302 /* We only allow changing Vpp1 and Vpp2 to the same value */
303 if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) && 303 if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) &&
304 (mod->Attributes & CONF_VPP2_CHANGE_VALID)) { 304 (mod->Attributes & CONF_VPP2_CHANGE_VALID)) {
305 if (mod->Vpp1 != mod->Vpp2) 305 if (mod->Vpp1 != mod->Vpp2) {
306 ds_dbg(s, 0, "Vpp1 and Vpp2 must be the same\n"); 306 ds_dbg(s, 0, "Vpp1 and Vpp2 must be the same\n");
307 return -EINVAL; 307 return -EINVAL;
308 }
308 s->socket.Vpp = mod->Vpp1; 309 s->socket.Vpp = mod->Vpp1;
309 if (s->ops->set_socket(s, &s->socket)) { 310 if (s->ops->set_socket(s, &s->socket)) {
310 dev_printk(KERN_WARNING, &s->dev, 311 dev_printk(KERN_WARNING, &s->dev,
@@ -693,8 +694,9 @@ int pcmcia_request_irq(struct pcmcia_device *p_dev, irq_req_t *req)
693 type = 0; 694 type = 0;
694 if (s->functions > 1) /* All of this ought to be handled higher up */ 695 if (s->functions > 1) /* All of this ought to be handled higher up */
695 type = IRQF_SHARED; 696 type = IRQF_SHARED;
696 if (req->Attributes & IRQ_TYPE_DYNAMIC_SHARING) 697 else if (req->Attributes & IRQ_TYPE_DYNAMIC_SHARING)
697 type = IRQF_SHARED; 698 type = IRQF_SHARED;
699 else printk(KERN_WARNING "pcmcia: Driver needs updating to support IRQ sharing.\n");
698 700
699#ifdef CONFIG_PCMCIA_PROBE 701#ifdef CONFIG_PCMCIA_PROBE
700 702
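After the pcmcia_request_irq() change above, a single-function card is only granted IRQF_SHARED if its driver opts in via IRQ_TYPE_DYNAMIC_SHARING; otherwise the new warning is printed. A hypothetical driver-side sketch of opting in (the helper name is a placeholder, not part of this patch):

#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>

static void example_allow_irq_sharing(irq_req_t *req)
{
	/* tell the PCMCIA core this driver copes with a shared interrupt */
	req->Attributes |= IRQ_TYPE_DYNAMIC_SHARING;
}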
diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c
index 17f4ecf1c0c5..9ca22c7aafb2 100644
--- a/drivers/pcmcia/rsrc_nonstatic.c
+++ b/drivers/pcmcia/rsrc_nonstatic.c
@@ -71,7 +71,7 @@ static DEFINE_MUTEX(rsrc_mutex);
71======================================================================*/ 71======================================================================*/
72 72
73static struct resource * 73static struct resource *
74make_resource(resource_size_t b, resource_size_t n, int flags, char *name) 74make_resource(resource_size_t b, resource_size_t n, int flags, const char *name)
75{ 75{
76 struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL); 76 struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL);
77 77
@@ -624,7 +624,7 @@ static int nonstatic_adjust_io_region(struct resource *res, unsigned long r_star
624static struct resource *nonstatic_find_io_region(unsigned long base, int num, 624static struct resource *nonstatic_find_io_region(unsigned long base, int num,
625 unsigned long align, struct pcmcia_socket *s) 625 unsigned long align, struct pcmcia_socket *s)
626{ 626{
627 struct resource *res = make_resource(0, num, IORESOURCE_IO, s->dev.bus_id); 627 struct resource *res = make_resource(0, num, IORESOURCE_IO, dev_name(&s->dev));
628 struct socket_data *s_data = s->resource_data; 628 struct socket_data *s_data = s->resource_data;
629 struct pcmcia_align_data data; 629 struct pcmcia_align_data data;
630 unsigned long min = base; 630 unsigned long min = base;
@@ -658,7 +658,7 @@ static struct resource *nonstatic_find_io_region(unsigned long base, int num,
658static struct resource * nonstatic_find_mem_region(u_long base, u_long num, 658static struct resource * nonstatic_find_mem_region(u_long base, u_long num,
659 u_long align, int low, struct pcmcia_socket *s) 659 u_long align, int low, struct pcmcia_socket *s)
660{ 660{
661 struct resource *res = make_resource(0, num, IORESOURCE_MEM, s->dev.bus_id); 661 struct resource *res = make_resource(0, num, IORESOURCE_MEM, dev_name(&s->dev));
662 struct socket_data *s_data = s->resource_data; 662 struct socket_data *s_data = s->resource_data;
663 struct pcmcia_align_data data; 663 struct pcmcia_align_data data;
664 unsigned long min, max; 664 unsigned long min, max;
diff --git a/drivers/pnp/Kconfig b/drivers/pnp/Kconfig
index 821933f9aa57..2a37b3fedb8e 100644
--- a/drivers/pnp/Kconfig
+++ b/drivers/pnp/Kconfig
@@ -20,13 +20,21 @@ menuconfig PNP
20 20
21 If unsure, say Y. 21 If unsure, say Y.
22 22
23if PNP 23config PNP_DEBUG_MESSAGES
24 24 default y
25config PNP_DEBUG 25 bool "PNP debugging messages"
26 bool "PnP Debug Messages" 26 depends on PNP
27 help 27 help
28 Say Y if you want the Plug and Play Layer to print debug messages. 28 Say Y here if you want the PNP layer to be able to produce debugging
29 This is useful if you are developing a PnP driver or troubleshooting. 29 messages if needed. The messages can be enabled at boot-time with
30 the pnp.debug kernel parameter.
31
32 This option allows you to save a bit of space if you do not want
33 the messages to even be built into the kernel.
34
35 If you have any doubts about this, say Y here.
36
37if PNP
30 38
31comment "Protocols" 39comment "Protocols"
32 40
diff --git a/drivers/pnp/Makefile b/drivers/pnp/Makefile
index e83f34f1b5ba..8de3775ec242 100644
--- a/drivers/pnp/Makefile
+++ b/drivers/pnp/Makefile
@@ -10,7 +10,3 @@ obj-$(CONFIG_ISAPNP) += isapnp/
10 10
11# pnp_system_init goes after pnpacpi/pnpbios init 11# pnp_system_init goes after pnpacpi/pnpbios init
12obj-y += system.o 12obj-y += system.o
13
14ifeq ($(CONFIG_PNP_DEBUG),y)
15EXTRA_CFLAGS += -DDEBUG
16endif
diff --git a/drivers/pnp/base.h b/drivers/pnp/base.h
index 3b8b9d3cb03d..0b8d14050efa 100644
--- a/drivers/pnp/base.h
+++ b/drivers/pnp/base.h
@@ -166,3 +166,13 @@ struct pnp_resource *pnp_add_io_resource(struct pnp_dev *dev,
166struct pnp_resource *pnp_add_mem_resource(struct pnp_dev *dev, 166struct pnp_resource *pnp_add_mem_resource(struct pnp_dev *dev,
167 resource_size_t start, 167 resource_size_t start,
168 resource_size_t end, int flags); 168 resource_size_t end, int flags);
169
170extern int pnp_debug;
171
172#if defined(CONFIG_PNP_DEBUG_MESSAGES)
173#define pnp_dbg(dev, format, arg...) \
174 ({ if (pnp_debug) dev_printk(KERN_DEBUG, dev, format, ## arg); 0; })
175#else
176#define pnp_dbg(dev, format, arg...) \
177 ({ if (0) dev_printk(KERN_DEBUG, dev, format, ## arg); 0; })
178#endif
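pnp_dbg() replaces the old compile-time CONFIG_PNP_DEBUG/-DDEBUG scheme: with CONFIG_PNP_DEBUG_MESSAGES=y the messages are built in, but they are only printed when the kernel is booted with the pnp.debug parameter (see the pnp_debug flag and __setup() handler added to core.c below). A minimal caller sketch with a made-up message, assuming a struct pnp_dev already in hand:

#include <linux/pnp.h>
#include "base.h"	/* pnp_dbg() and the pnp_debug flag */

static void example_report(struct pnp_dev *dev)
{
	/* emitted only when pnp_debug was set via the pnp.debug boot option */
	pnp_dbg(&dev->dev, "example: device is %s\n",
		dev->active ? "active" : "disabled");
}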
diff --git a/drivers/pnp/core.c b/drivers/pnp/core.c
index 817fe626e15b..16c01c6fa7c5 100644
--- a/drivers/pnp/core.c
+++ b/drivers/pnp/core.c
@@ -177,6 +177,9 @@ int __pnp_add_device(struct pnp_dev *dev)
177int pnp_add_device(struct pnp_dev *dev) 177int pnp_add_device(struct pnp_dev *dev)
178{ 178{
179 int ret; 179 int ret;
180 char buf[128];
181 int len = 0;
182 struct pnp_id *id;
180 183
181 if (dev->card) 184 if (dev->card)
182 return -EINVAL; 185 return -EINVAL;
@@ -185,17 +188,12 @@ int pnp_add_device(struct pnp_dev *dev)
185 if (ret) 188 if (ret)
186 return ret; 189 return ret;
187 190
188#ifdef CONFIG_PNP_DEBUG 191 buf[0] = '\0';
189 { 192 for (id = dev->id; id; id = id->next)
190 struct pnp_id *id; 193 len += scnprintf(buf + len, sizeof(buf) - len, " %s", id->id);
191 194
192 dev_printk(KERN_DEBUG, &dev->dev, "%s device, IDs", 195 pnp_dbg(&dev->dev, "%s device, IDs%s (%s)\n",
193 dev->protocol->name); 196 dev->protocol->name, buf, dev->active ? "active" : "disabled");
194 for (id = dev->id; id; id = id->next)
195 printk(" %s", id->id);
196 printk(" (%s)\n", dev->active ? "active" : "disabled");
197 }
198#endif
199 return 0; 197 return 0;
200} 198}
201 199
@@ -214,3 +212,14 @@ static int __init pnp_init(void)
214} 212}
215 213
216subsys_initcall(pnp_init); 214subsys_initcall(pnp_init);
215
216int pnp_debug;
217
218#if defined(CONFIG_PNP_DEBUG_MESSAGES)
219static int __init pnp_debug_setup(char *__unused)
220{
221 pnp_debug = 1;
222 return 1;
223}
224__setup("pnp.debug", pnp_debug_setup);
225#endif
diff --git a/drivers/pnp/driver.c b/drivers/pnp/driver.c
index e3f7e89c4dfb..527ee764c93f 100644
--- a/drivers/pnp/driver.c
+++ b/drivers/pnp/driver.c
@@ -114,7 +114,6 @@ static int pnp_device_probe(struct device *dev)
114 } else 114 } else
115 goto fail; 115 goto fail;
116 116
117 dev_dbg(dev, "driver attached\n");
118 return error; 117 return error;
119 118
120fail: 119fail:
@@ -211,8 +210,6 @@ struct bus_type pnp_bus_type = {
211 210
212int pnp_register_driver(struct pnp_driver *drv) 211int pnp_register_driver(struct pnp_driver *drv)
213{ 212{
214 pnp_dbg("the driver '%s' has been registered", drv->name);
215
216 drv->driver.name = drv->name; 213 drv->driver.name = drv->name;
217 drv->driver.bus = &pnp_bus_type; 214 drv->driver.bus = &pnp_bus_type;
218 215
@@ -222,7 +219,6 @@ int pnp_register_driver(struct pnp_driver *drv)
222void pnp_unregister_driver(struct pnp_driver *drv) 219void pnp_unregister_driver(struct pnp_driver *drv)
223{ 220{
224 driver_unregister(&drv->driver); 221 driver_unregister(&drv->driver);
225 pnp_dbg("the driver '%s' has been unregistered", drv->name);
226} 222}
227 223
228/** 224/**
diff --git a/drivers/pnp/interface.c b/drivers/pnp/interface.c
index 478a4a739c00..c3f1c8e9d254 100644
--- a/drivers/pnp/interface.c
+++ b/drivers/pnp/interface.c
@@ -12,7 +12,6 @@
12#include <linux/errno.h> 12#include <linux/errno.h>
13#include <linux/list.h> 13#include <linux/list.h>
14#include <linux/types.h> 14#include <linux/types.h>
15#include <linux/pnp.h>
16#include <linux/stat.h> 15#include <linux/stat.h>
17#include <linux/ctype.h> 16#include <linux/ctype.h>
18#include <linux/slab.h> 17#include <linux/slab.h>
diff --git a/drivers/pnp/isapnp/Makefile b/drivers/pnp/isapnp/Makefile
index 3e38f06f8d78..cac18bbfb817 100644
--- a/drivers/pnp/isapnp/Makefile
+++ b/drivers/pnp/isapnp/Makefile
@@ -5,7 +5,3 @@
5isapnp-proc-$(CONFIG_PROC_FS) = proc.o 5isapnp-proc-$(CONFIG_PROC_FS) = proc.o
6 6
7obj-y := core.o compat.o $(isapnp-proc-y) 7obj-y := core.o compat.o $(isapnp-proc-y)
8
9ifeq ($(CONFIG_PNP_DEBUG),y)
10EXTRA_CFLAGS += -DDEBUG
11endif
diff --git a/drivers/pnp/isapnp/core.c b/drivers/pnp/isapnp/core.c
index 46455fbab6d5..e851160e14f0 100644
--- a/drivers/pnp/isapnp/core.c
+++ b/drivers/pnp/isapnp/core.c
@@ -901,7 +901,7 @@ static int isapnp_get_resources(struct pnp_dev *dev)
901{ 901{
902 int i, ret; 902 int i, ret;
903 903
904 dev_dbg(&dev->dev, "get resources\n"); 904 pnp_dbg(&dev->dev, "get resources\n");
905 pnp_init_resources(dev); 905 pnp_init_resources(dev);
906 isapnp_cfg_begin(dev->card->number, dev->number); 906 isapnp_cfg_begin(dev->card->number, dev->number);
907 dev->active = isapnp_read_byte(ISAPNP_CFG_ACTIVATE); 907 dev->active = isapnp_read_byte(ISAPNP_CFG_ACTIVATE);
@@ -939,13 +939,13 @@ static int isapnp_set_resources(struct pnp_dev *dev)
939 struct resource *res; 939 struct resource *res;
940 int tmp; 940 int tmp;
941 941
942 dev_dbg(&dev->dev, "set resources\n"); 942 pnp_dbg(&dev->dev, "set resources\n");
943 isapnp_cfg_begin(dev->card->number, dev->number); 943 isapnp_cfg_begin(dev->card->number, dev->number);
944 dev->active = 1; 944 dev->active = 1;
945 for (tmp = 0; tmp < ISAPNP_MAX_PORT; tmp++) { 945 for (tmp = 0; tmp < ISAPNP_MAX_PORT; tmp++) {
946 res = pnp_get_resource(dev, IORESOURCE_IO, tmp); 946 res = pnp_get_resource(dev, IORESOURCE_IO, tmp);
947 if (pnp_resource_enabled(res)) { 947 if (pnp_resource_enabled(res)) {
948 dev_dbg(&dev->dev, " set io %d to %#llx\n", 948 pnp_dbg(&dev->dev, " set io %d to %#llx\n",
949 tmp, (unsigned long long) res->start); 949 tmp, (unsigned long long) res->start);
950 isapnp_write_word(ISAPNP_CFG_PORT + (tmp << 1), 950 isapnp_write_word(ISAPNP_CFG_PORT + (tmp << 1),
951 res->start); 951 res->start);
@@ -957,14 +957,14 @@ static int isapnp_set_resources(struct pnp_dev *dev)
957 int irq = res->start; 957 int irq = res->start;
958 if (irq == 2) 958 if (irq == 2)
959 irq = 9; 959 irq = 9;
960 dev_dbg(&dev->dev, " set irq %d to %d\n", tmp, irq); 960 pnp_dbg(&dev->dev, " set irq %d to %d\n", tmp, irq);
961 isapnp_write_byte(ISAPNP_CFG_IRQ + (tmp << 1), irq); 961 isapnp_write_byte(ISAPNP_CFG_IRQ + (tmp << 1), irq);
962 } 962 }
963 } 963 }
964 for (tmp = 0; tmp < ISAPNP_MAX_DMA; tmp++) { 964 for (tmp = 0; tmp < ISAPNP_MAX_DMA; tmp++) {
965 res = pnp_get_resource(dev, IORESOURCE_DMA, tmp); 965 res = pnp_get_resource(dev, IORESOURCE_DMA, tmp);
966 if (pnp_resource_enabled(res)) { 966 if (pnp_resource_enabled(res)) {
967 dev_dbg(&dev->dev, " set dma %d to %lld\n", 967 pnp_dbg(&dev->dev, " set dma %d to %lld\n",
968 tmp, (unsigned long long) res->start); 968 tmp, (unsigned long long) res->start);
969 isapnp_write_byte(ISAPNP_CFG_DMA + tmp, res->start); 969 isapnp_write_byte(ISAPNP_CFG_DMA + tmp, res->start);
970 } 970 }
@@ -972,7 +972,7 @@ static int isapnp_set_resources(struct pnp_dev *dev)
972 for (tmp = 0; tmp < ISAPNP_MAX_MEM; tmp++) { 972 for (tmp = 0; tmp < ISAPNP_MAX_MEM; tmp++) {
973 res = pnp_get_resource(dev, IORESOURCE_MEM, tmp); 973 res = pnp_get_resource(dev, IORESOURCE_MEM, tmp);
974 if (pnp_resource_enabled(res)) { 974 if (pnp_resource_enabled(res)) {
975 dev_dbg(&dev->dev, " set mem %d to %#llx\n", 975 pnp_dbg(&dev->dev, " set mem %d to %#llx\n",
976 tmp, (unsigned long long) res->start); 976 tmp, (unsigned long long) res->start);
977 isapnp_write_word(ISAPNP_CFG_MEM + (tmp << 3), 977 isapnp_write_word(ISAPNP_CFG_MEM + (tmp << 3),
978 (res->start >> 8) & 0xffff); 978 (res->start >> 8) & 0xffff);
diff --git a/drivers/pnp/manager.c b/drivers/pnp/manager.c
index b526eaad3f6c..00fd3577b985 100644
--- a/drivers/pnp/manager.c
+++ b/drivers/pnp/manager.c
@@ -25,7 +25,7 @@ static int pnp_assign_port(struct pnp_dev *dev, struct pnp_port *rule, int idx)
25 25
26 res = pnp_get_resource(dev, IORESOURCE_IO, idx); 26 res = pnp_get_resource(dev, IORESOURCE_IO, idx);
27 if (res) { 27 if (res) {
28 dev_dbg(&dev->dev, " io %d already set to %#llx-%#llx " 28 pnp_dbg(&dev->dev, " io %d already set to %#llx-%#llx "
29 "flags %#lx\n", idx, (unsigned long long) res->start, 29 "flags %#lx\n", idx, (unsigned long long) res->start,
30 (unsigned long long) res->end, res->flags); 30 (unsigned long long) res->end, res->flags);
31 return 0; 31 return 0;
@@ -38,7 +38,7 @@ static int pnp_assign_port(struct pnp_dev *dev, struct pnp_port *rule, int idx)
38 38
39 if (!rule->size) { 39 if (!rule->size) {
40 res->flags |= IORESOURCE_DISABLED; 40 res->flags |= IORESOURCE_DISABLED;
41 dev_dbg(&dev->dev, " io %d disabled\n", idx); 41 pnp_dbg(&dev->dev, " io %d disabled\n", idx);
42 goto __add; 42 goto __add;
43 } 43 }
44 44
@@ -49,7 +49,7 @@ static int pnp_assign_port(struct pnp_dev *dev, struct pnp_port *rule, int idx)
49 res->start += rule->align; 49 res->start += rule->align;
50 res->end = res->start + rule->size - 1; 50 res->end = res->start + rule->size - 1;
51 if (res->start > rule->max || !rule->align) { 51 if (res->start > rule->max || !rule->align) {
52 dev_dbg(&dev->dev, " couldn't assign io %d " 52 pnp_dbg(&dev->dev, " couldn't assign io %d "
53 "(min %#llx max %#llx)\n", idx, 53 "(min %#llx max %#llx)\n", idx,
54 (unsigned long long) rule->min, 54 (unsigned long long) rule->min,
55 (unsigned long long) rule->max); 55 (unsigned long long) rule->max);
@@ -68,7 +68,7 @@ static int pnp_assign_mem(struct pnp_dev *dev, struct pnp_mem *rule, int idx)
68 68
69 res = pnp_get_resource(dev, IORESOURCE_MEM, idx); 69 res = pnp_get_resource(dev, IORESOURCE_MEM, idx);
70 if (res) { 70 if (res) {
71 dev_dbg(&dev->dev, " mem %d already set to %#llx-%#llx " 71 pnp_dbg(&dev->dev, " mem %d already set to %#llx-%#llx "
72 "flags %#lx\n", idx, (unsigned long long) res->start, 72 "flags %#lx\n", idx, (unsigned long long) res->start,
73 (unsigned long long) res->end, res->flags); 73 (unsigned long long) res->end, res->flags);
74 return 0; 74 return 0;
@@ -90,7 +90,7 @@ static int pnp_assign_mem(struct pnp_dev *dev, struct pnp_mem *rule, int idx)
90 90
91 if (!rule->size) { 91 if (!rule->size) {
92 res->flags |= IORESOURCE_DISABLED; 92 res->flags |= IORESOURCE_DISABLED;
93 dev_dbg(&dev->dev, " mem %d disabled\n", idx); 93 pnp_dbg(&dev->dev, " mem %d disabled\n", idx);
94 goto __add; 94 goto __add;
95 } 95 }
96 96
@@ -101,7 +101,7 @@ static int pnp_assign_mem(struct pnp_dev *dev, struct pnp_mem *rule, int idx)
101 res->start += rule->align; 101 res->start += rule->align;
102 res->end = res->start + rule->size - 1; 102 res->end = res->start + rule->size - 1;
103 if (res->start > rule->max || !rule->align) { 103 if (res->start > rule->max || !rule->align) {
104 dev_dbg(&dev->dev, " couldn't assign mem %d " 104 pnp_dbg(&dev->dev, " couldn't assign mem %d "
105 "(min %#llx max %#llx)\n", idx, 105 "(min %#llx max %#llx)\n", idx,
106 (unsigned long long) rule->min, 106 (unsigned long long) rule->min,
107 (unsigned long long) rule->max); 107 (unsigned long long) rule->max);
@@ -126,7 +126,7 @@ static int pnp_assign_irq(struct pnp_dev *dev, struct pnp_irq *rule, int idx)
126 126
127 res = pnp_get_resource(dev, IORESOURCE_IRQ, idx); 127 res = pnp_get_resource(dev, IORESOURCE_IRQ, idx);
128 if (res) { 128 if (res) {
129 dev_dbg(&dev->dev, " irq %d already set to %d flags %#lx\n", 129 pnp_dbg(&dev->dev, " irq %d already set to %d flags %#lx\n",
130 idx, (int) res->start, res->flags); 130 idx, (int) res->start, res->flags);
131 return 0; 131 return 0;
132 } 132 }
@@ -138,7 +138,7 @@ static int pnp_assign_irq(struct pnp_dev *dev, struct pnp_irq *rule, int idx)
138 138
139 if (bitmap_empty(rule->map.bits, PNP_IRQ_NR)) { 139 if (bitmap_empty(rule->map.bits, PNP_IRQ_NR)) {
140 res->flags |= IORESOURCE_DISABLED; 140 res->flags |= IORESOURCE_DISABLED;
141 dev_dbg(&dev->dev, " irq %d disabled\n", idx); 141 pnp_dbg(&dev->dev, " irq %d disabled\n", idx);
142 goto __add; 142 goto __add;
143 } 143 }
144 144
@@ -160,11 +160,11 @@ static int pnp_assign_irq(struct pnp_dev *dev, struct pnp_irq *rule, int idx)
160 res->start = -1; 160 res->start = -1;
161 res->end = -1; 161 res->end = -1;
162 res->flags |= IORESOURCE_DISABLED; 162 res->flags |= IORESOURCE_DISABLED;
163 dev_dbg(&dev->dev, " irq %d disabled (optional)\n", idx); 163 pnp_dbg(&dev->dev, " irq %d disabled (optional)\n", idx);
164 goto __add; 164 goto __add;
165 } 165 }
166 166
167 dev_dbg(&dev->dev, " couldn't assign irq %d\n", idx); 167 pnp_dbg(&dev->dev, " couldn't assign irq %d\n", idx);
168 return -EBUSY; 168 return -EBUSY;
169 169
170__add: 170__add:
@@ -184,7 +184,7 @@ static int pnp_assign_dma(struct pnp_dev *dev, struct pnp_dma *rule, int idx)
184 184
185 res = pnp_get_resource(dev, IORESOURCE_DMA, idx); 185 res = pnp_get_resource(dev, IORESOURCE_DMA, idx);
186 if (res) { 186 if (res) {
187 dev_dbg(&dev->dev, " dma %d already set to %d flags %#lx\n", 187 pnp_dbg(&dev->dev, " dma %d already set to %d flags %#lx\n",
188 idx, (int) res->start, res->flags); 188 idx, (int) res->start, res->flags);
189 return 0; 189 return 0;
190 } 190 }
@@ -205,7 +205,7 @@ static int pnp_assign_dma(struct pnp_dev *dev, struct pnp_dma *rule, int idx)
205 res->start = res->end = MAX_DMA_CHANNELS; 205 res->start = res->end = MAX_DMA_CHANNELS;
206#endif 206#endif
207 res->flags |= IORESOURCE_DISABLED; 207 res->flags |= IORESOURCE_DISABLED;
208 dev_dbg(&dev->dev, " disable dma %d\n", idx); 208 pnp_dbg(&dev->dev, " disable dma %d\n", idx);
209 209
210__add: 210__add:
211 pnp_add_dma_resource(dev, res->start, res->flags); 211 pnp_add_dma_resource(dev, res->start, res->flags);
@@ -238,7 +238,7 @@ static int pnp_assign_resources(struct pnp_dev *dev, int set)
238 int nport = 0, nmem = 0, nirq = 0, ndma = 0; 238 int nport = 0, nmem = 0, nirq = 0, ndma = 0;
239 int ret = 0; 239 int ret = 0;
240 240
241 dev_dbg(&dev->dev, "pnp_assign_resources, try dependent set %d\n", set); 241 pnp_dbg(&dev->dev, "pnp_assign_resources, try dependent set %d\n", set);
242 mutex_lock(&pnp_res_mutex); 242 mutex_lock(&pnp_res_mutex);
243 pnp_clean_resource_table(dev); 243 pnp_clean_resource_table(dev);
244 244
@@ -270,7 +270,7 @@ static int pnp_assign_resources(struct pnp_dev *dev, int set)
270 270
271 mutex_unlock(&pnp_res_mutex); 271 mutex_unlock(&pnp_res_mutex);
272 if (ret < 0) { 272 if (ret < 0) {
273 dev_dbg(&dev->dev, "pnp_assign_resources failed (%d)\n", ret); 273 pnp_dbg(&dev->dev, "pnp_assign_resources failed (%d)\n", ret);
274 pnp_clean_resource_table(dev); 274 pnp_clean_resource_table(dev);
275 } else 275 } else
276 dbg_pnp_show_resources(dev, "pnp_assign_resources succeeded"); 276 dbg_pnp_show_resources(dev, "pnp_assign_resources succeeded");
@@ -286,7 +286,7 @@ int pnp_auto_config_dev(struct pnp_dev *dev)
286 int i, ret; 286 int i, ret;
287 287
288 if (!pnp_can_configure(dev)) { 288 if (!pnp_can_configure(dev)) {
289 dev_dbg(&dev->dev, "configuration not supported\n"); 289 pnp_dbg(&dev->dev, "configuration not supported\n");
290 return -ENODEV; 290 return -ENODEV;
291 } 291 }
292 292
@@ -313,7 +313,7 @@ int pnp_auto_config_dev(struct pnp_dev *dev)
313int pnp_start_dev(struct pnp_dev *dev) 313int pnp_start_dev(struct pnp_dev *dev)
314{ 314{
315 if (!pnp_can_write(dev)) { 315 if (!pnp_can_write(dev)) {
316 dev_dbg(&dev->dev, "activation not supported\n"); 316 pnp_dbg(&dev->dev, "activation not supported\n");
317 return -EINVAL; 317 return -EINVAL;
318 } 318 }
319 319
@@ -336,7 +336,7 @@ int pnp_start_dev(struct pnp_dev *dev)
336int pnp_stop_dev(struct pnp_dev *dev) 336int pnp_stop_dev(struct pnp_dev *dev)
337{ 337{
338 if (!pnp_can_disable(dev)) { 338 if (!pnp_can_disable(dev)) {
339 dev_dbg(&dev->dev, "disabling not supported\n"); 339 pnp_dbg(&dev->dev, "disabling not supported\n");
340 return -EINVAL; 340 return -EINVAL;
341 } 341 }
342 if (dev->protocol->disable(dev) < 0) { 342 if (dev->protocol->disable(dev) < 0) {
diff --git a/drivers/pnp/pnpacpi/Makefile b/drivers/pnp/pnpacpi/Makefile
index 2d7a1e6908be..905326fcca85 100644
--- a/drivers/pnp/pnpacpi/Makefile
+++ b/drivers/pnp/pnpacpi/Makefile
@@ -3,7 +3,3 @@
3# 3#
4 4
5obj-y := core.o rsparser.o 5obj-y := core.o rsparser.o
6
7ifeq ($(CONFIG_PNP_DEBUG),y)
8EXTRA_CFLAGS += -DDEBUG
9endif
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index 53561d72b4ee..383e47c392a4 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -75,7 +75,7 @@ static int __init ispnpidacpi(char *id)
75 75
76static int pnpacpi_get_resources(struct pnp_dev *dev) 76static int pnpacpi_get_resources(struct pnp_dev *dev)
77{ 77{
78 dev_dbg(&dev->dev, "get resources\n"); 78 pnp_dbg(&dev->dev, "get resources\n");
79 return pnpacpi_parse_allocated_resource(dev); 79 return pnpacpi_parse_allocated_resource(dev);
80} 80}
81 81
@@ -86,7 +86,7 @@ static int pnpacpi_set_resources(struct pnp_dev *dev)
86 int ret; 86 int ret;
87 acpi_status status; 87 acpi_status status;
88 88
89 dev_dbg(&dev->dev, "set resources\n"); 89 pnp_dbg(&dev->dev, "set resources\n");
90 ret = pnpacpi_build_resource_template(dev, &buffer); 90 ret = pnpacpi_build_resource_template(dev, &buffer);
91 if (ret) 91 if (ret)
92 return ret; 92 return ret;
@@ -148,9 +148,13 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
148 acpi_status status; 148 acpi_status status;
149 struct pnp_dev *dev; 149 struct pnp_dev *dev;
150 150
151 /*
     152	 * If a PnP ACPI device is not present, the device
153 * driver should not be loaded.
154 */
151 status = acpi_get_handle(device->handle, "_CRS", &temp); 155 status = acpi_get_handle(device->handle, "_CRS", &temp);
152 if (ACPI_FAILURE(status) || !ispnpidacpi(acpi_device_hid(device)) || 156 if (ACPI_FAILURE(status) || !ispnpidacpi(acpi_device_hid(device)) ||
153 is_exclusive_device(device)) 157 is_exclusive_device(device) || (!device->status.present))
154 return 0; 158 return 0;
155 159
156 dev = pnp_alloc_dev(&pnpacpi_protocol, num, acpi_device_hid(device)); 160 dev = pnp_alloc_dev(&pnpacpi_protocol, num, acpi_device_hid(device));
@@ -255,14 +259,14 @@ int pnpacpi_disabled __initdata;
255static int __init pnpacpi_init(void) 259static int __init pnpacpi_init(void)
256{ 260{
257 if (acpi_disabled || pnpacpi_disabled) { 261 if (acpi_disabled || pnpacpi_disabled) {
258 pnp_info("PnP ACPI: disabled"); 262 printk(KERN_INFO "pnp: PnP ACPI: disabled\n");
259 return 0; 263 return 0;
260 } 264 }
261 pnp_info("PnP ACPI init"); 265 printk(KERN_INFO "pnp: PnP ACPI init\n");
262 pnp_register_protocol(&pnpacpi_protocol); 266 pnp_register_protocol(&pnpacpi_protocol);
263 register_acpi_bus_type(&acpi_pnp_bus); 267 register_acpi_bus_type(&acpi_pnp_bus);
264 acpi_get_devices(NULL, pnpacpi_add_device_handler, NULL, NULL); 268 acpi_get_devices(NULL, pnpacpi_add_device_handler, NULL, NULL);
265 pnp_info("PnP ACPI: found %d devices", num); 269 printk(KERN_INFO "pnp: PnP ACPI: found %d devices\n", num);
266 unregister_acpi_bus_type(&acpi_pnp_bus); 270 unregister_acpi_bus_type(&acpi_pnp_bus);
267 pnp_platform_devices = 1; 271 pnp_platform_devices = 1;
268 return 0; 272 return 0;
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index 95015cbfd33f..adf17856bacc 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -132,7 +132,8 @@ static void pnpacpi_parse_allocated_irqresource(struct pnp_dev *dev,
132 pnp_add_irq_resource(dev, irq, flags); 132 pnp_add_irq_resource(dev, irq, flags);
133} 133}
134 134
135static int dma_flags(int type, int bus_master, int transfer) 135static int dma_flags(struct pnp_dev *dev, int type, int bus_master,
136 int transfer)
136{ 137{
137 int flags = 0; 138 int flags = 0;
138 139
@@ -154,7 +155,7 @@ static int dma_flags(int type, int bus_master, int transfer)
154 default: 155 default:
155 /* Set a default value ? */ 156 /* Set a default value ? */
156 flags |= IORESOURCE_DMA_COMPATIBLE; 157 flags |= IORESOURCE_DMA_COMPATIBLE;
157 pnp_err("Invalid DMA type"); 158 dev_err(&dev->dev, "invalid DMA type %d\n", type);
158 } 159 }
159 switch (transfer) { 160 switch (transfer) {
160 case ACPI_TRANSFER_8: 161 case ACPI_TRANSFER_8:
@@ -169,7 +170,7 @@ static int dma_flags(int type, int bus_master, int transfer)
169 default: 170 default:
170 /* Set a default value ? */ 171 /* Set a default value ? */
171 flags |= IORESOURCE_DMA_8AND16BIT; 172 flags |= IORESOURCE_DMA_8AND16BIT;
172 pnp_err("Invalid DMA transfer type"); 173 dev_err(&dev->dev, "invalid DMA transfer type %d\n", transfer);
173 } 174 }
174 175
175 return flags; 176 return flags;
@@ -336,7 +337,7 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
336 case ACPI_RESOURCE_TYPE_DMA: 337 case ACPI_RESOURCE_TYPE_DMA:
337 dma = &res->data.dma; 338 dma = &res->data.dma;
338 if (dma->channel_count > 0 && dma->channels[0] != (u8) -1) 339 if (dma->channel_count > 0 && dma->channels[0] != (u8) -1)
339 flags = dma_flags(dma->type, dma->bus_master, 340 flags = dma_flags(dev, dma->type, dma->bus_master,
340 dma->transfer); 341 dma->transfer);
341 else 342 else
342 flags = IORESOURCE_DISABLED; 343 flags = IORESOURCE_DISABLED;
@@ -449,7 +450,7 @@ int pnpacpi_parse_allocated_resource(struct pnp_dev *dev)
449 acpi_handle handle = dev->data; 450 acpi_handle handle = dev->data;
450 acpi_status status; 451 acpi_status status;
451 452
452 dev_dbg(&dev->dev, "parse allocated resources\n"); 453 pnp_dbg(&dev->dev, "parse allocated resources\n");
453 454
454 pnp_init_resources(dev); 455 pnp_init_resources(dev);
455 456
@@ -477,7 +478,7 @@ static __init void pnpacpi_parse_dma_option(struct pnp_dev *dev,
477 for (i = 0; i < p->channel_count; i++) 478 for (i = 0; i < p->channel_count; i++)
478 map |= 1 << p->channels[i]; 479 map |= 1 << p->channels[i];
479 480
480 flags = dma_flags(p->type, p->bus_master, p->transfer); 481 flags = dma_flags(dev, p->type, p->bus_master, p->transfer);
481 pnp_register_dma_resource(dev, option_flags, map, flags); 482 pnp_register_dma_resource(dev, option_flags, map, flags);
482} 483}
483 484
@@ -608,8 +609,8 @@ static __init void pnpacpi_parse_address_option(struct pnp_dev *dev,
608 unsigned char flags = 0; 609 unsigned char flags = 0;
609 610
610 status = acpi_resource_to_address64(r, p); 611 status = acpi_resource_to_address64(r, p);
611 if (!ACPI_SUCCESS(status)) { 612 if (ACPI_FAILURE(status)) {
612 pnp_warn("PnPACPI: failed to convert resource type %d", 613 dev_warn(&dev->dev, "can't convert resource type %d\n",
613 r->type); 614 r->type);
614 return; 615 return;
615 } 616 }
@@ -735,7 +736,7 @@ int __init pnpacpi_parse_resource_option_data(struct pnp_dev *dev)
735 acpi_status status; 736 acpi_status status;
736 struct acpipnp_parse_option_s parse_data; 737 struct acpipnp_parse_option_s parse_data;
737 738
738 dev_dbg(&dev->dev, "parse resource options\n"); 739 pnp_dbg(&dev->dev, "parse resource options\n");
739 740
740 parse_data.dev = dev; 741 parse_data.dev = dev;
741 parse_data.option_flags = 0; 742 parse_data.option_flags = 0;
@@ -843,7 +844,7 @@ static void pnpacpi_encode_irq(struct pnp_dev *dev,
843 844
844 if (!pnp_resource_enabled(p)) { 845 if (!pnp_resource_enabled(p)) {
845 irq->interrupt_count = 0; 846 irq->interrupt_count = 0;
846 dev_dbg(&dev->dev, " encode irq (%s)\n", 847 pnp_dbg(&dev->dev, " encode irq (%s)\n",
847 p ? "disabled" : "missing"); 848 p ? "disabled" : "missing");
848 return; 849 return;
849 } 850 }
@@ -855,7 +856,7 @@ static void pnpacpi_encode_irq(struct pnp_dev *dev,
855 irq->interrupt_count = 1; 856 irq->interrupt_count = 1;
856 irq->interrupts[0] = p->start; 857 irq->interrupts[0] = p->start;
857 858
858 dev_dbg(&dev->dev, " encode irq %d %s %s %s (%d-byte descriptor)\n", 859 pnp_dbg(&dev->dev, " encode irq %d %s %s %s (%d-byte descriptor)\n",
859 (int) p->start, 860 (int) p->start,
860 triggering == ACPI_LEVEL_SENSITIVE ? "level" : "edge", 861 triggering == ACPI_LEVEL_SENSITIVE ? "level" : "edge",
861 polarity == ACPI_ACTIVE_LOW ? "low" : "high", 862 polarity == ACPI_ACTIVE_LOW ? "low" : "high",
@@ -872,7 +873,7 @@ static void pnpacpi_encode_ext_irq(struct pnp_dev *dev,
872 873
873 if (!pnp_resource_enabled(p)) { 874 if (!pnp_resource_enabled(p)) {
874 extended_irq->interrupt_count = 0; 875 extended_irq->interrupt_count = 0;
875 dev_dbg(&dev->dev, " encode extended irq (%s)\n", 876 pnp_dbg(&dev->dev, " encode extended irq (%s)\n",
876 p ? "disabled" : "missing"); 877 p ? "disabled" : "missing");
877 return; 878 return;
878 } 879 }
@@ -885,7 +886,7 @@ static void pnpacpi_encode_ext_irq(struct pnp_dev *dev,
885 extended_irq->interrupt_count = 1; 886 extended_irq->interrupt_count = 1;
886 extended_irq->interrupts[0] = p->start; 887 extended_irq->interrupts[0] = p->start;
887 888
888 dev_dbg(&dev->dev, " encode irq %d %s %s %s\n", (int) p->start, 889 pnp_dbg(&dev->dev, " encode irq %d %s %s %s\n", (int) p->start,
889 triggering == ACPI_LEVEL_SENSITIVE ? "level" : "edge", 890 triggering == ACPI_LEVEL_SENSITIVE ? "level" : "edge",
890 polarity == ACPI_ACTIVE_LOW ? "low" : "high", 891 polarity == ACPI_ACTIVE_LOW ? "low" : "high",
891 extended_irq->sharable == ACPI_SHARED ? "shared" : "exclusive"); 892 extended_irq->sharable == ACPI_SHARED ? "shared" : "exclusive");
@@ -899,7 +900,7 @@ static void pnpacpi_encode_dma(struct pnp_dev *dev,
899 900
900 if (!pnp_resource_enabled(p)) { 901 if (!pnp_resource_enabled(p)) {
901 dma->channel_count = 0; 902 dma->channel_count = 0;
902 dev_dbg(&dev->dev, " encode dma (%s)\n", 903 pnp_dbg(&dev->dev, " encode dma (%s)\n",
903 p ? "disabled" : "missing"); 904 p ? "disabled" : "missing");
904 return; 905 return;
905 } 906 }
@@ -934,7 +935,7 @@ static void pnpacpi_encode_dma(struct pnp_dev *dev,
934 dma->channel_count = 1; 935 dma->channel_count = 1;
935 dma->channels[0] = p->start; 936 dma->channels[0] = p->start;
936 937
937 dev_dbg(&dev->dev, " encode dma %d " 938 pnp_dbg(&dev->dev, " encode dma %d "
938 "type %#x transfer %#x master %d\n", 939 "type %#x transfer %#x master %d\n",
939 (int) p->start, dma->type, dma->transfer, dma->bus_master); 940 (int) p->start, dma->type, dma->transfer, dma->bus_master);
940} 941}
@@ -958,7 +959,7 @@ static void pnpacpi_encode_io(struct pnp_dev *dev,
958 io->address_length = 0; 959 io->address_length = 0;
959 } 960 }
960 961
961 dev_dbg(&dev->dev, " encode io %#x-%#x decode %#x\n", io->minimum, 962 pnp_dbg(&dev->dev, " encode io %#x-%#x decode %#x\n", io->minimum,
962 io->minimum + io->address_length - 1, io->io_decode); 963 io->minimum + io->address_length - 1, io->io_decode);
963} 964}
964 965
@@ -976,7 +977,7 @@ static void pnpacpi_encode_fixed_io(struct pnp_dev *dev,
976 fixed_io->address_length = 0; 977 fixed_io->address_length = 0;
977 } 978 }
978 979
979 dev_dbg(&dev->dev, " encode fixed_io %#x-%#x\n", fixed_io->address, 980 pnp_dbg(&dev->dev, " encode fixed_io %#x-%#x\n", fixed_io->address,
980 fixed_io->address + fixed_io->address_length - 1); 981 fixed_io->address + fixed_io->address_length - 1);
981} 982}
982 983
@@ -999,7 +1000,7 @@ static void pnpacpi_encode_mem24(struct pnp_dev *dev,
999 memory24->address_length = 0; 1000 memory24->address_length = 0;
1000 } 1001 }
1001 1002
1002 dev_dbg(&dev->dev, " encode mem24 %#x-%#x write_protect %#x\n", 1003 pnp_dbg(&dev->dev, " encode mem24 %#x-%#x write_protect %#x\n",
1003 memory24->minimum, 1004 memory24->minimum,
1004 memory24->minimum + memory24->address_length - 1, 1005 memory24->minimum + memory24->address_length - 1,
1005 memory24->write_protect); 1006 memory24->write_protect);
@@ -1023,7 +1024,7 @@ static void pnpacpi_encode_mem32(struct pnp_dev *dev,
1023 memory32->alignment = 0; 1024 memory32->alignment = 0;
1024 } 1025 }
1025 1026
1026 dev_dbg(&dev->dev, " encode mem32 %#x-%#x write_protect %#x\n", 1027 pnp_dbg(&dev->dev, " encode mem32 %#x-%#x write_protect %#x\n",
1027 memory32->minimum, 1028 memory32->minimum,
1028 memory32->minimum + memory32->address_length - 1, 1029 memory32->minimum + memory32->address_length - 1,
1029 memory32->write_protect); 1030 memory32->write_protect);
@@ -1046,7 +1047,7 @@ static void pnpacpi_encode_fixed_mem32(struct pnp_dev *dev,
1046 fixed_memory32->address_length = 0; 1047 fixed_memory32->address_length = 0;
1047 } 1048 }
1048 1049
1049 dev_dbg(&dev->dev, " encode fixed_mem32 %#x-%#x write_protect %#x\n", 1050 pnp_dbg(&dev->dev, " encode fixed_mem32 %#x-%#x write_protect %#x\n",
1050 fixed_memory32->address, 1051 fixed_memory32->address,
1051 fixed_memory32->address + fixed_memory32->address_length - 1, 1052 fixed_memory32->address + fixed_memory32->address_length - 1,
1052 fixed_memory32->write_protect); 1053 fixed_memory32->write_protect);
@@ -1060,7 +1061,7 @@ int pnpacpi_encode_resources(struct pnp_dev *dev, struct acpi_buffer *buffer)
1060 struct acpi_resource *resource = buffer->pointer; 1061 struct acpi_resource *resource = buffer->pointer;
1061 int port = 0, irq = 0, dma = 0, mem = 0; 1062 int port = 0, irq = 0, dma = 0, mem = 0;
1062 1063
1063 dev_dbg(&dev->dev, "encode %d resources\n", res_cnt); 1064 pnp_dbg(&dev->dev, "encode %d resources\n", res_cnt);
1064 while (i < res_cnt) { 1065 while (i < res_cnt) {
1065 switch (resource->type) { 1066 switch (resource->type) {
1066 case ACPI_RESOURCE_TYPE_IRQ: 1067 case ACPI_RESOURCE_TYPE_IRQ:
diff --git a/drivers/pnp/pnpbios/Makefile b/drivers/pnp/pnpbios/Makefile
index 310e2b3a7710..3cd3ed760605 100644
--- a/drivers/pnp/pnpbios/Makefile
+++ b/drivers/pnp/pnpbios/Makefile
@@ -5,7 +5,3 @@
5pnpbios-proc-$(CONFIG_PNPBIOS_PROC_FS) = proc.o 5pnpbios-proc-$(CONFIG_PNPBIOS_PROC_FS) = proc.o
6 6
7obj-y := core.o bioscalls.o rsparser.o $(pnpbios-proc-y) 7obj-y := core.o bioscalls.o rsparser.o $(pnpbios-proc-y)
8
9ifeq ($(CONFIG_PNP_DEBUG),y)
10EXTRA_CFLAGS += -DDEBUG
11endif
diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c
index 2bfe13369df5..996f64838079 100644
--- a/drivers/pnp/pnpbios/core.c
+++ b/drivers/pnp/pnpbios/core.c
@@ -211,7 +211,7 @@ static int pnpbios_get_resources(struct pnp_dev *dev)
211 if (!pnpbios_is_dynamic(dev)) 211 if (!pnpbios_is_dynamic(dev))
212 return -EPERM; 212 return -EPERM;
213 213
214 dev_dbg(&dev->dev, "get resources\n"); 214 pnp_dbg(&dev->dev, "get resources\n");
215 node = kzalloc(node_info.max_node_size, GFP_KERNEL); 215 node = kzalloc(node_info.max_node_size, GFP_KERNEL);
216 if (!node) 216 if (!node)
217 return -1; 217 return -1;
@@ -234,7 +234,7 @@ static int pnpbios_set_resources(struct pnp_dev *dev)
234 if (!pnpbios_is_dynamic(dev)) 234 if (!pnpbios_is_dynamic(dev))
235 return -EPERM; 235 return -EPERM;
236 236
237 dev_dbg(&dev->dev, "set resources\n"); 237 pnp_dbg(&dev->dev, "set resources\n");
238 node = kzalloc(node_info.max_node_size, GFP_KERNEL); 238 node = kzalloc(node_info.max_node_size, GFP_KERNEL);
239 if (!node) 239 if (!node)
240 return -1; 240 return -1;
diff --git a/drivers/pnp/pnpbios/rsparser.c b/drivers/pnp/pnpbios/rsparser.c
index ca567671379e..87b4f49a5251 100644
--- a/drivers/pnp/pnpbios/rsparser.c
+++ b/drivers/pnp/pnpbios/rsparser.c
@@ -87,7 +87,7 @@ static unsigned char *pnpbios_parse_allocated_resource_data(struct pnp_dev *dev,
87 if (!p) 87 if (!p)
88 return NULL; 88 return NULL;
89 89
90 dev_dbg(&dev->dev, "parse allocated resources\n"); 90 pnp_dbg(&dev->dev, "parse allocated resources\n");
91 91
92 pnp_init_resources(dev); 92 pnp_init_resources(dev);
93 93
@@ -324,7 +324,7 @@ pnpbios_parse_resource_option_data(unsigned char *p, unsigned char *end,
324 if (!p) 324 if (!p)
325 return NULL; 325 return NULL;
326 326
327 dev_dbg(&dev->dev, "parse resource options\n"); 327 pnp_dbg(&dev->dev, "parse resource options\n");
328 option_flags = 0; 328 option_flags = 0;
329 while ((char *)p < (char *)end) { 329 while ((char *)p < (char *)end) {
330 330
@@ -519,7 +519,7 @@ static void pnpbios_encode_mem(struct pnp_dev *dev, unsigned char *p,
519 p[10] = (len >> 8) & 0xff; 519 p[10] = (len >> 8) & 0xff;
520 p[11] = ((len >> 8) >> 8) & 0xff; 520 p[11] = ((len >> 8) >> 8) & 0xff;
521 521
522 dev_dbg(&dev->dev, " encode mem %#lx-%#lx\n", base, base + len - 1); 522 pnp_dbg(&dev->dev, " encode mem %#lx-%#lx\n", base, base + len - 1);
523} 523}
524 524
525static void pnpbios_encode_mem32(struct pnp_dev *dev, unsigned char *p, 525static void pnpbios_encode_mem32(struct pnp_dev *dev, unsigned char *p,
@@ -549,7 +549,7 @@ static void pnpbios_encode_mem32(struct pnp_dev *dev, unsigned char *p,
549 p[18] = (len >> 16) & 0xff; 549 p[18] = (len >> 16) & 0xff;
550 p[19] = (len >> 24) & 0xff; 550 p[19] = (len >> 24) & 0xff;
551 551
552 dev_dbg(&dev->dev, " encode mem32 %#lx-%#lx\n", base, base + len - 1); 552 pnp_dbg(&dev->dev, " encode mem32 %#lx-%#lx\n", base, base + len - 1);
553} 553}
554 554
555static void pnpbios_encode_fixed_mem32(struct pnp_dev *dev, unsigned char *p, 555static void pnpbios_encode_fixed_mem32(struct pnp_dev *dev, unsigned char *p,
@@ -575,7 +575,7 @@ static void pnpbios_encode_fixed_mem32(struct pnp_dev *dev, unsigned char *p,
575 p[10] = (len >> 16) & 0xff; 575 p[10] = (len >> 16) & 0xff;
576 p[11] = (len >> 24) & 0xff; 576 p[11] = (len >> 24) & 0xff;
577 577
578 dev_dbg(&dev->dev, " encode fixed_mem32 %#lx-%#lx\n", base, 578 pnp_dbg(&dev->dev, " encode fixed_mem32 %#lx-%#lx\n", base,
579 base + len - 1); 579 base + len - 1);
580} 580}
581 581
@@ -592,7 +592,7 @@ static void pnpbios_encode_irq(struct pnp_dev *dev, unsigned char *p,
592 p[1] = map & 0xff; 592 p[1] = map & 0xff;
593 p[2] = (map >> 8) & 0xff; 593 p[2] = (map >> 8) & 0xff;
594 594
595 dev_dbg(&dev->dev, " encode irq mask %#lx\n", map); 595 pnp_dbg(&dev->dev, " encode irq mask %#lx\n", map);
596} 596}
597 597
598static void pnpbios_encode_dma(struct pnp_dev *dev, unsigned char *p, 598static void pnpbios_encode_dma(struct pnp_dev *dev, unsigned char *p,
@@ -607,7 +607,7 @@ static void pnpbios_encode_dma(struct pnp_dev *dev, unsigned char *p,
607 607
608 p[1] = map & 0xff; 608 p[1] = map & 0xff;
609 609
610 dev_dbg(&dev->dev, " encode dma mask %#lx\n", map); 610 pnp_dbg(&dev->dev, " encode dma mask %#lx\n", map);
611} 611}
612 612
613static void pnpbios_encode_port(struct pnp_dev *dev, unsigned char *p, 613static void pnpbios_encode_port(struct pnp_dev *dev, unsigned char *p,
@@ -630,7 +630,7 @@ static void pnpbios_encode_port(struct pnp_dev *dev, unsigned char *p,
630 p[5] = (base >> 8) & 0xff; 630 p[5] = (base >> 8) & 0xff;
631 p[7] = len & 0xff; 631 p[7] = len & 0xff;
632 632
633 dev_dbg(&dev->dev, " encode io %#lx-%#lx\n", base, base + len - 1); 633 pnp_dbg(&dev->dev, " encode io %#lx-%#lx\n", base, base + len - 1);
634} 634}
635 635
636static void pnpbios_encode_fixed_port(struct pnp_dev *dev, unsigned char *p, 636static void pnpbios_encode_fixed_port(struct pnp_dev *dev, unsigned char *p,
@@ -651,7 +651,7 @@ static void pnpbios_encode_fixed_port(struct pnp_dev *dev, unsigned char *p,
651 p[2] = (base >> 8) & 0xff; 651 p[2] = (base >> 8) & 0xff;
652 p[3] = len & 0xff; 652 p[3] = len & 0xff;
653 653
654 dev_dbg(&dev->dev, " encode fixed_io %#lx-%#lx\n", base, 654 pnp_dbg(&dev->dev, " encode fixed_io %#lx-%#lx\n", base,
655 base + len - 1); 655 base + len - 1);
656} 656}
657 657
diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c
index c144bd575611..8473fe5ed7ff 100644
--- a/drivers/pnp/quirks.c
+++ b/drivers/pnp/quirks.c
@@ -337,9 +337,8 @@ void pnp_fixup_device(struct pnp_dev *dev)
337 for (f = pnp_fixups; *f->id; f++) { 337 for (f = pnp_fixups; *f->id; f++) {
338 if (!compare_pnp_id(dev->id, f->id)) 338 if (!compare_pnp_id(dev->id, f->id))
339 continue; 339 continue;
340#ifdef DEBUG 340 pnp_dbg(&dev->dev, "%s: calling %pF\n", f->id,
341 dev_dbg(&dev->dev, "%s: calling %pF\n", f->id, f->quirk_function); 341 f->quirk_function);
342#endif
343 f->quirk_function(dev); 342 f->quirk_function(dev);
344 } 343 }
345} 344}
diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
index dbae23acdd5b..f604061d2bb0 100644
--- a/drivers/pnp/resource.c
+++ b/drivers/pnp/resource.c
@@ -294,7 +294,7 @@ static int pci_dev_uses_irq(struct pnp_dev *pnp, struct pci_dev *pci,
294 u8 progif; 294 u8 progif;
295 295
296 if (pci->irq == irq) { 296 if (pci->irq == irq) {
297 dev_dbg(&pnp->dev, "device %s using irq %d\n", 297 pnp_dbg(&pnp->dev, " device %s using irq %d\n",
298 pci_name(pci), irq); 298 pci_name(pci), irq);
299 return 1; 299 return 1;
300 } 300 }
@@ -316,7 +316,7 @@ static int pci_dev_uses_irq(struct pnp_dev *pnp, struct pci_dev *pci,
316 if ((progif & 0x5) != 0x5) 316 if ((progif & 0x5) != 0x5)
317 if (pci_get_legacy_ide_irq(pci, 0) == irq || 317 if (pci_get_legacy_ide_irq(pci, 0) == irq ||
318 pci_get_legacy_ide_irq(pci, 1) == irq) { 318 pci_get_legacy_ide_irq(pci, 1) == irq) {
319 dev_dbg(&pnp->dev, "legacy IDE device %s " 319 pnp_dbg(&pnp->dev, " legacy IDE device %s "
320 "using irq %d\n", pci_name(pci), irq); 320 "using irq %d\n", pci_name(pci), irq);
321 return 1; 321 return 1;
322 } 322 }
@@ -517,7 +517,7 @@ struct pnp_resource *pnp_add_irq_resource(struct pnp_dev *dev, int irq,
517 res->start = irq; 517 res->start = irq;
518 res->end = irq; 518 res->end = irq;
519 519
520 dev_dbg(&dev->dev, " add irq %d flags %#x\n", irq, flags); 520 pnp_dbg(&dev->dev, " add irq %d flags %#x\n", irq, flags);
521 return pnp_res; 521 return pnp_res;
522} 522}
523 523
@@ -538,7 +538,7 @@ struct pnp_resource *pnp_add_dma_resource(struct pnp_dev *dev, int dma,
538 res->start = dma; 538 res->start = dma;
539 res->end = dma; 539 res->end = dma;
540 540
541 dev_dbg(&dev->dev, " add dma %d flags %#x\n", dma, flags); 541 pnp_dbg(&dev->dev, " add dma %d flags %#x\n", dma, flags);
542 return pnp_res; 542 return pnp_res;
543} 543}
544 544
@@ -562,7 +562,7 @@ struct pnp_resource *pnp_add_io_resource(struct pnp_dev *dev,
562 res->start = start; 562 res->start = start;
563 res->end = end; 563 res->end = end;
564 564
565 dev_dbg(&dev->dev, " add io %#llx-%#llx flags %#x\n", 565 pnp_dbg(&dev->dev, " add io %#llx-%#llx flags %#x\n",
566 (unsigned long long) start, (unsigned long long) end, flags); 566 (unsigned long long) start, (unsigned long long) end, flags);
567 return pnp_res; 567 return pnp_res;
568} 568}
@@ -587,7 +587,7 @@ struct pnp_resource *pnp_add_mem_resource(struct pnp_dev *dev,
587 res->start = start; 587 res->start = start;
588 res->end = end; 588 res->end = end;
589 589
590 dev_dbg(&dev->dev, " add mem %#llx-%#llx flags %#x\n", 590 pnp_dbg(&dev->dev, " add mem %#llx-%#llx flags %#x\n",
591 (unsigned long long) start, (unsigned long long) end, flags); 591 (unsigned long long) start, (unsigned long long) end, flags);
592 return pnp_res; 592 return pnp_res;
593} 593}
diff --git a/drivers/pnp/support.c b/drivers/pnp/support.c
index b42df1620718..63087d5ce609 100644
--- a/drivers/pnp/support.c
+++ b/drivers/pnp/support.c
@@ -75,18 +75,17 @@ char *pnp_resource_type_name(struct resource *res)
75 75
76void dbg_pnp_show_resources(struct pnp_dev *dev, char *desc) 76void dbg_pnp_show_resources(struct pnp_dev *dev, char *desc)
77{ 77{
78#ifdef DEBUG
79 char buf[128]; 78 char buf[128];
80 int len; 79 int len;
81 struct pnp_resource *pnp_res; 80 struct pnp_resource *pnp_res;
82 struct resource *res; 81 struct resource *res;
83 82
84 if (list_empty(&dev->resources)) { 83 if (list_empty(&dev->resources)) {
85 dev_dbg(&dev->dev, "%s: no current resources\n", desc); 84 pnp_dbg(&dev->dev, "%s: no current resources\n", desc);
86 return; 85 return;
87 } 86 }
88 87
89 dev_dbg(&dev->dev, "%s: current resources:\n", desc); 88 pnp_dbg(&dev->dev, "%s: current resources:\n", desc);
90 list_for_each_entry(pnp_res, &dev->resources, list) { 89 list_for_each_entry(pnp_res, &dev->resources, list) {
91 res = &pnp_res->res; 90 res = &pnp_res->res;
92 len = 0; 91 len = 0;
@@ -95,7 +94,7 @@ void dbg_pnp_show_resources(struct pnp_dev *dev, char *desc)
95 pnp_resource_type_name(res)); 94 pnp_resource_type_name(res));
96 95
97 if (res->flags & IORESOURCE_DISABLED) { 96 if (res->flags & IORESOURCE_DISABLED) {
98 dev_dbg(&dev->dev, "%sdisabled\n", buf); 97 pnp_dbg(&dev->dev, "%sdisabled\n", buf);
99 continue; 98 continue;
100 } 99 }
101 100
@@ -116,9 +115,8 @@ void dbg_pnp_show_resources(struct pnp_dev *dev, char *desc)
116 res->flags); 115 res->flags);
117 break; 116 break;
118 } 117 }
119 dev_dbg(&dev->dev, "%s\n", buf); 118 pnp_dbg(&dev->dev, "%s\n", buf);
120 } 119 }
121#endif
122} 120}
123 121
124char *pnp_option_priority_name(struct pnp_option *option) 122char *pnp_option_priority_name(struct pnp_option *option)
@@ -136,7 +134,6 @@ char *pnp_option_priority_name(struct pnp_option *option)
136 134
137void dbg_pnp_show_option(struct pnp_dev *dev, struct pnp_option *option) 135void dbg_pnp_show_option(struct pnp_dev *dev, struct pnp_option *option)
138{ 136{
139#ifdef DEBUG
140 char buf[128]; 137 char buf[128];
141 int len = 0, i; 138 int len = 0, i;
142 struct pnp_port *port; 139 struct pnp_port *port;
@@ -208,6 +205,5 @@ void dbg_pnp_show_option(struct pnp_dev *dev, struct pnp_option *option)
208 "flags %#x", dma->map, dma->flags); 205 "flags %#x", dma->map, dma->flags);
209 break; 206 break;
210 } 207 }
211 dev_dbg(&dev->dev, "%s\n", buf); 208 pnp_dbg(&dev->dev, "%s\n", buf);
212#endif
213} 209}
diff --git a/drivers/ps3/ps3-lpm.c b/drivers/ps3/ps3-lpm.c
index 85edf945ab86..204158cf7a55 100644
--- a/drivers/ps3/ps3-lpm.c
+++ b/drivers/ps3/ps3-lpm.c
@@ -22,6 +22,7 @@
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/interrupt.h> 23#include <linux/interrupt.h>
24#include <linux/uaccess.h> 24#include <linux/uaccess.h>
25#include <asm/smp.h>
25#include <asm/time.h> 26#include <asm/time.h>
26#include <asm/ps3.h> 27#include <asm/ps3.h>
27#include <asm/lv1call.h> 28#include <asm/lv1call.h>
diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c
index a926c896475e..643a6b98462b 100644
--- a/drivers/rapidio/rio-scan.c
+++ b/drivers/rapidio/rio-scan.c
@@ -879,7 +879,7 @@ static void rio_update_route_tables(struct rio_mport *port)
879 * link, then start recursive peer enumeration. Returns %0 if 879 * link, then start recursive peer enumeration. Returns %0 if
880 * enumeration succeeds or %-EBUSY if enumeration fails. 880 * enumeration succeeds or %-EBUSY if enumeration fails.
881 */ 881 */
882int rio_enum_mport(struct rio_mport *mport) 882int __devinit rio_enum_mport(struct rio_mport *mport)
883{ 883{
884 struct rio_net *net = NULL; 884 struct rio_net *net = NULL;
885 int rc = 0; 885 int rc = 0;
@@ -972,7 +972,7 @@ static void rio_enum_timeout(unsigned long data)
972 * peer discovery. Returns %0 if discovery succeeds or %-EBUSY 972 * peer discovery. Returns %0 if discovery succeeds or %-EBUSY
973 * on failure. 973 * on failure.
974 */ 974 */
975int rio_disc_mport(struct rio_mport *mport) 975int __devinit rio_disc_mport(struct rio_mport *mport)
976{ 976{
977 struct rio_net *net = NULL; 977 struct rio_net *net = NULL;
978 int enum_timeout_flag = 0; 978 int enum_timeout_flag = 0;
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
index 680661abbc4b..6395c780008b 100644
--- a/drivers/rapidio/rio.c
+++ b/drivers/rapidio/rio.c
@@ -467,7 +467,7 @@ static int __devinit rio_init(void)
467 467
468device_initcall(rio_init); 468device_initcall(rio_init);
469 469
470int rio_init_mports(void) 470int __devinit rio_init_mports(void)
471{ 471{
472 int rc = 0; 472 int rc = 0;
473 struct rio_mport *port; 473 struct rio_mport *port;
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 4dada6ee1119..39360e2a4540 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -1,6 +1,4 @@
1menu "Voltage and Current regulators" 1menuconfig REGULATOR
2
3config REGULATOR
4 bool "Voltage and Current Regulator Support" 2 bool "Voltage and Current Regulator Support"
5 default n 3 default n
6 help 4 help
@@ -23,21 +21,20 @@ config REGULATOR
23 21
24 If unsure, say no. 22 If unsure, say no.
25 23
24if REGULATOR
25
26config REGULATOR_DEBUG 26config REGULATOR_DEBUG
27 bool "Regulator debug support" 27 bool "Regulator debug support"
28 depends on REGULATOR
29 help 28 help
30 Say yes here to enable debugging support. 29 Say yes here to enable debugging support.
31 30
32config REGULATOR_FIXED_VOLTAGE 31config REGULATOR_FIXED_VOLTAGE
33 tristate 32 tristate
34 default n 33 default n
35 select REGULATOR
36 34
37config REGULATOR_VIRTUAL_CONSUMER 35config REGULATOR_VIRTUAL_CONSUMER
38 tristate "Virtual regulator consumer support" 36 tristate "Virtual regulator consumer support"
39 default n 37 default n
40 select REGULATOR
41 help 38 help
42 This driver provides a virtual consumer for the voltage and 39 This driver provides a virtual consumer for the voltage and
43 current regulator API which provides sysfs controls for 40 current regulator API which provides sysfs controls for
@@ -49,7 +46,6 @@ config REGULATOR_VIRTUAL_CONSUMER
49config REGULATOR_BQ24022 46config REGULATOR_BQ24022
50 tristate "TI bq24022 Dual Input 1-Cell Li-Ion Charger IC" 47 tristate "TI bq24022 Dual Input 1-Cell Li-Ion Charger IC"
51 default n 48 default n
52 select REGULATOR
53 help 49 help
54 This driver controls a TI bq24022 Charger attached via 50 This driver controls a TI bq24022 Charger attached via
55 GPIOs. The provided current regulator can enable/disable 51 GPIOs. The provided current regulator can enable/disable
@@ -59,7 +55,6 @@ config REGULATOR_BQ24022
59config REGULATOR_WM8350 55config REGULATOR_WM8350
60 tristate "Wolfson Microelectroncis WM8350 AudioPlus PMIC" 56 tristate "Wolfson Microelectroncis WM8350 AudioPlus PMIC"
61 depends on MFD_WM8350 57 depends on MFD_WM8350
62 select REGULATOR
63 help 58 help
64 This driver provides support for the voltage and current regulators 59 This driver provides support for the voltage and current regulators
65 of the WM8350 AudioPlus PMIC. 60 of the WM8350 AudioPlus PMIC.
@@ -67,7 +62,6 @@ config REGULATOR_WM8350
67config REGULATOR_WM8400 62config REGULATOR_WM8400
68 tristate "Wolfson Microelectroncis WM8400 AudioPlus PMIC" 63 tristate "Wolfson Microelectroncis WM8400 AudioPlus PMIC"
69 depends on MFD_WM8400 64 depends on MFD_WM8400
70 select REGULATOR
71 help 65 help
72 This driver provides support for the voltage regulators of the 66 This driver provides support for the voltage regulators of the
73 WM8400 AudioPlus PMIC. 67 WM8400 AudioPlus PMIC.
@@ -75,9 +69,8 @@ config REGULATOR_WM8400
75config REGULATOR_DA903X 69config REGULATOR_DA903X
76 tristate "Support regulators on Dialog Semiconductor DA9030/DA9034 PMIC" 70 tristate "Support regulators on Dialog Semiconductor DA9030/DA9034 PMIC"
77 depends on PMIC_DA903X 71 depends on PMIC_DA903X
78 select REGULATOR
79 help 72 help
80 Say y here to support the BUCKs and LDOs regulators found on 73 Say y here to support the BUCKs and LDOs regulators found on
81 Dialog Semiconductor DA9030/DA9034 PMIC. 74 Dialog Semiconductor DA9030/DA9034 PMIC.
82 75
83endmenu 76endif
diff --git a/drivers/regulator/da903x.c b/drivers/regulator/da903x.c
index 3688e339db87..773b29cec8be 100644
--- a/drivers/regulator/da903x.c
+++ b/drivers/regulator/da903x.c
@@ -79,6 +79,11 @@ struct da903x_regulator_info {
79 int enable_bit; 79 int enable_bit;
80}; 80};
81 81
82static inline struct device *to_da903x_dev(struct regulator_dev *rdev)
83{
84 return rdev_get_dev(rdev)->parent->parent;
85}
86
82static inline int check_range(struct da903x_regulator_info *info, 87static inline int check_range(struct da903x_regulator_info *info,
83 int min_uV, int max_uV) 88 int min_uV, int max_uV)
84{ 89{
@@ -93,7 +98,7 @@ static int da903x_set_ldo_voltage(struct regulator_dev *rdev,
93 int min_uV, int max_uV) 98 int min_uV, int max_uV)
94{ 99{
95 struct da903x_regulator_info *info = rdev_get_drvdata(rdev); 100 struct da903x_regulator_info *info = rdev_get_drvdata(rdev);
96 struct device *da9034_dev = rdev_get_dev(rdev)->parent; 101 struct device *da9034_dev = to_da903x_dev(rdev);
97 uint8_t val, mask; 102 uint8_t val, mask;
98 103
99 if (check_range(info, min_uV, max_uV)) { 104 if (check_range(info, min_uV, max_uV)) {
@@ -111,7 +116,7 @@ static int da903x_set_ldo_voltage(struct regulator_dev *rdev,
111static int da903x_get_voltage(struct regulator_dev *rdev) 116static int da903x_get_voltage(struct regulator_dev *rdev)
112{ 117{
113 struct da903x_regulator_info *info = rdev_get_drvdata(rdev); 118 struct da903x_regulator_info *info = rdev_get_drvdata(rdev);
114 struct device *da9034_dev = rdev_get_dev(rdev)->parent; 119 struct device *da9034_dev = to_da903x_dev(rdev);
115 uint8_t val, mask; 120 uint8_t val, mask;
116 int ret; 121 int ret;
117 122
@@ -128,7 +133,7 @@ static int da903x_get_voltage(struct regulator_dev *rdev)
128static int da903x_enable(struct regulator_dev *rdev) 133static int da903x_enable(struct regulator_dev *rdev)
129{ 134{
130 struct da903x_regulator_info *info = rdev_get_drvdata(rdev); 135 struct da903x_regulator_info *info = rdev_get_drvdata(rdev);
131 struct device *da9034_dev = rdev_get_dev(rdev)->parent; 136 struct device *da9034_dev = to_da903x_dev(rdev);
132 137
133 return da903x_set_bits(da9034_dev, info->enable_reg, 138 return da903x_set_bits(da9034_dev, info->enable_reg,
134 1 << info->enable_bit); 139 1 << info->enable_bit);
@@ -137,7 +142,7 @@ static int da903x_enable(struct regulator_dev *rdev)
137static int da903x_disable(struct regulator_dev *rdev) 142static int da903x_disable(struct regulator_dev *rdev)
138{ 143{
139 struct da903x_regulator_info *info = rdev_get_drvdata(rdev); 144 struct da903x_regulator_info *info = rdev_get_drvdata(rdev);
140 struct device *da9034_dev = rdev_get_dev(rdev)->parent; 145 struct device *da9034_dev = to_da903x_dev(rdev);
141 146
142 return da903x_clr_bits(da9034_dev, info->enable_reg, 147 return da903x_clr_bits(da9034_dev, info->enable_reg,
143 1 << info->enable_bit); 148 1 << info->enable_bit);
@@ -146,7 +151,7 @@ static int da903x_disable(struct regulator_dev *rdev)
146static int da903x_is_enabled(struct regulator_dev *rdev) 151static int da903x_is_enabled(struct regulator_dev *rdev)
147{ 152{
148 struct da903x_regulator_info *info = rdev_get_drvdata(rdev); 153 struct da903x_regulator_info *info = rdev_get_drvdata(rdev);
149 struct device *da9034_dev = rdev_get_dev(rdev)->parent; 154 struct device *da9034_dev = to_da903x_dev(rdev);
150 uint8_t reg_val; 155 uint8_t reg_val;
151 int ret; 156 int ret;
152 157
@@ -162,7 +167,7 @@ static int da9030_set_ldo1_15_voltage(struct regulator_dev *rdev,
162 int min_uV, int max_uV) 167 int min_uV, int max_uV)
163{ 168{
164 struct da903x_regulator_info *info = rdev_get_drvdata(rdev); 169 struct da903x_regulator_info *info = rdev_get_drvdata(rdev);
165 struct device *da903x_dev = rdev_get_dev(rdev)->parent; 170 struct device *da903x_dev = to_da903x_dev(rdev);
166 uint8_t val, mask; 171 uint8_t val, mask;
167 int ret; 172 int ret;
168 173
@@ -189,7 +194,7 @@ static int da9030_set_ldo14_voltage(struct regulator_dev *rdev,
189 int min_uV, int max_uV) 194 int min_uV, int max_uV)
190{ 195{
191 struct da903x_regulator_info *info = rdev_get_drvdata(rdev); 196 struct da903x_regulator_info *info = rdev_get_drvdata(rdev);
192 struct device *da903x_dev = rdev_get_dev(rdev)->parent; 197 struct device *da903x_dev = to_da903x_dev(rdev);
193 uint8_t val, mask; 198 uint8_t val, mask;
194 int thresh; 199 int thresh;
195 200
@@ -215,7 +220,7 @@ static int da9030_set_ldo14_voltage(struct regulator_dev *rdev,
215static int da9030_get_ldo14_voltage(struct regulator_dev *rdev) 220static int da9030_get_ldo14_voltage(struct regulator_dev *rdev)
216{ 221{
217 struct da903x_regulator_info *info = rdev_get_drvdata(rdev); 222 struct da903x_regulator_info *info = rdev_get_drvdata(rdev);
218 struct device *da903x_dev = rdev_get_dev(rdev)->parent; 223 struct device *da903x_dev = to_da903x_dev(rdev);
219 uint8_t val, mask; 224 uint8_t val, mask;
220 int ret; 225 int ret;
221 226
@@ -238,7 +243,7 @@ static int da9034_set_dvc_voltage(struct regulator_dev *rdev,
238 int min_uV, int max_uV) 243 int min_uV, int max_uV)
239{ 244{
240 struct da903x_regulator_info *info = rdev_get_drvdata(rdev); 245 struct da903x_regulator_info *info = rdev_get_drvdata(rdev);
241 struct device *da9034_dev = rdev_get_dev(rdev)->parent; 246 struct device *da9034_dev = to_da903x_dev(rdev);
242 uint8_t val, mask; 247 uint8_t val, mask;
243 int ret; 248 int ret;
244 249
@@ -264,7 +269,7 @@ static int da9034_set_ldo12_voltage(struct regulator_dev *rdev,
264 int min_uV, int max_uV) 269 int min_uV, int max_uV)
265{ 270{
266 struct da903x_regulator_info *info = rdev_get_drvdata(rdev); 271 struct da903x_regulator_info *info = rdev_get_drvdata(rdev);
267 struct device *da9034_dev = rdev_get_dev(rdev)->parent; 272 struct device *da9034_dev = to_da903x_dev(rdev);
268 uint8_t val, mask; 273 uint8_t val, mask;
269 274
270 if (check_range(info, min_uV, max_uV)) { 275 if (check_range(info, min_uV, max_uV)) {
@@ -283,7 +288,7 @@ static int da9034_set_ldo12_voltage(struct regulator_dev *rdev,
283static int da9034_get_ldo12_voltage(struct regulator_dev *rdev) 288static int da9034_get_ldo12_voltage(struct regulator_dev *rdev)
284{ 289{
285 struct da903x_regulator_info *info = rdev_get_drvdata(rdev); 290 struct da903x_regulator_info *info = rdev_get_drvdata(rdev);
286 struct device *da9034_dev = rdev_get_dev(rdev)->parent; 291 struct device *da9034_dev = to_da903x_dev(rdev);
287 uint8_t val, mask; 292 uint8_t val, mask;
288 int ret; 293 int ret;
289 294
@@ -466,7 +471,7 @@ static int __devinit da903x_regulator_probe(struct platform_device *pdev)
466 if (ri->desc.id == DA9030_ID_LDO1 || ri->desc.id == DA9030_ID_LDO15) 471 if (ri->desc.id == DA9030_ID_LDO1 || ri->desc.id == DA9030_ID_LDO15)
467 ri->desc.ops = &da9030_regulator_ldo1_15_ops; 472 ri->desc.ops = &da9030_regulator_ldo1_15_ops;
468 473
469 rdev = regulator_register(&ri->desc, pdev->dev.parent, ri); 474 rdev = regulator_register(&ri->desc, &pdev->dev, ri);
470 if (IS_ERR(rdev)) { 475 if (IS_ERR(rdev)) {
471 dev_err(&pdev->dev, "failed to register regulator %s\n", 476 dev_err(&pdev->dev, "failed to register regulator %s\n",
472 ri->desc.name); 477 ri->desc.name);
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 814f49fde530..123092d8a984 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -171,10 +171,10 @@ config RTC_DRV_MAX6900
171 will be called rtc-max6900. 171 will be called rtc-max6900.
172 172
173config RTC_DRV_RS5C372 173config RTC_DRV_RS5C372
174 tristate "Ricoh RS5C372A/B, RV5C386, RV5C387A" 174 tristate "Ricoh R2025S/D, RS5C372A/B, RV5C386, RV5C387A"
175 help 175 help
176 If you say yes here you get support for the 176 If you say yes here you get support for the
177 Ricoh RS5C372A, RS5C372B, RV5C386, and RV5C387A RTC chips. 177 Ricoh R2025S/D, RS5C372A, RS5C372B, RV5C386, and RV5C387A RTC chips.
178 178
179 This driver can also be built as a module. If so, the module 179 This driver can also be built as a module. If so, the module
180 will be called rtc-rs5c372. 180 will be called rtc-rs5c372.
@@ -246,6 +246,16 @@ config RTC_DRV_TWL92330
246 platforms. The support is integrated with the rest of 246 platforms. The support is integrated with the rest of
247 the Menelaus driver; it's not separate module. 247 the Menelaus driver; it's not separate module.
248 248
249config RTC_DRV_TWL4030
250 tristate "TI TWL4030/TWL5030/TPS659x0"
251 depends on RTC_CLASS && TWL4030_CORE
252 help
253 If you say yes here you get support for the RTC on the
254 TWL4030 family chips, used mostly with OMAP3 platforms.
255
256 This driver can also be built as a module. If so, the module
257 will be called rtc-twl4030.
258
249config RTC_DRV_S35390A 259config RTC_DRV_S35390A
250 tristate "Seiko Instruments S-35390A" 260 tristate "Seiko Instruments S-35390A"
251 select BITREVERSE 261 select BITREVERSE
@@ -267,6 +277,14 @@ config RTC_DRV_FM3130
267 This driver can also be built as a module. If so the module 277 This driver can also be built as a module. If so the module
268 will be called rtc-fm3130. 278 will be called rtc-fm3130.
269 279
280config RTC_DRV_RX8581
281 tristate "Epson RX-8581"
282 help
283 If you say yes here you will get support for the Epson RX-8581.
284
285 This driver can also be built as a module. If so the module
286 will be called rtc-rx8581.
287
270endif # I2C 288endif # I2C
271 289
272comment "SPI RTC drivers" 290comment "SPI RTC drivers"
@@ -292,6 +310,17 @@ config RTC_DRV_DS1305
292 This driver can also be built as a module. If so, the module 310 This driver can also be built as a module. If so, the module
293 will be called rtc-ds1305. 311 will be called rtc-ds1305.
294 312
313config RTC_DRV_DS1390
314 tristate "Dallas/Maxim DS1390/93/94"
315 help
316 If you say yes here you get support for the DS1390/93/94 chips.
317
318 This driver only supports the RTC feature, and not other chip
319 features such as alarms and trickle charging.
320
321 This driver can also be built as a module. If so, the module
322 will be called rtc-ds1390.
323
295config RTC_DRV_MAX6902 324config RTC_DRV_MAX6902
296 tristate "Maxim MAX6902" 325 tristate "Maxim MAX6902"
297 help 326 help
@@ -458,6 +487,16 @@ config RTC_DRV_V3020
458 This driver can also be built as a module. If so, the module 487 This driver can also be built as a module. If so, the module
459 will be called rtc-v3020. 488 will be called rtc-v3020.
460 489
490config RTC_DRV_WM8350
491 tristate "Wolfson Microelectronics WM8350 RTC"
492 depends on MFD_WM8350
493 help
494 If you say yes here you will get support for the RTC subsystem
495 of the Wolfson Microelectronics WM8350.
496
497 This driver can also be built as a module. If so, the module
498 will be called "rtc-wm8350".
499
461comment "on-CPU RTC drivers" 500comment "on-CPU RTC drivers"
462 501
463config RTC_DRV_OMAP 502config RTC_DRV_OMAP
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index d6a9ac7176ea..6e79c912bf9e 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_RTC_DRV_DS1302) += rtc-ds1302.o
28obj-$(CONFIG_RTC_DRV_DS1305) += rtc-ds1305.o 28obj-$(CONFIG_RTC_DRV_DS1305) += rtc-ds1305.o
29obj-$(CONFIG_RTC_DRV_DS1307) += rtc-ds1307.o 29obj-$(CONFIG_RTC_DRV_DS1307) += rtc-ds1307.o
30obj-$(CONFIG_RTC_DRV_DS1374) += rtc-ds1374.o 30obj-$(CONFIG_RTC_DRV_DS1374) += rtc-ds1374.o
31obj-$(CONFIG_RTC_DRV_DS1390) += rtc-ds1390.o
31obj-$(CONFIG_RTC_DRV_DS1511) += rtc-ds1511.o 32obj-$(CONFIG_RTC_DRV_DS1511) += rtc-ds1511.o
32obj-$(CONFIG_RTC_DRV_DS1553) += rtc-ds1553.o 33obj-$(CONFIG_RTC_DRV_DS1553) += rtc-ds1553.o
33obj-$(CONFIG_RTC_DRV_DS1672) += rtc-ds1672.o 34obj-$(CONFIG_RTC_DRV_DS1672) += rtc-ds1672.o
@@ -57,12 +58,15 @@ obj-$(CONFIG_RTC_DRV_R9701) += rtc-r9701.o
57obj-$(CONFIG_RTC_DRV_RS5C313) += rtc-rs5c313.o 58obj-$(CONFIG_RTC_DRV_RS5C313) += rtc-rs5c313.o
58obj-$(CONFIG_RTC_DRV_RS5C348) += rtc-rs5c348.o 59obj-$(CONFIG_RTC_DRV_RS5C348) += rtc-rs5c348.o
59obj-$(CONFIG_RTC_DRV_RS5C372) += rtc-rs5c372.o 60obj-$(CONFIG_RTC_DRV_RS5C372) += rtc-rs5c372.o
61obj-$(CONFIG_RTC_DRV_RX8581) += rtc-rx8581.o
60obj-$(CONFIG_RTC_DRV_S35390A) += rtc-s35390a.o 62obj-$(CONFIG_RTC_DRV_S35390A) += rtc-s35390a.o
61obj-$(CONFIG_RTC_DRV_S3C) += rtc-s3c.o 63obj-$(CONFIG_RTC_DRV_S3C) += rtc-s3c.o
62obj-$(CONFIG_RTC_DRV_SA1100) += rtc-sa1100.o 64obj-$(CONFIG_RTC_DRV_SA1100) += rtc-sa1100.o
63obj-$(CONFIG_RTC_DRV_SH) += rtc-sh.o 65obj-$(CONFIG_RTC_DRV_SH) += rtc-sh.o
64obj-$(CONFIG_RTC_DRV_STK17TA8) += rtc-stk17ta8.o 66obj-$(CONFIG_RTC_DRV_STK17TA8) += rtc-stk17ta8.o
65obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o 67obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o
68obj-$(CONFIG_RTC_DRV_TWL4030) += rtc-twl4030.o
66obj-$(CONFIG_RTC_DRV_V3020) += rtc-v3020.o 69obj-$(CONFIG_RTC_DRV_V3020) += rtc-v3020.o
67obj-$(CONFIG_RTC_DRV_VR41XX) += rtc-vr41xx.o 70obj-$(CONFIG_RTC_DRV_VR41XX) += rtc-vr41xx.o
71obj-$(CONFIG_RTC_DRV_WM8350) += rtc-wm8350.o
68obj-$(CONFIG_RTC_DRV_X1205) += rtc-x1205.o 72obj-$(CONFIG_RTC_DRV_X1205) += rtc-x1205.o
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 7af60b98d8a4..a04c1b6b1575 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -271,7 +271,7 @@ int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
271 dev_dbg(&rtc->dev, "alarm rollover: %s\n", "year"); 271 dev_dbg(&rtc->dev, "alarm rollover: %s\n", "year");
272 do { 272 do {
273 alarm->time.tm_year++; 273 alarm->time.tm_year++;
274 } while (!rtc_valid_tm(&alarm->time)); 274 } while (rtc_valid_tm(&alarm->time) != 0);
275 break; 275 break;
276 276
277 default: 277 default:
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 5549231179a2..6cf8e282338f 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -794,7 +794,7 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
794 goto cleanup2; 794 goto cleanup2;
795 } 795 }
796 796
797 pr_info("%s: alarms up to one %s%s, %zd bytes nvram, %s irqs\n", 797 pr_info("%s: alarms up to one %s%s, %zd bytes nvram%s\n",
798 cmos_rtc.rtc->dev.bus_id, 798 cmos_rtc.rtc->dev.bus_id,
799 is_valid_irq(rtc_irq) 799 is_valid_irq(rtc_irq)
800 ? (cmos_rtc.mon_alrm 800 ? (cmos_rtc.mon_alrm
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index 079e9ed907e0..ecdea44ae4e5 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -446,9 +446,6 @@ static int rtc_dev_release(struct inode *inode, struct file *file)
446 if (rtc->ops->release) 446 if (rtc->ops->release)
447 rtc->ops->release(rtc->dev.parent); 447 rtc->ops->release(rtc->dev.parent);
448 448
449 if (file->f_flags & FASYNC)
450 rtc_dev_fasync(-1, file, 0);
451
452 clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags); 449 clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags);
453 return 0; 450 return 0;
454} 451}
diff --git a/drivers/rtc/rtc-ds1390.c b/drivers/rtc/rtc-ds1390.c
new file mode 100644
index 000000000000..599e976bf014
--- /dev/null
+++ b/drivers/rtc/rtc-ds1390.c
@@ -0,0 +1,220 @@
1/*
2 * rtc-ds1390.c -- driver for DS1390/93/94
3 *
4 * Copyright (C) 2008 Mercury IMC Ltd
5 * Written by Mark Jackson <mpfj@mimc.co.uk>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
 11 * NOTE: Currently this driver only supports the bare minimum: reading and
 12 * writing the RTC. The extra features provided by the chip family
 13 * (alarms, trickle charger, different control registers) are unavailable.
14 */
15
16#include <linux/platform_device.h>
17#include <linux/rtc.h>
18#include <linux/spi/spi.h>
19#include <linux/bcd.h>
20
21#define DS1390_REG_100THS 0x00
22#define DS1390_REG_SECONDS 0x01
23#define DS1390_REG_MINUTES 0x02
24#define DS1390_REG_HOURS 0x03
25#define DS1390_REG_DAY 0x04
26#define DS1390_REG_DATE 0x05
27#define DS1390_REG_MONTH_CENT 0x06
28#define DS1390_REG_YEAR 0x07
29
30#define DS1390_REG_ALARM_100THS 0x08
31#define DS1390_REG_ALARM_SECONDS 0x09
32#define DS1390_REG_ALARM_MINUTES 0x0A
33#define DS1390_REG_ALARM_HOURS 0x0B
34#define DS1390_REG_ALARM_DAY_DATE 0x0C
35
36#define DS1390_REG_CONTROL 0x0D
37#define DS1390_REG_STATUS 0x0E
38#define DS1390_REG_TRICKLE 0x0F
39
40struct ds1390 {
41 struct rtc_device *rtc;
42 u8 txrx_buf[9]; /* cmd + 8 registers */
43};
44
45static void ds1390_set_reg(struct device *dev, unsigned char address,
46 unsigned char data)
47{
48 struct spi_device *spi = to_spi_device(dev);
49 struct ds1390 *chip = dev_get_drvdata(dev);
50
51 /* Set MSB to indicate write */
52 chip->txrx_buf[0] = address | 0x80;
53 chip->txrx_buf[1] = data;
54
55 /* do the i/o */
56 spi_write_then_read(spi, chip->txrx_buf, 2, NULL, 0);
57}
58
59static int ds1390_get_reg(struct device *dev, unsigned char address,
60 unsigned char *data)
61{
62 struct spi_device *spi = to_spi_device(dev);
63 struct ds1390 *chip = dev_get_drvdata(dev);
64 int status;
65
66 if (!data)
67 return -EINVAL;
68
69 /* Clear MSB to indicate read */
70 chip->txrx_buf[0] = address & 0x7f;
71 /* do the i/o */
72 status = spi_write_then_read(spi, chip->txrx_buf, 1, chip->txrx_buf, 1);
73 if (status != 0)
74 return status;
75
76 *data = chip->txrx_buf[1];
77
78 return 0;
79}
80
81static int ds1390_get_datetime(struct device *dev, struct rtc_time *dt)
82{
83 struct spi_device *spi = to_spi_device(dev);
84 struct ds1390 *chip = dev_get_drvdata(dev);
85 int status;
86
87 /* build the message */
88 chip->txrx_buf[0] = DS1390_REG_SECONDS;
89
90 /* do the i/o */
91 status = spi_write_then_read(spi, chip->txrx_buf, 1, chip->txrx_buf, 8);
92 if (status != 0)
93 return status;
94
95 /* The chip sends data in this order:
96 * Seconds, Minutes, Hours, Day, Date, Month / Century, Year */
97 dt->tm_sec = bcd2bin(chip->txrx_buf[0]);
98 dt->tm_min = bcd2bin(chip->txrx_buf[1]);
99 dt->tm_hour = bcd2bin(chip->txrx_buf[2]);
100 dt->tm_wday = bcd2bin(chip->txrx_buf[3]);
101 dt->tm_mday = bcd2bin(chip->txrx_buf[4]);
102 /* mask off century bit */
103 dt->tm_mon = bcd2bin(chip->txrx_buf[5] & 0x7f) - 1;
104 /* adjust for century bit */
105 dt->tm_year = bcd2bin(chip->txrx_buf[6]) + ((chip->txrx_buf[5] & 0x80) ? 100 : 0);
106
107 return rtc_valid_tm(dt);
108}
109
110static int ds1390_set_datetime(struct device *dev, struct rtc_time *dt)
111{
112 struct spi_device *spi = to_spi_device(dev);
113 struct ds1390 *chip = dev_get_drvdata(dev);
114
115 /* build the message */
116 chip->txrx_buf[0] = DS1390_REG_SECONDS | 0x80;
117 chip->txrx_buf[1] = bin2bcd(dt->tm_sec);
118 chip->txrx_buf[2] = bin2bcd(dt->tm_min);
119 chip->txrx_buf[3] = bin2bcd(dt->tm_hour);
120 chip->txrx_buf[4] = bin2bcd(dt->tm_wday);
121 chip->txrx_buf[5] = bin2bcd(dt->tm_mday);
122 chip->txrx_buf[6] = bin2bcd(dt->tm_mon + 1) |
123 ((dt->tm_year > 99) ? 0x80 : 0x00);
124 chip->txrx_buf[7] = bin2bcd(dt->tm_year % 100);
125
126 /* do the i/o */
127 return spi_write_then_read(spi, chip->txrx_buf, 8, NULL, 0);
128}
129
130static int ds1390_read_time(struct device *dev, struct rtc_time *tm)
131{
132 return ds1390_get_datetime(dev, tm);
133}
134
135static int ds1390_set_time(struct device *dev, struct rtc_time *tm)
136{
137 return ds1390_set_datetime(dev, tm);
138}
139
140static const struct rtc_class_ops ds1390_rtc_ops = {
141 .read_time = ds1390_read_time,
142 .set_time = ds1390_set_time,
143};
144
145static int __devinit ds1390_probe(struct spi_device *spi)
146{
147 struct rtc_device *rtc;
148 unsigned char tmp;
149 struct ds1390 *chip;
150 int res;
151
152 printk(KERN_DEBUG "DS1390 SPI RTC driver\n");
153
154 rtc = rtc_device_register("ds1390",
155 &spi->dev, &ds1390_rtc_ops, THIS_MODULE);
156 if (IS_ERR(rtc)) {
157 printk(KERN_ALERT "RTC : unable to register device\n");
158 return PTR_ERR(rtc);
159 }
160
161 spi->mode = SPI_MODE_3;
162 spi->bits_per_word = 8;
163 spi_setup(spi);
164
165 chip = kzalloc(sizeof *chip, GFP_KERNEL);
166 if (!chip) {
167 printk(KERN_ALERT "RTC : unable to allocate device memory\n");
168 rtc_device_unregister(rtc);
169 return -ENOMEM;
170 }
171 chip->rtc = rtc;
172 dev_set_drvdata(&spi->dev, chip);
173
174 res = ds1390_get_reg(&spi->dev, DS1390_REG_SECONDS, &tmp);
175 if (res) {
176 printk(KERN_ALERT "RTC : unable to read device\n");
177 rtc_device_unregister(rtc);
178 return res;
179 }
180
181 return 0;
182}
183
184static int __devexit ds1390_remove(struct spi_device *spi)
185{
186 struct ds1390 *chip = platform_get_drvdata(spi);
187 struct rtc_device *rtc = chip->rtc;
188
189 if (rtc)
190 rtc_device_unregister(rtc);
191
192 kfree(chip);
193
194 return 0;
195}
196
197static struct spi_driver ds1390_driver = {
198 .driver = {
199 .name = "rtc-ds1390",
200 .owner = THIS_MODULE,
201 },
202 .probe = ds1390_probe,
203 .remove = __devexit_p(ds1390_remove),
204};
205
206static __init int ds1390_init(void)
207{
208 return spi_register_driver(&ds1390_driver);
209}
210module_init(ds1390_init);
211
212static __exit void ds1390_exit(void)
213{
214 spi_unregister_driver(&ds1390_driver);
215}
216module_exit(ds1390_exit);
217
218MODULE_DESCRIPTION("DS1390/93/94 SPI RTC driver");
219MODULE_AUTHOR("Mark Jackson <mpfj@mimc.co.uk>");
220MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-ds1672.c b/drivers/rtc/rtc-ds1672.c
index 341d7a5b45a2..4e91419e8911 100644
--- a/drivers/rtc/rtc-ds1672.c
+++ b/drivers/rtc/rtc-ds1672.c
@@ -209,12 +209,18 @@ static int ds1672_probe(struct i2c_client *client,
209 return err; 209 return err;
210} 210}
211 211
212static struct i2c_device_id ds1672_id[] = {
213 { "ds1672", 0 },
214 { }
215};
216
212static struct i2c_driver ds1672_driver = { 217static struct i2c_driver ds1672_driver = {
213 .driver = { 218 .driver = {
214 .name = "rtc-ds1672", 219 .name = "rtc-ds1672",
215 }, 220 },
216 .probe = &ds1672_probe, 221 .probe = &ds1672_probe,
217 .remove = &ds1672_remove, 222 .remove = &ds1672_remove,
223 .id_table = ds1672_id,
218}; 224};
219 225
220static int __init ds1672_init(void) 226static int __init ds1672_init(void)
diff --git a/drivers/rtc/rtc-ds3234.c b/drivers/rtc/rtc-ds3234.c
index 37d131d03f33..45e5b106af73 100644
--- a/drivers/rtc/rtc-ds3234.c
+++ b/drivers/rtc/rtc-ds3234.c
@@ -189,7 +189,7 @@ static const struct rtc_class_ops ds3234_rtc_ops = {
189 .set_time = ds3234_set_time, 189 .set_time = ds3234_set_time,
190}; 190};
191 191
192static int ds3234_probe(struct spi_device *spi) 192static int __devinit ds3234_probe(struct spi_device *spi)
193{ 193{
194 struct rtc_device *rtc; 194 struct rtc_device *rtc;
195 unsigned char tmp; 195 unsigned char tmp;
@@ -249,7 +249,7 @@ static int ds3234_probe(struct spi_device *spi)
249 return 0; 249 return 0;
250} 250}
251 251
252static int __exit ds3234_remove(struct spi_device *spi) 252static int __devexit ds3234_remove(struct spi_device *spi)
253{ 253{
254 struct ds3234 *chip = platform_get_drvdata(spi); 254 struct ds3234 *chip = platform_get_drvdata(spi);
255 struct rtc_device *rtc = chip->rtc; 255 struct rtc_device *rtc = chip->rtc;
diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
index 04b63dab6932..43afb7ab5289 100644
--- a/drivers/rtc/rtc-m48t59.c
+++ b/drivers/rtc/rtc-m48t59.c
@@ -87,6 +87,10 @@ static int m48t59_rtc_read_time(struct device *dev, struct rtc_time *tm)
87 dev_dbg(dev, "Century bit is enabled\n"); 87 dev_dbg(dev, "Century bit is enabled\n");
88 tm->tm_year += 100; /* one century */ 88 tm->tm_year += 100; /* one century */
89 } 89 }
90#ifdef CONFIG_SPARC
91 /* Sun SPARC machines count years since 1968 */
92 tm->tm_year += 68;
93#endif
90 94
91 tm->tm_wday = bcd2bin(val & 0x07); 95 tm->tm_wday = bcd2bin(val & 0x07);
92 tm->tm_hour = bcd2bin(M48T59_READ(M48T59_HOUR) & 0x3F); 96 tm->tm_hour = bcd2bin(M48T59_READ(M48T59_HOUR) & 0x3F);
@@ -110,11 +114,20 @@ static int m48t59_rtc_set_time(struct device *dev, struct rtc_time *tm)
110 struct m48t59_private *m48t59 = platform_get_drvdata(pdev); 114 struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
111 unsigned long flags; 115 unsigned long flags;
112 u8 val = 0; 116 u8 val = 0;
117 int year = tm->tm_year;
118
119#ifdef CONFIG_SPARC
120 /* Sun SPARC machines count years since 1968 */
121 year -= 68;
122#endif
113 123
114 dev_dbg(dev, "RTC set time %04d-%02d-%02d %02d/%02d/%02d\n", 124 dev_dbg(dev, "RTC set time %04d-%02d-%02d %02d/%02d/%02d\n",
115 tm->tm_year + 1900, tm->tm_mon, tm->tm_mday, 125 year + 1900, tm->tm_mon, tm->tm_mday,
116 tm->tm_hour, tm->tm_min, tm->tm_sec); 126 tm->tm_hour, tm->tm_min, tm->tm_sec);
117 127
128 if (year < 0)
129 return -EINVAL;
130
118 spin_lock_irqsave(&m48t59->lock, flags); 131 spin_lock_irqsave(&m48t59->lock, flags);
119 /* Issue the WRITE command */ 132 /* Issue the WRITE command */
120 M48T59_SET_BITS(M48T59_CNTL_WRITE, M48T59_CNTL); 133 M48T59_SET_BITS(M48T59_CNTL_WRITE, M48T59_CNTL);
@@ -125,9 +138,9 @@ static int m48t59_rtc_set_time(struct device *dev, struct rtc_time *tm)
125 M48T59_WRITE((bin2bcd(tm->tm_mday) & 0x3F), M48T59_MDAY); 138 M48T59_WRITE((bin2bcd(tm->tm_mday) & 0x3F), M48T59_MDAY);
126 /* tm_mon is 0-11 */ 139 /* tm_mon is 0-11 */
127 M48T59_WRITE((bin2bcd(tm->tm_mon + 1) & 0x1F), M48T59_MONTH); 140 M48T59_WRITE((bin2bcd(tm->tm_mon + 1) & 0x1F), M48T59_MONTH);
128 M48T59_WRITE(bin2bcd(tm->tm_year % 100), M48T59_YEAR); 141 M48T59_WRITE(bin2bcd(year % 100), M48T59_YEAR);
129 142
130 if (pdata->type == M48T59RTC_TYPE_M48T59 && (tm->tm_year / 100)) 143 if (pdata->type == M48T59RTC_TYPE_M48T59 && (year / 100))
131 val = (M48T59_WDAY_CEB | M48T59_WDAY_CB); 144 val = (M48T59_WDAY_CEB | M48T59_WDAY_CB);
132 val |= (bin2bcd(tm->tm_wday) & 0x07); 145 val |= (bin2bcd(tm->tm_wday) & 0x07);
133 M48T59_WRITE(val, M48T59_WDAY); 146 M48T59_WRITE(val, M48T59_WDAY);
@@ -159,6 +172,10 @@ static int m48t59_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
159 M48T59_SET_BITS(M48T59_CNTL_READ, M48T59_CNTL); 172 M48T59_SET_BITS(M48T59_CNTL_READ, M48T59_CNTL);
160 173
161 tm->tm_year = bcd2bin(M48T59_READ(M48T59_YEAR)); 174 tm->tm_year = bcd2bin(M48T59_READ(M48T59_YEAR));
175#ifdef CONFIG_SPARC
176 /* Sun SPARC machines count years since 1968 */
177 tm->tm_year += 68;
178#endif
162 /* tm_mon is 0-11 */ 179 /* tm_mon is 0-11 */
163 tm->tm_mon = bcd2bin(M48T59_READ(M48T59_MONTH)) - 1; 180 tm->tm_mon = bcd2bin(M48T59_READ(M48T59_MONTH)) - 1;
164 181
@@ -192,11 +209,20 @@ static int m48t59_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
192 struct rtc_time *tm = &alrm->time; 209 struct rtc_time *tm = &alrm->time;
193 u8 mday, hour, min, sec; 210 u8 mday, hour, min, sec;
194 unsigned long flags; 211 unsigned long flags;
212 int year = tm->tm_year;
213
214#ifdef CONFIG_SPARC
215 /* Sun SPARC machines count years since 1968 */
216 year -= 68;
217#endif
195 218
196 /* If no irq, we don't support ALARM */ 219 /* If no irq, we don't support ALARM */
197 if (m48t59->irq == NO_IRQ) 220 if (m48t59->irq == NO_IRQ)
198 return -EIO; 221 return -EIO;
199 222
223 if (year < 0)
224 return -EINVAL;
225
200 /* 226 /*
201 * 0xff means "always match" 227 * 0xff means "always match"
202 */ 228 */
@@ -228,7 +254,7 @@ static int m48t59_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
228 spin_unlock_irqrestore(&m48t59->lock, flags); 254 spin_unlock_irqrestore(&m48t59->lock, flags);
229 255
230 dev_dbg(dev, "RTC set alarm time %04d-%02d-%02d %02d/%02d/%02d\n", 256 dev_dbg(dev, "RTC set alarm time %04d-%02d-%02d %02d/%02d/%02d\n",
231 tm->tm_year + 1900, tm->tm_mon, tm->tm_mday, 257 year + 1900, tm->tm_mon, tm->tm_mday,
232 tm->tm_hour, tm->tm_min, tm->tm_sec); 258 tm->tm_hour, tm->tm_min, tm->tm_sec);
233 return 0; 259 return 0;
234} 260}
diff --git a/drivers/rtc/rtc-max6900.c b/drivers/rtc/rtc-max6900.c
index 80782798763f..a4f6665ab3c5 100644
--- a/drivers/rtc/rtc-max6900.c
+++ b/drivers/rtc/rtc-max6900.c
@@ -247,12 +247,18 @@ max6900_probe(struct i2c_client *client, const struct i2c_device_id *id)
247 return 0; 247 return 0;
248} 248}
249 249
250static struct i2c_device_id max6900_id[] = {
251 { "max6900", 0 },
252 { }
253};
254
250static struct i2c_driver max6900_driver = { 255static struct i2c_driver max6900_driver = {
251 .driver = { 256 .driver = {
252 .name = "rtc-max6900", 257 .name = "rtc-max6900",
253 }, 258 },
254 .probe = max6900_probe, 259 .probe = max6900_probe,
255 .remove = max6900_remove, 260 .remove = max6900_remove,
261 .id_table = max6900_id,
256}; 262};
257 263
258static int __init max6900_init(void) 264static int __init max6900_init(void)
diff --git a/drivers/rtc/rtc-rx8581.c b/drivers/rtc/rtc-rx8581.c
new file mode 100644
index 000000000000..c9522f3bc21c
--- /dev/null
+++ b/drivers/rtc/rtc-rx8581.c
@@ -0,0 +1,281 @@
1/*
2 * An I2C driver for the Epson RX8581 RTC
3 *
4 * Author: Martyn Welch <martyn.welch@gefanuc.com>
5 * Copyright 2008 GE Fanuc Intelligent Platforms Embedded Systems, Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * Based on: rtc-pcf8563.c (An I2C driver for the Philips PCF8563 RTC)
12 * Copyright 2005-06 Tower Technologies
13 */
14
15#include <linux/module.h>
16#include <linux/i2c.h>
17#include <linux/bcd.h>
18#include <linux/rtc.h>
19#include <linux/log2.h>
20
21#define DRV_VERSION "0.1"
22
23#define RX8581_REG_SC 0x00 /* Second in BCD */
24#define RX8581_REG_MN 0x01 /* Minute in BCD */
25#define RX8581_REG_HR 0x02 /* Hour in BCD */
26#define RX8581_REG_DW 0x03 /* Day of Week */
27#define RX8581_REG_DM 0x04 /* Day of Month in BCD */
28#define RX8581_REG_MO 0x05 /* Month in BCD */
29#define RX8581_REG_YR 0x06 /* Year in BCD */
30#define RX8581_REG_RAM 0x07 /* RAM */
31#define RX8581_REG_AMN 0x08 /* Alarm Min in BCD*/
32#define RX8581_REG_AHR 0x09 /* Alarm Hour in BCD */
33#define RX8581_REG_ADM 0x0A
34#define RX8581_REG_ADW 0x0A
35#define RX8581_REG_TMR0 0x0B
36#define RX8581_REG_TMR1 0x0C
37#define RX8581_REG_EXT 0x0D /* Extension Register */
38#define RX8581_REG_FLAG 0x0E /* Flag Register */
39#define RX8581_REG_CTRL 0x0F /* Control Register */
40
41
42/* Flag Register bit definitions */
43#define RX8581_FLAG_UF 0x20 /* Update */
44#define RX8581_FLAG_TF 0x10 /* Timer */
45#define RX8581_FLAG_AF 0x08 /* Alarm */
46#define RX8581_FLAG_VLF 0x02 /* Voltage Low */
47
48/* Control Register bit definitions */
49#define RX8581_CTRL_UIE 0x20 /* Update Interrupt Enable */
50#define RX8581_CTRL_TIE 0x10 /* Timer Interrupt Enable */
51#define RX8581_CTRL_AIE 0x08 /* Alarm Interrupt Enable */
52#define RX8581_CTRL_STOP 0x02 /* STOP bit */
53#define RX8581_CTRL_RESET 0x01 /* RESET bit */
54
55static struct i2c_driver rx8581_driver;
56
57/*
58 * In the routines that deal directly with the rx8581 hardware, we use
59 * rtc_time -- month 0-11, hour 0-23, yr = calendar year-epoch.
60 */
61static int rx8581_get_datetime(struct i2c_client *client, struct rtc_time *tm)
62{
63 unsigned char date[7];
64 int data, err;
65
66 /* First we ensure that the "update flag" is not set, we read the
67 * time and date then re-read the "update flag". If the update flag
68 * has been set, we know that the time has changed during the read so
69 * we repeat the whole process again.
70 */
71 data = i2c_smbus_read_byte_data(client, RX8581_REG_FLAG);
72 if (data < 0) {
73 dev_err(&client->dev, "Unable to read device flags\n");
74 return -EIO;
75 }
76
77 do {
78 /* If update flag set, clear it */
79 if (data & RX8581_FLAG_UF) {
80 err = i2c_smbus_write_byte_data(client,
81 RX8581_REG_FLAG, (data & ~RX8581_FLAG_UF));
82 if (err != 0) {
83 dev_err(&client->dev, "Unable to write device "
84 "flags\n");
85 return -EIO;
86 }
87 }
88
89 /* Now read time and date */
90 err = i2c_smbus_read_i2c_block_data(client, RX8581_REG_SC,
91 7, date);
92 if (err < 0) {
93 dev_err(&client->dev, "Unable to read date\n");
94 return -EIO;
95 }
96
97 /* Check flag register */
98 data = i2c_smbus_read_byte_data(client, RX8581_REG_FLAG);
99 if (data < 0) {
100 dev_err(&client->dev, "Unable to read device flags\n");
101 return -EIO;
102 }
103 } while (data & RX8581_FLAG_UF);
104
105 if (data & RX8581_FLAG_VLF)
106 dev_info(&client->dev,
107 "low voltage detected, date/time is not reliable.\n");
108
109 dev_dbg(&client->dev,
110 "%s: raw data is sec=%02x, min=%02x, hr=%02x, "
111 "wday=%02x, mday=%02x, mon=%02x, year=%02x\n",
112 __func__,
113 date[0], date[1], date[2], date[3], date[4], date[5], date[6]);
114
115 tm->tm_sec = bcd2bin(date[RX8581_REG_SC] & 0x7F);
116 tm->tm_min = bcd2bin(date[RX8581_REG_MN] & 0x7F);
117 tm->tm_hour = bcd2bin(date[RX8581_REG_HR] & 0x3F); /* rtc hr 0-23 */
118 tm->tm_wday = ilog2(date[RX8581_REG_DW] & 0x7F);
119 tm->tm_mday = bcd2bin(date[RX8581_REG_DM] & 0x3F);
120 tm->tm_mon = bcd2bin(date[RX8581_REG_MO] & 0x1F) - 1; /* rtc mn 1-12 */
121 tm->tm_year = bcd2bin(date[RX8581_REG_YR]);
122 if (tm->tm_year < 70)
123 tm->tm_year += 100; /* assume we are in 1970...2069 */
124
125
126 dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d, "
127 "mday=%d, mon=%d, year=%d, wday=%d\n",
128 __func__,
129 tm->tm_sec, tm->tm_min, tm->tm_hour,
130 tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
131
132 err = rtc_valid_tm(tm);
133 if (err < 0)
134 dev_err(&client->dev, "retrieved date/time is not valid.\n");
135
136 return err;
137}
138
139static int rx8581_set_datetime(struct i2c_client *client, struct rtc_time *tm)
140{
141 int data, err;
142 unsigned char buf[7];
143
144 dev_dbg(&client->dev, "%s: secs=%d, mins=%d, hours=%d, "
145 "mday=%d, mon=%d, year=%d, wday=%d\n",
146 __func__,
147 tm->tm_sec, tm->tm_min, tm->tm_hour,
148 tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
149
150 /* hours, minutes and seconds */
151 buf[RX8581_REG_SC] = bin2bcd(tm->tm_sec);
152 buf[RX8581_REG_MN] = bin2bcd(tm->tm_min);
153 buf[RX8581_REG_HR] = bin2bcd(tm->tm_hour);
154
155 buf[RX8581_REG_DM] = bin2bcd(tm->tm_mday);
156
157 /* month, 1 - 12 */
158 buf[RX8581_REG_MO] = bin2bcd(tm->tm_mon + 1);
159
160 /* year and century */
161 buf[RX8581_REG_YR] = bin2bcd(tm->tm_year % 100);
162 buf[RX8581_REG_DW] = (0x1 << tm->tm_wday);
163
164 /* Stop the clock */
165 data = i2c_smbus_read_byte_data(client, RX8581_REG_CTRL);
166 if (data < 0) {
167 dev_err(&client->dev, "Unable to read control register\n");
168 return -EIO;
169 }
170
 171 err = i2c_smbus_write_byte_data(client, RX8581_REG_CTRL,
 172 (data | RX8581_CTRL_STOP));
173 if (err < 0) {
174 dev_err(&client->dev, "Unable to write control register\n");
175 return -EIO;
176 }
177
178 /* write register's data */
179 err = i2c_smbus_write_i2c_block_data(client, RX8581_REG_SC, 7, buf);
180 if (err < 0) {
181 dev_err(&client->dev, "Unable to write to date registers\n");
182 return -EIO;
183 }
184
185 /* Restart the clock */
186 data = i2c_smbus_read_byte_data(client, RX8581_REG_CTRL);
187 if (data < 0) {
188 dev_err(&client->dev, "Unable to read control register\n");
189 return -EIO;
190 }
191
 192 err = i2c_smbus_write_byte_data(client, RX8581_REG_CTRL,
 193 (data & ~(RX8581_CTRL_STOP)));
194 if (err != 0) {
195 dev_err(&client->dev, "Unable to write control register\n");
196 return -EIO;
197 }
198
199 return 0;
200}
201
202static int rx8581_rtc_read_time(struct device *dev, struct rtc_time *tm)
203{
204 return rx8581_get_datetime(to_i2c_client(dev), tm);
205}
206
207static int rx8581_rtc_set_time(struct device *dev, struct rtc_time *tm)
208{
209 return rx8581_set_datetime(to_i2c_client(dev), tm);
210}
211
212static const struct rtc_class_ops rx8581_rtc_ops = {
213 .read_time = rx8581_rtc_read_time,
214 .set_time = rx8581_rtc_set_time,
215};
216
217static int __devinit rx8581_probe(struct i2c_client *client,
218 const struct i2c_device_id *id)
219{
220 struct rtc_device *rtc;
221
222 dev_dbg(&client->dev, "%s\n", __func__);
223
224 if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
225 return -ENODEV;
226
227 dev_info(&client->dev, "chip found, driver version " DRV_VERSION "\n");
228
229 rtc = rtc_device_register(rx8581_driver.driver.name,
230 &client->dev, &rx8581_rtc_ops, THIS_MODULE);
231
232 if (IS_ERR(rtc))
233 return PTR_ERR(rtc);
234
235 i2c_set_clientdata(client, rtc);
236
237 return 0;
238}
239
240static int __devexit rx8581_remove(struct i2c_client *client)
241{
242 struct rtc_device *rtc = i2c_get_clientdata(client);
243
244 rtc_device_unregister(rtc);
245
246 return 0;
247}
248
249static const struct i2c_device_id rx8581_id[] = {
250 { "rx8581", 0 },
251 { }
252};
253MODULE_DEVICE_TABLE(i2c, rx8581_id);
254
255static struct i2c_driver rx8581_driver = {
256 .driver = {
257 .name = "rtc-rx8581",
258 .owner = THIS_MODULE,
259 },
260 .probe = rx8581_probe,
261 .remove = __devexit_p(rx8581_remove),
262 .id_table = rx8581_id,
263};
264
265static int __init rx8581_init(void)
266{
267 return i2c_add_driver(&rx8581_driver);
268}
269
270static void __exit rx8581_exit(void)
271{
272 i2c_del_driver(&rx8581_driver);
273}
274
275MODULE_AUTHOR("Martyn Welch <martyn.welch@gefanuc.com>");
276MODULE_DESCRIPTION("Epson RX-8581 RTC driver");
277MODULE_LICENSE("GPL");
278MODULE_VERSION(DRV_VERSION);
279
280module_init(rx8581_init);
281module_exit(rx8581_exit);
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 910bc704939c..f59277bbedaa 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -455,6 +455,8 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev)
455 455
456 s3c_rtc_setfreq(&pdev->dev, 1); 456 s3c_rtc_setfreq(&pdev->dev, 1);
457 457
458 device_init_wakeup(&pdev->dev, 1);
459
458 /* register RTC and exit */ 460 /* register RTC and exit */
459 461
460 rtc = rtc_device_register("s3c", &pdev->dev, &s3c_rtcops, 462 rtc = rtc_device_register("s3c", &pdev->dev, &s3c_rtcops,
@@ -507,7 +509,7 @@ static int s3c_rtc_resume(struct platform_device *pdev)
507#define s3c_rtc_resume NULL 509#define s3c_rtc_resume NULL
508#endif 510#endif
509 511
510static struct platform_driver s3c2410_rtcdrv = { 512static struct platform_driver s3c2410_rtc_driver = {
511 .probe = s3c_rtc_probe, 513 .probe = s3c_rtc_probe,
512 .remove = __devexit_p(s3c_rtc_remove), 514 .remove = __devexit_p(s3c_rtc_remove),
513 .suspend = s3c_rtc_suspend, 515 .suspend = s3c_rtc_suspend,
@@ -523,12 +525,12 @@ static char __initdata banner[] = "S3C24XX RTC, (c) 2004,2006 Simtec Electronics
523static int __init s3c_rtc_init(void) 525static int __init s3c_rtc_init(void)
524{ 526{
525 printk(banner); 527 printk(banner);
526 return platform_driver_register(&s3c2410_rtcdrv); 528 return platform_driver_register(&s3c2410_rtc_driver);
527} 529}
528 530
529static void __exit s3c_rtc_exit(void) 531static void __exit s3c_rtc_exit(void)
530{ 532{
531 platform_driver_unregister(&s3c2410_rtcdrv); 533 platform_driver_unregister(&s3c2410_rtc_driver);
532} 534}
533 535
534module_init(s3c_rtc_init); 536module_init(s3c_rtc_init);
diff --git a/drivers/rtc/rtc-starfire.c b/drivers/rtc/rtc-starfire.c
index 7ccb0dd700af..5be98bfd7ed3 100644
--- a/drivers/rtc/rtc-starfire.c
+++ b/drivers/rtc/rtc-starfire.c
@@ -6,7 +6,6 @@
6#include <linux/kernel.h> 6#include <linux/kernel.h>
7#include <linux/module.h> 7#include <linux/module.h>
8#include <linux/init.h> 8#include <linux/init.h>
9#include <linux/time.h>
10#include <linux/rtc.h> 9#include <linux/rtc.h>
11#include <linux/platform_device.h> 10#include <linux/platform_device.h>
12 11
@@ -16,11 +15,6 @@ MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
16MODULE_DESCRIPTION("Starfire RTC driver"); 15MODULE_DESCRIPTION("Starfire RTC driver");
17MODULE_LICENSE("GPL"); 16MODULE_LICENSE("GPL");
18 17
19struct starfire_rtc {
20 struct rtc_device *rtc;
21 spinlock_t lock;
22};
23
24static u32 starfire_get_time(void) 18static u32 starfire_get_time(void)
25{ 19{
26 static char obp_gettod[32]; 20 static char obp_gettod[32];
@@ -35,64 +29,31 @@ static u32 starfire_get_time(void)
35 29
36static int starfire_read_time(struct device *dev, struct rtc_time *tm) 30static int starfire_read_time(struct device *dev, struct rtc_time *tm)
37{ 31{
38 struct starfire_rtc *p = dev_get_drvdata(dev); 32 rtc_time_to_tm(starfire_get_time(), tm);
39 unsigned long flags, secs; 33 return rtc_valid_tm(tm);
40
41 spin_lock_irqsave(&p->lock, flags);
42 secs = starfire_get_time();
43 spin_unlock_irqrestore(&p->lock, flags);
44
45 rtc_time_to_tm(secs, tm);
46
47 return 0;
48}
49
50static int starfire_set_time(struct device *dev, struct rtc_time *tm)
51{
52 unsigned long secs;
53 int err;
54
55 err = rtc_tm_to_time(tm, &secs);
56 if (err)
57 return err;
58
59 /* Do nothing, time is set using the service processor
60 * console on this platform.
61 */
62 return 0;
63} 34}
64 35
65static const struct rtc_class_ops starfire_rtc_ops = { 36static const struct rtc_class_ops starfire_rtc_ops = {
66 .read_time = starfire_read_time, 37 .read_time = starfire_read_time,
67 .set_time = starfire_set_time,
68}; 38};
69 39
70static int __devinit starfire_rtc_probe(struct platform_device *pdev) 40static int __init starfire_rtc_probe(struct platform_device *pdev)
71{ 41{
72 struct starfire_rtc *p = kzalloc(sizeof(*p), GFP_KERNEL); 42 struct rtc_device *rtc = rtc_device_register("starfire", &pdev->dev,
73 43 &starfire_rtc_ops, THIS_MODULE);
74 if (!p) 44 if (IS_ERR(rtc))
75 return -ENOMEM; 45 return PTR_ERR(rtc);
76 46
77 spin_lock_init(&p->lock); 47 platform_set_drvdata(pdev, rtc);
78 48
79 p->rtc = rtc_device_register("starfire", &pdev->dev,
80 &starfire_rtc_ops, THIS_MODULE);
81 if (IS_ERR(p->rtc)) {
82 int err = PTR_ERR(p->rtc);
83 kfree(p);
84 return err;
85 }
86 platform_set_drvdata(pdev, p);
87 return 0; 49 return 0;
88} 50}
89 51
90static int __devexit starfire_rtc_remove(struct platform_device *pdev) 52static int __exit starfire_rtc_remove(struct platform_device *pdev)
91{ 53{
92 struct starfire_rtc *p = platform_get_drvdata(pdev); 54 struct rtc_device *rtc = platform_get_drvdata(pdev);
93 55
94 rtc_device_unregister(p->rtc); 56 rtc_device_unregister(rtc);
95 kfree(p);
96 57
97 return 0; 58 return 0;
98} 59}
@@ -102,13 +63,12 @@ static struct platform_driver starfire_rtc_driver = {
102 .name = "rtc-starfire", 63 .name = "rtc-starfire",
103 .owner = THIS_MODULE, 64 .owner = THIS_MODULE,
104 }, 65 },
105 .probe = starfire_rtc_probe, 66 .remove = __exit_p(starfire_rtc_remove),
106 .remove = __devexit_p(starfire_rtc_remove),
107}; 67};
108 68
109static int __init starfire_rtc_init(void) 69static int __init starfire_rtc_init(void)
110{ 70{
111 return platform_driver_register(&starfire_rtc_driver); 71 return platform_driver_probe(&starfire_rtc_driver, starfire_rtc_probe);
112} 72}
113 73
114static void __exit starfire_rtc_exit(void) 74static void __exit starfire_rtc_exit(void)
diff --git a/drivers/rtc/rtc-sun4v.c b/drivers/rtc/rtc-sun4v.c
index 2012ccbb4a53..5b2261052a65 100644
--- a/drivers/rtc/rtc-sun4v.c
+++ b/drivers/rtc/rtc-sun4v.c
@@ -1,4 +1,4 @@
1/* rtc-sun4c.c: Hypervisor based RTC for SUN4V systems. 1/* rtc-sun4v.c: Hypervisor based RTC for SUN4V systems.
2 * 2 *
3 * Copyright (C) 2008 David S. Miller <davem@davemloft.net> 3 * Copyright (C) 2008 David S. Miller <davem@davemloft.net>
4 */ 4 */
@@ -7,21 +7,11 @@
7#include <linux/module.h> 7#include <linux/module.h>
8#include <linux/delay.h> 8#include <linux/delay.h>
9#include <linux/init.h> 9#include <linux/init.h>
10#include <linux/time.h>
11#include <linux/rtc.h> 10#include <linux/rtc.h>
12#include <linux/platform_device.h> 11#include <linux/platform_device.h>
13 12
14#include <asm/hypervisor.h> 13#include <asm/hypervisor.h>
15 14
16MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
17MODULE_DESCRIPTION("SUN4V RTC driver");
18MODULE_LICENSE("GPL");
19
20struct sun4v_rtc {
21 struct rtc_device *rtc;
22 spinlock_t lock;
23};
24
25static unsigned long hypervisor_get_time(void) 15static unsigned long hypervisor_get_time(void)
26{ 16{
27 unsigned long ret, time; 17 unsigned long ret, time;
@@ -45,15 +35,7 @@ retry:
45 35
46static int sun4v_read_time(struct device *dev, struct rtc_time *tm) 36static int sun4v_read_time(struct device *dev, struct rtc_time *tm)
47{ 37{
48 struct sun4v_rtc *p = dev_get_drvdata(dev); 38 rtc_time_to_tm(hypervisor_get_time(), tm);
49 unsigned long flags, secs;
50
51 spin_lock_irqsave(&p->lock, flags);
52 secs = hypervisor_get_time();
53 spin_unlock_irqrestore(&p->lock, flags);
54
55 rtc_time_to_tm(secs, tm);
56
57 return 0; 39 return 0;
58} 40}
59 41
@@ -80,19 +62,14 @@ retry:
80 62
81static int sun4v_set_time(struct device *dev, struct rtc_time *tm) 63static int sun4v_set_time(struct device *dev, struct rtc_time *tm)
82{ 64{
83 struct sun4v_rtc *p = dev_get_drvdata(dev); 65 unsigned long secs;
84 unsigned long flags, secs;
85 int err; 66 int err;
86 67
87 err = rtc_tm_to_time(tm, &secs); 68 err = rtc_tm_to_time(tm, &secs);
88 if (err) 69 if (err)
89 return err; 70 return err;
90 71
91 spin_lock_irqsave(&p->lock, flags); 72 return hypervisor_set_time(secs);
92 err = hypervisor_set_time(secs);
93 spin_unlock_irqrestore(&p->lock, flags);
94
95 return err;
96} 73}
97 74
98static const struct rtc_class_ops sun4v_rtc_ops = { 75static const struct rtc_class_ops sun4v_rtc_ops = {
@@ -100,33 +77,22 @@ static const struct rtc_class_ops sun4v_rtc_ops = {
100 .set_time = sun4v_set_time, 77 .set_time = sun4v_set_time,
101}; 78};
102 79
103static int __devinit sun4v_rtc_probe(struct platform_device *pdev) 80static int __init sun4v_rtc_probe(struct platform_device *pdev)
104{ 81{
105 struct sun4v_rtc *p = kzalloc(sizeof(*p), GFP_KERNEL); 82 struct rtc_device *rtc = rtc_device_register("sun4v", &pdev->dev,
106
107 if (!p)
108 return -ENOMEM;
109
110 spin_lock_init(&p->lock);
111
112 p->rtc = rtc_device_register("sun4v", &pdev->dev,
113 &sun4v_rtc_ops, THIS_MODULE); 83 &sun4v_rtc_ops, THIS_MODULE);
114 if (IS_ERR(p->rtc)) { 84 if (IS_ERR(rtc))
115 int err = PTR_ERR(p->rtc); 85 return PTR_ERR(rtc);
116 kfree(p); 86
117 return err; 87 platform_set_drvdata(pdev, rtc);
118 }
119 platform_set_drvdata(pdev, p);
120 return 0; 88 return 0;
121} 89}
122 90
123static int __devexit sun4v_rtc_remove(struct platform_device *pdev) 91static int __exit sun4v_rtc_remove(struct platform_device *pdev)
124{ 92{
125 struct sun4v_rtc *p = platform_get_drvdata(pdev); 93 struct rtc_device *rtc = platform_get_drvdata(pdev);
126
127 rtc_device_unregister(p->rtc);
128 kfree(p);
129 94
95 rtc_device_unregister(rtc);
130 return 0; 96 return 0;
131} 97}
132 98
@@ -135,13 +101,12 @@ static struct platform_driver sun4v_rtc_driver = {
135 .name = "rtc-sun4v", 101 .name = "rtc-sun4v",
136 .owner = THIS_MODULE, 102 .owner = THIS_MODULE,
137 }, 103 },
138 .probe = sun4v_rtc_probe, 104 .remove = __exit_p(sun4v_rtc_remove),
139 .remove = __devexit_p(sun4v_rtc_remove),
140}; 105};
141 106
142static int __init sun4v_rtc_init(void) 107static int __init sun4v_rtc_init(void)
143{ 108{
144 return platform_driver_register(&sun4v_rtc_driver); 109 return platform_driver_probe(&sun4v_rtc_driver, sun4v_rtc_probe);
145} 110}
146 111
147static void __exit sun4v_rtc_exit(void) 112static void __exit sun4v_rtc_exit(void)
@@ -151,3 +116,7 @@ static void __exit sun4v_rtc_exit(void)
151 116
152module_init(sun4v_rtc_init); 117module_init(sun4v_rtc_init);
153module_exit(sun4v_rtc_exit); 118module_exit(sun4v_rtc_exit);
119
120MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
121MODULE_DESCRIPTION("SUN4V RTC driver");
122MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-twl4030.c b/drivers/rtc/rtc-twl4030.c
new file mode 100644
index 000000000000..01d8da9afdc8
--- /dev/null
+++ b/drivers/rtc/rtc-twl4030.c
@@ -0,0 +1,564 @@
1/*
2 * rtc-twl4030.c -- TWL4030 Real Time Clock interface
3 *
4 * Copyright (C) 2007 MontaVista Software, Inc
5 * Author: Alexandre Rusev <source@mvista.com>
6 *
7 * Based on original TI driver twl4030-rtc.c
8 * Copyright (C) 2006 Texas Instruments, Inc.
9 *
10 * Based on rtc-omap.c
11 * Copyright (C) 2003 MontaVista Software, Inc.
12 * Author: George G. Davis <gdavis@mvista.com> or <source@mvista.com>
13 * Copyright (C) 2006 David Brownell
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version.
19 */
20
21#include <linux/kernel.h>
22#include <linux/init.h>
23#include <linux/module.h>
24#include <linux/types.h>
25#include <linux/rtc.h>
26#include <linux/bcd.h>
27#include <linux/platform_device.h>
28#include <linux/interrupt.h>
29
30#include <linux/i2c/twl4030.h>
31
32
33/*
34 * RTC block register offsets (use TWL_MODULE_RTC)
35 */
36#define REG_SECONDS_REG 0x00
37#define REG_MINUTES_REG 0x01
38#define REG_HOURS_REG 0x02
39#define REG_DAYS_REG 0x03
40#define REG_MONTHS_REG 0x04
41#define REG_YEARS_REG 0x05
42#define REG_WEEKS_REG 0x06
43
44#define REG_ALARM_SECONDS_REG 0x07
45#define REG_ALARM_MINUTES_REG 0x08
46#define REG_ALARM_HOURS_REG 0x09
47#define REG_ALARM_DAYS_REG 0x0A
48#define REG_ALARM_MONTHS_REG 0x0B
49#define REG_ALARM_YEARS_REG 0x0C
50
51#define REG_RTC_CTRL_REG 0x0D
52#define REG_RTC_STATUS_REG 0x0E
53#define REG_RTC_INTERRUPTS_REG 0x0F
54
55#define REG_RTC_COMP_LSB_REG 0x10
56#define REG_RTC_COMP_MSB_REG 0x11
57
58/* RTC_CTRL_REG bitfields */
59#define BIT_RTC_CTRL_REG_STOP_RTC_M 0x01
60#define BIT_RTC_CTRL_REG_ROUND_30S_M 0x02
61#define BIT_RTC_CTRL_REG_AUTO_COMP_M 0x04
62#define BIT_RTC_CTRL_REG_MODE_12_24_M 0x08
63#define BIT_RTC_CTRL_REG_TEST_MODE_M 0x10
64#define BIT_RTC_CTRL_REG_SET_32_COUNTER_M 0x20
65#define BIT_RTC_CTRL_REG_GET_TIME_M 0x40
66
67/* RTC_STATUS_REG bitfields */
68#define BIT_RTC_STATUS_REG_RUN_M 0x02
69#define BIT_RTC_STATUS_REG_1S_EVENT_M 0x04
70#define BIT_RTC_STATUS_REG_1M_EVENT_M 0x08
71#define BIT_RTC_STATUS_REG_1H_EVENT_M 0x10
72#define BIT_RTC_STATUS_REG_1D_EVENT_M 0x20
73#define BIT_RTC_STATUS_REG_ALARM_M 0x40
74#define BIT_RTC_STATUS_REG_POWER_UP_M 0x80
75
76/* RTC_INTERRUPTS_REG bitfields */
77#define BIT_RTC_INTERRUPTS_REG_EVERY_M 0x03
78#define BIT_RTC_INTERRUPTS_REG_IT_TIMER_M 0x04
79#define BIT_RTC_INTERRUPTS_REG_IT_ALARM_M 0x08
80
81
82/* REG_SECONDS_REG through REG_YEARS_REG is how many registers? */
83#define ALL_TIME_REGS 6
84
85/*----------------------------------------------------------------------*/
86
87/*
88 * Supports 1 byte read from TWL4030 RTC register.
89 */
90static int twl4030_rtc_read_u8(u8 *data, u8 reg)
91{
92 int ret;
93
94 ret = twl4030_i2c_read_u8(TWL4030_MODULE_RTC, data, reg);
95 if (ret < 0)
96		pr_err("twl4030_rtc: Could not read TWL4030 "
97			"register %X - error %d\n", reg, ret);
98 return ret;
99}
100
101/*
102 * Supports 1 byte write to TWL4030 RTC registers.
103 */
104static int twl4030_rtc_write_u8(u8 data, u8 reg)
105{
106 int ret;
107
108 ret = twl4030_i2c_write_u8(TWL4030_MODULE_RTC, data, reg);
109 if (ret < 0)
110		pr_err("twl4030_rtc: Could not write TWL4030 "
111			"register %X - error %d\n", reg, ret);
112 return ret;
113}
114
115/*
116 * Cache the value for timer/alarm interrupts register; this is
117 * only changed by callers holding rtc ops lock (or resume).
118 */
119static unsigned char rtc_irq_bits;
120
121/*
122 * Enable timer and/or alarm interrupts.
123 */
124static int set_rtc_irq_bit(unsigned char bit)
125{
126 unsigned char val;
127 int ret;
128
129 val = rtc_irq_bits | bit;
130 ret = twl4030_rtc_write_u8(val, REG_RTC_INTERRUPTS_REG);
131 if (ret == 0)
132 rtc_irq_bits = val;
133
134 return ret;
135}
136
137/*
138 * Disable timer and/or alarm interrupts.
139 */
140static int mask_rtc_irq_bit(unsigned char bit)
141{
142 unsigned char val;
143 int ret;
144
145 val = rtc_irq_bits & ~bit;
146 ret = twl4030_rtc_write_u8(val, REG_RTC_INTERRUPTS_REG);
147 if (ret == 0)
148 rtc_irq_bits = val;
149
150 return ret;
151}
152
153static inline int twl4030_rtc_alarm_irq_set_state(int enabled)
154{
155 int ret;
156
157 if (enabled)
158 ret = set_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_ALARM_M);
159 else
160 ret = mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_ALARM_M);
161
162 return ret;
163}
164
165static inline int twl4030_rtc_irq_set_state(int enabled)
166{
167 int ret;
168
169 if (enabled)
170 ret = set_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M);
171 else
172 ret = mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M);
173
174 return ret;
175}
176
177/*
178 * Gets current TWL4030 RTC time and date parameters.
179 *
180 * The RTC's time/alarm representation is not what gmtime(3) requires
181 * Linux to use:
182 *
183 * - Months are 1..12 vs Linux 0-11
184 * - Years are 0..99 vs Linux 1900..N (we assume 21st century)
185 */
186static int twl4030_rtc_read_time(struct device *dev, struct rtc_time *tm)
187{
188 unsigned char rtc_data[ALL_TIME_REGS + 1];
189 int ret;
190 u8 save_control;
191
192 ret = twl4030_rtc_read_u8(&save_control, REG_RTC_CTRL_REG);
193 if (ret < 0)
194 return ret;
195
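	/* Toggling GET_TIME latches the current time into shadow registers so the block read below is coherent */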
196 save_control |= BIT_RTC_CTRL_REG_GET_TIME_M;
197
198 ret = twl4030_rtc_write_u8(save_control, REG_RTC_CTRL_REG);
199 if (ret < 0)
200 return ret;
201
202 ret = twl4030_i2c_read(TWL4030_MODULE_RTC, rtc_data,
203 REG_SECONDS_REG, ALL_TIME_REGS);
204
205 if (ret < 0) {
206 dev_err(dev, "rtc_read_time error %d\n", ret);
207 return ret;
208 }
209
210 tm->tm_sec = bcd2bin(rtc_data[0]);
211 tm->tm_min = bcd2bin(rtc_data[1]);
212 tm->tm_hour = bcd2bin(rtc_data[2]);
213 tm->tm_mday = bcd2bin(rtc_data[3]);
214 tm->tm_mon = bcd2bin(rtc_data[4]) - 1;
215 tm->tm_year = bcd2bin(rtc_data[5]) + 100;
216
217 return ret;
218}
219
220static int twl4030_rtc_set_time(struct device *dev, struct rtc_time *tm)
221{
222 unsigned char save_control;
223 unsigned char rtc_data[ALL_TIME_REGS + 1];
224 int ret;
225
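	/* rtc_data[0] is left unused: twl4030_i2c_write() needs the leading byte of the buffer for the register address */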
226 rtc_data[1] = bin2bcd(tm->tm_sec);
227 rtc_data[2] = bin2bcd(tm->tm_min);
228 rtc_data[3] = bin2bcd(tm->tm_hour);
229 rtc_data[4] = bin2bcd(tm->tm_mday);
230 rtc_data[5] = bin2bcd(tm->tm_mon + 1);
231 rtc_data[6] = bin2bcd(tm->tm_year - 100);
232
233 /* Stop RTC while updating the TC registers */
234 ret = twl4030_rtc_read_u8(&save_control, REG_RTC_CTRL_REG);
235 if (ret < 0)
236 goto out;
237
238 save_control &= ~BIT_RTC_CTRL_REG_STOP_RTC_M;
239	ret = twl4030_rtc_write_u8(save_control, REG_RTC_CTRL_REG);
240 if (ret < 0)
241 goto out;
242
243 /* update all the time registers in one shot */
244 ret = twl4030_i2c_write(TWL4030_MODULE_RTC, rtc_data,
245 REG_SECONDS_REG, ALL_TIME_REGS);
246 if (ret < 0) {
247 dev_err(dev, "rtc_set_time error %d\n", ret);
248 goto out;
249 }
250
251 /* Start back RTC */
252 save_control |= BIT_RTC_CTRL_REG_STOP_RTC_M;
253 ret = twl4030_rtc_write_u8(save_control, REG_RTC_CTRL_REG);
254
255out:
256 return ret;
257}
258
259/*
260 * Gets current TWL4030 RTC alarm time.
261 */
262static int twl4030_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
263{
264 unsigned char rtc_data[ALL_TIME_REGS + 1];
265 int ret;
266
267 ret = twl4030_i2c_read(TWL4030_MODULE_RTC, rtc_data,
268 REG_ALARM_SECONDS_REG, ALL_TIME_REGS);
269 if (ret < 0) {
270 dev_err(dev, "rtc_read_alarm error %d\n", ret);
271 return ret;
272 }
273
274 /* some of these fields may be wildcard/"match all" */
275 alm->time.tm_sec = bcd2bin(rtc_data[0]);
276 alm->time.tm_min = bcd2bin(rtc_data[1]);
277 alm->time.tm_hour = bcd2bin(rtc_data[2]);
278 alm->time.tm_mday = bcd2bin(rtc_data[3]);
279 alm->time.tm_mon = bcd2bin(rtc_data[4]) - 1;
280 alm->time.tm_year = bcd2bin(rtc_data[5]) + 100;
281
282 /* report cached alarm enable state */
283 if (rtc_irq_bits & BIT_RTC_INTERRUPTS_REG_IT_ALARM_M)
284 alm->enabled = 1;
285
286 return ret;
287}
288
289static int twl4030_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
290{
291 unsigned char alarm_data[ALL_TIME_REGS + 1];
292 int ret;
293
294 ret = twl4030_rtc_alarm_irq_set_state(0);
295 if (ret)
296 goto out;
297
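	/* As in set_time, alarm_data[0] stays free for twl4030_i2c_write()'s register-address byte */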
298 alarm_data[1] = bin2bcd(alm->time.tm_sec);
299 alarm_data[2] = bin2bcd(alm->time.tm_min);
300 alarm_data[3] = bin2bcd(alm->time.tm_hour);
301 alarm_data[4] = bin2bcd(alm->time.tm_mday);
302 alarm_data[5] = bin2bcd(alm->time.tm_mon + 1);
303 alarm_data[6] = bin2bcd(alm->time.tm_year - 100);
304
305 /* update all the alarm registers in one shot */
306 ret = twl4030_i2c_write(TWL4030_MODULE_RTC, alarm_data,
307 REG_ALARM_SECONDS_REG, ALL_TIME_REGS);
308 if (ret) {
309 dev_err(dev, "rtc_set_alarm error %d\n", ret);
310 goto out;
311 }
312
313 if (alm->enabled)
314 ret = twl4030_rtc_alarm_irq_set_state(1);
315out:
316 return ret;
317}
318
319#ifdef CONFIG_RTC_INTF_DEV
320
321static int twl4030_rtc_ioctl(struct device *dev, unsigned int cmd,
322 unsigned long arg)
323{
324 switch (cmd) {
325 case RTC_AIE_OFF:
326 return twl4030_rtc_alarm_irq_set_state(0);
327 case RTC_AIE_ON:
328 return twl4030_rtc_alarm_irq_set_state(1);
329 case RTC_UIE_OFF:
330 return twl4030_rtc_irq_set_state(0);
331 case RTC_UIE_ON:
332 return twl4030_rtc_irq_set_state(1);
333
334 default:
335 return -ENOIOCTLCMD;
336 }
337}
338
339#else
340#define twl4030_rtc_ioctl NULL
341#endif
342
343static irqreturn_t twl4030_rtc_interrupt(int irq, void *rtc)
344{
345 unsigned long events = 0;
346 int ret = IRQ_NONE;
347 int res;
348 u8 rd_reg;
349
350#ifdef CONFIG_LOCKDEP
351 /* WORKAROUND for lockdep forcing IRQF_DISABLED on us, which
352 * we don't want and can't tolerate. Although it might be
353 * friendlier not to borrow this thread context...
354 */
355 local_irq_enable();
356#endif
357
358 res = twl4030_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG);
359 if (res)
360 goto out;
361 /*
362 * Figure out source of interrupt: ALARM or TIMER in RTC_STATUS_REG.
363	 * Only one (ALARM or TIMER) interrupt source may be enabled at a
364	 * time; the result could also be cross-checked by reading
365	 * RTC_INTERRUPTS_REG[IT_TIMER,IT_ALARM].
366 */
367 if (rd_reg & BIT_RTC_STATUS_REG_ALARM_M)
368 events |= RTC_IRQF | RTC_AF;
369 else
370 events |= RTC_IRQF | RTC_UF;
371
372 res = twl4030_rtc_write_u8(rd_reg | BIT_RTC_STATUS_REG_ALARM_M,
373 REG_RTC_STATUS_REG);
374 if (res)
375 goto out;
376
377 /* Clear on Read enabled. RTC_IT bit of TWL4030_INT_PWR_ISR1
378 * needs 2 reads to clear the interrupt. One read is done in
379 * do_twl4030_pwrirq(). Doing the second read, to clear
380 * the bit.
381 *
382 * FIXME the reason PWR_ISR1 needs an extra read is that
383 * RTC_IF retriggered until we cleared REG_ALARM_M above.
384 * But re-reading like this is a bad hack; by doing so we
385 * risk wrongly clearing status for some other IRQ (losing
386 * the interrupt). Be smarter about handling RTC_UF ...
387 */
388 res = twl4030_i2c_read_u8(TWL4030_MODULE_INT,
389 &rd_reg, TWL4030_INT_PWR_ISR1);
390 if (res)
391 goto out;
392
393 /* Notify RTC core on event */
394 rtc_update_irq(rtc, 1, events);
395
396 ret = IRQ_HANDLED;
397out:
398 return ret;
399}
400
401static struct rtc_class_ops twl4030_rtc_ops = {
402 .ioctl = twl4030_rtc_ioctl,
403 .read_time = twl4030_rtc_read_time,
404 .set_time = twl4030_rtc_set_time,
405 .read_alarm = twl4030_rtc_read_alarm,
406 .set_alarm = twl4030_rtc_set_alarm,
407};
408
409/*----------------------------------------------------------------------*/
410
411static int __devinit twl4030_rtc_probe(struct platform_device *pdev)
412{
413 struct rtc_device *rtc;
414 int ret = 0;
415 int irq = platform_get_irq(pdev, 0);
416 u8 rd_reg;
417
418 if (irq < 0)
419 return irq;
420
421 rtc = rtc_device_register(pdev->name,
422 &pdev->dev, &twl4030_rtc_ops, THIS_MODULE);
423 if (IS_ERR(rtc)) {
424		ret = PTR_ERR(rtc);
425 dev_err(&pdev->dev, "can't register RTC device, err %ld\n",
426 PTR_ERR(rtc));
427 goto out0;
428
429 }
430
431 platform_set_drvdata(pdev, rtc);
432
433 ret = twl4030_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG);
434
435 if (ret < 0)
436 goto out1;
437
438 if (rd_reg & BIT_RTC_STATUS_REG_POWER_UP_M)
439 dev_warn(&pdev->dev, "Power up reset detected.\n");
440
441 if (rd_reg & BIT_RTC_STATUS_REG_ALARM_M)
442 dev_warn(&pdev->dev, "Pending Alarm interrupt detected.\n");
443
444 /* Clear RTC Power up reset and pending alarm interrupts */
445 ret = twl4030_rtc_write_u8(rd_reg, REG_RTC_STATUS_REG);
446 if (ret < 0)
447 goto out1;
448
449 ret = request_irq(irq, twl4030_rtc_interrupt,
450 IRQF_TRIGGER_RISING,
451 rtc->dev.bus_id, rtc);
452 if (ret < 0) {
453 dev_err(&pdev->dev, "IRQ is not free.\n");
454 goto out1;
455 }
456
457 /* Check RTC module status, Enable if it is off */
458 ret = twl4030_rtc_read_u8(&rd_reg, REG_RTC_CTRL_REG);
459 if (ret < 0)
460 goto out2;
461
462 if (!(rd_reg & BIT_RTC_CTRL_REG_STOP_RTC_M)) {
463 dev_info(&pdev->dev, "Enabling TWL4030-RTC.\n");
464 rd_reg = BIT_RTC_CTRL_REG_STOP_RTC_M;
465 ret = twl4030_rtc_write_u8(rd_reg, REG_RTC_CTRL_REG);
466 if (ret < 0)
467 goto out2;
468 }
469
470 /* init cached IRQ enable bits */
471 ret = twl4030_rtc_read_u8(&rtc_irq_bits, REG_RTC_INTERRUPTS_REG);
472 if (ret < 0)
473 goto out2;
474
475 return ret;
476
477
478out2:
479 free_irq(irq, rtc);
480out1:
481 rtc_device_unregister(rtc);
482out0:
483 return ret;
484}
485
486/*
487 * Disable all TWL4030 RTC module interrupts.
488 * Frees the IRQ and unregisters the RTC device; the RTC itself keeps running.
489 */
490static int __devexit twl4030_rtc_remove(struct platform_device *pdev)
491{
492 /* leave rtc running, but disable irqs */
493 struct rtc_device *rtc = platform_get_drvdata(pdev);
494 int irq = platform_get_irq(pdev, 0);
495
496 mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_ALARM_M);
497 mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M);
498
499 free_irq(irq, rtc);
500
501 rtc_device_unregister(rtc);
502 platform_set_drvdata(pdev, NULL);
503 return 0;
504}
505
506static void twl4030_rtc_shutdown(struct platform_device *pdev)
507{
508 mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M |
509 BIT_RTC_INTERRUPTS_REG_IT_ALARM_M);
510}
511
512#ifdef CONFIG_PM
513
514static unsigned char irqstat;
515
516static int twl4030_rtc_suspend(struct platform_device *pdev, pm_message_t state)
517{
518 irqstat = rtc_irq_bits;
519
520 /* REVISIT alarm may need to wake us from sleep */
521 mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M |
522 BIT_RTC_INTERRUPTS_REG_IT_ALARM_M);
523 return 0;
524}
525
526static int twl4030_rtc_resume(struct platform_device *pdev)
527{
528 set_rtc_irq_bit(irqstat);
529 return 0;
530}
531
532#else
533#define twl4030_rtc_suspend NULL
534#define twl4030_rtc_resume NULL
535#endif
536
537MODULE_ALIAS("platform:twl4030_rtc");
538
539static struct platform_driver twl4030rtc_driver = {
540 .probe = twl4030_rtc_probe,
541 .remove = __devexit_p(twl4030_rtc_remove),
542 .shutdown = twl4030_rtc_shutdown,
543 .suspend = twl4030_rtc_suspend,
544 .resume = twl4030_rtc_resume,
545 .driver = {
546 .owner = THIS_MODULE,
547 .name = "twl4030_rtc",
548 },
549};
550
551static int __init twl4030_rtc_init(void)
552{
553 return platform_driver_register(&twl4030rtc_driver);
554}
555module_init(twl4030_rtc_init);
556
557static void __exit twl4030_rtc_exit(void)
558{
559 platform_driver_unregister(&twl4030rtc_driver);
560}
561module_exit(twl4030_rtc_exit);
562
563MODULE_AUTHOR("Texas Instruments, MontaVista Software");
564MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-wm8350.c b/drivers/rtc/rtc-wm8350.c
new file mode 100644
index 000000000000..5c5e3aa91385
--- /dev/null
+++ b/drivers/rtc/rtc-wm8350.c
@@ -0,0 +1,514 @@
1/*
2 * Real Time Clock driver for Wolfson Microelectronics WM8350
3 *
4 * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC.
5 *
6 * Author: Liam Girdwood
7 * linux@wolfsonmicro.com
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 *
14 */
15
16#include <linux/module.h>
17#include <linux/kernel.h>
18#include <linux/time.h>
19#include <linux/rtc.h>
20#include <linux/bcd.h>
21#include <linux/interrupt.h>
22#include <linux/ioctl.h>
23#include <linux/completion.h>
24#include <linux/mfd/wm8350/rtc.h>
25#include <linux/mfd/wm8350/core.h>
26#include <linux/delay.h>
27#include <linux/platform_device.h>
28
29#define WM8350_SET_ALM_RETRIES 5
30#define WM8350_SET_TIME_RETRIES 5
31#define WM8350_GET_TIME_RETRIES 5
32
33#define to_wm8350_from_rtc_dev(d) container_of(d, struct wm8350, rtc.pdev.dev)
34
35/*
36 * Read current time and date in RTC
37 */
38static int wm8350_rtc_readtime(struct device *dev, struct rtc_time *tm)
39{
40 struct wm8350 *wm8350 = dev_get_drvdata(dev);
41 u16 time1[4], time2[4];
42 int retries = WM8350_GET_TIME_RETRIES, ret;
43
44 /*
45 * Read the time twice and compare.
46 * If time1 == time2, then time is valid else retry.
47 */
48 do {
49 ret = wm8350_block_read(wm8350, WM8350_RTC_SECONDS_MINUTES,
50 4, time1);
51 if (ret < 0)
52 return ret;
53 ret = wm8350_block_read(wm8350, WM8350_RTC_SECONDS_MINUTES,
54 4, time2);
55 if (ret < 0)
56 return ret;
57
58 if (memcmp(time1, time2, sizeof(time1)) == 0) {
59 tm->tm_sec = time1[0] & WM8350_RTC_SECS_MASK;
60
61 tm->tm_min = (time1[0] & WM8350_RTC_MINS_MASK)
62 >> WM8350_RTC_MINS_SHIFT;
63
64 tm->tm_hour = time1[1] & WM8350_RTC_HRS_MASK;
65
66 tm->tm_wday = ((time1[1] >> WM8350_RTC_DAY_SHIFT)
67 & 0x7) - 1;
68
69 tm->tm_mon = ((time1[2] & WM8350_RTC_MTH_MASK)
70 >> WM8350_RTC_MTH_SHIFT) - 1;
71
72 tm->tm_mday = (time1[2] & WM8350_RTC_DATE_MASK);
73
74 tm->tm_year = ((time1[3] & WM8350_RTC_YHUNDREDS_MASK)
75 >> WM8350_RTC_YHUNDREDS_SHIFT) * 100;
76 tm->tm_year += time1[3] & WM8350_RTC_YUNITS_MASK;
77
78 tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon,
79 tm->tm_year);
80 tm->tm_year -= 1900;
81
82 dev_dbg(dev, "Read (%d left): %04x %04x %04x %04x\n",
83 retries,
84 time1[0], time1[1], time1[2], time1[3]);
85
86 return 0;
87 }
88 } while (retries--);
89
90 dev_err(dev, "timed out reading RTC time\n");
91 return -EIO;
92}
93
94/*
95 * Set current time and date in RTC
96 */
97static int wm8350_rtc_settime(struct device *dev, struct rtc_time *tm)
98{
99 struct wm8350 *wm8350 = dev_get_drvdata(dev);
100 u16 time[4];
101 u16 rtc_ctrl;
102 int ret, retries = WM8350_SET_TIME_RETRIES;
103
104 time[0] = tm->tm_sec;
105 time[0] |= tm->tm_min << WM8350_RTC_MINS_SHIFT;
106 time[1] = tm->tm_hour;
107 time[1] |= (tm->tm_wday + 1) << WM8350_RTC_DAY_SHIFT;
108 time[2] = tm->tm_mday;
109 time[2] |= (tm->tm_mon + 1) << WM8350_RTC_MTH_SHIFT;
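	/* The year is stored as separate century (hundreds) and two-digit units fields */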
110 time[3] = ((tm->tm_year + 1900) / 100) << WM8350_RTC_YHUNDREDS_SHIFT;
111 time[3] |= (tm->tm_year + 1900) % 100;
112
113 dev_dbg(dev, "Setting: %04x %04x %04x %04x\n",
114 time[0], time[1], time[2], time[3]);
115
116 /* Set RTC_SET to stop the clock */
117 ret = wm8350_set_bits(wm8350, WM8350_RTC_TIME_CONTROL, WM8350_RTC_SET);
118 if (ret < 0)
119 return ret;
120
121 /* Wait until confirmation of stopping */
122 do {
123 rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);
124 schedule_timeout_uninterruptible(msecs_to_jiffies(1));
125 } while (retries-- && !(rtc_ctrl & WM8350_RTC_STS));
126
127	if (!(rtc_ctrl & WM8350_RTC_STS)) {
128 dev_err(dev, "timed out on set confirmation\n");
129 return -EIO;
130 }
131
132 /* Write time to RTC */
133 ret = wm8350_block_write(wm8350, WM8350_RTC_SECONDS_MINUTES, 4, time);
134 if (ret < 0)
135 return ret;
136
137 /* Clear RTC_SET to start the clock */
138 ret = wm8350_clear_bits(wm8350, WM8350_RTC_TIME_CONTROL,
139 WM8350_RTC_SET);
140 return ret;
141}
142
143/*
144 * Read alarm time and date in RTC
145 */
146static int wm8350_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
147{
148 struct wm8350 *wm8350 = dev_get_drvdata(dev);
149 struct rtc_time *tm = &alrm->time;
150 u16 time[4];
151 int ret;
152
153 ret = wm8350_block_read(wm8350, WM8350_ALARM_SECONDS_MINUTES, 4, time);
154 if (ret < 0)
155 return ret;
156
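	/* An alarm field programmed to all-ones means "match any"; such fields are reported as -1 per the rtc_wkalrm convention */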
157 tm->tm_sec = time[0] & WM8350_RTC_ALMSECS_MASK;
158 if (tm->tm_sec == WM8350_RTC_ALMSECS_MASK)
159 tm->tm_sec = -1;
160
161 tm->tm_min = time[0] & WM8350_RTC_ALMMINS_MASK;
162 if (tm->tm_min == WM8350_RTC_ALMMINS_MASK)
163 tm->tm_min = -1;
164 else
165 tm->tm_min >>= WM8350_RTC_ALMMINS_SHIFT;
166
167 tm->tm_hour = time[1] & WM8350_RTC_ALMHRS_MASK;
168 if (tm->tm_hour == WM8350_RTC_ALMHRS_MASK)
169 tm->tm_hour = -1;
170
171 tm->tm_wday = ((time[1] >> WM8350_RTC_ALMDAY_SHIFT) & 0x7) - 1;
172 if (tm->tm_wday > 7)
173 tm->tm_wday = -1;
174
175 tm->tm_mon = time[2] & WM8350_RTC_ALMMTH_MASK;
176 if (tm->tm_mon == WM8350_RTC_ALMMTH_MASK)
177 tm->tm_mon = -1;
178 else
179 tm->tm_mon = (tm->tm_mon >> WM8350_RTC_ALMMTH_SHIFT) - 1;
180
181 tm->tm_mday = (time[2] & WM8350_RTC_ALMDATE_MASK);
182 if (tm->tm_mday == WM8350_RTC_ALMDATE_MASK)
183 tm->tm_mday = -1;
184
185 tm->tm_year = -1;
186
187 alrm->enabled = !(time[3] & WM8350_RTC_ALMSTS);
188
189 return 0;
190}
191
192static int wm8350_rtc_stop_alarm(struct wm8350 *wm8350)
193{
194 int retries = WM8350_SET_ALM_RETRIES;
195 u16 rtc_ctrl;
196 int ret;
197
198 /* Set RTC_SET to stop the clock */
199 ret = wm8350_set_bits(wm8350, WM8350_RTC_TIME_CONTROL,
200 WM8350_RTC_ALMSET);
201 if (ret < 0)
202 return ret;
203
204 /* Wait until confirmation of stopping */
205 do {
206 rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);
207 schedule_timeout_uninterruptible(msecs_to_jiffies(1));
208 } while (retries-- && !(rtc_ctrl & WM8350_RTC_ALMSTS));
209
210 if (!(rtc_ctrl & WM8350_RTC_ALMSTS))
211 return -ETIMEDOUT;
212
213 return 0;
214}
215
216static int wm8350_rtc_start_alarm(struct wm8350 *wm8350)
217{
218 int ret;
219 int retries = WM8350_SET_ALM_RETRIES;
220 u16 rtc_ctrl;
221
222 ret = wm8350_clear_bits(wm8350, WM8350_RTC_TIME_CONTROL,
223 WM8350_RTC_ALMSET);
224 if (ret < 0)
225 return ret;
226
227 /* Wait until confirmation */
228 do {
229 rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);
230 schedule_timeout_uninterruptible(msecs_to_jiffies(1));
231 } while (retries-- && rtc_ctrl & WM8350_RTC_ALMSTS);
232
233 if (rtc_ctrl & WM8350_RTC_ALMSTS)
234 return -ETIMEDOUT;
235
236 return 0;
237}
238
239static int wm8350_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
240{
241 struct wm8350 *wm8350 = dev_get_drvdata(dev);
242 struct rtc_time *tm = &alrm->time;
243 u16 time[3];
244 int ret;
245
246 memset(time, 0, sizeof(time));
247
248 if (tm->tm_sec != -1)
249 time[0] |= tm->tm_sec;
250 else
251 time[0] |= WM8350_RTC_ALMSECS_MASK;
252
253 if (tm->tm_min != -1)
254 time[0] |= tm->tm_min << WM8350_RTC_ALMMINS_SHIFT;
255 else
256 time[0] |= WM8350_RTC_ALMMINS_MASK;
257
258 if (tm->tm_hour != -1)
259 time[1] |= tm->tm_hour;
260 else
261 time[1] |= WM8350_RTC_ALMHRS_MASK;
262
263 if (tm->tm_wday != -1)
264 time[1] |= (tm->tm_wday + 1) << WM8350_RTC_ALMDAY_SHIFT;
265 else
266 time[1] |= WM8350_RTC_ALMDAY_MASK;
267
268 if (tm->tm_mday != -1)
269 time[2] |= tm->tm_mday;
270 else
271 time[2] |= WM8350_RTC_ALMDATE_MASK;
272
273 if (tm->tm_mon != -1)
274 time[2] |= (tm->tm_mon + 1) << WM8350_RTC_ALMMTH_SHIFT;
275 else
276 time[2] |= WM8350_RTC_ALMMTH_MASK;
277
278 ret = wm8350_rtc_stop_alarm(wm8350);
279 if (ret < 0)
280 return ret;
281
282 /* Write time to RTC */
283 ret = wm8350_block_write(wm8350, WM8350_ALARM_SECONDS_MINUTES,
284 3, time);
285 if (ret < 0)
286 return ret;
287
288 if (alrm->enabled)
289 ret = wm8350_rtc_start_alarm(wm8350);
290
291 return ret;
292}
293
294/*
295 * Handle commands from user-space
296 */
297static int wm8350_rtc_ioctl(struct device *dev, unsigned int cmd,
298 unsigned long arg)
299{
300 struct wm8350 *wm8350 = dev_get_drvdata(dev);
301
302 switch (cmd) {
303 case RTC_AIE_OFF:
304 return wm8350_rtc_stop_alarm(wm8350);
305 case RTC_AIE_ON:
306 return wm8350_rtc_start_alarm(wm8350);
307
308 case RTC_UIE_OFF:
309 wm8350_mask_irq(wm8350, WM8350_IRQ_RTC_SEC);
310 break;
311 case RTC_UIE_ON:
312 wm8350_unmask_irq(wm8350, WM8350_IRQ_RTC_SEC);
313 break;
314
315 default:
316 return -ENOIOCTLCMD;
317 }
318
319 return 0;
320}
321
322static void wm8350_rtc_alarm_handler(struct wm8350 *wm8350, int irq,
323 void *data)
324{
325 struct rtc_device *rtc = wm8350->rtc.rtc;
326 int ret;
327
328 rtc_update_irq(rtc, 1, RTC_IRQF | RTC_AF);
329
330 /* Make it one shot */
331 ret = wm8350_set_bits(wm8350, WM8350_RTC_TIME_CONTROL,
332 WM8350_RTC_ALMSET);
333 if (ret != 0) {
334 dev_err(&(wm8350->rtc.pdev->dev),
335 "Failed to disable alarm: %d\n", ret);
336 }
337}
338
339static void wm8350_rtc_update_handler(struct wm8350 *wm8350, int irq,
340 void *data)
341{
342 struct rtc_device *rtc = wm8350->rtc.rtc;
343
344 rtc_update_irq(rtc, 1, RTC_IRQF | RTC_UF);
345}
346
347static const struct rtc_class_ops wm8350_rtc_ops = {
348 .ioctl = wm8350_rtc_ioctl,
349 .read_time = wm8350_rtc_readtime,
350 .set_time = wm8350_rtc_settime,
351 .read_alarm = wm8350_rtc_readalarm,
352 .set_alarm = wm8350_rtc_setalarm,
353};
354
355#ifdef CONFIG_PM
356static int wm8350_rtc_suspend(struct platform_device *pdev, pm_message_t state)
357{
358 struct wm8350 *wm8350 = dev_get_drvdata(&pdev->dev);
359 int ret = 0;
360 u16 reg;
361
362 reg = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);
363
364 if (device_may_wakeup(&wm8350->rtc.pdev->dev) &&
365 reg & WM8350_RTC_ALMSTS) {
366 ret = wm8350_rtc_stop_alarm(wm8350);
367 if (ret != 0)
368 dev_err(&pdev->dev, "Failed to stop RTC alarm: %d\n",
369 ret);
370 }
371
372 return ret;
373}
374
375static int wm8350_rtc_resume(struct platform_device *pdev)
376{
377 struct wm8350 *wm8350 = dev_get_drvdata(&pdev->dev);
378 int ret;
379
380 if (wm8350->rtc.alarm_enabled) {
381 ret = wm8350_rtc_start_alarm(wm8350);
382 if (ret != 0)
383 dev_err(&pdev->dev,
384 "Failed to restart RTC alarm: %d\n", ret);
385 }
386
387 return 0;
388}
389
390#else
391#define wm8350_rtc_suspend NULL
392#define wm8350_rtc_resume NULL
393#endif
394
395static int wm8350_rtc_probe(struct platform_device *pdev)
396{
397 struct wm8350 *wm8350 = platform_get_drvdata(pdev);
398 struct wm8350_rtc *wm_rtc = &wm8350->rtc;
399 int ret = 0;
400 u16 timectl, power5;
401
402 timectl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);
403 if (timectl & WM8350_RTC_BCD) {
404 dev_err(&pdev->dev, "RTC BCD mode not supported\n");
405 return -EINVAL;
406 }
407 if (timectl & WM8350_RTC_12HR) {
408 dev_err(&pdev->dev, "RTC 12 hour mode not supported\n");
409 return -EINVAL;
410 }
411
412 /* enable the RTC if it's not already enabled */
413 power5 = wm8350_reg_read(wm8350, WM8350_POWER_MGMT_5);
414 if (!(power5 & WM8350_RTC_TICK_ENA)) {
415 dev_info(wm8350->dev, "Starting RTC\n");
416
417 wm8350_reg_unlock(wm8350);
418
419 ret = wm8350_set_bits(wm8350, WM8350_POWER_MGMT_5,
420 WM8350_RTC_TICK_ENA);
421 if (ret < 0) {
422 dev_err(&pdev->dev, "failed to enable RTC: %d\n", ret);
423 return ret;
424 }
425
426 wm8350_reg_lock(wm8350);
427 }
428
429 if (timectl & WM8350_RTC_STS) {
430 int retries;
431
432 ret = wm8350_clear_bits(wm8350, WM8350_RTC_TIME_CONTROL,
433 WM8350_RTC_SET);
434 if (ret < 0) {
435 dev_err(&pdev->dev, "failed to start: %d\n", ret);
436 return ret;
437 }
438
439 retries = WM8350_SET_TIME_RETRIES;
440 do {
441 timectl = wm8350_reg_read(wm8350,
442 WM8350_RTC_TIME_CONTROL);
443 } while (timectl & WM8350_RTC_STS && retries--);
444
445		if (timectl & WM8350_RTC_STS) {
446 dev_err(&pdev->dev, "failed to start: timeout\n");
447 return -ENODEV;
448 }
449 }
450
451 device_init_wakeup(&pdev->dev, 1);
452
453 wm_rtc->rtc = rtc_device_register("wm8350", &pdev->dev,
454 &wm8350_rtc_ops, THIS_MODULE);
455 if (IS_ERR(wm_rtc->rtc)) {
456 ret = PTR_ERR(wm_rtc->rtc);
457 dev_err(&pdev->dev, "failed to register RTC: %d\n", ret);
458 return ret;
459 }
460
461 wm8350_mask_irq(wm8350, WM8350_IRQ_RTC_SEC);
462 wm8350_mask_irq(wm8350, WM8350_IRQ_RTC_PER);
463
464 wm8350_register_irq(wm8350, WM8350_IRQ_RTC_SEC,
465 wm8350_rtc_update_handler, NULL);
466
467 wm8350_register_irq(wm8350, WM8350_IRQ_RTC_ALM,
468 wm8350_rtc_alarm_handler, NULL);
469 wm8350_unmask_irq(wm8350, WM8350_IRQ_RTC_ALM);
470
471 return 0;
472}
473
474static int __devexit wm8350_rtc_remove(struct platform_device *pdev)
475{
476 struct wm8350 *wm8350 = platform_get_drvdata(pdev);
477 struct wm8350_rtc *wm_rtc = &wm8350->rtc;
478
479 wm8350_mask_irq(wm8350, WM8350_IRQ_RTC_SEC);
480
481 wm8350_free_irq(wm8350, WM8350_IRQ_RTC_SEC);
482 wm8350_free_irq(wm8350, WM8350_IRQ_RTC_ALM);
483
484 rtc_device_unregister(wm_rtc->rtc);
485
486 return 0;
487}
488
489static struct platform_driver wm8350_rtc_driver = {
490 .probe = wm8350_rtc_probe,
491 .remove = __devexit_p(wm8350_rtc_remove),
492 .suspend = wm8350_rtc_suspend,
493 .resume = wm8350_rtc_resume,
494 .driver = {
495 .name = "wm8350-rtc",
496 },
497};
498
499static int __init wm8350_rtc_init(void)
500{
501 return platform_driver_register(&wm8350_rtc_driver);
502}
503module_init(wm8350_rtc_init);
504
505static void __exit wm8350_rtc_exit(void)
506{
507 platform_driver_unregister(&wm8350_rtc_driver);
508}
509module_exit(wm8350_rtc_exit);
510
511MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
512MODULE_DESCRIPTION("RTC driver for the WM8350");
513MODULE_LICENSE("GPL");
514MODULE_ALIAS("platform:wm8350-rtc");
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 0a225ccda026..363bd1303d21 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1746,6 +1746,11 @@ restart:
1746 goto restart; 1746 goto restart;
1747 } 1747 }
1748 1748
1749 /* log sense for fatal error */
1750 if (cqr->status == DASD_CQR_FAILED) {
1751 dasd_log_sense(cqr, &cqr->irb);
1752 }
1753
1749 /* First of all call extended error reporting. */ 1754 /* First of all call extended error reporting. */
1750 if (dasd_eer_enabled(base) && 1755 if (dasd_eer_enabled(base) &&
1751 cqr->status == DASD_CQR_FAILED) { 1756 cqr->status == DASD_CQR_FAILED) {
@@ -2011,10 +2016,9 @@ static void dasd_flush_request_queue(struct dasd_block *block)
2011 spin_unlock_irq(&block->request_queue_lock); 2016 spin_unlock_irq(&block->request_queue_lock);
2012} 2017}
2013 2018
2014static int dasd_open(struct inode *inp, struct file *filp) 2019static int dasd_open(struct block_device *bdev, fmode_t mode)
2015{ 2020{
2016 struct gendisk *disk = inp->i_bdev->bd_disk; 2021 struct dasd_block *block = bdev->bd_disk->private_data;
2017 struct dasd_block *block = disk->private_data;
2018 struct dasd_device *base = block->base; 2022 struct dasd_device *base = block->base;
2019 int rc; 2023 int rc;
2020 2024
@@ -2052,9 +2056,8 @@ unlock:
2052 return rc; 2056 return rc;
2053} 2057}
2054 2058
2055static int dasd_release(struct inode *inp, struct file *filp) 2059static int dasd_release(struct gendisk *disk, fmode_t mode)
2056{ 2060{
2057 struct gendisk *disk = inp->i_bdev->bd_disk;
2058 struct dasd_block *block = disk->private_data; 2061 struct dasd_block *block = disk->private_data;
2059 2062
2060 atomic_dec(&block->open_count); 2063 atomic_dec(&block->open_count);
@@ -2089,8 +2092,7 @@ dasd_device_operations = {
2089 .owner = THIS_MODULE, 2092 .owner = THIS_MODULE,
2090 .open = dasd_open, 2093 .open = dasd_open,
2091 .release = dasd_release, 2094 .release = dasd_release,
2092 .ioctl = dasd_ioctl, 2095 .locked_ioctl = dasd_ioctl,
2093 .compat_ioctl = dasd_compat_ioctl,
2094 .getgeo = dasd_getgeo, 2096 .getgeo = dasd_getgeo,
2095}; 2097};
2096 2098
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index aee6565aaf98..e99d566b69cc 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -99,7 +99,7 @@ int dasd_scan_partitions(struct dasd_block *block)
99 struct block_device *bdev; 99 struct block_device *bdev;
100 100
101 bdev = bdget_disk(block->gdp, 0); 101 bdev = bdget_disk(block->gdp, 0);
102 if (!bdev || blkdev_get(bdev, FMODE_READ, 1) < 0) 102 if (!bdev || blkdev_get(bdev, FMODE_READ) < 0)
103 return -ENODEV; 103 return -ENODEV;
104 /* 104 /*
105 * See fs/partition/check.c:register_disk,rescan_partitions 105 * See fs/partition/check.c:register_disk,rescan_partitions
@@ -152,7 +152,7 @@ void dasd_destroy_partitions(struct dasd_block *block)
152 152
153 invalidate_partition(block->gdp, 0); 153 invalidate_partition(block->gdp, 0);
154 /* Matching blkdev_put to the blkdev_get in dasd_scan_partitions. */ 154 /* Matching blkdev_put to the blkdev_get in dasd_scan_partitions. */
155 blkdev_put(bdev); 155 blkdev_put(bdev, FMODE_READ);
156 set_capacity(block->gdp, 0); 156 set_capacity(block->gdp, 0);
157} 157}
158 158
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 489d5fe488fb..05a14536c369 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -610,8 +610,7 @@ int dasd_scan_partitions(struct dasd_block *);
610void dasd_destroy_partitions(struct dasd_block *); 610void dasd_destroy_partitions(struct dasd_block *);
611 611
612/* externals in dasd_ioctl.c */ 612/* externals in dasd_ioctl.c */
613int dasd_ioctl(struct inode *, struct file *, unsigned int, unsigned long); 613int dasd_ioctl(struct block_device *, fmode_t, unsigned int, unsigned long);
614long dasd_compat_ioctl(struct file *, unsigned int, unsigned long);
615 614
616/* externals in dasd_proc.c */ 615/* externals in dasd_proc.c */
617int dasd_proc_init(void); 616int dasd_proc_init(void);
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index 91a64630cb0f..b82d816d9ef7 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -366,10 +366,9 @@ static int dasd_ioctl_readall_cmb(struct dasd_block *block, unsigned int cmd,
366} 366}
367 367
368int 368int
369dasd_ioctl(struct inode *inode, struct file *file, 369dasd_ioctl(struct block_device *bdev, fmode_t mode,
370 unsigned int cmd, unsigned long arg) 370 unsigned int cmd, unsigned long arg)
371{ 371{
372 struct block_device *bdev = inode->i_bdev;
373 struct dasd_block *block = bdev->bd_disk->private_data; 372 struct dasd_block *block = bdev->bd_disk->private_data;
374 void __user *argp = (void __user *)arg; 373 void __user *argp = (void __user *)arg;
375 374
@@ -421,15 +420,3 @@ dasd_ioctl(struct inode *inode, struct file *file,
421 return -EINVAL; 420 return -EINVAL;
422 } 421 }
423} 422}
424
425long
426dasd_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
427{
428 int rval;
429
430 lock_kernel();
431 rval = dasd_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
432 unlock_kernel();
433
434 return (rval == -EINVAL) ? -ENOIOCTLCMD : rval;
435}
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index a7ff167d5b81..63f26a135fe5 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -31,8 +31,8 @@
31#define PRINT_WARN(x...) printk(KERN_WARNING DCSSBLK_NAME " warning: " x) 31#define PRINT_WARN(x...) printk(KERN_WARNING DCSSBLK_NAME " warning: " x)
32#define PRINT_ERR(x...) printk(KERN_ERR DCSSBLK_NAME " error: " x) 32#define PRINT_ERR(x...) printk(KERN_ERR DCSSBLK_NAME " error: " x)
33 33
34static int dcssblk_open(struct inode *inode, struct file *filp); 34static int dcssblk_open(struct block_device *bdev, fmode_t mode);
35static int dcssblk_release(struct inode *inode, struct file *filp); 35static int dcssblk_release(struct gendisk *disk, fmode_t mode);
36static int dcssblk_make_request(struct request_queue *q, struct bio *bio); 36static int dcssblk_make_request(struct request_queue *q, struct bio *bio);
37static int dcssblk_direct_access(struct block_device *bdev, sector_t secnum, 37static int dcssblk_direct_access(struct block_device *bdev, sector_t secnum,
38 void **kaddr, unsigned long *pfn); 38 void **kaddr, unsigned long *pfn);
@@ -776,32 +776,31 @@ out_buf:
776} 776}
777 777
778static int 778static int
779dcssblk_open(struct inode *inode, struct file *filp) 779dcssblk_open(struct block_device *bdev, fmode_t mode)
780{ 780{
781 struct dcssblk_dev_info *dev_info; 781 struct dcssblk_dev_info *dev_info;
782 int rc; 782 int rc;
783 783
784 dev_info = inode->i_bdev->bd_disk->private_data; 784 dev_info = bdev->bd_disk->private_data;
785 if (NULL == dev_info) { 785 if (NULL == dev_info) {
786 rc = -ENODEV; 786 rc = -ENODEV;
787 goto out; 787 goto out;
788 } 788 }
789 atomic_inc(&dev_info->use_count); 789 atomic_inc(&dev_info->use_count);
790 inode->i_bdev->bd_block_size = 4096; 790 bdev->bd_block_size = 4096;
791 rc = 0; 791 rc = 0;
792out: 792out:
793 return rc; 793 return rc;
794} 794}
795 795
796static int 796static int
797dcssblk_release(struct inode *inode, struct file *filp) 797dcssblk_release(struct gendisk *disk, fmode_t mode)
798{ 798{
799 struct dcssblk_dev_info *dev_info; 799 struct dcssblk_dev_info *dev_info = disk->private_data;
800 struct segment_info *entry; 800 struct segment_info *entry;
801 int rc; 801 int rc;
802 802
803 dev_info = inode->i_bdev->bd_disk->private_data; 803 if (!dev_info) {
804 if (NULL == dev_info) {
805 rc = -ENODEV; 804 rc = -ENODEV;
806 goto out; 805 goto out;
807 } 806 }
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index eb5f1b8bc57f..ec9c0bcf66ee 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -324,6 +324,9 @@ static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
324 case 0x0120: 324 case 0x0120:
325 break; 325 break;
326 default: 326 default:
327 pr_warning("assign storage failed (cmd=0x%08x, "
328 "response=0x%04x, rn=0x%04x)\n", cmd,
329 sccb->header.response_code, rn);
327 rc = -EIO; 330 rc = -EIO;
328 break; 331 break;
329 } 332 }
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index a25b8bf54f41..ae18baf59f06 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -43,9 +43,9 @@
43/* 43/*
44 * file operation structure for tape block frontend 44 * file operation structure for tape block frontend
45 */ 45 */
46static int tapeblock_open(struct inode *, struct file *); 46static int tapeblock_open(struct block_device *, fmode_t);
47static int tapeblock_release(struct inode *, struct file *); 47static int tapeblock_release(struct gendisk *, fmode_t);
48static int tapeblock_ioctl(struct inode *, struct file *, unsigned int, 48static int tapeblock_ioctl(struct block_device *, fmode_t, unsigned int,
49 unsigned long); 49 unsigned long);
50static int tapeblock_medium_changed(struct gendisk *); 50static int tapeblock_medium_changed(struct gendisk *);
51static int tapeblock_revalidate_disk(struct gendisk *); 51static int tapeblock_revalidate_disk(struct gendisk *);
@@ -54,7 +54,7 @@ static struct block_device_operations tapeblock_fops = {
54 .owner = THIS_MODULE, 54 .owner = THIS_MODULE,
55 .open = tapeblock_open, 55 .open = tapeblock_open,
56 .release = tapeblock_release, 56 .release = tapeblock_release,
57 .ioctl = tapeblock_ioctl, 57 .locked_ioctl = tapeblock_ioctl,
58 .media_changed = tapeblock_medium_changed, 58 .media_changed = tapeblock_medium_changed,
59 .revalidate_disk = tapeblock_revalidate_disk, 59 .revalidate_disk = tapeblock_revalidate_disk,
60}; 60};
@@ -76,7 +76,7 @@ tapeblock_trigger_requeue(struct tape_device *device)
76static void 76static void
77tapeblock_end_request(struct request *req, int error) 77tapeblock_end_request(struct request *req, int error)
78{ 78{
79 if (__blk_end_request(req, error, blk_rq_bytes(req))) 79 if (blk_end_request(req, error, blk_rq_bytes(req)))
80 BUG(); 80 BUG();
81} 81}
82 82
@@ -166,7 +166,7 @@ tapeblock_requeue(struct work_struct *work) {
166 nr_queued++; 166 nr_queued++;
167 spin_unlock(get_ccwdev_lock(device->cdev)); 167 spin_unlock(get_ccwdev_lock(device->cdev));
168 168
169 spin_lock(&device->blk_data.request_queue_lock); 169 spin_lock_irq(&device->blk_data.request_queue_lock);
170 while ( 170 while (
171 !blk_queue_plugged(queue) && 171 !blk_queue_plugged(queue) &&
172 elv_next_request(queue) && 172 elv_next_request(queue) &&
@@ -176,7 +176,9 @@ tapeblock_requeue(struct work_struct *work) {
176 if (rq_data_dir(req) == WRITE) { 176 if (rq_data_dir(req) == WRITE) {
177 DBF_EVENT(1, "TBLOCK: Rejecting write request\n"); 177 DBF_EVENT(1, "TBLOCK: Rejecting write request\n");
178 blkdev_dequeue_request(req); 178 blkdev_dequeue_request(req);
179 spin_unlock_irq(&device->blk_data.request_queue_lock);
179 tapeblock_end_request(req, -EIO); 180 tapeblock_end_request(req, -EIO);
181 spin_lock_irq(&device->blk_data.request_queue_lock);
180 continue; 182 continue;
181 } 183 }
182 blkdev_dequeue_request(req); 184 blkdev_dequeue_request(req);
@@ -364,13 +366,12 @@ tapeblock_medium_changed(struct gendisk *disk)
364 * Block frontend tape device open function. 366 * Block frontend tape device open function.
365 */ 367 */
366static int 368static int
367tapeblock_open(struct inode *inode, struct file *filp) 369tapeblock_open(struct block_device *bdev, fmode_t mode)
368{ 370{
369 struct gendisk * disk; 371 struct gendisk * disk = bdev->bd_disk;
370 struct tape_device * device; 372 struct tape_device * device;
371 int rc; 373 int rc;
372 374
373 disk = inode->i_bdev->bd_disk;
374 device = tape_get_device_reference(disk->private_data); 375 device = tape_get_device_reference(disk->private_data);
375 376
376 if (device->required_tapemarks) { 377 if (device->required_tapemarks) {
@@ -410,9 +411,8 @@ release:
410 * we just get the pointer here and release the reference. 411 * we just get the pointer here and release the reference.
411 */ 412 */
412static int 413static int
413tapeblock_release(struct inode *inode, struct file *filp) 414tapeblock_release(struct gendisk *disk, fmode_t mode)
414{ 415{
415 struct gendisk *disk = inode->i_bdev->bd_disk;
416 struct tape_device *device = disk->private_data; 416 struct tape_device *device = disk->private_data;
417 417
418 tape_state_set(device, TS_IN_USE); 418 tape_state_set(device, TS_IN_USE);
@@ -427,22 +427,21 @@ tapeblock_release(struct inode *inode, struct file *filp)
427 */ 427 */
428static int 428static int
429tapeblock_ioctl( 429tapeblock_ioctl(
430 struct inode * inode, 430 struct block_device * bdev,
431 struct file * file, 431 fmode_t mode,
432 unsigned int command, 432 unsigned int command,
433 unsigned long arg 433 unsigned long arg
434) { 434) {
435 int rc; 435 int rc;
436 int minor; 436 int minor;
437 struct gendisk *disk; 437 struct gendisk *disk = bdev->bd_disk;
438 struct tape_device *device; 438 struct tape_device *device;
439 439
440 rc = 0; 440 rc = 0;
441 disk = inode->i_bdev->bd_disk;
442 BUG_ON(!disk); 441 BUG_ON(!disk);
443 device = disk->private_data; 442 device = disk->private_data;
444 BUG_ON(!device); 443 BUG_ON(!device);
445 minor = iminor(inode); 444 minor = MINOR(bdev->bd_dev);
446 445
447 DBF_LH(6, "tapeblock_ioctl(0x%0x)\n", command); 446 DBF_LH(6, "tapeblock_ioctl(0x%0x)\n", command);
448 DBF_LH(6, "device = %d:%d\n", tapeblock_major, minor); 447 DBF_LH(6, "device = %d:%d\n", tapeblock_major, minor);
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index d7073dbf825c..f9bb51fa7f5b 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -1200,7 +1200,7 @@ tape_open(struct tape_device *device)
1200{ 1200{
1201 int rc; 1201 int rc;
1202 1202
1203 spin_lock(get_ccwdev_lock(device->cdev)); 1203 spin_lock_irq(get_ccwdev_lock(device->cdev));
1204 if (device->tape_state == TS_NOT_OPER) { 1204 if (device->tape_state == TS_NOT_OPER) {
1205 DBF_EVENT(6, "TAPE:nodev\n"); 1205 DBF_EVENT(6, "TAPE:nodev\n");
1206 rc = -ENODEV; 1206 rc = -ENODEV;
@@ -1218,7 +1218,7 @@ tape_open(struct tape_device *device)
1218 tape_state_set(device, TS_IN_USE); 1218 tape_state_set(device, TS_IN_USE);
1219 rc = 0; 1219 rc = 0;
1220 } 1220 }
1221 spin_unlock(get_ccwdev_lock(device->cdev)); 1221 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1222 return rc; 1222 return rc;
1223} 1223}
1224 1224
@@ -1228,11 +1228,11 @@ tape_open(struct tape_device *device)
1228int 1228int
1229tape_release(struct tape_device *device) 1229tape_release(struct tape_device *device)
1230{ 1230{
1231 spin_lock(get_ccwdev_lock(device->cdev)); 1231 spin_lock_irq(get_ccwdev_lock(device->cdev));
1232 if (device->tape_state == TS_IN_USE) 1232 if (device->tape_state == TS_IN_USE)
1233 tape_state_set(device, TS_UNUSED); 1233 tape_state_set(device, TS_UNUSED);
1234 module_put(device->discipline->owner); 1234 module_put(device->discipline->owner);
1235 spin_unlock(get_ccwdev_lock(device->cdev)); 1235 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1236 return 0; 1236 return 0;
1237} 1237}
1238 1238
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 4e78c82194b4..4e4008325e28 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -874,11 +874,15 @@ void ccw_device_move_to_orphanage(struct work_struct *work)
874 replacing_cdev = get_disc_ccwdev_by_dev_id(&dev_id, cdev); 874 replacing_cdev = get_disc_ccwdev_by_dev_id(&dev_id, cdev);
875 if (replacing_cdev) { 875 if (replacing_cdev) {
876 sch_attach_disconnected_device(sch, replacing_cdev); 876 sch_attach_disconnected_device(sch, replacing_cdev);
877 /* Release reference from get_disc_ccwdev_by_dev_id() */
878 put_device(&cdev->dev);
877 return; 879 return;
878 } 880 }
879 replacing_cdev = get_orphaned_ccwdev_by_dev_id(css, &dev_id); 881 replacing_cdev = get_orphaned_ccwdev_by_dev_id(css, &dev_id);
880 if (replacing_cdev) { 882 if (replacing_cdev) {
881 sch_attach_orphaned_device(sch, replacing_cdev); 883 sch_attach_orphaned_device(sch, replacing_cdev);
884 /* Release reference from get_orphaned_ccwdev_by_dev_id() */
885 put_device(&cdev->dev);
882 return; 886 return;
883 } 887 }
884 sch_create_and_recog_new_device(sch); 888 sch_create_and_recog_new_device(sch);
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index b5390821434f..f05590355be8 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -20,6 +20,7 @@ static struct dentry *debugfs_root;
20#define MAX_DEBUGFS_QUEUES 32 20#define MAX_DEBUGFS_QUEUES 32
21static struct dentry *debugfs_queues[MAX_DEBUGFS_QUEUES] = { NULL }; 21static struct dentry *debugfs_queues[MAX_DEBUGFS_QUEUES] = { NULL };
22static DEFINE_MUTEX(debugfs_mutex); 22static DEFINE_MUTEX(debugfs_mutex);
23#define QDIO_DEBUGFS_NAME_LEN 40
23 24
24void qdio_allocate_do_dbf(struct qdio_initialize *init_data) 25void qdio_allocate_do_dbf(struct qdio_initialize *init_data)
25{ 26{
@@ -152,17 +153,6 @@ static int qstat_seq_open(struct inode *inode, struct file *filp)
152 filp->f_path.dentry->d_inode->i_private); 153 filp->f_path.dentry->d_inode->i_private);
153} 154}
154 155
155static void get_queue_name(struct qdio_q *q, struct ccw_device *cdev, char *name)
156{
157 memset(name, 0, sizeof(name));
158 sprintf(name, "%s", dev_name(&cdev->dev));
159 if (q->is_input_q)
160 sprintf(name + strlen(name), "_input");
161 else
162 sprintf(name + strlen(name), "_output");
163 sprintf(name + strlen(name), "_%d", q->nr);
164}
165
166static void remove_debugfs_entry(struct qdio_q *q) 156static void remove_debugfs_entry(struct qdio_q *q)
167{ 157{
168 int i; 158 int i;
@@ -189,14 +179,17 @@ static struct file_operations debugfs_fops = {
189static void setup_debugfs_entry(struct qdio_q *q, struct ccw_device *cdev) 179static void setup_debugfs_entry(struct qdio_q *q, struct ccw_device *cdev)
190{ 180{
191 int i = 0; 181 int i = 0;
192 char name[40]; 182 char name[QDIO_DEBUGFS_NAME_LEN];
193 183
194 while (debugfs_queues[i] != NULL) { 184 while (debugfs_queues[i] != NULL) {
195 i++; 185 i++;
196 if (i >= MAX_DEBUGFS_QUEUES) 186 if (i >= MAX_DEBUGFS_QUEUES)
197 return; 187 return;
198 } 188 }
199 get_queue_name(q, cdev, name); 189 snprintf(name, QDIO_DEBUGFS_NAME_LEN, "%s_%s_%d",
190 dev_name(&cdev->dev),
191 q->is_input_q ? "input" : "output",
192 q->nr);
200 debugfs_queues[i] = debugfs_create_file(name, S_IFREG | S_IRUGO | S_IWUSR, 193 debugfs_queues[i] = debugfs_create_file(name, S_IFREG | S_IRUGO | S_IWUSR,
201 debugfs_root, q, &debugfs_fops); 194 debugfs_root, q, &debugfs_fops);
202} 195}
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index a50682d2a0fa..7c8659151993 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -1083,7 +1083,6 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
1083 case -EIO: 1083 case -EIO:
1084 sprintf(dbf_text, "ierr%4x", irq_ptr->schid.sch_no); 1084 sprintf(dbf_text, "ierr%4x", irq_ptr->schid.sch_no);
1085 QDIO_DBF_TEXT2(1, setup, dbf_text); 1085 QDIO_DBF_TEXT2(1, setup, dbf_text);
1086 qdio_int_error(cdev);
1087 return; 1086 return;
1088 case -ETIMEDOUT: 1087 case -ETIMEDOUT:
1089 sprintf(dbf_text, "qtoh%4x", irq_ptr->schid.sch_no); 1088 sprintf(dbf_text, "qtoh%4x", irq_ptr->schid.sch_no);
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 326db1e827c4..e3fe6838293a 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -659,9 +659,9 @@ static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf,
659 hr_time = ktime_set(0, poll_timeout); 659 hr_time = ktime_set(0, poll_timeout);
660 660
661 if (!hrtimer_is_queued(&ap_poll_timer) || 661 if (!hrtimer_is_queued(&ap_poll_timer) ||
662 !hrtimer_forward(&ap_poll_timer, ap_poll_timer.expires, hr_time)) { 662 !hrtimer_forward(&ap_poll_timer, hrtimer_get_expires(&ap_poll_timer), hr_time)) {
663 ap_poll_timer.expires = hr_time; 663 hrtimer_set_expires(&ap_poll_timer, hr_time);
664 hrtimer_start(&ap_poll_timer, hr_time, HRTIMER_MODE_ABS); 664 hrtimer_start_expires(&ap_poll_timer, HRTIMER_MODE_ABS);
665 } 665 }
666 return count; 666 return count;
667} 667}
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index ff4a6931bb8e..3d442444c618 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -322,13 +322,13 @@ static int __init kvm_devices_init(void)
322 return rc; 322 return rc;
323 } 323 }
324 324
325 rc = vmem_add_mapping(PFN_PHYS(max_pfn), PAGE_SIZE); 325 rc = vmem_add_mapping(real_memory_size, PAGE_SIZE);
326 if (rc) { 326 if (rc) {
327 s390_root_dev_unregister(kvm_root); 327 s390_root_dev_unregister(kvm_root);
328 return rc; 328 return rc;
329 } 329 }
330 330
331 kvm_devices = (void *) PFN_PHYS(max_pfn); 331 kvm_devices = (void *) real_memory_size;
332 332
333 ctl_set_bit(0, 9); 333 ctl_set_bit(0, 9);
334 register_external_interrupt(0x2603, kvm_extint_handler); 334 register_external_interrupt(0x2603, kvm_extint_handler);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 7de410d5be4a..52d26592c72c 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -3025,7 +3025,7 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb,
3025 struct qdio_buffer *buffer, int is_tso, int *next_element_to_fill, 3025 struct qdio_buffer *buffer, int is_tso, int *next_element_to_fill,
3026 int offset) 3026 int offset)
3027{ 3027{
3028 int length = skb->len - offset; 3028 int length = skb->len;
3029 int length_here; 3029 int length_here;
3030 int element; 3030 int element;
3031 char *data; 3031 char *data;
@@ -3037,6 +3037,7 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb,
3037 3037
3038 if (offset >= 0) { 3038 if (offset >= 0) {
3039 data = skb->data + offset; 3039 data = skb->data + offset;
3040 length -= offset;
3040 first_lap = 0; 3041 first_lap = 0;
3041 } 3042 }
3042 3043
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 955ba7a31b90..1b1e80336d2c 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -373,8 +373,6 @@ static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
373 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); 373 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
374 374
375 qeth_set_allowed_threads(card, 0, 1); 375 qeth_set_allowed_threads(card, 0, 1);
376 if (qeth_wait_for_threads(card, ~QETH_RECOVER_THREAD))
377 return -ERESTARTSYS;
378 if (card->read.state == CH_STATE_UP && 376 if (card->read.state == CH_STATE_UP &&
379 card->write.state == CH_STATE_UP && 377 card->write.state == CH_STATE_UP &&
380 (card->state == CARD_STATE_UP)) { 378 (card->state == CARD_STATE_UP)) {
@@ -451,12 +449,15 @@ static void qeth_l2_process_inbound_buffer(struct qeth_card *card,
451 netif_rx(skb); 449 netif_rx(skb);
452 break; 450 break;
453 case QETH_HEADER_TYPE_OSN: 451 case QETH_HEADER_TYPE_OSN:
454 skb_push(skb, sizeof(struct qeth_hdr)); 452 if (card->info.type == QETH_CARD_TYPE_OSN) {
455 skb_copy_to_linear_data(skb, hdr, 453 skb_push(skb, sizeof(struct qeth_hdr));
454 skb_copy_to_linear_data(skb, hdr,
456 sizeof(struct qeth_hdr)); 455 sizeof(struct qeth_hdr));
457 len = skb->len; 456 len = skb->len;
458 card->osn_info.data_cb(skb); 457 card->osn_info.data_cb(skb);
459 break; 458 break;
459 }
460 /* else unknown */
460 default: 461 default:
461 dev_kfree_skb_any(skb); 462 dev_kfree_skb_any(skb);
462 QETH_DBF_TEXT(TRACE, 3, "inbunkno"); 463 QETH_DBF_TEXT(TRACE, 3, "inbunkno");
@@ -975,12 +976,6 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
975 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); 976 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
976 977
977 qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1); 978 qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1);
978 if (qeth_wait_for_threads(card, ~QETH_RECOVER_THREAD)) {
979 PRINT_WARN("set_online of card %s interrupted by user!\n",
980 CARD_BUS_ID(card));
981 return -ERESTARTSYS;
982 }
983
984 recover_flag = card->state; 979 recover_flag = card->state;
985 rc = ccw_device_set_online(CARD_RDEV(card)); 980 rc = ccw_device_set_online(CARD_RDEV(card));
986 if (rc) { 981 if (rc) {
@@ -1091,11 +1086,7 @@ static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev,
1091 if (card->dev && netif_carrier_ok(card->dev)) 1086 if (card->dev && netif_carrier_ok(card->dev))
1092 netif_carrier_off(card->dev); 1087 netif_carrier_off(card->dev);
1093 recover_flag = card->state; 1088 recover_flag = card->state;
1094 if (qeth_l2_stop_card(card, recovery_mode) == -ERESTARTSYS) { 1089 qeth_l2_stop_card(card, recovery_mode);
1095 PRINT_WARN("Stopping card %s interrupted by user!\n",
1096 CARD_BUS_ID(card));
1097 return -ERESTARTSYS;
1098 }
1099 rc = ccw_device_set_offline(CARD_DDEV(card)); 1090 rc = ccw_device_set_offline(CARD_DDEV(card));
1100 rc2 = ccw_device_set_offline(CARD_WDEV(card)); 1091 rc2 = ccw_device_set_offline(CARD_WDEV(card));
1101 rc3 = ccw_device_set_offline(CARD_RDEV(card)); 1092 rc3 = ccw_device_set_offline(CARD_RDEV(card));
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 99547dea44de..ed59fedd5922 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2064,8 +2064,6 @@ static int qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
2064 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); 2064 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
2065 2065
2066 qeth_set_allowed_threads(card, 0, 1); 2066 qeth_set_allowed_threads(card, 0, 1);
2067 if (qeth_wait_for_threads(card, ~QETH_RECOVER_THREAD))
2068 return -ERESTARTSYS;
2069 if (card->read.state == CH_STATE_UP && 2067 if (card->read.state == CH_STATE_UP &&
2070 card->write.state == CH_STATE_UP && 2068 card->write.state == CH_STATE_UP &&
2071 (card->state == CARD_STATE_UP)) { 2069 (card->state == CARD_STATE_UP)) {
@@ -3049,11 +3047,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
3049 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); 3047 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
3050 3048
3051 qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1); 3049 qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1);
3052 if (qeth_wait_for_threads(card, ~QETH_RECOVER_THREAD)) {
3053 PRINT_WARN("set_online of card %s interrupted by user!\n",
3054 CARD_BUS_ID(card));
3055 return -ERESTARTSYS;
3056 }
3057 3050
3058 recover_flag = card->state; 3051 recover_flag = card->state;
3059 rc = ccw_device_set_online(CARD_RDEV(card)); 3052 rc = ccw_device_set_online(CARD_RDEV(card));
@@ -3170,11 +3163,7 @@ static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev,
3170 if (card->dev && netif_carrier_ok(card->dev)) 3163 if (card->dev && netif_carrier_ok(card->dev))
3171 netif_carrier_off(card->dev); 3164 netif_carrier_off(card->dev);
3172 recover_flag = card->state; 3165 recover_flag = card->state;
3173 if (qeth_l3_stop_card(card, recovery_mode) == -ERESTARTSYS) { 3166 qeth_l3_stop_card(card, recovery_mode);
3174 PRINT_WARN("Stopping card %s interrupted by user!\n",
3175 CARD_BUS_ID(card));
3176 return -ERESTARTSYS;
3177 }
3178 rc = ccw_device_set_offline(CARD_DDEV(card)); 3167 rc = ccw_device_set_offline(CARD_DDEV(card));
3179 rc2 = ccw_device_set_offline(CARD_WDEV(card)); 3168 rc2 = ccw_device_set_offline(CARD_WDEV(card));
3180 rc3 = ccw_device_set_offline(CARD_RDEV(card)); 3169 rc3 = ccw_device_set_offline(CARD_RDEV(card));
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index 210ddb639748..c144b9924d52 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -121,9 +121,6 @@ static ssize_t qeth_l3_dev_route6_show(struct device *dev,
121 if (!card) 121 if (!card)
122 return -EINVAL; 122 return -EINVAL;
123 123
124 if (!qeth_is_supported(card, IPA_IPV6))
125 return sprintf(buf, "%s\n", "n/a");
126
127 return qeth_l3_dev_route_show(card, &card->options.route6, buf); 124 return qeth_l3_dev_route_show(card, &card->options.route6, buf);
128} 125}
129 126
@@ -135,10 +132,6 @@ static ssize_t qeth_l3_dev_route6_store(struct device *dev,
135 if (!card) 132 if (!card)
136 return -EINVAL; 133 return -EINVAL;
137 134
138 if (!qeth_is_supported(card, IPA_IPV6)) {
139 return -EOPNOTSUPP;
140 }
141
142 return qeth_l3_dev_route_store(card, &card->options.route6, 135 return qeth_l3_dev_route_store(card, &card->options.route6,
143 QETH_PROT_IPV6, buf, count); 136 QETH_PROT_IPV6, buf, count);
144} 137}
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 3b56220fb900..3d4e3e3f3fc0 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -610,7 +610,8 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
610 atomic_set_mask(status | ZFCP_STATUS_COMMON_REMOVE, &port->status); 610 atomic_set_mask(status | ZFCP_STATUS_COMMON_REMOVE, &port->status);
611 atomic_set(&port->refcount, 0); 611 atomic_set(&port->refcount, 0);
612 612
613 dev_set_name(&port->sysfs_device, "0x%016llx", wwpn); 613 dev_set_name(&port->sysfs_device, "0x%016llx",
614 (unsigned long long)wwpn);
614 port->sysfs_device.parent = &adapter->ccw_device->dev; 615 port->sysfs_device.parent = &adapter->ccw_device->dev;
615 616
616 port->sysfs_device.release = zfcp_sysfs_port_release; 617 port->sysfs_device.release = zfcp_sysfs_port_release;
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index b04038c74786..951a8d409d1d 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -116,7 +116,9 @@ static int zfcp_ccw_set_online(struct ccw_device *ccw_device)
116 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, 85, 116 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, 85,
117 NULL); 117 NULL);
118 zfcp_erp_wait(adapter); 118 zfcp_erp_wait(adapter);
119 goto out; 119 up(&zfcp_data.config_sema);
120 flush_work(&adapter->scan_work);
121 return 0;
120 122
121 out_scsi_register: 123 out_scsi_register:
122 zfcp_erp_thread_kill(adapter); 124 zfcp_erp_thread_kill(adapter);
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 060f5f2352ec..31012d58cfb7 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -30,7 +30,7 @@ static void zfcp_dbf_hexdump(debug_info_t *dbf, void *to, int to_len,
30 dump->offset = offset; 30 dump->offset = offset;
31 dump->size = min(from_len - offset, room); 31 dump->size = min(from_len - offset, room);
32 memcpy(dump->data, from + offset, dump->size); 32 memcpy(dump->data, from + offset, dump->size);
33 debug_event(dbf, level, dump, dump->size); 33 debug_event(dbf, level, dump, dump->size + sizeof(*dump));
34 } 34 }
35} 35}
36 36
@@ -108,7 +108,7 @@ static int zfcp_dbf_view_header(debug_info_t *id, struct debug_view *view,
108 t.tv_sec, t.tv_nsec); 108 t.tv_sec, t.tv_nsec);
109 zfcp_dbf_out(&p, "cpu", "%02i", entry->id.fields.cpuid); 109 zfcp_dbf_out(&p, "cpu", "%02i", entry->id.fields.cpuid);
110 } else { 110 } else {
111 zfcp_dbf_outd(&p, NULL, dump->data, dump->size, dump->offset, 111 zfcp_dbf_outd(&p, "", dump->data, dump->size, dump->offset,
112 dump->total_size); 112 dump->total_size);
113 if ((dump->offset + dump->size) == dump->total_size) 113 if ((dump->offset + dump->size) == dump->total_size)
114 p += sprintf(p, "\n"); 114 p += sprintf(p, "\n");
@@ -366,6 +366,7 @@ static void zfcp_hba_dbf_view_response(char **p,
366 break; 366 break;
367 zfcp_dbf_out(p, "scsi_cmnd", "0x%0Lx", r->u.fcp.cmnd); 367 zfcp_dbf_out(p, "scsi_cmnd", "0x%0Lx", r->u.fcp.cmnd);
368 zfcp_dbf_out(p, "scsi_serial", "0x%016Lx", r->u.fcp.serial); 368 zfcp_dbf_out(p, "scsi_serial", "0x%016Lx", r->u.fcp.serial);
 369		*p += sprintf(*p, "\n");
369 break; 370 break;
370 371
371 case FSF_QTCB_OPEN_PORT_WITH_DID: 372 case FSF_QTCB_OPEN_PORT_WITH_DID:
@@ -465,7 +466,8 @@ static int zfcp_hba_dbf_view_format(debug_info_t *id, struct debug_view *view,
465 else if (strncmp(r->tag, "berr", ZFCP_DBF_TAG_SIZE) == 0) 466 else if (strncmp(r->tag, "berr", ZFCP_DBF_TAG_SIZE) == 0)
466 zfcp_hba_dbf_view_berr(&p, &r->u.berr); 467 zfcp_hba_dbf_view_berr(&p, &r->u.berr);
467 468
468 p += sprintf(p, "\n"); 469 if (strncmp(r->tag, "resp", ZFCP_DBF_TAG_SIZE) != 0)
470 p += sprintf(p, "\n");
469 return p - out_buf; 471 return p - out_buf;
470} 472}
471 473
@@ -880,6 +882,7 @@ void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req)
880 struct ct_hdr *hdr = sg_virt(ct->req); 882 struct ct_hdr *hdr = sg_virt(ct->req);
881 struct zfcp_san_dbf_record *r = &adapter->san_dbf_buf; 883 struct zfcp_san_dbf_record *r = &adapter->san_dbf_buf;
882 struct zfcp_san_dbf_record_ct_request *oct = &r->u.ct_req; 884 struct zfcp_san_dbf_record_ct_request *oct = &r->u.ct_req;
885 int level = 3;
883 unsigned long flags; 886 unsigned long flags;
884 887
885 spin_lock_irqsave(&adapter->san_dbf_lock, flags); 888 spin_lock_irqsave(&adapter->san_dbf_lock, flags);
@@ -896,9 +899,10 @@ void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req)
896 oct->options = hdr->options; 899 oct->options = hdr->options;
897 oct->max_res_size = hdr->max_res_size; 900 oct->max_res_size = hdr->max_res_size;
898 oct->len = min((int)ct->req->length - (int)sizeof(struct ct_hdr), 901 oct->len = min((int)ct->req->length - (int)sizeof(struct ct_hdr),
899 ZFCP_DBF_CT_PAYLOAD); 902 ZFCP_DBF_SAN_MAX_PAYLOAD);
900 memcpy(oct->payload, (void *)hdr + sizeof(struct ct_hdr), oct->len); 903 debug_event(adapter->san_dbf, level, r, sizeof(*r));
901 debug_event(adapter->san_dbf, 3, r, sizeof(*r)); 904 zfcp_dbf_hexdump(adapter->san_dbf, r, sizeof(*r), level,
905 (void *)hdr + sizeof(struct ct_hdr), oct->len);
902 spin_unlock_irqrestore(&adapter->san_dbf_lock, flags); 906 spin_unlock_irqrestore(&adapter->san_dbf_lock, flags);
903} 907}
904 908
@@ -914,6 +918,7 @@ void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req)
914 struct ct_hdr *hdr = sg_virt(ct->resp); 918 struct ct_hdr *hdr = sg_virt(ct->resp);
915 struct zfcp_san_dbf_record *r = &adapter->san_dbf_buf; 919 struct zfcp_san_dbf_record *r = &adapter->san_dbf_buf;
916 struct zfcp_san_dbf_record_ct_response *rct = &r->u.ct_resp; 920 struct zfcp_san_dbf_record_ct_response *rct = &r->u.ct_resp;
921 int level = 3;
917 unsigned long flags; 922 unsigned long flags;
918 923
919 spin_lock_irqsave(&adapter->san_dbf_lock, flags); 924 spin_lock_irqsave(&adapter->san_dbf_lock, flags);
@@ -929,9 +934,10 @@ void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req)
929 rct->expl = hdr->reason_code_expl; 934 rct->expl = hdr->reason_code_expl;
930 rct->vendor_unique = hdr->vendor_unique; 935 rct->vendor_unique = hdr->vendor_unique;
931 rct->len = min((int)ct->resp->length - (int)sizeof(struct ct_hdr), 936 rct->len = min((int)ct->resp->length - (int)sizeof(struct ct_hdr),
932 ZFCP_DBF_CT_PAYLOAD); 937 ZFCP_DBF_SAN_MAX_PAYLOAD);
933 memcpy(rct->payload, (void *)hdr + sizeof(struct ct_hdr), rct->len); 938 debug_event(adapter->san_dbf, level, r, sizeof(*r));
934 debug_event(adapter->san_dbf, 3, r, sizeof(*r)); 939 zfcp_dbf_hexdump(adapter->san_dbf, r, sizeof(*r), level,
940 (void *)hdr + sizeof(struct ct_hdr), rct->len);
935 spin_unlock_irqrestore(&adapter->san_dbf_lock, flags); 941 spin_unlock_irqrestore(&adapter->san_dbf_lock, flags);
936} 942}
937 943
@@ -954,7 +960,7 @@ static void zfcp_san_dbf_event_els(const char *tag, int level,
954 rec->u.els.ls_code = ls_code; 960 rec->u.els.ls_code = ls_code;
955 debug_event(adapter->san_dbf, level, rec, sizeof(*rec)); 961 debug_event(adapter->san_dbf, level, rec, sizeof(*rec));
956 zfcp_dbf_hexdump(adapter->san_dbf, rec, sizeof(*rec), level, 962 zfcp_dbf_hexdump(adapter->san_dbf, rec, sizeof(*rec), level,
957 buffer, min(buflen, ZFCP_DBF_ELS_MAX_PAYLOAD)); 963 buffer, min(buflen, ZFCP_DBF_SAN_MAX_PAYLOAD));
958 spin_unlock_irqrestore(&adapter->san_dbf_lock, flags); 964 spin_unlock_irqrestore(&adapter->san_dbf_lock, flags);
959} 965}
960 966
@@ -1008,8 +1014,6 @@ static int zfcp_san_dbf_view_format(debug_info_t *id, struct debug_view *view,
1008 char *out_buf, const char *in_buf) 1014 char *out_buf, const char *in_buf)
1009{ 1015{
1010 struct zfcp_san_dbf_record *r = (struct zfcp_san_dbf_record *)in_buf; 1016 struct zfcp_san_dbf_record *r = (struct zfcp_san_dbf_record *)in_buf;
1011 char *buffer = NULL;
1012 int buflen = 0, total = 0;
1013 char *p = out_buf; 1017 char *p = out_buf;
1014 1018
1015 if (strncmp(r->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0) 1019 if (strncmp(r->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0)
@@ -1029,9 +1033,6 @@ static int zfcp_san_dbf_view_format(debug_info_t *id, struct debug_view *view,
1029 zfcp_dbf_out(&p, "gs_subtype", "0x%02x", ct->gs_subtype); 1033 zfcp_dbf_out(&p, "gs_subtype", "0x%02x", ct->gs_subtype);
1030 zfcp_dbf_out(&p, "options", "0x%02x", ct->options); 1034 zfcp_dbf_out(&p, "options", "0x%02x", ct->options);
1031 zfcp_dbf_out(&p, "max_res_size", "0x%04x", ct->max_res_size); 1035 zfcp_dbf_out(&p, "max_res_size", "0x%04x", ct->max_res_size);
1032 total = ct->len;
1033 buffer = ct->payload;
1034 buflen = min(total, ZFCP_DBF_CT_PAYLOAD);
1035 } else if (strncmp(r->tag, "rctc", ZFCP_DBF_TAG_SIZE) == 0) { 1036 } else if (strncmp(r->tag, "rctc", ZFCP_DBF_TAG_SIZE) == 0) {
1036 struct zfcp_san_dbf_record_ct_response *ct = &r->u.ct_resp; 1037 struct zfcp_san_dbf_record_ct_response *ct = &r->u.ct_resp;
1037 zfcp_dbf_out(&p, "cmd_rsp_code", "0x%04x", ct->cmd_rsp_code); 1038 zfcp_dbf_out(&p, "cmd_rsp_code", "0x%04x", ct->cmd_rsp_code);
@@ -1039,23 +1040,12 @@ static int zfcp_san_dbf_view_format(debug_info_t *id, struct debug_view *view,
1039 zfcp_dbf_out(&p, "reason_code", "0x%02x", ct->reason_code); 1040 zfcp_dbf_out(&p, "reason_code", "0x%02x", ct->reason_code);
1040 zfcp_dbf_out(&p, "reason_code_expl", "0x%02x", ct->expl); 1041 zfcp_dbf_out(&p, "reason_code_expl", "0x%02x", ct->expl);
1041 zfcp_dbf_out(&p, "vendor_unique", "0x%02x", ct->vendor_unique); 1042 zfcp_dbf_out(&p, "vendor_unique", "0x%02x", ct->vendor_unique);
1042 total = ct->len;
1043 buffer = ct->payload;
1044 buflen = min(total, ZFCP_DBF_CT_PAYLOAD);
1045 } else if (strncmp(r->tag, "oels", ZFCP_DBF_TAG_SIZE) == 0 || 1043 } else if (strncmp(r->tag, "oels", ZFCP_DBF_TAG_SIZE) == 0 ||
1046 strncmp(r->tag, "rels", ZFCP_DBF_TAG_SIZE) == 0 || 1044 strncmp(r->tag, "rels", ZFCP_DBF_TAG_SIZE) == 0 ||
1047 strncmp(r->tag, "iels", ZFCP_DBF_TAG_SIZE) == 0) { 1045 strncmp(r->tag, "iels", ZFCP_DBF_TAG_SIZE) == 0) {
1048 struct zfcp_san_dbf_record_els *els = &r->u.els; 1046 struct zfcp_san_dbf_record_els *els = &r->u.els;
1049 zfcp_dbf_out(&p, "ls_code", "0x%02x", els->ls_code); 1047 zfcp_dbf_out(&p, "ls_code", "0x%02x", els->ls_code);
1050 total = els->len;
1051 buffer = els->payload;
1052 buflen = min(total, ZFCP_DBF_ELS_PAYLOAD);
1053 } 1048 }
1054
1055 zfcp_dbf_outd(&p, "payload", buffer, buflen, 0, total);
1056 if (buflen == total)
1057 p += sprintf(p, "\n");
1058
1059 return p - out_buf; 1049 return p - out_buf;
1060} 1050}
1061 1051
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index e8f450801fea..5d6b2dff855b 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -163,8 +163,6 @@ struct zfcp_san_dbf_record_ct_request {
163 u8 options; 163 u8 options;
164 u16 max_res_size; 164 u16 max_res_size;
165 u32 len; 165 u32 len;
166#define ZFCP_DBF_CT_PAYLOAD 24
167 u8 payload[ZFCP_DBF_CT_PAYLOAD];
168} __attribute__ ((packed)); 166} __attribute__ ((packed));
169 167
170struct zfcp_san_dbf_record_ct_response { 168struct zfcp_san_dbf_record_ct_response {
@@ -174,15 +172,11 @@ struct zfcp_san_dbf_record_ct_response {
174 u8 expl; 172 u8 expl;
175 u8 vendor_unique; 173 u8 vendor_unique;
176 u32 len; 174 u32 len;
177 u8 payload[ZFCP_DBF_CT_PAYLOAD];
178} __attribute__ ((packed)); 175} __attribute__ ((packed));
179 176
180struct zfcp_san_dbf_record_els { 177struct zfcp_san_dbf_record_els {
181 u8 ls_code; 178 u8 ls_code;
182 u32 len; 179 u32 len;
183#define ZFCP_DBF_ELS_PAYLOAD 32
184#define ZFCP_DBF_ELS_MAX_PAYLOAD 1024
185 u8 payload[ZFCP_DBF_ELS_PAYLOAD];
186} __attribute__ ((packed)); 180} __attribute__ ((packed));
187 181
188struct zfcp_san_dbf_record { 182struct zfcp_san_dbf_record {
@@ -196,6 +190,8 @@ struct zfcp_san_dbf_record {
196 struct zfcp_san_dbf_record_ct_response ct_resp; 190 struct zfcp_san_dbf_record_ct_response ct_resp;
197 struct zfcp_san_dbf_record_els els; 191 struct zfcp_san_dbf_record_els els;
198 } u; 192 } u;
193#define ZFCP_DBF_SAN_MAX_PAYLOAD 1024
194 u8 payload[32];
199} __attribute__ ((packed)); 195} __attribute__ ((packed));
200 196
201struct zfcp_scsi_dbf_record { 197struct zfcp_scsi_dbf_record {
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 9040f738ff33..c557ba34e1aa 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -472,6 +472,7 @@ static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act)
472 ZFCP_STATUS_ERP_TIMEDOUT)) { 472 ZFCP_STATUS_ERP_TIMEDOUT)) {
473 act->fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED; 473 act->fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
474 zfcp_rec_dbf_event_action(142, act); 474 zfcp_rec_dbf_event_action(142, act);
475 act->fsf_req->erp_action = NULL;
475 } 476 }
476 if (act->status & ZFCP_STATUS_ERP_TIMEDOUT) 477 if (act->status & ZFCP_STATUS_ERP_TIMEDOUT)
477 zfcp_rec_dbf_event_action(143, act); 478 zfcp_rec_dbf_event_action(143, act);
@@ -719,7 +720,6 @@ static int zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *act,
719 goto failed_openfcp; 720 goto failed_openfcp;
720 721
721 atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &act->adapter->status); 722 atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &act->adapter->status);
722 schedule_work(&act->adapter->scan_work);
723 723
724 return ZFCP_ERP_SUCCEEDED; 724 return ZFCP_ERP_SUCCEEDED;
725 725
@@ -1185,7 +1185,9 @@ static void zfcp_erp_scsi_scan(struct work_struct *work)
1185 container_of(work, struct zfcp_erp_add_work, work); 1185 container_of(work, struct zfcp_erp_add_work, work);
1186 struct zfcp_unit *unit = p->unit; 1186 struct zfcp_unit *unit = p->unit;
1187 struct fc_rport *rport = unit->port->rport; 1187 struct fc_rport *rport = unit->port->rport;
1188 scsi_scan_target(&rport->dev, 0, rport->scsi_target_id, 1188
1189 if (rport && rport->port_state == FC_PORTSTATE_ONLINE)
1190 scsi_scan_target(&rport->dev, 0, rport->scsi_target_id,
1189 scsilun_to_int((struct scsi_lun *)&unit->fcp_lun), 0); 1191 scsilun_to_int((struct scsi_lun *)&unit->fcp_lun), 0);
1190 atomic_clear_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, &unit->status); 1192 atomic_clear_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, &unit->status);
1191 zfcp_unit_put(unit); 1193 zfcp_unit_put(unit);
@@ -1281,6 +1283,8 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
1281 case ZFCP_ERP_ACTION_REOPEN_ADAPTER: 1283 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
1282 if (result != ZFCP_ERP_SUCCEEDED) 1284 if (result != ZFCP_ERP_SUCCEEDED)
1283 zfcp_erp_rports_del(adapter); 1285 zfcp_erp_rports_del(adapter);
1286 else
1287 schedule_work(&adapter->scan_work);
1284 zfcp_adapter_put(adapter); 1288 zfcp_adapter_put(adapter);
1285 break; 1289 break;
1286 } 1290 }
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 1a7c80a77ff5..8aab3091a7b1 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -50,7 +50,8 @@ static int zfcp_wka_port_get(struct zfcp_wka_port *wka_port)
50 if (mutex_lock_interruptible(&wka_port->mutex)) 50 if (mutex_lock_interruptible(&wka_port->mutex))
51 return -ERESTARTSYS; 51 return -ERESTARTSYS;
52 52
53 if (wka_port->status != ZFCP_WKA_PORT_ONLINE) { 53 if (wka_port->status == ZFCP_WKA_PORT_OFFLINE ||
54 wka_port->status == ZFCP_WKA_PORT_CLOSING) {
54 wka_port->status = ZFCP_WKA_PORT_OPENING; 55 wka_port->status = ZFCP_WKA_PORT_OPENING;
55 if (zfcp_fsf_open_wka_port(wka_port)) 56 if (zfcp_fsf_open_wka_port(wka_port))
56 wka_port->status = ZFCP_WKA_PORT_OFFLINE; 57 wka_port->status = ZFCP_WKA_PORT_OFFLINE;
@@ -125,8 +126,7 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
125 126
126 read_lock_irqsave(&zfcp_data.config_lock, flags); 127 read_lock_irqsave(&zfcp_data.config_lock, flags);
127 list_for_each_entry(port, &fsf_req->adapter->port_list_head, list) { 128 list_for_each_entry(port, &fsf_req->adapter->port_list_head, list) {
128 /* FIXME: ZFCP_STATUS_PORT_DID_DID check is racy */ 129 if (!(atomic_read(&port->status) & ZFCP_STATUS_PORT_PHYS_OPEN))
129 if (!(atomic_read(&port->status) & ZFCP_STATUS_PORT_DID_DID))
130 /* Try to connect to unused ports anyway. */ 130 /* Try to connect to unused ports anyway. */
131 zfcp_erp_port_reopen(port, 131 zfcp_erp_port_reopen(port,
132 ZFCP_STATUS_COMMON_ERP_FAILED, 132 ZFCP_STATUS_COMMON_ERP_FAILED,
@@ -610,7 +610,6 @@ int zfcp_scan_ports(struct zfcp_adapter *adapter)
610 int ret, i; 610 int ret, i;
611 struct zfcp_gpn_ft *gpn_ft; 611 struct zfcp_gpn_ft *gpn_ft;
612 612
613 zfcp_erp_wait(adapter); /* wait until adapter is finished with ERP */
614 if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT) 613 if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT)
615 return 0; 614 return 0;
616 615
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 5ae1d497e5ed..dc0367690405 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -683,6 +683,7 @@ static struct zfcp_fsf_req *zfcp_fsf_alloc_noqtcb(mempool_t *pool)
683 if (!req) 683 if (!req)
684 return NULL; 684 return NULL;
685 memset(req, 0, sizeof(*req)); 685 memset(req, 0, sizeof(*req));
686 req->pool = pool;
686 return req; 687 return req;
687} 688}
688 689
@@ -769,28 +770,24 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter,
769static int zfcp_fsf_req_send(struct zfcp_fsf_req *req) 770static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
770{ 771{
771 struct zfcp_adapter *adapter = req->adapter; 772 struct zfcp_adapter *adapter = req->adapter;
772 struct zfcp_qdio_queue *req_q = &adapter->req_q; 773 unsigned long flags;
773 int idx; 774 int idx;
774 775
775 /* put allocated FSF request into hash table */ 776 /* put allocated FSF request into hash table */
776 spin_lock(&adapter->req_list_lock); 777 spin_lock_irqsave(&adapter->req_list_lock, flags);
777 idx = zfcp_reqlist_hash(req->req_id); 778 idx = zfcp_reqlist_hash(req->req_id);
778 list_add_tail(&req->list, &adapter->req_list[idx]); 779 list_add_tail(&req->list, &adapter->req_list[idx]);
779 spin_unlock(&adapter->req_list_lock); 780 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
780 781
781 req->qdio_outb_usage = atomic_read(&req_q->count); 782 req->qdio_outb_usage = atomic_read(&adapter->req_q.count);
782 req->issued = get_clock(); 783 req->issued = get_clock();
783 if (zfcp_qdio_send(req)) { 784 if (zfcp_qdio_send(req)) {
784 /* Queues are down..... */
785 del_timer(&req->timer); 785 del_timer(&req->timer);
786 spin_lock(&adapter->req_list_lock); 786 spin_lock_irqsave(&adapter->req_list_lock, flags);
787 zfcp_reqlist_remove(adapter, req); 787 /* lookup request again, list might have changed */
788 spin_unlock(&adapter->req_list_lock); 788 if (zfcp_reqlist_find_safe(adapter, req))
789 /* undo changes in request queue made for this request */ 789 zfcp_reqlist_remove(adapter, req);
790 atomic_add(req->sbal_number, &req_q->count); 790 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
791 req_q->first -= req->sbal_number;
792 req_q->first += QDIO_MAX_BUFFERS_PER_Q;
793 req_q->first %= QDIO_MAX_BUFFERS_PER_Q; /* wrap */
794 zfcp_erp_adapter_reopen(adapter, 0, 116, req); 791 zfcp_erp_adapter_reopen(adapter, 0, 116, req);
795 return -EIO; 792 return -EIO;
796 } 793 }
@@ -933,8 +930,10 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
933 goto out; 930 goto out;
934 req = zfcp_fsf_req_create(adapter, FSF_QTCB_ABORT_FCP_CMND, 931 req = zfcp_fsf_req_create(adapter, FSF_QTCB_ABORT_FCP_CMND,
935 req_flags, adapter->pool.fsf_req_abort); 932 req_flags, adapter->pool.fsf_req_abort);
936 if (IS_ERR(req)) 933 if (IS_ERR(req)) {
934 req = NULL;
937 goto out; 935 goto out;
936 }
938 937
939 if (unlikely(!(atomic_read(&unit->status) & 938 if (unlikely(!(atomic_read(&unit->status) &
940 ZFCP_STATUS_COMMON_UNBLOCKED))) 939 ZFCP_STATUS_COMMON_UNBLOCKED)))
@@ -1587,6 +1586,7 @@ static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
1587 wka_port->status = ZFCP_WKA_PORT_OFFLINE; 1586 wka_port->status = ZFCP_WKA_PORT_OFFLINE;
1588 break; 1587 break;
1589 case FSF_PORT_ALREADY_OPEN: 1588 case FSF_PORT_ALREADY_OPEN:
1589 break;
1590 case FSF_GOOD: 1590 case FSF_GOOD:
1591 wka_port->handle = header->port_handle; 1591 wka_port->handle = header->port_handle;
1592 wka_port->status = ZFCP_WKA_PORT_ONLINE; 1592 wka_port->status = ZFCP_WKA_PORT_ONLINE;
@@ -2116,18 +2116,21 @@ static inline void zfcp_fsf_trace_latency(struct zfcp_fsf_req *fsf_req)
2116 2116
2117static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req) 2117static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req)
2118{ 2118{
2119 struct scsi_cmnd *scpnt = req->data; 2119 struct scsi_cmnd *scpnt;
2120 struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *) 2120 struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *)
2121 &(req->qtcb->bottom.io.fcp_rsp); 2121 &(req->qtcb->bottom.io.fcp_rsp);
2122 u32 sns_len; 2122 u32 sns_len;
2123 char *fcp_rsp_info = (unsigned char *) &fcp_rsp_iu[1]; 2123 char *fcp_rsp_info = (unsigned char *) &fcp_rsp_iu[1];
2124 unsigned long flags; 2124 unsigned long flags;
2125 2125
2126 if (unlikely(!scpnt))
2127 return;
2128
2129 read_lock_irqsave(&req->adapter->abort_lock, flags); 2126 read_lock_irqsave(&req->adapter->abort_lock, flags);
2130 2127
2128 scpnt = req->data;
2129 if (unlikely(!scpnt)) {
2130 read_unlock_irqrestore(&req->adapter->abort_lock, flags);
2131 return;
2132 }
2133
2131 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ABORTED)) { 2134 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ABORTED)) {
2132 set_host_byte(scpnt, DID_SOFT_ERROR); 2135 set_host_byte(scpnt, DID_SOFT_ERROR);
2133 set_driver_byte(scpnt, SUGGEST_RETRY); 2136 set_driver_byte(scpnt, SUGGEST_RETRY);
@@ -2445,8 +2448,10 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_adapter *adapter,
2445 goto out; 2448 goto out;
2446 req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, req_flags, 2449 req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, req_flags,
2447 adapter->pool.fsf_req_scsi); 2450 adapter->pool.fsf_req_scsi);
2448 if (IS_ERR(req)) 2451 if (IS_ERR(req)) {
2452 req = NULL;
2449 goto out; 2453 goto out;
2454 }
2450 2455
2451 req->status |= ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT; 2456 req->status |= ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT;
2452 req->data = unit; 2457 req->data = unit;
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index ca8f85f3dad4..468c880f8b6d 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -24,14 +24,10 @@ char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu)
24static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt) 24static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
25{ 25{
26 struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata; 26 struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata;
27 WARN_ON(!unit); 27 atomic_clear_mask(ZFCP_STATUS_UNIT_REGISTERED, &unit->status);
28 if (unit) { 28 unit->device = NULL;
29 atomic_clear_mask(ZFCP_STATUS_UNIT_REGISTERED, &unit->status); 29 zfcp_erp_unit_failed(unit, 12, NULL);
30 sdpnt->hostdata = NULL; 30 zfcp_unit_put(unit);
31 unit->device = NULL;
32 zfcp_erp_unit_failed(unit, 12, NULL);
33 zfcp_unit_put(unit);
34 }
35} 31}
36 32
37static int zfcp_scsi_slave_configure(struct scsi_device *sdp) 33static int zfcp_scsi_slave_configure(struct scsi_device *sdp)
@@ -92,7 +88,7 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
92 ret = zfcp_fsf_send_fcp_command_task(adapter, unit, scpnt, 0, 88 ret = zfcp_fsf_send_fcp_command_task(adapter, unit, scpnt, 0,
93 ZFCP_REQ_AUTO_CLEANUP); 89 ZFCP_REQ_AUTO_CLEANUP);
94 if (unlikely(ret == -EBUSY)) 90 if (unlikely(ret == -EBUSY))
95 zfcp_scsi_command_fail(scpnt, DID_NO_CONNECT); 91 return SCSI_MLQUEUE_DEVICE_BUSY;
96 else if (unlikely(ret < 0)) 92 else if (unlikely(ret < 0))
97 return SCSI_MLQUEUE_HOST_BUSY; 93 return SCSI_MLQUEUE_HOST_BUSY;
98 94
diff --git a/drivers/sbus/char/jsflash.c b/drivers/sbus/char/jsflash.c
index 2bec9ccc0293..a9a9893a5f95 100644
--- a/drivers/sbus/char/jsflash.c
+++ b/drivers/sbus/char/jsflash.c
@@ -36,7 +36,6 @@
36#include <linux/poll.h> 36#include <linux/poll.h>
37#include <linux/init.h> 37#include <linux/init.h>
38#include <linux/string.h> 38#include <linux/string.h>
39#include <linux/smp_lock.h>
40#include <linux/genhd.h> 39#include <linux/genhd.h>
41#include <linux/blkdev.h> 40#include <linux/blkdev.h>
42 41
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index b92c19bb6876..5311317c2e4c 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -1924,12 +1924,9 @@ static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int re
1924 (cmd->sc_data_direction == DMA_FROM_DEVICE || 1924 (cmd->sc_data_direction == DMA_FROM_DEVICE ||
1925 cmd->sc_data_direction == DMA_BIDIRECTIONAL)) { 1925 cmd->sc_data_direction == DMA_BIDIRECTIONAL)) {
1926 if (scsi_sg_count(cmd) == 1) { 1926 if (scsi_sg_count(cmd) == 1) {
1927 unsigned long flags;
1928 void *buf = tw_dev->generic_buffer_virt[request_id]; 1927 void *buf = tw_dev->generic_buffer_virt[request_id];
1929 1928
1930 local_irq_save(flags);
1931 scsi_sg_copy_from_buffer(cmd, buf, TW_SECTOR_SIZE); 1929 scsi_sg_copy_from_buffer(cmd, buf, TW_SECTOR_SIZE);
1932 local_irq_restore(flags);
1933 } 1930 }
1934 } 1931 }
1935} /* End twa_scsiop_execute_scsi_complete() */ 1932} /* End twa_scsiop_execute_scsi_complete() */
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index a0537f09aa21..c03f1d2c9e2e 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -1466,12 +1466,7 @@ static int tw_scsiop_inquiry(TW_Device_Extension *tw_dev, int request_id)
1466static void tw_transfer_internal(TW_Device_Extension *tw_dev, int request_id, 1466static void tw_transfer_internal(TW_Device_Extension *tw_dev, int request_id,
1467 void *data, unsigned int len) 1467 void *data, unsigned int len)
1468{ 1468{
1469 struct scsi_cmnd *cmd = tw_dev->srb[request_id]; 1469 scsi_sg_copy_from_buffer(tw_dev->srb[request_id], data, len);
1470 unsigned long flags;
1471
1472 local_irq_save(flags);
1473 scsi_sg_copy_from_buffer(cmd, data, len);
1474 local_irq_restore(flags);
1475} 1470}
1476 1471
1477/* This function is called by the isr to complete an inquiry command */ 1472/* This function is called by the isr to complete an inquiry command */
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 9aa301c1ed07..94acbeed4e7c 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -175,8 +175,8 @@ static struct aac_driver_ident aac_drivers[] = {
175 { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Boxster/PERC3DiB) */ 175 { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Boxster/PERC3DiB) */
176 { aac_rx_init, "aacraid", "ADAPTEC ", "catapult ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* catapult */ 176 { aac_rx_init, "aacraid", "ADAPTEC ", "catapult ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* catapult */
177 { aac_rx_init, "aacraid", "ADAPTEC ", "tomcat ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* tomcat */ 177 { aac_rx_init, "aacraid", "ADAPTEC ", "tomcat ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* tomcat */
178 { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2120S ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Adaptec 2120S (Crusader) */ 178 { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2120S ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec 2120S (Crusader) */
179 { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Adaptec 2200S (Vulcan) */ 179 { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec 2200S (Vulcan) */
180 { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Adaptec 2200S (Vulcan-2m) */ 180 { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Adaptec 2200S (Vulcan-2m) */
181 { aac_rx_init, "aacraid", "Legend ", "Legend S220 ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend S220 (Legend Crusader) */ 181 { aac_rx_init, "aacraid", "Legend ", "Legend S220 ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend S220 (Legend Crusader) */
182 { aac_rx_init, "aacraid", "Legend ", "Legend S230 ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend S230 (Legend Vulcan) */ 182 { aac_rx_init, "aacraid", "Legend ", "Legend S230 ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend S230 (Legend Vulcan) */
@@ -427,8 +427,8 @@ static int aac_slave_configure(struct scsi_device *sdev)
427 * Firmware has an individual device recovery time typically 427 * Firmware has an individual device recovery time typically
428 * of 35 seconds, give us a margin. 428 * of 35 seconds, give us a margin.
429 */ 429 */
430 if (sdev->timeout < (45 * HZ)) 430 if (sdev->request_queue->rq_timeout < (45 * HZ))
431 sdev->timeout = 45 * HZ; 431 blk_queue_rq_timeout(sdev->request_queue, 45*HZ);
432 for (cid = 0; cid < aac->maximum_num_containers; ++cid) 432 for (cid = 0; cid < aac->maximum_num_containers; ++cid)
433 if (aac->fsa_dev[cid].valid) 433 if (aac->fsa_dev[cid].valid)
434 ++num_lsu; 434 ++num_lsu;
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 218777bfc143..399fe559e4de 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -13872,8 +13872,10 @@ static int __devinit advansys_board_found(struct Scsi_Host *shost,
13872 advansys_wide_free_mem(boardp); 13872 advansys_wide_free_mem(boardp);
13873 free_irq(boardp->irq, shost); 13873 free_irq(boardp->irq, shost);
13874 err_free_dma: 13874 err_free_dma:
13875#ifdef CONFIG_ISA
13875 if (shost->dma_channel != NO_ISA_DMA) 13876 if (shost->dma_channel != NO_ISA_DMA)
13876 free_dma(shost->dma_channel); 13877 free_dma(shost->dma_channel);
13878#endif
13877 err_free_proc: 13879 err_free_proc:
13878 kfree(boardp->prtbuf); 13880 kfree(boardp->prtbuf);
13879 err_unmap: 13881 err_unmap:
@@ -13894,10 +13896,12 @@ static int advansys_release(struct Scsi_Host *shost)
13894 ASC_DBG(1, "begin\n"); 13896 ASC_DBG(1, "begin\n");
13895 scsi_remove_host(shost); 13897 scsi_remove_host(shost);
13896 free_irq(board->irq, shost); 13898 free_irq(board->irq, shost);
13899#ifdef CONFIG_ISA
13897 if (shost->dma_channel != NO_ISA_DMA) { 13900 if (shost->dma_channel != NO_ISA_DMA) {
13898 ASC_DBG(1, "free_dma()\n"); 13901 ASC_DBG(1, "free_dma()\n");
13899 free_dma(shost->dma_channel); 13902 free_dma(shost->dma_channel);
13900 } 13903 }
13904#endif
13901 if (ASC_NARROW_BOARD(board)) { 13905 if (ASC_NARROW_BOARD(board)) {
13902 dma_unmap_single(board->dev, 13906 dma_unmap_single(board->dev,
13903 board->dvc_var.asc_dvc_var.overrun_dma, 13907 board->dvc_var.asc_dvc_var.overrun_dma,
diff --git a/drivers/scsi/aic7xxx/aic79xx.reg b/drivers/scsi/aic7xxx/aic79xx.reg
index cca16fc5b4ad..0666c22ab55b 100644
--- a/drivers/scsi/aic7xxx/aic79xx.reg
+++ b/drivers/scsi/aic7xxx/aic79xx.reg
@@ -80,6 +80,17 @@ VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic79xx.reg#77 $"
80 } 80 }
81 81
82/* 82/*
83 * Registers marked "dont_generate_debug_code" are not (yet) referenced
 84 * from the driver code, and this keyword inhibits the generation
85 * of debug code for them.
86 *
87 * REG_PRETTY_PRINT config will complain if dont_generate_debug_code
 88 * is added to a register that is referenced in the driver.
 89 * An unreferenced register with no dont_generate_debug_code will result
90 * in dead code. No warning is issued.
91 */
92
93/*
83 * Mode Pointer 94 * Mode Pointer
84 * Controls which of the 5, 512byte, address spaces should be used 95 * Controls which of the 5, 512byte, address spaces should be used
85 * as the source and destination of any register accesses in our 96 * as the source and destination of any register accesses in our
@@ -91,6 +102,7 @@ register MODE_PTR {
91 field DST_MODE 0x70 102 field DST_MODE 0x70
92 field SRC_MODE 0x07 103 field SRC_MODE 0x07
93 mode_pointer 104 mode_pointer
105 dont_generate_debug_code
94} 106}
95 107
96const SRC_MODE_SHIFT 0 108const SRC_MODE_SHIFT 0
@@ -190,6 +202,7 @@ register SEQINTCODE {
190 SAW_HWERR, 202 SAW_HWERR,
191 BAD_SCB_STATUS 203 BAD_SCB_STATUS
192 } 204 }
205 dont_generate_debug_code
193} 206}
194 207
195/* 208/*
@@ -207,6 +220,7 @@ register CLRINT {
207 field CLRSEQINT 0x04 220 field CLRSEQINT 0x04
208 field CLRCMDINT 0x02 221 field CLRCMDINT 0x02
209 field CLRSPLTINT 0x01 222 field CLRSPLTINT 0x01
223 dont_generate_debug_code
210} 224}
211 225
212/* 226/*
@@ -222,6 +236,7 @@ register ERROR {
222 field SQPARERR 0x08 236 field SQPARERR 0x08
223 field ILLOPCODE 0x04 237 field ILLOPCODE 0x04
224 field DSCTMOUT 0x02 238 field DSCTMOUT 0x02
239 dont_generate_debug_code
225} 240}
226 241
227/* 242/*
@@ -255,6 +270,7 @@ register HCNTRL {
255 field INTEN 0x02 270 field INTEN 0x02
256 field CHIPRST 0x01 271 field CHIPRST 0x01
257 field CHIPRSTACK 0x01 272 field CHIPRSTACK 0x01
273 dont_generate_debug_code
258} 274}
259 275
260/* 276/*
@@ -265,6 +281,7 @@ register HNSCB_QOFF {
265 access_mode RW 281 access_mode RW
266 size 2 282 size 2
267 count 2 283 count 2
284 dont_generate_debug_code
268} 285}
269 286
270/* 287/*
@@ -274,6 +291,7 @@ register HESCB_QOFF {
274 address 0x008 291 address 0x008
275 access_mode RW 292 access_mode RW
276 count 2 293 count 2
294 dont_generate_debug_code
277} 295}
278 296
279/* 297/*
@@ -311,6 +329,7 @@ register CLRSEQINTSTAT {
311 field CLRSEQ_SCSIINT 0x04 329 field CLRSEQ_SCSIINT 0x04
312 field CLRSEQ_PCIINT 0x02 330 field CLRSEQ_PCIINT 0x02
313 field CLRSEQ_SPLTINT 0x01 331 field CLRSEQ_SPLTINT 0x01
332 dont_generate_debug_code
314} 333}
315 334
316/* 335/*
@@ -320,6 +339,7 @@ register SWTIMER {
320 address 0x00E 339 address 0x00E
321 access_mode RW 340 access_mode RW
322 size 2 341 size 2
342 dont_generate_debug_code
323} 343}
324 344
325/* 345/*
@@ -330,6 +350,7 @@ register SNSCB_QOFF {
330 access_mode RW 350 access_mode RW
331 size 2 351 size 2
332 modes M_CCHAN 352 modes M_CCHAN
353 dont_generate_debug_code
333} 354}
334 355
335/* 356/*
@@ -340,6 +361,7 @@ register SESCB_QOFF {
340 count 2 361 count 2
341 access_mode RW 362 access_mode RW
342 modes M_CCHAN 363 modes M_CCHAN
364 dont_generate_debug_code
343} 365}
344 366
345/* 367/*
@@ -350,6 +372,7 @@ register SDSCB_QOFF {
350 access_mode RW 372 access_mode RW
351 modes M_CCHAN 373 modes M_CCHAN
352 size 2 374 size 2
375 dont_generate_debug_code
353} 376}
354 377
355/* 378/*
@@ -378,6 +401,7 @@ register QOFF_CTLSTA {
378 SCB_QSIZE_8192, 401 SCB_QSIZE_8192,
379 SCB_QSIZE_16384 402 SCB_QSIZE_16384
380 } 403 }
404 dont_generate_debug_code
381} 405}
382 406
383/* 407/*
@@ -431,6 +455,7 @@ register DSCOMMAND0 {
431 field EXTREQLCK 0x10 /* External Request Lock */ 455 field EXTREQLCK 0x10 /* External Request Lock */
432 field DISABLE_TWATE 0x02 /* Rev B or greater */ 456 field DISABLE_TWATE 0x02 /* Rev B or greater */
433 field CIOPARCKEN 0x01 /* Internal bus parity error enable */ 457 field CIOPARCKEN 0x01 /* Internal bus parity error enable */
458 dont_generate_debug_code
434} 459}
435 460
436/* 461/*
@@ -459,6 +484,7 @@ register SG_CACHE_PRE {
459 field SG_ADDR_MASK 0xf8 484 field SG_ADDR_MASK 0xf8
460 field ODD_SEG 0x04 485 field ODD_SEG 0x04
461 field LAST_SEG 0x02 486 field LAST_SEG 0x02
487 dont_generate_debug_code
462} 488}
463 489
464register SG_CACHE_SHADOW { 490register SG_CACHE_SHADOW {
@@ -491,6 +517,7 @@ register HADDR {
491 access_mode RW 517 access_mode RW
492 size 8 518 size 8
493 modes M_DFF0, M_DFF1 519 modes M_DFF0, M_DFF1
520 dont_generate_debug_code
494} 521}
495 522
496/* 523/*
@@ -522,6 +549,7 @@ register HCNT {
522 access_mode RW 549 access_mode RW
523 size 3 550 size 3
524 modes M_DFF0, M_DFF1 551 modes M_DFF0, M_DFF1
552 dont_generate_debug_code
525} 553}
526 554
527/* 555/*
@@ -551,6 +579,7 @@ register SGHADDR {
551 access_mode RW 579 access_mode RW
552 size 8 580 size 8
553 modes M_DFF0, M_DFF1 581 modes M_DFF0, M_DFF1
582 dont_generate_debug_code
554} 583}
555 584
556/* 585/*
@@ -561,6 +590,7 @@ register SCBHADDR {
561 access_mode RW 590 access_mode RW
562 size 8 591 size 8
563 modes M_CCHAN 592 modes M_CCHAN
593 dont_generate_debug_code
564} 594}
565 595
566/* 596/*
@@ -570,6 +600,7 @@ register SGHCNT {
570 address 0x084 600 address 0x084
571 access_mode RW 601 access_mode RW
572 modes M_DFF0, M_DFF1 602 modes M_DFF0, M_DFF1
603 dont_generate_debug_code
573} 604}
574 605
575/* 606/*
@@ -579,6 +610,7 @@ register SCBHCNT {
579 address 0x084 610 address 0x084
580 access_mode RW 611 access_mode RW
581 modes M_CCHAN 612 modes M_CCHAN
613 dont_generate_debug_code
582} 614}
583 615
584/* 616/*
@@ -609,6 +641,7 @@ register DFF_THRSH {
609 RD_DFTHRSH_90, 641 RD_DFTHRSH_90,
610 RD_DFTHRSH_MAX 642 RD_DFTHRSH_MAX
611 } 643 }
644 dont_generate_debug_code
612} 645}
613 646
614/* 647/*
@@ -817,6 +850,7 @@ register PCIXCTL {
817 field SRSPDPEEN 0x04 850 field SRSPDPEEN 0x04
818 field TSCSERREN 0x02 851 field TSCSERREN 0x02
819 field CMPABCDIS 0x01 852 field CMPABCDIS 0x01
853 dont_generate_debug_code
820} 854}
821 855
822/* 856/*
@@ -863,6 +897,7 @@ register DCHSPLTSTAT0 {
863 field RXOVRUN 0x04 897 field RXOVRUN 0x04
864 field RXSCEMSG 0x02 898 field RXSCEMSG 0x02
865 field RXSPLTRSP 0x01 899 field RXSPLTRSP 0x01
900 dont_generate_debug_code
866} 901}
867 902
868/* 903/*
@@ -908,6 +943,7 @@ register DCHSPLTSTAT1 {
908 modes M_DFF0, M_DFF1 943 modes M_DFF0, M_DFF1
909 count 2 944 count 2
910 field RXDATABUCKET 0x01 945 field RXDATABUCKET 0x01
946 dont_generate_debug_code
911} 947}
912 948
913/* 949/*
@@ -1069,6 +1105,7 @@ register SGSPLTSTAT0 {
1069 field RXOVRUN 0x04 1105 field RXOVRUN 0x04
1070 field RXSCEMSG 0x02 1106 field RXSCEMSG 0x02
1071 field RXSPLTRSP 0x01 1107 field RXSPLTRSP 0x01
1108 dont_generate_debug_code
1072} 1109}
1073 1110
1074/* 1111/*
@@ -1080,6 +1117,7 @@ register SGSPLTSTAT1 {
1080 modes M_DFF0, M_DFF1 1117 modes M_DFF0, M_DFF1
1081 count 2 1118 count 2
1082 field RXDATABUCKET 0x01 1119 field RXDATABUCKET 0x01
1120 dont_generate_debug_code
1083} 1121}
1084 1122
1085/* 1123/*
@@ -1091,6 +1129,7 @@ register SFUNCT {
1091 modes M_CFG 1129 modes M_CFG
1092 field TEST_GROUP 0xF0 1130 field TEST_GROUP 0xF0
1093 field TEST_NUM 0x0F 1131 field TEST_NUM 0x0F
1132 dont_generate_debug_code
1094} 1133}
1095 1134
1096/* 1135/*
@@ -1109,6 +1148,7 @@ register DF0PCISTAT {
1109 field RDPERR 0x04 1148 field RDPERR 0x04
1110 field TWATERR 0x02 1149 field TWATERR 0x02
1111 field DPR 0x01 1150 field DPR 0x01
1151 dont_generate_debug_code
1112} 1152}
1113 1153
1114/* 1154/*
@@ -1204,6 +1244,7 @@ register TARGPCISTAT {
1204 field SSE 0x40 1244 field SSE 0x40
1205 field STA 0x08 1245 field STA 0x08
1206 field TWATERR 0x02 1246 field TWATERR 0x02
1247 dont_generate_debug_code
1207} 1248}
1208 1249
1209/* 1250/*
@@ -1216,6 +1257,7 @@ register LQIN {
1216 size 20 1257 size 20
1217 count 2 1258 count 2
1218 modes M_DFF0, M_DFF1, M_SCSI 1259 modes M_DFF0, M_DFF1, M_SCSI
1260 dont_generate_debug_code
1219} 1261}
1220 1262
1221/* 1263/*
@@ -1247,6 +1289,7 @@ register LUNPTR {
1247 access_mode RW 1289 access_mode RW
1248 modes M_CFG 1290 modes M_CFG
1249 count 2 1291 count 2
1292 dont_generate_debug_code
1250} 1293}
1251 1294
1252/* 1295/*
@@ -1278,6 +1321,7 @@ register CMDLENPTR {
1278 access_mode RW 1321 access_mode RW
1279 modes M_CFG 1322 modes M_CFG
1280 count 1 1323 count 1
1324 dont_generate_debug_code
1281} 1325}
1282 1326
1283/* 1327/*
@@ -1290,6 +1334,7 @@ register ATTRPTR {
1290 access_mode RW 1334 access_mode RW
1291 modes M_CFG 1335 modes M_CFG
1292 count 1 1336 count 1
1337 dont_generate_debug_code
1293} 1338}
1294 1339
1295/* 1340/*
@@ -1302,6 +1347,7 @@ register FLAGPTR {
1302 access_mode RW 1347 access_mode RW
1303 modes M_CFG 1348 modes M_CFG
1304 count 1 1349 count 1
1350 dont_generate_debug_code
1305} 1351}
1306 1352
1307/* 1353/*
@@ -1313,6 +1359,7 @@ register CMDPTR {
1313 access_mode RW 1359 access_mode RW
1314 modes M_CFG 1360 modes M_CFG
1315 count 1 1361 count 1
1362 dont_generate_debug_code
1316} 1363}
1317 1364
1318/* 1365/*
@@ -1324,6 +1371,7 @@ register QNEXTPTR {
1324 access_mode RW 1371 access_mode RW
1325 modes M_CFG 1372 modes M_CFG
1326 count 1 1373 count 1
1374 dont_generate_debug_code
1327} 1375}
1328 1376
1329/* 1377/*
@@ -1347,6 +1395,7 @@ register ABRTBYTEPTR {
1347 access_mode RW 1395 access_mode RW
1348 modes M_CFG 1396 modes M_CFG
1349 count 1 1397 count 1
1398 dont_generate_debug_code
1350} 1399}
1351 1400
1352/* 1401/*
@@ -1358,6 +1407,7 @@ register ABRTBITPTR {
1358 access_mode RW 1407 access_mode RW
1359 modes M_CFG 1408 modes M_CFG
1360 count 1 1409 count 1
1410 dont_generate_debug_code
1361} 1411}
1362 1412
1363/* 1413/*
@@ -1398,6 +1448,7 @@ register LUNLEN {
1398 count 2 1448 count 2
1399 mask ILUNLEN 0x0F 1449 mask ILUNLEN 0x0F
1400 mask TLUNLEN 0xF0 1450 mask TLUNLEN 0xF0
1451 dont_generate_debug_code
1401} 1452}
1402const LUNLEN_SINGLE_LEVEL_LUN 0xF 1453const LUNLEN_SINGLE_LEVEL_LUN 0xF
1403 1454
@@ -1410,6 +1461,7 @@ register CDBLIMIT {
1410 access_mode RW 1461 access_mode RW
1411 modes M_CFG 1462 modes M_CFG
1412 count 1 1463 count 1
1464 dont_generate_debug_code
1413} 1465}
1414 1466
1415/* 1467/*
@@ -1422,6 +1474,7 @@ register MAXCMD {
1422 access_mode RW 1474 access_mode RW
1423 modes M_CFG 1475 modes M_CFG
1424 count 9 1476 count 9
1477 dont_generate_debug_code
1425} 1478}
1426 1479
1427/* 1480/*
@@ -1432,6 +1485,7 @@ register MAXCMDCNT {
1432 address 0x033 1485 address 0x033
1433 access_mode RW 1486 access_mode RW
1434 modes M_CFG 1487 modes M_CFG
1488 dont_generate_debug_code
1435} 1489}
1436 1490
1437/* 1491/*
@@ -1490,6 +1544,7 @@ register LQCTL1 {
1490 field PCI2PCI 0x04 1544 field PCI2PCI 0x04
1491 field SINGLECMD 0x02 1545 field SINGLECMD 0x02
1492 field ABORTPENDING 0x01 1546 field ABORTPENDING 0x01
1547 dont_generate_debug_code
1493} 1548}
1494 1549
1495/* 1550/*
@@ -1508,6 +1563,7 @@ register LQCTL2 {
1508 field LQOCONTINUE 0x04 1563 field LQOCONTINUE 0x04
1509 field LQOTOIDLE 0x02 1564 field LQOTOIDLE 0x02
1510 field LQOPAUSE 0x01 1565 field LQOPAUSE 0x01
1566 dont_generate_debug_code
1511} 1567}
1512 1568
1513/* 1569/*
@@ -1578,6 +1634,7 @@ register SXFRCTL0 {
1578 field DFPEXP 0x40 1634 field DFPEXP 0x40
1579 field BIOSCANCELEN 0x10 1635 field BIOSCANCELEN 0x10
1580 field SPIOEN 0x08 1636 field SPIOEN 0x08
1637 dont_generate_debug_code
1581} 1638}
1582 1639
1583/* 1640/*
@@ -1594,6 +1651,7 @@ register SXFRCTL1 {
1594 field ENSTIMER 0x04 1651 field ENSTIMER 0x04
1595 field ACTNEGEN 0x02 1652 field ACTNEGEN 0x02
1596 field STPWEN 0x01 1653 field STPWEN 0x01
1654 dont_generate_debug_code
1597} 1655}
1598 1656
1599/* 1657/*
@@ -1696,6 +1754,7 @@ register SCSISIGO {
1696 P_STATUS CDO|IOO, 1754 P_STATUS CDO|IOO,
1697 P_MESGIN CDO|IOO|MSGO 1755 P_MESGIN CDO|IOO|MSGO
1698 } 1756 }
1757 dont_generate_debug_code
1699} 1758}
1700 1759
1701/* 1760/*
@@ -1738,6 +1797,7 @@ register MULTARGID {
1738 modes M_CFG 1797 modes M_CFG
1739 size 2 1798 size 2
1740 count 2 1799 count 2
1800 dont_generate_debug_code
1741} 1801}
1742 1802
1743/* 1803/*
@@ -1774,6 +1834,7 @@ register SCSIDAT {
1774 access_mode RW 1834 access_mode RW
1775 modes M_DFF0, M_DFF1, M_SCSI 1835 modes M_DFF0, M_DFF1, M_SCSI
1776 size 2 1836 size 2
1837 dont_generate_debug_code
1777} 1838}
1778 1839
1779/* 1840/*
@@ -1796,6 +1857,7 @@ register TARGIDIN {
1796 count 2 1857 count 2
1797 field CLKOUT 0x80 1858 field CLKOUT 0x80
1798 field TARGID 0x0F 1859 field TARGID 0x0F
1860 dont_generate_debug_code
1799} 1861}
1800 1862
1801/* 1863/*
@@ -1825,6 +1887,7 @@ register SBLKCTL {
1825 field ENAB40 0x08 /* LVD transceiver active */ 1887 field ENAB40 0x08 /* LVD transceiver active */
1826 field ENAB20 0x04 /* SE/HVD transceiver active */ 1888 field ENAB20 0x04 /* SE/HVD transceiver active */
1827 field SELWIDE 0x02 1889 field SELWIDE 0x02
1890 dont_generate_debug_code
1828} 1891}
1829 1892
1830/* 1893/*
@@ -1842,6 +1905,7 @@ register OPTIONMODE {
1842 field ENDGFORMCHK 0x04 1905 field ENDGFORMCHK 0x04
1843 field AUTO_MSGOUT_DE 0x02 1906 field AUTO_MSGOUT_DE 0x02
1844 mask OPTIONMODE_DEFAULTS AUTO_MSGOUT_DE 1907 mask OPTIONMODE_DEFAULTS AUTO_MSGOUT_DE
1908 dont_generate_debug_code
1845} 1909}
1846 1910
1847/* 1911/*
@@ -1876,6 +1940,7 @@ register CLRSINT0 {
1876 field CLROVERRUN 0x04 1940 field CLROVERRUN 0x04
1877 field CLRSPIORDY 0x02 1941 field CLRSPIORDY 0x02
1878 field CLRARBDO 0x01 1942 field CLRARBDO 0x01
1943 dont_generate_debug_code
1879} 1944}
1880 1945
1881/* 1946/*
@@ -1929,6 +1994,7 @@ register CLRSINT1 {
1929 field CLRSCSIPERR 0x04 1994 field CLRSCSIPERR 0x04
1930 field CLRSTRB2FAST 0x02 1995 field CLRSTRB2FAST 0x02
1931 field CLRREQINIT 0x01 1996 field CLRREQINIT 0x01
1997 dont_generate_debug_code
1932} 1998}
1933 1999
1934/* 2000/*
@@ -1962,6 +2028,7 @@ register CLRSINT2 {
1962 field CLRWIDE_RES 0x04 /* Modes 0 and 1 only */ 2028 field CLRWIDE_RES 0x04 /* Modes 0 and 1 only */
1963 field CLRSDONE 0x02 /* Modes 0 and 1 only */ 2029 field CLRSDONE 0x02 /* Modes 0 and 1 only */
1964 field CLRDMADONE 0x01 /* Modes 0 and 1 only */ 2030 field CLRDMADONE 0x01 /* Modes 0 and 1 only */
2031 dont_generate_debug_code
1965} 2032}
1966 2033
1967/* 2034/*
@@ -2002,6 +2069,7 @@ register LQISTATE {
2002 access_mode RO 2069 access_mode RO
2003 modes M_CFG 2070 modes M_CFG
2004 count 6 2071 count 6
2072 dont_generate_debug_code
2005} 2073}
2006 2074
2007/* 2075/*
@@ -2022,6 +2090,7 @@ register LQOSTATE {
2022 access_mode RO 2090 access_mode RO
2023 modes M_CFG 2091 modes M_CFG
2024 count 2 2092 count 2
2093 dont_generate_debug_code
2025} 2094}
2026 2095
2027/* 2096/*
@@ -2054,6 +2123,7 @@ register CLRLQIINT0 {
2054 field CLRLQIBADLQT 0x04 2123 field CLRLQIBADLQT 0x04
2055 field CLRLQIATNLQ 0x02 2124 field CLRLQIATNLQ 0x02
2056 field CLRLQIATNCMD 0x01 2125 field CLRLQIATNCMD 0x01
2126 dont_generate_debug_code
2057} 2127}
2058 2128
2059/* 2129/*
@@ -2070,6 +2140,7 @@ register LQIMODE0 {
2070 field ENLQIBADLQT 0x04 2140 field ENLQIBADLQT 0x04
2071 field ENLQIATNLQ 0x02 2141 field ENLQIATNLQ 0x02
2072 field ENLQIATNCMD 0x01 2142 field ENLQIATNCMD 0x01
2143 dont_generate_debug_code
2073} 2144}
2074 2145
2075/* 2146/*
@@ -2106,6 +2177,7 @@ register CLRLQIINT1 {
2106 field CLRLQIBADLQI 0x04 2177 field CLRLQIBADLQI 0x04
2107 field CLRLQIOVERI_LQ 0x02 2178 field CLRLQIOVERI_LQ 0x02
2108 field CLRLQIOVERI_NLQ 0x01 2179 field CLRLQIOVERI_NLQ 0x01
2180 dont_generate_debug_code
2109} 2181}
2110 2182
2111/* 2183/*
@@ -2124,6 +2196,7 @@ register LQIMODE1 {
2124 field ENLQIBADLQI 0x04 2196 field ENLQIBADLQI 0x04
2125 field ENLQIOVERI_LQ 0x02 /* LQIOVERI1 */ 2197 field ENLQIOVERI_LQ 0x02 /* LQIOVERI1 */
2126 field ENLQIOVERI_NLQ 0x01 /* LQIOVERI2 */ 2198 field ENLQIOVERI_NLQ 0x01 /* LQIOVERI2 */
2199 dont_generate_debug_code
2127} 2200}
2128 2201
2129/* 2202/*
@@ -2165,6 +2238,7 @@ register CLRSINT3 {
2165 count 3 2238 count 3
2166 field CLRNTRAMPERR 0x02 2239 field CLRNTRAMPERR 0x02
2167 field CLROSRAMPERR 0x01 2240 field CLROSRAMPERR 0x01
2241 dont_generate_debug_code
2168} 2242}
2169 2243
2170/* 2244/*
@@ -2177,6 +2251,7 @@ register SIMODE3 {
2177 count 4 2251 count 4
2178 field ENNTRAMPERR 0x02 2252 field ENNTRAMPERR 0x02
2179 field ENOSRAMPERR 0x01 2253 field ENOSRAMPERR 0x01
2254 dont_generate_debug_code
2180} 2255}
2181 2256
2182/* 2257/*
@@ -2207,6 +2282,7 @@ register CLRLQOINT0 {
2207 field CLRLQOATNLQ 0x04 2282 field CLRLQOATNLQ 0x04
2208 field CLRLQOATNPKT 0x02 2283 field CLRLQOATNPKT 0x02
2209 field CLRLQOTCRC 0x01 2284 field CLRLQOTCRC 0x01
2285 dont_generate_debug_code
2210} 2286}
2211 2287
2212/* 2288/*
@@ -2222,6 +2298,7 @@ register LQOMODE0 {
2222 field ENLQOATNLQ 0x04 2298 field ENLQOATNLQ 0x04
2223 field ENLQOATNPKT 0x02 2299 field ENLQOATNPKT 0x02
2224 field ENLQOTCRC 0x01 2300 field ENLQOTCRC 0x01
2301 dont_generate_debug_code
2225} 2302}
2226 2303
2227/* 2304/*
@@ -2251,6 +2328,7 @@ register CLRLQOINT1 {
2251 field CLRLQOBADQAS 0x04 2328 field CLRLQOBADQAS 0x04
2252 field CLRLQOBUSFREE 0x02 2329 field CLRLQOBUSFREE 0x02
2253 field CLRLQOPHACHGINPKT 0x01 2330 field CLRLQOPHACHGINPKT 0x01
2331 dont_generate_debug_code
2254} 2332}
2255 2333
2256/* 2334/*
@@ -2266,6 +2344,7 @@ register LQOMODE1 {
2266 field ENLQOBADQAS 0x04 2344 field ENLQOBADQAS 0x04
2267 field ENLQOBUSFREE 0x02 2345 field ENLQOBUSFREE 0x02
2268 field ENLQOPHACHGINPKT 0x01 2346 field ENLQOPHACHGINPKT 0x01
2347 dont_generate_debug_code
2269} 2348}
2270 2349
2271/* 2350/*
@@ -2289,6 +2368,7 @@ register OS_SPACE_CNT {
2289 access_mode RO 2368 access_mode RO
2290 modes M_CFG 2369 modes M_CFG
2291 count 2 2370 count 2
2371 dont_generate_debug_code
2292} 2372}
2293 2373
2294/* 2374/*
@@ -2318,6 +2398,7 @@ register GSFIFO {
2318 access_mode RO 2398 access_mode RO
2319 size 2 2399 size 2
2320 modes M_DFF0, M_DFF1, M_SCSI 2400 modes M_DFF0, M_DFF1, M_SCSI
2401 dont_generate_debug_code
2321} 2402}
2322 2403
2323/* 2404/*
@@ -2341,6 +2422,7 @@ register NEXTSCB {
2341 access_mode RW 2422 access_mode RW
2342 size 2 2423 size 2
2343 modes M_SCSI 2424 modes M_SCSI
2425 dont_generate_debug_code
2344} 2426}
2345 2427
2346/* 2428/*
@@ -2357,6 +2439,7 @@ register LQOSCSCTL {
2357 field LQOBUSETDLY 0x40 2439 field LQOBUSETDLY 0x40
2358 field LQONOHOLDLACK 0x02 2440 field LQONOHOLDLACK 0x02
2359 field LQONOCHKOVER 0x01 2441 field LQONOCHKOVER 0x01
2442 dont_generate_debug_code
2360} 2443}
2361 2444
2362/* 2445/*
@@ -2389,6 +2472,7 @@ register CLRSEQINTSRC {
2389 field CLRCFG4TSTAT 0x04 2472 field CLRCFG4TSTAT 0x04
2390 field CLRCFG4ICMD 0x02 2473 field CLRCFG4ICMD 0x02
2391 field CLRCFG4TCMD 0x01 2474 field CLRCFG4TCMD 0x01
2475 dont_generate_debug_code
2392} 2476}
2393 2477
2394/* 2478/*
@@ -2415,6 +2499,7 @@ register CURRSCB {
2415 access_mode RW 2499 access_mode RW
2416 size 2 2500 size 2
2417 modes M_SCSI 2501 modes M_SCSI
2502 dont_generate_debug_code
2418} 2503}
2419 2504
2420/* 2505/*
@@ -2472,6 +2557,7 @@ register LASTSCB {
2472 access_mode RW 2557 access_mode RW
2473 size 2 2558 size 2
2474 modes M_SCSI 2559 modes M_SCSI
2560 dont_generate_debug_code
2475} 2561}
2476 2562
2477/* 2563/*
@@ -2494,6 +2580,7 @@ register SHADDR {
2494 access_mode RO 2580 access_mode RO
2495 size 8 2581 size 8
2496 modes M_DFF0, M_DFF1 2582 modes M_DFF0, M_DFF1
2583 dont_generate_debug_code
2497} 2584}
2498 2585
2499/* 2586/*
@@ -2513,6 +2600,7 @@ register NEGOADDR {
2513 address 0x060 2600 address 0x060
2514 access_mode RW 2601 access_mode RW
2515 modes M_SCSI 2602 modes M_SCSI
2603 dont_generate_debug_code
2516} 2604}
2517 2605
2518/* 2606/*
@@ -2523,6 +2611,7 @@ register NEGPERIOD {
2523 access_mode RW 2611 access_mode RW
2524 modes M_SCSI 2612 modes M_SCSI
2525 count 1 2613 count 1
2614 dont_generate_debug_code
2526} 2615}
2527 2616
2528/* 2617/*
@@ -2543,6 +2632,7 @@ register NEGOFFSET {
2543 access_mode RW 2632 access_mode RW
2544 modes M_SCSI 2633 modes M_SCSI
2545 count 1 2634 count 1
2635 dont_generate_debug_code
2546} 2636}
2547 2637
2548/* 2638/*
@@ -2557,6 +2647,7 @@ register NEGPPROPTS {
2557 field PPROPT_QAS 0x04 2647 field PPROPT_QAS 0x04
2558 field PPROPT_DT 0x02 2648 field PPROPT_DT 0x02
2559 field PPROPT_IUT 0x01 2649 field PPROPT_IUT 0x01
2650 dont_generate_debug_code
2560} 2651}
2561 2652
2562/* 2653/*
@@ -2573,6 +2664,7 @@ register NEGCONOPTS {
2573 field ENAUTOATNI 0x04 2664 field ENAUTOATNI 0x04
2574 field ENAUTOATNO 0x02 2665 field ENAUTOATNO 0x02
2575 field WIDEXFER 0x01 2666 field WIDEXFER 0x01
2667 dont_generate_debug_code
2576} 2668}
2577 2669
2578/* 2670/*
@@ -2583,6 +2675,7 @@ register ANNEXCOL {
2583 access_mode RW 2675 access_mode RW
2584 modes M_SCSI 2676 modes M_SCSI
2585 count 7 2677 count 7
2678 dont_generate_debug_code
2586} 2679}
2587 2680
2588/* 2681/*
@@ -2602,6 +2695,7 @@ register SCSCHKN {
2602 field DFFACTCLR 0x04 2695 field DFFACTCLR 0x04
2603 field SHVALIDSTDIS 0x02 2696 field SHVALIDSTDIS 0x02
2604 field LSTSGCLRDIS 0x01 2697 field LSTSGCLRDIS 0x01
2698 dont_generate_debug_code
2605} 2699}
2606 2700
2607const AHD_ANNEXCOL_PER_DEV0 4 2701const AHD_ANNEXCOL_PER_DEV0 4
@@ -2635,6 +2729,7 @@ register ANNEXDAT {
2635 access_mode RW 2729 access_mode RW
2636 modes M_SCSI 2730 modes M_SCSI
2637 count 3 2731 count 3
2732 dont_generate_debug_code
2638} 2733}
2639 2734
2640/* 2735/*
@@ -2645,6 +2740,7 @@ register IOWNID {
2645 address 0x067 2740 address 0x067
2646 access_mode RW 2741 access_mode RW
2647 modes M_SCSI 2742 modes M_SCSI
2743 dont_generate_debug_code
2648} 2744}
2649 2745
2650/* 2746/*
@@ -2671,6 +2767,7 @@ register TOWNID {
2671 access_mode RW 2767 access_mode RW
2672 modes M_SCSI 2768 modes M_SCSI
2673 count 2 2769 count 2
2770 dont_generate_debug_code
2674} 2771}
2675 2772
2676/* 2773/*
@@ -2702,6 +2799,7 @@ register SHCNT {
2702 access_mode RW 2799 access_mode RW
2703 size 3 2800 size 3
2704 modes M_DFF0, M_DFF1 2801 modes M_DFF0, M_DFF1
2802 dont_generate_debug_code
2705} 2803}
2706 2804
2707/* 2805/*
@@ -2789,6 +2887,7 @@ register SCBPTR {
2789 access_mode RW 2887 access_mode RW
2790 size 2 2888 size 2
2791 modes M_DFF0, M_DFF1, M_CCHAN, M_SCSI 2889 modes M_DFF0, M_DFF1, M_CCHAN, M_SCSI
2890 dont_generate_debug_code
2792} 2891}
2793 2892
2794/* 2893/*
@@ -2816,6 +2915,7 @@ register SCBAUTOPTR {
2816 field AUSCBPTR_EN 0x80 2915 field AUSCBPTR_EN 0x80
2817 field SCBPTR_ADDR 0x38 2916 field SCBPTR_ADDR 0x38
2818 field SCBPTR_OFF 0x07 2917 field SCBPTR_OFF 0x07
2918 dont_generate_debug_code
2819} 2919}
2820 2920
2821/* 2921/*
@@ -2825,6 +2925,7 @@ register CCSGADDR {
2825 address 0x0AC 2925 address 0x0AC
2826 access_mode RW 2926 access_mode RW
2827 modes M_DFF0, M_DFF1 2927 modes M_DFF0, M_DFF1
2928 dont_generate_debug_code
2828} 2929}
2829 2930
2830/* 2931/*
@@ -2834,6 +2935,7 @@ register CCSCBADDR {
2834 address 0x0AC 2935 address 0x0AC
2835 access_mode RW 2936 access_mode RW
2836 modes M_CCHAN 2937 modes M_CCHAN
2938 dont_generate_debug_code
2837} 2939}
2838 2940
2839/* 2941/*
@@ -2899,6 +3001,7 @@ register CCSGRAM {
2899 address 0x0B0 3001 address 0x0B0
2900 access_mode RW 3002 access_mode RW
2901 modes M_DFF0, M_DFF1 3003 modes M_DFF0, M_DFF1
3004 dont_generate_debug_code
2902} 3005}
2903 3006
2904/* 3007/*
@@ -2908,6 +3011,7 @@ register CCSCBRAM {
2908 address 0x0B0 3011 address 0x0B0
2909 access_mode RW 3012 access_mode RW
2910 modes M_CCHAN 3013 modes M_CCHAN
3014 dont_generate_debug_code
2911} 3015}
2912 3016
2913/* 3017/*
@@ -2958,6 +3062,7 @@ register BRDDAT {
2958 access_mode RW 3062 access_mode RW
2959 modes M_SCSI 3063 modes M_SCSI
2960 count 2 3064 count 2
3065 dont_generate_debug_code
2961} 3066}
2962 3067
2963/* 3068/*
@@ -2974,6 +3079,7 @@ register BRDCTL {
2974 field BRDEN 0x04 3079 field BRDEN 0x04
2975 field BRDRW 0x02 3080 field BRDRW 0x02
2976 field BRDSTB 0x01 3081 field BRDSTB 0x01
3082 dont_generate_debug_code
2977} 3083}
2978 3084
2979/* 3085/*
@@ -2984,6 +3090,7 @@ register SEEADR {
2984 access_mode RW 3090 access_mode RW
2985 modes M_SCSI 3091 modes M_SCSI
2986 count 4 3092 count 4
3093 dont_generate_debug_code
2987} 3094}
2988 3095
2989/* 3096/*
@@ -2995,6 +3102,7 @@ register SEEDAT {
2995 size 2 3102 size 2
2996 modes M_SCSI 3103 modes M_SCSI
2997 count 4 3104 count 4
3105 dont_generate_debug_code
2998} 3106}
2999 3107
3000/* 3108/*
@@ -3011,6 +3119,7 @@ register SEESTAT {
3011 field SEEARBACK 0x04 3119 field SEEARBACK 0x04
3012 field SEEBUSY 0x02 3120 field SEEBUSY 0x02
3013 field SEESTART 0x01 3121 field SEESTART 0x01
3122 dont_generate_debug_code
3014} 3123}
3015 3124
3016/* 3125/*
@@ -3036,6 +3145,7 @@ register SEECTL {
3036 mask SEEOP_EWDS 0x40 3145 mask SEEOP_EWDS 0x40
3037 field SEERST 0x02 3146 field SEERST 0x02
3038 field SEESTART 0x01 3147 field SEESTART 0x01
3148 dont_generate_debug_code
3039} 3149}
3040 3150
3041const SEEOP_ERAL_ADDR 0x80 3151const SEEOP_ERAL_ADDR 0x80
@@ -3050,6 +3160,7 @@ register SCBCNT {
3050 address 0x0BF 3160 address 0x0BF
3051 access_mode RW 3161 access_mode RW
3052 modes M_SCSI 3162 modes M_SCSI
3163 dont_generate_debug_code
3053} 3164}
3054 3165
3055/* 3166/*
@@ -3061,6 +3172,7 @@ register DFWADDR {
3061 access_mode RW 3172 access_mode RW
3062 size 2 3173 size 2
3063 modes M_DFF0, M_DFF1 3174 modes M_DFF0, M_DFF1
3175 dont_generate_debug_code
3064} 3176}
3065 3177
3066/* 3178/*
@@ -3087,6 +3199,7 @@ register DSPDATACTL {
3087 field DESQDIS 0x10 3199 field DESQDIS 0x10
3088 field RCVROFFSTDIS 0x04 3200 field RCVROFFSTDIS 0x04
3089 field XMITOFFSTDIS 0x02 3201 field XMITOFFSTDIS 0x02
3202 dont_generate_debug_code
3090} 3203}
3091 3204
3092/* 3205/*
@@ -3132,6 +3245,7 @@ register DFDAT {
3132 address 0x0C4 3245 address 0x0C4
3133 access_mode RW 3246 access_mode RW
3134 modes M_DFF0, M_DFF1 3247 modes M_DFF0, M_DFF1
3248 dont_generate_debug_code
3135} 3249}
3136 3250
3137/* 3251/*
@@ -3144,6 +3258,7 @@ register DSPSELECT {
3144 count 1 3258 count 1
3145 field AUTOINCEN 0x80 3259 field AUTOINCEN 0x80
3146 field DSPSEL 0x1F 3260 field DSPSEL 0x1F
3261 dont_generate_debug_code
3147} 3262}
3148 3263
3149const NUMDSPS 0x14 3264const NUMDSPS 0x14
@@ -3158,6 +3273,7 @@ register WRTBIASCTL {
3158 count 3 3273 count 3
3159 field AUTOXBCDIS 0x80 3274 field AUTOXBCDIS 0x80
3160 field XMITMANVAL 0x3F 3275 field XMITMANVAL 0x3F
3276 dont_generate_debug_code
3161} 3277}
3162 3278
3163/* 3279/*
@@ -3316,6 +3432,7 @@ register FLAGS {
3316 count 23 3432 count 23
3317 field ZERO 0x02 3433 field ZERO 0x02
3318 field CARRY 0x01 3434 field CARRY 0x01
3435 dont_generate_debug_code
3319} 3436}
3320 3437
3321/* 3438/*
@@ -3344,6 +3461,7 @@ register SEQRAM {
3344 address 0x0DA 3461 address 0x0DA
3345 access_mode RW 3462 access_mode RW
3346 count 2 3463 count 2
3464 dont_generate_debug_code
3347} 3465}
3348 3466
3349/* 3467/*
@@ -3355,6 +3473,7 @@ register PRGMCNT {
3355 access_mode RW 3473 access_mode RW
3356 size 2 3474 size 2
3357 count 5 3475 count 5
3476 dont_generate_debug_code
3358} 3477}
3359 3478
3360/* 3479/*
@@ -3364,6 +3483,7 @@ register ACCUM {
3364 address 0x0E0 3483 address 0x0E0
3365 access_mode RW 3484 access_mode RW
3366 accumulator 3485 accumulator
3486 dont_generate_debug_code
3367} 3487}
3368 3488
3369/* 3489/*
@@ -3380,6 +3500,7 @@ register SINDEX {
3380 access_mode RW 3500 access_mode RW
3381 size 2 3501 size 2
3382 sindex 3502 sindex
3503 dont_generate_debug_code
3383} 3504}
3384 3505
3385/* 3506/*
@@ -3390,6 +3511,7 @@ register DINDEX {
3390 address 0x0E4 3511 address 0x0E4
3391 access_mode RW 3512 access_mode RW
3392 size 2 3513 size 2
3514 dont_generate_debug_code
3393} 3515}
3394 3516
3395/* 3517/*
@@ -3415,6 +3537,7 @@ register ALLONES {
3415 address 0x0E8 3537 address 0x0E8
3416 access_mode RO 3538 access_mode RO
3417 allones 3539 allones
3540 dont_generate_debug_code
3418} 3541}
3419 3542
3420/* 3543/*
@@ -3425,6 +3548,7 @@ register ALLZEROS {
3425 address 0x0EA 3548 address 0x0EA
3426 access_mode RO 3549 access_mode RO
3427 allzeros 3550 allzeros
3551 dont_generate_debug_code
3428} 3552}
3429 3553
3430/* 3554/*
@@ -3435,6 +3559,7 @@ register NONE {
3435 address 0x0EA 3559 address 0x0EA
3436 access_mode WO 3560 access_mode WO
3437 none 3561 none
3562 dont_generate_debug_code
3438} 3563}
3439 3564
3440/* 3565/*
@@ -3445,6 +3570,7 @@ register NONE {
3445register SINDIR { 3570register SINDIR {
3446 address 0x0EC 3571 address 0x0EC
3447 access_mode RO 3572 access_mode RO
3573 dont_generate_debug_code
3448} 3574}
3449 3575
3450/* 3576/*
@@ -3455,6 +3581,7 @@ register SINDIR {
3455register DINDIR { 3581register DINDIR {
3456 address 0x0ED 3582 address 0x0ED
3457 access_mode WO 3583 access_mode WO
3584 dont_generate_debug_code
3458} 3585}
3459 3586
3460/* 3587/*
@@ -3479,6 +3606,7 @@ register FUNCTION1 {
3479register STACK { 3606register STACK {
3480 address 0x0F2 3607 address 0x0F2
3481 access_mode RW 3608 access_mode RW
3609 dont_generate_debug_code
3482} 3610}
3483 3611
3484/* 3612/*
@@ -3491,6 +3619,7 @@ register INTVEC1_ADDR {
3491 size 2 3619 size 2
3492 modes M_CFG 3620 modes M_CFG
3493 count 1 3621 count 1
3622 dont_generate_debug_code
3494} 3623}
3495 3624
3496/* 3625/*
@@ -3503,6 +3632,7 @@ register CURADDR {
3503 size 2 3632 size 2
3504 modes M_SCSI 3633 modes M_SCSI
3505 count 2 3634 count 2
3635 dont_generate_debug_code
3506} 3636}
3507 3637
3508/* 3638/*
@@ -3515,6 +3645,7 @@ register INTVEC2_ADDR {
3515 size 2 3645 size 2
3516 modes M_CFG 3646 modes M_CFG
3517 count 1 3647 count 1
3648 dont_generate_debug_code
3518} 3649}
3519 3650
3520/* 3651/*
@@ -3543,12 +3674,14 @@ scratch_ram {
3543 modes 0, 1, 2, 3 3674 modes 0, 1, 2, 3
3544 REG0 { 3675 REG0 {
3545 size 2 3676 size 2
3677 dont_generate_debug_code
3546 } 3678 }
3547 REG1 { 3679 REG1 {
3548 size 2 3680 size 2
3549 } 3681 }
3550 REG_ISR { 3682 REG_ISR {
3551 size 2 3683 size 2
3684 dont_generate_debug_code
3552 } 3685 }
3553 SG_STATE { 3686 SG_STATE {
3554 size 1 3687 size 1
@@ -3572,9 +3705,11 @@ scratch_ram {
3572 modes 0, 1, 2, 3 3705 modes 0, 1, 2, 3
3573 LONGJMP_ADDR { 3706 LONGJMP_ADDR {
3574 size 2 3707 size 2
3708 dont_generate_debug_code
3575 } 3709 }
3576 ACCUM_SAVE { 3710 ACCUM_SAVE {
3577 size 1 3711 size 1
3712 dont_generate_debug_code
3578 } 3713 }
3579} 3714}
3580 3715
@@ -3591,18 +3726,22 @@ scratch_ram {
3591 */ 3726 */
3592 WAITING_SCB_TAILS { 3727 WAITING_SCB_TAILS {
3593 size 32 3728 size 32
3729 dont_generate_debug_code
3594 } 3730 }
3595 WAITING_TID_HEAD { 3731 WAITING_TID_HEAD {
3596 size 2 3732 size 2
3733 dont_generate_debug_code
3597 } 3734 }
3598 WAITING_TID_TAIL { 3735 WAITING_TID_TAIL {
3599 size 2 3736 size 2
3737 dont_generate_debug_code
3600 } 3738 }
3601 /* 3739 /*
3602 * SCBID of the next SCB in the new SCB queue. 3740 * SCBID of the next SCB in the new SCB queue.
3603 */ 3741 */
3604 NEXT_QUEUED_SCB_ADDR { 3742 NEXT_QUEUED_SCB_ADDR {
3605 size 4 3743 size 4
3744 dont_generate_debug_code
3606 } 3745 }
3607 /* 3746 /*
3608 * head of list of SCBs that have 3747 * head of list of SCBs that have
@@ -3611,6 +3750,7 @@ scratch_ram {
3611 */ 3750 */
3612 COMPLETE_SCB_HEAD { 3751 COMPLETE_SCB_HEAD {
3613 size 2 3752 size 2
3753 dont_generate_debug_code
3614 } 3754 }
3615 /* 3755 /*
3616 * The list of completed SCBs in 3756 * The list of completed SCBs in
@@ -3618,6 +3758,7 @@ scratch_ram {
3618 */ 3758 */
3619 COMPLETE_SCB_DMAINPROG_HEAD { 3759 COMPLETE_SCB_DMAINPROG_HEAD {
3620 size 2 3760 size 2
3761 dont_generate_debug_code
3621 } 3762 }
3622 /* 3763 /*
3623 * head of list of SCBs that have 3764 * head of list of SCBs that have
@@ -3626,6 +3767,7 @@ scratch_ram {
3626 */ 3767 */
3627 COMPLETE_DMA_SCB_HEAD { 3768 COMPLETE_DMA_SCB_HEAD {
3628 size 2 3769 size 2
3770 dont_generate_debug_code
3629 } 3771 }
3630 /* 3772 /*
3631 * tail of list of SCBs that have 3773 * tail of list of SCBs that have
@@ -3634,6 +3776,7 @@ scratch_ram {
3634 */ 3776 */
3635 COMPLETE_DMA_SCB_TAIL { 3777 COMPLETE_DMA_SCB_TAIL {
3636 size 2 3778 size 2
3779 dont_generate_debug_code
3637 } 3780 }
3638 /* 3781 /*
3639 * head of list of SCBs that have 3782 * head of list of SCBs that have
@@ -3643,6 +3786,7 @@ scratch_ram {
3643 */ 3786 */
3644 COMPLETE_ON_QFREEZE_HEAD { 3787 COMPLETE_ON_QFREEZE_HEAD {
3645 size 2 3788 size 2
3789 dont_generate_debug_code
3646 } 3790 }
3647 /* 3791 /*
3648 * Counting semaphore to prevent new select-outs 3792 * Counting semaphore to prevent new select-outs
@@ -3667,6 +3811,7 @@ scratch_ram {
3667 */ 3811 */
3668 MSG_OUT { 3812 MSG_OUT {
3669 size 1 3813 size 1
3814 dont_generate_debug_code
3670 } 3815 }
3671 /* Parameters for DMA Logic */ 3816 /* Parameters for DMA Logic */
3672 DMAPARAMS { 3817 DMAPARAMS {
@@ -3682,6 +3827,7 @@ scratch_ram {
3682 field DIRECTION 0x04 /* Set indicates PCI->SCSI */ 3827 field DIRECTION 0x04 /* Set indicates PCI->SCSI */
3683 field FIFOFLUSH 0x02 3828 field FIFOFLUSH 0x02
3684 field FIFORESET 0x01 3829 field FIFORESET 0x01
3830 dont_generate_debug_code
3685 } 3831 }
3686 SEQ_FLAGS { 3832 SEQ_FLAGS {
3687 size 1 3833 size 1
@@ -3703,9 +3849,11 @@ scratch_ram {
3703 */ 3849 */
3704 SAVED_SCSIID { 3850 SAVED_SCSIID {
3705 size 1 3851 size 1
3852 dont_generate_debug_code
3706 } 3853 }
3707 SAVED_LUN { 3854 SAVED_LUN {
3708 size 1 3855 size 1
3856 dont_generate_debug_code
3709 } 3857 }
3710 /* 3858 /*
3711 * The last bus phase as seen by the sequencer. 3859 * The last bus phase as seen by the sequencer.
@@ -3733,6 +3881,7 @@ scratch_ram {
3733 */ 3881 */
3734 QOUTFIFO_ENTRY_VALID_TAG { 3882 QOUTFIFO_ENTRY_VALID_TAG {
3735 size 1 3883 size 1
3884 dont_generate_debug_code
3736 } 3885 }
3737 /* 3886 /*
3738 * Kernel and sequencer offsets into the queue of 3887 * Kernel and sequencer offsets into the queue of
@@ -3742,10 +3891,12 @@ scratch_ram {
3742 KERNEL_TQINPOS { 3891 KERNEL_TQINPOS {
3743 size 1 3892 size 1
3744 count 1 3893 count 1
3894 dont_generate_debug_code
3745 } 3895 }
3746 TQINPOS { 3896 TQINPOS {
3747 size 1 3897 size 1
3748 count 8 3898 count 8
3899 dont_generate_debug_code
3749 } 3900 }
3750 /* 3901 /*
3751 * Base address of our shared data with the kernel driver in host 3902 * Base address of our shared data with the kernel driver in host
@@ -3754,6 +3905,7 @@ scratch_ram {
3754 */ 3905 */
3755 SHARED_DATA_ADDR { 3906 SHARED_DATA_ADDR {
3756 size 4 3907 size 4
3908 dont_generate_debug_code
3757 } 3909 }
3758 /* 3910 /*
3759 * Pointer to location in host memory for next 3911 * Pointer to location in host memory for next
@@ -3761,6 +3913,7 @@ scratch_ram {
3761 */ 3913 */
3762 QOUTFIFO_NEXT_ADDR { 3914 QOUTFIFO_NEXT_ADDR {
3763 size 4 3915 size 4
3916 dont_generate_debug_code
3764 } 3917 }
3765 ARG_1 { 3918 ARG_1 {
3766 size 1 3919 size 1
@@ -3773,11 +3926,13 @@ scratch_ram {
3773 mask CONT_MSG_LOOP_READ 0x03 3926 mask CONT_MSG_LOOP_READ 0x03
3774 mask CONT_MSG_LOOP_TARG 0x02 3927 mask CONT_MSG_LOOP_TARG 0x02
3775 alias RETURN_1 3928 alias RETURN_1
3929 dont_generate_debug_code
3776 } 3930 }
3777 ARG_2 { 3931 ARG_2 {
3778 size 1 3932 size 1
3779 count 1 3933 count 1
3780 alias RETURN_2 3934 alias RETURN_2
3935 dont_generate_debug_code
3781 } 3936 }
3782 3937
3783 /* 3938 /*
@@ -3785,6 +3940,7 @@ scratch_ram {
3785 */ 3940 */
3786 LAST_MSG { 3941 LAST_MSG {
3787 size 1 3942 size 1
3943 dont_generate_debug_code
3788 } 3944 }
3789 3945
3790 /* 3946 /*
@@ -3801,6 +3957,7 @@ scratch_ram {
3801 field MANUALP 0x0C 3957 field MANUALP 0x0C
3802 field ENAUTOATNP 0x02 3958 field ENAUTOATNP 0x02
3803 field ALTSTIM 0x01 3959 field ALTSTIM 0x01
3960 dont_generate_debug_code
3804 } 3961 }
3805 3962
3806 /* 3963 /*
@@ -3809,6 +3966,7 @@ scratch_ram {
3809 INITIATOR_TAG { 3966 INITIATOR_TAG {
3810 size 1 3967 size 1
3811 count 1 3968 count 1
3969 dont_generate_debug_code
3812 } 3970 }
3813 3971
3814 SEQ_FLAGS2 { 3972 SEQ_FLAGS2 {
@@ -3820,6 +3978,7 @@ scratch_ram {
3820 3978
3821 ALLOCFIFO_SCBPTR { 3979 ALLOCFIFO_SCBPTR {
3822 size 2 3980 size 2
3981 dont_generate_debug_code
3823 } 3982 }
3824 3983
3825 /* 3984 /*
@@ -3829,6 +3988,7 @@ scratch_ram {
3829 */ 3988 */
3830 INT_COALESCING_TIMER { 3989 INT_COALESCING_TIMER {
3831 size 2 3990 size 2
3991 dont_generate_debug_code
3832 } 3992 }
3833 3993
3834 /* 3994 /*
@@ -3838,6 +3998,7 @@ scratch_ram {
3838 */ 3998 */
3839 INT_COALESCING_MAXCMDS { 3999 INT_COALESCING_MAXCMDS {
3840 size 1 4000 size 1
4001 dont_generate_debug_code
3841 } 4002 }
3842 4003
3843 /* 4004 /*
@@ -3846,6 +4007,7 @@ scratch_ram {
3846 */ 4007 */
3847 INT_COALESCING_MINCMDS { 4008 INT_COALESCING_MINCMDS {
3848 size 1 4009 size 1
4010 dont_generate_debug_code
3849 } 4011 }
3850 4012
3851 /* 4013 /*
@@ -3853,6 +4015,7 @@ scratch_ram {
3853 */ 4015 */
3854 CMDS_PENDING { 4016 CMDS_PENDING {
3855 size 2 4017 size 2
4018 dont_generate_debug_code
3856 } 4019 }
3857 4020
3858 /* 4021 /*
@@ -3860,6 +4023,7 @@ scratch_ram {
3860 */ 4023 */
3861 INT_COALESCING_CMDCOUNT { 4024 INT_COALESCING_CMDCOUNT {
3862 size 1 4025 size 1
4026 dont_generate_debug_code
3863 } 4027 }
3864 4028
3865 /* 4029 /*
@@ -3868,6 +4032,7 @@ scratch_ram {
3868 */ 4032 */
3869 LOCAL_HS_MAILBOX { 4033 LOCAL_HS_MAILBOX {
3870 size 1 4034 size 1
4035 dont_generate_debug_code
3871 } 4036 }
3872 /* 4037 /*
3873 * Target-mode CDB type to CDB length table used 4038 * Target-mode CDB type to CDB length table used
@@ -3876,6 +4041,7 @@ scratch_ram {
3876 CMDSIZE_TABLE { 4041 CMDSIZE_TABLE {
3877 size 8 4042 size 8
3878 count 8 4043 count 8
4044 dont_generate_debug_code
3879 } 4045 }
3880 /* 4046 /*
3881 * When an SCB with the MK_MESSAGE flag is 4047 * When an SCB with the MK_MESSAGE flag is
@@ -3908,25 +4074,31 @@ scb {
3908 size 4 4074 size 4
3909 alias SCB_CDB_STORE 4075 alias SCB_CDB_STORE
3910 alias SCB_HOST_CDB_PTR 4076 alias SCB_HOST_CDB_PTR
4077 dont_generate_debug_code
3911 } 4078 }
3912 SCB_RESIDUAL_SGPTR { 4079 SCB_RESIDUAL_SGPTR {
3913 size 4 4080 size 4
3914 field SG_ADDR_MASK 0xf8 /* In the last byte */ 4081 field SG_ADDR_MASK 0xf8 /* In the last byte */
3915 field SG_OVERRUN_RESID 0x02 /* In the first byte */ 4082 field SG_OVERRUN_RESID 0x02 /* In the first byte */
3916 field SG_LIST_NULL 0x01 /* In the first byte */ 4083 field SG_LIST_NULL 0x01 /* In the first byte */
4084 dont_generate_debug_code
3917 } 4085 }
3918 SCB_SCSI_STATUS { 4086 SCB_SCSI_STATUS {
3919 size 1 4087 size 1
3920 alias SCB_HOST_CDB_LEN 4088 alias SCB_HOST_CDB_LEN
4089 dont_generate_debug_code
3921 } 4090 }
3922 SCB_TARGET_PHASES { 4091 SCB_TARGET_PHASES {
3923 size 1 4092 size 1
4093 dont_generate_debug_code
3924 } 4094 }
3925 SCB_TARGET_DATA_DIR { 4095 SCB_TARGET_DATA_DIR {
3926 size 1 4096 size 1
4097 dont_generate_debug_code
3927 } 4098 }
3928 SCB_TARGET_ITAG { 4099 SCB_TARGET_ITAG {
3929 size 1 4100 size 1
4101 dont_generate_debug_code
3930 } 4102 }
3931 SCB_SENSE_BUSADDR { 4103 SCB_SENSE_BUSADDR {
3932 /* 4104 /*
@@ -3936,10 +4108,12 @@ scb {
3936 */ 4108 */
3937 size 4 4109 size 4
3938 alias SCB_NEXT_COMPLETE 4110 alias SCB_NEXT_COMPLETE
4111 dont_generate_debug_code
3939 } 4112 }
3940 SCB_TAG { 4113 SCB_TAG {
3941 alias SCB_FIFO_USE_COUNT 4114 alias SCB_FIFO_USE_COUNT
3942 size 2 4115 size 2
4116 dont_generate_debug_code
3943 } 4117 }
3944 SCB_CONTROL { 4118 SCB_CONTROL {
3945 size 1 4119 size 1
@@ -3959,6 +4133,7 @@ scb {
3959 SCB_LUN { 4133 SCB_LUN {
3960 size 1 4134 size 1
3961 field LID 0xff 4135 field LID 0xff
4136 dont_generate_debug_code
3962 } 4137 }
3963 SCB_TASK_ATTRIBUTE { 4138 SCB_TASK_ATTRIBUTE {
3964 size 1 4139 size 1
@@ -3967,16 +4142,20 @@ scb {
3967 * ignore wide residue message handling. 4142 * ignore wide residue message handling.
3968 */ 4143 */
3969 field SCB_XFERLEN_ODD 0x01 4144 field SCB_XFERLEN_ODD 0x01
4145 dont_generate_debug_code
3970 } 4146 }
3971 SCB_CDB_LEN { 4147 SCB_CDB_LEN {
3972 size 1 4148 size 1
3973 field SCB_CDB_LEN_PTR 0x80 /* CDB in host memory */ 4149 field SCB_CDB_LEN_PTR 0x80 /* CDB in host memory */
4150 dont_generate_debug_code
3974 } 4151 }
3975 SCB_TASK_MANAGEMENT { 4152 SCB_TASK_MANAGEMENT {
3976 size 1 4153 size 1
4154 dont_generate_debug_code
3977 } 4155 }
3978 SCB_DATAPTR { 4156 SCB_DATAPTR {
3979 size 8 4157 size 8
4158 dont_generate_debug_code
3980 } 4159 }
3981 SCB_DATACNT { 4160 SCB_DATACNT {
3982 /* 4161 /*
@@ -3986,22 +4165,27 @@ scb {
3986 size 4 4165 size 4
3987 field SG_LAST_SEG 0x80 /* In the fourth byte */ 4166 field SG_LAST_SEG 0x80 /* In the fourth byte */
3988 field SG_HIGH_ADDR_BITS 0x7F /* In the fourth byte */ 4167 field SG_HIGH_ADDR_BITS 0x7F /* In the fourth byte */
4168 dont_generate_debug_code
3989 } 4169 }
3990 SCB_SGPTR { 4170 SCB_SGPTR {
3991 size 4 4171 size 4
3992 field SG_STATUS_VALID 0x04 /* In the first byte */ 4172 field SG_STATUS_VALID 0x04 /* In the first byte */
3993 field SG_FULL_RESID 0x02 /* In the first byte */ 4173 field SG_FULL_RESID 0x02 /* In the first byte */
3994 field SG_LIST_NULL 0x01 /* In the first byte */ 4174 field SG_LIST_NULL 0x01 /* In the first byte */
4175 dont_generate_debug_code
3995 } 4176 }
3996 SCB_BUSADDR { 4177 SCB_BUSADDR {
3997 size 4 4178 size 4
4179 dont_generate_debug_code
3998 } 4180 }
3999 SCB_NEXT { 4181 SCB_NEXT {
4000 alias SCB_NEXT_SCB_BUSADDR 4182 alias SCB_NEXT_SCB_BUSADDR
4001 size 2 4183 size 2
4184 dont_generate_debug_code
4002 } 4185 }
4003 SCB_NEXT2 { 4186 SCB_NEXT2 {
4004 size 2 4187 size 2
4188 dont_generate_debug_code
4005 } 4189 }
4006 SCB_SPARE { 4190 SCB_SPARE {
4007 size 8 4191 size 8
@@ -4009,6 +4193,7 @@ scb {
4009 } 4193 }
4010 SCB_DISCONNECTED_LISTS { 4194 SCB_DISCONNECTED_LISTS {
4011 size 8 4195 size 8
4196 dont_generate_debug_code
4012 } 4197 }
4013} 4198}
4014 4199
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c
index 55508b0fcec4..bdad54ec088c 100644
--- a/drivers/scsi/aic7xxx/aic79xx_core.c
+++ b/drivers/scsi/aic7xxx/aic79xx_core.c
@@ -2472,8 +2472,6 @@ ahd_handle_scsiint(struct ahd_softc *ahd, u_int intstat)
2472 if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) 2472 if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0)
2473 ahd_outb(ahd, CLRLQOINT1, 0); 2473 ahd_outb(ahd, CLRLQOINT1, 0);
2474 } else if ((status & SELTO) != 0) { 2474 } else if ((status & SELTO) != 0) {
2475 u_int scbid;
2476
2477 /* Stop the selection */ 2475 /* Stop the selection */
2478 ahd_outb(ahd, SCSISEQ0, 0); 2476 ahd_outb(ahd, SCSISEQ0, 0);
2479 2477
@@ -2583,9 +2581,6 @@ ahd_handle_scsiint(struct ahd_softc *ahd, u_int intstat)
2583 case BUSFREE_DFF0: 2581 case BUSFREE_DFF0:
2584 case BUSFREE_DFF1: 2582 case BUSFREE_DFF1:
2585 { 2583 {
2586 u_int scbid;
2587 struct scb *scb;
2588
2589 mode = busfreetime == BUSFREE_DFF0 2584 mode = busfreetime == BUSFREE_DFF0
2590 ? AHD_MODE_DFF0 : AHD_MODE_DFF1; 2585 ? AHD_MODE_DFF0 : AHD_MODE_DFF1;
2591 ahd_set_modes(ahd, mode, mode); 2586 ahd_set_modes(ahd, mode, mode);
@@ -3689,7 +3684,7 @@ ahd_free_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel, int force)
3689 * by the capabilities of the bus connectivity of and sync settings for 3684 * by the capabilities of the bus connectivity of and sync settings for
3690 * the target. 3685 * the target.
3691 */ 3686 */
3692void 3687static void
3693ahd_devlimited_syncrate(struct ahd_softc *ahd, 3688ahd_devlimited_syncrate(struct ahd_softc *ahd,
3694 struct ahd_initiator_tinfo *tinfo, 3689 struct ahd_initiator_tinfo *tinfo,
3695 u_int *period, u_int *ppr_options, role_t role) 3690 u_int *period, u_int *ppr_options, role_t role)
@@ -4136,7 +4131,7 @@ ahd_update_neg_table(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
4136 4131
4137 /* 4132 /*
4138 * Harpoon2A assumed that there would be a 4133 * Harpoon2A assumed that there would be a
4139 * fallback rate between 160MHz and 80Mhz, 4134 * fallback rate between 160MHz and 80MHz,
4140 * so 7 is used as the period factor rather 4135 * so 7 is used as the period factor rather
4141 * than 8 for 160MHz. 4136 * than 8 for 160MHz.
4142 */ 4137 */
@@ -8708,7 +8703,7 @@ ahd_reset_current_bus(struct ahd_softc *ahd)
8708int 8703int
8709ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset) 8704ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset)
8710{ 8705{
8711 struct ahd_devinfo devinfo; 8706 struct ahd_devinfo caminfo;
8712 u_int initiator; 8707 u_int initiator;
8713 u_int target; 8708 u_int target;
8714 u_int max_scsiid; 8709 u_int max_scsiid;
@@ -8729,7 +8724,7 @@ ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset)
8729 8724
8730 ahd->pending_device = NULL; 8725 ahd->pending_device = NULL;
8731 8726
8732 ahd_compile_devinfo(&devinfo, 8727 ahd_compile_devinfo(&caminfo,
8733 CAM_TARGET_WILDCARD, 8728 CAM_TARGET_WILDCARD,
8734 CAM_TARGET_WILDCARD, 8729 CAM_TARGET_WILDCARD,
8735 CAM_LUN_WILDCARD, 8730 CAM_LUN_WILDCARD,
@@ -8868,7 +8863,7 @@ ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset)
8868 } 8863 }
8869 8864
8870 /* Notify the XPT that a bus reset occurred */ 8865 /* Notify the XPT that a bus reset occurred */
8871 ahd_send_async(ahd, devinfo.channel, CAM_TARGET_WILDCARD, 8866 ahd_send_async(ahd, caminfo.channel, CAM_TARGET_WILDCARD,
8872 CAM_LUN_WILDCARD, AC_BUS_RESET); 8867 CAM_LUN_WILDCARD, AC_BUS_RESET);
8873 8868
8874 ahd_restart(ahd); 8869 ahd_restart(ahd);
diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.c b/drivers/scsi/aic7xxx/aic79xx_pci.c
index c25b6adffbf9..a734d77e880e 100644
--- a/drivers/scsi/aic7xxx/aic79xx_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_pci.c
@@ -223,10 +223,10 @@ static const char *pci_bus_modes[] =
223 "PCI bus mode unknown", 223 "PCI bus mode unknown",
224 "PCI bus mode unknown", 224 "PCI bus mode unknown",
225 "PCI bus mode unknown", 225 "PCI bus mode unknown",
226 "PCI-X 101-133Mhz", 226 "PCI-X 101-133MHz",
227 "PCI-X 67-100Mhz", 227 "PCI-X 67-100MHz",
228 "PCI-X 50-66Mhz", 228 "PCI-X 50-66MHz",
229 "PCI 33 or 66Mhz" 229 "PCI 33 or 66MHz"
230}; 230};
231 231
232#define TESTMODE 0x00000800ul 232#define TESTMODE 0x00000800ul
@@ -337,8 +337,6 @@ ahd_pci_config(struct ahd_softc *ahd, const struct ahd_pci_identity *entry)
337 * 64bit bus (PCI64BIT set in devconfig). 337 * 64bit bus (PCI64BIT set in devconfig).
338 */ 338 */
339 if ((ahd->flags & (AHD_39BIT_ADDRESSING|AHD_64BIT_ADDRESSING)) != 0) { 339 if ((ahd->flags & (AHD_39BIT_ADDRESSING|AHD_64BIT_ADDRESSING)) != 0) {
340 uint32_t devconfig;
341
342 if (bootverbose) 340 if (bootverbose)
343 printf("%s: Enabling 39Bit Addressing\n", 341 printf("%s: Enabling 39Bit Addressing\n",
344 ahd_name(ahd)); 342 ahd_name(ahd));
@@ -483,8 +481,6 @@ ahd_pci_test_register_access(struct ahd_softc *ahd)
483 goto fail; 481 goto fail;
484 482
485 if ((ahd_inb(ahd, INTSTAT) & PCIINT) != 0) { 483 if ((ahd_inb(ahd, INTSTAT) & PCIINT) != 0) {
486 u_int targpcistat;
487
488 ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); 484 ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
489 targpcistat = ahd_inb(ahd, TARGPCISTAT); 485 targpcistat = ahd_inb(ahd, TARGPCISTAT);
490 if ((targpcistat & STA) != 0) 486 if ((targpcistat & STA) != 0)
diff --git a/drivers/scsi/aic7xxx/aic79xx_reg.h_shipped b/drivers/scsi/aic7xxx/aic79xx_reg.h_shipped
index c21ceab8e913..cdcead071ef6 100644
--- a/drivers/scsi/aic7xxx/aic79xx_reg.h_shipped
+++ b/drivers/scsi/aic7xxx/aic79xx_reg.h_shipped
@@ -34,13 +34,6 @@ ahd_reg_print_t ahd_seqintcode_print;
34#endif 34#endif
35 35
36#if AIC_DEBUG_REGISTERS 36#if AIC_DEBUG_REGISTERS
37ahd_reg_print_t ahd_clrint_print;
38#else
39#define ahd_clrint_print(regvalue, cur_col, wrap) \
40 ahd_print_register(NULL, 0, "CLRINT", 0x03, regvalue, cur_col, wrap)
41#endif
42
43#if AIC_DEBUG_REGISTERS
44ahd_reg_print_t ahd_error_print; 37ahd_reg_print_t ahd_error_print;
45#else 38#else
46#define ahd_error_print(regvalue, cur_col, wrap) \ 39#define ahd_error_print(regvalue, cur_col, wrap) \
@@ -48,20 +41,6 @@ ahd_reg_print_t ahd_error_print;
48#endif 41#endif
49 42
50#if AIC_DEBUG_REGISTERS 43#if AIC_DEBUG_REGISTERS
51ahd_reg_print_t ahd_hcntrl_print;
52#else
53#define ahd_hcntrl_print(regvalue, cur_col, wrap) \
54 ahd_print_register(NULL, 0, "HCNTRL", 0x05, regvalue, cur_col, wrap)
55#endif
56
57#if AIC_DEBUG_REGISTERS
58ahd_reg_print_t ahd_hnscb_qoff_print;
59#else
60#define ahd_hnscb_qoff_print(regvalue, cur_col, wrap) \
61 ahd_print_register(NULL, 0, "HNSCB_QOFF", 0x06, regvalue, cur_col, wrap)
62#endif
63
64#if AIC_DEBUG_REGISTERS
65ahd_reg_print_t ahd_hescb_qoff_print; 44ahd_reg_print_t ahd_hescb_qoff_print;
66#else 45#else
67#define ahd_hescb_qoff_print(regvalue, cur_col, wrap) \ 46#define ahd_hescb_qoff_print(regvalue, cur_col, wrap) \
@@ -97,13 +76,6 @@ ahd_reg_print_t ahd_swtimer_print;
97#endif 76#endif
98 77
99#if AIC_DEBUG_REGISTERS 78#if AIC_DEBUG_REGISTERS
100ahd_reg_print_t ahd_snscb_qoff_print;
101#else
102#define ahd_snscb_qoff_print(regvalue, cur_col, wrap) \
103 ahd_print_register(NULL, 0, "SNSCB_QOFF", 0x10, regvalue, cur_col, wrap)
104#endif
105
106#if AIC_DEBUG_REGISTERS
107ahd_reg_print_t ahd_sescb_qoff_print; 79ahd_reg_print_t ahd_sescb_qoff_print;
108#else 80#else
109#define ahd_sescb_qoff_print(regvalue, cur_col, wrap) \ 81#define ahd_sescb_qoff_print(regvalue, cur_col, wrap) \
@@ -111,20 +83,6 @@ ahd_reg_print_t ahd_sescb_qoff_print;
111#endif 83#endif
112 84
113#if AIC_DEBUG_REGISTERS 85#if AIC_DEBUG_REGISTERS
114ahd_reg_print_t ahd_sdscb_qoff_print;
115#else
116#define ahd_sdscb_qoff_print(regvalue, cur_col, wrap) \
117 ahd_print_register(NULL, 0, "SDSCB_QOFF", 0x14, regvalue, cur_col, wrap)
118#endif
119
120#if AIC_DEBUG_REGISTERS
121ahd_reg_print_t ahd_qoff_ctlsta_print;
122#else
123#define ahd_qoff_ctlsta_print(regvalue, cur_col, wrap) \
124 ahd_print_register(NULL, 0, "QOFF_CTLSTA", 0x16, regvalue, cur_col, wrap)
125#endif
126
127#if AIC_DEBUG_REGISTERS
128ahd_reg_print_t ahd_intctl_print; 86ahd_reg_print_t ahd_intctl_print;
129#else 87#else
130#define ahd_intctl_print(regvalue, cur_col, wrap) \ 88#define ahd_intctl_print(regvalue, cur_col, wrap) \
@@ -139,13 +97,6 @@ ahd_reg_print_t ahd_dfcntrl_print;
139#endif 97#endif
140 98
141#if AIC_DEBUG_REGISTERS 99#if AIC_DEBUG_REGISTERS
142ahd_reg_print_t ahd_dscommand0_print;
143#else
144#define ahd_dscommand0_print(regvalue, cur_col, wrap) \
145 ahd_print_register(NULL, 0, "DSCOMMAND0", 0x19, regvalue, cur_col, wrap)
146#endif
147
148#if AIC_DEBUG_REGISTERS
149ahd_reg_print_t ahd_dfstatus_print; 100ahd_reg_print_t ahd_dfstatus_print;
150#else 101#else
151#define ahd_dfstatus_print(regvalue, cur_col, wrap) \ 102#define ahd_dfstatus_print(regvalue, cur_col, wrap) \
@@ -160,13 +111,6 @@ ahd_reg_print_t ahd_sg_cache_shadow_print;
160#endif 111#endif
161 112
162#if AIC_DEBUG_REGISTERS 113#if AIC_DEBUG_REGISTERS
163ahd_reg_print_t ahd_sg_cache_pre_print;
164#else
165#define ahd_sg_cache_pre_print(regvalue, cur_col, wrap) \
166 ahd_print_register(NULL, 0, "SG_CACHE_PRE", 0x1b, regvalue, cur_col, wrap)
167#endif
168
169#if AIC_DEBUG_REGISTERS
170ahd_reg_print_t ahd_lqin_print; 114ahd_reg_print_t ahd_lqin_print;
171#else 115#else
172#define ahd_lqin_print(regvalue, cur_col, wrap) \ 116#define ahd_lqin_print(regvalue, cur_col, wrap) \
@@ -293,13 +237,6 @@ ahd_reg_print_t ahd_sxfrctl0_print;
293#endif 237#endif
294 238
295#if AIC_DEBUG_REGISTERS 239#if AIC_DEBUG_REGISTERS
296ahd_reg_print_t ahd_sxfrctl1_print;
297#else
298#define ahd_sxfrctl1_print(regvalue, cur_col, wrap) \
299 ahd_print_register(NULL, 0, "SXFRCTL1", 0x3d, regvalue, cur_col, wrap)
300#endif
301
302#if AIC_DEBUG_REGISTERS
303ahd_reg_print_t ahd_dffstat_print; 240ahd_reg_print_t ahd_dffstat_print;
304#else 241#else
305#define ahd_dffstat_print(regvalue, cur_col, wrap) \ 242#define ahd_dffstat_print(regvalue, cur_col, wrap) \
@@ -314,13 +251,6 @@ ahd_reg_print_t ahd_multargid_print;
314#endif 251#endif
315 252
316#if AIC_DEBUG_REGISTERS 253#if AIC_DEBUG_REGISTERS
317ahd_reg_print_t ahd_scsisigo_print;
318#else
319#define ahd_scsisigo_print(regvalue, cur_col, wrap) \
320 ahd_print_register(NULL, 0, "SCSISIGO", 0x40, regvalue, cur_col, wrap)
321#endif
322
323#if AIC_DEBUG_REGISTERS
324ahd_reg_print_t ahd_scsisigi_print; 254ahd_reg_print_t ahd_scsisigi_print;
325#else 255#else
326#define ahd_scsisigi_print(regvalue, cur_col, wrap) \ 256#define ahd_scsisigi_print(regvalue, cur_col, wrap) \
@@ -363,13 +293,6 @@ ahd_reg_print_t ahd_selid_print;
363#endif 293#endif
364 294
365#if AIC_DEBUG_REGISTERS 295#if AIC_DEBUG_REGISTERS
366ahd_reg_print_t ahd_optionmode_print;
367#else
368#define ahd_optionmode_print(regvalue, cur_col, wrap) \
369 ahd_print_register(NULL, 0, "OPTIONMODE", 0x4a, regvalue, cur_col, wrap)
370#endif
371
372#if AIC_DEBUG_REGISTERS
373ahd_reg_print_t ahd_sblkctl_print; 296ahd_reg_print_t ahd_sblkctl_print;
374#else 297#else
375#define ahd_sblkctl_print(regvalue, cur_col, wrap) \ 298#define ahd_sblkctl_print(regvalue, cur_col, wrap) \
@@ -391,13 +314,6 @@ ahd_reg_print_t ahd_simode0_print;
391#endif 314#endif
392 315
393#if AIC_DEBUG_REGISTERS 316#if AIC_DEBUG_REGISTERS
394ahd_reg_print_t ahd_clrsint0_print;
395#else
396#define ahd_clrsint0_print(regvalue, cur_col, wrap) \
397 ahd_print_register(NULL, 0, "CLRSINT0", 0x4b, regvalue, cur_col, wrap)
398#endif
399
400#if AIC_DEBUG_REGISTERS
401ahd_reg_print_t ahd_sstat1_print; 317ahd_reg_print_t ahd_sstat1_print;
402#else 318#else
403#define ahd_sstat1_print(regvalue, cur_col, wrap) \ 319#define ahd_sstat1_print(regvalue, cur_col, wrap) \
@@ -405,13 +321,6 @@ ahd_reg_print_t ahd_sstat1_print;
405#endif 321#endif
406 322
407#if AIC_DEBUG_REGISTERS 323#if AIC_DEBUG_REGISTERS
408ahd_reg_print_t ahd_clrsint1_print;
409#else
410#define ahd_clrsint1_print(regvalue, cur_col, wrap) \
411 ahd_print_register(NULL, 0, "CLRSINT1", 0x4c, regvalue, cur_col, wrap)
412#endif
413
414#if AIC_DEBUG_REGISTERS
415ahd_reg_print_t ahd_sstat2_print; 324ahd_reg_print_t ahd_sstat2_print;
416#else 325#else
417#define ahd_sstat2_print(regvalue, cur_col, wrap) \ 326#define ahd_sstat2_print(regvalue, cur_col, wrap) \
@@ -461,17 +370,17 @@ ahd_reg_print_t ahd_lqistat0_print;
461#endif 370#endif
462 371
463#if AIC_DEBUG_REGISTERS 372#if AIC_DEBUG_REGISTERS
464ahd_reg_print_t ahd_lqimode0_print; 373ahd_reg_print_t ahd_clrlqiint0_print;
465#else 374#else
466#define ahd_lqimode0_print(regvalue, cur_col, wrap) \ 375#define ahd_clrlqiint0_print(regvalue, cur_col, wrap) \
467 ahd_print_register(NULL, 0, "LQIMODE0", 0x50, regvalue, cur_col, wrap) 376 ahd_print_register(NULL, 0, "CLRLQIINT0", 0x50, regvalue, cur_col, wrap)
468#endif 377#endif
469 378
470#if AIC_DEBUG_REGISTERS 379#if AIC_DEBUG_REGISTERS
471ahd_reg_print_t ahd_clrlqiint0_print; 380ahd_reg_print_t ahd_lqimode0_print;
472#else 381#else
473#define ahd_clrlqiint0_print(regvalue, cur_col, wrap) \ 382#define ahd_lqimode0_print(regvalue, cur_col, wrap) \
474 ahd_print_register(NULL, 0, "CLRLQIINT0", 0x50, regvalue, cur_col, wrap) 383 ahd_print_register(NULL, 0, "LQIMODE0", 0x50, regvalue, cur_col, wrap)
475#endif 384#endif
476 385
477#if AIC_DEBUG_REGISTERS 386#if AIC_DEBUG_REGISTERS
@@ -629,17 +538,17 @@ ahd_reg_print_t ahd_seqintsrc_print;
629#endif 538#endif
630 539
631#if AIC_DEBUG_REGISTERS 540#if AIC_DEBUG_REGISTERS
632ahd_reg_print_t ahd_seqimode_print; 541ahd_reg_print_t ahd_currscb_print;
633#else 542#else
634#define ahd_seqimode_print(regvalue, cur_col, wrap) \ 543#define ahd_currscb_print(regvalue, cur_col, wrap) \
635 ahd_print_register(NULL, 0, "SEQIMODE", 0x5c, regvalue, cur_col, wrap) 544 ahd_print_register(NULL, 0, "CURRSCB", 0x5c, regvalue, cur_col, wrap)
636#endif 545#endif
637 546
638#if AIC_DEBUG_REGISTERS 547#if AIC_DEBUG_REGISTERS
639ahd_reg_print_t ahd_currscb_print; 548ahd_reg_print_t ahd_seqimode_print;
640#else 549#else
641#define ahd_currscb_print(regvalue, cur_col, wrap) \ 550#define ahd_seqimode_print(regvalue, cur_col, wrap) \
642 ahd_print_register(NULL, 0, "CURRSCB", 0x5c, regvalue, cur_col, wrap) 551 ahd_print_register(NULL, 0, "SEQIMODE", 0x5c, regvalue, cur_col, wrap)
643#endif 552#endif
644 553
645#if AIC_DEBUG_REGISTERS 554#if AIC_DEBUG_REGISTERS
@@ -657,13 +566,6 @@ ahd_reg_print_t ahd_lastscb_print;
657#endif 566#endif
658 567
659#if AIC_DEBUG_REGISTERS 568#if AIC_DEBUG_REGISTERS
660ahd_reg_print_t ahd_shaddr_print;
661#else
662#define ahd_shaddr_print(regvalue, cur_col, wrap) \
663 ahd_print_register(NULL, 0, "SHADDR", 0x60, regvalue, cur_col, wrap)
664#endif
665
666#if AIC_DEBUG_REGISTERS
667ahd_reg_print_t ahd_negoaddr_print; 569ahd_reg_print_t ahd_negoaddr_print;
668#else 570#else
669#define ahd_negoaddr_print(regvalue, cur_col, wrap) \ 571#define ahd_negoaddr_print(regvalue, cur_col, wrap) \
@@ -748,27 +650,6 @@ ahd_reg_print_t ahd_seloid_print;
748#endif 650#endif
749 651
750#if AIC_DEBUG_REGISTERS 652#if AIC_DEBUG_REGISTERS
751ahd_reg_print_t ahd_haddr_print;
752#else
753#define ahd_haddr_print(regvalue, cur_col, wrap) \
754 ahd_print_register(NULL, 0, "HADDR", 0x70, regvalue, cur_col, wrap)
755#endif
756
757#if AIC_DEBUG_REGISTERS
758ahd_reg_print_t ahd_hcnt_print;
759#else
760#define ahd_hcnt_print(regvalue, cur_col, wrap) \
761 ahd_print_register(NULL, 0, "HCNT", 0x78, regvalue, cur_col, wrap)
762#endif
763
764#if AIC_DEBUG_REGISTERS
765ahd_reg_print_t ahd_sghaddr_print;
766#else
767#define ahd_sghaddr_print(regvalue, cur_col, wrap) \
768 ahd_print_register(NULL, 0, "SGHADDR", 0x7c, regvalue, cur_col, wrap)
769#endif
770
771#if AIC_DEBUG_REGISTERS
772ahd_reg_print_t ahd_scbhaddr_print; 653ahd_reg_print_t ahd_scbhaddr_print;
773#else 654#else
774#define ahd_scbhaddr_print(regvalue, cur_col, wrap) \ 655#define ahd_scbhaddr_print(regvalue, cur_col, wrap) \
@@ -776,10 +657,10 @@ ahd_reg_print_t ahd_scbhaddr_print;
776#endif 657#endif
777 658
778#if AIC_DEBUG_REGISTERS 659#if AIC_DEBUG_REGISTERS
779ahd_reg_print_t ahd_sghcnt_print; 660ahd_reg_print_t ahd_sghaddr_print;
780#else 661#else
781#define ahd_sghcnt_print(regvalue, cur_col, wrap) \ 662#define ahd_sghaddr_print(regvalue, cur_col, wrap) \
782 ahd_print_register(NULL, 0, "SGHCNT", 0x84, regvalue, cur_col, wrap) 663 ahd_print_register(NULL, 0, "SGHADDR", 0x7c, regvalue, cur_col, wrap)
783#endif 664#endif
784 665
785#if AIC_DEBUG_REGISTERS 666#if AIC_DEBUG_REGISTERS
@@ -790,10 +671,10 @@ ahd_reg_print_t ahd_scbhcnt_print;
790#endif 671#endif
791 672
792#if AIC_DEBUG_REGISTERS 673#if AIC_DEBUG_REGISTERS
793ahd_reg_print_t ahd_dff_thrsh_print; 674ahd_reg_print_t ahd_sghcnt_print;
794#else 675#else
795#define ahd_dff_thrsh_print(regvalue, cur_col, wrap) \ 676#define ahd_sghcnt_print(regvalue, cur_col, wrap) \
796 ahd_print_register(NULL, 0, "DFF_THRSH", 0x88, regvalue, cur_col, wrap) 677 ahd_print_register(NULL, 0, "SGHCNT", 0x84, regvalue, cur_col, wrap)
797#endif 678#endif
798 679
799#if AIC_DEBUG_REGISTERS 680#if AIC_DEBUG_REGISTERS
@@ -867,13 +748,6 @@ ahd_reg_print_t ahd_targpcistat_print;
867#endif 748#endif
868 749
869#if AIC_DEBUG_REGISTERS 750#if AIC_DEBUG_REGISTERS
870ahd_reg_print_t ahd_scbptr_print;
871#else
872#define ahd_scbptr_print(regvalue, cur_col, wrap) \
873 ahd_print_register(NULL, 0, "SCBPTR", 0xa8, regvalue, cur_col, wrap)
874#endif
875
876#if AIC_DEBUG_REGISTERS
877ahd_reg_print_t ahd_scbautoptr_print; 751ahd_reg_print_t ahd_scbautoptr_print;
878#else 752#else
879#define ahd_scbautoptr_print(regvalue, cur_col, wrap) \ 753#define ahd_scbautoptr_print(regvalue, cur_col, wrap) \
@@ -881,13 +755,6 @@ ahd_reg_print_t ahd_scbautoptr_print;
881#endif 755#endif
882 756
883#if AIC_DEBUG_REGISTERS 757#if AIC_DEBUG_REGISTERS
884ahd_reg_print_t ahd_ccsgaddr_print;
885#else
886#define ahd_ccsgaddr_print(regvalue, cur_col, wrap) \
887 ahd_print_register(NULL, 0, "CCSGADDR", 0xac, regvalue, cur_col, wrap)
888#endif
889
890#if AIC_DEBUG_REGISTERS
891ahd_reg_print_t ahd_ccscbaddr_print; 758ahd_reg_print_t ahd_ccscbaddr_print;
892#else 759#else
893#define ahd_ccscbaddr_print(regvalue, cur_col, wrap) \ 760#define ahd_ccscbaddr_print(regvalue, cur_col, wrap) \
@@ -909,13 +776,6 @@ ahd_reg_print_t ahd_ccsgctl_print;
909#endif 776#endif
910 777
911#if AIC_DEBUG_REGISTERS 778#if AIC_DEBUG_REGISTERS
912ahd_reg_print_t ahd_ccsgram_print;
913#else
914#define ahd_ccsgram_print(regvalue, cur_col, wrap) \
915 ahd_print_register(NULL, 0, "CCSGRAM", 0xb0, regvalue, cur_col, wrap)
916#endif
917
918#if AIC_DEBUG_REGISTERS
919ahd_reg_print_t ahd_ccscbram_print; 779ahd_reg_print_t ahd_ccscbram_print;
920#else 780#else
921#define ahd_ccscbram_print(regvalue, cur_col, wrap) \ 781#define ahd_ccscbram_print(regvalue, cur_col, wrap) \
@@ -930,13 +790,6 @@ ahd_reg_print_t ahd_brddat_print;
930#endif 790#endif
931 791
932#if AIC_DEBUG_REGISTERS 792#if AIC_DEBUG_REGISTERS
933ahd_reg_print_t ahd_brdctl_print;
934#else
935#define ahd_brdctl_print(regvalue, cur_col, wrap) \
936 ahd_print_register(NULL, 0, "BRDCTL", 0xb9, regvalue, cur_col, wrap)
937#endif
938
939#if AIC_DEBUG_REGISTERS
940ahd_reg_print_t ahd_seeadr_print; 793ahd_reg_print_t ahd_seeadr_print;
941#else 794#else
942#define ahd_seeadr_print(regvalue, cur_col, wrap) \ 795#define ahd_seeadr_print(regvalue, cur_col, wrap) \
@@ -972,13 +825,6 @@ ahd_reg_print_t ahd_dspdatactl_print;
972#endif 825#endif
973 826
974#if AIC_DEBUG_REGISTERS 827#if AIC_DEBUG_REGISTERS
975ahd_reg_print_t ahd_dfdat_print;
976#else
977#define ahd_dfdat_print(regvalue, cur_col, wrap) \
978 ahd_print_register(NULL, 0, "DFDAT", 0xc4, regvalue, cur_col, wrap)
979#endif
980
981#if AIC_DEBUG_REGISTERS
982ahd_reg_print_t ahd_dspselect_print; 828ahd_reg_print_t ahd_dspselect_print;
983#else 829#else
984#define ahd_dspselect_print(regvalue, cur_col, wrap) \ 830#define ahd_dspselect_print(regvalue, cur_col, wrap) \
@@ -1000,13 +846,6 @@ ahd_reg_print_t ahd_seqctl0_print;
1000#endif 846#endif
1001 847
1002#if AIC_DEBUG_REGISTERS 848#if AIC_DEBUG_REGISTERS
1003ahd_reg_print_t ahd_flags_print;
1004#else
1005#define ahd_flags_print(regvalue, cur_col, wrap) \
1006 ahd_print_register(NULL, 0, "FLAGS", 0xd8, regvalue, cur_col, wrap)
1007#endif
1008
1009#if AIC_DEBUG_REGISTERS
1010ahd_reg_print_t ahd_seqintctl_print; 849ahd_reg_print_t ahd_seqintctl_print;
1011#else 850#else
1012#define ahd_seqintctl_print(regvalue, cur_col, wrap) \ 851#define ahd_seqintctl_print(regvalue, cur_col, wrap) \
@@ -1014,13 +853,6 @@ ahd_reg_print_t ahd_seqintctl_print;
1014#endif 853#endif
1015 854
1016#if AIC_DEBUG_REGISTERS 855#if AIC_DEBUG_REGISTERS
1017ahd_reg_print_t ahd_seqram_print;
1018#else
1019#define ahd_seqram_print(regvalue, cur_col, wrap) \
1020 ahd_print_register(NULL, 0, "SEQRAM", 0xda, regvalue, cur_col, wrap)
1021#endif
1022
1023#if AIC_DEBUG_REGISTERS
1024ahd_reg_print_t ahd_prgmcnt_print; 856ahd_reg_print_t ahd_prgmcnt_print;
1025#else 857#else
1026#define ahd_prgmcnt_print(regvalue, cur_col, wrap) \ 858#define ahd_prgmcnt_print(regvalue, cur_col, wrap) \
@@ -1028,41 +860,6 @@ ahd_reg_print_t ahd_prgmcnt_print;
1028#endif 860#endif
1029 861
1030#if AIC_DEBUG_REGISTERS 862#if AIC_DEBUG_REGISTERS
1031ahd_reg_print_t ahd_accum_print;
1032#else
1033#define ahd_accum_print(regvalue, cur_col, wrap) \
1034 ahd_print_register(NULL, 0, "ACCUM", 0xe0, regvalue, cur_col, wrap)
1035#endif
1036
1037#if AIC_DEBUG_REGISTERS
1038ahd_reg_print_t ahd_sindex_print;
1039#else
1040#define ahd_sindex_print(regvalue, cur_col, wrap) \
1041 ahd_print_register(NULL, 0, "SINDEX", 0xe2, regvalue, cur_col, wrap)
1042#endif
1043
1044#if AIC_DEBUG_REGISTERS
1045ahd_reg_print_t ahd_dindex_print;
1046#else
1047#define ahd_dindex_print(regvalue, cur_col, wrap) \
1048 ahd_print_register(NULL, 0, "DINDEX", 0xe4, regvalue, cur_col, wrap)
1049#endif
1050
1051#if AIC_DEBUG_REGISTERS
1052ahd_reg_print_t ahd_allones_print;
1053#else
1054#define ahd_allones_print(regvalue, cur_col, wrap) \
1055 ahd_print_register(NULL, 0, "ALLONES", 0xe8, regvalue, cur_col, wrap)
1056#endif
1057
1058#if AIC_DEBUG_REGISTERS
1059ahd_reg_print_t ahd_allzeros_print;
1060#else
1061#define ahd_allzeros_print(regvalue, cur_col, wrap) \
1062 ahd_print_register(NULL, 0, "ALLZEROS", 0xea, regvalue, cur_col, wrap)
1063#endif
1064
1065#if AIC_DEBUG_REGISTERS
1066ahd_reg_print_t ahd_none_print; 863ahd_reg_print_t ahd_none_print;
1067#else 864#else
1068#define ahd_none_print(regvalue, cur_col, wrap) \ 865#define ahd_none_print(regvalue, cur_col, wrap) \
@@ -1070,27 +867,6 @@ ahd_reg_print_t ahd_none_print;
1070#endif 867#endif
1071 868
1072#if AIC_DEBUG_REGISTERS 869#if AIC_DEBUG_REGISTERS
1073ahd_reg_print_t ahd_sindir_print;
1074#else
1075#define ahd_sindir_print(regvalue, cur_col, wrap) \
1076 ahd_print_register(NULL, 0, "SINDIR", 0xec, regvalue, cur_col, wrap)
1077#endif
1078
1079#if AIC_DEBUG_REGISTERS
1080ahd_reg_print_t ahd_dindir_print;
1081#else
1082#define ahd_dindir_print(regvalue, cur_col, wrap) \
1083 ahd_print_register(NULL, 0, "DINDIR", 0xed, regvalue, cur_col, wrap)
1084#endif
1085
1086#if AIC_DEBUG_REGISTERS
1087ahd_reg_print_t ahd_stack_print;
1088#else
1089#define ahd_stack_print(regvalue, cur_col, wrap) \
1090 ahd_print_register(NULL, 0, "STACK", 0xf2, regvalue, cur_col, wrap)
1091#endif
1092
1093#if AIC_DEBUG_REGISTERS
1094ahd_reg_print_t ahd_intvec1_addr_print; 870ahd_reg_print_t ahd_intvec1_addr_print;
1095#else 871#else
1096#define ahd_intvec1_addr_print(regvalue, cur_col, wrap) \ 872#define ahd_intvec1_addr_print(regvalue, cur_col, wrap) \
@@ -1126,17 +902,17 @@ ahd_reg_print_t ahd_accum_save_print;
1126#endif 902#endif
1127 903
1128#if AIC_DEBUG_REGISTERS 904#if AIC_DEBUG_REGISTERS
1129ahd_reg_print_t ahd_sram_base_print; 905ahd_reg_print_t ahd_waiting_scb_tails_print;
1130#else 906#else
1131#define ahd_sram_base_print(regvalue, cur_col, wrap) \ 907#define ahd_waiting_scb_tails_print(regvalue, cur_col, wrap) \
1132 ahd_print_register(NULL, 0, "SRAM_BASE", 0x100, regvalue, cur_col, wrap) 908 ahd_print_register(NULL, 0, "WAITING_SCB_TAILS", 0x100, regvalue, cur_col, wrap)
1133#endif 909#endif
1134 910
1135#if AIC_DEBUG_REGISTERS 911#if AIC_DEBUG_REGISTERS
1136ahd_reg_print_t ahd_waiting_scb_tails_print; 912ahd_reg_print_t ahd_sram_base_print;
1137#else 913#else
1138#define ahd_waiting_scb_tails_print(regvalue, cur_col, wrap) \ 914#define ahd_sram_base_print(regvalue, cur_col, wrap) \
1139 ahd_print_register(NULL, 0, "WAITING_SCB_TAILS", 0x100, regvalue, cur_col, wrap) 915 ahd_print_register(NULL, 0, "SRAM_BASE", 0x100, regvalue, cur_col, wrap)
1140#endif 916#endif
1141 917
1142#if AIC_DEBUG_REGISTERS 918#if AIC_DEBUG_REGISTERS
@@ -1224,13 +1000,6 @@ ahd_reg_print_t ahd_msg_out_print;
1224#endif 1000#endif
1225 1001
1226#if AIC_DEBUG_REGISTERS 1002#if AIC_DEBUG_REGISTERS
1227ahd_reg_print_t ahd_dmaparams_print;
1228#else
1229#define ahd_dmaparams_print(regvalue, cur_col, wrap) \
1230 ahd_print_register(NULL, 0, "DMAPARAMS", 0x138, regvalue, cur_col, wrap)
1231#endif
1232
1233#if AIC_DEBUG_REGISTERS
1234ahd_reg_print_t ahd_seq_flags_print; 1003ahd_reg_print_t ahd_seq_flags_print;
1235#else 1004#else
1236#define ahd_seq_flags_print(regvalue, cur_col, wrap) \ 1005#define ahd_seq_flags_print(regvalue, cur_col, wrap) \
@@ -1238,20 +1007,6 @@ ahd_reg_print_t ahd_seq_flags_print;
1238#endif 1007#endif
1239 1008
1240#if AIC_DEBUG_REGISTERS 1009#if AIC_DEBUG_REGISTERS
1241ahd_reg_print_t ahd_saved_scsiid_print;
1242#else
1243#define ahd_saved_scsiid_print(regvalue, cur_col, wrap) \
1244 ahd_print_register(NULL, 0, "SAVED_SCSIID", 0x13a, regvalue, cur_col, wrap)
1245#endif
1246
1247#if AIC_DEBUG_REGISTERS
1248ahd_reg_print_t ahd_saved_lun_print;
1249#else
1250#define ahd_saved_lun_print(regvalue, cur_col, wrap) \
1251 ahd_print_register(NULL, 0, "SAVED_LUN", 0x13b, regvalue, cur_col, wrap)
1252#endif
1253
1254#if AIC_DEBUG_REGISTERS
1255ahd_reg_print_t ahd_lastphase_print; 1010ahd_reg_print_t ahd_lastphase_print;
1256#else 1011#else
1257#define ahd_lastphase_print(regvalue, cur_col, wrap) \ 1012#define ahd_lastphase_print(regvalue, cur_col, wrap) \
@@ -1273,20 +1028,6 @@ ahd_reg_print_t ahd_kernel_tqinpos_print;
1273#endif 1028#endif
1274 1029
1275#if AIC_DEBUG_REGISTERS 1030#if AIC_DEBUG_REGISTERS
1276ahd_reg_print_t ahd_tqinpos_print;
1277#else
1278#define ahd_tqinpos_print(regvalue, cur_col, wrap) \
1279 ahd_print_register(NULL, 0, "TQINPOS", 0x13f, regvalue, cur_col, wrap)
1280#endif
1281
1282#if AIC_DEBUG_REGISTERS
1283ahd_reg_print_t ahd_shared_data_addr_print;
1284#else
1285#define ahd_shared_data_addr_print(regvalue, cur_col, wrap) \
1286 ahd_print_register(NULL, 0, "SHARED_DATA_ADDR", 0x140, regvalue, cur_col, wrap)
1287#endif
1288
1289#if AIC_DEBUG_REGISTERS
1290ahd_reg_print_t ahd_qoutfifo_next_addr_print; 1031ahd_reg_print_t ahd_qoutfifo_next_addr_print;
1291#else 1032#else
1292#define ahd_qoutfifo_next_addr_print(regvalue, cur_col, wrap) \ 1033#define ahd_qoutfifo_next_addr_print(regvalue, cur_col, wrap) \
@@ -1294,20 +1035,6 @@ ahd_reg_print_t ahd_qoutfifo_next_addr_print;
1294#endif 1035#endif
1295 1036
1296#if AIC_DEBUG_REGISTERS 1037#if AIC_DEBUG_REGISTERS
1297ahd_reg_print_t ahd_arg_1_print;
1298#else
1299#define ahd_arg_1_print(regvalue, cur_col, wrap) \
1300 ahd_print_register(NULL, 0, "ARG_1", 0x148, regvalue, cur_col, wrap)
1301#endif
1302
1303#if AIC_DEBUG_REGISTERS
1304ahd_reg_print_t ahd_arg_2_print;
1305#else
1306#define ahd_arg_2_print(regvalue, cur_col, wrap) \
1307 ahd_print_register(NULL, 0, "ARG_2", 0x149, regvalue, cur_col, wrap)
1308#endif
1309
1310#if AIC_DEBUG_REGISTERS
1311ahd_reg_print_t ahd_last_msg_print; 1038ahd_reg_print_t ahd_last_msg_print;
1312#else 1039#else
1313#define ahd_last_msg_print(regvalue, cur_col, wrap) \ 1040#define ahd_last_msg_print(regvalue, cur_col, wrap) \
@@ -1406,13 +1133,6 @@ ahd_reg_print_t ahd_mk_message_scsiid_print;
1406#endif 1133#endif
1407 1134
1408#if AIC_DEBUG_REGISTERS 1135#if AIC_DEBUG_REGISTERS
1409ahd_reg_print_t ahd_scb_residual_datacnt_print;
1410#else
1411#define ahd_scb_residual_datacnt_print(regvalue, cur_col, wrap) \
1412 ahd_print_register(NULL, 0, "SCB_RESIDUAL_DATACNT", 0x180, regvalue, cur_col, wrap)
1413#endif
1414
1415#if AIC_DEBUG_REGISTERS
1416ahd_reg_print_t ahd_scb_base_print; 1136ahd_reg_print_t ahd_scb_base_print;
1417#else 1137#else
1418#define ahd_scb_base_print(regvalue, cur_col, wrap) \ 1138#define ahd_scb_base_print(regvalue, cur_col, wrap) \
@@ -1420,17 +1140,10 @@ ahd_reg_print_t ahd_scb_base_print;
1420#endif 1140#endif
1421 1141
1422#if AIC_DEBUG_REGISTERS 1142#if AIC_DEBUG_REGISTERS
1423ahd_reg_print_t ahd_scb_residual_sgptr_print; 1143ahd_reg_print_t ahd_scb_residual_datacnt_print;
1424#else
1425#define ahd_scb_residual_sgptr_print(regvalue, cur_col, wrap) \
1426 ahd_print_register(NULL, 0, "SCB_RESIDUAL_SGPTR", 0x184, regvalue, cur_col, wrap)
1427#endif
1428
1429#if AIC_DEBUG_REGISTERS
1430ahd_reg_print_t ahd_scb_scsi_status_print;
1431#else 1144#else
1432#define ahd_scb_scsi_status_print(regvalue, cur_col, wrap) \ 1145#define ahd_scb_residual_datacnt_print(regvalue, cur_col, wrap) \
1433 ahd_print_register(NULL, 0, "SCB_SCSI_STATUS", 0x188, regvalue, cur_col, wrap) 1146 ahd_print_register(NULL, 0, "SCB_RESIDUAL_DATACNT", 0x180, regvalue, cur_col, wrap)
1434#endif 1147#endif
1435 1148
1436#if AIC_DEBUG_REGISTERS 1149#if AIC_DEBUG_REGISTERS
@@ -1476,13 +1189,6 @@ ahd_reg_print_t ahd_scb_task_attribute_print;
1476#endif 1189#endif
1477 1190
1478#if AIC_DEBUG_REGISTERS 1191#if AIC_DEBUG_REGISTERS
1479ahd_reg_print_t ahd_scb_cdb_len_print;
1480#else
1481#define ahd_scb_cdb_len_print(regvalue, cur_col, wrap) \
1482 ahd_print_register(NULL, 0, "SCB_CDB_LEN", 0x196, regvalue, cur_col, wrap)
1483#endif
1484
1485#if AIC_DEBUG_REGISTERS
1486ahd_reg_print_t ahd_scb_task_management_print; 1192ahd_reg_print_t ahd_scb_task_management_print;
1487#else 1193#else
1488#define ahd_scb_task_management_print(regvalue, cur_col, wrap) \ 1194#define ahd_scb_task_management_print(regvalue, cur_col, wrap) \
@@ -1518,13 +1224,6 @@ ahd_reg_print_t ahd_scb_busaddr_print;
1518#endif 1224#endif
1519 1225
1520#if AIC_DEBUG_REGISTERS 1226#if AIC_DEBUG_REGISTERS
1521ahd_reg_print_t ahd_scb_next_print;
1522#else
1523#define ahd_scb_next_print(regvalue, cur_col, wrap) \
1524 ahd_print_register(NULL, 0, "SCB_NEXT", 0x1ac, regvalue, cur_col, wrap)
1525#endif
1526
1527#if AIC_DEBUG_REGISTERS
1528ahd_reg_print_t ahd_scb_next2_print; 1227ahd_reg_print_t ahd_scb_next2_print;
1529#else 1228#else
1530#define ahd_scb_next2_print(regvalue, cur_col, wrap) \ 1229#define ahd_scb_next2_print(regvalue, cur_col, wrap) \
@@ -1717,10 +1416,10 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
1717 1416
1718#define SG_CACHE_PRE 0x1b 1417#define SG_CACHE_PRE 0x1b
1719 1418
1720#define TYPEPTR 0x20
1721
1722#define LQIN 0x20 1419#define LQIN 0x20
1723 1420
1421#define TYPEPTR 0x20
1422
1724#define TAGPTR 0x21 1423#define TAGPTR 0x21
1725 1424
1726#define LUNPTR 0x22 1425#define LUNPTR 0x22
@@ -1780,6 +1479,14 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
1780#define SINGLECMD 0x02 1479#define SINGLECMD 0x02
1781#define ABORTPENDING 0x01 1480#define ABORTPENDING 0x01
1782 1481
1482#define SCSBIST0 0x39
1483#define GSBISTERR 0x40
1484#define GSBISTDONE 0x20
1485#define GSBISTRUN 0x10
1486#define OSBISTERR 0x04
1487#define OSBISTDONE 0x02
1488#define OSBISTRUN 0x01
1489
1783#define LQCTL2 0x39 1490#define LQCTL2 0x39
1784#define LQIRETRY 0x80 1491#define LQIRETRY 0x80
1785#define LQICONTINUE 0x40 1492#define LQICONTINUE 0x40
@@ -1790,13 +1497,10 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
1790#define LQOTOIDLE 0x02 1497#define LQOTOIDLE 0x02
1791#define LQOPAUSE 0x01 1498#define LQOPAUSE 0x01
1792 1499
1793#define SCSBIST0 0x39 1500#define SCSBIST1 0x3a
1794#define GSBISTERR 0x40 1501#define NTBISTERR 0x04
1795#define GSBISTDONE 0x20 1502#define NTBISTDONE 0x02
1796#define GSBISTRUN 0x10 1503#define NTBISTRUN 0x01
1797#define OSBISTERR 0x04
1798#define OSBISTDONE 0x02
1799#define OSBISTRUN 0x01
1800 1504
1801#define SCSISEQ0 0x3a 1505#define SCSISEQ0 0x3a
1802#define TEMODEO 0x80 1506#define TEMODEO 0x80
@@ -1805,15 +1509,8 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
1805#define FORCEBUSFREE 0x10 1509#define FORCEBUSFREE 0x10
1806#define SCSIRSTO 0x01 1510#define SCSIRSTO 0x01
1807 1511
1808#define SCSBIST1 0x3a
1809#define NTBISTERR 0x04
1810#define NTBISTDONE 0x02
1811#define NTBISTRUN 0x01
1812
1813#define SCSISEQ1 0x3b 1512#define SCSISEQ1 0x3b
1814 1513
1815#define BUSINITID 0x3c
1816
1817#define SXFRCTL0 0x3c 1514#define SXFRCTL0 0x3c
1818#define DFON 0x80 1515#define DFON 0x80
1819#define DFPEXP 0x40 1516#define DFPEXP 0x40
@@ -1822,6 +1519,8 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
1822 1519
1823#define DLCOUNT 0x3c 1520#define DLCOUNT 0x3c
1824 1521
1522#define BUSINITID 0x3c
1523
1825#define SXFRCTL1 0x3d 1524#define SXFRCTL1 0x3d
1826#define BITBUCKET 0x80 1525#define BITBUCKET 0x80
1827#define ENSACHK 0x40 1526#define ENSACHK 0x40
@@ -1846,8 +1545,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
1846#define CURRFIFO_1 0x01 1545#define CURRFIFO_1 0x01
1847#define CURRFIFO_0 0x00 1546#define CURRFIFO_0 0x00
1848 1547
1849#define MULTARGID 0x40
1850
1851#define SCSISIGO 0x40 1548#define SCSISIGO 0x40
1852#define CDO 0x80 1549#define CDO 0x80
1853#define IOO 0x40 1550#define IOO 0x40
@@ -1858,6 +1555,8 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
1858#define REQO 0x02 1555#define REQO 0x02
1859#define ACKO 0x01 1556#define ACKO 0x01
1860 1557
1558#define MULTARGID 0x40
1559
1861#define SCSISIGI 0x41 1560#define SCSISIGI 0x41
1862#define ATNI 0x10 1561#define ATNI 0x10
1863#define SELI 0x08 1562#define SELI 0x08
@@ -1904,6 +1603,15 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
1904#define ENAB20 0x04 1603#define ENAB20 0x04
1905#define SELWIDE 0x02 1604#define SELWIDE 0x02
1906 1605
1606#define CLRSINT0 0x4b
1607#define CLRSELDO 0x40
1608#define CLRSELDI 0x20
1609#define CLRSELINGO 0x10
1610#define CLRIOERR 0x08
1611#define CLROVERRUN 0x04
1612#define CLRSPIORDY 0x02
1613#define CLRARBDO 0x01
1614
1907#define SSTAT0 0x4b 1615#define SSTAT0 0x4b
1908#define TARGET 0x80 1616#define TARGET 0x80
1909#define SELDO 0x40 1617#define SELDO 0x40
@@ -1923,14 +1631,14 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
1923#define ENSPIORDY 0x02 1631#define ENSPIORDY 0x02
1924#define ENARBDO 0x01 1632#define ENARBDO 0x01
1925 1633
1926#define CLRSINT0 0x4b 1634#define CLRSINT1 0x4c
1927#define CLRSELDO 0x40 1635#define CLRSELTIMEO 0x80
1928#define CLRSELDI 0x20 1636#define CLRATNO 0x40
1929#define CLRSELINGO 0x10 1637#define CLRSCSIRSTI 0x20
1930#define CLRIOERR 0x08 1638#define CLRBUSFREE 0x08
1931#define CLROVERRUN 0x04 1639#define CLRSCSIPERR 0x04
1932#define CLRSPIORDY 0x02 1640#define CLRSTRB2FAST 0x02
1933#define CLRARBDO 0x01 1641#define CLRREQINIT 0x01
1934 1642
1935#define SSTAT1 0x4c 1643#define SSTAT1 0x4c
1936#define SELTO 0x80 1644#define SELTO 0x80
@@ -1942,15 +1650,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
1942#define STRB2FAST 0x02 1650#define STRB2FAST 0x02
1943#define REQINIT 0x01 1651#define REQINIT 0x01
1944 1652
1945#define CLRSINT1 0x4c
1946#define CLRSELTIMEO 0x80
1947#define CLRATNO 0x40
1948#define CLRSCSIRSTI 0x20
1949#define CLRBUSFREE 0x08
1950#define CLRSCSIPERR 0x04
1951#define CLRSTRB2FAST 0x02
1952#define CLRREQINIT 0x01
1953
1954#define SSTAT2 0x4d 1653#define SSTAT2 0x4d
1955#define BUSFREETIME 0xc0 1654#define BUSFREETIME 0xc0
1956#define NONPACKREQ 0x20 1655#define NONPACKREQ 0x20
@@ -1998,14 +1697,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
1998#define LQIATNLQ 0x02 1697#define LQIATNLQ 0x02
1999#define LQIATNCMD 0x01 1698#define LQIATNCMD 0x01
2000 1699
2001#define LQIMODE0 0x50
2002#define ENLQIATNQASK 0x20
2003#define ENLQICRCT1 0x10
2004#define ENLQICRCT2 0x08
2005#define ENLQIBADLQT 0x04
2006#define ENLQIATNLQ 0x02
2007#define ENLQIATNCMD 0x01
2008
2009#define CLRLQIINT0 0x50 1700#define CLRLQIINT0 0x50
2010#define CLRLQIATNQAS 0x20 1701#define CLRLQIATNQAS 0x20
2011#define CLRLQICRCT1 0x10 1702#define CLRLQICRCT1 0x10
@@ -2014,6 +1705,14 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2014#define CLRLQIATNLQ 0x02 1705#define CLRLQIATNLQ 0x02
2015#define CLRLQIATNCMD 0x01 1706#define CLRLQIATNCMD 0x01
2016 1707
1708#define LQIMODE0 0x50
1709#define ENLQIATNQASK 0x20
1710#define ENLQICRCT1 0x10
1711#define ENLQICRCT2 0x08
1712#define ENLQIBADLQT 0x04
1713#define ENLQIATNLQ 0x02
1714#define ENLQIATNCMD 0x01
1715
2017#define LQIMODE1 0x51 1716#define LQIMODE1 0x51
2018#define ENLQIPHASE_LQ 0x80 1717#define ENLQIPHASE_LQ 0x80
2019#define ENLQIPHASE_NLQ 0x40 1718#define ENLQIPHASE_NLQ 0x40
@@ -2160,6 +1859,8 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2160#define CFG4ICMD 0x02 1859#define CFG4ICMD 0x02
2161#define CFG4TCMD 0x01 1860#define CFG4TCMD 0x01
2162 1861
1862#define CURRSCB 0x5c
1863
2163#define SEQIMODE 0x5c 1864#define SEQIMODE 0x5c
2164#define ENCTXTDONE 0x40 1865#define ENCTXTDONE 0x40
2165#define ENSAVEPTRS 0x20 1866#define ENSAVEPTRS 0x20
@@ -2169,8 +1870,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2169#define ENCFG4ICMD 0x02 1870#define ENCFG4ICMD 0x02
2170#define ENCFG4TCMD 0x01 1871#define ENCFG4TCMD 0x01
2171 1872
2172#define CURRSCB 0x5c
2173
2174#define MDFFSTAT 0x5d 1873#define MDFFSTAT 0x5d
2175#define SHCNTNEGATIVE 0x40 1874#define SHCNTNEGATIVE 0x40
2176#define SHCNTMINUS1 0x20 1875#define SHCNTMINUS1 0x20
@@ -2185,29 +1884,29 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2185 1884
2186#define DFFTAG 0x5e 1885#define DFFTAG 0x5e
2187 1886
1887#define LASTSCB 0x5e
1888
2188#define SCSITEST 0x5e 1889#define SCSITEST 0x5e
2189#define CNTRTEST 0x08 1890#define CNTRTEST 0x08
2190#define SEL_TXPLL_DEBUG 0x04 1891#define SEL_TXPLL_DEBUG 0x04
2191 1892
2192#define LASTSCB 0x5e
2193
2194#define IOPDNCTL 0x5f 1893#define IOPDNCTL 0x5f
2195#define DISABLE_OE 0x80 1894#define DISABLE_OE 0x80
2196#define PDN_IDIST 0x04 1895#define PDN_IDIST 0x04
2197#define PDN_DIFFSENSE 0x01 1896#define PDN_DIFFSENSE 0x01
2198 1897
2199#define DGRPCRCI 0x60
2200
2201#define SHADDR 0x60 1898#define SHADDR 0x60
2202 1899
2203#define NEGOADDR 0x60 1900#define NEGOADDR 0x60
2204 1901
2205#define NEGPERIOD 0x61 1902#define DGRPCRCI 0x60
2206 1903
2207#define NEGOFFSET 0x62 1904#define NEGPERIOD 0x61
2208 1905
2209#define PACKCRCI 0x62 1906#define PACKCRCI 0x62
2210 1907
1908#define NEGOFFSET 0x62
1909
2211#define NEGPPROPTS 0x63 1910#define NEGPPROPTS 0x63
2212#define PPROPT_PACE 0x08 1911#define PPROPT_PACE 0x08
2213#define PPROPT_QAS 0x04 1912#define PPROPT_QAS 0x04
@@ -2253,8 +1952,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2253 1952
2254#define SELOID 0x6b 1953#define SELOID 0x6b
2255 1954
2256#define FAIRNESS 0x6c
2257
2258#define PLL400CTL0 0x6c 1955#define PLL400CTL0 0x6c
2259#define PLL_VCOSEL 0x80 1956#define PLL_VCOSEL 0x80
2260#define PLL_PWDN 0x40 1957#define PLL_PWDN 0x40
@@ -2264,6 +1961,8 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2264#define PLL_DLPF 0x02 1961#define PLL_DLPF 0x02
2265#define PLL_ENFBM 0x01 1962#define PLL_ENFBM 0x01
2266 1963
1964#define FAIRNESS 0x6c
1965
2267#define PLL400CTL1 0x6d 1966#define PLL400CTL1 0x6d
2268#define PLL_CNTEN 0x80 1967#define PLL_CNTEN 0x80
2269#define PLL_CNTCLR 0x40 1968#define PLL_CNTCLR 0x40
@@ -2275,25 +1974,25 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2275 1974
2276#define HADDR 0x70 1975#define HADDR 0x70
2277 1976
2278#define HODMAADR 0x70
2279
2280#define PLLDELAY 0x70 1977#define PLLDELAY 0x70
2281#define SPLIT_DROP_REQ 0x80 1978#define SPLIT_DROP_REQ 0x80
2282 1979
2283#define HCNT 0x78 1980#define HODMAADR 0x70
2284 1981
2285#define HODMACNT 0x78 1982#define HODMACNT 0x78
2286 1983
2287#define HODMAEN 0x7a 1984#define HCNT 0x78
2288 1985
2289#define SGHADDR 0x7c 1986#define HODMAEN 0x7a
2290 1987
2291#define SCBHADDR 0x7c 1988#define SCBHADDR 0x7c
2292 1989
2293#define SGHCNT 0x84 1990#define SGHADDR 0x7c
2294 1991
2295#define SCBHCNT 0x84 1992#define SCBHCNT 0x84
2296 1993
1994#define SGHCNT 0x84
1995
2297#define DFF_THRSH 0x88 1996#define DFF_THRSH 0x88
2298#define WR_DFTHRSH 0x70 1997#define WR_DFTHRSH 0x70
2299#define RD_DFTHRSH 0x07 1998#define RD_DFTHRSH 0x07
@@ -2326,10 +2025,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2326 2025
2327#define CMCRXMSG0 0x90 2026#define CMCRXMSG0 0x90
2328 2027
2329#define OVLYRXMSG0 0x90
2330
2331#define DCHRXMSG0 0x90
2332
2333#define ROENABLE 0x90 2028#define ROENABLE 0x90
2334#define MSIROEN 0x20 2029#define MSIROEN 0x20
2335#define OVLYROEN 0x10 2030#define OVLYROEN 0x10
@@ -2338,11 +2033,11 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2338#define DCH1ROEN 0x02 2033#define DCH1ROEN 0x02
2339#define DCH0ROEN 0x01 2034#define DCH0ROEN 0x01
2340 2035
2341#define OVLYRXMSG1 0x91 2036#define OVLYRXMSG0 0x90
2342 2037
2343#define CMCRXMSG1 0x91 2038#define DCHRXMSG0 0x90
2344 2039
2345#define DCHRXMSG1 0x91 2040#define OVLYRXMSG1 0x91
2346 2041
2347#define NSENABLE 0x91 2042#define NSENABLE 0x91
2348#define MSINSEN 0x20 2043#define MSINSEN 0x20
@@ -2352,6 +2047,10 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2352#define DCH1NSEN 0x02 2047#define DCH1NSEN 0x02
2353#define DCH0NSEN 0x01 2048#define DCH0NSEN 0x01
2354 2049
2050#define CMCRXMSG1 0x91
2051
2052#define DCHRXMSG1 0x91
2053
2355#define DCHRXMSG2 0x92 2054#define DCHRXMSG2 0x92
2356 2055
2357#define CMCRXMSG2 0x92 2056#define CMCRXMSG2 0x92
@@ -2375,24 +2074,24 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2375#define TSCSERREN 0x02 2074#define TSCSERREN 0x02
2376#define CMPABCDIS 0x01 2075#define CMPABCDIS 0x01
2377 2076
2378#define CMCSEQBCNT 0x94
2379
2380#define OVLYSEQBCNT 0x94 2077#define OVLYSEQBCNT 0x94
2381 2078
2382#define DCHSEQBCNT 0x94 2079#define DCHSEQBCNT 0x94
2383 2080
2081#define CMCSEQBCNT 0x94
2082
2083#define CMCSPLTSTAT0 0x96
2084
2384#define DCHSPLTSTAT0 0x96 2085#define DCHSPLTSTAT0 0x96
2385 2086
2386#define OVLYSPLTSTAT0 0x96 2087#define OVLYSPLTSTAT0 0x96
2387 2088
2388#define CMCSPLTSTAT0 0x96 2089#define CMCSPLTSTAT1 0x97
2389 2090
2390#define OVLYSPLTSTAT1 0x97 2091#define OVLYSPLTSTAT1 0x97
2391 2092
2392#define DCHSPLTSTAT1 0x97 2093#define DCHSPLTSTAT1 0x97
2393 2094
2394#define CMCSPLTSTAT1 0x97
2395
2396#define SGRXMSG0 0x98 2095#define SGRXMSG0 0x98
2397#define CDNUM 0xf8 2096#define CDNUM 0xf8
2398#define CFNUM 0x07 2097#define CFNUM 0x07
@@ -2420,15 +2119,18 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2420#define TAG_NUM 0x1f 2119#define TAG_NUM 0x1f
2421#define RLXORD 0x10 2120#define RLXORD 0x10
2422 2121
2122#define SGSEQBCNT 0x9c
2123
2423#define SLVSPLTOUTATTR0 0x9c 2124#define SLVSPLTOUTATTR0 0x9c
2424#define LOWER_BCNT 0xff 2125#define LOWER_BCNT 0xff
2425 2126
2426#define SGSEQBCNT 0x9c
2427
2428#define SLVSPLTOUTATTR1 0x9d 2127#define SLVSPLTOUTATTR1 0x9d
2429#define CMPLT_DNUM 0xf8 2128#define CMPLT_DNUM 0xf8
2430#define CMPLT_FNUM 0x07 2129#define CMPLT_FNUM 0x07
2431 2130
2131#define SLVSPLTOUTATTR2 0x9e
2132#define CMPLT_BNUM 0xff
2133
2432#define SGSPLTSTAT0 0x9e 2134#define SGSPLTSTAT0 0x9e
2433#define STAETERM 0x80 2135#define STAETERM 0x80
2434#define SCBCERR 0x40 2136#define SCBCERR 0x40
@@ -2439,9 +2141,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2439#define RXSCEMSG 0x02 2141#define RXSCEMSG 0x02
2440#define RXSPLTRSP 0x01 2142#define RXSPLTRSP 0x01
2441 2143
2442#define SLVSPLTOUTATTR2 0x9e
2443#define CMPLT_BNUM 0xff
2444
2445#define SGSPLTSTAT1 0x9f 2144#define SGSPLTSTAT1 0x9f
2446#define RXDATABUCKET 0x01 2145#define RXDATABUCKET 0x01
2447 2146
@@ -2497,10 +2196,10 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2497 2196
2498#define CCSGADDR 0xac 2197#define CCSGADDR 0xac
2499 2198
2500#define CCSCBADDR 0xac
2501
2502#define CCSCBADR_BK 0xac 2199#define CCSCBADR_BK 0xac
2503 2200
2201#define CCSCBADDR 0xac
2202
2504#define CMC_RAMBIST 0xad 2203#define CMC_RAMBIST 0xad
2505#define SG_ELEMENT_SIZE 0x80 2204#define SG_ELEMENT_SIZE 0x80
2506#define SCBRAMBIST_FAIL 0x40 2205#define SCBRAMBIST_FAIL 0x40
@@ -2554,9 +2253,9 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2554#define SEEDAT 0xbc 2253#define SEEDAT 0xbc
2555 2254
2556#define SEECTL 0xbe 2255#define SEECTL 0xbe
2557#define SEEOP_EWDS 0x40
2558#define SEEOP_WALL 0x40 2256#define SEEOP_WALL 0x40
2559#define SEEOP_EWEN 0x40 2257#define SEEOP_EWEN 0x40
2258#define SEEOP_EWDS 0x40
2560#define SEEOPCODE 0x70 2259#define SEEOPCODE 0x70
2561#define SEERST 0x02 2260#define SEERST 0x02
2562#define SEESTART 0x01 2261#define SEESTART 0x01
@@ -2573,25 +2272,25 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2573 2272
2574#define SCBCNT 0xbf 2273#define SCBCNT 0xbf
2575 2274
2275#define DFWADDR 0xc0
2276
2576#define DSPFLTRCTL 0xc0 2277#define DSPFLTRCTL 0xc0
2577#define FLTRDISABLE 0x20 2278#define FLTRDISABLE 0x20
2578#define EDGESENSE 0x10 2279#define EDGESENSE 0x10
2579#define DSPFCNTSEL 0x0f 2280#define DSPFCNTSEL 0x0f
2580 2281
2581#define DFWADDR 0xc0
2582
2583#define DSPDATACTL 0xc1 2282#define DSPDATACTL 0xc1
2584#define BYPASSENAB 0x80 2283#define BYPASSENAB 0x80
2585#define DESQDIS 0x10 2284#define DESQDIS 0x10
2586#define RCVROFFSTDIS 0x04 2285#define RCVROFFSTDIS 0x04
2587#define XMITOFFSTDIS 0x02 2286#define XMITOFFSTDIS 0x02
2588 2287
2288#define DFRADDR 0xc2
2289
2589#define DSPREQCTL 0xc2 2290#define DSPREQCTL 0xc2
2590#define MANREQCTL 0xc0 2291#define MANREQCTL 0xc0
2591#define MANREQDLY 0x3f 2292#define MANREQDLY 0x3f
2592 2293
2593#define DFRADDR 0xc2
2594
2595#define DSPACKCTL 0xc3 2294#define DSPACKCTL 0xc3
2596#define MANACKCTL 0xc0 2295#define MANACKCTL 0xc0
2597#define MANACKDLY 0x3f 2296#define MANACKDLY 0x3f
@@ -2612,14 +2311,14 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2612 2311
2613#define WRTBIASCALC 0xc7 2312#define WRTBIASCALC 0xc7
2614 2313
2615#define DFPTRS 0xc8
2616
2617#define RCVRBIASCALC 0xc8 2314#define RCVRBIASCALC 0xc8
2618 2315
2619#define DFBKPTR 0xc9 2316#define DFPTRS 0xc8
2620 2317
2621#define SKEWCALC 0xc9 2318#define SKEWCALC 0xc9
2622 2319
2320#define DFBKPTR 0xc9
2321
2623#define DFDBCTL 0xcb 2322#define DFDBCTL 0xcb
2624#define DFF_CIO_WR_RDY 0x20 2323#define DFF_CIO_WR_RDY 0x20
2625#define DFF_CIO_RD_RDY 0x10 2324#define DFF_CIO_RD_RDY 0x10
@@ -2704,12 +2403,12 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2704 2403
2705#define ACCUM_SAVE 0xfa 2404#define ACCUM_SAVE 0xfa
2706 2405
2406#define WAITING_SCB_TAILS 0x100
2407
2707#define AHD_PCI_CONFIG_BASE 0x100 2408#define AHD_PCI_CONFIG_BASE 0x100
2708 2409
2709#define SRAM_BASE 0x100 2410#define SRAM_BASE 0x100
2710 2411
2711#define WAITING_SCB_TAILS 0x100
2712
2713#define WAITING_TID_HEAD 0x120 2412#define WAITING_TID_HEAD 0x120
2714 2413
2715#define WAITING_TID_TAIL 0x122 2414#define WAITING_TID_TAIL 0x122
@@ -2738,8 +2437,8 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2738#define PRELOADEN 0x80 2437#define PRELOADEN 0x80
2739#define WIDEODD 0x40 2438#define WIDEODD 0x40
2740#define SCSIEN 0x20 2439#define SCSIEN 0x20
2741#define SDMAENACK 0x10
2742#define SDMAEN 0x10 2440#define SDMAEN 0x10
2441#define SDMAENACK 0x10
2743#define HDMAEN 0x08 2442#define HDMAEN 0x08
2744#define HDMAENACK 0x08 2443#define HDMAENACK 0x08
2745#define DIRECTION 0x04 2444#define DIRECTION 0x04
@@ -2837,12 +2536,12 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2837 2536
2838#define MK_MESSAGE_SCSIID 0x162 2537#define MK_MESSAGE_SCSIID 0x162
2839 2538
2539#define SCB_BASE 0x180
2540
2840#define SCB_RESIDUAL_DATACNT 0x180 2541#define SCB_RESIDUAL_DATACNT 0x180
2841#define SCB_CDB_STORE 0x180 2542#define SCB_CDB_STORE 0x180
2842#define SCB_HOST_CDB_PTR 0x180 2543#define SCB_HOST_CDB_PTR 0x180
2843 2544
2844#define SCB_BASE 0x180
2845
2846#define SCB_RESIDUAL_SGPTR 0x184 2545#define SCB_RESIDUAL_SGPTR 0x184
2847#define SG_ADDR_MASK 0xf8 2546#define SG_ADDR_MASK 0xf8
2848#define SG_OVERRUN_RESID 0x02 2547#define SG_OVERRUN_RESID 0x02
@@ -2910,17 +2609,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2910#define SCB_DISCONNECTED_LISTS 0x1b8 2609#define SCB_DISCONNECTED_LISTS 0x1b8
2911 2610
2912 2611
2913#define CMD_GROUP_CODE_SHIFT 0x05
2914#define STIMESEL_MIN 0x18
2915#define STIMESEL_SHIFT 0x03
2916#define INVALID_ADDR 0x80
2917#define AHD_PRECOMP_MASK 0x07
2918#define TARGET_DATA_IN 0x01
2919#define CCSCBADDR_MAX 0x80
2920#define NUMDSPS 0x14
2921#define SEEOP_EWEN_ADDR 0xc0
2922#define AHD_ANNEXCOL_PER_DEV0 0x04
2923#define DST_MODE_SHIFT 0x04
2924#define AHD_TIMER_MAX_US 0x18ffe7 2612#define AHD_TIMER_MAX_US 0x18ffe7
2925#define AHD_TIMER_MAX_TICKS 0xffff 2613#define AHD_TIMER_MAX_TICKS 0xffff
2926#define AHD_SENSE_BUFSIZE 0x100 2614#define AHD_SENSE_BUFSIZE 0x100
@@ -2955,32 +2643,43 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2955#define LUNLEN_SINGLE_LEVEL_LUN 0x0f 2643#define LUNLEN_SINGLE_LEVEL_LUN 0x0f
2956#define NVRAM_SCB_OFFSET 0x2c 2644#define NVRAM_SCB_OFFSET 0x2c
2957#define STATUS_PKT_SENSE 0xff 2645#define STATUS_PKT_SENSE 0xff
2646#define CMD_GROUP_CODE_SHIFT 0x05
2958#define MAX_OFFSET_PACED_BUG 0x7f 2647#define MAX_OFFSET_PACED_BUG 0x7f
2959#define STIMESEL_BUG_ADJ 0x08 2648#define STIMESEL_BUG_ADJ 0x08
2649#define STIMESEL_MIN 0x18
2650#define STIMESEL_SHIFT 0x03
2960#define CCSGRAM_MAXSEGS 0x10 2651#define CCSGRAM_MAXSEGS 0x10
2652#define INVALID_ADDR 0x80
2961#define SEEOP_ERAL_ADDR 0x80 2653#define SEEOP_ERAL_ADDR 0x80
2962#define AHD_SLEWRATE_DEF_REVB 0x08 2654#define AHD_SLEWRATE_DEF_REVB 0x08
2963#define AHD_PRECOMP_CUTBACK_17 0x04 2655#define AHD_PRECOMP_CUTBACK_17 0x04
2656#define AHD_PRECOMP_MASK 0x07
2964#define SRC_MODE_SHIFT 0x00 2657#define SRC_MODE_SHIFT 0x00
2965#define PKT_OVERRUN_BUFSIZE 0x200 2658#define PKT_OVERRUN_BUFSIZE 0x200
2966#define SCB_TRANSFER_SIZE_1BYTE_LUN 0x30 2659#define SCB_TRANSFER_SIZE_1BYTE_LUN 0x30
2660#define TARGET_DATA_IN 0x01
2967#define HOST_MSG 0xff 2661#define HOST_MSG 0xff
2968#define MAX_OFFSET 0xfe 2662#define MAX_OFFSET 0xfe
2969#define BUS_16_BIT 0x01 2663#define BUS_16_BIT 0x01
2664#define CCSCBADDR_MAX 0x80
2665#define NUMDSPS 0x14
2666#define SEEOP_EWEN_ADDR 0xc0
2667#define AHD_ANNEXCOL_PER_DEV0 0x04
2668#define DST_MODE_SHIFT 0x04
2970 2669
2971 2670
2972/* Downloaded Constant Definitions */ 2671/* Downloaded Constant Definitions */
2973#define SG_SIZEOF 0x04
2974#define SG_PREFETCH_ALIGN_MASK 0x02
2975#define SG_PREFETCH_CNT_LIMIT 0x01
2976#define CACHELINE_MASK 0x07 2672#define CACHELINE_MASK 0x07
2977#define SCB_TRANSFER_SIZE 0x06 2673#define SCB_TRANSFER_SIZE 0x06
2978#define PKT_OVERRUN_BUFOFFSET 0x05 2674#define PKT_OVERRUN_BUFOFFSET 0x05
2675#define SG_SIZEOF 0x04
2979#define SG_PREFETCH_ADDR_MASK 0x03 2676#define SG_PREFETCH_ADDR_MASK 0x03
2677#define SG_PREFETCH_ALIGN_MASK 0x02
2678#define SG_PREFETCH_CNT_LIMIT 0x01
2980#define SG_PREFETCH_CNT 0x00 2679#define SG_PREFETCH_CNT 0x00
2981#define DOWNLOAD_CONST_COUNT 0x08 2680#define DOWNLOAD_CONST_COUNT 0x08
2982 2681
2983 2682
2984/* Exported Labels */ 2683/* Exported Labels */
2985#define LABEL_timer_isr 0x28b
2986#define LABEL_seq_isr 0x28f 2684#define LABEL_seq_isr 0x28f
2685#define LABEL_timer_isr 0x28b
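
[Editorial note, not part of the commit: both generated files above follow the same AIC_DEBUG_REGISTERS pattern that this diff keeps reordering — when the option is set, a per-register helper compiled from the _reg_print.c_shipped file is declared; otherwise the header falls back to a macro that calls ahd_print_register() with a NULL parse table. The sketch below is a self-contained mock of that pattern for one register (SSTAT0 at 0x4b, with bit names taken from the diff); print_register() here is a simplified stand-in for the driver's real ahd_print_register(), not its actual implementation.]

/* Illustrative sketch only: the declaration/macro pattern used by the
 * generated aic79xx register-print files, with a mock print helper. */
#include <stdio.h>

typedef unsigned int u_int;

typedef struct {                 /* mirrors ahd_reg_parse_entry_t: name, value, mask */
	const char *name;
	u_int value;
	u_int mask;
} reg_parse_entry_t;

/* Stand-in for ahd_print_register(): print the register, then any bit
 * names whose masked value matches.  Column wrapping is omitted here. */
static int
print_register(const reg_parse_entry_t *table, u_int num_entries,
	       const char *name, u_int address, u_int regvalue,
	       u_int *cur_col, u_int wrap)
{
	u_int i;

	printf("%s[0x%02x] = 0x%02x", name, address, regvalue);
	for (i = 0; i < num_entries; i++)
		if ((regvalue & table[i].mask) == table[i].value)
			printf(" %s", table[i].name);
	printf("\n");
	(void)cur_col; (void)wrap;
	return (0);
}

#define AIC_DEBUG_REGISTERS 1

#if AIC_DEBUG_REGISTERS
/* With debug registers enabled: a real helper plus its parse table
 * (values as listed for SSTAT0 in the diff above). */
static const reg_parse_entry_t SSTAT0_parse_table[] = {
	{ "ARBDO",   0x01, 0x01 },
	{ "SPIORDY", 0x02, 0x02 },
	{ "SELDO",   0x40, 0x40 },
};

static int
sstat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
	return (print_register(SSTAT0_parse_table, 3, "SSTAT0",
			       0x4b, regvalue, cur_col, wrap));
}
#else
/* Without it: the header's fallback macro, NULL parse table. */
#define sstat0_print(regvalue, cur_col, wrap) \
	print_register(NULL, 0, "SSTAT0", 0x4b, regvalue, cur_col, wrap)
#endif

int
main(void)
{
	u_int col = 0;

	/* Prints: SSTAT0[0x4b] = 0x41 ARBDO SELDO */
	return (sstat0_print(0x41, &col, 80));
}
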
diff --git a/drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped b/drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped
index c4c8a96bf5a3..f5ea715d6ac3 100644
--- a/drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped
+++ b/drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped
@@ -8,18 +8,6 @@
8 8
9#include "aic79xx_osm.h" 9#include "aic79xx_osm.h"
10 10
11static const ahd_reg_parse_entry_t MODE_PTR_parse_table[] = {
12 { "SRC_MODE", 0x07, 0x07 },
13 { "DST_MODE", 0x70, 0x70 }
14};
15
16int
17ahd_mode_ptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
18{
19 return (ahd_print_register(MODE_PTR_parse_table, 2, "MODE_PTR",
20 0x00, regvalue, cur_col, wrap));
21}
22
23static const ahd_reg_parse_entry_t INTSTAT_parse_table[] = { 11static const ahd_reg_parse_entry_t INTSTAT_parse_table[] = {
24 { "SPLTINT", 0x01, 0x01 }, 12 { "SPLTINT", 0x01, 0x01 },
25 { "CMDCMPLT", 0x02, 0x02 }, 13 { "CMDCMPLT", 0x02, 0x02 },
@@ -39,110 +27,6 @@ ahd_intstat_print(u_int regvalue, u_int *cur_col, u_int wrap)
39 0x01, regvalue, cur_col, wrap)); 27 0x01, regvalue, cur_col, wrap));
40} 28}
41 29
42static const ahd_reg_parse_entry_t SEQINTCODE_parse_table[] = {
43 { "NO_SEQINT", 0x00, 0xff },
44 { "BAD_PHASE", 0x01, 0xff },
45 { "SEND_REJECT", 0x02, 0xff },
46 { "PROTO_VIOLATION", 0x03, 0xff },
47 { "NO_MATCH", 0x04, 0xff },
48 { "IGN_WIDE_RES", 0x05, 0xff },
49 { "PDATA_REINIT", 0x06, 0xff },
50 { "HOST_MSG_LOOP", 0x07, 0xff },
51 { "BAD_STATUS", 0x08, 0xff },
52 { "DATA_OVERRUN", 0x09, 0xff },
53 { "MKMSG_FAILED", 0x0a, 0xff },
54 { "MISSED_BUSFREE", 0x0b, 0xff },
55 { "DUMP_CARD_STATE", 0x0c, 0xff },
56 { "ILLEGAL_PHASE", 0x0d, 0xff },
57 { "INVALID_SEQINT", 0x0e, 0xff },
58 { "CFG4ISTAT_INTR", 0x0f, 0xff },
59 { "STATUS_OVERRUN", 0x10, 0xff },
60 { "CFG4OVERRUN", 0x11, 0xff },
61 { "ENTERING_NONPACK", 0x12, 0xff },
62 { "TASKMGMT_FUNC_COMPLETE",0x13, 0xff },
63 { "TASKMGMT_CMD_CMPLT_OKAY",0x14, 0xff },
64 { "TRACEPOINT0", 0x15, 0xff },
65 { "TRACEPOINT1", 0x16, 0xff },
66 { "TRACEPOINT2", 0x17, 0xff },
67 { "TRACEPOINT3", 0x18, 0xff },
68 { "SAW_HWERR", 0x19, 0xff },
69 { "BAD_SCB_STATUS", 0x1a, 0xff }
70};
71
72int
73ahd_seqintcode_print(u_int regvalue, u_int *cur_col, u_int wrap)
74{
75 return (ahd_print_register(SEQINTCODE_parse_table, 27, "SEQINTCODE",
76 0x02, regvalue, cur_col, wrap));
77}
78
79static const ahd_reg_parse_entry_t CLRINT_parse_table[] = {
80 { "CLRSPLTINT", 0x01, 0x01 },
81 { "CLRCMDINT", 0x02, 0x02 },
82 { "CLRSEQINT", 0x04, 0x04 },
83 { "CLRSCSIINT", 0x08, 0x08 },
84 { "CLRPCIINT", 0x10, 0x10 },
85 { "CLRSWTMINT", 0x20, 0x20 },
86 { "CLRBRKADRINT", 0x40, 0x40 },
87 { "CLRHWERRINT", 0x80, 0x80 }
88};
89
90int
91ahd_clrint_print(u_int regvalue, u_int *cur_col, u_int wrap)
92{
93 return (ahd_print_register(CLRINT_parse_table, 8, "CLRINT",
94 0x03, regvalue, cur_col, wrap));
95}
96
97static const ahd_reg_parse_entry_t ERROR_parse_table[] = {
98 { "DSCTMOUT", 0x02, 0x02 },
99 { "ILLOPCODE", 0x04, 0x04 },
100 { "SQPARERR", 0x08, 0x08 },
101 { "DPARERR", 0x10, 0x10 },
102 { "MPARERR", 0x20, 0x20 },
103 { "CIOACCESFAIL", 0x40, 0x40 },
104 { "CIOPARERR", 0x80, 0x80 }
105};
106
107int
108ahd_error_print(u_int regvalue, u_int *cur_col, u_int wrap)
109{
110 return (ahd_print_register(ERROR_parse_table, 7, "ERROR",
111 0x04, regvalue, cur_col, wrap));
112}
113
114static const ahd_reg_parse_entry_t HCNTRL_parse_table[] = {
115 { "CHIPRST", 0x01, 0x01 },
116 { "CHIPRSTACK", 0x01, 0x01 },
117 { "INTEN", 0x02, 0x02 },
118 { "PAUSE", 0x04, 0x04 },
119 { "SWTIMER_START_B", 0x08, 0x08 },
120 { "SWINT", 0x10, 0x10 },
121 { "POWRDN", 0x40, 0x40 },
122 { "SEQ_RESET", 0x80, 0x80 }
123};
124
125int
126ahd_hcntrl_print(u_int regvalue, u_int *cur_col, u_int wrap)
127{
128 return (ahd_print_register(HCNTRL_parse_table, 8, "HCNTRL",
129 0x05, regvalue, cur_col, wrap));
130}
131
132int
133ahd_hnscb_qoff_print(u_int regvalue, u_int *cur_col, u_int wrap)
134{
135 return (ahd_print_register(NULL, 0, "HNSCB_QOFF",
136 0x06, regvalue, cur_col, wrap));
137}
138
139int
140ahd_hescb_qoff_print(u_int regvalue, u_int *cur_col, u_int wrap)
141{
142 return (ahd_print_register(NULL, 0, "HESCB_QOFF",
143 0x08, regvalue, cur_col, wrap));
144}
145
146static const ahd_reg_parse_entry_t HS_MAILBOX_parse_table[] = { 30static const ahd_reg_parse_entry_t HS_MAILBOX_parse_table[] = {
147 { "ENINT_COALESCE", 0x40, 0x40 }, 31 { "ENINT_COALESCE", 0x40, 0x40 },
148 { "HOST_TQINPOS", 0x80, 0x80 } 32 { "HOST_TQINPOS", 0x80, 0x80 }
@@ -170,77 +54,6 @@ ahd_seqintstat_print(u_int regvalue, u_int *cur_col, u_int wrap)
170 0x0c, regvalue, cur_col, wrap)); 54 0x0c, regvalue, cur_col, wrap));
171} 55}
172 56
173static const ahd_reg_parse_entry_t CLRSEQINTSTAT_parse_table[] = {
174 { "CLRSEQ_SPLTINT", 0x01, 0x01 },
175 { "CLRSEQ_PCIINT", 0x02, 0x02 },
176 { "CLRSEQ_SCSIINT", 0x04, 0x04 },
177 { "CLRSEQ_SEQINT", 0x08, 0x08 },
178 { "CLRSEQ_SWTMRTO", 0x10, 0x10 }
179};
180
181int
182ahd_clrseqintstat_print(u_int regvalue, u_int *cur_col, u_int wrap)
183{
184 return (ahd_print_register(CLRSEQINTSTAT_parse_table, 5, "CLRSEQINTSTAT",
185 0x0c, regvalue, cur_col, wrap));
186}
187
188int
189ahd_swtimer_print(u_int regvalue, u_int *cur_col, u_int wrap)
190{
191 return (ahd_print_register(NULL, 0, "SWTIMER",
192 0x0e, regvalue, cur_col, wrap));
193}
194
195int
196ahd_snscb_qoff_print(u_int regvalue, u_int *cur_col, u_int wrap)
197{
198 return (ahd_print_register(NULL, 0, "SNSCB_QOFF",
199 0x10, regvalue, cur_col, wrap));
200}
201
202int
203ahd_sescb_qoff_print(u_int regvalue, u_int *cur_col, u_int wrap)
204{
205 return (ahd_print_register(NULL, 0, "SESCB_QOFF",
206 0x12, regvalue, cur_col, wrap));
207}
208
209int
210ahd_sdscb_qoff_print(u_int regvalue, u_int *cur_col, u_int wrap)
211{
212 return (ahd_print_register(NULL, 0, "SDSCB_QOFF",
213 0x14, regvalue, cur_col, wrap));
214}
215
216static const ahd_reg_parse_entry_t QOFF_CTLSTA_parse_table[] = {
217 { "SCB_QSIZE_4", 0x00, 0x0f },
218 { "SCB_QSIZE_8", 0x01, 0x0f },
219 { "SCB_QSIZE_16", 0x02, 0x0f },
220 { "SCB_QSIZE_32", 0x03, 0x0f },
221 { "SCB_QSIZE_64", 0x04, 0x0f },
222 { "SCB_QSIZE_128", 0x05, 0x0f },
223 { "SCB_QSIZE_256", 0x06, 0x0f },
224 { "SCB_QSIZE_512", 0x07, 0x0f },
225 { "SCB_QSIZE_1024", 0x08, 0x0f },
226 { "SCB_QSIZE_2048", 0x09, 0x0f },
227 { "SCB_QSIZE_4096", 0x0a, 0x0f },
228 { "SCB_QSIZE_8192", 0x0b, 0x0f },
229 { "SCB_QSIZE_16384", 0x0c, 0x0f },
230 { "SCB_QSIZE", 0x0f, 0x0f },
231 { "HS_MAILBOX_ACT", 0x10, 0x10 },
232 { "SDSCB_ROLLOVR", 0x20, 0x20 },
233 { "NEW_SCB_AVAIL", 0x40, 0x40 },
234 { "EMPTY_SCB_AVAIL", 0x80, 0x80 }
235};
236
237int
238ahd_qoff_ctlsta_print(u_int regvalue, u_int *cur_col, u_int wrap)
239{
240 return (ahd_print_register(QOFF_CTLSTA_parse_table, 18, "QOFF_CTLSTA",
241 0x16, regvalue, cur_col, wrap));
242}
243
244static const ahd_reg_parse_entry_t INTCTL_parse_table[] = { 57static const ahd_reg_parse_entry_t INTCTL_parse_table[] = {
245 { "SPLTINTEN", 0x01, 0x01 }, 58 { "SPLTINTEN", 0x01, 0x01 },
246 { "SEQINTEN", 0x02, 0x02 }, 59 { "SEQINTEN", 0x02, 0x02 },
@@ -280,22 +93,6 @@ ahd_dfcntrl_print(u_int regvalue, u_int *cur_col, u_int wrap)
280 0x19, regvalue, cur_col, wrap)); 93 0x19, regvalue, cur_col, wrap));
281} 94}
282 95
283static const ahd_reg_parse_entry_t DSCOMMAND0_parse_table[] = {
284 { "CIOPARCKEN", 0x01, 0x01 },
285 { "DISABLE_TWATE", 0x02, 0x02 },
286 { "EXTREQLCK", 0x10, 0x10 },
287 { "MPARCKEN", 0x20, 0x20 },
288 { "DPARCKEN", 0x40, 0x40 },
289 { "CACHETHEN", 0x80, 0x80 }
290};
291
292int
293ahd_dscommand0_print(u_int regvalue, u_int *cur_col, u_int wrap)
294{
295 return (ahd_print_register(DSCOMMAND0_parse_table, 6, "DSCOMMAND0",
296 0x19, regvalue, cur_col, wrap));
297}
298
299static const ahd_reg_parse_entry_t DFSTATUS_parse_table[] = { 96static const ahd_reg_parse_entry_t DFSTATUS_parse_table[] = {
300 { "FIFOEMP", 0x01, 0x01 }, 97 { "FIFOEMP", 0x01, 0x01 },
301 { "FIFOFULL", 0x02, 0x02 }, 98 { "FIFOFULL", 0x02, 0x02 },
@@ -327,146 +124,6 @@ ahd_sg_cache_shadow_print(u_int regvalue, u_int *cur_col, u_int wrap)
327 0x1b, regvalue, cur_col, wrap)); 124 0x1b, regvalue, cur_col, wrap));
328} 125}
329 126
330static const ahd_reg_parse_entry_t SG_CACHE_PRE_parse_table[] = {
331 { "LAST_SEG", 0x02, 0x02 },
332 { "ODD_SEG", 0x04, 0x04 },
333 { "SG_ADDR_MASK", 0xf8, 0xf8 }
334};
335
336int
337ahd_sg_cache_pre_print(u_int regvalue, u_int *cur_col, u_int wrap)
338{
339 return (ahd_print_register(SG_CACHE_PRE_parse_table, 3, "SG_CACHE_PRE",
340 0x1b, regvalue, cur_col, wrap));
341}
342
343int
344ahd_lqin_print(u_int regvalue, u_int *cur_col, u_int wrap)
345{
346 return (ahd_print_register(NULL, 0, "LQIN",
347 0x20, regvalue, cur_col, wrap));
348}
349
350int
351ahd_lunptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
352{
353 return (ahd_print_register(NULL, 0, "LUNPTR",
354 0x22, regvalue, cur_col, wrap));
355}
356
357int
358ahd_cmdlenptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
359{
360 return (ahd_print_register(NULL, 0, "CMDLENPTR",
361 0x25, regvalue, cur_col, wrap));
362}
363
364int
365ahd_attrptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
366{
367 return (ahd_print_register(NULL, 0, "ATTRPTR",
368 0x26, regvalue, cur_col, wrap));
369}
370
371int
372ahd_flagptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
373{
374 return (ahd_print_register(NULL, 0, "FLAGPTR",
375 0x27, regvalue, cur_col, wrap));
376}
377
378int
379ahd_cmdptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
380{
381 return (ahd_print_register(NULL, 0, "CMDPTR",
382 0x28, regvalue, cur_col, wrap));
383}
384
385int
386ahd_qnextptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
387{
388 return (ahd_print_register(NULL, 0, "QNEXTPTR",
389 0x29, regvalue, cur_col, wrap));
390}
391
392int
393ahd_abrtbyteptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
394{
395 return (ahd_print_register(NULL, 0, "ABRTBYTEPTR",
396 0x2b, regvalue, cur_col, wrap));
397}
398
399int
400ahd_abrtbitptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
401{
402 return (ahd_print_register(NULL, 0, "ABRTBITPTR",
403 0x2c, regvalue, cur_col, wrap));
404}
405
406static const ahd_reg_parse_entry_t LUNLEN_parse_table[] = {
407 { "ILUNLEN", 0x0f, 0x0f },
408 { "TLUNLEN", 0xf0, 0xf0 }
409};
410
411int
412ahd_lunlen_print(u_int regvalue, u_int *cur_col, u_int wrap)
413{
414 return (ahd_print_register(LUNLEN_parse_table, 2, "LUNLEN",
415 0x30, regvalue, cur_col, wrap));
416}
417
418int
419ahd_cdblimit_print(u_int regvalue, u_int *cur_col, u_int wrap)
420{
421 return (ahd_print_register(NULL, 0, "CDBLIMIT",
422 0x31, regvalue, cur_col, wrap));
423}
424
425int
426ahd_maxcmd_print(u_int regvalue, u_int *cur_col, u_int wrap)
427{
428 return (ahd_print_register(NULL, 0, "MAXCMD",
429 0x32, regvalue, cur_col, wrap));
430}
431
432int
433ahd_maxcmdcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
434{
435 return (ahd_print_register(NULL, 0, "MAXCMDCNT",
436 0x33, regvalue, cur_col, wrap));
437}
438
439static const ahd_reg_parse_entry_t LQCTL1_parse_table[] = {
440 { "ABORTPENDING", 0x01, 0x01 },
441 { "SINGLECMD", 0x02, 0x02 },
442 { "PCI2PCI", 0x04, 0x04 }
443};
444
445int
446ahd_lqctl1_print(u_int regvalue, u_int *cur_col, u_int wrap)
447{
448 return (ahd_print_register(LQCTL1_parse_table, 3, "LQCTL1",
449 0x38, regvalue, cur_col, wrap));
450}
451
452static const ahd_reg_parse_entry_t LQCTL2_parse_table[] = {
453 { "LQOPAUSE", 0x01, 0x01 },
454 { "LQOTOIDLE", 0x02, 0x02 },
455 { "LQOCONTINUE", 0x04, 0x04 },
456 { "LQORETRY", 0x08, 0x08 },
457 { "LQIPAUSE", 0x10, 0x10 },
458 { "LQITOIDLE", 0x20, 0x20 },
459 { "LQICONTINUE", 0x40, 0x40 },
460 { "LQIRETRY", 0x80, 0x80 }
461};
462
463int
464ahd_lqctl2_print(u_int regvalue, u_int *cur_col, u_int wrap)
465{
466 return (ahd_print_register(LQCTL2_parse_table, 8, "LQCTL2",
467 0x39, regvalue, cur_col, wrap));
468}
469
470static const ahd_reg_parse_entry_t SCSISEQ0_parse_table[] = { 127static const ahd_reg_parse_entry_t SCSISEQ0_parse_table[] = {
471 { "SCSIRSTO", 0x01, 0x01 }, 128 { "SCSIRSTO", 0x01, 0x01 },
472 { "FORCEBUSFREE", 0x10, 0x10 }, 129 { "FORCEBUSFREE", 0x10, 0x10 },
@@ -498,37 +155,6 @@ ahd_scsiseq1_print(u_int regvalue, u_int *cur_col, u_int wrap)
498 0x3b, regvalue, cur_col, wrap)); 155 0x3b, regvalue, cur_col, wrap));
499} 156}
500 157
501static const ahd_reg_parse_entry_t SXFRCTL0_parse_table[] = {
502 { "SPIOEN", 0x08, 0x08 },
503 { "BIOSCANCELEN", 0x10, 0x10 },
504 { "DFPEXP", 0x40, 0x40 },
505 { "DFON", 0x80, 0x80 }
506};
507
508int
509ahd_sxfrctl0_print(u_int regvalue, u_int *cur_col, u_int wrap)
510{
511 return (ahd_print_register(SXFRCTL0_parse_table, 4, "SXFRCTL0",
512 0x3c, regvalue, cur_col, wrap));
513}
514
515static const ahd_reg_parse_entry_t SXFRCTL1_parse_table[] = {
516 { "STPWEN", 0x01, 0x01 },
517 { "ACTNEGEN", 0x02, 0x02 },
518 { "ENSTIMER", 0x04, 0x04 },
519 { "STIMESEL", 0x18, 0x18 },
520 { "ENSPCHK", 0x20, 0x20 },
521 { "ENSACHK", 0x40, 0x40 },
522 { "BITBUCKET", 0x80, 0x80 }
523};
524
525int
526ahd_sxfrctl1_print(u_int regvalue, u_int *cur_col, u_int wrap)
527{
528 return (ahd_print_register(SXFRCTL1_parse_table, 7, "SXFRCTL1",
529 0x3d, regvalue, cur_col, wrap));
530}
531
532static const ahd_reg_parse_entry_t DFFSTAT_parse_table[] = { 158static const ahd_reg_parse_entry_t DFFSTAT_parse_table[] = {
533 { "CURRFIFO_0", 0x00, 0x03 }, 159 { "CURRFIFO_0", 0x00, 0x03 },
534 { "CURRFIFO_1", 0x01, 0x03 }, 160 { "CURRFIFO_1", 0x01, 0x03 },
@@ -545,40 +171,6 @@ ahd_dffstat_print(u_int regvalue, u_int *cur_col, u_int wrap)
545 0x3f, regvalue, cur_col, wrap)); 171 0x3f, regvalue, cur_col, wrap));
546} 172}
547 173
548int
549ahd_multargid_print(u_int regvalue, u_int *cur_col, u_int wrap)
550{
551 return (ahd_print_register(NULL, 0, "MULTARGID",
552 0x40, regvalue, cur_col, wrap));
553}
554
555static const ahd_reg_parse_entry_t SCSISIGO_parse_table[] = {
556 { "P_DATAOUT", 0x00, 0xe0 },
557 { "P_DATAOUT_DT", 0x20, 0xe0 },
558 { "P_DATAIN", 0x40, 0xe0 },
559 { "P_DATAIN_DT", 0x60, 0xe0 },
560 { "P_COMMAND", 0x80, 0xe0 },
561 { "P_MESGOUT", 0xa0, 0xe0 },
562 { "P_STATUS", 0xc0, 0xe0 },
563 { "P_MESGIN", 0xe0, 0xe0 },
564 { "ACKO", 0x01, 0x01 },
565 { "REQO", 0x02, 0x02 },
566 { "BSYO", 0x04, 0x04 },
567 { "SELO", 0x08, 0x08 },
568 { "ATNO", 0x10, 0x10 },
569 { "MSGO", 0x20, 0x20 },
570 { "IOO", 0x40, 0x40 },
571 { "CDO", 0x80, 0x80 },
572 { "PHASE_MASK", 0xe0, 0xe0 }
573};
574
575int
576ahd_scsisigo_print(u_int regvalue, u_int *cur_col, u_int wrap)
577{
578 return (ahd_print_register(SCSISIGO_parse_table, 17, "SCSISIGO",
579 0x40, regvalue, cur_col, wrap));
580}
581
582static const ahd_reg_parse_entry_t SCSISIGI_parse_table[] = { 174static const ahd_reg_parse_entry_t SCSISIGI_parse_table[] = {
583 { "P_DATAOUT", 0x00, 0xe0 }, 175 { "P_DATAOUT", 0x00, 0xe0 },
584 { "P_DATAOUT_DT", 0x20, 0xe0 }, 176 { "P_DATAOUT_DT", 0x20, 0xe0 },
@@ -624,31 +216,12 @@ ahd_scsiphase_print(u_int regvalue, u_int *cur_col, u_int wrap)
624} 216}
625 217
626int 218int
627ahd_scsidat_print(u_int regvalue, u_int *cur_col, u_int wrap)
628{
629 return (ahd_print_register(NULL, 0, "SCSIDAT",
630 0x44, regvalue, cur_col, wrap));
631}
632
633int
634ahd_scsibus_print(u_int regvalue, u_int *cur_col, u_int wrap) 219ahd_scsibus_print(u_int regvalue, u_int *cur_col, u_int wrap)
635{ 220{
636 return (ahd_print_register(NULL, 0, "SCSIBUS", 221 return (ahd_print_register(NULL, 0, "SCSIBUS",
637 0x46, regvalue, cur_col, wrap)); 222 0x46, regvalue, cur_col, wrap));
638} 223}
639 224
640static const ahd_reg_parse_entry_t TARGIDIN_parse_table[] = {
641 { "TARGID", 0x0f, 0x0f },
642 { "CLKOUT", 0x80, 0x80 }
643};
644
645int
646ahd_targidin_print(u_int regvalue, u_int *cur_col, u_int wrap)
647{
648 return (ahd_print_register(TARGIDIN_parse_table, 2, "TARGIDIN",
649 0x48, regvalue, cur_col, wrap));
650}
651
652static const ahd_reg_parse_entry_t SELID_parse_table[] = { 225static const ahd_reg_parse_entry_t SELID_parse_table[] = {
653 { "ONEBIT", 0x08, 0x08 }, 226 { "ONEBIT", 0x08, 0x08 },
654 { "SELID_MASK", 0xf0, 0xf0 } 227 { "SELID_MASK", 0xf0, 0xf0 }
@@ -661,38 +234,6 @@ ahd_selid_print(u_int regvalue, u_int *cur_col, u_int wrap)
661 0x49, regvalue, cur_col, wrap)); 234 0x49, regvalue, cur_col, wrap));
662} 235}
663 236
664static const ahd_reg_parse_entry_t OPTIONMODE_parse_table[] = {
665 { "AUTO_MSGOUT_DE", 0x02, 0x02 },
666 { "ENDGFORMCHK", 0x04, 0x04 },
667 { "BUSFREEREV", 0x10, 0x10 },
668 { "BIASCANCTL", 0x20, 0x20 },
669 { "AUTOACKEN", 0x40, 0x40 },
670 { "BIOSCANCTL", 0x80, 0x80 },
671 { "OPTIONMODE_DEFAULTS",0x02, 0x02 }
672};
673
674int
675ahd_optionmode_print(u_int regvalue, u_int *cur_col, u_int wrap)
676{
677 return (ahd_print_register(OPTIONMODE_parse_table, 7, "OPTIONMODE",
678 0x4a, regvalue, cur_col, wrap));
679}
680
681static const ahd_reg_parse_entry_t SBLKCTL_parse_table[] = {
682 { "SELWIDE", 0x02, 0x02 },
683 { "ENAB20", 0x04, 0x04 },
684 { "ENAB40", 0x08, 0x08 },
685 { "DIAGLEDON", 0x40, 0x40 },
686 { "DIAGLEDEN", 0x80, 0x80 }
687};
688
689int
690ahd_sblkctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
691{
692 return (ahd_print_register(SBLKCTL_parse_table, 5, "SBLKCTL",
693 0x4a, regvalue, cur_col, wrap));
694}
695
696static const ahd_reg_parse_entry_t SSTAT0_parse_table[] = { 237static const ahd_reg_parse_entry_t SSTAT0_parse_table[] = {
697 { "ARBDO", 0x01, 0x01 }, 238 { "ARBDO", 0x01, 0x01 },
698 { "SPIORDY", 0x02, 0x02 }, 239 { "SPIORDY", 0x02, 0x02 },
@@ -728,23 +269,6 @@ ahd_simode0_print(u_int regvalue, u_int *cur_col, u_int wrap)
728 0x4b, regvalue, cur_col, wrap)); 269 0x4b, regvalue, cur_col, wrap));
729} 270}
730 271
731static const ahd_reg_parse_entry_t CLRSINT0_parse_table[] = {
732 { "CLRARBDO", 0x01, 0x01 },
733 { "CLRSPIORDY", 0x02, 0x02 },
734 { "CLROVERRUN", 0x04, 0x04 },
735 { "CLRIOERR", 0x08, 0x08 },
736 { "CLRSELINGO", 0x10, 0x10 },
737 { "CLRSELDI", 0x20, 0x20 },
738 { "CLRSELDO", 0x40, 0x40 }
739};
740
741int
742ahd_clrsint0_print(u_int regvalue, u_int *cur_col, u_int wrap)
743{
744 return (ahd_print_register(CLRSINT0_parse_table, 7, "CLRSINT0",
745 0x4b, regvalue, cur_col, wrap));
746}
747
748static const ahd_reg_parse_entry_t SSTAT1_parse_table[] = { 272static const ahd_reg_parse_entry_t SSTAT1_parse_table[] = {
749 { "REQINIT", 0x01, 0x01 }, 273 { "REQINIT", 0x01, 0x01 },
750 { "STRB2FAST", 0x02, 0x02 }, 274 { "STRB2FAST", 0x02, 0x02 },
@@ -763,23 +287,6 @@ ahd_sstat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
763 0x4c, regvalue, cur_col, wrap)); 287 0x4c, regvalue, cur_col, wrap));
764} 288}
765 289
766static const ahd_reg_parse_entry_t CLRSINT1_parse_table[] = {
767 { "CLRREQINIT", 0x01, 0x01 },
768 { "CLRSTRB2FAST", 0x02, 0x02 },
769 { "CLRSCSIPERR", 0x04, 0x04 },
770 { "CLRBUSFREE", 0x08, 0x08 },
771 { "CLRSCSIRSTI", 0x20, 0x20 },
772 { "CLRATNO", 0x40, 0x40 },
773 { "CLRSELTIMEO", 0x80, 0x80 }
774};
775
776int
777ahd_clrsint1_print(u_int regvalue, u_int *cur_col, u_int wrap)
778{
779 return (ahd_print_register(CLRSINT1_parse_table, 7, "CLRSINT1",
780 0x4c, regvalue, cur_col, wrap));
781}
782
783static const ahd_reg_parse_entry_t SSTAT2_parse_table[] = { 290static const ahd_reg_parse_entry_t SSTAT2_parse_table[] = {
784 { "BUSFREE_LQO", 0x40, 0xc0 }, 291 { "BUSFREE_LQO", 0x40, 0xc0 },
785 { "BUSFREE_DFF0", 0x80, 0xc0 }, 292 { "BUSFREE_DFF0", 0x80, 0xc0 },
@@ -800,20 +307,6 @@ ahd_sstat2_print(u_int regvalue, u_int *cur_col, u_int wrap)
800 0x4d, regvalue, cur_col, wrap)); 307 0x4d, regvalue, cur_col, wrap));
801} 308}
802 309
803static const ahd_reg_parse_entry_t CLRSINT2_parse_table[] = {
804 { "CLRDMADONE", 0x01, 0x01 },
805 { "CLRSDONE", 0x02, 0x02 },
806 { "CLRWIDE_RES", 0x04, 0x04 },
807 { "CLRNONPACKREQ", 0x20, 0x20 }
808};
809
810int
811ahd_clrsint2_print(u_int regvalue, u_int *cur_col, u_int wrap)
812{
813 return (ahd_print_register(CLRSINT2_parse_table, 4, "CLRSINT2",
814 0x4d, regvalue, cur_col, wrap));
815}
816
817static const ahd_reg_parse_entry_t PERRDIAG_parse_table[] = { 310static const ahd_reg_parse_entry_t PERRDIAG_parse_table[] = {
818 { "DTERR", 0x01, 0x01 }, 311 { "DTERR", 0x01, 0x01 },
819 { "DGFORMERR", 0x02, 0x02 }, 312 { "DGFORMERR", 0x02, 0x02 },
@@ -833,26 +326,12 @@ ahd_perrdiag_print(u_int regvalue, u_int *cur_col, u_int wrap)
833} 326}
834 327
835int 328int
836ahd_lqistate_print(u_int regvalue, u_int *cur_col, u_int wrap)
837{
838 return (ahd_print_register(NULL, 0, "LQISTATE",
839 0x4e, regvalue, cur_col, wrap));
840}
841
842int
843ahd_soffcnt_print(u_int regvalue, u_int *cur_col, u_int wrap) 329ahd_soffcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
844{ 330{
845 return (ahd_print_register(NULL, 0, "SOFFCNT", 331 return (ahd_print_register(NULL, 0, "SOFFCNT",
846 0x4f, regvalue, cur_col, wrap)); 332 0x4f, regvalue, cur_col, wrap));
847} 333}
848 334
849int
850ahd_lqostate_print(u_int regvalue, u_int *cur_col, u_int wrap)
851{
852 return (ahd_print_register(NULL, 0, "LQOSTATE",
853 0x4f, regvalue, cur_col, wrap));
854}
855
856static const ahd_reg_parse_entry_t LQISTAT0_parse_table[] = { 335static const ahd_reg_parse_entry_t LQISTAT0_parse_table[] = {
857 { "LQIATNCMD", 0x01, 0x01 }, 336 { "LQIATNCMD", 0x01, 0x01 },
858 { "LQIATNLQ", 0x02, 0x02 }, 337 { "LQIATNLQ", 0x02, 0x02 },
@@ -869,56 +348,6 @@ ahd_lqistat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
869 0x50, regvalue, cur_col, wrap)); 348 0x50, regvalue, cur_col, wrap));
870} 349}
871 350
872static const ahd_reg_parse_entry_t LQIMODE0_parse_table[] = {
873 { "ENLQIATNCMD", 0x01, 0x01 },
874 { "ENLQIATNLQ", 0x02, 0x02 },
875 { "ENLQIBADLQT", 0x04, 0x04 },
876 { "ENLQICRCT2", 0x08, 0x08 },
877 { "ENLQICRCT1", 0x10, 0x10 },
878 { "ENLQIATNQASK", 0x20, 0x20 }
879};
880
881int
882ahd_lqimode0_print(u_int regvalue, u_int *cur_col, u_int wrap)
883{
884 return (ahd_print_register(LQIMODE0_parse_table, 6, "LQIMODE0",
885 0x50, regvalue, cur_col, wrap));
886}
887
888static const ahd_reg_parse_entry_t CLRLQIINT0_parse_table[] = {
889 { "CLRLQIATNCMD", 0x01, 0x01 },
890 { "CLRLQIATNLQ", 0x02, 0x02 },
891 { "CLRLQIBADLQT", 0x04, 0x04 },
892 { "CLRLQICRCT2", 0x08, 0x08 },
893 { "CLRLQICRCT1", 0x10, 0x10 },
894 { "CLRLQIATNQAS", 0x20, 0x20 }
895};
896
897int
898ahd_clrlqiint0_print(u_int regvalue, u_int *cur_col, u_int wrap)
899{
900 return (ahd_print_register(CLRLQIINT0_parse_table, 6, "CLRLQIINT0",
901 0x50, regvalue, cur_col, wrap));
902}
903
904static const ahd_reg_parse_entry_t LQIMODE1_parse_table[] = {
905 { "ENLQIOVERI_NLQ", 0x01, 0x01 },
906 { "ENLQIOVERI_LQ", 0x02, 0x02 },
907 { "ENLQIBADLQI", 0x04, 0x04 },
908 { "ENLQICRCI_NLQ", 0x08, 0x08 },
909 { "ENLQICRCI_LQ", 0x10, 0x10 },
910 { "ENLIQABORT", 0x20, 0x20 },
911 { "ENLQIPHASE_NLQ", 0x40, 0x40 },
912 { "ENLQIPHASE_LQ", 0x80, 0x80 }
913};
914
915int
916ahd_lqimode1_print(u_int regvalue, u_int *cur_col, u_int wrap)
917{
918 return (ahd_print_register(LQIMODE1_parse_table, 8, "LQIMODE1",
919 0x51, regvalue, cur_col, wrap));
920}
921
922static const ahd_reg_parse_entry_t LQISTAT1_parse_table[] = { 351static const ahd_reg_parse_entry_t LQISTAT1_parse_table[] = {
923 { "LQIOVERI_NLQ", 0x01, 0x01 }, 352 { "LQIOVERI_NLQ", 0x01, 0x01 },
924 { "LQIOVERI_LQ", 0x02, 0x02 }, 353 { "LQIOVERI_LQ", 0x02, 0x02 },
@@ -937,24 +366,6 @@ ahd_lqistat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
937 0x51, regvalue, cur_col, wrap)); 366 0x51, regvalue, cur_col, wrap));
938} 367}
939 368
940static const ahd_reg_parse_entry_t CLRLQIINT1_parse_table[] = {
941 { "CLRLQIOVERI_NLQ", 0x01, 0x01 },
942 { "CLRLQIOVERI_LQ", 0x02, 0x02 },
943 { "CLRLQIBADLQI", 0x04, 0x04 },
944 { "CLRLQICRCI_NLQ", 0x08, 0x08 },
945 { "CLRLQICRCI_LQ", 0x10, 0x10 },
946 { "CLRLIQABORT", 0x20, 0x20 },
947 { "CLRLQIPHASE_NLQ", 0x40, 0x40 },
948 { "CLRLQIPHASE_LQ", 0x80, 0x80 }
949};
950
951int
952ahd_clrlqiint1_print(u_int regvalue, u_int *cur_col, u_int wrap)
953{
954 return (ahd_print_register(CLRLQIINT1_parse_table, 8, "CLRLQIINT1",
955 0x51, regvalue, cur_col, wrap));
956}
957
958static const ahd_reg_parse_entry_t LQISTAT2_parse_table[] = { 369static const ahd_reg_parse_entry_t LQISTAT2_parse_table[] = {
959 { "LQIGSAVAIL", 0x01, 0x01 }, 370 { "LQIGSAVAIL", 0x01, 0x01 },
960 { "LQISTOPCMD", 0x02, 0x02 }, 371 { "LQISTOPCMD", 0x02, 0x02 },
@@ -985,30 +396,6 @@ ahd_sstat3_print(u_int regvalue, u_int *cur_col, u_int wrap)
985 0x53, regvalue, cur_col, wrap)); 396 0x53, regvalue, cur_col, wrap));
986} 397}
987 398
988static const ahd_reg_parse_entry_t SIMODE3_parse_table[] = {
989 { "ENOSRAMPERR", 0x01, 0x01 },
990 { "ENNTRAMPERR", 0x02, 0x02 }
991};
992
993int
994ahd_simode3_print(u_int regvalue, u_int *cur_col, u_int wrap)
995{
996 return (ahd_print_register(SIMODE3_parse_table, 2, "SIMODE3",
997 0x53, regvalue, cur_col, wrap));
998}
999
1000static const ahd_reg_parse_entry_t CLRSINT3_parse_table[] = {
1001 { "CLROSRAMPERR", 0x01, 0x01 },
1002 { "CLRNTRAMPERR", 0x02, 0x02 }
1003};
1004
1005int
1006ahd_clrsint3_print(u_int regvalue, u_int *cur_col, u_int wrap)
1007{
1008 return (ahd_print_register(CLRSINT3_parse_table, 2, "CLRSINT3",
1009 0x53, regvalue, cur_col, wrap));
1010}
1011
1012static const ahd_reg_parse_entry_t LQOSTAT0_parse_table[] = { 399static const ahd_reg_parse_entry_t LQOSTAT0_parse_table[] = {
1013 { "LQOTCRC", 0x01, 0x01 }, 400 { "LQOTCRC", 0x01, 0x01 },
1014 { "LQOATNPKT", 0x02, 0x02 }, 401 { "LQOATNPKT", 0x02, 0x02 },
@@ -1024,51 +411,6 @@ ahd_lqostat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
1024 0x54, regvalue, cur_col, wrap)); 411 0x54, regvalue, cur_col, wrap));
1025} 412}
1026 413
1027static const ahd_reg_parse_entry_t CLRLQOINT0_parse_table[] = {
1028 { "CLRLQOTCRC", 0x01, 0x01 },
1029 { "CLRLQOATNPKT", 0x02, 0x02 },
1030 { "CLRLQOATNLQ", 0x04, 0x04 },
1031 { "CLRLQOSTOPT2", 0x08, 0x08 },
1032 { "CLRLQOTARGSCBPERR", 0x10, 0x10 }
1033};
1034
1035int
1036ahd_clrlqoint0_print(u_int regvalue, u_int *cur_col, u_int wrap)
1037{
1038 return (ahd_print_register(CLRLQOINT0_parse_table, 5, "CLRLQOINT0",
1039 0x54, regvalue, cur_col, wrap));
1040}
1041
1042static const ahd_reg_parse_entry_t LQOMODE0_parse_table[] = {
1043 { "ENLQOTCRC", 0x01, 0x01 },
1044 { "ENLQOATNPKT", 0x02, 0x02 },
1045 { "ENLQOATNLQ", 0x04, 0x04 },
1046 { "ENLQOSTOPT2", 0x08, 0x08 },
1047 { "ENLQOTARGSCBPERR", 0x10, 0x10 }
1048};
1049
1050int
1051ahd_lqomode0_print(u_int regvalue, u_int *cur_col, u_int wrap)
1052{
1053 return (ahd_print_register(LQOMODE0_parse_table, 5, "LQOMODE0",
1054 0x54, regvalue, cur_col, wrap));
1055}
1056
1057static const ahd_reg_parse_entry_t LQOMODE1_parse_table[] = {
1058 { "ENLQOPHACHGINPKT", 0x01, 0x01 },
1059 { "ENLQOBUSFREE", 0x02, 0x02 },
1060 { "ENLQOBADQAS", 0x04, 0x04 },
1061 { "ENLQOSTOPI2", 0x08, 0x08 },
1062 { "ENLQOINITSCBPERR", 0x10, 0x10 }
1063};
1064
1065int
1066ahd_lqomode1_print(u_int regvalue, u_int *cur_col, u_int wrap)
1067{
1068 return (ahd_print_register(LQOMODE1_parse_table, 5, "LQOMODE1",
1069 0x55, regvalue, cur_col, wrap));
1070}
1071
1072static const ahd_reg_parse_entry_t LQOSTAT1_parse_table[] = { 414static const ahd_reg_parse_entry_t LQOSTAT1_parse_table[] = {
1073 { "LQOPHACHGINPKT", 0x01, 0x01 }, 415 { "LQOPHACHGINPKT", 0x01, 0x01 },
1074 { "LQOBUSFREE", 0x02, 0x02 }, 416 { "LQOBUSFREE", 0x02, 0x02 },
@@ -1084,21 +426,6 @@ ahd_lqostat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
1084 0x55, regvalue, cur_col, wrap)); 426 0x55, regvalue, cur_col, wrap));
1085} 427}
1086 428
1087static const ahd_reg_parse_entry_t CLRLQOINT1_parse_table[] = {
1088 { "CLRLQOPHACHGINPKT", 0x01, 0x01 },
1089 { "CLRLQOBUSFREE", 0x02, 0x02 },
1090 { "CLRLQOBADQAS", 0x04, 0x04 },
1091 { "CLRLQOSTOPI2", 0x08, 0x08 },
1092 { "CLRLQOINITSCBPERR", 0x10, 0x10 }
1093};
1094
1095int
1096ahd_clrlqoint1_print(u_int regvalue, u_int *cur_col, u_int wrap)
1097{
1098 return (ahd_print_register(CLRLQOINT1_parse_table, 5, "CLRLQOINT1",
1099 0x55, regvalue, cur_col, wrap));
1100}
1101
1102static const ahd_reg_parse_entry_t LQOSTAT2_parse_table[] = { 429static const ahd_reg_parse_entry_t LQOSTAT2_parse_table[] = {
1103 { "LQOSTOP0", 0x01, 0x01 }, 430 { "LQOSTOP0", 0x01, 0x01 },
1104 { "LQOPHACHGOUTPKT", 0x02, 0x02 }, 431 { "LQOPHACHGOUTPKT", 0x02, 0x02 },
@@ -1113,13 +440,6 @@ ahd_lqostat2_print(u_int regvalue, u_int *cur_col, u_int wrap)
1113 0x56, regvalue, cur_col, wrap)); 440 0x56, regvalue, cur_col, wrap));
1114} 441}
1115 442
1116int
1117ahd_os_space_cnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
1118{
1119 return (ahd_print_register(NULL, 0, "OS_SPACE_CNT",
1120 0x56, regvalue, cur_col, wrap));
1121}
1122
1123static const ahd_reg_parse_entry_t SIMODE1_parse_table[] = { 443static const ahd_reg_parse_entry_t SIMODE1_parse_table[] = {
1124 { "ENREQINIT", 0x01, 0x01 }, 444 { "ENREQINIT", 0x01, 0x01 },
1125 { "ENSTRB2FAST", 0x02, 0x02 }, 445 { "ENSTRB2FAST", 0x02, 0x02 },
@@ -1138,13 +458,6 @@ ahd_simode1_print(u_int regvalue, u_int *cur_col, u_int wrap)
1138 0x57, regvalue, cur_col, wrap)); 458 0x57, regvalue, cur_col, wrap));
1139} 459}
1140 460
1141int
1142ahd_gsfifo_print(u_int regvalue, u_int *cur_col, u_int wrap)
1143{
1144 return (ahd_print_register(NULL, 0, "GSFIFO",
1145 0x58, regvalue, cur_col, wrap));
1146}
1147
1148static const ahd_reg_parse_entry_t DFFSXFRCTL_parse_table[] = { 461static const ahd_reg_parse_entry_t DFFSXFRCTL_parse_table[] = {
1149 { "RSTCHN", 0x01, 0x01 }, 462 { "RSTCHN", 0x01, 0x01 },
1150 { "CLRCHN", 0x02, 0x02 }, 463 { "CLRCHN", 0x02, 0x02 },
@@ -1159,44 +472,6 @@ ahd_dffsxfrctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
1159 0x5a, regvalue, cur_col, wrap)); 472 0x5a, regvalue, cur_col, wrap));
1160} 473}
1161 474
1162static const ahd_reg_parse_entry_t LQOSCSCTL_parse_table[] = {
1163 { "LQONOCHKOVER", 0x01, 0x01 },
1164 { "LQONOHOLDLACK", 0x02, 0x02 },
1165 { "LQOBUSETDLY", 0x40, 0x40 },
1166 { "LQOH2A_VERSION", 0x80, 0x80 }
1167};
1168
1169int
1170ahd_lqoscsctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
1171{
1172 return (ahd_print_register(LQOSCSCTL_parse_table, 4, "LQOSCSCTL",
1173 0x5a, regvalue, cur_col, wrap));
1174}
1175
1176int
1177ahd_nextscb_print(u_int regvalue, u_int *cur_col, u_int wrap)
1178{
1179 return (ahd_print_register(NULL, 0, "NEXTSCB",
1180 0x5a, regvalue, cur_col, wrap));
1181}
1182
1183static const ahd_reg_parse_entry_t CLRSEQINTSRC_parse_table[] = {
1184 { "CLRCFG4TCMD", 0x01, 0x01 },
1185 { "CLRCFG4ICMD", 0x02, 0x02 },
1186 { "CLRCFG4TSTAT", 0x04, 0x04 },
1187 { "CLRCFG4ISTAT", 0x08, 0x08 },
1188 { "CLRCFG4DATA", 0x10, 0x10 },
1189 { "CLRSAVEPTRS", 0x20, 0x20 },
1190 { "CLRCTXTDONE", 0x40, 0x40 }
1191};
1192
1193int
1194ahd_clrseqintsrc_print(u_int regvalue, u_int *cur_col, u_int wrap)
1195{
1196 return (ahd_print_register(CLRSEQINTSRC_parse_table, 7, "CLRSEQINTSRC",
1197 0x5b, regvalue, cur_col, wrap));
1198}
1199
1200static const ahd_reg_parse_entry_t SEQINTSRC_parse_table[] = { 475static const ahd_reg_parse_entry_t SEQINTSRC_parse_table[] = {
1201 { "CFG4TCMD", 0x01, 0x01 }, 476 { "CFG4TCMD", 0x01, 0x01 },
1202 { "CFG4ICMD", 0x02, 0x02 }, 477 { "CFG4ICMD", 0x02, 0x02 },
@@ -1231,13 +506,6 @@ ahd_seqimode_print(u_int regvalue, u_int *cur_col, u_int wrap)
1231 0x5c, regvalue, cur_col, wrap)); 506 0x5c, regvalue, cur_col, wrap));
1232} 507}
1233 508
1234int
1235ahd_currscb_print(u_int regvalue, u_int *cur_col, u_int wrap)
1236{
1237 return (ahd_print_register(NULL, 0, "CURRSCB",
1238 0x5c, regvalue, cur_col, wrap));
1239}
1240
1241static const ahd_reg_parse_entry_t MDFFSTAT_parse_table[] = { 509static const ahd_reg_parse_entry_t MDFFSTAT_parse_table[] = {
1242 { "FIFOFREE", 0x01, 0x01 }, 510 { "FIFOFREE", 0x01, 0x01 },
1243 { "DATAINFIFO", 0x02, 0x02 }, 511 { "DATAINFIFO", 0x02, 0x02 },
@@ -1256,308 +524,12 @@ ahd_mdffstat_print(u_int regvalue, u_int *cur_col, u_int wrap)
1256} 524}
1257 525
1258int 526int
1259ahd_lastscb_print(u_int regvalue, u_int *cur_col, u_int wrap)
1260{
1261 return (ahd_print_register(NULL, 0, "LASTSCB",
1262 0x5e, regvalue, cur_col, wrap));
1263}
1264
1265int
1266ahd_shaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1267{
1268 return (ahd_print_register(NULL, 0, "SHADDR",
1269 0x60, regvalue, cur_col, wrap));
1270}
1271
1272int
1273ahd_negoaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1274{
1275 return (ahd_print_register(NULL, 0, "NEGOADDR",
1276 0x60, regvalue, cur_col, wrap));
1277}
1278
1279int
1280ahd_negperiod_print(u_int regvalue, u_int *cur_col, u_int wrap)
1281{
1282 return (ahd_print_register(NULL, 0, "NEGPERIOD",
1283 0x61, regvalue, cur_col, wrap));
1284}
1285
1286int
1287ahd_negoffset_print(u_int regvalue, u_int *cur_col, u_int wrap)
1288{
1289 return (ahd_print_register(NULL, 0, "NEGOFFSET",
1290 0x62, regvalue, cur_col, wrap));
1291}
1292
1293static const ahd_reg_parse_entry_t NEGPPROPTS_parse_table[] = {
1294 { "PPROPT_IUT", 0x01, 0x01 },
1295 { "PPROPT_DT", 0x02, 0x02 },
1296 { "PPROPT_QAS", 0x04, 0x04 },
1297 { "PPROPT_PACE", 0x08, 0x08 }
1298};
1299
1300int
1301ahd_negppropts_print(u_int regvalue, u_int *cur_col, u_int wrap)
1302{
1303 return (ahd_print_register(NEGPPROPTS_parse_table, 4, "NEGPPROPTS",
1304 0x63, regvalue, cur_col, wrap));
1305}
1306
1307static const ahd_reg_parse_entry_t NEGCONOPTS_parse_table[] = {
1308 { "WIDEXFER", 0x01, 0x01 },
1309 { "ENAUTOATNO", 0x02, 0x02 },
1310 { "ENAUTOATNI", 0x04, 0x04 },
1311 { "ENSLOWCRC", 0x08, 0x08 },
1312 { "RTI_OVRDTRN", 0x10, 0x10 },
1313 { "RTI_WRTDIS", 0x20, 0x20 },
1314 { "ENSNAPSHOT", 0x40, 0x40 }
1315};
1316
1317int
1318ahd_negconopts_print(u_int regvalue, u_int *cur_col, u_int wrap)
1319{
1320 return (ahd_print_register(NEGCONOPTS_parse_table, 7, "NEGCONOPTS",
1321 0x64, regvalue, cur_col, wrap));
1322}
1323
1324int
1325ahd_annexcol_print(u_int regvalue, u_int *cur_col, u_int wrap)
1326{
1327 return (ahd_print_register(NULL, 0, "ANNEXCOL",
1328 0x65, regvalue, cur_col, wrap));
1329}
1330
1331int
1332ahd_annexdat_print(u_int regvalue, u_int *cur_col, u_int wrap)
1333{
1334 return (ahd_print_register(NULL, 0, "ANNEXDAT",
1335 0x66, regvalue, cur_col, wrap));
1336}
1337
1338static const ahd_reg_parse_entry_t SCSCHKN_parse_table[] = {
1339 { "LSTSGCLRDIS", 0x01, 0x01 },
1340 { "SHVALIDSTDIS", 0x02, 0x02 },
1341 { "DFFACTCLR", 0x04, 0x04 },
1342 { "SDONEMSKDIS", 0x08, 0x08 },
1343 { "WIDERESEN", 0x10, 0x10 },
1344 { "CURRFIFODEF", 0x20, 0x20 },
1345 { "STSELSKIDDIS", 0x40, 0x40 },
1346 { "BIDICHKDIS", 0x80, 0x80 }
1347};
1348
1349int
1350ahd_scschkn_print(u_int regvalue, u_int *cur_col, u_int wrap)
1351{
1352 return (ahd_print_register(SCSCHKN_parse_table, 8, "SCSCHKN",
1353 0x66, regvalue, cur_col, wrap));
1354}
1355
1356int
1357ahd_iownid_print(u_int regvalue, u_int *cur_col, u_int wrap)
1358{
1359 return (ahd_print_register(NULL, 0, "IOWNID",
1360 0x67, regvalue, cur_col, wrap));
1361}
1362
1363int
1364ahd_shcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
1365{
1366 return (ahd_print_register(NULL, 0, "SHCNT",
1367 0x68, regvalue, cur_col, wrap));
1368}
1369
1370int
1371ahd_townid_print(u_int regvalue, u_int *cur_col, u_int wrap)
1372{
1373 return (ahd_print_register(NULL, 0, "TOWNID",
1374 0x69, regvalue, cur_col, wrap));
1375}
1376
1377int
1378ahd_seloid_print(u_int regvalue, u_int *cur_col, u_int wrap) 527ahd_seloid_print(u_int regvalue, u_int *cur_col, u_int wrap)
1379{ 528{
1380 return (ahd_print_register(NULL, 0, "SELOID", 529 return (ahd_print_register(NULL, 0, "SELOID",
1381 0x6b, regvalue, cur_col, wrap)); 530 0x6b, regvalue, cur_col, wrap));
1382} 531}
1383 532
1384int
1385ahd_haddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1386{
1387 return (ahd_print_register(NULL, 0, "HADDR",
1388 0x70, regvalue, cur_col, wrap));
1389}
1390
1391int
1392ahd_hcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
1393{
1394 return (ahd_print_register(NULL, 0, "HCNT",
1395 0x78, regvalue, cur_col, wrap));
1396}
1397
1398int
1399ahd_sghaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1400{
1401 return (ahd_print_register(NULL, 0, "SGHADDR",
1402 0x7c, regvalue, cur_col, wrap));
1403}
1404
1405int
1406ahd_scbhaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1407{
1408 return (ahd_print_register(NULL, 0, "SCBHADDR",
1409 0x7c, regvalue, cur_col, wrap));
1410}
1411
1412int
1413ahd_sghcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
1414{
1415 return (ahd_print_register(NULL, 0, "SGHCNT",
1416 0x84, regvalue, cur_col, wrap));
1417}
1418
1419int
1420ahd_scbhcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
1421{
1422 return (ahd_print_register(NULL, 0, "SCBHCNT",
1423 0x84, regvalue, cur_col, wrap));
1424}
1425
1426static const ahd_reg_parse_entry_t DFF_THRSH_parse_table[] = {
1427 { "WR_DFTHRSH_MIN", 0x00, 0x70 },
1428 { "RD_DFTHRSH_MIN", 0x00, 0x07 },
1429 { "RD_DFTHRSH_25", 0x01, 0x07 },
1430 { "RD_DFTHRSH_50", 0x02, 0x07 },
1431 { "RD_DFTHRSH_63", 0x03, 0x07 },
1432 { "RD_DFTHRSH_75", 0x04, 0x07 },
1433 { "RD_DFTHRSH_85", 0x05, 0x07 },
1434 { "RD_DFTHRSH_90", 0x06, 0x07 },
1435 { "RD_DFTHRSH_MAX", 0x07, 0x07 },
1436 { "WR_DFTHRSH_25", 0x10, 0x70 },
1437 { "WR_DFTHRSH_50", 0x20, 0x70 },
1438 { "WR_DFTHRSH_63", 0x30, 0x70 },
1439 { "WR_DFTHRSH_75", 0x40, 0x70 },
1440 { "WR_DFTHRSH_85", 0x50, 0x70 },
1441 { "WR_DFTHRSH_90", 0x60, 0x70 },
1442 { "WR_DFTHRSH_MAX", 0x70, 0x70 },
1443 { "RD_DFTHRSH", 0x07, 0x07 },
1444 { "WR_DFTHRSH", 0x70, 0x70 }
1445};
1446
1447int
1448ahd_dff_thrsh_print(u_int regvalue, u_int *cur_col, u_int wrap)
1449{
1450 return (ahd_print_register(DFF_THRSH_parse_table, 18, "DFF_THRSH",
1451 0x88, regvalue, cur_col, wrap));
1452}
1453
1454static const ahd_reg_parse_entry_t PCIXCTL_parse_table[] = {
1455 { "CMPABCDIS", 0x01, 0x01 },
1456 { "TSCSERREN", 0x02, 0x02 },
1457 { "SRSPDPEEN", 0x04, 0x04 },
1458 { "SPLTSTADIS", 0x08, 0x08 },
1459 { "SPLTSMADIS", 0x10, 0x10 },
1460 { "UNEXPSCIEN", 0x20, 0x20 },
1461 { "SERRPULSE", 0x80, 0x80 }
1462};
1463
1464int
1465ahd_pcixctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
1466{
1467 return (ahd_print_register(PCIXCTL_parse_table, 7, "PCIXCTL",
1468 0x93, regvalue, cur_col, wrap));
1469}
1470
1471static const ahd_reg_parse_entry_t DCHSPLTSTAT0_parse_table[] = {
1472 { "RXSPLTRSP", 0x01, 0x01 },
1473 { "RXSCEMSG", 0x02, 0x02 },
1474 { "RXOVRUN", 0x04, 0x04 },
1475 { "CNTNOTCMPLT", 0x08, 0x08 },
1476 { "SCDATBUCKET", 0x10, 0x10 },
1477 { "SCADERR", 0x20, 0x20 },
1478 { "SCBCERR", 0x40, 0x40 },
1479 { "STAETERM", 0x80, 0x80 }
1480};
1481
1482int
1483ahd_dchspltstat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
1484{
1485 return (ahd_print_register(DCHSPLTSTAT0_parse_table, 8, "DCHSPLTSTAT0",
1486 0x96, regvalue, cur_col, wrap));
1487}
1488
1489static const ahd_reg_parse_entry_t DCHSPLTSTAT1_parse_table[] = {
1490 { "RXDATABUCKET", 0x01, 0x01 }
1491};
1492
1493int
1494ahd_dchspltstat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
1495{
1496 return (ahd_print_register(DCHSPLTSTAT1_parse_table, 1, "DCHSPLTSTAT1",
1497 0x97, regvalue, cur_col, wrap));
1498}
1499
1500static const ahd_reg_parse_entry_t SGSPLTSTAT0_parse_table[] = {
1501 { "RXSPLTRSP", 0x01, 0x01 },
1502 { "RXSCEMSG", 0x02, 0x02 },
1503 { "RXOVRUN", 0x04, 0x04 },
1504 { "CNTNOTCMPLT", 0x08, 0x08 },
1505 { "SCDATBUCKET", 0x10, 0x10 },
1506 { "SCADERR", 0x20, 0x20 },
1507 { "SCBCERR", 0x40, 0x40 },
1508 { "STAETERM", 0x80, 0x80 }
1509};
1510
1511int
1512ahd_sgspltstat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
1513{
1514 return (ahd_print_register(SGSPLTSTAT0_parse_table, 8, "SGSPLTSTAT0",
1515 0x9e, regvalue, cur_col, wrap));
1516}
1517
1518static const ahd_reg_parse_entry_t SGSPLTSTAT1_parse_table[] = {
1519 { "RXDATABUCKET", 0x01, 0x01 }
1520};
1521
1522int
1523ahd_sgspltstat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
1524{
1525 return (ahd_print_register(SGSPLTSTAT1_parse_table, 1, "SGSPLTSTAT1",
1526 0x9f, regvalue, cur_col, wrap));
1527}
1528
1529static const ahd_reg_parse_entry_t DF0PCISTAT_parse_table[] = {
1530 { "DPR", 0x01, 0x01 },
1531 { "TWATERR", 0x02, 0x02 },
1532 { "RDPERR", 0x04, 0x04 },
1533 { "SCAAPERR", 0x08, 0x08 },
1534 { "RTA", 0x10, 0x10 },
1535 { "RMA", 0x20, 0x20 },
1536 { "SSE", 0x40, 0x40 },
1537 { "DPE", 0x80, 0x80 }
1538};
1539
1540int
1541ahd_df0pcistat_print(u_int regvalue, u_int *cur_col, u_int wrap)
1542{
1543 return (ahd_print_register(DF0PCISTAT_parse_table, 8, "DF0PCISTAT",
1544 0xa0, regvalue, cur_col, wrap));
1545}
1546
1547int
1548ahd_reg0_print(u_int regvalue, u_int *cur_col, u_int wrap)
1549{
1550 return (ahd_print_register(NULL, 0, "REG0",
1551 0xa0, regvalue, cur_col, wrap));
1552}
1553
1554int
1555ahd_reg_isr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1556{
1557 return (ahd_print_register(NULL, 0, "REG_ISR",
1558 0xa4, regvalue, cur_col, wrap));
1559}
1560
1561static const ahd_reg_parse_entry_t SG_STATE_parse_table[] = { 533static const ahd_reg_parse_entry_t SG_STATE_parse_table[] = {
1562 { "SEGS_AVAIL", 0x01, 0x01 }, 534 { "SEGS_AVAIL", 0x01, 0x01 },
1563 { "LOADING_NEEDED", 0x02, 0x02 }, 535 { "LOADING_NEEDED", 0x02, 0x02 },
@@ -1571,54 +543,6 @@ ahd_sg_state_print(u_int regvalue, u_int *cur_col, u_int wrap)
1571 0xa6, regvalue, cur_col, wrap)); 543 0xa6, regvalue, cur_col, wrap));
1572} 544}
1573 545
1574static const ahd_reg_parse_entry_t TARGPCISTAT_parse_table[] = {
1575 { "TWATERR", 0x02, 0x02 },
1576 { "STA", 0x08, 0x08 },
1577 { "SSE", 0x40, 0x40 },
1578 { "DPE", 0x80, 0x80 }
1579};
1580
1581int
1582ahd_targpcistat_print(u_int regvalue, u_int *cur_col, u_int wrap)
1583{
1584 return (ahd_print_register(TARGPCISTAT_parse_table, 4, "TARGPCISTAT",
1585 0xa7, regvalue, cur_col, wrap));
1586}
1587
1588int
1589ahd_scbptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1590{
1591 return (ahd_print_register(NULL, 0, "SCBPTR",
1592 0xa8, regvalue, cur_col, wrap));
1593}
1594
1595static const ahd_reg_parse_entry_t SCBAUTOPTR_parse_table[] = {
1596 { "SCBPTR_OFF", 0x07, 0x07 },
1597 { "SCBPTR_ADDR", 0x38, 0x38 },
1598 { "AUSCBPTR_EN", 0x80, 0x80 }
1599};
1600
1601int
1602ahd_scbautoptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1603{
1604 return (ahd_print_register(SCBAUTOPTR_parse_table, 3, "SCBAUTOPTR",
1605 0xab, regvalue, cur_col, wrap));
1606}
1607
1608int
1609ahd_ccsgaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1610{
1611 return (ahd_print_register(NULL, 0, "CCSGADDR",
1612 0xac, regvalue, cur_col, wrap));
1613}
1614
1615int
1616ahd_ccscbaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1617{
1618 return (ahd_print_register(NULL, 0, "CCSCBADDR",
1619 0xac, regvalue, cur_col, wrap));
1620}
1621
1622static const ahd_reg_parse_entry_t CCSCBCTL_parse_table[] = { 546static const ahd_reg_parse_entry_t CCSCBCTL_parse_table[] = {
1623 { "CCSCBRESET", 0x01, 0x01 }, 547 { "CCSCBRESET", 0x01, 0x01 },
1624 { "CCSCBDIR", 0x04, 0x04 }, 548 { "CCSCBDIR", 0x04, 0x04 },
@@ -1651,138 +575,6 @@ ahd_ccsgctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
1651 0xad, regvalue, cur_col, wrap)); 575 0xad, regvalue, cur_col, wrap));
1652} 576}
1653 577
1654int
1655ahd_ccsgram_print(u_int regvalue, u_int *cur_col, u_int wrap)
1656{
1657 return (ahd_print_register(NULL, 0, "CCSGRAM",
1658 0xb0, regvalue, cur_col, wrap));
1659}
1660
1661int
1662ahd_ccscbram_print(u_int regvalue, u_int *cur_col, u_int wrap)
1663{
1664 return (ahd_print_register(NULL, 0, "CCSCBRAM",
1665 0xb0, regvalue, cur_col, wrap));
1666}
1667
1668int
1669ahd_brddat_print(u_int regvalue, u_int *cur_col, u_int wrap)
1670{
1671 return (ahd_print_register(NULL, 0, "BRDDAT",
1672 0xb8, regvalue, cur_col, wrap));
1673}
1674
1675static const ahd_reg_parse_entry_t BRDCTL_parse_table[] = {
1676 { "BRDSTB", 0x01, 0x01 },
1677 { "BRDRW", 0x02, 0x02 },
1678 { "BRDEN", 0x04, 0x04 },
1679 { "BRDADDR", 0x38, 0x38 },
1680 { "FLXARBREQ", 0x40, 0x40 },
1681 { "FLXARBACK", 0x80, 0x80 }
1682};
1683
1684int
1685ahd_brdctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
1686{
1687 return (ahd_print_register(BRDCTL_parse_table, 6, "BRDCTL",
1688 0xb9, regvalue, cur_col, wrap));
1689}
1690
1691int
1692ahd_seeadr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1693{
1694 return (ahd_print_register(NULL, 0, "SEEADR",
1695 0xba, regvalue, cur_col, wrap));
1696}
1697
1698int
1699ahd_seedat_print(u_int regvalue, u_int *cur_col, u_int wrap)
1700{
1701 return (ahd_print_register(NULL, 0, "SEEDAT",
1702 0xbc, regvalue, cur_col, wrap));
1703}
1704
1705static const ahd_reg_parse_entry_t SEECTL_parse_table[] = {
1706 { "SEEOP_ERAL", 0x40, 0x70 },
1707 { "SEEOP_WRITE", 0x50, 0x70 },
1708 { "SEEOP_READ", 0x60, 0x70 },
1709 { "SEEOP_ERASE", 0x70, 0x70 },
1710 { "SEESTART", 0x01, 0x01 },
1711 { "SEERST", 0x02, 0x02 },
1712 { "SEEOPCODE", 0x70, 0x70 },
1713 { "SEEOP_EWEN", 0x40, 0x40 },
1714 { "SEEOP_WALL", 0x40, 0x40 },
1715 { "SEEOP_EWDS", 0x40, 0x40 }
1716};
1717
1718int
1719ahd_seectl_print(u_int regvalue, u_int *cur_col, u_int wrap)
1720{
1721 return (ahd_print_register(SEECTL_parse_table, 10, "SEECTL",
1722 0xbe, regvalue, cur_col, wrap));
1723}
1724
1725static const ahd_reg_parse_entry_t SEESTAT_parse_table[] = {
1726 { "SEESTART", 0x01, 0x01 },
1727 { "SEEBUSY", 0x02, 0x02 },
1728 { "SEEARBACK", 0x04, 0x04 },
1729 { "LDALTID_L", 0x08, 0x08 },
1730 { "SEEOPCODE", 0x70, 0x70 },
1731 { "INIT_DONE", 0x80, 0x80 }
1732};
1733
1734int
1735ahd_seestat_print(u_int regvalue, u_int *cur_col, u_int wrap)
1736{
1737 return (ahd_print_register(SEESTAT_parse_table, 6, "SEESTAT",
1738 0xbe, regvalue, cur_col, wrap));
1739}
1740
1741static const ahd_reg_parse_entry_t DSPDATACTL_parse_table[] = {
1742 { "XMITOFFSTDIS", 0x02, 0x02 },
1743 { "RCVROFFSTDIS", 0x04, 0x04 },
1744 { "DESQDIS", 0x10, 0x10 },
1745 { "BYPASSENAB", 0x80, 0x80 }
1746};
1747
1748int
1749ahd_dspdatactl_print(u_int regvalue, u_int *cur_col, u_int wrap)
1750{
1751 return (ahd_print_register(DSPDATACTL_parse_table, 4, "DSPDATACTL",
1752 0xc1, regvalue, cur_col, wrap));
1753}
1754
1755int
1756ahd_dfdat_print(u_int regvalue, u_int *cur_col, u_int wrap)
1757{
1758 return (ahd_print_register(NULL, 0, "DFDAT",
1759 0xc4, regvalue, cur_col, wrap));
1760}
1761
1762static const ahd_reg_parse_entry_t DSPSELECT_parse_table[] = {
1763 { "DSPSEL", 0x1f, 0x1f },
1764 { "AUTOINCEN", 0x80, 0x80 }
1765};
1766
1767int
1768ahd_dspselect_print(u_int regvalue, u_int *cur_col, u_int wrap)
1769{
1770 return (ahd_print_register(DSPSELECT_parse_table, 2, "DSPSELECT",
1771 0xc4, regvalue, cur_col, wrap));
1772}
1773
1774static const ahd_reg_parse_entry_t WRTBIASCTL_parse_table[] = {
1775 { "XMITMANVAL", 0x3f, 0x3f },
1776 { "AUTOXBCDIS", 0x80, 0x80 }
1777};
1778
1779int
1780ahd_wrtbiasctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
1781{
1782 return (ahd_print_register(WRTBIASCTL_parse_table, 2, "WRTBIASCTL",
1783 0xc5, regvalue, cur_col, wrap));
1784}
1785
1786static const ahd_reg_parse_entry_t SEQCTL0_parse_table[] = { 578static const ahd_reg_parse_entry_t SEQCTL0_parse_table[] = {
1787 { "LOADRAM", 0x01, 0x01 }, 579 { "LOADRAM", 0x01, 0x01 },
1788 { "SEQRESET", 0x02, 0x02 }, 580 { "SEQRESET", 0x02, 0x02 },
@@ -1801,18 +593,6 @@ ahd_seqctl0_print(u_int regvalue, u_int *cur_col, u_int wrap)
1801 0xd6, regvalue, cur_col, wrap)); 593 0xd6, regvalue, cur_col, wrap));
1802} 594}
1803 595
1804static const ahd_reg_parse_entry_t FLAGS_parse_table[] = {
1805 { "CARRY", 0x01, 0x01 },
1806 { "ZERO", 0x02, 0x02 }
1807};
1808
1809int
1810ahd_flags_print(u_int regvalue, u_int *cur_col, u_int wrap)
1811{
1812 return (ahd_print_register(FLAGS_parse_table, 2, "FLAGS",
1813 0xd8, regvalue, cur_col, wrap));
1814}
1815
1816static const ahd_reg_parse_entry_t SEQINTCTL_parse_table[] = { 596static const ahd_reg_parse_entry_t SEQINTCTL_parse_table[] = {
1817 { "IRET", 0x01, 0x01 }, 597 { "IRET", 0x01, 0x01 },
1818 { "INTMASK1", 0x02, 0x02 }, 598 { "INTMASK1", 0x02, 0x02 },
@@ -1831,118 +611,6 @@ ahd_seqintctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
1831} 611}
1832 612
1833int 613int
1834ahd_seqram_print(u_int regvalue, u_int *cur_col, u_int wrap)
1835{
1836 return (ahd_print_register(NULL, 0, "SEQRAM",
1837 0xda, regvalue, cur_col, wrap));
1838}
1839
1840int
1841ahd_prgmcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
1842{
1843 return (ahd_print_register(NULL, 0, "PRGMCNT",
1844 0xde, regvalue, cur_col, wrap));
1845}
1846
1847int
1848ahd_accum_print(u_int regvalue, u_int *cur_col, u_int wrap)
1849{
1850 return (ahd_print_register(NULL, 0, "ACCUM",
1851 0xe0, regvalue, cur_col, wrap));
1852}
1853
1854int
1855ahd_sindex_print(u_int regvalue, u_int *cur_col, u_int wrap)
1856{
1857 return (ahd_print_register(NULL, 0, "SINDEX",
1858 0xe2, regvalue, cur_col, wrap));
1859}
1860
1861int
1862ahd_dindex_print(u_int regvalue, u_int *cur_col, u_int wrap)
1863{
1864 return (ahd_print_register(NULL, 0, "DINDEX",
1865 0xe4, regvalue, cur_col, wrap));
1866}
1867
1868int
1869ahd_allones_print(u_int regvalue, u_int *cur_col, u_int wrap)
1870{
1871 return (ahd_print_register(NULL, 0, "ALLONES",
1872 0xe8, regvalue, cur_col, wrap));
1873}
1874
1875int
1876ahd_allzeros_print(u_int regvalue, u_int *cur_col, u_int wrap)
1877{
1878 return (ahd_print_register(NULL, 0, "ALLZEROS",
1879 0xea, regvalue, cur_col, wrap));
1880}
1881
1882int
1883ahd_none_print(u_int regvalue, u_int *cur_col, u_int wrap)
1884{
1885 return (ahd_print_register(NULL, 0, "NONE",
1886 0xea, regvalue, cur_col, wrap));
1887}
1888
1889int
1890ahd_sindir_print(u_int regvalue, u_int *cur_col, u_int wrap)
1891{
1892 return (ahd_print_register(NULL, 0, "SINDIR",
1893 0xec, regvalue, cur_col, wrap));
1894}
1895
1896int
1897ahd_dindir_print(u_int regvalue, u_int *cur_col, u_int wrap)
1898{
1899 return (ahd_print_register(NULL, 0, "DINDIR",
1900 0xed, regvalue, cur_col, wrap));
1901}
1902
1903int
1904ahd_stack_print(u_int regvalue, u_int *cur_col, u_int wrap)
1905{
1906 return (ahd_print_register(NULL, 0, "STACK",
1907 0xf2, regvalue, cur_col, wrap));
1908}
1909
1910int
1911ahd_intvec1_addr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1912{
1913 return (ahd_print_register(NULL, 0, "INTVEC1_ADDR",
1914 0xf4, regvalue, cur_col, wrap));
1915}
1916
1917int
1918ahd_curaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1919{
1920 return (ahd_print_register(NULL, 0, "CURADDR",
1921 0xf4, regvalue, cur_col, wrap));
1922}
1923
1924int
1925ahd_intvec2_addr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1926{
1927 return (ahd_print_register(NULL, 0, "INTVEC2_ADDR",
1928 0xf6, regvalue, cur_col, wrap));
1929}
1930
1931int
1932ahd_longjmp_addr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1933{
1934 return (ahd_print_register(NULL, 0, "LONGJMP_ADDR",
1935 0xf8, regvalue, cur_col, wrap));
1936}
1937
1938int
1939ahd_accum_save_print(u_int regvalue, u_int *cur_col, u_int wrap)
1940{
1941 return (ahd_print_register(NULL, 0, "ACCUM_SAVE",
1942 0xfa, regvalue, cur_col, wrap));
1943}
1944
1945int
1946ahd_sram_base_print(u_int regvalue, u_int *cur_col, u_int wrap) 614ahd_sram_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
1947{ 615{
1948 return (ahd_print_register(NULL, 0, "SRAM_BASE", 616 return (ahd_print_register(NULL, 0, "SRAM_BASE",
@@ -1950,69 +618,6 @@ ahd_sram_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
1950} 618}
1951 619
1952int 620int
1953ahd_waiting_scb_tails_print(u_int regvalue, u_int *cur_col, u_int wrap)
1954{
1955 return (ahd_print_register(NULL, 0, "WAITING_SCB_TAILS",
1956 0x100, regvalue, cur_col, wrap));
1957}
1958
1959int
1960ahd_waiting_tid_head_print(u_int regvalue, u_int *cur_col, u_int wrap)
1961{
1962 return (ahd_print_register(NULL, 0, "WAITING_TID_HEAD",
1963 0x120, regvalue, cur_col, wrap));
1964}
1965
1966int
1967ahd_waiting_tid_tail_print(u_int regvalue, u_int *cur_col, u_int wrap)
1968{
1969 return (ahd_print_register(NULL, 0, "WAITING_TID_TAIL",
1970 0x122, regvalue, cur_col, wrap));
1971}
1972
1973int
1974ahd_next_queued_scb_addr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1975{
1976 return (ahd_print_register(NULL, 0, "NEXT_QUEUED_SCB_ADDR",
1977 0x124, regvalue, cur_col, wrap));
1978}
1979
1980int
1981ahd_complete_scb_head_print(u_int regvalue, u_int *cur_col, u_int wrap)
1982{
1983 return (ahd_print_register(NULL, 0, "COMPLETE_SCB_HEAD",
1984 0x128, regvalue, cur_col, wrap));
1985}
1986
1987int
1988ahd_complete_scb_dmainprog_head_print(u_int regvalue, u_int *cur_col, u_int wrap)
1989{
1990 return (ahd_print_register(NULL, 0, "COMPLETE_SCB_DMAINPROG_HEAD",
1991 0x12a, regvalue, cur_col, wrap));
1992}
1993
1994int
1995ahd_complete_dma_scb_head_print(u_int regvalue, u_int *cur_col, u_int wrap)
1996{
1997 return (ahd_print_register(NULL, 0, "COMPLETE_DMA_SCB_HEAD",
1998 0x12c, regvalue, cur_col, wrap));
1999}
2000
2001int
2002ahd_complete_dma_scb_tail_print(u_int regvalue, u_int *cur_col, u_int wrap)
2003{
2004 return (ahd_print_register(NULL, 0, "COMPLETE_DMA_SCB_TAIL",
2005 0x12e, regvalue, cur_col, wrap));
2006}
2007
2008int
2009ahd_complete_on_qfreeze_head_print(u_int regvalue, u_int *cur_col, u_int wrap)
2010{
2011 return (ahd_print_register(NULL, 0, "COMPLETE_ON_QFREEZE_HEAD",
2012 0x130, regvalue, cur_col, wrap));
2013}
2014
2015int
2016ahd_qfreeze_count_print(u_int regvalue, u_int *cur_col, u_int wrap) 621ahd_qfreeze_count_print(u_int regvalue, u_int *cur_col, u_int wrap)
2017{ 622{
2018 return (ahd_print_register(NULL, 0, "QFREEZE_COUNT", 623 return (ahd_print_register(NULL, 0, "QFREEZE_COUNT",
@@ -2033,33 +638,6 @@ ahd_saved_mode_print(u_int regvalue, u_int *cur_col, u_int wrap)
2033 0x136, regvalue, cur_col, wrap)); 638 0x136, regvalue, cur_col, wrap));
2034} 639}
2035 640
2036int
2037ahd_msg_out_print(u_int regvalue, u_int *cur_col, u_int wrap)
2038{
2039 return (ahd_print_register(NULL, 0, "MSG_OUT",
2040 0x137, regvalue, cur_col, wrap));
2041}
2042
2043static const ahd_reg_parse_entry_t DMAPARAMS_parse_table[] = {
2044 { "FIFORESET", 0x01, 0x01 },
2045 { "FIFOFLUSH", 0x02, 0x02 },
2046 { "DIRECTION", 0x04, 0x04 },
2047 { "HDMAEN", 0x08, 0x08 },
2048 { "HDMAENACK", 0x08, 0x08 },
2049 { "SDMAEN", 0x10, 0x10 },
2050 { "SDMAENACK", 0x10, 0x10 },
2051 { "SCSIEN", 0x20, 0x20 },
2052 { "WIDEODD", 0x40, 0x40 },
2053 { "PRELOADEN", 0x80, 0x80 }
2054};
2055
2056int
2057ahd_dmaparams_print(u_int regvalue, u_int *cur_col, u_int wrap)
2058{
2059 return (ahd_print_register(DMAPARAMS_parse_table, 10, "DMAPARAMS",
2060 0x138, regvalue, cur_col, wrap));
2061}
2062
2063static const ahd_reg_parse_entry_t SEQ_FLAGS_parse_table[] = { 641static const ahd_reg_parse_entry_t SEQ_FLAGS_parse_table[] = {
2064 { "NO_DISCONNECT", 0x01, 0x01 }, 642 { "NO_DISCONNECT", 0x01, 0x01 },
2065 { "SPHASE_PENDING", 0x02, 0x02 }, 643 { "SPHASE_PENDING", 0x02, 0x02 },
@@ -2079,20 +657,6 @@ ahd_seq_flags_print(u_int regvalue, u_int *cur_col, u_int wrap)
2079 0x139, regvalue, cur_col, wrap)); 657 0x139, regvalue, cur_col, wrap));
2080} 658}
2081 659
2082int
2083ahd_saved_scsiid_print(u_int regvalue, u_int *cur_col, u_int wrap)
2084{
2085 return (ahd_print_register(NULL, 0, "SAVED_SCSIID",
2086 0x13a, regvalue, cur_col, wrap));
2087}
2088
2089int
2090ahd_saved_lun_print(u_int regvalue, u_int *cur_col, u_int wrap)
2091{
2092 return (ahd_print_register(NULL, 0, "SAVED_LUN",
2093 0x13b, regvalue, cur_col, wrap));
2094}
2095
2096static const ahd_reg_parse_entry_t LASTPHASE_parse_table[] = { 660static const ahd_reg_parse_entry_t LASTPHASE_parse_table[] = {
2097 { "P_DATAOUT", 0x00, 0xe0 }, 661 { "P_DATAOUT", 0x00, 0xe0 },
2098 { "P_DATAOUT_DT", 0x20, 0xe0 }, 662 { "P_DATAOUT_DT", 0x20, 0xe0 },
@@ -2116,96 +680,6 @@ ahd_lastphase_print(u_int regvalue, u_int *cur_col, u_int wrap)
2116 0x13c, regvalue, cur_col, wrap)); 680 0x13c, regvalue, cur_col, wrap));
2117} 681}
2118 682
2119int
2120ahd_qoutfifo_entry_valid_tag_print(u_int regvalue, u_int *cur_col, u_int wrap)
2121{
2122 return (ahd_print_register(NULL, 0, "QOUTFIFO_ENTRY_VALID_TAG",
2123 0x13d, regvalue, cur_col, wrap));
2124}
2125
2126int
2127ahd_kernel_tqinpos_print(u_int regvalue, u_int *cur_col, u_int wrap)
2128{
2129 return (ahd_print_register(NULL, 0, "KERNEL_TQINPOS",
2130 0x13e, regvalue, cur_col, wrap));
2131}
2132
2133int
2134ahd_tqinpos_print(u_int regvalue, u_int *cur_col, u_int wrap)
2135{
2136 return (ahd_print_register(NULL, 0, "TQINPOS",
2137 0x13f, regvalue, cur_col, wrap));
2138}
2139
2140int
2141ahd_shared_data_addr_print(u_int regvalue, u_int *cur_col, u_int wrap)
2142{
2143 return (ahd_print_register(NULL, 0, "SHARED_DATA_ADDR",
2144 0x140, regvalue, cur_col, wrap));
2145}
2146
2147int
2148ahd_qoutfifo_next_addr_print(u_int regvalue, u_int *cur_col, u_int wrap)
2149{
2150 return (ahd_print_register(NULL, 0, "QOUTFIFO_NEXT_ADDR",
2151 0x144, regvalue, cur_col, wrap));
2152}
2153
2154static const ahd_reg_parse_entry_t ARG_1_parse_table[] = {
2155 { "CONT_MSG_LOOP_TARG", 0x02, 0x02 },
2156 { "CONT_MSG_LOOP_READ", 0x03, 0x03 },
2157 { "CONT_MSG_LOOP_WRITE",0x04, 0x04 },
2158 { "EXIT_MSG_LOOP", 0x08, 0x08 },
2159 { "MSGOUT_PHASEMIS", 0x10, 0x10 },
2160 { "SEND_REJ", 0x20, 0x20 },
2161 { "SEND_SENSE", 0x40, 0x40 },
2162 { "SEND_MSG", 0x80, 0x80 }
2163};
2164
2165int
2166ahd_arg_1_print(u_int regvalue, u_int *cur_col, u_int wrap)
2167{
2168 return (ahd_print_register(ARG_1_parse_table, 8, "ARG_1",
2169 0x148, regvalue, cur_col, wrap));
2170}
2171
2172int
2173ahd_arg_2_print(u_int regvalue, u_int *cur_col, u_int wrap)
2174{
2175 return (ahd_print_register(NULL, 0, "ARG_2",
2176 0x149, regvalue, cur_col, wrap));
2177}
2178
2179int
2180ahd_last_msg_print(u_int regvalue, u_int *cur_col, u_int wrap)
2181{
2182 return (ahd_print_register(NULL, 0, "LAST_MSG",
2183 0x14a, regvalue, cur_col, wrap));
2184}
2185
2186static const ahd_reg_parse_entry_t SCSISEQ_TEMPLATE_parse_table[] = {
2187 { "ALTSTIM", 0x01, 0x01 },
2188 { "ENAUTOATNP", 0x02, 0x02 },
2189 { "MANUALP", 0x0c, 0x0c },
2190 { "ENRSELI", 0x10, 0x10 },
2191 { "ENSELI", 0x20, 0x20 },
2192 { "MANUALCTL", 0x40, 0x40 }
2193};
2194
2195int
2196ahd_scsiseq_template_print(u_int regvalue, u_int *cur_col, u_int wrap)
2197{
2198 return (ahd_print_register(SCSISEQ_TEMPLATE_parse_table, 6, "SCSISEQ_TEMPLATE",
2199 0x14b, regvalue, cur_col, wrap));
2200}
2201
2202int
2203ahd_initiator_tag_print(u_int regvalue, u_int *cur_col, u_int wrap)
2204{
2205 return (ahd_print_register(NULL, 0, "INITIATOR_TAG",
2206 0x14c, regvalue, cur_col, wrap));
2207}
2208
2209static const ahd_reg_parse_entry_t SEQ_FLAGS2_parse_table[] = { 683static const ahd_reg_parse_entry_t SEQ_FLAGS2_parse_table[] = {
2210 { "PENDING_MK_MESSAGE", 0x01, 0x01 }, 684 { "PENDING_MK_MESSAGE", 0x01, 0x01 },
2211 { "TARGET_MSG_PENDING", 0x02, 0x02 }, 685 { "TARGET_MSG_PENDING", 0x02, 0x02 },
@@ -2220,62 +694,6 @@ ahd_seq_flags2_print(u_int regvalue, u_int *cur_col, u_int wrap)
2220} 694}
2221 695
2222int 696int
2223ahd_allocfifo_scbptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
2224{
2225 return (ahd_print_register(NULL, 0, "ALLOCFIFO_SCBPTR",
2226 0x14e, regvalue, cur_col, wrap));
2227}
2228
2229int
2230ahd_int_coalescing_timer_print(u_int regvalue, u_int *cur_col, u_int wrap)
2231{
2232 return (ahd_print_register(NULL, 0, "INT_COALESCING_TIMER",
2233 0x150, regvalue, cur_col, wrap));
2234}
2235
2236int
2237ahd_int_coalescing_maxcmds_print(u_int regvalue, u_int *cur_col, u_int wrap)
2238{
2239 return (ahd_print_register(NULL, 0, "INT_COALESCING_MAXCMDS",
2240 0x152, regvalue, cur_col, wrap));
2241}
2242
2243int
2244ahd_int_coalescing_mincmds_print(u_int regvalue, u_int *cur_col, u_int wrap)
2245{
2246 return (ahd_print_register(NULL, 0, "INT_COALESCING_MINCMDS",
2247 0x153, regvalue, cur_col, wrap));
2248}
2249
2250int
2251ahd_cmds_pending_print(u_int regvalue, u_int *cur_col, u_int wrap)
2252{
2253 return (ahd_print_register(NULL, 0, "CMDS_PENDING",
2254 0x154, regvalue, cur_col, wrap));
2255}
2256
2257int
2258ahd_int_coalescing_cmdcount_print(u_int regvalue, u_int *cur_col, u_int wrap)
2259{
2260 return (ahd_print_register(NULL, 0, "INT_COALESCING_CMDCOUNT",
2261 0x156, regvalue, cur_col, wrap));
2262}
2263
2264int
2265ahd_local_hs_mailbox_print(u_int regvalue, u_int *cur_col, u_int wrap)
2266{
2267 return (ahd_print_register(NULL, 0, "LOCAL_HS_MAILBOX",
2268 0x157, regvalue, cur_col, wrap));
2269}
2270
2271int
2272ahd_cmdsize_table_print(u_int regvalue, u_int *cur_col, u_int wrap)
2273{
2274 return (ahd_print_register(NULL, 0, "CMDSIZE_TABLE",
2275 0x158, regvalue, cur_col, wrap));
2276}
2277
2278int
2279ahd_mk_message_scb_print(u_int regvalue, u_int *cur_col, u_int wrap) 697ahd_mk_message_scb_print(u_int regvalue, u_int *cur_col, u_int wrap)
2280{ 698{
2281 return (ahd_print_register(NULL, 0, "MK_MESSAGE_SCB", 699 return (ahd_print_register(NULL, 0, "MK_MESSAGE_SCB",
@@ -2290,53 +708,12 @@ ahd_mk_message_scsiid_print(u_int regvalue, u_int *cur_col, u_int wrap)
2290} 708}
2291 709
2292int 710int
2293ahd_scb_residual_datacnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
2294{
2295 return (ahd_print_register(NULL, 0, "SCB_RESIDUAL_DATACNT",
2296 0x180, regvalue, cur_col, wrap));
2297}
2298
2299int
2300ahd_scb_base_print(u_int regvalue, u_int *cur_col, u_int wrap) 711ahd_scb_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
2301{ 712{
2302 return (ahd_print_register(NULL, 0, "SCB_BASE", 713 return (ahd_print_register(NULL, 0, "SCB_BASE",
2303 0x180, regvalue, cur_col, wrap)); 714 0x180, regvalue, cur_col, wrap));
2304} 715}
2305 716
2306static const ahd_reg_parse_entry_t SCB_RESIDUAL_SGPTR_parse_table[] = {
2307 { "SG_LIST_NULL", 0x01, 0x01 },
2308 { "SG_OVERRUN_RESID", 0x02, 0x02 },
2309 { "SG_ADDR_MASK", 0xf8, 0xf8 }
2310};
2311
2312int
2313ahd_scb_residual_sgptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
2314{
2315 return (ahd_print_register(SCB_RESIDUAL_SGPTR_parse_table, 3, "SCB_RESIDUAL_SGPTR",
2316 0x184, regvalue, cur_col, wrap));
2317}
2318
2319int
2320ahd_scb_scsi_status_print(u_int regvalue, u_int *cur_col, u_int wrap)
2321{
2322 return (ahd_print_register(NULL, 0, "SCB_SCSI_STATUS",
2323 0x188, regvalue, cur_col, wrap));
2324}
2325
2326int
2327ahd_scb_sense_busaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
2328{
2329 return (ahd_print_register(NULL, 0, "SCB_SENSE_BUSADDR",
2330 0x18c, regvalue, cur_col, wrap));
2331}
2332
2333int
2334ahd_scb_tag_print(u_int regvalue, u_int *cur_col, u_int wrap)
2335{
2336 return (ahd_print_register(NULL, 0, "SCB_TAG",
2337 0x190, regvalue, cur_col, wrap));
2338}
2339
2340static const ahd_reg_parse_entry_t SCB_CONTROL_parse_table[] = { 717static const ahd_reg_parse_entry_t SCB_CONTROL_parse_table[] = {
2341 { "SCB_TAG_TYPE", 0x03, 0x03 }, 718 { "SCB_TAG_TYPE", 0x03, 0x03 },
2342 { "DISCONNECTED", 0x04, 0x04 }, 719 { "DISCONNECTED", 0x04, 0x04 },
@@ -2366,103 +743,3 @@ ahd_scb_scsiid_print(u_int regvalue, u_int *cur_col, u_int wrap)
2366 0x193, regvalue, cur_col, wrap)); 743 0x193, regvalue, cur_col, wrap));
2367} 744}
2368 745
2369static const ahd_reg_parse_entry_t SCB_LUN_parse_table[] = {
2370 { "LID", 0xff, 0xff }
2371};
2372
2373int
2374ahd_scb_lun_print(u_int regvalue, u_int *cur_col, u_int wrap)
2375{
2376 return (ahd_print_register(SCB_LUN_parse_table, 1, "SCB_LUN",
2377 0x194, regvalue, cur_col, wrap));
2378}
2379
2380static const ahd_reg_parse_entry_t SCB_TASK_ATTRIBUTE_parse_table[] = {
2381 { "SCB_XFERLEN_ODD", 0x01, 0x01 }
2382};
2383
2384int
2385ahd_scb_task_attribute_print(u_int regvalue, u_int *cur_col, u_int wrap)
2386{
2387 return (ahd_print_register(SCB_TASK_ATTRIBUTE_parse_table, 1, "SCB_TASK_ATTRIBUTE",
2388 0x195, regvalue, cur_col, wrap));
2389}
2390
2391static const ahd_reg_parse_entry_t SCB_CDB_LEN_parse_table[] = {
2392 { "SCB_CDB_LEN_PTR", 0x80, 0x80 }
2393};
2394
2395int
2396ahd_scb_cdb_len_print(u_int regvalue, u_int *cur_col, u_int wrap)
2397{
2398 return (ahd_print_register(SCB_CDB_LEN_parse_table, 1, "SCB_CDB_LEN",
2399 0x196, regvalue, cur_col, wrap));
2400}
2401
2402int
2403ahd_scb_task_management_print(u_int regvalue, u_int *cur_col, u_int wrap)
2404{
2405 return (ahd_print_register(NULL, 0, "SCB_TASK_MANAGEMENT",
2406 0x197, regvalue, cur_col, wrap));
2407}
2408
2409int
2410ahd_scb_dataptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
2411{
2412 return (ahd_print_register(NULL, 0, "SCB_DATAPTR",
2413 0x198, regvalue, cur_col, wrap));
2414}
2415
2416static const ahd_reg_parse_entry_t SCB_DATACNT_parse_table[] = {
2417 { "SG_HIGH_ADDR_BITS", 0x7f, 0x7f },
2418 { "SG_LAST_SEG", 0x80, 0x80 }
2419};
2420
2421int
2422ahd_scb_datacnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
2423{
2424 return (ahd_print_register(SCB_DATACNT_parse_table, 2, "SCB_DATACNT",
2425 0x1a0, regvalue, cur_col, wrap));
2426}
2427
2428static const ahd_reg_parse_entry_t SCB_SGPTR_parse_table[] = {
2429 { "SG_LIST_NULL", 0x01, 0x01 },
2430 { "SG_FULL_RESID", 0x02, 0x02 },
2431 { "SG_STATUS_VALID", 0x04, 0x04 }
2432};
2433
2434int
2435ahd_scb_sgptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
2436{
2437 return (ahd_print_register(SCB_SGPTR_parse_table, 3, "SCB_SGPTR",
2438 0x1a4, regvalue, cur_col, wrap));
2439}
2440
2441int
2442ahd_scb_busaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
2443{
2444 return (ahd_print_register(NULL, 0, "SCB_BUSADDR",
2445 0x1a8, regvalue, cur_col, wrap));
2446}
2447
2448int
2449ahd_scb_next_print(u_int regvalue, u_int *cur_col, u_int wrap)
2450{
2451 return (ahd_print_register(NULL, 0, "SCB_NEXT",
2452 0x1ac, regvalue, cur_col, wrap));
2453}
2454
2455int
2456ahd_scb_next2_print(u_int regvalue, u_int *cur_col, u_int wrap)
2457{
2458 return (ahd_print_register(NULL, 0, "SCB_NEXT2",
2459 0x1ae, regvalue, cur_col, wrap));
2460}
2461
2462int
2463ahd_scb_disconnected_lists_print(u_int regvalue, u_int *cur_col, u_int wrap)
2464{
2465 return (ahd_print_register(NULL, 0, "SCB_DISCONNECTED_LISTS",
2466 0x1b8, regvalue, cur_col, wrap));
2467}
2468
diff --git a/drivers/scsi/aic7xxx/aic7xxx.reg b/drivers/scsi/aic7xxx/aic7xxx.reg
index 0d2f763c3427..9a96e55da39a 100644
--- a/drivers/scsi/aic7xxx/aic7xxx.reg
+++ b/drivers/scsi/aic7xxx/aic7xxx.reg
@@ -51,6 +51,17 @@ VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic7xxx.reg#40 $"
51 */ 51 */
52 52
53/* 53/*
54 * Registers marked "dont_generate_debug_code" are not (yet) referenced
55 * from the driver code, and this keyword inhibits the generation
56 * of debug code for them.
57 *
58 * The REG_PRETTY_PRINT config will complain if dont_generate_debug_code
59 * is added to a register that is referenced in the driver.
60 * An unreferenced register without dont_generate_debug_code will result
61 * in dead code, and no warning is issued.
62 */
63
64/*
54 * SCSI Sequence Control (p. 3-11). 65 * SCSI Sequence Control (p. 3-11).
55 * Each bit, when set starts a specific SCSI sequence on the bus 66 * Each bit, when set starts a specific SCSI sequence on the bus
56 */ 67 */
@@ -97,6 +108,7 @@ register SXFRCTL1 {
97 field ENSTIMER 0x04 108 field ENSTIMER 0x04
98 field ACTNEGEN 0x02 109 field ACTNEGEN 0x02
99 field STPWEN 0x01 /* Powered Termination */ 110 field STPWEN 0x01 /* Powered Termination */
111 dont_generate_debug_code
100} 112}
101 113
102/* 114/*
@@ -155,6 +167,7 @@ register SCSISIGO {
155 mask P_MESGOUT CDI|MSGI 167 mask P_MESGOUT CDI|MSGI
156 mask P_STATUS CDI|IOI 168 mask P_STATUS CDI|IOI
157 mask P_MESGIN CDI|IOI|MSGI 169 mask P_MESGIN CDI|IOI|MSGI
170 dont_generate_debug_code
158} 171}
159 172
160/* 173/*
@@ -194,6 +207,7 @@ register SCSIID {
194 */ 207 */
195 alias SCSIOFFSET 208 alias SCSIOFFSET
196 mask SOFS_ULTRA2 0x7f /* Sync offset U2 chips */ 209 mask SOFS_ULTRA2 0x7f /* Sync offset U2 chips */
210 dont_generate_debug_code
197} 211}
198 212
199/* 213/*
@@ -205,6 +219,7 @@ register SCSIID {
205register SCSIDATL { 219register SCSIDATL {
206 address 0x006 220 address 0x006
207 access_mode RW 221 access_mode RW
222 dont_generate_debug_code
208} 223}
209 224
210register SCSIDATH { 225register SCSIDATH {
@@ -223,6 +238,7 @@ register STCNT {
223 address 0x008 238 address 0x008
224 size 3 239 size 3
225 access_mode RW 240 access_mode RW
241 dont_generate_debug_code
226} 242}
227 243
228/* ALT_MODE registers (Ultra2 and Ultra160 chips) */ 244/* ALT_MODE registers (Ultra2 and Ultra160 chips) */
@@ -248,6 +264,7 @@ register OPTIONMODE {
248 field AUTO_MSGOUT_DE 0x02 264 field AUTO_MSGOUT_DE 0x02
249 field DIS_MSGIN_DUALEDGE 0x01 265 field DIS_MSGIN_DUALEDGE 0x01
250 mask OPTIONMODE_DEFAULTS AUTO_MSGOUT_DE|DIS_MSGIN_DUALEDGE 266 mask OPTIONMODE_DEFAULTS AUTO_MSGOUT_DE|DIS_MSGIN_DUALEDGE
267 dont_generate_debug_code
251} 268}
252 269
253/* ALT_MODE register on Ultra160 chips */ 270/* ALT_MODE register on Ultra160 chips */
@@ -256,6 +273,7 @@ register TARGCRCCNT {
256 size 2 273 size 2
257 access_mode RW 274 access_mode RW
258 count 2 275 count 2
276 dont_generate_debug_code
259} 277}
260 278
261/* 279/*
@@ -271,6 +289,7 @@ register CLRSINT0 {
271 field CLRSWRAP 0x08 289 field CLRSWRAP 0x08
272 field CLRIOERR 0x08 /* Ultra2 Only */ 290 field CLRIOERR 0x08 /* Ultra2 Only */
273 field CLRSPIORDY 0x02 291 field CLRSPIORDY 0x02
292 dont_generate_debug_code
274} 293}
275 294
276/* 295/*
@@ -306,6 +325,7 @@ register CLRSINT1 {
306 field CLRSCSIPERR 0x04 325 field CLRSCSIPERR 0x04
307 field CLRPHASECHG 0x02 326 field CLRPHASECHG 0x02
308 field CLRREQINIT 0x01 327 field CLRREQINIT 0x01
328 dont_generate_debug_code
309} 329}
310 330
311/* 331/*
@@ -360,6 +380,7 @@ register SCSIID_ULTRA2 {
360 access_mode RW 380 access_mode RW
361 mask TID 0xf0 /* Target ID mask */ 381 mask TID 0xf0 /* Target ID mask */
362 mask OID 0x0f /* Our ID mask */ 382 mask OID 0x0f /* Our ID mask */
383 dont_generate_debug_code
363} 384}
364 385
365/* 386/*
@@ -425,6 +446,7 @@ register SHADDR {
425 address 0x014 446 address 0x014
426 size 4 447 size 4
427 access_mode RO 448 access_mode RO
449 dont_generate_debug_code
428} 450}
429 451
430/* 452/*
@@ -441,6 +463,7 @@ register SELTIMER {
441 field STAGE2 0x02 463 field STAGE2 0x02
442 field STAGE1 0x01 464 field STAGE1 0x01
443 alias TARGIDIN 465 alias TARGIDIN
466 dont_generate_debug_code
444} 467}
445 468
446/* 469/*
@@ -453,6 +476,7 @@ register SELID {
453 access_mode RW 476 access_mode RW
454 mask SELID_MASK 0xf0 477 mask SELID_MASK 0xf0
455 field ONEBIT 0x08 478 field ONEBIT 0x08
479 dont_generate_debug_code
456} 480}
457 481
458register SCAMCTL { 482register SCAMCTL {
@@ -473,6 +497,7 @@ register TARGID {
473 size 2 497 size 2
474 access_mode RW 498 access_mode RW
475 count 14 499 count 14
500 dont_generate_debug_code
476} 501}
477 502
478/* 503/*
@@ -495,6 +520,7 @@ register SPIOCAP {
495 field EEPROM 0x04 /* Writable external BIOS ROM */ 520 field EEPROM 0x04 /* Writable external BIOS ROM */
496 field ROM 0x02 /* Logic for accessing external ROM */ 521 field ROM 0x02 /* Logic for accessing external ROM */
497 field SSPIOCPS 0x01 /* Termination and cable detection */ 522 field SSPIOCPS 0x01 /* Termination and cable detection */
523 dont_generate_debug_code
498} 524}
499 525
500register BRDCTL { 526register BRDCTL {
@@ -514,6 +540,7 @@ register BRDCTL {
514 field BRDDAT2 0x04 540 field BRDDAT2 0x04
515 field BRDRW_ULTRA2 0x02 541 field BRDRW_ULTRA2 0x02
516 field BRDSTB_ULTRA2 0x01 542 field BRDSTB_ULTRA2 0x01
543 dont_generate_debug_code
517} 544}
518 545
519/* 546/*
@@ -551,6 +578,7 @@ register SEECTL {
551 field SEECK 0x04 578 field SEECK 0x04
552 field SEEDO 0x02 579 field SEEDO 0x02
553 field SEEDI 0x01 580 field SEEDI 0x01
581 dont_generate_debug_code
554} 582}
555/* 583/*
556 * SCSI Block Control (p. 3-32) 584 * SCSI Block Control (p. 3-32)
@@ -601,6 +629,7 @@ register SEQRAM {
601 address 0x061 629 address 0x061
602 access_mode RW 630 access_mode RW
603 count 2 631 count 2
632 dont_generate_debug_code
604} 633}
605 634
606/* 635/*
@@ -610,6 +639,7 @@ register SEQRAM {
610register SEQADDR0 { 639register SEQADDR0 {
611 address 0x062 640 address 0x062
612 access_mode RW 641 access_mode RW
642 dont_generate_debug_code
613} 643}
614 644
615register SEQADDR1 { 645register SEQADDR1 {
@@ -617,6 +647,7 @@ register SEQADDR1 {
617 access_mode RW 647 access_mode RW
618 count 8 648 count 8
619 mask SEQADDR1_MASK 0x01 649 mask SEQADDR1_MASK 0x01
650 dont_generate_debug_code
620} 651}
621 652
622/* 653/*
@@ -627,35 +658,41 @@ register ACCUM {
627 address 0x064 658 address 0x064
628 access_mode RW 659 access_mode RW
629 accumulator 660 accumulator
661 dont_generate_debug_code
630} 662}
631 663
632register SINDEX { 664register SINDEX {
633 address 0x065 665 address 0x065
634 access_mode RW 666 access_mode RW
635 sindex 667 sindex
668 dont_generate_debug_code
636} 669}
637 670
638register DINDEX { 671register DINDEX {
639 address 0x066 672 address 0x066
640 access_mode RW 673 access_mode RW
674 dont_generate_debug_code
641} 675}
642 676
643register ALLONES { 677register ALLONES {
644 address 0x069 678 address 0x069
645 access_mode RO 679 access_mode RO
646 allones 680 allones
681 dont_generate_debug_code
647} 682}
648 683
649register ALLZEROS { 684register ALLZEROS {
650 address 0x06a 685 address 0x06a
651 access_mode RO 686 access_mode RO
652 allzeros 687 allzeros
688 dont_generate_debug_code
653} 689}
654 690
655register NONE { 691register NONE {
656 address 0x06a 692 address 0x06a
657 access_mode WO 693 access_mode WO
658 none 694 none
695 dont_generate_debug_code
659} 696}
660 697
661register FLAGS { 698register FLAGS {
@@ -664,16 +701,19 @@ register FLAGS {
664 count 18 701 count 18
665 field ZERO 0x02 702 field ZERO 0x02
666 field CARRY 0x01 703 field CARRY 0x01
704 dont_generate_debug_code
667} 705}
668 706
669register SINDIR { 707register SINDIR {
670 address 0x06c 708 address 0x06c
671 access_mode RO 709 access_mode RO
710 dont_generate_debug_code
672} 711}
673 712
674register DINDIR { 713register DINDIR {
675 address 0x06d 714 address 0x06d
676 access_mode WO 715 access_mode WO
716 dont_generate_debug_code
677} 717}
678 718
679register FUNCTION1 { 719register FUNCTION1 {
@@ -685,6 +725,7 @@ register STACK {
685 address 0x06f 725 address 0x06f
686 access_mode RO 726 access_mode RO
687 count 5 727 count 5
728 dont_generate_debug_code
688} 729}
689 730
690const STACK_SIZE 4 731const STACK_SIZE 4
@@ -716,6 +757,7 @@ register DSCOMMAND0 {
716 field RAMPS 0x04 /* External SCB RAM Present */ 757 field RAMPS 0x04 /* External SCB RAM Present */
717 field USCBSIZE32 0x02 /* Use 32byte SCB Page Size */ 758 field USCBSIZE32 0x02 /* Use 32byte SCB Page Size */
718 field CIOPARCKEN 0x01 /* Internal bus parity error enable */ 759 field CIOPARCKEN 0x01 /* Internal bus parity error enable */
760 dont_generate_debug_code
719} 761}
720 762
721register DSCOMMAND1 { 763register DSCOMMAND1 {
@@ -724,6 +766,7 @@ register DSCOMMAND1 {
724 mask DSLATT 0xfc /* PCI latency timer (non-ultra2) */ 766 mask DSLATT 0xfc /* PCI latency timer (non-ultra2) */
725 field HADDLDSEL1 0x02 /* Host Address Load Select Bits */ 767 field HADDLDSEL1 0x02 /* Host Address Load Select Bits */
726 field HADDLDSEL0 0x01 768 field HADDLDSEL0 0x01
769 dont_generate_debug_code
727} 770}
728 771
729/* 772/*
@@ -735,6 +778,7 @@ register BUSTIME {
735 count 2 778 count 2
736 mask BOFF 0xf0 779 mask BOFF 0xf0
737 mask BON 0x0f 780 mask BON 0x0f
781 dont_generate_debug_code
738} 782}
739 783
740/* 784/*
@@ -749,6 +793,7 @@ register BUSSPD {
749 mask STBON 0x07 793 mask STBON 0x07
750 mask DFTHRSH_100 0xc0 794 mask DFTHRSH_100 0xc0
751 mask DFTHRSH_75 0x80 795 mask DFTHRSH_75 0x80
796 dont_generate_debug_code
752} 797}
753 798
754/* aic7850/55/60/70/80/95 only */ 799/* aic7850/55/60/70/80/95 only */
@@ -756,6 +801,7 @@ register DSPCISTATUS {
756 address 0x086 801 address 0x086
757 count 4 802 count 4
758 mask DFTHRSH_100 0xc0 803 mask DFTHRSH_100 0xc0
804 dont_generate_debug_code
759} 805}
760 806
761/* aic7890/91/96/97 only */ 807/* aic7890/91/96/97 only */
@@ -764,6 +810,7 @@ register HS_MAILBOX {
764 mask HOST_MAILBOX 0xF0 810 mask HOST_MAILBOX 0xF0
765 mask SEQ_MAILBOX 0x0F 811 mask SEQ_MAILBOX 0x0F
766 mask HOST_TQINPOS 0x80 /* Boundary at either 0 or 128 */ 812 mask HOST_TQINPOS 0x80 /* Boundary at either 0 or 128 */
813 dont_generate_debug_code
767} 814}
768 815
769const HOST_MAILBOX_SHIFT 4 816const HOST_MAILBOX_SHIFT 4
@@ -784,6 +831,7 @@ register HCNTRL {
784 field INTEN 0x02 831 field INTEN 0x02
785 field CHIPRST 0x01 832 field CHIPRST 0x01
786 field CHIPRSTACK 0x01 833 field CHIPRSTACK 0x01
834 dont_generate_debug_code
787} 835}
788 836
789/* 837/*
@@ -795,12 +843,14 @@ register HADDR {
795 address 0x088 843 address 0x088
796 size 4 844 size 4
797 access_mode RW 845 access_mode RW
846 dont_generate_debug_code
798} 847}
799 848
800register HCNT { 849register HCNT {
801 address 0x08c 850 address 0x08c
802 size 3 851 size 3
803 access_mode RW 852 access_mode RW
853 dont_generate_debug_code
804} 854}
805 855
806/* 856/*
@@ -810,6 +860,7 @@ register HCNT {
810register SCBPTR { 860register SCBPTR {
811 address 0x090 861 address 0x090
812 access_mode RW 862 access_mode RW
863 dont_generate_debug_code
813} 864}
814 865
815/* 866/*
@@ -878,6 +929,7 @@ register INTSTAT {
878 929
879 mask SEQINT_MASK 0xf0|SEQINT /* SEQINT Status Codes */ 930 mask SEQINT_MASK 0xf0|SEQINT /* SEQINT Status Codes */
880 mask INT_PEND (BRKADRINT|SEQINT|SCSIINT|CMDCMPLT) 931 mask INT_PEND (BRKADRINT|SEQINT|SCSIINT|CMDCMPLT)
932 dont_generate_debug_code
881} 933}
882 934
883/* 935/*
@@ -911,6 +963,7 @@ register CLRINT {
911 field CLRSCSIINT 0x04 963 field CLRSCSIINT 0x04
912 field CLRCMDINT 0x02 964 field CLRCMDINT 0x02
913 field CLRSEQINT 0x01 965 field CLRSEQINT 0x01
966 dont_generate_debug_code
914} 967}
915 968
916register DFCNTRL { 969register DFCNTRL {
@@ -944,6 +997,7 @@ register DFSTATUS {
944register DFWADDR { 997register DFWADDR {
945 address 0x95 998 address 0x95
946 access_mode RW 999 access_mode RW
1000 dont_generate_debug_code
947} 1001}
948 1002
949register DFRADDR { 1003register DFRADDR {
@@ -954,6 +1008,7 @@ register DFRADDR {
954register DFDAT { 1008register DFDAT {
955 address 0x099 1009 address 0x099
956 access_mode RW 1010 access_mode RW
1011 dont_generate_debug_code
957} 1012}
958 1013
959/* 1014/*
@@ -967,6 +1022,7 @@ register SCBCNT {
967 count 1 1022 count 1
968 field SCBAUTO 0x80 1023 field SCBAUTO 0x80
969 mask SCBCNT_MASK 0x1f 1024 mask SCBCNT_MASK 0x1f
1025 dont_generate_debug_code
970} 1026}
971 1027
972/* 1028/*
@@ -977,6 +1033,7 @@ register QINFIFO {
977 address 0x09b 1033 address 0x09b
978 access_mode RW 1034 access_mode RW
979 count 12 1035 count 12
1036 dont_generate_debug_code
980} 1037}
981 1038
982/* 1039/*
@@ -996,6 +1053,7 @@ register QOUTFIFO {
996 address 0x09d 1053 address 0x09d
997 access_mode WO 1054 access_mode WO
998 count 7 1055 count 7
1056 dont_generate_debug_code
999} 1057}
1000 1058
1001register CRCCONTROL1 { 1059register CRCCONTROL1 {
@@ -1008,6 +1066,7 @@ register CRCCONTROL1 {
1008 field CRCREQCHKEN 0x10 1066 field CRCREQCHKEN 0x10
1009 field TARGCRCENDEN 0x08 1067 field TARGCRCENDEN 0x08
1010 field TARGCRCCNTEN 0x04 1068 field TARGCRCCNTEN 0x04
1069 dont_generate_debug_code
1011} 1070}
1012 1071
1013 1072
@@ -1040,6 +1099,7 @@ register SFUNCT {
1040 access_mode RW 1099 access_mode RW
1041 count 4 1100 count 4
1042 field ALT_MODE 0x80 1101 field ALT_MODE 0x80
1102 dont_generate_debug_code
1043} 1103}
1044 1104
1045/* 1105/*
@@ -1053,24 +1113,31 @@ scb {
1053 size 4 1113 size 4
1054 alias SCB_RESIDUAL_DATACNT 1114 alias SCB_RESIDUAL_DATACNT
1055 alias SCB_CDB_STORE 1115 alias SCB_CDB_STORE
1116 dont_generate_debug_code
1056 } 1117 }
1057 SCB_RESIDUAL_SGPTR { 1118 SCB_RESIDUAL_SGPTR {
1058 size 4 1119 size 4
1120 dont_generate_debug_code
1059 } 1121 }
1060 SCB_SCSI_STATUS { 1122 SCB_SCSI_STATUS {
1061 size 1 1123 size 1
1124 dont_generate_debug_code
1062 } 1125 }
1063 SCB_TARGET_PHASES { 1126 SCB_TARGET_PHASES {
1064 size 1 1127 size 1
1128 dont_generate_debug_code
1065 } 1129 }
1066 SCB_TARGET_DATA_DIR { 1130 SCB_TARGET_DATA_DIR {
1067 size 1 1131 size 1
1132 dont_generate_debug_code
1068 } 1133 }
1069 SCB_TARGET_ITAG { 1134 SCB_TARGET_ITAG {
1070 size 1 1135 size 1
1136 dont_generate_debug_code
1071 } 1137 }
1072 SCB_DATAPTR { 1138 SCB_DATAPTR {
1073 size 4 1139 size 4
1140 dont_generate_debug_code
1074 } 1141 }
1075 SCB_DATACNT { 1142 SCB_DATACNT {
1076 /* 1143 /*
@@ -1080,12 +1147,14 @@ scb {
1080 size 4 1147 size 4
1081 field SG_LAST_SEG 0x80 /* In the fourth byte */ 1148 field SG_LAST_SEG 0x80 /* In the fourth byte */
1082 mask SG_HIGH_ADDR_BITS 0x7F /* In the fourth byte */ 1149 mask SG_HIGH_ADDR_BITS 0x7F /* In the fourth byte */
1150 dont_generate_debug_code
1083 } 1151 }
1084 SCB_SGPTR { 1152 SCB_SGPTR {
1085 size 4 1153 size 4
1086 field SG_RESID_VALID 0x04 /* In the first byte */ 1154 field SG_RESID_VALID 0x04 /* In the first byte */
1087 field SG_FULL_RESID 0x02 /* In the first byte */ 1155 field SG_FULL_RESID 0x02 /* In the first byte */
1088 field SG_LIST_NULL 0x01 /* In the first byte */ 1156 field SG_LIST_NULL 0x01 /* In the first byte */
1157 dont_generate_debug_code
1089 } 1158 }
1090 SCB_CONTROL { 1159 SCB_CONTROL {
1091 size 1 1160 size 1
@@ -1115,22 +1184,27 @@ scb {
1115 } 1184 }
1116 SCB_CDB_LEN { 1185 SCB_CDB_LEN {
1117 size 1 1186 size 1
1187 dont_generate_debug_code
1118 } 1188 }
1119 SCB_SCSIRATE { 1189 SCB_SCSIRATE {
1120 size 1 1190 size 1
1191 dont_generate_debug_code
1121 } 1192 }
1122 SCB_SCSIOFFSET { 1193 SCB_SCSIOFFSET {
1123 size 1 1194 size 1
1124 count 1 1195 count 1
1196 dont_generate_debug_code
1125 } 1197 }
1126 SCB_NEXT { 1198 SCB_NEXT {
1127 size 1 1199 size 1
1200 dont_generate_debug_code
1128 } 1201 }
1129 SCB_64_SPARE { 1202 SCB_64_SPARE {
1130 size 16 1203 size 16
1131 } 1204 }
1132 SCB_64_BTT { 1205 SCB_64_BTT {
1133 size 16 1206 size 16
1207 dont_generate_debug_code
1134 } 1208 }
1135} 1209}
1136 1210
@@ -1149,6 +1223,7 @@ register SEECTL_2840 {
1149 field CS_2840 0x04 1223 field CS_2840 0x04
1150 field CK_2840 0x02 1224 field CK_2840 0x02
1151 field DO_2840 0x01 1225 field DO_2840 0x01
1226 dont_generate_debug_code
1152} 1227}
1153 1228
1154register STATUS_2840 { 1229register STATUS_2840 {
@@ -1159,6 +1234,7 @@ register STATUS_2840 {
1159 mask BIOS_SEL 0x60 1234 mask BIOS_SEL 0x60
1160 mask ADSEL 0x1e 1235 mask ADSEL 0x1e
1161 field DI_2840 0x01 1236 field DI_2840 0x01
1237 dont_generate_debug_code
1162} 1238}
1163 1239
1164/* --------------------- AIC-7870-only definitions -------------------- */ 1240/* --------------------- AIC-7870-only definitions -------------------- */
@@ -1166,18 +1242,22 @@ register STATUS_2840 {
1166register CCHADDR { 1242register CCHADDR {
1167 address 0x0E0 1243 address 0x0E0
1168 size 8 1244 size 8
1245 dont_generate_debug_code
1169} 1246}
1170 1247
1171register CCHCNT { 1248register CCHCNT {
1172 address 0x0E8 1249 address 0x0E8
1250 dont_generate_debug_code
1173} 1251}
1174 1252
1175register CCSGRAM { 1253register CCSGRAM {
1176 address 0x0E9 1254 address 0x0E9
1255 dont_generate_debug_code
1177} 1256}
1178 1257
1179register CCSGADDR { 1258register CCSGADDR {
1180 address 0x0EA 1259 address 0x0EA
1260 dont_generate_debug_code
1181} 1261}
1182 1262
1183register CCSGCTL { 1263register CCSGCTL {
@@ -1186,11 +1266,13 @@ register CCSGCTL {
1186 field CCSGEN 0x08 1266 field CCSGEN 0x08
1187 field SG_FETCH_NEEDED 0x02 /* Bit used for software state */ 1267 field SG_FETCH_NEEDED 0x02 /* Bit used for software state */
1188 field CCSGRESET 0x01 1268 field CCSGRESET 0x01
1269 dont_generate_debug_code
1189} 1270}
1190 1271
1191register CCSCBCNT { 1272register CCSCBCNT {
1192 address 0xEF 1273 address 0xEF
1193 count 1 1274 count 1
1275 dont_generate_debug_code
1194} 1276}
1195 1277
1196register CCSCBCTL { 1278register CCSCBCTL {
@@ -1201,14 +1283,17 @@ register CCSCBCTL {
1201 field CCSCBEN 0x08 1283 field CCSCBEN 0x08
1202 field CCSCBDIR 0x04 1284 field CCSCBDIR 0x04
1203 field CCSCBRESET 0x01 1285 field CCSCBRESET 0x01
1286 dont_generate_debug_code
1204} 1287}
1205 1288
1206register CCSCBADDR { 1289register CCSCBADDR {
1207 address 0x0ED 1290 address 0x0ED
1291 dont_generate_debug_code
1208} 1292}
1209 1293
1210register CCSCBRAM { 1294register CCSCBRAM {
1211 address 0xEC 1295 address 0xEC
1296 dont_generate_debug_code
1212} 1297}
1213 1298
1214/* 1299/*
@@ -1218,23 +1303,28 @@ register SCBBADDR {
1218 address 0x0F0 1303 address 0x0F0
1219 access_mode RW 1304 access_mode RW
1220 count 3 1305 count 3
1306 dont_generate_debug_code
1221} 1307}
1222 1308
1223register CCSCBPTR { 1309register CCSCBPTR {
1224 address 0x0F1 1310 address 0x0F1
1311 dont_generate_debug_code
1225} 1312}
1226 1313
1227register HNSCB_QOFF { 1314register HNSCB_QOFF {
1228 address 0x0F4 1315 address 0x0F4
1229 count 4 1316 count 4
1317 dont_generate_debug_code
1230} 1318}
1231 1319
1232register SNSCB_QOFF { 1320register SNSCB_QOFF {
1233 address 0x0F6 1321 address 0x0F6
1322 dont_generate_debug_code
1234} 1323}
1235 1324
1236register SDSCB_QOFF { 1325register SDSCB_QOFF {
1237 address 0x0F8 1326 address 0x0F8
1327 dont_generate_debug_code
1238} 1328}
1239 1329
1240register QOFF_CTLSTA { 1330register QOFF_CTLSTA {
@@ -1244,6 +1334,7 @@ register QOFF_CTLSTA {
1244 field SDSCB_ROLLOVER 0x10 1334 field SDSCB_ROLLOVER 0x10
1245 mask SCB_QSIZE 0x07 1335 mask SCB_QSIZE 0x07
1246 mask SCB_QSIZE_256 0x06 1336 mask SCB_QSIZE_256 0x06
1337 dont_generate_debug_code
1247} 1338}
1248 1339
1249register DFF_THRSH { 1340register DFF_THRSH {
@@ -1267,6 +1358,7 @@ register DFF_THRSH {
1267 mask WR_DFTHRSH_90 0x60 1358 mask WR_DFTHRSH_90 0x60
1268 mask WR_DFTHRSH_MAX 0x70 1359 mask WR_DFTHRSH_MAX 0x70
1269 count 4 1360 count 4
1361 dont_generate_debug_code
1270} 1362}
1271 1363
1272register SG_CACHE_PRE { 1364register SG_CACHE_PRE {
@@ -1275,6 +1367,7 @@ register SG_CACHE_PRE {
1275 mask SG_ADDR_MASK 0xf8 1367 mask SG_ADDR_MASK 0xf8
1276 field LAST_SEG 0x02 1368 field LAST_SEG 0x02
1277 field LAST_SEG_DONE 0x01 1369 field LAST_SEG_DONE 0x01
1370 dont_generate_debug_code
1278} 1371}
1279 1372
1280register SG_CACHE_SHADOW { 1373register SG_CACHE_SHADOW {
@@ -1283,6 +1376,7 @@ register SG_CACHE_SHADOW {
1283 mask SG_ADDR_MASK 0xf8 1376 mask SG_ADDR_MASK 0xf8
1284 field LAST_SEG 0x02 1377 field LAST_SEG 0x02
1285 field LAST_SEG_DONE 0x01 1378 field LAST_SEG_DONE 0x01
1379 dont_generate_debug_code
1286} 1380}
1287/* ---------------------- Scratch RAM Offsets ------------------------- */ 1381/* ---------------------- Scratch RAM Offsets ------------------------- */
1288/* These offsets are either to values that are initialized by the board's 1382/* These offsets are either to values that are initialized by the board's
@@ -1309,6 +1403,7 @@ scratch_ram {
1309 BUSY_TARGETS { 1403 BUSY_TARGETS {
1310 alias TARG_SCSIRATE 1404 alias TARG_SCSIRATE
1311 size 16 1405 size 16
1406 dont_generate_debug_code
1312 } 1407 }
1313 /* 1408 /*
1314 * Bit vector of targets that have ULTRA enabled as set by 1409 * Bit vector of targets that have ULTRA enabled as set by
@@ -1321,6 +1416,7 @@ scratch_ram {
1321 alias CMDSIZE_TABLE 1416 alias CMDSIZE_TABLE
1322 size 2 1417 size 2
1323 count 2 1418 count 2
1419 dont_generate_debug_code
1324 } 1420 }
1325 /* 1421 /*
1326 * Bit vector of targets that have disconnection disabled as set by 1422 * Bit vector of targets that have disconnection disabled as set by
@@ -1331,6 +1427,7 @@ scratch_ram {
1331 DISC_DSB { 1427 DISC_DSB {
1332 size 2 1428 size 2
1333 count 6 1429 count 6
1430 dont_generate_debug_code
1334 } 1431 }
1335 CMDSIZE_TABLE_TAIL { 1432 CMDSIZE_TABLE_TAIL {
1336 size 4 1433 size 4
@@ -1341,12 +1438,14 @@ scratch_ram {
1341 */ 1438 */
1342 MWI_RESIDUAL { 1439 MWI_RESIDUAL {
1343 size 1 1440 size 1
1441 dont_generate_debug_code
1344 } 1442 }
1345 /* 1443 /*
1346 * SCBID of the next SCB to be started by the controller. 1444 * SCBID of the next SCB to be started by the controller.
1347 */ 1445 */
1348 NEXT_QUEUED_SCB { 1446 NEXT_QUEUED_SCB {
1349 size 1 1447 size 1
1448 dont_generate_debug_code
1350 } 1449 }
1351 /* 1450 /*
1352 * Single byte buffer used to designate the type or message 1451 * Single byte buffer used to designate the type or message
@@ -1354,6 +1453,7 @@ scratch_ram {
1354 */ 1453 */
1355 MSG_OUT { 1454 MSG_OUT {
1356 size 1 1455 size 1
1456 dont_generate_debug_code
1357 } 1457 }
1358 /* Parameters for DMA Logic */ 1458 /* Parameters for DMA Logic */
1359 DMAPARAMS { 1459 DMAPARAMS {
@@ -1369,6 +1469,7 @@ scratch_ram {
1369 field DIRECTION 0x04 /* Set indicates PCI->SCSI */ 1469 field DIRECTION 0x04 /* Set indicates PCI->SCSI */
1370 field FIFOFLUSH 0x02 1470 field FIFOFLUSH 0x02
1371 field FIFORESET 0x01 1471 field FIFORESET 0x01
1472 dont_generate_debug_code
1372 } 1473 }
1373 SEQ_FLAGS { 1474 SEQ_FLAGS {
1374 size 1 1475 size 1
@@ -1390,9 +1491,11 @@ scratch_ram {
1390 */ 1491 */
1391 SAVED_SCSIID { 1492 SAVED_SCSIID {
1392 size 1 1493 size 1
1494 dont_generate_debug_code
1393 } 1495 }
1394 SAVED_LUN { 1496 SAVED_LUN {
1395 size 1 1497 size 1
1498 dont_generate_debug_code
1396 } 1499 }
1397 /* 1500 /*
1398 * The last bus phase as seen by the sequencer. 1501 * The last bus phase as seen by the sequencer.
@@ -1417,6 +1520,7 @@ scratch_ram {
1417 */ 1520 */
1418 WAITING_SCBH { 1521 WAITING_SCBH {
1419 size 1 1522 size 1
1523 dont_generate_debug_code
1420 } 1524 }
1421 /* 1525 /*
1422 * head of list of SCBs that are 1526 * head of list of SCBs that are
@@ -1425,6 +1529,7 @@ scratch_ram {
1425 */ 1529 */
1426 DISCONNECTED_SCBH { 1530 DISCONNECTED_SCBH {
1427 size 1 1531 size 1
1532 dont_generate_debug_code
1428 } 1533 }
1429 /* 1534 /*
1430 * head of list of SCBs that are 1535 * head of list of SCBs that are
@@ -1432,6 +1537,7 @@ scratch_ram {
1432 */ 1537 */
1433 FREE_SCBH { 1538 FREE_SCBH {
1434 size 1 1539 size 1
1540 dont_generate_debug_code
1435 } 1541 }
1436 /* 1542 /*
1437 * head of list of SCBs that have 1543 * head of list of SCBs that have
@@ -1446,6 +1552,7 @@ scratch_ram {
1446 */ 1552 */
1447 HSCB_ADDR { 1553 HSCB_ADDR {
1448 size 4 1554 size 4
1555 dont_generate_debug_code
1449 } 1556 }
1450 /* 1557 /*
1451 * Base address of our shared data with the kernel driver in host 1558 * Base address of our shared data with the kernel driver in host
@@ -1454,15 +1561,19 @@ scratch_ram {
1454 */ 1561 */
1455 SHARED_DATA_ADDR { 1562 SHARED_DATA_ADDR {
1456 size 4 1563 size 4
1564 dont_generate_debug_code
1457 } 1565 }
1458 KERNEL_QINPOS { 1566 KERNEL_QINPOS {
1459 size 1 1567 size 1
1568 dont_generate_debug_code
1460 } 1569 }
1461 QINPOS { 1570 QINPOS {
1462 size 1 1571 size 1
1572 dont_generate_debug_code
1463 } 1573 }
1464 QOUTPOS { 1574 QOUTPOS {
1465 size 1 1575 size 1
1576 dont_generate_debug_code
1466 } 1577 }
1467 /* 1578 /*
1468 * Kernel and sequencer offsets into the queue of 1579 * Kernel and sequencer offsets into the queue of
@@ -1471,9 +1582,11 @@ scratch_ram {
1471 */ 1582 */
1472 KERNEL_TQINPOS { 1583 KERNEL_TQINPOS {
1473 size 1 1584 size 1
1585 dont_generate_debug_code
1474 } 1586 }
1475 TQINPOS { 1587 TQINPOS {
1476 size 1 1588 size 1
1589 dont_generate_debug_code
1477 } 1590 }
1478 ARG_1 { 1591 ARG_1 {
1479 size 1 1592 size 1
@@ -1486,10 +1599,12 @@ scratch_ram {
1486 mask CONT_MSG_LOOP 0x04 1599 mask CONT_MSG_LOOP 0x04
1487 mask CONT_TARG_SESSION 0x02 1600 mask CONT_TARG_SESSION 0x02
1488 alias RETURN_1 1601 alias RETURN_1
1602 dont_generate_debug_code
1489 } 1603 }
1490 ARG_2 { 1604 ARG_2 {
1491 size 1 1605 size 1
1492 alias RETURN_2 1606 alias RETURN_2
1607 dont_generate_debug_code
1493 } 1608 }
1494 1609
1495 /* 1610 /*
@@ -1498,6 +1613,7 @@ scratch_ram {
1498 LAST_MSG { 1613 LAST_MSG {
1499 size 1 1614 size 1
1500 alias TARG_IMMEDIATE_SCB 1615 alias TARG_IMMEDIATE_SCB
1616 dont_generate_debug_code
1501 } 1617 }
1502 1618
1503 /* 1619 /*
@@ -1513,6 +1629,7 @@ scratch_ram {
1513 field ENAUTOATNO 0x08 1629 field ENAUTOATNO 0x08
1514 field ENAUTOATNI 0x04 1630 field ENAUTOATNI 0x04
1515 field ENAUTOATNP 0x02 1631 field ENAUTOATNP 0x02
1632 dont_generate_debug_code
1516 } 1633 }
1517} 1634}
1518 1635
@@ -1533,12 +1650,14 @@ scratch_ram {
1533 field HA_274_EXTENDED_TRANS 0x01 1650 field HA_274_EXTENDED_TRANS 0x01
1534 alias INITIATOR_TAG 1651 alias INITIATOR_TAG
1535 count 1 1652 count 1
1653 dont_generate_debug_code
1536 } 1654 }
1537 1655
1538 SEQ_FLAGS2 { 1656 SEQ_FLAGS2 {
1539 size 1 1657 size 1
1540 field SCB_DMA 0x01 1658 field SCB_DMA 0x01
1541 field TARGET_MSG_PENDING 0x02 1659 field TARGET_MSG_PENDING 0x02
1660 dont_generate_debug_code
1542 } 1661 }
1543} 1662}
1544 1663
@@ -1562,6 +1681,7 @@ scratch_ram {
1562 field ENSPCHK 0x20 1681 field ENSPCHK 0x20
1563 mask HSCSIID 0x07 /* our SCSI ID */ 1682 mask HSCSIID 0x07 /* our SCSI ID */
1564 mask HWSCSIID 0x0f /* our SCSI ID if Wide Bus */ 1683 mask HWSCSIID 0x0f /* our SCSI ID if Wide Bus */
1684 dont_generate_debug_code
1565 } 1685 }
1566 INTDEF { 1686 INTDEF {
1567 address 0x05c 1687 address 0x05c
@@ -1569,11 +1689,13 @@ scratch_ram {
1569 count 1 1689 count 1
1570 field EDGE_TRIG 0x80 1690 field EDGE_TRIG 0x80
1571 mask VECTOR 0x0f 1691 mask VECTOR 0x0f
1692 dont_generate_debug_code
1572 } 1693 }
1573 HOSTCONF { 1694 HOSTCONF {
1574 address 0x05d 1695 address 0x05d
1575 size 1 1696 size 1
1576 count 1 1697 count 1
1698 dont_generate_debug_code
1577 } 1699 }
1578 HA_274_BIOSCTRL { 1700 HA_274_BIOSCTRL {
1579 address 0x05f 1701 address 0x05f
@@ -1582,6 +1704,7 @@ scratch_ram {
1582 mask BIOSMODE 0x30 1704 mask BIOSMODE 0x30
1583 mask BIOSDISABLED 0x30 1705 mask BIOSDISABLED 0x30
1584 field CHANNEL_B_PRIMARY 0x08 1706 field CHANNEL_B_PRIMARY 0x08
1707 dont_generate_debug_code
1585 } 1708 }
1586} 1709}
1587 1710
@@ -1595,6 +1718,7 @@ scratch_ram {
1595 TARG_OFFSET { 1718 TARG_OFFSET {
1596 size 16 1719 size 16
1597 count 1 1720 count 1
1721 dont_generate_debug_code
1598 } 1722 }
1599} 1723}
1600 1724
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index 0ae2b4605d09..e6f2bb7365e6 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -814,6 +814,7 @@ ahc_intr(struct ahc_softc *ahc)
814static void 814static void
815ahc_restart(struct ahc_softc *ahc) 815ahc_restart(struct ahc_softc *ahc)
816{ 816{
817 uint8_t sblkctl;
817 818
818 ahc_pause(ahc); 819 ahc_pause(ahc);
819 820
@@ -868,6 +869,12 @@ ahc_restart(struct ahc_softc *ahc)
868 ahc_outb(ahc, SEQADDR0, 0); 869 ahc_outb(ahc, SEQADDR0, 0);
869 ahc_outb(ahc, SEQADDR1, 0); 870 ahc_outb(ahc, SEQADDR1, 0);
870 871
872 /*
873 * Take the LED out of diagnostic mode on PM resume, too
874 */
875 sblkctl = ahc_inb(ahc, SBLKCTL);
876 ahc_outb(ahc, SBLKCTL, (sblkctl & ~(DIAGLEDEN|DIAGLEDON)));
877
871 ahc_unpause(ahc); 878 ahc_unpause(ahc);
872} 879}
873 880
diff --git a/drivers/scsi/aic7xxx/aic7xxx_reg.h_shipped b/drivers/scsi/aic7xxx/aic7xxx_reg.h_shipped
index 2ce1febca207..e821082a4f47 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_reg.h_shipped
+++ b/drivers/scsi/aic7xxx/aic7xxx_reg.h_shipped
@@ -27,20 +27,6 @@ ahc_reg_print_t ahc_sxfrctl0_print;
27#endif 27#endif
28 28
29#if AIC_DEBUG_REGISTERS 29#if AIC_DEBUG_REGISTERS
30ahc_reg_print_t ahc_sxfrctl1_print;
31#else
32#define ahc_sxfrctl1_print(regvalue, cur_col, wrap) \
33 ahc_print_register(NULL, 0, "SXFRCTL1", 0x02, regvalue, cur_col, wrap)
34#endif
35
36#if AIC_DEBUG_REGISTERS
37ahc_reg_print_t ahc_scsisigo_print;
38#else
39#define ahc_scsisigo_print(regvalue, cur_col, wrap) \
40 ahc_print_register(NULL, 0, "SCSISIGO", 0x03, regvalue, cur_col, wrap)
41#endif
42
43#if AIC_DEBUG_REGISTERS
44ahc_reg_print_t ahc_scsisigi_print; 30ahc_reg_print_t ahc_scsisigi_print;
45#else 31#else
46#define ahc_scsisigi_print(regvalue, cur_col, wrap) \ 32#define ahc_scsisigi_print(regvalue, cur_col, wrap) \
@@ -55,55 +41,6 @@ ahc_reg_print_t ahc_scsirate_print;
55#endif 41#endif
56 42
57#if AIC_DEBUG_REGISTERS 43#if AIC_DEBUG_REGISTERS
58ahc_reg_print_t ahc_scsiid_print;
59#else
60#define ahc_scsiid_print(regvalue, cur_col, wrap) \
61 ahc_print_register(NULL, 0, "SCSIID", 0x05, regvalue, cur_col, wrap)
62#endif
63
64#if AIC_DEBUG_REGISTERS
65ahc_reg_print_t ahc_scsidatl_print;
66#else
67#define ahc_scsidatl_print(regvalue, cur_col, wrap) \
68 ahc_print_register(NULL, 0, "SCSIDATL", 0x06, regvalue, cur_col, wrap)
69#endif
70
71#if AIC_DEBUG_REGISTERS
72ahc_reg_print_t ahc_scsidath_print;
73#else
74#define ahc_scsidath_print(regvalue, cur_col, wrap) \
75 ahc_print_register(NULL, 0, "SCSIDATH", 0x07, regvalue, cur_col, wrap)
76#endif
77
78#if AIC_DEBUG_REGISTERS
79ahc_reg_print_t ahc_stcnt_print;
80#else
81#define ahc_stcnt_print(regvalue, cur_col, wrap) \
82 ahc_print_register(NULL, 0, "STCNT", 0x08, regvalue, cur_col, wrap)
83#endif
84
85#if AIC_DEBUG_REGISTERS
86ahc_reg_print_t ahc_optionmode_print;
87#else
88#define ahc_optionmode_print(regvalue, cur_col, wrap) \
89 ahc_print_register(NULL, 0, "OPTIONMODE", 0x08, regvalue, cur_col, wrap)
90#endif
91
92#if AIC_DEBUG_REGISTERS
93ahc_reg_print_t ahc_targcrccnt_print;
94#else
95#define ahc_targcrccnt_print(regvalue, cur_col, wrap) \
96 ahc_print_register(NULL, 0, "TARGCRCCNT", 0x0a, regvalue, cur_col, wrap)
97#endif
98
99#if AIC_DEBUG_REGISTERS
100ahc_reg_print_t ahc_clrsint0_print;
101#else
102#define ahc_clrsint0_print(regvalue, cur_col, wrap) \
103 ahc_print_register(NULL, 0, "CLRSINT0", 0x0b, regvalue, cur_col, wrap)
104#endif
105
106#if AIC_DEBUG_REGISTERS
107ahc_reg_print_t ahc_sstat0_print; 44ahc_reg_print_t ahc_sstat0_print;
108#else 45#else
109#define ahc_sstat0_print(regvalue, cur_col, wrap) \ 46#define ahc_sstat0_print(regvalue, cur_col, wrap) \
@@ -111,13 +48,6 @@ ahc_reg_print_t ahc_sstat0_print;
111#endif 48#endif
112 49
113#if AIC_DEBUG_REGISTERS 50#if AIC_DEBUG_REGISTERS
114ahc_reg_print_t ahc_clrsint1_print;
115#else
116#define ahc_clrsint1_print(regvalue, cur_col, wrap) \
117 ahc_print_register(NULL, 0, "CLRSINT1", 0x0c, regvalue, cur_col, wrap)
118#endif
119
120#if AIC_DEBUG_REGISTERS
121ahc_reg_print_t ahc_sstat1_print; 51ahc_reg_print_t ahc_sstat1_print;
122#else 52#else
123#define ahc_sstat1_print(regvalue, cur_col, wrap) \ 53#define ahc_sstat1_print(regvalue, cur_col, wrap) \
@@ -139,13 +69,6 @@ ahc_reg_print_t ahc_sstat3_print;
139#endif 69#endif
140 70
141#if AIC_DEBUG_REGISTERS 71#if AIC_DEBUG_REGISTERS
142ahc_reg_print_t ahc_scsiid_ultra2_print;
143#else
144#define ahc_scsiid_ultra2_print(regvalue, cur_col, wrap) \
145 ahc_print_register(NULL, 0, "SCSIID_ULTRA2", 0x0f, regvalue, cur_col, wrap)
146#endif
147
148#if AIC_DEBUG_REGISTERS
149ahc_reg_print_t ahc_simode0_print; 72ahc_reg_print_t ahc_simode0_print;
150#else 73#else
151#define ahc_simode0_print(regvalue, cur_col, wrap) \ 74#define ahc_simode0_print(regvalue, cur_col, wrap) \
@@ -167,76 +90,6 @@ ahc_reg_print_t ahc_scsibusl_print;
167#endif 90#endif
168 91
169#if AIC_DEBUG_REGISTERS 92#if AIC_DEBUG_REGISTERS
170ahc_reg_print_t ahc_scsibush_print;
171#else
172#define ahc_scsibush_print(regvalue, cur_col, wrap) \
173 ahc_print_register(NULL, 0, "SCSIBUSH", 0x13, regvalue, cur_col, wrap)
174#endif
175
176#if AIC_DEBUG_REGISTERS
177ahc_reg_print_t ahc_sxfrctl2_print;
178#else
179#define ahc_sxfrctl2_print(regvalue, cur_col, wrap) \
180 ahc_print_register(NULL, 0, "SXFRCTL2", 0x13, regvalue, cur_col, wrap)
181#endif
182
183#if AIC_DEBUG_REGISTERS
184ahc_reg_print_t ahc_shaddr_print;
185#else
186#define ahc_shaddr_print(regvalue, cur_col, wrap) \
187 ahc_print_register(NULL, 0, "SHADDR", 0x14, regvalue, cur_col, wrap)
188#endif
189
190#if AIC_DEBUG_REGISTERS
191ahc_reg_print_t ahc_seltimer_print;
192#else
193#define ahc_seltimer_print(regvalue, cur_col, wrap) \
194 ahc_print_register(NULL, 0, "SELTIMER", 0x18, regvalue, cur_col, wrap)
195#endif
196
197#if AIC_DEBUG_REGISTERS
198ahc_reg_print_t ahc_selid_print;
199#else
200#define ahc_selid_print(regvalue, cur_col, wrap) \
201 ahc_print_register(NULL, 0, "SELID", 0x19, regvalue, cur_col, wrap)
202#endif
203
204#if AIC_DEBUG_REGISTERS
205ahc_reg_print_t ahc_scamctl_print;
206#else
207#define ahc_scamctl_print(regvalue, cur_col, wrap) \
208 ahc_print_register(NULL, 0, "SCAMCTL", 0x1a, regvalue, cur_col, wrap)
209#endif
210
211#if AIC_DEBUG_REGISTERS
212ahc_reg_print_t ahc_targid_print;
213#else
214#define ahc_targid_print(regvalue, cur_col, wrap) \
215 ahc_print_register(NULL, 0, "TARGID", 0x1b, regvalue, cur_col, wrap)
216#endif
217
218#if AIC_DEBUG_REGISTERS
219ahc_reg_print_t ahc_spiocap_print;
220#else
221#define ahc_spiocap_print(regvalue, cur_col, wrap) \
222 ahc_print_register(NULL, 0, "SPIOCAP", 0x1b, regvalue, cur_col, wrap)
223#endif
224
225#if AIC_DEBUG_REGISTERS
226ahc_reg_print_t ahc_brdctl_print;
227#else
228#define ahc_brdctl_print(regvalue, cur_col, wrap) \
229 ahc_print_register(NULL, 0, "BRDCTL", 0x1d, regvalue, cur_col, wrap)
230#endif
231
232#if AIC_DEBUG_REGISTERS
233ahc_reg_print_t ahc_seectl_print;
234#else
235#define ahc_seectl_print(regvalue, cur_col, wrap) \
236 ahc_print_register(NULL, 0, "SEECTL", 0x1e, regvalue, cur_col, wrap)
237#endif
238
239#if AIC_DEBUG_REGISTERS
240ahc_reg_print_t ahc_sblkctl_print; 93ahc_reg_print_t ahc_sblkctl_print;
241#else 94#else
242#define ahc_sblkctl_print(regvalue, cur_col, wrap) \ 95#define ahc_sblkctl_print(regvalue, cur_col, wrap) \
@@ -244,62 +97,6 @@ ahc_reg_print_t ahc_sblkctl_print;
244#endif 97#endif
245 98
246#if AIC_DEBUG_REGISTERS 99#if AIC_DEBUG_REGISTERS
247ahc_reg_print_t ahc_busy_targets_print;
248#else
249#define ahc_busy_targets_print(regvalue, cur_col, wrap) \
250 ahc_print_register(NULL, 0, "BUSY_TARGETS", 0x20, regvalue, cur_col, wrap)
251#endif
252
253#if AIC_DEBUG_REGISTERS
254ahc_reg_print_t ahc_ultra_enb_print;
255#else
256#define ahc_ultra_enb_print(regvalue, cur_col, wrap) \
257 ahc_print_register(NULL, 0, "ULTRA_ENB", 0x30, regvalue, cur_col, wrap)
258#endif
259
260#if AIC_DEBUG_REGISTERS
261ahc_reg_print_t ahc_disc_dsb_print;
262#else
263#define ahc_disc_dsb_print(regvalue, cur_col, wrap) \
264 ahc_print_register(NULL, 0, "DISC_DSB", 0x32, regvalue, cur_col, wrap)
265#endif
266
267#if AIC_DEBUG_REGISTERS
268ahc_reg_print_t ahc_cmdsize_table_tail_print;
269#else
270#define ahc_cmdsize_table_tail_print(regvalue, cur_col, wrap) \
271 ahc_print_register(NULL, 0, "CMDSIZE_TABLE_TAIL", 0x34, regvalue, cur_col, wrap)
272#endif
273
274#if AIC_DEBUG_REGISTERS
275ahc_reg_print_t ahc_mwi_residual_print;
276#else
277#define ahc_mwi_residual_print(regvalue, cur_col, wrap) \
278 ahc_print_register(NULL, 0, "MWI_RESIDUAL", 0x38, regvalue, cur_col, wrap)
279#endif
280
281#if AIC_DEBUG_REGISTERS
282ahc_reg_print_t ahc_next_queued_scb_print;
283#else
284#define ahc_next_queued_scb_print(regvalue, cur_col, wrap) \
285 ahc_print_register(NULL, 0, "NEXT_QUEUED_SCB", 0x39, regvalue, cur_col, wrap)
286#endif
287
288#if AIC_DEBUG_REGISTERS
289ahc_reg_print_t ahc_msg_out_print;
290#else
291#define ahc_msg_out_print(regvalue, cur_col, wrap) \
292 ahc_print_register(NULL, 0, "MSG_OUT", 0x3a, regvalue, cur_col, wrap)
293#endif
294
295#if AIC_DEBUG_REGISTERS
296ahc_reg_print_t ahc_dmaparams_print;
297#else
298#define ahc_dmaparams_print(regvalue, cur_col, wrap) \
299 ahc_print_register(NULL, 0, "DMAPARAMS", 0x3b, regvalue, cur_col, wrap)
300#endif
301
302#if AIC_DEBUG_REGISTERS
303ahc_reg_print_t ahc_seq_flags_print; 100ahc_reg_print_t ahc_seq_flags_print;
304#else 101#else
305#define ahc_seq_flags_print(regvalue, cur_col, wrap) \ 102#define ahc_seq_flags_print(regvalue, cur_col, wrap) \
@@ -307,20 +104,6 @@ ahc_reg_print_t ahc_seq_flags_print;
307#endif 104#endif
308 105
309#if AIC_DEBUG_REGISTERS 106#if AIC_DEBUG_REGISTERS
310ahc_reg_print_t ahc_saved_scsiid_print;
311#else
312#define ahc_saved_scsiid_print(regvalue, cur_col, wrap) \
313 ahc_print_register(NULL, 0, "SAVED_SCSIID", 0x3d, regvalue, cur_col, wrap)
314#endif
315
316#if AIC_DEBUG_REGISTERS
317ahc_reg_print_t ahc_saved_lun_print;
318#else
319#define ahc_saved_lun_print(regvalue, cur_col, wrap) \
320 ahc_print_register(NULL, 0, "SAVED_LUN", 0x3e, regvalue, cur_col, wrap)
321#endif
322
323#if AIC_DEBUG_REGISTERS
324ahc_reg_print_t ahc_lastphase_print; 107ahc_reg_print_t ahc_lastphase_print;
325#else 108#else
326#define ahc_lastphase_print(regvalue, cur_col, wrap) \ 109#define ahc_lastphase_print(regvalue, cur_col, wrap) \
@@ -328,153 +111,6 @@ ahc_reg_print_t ahc_lastphase_print;
328#endif 111#endif
329 112
330#if AIC_DEBUG_REGISTERS 113#if AIC_DEBUG_REGISTERS
331ahc_reg_print_t ahc_waiting_scbh_print;
332#else
333#define ahc_waiting_scbh_print(regvalue, cur_col, wrap) \
334 ahc_print_register(NULL, 0, "WAITING_SCBH", 0x40, regvalue, cur_col, wrap)
335#endif
336
337#if AIC_DEBUG_REGISTERS
338ahc_reg_print_t ahc_disconnected_scbh_print;
339#else
340#define ahc_disconnected_scbh_print(regvalue, cur_col, wrap) \
341 ahc_print_register(NULL, 0, "DISCONNECTED_SCBH", 0x41, regvalue, cur_col, wrap)
342#endif
343
344#if AIC_DEBUG_REGISTERS
345ahc_reg_print_t ahc_free_scbh_print;
346#else
347#define ahc_free_scbh_print(regvalue, cur_col, wrap) \
348 ahc_print_register(NULL, 0, "FREE_SCBH", 0x42, regvalue, cur_col, wrap)
349#endif
350
351#if AIC_DEBUG_REGISTERS
352ahc_reg_print_t ahc_complete_scbh_print;
353#else
354#define ahc_complete_scbh_print(regvalue, cur_col, wrap) \
355 ahc_print_register(NULL, 0, "COMPLETE_SCBH", 0x43, regvalue, cur_col, wrap)
356#endif
357
358#if AIC_DEBUG_REGISTERS
359ahc_reg_print_t ahc_hscb_addr_print;
360#else
361#define ahc_hscb_addr_print(regvalue, cur_col, wrap) \
362 ahc_print_register(NULL, 0, "HSCB_ADDR", 0x44, regvalue, cur_col, wrap)
363#endif
364
365#if AIC_DEBUG_REGISTERS
366ahc_reg_print_t ahc_shared_data_addr_print;
367#else
368#define ahc_shared_data_addr_print(regvalue, cur_col, wrap) \
369 ahc_print_register(NULL, 0, "SHARED_DATA_ADDR", 0x48, regvalue, cur_col, wrap)
370#endif
371
372#if AIC_DEBUG_REGISTERS
373ahc_reg_print_t ahc_kernel_qinpos_print;
374#else
375#define ahc_kernel_qinpos_print(regvalue, cur_col, wrap) \
376 ahc_print_register(NULL, 0, "KERNEL_QINPOS", 0x4c, regvalue, cur_col, wrap)
377#endif
378
379#if AIC_DEBUG_REGISTERS
380ahc_reg_print_t ahc_qinpos_print;
381#else
382#define ahc_qinpos_print(regvalue, cur_col, wrap) \
383 ahc_print_register(NULL, 0, "QINPOS", 0x4d, regvalue, cur_col, wrap)
384#endif
385
386#if AIC_DEBUG_REGISTERS
387ahc_reg_print_t ahc_qoutpos_print;
388#else
389#define ahc_qoutpos_print(regvalue, cur_col, wrap) \
390 ahc_print_register(NULL, 0, "QOUTPOS", 0x4e, regvalue, cur_col, wrap)
391#endif
392
393#if AIC_DEBUG_REGISTERS
394ahc_reg_print_t ahc_kernel_tqinpos_print;
395#else
396#define ahc_kernel_tqinpos_print(regvalue, cur_col, wrap) \
397 ahc_print_register(NULL, 0, "KERNEL_TQINPOS", 0x4f, regvalue, cur_col, wrap)
398#endif
399
400#if AIC_DEBUG_REGISTERS
401ahc_reg_print_t ahc_tqinpos_print;
402#else
403#define ahc_tqinpos_print(regvalue, cur_col, wrap) \
404 ahc_print_register(NULL, 0, "TQINPOS", 0x50, regvalue, cur_col, wrap)
405#endif
406
407#if AIC_DEBUG_REGISTERS
408ahc_reg_print_t ahc_arg_1_print;
409#else
410#define ahc_arg_1_print(regvalue, cur_col, wrap) \
411 ahc_print_register(NULL, 0, "ARG_1", 0x51, regvalue, cur_col, wrap)
412#endif
413
414#if AIC_DEBUG_REGISTERS
415ahc_reg_print_t ahc_arg_2_print;
416#else
417#define ahc_arg_2_print(regvalue, cur_col, wrap) \
418 ahc_print_register(NULL, 0, "ARG_2", 0x52, regvalue, cur_col, wrap)
419#endif
420
421#if AIC_DEBUG_REGISTERS
422ahc_reg_print_t ahc_last_msg_print;
423#else
424#define ahc_last_msg_print(regvalue, cur_col, wrap) \
425 ahc_print_register(NULL, 0, "LAST_MSG", 0x53, regvalue, cur_col, wrap)
426#endif
427
428#if AIC_DEBUG_REGISTERS
429ahc_reg_print_t ahc_scsiseq_template_print;
430#else
431#define ahc_scsiseq_template_print(regvalue, cur_col, wrap) \
432 ahc_print_register(NULL, 0, "SCSISEQ_TEMPLATE", 0x54, regvalue, cur_col, wrap)
433#endif
434
435#if AIC_DEBUG_REGISTERS
436ahc_reg_print_t ahc_ha_274_biosglobal_print;
437#else
438#define ahc_ha_274_biosglobal_print(regvalue, cur_col, wrap) \
439 ahc_print_register(NULL, 0, "HA_274_BIOSGLOBAL", 0x56, regvalue, cur_col, wrap)
440#endif
441
442#if AIC_DEBUG_REGISTERS
443ahc_reg_print_t ahc_seq_flags2_print;
444#else
445#define ahc_seq_flags2_print(regvalue, cur_col, wrap) \
446 ahc_print_register(NULL, 0, "SEQ_FLAGS2", 0x57, regvalue, cur_col, wrap)
447#endif
448
449#if AIC_DEBUG_REGISTERS
450ahc_reg_print_t ahc_scsiconf_print;
451#else
452#define ahc_scsiconf_print(regvalue, cur_col, wrap) \
453 ahc_print_register(NULL, 0, "SCSICONF", 0x5a, regvalue, cur_col, wrap)
454#endif
455
456#if AIC_DEBUG_REGISTERS
457ahc_reg_print_t ahc_intdef_print;
458#else
459#define ahc_intdef_print(regvalue, cur_col, wrap) \
460 ahc_print_register(NULL, 0, "INTDEF", 0x5c, regvalue, cur_col, wrap)
461#endif
462
463#if AIC_DEBUG_REGISTERS
464ahc_reg_print_t ahc_hostconf_print;
465#else
466#define ahc_hostconf_print(regvalue, cur_col, wrap) \
467 ahc_print_register(NULL, 0, "HOSTCONF", 0x5d, regvalue, cur_col, wrap)
468#endif
469
470#if AIC_DEBUG_REGISTERS
471ahc_reg_print_t ahc_ha_274_biosctrl_print;
472#else
473#define ahc_ha_274_biosctrl_print(regvalue, cur_col, wrap) \
474 ahc_print_register(NULL, 0, "HA_274_BIOSCTRL", 0x5f, regvalue, cur_col, wrap)
475#endif
476
477#if AIC_DEBUG_REGISTERS
478ahc_reg_print_t ahc_seqctl_print; 114ahc_reg_print_t ahc_seqctl_print;
479#else 115#else
480#define ahc_seqctl_print(regvalue, cur_col, wrap) \ 116#define ahc_seqctl_print(regvalue, cur_col, wrap) \
@@ -482,111 +118,6 @@ ahc_reg_print_t ahc_seqctl_print;
482#endif 118#endif
483 119
484#if AIC_DEBUG_REGISTERS 120#if AIC_DEBUG_REGISTERS
485ahc_reg_print_t ahc_seqram_print;
486#else
487#define ahc_seqram_print(regvalue, cur_col, wrap) \
488 ahc_print_register(NULL, 0, "SEQRAM", 0x61, regvalue, cur_col, wrap)
489#endif
490
491#if AIC_DEBUG_REGISTERS
492ahc_reg_print_t ahc_seqaddr0_print;
493#else
494#define ahc_seqaddr0_print(regvalue, cur_col, wrap) \
495 ahc_print_register(NULL, 0, "SEQADDR0", 0x62, regvalue, cur_col, wrap)
496#endif
497
498#if AIC_DEBUG_REGISTERS
499ahc_reg_print_t ahc_seqaddr1_print;
500#else
501#define ahc_seqaddr1_print(regvalue, cur_col, wrap) \
502 ahc_print_register(NULL, 0, "SEQADDR1", 0x63, regvalue, cur_col, wrap)
503#endif
504
505#if AIC_DEBUG_REGISTERS
506ahc_reg_print_t ahc_accum_print;
507#else
508#define ahc_accum_print(regvalue, cur_col, wrap) \
509 ahc_print_register(NULL, 0, "ACCUM", 0x64, regvalue, cur_col, wrap)
510#endif
511
512#if AIC_DEBUG_REGISTERS
513ahc_reg_print_t ahc_sindex_print;
514#else
515#define ahc_sindex_print(regvalue, cur_col, wrap) \
516 ahc_print_register(NULL, 0, "SINDEX", 0x65, regvalue, cur_col, wrap)
517#endif
518
519#if AIC_DEBUG_REGISTERS
520ahc_reg_print_t ahc_dindex_print;
521#else
522#define ahc_dindex_print(regvalue, cur_col, wrap) \
523 ahc_print_register(NULL, 0, "DINDEX", 0x66, regvalue, cur_col, wrap)
524#endif
525
526#if AIC_DEBUG_REGISTERS
527ahc_reg_print_t ahc_allones_print;
528#else
529#define ahc_allones_print(regvalue, cur_col, wrap) \
530 ahc_print_register(NULL, 0, "ALLONES", 0x69, regvalue, cur_col, wrap)
531#endif
532
533#if AIC_DEBUG_REGISTERS
534ahc_reg_print_t ahc_allzeros_print;
535#else
536#define ahc_allzeros_print(regvalue, cur_col, wrap) \
537 ahc_print_register(NULL, 0, "ALLZEROS", 0x6a, regvalue, cur_col, wrap)
538#endif
539
540#if AIC_DEBUG_REGISTERS
541ahc_reg_print_t ahc_none_print;
542#else
543#define ahc_none_print(regvalue, cur_col, wrap) \
544 ahc_print_register(NULL, 0, "NONE", 0x6a, regvalue, cur_col, wrap)
545#endif
546
547#if AIC_DEBUG_REGISTERS
548ahc_reg_print_t ahc_flags_print;
549#else
550#define ahc_flags_print(regvalue, cur_col, wrap) \
551 ahc_print_register(NULL, 0, "FLAGS", 0x6b, regvalue, cur_col, wrap)
552#endif
553
554#if AIC_DEBUG_REGISTERS
555ahc_reg_print_t ahc_sindir_print;
556#else
557#define ahc_sindir_print(regvalue, cur_col, wrap) \
558 ahc_print_register(NULL, 0, "SINDIR", 0x6c, regvalue, cur_col, wrap)
559#endif
560
561#if AIC_DEBUG_REGISTERS
562ahc_reg_print_t ahc_dindir_print;
563#else
564#define ahc_dindir_print(regvalue, cur_col, wrap) \
565 ahc_print_register(NULL, 0, "DINDIR", 0x6d, regvalue, cur_col, wrap)
566#endif
567
568#if AIC_DEBUG_REGISTERS
569ahc_reg_print_t ahc_function1_print;
570#else
571#define ahc_function1_print(regvalue, cur_col, wrap) \
572 ahc_print_register(NULL, 0, "FUNCTION1", 0x6e, regvalue, cur_col, wrap)
573#endif
574
575#if AIC_DEBUG_REGISTERS
576ahc_reg_print_t ahc_stack_print;
577#else
578#define ahc_stack_print(regvalue, cur_col, wrap) \
579 ahc_print_register(NULL, 0, "STACK", 0x6f, regvalue, cur_col, wrap)
580#endif
581
582#if AIC_DEBUG_REGISTERS
583ahc_reg_print_t ahc_targ_offset_print;
584#else
585#define ahc_targ_offset_print(regvalue, cur_col, wrap) \
586 ahc_print_register(NULL, 0, "TARG_OFFSET", 0x70, regvalue, cur_col, wrap)
587#endif
588
589#if AIC_DEBUG_REGISTERS
590ahc_reg_print_t ahc_sram_base_print; 121ahc_reg_print_t ahc_sram_base_print;
591#else 122#else
592#define ahc_sram_base_print(regvalue, cur_col, wrap) \ 123#define ahc_sram_base_print(regvalue, cur_col, wrap) \
@@ -594,97 +125,6 @@ ahc_reg_print_t ahc_sram_base_print;
594#endif 125#endif
595 126
596#if AIC_DEBUG_REGISTERS 127#if AIC_DEBUG_REGISTERS
597ahc_reg_print_t ahc_bctl_print;
598#else
599#define ahc_bctl_print(regvalue, cur_col, wrap) \
600 ahc_print_register(NULL, 0, "BCTL", 0x84, regvalue, cur_col, wrap)
601#endif
602
603#if AIC_DEBUG_REGISTERS
604ahc_reg_print_t ahc_dscommand0_print;
605#else
606#define ahc_dscommand0_print(regvalue, cur_col, wrap) \
607 ahc_print_register(NULL, 0, "DSCOMMAND0", 0x84, regvalue, cur_col, wrap)
608#endif
609
610#if AIC_DEBUG_REGISTERS
611ahc_reg_print_t ahc_bustime_print;
612#else
613#define ahc_bustime_print(regvalue, cur_col, wrap) \
614 ahc_print_register(NULL, 0, "BUSTIME", 0x85, regvalue, cur_col, wrap)
615#endif
616
617#if AIC_DEBUG_REGISTERS
618ahc_reg_print_t ahc_dscommand1_print;
619#else
620#define ahc_dscommand1_print(regvalue, cur_col, wrap) \
621 ahc_print_register(NULL, 0, "DSCOMMAND1", 0x85, regvalue, cur_col, wrap)
622#endif
623
624#if AIC_DEBUG_REGISTERS
625ahc_reg_print_t ahc_busspd_print;
626#else
627#define ahc_busspd_print(regvalue, cur_col, wrap) \
628 ahc_print_register(NULL, 0, "BUSSPD", 0x86, regvalue, cur_col, wrap)
629#endif
630
631#if AIC_DEBUG_REGISTERS
632ahc_reg_print_t ahc_hs_mailbox_print;
633#else
634#define ahc_hs_mailbox_print(regvalue, cur_col, wrap) \
635 ahc_print_register(NULL, 0, "HS_MAILBOX", 0x86, regvalue, cur_col, wrap)
636#endif
637
638#if AIC_DEBUG_REGISTERS
639ahc_reg_print_t ahc_dspcistatus_print;
640#else
641#define ahc_dspcistatus_print(regvalue, cur_col, wrap) \
642 ahc_print_register(NULL, 0, "DSPCISTATUS", 0x86, regvalue, cur_col, wrap)
643#endif
644
645#if AIC_DEBUG_REGISTERS
646ahc_reg_print_t ahc_hcntrl_print;
647#else
648#define ahc_hcntrl_print(regvalue, cur_col, wrap) \
649 ahc_print_register(NULL, 0, "HCNTRL", 0x87, regvalue, cur_col, wrap)
650#endif
651
652#if AIC_DEBUG_REGISTERS
653ahc_reg_print_t ahc_haddr_print;
654#else
655#define ahc_haddr_print(regvalue, cur_col, wrap) \
656 ahc_print_register(NULL, 0, "HADDR", 0x88, regvalue, cur_col, wrap)
657#endif
658
659#if AIC_DEBUG_REGISTERS
660ahc_reg_print_t ahc_hcnt_print;
661#else
662#define ahc_hcnt_print(regvalue, cur_col, wrap) \
663 ahc_print_register(NULL, 0, "HCNT", 0x8c, regvalue, cur_col, wrap)
664#endif
665
666#if AIC_DEBUG_REGISTERS
667ahc_reg_print_t ahc_scbptr_print;
668#else
669#define ahc_scbptr_print(regvalue, cur_col, wrap) \
670 ahc_print_register(NULL, 0, "SCBPTR", 0x90, regvalue, cur_col, wrap)
671#endif
672
673#if AIC_DEBUG_REGISTERS
674ahc_reg_print_t ahc_intstat_print;
675#else
676#define ahc_intstat_print(regvalue, cur_col, wrap) \
677 ahc_print_register(NULL, 0, "INTSTAT", 0x91, regvalue, cur_col, wrap)
678#endif
679
680#if AIC_DEBUG_REGISTERS
681ahc_reg_print_t ahc_clrint_print;
682#else
683#define ahc_clrint_print(regvalue, cur_col, wrap) \
684 ahc_print_register(NULL, 0, "CLRINT", 0x92, regvalue, cur_col, wrap)
685#endif
686
687#if AIC_DEBUG_REGISTERS
688ahc_reg_print_t ahc_error_print; 128ahc_reg_print_t ahc_error_print;
689#else 129#else
690#define ahc_error_print(regvalue, cur_col, wrap) \ 130#define ahc_error_print(regvalue, cur_col, wrap) \
@@ -706,69 +146,6 @@ ahc_reg_print_t ahc_dfstatus_print;
706#endif 146#endif
707 147
708#if AIC_DEBUG_REGISTERS 148#if AIC_DEBUG_REGISTERS
709ahc_reg_print_t ahc_dfwaddr_print;
710#else
711#define ahc_dfwaddr_print(regvalue, cur_col, wrap) \
712 ahc_print_register(NULL, 0, "DFWADDR", 0x95, regvalue, cur_col, wrap)
713#endif
714
715#if AIC_DEBUG_REGISTERS
716ahc_reg_print_t ahc_dfraddr_print;
717#else
718#define ahc_dfraddr_print(regvalue, cur_col, wrap) \
719 ahc_print_register(NULL, 0, "DFRADDR", 0x97, regvalue, cur_col, wrap)
720#endif
721
722#if AIC_DEBUG_REGISTERS
723ahc_reg_print_t ahc_dfdat_print;
724#else
725#define ahc_dfdat_print(regvalue, cur_col, wrap) \
726 ahc_print_register(NULL, 0, "DFDAT", 0x99, regvalue, cur_col, wrap)
727#endif
728
729#if AIC_DEBUG_REGISTERS
730ahc_reg_print_t ahc_scbcnt_print;
731#else
732#define ahc_scbcnt_print(regvalue, cur_col, wrap) \
733 ahc_print_register(NULL, 0, "SCBCNT", 0x9a, regvalue, cur_col, wrap)
734#endif
735
736#if AIC_DEBUG_REGISTERS
737ahc_reg_print_t ahc_qinfifo_print;
738#else
739#define ahc_qinfifo_print(regvalue, cur_col, wrap) \
740 ahc_print_register(NULL, 0, "QINFIFO", 0x9b, regvalue, cur_col, wrap)
741#endif
742
743#if AIC_DEBUG_REGISTERS
744ahc_reg_print_t ahc_qincnt_print;
745#else
746#define ahc_qincnt_print(regvalue, cur_col, wrap) \
747 ahc_print_register(NULL, 0, "QINCNT", 0x9c, regvalue, cur_col, wrap)
748#endif
749
750#if AIC_DEBUG_REGISTERS
751ahc_reg_print_t ahc_qoutfifo_print;
752#else
753#define ahc_qoutfifo_print(regvalue, cur_col, wrap) \
754 ahc_print_register(NULL, 0, "QOUTFIFO", 0x9d, regvalue, cur_col, wrap)
755#endif
756
757#if AIC_DEBUG_REGISTERS
758ahc_reg_print_t ahc_crccontrol1_print;
759#else
760#define ahc_crccontrol1_print(regvalue, cur_col, wrap) \
761 ahc_print_register(NULL, 0, "CRCCONTROL1", 0x9d, regvalue, cur_col, wrap)
762#endif
763
764#if AIC_DEBUG_REGISTERS
765ahc_reg_print_t ahc_qoutcnt_print;
766#else
767#define ahc_qoutcnt_print(regvalue, cur_col, wrap) \
768 ahc_print_register(NULL, 0, "QOUTCNT", 0x9e, regvalue, cur_col, wrap)
769#endif
770
771#if AIC_DEBUG_REGISTERS
772ahc_reg_print_t ahc_scsiphase_print; 149ahc_reg_print_t ahc_scsiphase_print;
773#else 150#else
774#define ahc_scsiphase_print(regvalue, cur_col, wrap) \ 151#define ahc_scsiphase_print(regvalue, cur_col, wrap) \
@@ -776,13 +153,6 @@ ahc_reg_print_t ahc_scsiphase_print;
776#endif 153#endif
777 154
778#if AIC_DEBUG_REGISTERS 155#if AIC_DEBUG_REGISTERS
779ahc_reg_print_t ahc_sfunct_print;
780#else
781#define ahc_sfunct_print(regvalue, cur_col, wrap) \
782 ahc_print_register(NULL, 0, "SFUNCT", 0x9f, regvalue, cur_col, wrap)
783#endif
784
785#if AIC_DEBUG_REGISTERS
786ahc_reg_print_t ahc_scb_base_print; 156ahc_reg_print_t ahc_scb_base_print;
787#else 157#else
788#define ahc_scb_base_print(regvalue, cur_col, wrap) \ 158#define ahc_scb_base_print(regvalue, cur_col, wrap) \
@@ -790,69 +160,6 @@ ahc_reg_print_t ahc_scb_base_print;
790#endif 160#endif
791 161
792#if AIC_DEBUG_REGISTERS 162#if AIC_DEBUG_REGISTERS
793ahc_reg_print_t ahc_scb_cdb_ptr_print;
794#else
795#define ahc_scb_cdb_ptr_print(regvalue, cur_col, wrap) \
796 ahc_print_register(NULL, 0, "SCB_CDB_PTR", 0xa0, regvalue, cur_col, wrap)
797#endif
798
799#if AIC_DEBUG_REGISTERS
800ahc_reg_print_t ahc_scb_residual_sgptr_print;
801#else
802#define ahc_scb_residual_sgptr_print(regvalue, cur_col, wrap) \
803 ahc_print_register(NULL, 0, "SCB_RESIDUAL_SGPTR", 0xa4, regvalue, cur_col, wrap)
804#endif
805
806#if AIC_DEBUG_REGISTERS
807ahc_reg_print_t ahc_scb_scsi_status_print;
808#else
809#define ahc_scb_scsi_status_print(regvalue, cur_col, wrap) \
810 ahc_print_register(NULL, 0, "SCB_SCSI_STATUS", 0xa8, regvalue, cur_col, wrap)
811#endif
812
813#if AIC_DEBUG_REGISTERS
814ahc_reg_print_t ahc_scb_target_phases_print;
815#else
816#define ahc_scb_target_phases_print(regvalue, cur_col, wrap) \
817 ahc_print_register(NULL, 0, "SCB_TARGET_PHASES", 0xa9, regvalue, cur_col, wrap)
818#endif
819
820#if AIC_DEBUG_REGISTERS
821ahc_reg_print_t ahc_scb_target_data_dir_print;
822#else
823#define ahc_scb_target_data_dir_print(regvalue, cur_col, wrap) \
824 ahc_print_register(NULL, 0, "SCB_TARGET_DATA_DIR", 0xaa, regvalue, cur_col, wrap)
825#endif
826
827#if AIC_DEBUG_REGISTERS
828ahc_reg_print_t ahc_scb_target_itag_print;
829#else
830#define ahc_scb_target_itag_print(regvalue, cur_col, wrap) \
831 ahc_print_register(NULL, 0, "SCB_TARGET_ITAG", 0xab, regvalue, cur_col, wrap)
832#endif
833
834#if AIC_DEBUG_REGISTERS
835ahc_reg_print_t ahc_scb_dataptr_print;
836#else
837#define ahc_scb_dataptr_print(regvalue, cur_col, wrap) \
838 ahc_print_register(NULL, 0, "SCB_DATAPTR", 0xac, regvalue, cur_col, wrap)
839#endif
840
841#if AIC_DEBUG_REGISTERS
842ahc_reg_print_t ahc_scb_datacnt_print;
843#else
844#define ahc_scb_datacnt_print(regvalue, cur_col, wrap) \
845 ahc_print_register(NULL, 0, "SCB_DATACNT", 0xb0, regvalue, cur_col, wrap)
846#endif
847
848#if AIC_DEBUG_REGISTERS
849ahc_reg_print_t ahc_scb_sgptr_print;
850#else
851#define ahc_scb_sgptr_print(regvalue, cur_col, wrap) \
852 ahc_print_register(NULL, 0, "SCB_SGPTR", 0xb4, regvalue, cur_col, wrap)
853#endif
854
855#if AIC_DEBUG_REGISTERS
856ahc_reg_print_t ahc_scb_control_print; 163ahc_reg_print_t ahc_scb_control_print;
857#else 164#else
858#define ahc_scb_control_print(regvalue, cur_col, wrap) \ 165#define ahc_scb_control_print(regvalue, cur_col, wrap) \
@@ -880,188 +187,6 @@ ahc_reg_print_t ahc_scb_tag_print;
880 ahc_print_register(NULL, 0, "SCB_TAG", 0xbb, regvalue, cur_col, wrap) 187 ahc_print_register(NULL, 0, "SCB_TAG", 0xbb, regvalue, cur_col, wrap)
881#endif 188#endif
882 189
883#if AIC_DEBUG_REGISTERS
884ahc_reg_print_t ahc_scb_cdb_len_print;
885#else
886#define ahc_scb_cdb_len_print(regvalue, cur_col, wrap) \
887 ahc_print_register(NULL, 0, "SCB_CDB_LEN", 0xbc, regvalue, cur_col, wrap)
888#endif
889
890#if AIC_DEBUG_REGISTERS
891ahc_reg_print_t ahc_scb_scsirate_print;
892#else
893#define ahc_scb_scsirate_print(regvalue, cur_col, wrap) \
894 ahc_print_register(NULL, 0, "SCB_SCSIRATE", 0xbd, regvalue, cur_col, wrap)
895#endif
896
897#if AIC_DEBUG_REGISTERS
898ahc_reg_print_t ahc_scb_scsioffset_print;
899#else
900#define ahc_scb_scsioffset_print(regvalue, cur_col, wrap) \
901 ahc_print_register(NULL, 0, "SCB_SCSIOFFSET", 0xbe, regvalue, cur_col, wrap)
902#endif
903
904#if AIC_DEBUG_REGISTERS
905ahc_reg_print_t ahc_scb_next_print;
906#else
907#define ahc_scb_next_print(regvalue, cur_col, wrap) \
908 ahc_print_register(NULL, 0, "SCB_NEXT", 0xbf, regvalue, cur_col, wrap)
909#endif
910
911#if AIC_DEBUG_REGISTERS
912ahc_reg_print_t ahc_scb_64_spare_print;
913#else
914#define ahc_scb_64_spare_print(regvalue, cur_col, wrap) \
915 ahc_print_register(NULL, 0, "SCB_64_SPARE", 0xc0, regvalue, cur_col, wrap)
916#endif
917
918#if AIC_DEBUG_REGISTERS
919ahc_reg_print_t ahc_seectl_2840_print;
920#else
921#define ahc_seectl_2840_print(regvalue, cur_col, wrap) \
922 ahc_print_register(NULL, 0, "SEECTL_2840", 0xc0, regvalue, cur_col, wrap)
923#endif
924
925#if AIC_DEBUG_REGISTERS
926ahc_reg_print_t ahc_status_2840_print;
927#else
928#define ahc_status_2840_print(regvalue, cur_col, wrap) \
929 ahc_print_register(NULL, 0, "STATUS_2840", 0xc1, regvalue, cur_col, wrap)
930#endif
931
932#if AIC_DEBUG_REGISTERS
933ahc_reg_print_t ahc_scb_64_btt_print;
934#else
935#define ahc_scb_64_btt_print(regvalue, cur_col, wrap) \
936 ahc_print_register(NULL, 0, "SCB_64_BTT", 0xd0, regvalue, cur_col, wrap)
937#endif
938
939#if AIC_DEBUG_REGISTERS
940ahc_reg_print_t ahc_cchaddr_print;
941#else
942#define ahc_cchaddr_print(regvalue, cur_col, wrap) \
943 ahc_print_register(NULL, 0, "CCHADDR", 0xe0, regvalue, cur_col, wrap)
944#endif
945
946#if AIC_DEBUG_REGISTERS
947ahc_reg_print_t ahc_cchcnt_print;
948#else
949#define ahc_cchcnt_print(regvalue, cur_col, wrap) \
950 ahc_print_register(NULL, 0, "CCHCNT", 0xe8, regvalue, cur_col, wrap)
951#endif
952
953#if AIC_DEBUG_REGISTERS
954ahc_reg_print_t ahc_ccsgram_print;
955#else
956#define ahc_ccsgram_print(regvalue, cur_col, wrap) \
957 ahc_print_register(NULL, 0, "CCSGRAM", 0xe9, regvalue, cur_col, wrap)
958#endif
959
960#if AIC_DEBUG_REGISTERS
961ahc_reg_print_t ahc_ccsgaddr_print;
962#else
963#define ahc_ccsgaddr_print(regvalue, cur_col, wrap) \
964 ahc_print_register(NULL, 0, "CCSGADDR", 0xea, regvalue, cur_col, wrap)
965#endif
966
967#if AIC_DEBUG_REGISTERS
968ahc_reg_print_t ahc_ccsgctl_print;
969#else
970#define ahc_ccsgctl_print(regvalue, cur_col, wrap) \
971 ahc_print_register(NULL, 0, "CCSGCTL", 0xeb, regvalue, cur_col, wrap)
972#endif
973
974#if AIC_DEBUG_REGISTERS
975ahc_reg_print_t ahc_ccscbram_print;
976#else
977#define ahc_ccscbram_print(regvalue, cur_col, wrap) \
978 ahc_print_register(NULL, 0, "CCSCBRAM", 0xec, regvalue, cur_col, wrap)
979#endif
980
981#if AIC_DEBUG_REGISTERS
982ahc_reg_print_t ahc_ccscbaddr_print;
983#else
984#define ahc_ccscbaddr_print(regvalue, cur_col, wrap) \
985 ahc_print_register(NULL, 0, "CCSCBADDR", 0xed, regvalue, cur_col, wrap)
986#endif
987
988#if AIC_DEBUG_REGISTERS
989ahc_reg_print_t ahc_ccscbctl_print;
990#else
991#define ahc_ccscbctl_print(regvalue, cur_col, wrap) \
992 ahc_print_register(NULL, 0, "CCSCBCTL", 0xee, regvalue, cur_col, wrap)
993#endif
994
995#if AIC_DEBUG_REGISTERS
996ahc_reg_print_t ahc_ccscbcnt_print;
997#else
998#define ahc_ccscbcnt_print(regvalue, cur_col, wrap) \
999 ahc_print_register(NULL, 0, "CCSCBCNT", 0xef, regvalue, cur_col, wrap)
1000#endif
1001
1002#if AIC_DEBUG_REGISTERS
1003ahc_reg_print_t ahc_scbbaddr_print;
1004#else
1005#define ahc_scbbaddr_print(regvalue, cur_col, wrap) \
1006 ahc_print_register(NULL, 0, "SCBBADDR", 0xf0, regvalue, cur_col, wrap)
1007#endif
1008
1009#if AIC_DEBUG_REGISTERS
1010ahc_reg_print_t ahc_ccscbptr_print;
1011#else
1012#define ahc_ccscbptr_print(regvalue, cur_col, wrap) \
1013 ahc_print_register(NULL, 0, "CCSCBPTR", 0xf1, regvalue, cur_col, wrap)
1014#endif
1015
1016#if AIC_DEBUG_REGISTERS
1017ahc_reg_print_t ahc_hnscb_qoff_print;
1018#else
1019#define ahc_hnscb_qoff_print(regvalue, cur_col, wrap) \
1020 ahc_print_register(NULL, 0, "HNSCB_QOFF", 0xf4, regvalue, cur_col, wrap)
1021#endif
1022
1023#if AIC_DEBUG_REGISTERS
1024ahc_reg_print_t ahc_snscb_qoff_print;
1025#else
1026#define ahc_snscb_qoff_print(regvalue, cur_col, wrap) \
1027 ahc_print_register(NULL, 0, "SNSCB_QOFF", 0xf6, regvalue, cur_col, wrap)
1028#endif
1029
1030#if AIC_DEBUG_REGISTERS
1031ahc_reg_print_t ahc_sdscb_qoff_print;
1032#else
1033#define ahc_sdscb_qoff_print(regvalue, cur_col, wrap) \
1034 ahc_print_register(NULL, 0, "SDSCB_QOFF", 0xf8, regvalue, cur_col, wrap)
1035#endif
1036
1037#if AIC_DEBUG_REGISTERS
1038ahc_reg_print_t ahc_qoff_ctlsta_print;
1039#else
1040#define ahc_qoff_ctlsta_print(regvalue, cur_col, wrap) \
1041 ahc_print_register(NULL, 0, "QOFF_CTLSTA", 0xfa, regvalue, cur_col, wrap)
1042#endif
1043
1044#if AIC_DEBUG_REGISTERS
1045ahc_reg_print_t ahc_dff_thrsh_print;
1046#else
1047#define ahc_dff_thrsh_print(regvalue, cur_col, wrap) \
1048 ahc_print_register(NULL, 0, "DFF_THRSH", 0xfb, regvalue, cur_col, wrap)
1049#endif
1050
1051#if AIC_DEBUG_REGISTERS
1052ahc_reg_print_t ahc_sg_cache_shadow_print;
1053#else
1054#define ahc_sg_cache_shadow_print(regvalue, cur_col, wrap) \
1055 ahc_print_register(NULL, 0, "SG_CACHE_SHADOW", 0xfc, regvalue, cur_col, wrap)
1056#endif
1057
1058#if AIC_DEBUG_REGISTERS
1059ahc_reg_print_t ahc_sg_cache_pre_print;
1060#else
1061#define ahc_sg_cache_pre_print(regvalue, cur_col, wrap) \
1062 ahc_print_register(NULL, 0, "SG_CACHE_PRE", 0xfc, regvalue, cur_col, wrap)
1063#endif
1064
1065 190
1066#define SCSISEQ 0x00 191#define SCSISEQ 0x00
1067#define TEMODE 0x80 192#define TEMODE 0x80
diff --git a/drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped b/drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped
index 309a562b009e..9f9b88047d0c 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped
+++ b/drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped
@@ -43,48 +43,6 @@ ahc_sxfrctl0_print(u_int regvalue, u_int *cur_col, u_int wrap)
43 0x01, regvalue, cur_col, wrap)); 43 0x01, regvalue, cur_col, wrap));
44} 44}
45 45
46static const ahc_reg_parse_entry_t SXFRCTL1_parse_table[] = {
47 { "STPWEN", 0x01, 0x01 },
48 { "ACTNEGEN", 0x02, 0x02 },
49 { "ENSTIMER", 0x04, 0x04 },
50 { "ENSPCHK", 0x20, 0x20 },
51 { "SWRAPEN", 0x40, 0x40 },
52 { "BITBUCKET", 0x80, 0x80 },
53 { "STIMESEL", 0x18, 0x18 }
54};
55
56int
57ahc_sxfrctl1_print(u_int regvalue, u_int *cur_col, u_int wrap)
58{
59 return (ahc_print_register(SXFRCTL1_parse_table, 7, "SXFRCTL1",
60 0x02, regvalue, cur_col, wrap));
61}
62
63static const ahc_reg_parse_entry_t SCSISIGO_parse_table[] = {
64 { "ACKO", 0x01, 0x01 },
65 { "REQO", 0x02, 0x02 },
66 { "BSYO", 0x04, 0x04 },
67 { "SELO", 0x08, 0x08 },
68 { "ATNO", 0x10, 0x10 },
69 { "MSGO", 0x20, 0x20 },
70 { "IOO", 0x40, 0x40 },
71 { "CDO", 0x80, 0x80 },
72 { "P_DATAOUT", 0x00, 0x00 },
73 { "P_DATAIN", 0x40, 0x40 },
74 { "P_COMMAND", 0x80, 0x80 },
75 { "P_MESGOUT", 0xa0, 0xa0 },
76 { "P_STATUS", 0xc0, 0xc0 },
77 { "PHASE_MASK", 0xe0, 0xe0 },
78 { "P_MESGIN", 0xe0, 0xe0 }
79};
80
81int
82ahc_scsisigo_print(u_int regvalue, u_int *cur_col, u_int wrap)
83{
84 return (ahc_print_register(SCSISIGO_parse_table, 15, "SCSISIGO",
85 0x03, regvalue, cur_col, wrap));
86}
87
88static const ahc_reg_parse_entry_t SCSISIGI_parse_table[] = { 46static const ahc_reg_parse_entry_t SCSISIGI_parse_table[] = {
89 { "ACKI", 0x01, 0x01 }, 47 { "ACKI", 0x01, 0x01 },
90 { "REQI", 0x02, 0x02 }, 48 { "REQI", 0x02, 0x02 },
@@ -128,77 +86,6 @@ ahc_scsirate_print(u_int regvalue, u_int *cur_col, u_int wrap)
128 0x04, regvalue, cur_col, wrap)); 86 0x04, regvalue, cur_col, wrap));
129} 87}
130 88
131static const ahc_reg_parse_entry_t SCSIID_parse_table[] = {
132 { "TWIN_CHNLB", 0x80, 0x80 },
133 { "OID", 0x0f, 0x0f },
134 { "TWIN_TID", 0x70, 0x70 },
135 { "SOFS_ULTRA2", 0x7f, 0x7f },
136 { "TID", 0xf0, 0xf0 }
137};
138
139int
140ahc_scsiid_print(u_int regvalue, u_int *cur_col, u_int wrap)
141{
142 return (ahc_print_register(SCSIID_parse_table, 5, "SCSIID",
143 0x05, regvalue, cur_col, wrap));
144}
145
146int
147ahc_scsidatl_print(u_int regvalue, u_int *cur_col, u_int wrap)
148{
149 return (ahc_print_register(NULL, 0, "SCSIDATL",
150 0x06, regvalue, cur_col, wrap));
151}
152
153int
154ahc_stcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
155{
156 return (ahc_print_register(NULL, 0, "STCNT",
157 0x08, regvalue, cur_col, wrap));
158}
159
160static const ahc_reg_parse_entry_t OPTIONMODE_parse_table[] = {
161 { "DIS_MSGIN_DUALEDGE", 0x01, 0x01 },
162 { "AUTO_MSGOUT_DE", 0x02, 0x02 },
163 { "SCSIDATL_IMGEN", 0x04, 0x04 },
164 { "EXPPHASEDIS", 0x08, 0x08 },
165 { "BUSFREEREV", 0x10, 0x10 },
166 { "ATNMGMNTEN", 0x20, 0x20 },
167 { "AUTOACKEN", 0x40, 0x40 },
168 { "AUTORATEEN", 0x80, 0x80 },
169 { "OPTIONMODE_DEFAULTS",0x03, 0x03 }
170};
171
172int
173ahc_optionmode_print(u_int regvalue, u_int *cur_col, u_int wrap)
174{
175 return (ahc_print_register(OPTIONMODE_parse_table, 9, "OPTIONMODE",
176 0x08, regvalue, cur_col, wrap));
177}
178
179int
180ahc_targcrccnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
181{
182 return (ahc_print_register(NULL, 0, "TARGCRCCNT",
183 0x0a, regvalue, cur_col, wrap));
184}
185
186static const ahc_reg_parse_entry_t CLRSINT0_parse_table[] = {
187 { "CLRSPIORDY", 0x02, 0x02 },
188 { "CLRSWRAP", 0x08, 0x08 },
189 { "CLRIOERR", 0x08, 0x08 },
190 { "CLRSELINGO", 0x10, 0x10 },
191 { "CLRSELDI", 0x20, 0x20 },
192 { "CLRSELDO", 0x40, 0x40 }
193};
194
195int
196ahc_clrsint0_print(u_int regvalue, u_int *cur_col, u_int wrap)
197{
198 return (ahc_print_register(CLRSINT0_parse_table, 6, "CLRSINT0",
199 0x0b, regvalue, cur_col, wrap));
200}
201
202static const ahc_reg_parse_entry_t SSTAT0_parse_table[] = { 89static const ahc_reg_parse_entry_t SSTAT0_parse_table[] = {
203 { "DMADONE", 0x01, 0x01 }, 90 { "DMADONE", 0x01, 0x01 },
204 { "SPIORDY", 0x02, 0x02 }, 91 { "SPIORDY", 0x02, 0x02 },
@@ -218,23 +105,6 @@ ahc_sstat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
218 0x0b, regvalue, cur_col, wrap)); 105 0x0b, regvalue, cur_col, wrap));
219} 106}
220 107
221static const ahc_reg_parse_entry_t CLRSINT1_parse_table[] = {
222 { "CLRREQINIT", 0x01, 0x01 },
223 { "CLRPHASECHG", 0x02, 0x02 },
224 { "CLRSCSIPERR", 0x04, 0x04 },
225 { "CLRBUSFREE", 0x08, 0x08 },
226 { "CLRSCSIRSTI", 0x20, 0x20 },
227 { "CLRATNO", 0x40, 0x40 },
228 { "CLRSELTIMEO", 0x80, 0x80 }
229};
230
231int
232ahc_clrsint1_print(u_int regvalue, u_int *cur_col, u_int wrap)
233{
234 return (ahc_print_register(CLRSINT1_parse_table, 7, "CLRSINT1",
235 0x0c, regvalue, cur_col, wrap));
236}
237
238static const ahc_reg_parse_entry_t SSTAT1_parse_table[] = { 108static const ahc_reg_parse_entry_t SSTAT1_parse_table[] = {
239 { "REQINIT", 0x01, 0x01 }, 109 { "REQINIT", 0x01, 0x01 },
240 { "PHASECHG", 0x02, 0x02 }, 110 { "PHASECHG", 0x02, 0x02 },
@@ -284,18 +154,6 @@ ahc_sstat3_print(u_int regvalue, u_int *cur_col, u_int wrap)
284 0x0e, regvalue, cur_col, wrap)); 154 0x0e, regvalue, cur_col, wrap));
285} 155}
286 156
287static const ahc_reg_parse_entry_t SCSIID_ULTRA2_parse_table[] = {
288 { "OID", 0x0f, 0x0f },
289 { "TID", 0xf0, 0xf0 }
290};
291
292int
293ahc_scsiid_ultra2_print(u_int regvalue, u_int *cur_col, u_int wrap)
294{
295 return (ahc_print_register(SCSIID_ULTRA2_parse_table, 2, "SCSIID_ULTRA2",
296 0x0f, regvalue, cur_col, wrap));
297}
298
299static const ahc_reg_parse_entry_t SIMODE0_parse_table[] = { 157static const ahc_reg_parse_entry_t SIMODE0_parse_table[] = {
300 { "ENDMADONE", 0x01, 0x01 }, 158 { "ENDMADONE", 0x01, 0x01 },
301 { "ENSPIORDY", 0x02, 0x02 }, 159 { "ENSPIORDY", 0x02, 0x02 },
@@ -339,107 +197,6 @@ ahc_scsibusl_print(u_int regvalue, u_int *cur_col, u_int wrap)
339 0x12, regvalue, cur_col, wrap)); 197 0x12, regvalue, cur_col, wrap));
340} 198}
341 199
342int
343ahc_shaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
344{
345 return (ahc_print_register(NULL, 0, "SHADDR",
346 0x14, regvalue, cur_col, wrap));
347}
348
349static const ahc_reg_parse_entry_t SELTIMER_parse_table[] = {
350 { "STAGE1", 0x01, 0x01 },
351 { "STAGE2", 0x02, 0x02 },
352 { "STAGE3", 0x04, 0x04 },
353 { "STAGE4", 0x08, 0x08 },
354 { "STAGE5", 0x10, 0x10 },
355 { "STAGE6", 0x20, 0x20 }
356};
357
358int
359ahc_seltimer_print(u_int regvalue, u_int *cur_col, u_int wrap)
360{
361 return (ahc_print_register(SELTIMER_parse_table, 6, "SELTIMER",
362 0x18, regvalue, cur_col, wrap));
363}
364
365static const ahc_reg_parse_entry_t SELID_parse_table[] = {
366 { "ONEBIT", 0x08, 0x08 },
367 { "SELID_MASK", 0xf0, 0xf0 }
368};
369
370int
371ahc_selid_print(u_int regvalue, u_int *cur_col, u_int wrap)
372{
373 return (ahc_print_register(SELID_parse_table, 2, "SELID",
374 0x19, regvalue, cur_col, wrap));
375}
376
377int
378ahc_targid_print(u_int regvalue, u_int *cur_col, u_int wrap)
379{
380 return (ahc_print_register(NULL, 0, "TARGID",
381 0x1b, regvalue, cur_col, wrap));
382}
383
384static const ahc_reg_parse_entry_t SPIOCAP_parse_table[] = {
385 { "SSPIOCPS", 0x01, 0x01 },
386 { "ROM", 0x02, 0x02 },
387 { "EEPROM", 0x04, 0x04 },
388 { "SEEPROM", 0x08, 0x08 },
389 { "EXT_BRDCTL", 0x10, 0x10 },
390 { "SOFTCMDEN", 0x20, 0x20 },
391 { "SOFT0", 0x40, 0x40 },
392 { "SOFT1", 0x80, 0x80 }
393};
394
395int
396ahc_spiocap_print(u_int regvalue, u_int *cur_col, u_int wrap)
397{
398 return (ahc_print_register(SPIOCAP_parse_table, 8, "SPIOCAP",
399 0x1b, regvalue, cur_col, wrap));
400}
401
402static const ahc_reg_parse_entry_t BRDCTL_parse_table[] = {
403 { "BRDCTL0", 0x01, 0x01 },
404 { "BRDSTB_ULTRA2", 0x01, 0x01 },
405 { "BRDCTL1", 0x02, 0x02 },
406 { "BRDRW_ULTRA2", 0x02, 0x02 },
407 { "BRDRW", 0x04, 0x04 },
408 { "BRDDAT2", 0x04, 0x04 },
409 { "BRDCS", 0x08, 0x08 },
410 { "BRDDAT3", 0x08, 0x08 },
411 { "BRDSTB", 0x10, 0x10 },
412 { "BRDDAT4", 0x10, 0x10 },
413 { "BRDDAT5", 0x20, 0x20 },
414 { "BRDDAT6", 0x40, 0x40 },
415 { "BRDDAT7", 0x80, 0x80 }
416};
417
418int
419ahc_brdctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
420{
421 return (ahc_print_register(BRDCTL_parse_table, 13, "BRDCTL",
422 0x1d, regvalue, cur_col, wrap));
423}
424
425static const ahc_reg_parse_entry_t SEECTL_parse_table[] = {
426 { "SEEDI", 0x01, 0x01 },
427 { "SEEDO", 0x02, 0x02 },
428 { "SEECK", 0x04, 0x04 },
429 { "SEECS", 0x08, 0x08 },
430 { "SEERDY", 0x10, 0x10 },
431 { "SEEMS", 0x20, 0x20 },
432 { "EXTARBREQ", 0x40, 0x40 },
433 { "EXTARBACK", 0x80, 0x80 }
434};
435
436int
437ahc_seectl_print(u_int regvalue, u_int *cur_col, u_int wrap)
438{
439 return (ahc_print_register(SEECTL_parse_table, 8, "SEECTL",
440 0x1e, regvalue, cur_col, wrap));
441}
442
443static const ahc_reg_parse_entry_t SBLKCTL_parse_table[] = { 200static const ahc_reg_parse_entry_t SBLKCTL_parse_table[] = {
444 { "XCVR", 0x01, 0x01 }, 201 { "XCVR", 0x01, 0x01 },
445 { "SELWIDE", 0x02, 0x02 }, 202 { "SELWIDE", 0x02, 0x02 },
@@ -458,68 +215,6 @@ ahc_sblkctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
458 0x1f, regvalue, cur_col, wrap)); 215 0x1f, regvalue, cur_col, wrap));
459} 216}
460 217
461int
462ahc_busy_targets_print(u_int regvalue, u_int *cur_col, u_int wrap)
463{
464 return (ahc_print_register(NULL, 0, "BUSY_TARGETS",
465 0x20, regvalue, cur_col, wrap));
466}
467
468int
469ahc_ultra_enb_print(u_int regvalue, u_int *cur_col, u_int wrap)
470{
471 return (ahc_print_register(NULL, 0, "ULTRA_ENB",
472 0x30, regvalue, cur_col, wrap));
473}
474
475int
476ahc_disc_dsb_print(u_int regvalue, u_int *cur_col, u_int wrap)
477{
478 return (ahc_print_register(NULL, 0, "DISC_DSB",
479 0x32, regvalue, cur_col, wrap));
480}
481
482int
483ahc_mwi_residual_print(u_int regvalue, u_int *cur_col, u_int wrap)
484{
485 return (ahc_print_register(NULL, 0, "MWI_RESIDUAL",
486 0x38, regvalue, cur_col, wrap));
487}
488
489int
490ahc_next_queued_scb_print(u_int regvalue, u_int *cur_col, u_int wrap)
491{
492 return (ahc_print_register(NULL, 0, "NEXT_QUEUED_SCB",
493 0x39, regvalue, cur_col, wrap));
494}
495
496int
497ahc_msg_out_print(u_int regvalue, u_int *cur_col, u_int wrap)
498{
499 return (ahc_print_register(NULL, 0, "MSG_OUT",
500 0x3a, regvalue, cur_col, wrap));
501}
502
503static const ahc_reg_parse_entry_t DMAPARAMS_parse_table[] = {
504 { "FIFORESET", 0x01, 0x01 },
505 { "FIFOFLUSH", 0x02, 0x02 },
506 { "DIRECTION", 0x04, 0x04 },
507 { "HDMAEN", 0x08, 0x08 },
508 { "HDMAENACK", 0x08, 0x08 },
509 { "SDMAEN", 0x10, 0x10 },
510 { "SDMAENACK", 0x10, 0x10 },
511 { "SCSIEN", 0x20, 0x20 },
512 { "WIDEODD", 0x40, 0x40 },
513 { "PRELOADEN", 0x80, 0x80 }
514};
515
516int
517ahc_dmaparams_print(u_int regvalue, u_int *cur_col, u_int wrap)
518{
519 return (ahc_print_register(DMAPARAMS_parse_table, 10, "DMAPARAMS",
520 0x3b, regvalue, cur_col, wrap));
521}
522
523static const ahc_reg_parse_entry_t SEQ_FLAGS_parse_table[] = { 218static const ahc_reg_parse_entry_t SEQ_FLAGS_parse_table[] = {
524 { "NO_DISCONNECT", 0x01, 0x01 }, 219 { "NO_DISCONNECT", 0x01, 0x01 },
525 { "SPHASE_PENDING", 0x02, 0x02 }, 220 { "SPHASE_PENDING", 0x02, 0x02 },
@@ -539,20 +234,6 @@ ahc_seq_flags_print(u_int regvalue, u_int *cur_col, u_int wrap)
539 0x3c, regvalue, cur_col, wrap)); 234 0x3c, regvalue, cur_col, wrap));
540} 235}
541 236
542int
543ahc_saved_scsiid_print(u_int regvalue, u_int *cur_col, u_int wrap)
544{
545 return (ahc_print_register(NULL, 0, "SAVED_SCSIID",
546 0x3d, regvalue, cur_col, wrap));
547}
548
549int
550ahc_saved_lun_print(u_int regvalue, u_int *cur_col, u_int wrap)
551{
552 return (ahc_print_register(NULL, 0, "SAVED_LUN",
553 0x3e, regvalue, cur_col, wrap));
554}
555
556static const ahc_reg_parse_entry_t LASTPHASE_parse_table[] = { 237static const ahc_reg_parse_entry_t LASTPHASE_parse_table[] = {
557 { "MSGI", 0x20, 0x20 }, 238 { "MSGI", 0x20, 0x20 },
558 { "IOI", 0x40, 0x40 }, 239 { "IOI", 0x40, 0x40 },
@@ -574,193 +255,6 @@ ahc_lastphase_print(u_int regvalue, u_int *cur_col, u_int wrap)
574 0x3f, regvalue, cur_col, wrap)); 255 0x3f, regvalue, cur_col, wrap));
575} 256}
576 257
577int
578ahc_waiting_scbh_print(u_int regvalue, u_int *cur_col, u_int wrap)
579{
580 return (ahc_print_register(NULL, 0, "WAITING_SCBH",
581 0x40, regvalue, cur_col, wrap));
582}
583
584int
585ahc_disconnected_scbh_print(u_int regvalue, u_int *cur_col, u_int wrap)
586{
587 return (ahc_print_register(NULL, 0, "DISCONNECTED_SCBH",
588 0x41, regvalue, cur_col, wrap));
589}
590
591int
592ahc_free_scbh_print(u_int regvalue, u_int *cur_col, u_int wrap)
593{
594 return (ahc_print_register(NULL, 0, "FREE_SCBH",
595 0x42, regvalue, cur_col, wrap));
596}
597
598int
599ahc_hscb_addr_print(u_int regvalue, u_int *cur_col, u_int wrap)
600{
601 return (ahc_print_register(NULL, 0, "HSCB_ADDR",
602 0x44, regvalue, cur_col, wrap));
603}
604
605int
606ahc_shared_data_addr_print(u_int regvalue, u_int *cur_col, u_int wrap)
607{
608 return (ahc_print_register(NULL, 0, "SHARED_DATA_ADDR",
609 0x48, regvalue, cur_col, wrap));
610}
611
612int
613ahc_kernel_qinpos_print(u_int regvalue, u_int *cur_col, u_int wrap)
614{
615 return (ahc_print_register(NULL, 0, "KERNEL_QINPOS",
616 0x4c, regvalue, cur_col, wrap));
617}
618
619int
620ahc_qinpos_print(u_int regvalue, u_int *cur_col, u_int wrap)
621{
622 return (ahc_print_register(NULL, 0, "QINPOS",
623 0x4d, regvalue, cur_col, wrap));
624}
625
626int
627ahc_qoutpos_print(u_int regvalue, u_int *cur_col, u_int wrap)
628{
629 return (ahc_print_register(NULL, 0, "QOUTPOS",
630 0x4e, regvalue, cur_col, wrap));
631}
632
633int
634ahc_kernel_tqinpos_print(u_int regvalue, u_int *cur_col, u_int wrap)
635{
636 return (ahc_print_register(NULL, 0, "KERNEL_TQINPOS",
637 0x4f, regvalue, cur_col, wrap));
638}
639
640int
641ahc_tqinpos_print(u_int regvalue, u_int *cur_col, u_int wrap)
642{
643 return (ahc_print_register(NULL, 0, "TQINPOS",
644 0x50, regvalue, cur_col, wrap));
645}
646
647static const ahc_reg_parse_entry_t ARG_1_parse_table[] = {
648 { "CONT_TARG_SESSION", 0x02, 0x02 },
649 { "CONT_MSG_LOOP", 0x04, 0x04 },
650 { "EXIT_MSG_LOOP", 0x08, 0x08 },
651 { "MSGOUT_PHASEMIS", 0x10, 0x10 },
652 { "SEND_REJ", 0x20, 0x20 },
653 { "SEND_SENSE", 0x40, 0x40 },
654 { "SEND_MSG", 0x80, 0x80 }
655};
656
657int
658ahc_arg_1_print(u_int regvalue, u_int *cur_col, u_int wrap)
659{
660 return (ahc_print_register(ARG_1_parse_table, 7, "ARG_1",
661 0x51, regvalue, cur_col, wrap));
662}
663
664int
665ahc_arg_2_print(u_int regvalue, u_int *cur_col, u_int wrap)
666{
667 return (ahc_print_register(NULL, 0, "ARG_2",
668 0x52, regvalue, cur_col, wrap));
669}
670
671int
672ahc_last_msg_print(u_int regvalue, u_int *cur_col, u_int wrap)
673{
674 return (ahc_print_register(NULL, 0, "LAST_MSG",
675 0x53, regvalue, cur_col, wrap));
676}
677
678static const ahc_reg_parse_entry_t SCSISEQ_TEMPLATE_parse_table[] = {
679 { "ENAUTOATNP", 0x02, 0x02 },
680 { "ENAUTOATNI", 0x04, 0x04 },
681 { "ENAUTOATNO", 0x08, 0x08 },
682 { "ENRSELI", 0x10, 0x10 },
683 { "ENSELI", 0x20, 0x20 },
684 { "ENSELO", 0x40, 0x40 }
685};
686
687int
688ahc_scsiseq_template_print(u_int regvalue, u_int *cur_col, u_int wrap)
689{
690 return (ahc_print_register(SCSISEQ_TEMPLATE_parse_table, 6, "SCSISEQ_TEMPLATE",
691 0x54, regvalue, cur_col, wrap));
692}
693
694static const ahc_reg_parse_entry_t HA_274_BIOSGLOBAL_parse_table[] = {
695 { "HA_274_EXTENDED_TRANS",0x01, 0x01 }
696};
697
698int
699ahc_ha_274_biosglobal_print(u_int regvalue, u_int *cur_col, u_int wrap)
700{
701 return (ahc_print_register(HA_274_BIOSGLOBAL_parse_table, 1, "HA_274_BIOSGLOBAL",
702 0x56, regvalue, cur_col, wrap));
703}
704
705static const ahc_reg_parse_entry_t SEQ_FLAGS2_parse_table[] = {
706 { "SCB_DMA", 0x01, 0x01 },
707 { "TARGET_MSG_PENDING", 0x02, 0x02 }
708};
709
710int
711ahc_seq_flags2_print(u_int regvalue, u_int *cur_col, u_int wrap)
712{
713 return (ahc_print_register(SEQ_FLAGS2_parse_table, 2, "SEQ_FLAGS2",
714 0x57, regvalue, cur_col, wrap));
715}
716
717static const ahc_reg_parse_entry_t SCSICONF_parse_table[] = {
718 { "ENSPCHK", 0x20, 0x20 },
719 { "RESET_SCSI", 0x40, 0x40 },
720 { "TERM_ENB", 0x80, 0x80 },
721 { "HSCSIID", 0x07, 0x07 },
722 { "HWSCSIID", 0x0f, 0x0f }
723};
724
725int
726ahc_scsiconf_print(u_int regvalue, u_int *cur_col, u_int wrap)
727{
728 return (ahc_print_register(SCSICONF_parse_table, 5, "SCSICONF",
729 0x5a, regvalue, cur_col, wrap));
730}
731
732static const ahc_reg_parse_entry_t INTDEF_parse_table[] = {
733 { "EDGE_TRIG", 0x80, 0x80 },
734 { "VECTOR", 0x0f, 0x0f }
735};
736
737int
738ahc_intdef_print(u_int regvalue, u_int *cur_col, u_int wrap)
739{
740 return (ahc_print_register(INTDEF_parse_table, 2, "INTDEF",
741 0x5c, regvalue, cur_col, wrap));
742}
743
744int
745ahc_hostconf_print(u_int regvalue, u_int *cur_col, u_int wrap)
746{
747 return (ahc_print_register(NULL, 0, "HOSTCONF",
748 0x5d, regvalue, cur_col, wrap));
749}
750
751static const ahc_reg_parse_entry_t HA_274_BIOSCTRL_parse_table[] = {
752 { "CHANNEL_B_PRIMARY", 0x08, 0x08 },
753 { "BIOSMODE", 0x30, 0x30 },
754 { "BIOSDISABLED", 0x30, 0x30 }
755};
756
757int
758ahc_ha_274_biosctrl_print(u_int regvalue, u_int *cur_col, u_int wrap)
759{
760 return (ahc_print_register(HA_274_BIOSCTRL_parse_table, 3, "HA_274_BIOSCTRL",
761 0x5f, regvalue, cur_col, wrap));
762}
763
764static const ahc_reg_parse_entry_t SEQCTL_parse_table[] = { 258static const ahc_reg_parse_entry_t SEQCTL_parse_table[] = {
765 { "LOADRAM", 0x01, 0x01 }, 259 { "LOADRAM", 0x01, 0x01 },
766 { "SEQRESET", 0x02, 0x02 }, 260 { "SEQRESET", 0x02, 0x02 },
@@ -780,285 +274,12 @@ ahc_seqctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
780} 274}
781 275
782int 276int
783ahc_seqram_print(u_int regvalue, u_int *cur_col, u_int wrap)
784{
785 return (ahc_print_register(NULL, 0, "SEQRAM",
786 0x61, regvalue, cur_col, wrap));
787}
788
789int
790ahc_seqaddr0_print(u_int regvalue, u_int *cur_col, u_int wrap)
791{
792 return (ahc_print_register(NULL, 0, "SEQADDR0",
793 0x62, regvalue, cur_col, wrap));
794}
795
796static const ahc_reg_parse_entry_t SEQADDR1_parse_table[] = {
797 { "SEQADDR1_MASK", 0x01, 0x01 }
798};
799
800int
801ahc_seqaddr1_print(u_int regvalue, u_int *cur_col, u_int wrap)
802{
803 return (ahc_print_register(SEQADDR1_parse_table, 1, "SEQADDR1",
804 0x63, regvalue, cur_col, wrap));
805}
806
807int
808ahc_accum_print(u_int regvalue, u_int *cur_col, u_int wrap)
809{
810 return (ahc_print_register(NULL, 0, "ACCUM",
811 0x64, regvalue, cur_col, wrap));
812}
813
814int
815ahc_sindex_print(u_int regvalue, u_int *cur_col, u_int wrap)
816{
817 return (ahc_print_register(NULL, 0, "SINDEX",
818 0x65, regvalue, cur_col, wrap));
819}
820
821int
822ahc_dindex_print(u_int regvalue, u_int *cur_col, u_int wrap)
823{
824 return (ahc_print_register(NULL, 0, "DINDEX",
825 0x66, regvalue, cur_col, wrap));
826}
827
828int
829ahc_allones_print(u_int regvalue, u_int *cur_col, u_int wrap)
830{
831 return (ahc_print_register(NULL, 0, "ALLONES",
832 0x69, regvalue, cur_col, wrap));
833}
834
835int
836ahc_allzeros_print(u_int regvalue, u_int *cur_col, u_int wrap)
837{
838 return (ahc_print_register(NULL, 0, "ALLZEROS",
839 0x6a, regvalue, cur_col, wrap));
840}
841
842int
843ahc_none_print(u_int regvalue, u_int *cur_col, u_int wrap)
844{
845 return (ahc_print_register(NULL, 0, "NONE",
846 0x6a, regvalue, cur_col, wrap));
847}
848
849static const ahc_reg_parse_entry_t FLAGS_parse_table[] = {
850 { "CARRY", 0x01, 0x01 },
851 { "ZERO", 0x02, 0x02 }
852};
853
854int
855ahc_flags_print(u_int regvalue, u_int *cur_col, u_int wrap)
856{
857 return (ahc_print_register(FLAGS_parse_table, 2, "FLAGS",
858 0x6b, regvalue, cur_col, wrap));
859}
860
861int
862ahc_sindir_print(u_int regvalue, u_int *cur_col, u_int wrap)
863{
864 return (ahc_print_register(NULL, 0, "SINDIR",
865 0x6c, regvalue, cur_col, wrap));
866}
867
868int
869ahc_dindir_print(u_int regvalue, u_int *cur_col, u_int wrap)
870{
871 return (ahc_print_register(NULL, 0, "DINDIR",
872 0x6d, regvalue, cur_col, wrap));
873}
874
875int
876ahc_stack_print(u_int regvalue, u_int *cur_col, u_int wrap)
877{
878 return (ahc_print_register(NULL, 0, "STACK",
879 0x6f, regvalue, cur_col, wrap));
880}
881
882int
883ahc_targ_offset_print(u_int regvalue, u_int *cur_col, u_int wrap)
884{
885 return (ahc_print_register(NULL, 0, "TARG_OFFSET",
886 0x70, regvalue, cur_col, wrap));
887}
888
889int
890ahc_sram_base_print(u_int regvalue, u_int *cur_col, u_int wrap) 277ahc_sram_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
891{ 278{
892 return (ahc_print_register(NULL, 0, "SRAM_BASE", 279 return (ahc_print_register(NULL, 0, "SRAM_BASE",
893 0x70, regvalue, cur_col, wrap)); 280 0x70, regvalue, cur_col, wrap));
894} 281}
895 282
896static const ahc_reg_parse_entry_t DSCOMMAND0_parse_table[] = {
897 { "CIOPARCKEN", 0x01, 0x01 },
898 { "USCBSIZE32", 0x02, 0x02 },
899 { "RAMPS", 0x04, 0x04 },
900 { "INTSCBRAMSEL", 0x08, 0x08 },
901 { "EXTREQLCK", 0x10, 0x10 },
902 { "MPARCKEN", 0x20, 0x20 },
903 { "DPARCKEN", 0x40, 0x40 },
904 { "CACHETHEN", 0x80, 0x80 }
905};
906
907int
908ahc_dscommand0_print(u_int regvalue, u_int *cur_col, u_int wrap)
909{
910 return (ahc_print_register(DSCOMMAND0_parse_table, 8, "DSCOMMAND0",
911 0x84, regvalue, cur_col, wrap));
912}
913
914static const ahc_reg_parse_entry_t BUSTIME_parse_table[] = {
915 { "BON", 0x0f, 0x0f },
916 { "BOFF", 0xf0, 0xf0 }
917};
918
919int
920ahc_bustime_print(u_int regvalue, u_int *cur_col, u_int wrap)
921{
922 return (ahc_print_register(BUSTIME_parse_table, 2, "BUSTIME",
923 0x85, regvalue, cur_col, wrap));
924}
925
926static const ahc_reg_parse_entry_t DSCOMMAND1_parse_table[] = {
927 { "HADDLDSEL0", 0x01, 0x01 },
928 { "HADDLDSEL1", 0x02, 0x02 },
929 { "DSLATT", 0xfc, 0xfc }
930};
931
932int
933ahc_dscommand1_print(u_int regvalue, u_int *cur_col, u_int wrap)
934{
935 return (ahc_print_register(DSCOMMAND1_parse_table, 3, "DSCOMMAND1",
936 0x85, regvalue, cur_col, wrap));
937}
938
939static const ahc_reg_parse_entry_t BUSSPD_parse_table[] = {
940 { "STBON", 0x07, 0x07 },
941 { "STBOFF", 0x38, 0x38 },
942 { "DFTHRSH_75", 0x80, 0x80 },
943 { "DFTHRSH", 0xc0, 0xc0 },
944 { "DFTHRSH_100", 0xc0, 0xc0 }
945};
946
947int
948ahc_busspd_print(u_int regvalue, u_int *cur_col, u_int wrap)
949{
950 return (ahc_print_register(BUSSPD_parse_table, 5, "BUSSPD",
951 0x86, regvalue, cur_col, wrap));
952}
953
954static const ahc_reg_parse_entry_t HS_MAILBOX_parse_table[] = {
955 { "SEQ_MAILBOX", 0x0f, 0x0f },
956 { "HOST_TQINPOS", 0x80, 0x80 },
957 { "HOST_MAILBOX", 0xf0, 0xf0 }
958};
959
960int
961ahc_hs_mailbox_print(u_int regvalue, u_int *cur_col, u_int wrap)
962{
963 return (ahc_print_register(HS_MAILBOX_parse_table, 3, "HS_MAILBOX",
964 0x86, regvalue, cur_col, wrap));
965}
966
967static const ahc_reg_parse_entry_t DSPCISTATUS_parse_table[] = {
968 { "DFTHRSH_100", 0xc0, 0xc0 }
969};
970
971int
972ahc_dspcistatus_print(u_int regvalue, u_int *cur_col, u_int wrap)
973{
974 return (ahc_print_register(DSPCISTATUS_parse_table, 1, "DSPCISTATUS",
975 0x86, regvalue, cur_col, wrap));
976}
977
978static const ahc_reg_parse_entry_t HCNTRL_parse_table[] = {
979 { "CHIPRST", 0x01, 0x01 },
980 { "CHIPRSTACK", 0x01, 0x01 },
981 { "INTEN", 0x02, 0x02 },
982 { "PAUSE", 0x04, 0x04 },
983 { "IRQMS", 0x08, 0x08 },
984 { "SWINT", 0x10, 0x10 },
985 { "POWRDN", 0x40, 0x40 }
986};
987
988int
989ahc_hcntrl_print(u_int regvalue, u_int *cur_col, u_int wrap)
990{
991 return (ahc_print_register(HCNTRL_parse_table, 7, "HCNTRL",
992 0x87, regvalue, cur_col, wrap));
993}
994
995int
996ahc_haddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
997{
998 return (ahc_print_register(NULL, 0, "HADDR",
999 0x88, regvalue, cur_col, wrap));
1000}
1001
1002int
1003ahc_hcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
1004{
1005 return (ahc_print_register(NULL, 0, "HCNT",
1006 0x8c, regvalue, cur_col, wrap));
1007}
1008
1009int
1010ahc_scbptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1011{
1012 return (ahc_print_register(NULL, 0, "SCBPTR",
1013 0x90, regvalue, cur_col, wrap));
1014}
1015
1016static const ahc_reg_parse_entry_t INTSTAT_parse_table[] = {
1017 { "SEQINT", 0x01, 0x01 },
1018 { "CMDCMPLT", 0x02, 0x02 },
1019 { "SCSIINT", 0x04, 0x04 },
1020 { "BRKADRINT", 0x08, 0x08 },
1021 { "BAD_PHASE", 0x01, 0x01 },
1022 { "INT_PEND", 0x0f, 0x0f },
1023 { "SEND_REJECT", 0x11, 0x11 },
1024 { "PROTO_VIOLATION", 0x21, 0x21 },
1025 { "NO_MATCH", 0x31, 0x31 },
1026 { "IGN_WIDE_RES", 0x41, 0x41 },
1027 { "PDATA_REINIT", 0x51, 0x51 },
1028 { "HOST_MSG_LOOP", 0x61, 0x61 },
1029 { "BAD_STATUS", 0x71, 0x71 },
1030 { "PERR_DETECTED", 0x81, 0x81 },
1031 { "DATA_OVERRUN", 0x91, 0x91 },
1032 { "MKMSG_FAILED", 0xa1, 0xa1 },
1033 { "MISSED_BUSFREE", 0xb1, 0xb1 },
1034 { "SCB_MISMATCH", 0xc1, 0xc1 },
1035 { "NO_FREE_SCB", 0xd1, 0xd1 },
1036 { "OUT_OF_RANGE", 0xe1, 0xe1 },
1037 { "SEQINT_MASK", 0xf1, 0xf1 }
1038};
1039
1040int
1041ahc_intstat_print(u_int regvalue, u_int *cur_col, u_int wrap)
1042{
1043 return (ahc_print_register(INTSTAT_parse_table, 21, "INTSTAT",
1044 0x91, regvalue, cur_col, wrap));
1045}
1046
1047static const ahc_reg_parse_entry_t CLRINT_parse_table[] = {
1048 { "CLRSEQINT", 0x01, 0x01 },
1049 { "CLRCMDINT", 0x02, 0x02 },
1050 { "CLRSCSIINT", 0x04, 0x04 },
1051 { "CLRBRKADRINT", 0x08, 0x08 },
1052 { "CLRPARERR", 0x10, 0x10 }
1053};
1054
1055int
1056ahc_clrint_print(u_int regvalue, u_int *cur_col, u_int wrap)
1057{
1058 return (ahc_print_register(CLRINT_parse_table, 5, "CLRINT",
1059 0x92, regvalue, cur_col, wrap));
1060}
1061
1062static const ahc_reg_parse_entry_t ERROR_parse_table[] = { 283static const ahc_reg_parse_entry_t ERROR_parse_table[] = {
1063 { "ILLHADDR", 0x01, 0x01 }, 284 { "ILLHADDR", 0x01, 0x01 },
1064 { "ILLSADDR", 0x02, 0x02 }, 285 { "ILLSADDR", 0x02, 0x02 },
@@ -1115,62 +336,6 @@ ahc_dfstatus_print(u_int regvalue, u_int *cur_col, u_int wrap)
1115 0x94, regvalue, cur_col, wrap)); 336 0x94, regvalue, cur_col, wrap));
1116} 337}
1117 338
1118int
1119ahc_dfwaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1120{
1121 return (ahc_print_register(NULL, 0, "DFWADDR",
1122 0x95, regvalue, cur_col, wrap));
1123}
1124
1125int
1126ahc_dfdat_print(u_int regvalue, u_int *cur_col, u_int wrap)
1127{
1128 return (ahc_print_register(NULL, 0, "DFDAT",
1129 0x99, regvalue, cur_col, wrap));
1130}
1131
1132static const ahc_reg_parse_entry_t SCBCNT_parse_table[] = {
1133 { "SCBAUTO", 0x80, 0x80 },
1134 { "SCBCNT_MASK", 0x1f, 0x1f }
1135};
1136
1137int
1138ahc_scbcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
1139{
1140 return (ahc_print_register(SCBCNT_parse_table, 2, "SCBCNT",
1141 0x9a, regvalue, cur_col, wrap));
1142}
1143
1144int
1145ahc_qinfifo_print(u_int regvalue, u_int *cur_col, u_int wrap)
1146{
1147 return (ahc_print_register(NULL, 0, "QINFIFO",
1148 0x9b, regvalue, cur_col, wrap));
1149}
1150
1151int
1152ahc_qoutfifo_print(u_int regvalue, u_int *cur_col, u_int wrap)
1153{
1154 return (ahc_print_register(NULL, 0, "QOUTFIFO",
1155 0x9d, regvalue, cur_col, wrap));
1156}
1157
1158static const ahc_reg_parse_entry_t CRCCONTROL1_parse_table[] = {
1159 { "TARGCRCCNTEN", 0x04, 0x04 },
1160 { "TARGCRCENDEN", 0x08, 0x08 },
1161 { "CRCREQCHKEN", 0x10, 0x10 },
1162 { "CRCENDCHKEN", 0x20, 0x20 },
1163 { "CRCVALCHKEN", 0x40, 0x40 },
1164 { "CRCONSEEN", 0x80, 0x80 }
1165};
1166
1167int
1168ahc_crccontrol1_print(u_int regvalue, u_int *cur_col, u_int wrap)
1169{
1170 return (ahc_print_register(CRCCONTROL1_parse_table, 6, "CRCCONTROL1",
1171 0x9d, regvalue, cur_col, wrap));
1172}
1173
1174static const ahc_reg_parse_entry_t SCSIPHASE_parse_table[] = { 339static const ahc_reg_parse_entry_t SCSIPHASE_parse_table[] = {
1175 { "DATA_OUT_PHASE", 0x01, 0x01 }, 340 { "DATA_OUT_PHASE", 0x01, 0x01 },
1176 { "DATA_IN_PHASE", 0x02, 0x02 }, 341 { "DATA_IN_PHASE", 0x02, 0x02 },
@@ -1188,17 +353,6 @@ ahc_scsiphase_print(u_int regvalue, u_int *cur_col, u_int wrap)
1188 0x9e, regvalue, cur_col, wrap)); 353 0x9e, regvalue, cur_col, wrap));
1189} 354}
1190 355
1191static const ahc_reg_parse_entry_t SFUNCT_parse_table[] = {
1192 { "ALT_MODE", 0x80, 0x80 }
1193};
1194
1195int
1196ahc_sfunct_print(u_int regvalue, u_int *cur_col, u_int wrap)
1197{
1198 return (ahc_print_register(SFUNCT_parse_table, 1, "SFUNCT",
1199 0x9f, regvalue, cur_col, wrap));
1200}
1201
1202int 356int
1203ahc_scb_base_print(u_int regvalue, u_int *cur_col, u_int wrap) 357ahc_scb_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
1204{ 358{
@@ -1206,80 +360,6 @@ ahc_scb_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
1206 0xa0, regvalue, cur_col, wrap)); 360 0xa0, regvalue, cur_col, wrap));
1207} 361}
1208 362
1209int
1210ahc_scb_cdb_ptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1211{
1212 return (ahc_print_register(NULL, 0, "SCB_CDB_PTR",
1213 0xa0, regvalue, cur_col, wrap));
1214}
1215
1216int
1217ahc_scb_residual_sgptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1218{
1219 return (ahc_print_register(NULL, 0, "SCB_RESIDUAL_SGPTR",
1220 0xa4, regvalue, cur_col, wrap));
1221}
1222
1223int
1224ahc_scb_scsi_status_print(u_int regvalue, u_int *cur_col, u_int wrap)
1225{
1226 return (ahc_print_register(NULL, 0, "SCB_SCSI_STATUS",
1227 0xa8, regvalue, cur_col, wrap));
1228}
1229
1230int
1231ahc_scb_target_phases_print(u_int regvalue, u_int *cur_col, u_int wrap)
1232{
1233 return (ahc_print_register(NULL, 0, "SCB_TARGET_PHASES",
1234 0xa9, regvalue, cur_col, wrap));
1235}
1236
1237int
1238ahc_scb_target_data_dir_print(u_int regvalue, u_int *cur_col, u_int wrap)
1239{
1240 return (ahc_print_register(NULL, 0, "SCB_TARGET_DATA_DIR",
1241 0xaa, regvalue, cur_col, wrap));
1242}
1243
1244int
1245ahc_scb_target_itag_print(u_int regvalue, u_int *cur_col, u_int wrap)
1246{
1247 return (ahc_print_register(NULL, 0, "SCB_TARGET_ITAG",
1248 0xab, regvalue, cur_col, wrap));
1249}
1250
1251int
1252ahc_scb_dataptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1253{
1254 return (ahc_print_register(NULL, 0, "SCB_DATAPTR",
1255 0xac, regvalue, cur_col, wrap));
1256}
1257
1258static const ahc_reg_parse_entry_t SCB_DATACNT_parse_table[] = {
1259 { "SG_LAST_SEG", 0x80, 0x80 },
1260 { "SG_HIGH_ADDR_BITS", 0x7f, 0x7f }
1261};
1262
1263int
1264ahc_scb_datacnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
1265{
1266 return (ahc_print_register(SCB_DATACNT_parse_table, 2, "SCB_DATACNT",
1267 0xb0, regvalue, cur_col, wrap));
1268}
1269
1270static const ahc_reg_parse_entry_t SCB_SGPTR_parse_table[] = {
1271 { "SG_LIST_NULL", 0x01, 0x01 },
1272 { "SG_FULL_RESID", 0x02, 0x02 },
1273 { "SG_RESID_VALID", 0x04, 0x04 }
1274};
1275
1276int
1277ahc_scb_sgptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1278{
1279 return (ahc_print_register(SCB_SGPTR_parse_table, 3, "SCB_SGPTR",
1280 0xb4, regvalue, cur_col, wrap));
1281}
1282
1283static const ahc_reg_parse_entry_t SCB_CONTROL_parse_table[] = { 363static const ahc_reg_parse_entry_t SCB_CONTROL_parse_table[] = {
1284 { "DISCONNECTED", 0x04, 0x04 }, 364 { "DISCONNECTED", 0x04, 0x04 },
1285 { "ULTRAENB", 0x08, 0x08 }, 365 { "ULTRAENB", 0x08, 0x08 },
@@ -1331,248 +411,3 @@ ahc_scb_tag_print(u_int regvalue, u_int *cur_col, u_int wrap)
1331 0xbb, regvalue, cur_col, wrap)); 411 0xbb, regvalue, cur_col, wrap));
1332} 412}
1333 413
1334int
1335ahc_scb_cdb_len_print(u_int regvalue, u_int *cur_col, u_int wrap)
1336{
1337 return (ahc_print_register(NULL, 0, "SCB_CDB_LEN",
1338 0xbc, regvalue, cur_col, wrap));
1339}
1340
1341int
1342ahc_scb_scsirate_print(u_int regvalue, u_int *cur_col, u_int wrap)
1343{
1344 return (ahc_print_register(NULL, 0, "SCB_SCSIRATE",
1345 0xbd, regvalue, cur_col, wrap));
1346}
1347
1348int
1349ahc_scb_scsioffset_print(u_int regvalue, u_int *cur_col, u_int wrap)
1350{
1351 return (ahc_print_register(NULL, 0, "SCB_SCSIOFFSET",
1352 0xbe, regvalue, cur_col, wrap));
1353}
1354
1355int
1356ahc_scb_next_print(u_int regvalue, u_int *cur_col, u_int wrap)
1357{
1358 return (ahc_print_register(NULL, 0, "SCB_NEXT",
1359 0xbf, regvalue, cur_col, wrap));
1360}
1361
1362static const ahc_reg_parse_entry_t SEECTL_2840_parse_table[] = {
1363 { "DO_2840", 0x01, 0x01 },
1364 { "CK_2840", 0x02, 0x02 },
1365 { "CS_2840", 0x04, 0x04 }
1366};
1367
1368int
1369ahc_seectl_2840_print(u_int regvalue, u_int *cur_col, u_int wrap)
1370{
1371 return (ahc_print_register(SEECTL_2840_parse_table, 3, "SEECTL_2840",
1372 0xc0, regvalue, cur_col, wrap));
1373}
1374
1375static const ahc_reg_parse_entry_t STATUS_2840_parse_table[] = {
1376 { "DI_2840", 0x01, 0x01 },
1377 { "EEPROM_TF", 0x80, 0x80 },
1378 { "ADSEL", 0x1e, 0x1e },
1379 { "BIOS_SEL", 0x60, 0x60 }
1380};
1381
1382int
1383ahc_status_2840_print(u_int regvalue, u_int *cur_col, u_int wrap)
1384{
1385 return (ahc_print_register(STATUS_2840_parse_table, 4, "STATUS_2840",
1386 0xc1, regvalue, cur_col, wrap));
1387}
1388
1389int
1390ahc_scb_64_btt_print(u_int regvalue, u_int *cur_col, u_int wrap)
1391{
1392 return (ahc_print_register(NULL, 0, "SCB_64_BTT",
1393 0xd0, regvalue, cur_col, wrap));
1394}
1395
1396int
1397ahc_cchaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1398{
1399 return (ahc_print_register(NULL, 0, "CCHADDR",
1400 0xe0, regvalue, cur_col, wrap));
1401}
1402
1403int
1404ahc_cchcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
1405{
1406 return (ahc_print_register(NULL, 0, "CCHCNT",
1407 0xe8, regvalue, cur_col, wrap));
1408}
1409
1410int
1411ahc_ccsgram_print(u_int regvalue, u_int *cur_col, u_int wrap)
1412{
1413 return (ahc_print_register(NULL, 0, "CCSGRAM",
1414 0xe9, regvalue, cur_col, wrap));
1415}
1416
1417int
1418ahc_ccsgaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1419{
1420 return (ahc_print_register(NULL, 0, "CCSGADDR",
1421 0xea, regvalue, cur_col, wrap));
1422}
1423
1424static const ahc_reg_parse_entry_t CCSGCTL_parse_table[] = {
1425 { "CCSGRESET", 0x01, 0x01 },
1426 { "SG_FETCH_NEEDED", 0x02, 0x02 },
1427 { "CCSGEN", 0x08, 0x08 },
1428 { "CCSGDONE", 0x80, 0x80 }
1429};
1430
1431int
1432ahc_ccsgctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
1433{
1434 return (ahc_print_register(CCSGCTL_parse_table, 4, "CCSGCTL",
1435 0xeb, regvalue, cur_col, wrap));
1436}
1437
1438int
1439ahc_ccscbram_print(u_int regvalue, u_int *cur_col, u_int wrap)
1440{
1441 return (ahc_print_register(NULL, 0, "CCSCBRAM",
1442 0xec, regvalue, cur_col, wrap));
1443}
1444
1445int
1446ahc_ccscbaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1447{
1448 return (ahc_print_register(NULL, 0, "CCSCBADDR",
1449 0xed, regvalue, cur_col, wrap));
1450}
1451
1452static const ahc_reg_parse_entry_t CCSCBCTL_parse_table[] = {
1453 { "CCSCBRESET", 0x01, 0x01 },
1454 { "CCSCBDIR", 0x04, 0x04 },
1455 { "CCSCBEN", 0x08, 0x08 },
1456 { "CCARREN", 0x10, 0x10 },
1457 { "ARRDONE", 0x40, 0x40 },
1458 { "CCSCBDONE", 0x80, 0x80 }
1459};
1460
1461int
1462ahc_ccscbctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
1463{
1464 return (ahc_print_register(CCSCBCTL_parse_table, 6, "CCSCBCTL",
1465 0xee, regvalue, cur_col, wrap));
1466}
1467
1468int
1469ahc_ccscbcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
1470{
1471 return (ahc_print_register(NULL, 0, "CCSCBCNT",
1472 0xef, regvalue, cur_col, wrap));
1473}
1474
1475int
1476ahc_scbbaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1477{
1478 return (ahc_print_register(NULL, 0, "SCBBADDR",
1479 0xf0, regvalue, cur_col, wrap));
1480}
1481
1482int
1483ahc_ccscbptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1484{
1485 return (ahc_print_register(NULL, 0, "CCSCBPTR",
1486 0xf1, regvalue, cur_col, wrap));
1487}
1488
1489int
1490ahc_hnscb_qoff_print(u_int regvalue, u_int *cur_col, u_int wrap)
1491{
1492 return (ahc_print_register(NULL, 0, "HNSCB_QOFF",
1493 0xf4, regvalue, cur_col, wrap));
1494}
1495
1496int
1497ahc_snscb_qoff_print(u_int regvalue, u_int *cur_col, u_int wrap)
1498{
1499 return (ahc_print_register(NULL, 0, "SNSCB_QOFF",
1500 0xf6, regvalue, cur_col, wrap));
1501}
1502
1503int
1504ahc_sdscb_qoff_print(u_int regvalue, u_int *cur_col, u_int wrap)
1505{
1506 return (ahc_print_register(NULL, 0, "SDSCB_QOFF",
1507 0xf8, regvalue, cur_col, wrap));
1508}
1509
1510static const ahc_reg_parse_entry_t QOFF_CTLSTA_parse_table[] = {
1511 { "SDSCB_ROLLOVER", 0x10, 0x10 },
1512 { "SNSCB_ROLLOVER", 0x20, 0x20 },
1513 { "SCB_AVAIL", 0x40, 0x40 },
1514 { "SCB_QSIZE_256", 0x06, 0x06 },
1515 { "SCB_QSIZE", 0x07, 0x07 }
1516};
1517
1518int
1519ahc_qoff_ctlsta_print(u_int regvalue, u_int *cur_col, u_int wrap)
1520{
1521 return (ahc_print_register(QOFF_CTLSTA_parse_table, 5, "QOFF_CTLSTA",
1522 0xfa, regvalue, cur_col, wrap));
1523}
1524
1525static const ahc_reg_parse_entry_t DFF_THRSH_parse_table[] = {
1526 { "RD_DFTHRSH_MIN", 0x00, 0x00 },
1527 { "WR_DFTHRSH_MIN", 0x00, 0x00 },
1528 { "RD_DFTHRSH_25", 0x01, 0x01 },
1529 { "RD_DFTHRSH_50", 0x02, 0x02 },
1530 { "RD_DFTHRSH_63", 0x03, 0x03 },
1531 { "RD_DFTHRSH_75", 0x04, 0x04 },
1532 { "RD_DFTHRSH_85", 0x05, 0x05 },
1533 { "RD_DFTHRSH_90", 0x06, 0x06 },
1534 { "RD_DFTHRSH", 0x07, 0x07 },
1535 { "RD_DFTHRSH_MAX", 0x07, 0x07 },
1536 { "WR_DFTHRSH_25", 0x10, 0x10 },
1537 { "WR_DFTHRSH_50", 0x20, 0x20 },
1538 { "WR_DFTHRSH_63", 0x30, 0x30 },
1539 { "WR_DFTHRSH_75", 0x40, 0x40 },
1540 { "WR_DFTHRSH_85", 0x50, 0x50 },
1541 { "WR_DFTHRSH_90", 0x60, 0x60 },
1542 { "WR_DFTHRSH", 0x70, 0x70 },
1543 { "WR_DFTHRSH_MAX", 0x70, 0x70 }
1544};
1545
1546int
1547ahc_dff_thrsh_print(u_int regvalue, u_int *cur_col, u_int wrap)
1548{
1549 return (ahc_print_register(DFF_THRSH_parse_table, 18, "DFF_THRSH",
1550 0xfb, regvalue, cur_col, wrap));
1551}
1552
1553static const ahc_reg_parse_entry_t SG_CACHE_SHADOW_parse_table[] = {
1554 { "LAST_SEG_DONE", 0x01, 0x01 },
1555 { "LAST_SEG", 0x02, 0x02 },
1556 { "SG_ADDR_MASK", 0xf8, 0xf8 }
1557};
1558
1559int
1560ahc_sg_cache_shadow_print(u_int regvalue, u_int *cur_col, u_int wrap)
1561{
1562 return (ahc_print_register(SG_CACHE_SHADOW_parse_table, 3, "SG_CACHE_SHADOW",
1563 0xfc, regvalue, cur_col, wrap));
1564}
1565
1566static const ahc_reg_parse_entry_t SG_CACHE_PRE_parse_table[] = {
1567 { "LAST_SEG_DONE", 0x01, 0x01 },
1568 { "LAST_SEG", 0x02, 0x02 },
1569 { "SG_ADDR_MASK", 0xf8, 0xf8 }
1570};
1571
1572int
1573ahc_sg_cache_pre_print(u_int regvalue, u_int *cur_col, u_int wrap)
1574{
1575 return (ahc_print_register(SG_CACHE_PRE_parse_table, 3, "SG_CACHE_PRE",
1576 0xfc, regvalue, cur_col, wrap));
1577}
1578
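Editor's note: the hunks above drop the per-register ahc_*_print() helpers and their parse tables from the generated aic7xxx_reg_print.c. Every one of them follows the same pattern: a table of { name, mask, value } entries describing the bit fields of one register, handed to ahc_print_register() together with the register's address and current value. A rough, self-contained sketch of that table-driven decoding follows; the table contents and the print_register() helper are invented for illustration and are not the driver's real API.

#include <stdio.h>

/* Simplified stand-in for ahc_reg_parse_entry_t: one named bit field. */
typedef struct {
	const char *name;
	unsigned int mask;	/* bits that belong to the field          */
	unsigned int value;	/* value meaning "this field is asserted" */
} reg_parse_entry_t;

/* Print a register value followed by every asserted field it contains. */
static void print_register(const reg_parse_entry_t *table, int entries,
			   const char *name, unsigned int regvalue)
{
	int i, printed = 0;

	printf("%s[0x%02x]", name, regvalue);
	for (i = 0; i < entries; i++) {
		if (table[i].value != 0 &&
		    (regvalue & table[i].mask) == table[i].value) {
			printf("%c%s", printed ? '|' : ':', table[i].name);
			printed = 1;
		}
	}
	printf("\n");
}

int main(void)
{
	/* Invented table, loosely modelled on the SEQCTL entries above. */
	static const reg_parse_entry_t seqctl_table[] = {
		{ "LOADRAM",  0x01, 0x01 },
		{ "SEQRESET", 0x02, 0x02 },
	};

	print_register(seqctl_table, 2, "SEQCTL", 0x03);
	return 0;
}

Whether such a table and wrapper is emitted for a given register is exactly what the aicasm changes in the next hunks control.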
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y b/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
index 81be6a261cc8..e4064433842e 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
@@ -147,6 +147,8 @@ void yyerror(const char *string);
147 147
148%token T_ACCESS_MODE 148%token T_ACCESS_MODE
149 149
150%token T_DONT_GENERATE_DEBUG_CODE
151
150%token T_MODES 152%token T_MODES
151 153
152%token T_DEFINE 154%token T_DEFINE
@@ -357,6 +359,7 @@ reg_attribute:
357| size 359| size
358| count 360| count
359| access_mode 361| access_mode
362| dont_generate_debug_code
360| modes 363| modes
361| field_defn 364| field_defn
362| enum_defn 365| enum_defn
@@ -410,6 +413,13 @@ access_mode:
410 } 413 }
411; 414;
412 415
416dont_generate_debug_code:
417 T_DONT_GENERATE_DEBUG_CODE
418 {
419 cur_symbol->dont_generate_debug_code = 1;
420 }
421;
422
413modes: 423modes:
414 T_MODES mode_list 424 T_MODES mode_list
415 { 425 {
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l b/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l
index 2c7f02daf88d..93c8667cd704 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l
@@ -164,6 +164,7 @@ download { return T_DOWNLOAD; }
164address { return T_ADDRESS; } 164address { return T_ADDRESS; }
165count { return T_COUNT; } 165count { return T_COUNT; }
166access_mode { return T_ACCESS_MODE; } 166access_mode { return T_ACCESS_MODE; }
167dont_generate_debug_code { return T_DONT_GENERATE_DEBUG_CODE; }
167modes { return T_MODES; } 168modes { return T_MODES; }
168RW|RO|WO { 169RW|RO|WO {
169 if (strcmp(yytext, "RW") == 0) 170 if (strcmp(yytext, "RW") == 0)
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
index fcd357872b43..078ed600f47a 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
@@ -539,6 +539,9 @@ symtable_dump(FILE *ofile, FILE *dfile)
539 aic_print_include(dfile, stock_include_file); 539 aic_print_include(dfile, stock_include_file);
540 SLIST_FOREACH(curnode, &registers, links) { 540 SLIST_FOREACH(curnode, &registers, links) {
541 541
542 if (curnode->symbol->dont_generate_debug_code)
543 continue;
544
542 switch(curnode->symbol->type) { 545 switch(curnode->symbol->type) {
543 case REGISTER: 546 case REGISTER:
544 case SCBLOC: 547 case SCBLOC:
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h
index 05190c1a2fb7..2ba73ae7c777 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h
@@ -137,7 +137,8 @@ typedef struct symbol {
137 struct label_info *linfo; 137 struct label_info *linfo;
138 struct cond_info *condinfo; 138 struct cond_info *condinfo;
139 struct macro_info *macroinfo; 139 struct macro_info *macroinfo;
140 }info; 140 } info;
141 int dont_generate_debug_code;
141} symbol_t; 142} symbol_t;
142 143
143typedef struct symbol_ref { 144typedef struct symbol_ref {
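Editor's note: the grammar, lexer, and symbol-table hunks above teach aicasm a new per-register attribute, dont_generate_debug_code: the lexer returns a new token for the keyword, the parser sets a flag on the current symbol, and symtable_dump() skips any register carrying the flag, which is what removes the corresponding ahc_*_print() helpers from the generated file. A minimal stand-alone sketch of that filtering step follows; struct symbol here is a cut-down stand-in, not aicasm's real symbol_t.

#include <stdio.h>

/* Cut-down stand-in for aicasm's symbol_t: a name plus the new flag. */
struct symbol {
	const char *name;
	int dont_generate_debug_code;
};

/* Emit a debug print helper for every register that did not opt out. */
static void dump_debug_helpers(const struct symbol *syms, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (syms[i].dont_generate_debug_code)
			continue;	/* mirrors the new check in symtable_dump() */
		printf("int ahc_%s_print(...);\n", syms[i].name);
	}
}

int main(void)
{
	static const struct symbol regs[] = {
		{ "seqctl", 0 },
		{ "seqram", 1 },	/* opted out: no helper is emitted */
	};

	dump_debug_helpers(regs, 2);
	return 0;
}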
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index 9aec4ca64e56..f7da7530875e 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -107,6 +107,7 @@ static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h)
107 struct request *req; 107 struct request *req;
108 int ret; 108 int ret;
109 109
110retry:
110 req = blk_get_request(sdev->request_queue, WRITE, GFP_NOIO); 111 req = blk_get_request(sdev->request_queue, WRITE, GFP_NOIO);
111 if (!req) 112 if (!req)
112 return SCSI_DH_RES_TEMP_UNAVAIL; 113 return SCSI_DH_RES_TEMP_UNAVAIL;
@@ -121,7 +122,6 @@ static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h)
121 memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE); 122 memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE);
122 req->sense_len = 0; 123 req->sense_len = 0;
123 124
124retry:
125 ret = blk_execute_rq(req->q, NULL, req, 1); 125 ret = blk_execute_rq(req->q, NULL, req, 1);
126 if (ret == -EIO) { 126 if (ret == -EIO) {
127 if (req->sense_len > 0) { 127 if (req->sense_len > 0) {
@@ -136,8 +136,10 @@ retry:
136 h->path_state = HP_SW_PATH_ACTIVE; 136 h->path_state = HP_SW_PATH_ACTIVE;
137 ret = SCSI_DH_OK; 137 ret = SCSI_DH_OK;
138 } 138 }
139 if (ret == SCSI_DH_IMM_RETRY) 139 if (ret == SCSI_DH_IMM_RETRY) {
140 blk_put_request(req);
140 goto retry; 141 goto retry;
142 }
141 if (ret == SCSI_DH_DEV_OFFLINED) { 143 if (ret == SCSI_DH_DEV_OFFLINED) {
142 h->path_state = HP_SW_PATH_PASSIVE; 144 h->path_state = HP_SW_PATH_PASSIVE;
143 ret = SCSI_DH_OK; 145 ret = SCSI_DH_OK;
@@ -200,6 +202,7 @@ static int hp_sw_start_stop(struct scsi_device *sdev, struct hp_sw_dh_data *h)
200 struct request *req; 202 struct request *req;
201 int ret, retry; 203 int ret, retry;
202 204
205retry:
203 req = blk_get_request(sdev->request_queue, WRITE, GFP_NOIO); 206 req = blk_get_request(sdev->request_queue, WRITE, GFP_NOIO);
204 if (!req) 207 if (!req)
205 return SCSI_DH_RES_TEMP_UNAVAIL; 208 return SCSI_DH_RES_TEMP_UNAVAIL;
@@ -216,7 +219,6 @@ static int hp_sw_start_stop(struct scsi_device *sdev, struct hp_sw_dh_data *h)
216 req->sense_len = 0; 219 req->sense_len = 0;
217 retry = h->retries; 220 retry = h->retries;
218 221
219retry:
220 ret = blk_execute_rq(req->q, NULL, req, 1); 222 ret = blk_execute_rq(req->q, NULL, req, 1);
221 if (ret == -EIO) { 223 if (ret == -EIO) {
222 if (req->sense_len > 0) { 224 if (req->sense_len > 0) {
@@ -231,8 +233,10 @@ retry:
231 ret = SCSI_DH_OK; 233 ret = SCSI_DH_OK;
232 234
233 if (ret == SCSI_DH_RETRY) { 235 if (ret == SCSI_DH_RETRY) {
234 if (--retry) 236 if (--retry) {
237 blk_put_request(req);
235 goto retry; 238 goto retry;
239 }
236 ret = SCSI_DH_IO; 240 ret = SCSI_DH_IO;
237 } 241 }
238 242
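Editor's note: both scsi_dh_hp_sw hunks move the retry: label above blk_get_request() and add a blk_put_request() before jumping back, so each retry executes a freshly allocated request rather than one the block layer has already completed and torn down. A generic userspace sketch of the corrected allocate-inside-the-retry-path shape follows; malloc/free and the helper names merely stand in for the block-layer calls.

#include <stdlib.h>

struct fake_request { int result; };

/* Stand-ins for blk_get_request()/blk_put_request()/blk_execute_rq(). */
static struct fake_request *get_request(void) { return calloc(1, sizeof(struct fake_request)); }
static void put_request(struct fake_request *rq) { free(rq); }
static int execute(struct fake_request *rq) { (void)rq; return 0; /* pretend success */ }

static int send_with_retry(int retries)
{
	struct fake_request *req;
	int ret;

retry:
	req = get_request();		/* allocate inside the retry path */
	if (!req)
		return -1;

	ret = execute(req);
	if (ret != 0 && --retries > 0) {
		put_request(req);	/* drop the used request before looping */
		goto retry;
	}

	put_request(req);
	return ret;
}

int main(void)
{
	return send_with_retry(3) ? 1 : 0;
}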
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index a43c3ed4df28..3d50cabca7ee 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -401,6 +401,9 @@ static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h)
401 } 401 }
402 } 402 }
403 403
404 if (h->lun_state == RDAC_LUN_UNOWNED)
405 h->state = RDAC_STATE_PASSIVE;
406
404 return err; 407 return err;
405} 408}
406 409
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 8aba4fdfb522..6194ed5d02c4 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -2445,7 +2445,7 @@ static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
2445 hba_status = detailed_status >> 8; 2445 hba_status = detailed_status >> 8;
2446 2446
2447 // calculate resid for sg 2447 // calculate resid for sg
2448 scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+5)); 2448 scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+20));
2449 2449
2450 pHba = (adpt_hba*) cmd->device->host->hostdata[0]; 2450 pHba = (adpt_hba*) cmd->device->host->hostdata[0];
2451 2451
@@ -2456,7 +2456,7 @@ static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
2456 case I2O_SCSI_DSC_SUCCESS: 2456 case I2O_SCSI_DSC_SUCCESS:
2457 cmd->result = (DID_OK << 16); 2457 cmd->result = (DID_OK << 16);
2458 // handle underflow 2458 // handle underflow
2459 if(readl(reply+5) < cmd->underflow ) { 2459 if (readl(reply+20) < cmd->underflow) {
2460 cmd->result = (DID_ERROR <<16); 2460 cmd->result = (DID_ERROR <<16);
2461 printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name); 2461 printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
2462 } 2462 }
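Editor's note: the dpt_i2o fix changes readl(reply+5) to readl(reply+20) in both places. reply is a void __iomem pointer, so the old +5 advanced by five bytes and read a misaligned slice of the frame, while the transferred-byte count used by the resid calculation apparently sits at byte offset 20 (32-bit word 5) of the I2O reply. A small userspace sketch of reading a 32-bit field at a byte offset follows; the frame layout and values are invented for illustration.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Read a 32-bit value at a byte offset into a message frame. */
static uint32_t read_u32_at(const void *base, size_t byte_off)
{
	uint32_t v;

	memcpy(&v, (const uint8_t *)base + byte_off, sizeof(v)); /* avoids misaligned loads */
	return v;	/* host endianness; the driver's readl() also byte-swaps as needed */
}

int main(void)
{
	uint8_t reply[32] = { 0 };

	reply[20] = 0x10;	/* pretend 0x10 bytes were transferred */
	printf("transfer count = %u\n", read_u32_at(reply, 20));
	return 0;
}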
diff --git a/drivers/scsi/fdomain.c b/drivers/scsi/fdomain.c
index c33bcb284df7..56f4e6bffc21 100644
--- a/drivers/scsi/fdomain.c
+++ b/drivers/scsi/fdomain.c
@@ -290,9 +290,11 @@
290#include <scsi/scsi_ioctl.h> 290#include <scsi/scsi_ioctl.h>
291#include "fdomain.h" 291#include "fdomain.h"
292 292
293#ifndef PCMCIA
293MODULE_AUTHOR("Rickard E. Faith"); 294MODULE_AUTHOR("Rickard E. Faith");
294MODULE_DESCRIPTION("Future domain SCSI driver"); 295MODULE_DESCRIPTION("Future domain SCSI driver");
295MODULE_LICENSE("GPL"); 296MODULE_LICENSE("GPL");
297#endif
296 298
297 299
298#define VERSION "$Revision: 5.51 $" 300#define VERSION "$Revision: 5.51 $"
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index c387c15a2128..fb247fdfa2bd 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -588,7 +588,7 @@ static struct pci_driver gdth_pci_driver = {
588 .remove = gdth_pci_remove_one, 588 .remove = gdth_pci_remove_one,
589}; 589};
590 590
591static void gdth_pci_remove_one(struct pci_dev *pdev) 591static void __devexit gdth_pci_remove_one(struct pci_dev *pdev)
592{ 592{
593 gdth_ha_str *ha = pci_get_drvdata(pdev); 593 gdth_ha_str *ha = pci_get_drvdata(pdev);
594 594
@@ -600,7 +600,7 @@ static void gdth_pci_remove_one(struct pci_dev *pdev)
600 pci_disable_device(pdev); 600 pci_disable_device(pdev);
601} 601}
602 602
603static int gdth_pci_init_one(struct pci_dev *pdev, 603static int __devinit gdth_pci_init_one(struct pci_dev *pdev,
604 const struct pci_device_id *ent) 604 const struct pci_device_id *ent)
605{ 605{
606 ushort vendor = pdev->vendor; 606 ushort vendor = pdev->vendor;
@@ -853,7 +853,7 @@ static int __init gdth_init_isa(ulong32 bios_adr,gdth_ha_str *ha)
853#endif /* CONFIG_ISA */ 853#endif /* CONFIG_ISA */
854 854
855#ifdef CONFIG_PCI 855#ifdef CONFIG_PCI
856static int gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr, 856static int __devinit gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
857 gdth_ha_str *ha) 857 gdth_ha_str *ha)
858{ 858{
859 register gdt6_dpram_str __iomem *dp6_ptr; 859 register gdt6_dpram_str __iomem *dp6_ptr;
@@ -1237,7 +1237,7 @@ static int gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
1237 1237
1238/* controller protocol functions */ 1238/* controller protocol functions */
1239 1239
1240static void __init gdth_enable_int(gdth_ha_str *ha) 1240static void __devinit gdth_enable_int(gdth_ha_str *ha)
1241{ 1241{
1242 ulong flags; 1242 ulong flags;
1243 gdt2_dpram_str __iomem *dp2_ptr; 1243 gdt2_dpram_str __iomem *dp2_ptr;
@@ -1553,7 +1553,7 @@ static int gdth_internal_cmd(gdth_ha_str *ha, unchar service, ushort opcode,
1553 1553
1554/* search for devices */ 1554/* search for devices */
1555 1555
1556static int __init gdth_search_drives(gdth_ha_str *ha) 1556static int __devinit gdth_search_drives(gdth_ha_str *ha)
1557{ 1557{
1558 ushort cdev_cnt, i; 1558 ushort cdev_cnt, i;
1559 int ok; 1559 int ok;
@@ -4935,7 +4935,7 @@ static int __init gdth_eisa_probe_one(ushort eisa_slot)
4935#endif /* CONFIG_EISA */ 4935#endif /* CONFIG_EISA */
4936 4936
4937#ifdef CONFIG_PCI 4937#ifdef CONFIG_PCI
4938static int gdth_pci_probe_one(gdth_pci_str *pcistr, 4938static int __devinit gdth_pci_probe_one(gdth_pci_str *pcistr,
4939 gdth_ha_str **ha_out) 4939 gdth_ha_str **ha_out)
4940{ 4940{
4941 struct Scsi_Host *shp; 4941 struct Scsi_Host *shp;
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 87e09f35d3d4..6cad1758243a 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -1442,7 +1442,7 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev)
1442 spin_lock_irqsave(shost->host_lock, lock_flags); 1442 spin_lock_irqsave(shost->host_lock, lock_flags);
1443 if (sdev->type == TYPE_DISK) { 1443 if (sdev->type == TYPE_DISK) {
1444 sdev->allow_restart = 1; 1444 sdev->allow_restart = 1;
1445 sdev->timeout = 60 * HZ; 1445 blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
1446 } 1446 }
1447 scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun); 1447 scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun);
1448 spin_unlock_irqrestore(shost->host_lock, lock_flags); 1448 spin_unlock_irqrestore(shost->host_lock, lock_flags);
diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c
index 2a5b29d12172..e2dd6a45924a 100644
--- a/drivers/scsi/ibmvscsi/ibmvstgt.c
+++ b/drivers/scsi/ibmvscsi/ibmvstgt.c
@@ -864,21 +864,23 @@ static int ibmvstgt_probe(struct vio_dev *dev, const struct vio_device_id *id)
864 864
865 INIT_WORK(&vport->crq_work, handle_crq); 865 INIT_WORK(&vport->crq_work, handle_crq);
866 866
867 err = crq_queue_create(&vport->crq_queue, target); 867 err = scsi_add_host(shost, target->dev);
868 if (err) 868 if (err)
869 goto free_srp_target; 869 goto free_srp_target;
870 870
871 err = scsi_add_host(shost, target->dev); 871 err = scsi_tgt_alloc_queue(shost);
872 if (err) 872 if (err)
873 goto destroy_queue; 873 goto remove_host;
874 874
875 err = scsi_tgt_alloc_queue(shost); 875 err = crq_queue_create(&vport->crq_queue, target);
876 if (err) 876 if (err)
877 goto destroy_queue; 877 goto free_queue;
878 878
879 return 0; 879 return 0;
880destroy_queue: 880free_queue:
881 crq_queue_destroy(target); 881 scsi_tgt_free_queue(shost);
882remove_host:
883 scsi_remove_host(shost);
882free_srp_target: 884free_srp_target:
883 srp_target_free(target); 885 srp_target_free(target);
884put_host: 886put_host:
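Editor's note: the ibmvstgt_probe() rework makes crq_queue_create() the last setup step and renames the error labels so that each failure point unwinds exactly what has already been set up, in reverse order; as the hunk shows, the old error path never undid a successful scsi_add_host(). A generic sketch of the goto-based reverse-order unwind follows; the setup/teardown names are hypothetical stand-ins for the calls in the hunk above.

#include <stdio.h>

/* Hypothetical setup/teardown pairs standing in for scsi_add_host(),
 * scsi_tgt_alloc_queue() and crq_queue_create(). */
static int  add_host(void)     { return 0; }
static void remove_host(void)  { puts("remove_host"); }
static int  alloc_queue(void)  { return 0; }
static void free_queue(void)   { puts("free_queue"); }
static int  create_crq(void)   { return -1; /* pretend the final step fails */ }

static int probe(void)
{
	int err;

	err = add_host();
	if (err)
		goto out;

	err = alloc_queue();
	if (err)
		goto err_remove_host;

	err = create_crq();		/* last resource to be set up */
	if (err)
		goto err_free_queue;

	return 0;

err_free_queue:
	free_queue();			/* undo in reverse order of setup */
err_remove_host:
	remove_host();
out:
	return err;
}

int main(void)
{
	printf("probe() = %d\n", probe());
	return 0;
}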
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index afc96e844a25..2370fd82ebfe 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -452,40 +452,34 @@ static ide_driver_t idescsi_driver = {
452#endif 452#endif
453}; 453};
454 454
455static int idescsi_ide_open(struct inode *inode, struct file *filp) 455static int idescsi_ide_open(struct block_device *bdev, fmode_t mode)
456{ 456{
457 struct gendisk *disk = inode->i_bdev->bd_disk; 457 struct ide_scsi_obj *scsi = ide_scsi_get(bdev->bd_disk);
458 struct ide_scsi_obj *scsi;
459 458
460 if (!(scsi = ide_scsi_get(disk))) 459 if (!scsi)
461 return -ENXIO; 460 return -ENXIO;
462 461
463 return 0; 462 return 0;
464} 463}
465 464
466static int idescsi_ide_release(struct inode *inode, struct file *filp) 465static int idescsi_ide_release(struct gendisk *disk, fmode_t mode)
467{ 466{
468 struct gendisk *disk = inode->i_bdev->bd_disk; 467 ide_scsi_put(ide_scsi_g(disk));
469 struct ide_scsi_obj *scsi = ide_scsi_g(disk);
470
471 ide_scsi_put(scsi);
472
473 return 0; 468 return 0;
474} 469}
475 470
476static int idescsi_ide_ioctl(struct inode *inode, struct file *file, 471static int idescsi_ide_ioctl(struct block_device *bdev, fmode_t mode,
477 unsigned int cmd, unsigned long arg) 472 unsigned int cmd, unsigned long arg)
478{ 473{
479 struct block_device *bdev = inode->i_bdev;
480 struct ide_scsi_obj *scsi = ide_scsi_g(bdev->bd_disk); 474 struct ide_scsi_obj *scsi = ide_scsi_g(bdev->bd_disk);
481 return generic_ide_ioctl(scsi->drive, file, bdev, cmd, arg); 475 return generic_ide_ioctl(scsi->drive, bdev, cmd, arg);
482} 476}
483 477
484static struct block_device_operations idescsi_ops = { 478static struct block_device_operations idescsi_ops = {
485 .owner = THIS_MODULE, 479 .owner = THIS_MODULE,
486 .open = idescsi_ide_open, 480 .open = idescsi_ide_open,
487 .release = idescsi_ide_release, 481 .release = idescsi_ide_release,
488 .ioctl = idescsi_ide_ioctl, 482 .locked_ioctl = idescsi_ide_ioctl,
489}; 483};
490 484
491static int idescsi_slave_configure(struct scsi_device * sdp) 485static int idescsi_slave_configure(struct scsi_device * sdp)
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 098739deb02e..ded854a6dd35 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -2456,20 +2456,14 @@ static ssize_t ipr_read_trace(struct kobject *kobj,
2456 struct Scsi_Host *shost = class_to_shost(dev); 2456 struct Scsi_Host *shost = class_to_shost(dev);
2457 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 2457 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2458 unsigned long lock_flags = 0; 2458 unsigned long lock_flags = 0;
2459 int size = IPR_TRACE_SIZE; 2459 ssize_t ret;
2460 char *src = (char *)ioa_cfg->trace;
2461
2462 if (off > size)
2463 return 0;
2464 if (off + count > size) {
2465 size -= off;
2466 count = size;
2467 }
2468 2460
2469 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 2461 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2470 memcpy(buf, &src[off], count); 2462 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
2463 IPR_TRACE_SIZE);
2471 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 2464 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2472 return count; 2465
2466 return ret;
2473} 2467}
2474 2468
2475static struct bin_attribute ipr_trace_attr = { 2469static struct bin_attribute ipr_trace_attr = {
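Editor's note: the ipr hunk replaces the hand-rolled offset/length clamping in ipr_read_trace() with memory_read_from_buffer(), which copies at most count bytes from a fixed-size buffer starting at *ppos, advances the position, and returns how much was copied. A userspace approximation of those semantics follows; read_from_buffer() is a sketch of the idea, not the kernel function's exact definition.

#include <stdio.h>
#include <string.h>
#include <sys/types.h>

/* Copy up to count bytes from a fixed-size source buffer, honouring *ppos. */
static ssize_t read_from_buffer(void *to, size_t count, off_t *ppos,
				const void *from, size_t available)
{
	off_t pos = *ppos;

	if (pos < 0)
		return -1;
	if ((size_t)pos >= available || count == 0)
		return 0;		/* nothing left to read */
	if (count > available - (size_t)pos)
		count = available - (size_t)pos;

	memcpy(to, (const char *)from + pos, count);
	*ppos = pos + count;
	return (ssize_t)count;
}

int main(void)
{
	char trace[8] = "ABCDEFG";
	char out[16];
	off_t pos = 5;
	ssize_t n = read_from_buffer(out, sizeof(out), &pos, trace, sizeof(trace));

	printf("read %zd bytes, new pos %lld\n", n, (long long)pos);
	return 0;
}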
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 801c7cf54d2e..3fdee7370ccc 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -489,12 +489,6 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
489 if (!__kfifo_get(session->cmdpool.queue, 489 if (!__kfifo_get(session->cmdpool.queue,
490 (void*)&task, sizeof(void*))) 490 (void*)&task, sizeof(void*)))
491 return NULL; 491 return NULL;
492
493 if ((hdr->opcode == (ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE)) &&
494 hdr->ttt == RESERVED_ITT) {
495 conn->ping_task = task;
496 conn->last_ping = jiffies;
497 }
498 } 492 }
499 /* 493 /*
500 * released in complete pdu for task we expect a response for, and 494 * released in complete pdu for task we expect a response for, and
@@ -703,6 +697,11 @@ static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
703 task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0); 697 task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
704 if (!task) 698 if (!task)
705 iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n"); 699 iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n");
700 else if (!rhdr) {
701 /* only track our nops */
702 conn->ping_task = task;
703 conn->last_ping = jiffies;
704 }
706} 705}
707 706
708static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr, 707static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 28c9da7d4a5c..7dc62deb4087 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -4402,6 +4402,10 @@ mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru)
4402 scb_t *scb; 4402 scb_t *scb;
4403 int rval; 4403 int rval;
4404 4404
4405 scmd = scsi_allocate_command(GFP_KERNEL);
4406 if (!scmd)
4407 return -ENOMEM;
4408
4405 /* 4409 /*
4406 * The internal commands share one command id and hence are 4410 * The internal commands share one command id and hence are
4407 * serialized. This is so because we want to reserve maximum number of 4411 * serialized. This is so because we want to reserve maximum number of
@@ -4412,12 +4416,11 @@ mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru)
4412 scb = &adapter->int_scb; 4416 scb = &adapter->int_scb;
4413 memset(scb, 0, sizeof(scb_t)); 4417 memset(scb, 0, sizeof(scb_t));
4414 4418
4415 scmd = &adapter->int_scmd;
4416 memset(scmd, 0, sizeof(Scsi_Cmnd));
4417
4418 sdev = kzalloc(sizeof(struct scsi_device), GFP_KERNEL); 4419 sdev = kzalloc(sizeof(struct scsi_device), GFP_KERNEL);
4419 scmd->device = sdev; 4420 scmd->device = sdev;
4420 4421
4422 memset(adapter->int_cdb, 0, sizeof(adapter->int_cdb));
4423 scmd->cmnd = adapter->int_cdb;
4421 scmd->device->host = adapter->host; 4424 scmd->device->host = adapter->host;
4422 scmd->host_scribble = (void *)scb; 4425 scmd->host_scribble = (void *)scb;
4423 scmd->cmnd[0] = MEGA_INTERNAL_CMD; 4426 scmd->cmnd[0] = MEGA_INTERNAL_CMD;
@@ -4456,6 +4459,8 @@ mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru)
4456 4459
4457 mutex_unlock(&adapter->int_mtx); 4460 mutex_unlock(&adapter->int_mtx);
4458 4461
4462 scsi_free_command(GFP_KERNEL, scmd);
4463
4459 return rval; 4464 return rval;
4460} 4465}
4461 4466
diff --git a/drivers/scsi/megaraid.h b/drivers/scsi/megaraid.h
index ee70bd4ae4ba..795201fa0b48 100644
--- a/drivers/scsi/megaraid.h
+++ b/drivers/scsi/megaraid.h
@@ -888,8 +888,8 @@ typedef struct {
888 888
889 u8 sglen; /* f/w supported scatter-gather list length */ 889 u8 sglen; /* f/w supported scatter-gather list length */
890 890
891 unsigned char int_cdb[MAX_COMMAND_SIZE];
891 scb_t int_scb; 892 scb_t int_scb;
892 Scsi_Cmnd int_scmd;
893 struct mutex int_mtx; /* To synchronize the internal 893 struct mutex int_mtx; /* To synchronize the internal
894 commands */ 894 commands */
895 struct completion int_waitq; /* wait queue for internal 895 struct completion int_waitq; /* wait queue for internal
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index afe1de998763..17ce7abe17ee 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -1016,7 +1016,8 @@ static int megasas_slave_configure(struct scsi_device *sdev)
1016 * The RAID firmware may require extended timeouts. 1016 * The RAID firmware may require extended timeouts.
1017 */ 1017 */
1018 if (sdev->channel >= MEGASAS_MAX_PD_CHANNELS) 1018 if (sdev->channel >= MEGASAS_MAX_PD_CHANNELS)
1019 sdev->timeout = MEGASAS_DEFAULT_CMD_TIMEOUT * HZ; 1019 blk_queue_rq_timeout(sdev->request_queue,
1020 MEGASAS_DEFAULT_CMD_TIMEOUT * HZ);
1020 return 0; 1021 return 0;
1021} 1022}
1022 1023
@@ -2988,17 +2989,6 @@ static int megasas_mgmt_open(struct inode *inode, struct file *filep)
2988} 2989}
2989 2990
2990/** 2991/**
2991 * megasas_mgmt_release - char node "release" entry point
2992 */
2993static int megasas_mgmt_release(struct inode *inode, struct file *filep)
2994{
2995 filep->private_data = NULL;
2996 fasync_helper(-1, filep, 0, &megasas_async_queue);
2997
2998 return 0;
2999}
3000
3001/**
3002 * megasas_mgmt_fasync - Async notifier registration from applications 2992 * megasas_mgmt_fasync - Async notifier registration from applications
3003 * 2993 *
3004 * This function adds the calling process to a driver global queue. When an 2994 * This function adds the calling process to a driver global queue. When an
@@ -3345,7 +3335,6 @@ megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd,
3345static const struct file_operations megasas_mgmt_fops = { 3335static const struct file_operations megasas_mgmt_fops = {
3346 .owner = THIS_MODULE, 3336 .owner = THIS_MODULE,
3347 .open = megasas_mgmt_open, 3337 .open = megasas_mgmt_open,
3348 .release = megasas_mgmt_release,
3349 .fasync = megasas_mgmt_fasync, 3338 .fasync = megasas_mgmt_fasync,
3350 .unlocked_ioctl = megasas_mgmt_ioctl, 3339 .unlocked_ioctl = megasas_mgmt_ioctl,
3351#ifdef CONFIG_COMPAT 3340#ifdef CONFIG_COMPAT
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index f25f41a499e5..b97194096d8e 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2547,7 +2547,6 @@ typedef struct scsi_qla_host {
2547 uint8_t fcode_revision[16]; 2547 uint8_t fcode_revision[16];
2548 uint32_t fw_revision[4]; 2548 uint32_t fw_revision[4];
2549 2549
2550 uint16_t fdt_odd_index;
2551 uint32_t fdt_wrt_disable; 2550 uint32_t fdt_wrt_disable;
2552 uint32_t fdt_erase_cmd; 2551 uint32_t fdt_erase_cmd;
2553 uint32_t fdt_block_size; 2552 uint32_t fdt_block_size;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index a470f2d3270d..4218f20f5ed5 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -140,7 +140,6 @@ int
140qla2100_pci_config(scsi_qla_host_t *ha) 140qla2100_pci_config(scsi_qla_host_t *ha)
141{ 141{
142 uint16_t w; 142 uint16_t w;
143 uint32_t d;
144 unsigned long flags; 143 unsigned long flags;
145 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 144 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
146 145
@@ -151,10 +150,7 @@ qla2100_pci_config(scsi_qla_host_t *ha)
151 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR); 150 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
152 pci_write_config_word(ha->pdev, PCI_COMMAND, w); 151 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
153 152
154 /* Reset expansion ROM address decode enable */ 153 pci_disable_rom(ha->pdev);
155 pci_read_config_dword(ha->pdev, PCI_ROM_ADDRESS, &d);
156 d &= ~PCI_ROM_ADDRESS_ENABLE;
157 pci_write_config_dword(ha->pdev, PCI_ROM_ADDRESS, d);
158 154
159 /* Get PCI bus information. */ 155 /* Get PCI bus information. */
160 spin_lock_irqsave(&ha->hardware_lock, flags); 156 spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -174,7 +170,6 @@ int
174qla2300_pci_config(scsi_qla_host_t *ha) 170qla2300_pci_config(scsi_qla_host_t *ha)
175{ 171{
176 uint16_t w; 172 uint16_t w;
177 uint32_t d;
178 unsigned long flags = 0; 173 unsigned long flags = 0;
179 uint32_t cnt; 174 uint32_t cnt;
180 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 175 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
@@ -236,10 +231,7 @@ qla2300_pci_config(scsi_qla_host_t *ha)
236 231
237 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80); 232 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
238 233
239 /* Reset expansion ROM address decode enable */ 234 pci_disable_rom(ha->pdev);
240 pci_read_config_dword(ha->pdev, PCI_ROM_ADDRESS, &d);
241 d &= ~PCI_ROM_ADDRESS_ENABLE;
242 pci_write_config_dword(ha->pdev, PCI_ROM_ADDRESS, d);
243 235
244 /* Get PCI bus information. */ 236 /* Get PCI bus information. */
245 spin_lock_irqsave(&ha->hardware_lock, flags); 237 spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -259,7 +251,6 @@ int
259qla24xx_pci_config(scsi_qla_host_t *ha) 251qla24xx_pci_config(scsi_qla_host_t *ha)
260{ 252{
261 uint16_t w; 253 uint16_t w;
262 uint32_t d;
263 unsigned long flags = 0; 254 unsigned long flags = 0;
264 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 255 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
265 256
@@ -281,10 +272,7 @@ qla24xx_pci_config(scsi_qla_host_t *ha)
281 if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP)) 272 if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
282 pcie_set_readrq(ha->pdev, 2048); 273 pcie_set_readrq(ha->pdev, 2048);
283 274
284 /* Reset expansion ROM address decode enable */ 275 pci_disable_rom(ha->pdev);
285 pci_read_config_dword(ha->pdev, PCI_ROM_ADDRESS, &d);
286 d &= ~PCI_ROM_ADDRESS_ENABLE;
287 pci_write_config_dword(ha->pdev, PCI_ROM_ADDRESS, d);
288 276
289 ha->chip_revision = ha->pdev->revision; 277 ha->chip_revision = ha->pdev->revision;
290 278
@@ -306,7 +294,6 @@ int
306qla25xx_pci_config(scsi_qla_host_t *ha) 294qla25xx_pci_config(scsi_qla_host_t *ha)
307{ 295{
308 uint16_t w; 296 uint16_t w;
309 uint32_t d;
310 297
311 pci_set_master(ha->pdev); 298 pci_set_master(ha->pdev);
312 pci_try_set_mwi(ha->pdev); 299 pci_try_set_mwi(ha->pdev);
@@ -320,10 +307,7 @@ qla25xx_pci_config(scsi_qla_host_t *ha)
320 if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP)) 307 if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
321 pcie_set_readrq(ha->pdev, 2048); 308 pcie_set_readrq(ha->pdev, 2048);
322 309
323 /* Reset expansion ROM address decode enable */ 310 pci_disable_rom(ha->pdev);
324 pci_read_config_dword(ha->pdev, PCI_ROM_ADDRESS, &d);
325 d &= ~PCI_ROM_ADDRESS_ENABLE;
326 pci_write_config_dword(ha->pdev, PCI_ROM_ADDRESS, d);
327 311
328 ha->chip_revision = ha->pdev->revision; 312 ha->chip_revision = ha->pdev->revision;
329 313
@@ -980,7 +964,6 @@ qla2x00_setup_chip(scsi_qla_host_t *ha)
980 &ha->fw_minor_version, 964 &ha->fw_minor_version,
981 &ha->fw_subminor_version, 965 &ha->fw_subminor_version,
982 &ha->fw_attributes, &ha->fw_memory_size); 966 &ha->fw_attributes, &ha->fw_memory_size);
983 qla2x00_resize_request_q(ha);
984 ha->flags.npiv_supported = 0; 967 ha->flags.npiv_supported = 0;
985 if ((IS_QLA24XX(ha) || IS_QLA25XX(ha) || 968 if ((IS_QLA24XX(ha) || IS_QLA25XX(ha) ||
986 IS_QLA84XX(ha)) && 969 IS_QLA84XX(ha)) &&
@@ -992,6 +975,7 @@ qla2x00_setup_chip(scsi_qla_host_t *ha)
992 ha->max_npiv_vports = 975 ha->max_npiv_vports =
993 MIN_MULTI_ID_FABRIC - 1; 976 MIN_MULTI_ID_FABRIC - 1;
994 } 977 }
978 qla2x00_resize_request_q(ha);
995 979
996 if (ql2xallocfwdump) 980 if (ql2xallocfwdump)
997 qla2x00_alloc_fw_dump(ha); 981 qla2x00_alloc_fw_dump(ha);
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 36bc6851e23d..3402746ec128 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -1964,7 +1964,7 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *ha, uint16_t *cur_xchg_cnt,
1964 *cur_iocb_cnt = mcp->mb[7]; 1964 *cur_iocb_cnt = mcp->mb[7];
1965 if (orig_iocb_cnt) 1965 if (orig_iocb_cnt)
1966 *orig_iocb_cnt = mcp->mb[10]; 1966 *orig_iocb_cnt = mcp->mb[10];
1967 if (max_npiv_vports) 1967 if (ha->flags.npiv_supported && max_npiv_vports)
1968 *max_npiv_vports = mcp->mb[11]; 1968 *max_npiv_vports = mcp->mb[11];
1969 } 1969 }
1970 1970
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 21dd182ad512..35567203ef61 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -728,6 +728,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
728 if (ha->isp_ops->abort_command(ha, sp)) { 728 if (ha->isp_ops->abort_command(ha, sp)) {
729 DEBUG2(printk("%s(%ld): abort_command " 729 DEBUG2(printk("%s(%ld): abort_command "
730 "mbx failed.\n", __func__, ha->host_no)); 730 "mbx failed.\n", __func__, ha->host_no));
731 ret = FAILED;
731 } else { 732 } else {
732 DEBUG3(printk("%s(%ld): abort_command " 733 DEBUG3(printk("%s(%ld): abort_command "
733 "mbx success.\n", __func__, ha->host_no)); 734 "mbx success.\n", __func__, ha->host_no));
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 90a13211717f..e4af678eb2d6 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -722,6 +722,7 @@ done:
722static void 722static void
723qla2xxx_get_fdt_info(scsi_qla_host_t *ha) 723qla2xxx_get_fdt_info(scsi_qla_host_t *ha)
724{ 724{
725#define FLASH_BLK_SIZE_4K 0x1000
725#define FLASH_BLK_SIZE_32K 0x8000 726#define FLASH_BLK_SIZE_32K 0x8000
726#define FLASH_BLK_SIZE_64K 0x10000 727#define FLASH_BLK_SIZE_64K 0x10000
727 const char *loc, *locations[] = { "MID", "FDT" }; 728 const char *loc, *locations[] = { "MID", "FDT" };
@@ -755,7 +756,6 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *ha)
755 loc = locations[1]; 756 loc = locations[1];
756 mid = le16_to_cpu(fdt->man_id); 757 mid = le16_to_cpu(fdt->man_id);
757 fid = le16_to_cpu(fdt->id); 758 fid = le16_to_cpu(fdt->id);
758 ha->fdt_odd_index = mid == 0x1f;
759 ha->fdt_wrt_disable = fdt->wrt_disable_bits; 759 ha->fdt_wrt_disable = fdt->wrt_disable_bits;
760 ha->fdt_erase_cmd = flash_conf_to_access_addr(0x0300 | fdt->erase_cmd); 760 ha->fdt_erase_cmd = flash_conf_to_access_addr(0x0300 | fdt->erase_cmd);
761 ha->fdt_block_size = le32_to_cpu(fdt->block_size); 761 ha->fdt_block_size = le32_to_cpu(fdt->block_size);
@@ -788,8 +788,7 @@ no_flash_data:
788 ha->fdt_block_size = FLASH_BLK_SIZE_64K; 788 ha->fdt_block_size = FLASH_BLK_SIZE_64K;
789 break; 789 break;
790 case 0x1f: /* Atmel 26DF081A. */ 790 case 0x1f: /* Atmel 26DF081A. */
791 ha->fdt_odd_index = 1; 791 ha->fdt_block_size = FLASH_BLK_SIZE_4K;
792 ha->fdt_block_size = FLASH_BLK_SIZE_64K;
793 ha->fdt_erase_cmd = flash_conf_to_access_addr(0x0320); 792 ha->fdt_erase_cmd = flash_conf_to_access_addr(0x0320);
794 ha->fdt_unprotect_sec_cmd = flash_conf_to_access_addr(0x0339); 793 ha->fdt_unprotect_sec_cmd = flash_conf_to_access_addr(0x0339);
795 ha->fdt_protect_sec_cmd = flash_conf_to_access_addr(0x0336); 794 ha->fdt_protect_sec_cmd = flash_conf_to_access_addr(0x0336);
@@ -801,9 +800,9 @@ no_flash_data:
801 } 800 }
802done: 801done:
803 DEBUG2(qla_printk(KERN_DEBUG, ha, "FDT[%s]: (0x%x/0x%x) erase=0x%x " 802 DEBUG2(qla_printk(KERN_DEBUG, ha, "FDT[%s]: (0x%x/0x%x) erase=0x%x "
804 "pro=%x upro=%x idx=%d wrtd=0x%x blk=0x%x.\n", loc, mid, fid, 803 "pro=%x upro=%x wrtd=0x%x blk=0x%x.\n", loc, mid, fid,
805 ha->fdt_erase_cmd, ha->fdt_protect_sec_cmd, 804 ha->fdt_erase_cmd, ha->fdt_protect_sec_cmd,
806 ha->fdt_unprotect_sec_cmd, ha->fdt_odd_index, ha->fdt_wrt_disable, 805 ha->fdt_unprotect_sec_cmd, ha->fdt_wrt_disable,
807 ha->fdt_block_size)); 806 ha->fdt_block_size));
808} 807}
809 808
@@ -987,13 +986,9 @@ qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
987 qla24xx_unprotect_flash(ha); 986 qla24xx_unprotect_flash(ha);
988 987
989 for (liter = 0; liter < dwords; liter++, faddr++, dwptr++) { 988 for (liter = 0; liter < dwords; liter++, faddr++, dwptr++) {
990 if (ha->fdt_odd_index) { 989
991 findex = faddr << 2; 990 findex = faddr;
992 fdata = findex & sec_mask; 991 fdata = (findex & sec_mask) << 2;
993 } else {
994 findex = faddr;
995 fdata = (findex & sec_mask) << 2;
996 }
997 992
998 /* Are we at the beginning of a sector? */ 993 /* Are we at the beginning of a sector? */
999 if ((findex & rest_addr) == 0) { 994 if ((findex & rest_addr) == 0) {
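Editor's note: the qla24xx_write_flash_data() hunk removes the fdt_odd_index special case: the word index is now always findex = faddr, the sector byte address is always (findex & sec_mask) << 2, and the Atmel 26DF081A entry above compensates by declaring a 4K block size instead of 64K. The sector-boundary test relies on power-of-two masks; a small sketch of that arithmetic follows, with the sector size and mask derivation assumed purely for illustration.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Assume a 4K-byte sector, i.e. 0x400 32-bit words per sector. */
	const uint32_t words_per_sec = 0x1000 / 4;
	const uint32_t rest_addr = words_per_sec - 1;	/* offset within a sector  */
	const uint32_t sec_mask  = ~rest_addr;		/* sector-aligned word bits */
	uint32_t faddr;

	for (faddr = 0x3fe; faddr <= 0x402; faddr++) {
		uint32_t findex = faddr;
		uint32_t fdata  = (findex & sec_mask) << 2;	/* byte address of sector */

		printf("word 0x%04x: sector byte base 0x%05x%s\n",
		       findex, fdata,
		       (findex & rest_addr) == 0 ? "  <- sector start" : "");
	}
	return 0;
}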
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index be5e299df528..eea6720adf16 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.02.01-k8" 10#define QLA2XXX_VERSION "8.02.01-k9"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 2 13#define QLA_DRIVER_MINOR_VER 2
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 94ed262bdf0c..edfaf241c5ba 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -932,8 +932,7 @@ static int scsi_eh_try_stu(struct scsi_cmnd *scmd)
932 int i, rtn = NEEDS_RETRY; 932 int i, rtn = NEEDS_RETRY;
933 933
934 for (i = 0; rtn == NEEDS_RETRY && i < 2; i++) 934 for (i = 0; rtn == NEEDS_RETRY && i < 2; i++)
935 rtn = scsi_send_eh_cmnd(scmd, stu_command, 6, 935 rtn = scsi_send_eh_cmnd(scmd, stu_command, 6, scmd->device->request_queue->rq_timeout, 0);
936 scmd->device->timeout, 0);
937 936
938 if (rtn == SUCCESS) 937 if (rtn == SUCCESS)
939 return 0; 938 return 0;
@@ -1340,9 +1339,10 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
1340 * LLD/transport was disrupted during processing of the IO. 1339 * LLD/transport was disrupted during processing of the IO.
1341 * The transport class is now blocked/blocking, 1340 * The transport class is now blocked/blocking,
1342 * and the transport will decide what to do with the IO 1341 * and the transport will decide what to do with the IO
1343 * based on its timers and recovery capablilities. 1342 * based on its timers and recovery capablilities if
1343 * there are enough retries.
1344 */ 1344 */
1345 return ADD_TO_MLQUEUE; 1345 goto maybe_retry;
1346 case DID_TRANSPORT_FAILFAST: 1346 case DID_TRANSPORT_FAILFAST:
1347 /* 1347 /*
1348 * The transport decided to failfast the IO (most likely 1348 * The transport decided to failfast the IO (most likely
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index 28b19ef26309..dc1cfb2fd76b 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -237,7 +237,7 @@ int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
237 case SCSI_IOCTL_SEND_COMMAND: 237 case SCSI_IOCTL_SEND_COMMAND:
238 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) 238 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
239 return -EACCES; 239 return -EACCES;
240 return sg_scsi_ioctl(NULL, sdev->request_queue, NULL, arg); 240 return sg_scsi_ioctl(sdev->request_queue, NULL, 0, arg);
241 case SCSI_IOCTL_DOORLOCK: 241 case SCSI_IOCTL_DOORLOCK:
242 return scsi_set_medium_removal(sdev, SCSI_REMOVAL_PREVENT); 242 return scsi_set_medium_removal(sdev, SCSI_REMOVAL_PREVENT);
243 case SCSI_IOCTL_DOORUNLOCK: 243 case SCSI_IOCTL_DOORUNLOCK:
@@ -277,14 +277,14 @@ EXPORT_SYMBOL(scsi_ioctl);
277 * @filp: either NULL or a &struct file which must have the O_NONBLOCK flag. 277 * @filp: either NULL or a &struct file which must have the O_NONBLOCK flag.
278 */ 278 */
279int scsi_nonblockable_ioctl(struct scsi_device *sdev, int cmd, 279int scsi_nonblockable_ioctl(struct scsi_device *sdev, int cmd,
280 void __user *arg, struct file *filp) 280 void __user *arg, int ndelay)
281{ 281{
282 int val, result; 282 int val, result;
283 283
284 /* The first set of iocts may be executed even if we're doing 284 /* The first set of iocts may be executed even if we're doing
285 * error processing, as long as the device was opened 285 * error processing, as long as the device was opened
286 * non-blocking */ 286 * non-blocking */
287 if (filp && (filp->f_flags & O_NONBLOCK)) { 287 if (ndelay) {
288 if (scsi_host_in_recovery(sdev->host)) 288 if (scsi_host_in_recovery(sdev->host))
289 return -ENODEV; 289 return -ENODEV;
290 } else if (!scsi_block_when_processing_errors(sdev)) 290 } else if (!scsi_block_when_processing_errors(sdev))
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index e5a9526d2037..148d3af92aef 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -529,6 +529,14 @@ static void scsi_single_lun_run(struct scsi_device *current_sdev)
529 spin_unlock_irqrestore(shost->host_lock, flags); 529 spin_unlock_irqrestore(shost->host_lock, flags);
530} 530}
531 531
532static inline int scsi_device_is_busy(struct scsi_device *sdev)
533{
534 if (sdev->device_busy >= sdev->queue_depth || sdev->device_blocked)
535 return 1;
536
537 return 0;
538}
539
532static inline int scsi_target_is_busy(struct scsi_target *starget) 540static inline int scsi_target_is_busy(struct scsi_target *starget)
533{ 541{
534 return ((starget->can_queue > 0 && 542 return ((starget->can_queue > 0 &&
@@ -536,6 +544,15 @@ static inline int scsi_target_is_busy(struct scsi_target *starget)
536 starget->target_blocked); 544 starget->target_blocked);
537} 545}
538 546
547static inline int scsi_host_is_busy(struct Scsi_Host *shost)
548{
549 if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
550 shost->host_blocked || shost->host_self_blocked)
551 return 1;
552
553 return 0;
554}
555
539/* 556/*
540 * Function: scsi_run_queue() 557 * Function: scsi_run_queue()
541 * 558 *
@@ -550,19 +567,18 @@ static inline int scsi_target_is_busy(struct scsi_target *starget)
550 */ 567 */
551static void scsi_run_queue(struct request_queue *q) 568static void scsi_run_queue(struct request_queue *q)
552{ 569{
553 struct scsi_device *starved_head = NULL, *sdev = q->queuedata; 570 struct scsi_device *sdev = q->queuedata;
554 struct Scsi_Host *shost = sdev->host; 571 struct Scsi_Host *shost = sdev->host;
572 LIST_HEAD(starved_list);
555 unsigned long flags; 573 unsigned long flags;
556 574
557 if (scsi_target(sdev)->single_lun) 575 if (scsi_target(sdev)->single_lun)
558 scsi_single_lun_run(sdev); 576 scsi_single_lun_run(sdev);
559 577
560 spin_lock_irqsave(shost->host_lock, flags); 578 spin_lock_irqsave(shost->host_lock, flags);
561 while (!list_empty(&shost->starved_list) && 579 list_splice_init(&shost->starved_list, &starved_list);
562 !shost->host_blocked && !shost->host_self_blocked &&
563 !((shost->can_queue > 0) &&
564 (shost->host_busy >= shost->can_queue))) {
565 580
581 while (!list_empty(&starved_list)) {
566 int flagset; 582 int flagset;
567 583
568 /* 584 /*
@@ -575,24 +591,18 @@ static void scsi_run_queue(struct request_queue *q)
575 * scsi_request_fn must get the host_lock before checking 591 * scsi_request_fn must get the host_lock before checking
576 * or modifying starved_list or starved_entry. 592 * or modifying starved_list or starved_entry.
577 */ 593 */
578 sdev = list_entry(shost->starved_list.next, 594 if (scsi_host_is_busy(shost))
579 struct scsi_device, starved_entry);
580 /*
581 * The *queue_ready functions can add a device back onto the
 582 * starved list's tail, so we must check for an infinite loop.
583 */
584 if (sdev == starved_head)
585 break; 595 break;
586 if (!starved_head)
587 starved_head = sdev;
588 596
597 sdev = list_entry(starved_list.next,
598 struct scsi_device, starved_entry);
599 list_del_init(&sdev->starved_entry);
589 if (scsi_target_is_busy(scsi_target(sdev))) { 600 if (scsi_target_is_busy(scsi_target(sdev))) {
590 list_move_tail(&sdev->starved_entry, 601 list_move_tail(&sdev->starved_entry,
591 &shost->starved_list); 602 &shost->starved_list);
592 continue; 603 continue;
593 } 604 }
594 605
595 list_del_init(&sdev->starved_entry);
596 spin_unlock(shost->host_lock); 606 spin_unlock(shost->host_lock);
597 607
598 spin_lock(sdev->request_queue->queue_lock); 608 spin_lock(sdev->request_queue->queue_lock);
@@ -608,6 +618,8 @@ static void scsi_run_queue(struct request_queue *q)
608 618
609 spin_lock(shost->host_lock); 619 spin_lock(shost->host_lock);
610 } 620 }
621 /* put any unprocessed entries back */
622 list_splice(&starved_list, &shost->starved_list);
611 spin_unlock_irqrestore(shost->host_lock, flags); 623 spin_unlock_irqrestore(shost->host_lock, flags);
612 624
613 blk_run_queue(q); 625 blk_run_queue(q);
@@ -636,8 +648,8 @@ static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
636 struct request *req = cmd->request; 648 struct request *req = cmd->request;
637 unsigned long flags; 649 unsigned long flags;
638 650
639 scsi_unprep_request(req);
640 spin_lock_irqsave(q->queue_lock, flags); 651 spin_lock_irqsave(q->queue_lock, flags);
652 scsi_unprep_request(req);
641 blk_requeue_request(q, req); 653 blk_requeue_request(q, req);
642 spin_unlock_irqrestore(q->queue_lock, flags); 654 spin_unlock_irqrestore(q->queue_lock, flags);
643 655
@@ -1348,8 +1360,6 @@ int scsi_prep_fn(struct request_queue *q, struct request *req)
1348static inline int scsi_dev_queue_ready(struct request_queue *q, 1360static inline int scsi_dev_queue_ready(struct request_queue *q,
1349 struct scsi_device *sdev) 1361 struct scsi_device *sdev)
1350{ 1362{
1351 if (sdev->device_busy >= sdev->queue_depth)
1352 return 0;
1353 if (sdev->device_busy == 0 && sdev->device_blocked) { 1363 if (sdev->device_busy == 0 && sdev->device_blocked) {
1354 /* 1364 /*
1355 * unblock after device_blocked iterates to zero 1365 * unblock after device_blocked iterates to zero
@@ -1363,7 +1373,7 @@ static inline int scsi_dev_queue_ready(struct request_queue *q,
1363 return 0; 1373 return 0;
1364 } 1374 }
1365 } 1375 }
1366 if (sdev->device_blocked) 1376 if (scsi_device_is_busy(sdev))
1367 return 0; 1377 return 0;
1368 1378
1369 return 1; 1379 return 1;
@@ -1440,8 +1450,7 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
1440 return 0; 1450 return 0;
1441 } 1451 }
1442 } 1452 }
1443 if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) || 1453 if (scsi_host_is_busy(shost)) {
1444 shost->host_blocked || shost->host_self_blocked) {
1445 if (list_empty(&sdev->starved_entry)) 1454 if (list_empty(&sdev->starved_entry))
1446 list_add_tail(&sdev->starved_entry, &shost->starved_list); 1455 list_add_tail(&sdev->starved_entry, &shost->starved_list);
1447 return 0; 1456 return 0;
@@ -1455,6 +1464,37 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
1455} 1464}
1456 1465
1457/* 1466/*
1467 * Busy state exporting function for request stacking drivers.
1468 *
1469 * For efficiency, no lock is taken to check the busy state of
1470 * shost/starget/sdev, since the returned value is not guaranteed and
1471 * may be changed after request stacking drivers call the function,
1472 * regardless of taking lock or not.
1473 *
1474 * When scsi can't dispatch I/Os anymore and needs to kill I/Os
1475 * (e.g. !sdev), scsi needs to return 'not busy'.
1476 * Otherwise, request stacking drivers may hold requests forever.
1477 */
1478static int scsi_lld_busy(struct request_queue *q)
1479{
1480 struct scsi_device *sdev = q->queuedata;
1481 struct Scsi_Host *shost;
1482 struct scsi_target *starget;
1483
1484 if (!sdev)
1485 return 0;
1486
1487 shost = sdev->host;
1488 starget = scsi_target(sdev);
1489
1490 if (scsi_host_in_recovery(shost) || scsi_host_is_busy(shost) ||
1491 scsi_target_is_busy(starget) || scsi_device_is_busy(sdev))
1492 return 1;
1493
1494 return 0;
1495}
1496
1497/*
1458 * Kill a request for a dead device 1498 * Kill a request for a dead device
1459 */ 1499 */
1460static void scsi_kill_request(struct request *req, struct request_queue *q) 1500static void scsi_kill_request(struct request *req, struct request_queue *q)
@@ -1757,6 +1797,7 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
1757 blk_queue_prep_rq(q, scsi_prep_fn); 1797 blk_queue_prep_rq(q, scsi_prep_fn);
1758 blk_queue_softirq_done(q, scsi_softirq_done); 1798 blk_queue_softirq_done(q, scsi_softirq_done);
1759 blk_queue_rq_timed_out(q, scsi_times_out); 1799 blk_queue_rq_timed_out(q, scsi_times_out);
1800 blk_queue_lld_busy(q, scsi_lld_busy);
1760 return q; 1801 return q;
1761} 1802}
1762 1803
@@ -2105,22 +2146,21 @@ scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
2105 do { 2146 do {
2106 result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr, 2147 result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
2107 timeout, retries); 2148 timeout, retries);
2108 } while ((driver_byte(result) & DRIVER_SENSE) && 2149 if (sdev->removable && scsi_sense_valid(sshdr) &&
2109 sshdr && sshdr->sense_key == UNIT_ATTENTION && 2150 sshdr->sense_key == UNIT_ATTENTION)
2110 --retries); 2151 sdev->changed = 1;
2152 } while (scsi_sense_valid(sshdr) &&
2153 sshdr->sense_key == UNIT_ATTENTION && --retries);
2111 2154
2112 if (!sshdr) 2155 if (!sshdr)
2113 /* could not allocate sense buffer, so can't process it */ 2156 /* could not allocate sense buffer, so can't process it */
2114 return result; 2157 return result;
2115 2158
2116 if ((driver_byte(result) & DRIVER_SENSE) && sdev->removable) { 2159 if (sdev->removable && scsi_sense_valid(sshdr) &&
2117 2160 (sshdr->sense_key == UNIT_ATTENTION ||
2118 if ((scsi_sense_valid(sshdr)) && 2161 sshdr->sense_key == NOT_READY)) {
2119 ((sshdr->sense_key == UNIT_ATTENTION) || 2162 sdev->changed = 1;
2120 (sshdr->sense_key == NOT_READY))) { 2163 result = 0;
2121 sdev->changed = 1;
2122 result = 0;
2123 }
2124 } 2164 }
2125 if (!sshdr_external) 2165 if (!sshdr_external)
2126 kfree(sshdr); 2166 kfree(sshdr);
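The scsi_lib.c changes above replace the starved_head sentinel in scsi_run_queue() with a private list: the shared starved_list is spliced onto a local list under the host lock, drained, and any unprocessed entries are spliced back, so devices that get re-queued while the host or target is busy can no longer produce an endless walk of the shared list. A minimal userspace sketch of that splice-and-drain pattern, with a toy intrusive list standing in for list_head:

/*
 * Userspace sketch (not kernel code) of the splice-and-drain pattern from
 * scsi_run_queue(): grab the shared starved list onto a private list, work
 * through it, requeue still-busy devices on the shared list, and splice any
 * unprocessed leftovers back at the end.
 */
#include <stdio.h>

struct node { struct node *next, *prev; };

static void list_init(struct node *h)        { h->next = h->prev = h; }
static int  list_empty(const struct node *h) { return h->next == h; }

static void list_add_tail(struct node *n, struct node *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

static void list_del_init(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	list_init(n);
}

/* move every entry of src to the tail of dst and leave src empty */
static void list_splice_init(struct node *src, struct node *dst)
{
	if (list_empty(src))
		return;
	src->next->prev = dst->prev;
	dst->prev->next = src->next;
	src->prev->next = dst;
	dst->prev = src->prev;
	list_init(src);
}

struct sdev { struct node entry; int id; int target_busy; };
#define sdev_of(n) ((struct sdev *)(n))	/* entry is the first member */

static struct node starved;		/* models shost->starved_list */
static int host_busy;			/* models scsi_host_is_busy() */

static void run_queue_sketch(void)
{
	struct node local;

	list_init(&local);
	list_splice_init(&starved, &local);	/* take the whole list once */

	while (!list_empty(&local)) {
		struct sdev *s;

		if (host_busy)			/* host full: stop early */
			break;

		s = sdev_of(local.next);
		list_del_init(&s->entry);
		if (s->target_busy) {
			/* still busy: back onto the shared list, not ours,
			 * so this pass cannot loop over it forever */
			list_add_tail(&s->entry, &starved);
			continue;
		}
		printf("dispatching sdev %d\n", s->id);
	}

	list_splice_init(&local, &starved);	/* put leftovers back */
}

int main(void)
{
	struct sdev a = { .id = 1, .target_busy = 0 };
	struct sdev b = { .id = 2, .target_busy = 1 };

	list_init(&starved);
	list_add_tail(&a.entry, &starved);
	list_add_tail(&b.entry, &starved);

	run_queue_sketch();
	printf("entries left on starved list: %s\n",
	       list_empty(&starved) ? "none" : "some");
	return 0;
}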
diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c
index b37e133de805..723fdecd91bd 100644
--- a/drivers/scsi/scsi_netlink.c
+++ b/drivers/scsi/scsi_netlink.c
@@ -205,16 +205,13 @@ static struct notifier_block scsi_netlink_notifier = {
205}; 205};
206 206
207 207
208/** 208/*
209 * GENERIC SCSI transport receive and event handlers 209 * GENERIC SCSI transport receive and event handlers
210 **/ 210 */
211 211
212/** 212/**
213 * scsi_generic_msg_handler - receive message handler for GENERIC transport 213 * scsi_generic_msg_handler - receive message handler for GENERIC transport messages
214 * messages
215 *
216 * @skb: socket receive buffer 214 * @skb: socket receive buffer
217 *
218 **/ 215 **/
219static int 216static int
220scsi_generic_msg_handler(struct sk_buff *skb) 217scsi_generic_msg_handler(struct sk_buff *skb)
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 7c4d2e68df1c..5081b3981d3c 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -609,17 +609,15 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
609 * In the latter case @inode and @filp carry an abridged amount 609 * In the latter case @inode and @filp carry an abridged amount
610 * of information as noted above. 610 * of information as noted above.
611 **/ 611 **/
612static int sd_open(struct inode *inode, struct file *filp) 612static int sd_open(struct block_device *bdev, fmode_t mode)
613{ 613{
614 struct gendisk *disk = inode->i_bdev->bd_disk; 614 struct scsi_disk *sdkp = scsi_disk_get(bdev->bd_disk);
615 struct scsi_disk *sdkp;
616 struct scsi_device *sdev; 615 struct scsi_device *sdev;
617 int retval; 616 int retval;
618 617
619 if (!(sdkp = scsi_disk_get(disk))) 618 if (!sdkp)
620 return -ENXIO; 619 return -ENXIO;
621 620
622
623 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_open\n")); 621 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_open\n"));
624 622
625 sdev = sdkp->device; 623 sdev = sdkp->device;
@@ -633,14 +631,13 @@ static int sd_open(struct inode *inode, struct file *filp)
633 goto error_out; 631 goto error_out;
634 632
635 if (sdev->removable || sdkp->write_prot) 633 if (sdev->removable || sdkp->write_prot)
636 check_disk_change(inode->i_bdev); 634 check_disk_change(bdev);
637 635
638 /* 636 /*
639 * If the drive is empty, just let the open fail. 637 * If the drive is empty, just let the open fail.
640 */ 638 */
641 retval = -ENOMEDIUM; 639 retval = -ENOMEDIUM;
642 if (sdev->removable && !sdkp->media_present && 640 if (sdev->removable && !sdkp->media_present && !(mode & FMODE_NDELAY))
643 !(filp->f_flags & O_NDELAY))
644 goto error_out; 641 goto error_out;
645 642
646 /* 643 /*
@@ -648,7 +645,7 @@ static int sd_open(struct inode *inode, struct file *filp)
648 * if the user expects to be able to write to the thing. 645 * if the user expects to be able to write to the thing.
649 */ 646 */
650 retval = -EROFS; 647 retval = -EROFS;
651 if (sdkp->write_prot && (filp->f_mode & FMODE_WRITE)) 648 if (sdkp->write_prot && (mode & FMODE_WRITE))
652 goto error_out; 649 goto error_out;
653 650
654 /* 651 /*
@@ -684,9 +681,8 @@ error_out:
684 * Note: may block (uninterruptible) if error recovery is underway 681 * Note: may block (uninterruptible) if error recovery is underway
685 * on this disk. 682 * on this disk.
686 **/ 683 **/
687static int sd_release(struct inode *inode, struct file *filp) 684static int sd_release(struct gendisk *disk, fmode_t mode)
688{ 685{
689 struct gendisk *disk = inode->i_bdev->bd_disk;
690 struct scsi_disk *sdkp = scsi_disk(disk); 686 struct scsi_disk *sdkp = scsi_disk(disk);
691 struct scsi_device *sdev = sdkp->device; 687 struct scsi_device *sdev = sdkp->device;
692 688
@@ -743,10 +739,9 @@ static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 743 * Note: most ioctls are forwarded onto the block subsystem or further 739
744 * down in the scsi subsystem. 740 * down in the scsi subsystem.
745 **/ 741 **/
746static int sd_ioctl(struct inode * inode, struct file * filp, 742static int sd_ioctl(struct block_device *bdev, fmode_t mode,
747 unsigned int cmd, unsigned long arg) 743 unsigned int cmd, unsigned long arg)
748{ 744{
749 struct block_device *bdev = inode->i_bdev;
750 struct gendisk *disk = bdev->bd_disk; 745 struct gendisk *disk = bdev->bd_disk;
751 struct scsi_device *sdp = scsi_disk(disk)->device; 746 struct scsi_device *sdp = scsi_disk(disk)->device;
752 void __user *p = (void __user *)arg; 747 void __user *p = (void __user *)arg;
@@ -761,7 +756,8 @@ static int sd_ioctl(struct inode * inode, struct file * filp,
761 * may try and take the device offline, in which case all further 756 * may try and take the device offline, in which case all further
762 * access to the device is prohibited. 757 * access to the device is prohibited.
763 */ 758 */
764 error = scsi_nonblockable_ioctl(sdp, cmd, p, filp); 759 error = scsi_nonblockable_ioctl(sdp, cmd, p,
760 (mode & FMODE_NDELAY) != 0);
765 if (!scsi_block_when_processing_errors(sdp) || !error) 761 if (!scsi_block_when_processing_errors(sdp) || !error)
766 return error; 762 return error;
767 763
@@ -775,7 +771,7 @@ static int sd_ioctl(struct inode * inode, struct file * filp,
775 case SCSI_IOCTL_GET_BUS_NUMBER: 771 case SCSI_IOCTL_GET_BUS_NUMBER:
776 return scsi_ioctl(sdp, cmd, p); 772 return scsi_ioctl(sdp, cmd, p);
777 default: 773 default:
778 error = scsi_cmd_ioctl(filp, disk->queue, disk, cmd, p); 774 error = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, p);
779 if (error != -ENOTTY) 775 if (error != -ENOTTY)
780 return error; 776 return error;
781 } 777 }
@@ -928,11 +924,10 @@ static void sd_rescan(struct device *dev)
928 * This gets directly called from VFS. When the ioctl 924 * This gets directly called from VFS. When the ioctl
929 * is not recognized we go back to the other translation paths. 925 * is not recognized we go back to the other translation paths.
930 */ 926 */
931static long sd_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 927static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode,
928 unsigned int cmd, unsigned long arg)
932{ 929{
933 struct block_device *bdev = file->f_path.dentry->d_inode->i_bdev; 930 struct scsi_device *sdev = scsi_disk(bdev->bd_disk)->device;
934 struct gendisk *disk = bdev->bd_disk;
935 struct scsi_device *sdev = scsi_disk(disk)->device;
936 931
937 /* 932 /*
938 * If we are in the middle of error recovery, don't let anyone 933 * If we are in the middle of error recovery, don't let anyone
@@ -962,7 +957,7 @@ static struct block_device_operations sd_fops = {
962 .owner = THIS_MODULE, 957 .owner = THIS_MODULE,
963 .open = sd_open, 958 .open = sd_open,
964 .release = sd_release, 959 .release = sd_release,
965 .ioctl = sd_ioctl, 960 .locked_ioctl = sd_ioctl,
966 .getgeo = sd_getgeo, 961 .getgeo = sd_getgeo,
967#ifdef CONFIG_COMPAT 962#ifdef CONFIG_COMPAT
968 .compat_ioctl = sd_compat_ioctl, 963 .compat_ioctl = sd_compat_ioctl,
@@ -1054,7 +1049,6 @@ static int sd_done(struct scsi_cmnd *SCpnt)
1054 good_bytes = sd_completed_bytes(SCpnt); 1049 good_bytes = sd_completed_bytes(SCpnt);
1055 break; 1050 break;
1056 case RECOVERED_ERROR: 1051 case RECOVERED_ERROR:
1057 case NO_SENSE:
1058 /* Inform the user, but make sure that it's not treated 1052 /* Inform the user, but make sure that it's not treated
1059 * as a hard error. 1053 * as a hard error.
1060 */ 1054 */
@@ -1063,6 +1057,15 @@ static int sd_done(struct scsi_cmnd *SCpnt)
1063 memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 1057 memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1064 good_bytes = scsi_bufflen(SCpnt); 1058 good_bytes = scsi_bufflen(SCpnt);
1065 break; 1059 break;
1060 case NO_SENSE:
1061 /* This indicates a false check condition, so ignore it. An
1062 * unknown amount of data was transferred so treat it as an
1063 * error.
1064 */
1065 scsi_print_sense("sd", SCpnt);
1066 SCpnt->result = 0;
1067 memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1068 break;
1066 case ABORTED_COMMAND: 1069 case ABORTED_COMMAND:
1067 if (sshdr.asc == 0x10) { /* DIF: Disk detected corruption */ 1070 if (sshdr.asc == 0x10) { /* DIF: Disk detected corruption */
1068 scsi_print_result(SCpnt); 1071 scsi_print_result(SCpnt);
@@ -1076,15 +1079,6 @@ static int sd_done(struct scsi_cmnd *SCpnt)
1076 scsi_print_sense("sd", SCpnt); 1079 scsi_print_sense("sd", SCpnt);
1077 good_bytes = sd_completed_bytes(SCpnt); 1080 good_bytes = sd_completed_bytes(SCpnt);
1078 } 1081 }
1079 if (!scsi_device_protection(SCpnt->device) &&
1080 SCpnt->device->use_10_for_rw &&
1081 (SCpnt->cmnd[0] == READ_10 ||
1082 SCpnt->cmnd[0] == WRITE_10))
1083 SCpnt->device->use_10_for_rw = 0;
1084 if (SCpnt->device->use_10_for_ms &&
1085 (SCpnt->cmnd[0] == MODE_SENSE_10 ||
1086 SCpnt->cmnd[0] == MODE_SELECT_10))
1087 SCpnt->device->use_10_for_ms = 0;
1088 break; 1082 break;
1089 default: 1083 default:
1090 break; 1084 break;
@@ -1437,7 +1431,7 @@ got_data:
1437 1431
1438 { 1432 {
1439 char cap_str_2[10], cap_str_10[10]; 1433 char cap_str_2[10], cap_str_10[10];
1440 u64 sz = sdkp->capacity << ffz(~sector_size); 1434 u64 sz = (u64)sdkp->capacity << ilog2(sector_size);
1441 1435
1442 string_get_size(sz, STRING_UNITS_2, cap_str_2, 1436 string_get_size(sz, STRING_UNITS_2, cap_str_2,
1443 sizeof(cap_str_2)); 1437 sizeof(cap_str_2));
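The last sd.c hunk rewrites the reported capacity as (u64)capacity << ilog2(sector_size) instead of capacity << ffz(~sector_size). For a power-of-two sector size the two shift counts are identical (ffz(~x) is the lowest set bit of x), so the functional change is the widening cast, which keeps a large 32-bit sector count from being truncated before the shift. A standalone sketch with illustrative helper implementations, not the kernel's:

/*
 * Standalone sketch of the capacity-size computation touched in sd.c.
 * ffz_sketch()/ilog2_sketch() are naive stand-ins for the kernel helpers;
 * the point demonstrated is that the shift must be done in 64 bits.
 */
#include <stdio.h>
#include <stdint.h>

static unsigned int ilog2_sketch(unsigned int x)
{
	unsigned int r = 0;

	while (x >>= 1)
		r++;
	return r;
}

static unsigned int ffz_sketch(unsigned int x)	/* index of first zero bit */
{
	unsigned int r = 0;

	while (x & 1) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	uint32_t capacity = 0xF0000000u;	/* sectors, deliberately large */
	unsigned int sector_size = 4096;

	/* same shift count for a power-of-two sector size */
	printf("ffz(~size) = %u, ilog2(size) = %u\n",
	       ffz_sketch(~sector_size), ilog2_sketch(sector_size));

	/* 32-bit shift wraps; widening first keeps the full byte count */
	printf("narrow: %llu bytes\n",
	       (unsigned long long)(capacity << ilog2_sketch(sector_size)));
	printf("wide  : %llu bytes\n",
	       (unsigned long long)((uint64_t)capacity << ilog2_sketch(sector_size)));
	return 0;
}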
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 93bd59a1ed79..5103855242ae 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -327,7 +327,6 @@ sg_release(struct inode *inode, struct file *filp)
327 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) 327 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
328 return -ENXIO; 328 return -ENXIO;
329 SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name)); 329 SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name));
330 sg_fasync(-1, filp, 0); /* remove filp from async notification list */
331 if (0 == sg_remove_sfp(sdp, sfp)) { /* Returns 1 when sdp gone */ 330 if (0 == sg_remove_sfp(sdp, sfp)) { /* Returns 1 when sdp gone */
332 if (!sdp->detached) { 331 if (!sdp->detached) {
333 scsi_device_put(sdp->device); 332 scsi_device_put(sdp->device);
@@ -1059,7 +1058,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
1059 if (sg_allow_access(filp, &opcode)) 1058 if (sg_allow_access(filp, &opcode))
1060 return -EPERM; 1059 return -EPERM;
1061 } 1060 }
1062 return sg_scsi_ioctl(filp, sdp->device->request_queue, NULL, p); 1061 return sg_scsi_ioctl(sdp->device->request_queue, NULL, filp->f_mode, p);
1063 case SG_SET_DEBUG: 1062 case SG_SET_DEBUG:
1064 result = get_user(val, ip); 1063 result = get_user(val, ip);
1065 if (result) 1064 if (result)
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 0f17009c99d2..45b66b98a516 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -471,38 +471,31 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
471 return scsi_prep_return(q, rq, ret); 471 return scsi_prep_return(q, rq, ret);
472} 472}
473 473
474static int sr_block_open(struct inode *inode, struct file *file) 474static int sr_block_open(struct block_device *bdev, fmode_t mode)
475{ 475{
476 struct gendisk *disk = inode->i_bdev->bd_disk; 476 struct scsi_cd *cd = scsi_cd_get(bdev->bd_disk);
477 struct scsi_cd *cd; 477 int ret = -ENXIO;
478 int ret = 0;
479
480 if(!(cd = scsi_cd_get(disk)))
481 return -ENXIO;
482
483 if((ret = cdrom_open(&cd->cdi, inode, file)) != 0)
484 scsi_cd_put(cd);
485 478
479 if (cd) {
480 ret = cdrom_open(&cd->cdi, bdev, mode);
481 if (ret)
482 scsi_cd_put(cd);
483 }
486 return ret; 484 return ret;
487} 485}
488 486
489static int sr_block_release(struct inode *inode, struct file *file) 487static int sr_block_release(struct gendisk *disk, fmode_t mode)
490{ 488{
491 int ret; 489 struct scsi_cd *cd = scsi_cd(disk);
492 struct scsi_cd *cd = scsi_cd(inode->i_bdev->bd_disk); 490 cdrom_release(&cd->cdi, mode);
493 ret = cdrom_release(&cd->cdi, file);
494 if(ret)
495 return ret;
496
497 scsi_cd_put(cd); 491 scsi_cd_put(cd);
498
499 return 0; 492 return 0;
500} 493}
501 494
502static int sr_block_ioctl(struct inode *inode, struct file *file, unsigned cmd, 495static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
503 unsigned long arg) 496 unsigned long arg)
504{ 497{
505 struct scsi_cd *cd = scsi_cd(inode->i_bdev->bd_disk); 498 struct scsi_cd *cd = scsi_cd(bdev->bd_disk);
506 struct scsi_device *sdev = cd->device; 499 struct scsi_device *sdev = cd->device;
507 void __user *argp = (void __user *)arg; 500 void __user *argp = (void __user *)arg;
508 int ret; 501 int ret;
@@ -517,7 +510,7 @@ static int sr_block_ioctl(struct inode *inode, struct file *file, unsigned cmd,
517 return scsi_ioctl(sdev, cmd, argp); 510 return scsi_ioctl(sdev, cmd, argp);
518 } 511 }
519 512
520 ret = cdrom_ioctl(file, &cd->cdi, inode, cmd, arg); 513 ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, arg);
521 if (ret != -ENOSYS) 514 if (ret != -ENOSYS)
522 return ret; 515 return ret;
523 516
@@ -527,7 +520,8 @@ static int sr_block_ioctl(struct inode *inode, struct file *file, unsigned cmd,
 527 * case fall through to scsi_ioctl, which will return ENODEV again 520
528 * if it doesn't recognise the ioctl 521 * if it doesn't recognise the ioctl
529 */ 522 */
530 ret = scsi_nonblockable_ioctl(sdev, cmd, argp, NULL); 523 ret = scsi_nonblockable_ioctl(sdev, cmd, argp,
524 (mode & FMODE_NDELAY) != 0);
531 if (ret != -ENODEV) 525 if (ret != -ENODEV)
532 return ret; 526 return ret;
533 return scsi_ioctl(sdev, cmd, argp); 527 return scsi_ioctl(sdev, cmd, argp);
@@ -544,7 +538,7 @@ static struct block_device_operations sr_bdops =
544 .owner = THIS_MODULE, 538 .owner = THIS_MODULE,
545 .open = sr_block_open, 539 .open = sr_block_open,
546 .release = sr_block_release, 540 .release = sr_block_release,
547 .ioctl = sr_block_ioctl, 541 .locked_ioctl = sr_block_ioctl,
548 .media_changed = sr_block_media_changed, 542 .media_changed = sr_block_media_changed,
549 /* 543 /*
550 * No compat_ioctl for now because sr_block_ioctl never 544 * No compat_ioctl for now because sr_block_ioctl never
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 5c28d08f18f4..c959bdc55f4f 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -3263,7 +3263,8 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
3263 * may try and take the device offline, in which case all further 3263 * may try and take the device offline, in which case all further
3264 * access to the device is prohibited. 3264 * access to the device is prohibited.
3265 */ 3265 */
3266 retval = scsi_nonblockable_ioctl(STp->device, cmd_in, p, file); 3266 retval = scsi_nonblockable_ioctl(STp->device, cmd_in, p,
3267 file->f_flags & O_NDELAY);
3267 if (!scsi_block_when_processing_errors(STp->device) || retval != -ENODEV) 3268 if (!scsi_block_when_processing_errors(STp->device) || retval != -ENODEV)
3268 goto out; 3269 goto out;
3269 retval = 0; 3270 retval = 0;
@@ -3567,8 +3568,8 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
3567 !capable(CAP_SYS_RAWIO)) 3568 !capable(CAP_SYS_RAWIO))
3568 i = -EPERM; 3569 i = -EPERM;
3569 else 3570 else
3570 i = scsi_cmd_ioctl(file, STp->disk->queue, 3571 i = scsi_cmd_ioctl(STp->disk->queue, STp->disk,
3571 STp->disk, cmd_in, p); 3572 file->f_mode, cmd_in, p);
3572 if (i != -ENOTTY) 3573 if (i != -ENOTTY)
3573 return i; 3574 return i;
3574 break; 3575 break;
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 3790906a77d1..2fa830c0be27 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -477,7 +477,7 @@ stex_slave_config(struct scsi_device *sdev)
477{ 477{
478 sdev->use_10_for_rw = 1; 478 sdev->use_10_for_rw = 1;
479 sdev->use_10_for_ms = 1; 479 sdev->use_10_for_ms = 1;
480 sdev->timeout = 60 * HZ; 480 blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
481 sdev->tagged_supported = 1; 481 sdev->tagged_supported = 1;
482 482
483 return 0; 483 return 0;
diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c
index 7514b3a0390e..34a99620e5bd 100644
--- a/drivers/scsi/sun3x_esp.c
+++ b/drivers/scsi/sun3x_esp.c
@@ -213,7 +213,7 @@ static int __devinit esp_sun3x_probe(struct platform_device *dev)
213 esp->ops = &sun3x_esp_ops; 213 esp->ops = &sun3x_esp_ops;
214 214
215 res = platform_get_resource(dev, IORESOURCE_MEM, 0); 215 res = platform_get_resource(dev, IORESOURCE_MEM, 0);
216 if (!res && !res->start) 216 if (!res || !res->start)
217 goto fail_unlink; 217 goto fail_unlink;
218 218
219 esp->regs = ioremap_nocache(res->start, 0x20); 219 esp->regs = ioremap_nocache(res->start, 0x20);
@@ -221,7 +221,7 @@ static int __devinit esp_sun3x_probe(struct platform_device *dev)
221 goto fail_unmap_regs; 221 goto fail_unmap_regs;
222 222
223 res = platform_get_resource(dev, IORESOURCE_MEM, 1); 223 res = platform_get_resource(dev, IORESOURCE_MEM, 1);
224 if (!res && !res->start) 224 if (!res || !res->start)
225 goto fail_unmap_regs; 225 goto fail_unmap_regs;
226 226
227 esp->dma_regs = ioremap_nocache(res->start, 0x10); 227 esp->dma_regs = ioremap_nocache(res->start, 0x10);
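The sun3x_esp.c fix above corrects an inverted resource check: with &&, a missing resource was dereferenced through a NULL pointer and a present one was never validated, while || short-circuits before touching res->start. A tiny standalone sketch of the corrected predicate, using a stand-in struct:

/*
 * Standalone sketch of the resource-check fix in sun3x_esp.c.  With &&,
 * a NULL resource dereferences NULL and a non-NULL one is never range
 * checked; with ||, short-circuit evaluation bails out before the deref.
 * The struct below is a stand-in, not the kernel's struct resource.
 */
#include <stdio.h>
#include <stddef.h>

struct resource_sketch { unsigned long start; };

static int probe_check(const struct resource_sketch *res)
{
	if (!res || !res->start)	/* NULL is rejected before res->start */
		return -1;
	return 0;
}

int main(void)
{
	struct resource_sketch ok  = { .start = 0xfe600000UL };
	struct resource_sketch bad = { .start = 0 };

	printf("valid resource   -> %d\n", probe_check(&ok));
	printf("zero start       -> %d\n", probe_check(&bad));
	printf("missing resource -> %d\n", probe_check(NULL));
	return 0;
}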
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index c014ffb110e9..5450a0e5ecdb 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -1100,6 +1100,8 @@ enum pci_board_num_t {
1100 pbn_b0_4_1843200_200, 1100 pbn_b0_4_1843200_200,
1101 pbn_b0_8_1843200_200, 1101 pbn_b0_8_1843200_200,
1102 1102
1103 pbn_b0_1_4000000,
1104
1103 pbn_b0_bt_1_115200, 1105 pbn_b0_bt_1_115200,
1104 pbn_b0_bt_2_115200, 1106 pbn_b0_bt_2_115200,
1105 pbn_b0_bt_8_115200, 1107 pbn_b0_bt_8_115200,
@@ -1167,6 +1169,10 @@ enum pci_board_num_t {
1167 pbn_exsys_4055, 1169 pbn_exsys_4055,
1168 pbn_plx_romulus, 1170 pbn_plx_romulus,
1169 pbn_oxsemi, 1171 pbn_oxsemi,
1172 pbn_oxsemi_1_4000000,
1173 pbn_oxsemi_2_4000000,
1174 pbn_oxsemi_4_4000000,
1175 pbn_oxsemi_8_4000000,
1170 pbn_intel_i960, 1176 pbn_intel_i960,
1171 pbn_sgi_ioc3, 1177 pbn_sgi_ioc3,
1172 pbn_computone_4, 1178 pbn_computone_4,
@@ -1290,6 +1296,12 @@ static struct pciserial_board pci_boards[] __devinitdata = {
1290 .base_baud = 1843200, 1296 .base_baud = 1843200,
1291 .uart_offset = 0x200, 1297 .uart_offset = 0x200,
1292 }, 1298 },
1299 [pbn_b0_1_4000000] = {
1300 .flags = FL_BASE0,
1301 .num_ports = 1,
1302 .base_baud = 4000000,
1303 .uart_offset = 8,
1304 },
1293 1305
1294 [pbn_b0_bt_1_115200] = { 1306 [pbn_b0_bt_1_115200] = {
1295 .flags = FL_BASE0|FL_BASE_BARS, 1307 .flags = FL_BASE0|FL_BASE_BARS,
@@ -1625,6 +1637,35 @@ static struct pciserial_board pci_boards[] __devinitdata = {
1625 .base_baud = 115200, 1637 .base_baud = 115200,
1626 .uart_offset = 8, 1638 .uart_offset = 8,
1627 }, 1639 },
1640 [pbn_oxsemi_1_4000000] = {
1641 .flags = FL_BASE0,
1642 .num_ports = 1,
1643 .base_baud = 4000000,
1644 .uart_offset = 0x200,
1645 .first_offset = 0x1000,
1646 },
1647 [pbn_oxsemi_2_4000000] = {
1648 .flags = FL_BASE0,
1649 .num_ports = 2,
1650 .base_baud = 4000000,
1651 .uart_offset = 0x200,
1652 .first_offset = 0x1000,
1653 },
1654 [pbn_oxsemi_4_4000000] = {
1655 .flags = FL_BASE0,
1656 .num_ports = 4,
1657 .base_baud = 4000000,
1658 .uart_offset = 0x200,
1659 .first_offset = 0x1000,
1660 },
1661 [pbn_oxsemi_8_4000000] = {
1662 .flags = FL_BASE0,
1663 .num_ports = 8,
1664 .base_baud = 4000000,
1665 .uart_offset = 0x200,
1666 .first_offset = 0x1000,
1667 },
1668
1628 1669
1629 /* 1670 /*
1630 * EKF addition for i960 Boards form EKF with serial port. 1671 * EKF addition for i960 Boards form EKF with serial port.
@@ -1813,6 +1854,39 @@ serial_pci_matches(struct pciserial_board *board,
1813 board->first_offset == guessed->first_offset; 1854 board->first_offset == guessed->first_offset;
1814} 1855}
1815 1856
1857/*
1858 * Oxford Semiconductor Inc.
1859 * Check that device is part of the Tornado range of devices, then determine
1860 * the number of ports available on the device.
1861 */
1862static int pci_oxsemi_tornado_init(struct pci_dev *dev, struct pciserial_board *board)
1863{
1864 u8 __iomem *p;
1865 unsigned long deviceID;
1866 unsigned int number_uarts;
1867
1868 /* OxSemi Tornado devices are all 0xCxxx */
1869 if (dev->vendor == PCI_VENDOR_ID_OXSEMI &&
1870 (dev->device & 0xF000) != 0xC000)
1871 return 0;
1872
1873 p = pci_iomap(dev, 0, 5);
1874 if (p == NULL)
1875 return -ENOMEM;
1876
1877 deviceID = ioread32(p);
1878 /* Tornado device */
1879 if (deviceID == 0x07000200) {
1880 number_uarts = ioread8(p + 4);
1881 board->num_ports = number_uarts;
1882 printk(KERN_DEBUG
1883 "%d ports detected on Oxford PCI Express device\n",
1884 number_uarts);
1885 }
1886 pci_iounmap(dev, p);
1887 return 0;
1888}
1889
1816struct serial_private * 1890struct serial_private *
1817pciserial_init_ports(struct pci_dev *dev, struct pciserial_board *board) 1891pciserial_init_ports(struct pci_dev *dev, struct pciserial_board *board)
1818{ 1892{
@@ -1821,6 +1895,13 @@ pciserial_init_ports(struct pci_dev *dev, struct pciserial_board *board)
1821 struct pci_serial_quirk *quirk; 1895 struct pci_serial_quirk *quirk;
1822 int rc, nr_ports, i; 1896 int rc, nr_ports, i;
1823 1897
1898 /*
1899 * Find number of ports on board
1900 */
1901 if (dev->vendor == PCI_VENDOR_ID_OXSEMI ||
1902 dev->vendor == PCI_VENDOR_ID_MAINPINE)
1903 pci_oxsemi_tornado_init(dev, board);
1904
1824 nr_ports = board->num_ports; 1905 nr_ports = board->num_ports;
1825 1906
1826 /* 1907 /*
@@ -2301,6 +2382,156 @@ static struct pci_device_id serial_pci_tbl[] = {
2301 pbn_b0_bt_2_921600 }, 2382 pbn_b0_bt_2_921600 },
2302 2383
2303 /* 2384 /*
2385 * Oxford Semiconductor Inc. Tornado PCI express device range.
2386 */
2387 { PCI_VENDOR_ID_OXSEMI, 0xc101, /* OXPCIe952 1 Legacy UART */
2388 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2389 pbn_b0_1_4000000 },
2390 { PCI_VENDOR_ID_OXSEMI, 0xc105, /* OXPCIe952 1 Legacy UART */
2391 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2392 pbn_b0_1_4000000 },
2393 { PCI_VENDOR_ID_OXSEMI, 0xc11b, /* OXPCIe952 1 Native UART */
2394 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2395 pbn_oxsemi_1_4000000 },
2396 { PCI_VENDOR_ID_OXSEMI, 0xc11f, /* OXPCIe952 1 Native UART */
2397 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2398 pbn_oxsemi_1_4000000 },
2399 { PCI_VENDOR_ID_OXSEMI, 0xc120, /* OXPCIe952 1 Legacy UART */
2400 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2401 pbn_b0_1_4000000 },
2402 { PCI_VENDOR_ID_OXSEMI, 0xc124, /* OXPCIe952 1 Legacy UART */
2403 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2404 pbn_b0_1_4000000 },
2405 { PCI_VENDOR_ID_OXSEMI, 0xc138, /* OXPCIe952 1 Native UART */
2406 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2407 pbn_oxsemi_1_4000000 },
2408 { PCI_VENDOR_ID_OXSEMI, 0xc13d, /* OXPCIe952 1 Native UART */
2409 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2410 pbn_oxsemi_1_4000000 },
2411 { PCI_VENDOR_ID_OXSEMI, 0xc140, /* OXPCIe952 1 Legacy UART */
2412 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2413 pbn_b0_1_4000000 },
2414 { PCI_VENDOR_ID_OXSEMI, 0xc141, /* OXPCIe952 1 Legacy UART */
2415 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2416 pbn_b0_1_4000000 },
2417 { PCI_VENDOR_ID_OXSEMI, 0xc144, /* OXPCIe952 1 Legacy UART */
2418 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2419 pbn_b0_1_4000000 },
2420 { PCI_VENDOR_ID_OXSEMI, 0xc145, /* OXPCIe952 1 Legacy UART */
2421 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2422 pbn_b0_1_4000000 },
2423 { PCI_VENDOR_ID_OXSEMI, 0xc158, /* OXPCIe952 2 Native UART */
2424 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2425 pbn_oxsemi_2_4000000 },
2426 { PCI_VENDOR_ID_OXSEMI, 0xc15d, /* OXPCIe952 2 Native UART */
2427 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2428 pbn_oxsemi_2_4000000 },
2429 { PCI_VENDOR_ID_OXSEMI, 0xc208, /* OXPCIe954 4 Native UART */
2430 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2431 pbn_oxsemi_4_4000000 },
2432 { PCI_VENDOR_ID_OXSEMI, 0xc20d, /* OXPCIe954 4 Native UART */
2433 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2434 pbn_oxsemi_4_4000000 },
2435 { PCI_VENDOR_ID_OXSEMI, 0xc308, /* OXPCIe958 8 Native UART */
2436 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2437 pbn_oxsemi_8_4000000 },
2438 { PCI_VENDOR_ID_OXSEMI, 0xc30d, /* OXPCIe958 8 Native UART */
2439 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2440 pbn_oxsemi_8_4000000 },
2441 { PCI_VENDOR_ID_OXSEMI, 0xc40b, /* OXPCIe200 1 Native UART */
2442 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2443 pbn_oxsemi_1_4000000 },
2444 { PCI_VENDOR_ID_OXSEMI, 0xc40f, /* OXPCIe200 1 Native UART */
2445 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2446 pbn_oxsemi_1_4000000 },
2447 { PCI_VENDOR_ID_OXSEMI, 0xc41b, /* OXPCIe200 1 Native UART */
2448 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2449 pbn_oxsemi_1_4000000 },
2450 { PCI_VENDOR_ID_OXSEMI, 0xc41f, /* OXPCIe200 1 Native UART */
2451 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2452 pbn_oxsemi_1_4000000 },
2453 { PCI_VENDOR_ID_OXSEMI, 0xc42b, /* OXPCIe200 1 Native UART */
2454 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2455 pbn_oxsemi_1_4000000 },
2456 { PCI_VENDOR_ID_OXSEMI, 0xc42f, /* OXPCIe200 1 Native UART */
2457 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2458 pbn_oxsemi_1_4000000 },
2459 { PCI_VENDOR_ID_OXSEMI, 0xc43b, /* OXPCIe200 1 Native UART */
2460 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2461 pbn_oxsemi_1_4000000 },
2462 { PCI_VENDOR_ID_OXSEMI, 0xc43f, /* OXPCIe200 1 Native UART */
2463 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2464 pbn_oxsemi_1_4000000 },
2465 { PCI_VENDOR_ID_OXSEMI, 0xc44b, /* OXPCIe200 1 Native UART */
2466 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2467 pbn_oxsemi_1_4000000 },
2468 { PCI_VENDOR_ID_OXSEMI, 0xc44f, /* OXPCIe200 1 Native UART */
2469 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2470 pbn_oxsemi_1_4000000 },
2471 { PCI_VENDOR_ID_OXSEMI, 0xc45b, /* OXPCIe200 1 Native UART */
2472 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2473 pbn_oxsemi_1_4000000 },
2474 { PCI_VENDOR_ID_OXSEMI, 0xc45f, /* OXPCIe200 1 Native UART */
2475 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2476 pbn_oxsemi_1_4000000 },
2477 { PCI_VENDOR_ID_OXSEMI, 0xc46b, /* OXPCIe200 1 Native UART */
2478 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2479 pbn_oxsemi_1_4000000 },
2480 { PCI_VENDOR_ID_OXSEMI, 0xc46f, /* OXPCIe200 1 Native UART */
2481 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2482 pbn_oxsemi_1_4000000 },
2483 { PCI_VENDOR_ID_OXSEMI, 0xc47b, /* OXPCIe200 1 Native UART */
2484 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2485 pbn_oxsemi_1_4000000 },
2486 { PCI_VENDOR_ID_OXSEMI, 0xc47f, /* OXPCIe200 1 Native UART */
2487 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2488 pbn_oxsemi_1_4000000 },
2489 { PCI_VENDOR_ID_OXSEMI, 0xc48b, /* OXPCIe200 1 Native UART */
2490 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2491 pbn_oxsemi_1_4000000 },
2492 { PCI_VENDOR_ID_OXSEMI, 0xc48f, /* OXPCIe200 1 Native UART */
2493 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2494 pbn_oxsemi_1_4000000 },
2495 { PCI_VENDOR_ID_OXSEMI, 0xc49b, /* OXPCIe200 1 Native UART */
2496 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2497 pbn_oxsemi_1_4000000 },
2498 { PCI_VENDOR_ID_OXSEMI, 0xc49f, /* OXPCIe200 1 Native UART */
2499 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2500 pbn_oxsemi_1_4000000 },
2501 { PCI_VENDOR_ID_OXSEMI, 0xc4ab, /* OXPCIe200 1 Native UART */
2502 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2503 pbn_oxsemi_1_4000000 },
2504 { PCI_VENDOR_ID_OXSEMI, 0xc4af, /* OXPCIe200 1 Native UART */
2505 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2506 pbn_oxsemi_1_4000000 },
2507 { PCI_VENDOR_ID_OXSEMI, 0xc4bb, /* OXPCIe200 1 Native UART */
2508 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2509 pbn_oxsemi_1_4000000 },
2510 { PCI_VENDOR_ID_OXSEMI, 0xc4bf, /* OXPCIe200 1 Native UART */
2511 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2512 pbn_oxsemi_1_4000000 },
2513 { PCI_VENDOR_ID_OXSEMI, 0xc4cb, /* OXPCIe200 1 Native UART */
2514 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2515 pbn_oxsemi_1_4000000 },
2516 { PCI_VENDOR_ID_OXSEMI, 0xc4cf, /* OXPCIe200 1 Native UART */
2517 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2518 pbn_oxsemi_1_4000000 },
2519 /*
2520 * Mainpine Inc. IQ Express "Rev3" utilizing OxSemi Tornado
2521 */
2522 { PCI_VENDOR_ID_MAINPINE, 0x4000, /* IQ Express 1 Port V.34 Super-G3 Fax */
2523 PCI_VENDOR_ID_MAINPINE, 0x4001, 0, 0,
2524 pbn_oxsemi_1_4000000 },
2525 { PCI_VENDOR_ID_MAINPINE, 0x4000, /* IQ Express 2 Port V.34 Super-G3 Fax */
2526 PCI_VENDOR_ID_MAINPINE, 0x4002, 0, 0,
2527 pbn_oxsemi_2_4000000 },
2528 { PCI_VENDOR_ID_MAINPINE, 0x4000, /* IQ Express 4 Port V.34 Super-G3 Fax */
2529 PCI_VENDOR_ID_MAINPINE, 0x4004, 0, 0,
2530 pbn_oxsemi_4_4000000 },
2531 { PCI_VENDOR_ID_MAINPINE, 0x4000, /* IQ Express 8 Port V.34 Super-G3 Fax */
2532 PCI_VENDOR_ID_MAINPINE, 0x4008, 0, 0,
2533 pbn_oxsemi_8_4000000 },
2534 /*
2304 * SBS Technologies, Inc. P-Octal and PMC-OCTPRO cards, 2535 * SBS Technologies, Inc. P-Octal and PMC-OCTPRO cards,
2305 * from skokodyn@yahoo.com 2536 * from skokodyn@yahoo.com
2306 */ 2537 */
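The 8250_pci.c additions above register the OxSemi Tornado and Mainpine IQ Express IDs and add pci_oxsemi_tornado_init(), which maps BAR0, checks the 0x07000200 device ID dword and, when it matches, overrides the static board description with the UART count the device reports at the next byte. A standalone sketch of that detection flow, with a byte array standing in for the mapped BAR and only the ID value and offsets taken from the patch:

/*
 * Standalone sketch of the Tornado port-count probe: read a 32-bit device
 * ID at the start of "BAR0" and, if it identifies a Tornado part, take the
 * UART count from the following byte and override the static board entry.
 * The buffer, read helper and board struct are stand-ins for pci_iomap(),
 * ioread32()/ioread8() and struct pciserial_board.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct board_sketch { unsigned int num_ports; };

static uint32_t read32_sketch(const uint8_t *p)
{
	uint32_t v;

	memcpy(&v, p, sizeof(v));	/* assumes little-endian layout */
	return v;
}

static void tornado_init_sketch(const uint8_t *bar0, struct board_sketch *board)
{
	if (read32_sketch(bar0) == 0x07000200u)	/* Tornado ID from the patch */
		board->num_ports = bar0[4];	/* UART count the device advertises */
}

int main(void)
{
	/* fake BAR0: ID dword (little-endian 0x07000200) then a count of 4 */
	uint8_t bar0[8] = { 0x00, 0x02, 0x00, 0x07, 0x04 };
	struct board_sketch board = { .num_ports = 1 };	/* static table default */

	tornado_init_sketch(bar0, &board);
	printf("ports detected: %u\n", board.num_ports);
	return 0;
}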
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index db783b77a881..579d63a81aa2 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -457,7 +457,7 @@ config SERIAL_SAMSUNG
457 457
458config SERIAL_SAMSUNG_DEBUG 458config SERIAL_SAMSUNG_DEBUG
459 bool "Samsung SoC serial debug" 459 bool "Samsung SoC serial debug"
460 depends on SERIAL_SAMSUNG 460 depends on SERIAL_SAMSUNG && DEBUG_LL
461 help 461 help
462 Add support for debugging the serial driver. Since this is 462 Add support for debugging the serial driver. Since this is
463 generally being used as a console, we use our own output 463 generally being used as a console, we use our own output
@@ -1276,7 +1276,7 @@ config SERIAL_SGI_IOC3
1276 say Y or M. Otherwise, say N. 1276 say Y or M. Otherwise, say N.
1277 1277
1278config SERIAL_NETX 1278config SERIAL_NETX
1279 bool "NetX serial port support" 1279 tristate "NetX serial port support"
1280 depends on ARM && ARCH_NETX 1280 depends on ARM && ARCH_NETX
1281 select SERIAL_CORE 1281 select SERIAL_CORE
1282 help 1282 help
@@ -1288,7 +1288,7 @@ config SERIAL_NETX
1288 1288
1289config SERIAL_NETX_CONSOLE 1289config SERIAL_NETX_CONSOLE
1290 bool "Console on NetX serial port" 1290 bool "Console on NetX serial port"
1291 depends on SERIAL_NETX 1291 depends on SERIAL_NETX=y
1292 select SERIAL_CORE_CONSOLE 1292 select SERIAL_CORE_CONSOLE
1293 help 1293 help
1294 If you have enabled the serial port on the Hilscher NetX SoC 1294 If you have enabled the serial port on the Hilscher NetX SoC
diff --git a/drivers/serial/atmel_serial.c b/drivers/serial/atmel_serial.c
index 61fb8b6d19af..d5efd6c77904 100644
--- a/drivers/serial/atmel_serial.c
+++ b/drivers/serial/atmel_serial.c
@@ -1258,6 +1258,8 @@ static void __devinit atmel_init_port(struct atmel_uart_port *atmel_port,
1258 atmel_port->clk = clk_get(&pdev->dev, "usart"); 1258 atmel_port->clk = clk_get(&pdev->dev, "usart");
1259 clk_enable(atmel_port->clk); 1259 clk_enable(atmel_port->clk);
1260 port->uartclk = clk_get_rate(atmel_port->clk); 1260 port->uartclk = clk_get_rate(atmel_port->clk);
1261 clk_disable(atmel_port->clk);
1262 /* only enable clock when USART is in use */
1261 } 1263 }
1262 1264
1263 atmel_port->use_dma_rx = data->use_dma_rx; 1265 atmel_port->use_dma_rx = data->use_dma_rx;
@@ -1379,6 +1381,8 @@ static int __init atmel_console_setup(struct console *co, char *options)
1379 return -ENODEV; 1381 return -ENODEV;
1380 } 1382 }
1381 1383
1384 clk_enable(atmel_ports[co->index].clk);
1385
1382 UART_PUT_IDR(port, -1); 1386 UART_PUT_IDR(port, -1);
1383 UART_PUT_CR(port, ATMEL_US_RSTSTA | ATMEL_US_RSTRX); 1387 UART_PUT_CR(port, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
1384 UART_PUT_CR(port, ATMEL_US_TXEN | ATMEL_US_RXEN); 1388 UART_PUT_CR(port, ATMEL_US_TXEN | ATMEL_US_RXEN);
@@ -1403,7 +1407,7 @@ static struct console atmel_console = {
1403 .data = &atmel_uart, 1407 .data = &atmel_uart,
1404}; 1408};
1405 1409
1406#define ATMEL_CONSOLE_DEVICE &atmel_console 1410#define ATMEL_CONSOLE_DEVICE (&atmel_console)
1407 1411
1408/* 1412/*
1409 * Early console initialization (before VM subsystem initialized). 1413 * Early console initialization (before VM subsystem initialized).
@@ -1534,6 +1538,15 @@ static int __devinit atmel_serial_probe(struct platform_device *pdev)
1534 if (ret) 1538 if (ret)
1535 goto err_add_port; 1539 goto err_add_port;
1536 1540
1541 if (atmel_is_console_port(&port->uart)
1542 && ATMEL_CONSOLE_DEVICE->flags & CON_ENABLED) {
1543 /*
1544 * The serial core enabled the clock for us, so undo
1545 * the clk_enable() in atmel_console_setup()
1546 */
1547 clk_disable(port->clk);
1548 }
1549
1537 device_init_wakeup(&pdev->dev, 1); 1550 device_init_wakeup(&pdev->dev, 1);
1538 platform_set_drvdata(pdev, port); 1551 platform_set_drvdata(pdev, port);
1539 1552
@@ -1544,7 +1557,6 @@ err_add_port:
1544 port->rx_ring.buf = NULL; 1557 port->rx_ring.buf = NULL;
1545err_alloc_ring: 1558err_alloc_ring:
1546 if (!atmel_is_console_port(&port->uart)) { 1559 if (!atmel_is_console_port(&port->uart)) {
1547 clk_disable(port->clk);
1548 clk_put(port->clk); 1560 clk_put(port->clk);
1549 port->clk = NULL; 1561 port->clk = NULL;
1550 } 1562 }
@@ -1568,7 +1580,6 @@ static int __devexit atmel_serial_remove(struct platform_device *pdev)
1568 1580
1569 /* "port" is allocated statically, so we shouldn't free it */ 1581 /* "port" is allocated statically, so we shouldn't free it */
1570 1582
1571 clk_disable(atmel_port->clk);
1572 clk_put(atmel_port->clk); 1583 clk_put(atmel_port->clk);
1573 1584
1574 return ret; 1585 return ret;
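The atmel_serial.c changes above keep the USART clock reference-counted instead of leaving it enabled from probe onward: probe enables it only long enough to read the rate, the console setup path takes its own enable, and probe drops that extra reference once it knows the console is active, so every clk_enable() stays paired with a clk_disable(). A toy refcounted-clock sketch of that balancing, assuming a simplified probe/console interaction rather than the real serial-core flow:

/*
 * Standalone sketch (not the kernel clk API) of the clock balancing in
 * atmel_serial.c: the "clock" is a plain refcount, and the probe path
 * undoes the reference taken by console setup so the count ends at zero
 * when the port is idle.
 */
#include <stdio.h>
#include <assert.h>

struct clk_sketch { int enable_count; unsigned long rate; };

static void clk_enable_sketch(struct clk_sketch *c)
{
	c->enable_count++;
}

static void clk_disable_sketch(struct clk_sketch *c)
{
	assert(c->enable_count > 0);	/* catch unbalanced disables */
	c->enable_count--;
}

static unsigned long init_port_sketch(struct clk_sketch *c)
{
	unsigned long rate;

	clk_enable_sketch(c);
	rate = c->rate;			/* clock must be on to read the rate */
	clk_disable_sketch(c);		/* only hold the clock while in use */
	return rate;
}

static void console_setup_sketch(struct clk_sketch *c)
{
	clk_enable_sketch(c);		/* console keeps the clock running */
}

int main(void)
{
	struct clk_sketch usart = { .enable_count = 0, .rate = 132096000 };
	int console_enabled = 1;

	unsigned long uartclk = init_port_sketch(&usart);

	console_setup_sketch(&usart);
	/* probe: the console port is re-enabled for us elsewhere, so drop
	 * the reference taken in console_setup_sketch() */
	if (console_enabled)
		clk_disable_sketch(&usart);

	printf("uartclk=%lu, enable_count=%d\n", uartclk, usart.enable_count);
	return 0;
}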
diff --git a/drivers/serial/crisv10.c b/drivers/serial/crisv10.c
index 211c21797ce0..8b2c619a09f2 100644
--- a/drivers/serial/crisv10.c
+++ b/drivers/serial/crisv10.c
@@ -34,14 +34,14 @@ static char *serial_version = "$Revision: 1.25 $";
34#include <asm/system.h> 34#include <asm/system.h>
35#include <linux/delay.h> 35#include <linux/delay.h>
36 36
37#include <asm/arch/svinto.h> 37#include <arch/svinto.h>
38 38
39/* non-arch dependent serial structures are in linux/serial.h */ 39/* non-arch dependent serial structures are in linux/serial.h */
40#include <linux/serial.h> 40#include <linux/serial.h>
41/* while we keep our own stuff (struct e100_serial) in a local .h file */ 41/* while we keep our own stuff (struct e100_serial) in a local .h file */
42#include "crisv10.h" 42#include "crisv10.h"
43#include <asm/fasttimer.h> 43#include <asm/fasttimer.h>
44#include <asm/arch/io_interface_mux.h> 44#include <arch/io_interface_mux.h>
45 45
46#ifdef CONFIG_ETRAX_SERIAL_FAST_TIMER 46#ifdef CONFIG_ETRAX_SERIAL_FAST_TIMER
47#ifndef CONFIG_ETRAX_FAST_TIMER 47#ifndef CONFIG_ETRAX_FAST_TIMER
diff --git a/drivers/serial/crisv10.h b/drivers/serial/crisv10.h
index e3c5c8c3c09b..f36a729280bc 100644
--- a/drivers/serial/crisv10.h
+++ b/drivers/serial/crisv10.h
@@ -10,7 +10,7 @@
10#include <linux/circ_buf.h> 10#include <linux/circ_buf.h>
11#include <asm/termios.h> 11#include <asm/termios.h>
12#include <asm/dma.h> 12#include <asm/dma.h>
13#include <asm/arch/io_interface_mux.h> 13#include <arch/io_interface_mux.h>
14 14
15/* Software state per channel */ 15/* Software state per channel */
16 16
diff --git a/drivers/serial/ioc3_serial.c b/drivers/serial/ioc3_serial.c
index 6dd98f9fb89c..ae3699d77dd0 100644
--- a/drivers/serial/ioc3_serial.c
+++ b/drivers/serial/ioc3_serial.c
@@ -2149,7 +2149,7 @@ out4:
2149 return ret; 2149 return ret;
2150} 2150}
2151 2151
2152static struct ioc3_submodule ioc3uart_submodule = { 2152static struct ioc3_submodule ioc3uart_ops = {
2153 .name = "IOC3uart", 2153 .name = "IOC3uart",
2154 .probe = ioc3uart_probe, 2154 .probe = ioc3uart_probe,
2155 .remove = ioc3uart_remove, 2155 .remove = ioc3uart_remove,
@@ -2173,7 +2173,7 @@ static int __devinit ioc3uart_init(void)
2173 __func__); 2173 __func__);
2174 return ret; 2174 return ret;
2175 } 2175 }
2176 ret = ioc3_register_submodule(&ioc3uart_submodule); 2176 ret = ioc3_register_submodule(&ioc3uart_ops);
2177 if (ret) 2177 if (ret)
2178 uart_unregister_driver(&ioc3_uart); 2178 uart_unregister_driver(&ioc3_uart);
2179 return ret; 2179 return ret;
@@ -2181,7 +2181,7 @@ static int __devinit ioc3uart_init(void)
2181 2181
2182static void __devexit ioc3uart_exit(void) 2182static void __devexit ioc3uart_exit(void)
2183{ 2183{
2184 ioc3_unregister_submodule(&ioc3uart_submodule); 2184 ioc3_unregister_submodule(&ioc3uart_ops);
2185 uart_unregister_driver(&ioc3_uart); 2185 uart_unregister_driver(&ioc3_uart);
2186} 2186}
2187 2187
diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c
index 6117d3db0b66..28c00c3d58f5 100644
--- a/drivers/serial/mpc52xx_uart.c
+++ b/drivers/serial/mpc52xx_uart.c
@@ -591,8 +591,8 @@ mpc52xx_uart_set_termios(struct uart_port *port, struct ktermios *new,
591 /* Update the per-port timeout */ 591 /* Update the per-port timeout */
592 uart_update_timeout(port, new->c_cflag, baud); 592 uart_update_timeout(port, new->c_cflag, baud);
593 593
594 /* Do our best to flush TX & RX, so we don't loose anything */ 594 /* Do our best to flush TX & RX, so we don't lose anything */
595 /* But we don't wait indefinitly ! */ 595 /* But we don't wait indefinitely ! */
596 j = 5000000; /* Maximum wait */ 596 j = 5000000; /* Maximum wait */
597 /* FIXME Can't receive chars since set_termios might be called at early 597 /* FIXME Can't receive chars since set_termios might be called at early
598 * boot for the console, all stuff is not yet ready to receive at that 598 * boot for the console, all stuff is not yet ready to receive at that
diff --git a/drivers/serial/netx-serial.c b/drivers/serial/netx-serial.c
index 3f489329e8d3..3e5dda8518b7 100644
--- a/drivers/serial/netx-serial.c
+++ b/drivers/serial/netx-serial.c
@@ -42,8 +42,6 @@
42#define SERIAL_NX_MAJOR 204 42#define SERIAL_NX_MAJOR 204
43#define MINOR_START 170 43#define MINOR_START 170
44 44
45#ifdef CONFIG_SERIAL_NETX_CONSOLE
46
47enum uart_regs { 45enum uart_regs {
48 UART_DR = 0x00, 46 UART_DR = 0x00,
49 UART_SR = 0x04, 47 UART_SR = 0x04,
@@ -528,6 +526,8 @@ static struct netx_port netx_ports[] = {
528 } 526 }
529}; 527};
530 528
529#ifdef CONFIG_SERIAL_NETX_CONSOLE
530
531static void netx_console_putchar(struct uart_port *port, int ch) 531static void netx_console_putchar(struct uart_port *port, int ch)
532{ 532{
533 while (readl(port->membase + UART_FR) & FR_BUSY); 533 while (readl(port->membase + UART_FR) & FR_BUSY);
diff --git a/drivers/serial/s3c2440.c b/drivers/serial/s3c2440.c
index 317d239ab740..29cbb0afef8e 100644
--- a/drivers/serial/s3c2440.c
+++ b/drivers/serial/s3c2440.c
@@ -177,5 +177,5 @@ module_exit(s3c2440_serial_exit);
177 177
178MODULE_DESCRIPTION("Samsung S3C2440,S3C2442 SoC Serial port driver"); 178MODULE_DESCRIPTION("Samsung S3C2440,S3C2442 SoC Serial port driver");
179MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>"); 179MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
180MODULE_LICENSE("GPLi v2"); 180MODULE_LICENSE("GPL v2");
181MODULE_ALIAS("platform:s3c2440-uart"); 181MODULE_ALIAS("platform:s3c2440-uart");
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c
index f0658d2c45b2..165fc010978c 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/serial/sh-sci.c
@@ -144,9 +144,9 @@ static void put_char(struct uart_port *port, char c)
144 status = sci_in(port, SCxSR); 144 status = sci_in(port, SCxSR);
145 } while (!(status & SCxSR_TDxE(port))); 145 } while (!(status & SCxSR_TDxE(port)));
146 146
147 sci_out(port, SCxTDR, c);
148 sci_in(port, SCxSR); /* Dummy read */ 147 sci_in(port, SCxSR); /* Dummy read */
149 sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port)); 148 sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port));
149 sci_out(port, SCxTDR, c);
150 150
151 spin_unlock_irqrestore(&port->lock, flags); 151 spin_unlock_irqrestore(&port->lock, flags);
152} 152}
@@ -250,8 +250,7 @@ static inline void h8300_sci_disable(struct uart_port *port)
250} 250}
251#endif 251#endif
252 252
253#if defined(SCI_ONLY) || defined(SCI_AND_SCIF) && \ 253#if defined(__H8300H__) || defined(__H8300S__)
254 defined(__H8300H__) || defined(__H8300S__)
255static void sci_init_pins_sci(struct uart_port* port, unsigned int cflag) 254static void sci_init_pins_sci(struct uart_port* port, unsigned int cflag)
256{ 255{
257 int ch = (port->mapbase - SMR0) >> 3; 256 int ch = (port->mapbase - SMR0) >> 3;
@@ -285,11 +284,6 @@ static void sci_init_pins_irda(struct uart_port *port, unsigned int cflag)
285#define sci_init_pins_irda NULL 284#define sci_init_pins_irda NULL
286#endif 285#endif
287 286
288#ifdef SCI_ONLY
289#define sci_init_pins_scif NULL
290#endif
291
292#if defined(SCIF_ONLY) || defined(SCI_AND_SCIF)
293#if defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712) 287#if defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
294static void sci_init_pins_scif(struct uart_port* port, unsigned int cflag) 288static void sci_init_pins_scif(struct uart_port* port, unsigned int cflag)
295{ 289{
@@ -449,7 +443,6 @@ static inline int scif_rxroom(struct uart_port *port)
449 return sci_in(port, SCFDR) & SCIF_RFDC_MASK; 443 return sci_in(port, SCFDR) & SCIF_RFDC_MASK;
450} 444}
451#endif 445#endif
452#endif /* SCIF_ONLY || SCI_AND_SCIF */
453 446
454static inline int sci_txroom(struct uart_port *port) 447static inline int sci_txroom(struct uart_port *port)
455{ 448{
@@ -485,12 +478,10 @@ static void sci_transmit_chars(struct uart_port *port)
485 return; 478 return;
486 } 479 }
487 480
488#ifndef SCI_ONLY 481 if (port->type == PORT_SCI)
489 if (port->type == PORT_SCIF)
490 count = scif_txroom(port);
491 else
492#endif
493 count = sci_txroom(port); 482 count = sci_txroom(port);
483 else
484 count = scif_txroom(port);
494 485
495 do { 486 do {
496 unsigned char c; 487 unsigned char c;
@@ -519,12 +510,10 @@ static void sci_transmit_chars(struct uart_port *port)
519 } else { 510 } else {
520 ctrl = sci_in(port, SCSCR); 511 ctrl = sci_in(port, SCSCR);
521 512
522#if !defined(SCI_ONLY) 513 if (port->type != PORT_SCI) {
523 if (port->type == PORT_SCIF) {
524 sci_in(port, SCxSR); /* Dummy read */ 514 sci_in(port, SCxSR); /* Dummy read */
525 sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port)); 515 sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port));
526 } 516 }
527#endif
528 517
529 ctrl |= SCI_CTRL_FLAGS_TIE; 518 ctrl |= SCI_CTRL_FLAGS_TIE;
530 sci_out(port, SCSCR, ctrl); 519 sci_out(port, SCSCR, ctrl);
@@ -547,12 +536,10 @@ static inline void sci_receive_chars(struct uart_port *port)
547 return; 536 return;
548 537
549 while (1) { 538 while (1) {
550#if !defined(SCI_ONLY) 539 if (port->type == PORT_SCI)
551 if (port->type == PORT_SCIF)
552 count = scif_rxroom(port);
553 else
554#endif
555 count = sci_rxroom(port); 540 count = sci_rxroom(port);
541 else
542 count = scif_rxroom(port);
556 543
557 /* Don't copy more bytes than there is room for in the buffer */ 544 /* Don't copy more bytes than there is room for in the buffer */
558 count = tty_buffer_request_room(tty, count); 545 count = tty_buffer_request_room(tty, count);
@@ -727,7 +714,7 @@ static inline int sci_handle_breaks(struct uart_port *port)
727 714
728#if defined(SCIF_ORER) 715#if defined(SCIF_ORER)
729 /* XXX: Handle SCIF overrun error */ 716 /* XXX: Handle SCIF overrun error */
730 if (port->type == PORT_SCIF && (sci_in(port, SCLSR) & SCIF_ORER) != 0) { 717 if (port->type != PORT_SCI && (sci_in(port, SCLSR) & SCIF_ORER) != 0) {
731 sci_out(port, SCLSR, 0); 718 sci_out(port, SCLSR, 0);
732 if (tty_insert_flip_char(tty, 0, TTY_OVERRUN)) { 719 if (tty_insert_flip_char(tty, 0, TTY_OVERRUN)) {
733 copied++; 720 copied++;
@@ -810,26 +797,27 @@ static irqreturn_t sci_br_interrupt(int irq, void *ptr)
810 797
811static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr) 798static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
812{ 799{
813 unsigned short ssr_status, scr_status; 800 unsigned short ssr_status, scr_status;
814 struct uart_port *port = ptr; 801 struct uart_port *port = ptr;
802 irqreturn_t ret = IRQ_NONE;
815 803
816 ssr_status = sci_in(port,SCxSR); 804 ssr_status = sci_in(port,SCxSR);
817 scr_status = sci_in(port,SCSCR); 805 scr_status = sci_in(port,SCSCR);
818 806
819 /* Tx Interrupt */ 807 /* Tx Interrupt */
820 if ((ssr_status & 0x0020) && (scr_status & 0x0080)) 808 if ((ssr_status & 0x0020) && (scr_status & SCI_CTRL_FLAGS_TIE))
821 sci_tx_interrupt(irq, ptr); 809 ret = sci_tx_interrupt(irq, ptr);
822 /* Rx Interrupt */ 810 /* Rx Interrupt */
823 if ((ssr_status & 0x0002) && (scr_status & 0x0040)) 811 if ((ssr_status & 0x0002) && (scr_status & SCI_CTRL_FLAGS_RIE))
824 sci_rx_interrupt(irq, ptr); 812 ret = sci_rx_interrupt(irq, ptr);
825 /* Error Interrupt */ 813 /* Error Interrupt */
826 if ((ssr_status & 0x0080) && (scr_status & 0x0400)) 814 if ((ssr_status & 0x0080) && (scr_status & SCI_CTRL_FLAGS_REIE))
827 sci_er_interrupt(irq, ptr); 815 ret = sci_er_interrupt(irq, ptr);
828 /* Break Interrupt */ 816 /* Break Interrupt */
829 if ((ssr_status & 0x0010) && (scr_status & 0x0200)) 817 if ((ssr_status & 0x0010) && (scr_status & SCI_CTRL_FLAGS_REIE))
830 sci_br_interrupt(irq, ptr); 818 ret = sci_br_interrupt(irq, ptr);
831 819
832 return IRQ_HANDLED; 820 return ret;
833} 821}
834 822
835#if defined(CONFIG_CPU_FREQ) && defined(CONFIG_HAVE_CLK) 823#if defined(CONFIG_CPU_FREQ) && defined(CONFIG_HAVE_CLK)
@@ -1054,10 +1042,8 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
1054 1042
1055 sci_out(port, SCSCR, 0x00); /* TE=0, RE=0, CKE1=0 */ 1043 sci_out(port, SCSCR, 0x00); /* TE=0, RE=0, CKE1=0 */
1056 1044
1057#if !defined(SCI_ONLY) 1045 if (port->type != PORT_SCI)
1058 if (port->type == PORT_SCIF)
1059 sci_out(port, SCFCR, SCFCR_RFRST | SCFCR_TFRST); 1046 sci_out(port, SCFCR, SCFCR_RFRST | SCFCR_TFRST);
1060#endif
1061 1047
1062 smr_val = sci_in(port, SCSMR) & 3; 1048 smr_val = sci_in(port, SCSMR) & 3;
1063 if ((termios->c_cflag & CSIZE) == CS7) 1049 if ((termios->c_cflag & CSIZE) == CS7)
@@ -1099,6 +1085,7 @@ static const char *sci_type(struct uart_port *port)
1099 case PORT_SCI: return "sci"; 1085 case PORT_SCI: return "sci";
1100 case PORT_SCIF: return "scif"; 1086 case PORT_SCIF: return "scif";
1101 case PORT_IRDA: return "irda"; 1087 case PORT_IRDA: return "irda";
1088 case PORT_SCIFA: return "scifa";
1102 } 1089 }
1103 1090
1104 return NULL; 1091 return NULL;
@@ -1126,6 +1113,7 @@ static void sci_config_port(struct uart_port *port, int flags)
1126 s->init_pins = sci_init_pins_sci; 1113 s->init_pins = sci_init_pins_sci;
1127 break; 1114 break;
1128 case PORT_SCIF: 1115 case PORT_SCIF:
1116 case PORT_SCIFA:
1129 s->init_pins = sci_init_pins_scif; 1117 s->init_pins = sci_init_pins_scif;
1130 break; 1118 break;
1131 case PORT_IRDA: 1119 case PORT_IRDA:
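The sh-sci.c changes above and the sh-sci.h changes below retire the SCI_ONLY/SCIF_ONLY/SCI_AND_SCIF build-time switches: FIFO-room, pin setup and error handling now branch on port->type at run time, so SCI, SCIF and the newly recognised SCIFA ports can share one build. A small sketch of that compile-time-to-run-time refactor, with stand-in types and room counts:

/*
 * Standalone sketch of the sh-sci refactor: instead of compiling one of
 * two code paths behind SCI_ONLY/SCIF_ONLY, dispatch on the port type at
 * run time.  The enum, struct and room values are illustrative only.
 */
#include <stdio.h>

enum port_type_sketch { PORT_SCI_S, PORT_SCIF_S, PORT_SCIFA_S };

struct port_sketch { enum port_type_sketch type; };

static int sci_txroom_sketch(void)  { return 1; }	/* single TX register */
static int scif_txroom_sketch(void) { return 16; }	/* TX FIFO depth */

static int txroom(const struct port_sketch *port)
{
	/* run-time dispatch replaces the old #ifdef SCI_ONLY/SCIF_ONLY */
	if (port->type == PORT_SCI_S)
		return sci_txroom_sketch();
	return scif_txroom_sketch();
}

int main(void)
{
	struct port_sketch sci  = { .type = PORT_SCI_S };
	struct port_sketch scif = { .type = PORT_SCIF_S };

	printf("SCI  tx room: %d\n", txroom(&sci));
	printf("SCIF tx room: %d\n", txroom(&scif));
	return 0;
}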
diff --git a/drivers/serial/sh-sci.h b/drivers/serial/sh-sci.h
index 7cd28b226800..9f33b064172e 100644
--- a/drivers/serial/sh-sci.h
+++ b/drivers/serial/sh-sci.h
@@ -16,7 +16,6 @@
16# define SCPCR 0xA4000116 /* 16 bit SCI and SCIF */ 16# define SCPCR 0xA4000116 /* 16 bit SCI and SCIF */
17# define SCPDR 0xA4000136 /* 8 bit SCI and SCIF */ 17# define SCPDR 0xA4000136 /* 8 bit SCI and SCIF */
18# define SCSCR_INIT(port) 0x30 /* TIE=0,RIE=0,TE=1,RE=1 */ 18# define SCSCR_INIT(port) 0x30 /* TIE=0,RIE=0,TE=1,RE=1 */
19# define SCI_AND_SCIF
20#elif defined(CONFIG_CPU_SUBTYPE_SH7705) 19#elif defined(CONFIG_CPU_SUBTYPE_SH7705)
21# define SCIF0 0xA4400000 20# define SCIF0 0xA4400000
22# define SCIF2 0xA4410000 21# define SCIF2 0xA4410000
@@ -30,17 +29,15 @@
30 * SCIF0 (0xA4400000) -> Internal clock, SCK pin as serial clock output 29 * SCIF0 (0xA4400000) -> Internal clock, SCK pin as serial clock output
31 */ 30 */
32# define SCSCR_INIT(port) (port->mapbase == SCIF2) ? 0xF3 : 0xF0 31# define SCSCR_INIT(port) (port->mapbase == SCIF2) ? 0xF3 : 0xF0
33# define SCIF_ONLY
34#elif defined(CONFIG_CPU_SUBTYPE_SH7720) || \ 32#elif defined(CONFIG_CPU_SUBTYPE_SH7720) || \
35 defined(CONFIG_CPU_SUBTYPE_SH7721) 33 defined(CONFIG_CPU_SUBTYPE_SH7721)
36# define SCSCR_INIT(port) 0x0030 /* TIE=0,RIE=0,TE=1,RE=1 */ 34# define SCSCR_INIT(port) 0x0030 /* TIE=0,RIE=0,TE=1,RE=1 */
37# define SCIF_ONLY
38#define SCIF_ORER 0x0200 /* overrun error bit */ 35#define SCIF_ORER 0x0200 /* overrun error bit */
39#elif defined(CONFIG_SH_RTS7751R2D) 36#elif defined(CONFIG_SH_RTS7751R2D)
37# define SCSPTR1 0xFFE0001C /* 8 bit SCIF */
40# define SCSPTR2 0xFFE80020 /* 16 bit SCIF */ 38# define SCSPTR2 0xFFE80020 /* 16 bit SCIF */
41# define SCIF_ORER 0x0001 /* overrun error bit */ 39# define SCIF_ORER 0x0001 /* overrun error bit */
42# define SCSCR_INIT(port) 0x3a /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ 40# define SCSCR_INIT(port) 0x3a /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
43# define SCIF_ONLY
44#elif defined(CONFIG_CPU_SUBTYPE_SH7750) || \ 41#elif defined(CONFIG_CPU_SUBTYPE_SH7750) || \
45 defined(CONFIG_CPU_SUBTYPE_SH7750R) || \ 42 defined(CONFIG_CPU_SUBTYPE_SH7750R) || \
46 defined(CONFIG_CPU_SUBTYPE_SH7750S) || \ 43 defined(CONFIG_CPU_SUBTYPE_SH7750S) || \
@@ -53,28 +50,24 @@
53# define SCSCR_INIT(port) (((port)->type == PORT_SCI) ? \ 50# define SCSCR_INIT(port) (((port)->type == PORT_SCI) ? \
54 0x30 /* TIE=0,RIE=0,TE=1,RE=1 */ : \ 51 0x30 /* TIE=0,RIE=0,TE=1,RE=1 */ : \
55 0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ ) 52 0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ )
56# define SCI_AND_SCIF
57#elif defined(CONFIG_CPU_SUBTYPE_SH7760) 53#elif defined(CONFIG_CPU_SUBTYPE_SH7760)
58# define SCSPTR0 0xfe600024 /* 16 bit SCIF */ 54# define SCSPTR0 0xfe600024 /* 16 bit SCIF */
59# define SCSPTR1 0xfe610024 /* 16 bit SCIF */ 55# define SCSPTR1 0xfe610024 /* 16 bit SCIF */
60# define SCSPTR2 0xfe620024 /* 16 bit SCIF */ 56# define SCSPTR2 0xfe620024 /* 16 bit SCIF */
61# define SCIF_ORER 0x0001 /* overrun error bit */ 57# define SCIF_ORER 0x0001 /* overrun error bit */
62# define SCSCR_INIT(port) 0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ 58# define SCSCR_INIT(port) 0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
63# define SCIF_ONLY
64#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712) 59#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
65# define SCSPTR0 0xA4400000 /* 16 bit SCIF */ 60# define SCSPTR0 0xA4400000 /* 16 bit SCIF */
66# define SCIF_ORER 0x0001 /* overrun error bit */ 61# define SCIF_ORER 0x0001 /* overrun error bit */
67# define PACR 0xa4050100 62# define PACR 0xa4050100
68# define PBCR 0xa4050102 63# define PBCR 0xa4050102
69# define SCSCR_INIT(port) 0x3B 64# define SCSCR_INIT(port) 0x3B
70# define SCIF_ONLY
71#elif defined(CONFIG_CPU_SUBTYPE_SH7343) 65#elif defined(CONFIG_CPU_SUBTYPE_SH7343)
72# define SCSPTR0 0xffe00010 /* 16 bit SCIF */ 66# define SCSPTR0 0xffe00010 /* 16 bit SCIF */
73# define SCSPTR1 0xffe10010 /* 16 bit SCIF */ 67# define SCSPTR1 0xffe10010 /* 16 bit SCIF */
74# define SCSPTR2 0xffe20010 /* 16 bit SCIF */ 68# define SCSPTR2 0xffe20010 /* 16 bit SCIF */
75# define SCSPTR3 0xffe30010 /* 16 bit SCIF */ 69# define SCSPTR3 0xffe30010 /* 16 bit SCIF */
76# define SCSCR_INIT(port) 0x32 /* TIE=0,RIE=0,TE=1,RE=1,REIE=0,CKE=1 */ 70# define SCSCR_INIT(port) 0x32 /* TIE=0,RIE=0,TE=1,RE=1,REIE=0,CKE=1 */
77# define SCIF_ONLY
78#elif defined(CONFIG_CPU_SUBTYPE_SH7722) 71#elif defined(CONFIG_CPU_SUBTYPE_SH7722)
79# define PADR 0xA4050120 72# define PADR 0xA4050120
80# define PSDR 0xA405013e 73# define PSDR 0xA405013e
@@ -82,7 +75,6 @@
82# define PSCR 0xA405011E 75# define PSCR 0xA405011E
83# define SCIF_ORER 0x0001 /* overrun error bit */ 76# define SCIF_ORER 0x0001 /* overrun error bit */
84# define SCSCR_INIT(port) 0x0038 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ 77# define SCSCR_INIT(port) 0x0038 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
85# define SCIF_ONLY
86#elif defined(CONFIG_CPU_SUBTYPE_SH7366) 78#elif defined(CONFIG_CPU_SUBTYPE_SH7366)
87# define SCPDR0 0xA405013E /* 16 bit SCIF0 PSDR */ 79# define SCPDR0 0xA405013E /* 16 bit SCIF0 PSDR */
88# define SCSPTR0 SCPDR0 80# define SCSPTR0 SCPDR0
@@ -97,12 +89,10 @@
97# define SCSPTR5 0xa4050128 89# define SCSPTR5 0xa4050128
98# define SCIF_ORER 0x0001 /* overrun error bit */ 90# define SCIF_ORER 0x0001 /* overrun error bit */
99# define SCSCR_INIT(port) 0x0038 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ 91# define SCSCR_INIT(port) 0x0038 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
100# define SCIF_ONLY
101#elif defined(CONFIG_CPU_SUBTYPE_SH4_202) 92#elif defined(CONFIG_CPU_SUBTYPE_SH4_202)
102# define SCSPTR2 0xffe80020 /* 16 bit SCIF */ 93# define SCSPTR2 0xffe80020 /* 16 bit SCIF */
103# define SCIF_ORER 0x0001 /* overrun error bit */ 94# define SCIF_ORER 0x0001 /* overrun error bit */
104# define SCSCR_INIT(port) 0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ 95# define SCSCR_INIT(port) 0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
105# define SCIF_ONLY
106#elif defined(CONFIG_CPU_SUBTYPE_SH5_101) || defined(CONFIG_CPU_SUBTYPE_SH5_103) 96#elif defined(CONFIG_CPU_SUBTYPE_SH5_101) || defined(CONFIG_CPU_SUBTYPE_SH5_103)
107# define SCIF_BASE_ADDR 0x01030000 97# define SCIF_BASE_ADDR 0x01030000
108# define SCIF_ADDR_SH5 PHYS_PERIPHERAL_BLOCK+SCIF_BASE_ADDR 98# define SCIF_ADDR_SH5 PHYS_PERIPHERAL_BLOCK+SCIF_BASE_ADDR
@@ -111,14 +101,11 @@
111# define SCSPTR2 ((port->mapbase)+SCIF_PTR2_OFFS) /* 16 bit SCIF */ 101# define SCSPTR2 ((port->mapbase)+SCIF_PTR2_OFFS) /* 16 bit SCIF */
112# define SCLSR2 ((port->mapbase)+SCIF_LSR2_OFFS) /* 16 bit SCIF */ 102# define SCLSR2 ((port->mapbase)+SCIF_LSR2_OFFS) /* 16 bit SCIF */
113# define SCSCR_INIT(port) 0x38 /* TIE=0,RIE=0, TE=1,RE=1,REIE=1 */ 103# define SCSCR_INIT(port) 0x38 /* TIE=0,RIE=0, TE=1,RE=1,REIE=1 */
114# define SCIF_ONLY
115#elif defined(CONFIG_H83007) || defined(CONFIG_H83068) 104#elif defined(CONFIG_H83007) || defined(CONFIG_H83068)
116# define SCSCR_INIT(port) 0x30 /* TIE=0,RIE=0,TE=1,RE=1 */ 105# define SCSCR_INIT(port) 0x30 /* TIE=0,RIE=0,TE=1,RE=1 */
117# define SCI_ONLY
118# define H8300_SCI_DR(ch) *(volatile char *)(P1DR + h8300_sci_pins[ch].port) 106# define H8300_SCI_DR(ch) *(volatile char *)(P1DR + h8300_sci_pins[ch].port)
119#elif defined(CONFIG_H8S2678) 107#elif defined(CONFIG_H8S2678)
120# define SCSCR_INIT(port) 0x30 /* TIE=0,RIE=0,TE=1,RE=1 */ 108# define SCSCR_INIT(port) 0x30 /* TIE=0,RIE=0,TE=1,RE=1 */
121# define SCI_ONLY
122# define H8300_SCI_DR(ch) *(volatile char *)(P1DR + h8300_sci_pins[ch].port) 109# define H8300_SCI_DR(ch) *(volatile char *)(P1DR + h8300_sci_pins[ch].port)
123#elif defined(CONFIG_CPU_SUBTYPE_SH7763) 110#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
124# define SCSPTR0 0xffe00024 /* 16 bit SCIF */ 111# define SCSPTR0 0xffe00024 /* 16 bit SCIF */
@@ -126,20 +113,17 @@
126# define SCSPTR2 0xffe10020 /* 16 bit SCIF/IRDA */ 113# define SCSPTR2 0xffe10020 /* 16 bit SCIF/IRDA */
127# define SCIF_ORER 0x0001 /* overrun error bit */ 114# define SCIF_ORER 0x0001 /* overrun error bit */
128# define SCSCR_INIT(port) 0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ 115# define SCSCR_INIT(port) 0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
129# define SCIF_ONLY
130#elif defined(CONFIG_CPU_SUBTYPE_SH7770) 116#elif defined(CONFIG_CPU_SUBTYPE_SH7770)
131# define SCSPTR0 0xff923020 /* 16 bit SCIF */ 117# define SCSPTR0 0xff923020 /* 16 bit SCIF */
132# define SCSPTR1 0xff924020 /* 16 bit SCIF */ 118# define SCSPTR1 0xff924020 /* 16 bit SCIF */
133# define SCSPTR2 0xff925020 /* 16 bit SCIF */ 119# define SCSPTR2 0xff925020 /* 16 bit SCIF */
134# define SCIF_ORER 0x0001 /* overrun error bit */ 120# define SCIF_ORER 0x0001 /* overrun error bit */
135# define SCSCR_INIT(port) 0x3c /* TIE=0,RIE=0,TE=1,RE=1,REIE=1,cke=2 */ 121# define SCSCR_INIT(port) 0x3c /* TIE=0,RIE=0,TE=1,RE=1,REIE=1,cke=2 */
136# define SCIF_ONLY
137#elif defined(CONFIG_CPU_SUBTYPE_SH7780) 122#elif defined(CONFIG_CPU_SUBTYPE_SH7780)
138# define SCSPTR0 0xffe00024 /* 16 bit SCIF */ 123# define SCSPTR0 0xffe00024 /* 16 bit SCIF */
139# define SCSPTR1 0xffe10024 /* 16 bit SCIF */ 124# define SCSPTR1 0xffe10024 /* 16 bit SCIF */
140# define SCIF_ORER 0x0001 /* Overrun error bit */ 125# define SCIF_ORER 0x0001 /* Overrun error bit */
141# define SCSCR_INIT(port) 0x3a /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ 126# define SCSCR_INIT(port) 0x3a /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
142# define SCIF_ONLY
143#elif defined(CONFIG_CPU_SUBTYPE_SH7785) 127#elif defined(CONFIG_CPU_SUBTYPE_SH7785)
144# define SCSPTR0 0xffea0024 /* 16 bit SCIF */ 128# define SCSPTR0 0xffea0024 /* 16 bit SCIF */
145# define SCSPTR1 0xffeb0024 /* 16 bit SCIF */ 129# define SCSPTR1 0xffeb0024 /* 16 bit SCIF */
@@ -149,7 +133,6 @@
149# define SCSPTR5 0xffef0024 /* 16 bit SCIF */ 133# define SCSPTR5 0xffef0024 /* 16 bit SCIF */
150# define SCIF_OPER 0x0001 /* Overrun error bit */ 134# define SCIF_OPER 0x0001 /* Overrun error bit */
151# define SCSCR_INIT(port) 0x3a /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ 135# define SCSCR_INIT(port) 0x3a /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
152# define SCIF_ONLY
153#elif defined(CONFIG_CPU_SUBTYPE_SH7203) || \ 136#elif defined(CONFIG_CPU_SUBTYPE_SH7203) || \
154 defined(CONFIG_CPU_SUBTYPE_SH7206) || \ 137 defined(CONFIG_CPU_SUBTYPE_SH7206) || \
155 defined(CONFIG_CPU_SUBTYPE_SH7263) 138 defined(CONFIG_CPU_SUBTYPE_SH7263)
@@ -158,14 +141,12 @@
158# define SCSPTR2 0xfffe9020 /* 16 bit SCIF */ 141# define SCSPTR2 0xfffe9020 /* 16 bit SCIF */
159# define SCSPTR3 0xfffe9820 /* 16 bit SCIF */ 142# define SCSPTR3 0xfffe9820 /* 16 bit SCIF */
160# define SCSCR_INIT(port) 0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ 143# define SCSCR_INIT(port) 0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
161# define SCIF_ONLY
162#elif defined(CONFIG_CPU_SUBTYPE_SH7619) 144#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
163# define SCSPTR0 0xf8400020 /* 16 bit SCIF */ 145# define SCSPTR0 0xf8400020 /* 16 bit SCIF */
164# define SCSPTR1 0xf8410020 /* 16 bit SCIF */ 146# define SCSPTR1 0xf8410020 /* 16 bit SCIF */
165# define SCSPTR2 0xf8420020 /* 16 bit SCIF */ 147# define SCSPTR2 0xf8420020 /* 16 bit SCIF */
166# define SCIF_ORER 0x0001 /* overrun error bit */ 148# define SCIF_ORER 0x0001 /* overrun error bit */
167# define SCSCR_INIT(port) 0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ 149# define SCSCR_INIT(port) 0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
168# define SCIF_ONLY
169#elif defined(CONFIG_CPU_SUBTYPE_SHX3) 150#elif defined(CONFIG_CPU_SUBTYPE_SHX3)
170# define SCSPTR0 0xffc30020 /* 16 bit SCIF */ 151# define SCSPTR0 0xffc30020 /* 16 bit SCIF */
171# define SCSPTR1 0xffc40020 /* 16 bit SCIF */ 152# define SCSPTR1 0xffc40020 /* 16 bit SCIF */
@@ -173,7 +154,6 @@
173# define SCSPTR3 0xffc60020 /* 16 bit SCIF */ 154# define SCSPTR3 0xffc60020 /* 16 bit SCIF */
174# define SCIF_ORER 0x0001 /* Overrun error bit */ 155# define SCIF_ORER 0x0001 /* Overrun error bit */
175# define SCSCR_INIT(port) 0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ 156# define SCSCR_INIT(port) 0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
176# define SCIF_ONLY
177#else 157#else
178# error CPU subtype not defined 158# error CPU subtype not defined
179#endif 159#endif
@@ -186,6 +166,7 @@
186#if defined(CONFIG_CPU_SUBTYPE_SH7750) || \ 166#if defined(CONFIG_CPU_SUBTYPE_SH7750) || \
187 defined(CONFIG_CPU_SUBTYPE_SH7091) || \ 167 defined(CONFIG_CPU_SUBTYPE_SH7091) || \
188 defined(CONFIG_CPU_SUBTYPE_SH7750R) || \ 168 defined(CONFIG_CPU_SUBTYPE_SH7750R) || \
169 defined(CONFIG_CPU_SUBTYPE_SH7722) || \
189 defined(CONFIG_CPU_SUBTYPE_SH7750S) || \ 170 defined(CONFIG_CPU_SUBTYPE_SH7750S) || \
190 defined(CONFIG_CPU_SUBTYPE_SH7751) || \ 171 defined(CONFIG_CPU_SUBTYPE_SH7751) || \
191 defined(CONFIG_CPU_SUBTYPE_SH7751R) || \ 172 defined(CONFIG_CPU_SUBTYPE_SH7751R) || \
@@ -244,55 +225,28 @@
244# define SCIF_TXROOM_MAX 16 225# define SCIF_TXROOM_MAX 16
245#endif 226#endif
246 227
247#if defined(SCI_ONLY) 228#define SCxSR_TEND(port) (((port)->type == PORT_SCI) ? SCI_TEND : SCIF_TEND)
248# define SCxSR_TEND(port) SCI_TEND 229#define SCxSR_ERRORS(port) (((port)->type == PORT_SCI) ? SCI_ERRORS : SCIF_ERRORS)
249# define SCxSR_ERRORS(port) SCI_ERRORS 230#define SCxSR_RDxF(port) (((port)->type == PORT_SCI) ? SCI_RDRF : SCIF_RDF)
250# define SCxSR_RDxF(port) SCI_RDRF 231#define SCxSR_TDxE(port) (((port)->type == PORT_SCI) ? SCI_TDRE : SCIF_TDFE)
251# define SCxSR_TDxE(port) SCI_TDRE 232#define SCxSR_FER(port) (((port)->type == PORT_SCI) ? SCI_FER : SCIF_FER)
252# define SCxSR_ORER(port) SCI_ORER 233#define SCxSR_PER(port) (((port)->type == PORT_SCI) ? SCI_PER : SCIF_PER)
253# define SCxSR_FER(port) SCI_FER 234#define SCxSR_BRK(port) (((port)->type == PORT_SCI) ? 0x00 : SCIF_BRK)
254# define SCxSR_PER(port) SCI_PER 235
255# define SCxSR_BRK(port) 0x00
256# define SCxSR_RDxF_CLEAR(port) 0xbc
257# define SCxSR_ERROR_CLEAR(port) 0xc4
258# define SCxSR_TDxE_CLEAR(port) 0x78
259# define SCxSR_BREAK_CLEAR(port) 0xc4
260#elif defined(SCIF_ONLY)
261# define SCxSR_TEND(port) SCIF_TEND
262# define SCxSR_ERRORS(port) SCIF_ERRORS
263# define SCxSR_RDxF(port) SCIF_RDF
264# define SCxSR_TDxE(port) SCIF_TDFE
265#if defined(CONFIG_CPU_SUBTYPE_SH7705) 236#if defined(CONFIG_CPU_SUBTYPE_SH7705)
266# define SCxSR_ORER(port) SCIF_ORER 237# define SCxSR_ORER(port) (((port)->type == PORT_SCI) ? SCI_ORER : SCIF_ORER)
267#else 238#else
268# define SCxSR_ORER(port) 0x0000 239# define SCxSR_ORER(port) (((port)->type == PORT_SCI) ? SCI_ORER : 0x0000)
269#endif 240#endif
270# define SCxSR_FER(port) SCIF_FER 241
271# define SCxSR_PER(port) SCIF_PER
272# define SCxSR_BRK(port) SCIF_BRK
273#if defined(CONFIG_CPU_SUBTYPE_SH7705) || \ 242#if defined(CONFIG_CPU_SUBTYPE_SH7705) || \
274 defined(CONFIG_CPU_SUBTYPE_SH7720) || \ 243 defined(CONFIG_CPU_SUBTYPE_SH7720) || \
275 defined(CONFIG_CPU_SUBTYPE_SH7721) 244 defined(CONFIG_CPU_SUBTYPE_SH7721)
276# define SCxSR_RDxF_CLEAR(port) (sci_in(port,SCxSR)&0xfffc) 245# define SCxSR_RDxF_CLEAR(port) (sci_in(port, SCxSR) & 0xfffc)
277# define SCxSR_ERROR_CLEAR(port) (sci_in(port,SCxSR)&0xfd73) 246# define SCxSR_ERROR_CLEAR(port) (sci_in(port, SCxSR) & 0xfd73)
278# define SCxSR_TDxE_CLEAR(port) (sci_in(port,SCxSR)&0xffdf) 247# define SCxSR_TDxE_CLEAR(port) (sci_in(port, SCxSR) & 0xffdf)
279# define SCxSR_BREAK_CLEAR(port) (sci_in(port,SCxSR)&0xffe3) 248# define SCxSR_BREAK_CLEAR(port) (sci_in(port, SCxSR) & 0xffe3)
280#else
281/* SH7705 can also use this, clearing is same between 7705 and 7709 */
282# define SCxSR_RDxF_CLEAR(port) 0x00fc
283# define SCxSR_ERROR_CLEAR(port) 0x0073
284# define SCxSR_TDxE_CLEAR(port) 0x00df
285# define SCxSR_BREAK_CLEAR(port) 0x00e3
286#endif
287#else 249#else
288# define SCxSR_TEND(port) (((port)->type == PORT_SCI) ? SCI_TEND : SCIF_TEND)
289# define SCxSR_ERRORS(port) (((port)->type == PORT_SCI) ? SCI_ERRORS : SCIF_ERRORS)
290# define SCxSR_RDxF(port) (((port)->type == PORT_SCI) ? SCI_RDRF : SCIF_RDF)
291# define SCxSR_TDxE(port) (((port)->type == PORT_SCI) ? SCI_TDRE : SCIF_TDFE)
292# define SCxSR_ORER(port) (((port)->type == PORT_SCI) ? SCI_ORER : 0x0000)
293# define SCxSR_FER(port) (((port)->type == PORT_SCI) ? SCI_FER : SCIF_FER)
294# define SCxSR_PER(port) (((port)->type == PORT_SCI) ? SCI_PER : SCIF_PER)
295# define SCxSR_BRK(port) (((port)->type == PORT_SCI) ? 0x00 : SCIF_BRK)
296# define SCxSR_RDxF_CLEAR(port) (((port)->type == PORT_SCI) ? 0xbc : 0x00fc) 250# define SCxSR_RDxF_CLEAR(port) (((port)->type == PORT_SCI) ? 0xbc : 0x00fc)
297# define SCxSR_ERROR_CLEAR(port) (((port)->type == PORT_SCI) ? 0xc4 : 0x0073) 251# define SCxSR_ERROR_CLEAR(port) (((port)->type == PORT_SCI) ? 0xc4 : 0x0073)
298# define SCxSR_TDxE_CLEAR(port) (((port)->type == PORT_SCI) ? 0x78 : 0x00df) 252# define SCxSR_TDxE_CLEAR(port) (((port)->type == PORT_SCI) ? 0x78 : 0x00df)
@@ -335,18 +289,18 @@
335#define CPU_SCIx_FNS(name, sci_offset, sci_size, scif_offset, scif_size)\ 289#define CPU_SCIx_FNS(name, sci_offset, sci_size, scif_offset, scif_size)\
336 static inline unsigned int sci_##name##_in(struct uart_port *port) \ 290 static inline unsigned int sci_##name##_in(struct uart_port *port) \
337 { \ 291 { \
338 if (port->type == PORT_SCI) { \ 292 if (port->type == PORT_SCIF) { \
339 SCI_IN(sci_size, sci_offset) \ 293 SCI_IN(scif_size, scif_offset) \
340 } else { \ 294 } else { /* PORT_SCI or PORT_SCIFA */ \
341 SCI_IN(scif_size, scif_offset); \ 295 SCI_IN(sci_size, sci_offset); \
342 } \ 296 } \
343 } \ 297 } \
344 static inline void sci_##name##_out(struct uart_port *port, unsigned int value) \ 298 static inline void sci_##name##_out(struct uart_port *port, unsigned int value) \
345 { \ 299 { \
346 if (port->type == PORT_SCI) { \ 300 if (port->type == PORT_SCIF) { \
347 SCI_OUT(sci_size, sci_offset, value) \ 301 SCI_OUT(scif_size, scif_offset, value) \
348 } else { \ 302 } else { /* PORT_SCI or PORT_SCIFA */ \
349 SCI_OUT(scif_size, scif_offset, value); \ 303 SCI_OUT(sci_size, sci_offset, value); \
350 } \ 304 } \
351 } 305 }
352 306
@@ -574,18 +528,20 @@ static inline int sci_rxd_in(struct uart_port *port)
574 defined(CONFIG_CPU_SUBTYPE_SH7751R) || \ 528 defined(CONFIG_CPU_SUBTYPE_SH7751R) || \
575 defined(CONFIG_CPU_SUBTYPE_SH7750R) || \ 529 defined(CONFIG_CPU_SUBTYPE_SH7750R) || \
576 defined(CONFIG_CPU_SUBTYPE_SH7750S) || \ 530 defined(CONFIG_CPU_SUBTYPE_SH7750S) || \
577 defined(CONFIG_CPU_SUBTYPE_SH7091) || \ 531 defined(CONFIG_CPU_SUBTYPE_SH7091)
578 defined(CONFIG_CPU_SUBTYPE_SH4_202)
579static inline int sci_rxd_in(struct uart_port *port) 532static inline int sci_rxd_in(struct uart_port *port)
580{ 533{
581#ifndef SCIF_ONLY
582 if (port->mapbase == 0xffe00000) 534 if (port->mapbase == 0xffe00000)
583 return ctrl_inb(SCSPTR1)&0x01 ? 1 : 0; /* SCI */ 535 return ctrl_inb(SCSPTR1)&0x01 ? 1 : 0; /* SCI */
584#endif
585#ifndef SCI_ONLY
586 if (port->mapbase == 0xffe80000) 536 if (port->mapbase == 0xffe80000)
587 return ctrl_inw(SCSPTR2)&0x0001 ? 1 : 0; /* SCIF */ 537 return ctrl_inw(SCSPTR2)&0x0001 ? 1 : 0; /* SCIF */
588#endif 538 return 1;
539}
540#elif defined(CONFIG_CPU_SUBTYPE_SH4_202)
541static inline int sci_rxd_in(struct uart_port *port)
542{
543 if (port->mapbase == 0xffe80000)
544 return ctrl_inw(SCSPTR2)&0x0001 ? 1 : 0; /* SCIF */
589 return 1; 545 return 1;
590} 546}
591#elif defined(CONFIG_CPU_SUBTYPE_SH7760) 547#elif defined(CONFIG_CPU_SUBTYPE_SH7760)
@@ -651,7 +607,7 @@ static inline int sci_rxd_in(struct uart_port *port)
651#elif defined(CONFIG_CPU_SUBTYPE_SH5_101) || defined(CONFIG_CPU_SUBTYPE_SH5_103) 607#elif defined(CONFIG_CPU_SUBTYPE_SH5_101) || defined(CONFIG_CPU_SUBTYPE_SH5_103)
652static inline int sci_rxd_in(struct uart_port *port) 608static inline int sci_rxd_in(struct uart_port *port)
653{ 609{
654 return sci_in(port, SCSPTR)&0x0001 ? 1 : 0; /* SCIF */ 610 return sci_in(port, SCSPTR2)&0x0001 ? 1 : 0; /* SCIF */
655} 611}
656#elif defined(__H8300H__) || defined(__H8300S__) 612#elif defined(__H8300H__) || defined(__H8300S__)
657static inline int sci_rxd_in(struct uart_port *port) 613static inline int sci_rxd_in(struct uart_port *port)
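
The sh-sci.h hunks above delete the per-CPU SCI_ONLY/SCIF_ONLY compile-time switches and instead pick the SCI or SCIF status bits at run time from port->type. A minimal userspace sketch of that dispatch pattern follows; the bit values and the stub struct uart_port are stand-ins for illustration only, the real definitions live in drivers/serial/sh-sci.h and include/linux/serial_core.h.

#include <stdio.h>

/* Hypothetical stand-ins for the real SCI/SCIF bit values and port types. */
#define SCI_TEND	0x04
#define SCIF_TEND	0x40
#define PORT_SCI	1
#define PORT_SCIF	2

/* Minimal stub of struct uart_port, enough for the macro below. */
struct uart_port { int type; };

/* One macro serves both controller types by checking port->type at run
 * time, which is what allows the SCI_ONLY/SCIF_ONLY variants to go away. */
#define SCxSR_TEND(port) (((port)->type == PORT_SCI) ? SCI_TEND : SCIF_TEND)

int main(void)
{
	struct uart_port sci  = { .type = PORT_SCI  };
	struct uart_port scif = { .type = PORT_SCIF };

	printf("SCI  TEND mask: 0x%02x\n", SCxSR_TEND(&sci));
	printf("SCIF TEND mask: 0x%02x\n", SCxSR_TEND(&scif));
	return 0;
}
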
diff --git a/drivers/serial/uartlite.c b/drivers/serial/uartlite.c
index 6a3f8fb0c9dd..3317148a4b93 100644
--- a/drivers/serial/uartlite.c
+++ b/drivers/serial/uartlite.c
@@ -286,8 +286,8 @@ static void ulite_release_port(struct uart_port *port)
286 286
287static int ulite_request_port(struct uart_port *port) 287static int ulite_request_port(struct uart_port *port)
288{ 288{
289 pr_debug("ulite console: port=%p; port->mapbase=%x\n", 289 pr_debug("ulite console: port=%p; port->mapbase=%llx\n",
290 port, port->mapbase); 290 port, (unsigned long long) port->mapbase);
291 291
292 if (!request_mem_region(port->mapbase, ULITE_REGION, "uartlite")) { 292 if (!request_mem_region(port->mapbase, ULITE_REGION, "uartlite")) {
293 dev_err(port->dev, "Memory region busy\n"); 293 dev_err(port->dev, "Memory region busy\n");
diff --git a/drivers/sh/maple/maple.c b/drivers/sh/maple/maple.c
index d1812d32f47d..63f0de29aa14 100644
--- a/drivers/sh/maple/maple.c
+++ b/drivers/sh/maple/maple.c
@@ -827,7 +827,7 @@ static int __init maple_bus_init(void)
827 827
828 maple_queue_cache = 828 maple_queue_cache =
829 kmem_cache_create("maple_queue_cache", 0x400, 0, 829 kmem_cache_create("maple_queue_cache", 0x400, 0,
830 SLAB_POISON|SLAB_HWCACHE_ALIGN, NULL); 830 SLAB_HWCACHE_ALIGN, NULL);
831 831
832 if (!maple_queue_cache) 832 if (!maple_queue_cache)
833 goto cleanup_bothirqs; 833 goto cleanup_bothirqs;
diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/atmel_spi.c
index 02f9320f3efc..8abae4ad0fa5 100644
--- a/drivers/spi/atmel_spi.c
+++ b/drivers/spi/atmel_spi.c
@@ -766,6 +766,7 @@ static int __init atmel_spi_probe(struct platform_device *pdev)
766 /* Initialize the hardware */ 766 /* Initialize the hardware */
767 clk_enable(clk); 767 clk_enable(clk);
768 spi_writel(as, CR, SPI_BIT(SWRST)); 768 spi_writel(as, CR, SPI_BIT(SWRST));
769 spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
769 spi_writel(as, MR, SPI_BIT(MSTR) | SPI_BIT(MODFDIS)); 770 spi_writel(as, MR, SPI_BIT(MSTR) | SPI_BIT(MODFDIS));
770 spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS)); 771 spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
771 spi_writel(as, CR, SPI_BIT(SPIEN)); 772 spi_writel(as, CR, SPI_BIT(SPIEN));
@@ -782,6 +783,7 @@ static int __init atmel_spi_probe(struct platform_device *pdev)
782 783
783out_reset_hw: 784out_reset_hw:
784 spi_writel(as, CR, SPI_BIT(SWRST)); 785 spi_writel(as, CR, SPI_BIT(SWRST));
786 spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
785 clk_disable(clk); 787 clk_disable(clk);
786 free_irq(irq, master); 788 free_irq(irq, master);
787out_unmap_regs: 789out_unmap_regs:
@@ -805,6 +807,7 @@ static int __exit atmel_spi_remove(struct platform_device *pdev)
805 spin_lock_irq(&as->lock); 807 spin_lock_irq(&as->lock);
806 as->stopping = 1; 808 as->stopping = 1;
807 spi_writel(as, CR, SPI_BIT(SWRST)); 809 spi_writel(as, CR, SPI_BIT(SWRST));
810 spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
808 spi_readl(as, SR); 811 spi_readl(as, SR);
809 spin_unlock_irq(&as->lock); 812 spin_unlock_irq(&as->lock);
810 813
diff --git a/drivers/spi/au1550_spi.c b/drivers/spi/au1550_spi.c
index 87b73e0169c5..b02f25c702fd 100644
--- a/drivers/spi/au1550_spi.c
+++ b/drivers/spi/au1550_spi.c
@@ -369,10 +369,23 @@ static int au1550_spi_dma_txrxb(struct spi_device *spi, struct spi_transfer *t)
369 dma_rx_addr = t->rx_dma; 369 dma_rx_addr = t->rx_dma;
370 370
371 /* 371 /*
372 * check if buffers are already dma mapped, map them otherwise 372 * check if buffers are already dma mapped, map them otherwise:
373 * - first map the TX buffer, so cache data gets written to memory
374 * - then map the RX buffer, so that cache entries (with
375 * soon-to-be-stale data) get removed
373 * use rx buffer in place of tx if tx buffer was not provided 376 * use rx buffer in place of tx if tx buffer was not provided
374 * use temp rx buffer (preallocated or realloc to fit) for rx dma 377 * use temp rx buffer (preallocated or realloc to fit) for rx dma
375 */ 378 */
379 if (t->tx_buf) {
380 if (t->tx_dma == 0) { /* if DMA_ADDR_INVALID, map it */
381 dma_tx_addr = dma_map_single(hw->dev,
382 (void *)t->tx_buf,
383 t->len, DMA_TO_DEVICE);
384 if (dma_mapping_error(hw->dev, dma_tx_addr))
385 dev_err(hw->dev, "tx dma map error\n");
386 }
387 }
388
376 if (t->rx_buf) { 389 if (t->rx_buf) {
377 if (t->rx_dma == 0) { /* if DMA_ADDR_INVALID, map it */ 390 if (t->rx_dma == 0) { /* if DMA_ADDR_INVALID, map it */
378 dma_rx_addr = dma_map_single(hw->dev, 391 dma_rx_addr = dma_map_single(hw->dev,
@@ -396,15 +409,8 @@ static int au1550_spi_dma_txrxb(struct spi_device *spi, struct spi_transfer *t)
396 dma_sync_single_for_device(hw->dev, dma_rx_addr, 409 dma_sync_single_for_device(hw->dev, dma_rx_addr,
397 t->len, DMA_FROM_DEVICE); 410 t->len, DMA_FROM_DEVICE);
398 } 411 }
399 if (t->tx_buf) { 412
400 if (t->tx_dma == 0) { /* if DMA_ADDR_INVALID, map it */ 413 if (!t->tx_buf) {
401 dma_tx_addr = dma_map_single(hw->dev,
402 (void *)t->tx_buf,
403 t->len, DMA_TO_DEVICE);
404 if (dma_mapping_error(hw->dev, dma_tx_addr))
405 dev_err(hw->dev, "tx dma map error\n");
406 }
407 } else {
408 dma_sync_single_for_device(hw->dev, dma_rx_addr, 414 dma_sync_single_for_device(hw->dev, dma_rx_addr,
409 t->len, DMA_BIDIRECTIONAL); 415 t->len, DMA_BIDIRECTIONAL);
410 hw->tx = hw->rx; 416 hw->tx = hw->rx;
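
The au1550_spi hunk above moves the TX mapping ahead of the RX mapping so the DMA_TO_DEVICE writeback happens before the DMA_FROM_DEVICE invalidate, which matters when the two buffers overlap. A kernel-style sketch of that ordering (an illustrative helper, not part of the driver) looks like:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int map_txrx(struct device *dev, void *tx_buf, void *rx_buf,
		    size_t len, dma_addr_t *tx_dma, dma_addr_t *rx_dma)
{
	/* Writeback first: the device must see the data the CPU prepared. */
	*tx_dma = dma_map_single(dev, tx_buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *tx_dma))
		return -ENOMEM;

	/* Invalidate second, so stale cache lines cannot clobber the data
	 * the device writes back; doing it the other way round can lose
	 * TX data when TX and RX share memory. */
	*rx_dma = dma_map_single(dev, rx_buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *rx_dma)) {
		dma_unmap_single(dev, *tx_dma, len, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	return 0;
}
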
diff --git a/drivers/spi/mpc52xx_psc_spi.c b/drivers/spi/mpc52xx_psc_spi.c
index 0debe11b67b4..3b97803e1d11 100644
--- a/drivers/spi/mpc52xx_psc_spi.c
+++ b/drivers/spi/mpc52xx_psc_spi.c
@@ -142,6 +142,7 @@ static int mpc52xx_psc_spi_transfer_rxtx(struct spi_device *spi,
142 unsigned rfalarm; 142 unsigned rfalarm;
143 unsigned send_at_once = MPC52xx_PSC_BUFSIZE; 143 unsigned send_at_once = MPC52xx_PSC_BUFSIZE;
144 unsigned recv_at_once; 144 unsigned recv_at_once;
145 int last_block = 0;
145 146
146 if (!t->tx_buf && !t->rx_buf && t->len) 147 if (!t->tx_buf && !t->rx_buf && t->len)
147 return -EINVAL; 148 return -EINVAL;
@@ -151,15 +152,17 @@ static int mpc52xx_psc_spi_transfer_rxtx(struct spi_device *spi,
151 while (rb < t->len) { 152 while (rb < t->len) {
152 if (t->len - rb > MPC52xx_PSC_BUFSIZE) { 153 if (t->len - rb > MPC52xx_PSC_BUFSIZE) {
153 rfalarm = MPC52xx_PSC_RFALARM; 154 rfalarm = MPC52xx_PSC_RFALARM;
155 last_block = 0;
154 } else { 156 } else {
155 send_at_once = t->len - sb; 157 send_at_once = t->len - sb;
156 rfalarm = MPC52xx_PSC_BUFSIZE - (t->len - rb); 158 rfalarm = MPC52xx_PSC_BUFSIZE - (t->len - rb);
159 last_block = 1;
157 } 160 }
158 161
159 dev_dbg(&spi->dev, "send %d bytes...\n", send_at_once); 162 dev_dbg(&spi->dev, "send %d bytes...\n", send_at_once);
160 for (; send_at_once; sb++, send_at_once--) { 163 for (; send_at_once; sb++, send_at_once--) {
161 /* set EOF flag before the last word is sent */ 164 /* set EOF flag before the last word is sent */
162 if (send_at_once == 1) 165 if (send_at_once == 1 && last_block)
163 out_8(&psc->ircr2, 0x01); 166 out_8(&psc->ircr2, 0x01);
164 167
165 if (tx_buf) 168 if (tx_buf)
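
The mpc52xx_psc_spi fix above tracks whether the current chunk is the final one, so the EOF bit is raised only before the truly last word of the transfer rather than at the end of every buffer-sized block. A simplified standalone sketch of that loop structure, with hypothetical mark_eof()/send_word() hooks standing in for the PSC register writes:

#include <stddef.h>
#include <stdio.h>

static void send_word(unsigned char w) { printf("word 0x%02x\n", w); }
static void mark_eof(void)             { printf("EOF flagged\n"); }

#define BUFSIZE 4	/* stand-in for MPC52xx_PSC_BUFSIZE */

static void send_all(const unsigned char *buf, size_t len)
{
	size_t sent = 0;

	while (sent < len) {
		size_t chunk = len - sent;
		int last_block = 1;

		if (chunk > BUFSIZE) {
			chunk = BUFSIZE;
			last_block = 0;
		}

		for (; chunk; chunk--, sent++) {
			/* EOF only before the final word of the final chunk. */
			if (chunk == 1 && last_block)
				mark_eof();
			send_word(buf[sent]);
		}
	}
}

int main(void)
{
	unsigned char msg[6] = { 1, 2, 3, 4, 5, 6 };

	send_all(msg, sizeof(msg));
	return 0;
}
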
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c
index dae87b1a4c6e..cf12f2d84be2 100644
--- a/drivers/spi/pxa2xx_spi.c
+++ b/drivers/spi/pxa2xx_spi.c
@@ -352,21 +352,21 @@ static int map_dma_buffers(struct driver_data *drv_data)
352 } else 352 } else
353 drv_data->tx_map_len = drv_data->len; 353 drv_data->tx_map_len = drv_data->len;
354 354
355 /* Stream map the rx buffer */ 355 /* Stream map the tx buffer. Always do DMA_TO_DEVICE first
356 drv_data->rx_dma = dma_map_single(dev, drv_data->rx, 356 * so we flush the cache *before* invalidating it, in case
357 drv_data->rx_map_len, 357 * the tx and rx buffers overlap.
358 DMA_FROM_DEVICE); 358 */
359 if (dma_mapping_error(dev, drv_data->rx_dma))
360 return 0;
361
362 /* Stream map the tx buffer */
363 drv_data->tx_dma = dma_map_single(dev, drv_data->tx, 359 drv_data->tx_dma = dma_map_single(dev, drv_data->tx,
364 drv_data->tx_map_len, 360 drv_data->tx_map_len, DMA_TO_DEVICE);
365 DMA_TO_DEVICE); 361 if (dma_mapping_error(dev, drv_data->tx_dma))
362 return 0;
366 363
367 if (dma_mapping_error(dev, drv_data->tx_dma)) { 364 /* Stream map the rx buffer */
368 dma_unmap_single(dev, drv_data->rx_dma, 365 drv_data->rx_dma = dma_map_single(dev, drv_data->rx,
369 drv_data->rx_map_len, DMA_FROM_DEVICE); 366 drv_data->rx_map_len, DMA_FROM_DEVICE);
367 if (dma_mapping_error(dev, drv_data->rx_dma)) {
368 dma_unmap_single(dev, drv_data->tx_dma,
369 drv_data->tx_map_len, DMA_TO_DEVICE);
370 return 0; 370 return 0;
371 } 371 }
372 372
diff --git a/drivers/spi/spi_imx.c b/drivers/spi/spi_imx.c
index 61ba147e384d..269a55ec52ef 100644
--- a/drivers/spi/spi_imx.c
+++ b/drivers/spi/spi_imx.c
@@ -506,20 +506,6 @@ static int map_dma_buffers(struct driver_data *drv_data)
506 if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx)) 506 if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx))
507 return -1; 507 return -1;
508 508
509 /* NULL rx means write-only transfer and no map needed
510 since rx DMA will not be used */
511 if (drv_data->rx) {
512 buf = drv_data->rx;
513 drv_data->rx_dma = dma_map_single(
514 dev,
515 buf,
516 drv_data->len,
517 DMA_FROM_DEVICE);
518 if (dma_mapping_error(dev, drv_data->rx_dma))
519 return -1;
520 drv_data->rx_dma_needs_unmap = 1;
521 }
522
523 if (drv_data->tx == NULL) { 509 if (drv_data->tx == NULL) {
524 /* Read only message --> use drv_data->dummy_dma_buf for dummy 510 /* Read only message --> use drv_data->dummy_dma_buf for dummy
525 writes to achive reads */ 511 writes to achive reads */
@@ -533,18 +519,31 @@ static int map_dma_buffers(struct driver_data *drv_data)
533 buf, 519 buf,
534 drv_data->tx_map_len, 520 drv_data->tx_map_len,
535 DMA_TO_DEVICE); 521 DMA_TO_DEVICE);
536 if (dma_mapping_error(dev, drv_data->tx_dma)) { 522 if (dma_mapping_error(dev, drv_data->tx_dma))
537 if (drv_data->rx_dma) {
538 dma_unmap_single(dev,
539 drv_data->rx_dma,
540 drv_data->len,
541 DMA_FROM_DEVICE);
542 drv_data->rx_dma_needs_unmap = 0;
543 }
544 return -1; 523 return -1;
545 }
546 drv_data->tx_dma_needs_unmap = 1; 524 drv_data->tx_dma_needs_unmap = 1;
547 525
526 /* NULL rx means write-only transfer and no map needed
527 * since rx DMA will not be used */
528 if (drv_data->rx) {
529 buf = drv_data->rx;
530 drv_data->rx_dma = dma_map_single(dev,
531 buf,
532 drv_data->len,
533 DMA_FROM_DEVICE);
534 if (dma_mapping_error(dev, drv_data->rx_dma)) {
535 if (drv_data->tx_dma) {
536 dma_unmap_single(dev,
537 drv_data->tx_dma,
538 drv_data->tx_map_len,
539 DMA_TO_DEVICE);
540 drv_data->tx_dma_needs_unmap = 0;
541 }
542 return -1;
543 }
544 drv_data->rx_dma_needs_unmap = 1;
545 }
546
548 return 0; 547 return 0;
549} 548}
550 549
@@ -1457,7 +1456,7 @@ static int __init spi_imx_probe(struct platform_device *pdev)
1457 struct device *dev = &pdev->dev; 1456 struct device *dev = &pdev->dev;
1458 struct spi_imx_master *platform_info; 1457 struct spi_imx_master *platform_info;
1459 struct spi_master *master; 1458 struct spi_master *master;
1460 struct driver_data *drv_data = NULL; 1459 struct driver_data *drv_data;
1461 struct resource *res; 1460 struct resource *res;
1462 int irq, status = 0; 1461 int irq, status = 0;
1463 1462
@@ -1468,14 +1467,6 @@ static int __init spi_imx_probe(struct platform_device *pdev)
1468 goto err_no_pdata; 1467 goto err_no_pdata;
1469 } 1468 }
1470 1469
1471 drv_data->clk = clk_get(&pdev->dev, "perclk2");
1472 if (IS_ERR(drv_data->clk)) {
1473 dev_err(&pdev->dev, "probe - cannot get get\n");
1474 status = PTR_ERR(drv_data->clk);
1475 goto err_no_clk;
1476 }
1477 clk_enable(drv_data->clk);
1478
1479 /* Allocate master with space for drv_data */ 1470 /* Allocate master with space for drv_data */
1480 master = spi_alloc_master(dev, sizeof(struct driver_data)); 1471 master = spi_alloc_master(dev, sizeof(struct driver_data));
1481 if (!master) { 1472 if (!master) {
@@ -1496,6 +1487,14 @@ static int __init spi_imx_probe(struct platform_device *pdev)
1496 1487
1497 drv_data->dummy_dma_buf = SPI_DUMMY_u32; 1488 drv_data->dummy_dma_buf = SPI_DUMMY_u32;
1498 1489
1490 drv_data->clk = clk_get(&pdev->dev, "perclk2");
1491 if (IS_ERR(drv_data->clk)) {
1492 dev_err(&pdev->dev, "probe - cannot get clock\n");
1493 status = PTR_ERR(drv_data->clk);
1494 goto err_no_clk;
1495 }
1496 clk_enable(drv_data->clk);
1497
1499 /* Find and map resources */ 1498 /* Find and map resources */
1500 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1499 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1501 if (!res) { 1500 if (!res) {
@@ -1631,12 +1630,13 @@ err_no_iomap:
1631 kfree(drv_data->ioarea); 1630 kfree(drv_data->ioarea);
1632 1631
1633err_no_iores: 1632err_no_iores:
1634 spi_master_put(master);
1635
1636err_no_pdata:
1637 clk_disable(drv_data->clk); 1633 clk_disable(drv_data->clk);
1638 clk_put(drv_data->clk); 1634 clk_put(drv_data->clk);
1635
1639err_no_clk: 1636err_no_clk:
1637 spi_master_put(master);
1638
1639err_no_pdata:
1640err_no_mem: 1640err_no_mem:
1641 return status; 1641 return status;
1642} 1642}
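
The spi_imx probe rework above delays clk_get() until after spi_alloc_master() (so drv_data exists before it is dereferenced) and reorders the error labels so resources are released in the reverse order they were acquired. A generic sketch of that goto-unwind pattern, with hypothetical get_a/get_b helpers standing in for the real calls:

#include <errno.h>

/* Hypothetical acquire/release pairs, e.g. "a" ~ spi_alloc_master() and
 * "b" ~ clk_get()/clk_enable(); the names are for illustration only. */
int  get_a(void) { return 0; }
void put_a(void) { }
int  get_b(void) { return 0; }
void put_b(void) { }

int probe_example(void)
{
	int err;

	err = get_a();
	if (err)
		goto err_no_a;

	err = get_b();
	if (err)
		goto err_no_b;

	return 0;

err_no_b:
	put_a();	/* undo everything acquired before "b" */
err_no_a:
	return err;
}

void remove_example(void)
{
	/* Teardown mirrors the error path: reverse order of acquisition. */
	put_b();
	put_a();
}
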
diff --git a/drivers/spi/spi_s3c24xx_gpio.c b/drivers/spi/spi_s3c24xx_gpio.c
index cc1f647f579b..f2447a5476bb 100644
--- a/drivers/spi/spi_s3c24xx_gpio.c
+++ b/drivers/spi/spi_s3c24xx_gpio.c
@@ -34,7 +34,7 @@ struct s3c2410_spigpio {
34 34
35static inline struct s3c2410_spigpio *spidev_to_sg(struct spi_device *spi) 35static inline struct s3c2410_spigpio *spidev_to_sg(struct spi_device *spi)
36{ 36{
37 return spi->controller_data; 37 return spi_master_get_devdata(spi->master);
38} 38}
39 39
40static inline void setsck(struct spi_device *dev, int on) 40static inline void setsck(struct spi_device *dev, int on)
@@ -118,6 +118,7 @@ static int s3c2410_spigpio_probe(struct platform_device *dev)
118 /* setup spi bitbang adaptor */ 118 /* setup spi bitbang adaptor */
119 sp->bitbang.master = spi_master_get(master); 119 sp->bitbang.master = spi_master_get(master);
120 sp->bitbang.master->bus_num = info->bus_num; 120 sp->bitbang.master->bus_num = info->bus_num;
121 sp->bitbang.master->num_chipselect = info->num_chipselect;
121 sp->bitbang.chipselect = s3c2410_spigpio_chipselect; 122 sp->bitbang.chipselect = s3c2410_spigpio_chipselect;
122 123
123 sp->bitbang.txrx_word[SPI_MODE_0] = s3c2410_spigpio_txrx_mode0; 124 sp->bitbang.txrx_word[SPI_MODE_0] = s3c2410_spigpio_txrx_mode0;
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 89a43755a453..5d869c4d3eb2 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -597,7 +597,9 @@ static int spidev_probe(struct spi_device *spi)
597 } 597 }
598 mutex_unlock(&device_list_lock); 598 mutex_unlock(&device_list_lock);
599 599
600 if (status != 0) 600 if (status == 0)
601 spi_set_drvdata(spi, spidev);
602 else
601 kfree(spidev); 603 kfree(spidev);
602 604
603 return status; 605 return status;
diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig
index 307b1f62d949..b1b947edcf01 100644
--- a/drivers/ssb/Kconfig
+++ b/drivers/ssb/Kconfig
@@ -1,10 +1,11 @@
1menu "Sonics Silicon Backplane"
2
3config SSB_POSSIBLE 1config SSB_POSSIBLE
4 bool 2 bool
5 depends on HAS_IOMEM && HAS_DMA 3 depends on HAS_IOMEM && HAS_DMA
6 default y 4 default y
7 5
6menu "Sonics Silicon Backplane"
7 depends on SSB_POSSIBLE
8
8config SSB 9config SSB
9 tristate "Sonics Silicon Backplane support" 10 tristate "Sonics Silicon Backplane support"
10 depends on SSB_POSSIBLE 11 depends on SSB_POSSIBLE
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 2a79decd7dfc..5d457c96bd7e 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -21,8 +21,26 @@ menuconfig STAGING
21 21
22 If in doubt, say N here. 22 If in doubt, say N here.
23 23
24
24if STAGING 25if STAGING
25 26
27config STAGING_EXCLUDE_BUILD
28 bool "Exclude Staging drivers from being built" if STAGING
29 default y
30 ---help---
31 Are you sure you really want to build the staging drivers?
32 They taint your kernel, don't live up to the normal Linux
33 kernel quality standards, are a bit crufty around the edges,
34 and might go off and kick your dog when you aren't paying
35 attention.
36
37 Say N here to be able to select and build the Staging drivers.
38 This option is primarily here to prevent them from being built
 39 when selecting 'make allyesconfig' and 'make allmodconfig' so
40 don't be all that put off, your dog will be just fine.
41
42if !STAGING_EXCLUDE_BUILD
43
26source "drivers/staging/et131x/Kconfig" 44source "drivers/staging/et131x/Kconfig"
27 45
28source "drivers/staging/slicoss/Kconfig" 46source "drivers/staging/slicoss/Kconfig"
@@ -43,4 +61,7 @@ source "drivers/staging/echo/Kconfig"
43 61
44source "drivers/staging/at76_usb/Kconfig" 62source "drivers/staging/at76_usb/Kconfig"
45 63
64source "drivers/staging/poch/Kconfig"
65
66endif # !STAGING_EXCLUDE_BUILD
46endif # STAGING 67endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 325bca4f71c0..71c4d53760b8 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -13,3 +13,4 @@ obj-$(CONFIG_W35UND) += winbond/
13obj-$(CONFIG_PRISM2_USB) += wlan-ng/ 13obj-$(CONFIG_PRISM2_USB) += wlan-ng/
14obj-$(CONFIG_ECHO) += echo/ 14obj-$(CONFIG_ECHO) += echo/
15obj-$(CONFIG_USB_ATMEL) += at76_usb/ 15obj-$(CONFIG_USB_ATMEL) += at76_usb/
16obj-$(CONFIG_POCH) += poch/
diff --git a/drivers/staging/at76_usb/at76_usb.c b/drivers/staging/at76_usb/at76_usb.c
index 52df0c665183..174e2bec9223 100644
--- a/drivers/staging/at76_usb/at76_usb.c
+++ b/drivers/staging/at76_usb/at76_usb.c
@@ -2319,9 +2319,11 @@ static int at76_iw_handler_get_scan(struct net_device *netdev,
2319 if (!iwe) 2319 if (!iwe)
2320 return -ENOMEM; 2320 return -ENOMEM;
2321 2321
2322 if (priv->scan_state != SCAN_COMPLETED) 2322 if (priv->scan_state != SCAN_COMPLETED) {
2323 /* scan not yet finished */ 2323 /* scan not yet finished */
2324 kfree(iwe);
2324 return -EAGAIN; 2325 return -EAGAIN;
2326 }
2325 2327
2326 spin_lock_irqsave(&priv->bss_list_spinlock, flags); 2328 spin_lock_irqsave(&priv->bss_list_spinlock, flags);
2327 2329
diff --git a/drivers/staging/echo/bit_operations.h b/drivers/staging/echo/bit_operations.h
index b32f4bf99397..cecdcf3fd755 100644
--- a/drivers/staging/echo/bit_operations.h
+++ b/drivers/staging/echo/bit_operations.h
@@ -30,114 +30,98 @@
30#if !defined(_BIT_OPERATIONS_H_) 30#if !defined(_BIT_OPERATIONS_H_)
31#define _BIT_OPERATIONS_H_ 31#define _BIT_OPERATIONS_H_
32 32
33#ifdef __cplusplus
34extern "C" {
35#endif
36
37#if defined(__i386__) || defined(__x86_64__) 33#if defined(__i386__) || defined(__x86_64__)
38/*! \brief Find the bit position of the highest set bit in a word 34/*! \brief Find the bit position of the highest set bit in a word
39 \param bits The word to be searched 35 \param bits The word to be searched
40 \return The bit number of the highest set bit, or -1 if the word is zero. */ 36 \return The bit number of the highest set bit, or -1 if the word is zero. */
41static __inline__ int top_bit(unsigned int bits) 37static __inline__ int top_bit(unsigned int bits)
42{ 38{
43 int res; 39 int res;
44 40
45 __asm__ (" xorl %[res],%[res];\n" 41 __asm__(" xorl %[res],%[res];\n"
46 " decl %[res];\n" 42 " decl %[res];\n"
47 " bsrl %[bits],%[res]\n" 43 " bsrl %[bits],%[res]\n"
48 : [res] "=&r" (res) 44 :[res] "=&r" (res)
49 : [bits] "rm" (bits)); 45 :[bits] "rm"(bits)
50 return res; 46 );
47 return res;
51} 48}
52/*- End of function --------------------------------------------------------*/
53 49
54/*! \brief Find the bit position of the lowest set bit in a word 50/*! \brief Find the bit position of the lowest set bit in a word
55 \param bits The word to be searched 51 \param bits The word to be searched
56 \return The bit number of the lowest set bit, or -1 if the word is zero. */ 52 \return The bit number of the lowest set bit, or -1 if the word is zero. */
57static __inline__ int bottom_bit(unsigned int bits) 53static __inline__ int bottom_bit(unsigned int bits)
58{ 54{
59 int res; 55 int res;
60 56
61 __asm__ (" xorl %[res],%[res];\n" 57 __asm__(" xorl %[res],%[res];\n"
62 " decl %[res];\n" 58 " decl %[res];\n"
63 " bsfl %[bits],%[res]\n" 59 " bsfl %[bits],%[res]\n"
64 : [res] "=&r" (res) 60 :[res] "=&r" (res)
65 : [bits] "rm" (bits)); 61 :[bits] "rm"(bits)
66 return res; 62 );
63 return res;
67} 64}
68/*- End of function --------------------------------------------------------*/
69#else 65#else
70static __inline__ int top_bit(unsigned int bits) 66static __inline__ int top_bit(unsigned int bits)
71{ 67{
72 int i; 68 int i;
73 69
74 if (bits == 0) 70 if (bits == 0)
75 return -1; 71 return -1;
76 i = 0; 72 i = 0;
77 if (bits & 0xFFFF0000) 73 if (bits & 0xFFFF0000) {
78 { 74 bits &= 0xFFFF0000;
79 bits &= 0xFFFF0000; 75 i += 16;
80 i += 16; 76 }
81 } 77 if (bits & 0xFF00FF00) {
82 if (bits & 0xFF00FF00) 78 bits &= 0xFF00FF00;
83 { 79 i += 8;
84 bits &= 0xFF00FF00; 80 }
85 i += 8; 81 if (bits & 0xF0F0F0F0) {
86 } 82 bits &= 0xF0F0F0F0;
87 if (bits & 0xF0F0F0F0) 83 i += 4;
88 { 84 }
89 bits &= 0xF0F0F0F0; 85 if (bits & 0xCCCCCCCC) {
90 i += 4; 86 bits &= 0xCCCCCCCC;
91 } 87 i += 2;
92 if (bits & 0xCCCCCCCC) 88 }
93 { 89 if (bits & 0xAAAAAAAA) {
94 bits &= 0xCCCCCCCC; 90 bits &= 0xAAAAAAAA;
95 i += 2; 91 i += 1;
96 } 92 }
97 if (bits & 0xAAAAAAAA) 93 return i;
98 {
99 bits &= 0xAAAAAAAA;
100 i += 1;
101 }
102 return i;
103} 94}
104/*- End of function --------------------------------------------------------*/
105 95
106static __inline__ int bottom_bit(unsigned int bits) 96static __inline__ int bottom_bit(unsigned int bits)
107{ 97{
108 int i; 98 int i;
109 99
110 if (bits == 0) 100 if (bits == 0)
111 return -1; 101 return -1;
112 i = 32; 102 i = 32;
113 if (bits & 0x0000FFFF) 103 if (bits & 0x0000FFFF) {
114 { 104 bits &= 0x0000FFFF;
115 bits &= 0x0000FFFF; 105 i -= 16;
116 i -= 16; 106 }
117 } 107 if (bits & 0x00FF00FF) {
118 if (bits & 0x00FF00FF) 108 bits &= 0x00FF00FF;
119 { 109 i -= 8;
120 bits &= 0x00FF00FF; 110 }
121 i -= 8; 111 if (bits & 0x0F0F0F0F) {
122 } 112 bits &= 0x0F0F0F0F;
123 if (bits & 0x0F0F0F0F) 113 i -= 4;
124 { 114 }
125 bits &= 0x0F0F0F0F; 115 if (bits & 0x33333333) {
126 i -= 4; 116 bits &= 0x33333333;
127 } 117 i -= 2;
128 if (bits & 0x33333333) 118 }
129 { 119 if (bits & 0x55555555) {
130 bits &= 0x33333333; 120 bits &= 0x55555555;
131 i -= 2; 121 i -= 1;
132 } 122 }
133 if (bits & 0x55555555) 123 return i;
134 {
135 bits &= 0x55555555;
136 i -= 1;
137 }
138 return i;
139} 124}
140/*- End of function --------------------------------------------------------*/
141#endif 125#endif
142 126
143/*! \brief Bit reverse a byte. 127/*! \brief Bit reverse a byte.
@@ -146,16 +130,16 @@ static __inline__ int bottom_bit(unsigned int bits)
146static __inline__ uint8_t bit_reverse8(uint8_t x) 130static __inline__ uint8_t bit_reverse8(uint8_t x)
147{ 131{
148#if defined(__i386__) || defined(__x86_64__) 132#if defined(__i386__) || defined(__x86_64__)
149 /* If multiply is fast */ 133 /* If multiply is fast */
150 return ((x*0x0802U & 0x22110U) | (x*0x8020U & 0x88440U))*0x10101U >> 16; 134 return ((x * 0x0802U & 0x22110U) | (x * 0x8020U & 0x88440U)) *
135 0x10101U >> 16;
151#else 136#else
152 /* If multiply is slow, but we have a barrel shifter */ 137 /* If multiply is slow, but we have a barrel shifter */
153 x = (x >> 4) | (x << 4); 138 x = (x >> 4) | (x << 4);
154 x = ((x & 0xCC) >> 2) | ((x & 0x33) << 2); 139 x = ((x & 0xCC) >> 2) | ((x & 0x33) << 2);
155 return ((x & 0xAA) >> 1) | ((x & 0x55) << 1); 140 return ((x & 0xAA) >> 1) | ((x & 0x55) << 1);
156#endif 141#endif
157} 142}
158/*- End of function --------------------------------------------------------*/
159 143
160/*! \brief Bit reverse a 16 bit word. 144/*! \brief Bit reverse a 16 bit word.
161 \param data The word to be reversed. 145 \param data The word to be reversed.
@@ -193,9 +177,8 @@ uint16_t make_mask16(uint16_t x);
193 \return The word with the single set bit. */ 177 \return The word with the single set bit. */
194static __inline__ uint32_t least_significant_one32(uint32_t x) 178static __inline__ uint32_t least_significant_one32(uint32_t x)
195{ 179{
196 return (x & (-(int32_t) x)); 180 return (x & (-(int32_t) x));
197} 181}
198/*- End of function --------------------------------------------------------*/
199 182
200/*! \brief Find the most significant one in a word, and return a word 183/*! \brief Find the most significant one in a word, and return a word
201 with just that bit set. 184 with just that bit set.
@@ -204,50 +187,42 @@ static __inline__ uint32_t least_significant_one32(uint32_t x)
204static __inline__ uint32_t most_significant_one32(uint32_t x) 187static __inline__ uint32_t most_significant_one32(uint32_t x)
205{ 188{
206#if defined(__i386__) || defined(__x86_64__) 189#if defined(__i386__) || defined(__x86_64__)
207 return 1 << top_bit(x); 190 return 1 << top_bit(x);
208#else 191#else
209 x = make_mask32(x); 192 x = make_mask32(x);
210 return (x ^ (x >> 1)); 193 return (x ^ (x >> 1));
211#endif 194#endif
212} 195}
213/*- End of function --------------------------------------------------------*/
214 196
215/*! \brief Find the parity of a byte. 197/*! \brief Find the parity of a byte.
216 \param x The byte to be checked. 198 \param x The byte to be checked.
217 \return 1 for odd, or 0 for even. */ 199 \return 1 for odd, or 0 for even. */
218static __inline__ int parity8(uint8_t x) 200static __inline__ int parity8(uint8_t x)
219{ 201{
220 x = (x ^ (x >> 4)) & 0x0F; 202 x = (x ^ (x >> 4)) & 0x0F;
221 return (0x6996 >> x) & 1; 203 return (0x6996 >> x) & 1;
222} 204}
223/*- End of function --------------------------------------------------------*/
224 205
225/*! \brief Find the parity of a 16 bit word. 206/*! \brief Find the parity of a 16 bit word.
226 \param x The word to be checked. 207 \param x The word to be checked.
227 \return 1 for odd, or 0 for even. */ 208 \return 1 for odd, or 0 for even. */
228static __inline__ int parity16(uint16_t x) 209static __inline__ int parity16(uint16_t x)
229{ 210{
230 x ^= (x >> 8); 211 x ^= (x >> 8);
231 x = (x ^ (x >> 4)) & 0x0F; 212 x = (x ^ (x >> 4)) & 0x0F;
232 return (0x6996 >> x) & 1; 213 return (0x6996 >> x) & 1;
233} 214}
234/*- End of function --------------------------------------------------------*/
235 215
236/*! \brief Find the parity of a 32 bit word. 216/*! \brief Find the parity of a 32 bit word.
237 \param x The word to be checked. 217 \param x The word to be checked.
238 \return 1 for odd, or 0 for even. */ 218 \return 1 for odd, or 0 for even. */
239static __inline__ int parity32(uint32_t x) 219static __inline__ int parity32(uint32_t x)
240{ 220{
241 x ^= (x >> 16); 221 x ^= (x >> 16);
242 x ^= (x >> 8); 222 x ^= (x >> 8);
243 x = (x ^ (x >> 4)) & 0x0F; 223 x = (x ^ (x >> 4)) & 0x0F;
244 return (0x6996 >> x) & 1; 224 return (0x6996 >> x) & 1;
245} 225}
246/*- End of function --------------------------------------------------------*/
247
248#ifdef __cplusplus
249}
250#endif
251 226
252#endif 227#endif
253/*- End of file ------------------------------------------------------------*/ 228/*- End of file ------------------------------------------------------------*/
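
The reflowed bit_operations.h keeps two compact tricks: the multiply/mask byte reversal in bit_reverse8() and the 0x6996 nibble-parity constant used by parity8/16/32(). A small standalone check of both (userspace C, independent of the driver header; expected outputs noted in comments):

#include <stdio.h>
#include <stdint.h>

/* Multiply/mask byte reversal, same expression as bit_reverse8() above. */
static uint8_t rev8(uint8_t x)
{
	return ((x * 0x0802U & 0x22110U) | (x * 0x8020U & 0x88440U))
		* 0x10101U >> 16;
}

/* 0x6996 is a 16-entry parity table packed into one 16-bit constant:
 * bit n of 0x6996 is the parity of the nibble value n. */
static int parity8(uint8_t x)
{
	x = (x ^ (x >> 4)) & 0x0F;
	return (0x6996 >> x) & 1;
}

int main(void)
{
	printf("rev8(0x01) = 0x%02x\n", rev8(0x01));   /* 0x80 */
	printf("rev8(0xA5) = 0x%02x\n", rev8(0xA5));   /* 0xA5, bit palindrome */
	printf("parity8(0x03) = %d\n", parity8(0x03)); /* two bits set -> 0 */
	printf("parity8(0x07) = %d\n", parity8(0x07)); /* three bits set -> 1 */
	return 0;
}
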
diff --git a/drivers/staging/echo/echo.c b/drivers/staging/echo/echo.c
index 4a281b14fc58..fd4007e329e7 100644
--- a/drivers/staging/echo/echo.c
+++ b/drivers/staging/echo/echo.c
@@ -74,7 +74,6 @@
74 74
75 Steve also has some nice notes on echo cancellers in echo.h 75 Steve also has some nice notes on echo cancellers in echo.h
76 76
77
78 References: 77 References:
79 78
80 [1] Ochiai, Areseki, and Ogihara, "Echo Canceller with Two Echo 79 [1] Ochiai, Areseki, and Ogihara, "Echo Canceller with Two Echo
@@ -105,20 +104,17 @@
105 Mark, Pawel, and Pavel. 104 Mark, Pawel, and Pavel.
106*/ 105*/
107 106
108#include <linux/kernel.h> /* We're doing kernel work */ 107#include <linux/kernel.h> /* We're doing kernel work */
109#include <linux/module.h> 108#include <linux/module.h>
110#include <linux/kernel.h>
111#include <linux/slab.h> 109#include <linux/slab.h>
112#define malloc(a) kmalloc((a), GFP_KERNEL)
113#define free(a) kfree(a)
114 110
115#include "bit_operations.h" 111#include "bit_operations.h"
116#include "echo.h" 112#include "echo.h"
117 113
118#define MIN_TX_POWER_FOR_ADAPTION 64 114#define MIN_TX_POWER_FOR_ADAPTION 64
119#define MIN_RX_POWER_FOR_ADAPTION 64 115#define MIN_RX_POWER_FOR_ADAPTION 64
120#define DTD_HANGOVER 600 /* 600 samples, or 75ms */ 116#define DTD_HANGOVER 600 /* 600 samples, or 75ms */
121#define DC_LOG2BETA 3 /* log2() of DC filter Beta */ 117#define DC_LOG2BETA 3 /* log2() of DC filter Beta */
122 118
123/*-----------------------------------------------------------------------*\ 119/*-----------------------------------------------------------------------*\
124 FUNCTIONS 120 FUNCTIONS
@@ -126,59 +122,58 @@
126 122
127/* adapting coeffs using the traditional stochastic descent (N)LMS algorithm */ 123/* adapting coeffs using the traditional stochastic descent (N)LMS algorithm */
128 124
129 125#ifdef __bfin__
130#ifdef __BLACKFIN_ASM__ 126static void __inline__ lms_adapt_bg(struct oslec_state *ec, int clean,
131static void __inline__ lms_adapt_bg(echo_can_state_t *ec, int clean, int shift) 127 int shift)
132{ 128{
133 int i, j; 129 int i, j;
134 int offset1; 130 int offset1;
135 int offset2; 131 int offset2;
136 int factor; 132 int factor;
137 int exp; 133 int exp;
138 int16_t *phist; 134 int16_t *phist;
139 int n; 135 int n;
140 136
141 if (shift > 0) 137 if (shift > 0)
142 factor = clean << shift; 138 factor = clean << shift;
143 else 139 else
144 factor = clean >> -shift; 140 factor = clean >> -shift;
145 141
146 /* Update the FIR taps */ 142 /* Update the FIR taps */
147 143
148 offset2 = ec->curr_pos; 144 offset2 = ec->curr_pos;
149 offset1 = ec->taps - offset2; 145 offset1 = ec->taps - offset2;
150 phist = &ec->fir_state_bg.history[offset2]; 146 phist = &ec->fir_state_bg.history[offset2];
151 147
152 /* st: and en: help us locate the assembler in echo.s */ 148 /* st: and en: help us locate the assembler in echo.s */
153 149
154 //asm("st:"); 150 //asm("st:");
155 n = ec->taps; 151 n = ec->taps;
156 for (i = 0, j = offset2; i < n; i++, j++) 152 for (i = 0, j = offset2; i < n; i++, j++) {
157 { 153 exp = *phist++ * factor;
158 exp = *phist++ * factor; 154 ec->fir_taps16[1][i] += (int16_t) ((exp + (1 << 14)) >> 15);
159 ec->fir_taps16[1][i] += (int16_t) ((exp+(1<<14)) >> 15); 155 }
160 } 156 //asm("en:");
161 //asm("en:"); 157
162 158 /* Note the asm for the inner loop above generated by Blackfin gcc
163 /* Note the asm for the inner loop above generated by Blackfin gcc 159 4.1.1 is pretty good (note even parallel instructions used):
164 4.1.1 is pretty good (note even parallel instructions used): 160
165 161 R0 = W [P0++] (X);
166 R0 = W [P0++] (X); 162 R0 *= R2;
167 R0 *= R2; 163 R0 = R0 + R3 (NS) ||
168 R0 = R0 + R3 (NS) || 164 R1 = W [P1] (X) ||
169 R1 = W [P1] (X) || 165 nop;
170 nop; 166 R0 >>>= 15;
171 R0 >>>= 15; 167 R0 = R0 + R1;
172 R0 = R0 + R1; 168 W [P1++] = R0;
173 W [P1++] = R0; 169
174 170 A block based update algorithm would be much faster but the
175 A block based update algorithm would be much faster but the 171 above can't be improved on much. Every instruction saved in
176 above can't be improved on much. Every instruction saved in 172 the loop above is 2 MIPs/ch! The for loop above is where the
177 the loop above is 2 MIPs/ch! The for loop above is where the 173 Blackfin spends most of it's time - about 17 MIPs/ch measured
178 Blackfin spends most of it's time - about 17 MIPs/ch measured 174 with speedtest.c with 256 taps (32ms). Write-back and
179 with speedtest.c with 256 taps (32ms). Write-back and 175 Write-through cache gave about the same performance.
180 Write-through cache gave about the same performance. 176 */
181 */
182} 177}
183 178
184/* 179/*
@@ -200,392 +195,393 @@ static void __inline__ lms_adapt_bg(echo_can_state_t *ec, int clean, int shift)
200*/ 195*/
201 196
202#else 197#else
203static __inline__ void lms_adapt_bg(echo_can_state_t *ec, int clean, int shift) 198static __inline__ void lms_adapt_bg(struct oslec_state *ec, int clean,
199 int shift)
204{ 200{
205 int i; 201 int i;
206 202
207 int offset1; 203 int offset1;
208 int offset2; 204 int offset2;
209 int factor; 205 int factor;
210 int exp; 206 int exp;
211 207
212 if (shift > 0) 208 if (shift > 0)
213 factor = clean << shift; 209 factor = clean << shift;
214 else 210 else
215 factor = clean >> -shift; 211 factor = clean >> -shift;
216 212
217 /* Update the FIR taps */ 213 /* Update the FIR taps */
218 214
219 offset2 = ec->curr_pos; 215 offset2 = ec->curr_pos;
220 offset1 = ec->taps - offset2; 216 offset1 = ec->taps - offset2;
221 217
222 for (i = ec->taps - 1; i >= offset1; i--) 218 for (i = ec->taps - 1; i >= offset1; i--) {
223 { 219 exp = (ec->fir_state_bg.history[i - offset1] * factor);
224 exp = (ec->fir_state_bg.history[i - offset1]*factor); 220 ec->fir_taps16[1][i] += (int16_t) ((exp + (1 << 14)) >> 15);
225 ec->fir_taps16[1][i] += (int16_t) ((exp+(1<<14)) >> 15); 221 }
226 } 222 for (; i >= 0; i--) {
227 for ( ; i >= 0; i--) 223 exp = (ec->fir_state_bg.history[i + offset2] * factor);
228 { 224 ec->fir_taps16[1][i] += (int16_t) ((exp + (1 << 14)) >> 15);
229 exp = (ec->fir_state_bg.history[i + offset2]*factor); 225 }
230 ec->fir_taps16[1][i] += (int16_t) ((exp+(1<<14)) >> 15);
231 }
232} 226}
233#endif 227#endif
234 228
235/*- End of function --------------------------------------------------------*/ 229struct oslec_state *oslec_create(int len, int adaption_mode)
236
237echo_can_state_t *echo_can_create(int len, int adaption_mode)
238{ 230{
239 echo_can_state_t *ec; 231 struct oslec_state *ec;
240 int i; 232 int i;
241 int j; 233
242 234 ec = kzalloc(sizeof(*ec), GFP_KERNEL);
243 ec = kmalloc(sizeof(*ec), GFP_KERNEL); 235 if (!ec)
244 if (ec == NULL) 236 return NULL;
245 return NULL; 237
246 memset(ec, 0, sizeof(*ec)); 238 ec->taps = len;
247 239 ec->log2taps = top_bit(len);
248 ec->taps = len; 240 ec->curr_pos = ec->taps - 1;
249 ec->log2taps = top_bit(len); 241
250 ec->curr_pos = ec->taps - 1; 242 for (i = 0; i < 2; i++) {
251 243 ec->fir_taps16[i] =
252 for (i = 0; i < 2; i++) 244 kcalloc(ec->taps, sizeof(int16_t), GFP_KERNEL);
253 { 245 if (!ec->fir_taps16[i])
254 if ((ec->fir_taps16[i] = (int16_t *) malloc((ec->taps)*sizeof(int16_t))) == NULL) 246 goto error_oom;
255 { 247 }
256 for (j = 0; j < i; j++) 248
257 kfree(ec->fir_taps16[j]); 249 fir16_create(&ec->fir_state, ec->fir_taps16[0], ec->taps);
258 kfree(ec); 250 fir16_create(&ec->fir_state_bg, ec->fir_taps16[1], ec->taps);
259 return NULL; 251
260 } 252 for (i = 0; i < 5; i++) {
261 memset(ec->fir_taps16[i], 0, (ec->taps)*sizeof(int16_t)); 253 ec->xvtx[i] = ec->yvtx[i] = ec->xvrx[i] = ec->yvrx[i] = 0;
262 } 254 }
263 255
264 fir16_create(&ec->fir_state, 256 ec->cng_level = 1000;
265 ec->fir_taps16[0], 257 oslec_adaption_mode(ec, adaption_mode);
266 ec->taps); 258
267 fir16_create(&ec->fir_state_bg, 259 ec->snapshot = kcalloc(ec->taps, sizeof(int16_t), GFP_KERNEL);
268 ec->fir_taps16[1], 260 if (!ec->snapshot)
269 ec->taps); 261 goto error_oom;
270 262
271 for(i=0; i<5; i++) { 263 ec->cond_met = 0;
272 ec->xvtx[i] = ec->yvtx[i] = ec->xvrx[i] = ec->yvrx[i] = 0; 264 ec->Pstates = 0;
273 } 265 ec->Ltxacc = ec->Lrxacc = ec->Lcleanacc = ec->Lclean_bgacc = 0;
274 266 ec->Ltx = ec->Lrx = ec->Lclean = ec->Lclean_bg = 0;
275 ec->cng_level = 1000; 267 ec->tx_1 = ec->tx_2 = ec->rx_1 = ec->rx_2 = 0;
276 echo_can_adaption_mode(ec, adaption_mode); 268 ec->Lbgn = ec->Lbgn_acc = 0;
277 269 ec->Lbgn_upper = 200;
278 ec->snapshot = (int16_t*)malloc(ec->taps*sizeof(int16_t)); 270 ec->Lbgn_upper_acc = ec->Lbgn_upper << 13;
279 memset(ec->snapshot, 0, sizeof(int16_t)*ec->taps); 271
280 272 return ec;
281 ec->cond_met = 0; 273
282 ec->Pstates = 0; 274 error_oom:
283 ec->Ltxacc = ec->Lrxacc = ec->Lcleanacc = ec->Lclean_bgacc = 0; 275 for (i = 0; i < 2; i++)
284 ec->Ltx = ec->Lrx = ec->Lclean = ec->Lclean_bg = 0; 276 kfree(ec->fir_taps16[i]);
285 ec->tx_1 = ec->tx_2 = ec->rx_1 = ec->rx_2 = 0; 277
286 ec->Lbgn = ec->Lbgn_acc = 0; 278 kfree(ec);
287 ec->Lbgn_upper = 200; 279 return NULL;
288 ec->Lbgn_upper_acc = ec->Lbgn_upper << 13;
289
290 return ec;
291} 280}
292/*- End of function --------------------------------------------------------*/
293 281
294void echo_can_free(echo_can_state_t *ec) 282EXPORT_SYMBOL_GPL(oslec_create);
283
284void oslec_free(struct oslec_state *ec)
295{ 285{
296 int i; 286 int i;
297 287
298 fir16_free(&ec->fir_state); 288 fir16_free(&ec->fir_state);
299 fir16_free(&ec->fir_state_bg); 289 fir16_free(&ec->fir_state_bg);
300 for (i = 0; i < 2; i++) 290 for (i = 0; i < 2; i++)
301 kfree(ec->fir_taps16[i]); 291 kfree(ec->fir_taps16[i]);
302 kfree(ec->snapshot); 292 kfree(ec->snapshot);
303 kfree(ec); 293 kfree(ec);
304} 294}
305/*- End of function --------------------------------------------------------*/
306 295
307void echo_can_adaption_mode(echo_can_state_t *ec, int adaption_mode) 296EXPORT_SYMBOL_GPL(oslec_free);
297
298void oslec_adaption_mode(struct oslec_state *ec, int adaption_mode)
308{ 299{
309 ec->adaption_mode = adaption_mode; 300 ec->adaption_mode = adaption_mode;
310} 301}
311/*- End of function --------------------------------------------------------*/
312 302
313void echo_can_flush(echo_can_state_t *ec) 303EXPORT_SYMBOL_GPL(oslec_adaption_mode);
304
305void oslec_flush(struct oslec_state *ec)
314{ 306{
315 int i; 307 int i;
316 308
317 ec->Ltxacc = ec->Lrxacc = ec->Lcleanacc = ec->Lclean_bgacc = 0; 309 ec->Ltxacc = ec->Lrxacc = ec->Lcleanacc = ec->Lclean_bgacc = 0;
318 ec->Ltx = ec->Lrx = ec->Lclean = ec->Lclean_bg = 0; 310 ec->Ltx = ec->Lrx = ec->Lclean = ec->Lclean_bg = 0;
319 ec->tx_1 = ec->tx_2 = ec->rx_1 = ec->rx_2 = 0; 311 ec->tx_1 = ec->tx_2 = ec->rx_1 = ec->rx_2 = 0;
320 312
321 ec->Lbgn = ec->Lbgn_acc = 0; 313 ec->Lbgn = ec->Lbgn_acc = 0;
322 ec->Lbgn_upper = 200; 314 ec->Lbgn_upper = 200;
323 ec->Lbgn_upper_acc = ec->Lbgn_upper << 13; 315 ec->Lbgn_upper_acc = ec->Lbgn_upper << 13;
324 316
325 ec->nonupdate_dwell = 0; 317 ec->nonupdate_dwell = 0;
326 318
327 fir16_flush(&ec->fir_state); 319 fir16_flush(&ec->fir_state);
328 fir16_flush(&ec->fir_state_bg); 320 fir16_flush(&ec->fir_state_bg);
329 ec->fir_state.curr_pos = ec->taps - 1; 321 ec->fir_state.curr_pos = ec->taps - 1;
330 ec->fir_state_bg.curr_pos = ec->taps - 1; 322 ec->fir_state_bg.curr_pos = ec->taps - 1;
331 for (i = 0; i < 2; i++) 323 for (i = 0; i < 2; i++)
332 memset(ec->fir_taps16[i], 0, ec->taps*sizeof(int16_t)); 324 memset(ec->fir_taps16[i], 0, ec->taps * sizeof(int16_t));
333 325
334 ec->curr_pos = ec->taps - 1; 326 ec->curr_pos = ec->taps - 1;
335 ec->Pstates = 0; 327 ec->Pstates = 0;
336} 328}
337/*- End of function --------------------------------------------------------*/
338 329
339void echo_can_snapshot(echo_can_state_t *ec) { 330EXPORT_SYMBOL_GPL(oslec_flush);
340 memcpy(ec->snapshot, ec->fir_taps16[0], ec->taps*sizeof(int16_t)); 331
332void oslec_snapshot(struct oslec_state *ec)
333{
334 memcpy(ec->snapshot, ec->fir_taps16[0], ec->taps * sizeof(int16_t));
341} 335}
342/*- End of function --------------------------------------------------------*/ 336
337EXPORT_SYMBOL_GPL(oslec_snapshot);
343 338
344/* Dual Path Echo Canceller ------------------------------------------------*/ 339/* Dual Path Echo Canceller ------------------------------------------------*/
345 340
346int16_t echo_can_update(echo_can_state_t *ec, int16_t tx, int16_t rx) 341int16_t oslec_update(struct oslec_state *ec, int16_t tx, int16_t rx)
347{ 342{
348 int32_t echo_value; 343 int32_t echo_value;
349 int clean_bg; 344 int clean_bg;
350 int tmp, tmp1; 345 int tmp, tmp1;
351 346
352 /* Input scaling was found be required to prevent problems when tx 347 /* Input scaling was found be required to prevent problems when tx
353 starts clipping. Another possible way to handle this would be the 348 starts clipping. Another possible way to handle this would be the
354 filter coefficent scaling. */ 349 filter coefficent scaling. */
355 350
356 ec->tx = tx; ec->rx = rx; 351 ec->tx = tx;
357 tx >>=1; 352 ec->rx = rx;
358 rx >>=1; 353 tx >>= 1;
359 354 rx >>= 1;
360 /* 355
361 Filter DC, 3dB point is 160Hz (I think), note 32 bit precision required 356 /*
362 otherwise values do not track down to 0. Zero at DC, Pole at (1-Beta) 357 Filter DC, 3dB point is 160Hz (I think), note 32 bit precision required
363 only real axis. Some chip sets (like Si labs) don't need 358 otherwise values do not track down to 0. Zero at DC, Pole at (1-Beta)
364 this, but something like a $10 X100P card does. Any DC really slows 359 only real axis. Some chip sets (like Si labs) don't need
365 down convergence. 360 this, but something like a $10 X100P card does. Any DC really slows
366 361 down convergence.
367 Note: removes some low frequency from the signal, this reduces 362
368 the speech quality when listening to samples through headphones 363 Note: removes some low frequency from the signal, this reduces
369 but may not be obvious through a telephone handset. 364 the speech quality when listening to samples through headphones
370 365 but may not be obvious through a telephone handset.
371 Note that the 3dB frequency in radians is approx Beta, e.g. for 366
372 Beta = 2^(-3) = 0.125, 3dB freq is 0.125 rads = 159Hz. 367 Note that the 3dB frequency in radians is approx Beta, e.g. for
373 */ 368 Beta = 2^(-3) = 0.125, 3dB freq is 0.125 rads = 159Hz.
374 369 */
375 if (ec->adaption_mode & ECHO_CAN_USE_RX_HPF) { 370
376 tmp = rx << 15; 371 if (ec->adaption_mode & ECHO_CAN_USE_RX_HPF) {
372 tmp = rx << 15;
377#if 1 373#if 1
378 /* Make sure the gain of the HPF is 1.0. This can still saturate a little under 374 /* Make sure the gain of the HPF is 1.0. This can still saturate a little under
379 impulse conditions, and it might roll to 32768 and need clipping on sustained peak 375 impulse conditions, and it might roll to 32768 and need clipping on sustained peak
380 level signals. However, the scale of such clipping is small, and the error due to 376 level signals. However, the scale of such clipping is small, and the error due to
381 any saturation should not markedly affect the downstream processing. */ 377 any saturation should not markedly affect the downstream processing. */
382 tmp -= (tmp >> 4); 378 tmp -= (tmp >> 4);
383#endif 379#endif
384 ec->rx_1 += -(ec->rx_1>>DC_LOG2BETA) + tmp - ec->rx_2; 380 ec->rx_1 += -(ec->rx_1 >> DC_LOG2BETA) + tmp - ec->rx_2;
381
382 /* hard limit filter to prevent clipping. Note that at this stage
383 rx should be limited to +/- 16383 due to right shift above */
384 tmp1 = ec->rx_1 >> 15;
385 if (tmp1 > 16383)
386 tmp1 = 16383;
387 if (tmp1 < -16383)
388 tmp1 = -16383;
389 rx = tmp1;
390 ec->rx_2 = tmp;
391 }
385 392
386 /* hard limit filter to prevent clipping. Note that at this stage 393 /* Block average of power in the filter states. Used for
387 rx should be limited to +/- 16383 due to right shift above */ 394 adaption power calculation. */
388 tmp1 = ec->rx_1 >> 15;
389 if (tmp1 > 16383) tmp1 = 16383;
390 if (tmp1 < -16383) tmp1 = -16383;
391 rx = tmp1;
392 ec->rx_2 = tmp;
393 }
394 395
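As a standalone sketch (not part of the patch), the rx DC-blocking high-pass filter above reduces to the routine below. The Beta value follows the 2^-3 example from the comment; the patch takes it from its own DC_LOG2BETA constant, so treat the value here as illustrative.

#include <stdint.h>

#define LOG2BETA 3	/* Beta = 2^-3 as in the comment's example; the patch uses DC_LOG2BETA */

struct dc_block {
	int32_t state;	/* filter state (rx_1 in the patch) */
	int32_t prev;	/* previous scaled input (rx_2 in the patch) */
};

static int16_t dc_block(struct dc_block *f, int16_t x)
{
	int32_t in = (int32_t)x << 15;
	int32_t out;

	in -= in >> 4;			/* trim so the HPF gain stays near 1.0 */

	/* zero at DC, single real pole at (1 - Beta) */
	f->state += -(f->state >> LOG2BETA) + in - f->prev;
	f->prev = in;

	out = f->state >> 15;
	if (out > 32767)		/* hard limit, as in the tx variant */
		out = 32767;
	if (out < -32767)
		out = -32767;
	return (int16_t)out;
}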
395 /* Block average of power in the filter states. Used for 396 {
396 adaption power calculation. */ 397 int new, old;
398
399 /* efficient "out with the old and in with the new" algorithm so
400 we don't have to recalculate over the whole block of
401 samples. */
402 new = (int)tx *(int)tx;
403 old = (int)ec->fir_state.history[ec->fir_state.curr_pos] *
404 (int)ec->fir_state.history[ec->fir_state.curr_pos];
405 ec->Pstates +=
406 ((new - old) + (1 << ec->log2taps)) >> ec->log2taps;
407 if (ec->Pstates < 0)
408 ec->Pstates = 0;
409 }
397 410
398 { 411 /* Calculate short term average levels using simple single pole IIRs */
399 int new, old;
400 412
401 /* efficient "out with the old and in with the new" algorithm so 413 ec->Ltxacc += abs(tx) - ec->Ltx;
402 we don't have to recalculate over the whole block of 414 ec->Ltx = (ec->Ltxacc + (1 << 4)) >> 5;
403 samples. */ 415 ec->Lrxacc += abs(rx) - ec->Lrx;
404 new = (int)tx * (int)tx; 416 ec->Lrx = (ec->Lrxacc + (1 << 4)) >> 5;
405 old = (int)ec->fir_state.history[ec->fir_state.curr_pos] *
406 (int)ec->fir_state.history[ec->fir_state.curr_pos];
407 ec->Pstates += ((new - old) + (1<<ec->log2taps)) >> ec->log2taps;
408 if (ec->Pstates < 0) ec->Pstates = 0;
409 }
410
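The block above maintains Pstates, a per-tap average of the power held in the FIR delay line, by adding the newest squared sample and removing the one about to fall out of the window. A self-contained sketch of the same update follows; the names and the tap count are illustrative, not taken from the patch.

#include <stdint.h>

#define LOG2TAPS 7			/* 128 taps; illustrative */

static int pstates;			/* per-tap average power */

static void track_state_power(int16_t newest, int16_t oldest)
{
	int new_sq = (int)newest * (int)newest;
	int old_sq = (int)oldest * (int)oldest;

	/* incremental update with a rounded divide by the tap count */
	pstates += ((new_sq - old_sq) + (1 << LOG2TAPS)) >> LOG2TAPS;
	if (pstates < 0)		/* guard against accumulated rounding */
		pstates = 0;
}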
411 /* Calculate short term average levels using simple single pole IIRs */
412
413 ec->Ltxacc += abs(tx) - ec->Ltx;
414 ec->Ltx = (ec->Ltxacc + (1<<4)) >> 5;
415 ec->Lrxacc += abs(rx) - ec->Lrx;
416 ec->Lrx = (ec->Lrxacc + (1<<4)) >> 5;
417
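Ltx, Lrx and the two Lclean levels above all use the same leaky-average idiom. Pulled out on its own it looks like the sketch below (illustrative names, not the patch's code):

#include <stdint.h>
#include <stdlib.h>

static int level_acc;			/* running accumulator */
static int level;			/* short-term average magnitude */

static void track_level(int16_t x)
{
	/* single-pole IIR: level chases |x| with a time constant of
	 * about 32 samples (roughly 4 ms at 8 kHz) */
	level_acc += abs(x) - level;
	level = (level_acc + (1 << 4)) >> 5;	/* rounded divide by 32 */
}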
418 /* Foreground filter ---------------------------------------------------*/
419
420 ec->fir_state.coeffs = ec->fir_taps16[0];
421 echo_value = fir16(&ec->fir_state, tx);
422 ec->clean = rx - echo_value;
423 ec->Lcleanacc += abs(ec->clean) - ec->Lclean;
424 ec->Lclean = (ec->Lcleanacc + (1<<4)) >> 5;
425
426 /* Background filter ---------------------------------------------------*/
427
428 echo_value = fir16(&ec->fir_state_bg, tx);
429 clean_bg = rx - echo_value;
430 ec->Lclean_bgacc += abs(clean_bg) - ec->Lclean_bg;
431 ec->Lclean_bg = (ec->Lclean_bgacc + (1<<4)) >> 5;
432
433 /* Background Filter adaption -----------------------------------------*/
434
 435 /* Almost always adapt bg filter, just simple DT and energy
436 detection to minimise adaption in cases of strong double talk.
437 However this is not critical for the dual path algorithm.
438 */
439 ec->factor = 0;
440 ec->shift = 0;
441 if ((ec->nonupdate_dwell == 0)) {
442 int P, logP, shift;
443
444 /* Determine:
445
446 f = Beta * clean_bg_rx/P ------ (1)
447
448 where P is the total power in the filter states.
449
450 The Boffins have shown that if we obey (1) we converge
451 quickly and avoid instability.
452
453 The correct factor f must be in Q30, as this is the fixed
454 point format required by the lms_adapt_bg() function,
455 therefore the scaled version of (1) is:
456
457 (2^30) * f = (2^30) * Beta * clean_bg_rx/P
458 factor = (2^30) * Beta * clean_bg_rx/P ----- (2)
459
460 We have chosen Beta = 0.25 by experiment, so:
461
462 factor = (2^30) * (2^-2) * clean_bg_rx/P
463
464 (30 - 2 - log2(P))
465 factor = clean_bg_rx 2 ----- (3)
466
467 To avoid a divide we approximate log2(P) as top_bit(P),
468 which returns the position of the highest non-zero bit in
469 P. This approximation introduces an error as large as a
470 factor of 2, but the algorithm seems to handle it OK.
471
472 Come to think of it a divide may not be a big deal on a
473 modern DSP, so its probably worth checking out the cycles
474 for a divide versus a top_bit() implementation.
475 */
476
477 P = MIN_TX_POWER_FOR_ADAPTION + ec->Pstates;
478 logP = top_bit(P) + ec->log2taps;
479 shift = 30 - 2 - logP;
480 ec->shift = shift;
481
482 lms_adapt_bg(ec, clean_bg, shift);
483 }
484
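To make the Q30 derivation in the comment above concrete, here is the arithmetic with illustrative numbers (the values are made up for the example; only the formulas come from the patch):

	P     = MIN_TX_POWER_FOR_ADAPTION + ec->Pstates = 4096
	logP  = top_bit(4096) + log2taps = 12 + 7 = 19
	shift = 30 - 2 - logP = 9

Pstates is a per-tap average, so adding log2taps turns top_bit(P) into an estimate of log2 of the total power in the filter states; lms_adapt_bg() then scales clean_bg by 2^9 in place of the divide in equation (2). The top_bit() rounding can be off by up to a factor of two, which the comment notes the algorithm tolerates.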
485 /* very simple DTD to make sure we dont try and adapt with strong
486 near end speech */
487
488 ec->adapt = 0;
489 if ((ec->Lrx > MIN_RX_POWER_FOR_ADAPTION) && (ec->Lrx > ec->Ltx))
490 ec->nonupdate_dwell = DTD_HANGOVER;
491 if (ec->nonupdate_dwell)
492 ec->nonupdate_dwell--;
493 417
494 /* Transfer logic ------------------------------------------------------*/ 418 /* Foreground filter --------------------------------------------------- */
495 419
496 /* These conditions are from the dual path paper [1], I messed with 420 ec->fir_state.coeffs = ec->fir_taps16[0];
497 them a bit to improve performance. */ 421 echo_value = fir16(&ec->fir_state, tx);
422 ec->clean = rx - echo_value;
423 ec->Lcleanacc += abs(ec->clean) - ec->Lclean;
424 ec->Lclean = (ec->Lcleanacc + (1 << 4)) >> 5;
498 425
499 if ((ec->adaption_mode & ECHO_CAN_USE_ADAPTION) && 426 /* Background filter --------------------------------------------------- */
500 (ec->nonupdate_dwell == 0) &&
501 (8*ec->Lclean_bg < 7*ec->Lclean) /* (ec->Lclean_bg < 0.875*ec->Lclean) */ &&
502 (8*ec->Lclean_bg < ec->Ltx) /* (ec->Lclean_bg < 0.125*ec->Ltx) */ )
503 {
504 if (ec->cond_met == 6) {
505 /* BG filter has had better results for 6 consecutive samples */
506 ec->adapt = 1;
507 memcpy(ec->fir_taps16[0], ec->fir_taps16[1], ec->taps*sizeof(int16_t));
508 }
509 else
510 ec->cond_met++;
511 }
512 else
513 ec->cond_met = 0;
514 427
515 /* Non-Linear Processing ---------------------------------------------------*/ 428 echo_value = fir16(&ec->fir_state_bg, tx);
429 clean_bg = rx - echo_value;
430 ec->Lclean_bgacc += abs(clean_bg) - ec->Lclean_bg;
431 ec->Lclean_bg = (ec->Lclean_bgacc + (1 << 4)) >> 5;
516 432
517 ec->clean_nlp = ec->clean; 433 /* Background Filter adaption ----------------------------------------- */
518 if (ec->adaption_mode & ECHO_CAN_USE_NLP)
519 {
520 /* Non-linear processor - a fancy way to say "zap small signals, to avoid
521 residual echo due to (uLaw/ALaw) non-linearity in the channel.". */
522 434
 523 if ((16*ec->Lclean < ec->Ltx)) 435 /* Almost always adapt bg filter, just simple DT and energy
524 { 436 detection to minimise adaption in cases of strong double talk.
525 /* Our e/c has improved echo by at least 24 dB (each factor of 2 is 6dB, 437 However this is not critical for the dual path algorithm.
526 so 2*2*2*2=16 is the same as 6+6+6+6=24dB) */ 438 */
527 if (ec->adaption_mode & ECHO_CAN_USE_CNG) 439 ec->factor = 0;
528 { 440 ec->shift = 0;
529 ec->cng_level = ec->Lbgn; 441 if ((ec->nonupdate_dwell == 0)) {
530 442 int P, logP, shift;
531 /* Very elementary comfort noise generation. Just random 443
532 numbers rolled off very vaguely Hoth-like. DR: This 444 /* Determine:
533 noise doesn't sound quite right to me - I suspect there 445
 534 are some overflow issues in the filtering as it's too 446 f = Beta * clean_bg_rx/P ------ (1)
535 "crackly". TODO: debug this, maybe just play noise at 447
536 high level or look at spectrum. 448 where P is the total power in the filter states.
537 */ 449
538 450 The Boffins have shown that if we obey (1) we converge
539 ec->cng_rndnum = 1664525U*ec->cng_rndnum + 1013904223U; 451 quickly and avoid instability.
540 ec->cng_filter = ((ec->cng_rndnum & 0xFFFF) - 32768 + 5*ec->cng_filter) >> 3; 452
541 ec->clean_nlp = (ec->cng_filter*ec->cng_level*8) >> 14; 453 The correct factor f must be in Q30, as this is the fixed
542 454 point format required by the lms_adapt_bg() function,
543 } 455 therefore the scaled version of (1) is:
544 else if (ec->adaption_mode & ECHO_CAN_USE_CLIP) 456
545 { 457 (2^30) * f = (2^30) * Beta * clean_bg_rx/P
546 /* This sounds much better than CNG */ 458 factor = (2^30) * Beta * clean_bg_rx/P ----- (2)
547 if (ec->clean_nlp > ec->Lbgn) 459
548 ec->clean_nlp = ec->Lbgn; 460 We have chosen Beta = 0.25 by experiment, so:
549 if (ec->clean_nlp < -ec->Lbgn) 461
550 ec->clean_nlp = -ec->Lbgn; 462 factor = (2^30) * (2^-2) * clean_bg_rx/P
463
464 (30 - 2 - log2(P))
465 factor = clean_bg_rx 2 ----- (3)
466
467 To avoid a divide we approximate log2(P) as top_bit(P),
468 which returns the position of the highest non-zero bit in
469 P. This approximation introduces an error as large as a
470 factor of 2, but the algorithm seems to handle it OK.
471
472 Come to think of it a divide may not be a big deal on a
473 modern DSP, so its probably worth checking out the cycles
474 for a divide versus a top_bit() implementation.
475 */
476
477 P = MIN_TX_POWER_FOR_ADAPTION + ec->Pstates;
478 logP = top_bit(P) + ec->log2taps;
479 shift = 30 - 2 - logP;
480 ec->shift = shift;
481
482 lms_adapt_bg(ec, clean_bg, shift);
551 } 483 }
552 else 484
553 { 485 /* very simple DTD to make sure we dont try and adapt with strong
554 /* just mute the residual, doesn't sound very good, used mainly 486 near end speech */
555 in G168 tests */ 487
556 ec->clean_nlp = 0; 488 ec->adapt = 0;
557 } 489 if ((ec->Lrx > MIN_RX_POWER_FOR_ADAPTION) && (ec->Lrx > ec->Ltx))
558 } 490 ec->nonupdate_dwell = DTD_HANGOVER;
559 else { 491 if (ec->nonupdate_dwell)
560 /* Background noise estimator. I tried a few algorithms 492 ec->nonupdate_dwell--;
561 here without much luck. This very simple one seems to 493
562 work best, we just average the level using a slow (1 sec 494 /* Transfer logic ------------------------------------------------------ */
563 time const) filter if the current level is less than a 495
564 (experimentally derived) constant. This means we dont 496 /* These conditions are from the dual path paper [1], I messed with
565 include high level signals like near end speech. When 497 them a bit to improve performance. */
566 combined with CNG or especially CLIP seems to work OK. 498
567 */ 499 if ((ec->adaption_mode & ECHO_CAN_USE_ADAPTION) &&
568 if (ec->Lclean < 40) { 500 (ec->nonupdate_dwell == 0) &&
569 ec->Lbgn_acc += abs(ec->clean) - ec->Lbgn; 501 (8 * ec->Lclean_bg <
570 ec->Lbgn = (ec->Lbgn_acc + (1<<11)) >> 12; 502 7 * ec->Lclean) /* (ec->Lclean_bg < 0.875*ec->Lclean) */ &&
571 } 503 (8 * ec->Lclean_bg <
572 } 504 ec->Ltx) /* (ec->Lclean_bg < 0.125*ec->Ltx) */ ) {
573 } 505 if (ec->cond_met == 6) {
574 506 /* BG filter has had better results for 6 consecutive samples */
575 /* Roll around the taps buffer */ 507 ec->adapt = 1;
576 if (ec->curr_pos <= 0) 508 memcpy(ec->fir_taps16[0], ec->fir_taps16[1],
577 ec->curr_pos = ec->taps; 509 ec->taps * sizeof(int16_t));
578 ec->curr_pos--; 510 } else
579 511 ec->cond_met++;
580 if (ec->adaption_mode & ECHO_CAN_DISABLE) 512 } else
581 ec->clean_nlp = rx; 513 ec->cond_met = 0;
582 514
583 /* Output scaled back up again to match input scaling */ 515 /* Non-Linear Processing --------------------------------------------------- */
584 516
585 return (int16_t) ec->clean_nlp << 1; 517 ec->clean_nlp = ec->clean;
518 if (ec->adaption_mode & ECHO_CAN_USE_NLP) {
519 /* Non-linear processor - a fancy way to say "zap small signals, to avoid
520 residual echo due to (uLaw/ALaw) non-linearity in the channel.". */
521
522 if ((16 * ec->Lclean < ec->Ltx)) {
523 /* Our e/c has improved echo by at least 24 dB (each factor of 2 is 6dB,
524 so 2*2*2*2=16 is the same as 6+6+6+6=24dB) */
525 if (ec->adaption_mode & ECHO_CAN_USE_CNG) {
526 ec->cng_level = ec->Lbgn;
527
528 /* Very elementary comfort noise generation. Just random
529 numbers rolled off very vaguely Hoth-like. DR: This
530 noise doesn't sound quite right to me - I suspect there
 531 are some overflow issues in the filtering as it's too
532 "crackly". TODO: debug this, maybe just play noise at
533 high level or look at spectrum.
534 */
535
536 ec->cng_rndnum =
537 1664525U * ec->cng_rndnum + 1013904223U;
538 ec->cng_filter =
539 ((ec->cng_rndnum & 0xFFFF) - 32768 +
540 5 * ec->cng_filter) >> 3;
541 ec->clean_nlp =
542 (ec->cng_filter * ec->cng_level * 8) >> 14;
543
544 } else if (ec->adaption_mode & ECHO_CAN_USE_CLIP) {
545 /* This sounds much better than CNG */
546 if (ec->clean_nlp > ec->Lbgn)
547 ec->clean_nlp = ec->Lbgn;
548 if (ec->clean_nlp < -ec->Lbgn)
549 ec->clean_nlp = -ec->Lbgn;
550 } else {
551 /* just mute the residual, doesn't sound very good, used mainly
552 in G168 tests */
553 ec->clean_nlp = 0;
554 }
555 } else {
556 /* Background noise estimator. I tried a few algorithms
557 here without much luck. This very simple one seems to
558 work best, we just average the level using a slow (1 sec
559 time const) filter if the current level is less than a
560 (experimentally derived) constant. This means we dont
561 include high level signals like near end speech. When
562 combined with CNG or especially CLIP seems to work OK.
563 */
564 if (ec->Lclean < 40) {
565 ec->Lbgn_acc += abs(ec->clean) - ec->Lbgn;
566 ec->Lbgn = (ec->Lbgn_acc + (1 << 11)) >> 12;
567 }
568 }
569 }
570
571 /* Roll around the taps buffer */
572 if (ec->curr_pos <= 0)
573 ec->curr_pos = ec->taps;
574 ec->curr_pos--;
575
576 if (ec->adaption_mode & ECHO_CAN_DISABLE)
577 ec->clean_nlp = rx;
578
579 /* Output scaled back up again to match input scaling */
580
581 return (int16_t) ec->clean_nlp << 1;
586} 582}
587 583
588/*- End of function --------------------------------------------------------*/ 584EXPORT_SYMBOL_GPL(oslec_update);
589 585
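For reference, the NLP gate in the function above (16 * Lclean < Ltx) corresponds to the quoted 24 dB figure:

	20 * log10(16) = 4 * 20 * log10(2) ≈ 4 * 6.02 dB ≈ 24 dB

so the non-linear processor only acts once the canceller is attenuating the echo by roughly 24 dB. The 1664525 / 1013904223 multiplier and increment used by the comfort-noise generator are the standard Numerical Recipes linear congruential constants.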
 590/* This function is separated from the echo canceller as it is usually called 586/* This function is separated from the echo canceller as it is usually called
591 as part of the tx process. See rx HP (DC blocking) filter above, it's 587 as part of the tx process. See rx HP (DC blocking) filter above, it's
@@ -608,25 +604,35 @@ int16_t echo_can_update(echo_can_state_t *ec, int16_t tx, int16_t rx)
608 precision, which noise shapes things, giving very clean DC removal. 604 precision, which noise shapes things, giving very clean DC removal.
609*/ 605*/
610 606
611int16_t echo_can_hpf_tx(echo_can_state_t *ec, int16_t tx) { 607int16_t oslec_hpf_tx(struct oslec_state * ec, int16_t tx)
612 int tmp, tmp1; 608{
609 int tmp, tmp1;
613 610
614 if (ec->adaption_mode & ECHO_CAN_USE_TX_HPF) { 611 if (ec->adaption_mode & ECHO_CAN_USE_TX_HPF) {
615 tmp = tx << 15; 612 tmp = tx << 15;
616#if 1 613#if 1
617 /* Make sure the gain of the HPF is 1.0. The first can still saturate a little under 614 /* Make sure the gain of the HPF is 1.0. The first can still saturate a little under
618 impulse conditions, and it might roll to 32768 and need clipping on sustained peak 615 impulse conditions, and it might roll to 32768 and need clipping on sustained peak
619 level signals. However, the scale of such clipping is small, and the error due to 616 level signals. However, the scale of such clipping is small, and the error due to
620 any saturation should not markedly affect the downstream processing. */ 617 any saturation should not markedly affect the downstream processing. */
621 tmp -= (tmp >> 4); 618 tmp -= (tmp >> 4);
622#endif 619#endif
623 ec->tx_1 += -(ec->tx_1>>DC_LOG2BETA) + tmp - ec->tx_2; 620 ec->tx_1 += -(ec->tx_1 >> DC_LOG2BETA) + tmp - ec->tx_2;
624 tmp1 = ec->tx_1 >> 15; 621 tmp1 = ec->tx_1 >> 15;
625 if (tmp1 > 32767) tmp1 = 32767; 622 if (tmp1 > 32767)
626 if (tmp1 < -32767) tmp1 = -32767; 623 tmp1 = 32767;
627 tx = tmp1; 624 if (tmp1 < -32767)
628 ec->tx_2 = tmp; 625 tmp1 = -32767;
629 } 626 tx = tmp1;
630 627 ec->tx_2 = tmp;
631 return tx; 628 }
629
630 return tx;
632} 631}
632
633EXPORT_SYMBOL_GPL(oslec_hpf_tx);
634
635MODULE_LICENSE("GPL");
636MODULE_AUTHOR("David Rowe");
637MODULE_DESCRIPTION("Open Source Line Echo Canceller");
638MODULE_VERSION("0.3.0");
diff --git a/drivers/staging/echo/echo.h b/drivers/staging/echo/echo.h
index 7a91b4390f3b..9fb9543c4f13 100644
--- a/drivers/staging/echo/echo.h
+++ b/drivers/staging/echo/echo.h
@@ -118,23 +118,14 @@ a minor burden.
118*/ 118*/
119 119
120#include "fir.h" 120#include "fir.h"
121 121#include "oslec.h"
122/* Mask bits for the adaption mode */
123#define ECHO_CAN_USE_ADAPTION 0x01
124#define ECHO_CAN_USE_NLP 0x02
125#define ECHO_CAN_USE_CNG 0x04
126#define ECHO_CAN_USE_CLIP 0x08
127#define ECHO_CAN_USE_TX_HPF 0x10
128#define ECHO_CAN_USE_RX_HPF 0x20
129#define ECHO_CAN_DISABLE 0x40
130 122
131/*! 123/*!
132 G.168 echo canceller descriptor. This defines the working state for a line 124 G.168 echo canceller descriptor. This defines the working state for a line
133 echo canceller. 125 echo canceller.
134*/ 126*/
135typedef struct 127struct oslec_state {
136{ 128 int16_t tx, rx;
137 int16_t tx,rx;
138 int16_t clean; 129 int16_t clean;
139 int16_t clean_nlp; 130 int16_t clean_nlp;
140 131
@@ -176,45 +167,6 @@ typedef struct
176 167
177 /* snapshot sample of coeffs used for development */ 168 /* snapshot sample of coeffs used for development */
178 int16_t *snapshot; 169 int16_t *snapshot;
179} echo_can_state_t; 170};
180
181/*! Create a voice echo canceller context.
182 \param len The length of the canceller, in samples.
183 \return The new canceller context, or NULL if the canceller could not be created.
184*/
185echo_can_state_t *echo_can_create(int len, int adaption_mode);
186
187/*! Free a voice echo canceller context.
188 \param ec The echo canceller context.
189*/
190void echo_can_free(echo_can_state_t *ec);
191
192/*! Flush (reinitialise) a voice echo canceller context.
193 \param ec The echo canceller context.
194*/
195void echo_can_flush(echo_can_state_t *ec);
196
197/*! Set the adaption mode of a voice echo canceller context.
198 \param ec The echo canceller context.
199 \param adapt The mode.
200*/
201void echo_can_adaption_mode(echo_can_state_t *ec, int adaption_mode);
202
203void echo_can_snapshot(echo_can_state_t *ec);
204
205/*! Process a sample through a voice echo canceller.
206 \param ec The echo canceller context.
207 \param tx The transmitted audio sample.
208 \param rx The received audio sample.
209 \return The clean (echo cancelled) received sample.
210*/
211int16_t echo_can_update(echo_can_state_t *ec, int16_t tx, int16_t rx);
212
213/*! Process to high pass filter the tx signal.
214 \param ec The echo canceller context.
215 \param tx The transmitted auio sample.
216 \return The HP filtered transmit sample, send this to your D/A.
217*/
218int16_t echo_can_hpf_tx(echo_can_state_t *ec, int16_t tx);
219 171
220#endif /* __ECHO_H */ 172#endif /* __ECHO_H */
diff --git a/drivers/staging/echo/fir.h b/drivers/staging/echo/fir.h
index e1bfc4994886..5645cb1b2f90 100644
--- a/drivers/staging/echo/fir.h
+++ b/drivers/staging/echo/fir.h
@@ -72,8 +72,7 @@
72 16 bit integer FIR descriptor. This defines the working state for a single 72 16 bit integer FIR descriptor. This defines the working state for a single
73 instance of an FIR filter using 16 bit integer coefficients. 73 instance of an FIR filter using 16 bit integer coefficients.
74*/ 74*/
75typedef struct 75typedef struct {
76{
77 int taps; 76 int taps;
78 int curr_pos; 77 int curr_pos;
79 const int16_t *coeffs; 78 const int16_t *coeffs;
@@ -85,8 +84,7 @@ typedef struct
85 instance of an FIR filter using 32 bit integer coefficients, and filtering 84 instance of an FIR filter using 32 bit integer coefficients, and filtering
86 16 bit integer data. 85 16 bit integer data.
87*/ 86*/
88typedef struct 87typedef struct {
89{
90 int taps; 88 int taps;
91 int curr_pos; 89 int curr_pos;
92 const int32_t *coeffs; 90 const int32_t *coeffs;
@@ -97,273 +95,201 @@ typedef struct
97 Floating point FIR descriptor. This defines the working state for a single 95 Floating point FIR descriptor. This defines the working state for a single
98 instance of an FIR filter using floating point coefficients and data. 96 instance of an FIR filter using floating point coefficients and data.
99*/ 97*/
100typedef struct 98typedef struct {
101{
102 int taps; 99 int taps;
103 int curr_pos; 100 int curr_pos;
104 const float *coeffs; 101 const float *coeffs;
105 float *history; 102 float *history;
106} fir_float_state_t; 103} fir_float_state_t;
107 104
108#ifdef __cplusplus 105static __inline__ const int16_t *fir16_create(fir16_state_t * fir,
109extern "C" { 106 const int16_t * coeffs, int taps)
110#endif
111
112static __inline__ const int16_t *fir16_create(fir16_state_t *fir,
113 const int16_t *coeffs,
114 int taps)
115{ 107{
116 fir->taps = taps; 108 fir->taps = taps;
117 fir->curr_pos = taps - 1; 109 fir->curr_pos = taps - 1;
118 fir->coeffs = coeffs; 110 fir->coeffs = coeffs;
119#if defined(USE_MMX) || defined(USE_SSE2) || defined(__BLACKFIN_ASM__) 111#if defined(USE_MMX) || defined(USE_SSE2) || defined(__bfin__)
120 if ((fir->history = malloc(2*taps*sizeof(int16_t)))) 112 fir->history = kcalloc(2 * taps, sizeof(int16_t), GFP_KERNEL);
121 memset(fir->history, 0, 2*taps*sizeof(int16_t));
122#else 113#else
123 if ((fir->history = (int16_t *) malloc(taps*sizeof(int16_t)))) 114 fir->history = kcalloc(taps, sizeof(int16_t), GFP_KERNEL);
124 memset(fir->history, 0, taps*sizeof(int16_t));
125#endif 115#endif
126 return fir->history; 116 return fir->history;
127} 117}
128/*- End of function --------------------------------------------------------*/
129 118
130static __inline__ void fir16_flush(fir16_state_t *fir) 119static __inline__ void fir16_flush(fir16_state_t * fir)
131{ 120{
132#if defined(USE_MMX) || defined(USE_SSE2) || defined(__BLACKFIN_ASM__) 121#if defined(USE_MMX) || defined(USE_SSE2) || defined(__bfin__)
133 memset(fir->history, 0, 2*fir->taps*sizeof(int16_t)); 122 memset(fir->history, 0, 2 * fir->taps * sizeof(int16_t));
134#else 123#else
135 memset(fir->history, 0, fir->taps*sizeof(int16_t)); 124 memset(fir->history, 0, fir->taps * sizeof(int16_t));
136#endif 125#endif
137} 126}
138/*- End of function --------------------------------------------------------*/
139 127
140static __inline__ void fir16_free(fir16_state_t *fir) 128static __inline__ void fir16_free(fir16_state_t * fir)
141{ 129{
142 free(fir->history); 130 kfree(fir->history);
143} 131}
144/*- End of function --------------------------------------------------------*/
145 132
146#ifdef __BLACKFIN_ASM__ 133#ifdef __bfin__
147static inline int32_t dot_asm(short *x, short *y, int len) 134static inline int32_t dot_asm(short *x, short *y, int len)
148{ 135{
149 int dot; 136 int dot;
150 137
151 len--; 138 len--;
152 139
153 __asm__ 140 __asm__("I0 = %1;\n\t"
154 ( 141 "I1 = %2;\n\t"
155 "I0 = %1;\n\t" 142 "A0 = 0;\n\t"
156 "I1 = %2;\n\t" 143 "R0.L = W[I0++] || R1.L = W[I1++];\n\t"
157 "A0 = 0;\n\t" 144 "LOOP dot%= LC0 = %3;\n\t"
158 "R0.L = W[I0++] || R1.L = W[I1++];\n\t" 145 "LOOP_BEGIN dot%=;\n\t"
159 "LOOP dot%= LC0 = %3;\n\t" 146 "A0 += R0.L * R1.L (IS) || R0.L = W[I0++] || R1.L = W[I1++];\n\t"
160 "LOOP_BEGIN dot%=;\n\t" 147 "LOOP_END dot%=;\n\t"
161 "A0 += R0.L * R1.L (IS) || R0.L = W[I0++] || R1.L = W[I1++];\n\t" 148 "A0 += R0.L*R1.L (IS);\n\t"
162 "LOOP_END dot%=;\n\t" 149 "R0 = A0;\n\t"
163 "A0 += R0.L*R1.L (IS);\n\t" 150 "%0 = R0;\n\t"
164 "R0 = A0;\n\t" 151 :"=&d"(dot)
165 "%0 = R0;\n\t" 152 :"a"(x), "a"(y), "a"(len)
166 : "=&d" (dot) 153 :"I0", "I1", "A1", "A0", "R0", "R1"
167 : "a" (x), "a" (y), "a" (len) 154 );
168 : "I0", "I1", "A1", "A0", "R0", "R1" 155
169 ); 156 return dot;
170
171 return dot;
172} 157}
173#endif 158#endif
174/*- End of function --------------------------------------------------------*/
175 159
176static __inline__ int16_t fir16(fir16_state_t *fir, int16_t sample) 160static __inline__ int16_t fir16(fir16_state_t * fir, int16_t sample)
177{ 161{
178 int32_t y; 162 int32_t y;
179#if defined(USE_MMX) 163#if defined(USE_MMX)
180 int i; 164 int i;
181 mmx_t *mmx_coeffs; 165 mmx_t *mmx_coeffs;
182 mmx_t *mmx_hist; 166 mmx_t *mmx_hist;
183 167
184 fir->history[fir->curr_pos] = sample; 168 fir->history[fir->curr_pos] = sample;
185 fir->history[fir->curr_pos + fir->taps] = sample; 169 fir->history[fir->curr_pos + fir->taps] = sample;
186 170
187 mmx_coeffs = (mmx_t *) fir->coeffs; 171 mmx_coeffs = (mmx_t *) fir->coeffs;
188 mmx_hist = (mmx_t *) &fir->history[fir->curr_pos]; 172 mmx_hist = (mmx_t *) & fir->history[fir->curr_pos];
189 i = fir->taps; 173 i = fir->taps;
190 pxor_r2r(mm4, mm4); 174 pxor_r2r(mm4, mm4);
191 /* 8 samples per iteration, so the filter must be a multiple of 8 long. */ 175 /* 8 samples per iteration, so the filter must be a multiple of 8 long. */
192 while (i > 0) 176 while (i > 0) {
193 { 177 movq_m2r(mmx_coeffs[0], mm0);
194 movq_m2r(mmx_coeffs[0], mm0); 178 movq_m2r(mmx_coeffs[1], mm2);
195 movq_m2r(mmx_coeffs[1], mm2); 179 movq_m2r(mmx_hist[0], mm1);
196 movq_m2r(mmx_hist[0], mm1); 180 movq_m2r(mmx_hist[1], mm3);
197 movq_m2r(mmx_hist[1], mm3); 181 mmx_coeffs += 2;
198 mmx_coeffs += 2; 182 mmx_hist += 2;
199 mmx_hist += 2; 183 pmaddwd_r2r(mm1, mm0);
200 pmaddwd_r2r(mm1, mm0); 184 pmaddwd_r2r(mm3, mm2);
201 pmaddwd_r2r(mm3, mm2); 185 paddd_r2r(mm0, mm4);
202 paddd_r2r(mm0, mm4); 186 paddd_r2r(mm2, mm4);
203 paddd_r2r(mm2, mm4); 187 i -= 8;
204 i -= 8; 188 }
205 } 189 movq_r2r(mm4, mm0);
206 movq_r2r(mm4, mm0); 190 psrlq_i2r(32, mm0);
207 psrlq_i2r(32, mm0); 191 paddd_r2r(mm0, mm4);
208 paddd_r2r(mm0, mm4); 192 movd_r2m(mm4, y);
209 movd_r2m(mm4, y); 193 emms();
210 emms();
211#elif defined(USE_SSE2) 194#elif defined(USE_SSE2)
212 int i; 195 int i;
213 xmm_t *xmm_coeffs; 196 xmm_t *xmm_coeffs;
214 xmm_t *xmm_hist; 197 xmm_t *xmm_hist;
215 198
216 fir->history[fir->curr_pos] = sample; 199 fir->history[fir->curr_pos] = sample;
217 fir->history[fir->curr_pos + fir->taps] = sample; 200 fir->history[fir->curr_pos + fir->taps] = sample;
218 201
219 xmm_coeffs = (xmm_t *) fir->coeffs; 202 xmm_coeffs = (xmm_t *) fir->coeffs;
220 xmm_hist = (xmm_t *) &fir->history[fir->curr_pos]; 203 xmm_hist = (xmm_t *) & fir->history[fir->curr_pos];
221 i = fir->taps; 204 i = fir->taps;
222 pxor_r2r(xmm4, xmm4); 205 pxor_r2r(xmm4, xmm4);
223 /* 16 samples per iteration, so the filter must be a multiple of 16 long. */ 206 /* 16 samples per iteration, so the filter must be a multiple of 16 long. */
224 while (i > 0) 207 while (i > 0) {
225 { 208 movdqu_m2r(xmm_coeffs[0], xmm0);
226 movdqu_m2r(xmm_coeffs[0], xmm0); 209 movdqu_m2r(xmm_coeffs[1], xmm2);
227 movdqu_m2r(xmm_coeffs[1], xmm2); 210 movdqu_m2r(xmm_hist[0], xmm1);
228 movdqu_m2r(xmm_hist[0], xmm1); 211 movdqu_m2r(xmm_hist[1], xmm3);
229 movdqu_m2r(xmm_hist[1], xmm3); 212 xmm_coeffs += 2;
230 xmm_coeffs += 2; 213 xmm_hist += 2;
231 xmm_hist += 2; 214 pmaddwd_r2r(xmm1, xmm0);
232 pmaddwd_r2r(xmm1, xmm0); 215 pmaddwd_r2r(xmm3, xmm2);
233 pmaddwd_r2r(xmm3, xmm2); 216 paddd_r2r(xmm0, xmm4);
234 paddd_r2r(xmm0, xmm4); 217 paddd_r2r(xmm2, xmm4);
235 paddd_r2r(xmm2, xmm4); 218 i -= 16;
236 i -= 16; 219 }
237 } 220 movdqa_r2r(xmm4, xmm0);
238 movdqa_r2r(xmm4, xmm0); 221 psrldq_i2r(8, xmm0);
239 psrldq_i2r(8, xmm0); 222 paddd_r2r(xmm0, xmm4);
240 paddd_r2r(xmm0, xmm4); 223 movdqa_r2r(xmm4, xmm0);
241 movdqa_r2r(xmm4, xmm0); 224 psrldq_i2r(4, xmm0);
242 psrldq_i2r(4, xmm0); 225 paddd_r2r(xmm0, xmm4);
243 paddd_r2r(xmm0, xmm4); 226 movd_r2m(xmm4, y);
244 movd_r2m(xmm4, y); 227#elif defined(__bfin__)
245#elif defined(__BLACKFIN_ASM__) 228 fir->history[fir->curr_pos] = sample;
246 fir->history[fir->curr_pos] = sample; 229 fir->history[fir->curr_pos + fir->taps] = sample;
247 fir->history[fir->curr_pos + fir->taps] = sample; 230 y = dot_asm((int16_t *) fir->coeffs, &fir->history[fir->curr_pos],
248 y = dot_asm((int16_t*)fir->coeffs, &fir->history[fir->curr_pos], fir->taps); 231 fir->taps);
249#else 232#else
250 int i; 233 int i;
251 int offset1; 234 int offset1;
252 int offset2; 235 int offset2;
253 236
254 fir->history[fir->curr_pos] = sample; 237 fir->history[fir->curr_pos] = sample;
255 238
256 offset2 = fir->curr_pos; 239 offset2 = fir->curr_pos;
257 offset1 = fir->taps - offset2; 240 offset1 = fir->taps - offset2;
258 y = 0; 241 y = 0;
259 for (i = fir->taps - 1; i >= offset1; i--) 242 for (i = fir->taps - 1; i >= offset1; i--)
260 y += fir->coeffs[i]*fir->history[i - offset1]; 243 y += fir->coeffs[i] * fir->history[i - offset1];
261 for ( ; i >= 0; i--) 244 for (; i >= 0; i--)
262 y += fir->coeffs[i]*fir->history[i + offset2]; 245 y += fir->coeffs[i] * fir->history[i + offset2];
263#endif 246#endif
264 if (fir->curr_pos <= 0) 247 if (fir->curr_pos <= 0)
265 fir->curr_pos = fir->taps; 248 fir->curr_pos = fir->taps;
266 fir->curr_pos--; 249 fir->curr_pos--;
267 return (int16_t) (y >> 15); 250 return (int16_t) (y >> 15);
268}
269/*- End of function --------------------------------------------------------*/
270
271static __inline__ const int16_t *fir32_create(fir32_state_t *fir,
272 const int32_t *coeffs,
273 int taps)
274{
275 fir->taps = taps;
276 fir->curr_pos = taps - 1;
277 fir->coeffs = coeffs;
278 fir->history = (int16_t *) malloc(taps*sizeof(int16_t));
279 if (fir->history)
280 memset(fir->history, '\0', taps*sizeof(int16_t));
281 return fir->history;
282}
283/*- End of function --------------------------------------------------------*/
284
285static __inline__ void fir32_flush(fir32_state_t *fir)
286{
287 memset(fir->history, 0, fir->taps*sizeof(int16_t));
288} 251}
289/*- End of function --------------------------------------------------------*/
290 252
291static __inline__ void fir32_free(fir32_state_t *fir) 253static __inline__ const int16_t *fir32_create(fir32_state_t * fir,
254 const int32_t * coeffs, int taps)
292{ 255{
293 free(fir->history); 256 fir->taps = taps;
294} 257 fir->curr_pos = taps - 1;
295/*- End of function --------------------------------------------------------*/ 258 fir->coeffs = coeffs;
296 259 fir->history = kcalloc(taps, sizeof(int16_t), GFP_KERNEL);
297static __inline__ int16_t fir32(fir32_state_t *fir, int16_t sample) 260 return fir->history;
298{
299 int i;
300 int32_t y;
301 int offset1;
302 int offset2;
303
304 fir->history[fir->curr_pos] = sample;
305 offset2 = fir->curr_pos;
306 offset1 = fir->taps - offset2;
307 y = 0;
308 for (i = fir->taps - 1; i >= offset1; i--)
309 y += fir->coeffs[i]*fir->history[i - offset1];
310 for ( ; i >= 0; i--)
311 y += fir->coeffs[i]*fir->history[i + offset2];
312 if (fir->curr_pos <= 0)
313 fir->curr_pos = fir->taps;
314 fir->curr_pos--;
315 return (int16_t) (y >> 15);
316} 261}
317/*- End of function --------------------------------------------------------*/
318 262
319#ifndef __KERNEL__ 263static __inline__ void fir32_flush(fir32_state_t * fir)
320static __inline__ const float *fir_float_create(fir_float_state_t *fir,
321 const float *coeffs,
322 int taps)
323{ 264{
324 fir->taps = taps; 265 memset(fir->history, 0, fir->taps * sizeof(int16_t));
325 fir->curr_pos = taps - 1;
326 fir->coeffs = coeffs;
327 fir->history = (float *) malloc(taps*sizeof(float));
328 if (fir->history)
329 memset(fir->history, '\0', taps*sizeof(float));
330 return fir->history;
331} 266}
332/*- End of function --------------------------------------------------------*/
333 267
334static __inline__ void fir_float_free(fir_float_state_t *fir) 268static __inline__ void fir32_free(fir32_state_t * fir)
335{ 269{
336 free(fir->history); 270 kfree(fir->history);
337} 271}
338/*- End of function --------------------------------------------------------*/
339 272
340static __inline__ int16_t fir_float(fir_float_state_t *fir, int16_t sample) 273static __inline__ int16_t fir32(fir32_state_t * fir, int16_t sample)
341{ 274{
342 int i; 275 int i;
343 float y; 276 int32_t y;
344 int offset1; 277 int offset1;
345 int offset2; 278 int offset2;
346 279
347 fir->history[fir->curr_pos] = sample; 280 fir->history[fir->curr_pos] = sample;
348 281 offset2 = fir->curr_pos;
349 offset2 = fir->curr_pos; 282 offset1 = fir->taps - offset2;
350 offset1 = fir->taps - offset2; 283 y = 0;
351 y = 0; 284 for (i = fir->taps - 1; i >= offset1; i--)
352 for (i = fir->taps - 1; i >= offset1; i--) 285 y += fir->coeffs[i] * fir->history[i - offset1];
353 y += fir->coeffs[i]*fir->history[i - offset1]; 286 for (; i >= 0; i--)
354 for ( ; i >= 0; i--) 287 y += fir->coeffs[i] * fir->history[i + offset2];
355 y += fir->coeffs[i]*fir->history[i + offset2]; 288 if (fir->curr_pos <= 0)
356 if (fir->curr_pos <= 0) 289 fir->curr_pos = fir->taps;
357 fir->curr_pos = fir->taps; 290 fir->curr_pos--;
358 fir->curr_pos--; 291 return (int16_t) (y >> 15);
359 return (int16_t) y;
360} 292}
361/*- End of function --------------------------------------------------------*/
362#endif
363
364#ifdef __cplusplus
365}
366#endif
367 293
368#endif 294#endif
369/*- End of file ------------------------------------------------------------*/ 295/*- End of file ------------------------------------------------------------*/
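For readers skipping the SIMD paths, the portable branch of fir16()/fir32() above is an ordinary FIR over a circular history buffer, with the convolution split at the wrap point so no per-tap modulo is needed. A self-contained sketch of that pattern (illustrative names, not the patch's code):

#include <stdint.h>

struct fir {
	int taps;
	int curr_pos;		/* index of the newest sample */
	const int16_t *coeffs;	/* Q15 coefficients */
	int16_t *history;
};

static int16_t fir_run(struct fir *f, int16_t sample)
{
	int32_t y = 0;
	int i, split;

	f->history[f->curr_pos] = sample;

	/* the window wraps around the end of history[]; splitting the loop
	 * at the wrap point avoids taking a modulo for every tap */
	split = f->taps - f->curr_pos;
	for (i = f->taps - 1; i >= split; i--)
		y += f->coeffs[i] * f->history[i - split];
	for (; i >= 0; i--)
		y += f->coeffs[i] * f->history[i + f->curr_pos];

	if (f->curr_pos <= 0)
		f->curr_pos = f->taps;
	f->curr_pos--;

	return (int16_t)(y >> 15);	/* scale the Q15 products back to samples */
}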
diff --git a/drivers/staging/echo/mmx.h b/drivers/staging/echo/mmx.h
index b5a3964865b6..35412efe61ce 100644
--- a/drivers/staging/echo/mmx.h
+++ b/drivers/staging/echo/mmx.h
@@ -27,24 +27,23 @@
27 * values by ULL, lest they be truncated by the compiler) 27 * values by ULL, lest they be truncated by the compiler)
28 */ 28 */
29 29
30typedef union { 30typedef union {
31 long long q; /* Quadword (64-bit) value */ 31 long long q; /* Quadword (64-bit) value */
32 unsigned long long uq; /* Unsigned Quadword */ 32 unsigned long long uq; /* Unsigned Quadword */
33 int d[2]; /* 2 Doubleword (32-bit) values */ 33 int d[2]; /* 2 Doubleword (32-bit) values */
34 unsigned int ud[2]; /* 2 Unsigned Doubleword */ 34 unsigned int ud[2]; /* 2 Unsigned Doubleword */
35 short w[4]; /* 4 Word (16-bit) values */ 35 short w[4]; /* 4 Word (16-bit) values */
36 unsigned short uw[4]; /* 4 Unsigned Word */ 36 unsigned short uw[4]; /* 4 Unsigned Word */
37 char b[8]; /* 8 Byte (8-bit) values */ 37 char b[8]; /* 8 Byte (8-bit) values */
38 unsigned char ub[8]; /* 8 Unsigned Byte */ 38 unsigned char ub[8]; /* 8 Unsigned Byte */
39 float s[2]; /* Single-precision (32-bit) value */ 39 float s[2]; /* Single-precision (32-bit) value */
40} mmx_t; /* On an 8-byte (64-bit) boundary */ 40} mmx_t; /* On an 8-byte (64-bit) boundary */
41 41
42/* SSE registers */ 42/* SSE registers */
43typedef union { 43typedef union {
44 char b[16]; 44 char b[16];
45} xmm_t; 45} xmm_t;
46 46
47
48#define mmx_i2r(op,imm,reg) \ 47#define mmx_i2r(op,imm,reg) \
49 __asm__ __volatile__ (#op " %0, %%" #reg \ 48 __asm__ __volatile__ (#op " %0, %%" #reg \
50 : /* nothing */ \ 49 : /* nothing */ \
@@ -63,7 +62,6 @@ typedef union {
63#define mmx_r2r(op,regs,regd) \ 62#define mmx_r2r(op,regs,regd) \
64 __asm__ __volatile__ (#op " %" #regs ", %" #regd) 63 __asm__ __volatile__ (#op " %" #regs ", %" #regd)
65 64
66
67#define emms() __asm__ __volatile__ ("emms") 65#define emms() __asm__ __volatile__ ("emms")
68 66
69#define movd_m2r(var,reg) mmx_m2r (movd, var, reg) 67#define movd_m2r(var,reg) mmx_m2r (movd, var, reg)
@@ -192,16 +190,13 @@ typedef union {
192#define pxor_m2r(var,reg) mmx_m2r (pxor, var, reg) 190#define pxor_m2r(var,reg) mmx_m2r (pxor, var, reg)
193#define pxor_r2r(regs,regd) mmx_r2r (pxor, regs, regd) 191#define pxor_r2r(regs,regd) mmx_r2r (pxor, regs, regd)
194 192
195
196/* 3DNOW extensions */ 193/* 3DNOW extensions */
197 194
198#define pavgusb_m2r(var,reg) mmx_m2r (pavgusb, var, reg) 195#define pavgusb_m2r(var,reg) mmx_m2r (pavgusb, var, reg)
199#define pavgusb_r2r(regs,regd) mmx_r2r (pavgusb, regs, regd) 196#define pavgusb_r2r(regs,regd) mmx_r2r (pavgusb, regs, regd)
200 197
201
202/* AMD MMX extensions - also available in intel SSE */ 198/* AMD MMX extensions - also available in intel SSE */
203 199
204
205#define mmx_m2ri(op,mem,reg,imm) \ 200#define mmx_m2ri(op,mem,reg,imm) \
206 __asm__ __volatile__ (#op " %1, %0, %%" #reg \ 201 __asm__ __volatile__ (#op " %1, %0, %%" #reg \
207 : /* nothing */ \ 202 : /* nothing */ \
@@ -216,7 +211,6 @@ typedef union {
216 : /* nothing */ \ 211 : /* nothing */ \
217 : "m" (mem)) 212 : "m" (mem))
218 213
219
220#define maskmovq(regs,maskreg) mmx_r2ri (maskmovq, regs, maskreg) 214#define maskmovq(regs,maskreg) mmx_r2ri (maskmovq, regs, maskreg)
221 215
222#define movntq_r2m(mmreg,var) mmx_r2m (movntq, mmreg, var) 216#define movntq_r2m(mmreg,var) mmx_r2m (movntq, mmreg, var)
@@ -284,5 +278,4 @@ typedef union {
284#define punpcklqdq_r2r(regs,regd) mmx_r2r (punpcklqdq, regs, regd) 278#define punpcklqdq_r2r(regs,regd) mmx_r2r (punpcklqdq, regs, regd)
285#define punpckhqdq_r2r(regs,regd) mmx_r2r (punpckhqdq, regs, regd) 279#define punpckhqdq_r2r(regs,regd) mmx_r2r (punpckhqdq, regs, regd)
286 280
287
288#endif /* AVCODEC_I386MMX_H */ 281#endif /* AVCODEC_I386MMX_H */
diff --git a/drivers/staging/echo/oslec.h b/drivers/staging/echo/oslec.h
new file mode 100644
index 000000000000..bad852328a2f
--- /dev/null
+++ b/drivers/staging/echo/oslec.h
@@ -0,0 +1,86 @@
1/*
2 * OSLEC - A line echo canceller. This code is being developed
3 * against and partially complies with G168. Using code from SpanDSP
4 *
5 * Written by Steve Underwood <steveu@coppice.org>
6 * and David Rowe <david_at_rowetel_dot_com>
7 *
8 * Copyright (C) 2001 Steve Underwood and 2007-2008 David Rowe
9 *
10 * All rights reserved.
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2, as
14 * published by the Free Software Foundation.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 */
26
27#ifndef __OSLEC_H
28#define __OSLEC_H
29
30/* TODO: document interface */
31
32/* Mask bits for the adaption mode */
33#define ECHO_CAN_USE_ADAPTION 0x01
34#define ECHO_CAN_USE_NLP 0x02
35#define ECHO_CAN_USE_CNG 0x04
36#define ECHO_CAN_USE_CLIP 0x08
37#define ECHO_CAN_USE_TX_HPF 0x10
38#define ECHO_CAN_USE_RX_HPF 0x20
39#define ECHO_CAN_DISABLE 0x40
40
41/*!
42 G.168 echo canceller descriptor. This defines the working state for a line
43 echo canceller.
44*/
45struct oslec_state;
46
47/*! Create a voice echo canceller context.
48 \param len The length of the canceller, in samples.
49 \return The new canceller context, or NULL if the canceller could not be created.
50*/
51struct oslec_state *oslec_create(int len, int adaption_mode);
52
53/*! Free a voice echo canceller context.
54 \param ec The echo canceller context.
55*/
56void oslec_free(struct oslec_state *ec);
57
58/*! Flush (reinitialise) a voice echo canceller context.
59 \param ec The echo canceller context.
60*/
61void oslec_flush(struct oslec_state *ec);
62
63/*! Set the adaption mode of a voice echo canceller context.
64 \param ec The echo canceller context.
65 \param adapt The mode.
66*/
67void oslec_adaption_mode(struct oslec_state *ec, int adaption_mode);
68
69void oslec_snapshot(struct oslec_state *ec);
70
71/*! Process a sample through a voice echo canceller.
72 \param ec The echo canceller context.
73 \param tx The transmitted audio sample.
74 \param rx The received audio sample.
75 \return The clean (echo cancelled) received sample.
76*/
77int16_t oslec_update(struct oslec_state *ec, int16_t tx, int16_t rx);
78
79/*! Process to high pass filter the tx signal.
80 \param ec The echo canceller context.
 81 \param tx The transmitted audio sample.
82 \return The HP filtered transmit sample, send this to your D/A.
83*/
84int16_t oslec_hpf_tx(struct oslec_state *ec, int16_t tx);
85
86#endif /* __OSLEC_H */
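A minimal sketch of how a caller might drive this interface (the frame length, tap count and error handling are illustrative; only the oslec_* prototypes and the adaption-mode flags come from this header):

#include <linux/errno.h>
#include <linux/types.h>
#include "oslec.h"

static struct oslec_state *ec;

static int ec_setup(void)
{
	/* 128-tap canceller with adaption, NLP/clipping and both HPFs */
	ec = oslec_create(128, ECHO_CAN_USE_ADAPTION | ECHO_CAN_USE_NLP |
			       ECHO_CAN_USE_CLIP | ECHO_CAN_USE_TX_HPF |
			       ECHO_CAN_USE_RX_HPF);
	return ec ? 0 : -ENOMEM;
}

static void ec_process(int16_t *tx, int16_t *rx, int len)
{
	int i;

	for (i = 0; i < len; i++) {
		/* echo-cancel the receive sample against the transmit reference */
		rx[i] = oslec_update(ec, tx[i], rx[i]);
		/* DC-block the transmit sample on its way to the D/A */
		tx[i] = oslec_hpf_tx(ec, tx[i]);
	}
}

static void ec_teardown(void)
{
	oslec_free(ec);
}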
diff --git a/drivers/staging/et131x/et1310_phy.c b/drivers/staging/et131x/et1310_phy.c
index 6c4fa54419ea..9dd6dfd9a033 100644
--- a/drivers/staging/et131x/et1310_phy.c
+++ b/drivers/staging/et131x/et1310_phy.c
@@ -84,7 +84,6 @@
84#include <linux/if_arp.h> 84#include <linux/if_arp.h>
85#include <linux/ioport.h> 85#include <linux/ioport.h>
86#include <linux/random.h> 86#include <linux/random.h>
87#include <linux/delay.h>
88 87
89#include "et1310_phy.h" 88#include "et1310_phy.h"
90#include "et1310_pm.h" 89#include "et1310_pm.h"
@@ -95,7 +94,6 @@
95#include "et131x_initpci.h" 94#include "et131x_initpci.h"
96 95
97#include "et1310_address_map.h" 96#include "et1310_address_map.h"
98#include "et1310_jagcore.h"
99#include "et1310_tx.h" 97#include "et1310_tx.h"
100#include "et1310_rx.h" 98#include "et1310_rx.h"
101#include "et1310_mac.h" 99#include "et1310_mac.h"
diff --git a/drivers/staging/et131x/et131x_debug.c b/drivers/staging/et131x/et131x_debug.c
index 9ee5bce92c27..d1dd46e0a9c8 100644
--- a/drivers/staging/et131x/et131x_debug.c
+++ b/drivers/staging/et131x/et131x_debug.c
@@ -97,7 +97,6 @@
97#include "et131x_isr.h" 97#include "et131x_isr.h"
98 98
99#include "et1310_address_map.h" 99#include "et1310_address_map.h"
100#include "et1310_jagcore.h"
101#include "et1310_tx.h" 100#include "et1310_tx.h"
102#include "et1310_rx.h" 101#include "et1310_rx.h"
103#include "et1310_mac.h" 102#include "et1310_mac.h"
diff --git a/drivers/staging/et131x/et131x_initpci.c b/drivers/staging/et131x/et131x_initpci.c
index 4c6f171f5b7c..a18c499d0ae0 100644
--- a/drivers/staging/et131x/et131x_initpci.c
+++ b/drivers/staging/et131x/et131x_initpci.c
@@ -97,7 +97,6 @@
97#include "et131x_isr.h" 97#include "et131x_isr.h"
98 98
99#include "et1310_address_map.h" 99#include "et1310_address_map.h"
100#include "et1310_jagcore.h"
101#include "et1310_tx.h" 100#include "et1310_tx.h"
102#include "et1310_rx.h" 101#include "et1310_rx.h"
103#include "et1310_mac.h" 102#include "et1310_mac.h"
diff --git a/drivers/staging/go7007/go7007-driver.c b/drivers/staging/go7007/go7007-driver.c
index 81ae4b0fa890..e4ead96679c8 100644
--- a/drivers/staging/go7007/go7007-driver.c
+++ b/drivers/staging/go7007/go7007-driver.c
@@ -16,7 +16,6 @@
16 */ 16 */
17 17
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/version.h>
20#include <linux/init.h> 19#include <linux/init.h>
21#include <linux/delay.h> 20#include <linux/delay.h>
22#include <linux/sched.h> 21#include <linux/sched.h>
diff --git a/drivers/staging/go7007/go7007-fw.c b/drivers/staging/go7007/go7007-fw.c
index c2aea1020b0d..a0e17b0e0ce3 100644
--- a/drivers/staging/go7007/go7007-fw.c
+++ b/drivers/staging/go7007/go7007-fw.c
@@ -26,7 +26,6 @@
26 26
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/init.h> 28#include <linux/init.h>
29#include <linux/version.h>
30#include <linux/time.h> 29#include <linux/time.h>
31#include <linux/mm.h> 30#include <linux/mm.h>
32#include <linux/device.h> 31#include <linux/device.h>
diff --git a/drivers/staging/go7007/go7007-i2c.c b/drivers/staging/go7007/go7007-i2c.c
index 10baae3dade6..cd55b76eabc7 100644
--- a/drivers/staging/go7007/go7007-i2c.c
+++ b/drivers/staging/go7007/go7007-i2c.c
@@ -15,7 +15,6 @@
15 * Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 15 * Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
16 */ 16 */
17 17
18#include <linux/version.h>
19#include <linux/module.h> 18#include <linux/module.h>
20#include <linux/init.h> 19#include <linux/init.h>
21#include <linux/delay.h> 20#include <linux/delay.h>
diff --git a/drivers/staging/go7007/go7007-usb.c b/drivers/staging/go7007/go7007-usb.c
index d4ed6d2b715f..3f5ee3424e72 100644
--- a/drivers/staging/go7007/go7007-usb.c
+++ b/drivers/staging/go7007/go7007-usb.c
@@ -16,7 +16,6 @@
16 */ 16 */
17 17
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/version.h>
20#include <linux/kernel.h> 19#include <linux/kernel.h>
21#include <linux/init.h> 20#include <linux/init.h>
22#include <linux/wait.h> 21#include <linux/wait.h>
diff --git a/drivers/staging/go7007/snd-go7007.c b/drivers/staging/go7007/snd-go7007.c
index 382740c405ff..a7de401f61ab 100644
--- a/drivers/staging/go7007/snd-go7007.c
+++ b/drivers/staging/go7007/snd-go7007.c
@@ -17,7 +17,6 @@
17 17
18#include <linux/kernel.h> 18#include <linux/kernel.h>
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/version.h>
21#include <linux/moduleparam.h> 20#include <linux/moduleparam.h>
22#include <linux/init.h> 21#include <linux/init.h>
23#include <linux/spinlock.h> 22#include <linux/spinlock.h>
diff --git a/drivers/staging/go7007/wis-ov7640.c b/drivers/staging/go7007/wis-ov7640.c
index f5f11e927af3..2f9efca04606 100644
--- a/drivers/staging/go7007/wis-ov7640.c
+++ b/drivers/staging/go7007/wis-ov7640.c
@@ -17,7 +17,6 @@
17 17
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/version.h>
21#include <linux/i2c.h> 20#include <linux/i2c.h>
22#include <linux/videodev2.h> 21#include <linux/videodev2.h>
23 22
diff --git a/drivers/staging/go7007/wis-saa7113.c b/drivers/staging/go7007/wis-saa7113.c
index c1aff1b923a0..11689723945e 100644
--- a/drivers/staging/go7007/wis-saa7113.c
+++ b/drivers/staging/go7007/wis-saa7113.c
@@ -17,7 +17,6 @@
17 17
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/version.h>
21#include <linux/i2c.h> 20#include <linux/i2c.h>
22#include <linux/videodev2.h> 21#include <linux/videodev2.h>
23#include <linux/ioctl.h> 22#include <linux/ioctl.h>
diff --git a/drivers/staging/go7007/wis-saa7115.c b/drivers/staging/go7007/wis-saa7115.c
index 5c94c883b312..59417a7174d7 100644
--- a/drivers/staging/go7007/wis-saa7115.c
+++ b/drivers/staging/go7007/wis-saa7115.c
@@ -17,7 +17,6 @@
17 17
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/version.h>
21#include <linux/i2c.h> 20#include <linux/i2c.h>
22#include <linux/videodev2.h> 21#include <linux/videodev2.h>
23#include <linux/ioctl.h> 22#include <linux/ioctl.h>
diff --git a/drivers/staging/go7007/wis-sony-tuner.c b/drivers/staging/go7007/wis-sony-tuner.c
index 5997fb479459..5a91ee409a7c 100644
--- a/drivers/staging/go7007/wis-sony-tuner.c
+++ b/drivers/staging/go7007/wis-sony-tuner.c
@@ -17,7 +17,6 @@
17 17
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/version.h>
21#include <linux/i2c.h> 20#include <linux/i2c.h>
22#include <linux/videodev2.h> 21#include <linux/videodev2.h>
23#include <media/tuner.h> 22#include <media/tuner.h>
diff --git a/drivers/staging/go7007/wis-tw2804.c b/drivers/staging/go7007/wis-tw2804.c
index 27fe4d0d4ed6..57b8f2b1caa3 100644
--- a/drivers/staging/go7007/wis-tw2804.c
+++ b/drivers/staging/go7007/wis-tw2804.c
@@ -17,7 +17,6 @@
17 17
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/version.h>
21#include <linux/i2c.h> 20#include <linux/i2c.h>
22#include <linux/videodev2.h> 21#include <linux/videodev2.h>
23#include <linux/ioctl.h> 22#include <linux/ioctl.h>
diff --git a/drivers/staging/go7007/wis-tw9903.c b/drivers/staging/go7007/wis-tw9903.c
index d8e41968022e..40627b282cb4 100644
--- a/drivers/staging/go7007/wis-tw9903.c
+++ b/drivers/staging/go7007/wis-tw9903.c
@@ -17,7 +17,6 @@
17 17
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/version.h>
21#include <linux/i2c.h> 20#include <linux/i2c.h>
22#include <linux/videodev2.h> 21#include <linux/videodev2.h>
23#include <linux/ioctl.h> 22#include <linux/ioctl.h>
diff --git a/drivers/staging/go7007/wis-uda1342.c b/drivers/staging/go7007/wis-uda1342.c
index a0894e3cb8c7..555645c0cc1a 100644
--- a/drivers/staging/go7007/wis-uda1342.c
+++ b/drivers/staging/go7007/wis-uda1342.c
@@ -17,7 +17,6 @@
17 17
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/version.h>
21#include <linux/i2c.h> 20#include <linux/i2c.h>
22#include <linux/videodev2.h> 21#include <linux/videodev2.h>
23#include <media/tvaudio.h> 22#include <media/tvaudio.h>
diff --git a/drivers/staging/me4000/me4000.c b/drivers/staging/me4000/me4000.c
index 862dd7ffb5c0..0394e2709278 100644
--- a/drivers/staging/me4000/me4000.c
+++ b/drivers/staging/me4000/me4000.c
@@ -25,24 +25,20 @@
25#include <linux/sched.h> 25#include <linux/sched.h>
26#include <linux/interrupt.h> 26#include <linux/interrupt.h>
27#include <linux/pci.h> 27#include <linux/pci.h>
28#include <asm/io.h>
29#include <asm/system.h>
30#include <asm/uaccess.h>
31#include <linux/errno.h> 28#include <linux/errno.h>
32#include <linux/delay.h> 29#include <linux/delay.h>
33#include <linux/fs.h>
34#include <linux/mm.h> 30#include <linux/mm.h>
35#include <linux/unistd.h> 31#include <linux/unistd.h>
36#include <linux/list.h> 32#include <linux/list.h>
37#include <linux/proc_fs.h> 33#include <linux/proc_fs.h>
38 34#include <linux/types.h>
39#include <linux/poll.h> 35#include <linux/poll.h>
40#include <linux/vmalloc.h> 36#include <linux/vmalloc.h>
37#include <linux/slab.h>
41#include <asm/pgtable.h> 38#include <asm/pgtable.h>
42#include <asm/uaccess.h> 39#include <asm/uaccess.h>
43#include <linux/types.h> 40#include <asm/io.h>
44 41#include <asm/system.h>
45#include <linux/slab.h>
46 42
47/* Include-File for the Meilhaus ME-4000 I/O board */ 43/* Include-File for the Meilhaus ME-4000 I/O board */
48#include "me4000.h" 44#include "me4000.h"
@@ -57,14 +53,14 @@ MODULE_SUPPORTED_DEVICE("Meilhaus ME-4000 Multi I/O boards");
57MODULE_LICENSE("GPL"); 53MODULE_LICENSE("GPL");
58 54
59/* Board specific data are kept in a global list */ 55/* Board specific data are kept in a global list */
60LIST_HEAD(me4000_board_info_list); 56static LIST_HEAD(me4000_board_info_list);
61 57
62/* Major Device Numbers. 0 means to get it automatically from the System */ 58/* Major Device Numbers. 0 means to get it automatically from the System */
63static int me4000_ao_major_driver_no = 0; 59static int me4000_ao_major_driver_no;
64static int me4000_ai_major_driver_no = 0; 60static int me4000_ai_major_driver_no;
65static int me4000_dio_major_driver_no = 0; 61static int me4000_dio_major_driver_no;
66static int me4000_cnt_major_driver_no = 0; 62static int me4000_cnt_major_driver_no;
67static int me4000_ext_int_major_driver_no = 0; 63static int me4000_ext_int_major_driver_no;
68 64
69/* Let the user specify a custom major driver number */ 65/* Let the user specify a custom major driver number */
70module_param(me4000_ao_major_driver_no, int, 0); 66module_param(me4000_ao_major_driver_no, int, 0);
@@ -88,36 +84,22 @@ MODULE_PARM_DESC(me4000_ext_int_major_driver_no,
88 "Major driver number for external interrupt (default 0)"); 84 "Major driver number for external interrupt (default 0)");
89 85
90/*----------------------------------------------------------------------------- 86/*-----------------------------------------------------------------------------
91 Module stuff
92 ---------------------------------------------------------------------------*/
93int init_module(void);
94void cleanup_module(void);
95
96/*-----------------------------------------------------------------------------
97 Board detection and initialization 87 Board detection and initialization
98 ---------------------------------------------------------------------------*/ 88 ---------------------------------------------------------------------------*/
99static int me4000_probe(struct pci_dev *dev, const struct pci_device_id *id); 89static int me4000_probe(struct pci_dev *dev, const struct pci_device_id *id);
100static int me4000_xilinx_download(me4000_info_t *); 90static int me4000_xilinx_download(struct me4000_info *);
101static int me4000_reset_board(me4000_info_t *); 91static int me4000_reset_board(struct me4000_info *);
102 92
103static void clear_board_info_list(void); 93static void clear_board_info_list(void);
104static int get_registers(struct pci_dev *dev, me4000_info_t * info); 94static void release_ao_contexts(struct me4000_info *board_info);
105static int init_board_info(struct pci_dev *dev, me4000_info_t * board_info);
106static int alloc_ao_contexts(me4000_info_t * info);
107static void release_ao_contexts(me4000_info_t * board_info);
108static int alloc_ai_context(me4000_info_t * info);
109static int alloc_dio_context(me4000_info_t * info);
110static int alloc_cnt_context(me4000_info_t * info);
111static int alloc_ext_int_context(me4000_info_t * info);
112
113/*----------------------------------------------------------------------------- 95/*-----------------------------------------------------------------------------
114 Stuff used by all device parts 96 Stuff used by all device parts
115 ---------------------------------------------------------------------------*/ 97 ---------------------------------------------------------------------------*/
116static int me4000_open(struct inode *, struct file *); 98static int me4000_open(struct inode *, struct file *);
117static int me4000_release(struct inode *, struct file *); 99static int me4000_release(struct inode *, struct file *);
118 100
119static int me4000_get_user_info(me4000_user_info_t *, 101static int me4000_get_user_info(struct me4000_user_info *,
120 me4000_info_t * board_info); 102 struct me4000_info *board_info);
121static int me4000_read_procmem(char *, char **, off_t, int, int *, void *); 103static int me4000_read_procmem(char *, char **, off_t, int, int *, void *);
122 104
123/*----------------------------------------------------------------------------- 105/*-----------------------------------------------------------------------------
@@ -140,40 +122,42 @@ static int me4000_ao_ioctl_cont(struct inode *, struct file *, unsigned int,
140static unsigned int me4000_ao_poll_cont(struct file *, poll_table *); 122static unsigned int me4000_ao_poll_cont(struct file *, poll_table *);
141static int me4000_ao_fsync_cont(struct file *, struct dentry *, int); 123static int me4000_ao_fsync_cont(struct file *, struct dentry *, int);
142 124
143static int me4000_ao_start(unsigned long *, me4000_ao_context_t *); 125static int me4000_ao_start(unsigned long *, struct me4000_ao_context *);
144static int me4000_ao_stop(me4000_ao_context_t *); 126static int me4000_ao_stop(struct me4000_ao_context *);
145static int me4000_ao_immediate_stop(me4000_ao_context_t *); 127static int me4000_ao_immediate_stop(struct me4000_ao_context *);
146static int me4000_ao_timer_set_divisor(u32 *, me4000_ao_context_t *); 128static int me4000_ao_timer_set_divisor(u32 *, struct me4000_ao_context *);
147static int me4000_ao_preload(me4000_ao_context_t *); 129static int me4000_ao_preload(struct me4000_ao_context *);
148static int me4000_ao_preload_update(me4000_ao_context_t *); 130static int me4000_ao_preload_update(struct me4000_ao_context *);
149static int me4000_ao_ex_trig_set_edge(int *, me4000_ao_context_t *); 131static int me4000_ao_ex_trig_set_edge(int *, struct me4000_ao_context *);
150static int me4000_ao_ex_trig_enable(me4000_ao_context_t *); 132static int me4000_ao_ex_trig_enable(struct me4000_ao_context *);
151static int me4000_ao_ex_trig_disable(me4000_ao_context_t *); 133static int me4000_ao_ex_trig_disable(struct me4000_ao_context *);
152static int me4000_ao_prepare(me4000_ao_context_t * ao_info); 134static int me4000_ao_prepare(struct me4000_ao_context *ao_info);
153static int me4000_ao_reset(me4000_ao_context_t * ao_info); 135static int me4000_ao_reset(struct me4000_ao_context *ao_info);
154static int me4000_ao_enable_do(me4000_ao_context_t *); 136static int me4000_ao_enable_do(struct me4000_ao_context *);
155static int me4000_ao_disable_do(me4000_ao_context_t *); 137static int me4000_ao_disable_do(struct me4000_ao_context *);
156static int me4000_ao_fsm_state(int *, me4000_ao_context_t *); 138static int me4000_ao_fsm_state(int *, struct me4000_ao_context *);
157 139
158static int me4000_ao_simultaneous_ex_trig(me4000_ao_context_t * ao_context); 140static int me4000_ao_simultaneous_ex_trig(struct me4000_ao_context *ao_context);
159static int me4000_ao_simultaneous_sw(me4000_ao_context_t * ao_context); 141static int me4000_ao_simultaneous_sw(struct me4000_ao_context *ao_context);
160static int me4000_ao_simultaneous_disable(me4000_ao_context_t * ao_context); 142static int me4000_ao_simultaneous_disable(struct me4000_ao_context *ao_context);
161static int me4000_ao_simultaneous_update(me4000_ao_channel_list_t * channels, 143static int me4000_ao_simultaneous_update(
162 me4000_ao_context_t * ao_context); 144 struct me4000_ao_channel_list *channels,
163 145 struct me4000_ao_context *ao_context);
164static int me4000_ao_synchronous_ex_trig(me4000_ao_context_t * ao_context); 146
165static int me4000_ao_synchronous_sw(me4000_ao_context_t * ao_context); 147static int me4000_ao_synchronous_ex_trig(struct me4000_ao_context *ao_context);
166static int me4000_ao_synchronous_disable(me4000_ao_context_t * ao_context); 148static int me4000_ao_synchronous_sw(struct me4000_ao_context *ao_context);
149static int me4000_ao_synchronous_disable(struct me4000_ao_context *ao_context);
167 150
168static int me4000_ao_ex_trig_timeout(unsigned long *arg, 151static int me4000_ao_ex_trig_timeout(unsigned long *arg,
169 me4000_ao_context_t * ao_context); 152 struct me4000_ao_context *ao_context);
170static int me4000_ao_get_free_buffer(unsigned long *arg, 153static int me4000_ao_get_free_buffer(unsigned long *arg,
171 me4000_ao_context_t * ao_context); 154 struct me4000_ao_context *ao_context);
172 155
173/*----------------------------------------------------------------------------- 156/*-----------------------------------------------------------------------------
174 Analog input stuff 157 Analog input stuff
175 ---------------------------------------------------------------------------*/ 158 ---------------------------------------------------------------------------*/
176static int me4000_ai_single(me4000_ai_single_t *, me4000_ai_context_t *); 159static int me4000_ai_single(struct me4000_ai_single *,
160 struct me4000_ai_context *);
177static int me4000_ai_ioctl_sing(struct inode *, struct file *, unsigned int, 161static int me4000_ai_ioctl_sing(struct inode *, struct file *, unsigned int,
178 unsigned long); 162 unsigned long);
179 163
@@ -186,68 +170,69 @@ static int me4000_ai_fasync(int fd, struct file *file_p, int mode);
186static int me4000_ai_ioctl_ext(struct inode *, struct file *, unsigned int, 170static int me4000_ai_ioctl_ext(struct inode *, struct file *, unsigned int,
187 unsigned long); 171 unsigned long);
188 172
189static int me4000_ai_prepare(me4000_ai_context_t * ai_context); 173static int me4000_ai_prepare(struct me4000_ai_context *ai_context);
190static int me4000_ai_reset(me4000_ai_context_t * ai_context); 174static int me4000_ai_reset(struct me4000_ai_context *ai_context);
191static int me4000_ai_config(me4000_ai_config_t *, me4000_ai_context_t *); 175static int me4000_ai_config(struct me4000_ai_config *,
192static int me4000_ai_start(me4000_ai_context_t *); 176 struct me4000_ai_context *);
193static int me4000_ai_start_ex(unsigned long *, me4000_ai_context_t *); 177static int me4000_ai_start(struct me4000_ai_context *);
194static int me4000_ai_stop(me4000_ai_context_t *); 178static int me4000_ai_start_ex(unsigned long *, struct me4000_ai_context *);
195static int me4000_ai_immediate_stop(me4000_ai_context_t *); 179static int me4000_ai_stop(struct me4000_ai_context *);
196static int me4000_ai_ex_trig_enable(me4000_ai_context_t *); 180static int me4000_ai_immediate_stop(struct me4000_ai_context *);
197static int me4000_ai_ex_trig_disable(me4000_ai_context_t *); 181static int me4000_ai_ex_trig_enable(struct me4000_ai_context *);
198static int me4000_ai_ex_trig_setup(me4000_ai_trigger_t *, 182static int me4000_ai_ex_trig_disable(struct me4000_ai_context *);
199 me4000_ai_context_t *); 183static int me4000_ai_ex_trig_setup(struct me4000_ai_trigger *,
200static int me4000_ai_sc_setup(me4000_ai_sc_t * arg, 184 struct me4000_ai_context *);
201 me4000_ai_context_t * ai_context); 185static int me4000_ai_sc_setup(struct me4000_ai_sc *arg,
202static int me4000_ai_offset_enable(me4000_ai_context_t * ai_context); 186 struct me4000_ai_context *ai_context);
203static int me4000_ai_offset_disable(me4000_ai_context_t * ai_context); 187static int me4000_ai_offset_enable(struct me4000_ai_context *ai_context);
204static int me4000_ai_fullscale_enable(me4000_ai_context_t * ai_context); 188static int me4000_ai_offset_disable(struct me4000_ai_context *ai_context);
205static int me4000_ai_fullscale_disable(me4000_ai_context_t * ai_context); 189static int me4000_ai_fullscale_enable(struct me4000_ai_context *ai_context);
206static int me4000_ai_fsm_state(int *arg, me4000_ai_context_t * ai_context); 190static int me4000_ai_fullscale_disable(struct me4000_ai_context *ai_context);
191static int me4000_ai_fsm_state(int *arg, struct me4000_ai_context *ai_context);
207static int me4000_ai_get_count_buffer(unsigned long *arg, 192static int me4000_ai_get_count_buffer(unsigned long *arg,
208 me4000_ai_context_t * ai_context); 193 struct me4000_ai_context *ai_context);
209 194
210/*----------------------------------------------------------------------------- 195/*-----------------------------------------------------------------------------
211 EEPROM stuff 196 EEPROM stuff
212 ---------------------------------------------------------------------------*/ 197 ---------------------------------------------------------------------------*/
213static int me4000_eeprom_read(me4000_eeprom_t * arg, 198static int me4000_eeprom_read(struct me4000_eeprom *arg,
214 me4000_ai_context_t * ai_context); 199 struct me4000_ai_context *ai_context);
215static int me4000_eeprom_write(me4000_eeprom_t * arg, 200static int me4000_eeprom_write(struct me4000_eeprom *arg,
216 me4000_ai_context_t * ai_context); 201 struct me4000_ai_context *ai_context);
217static unsigned short eeprom_read_cmd(me4000_ai_context_t * ai_context,
218 unsigned long cmd, int length);
219static int eeprom_write_cmd(me4000_ai_context_t * ai_context, unsigned long cmd,
220 int length);
221 202
222/*----------------------------------------------------------------------------- 203/*-----------------------------------------------------------------------------
223 Digital I/O stuff 204 Digital I/O stuff
224 ---------------------------------------------------------------------------*/ 205 ---------------------------------------------------------------------------*/
225static int me4000_dio_ioctl(struct inode *, struct file *, unsigned int, 206static int me4000_dio_ioctl(struct inode *, struct file *, unsigned int,
226 unsigned long); 207 unsigned long);
227static int me4000_dio_config(me4000_dio_config_t *, me4000_dio_context_t *); 208static int me4000_dio_config(struct me4000_dio_config *,
228static int me4000_dio_get_byte(me4000_dio_byte_t *, me4000_dio_context_t *); 209 struct me4000_dio_context *);
229static int me4000_dio_set_byte(me4000_dio_byte_t *, me4000_dio_context_t *); 210static int me4000_dio_get_byte(struct me4000_dio_byte *,
230static int me4000_dio_reset(me4000_dio_context_t *); 211 struct me4000_dio_context *);
212static int me4000_dio_set_byte(struct me4000_dio_byte *,
213 struct me4000_dio_context *);
214static int me4000_dio_reset(struct me4000_dio_context *);
231 215
232/*----------------------------------------------------------------------------- 216/*-----------------------------------------------------------------------------
233 Counter stuff 217 Counter stuff
234 ---------------------------------------------------------------------------*/ 218 ---------------------------------------------------------------------------*/
235static int me4000_cnt_ioctl(struct inode *, struct file *, unsigned int, 219static int me4000_cnt_ioctl(struct inode *, struct file *, unsigned int,
236 unsigned long); 220 unsigned long);
237static int me4000_cnt_config(me4000_cnt_config_t *, me4000_cnt_context_t *); 221static int me4000_cnt_config(struct me4000_cnt_config *,
238static int me4000_cnt_read(me4000_cnt_t *, me4000_cnt_context_t *); 222 struct me4000_cnt_context *);
239static int me4000_cnt_write(me4000_cnt_t *, me4000_cnt_context_t *); 223static int me4000_cnt_read(struct me4000_cnt *, struct me4000_cnt_context *);
240static int me4000_cnt_reset(me4000_cnt_context_t *); 224static int me4000_cnt_write(struct me4000_cnt *, struct me4000_cnt_context *);
225static int me4000_cnt_reset(struct me4000_cnt_context *);
241 226
242/*----------------------------------------------------------------------------- 227/*-----------------------------------------------------------------------------
243 External interrupt routines 228 External interrupt routines
244 ---------------------------------------------------------------------------*/ 229 ---------------------------------------------------------------------------*/
245static int me4000_ext_int_ioctl(struct inode *, struct file *, unsigned int, 230static int me4000_ext_int_ioctl(struct inode *, struct file *, unsigned int,
246 unsigned long); 231 unsigned long);
247static int me4000_ext_int_enable(me4000_ext_int_context_t *); 232static int me4000_ext_int_enable(struct me4000_ext_int_context *);
248static int me4000_ext_int_disable(me4000_ext_int_context_t *); 233static int me4000_ext_int_disable(struct me4000_ext_int_context *);
249static int me4000_ext_int_count(unsigned long *arg, 234static int me4000_ext_int_count(unsigned long *arg,
250 me4000_ext_int_context_t * ext_int_context); 235 struct me4000_ext_int_context *ext_int_context);
251static int me4000_ext_int_fasync(int fd, struct file *file_ptr, int mode); 236static int me4000_ext_int_fasync(int fd, struct file *file_ptr, int mode);
252 237
253/*----------------------------------------------------------------------------- 238/*-----------------------------------------------------------------------------
@@ -260,27 +245,18 @@ static irqreturn_t me4000_ext_int_isr(int, void *);
260/*----------------------------------------------------------------------------- 245/*-----------------------------------------------------------------------------
261 Inline functions 246 Inline functions
262 ---------------------------------------------------------------------------*/ 247 ---------------------------------------------------------------------------*/
263static int inline me4000_buf_count(me4000_circ_buf_t, int);
264static int inline me4000_buf_space(me4000_circ_buf_t, int);
265static int inline me4000_space_to_end(me4000_circ_buf_t, int);
266static int inline me4000_values_to_end(me4000_circ_buf_t, int);
267 248
268static void inline me4000_outb(unsigned char value, unsigned long port); 249static int inline me4000_buf_count(struct me4000_circ_buf buf, int size)
269static void inline me4000_outl(unsigned long value, unsigned long port);
270static unsigned long inline me4000_inl(unsigned long port);
271static unsigned char inline me4000_inb(unsigned long port);
272
273static int me4000_buf_count(me4000_circ_buf_t buf, int size)
274{ 250{
275 return ((buf.head - buf.tail) & (size - 1)); 251 return ((buf.head - buf.tail) & (size - 1));
276} 252}
277 253
278static int me4000_buf_space(me4000_circ_buf_t buf, int size) 254static int inline me4000_buf_space(struct me4000_circ_buf buf, int size)
279{ 255{
280 return ((buf.tail - (buf.head + 1)) & (size - 1)); 256 return ((buf.tail - (buf.head + 1)) & (size - 1));
281} 257}
282 258
283static int me4000_values_to_end(me4000_circ_buf_t buf, int size) 259static int inline me4000_values_to_end(struct me4000_circ_buf buf, int size)
284{ 260{
285 int end; 261 int end;
286 int n; 262 int n;
@@ -289,7 +265,7 @@ static int me4000_values_to_end(me4000_circ_buf_t buf, int size)
289 return (n < end) ? n : end; 265 return (n < end) ? n : end;
290} 266}
291 267
292static int me4000_space_to_end(me4000_circ_buf_t buf, int size) 268static int inline me4000_space_to_end(struct me4000_circ_buf buf, int size)
293{ 269{
294 int end; 270 int end;
295 int n; 271 int n;
@@ -299,19 +275,19 @@ static int me4000_space_to_end(me4000_circ_buf_t buf, int size)
299 return (n <= end) ? n : (end + 1); 275 return (n <= end) ? n : (end + 1);
300} 276}
301 277
302static void me4000_outb(unsigned char value, unsigned long port) 278static void inline me4000_outb(unsigned char value, unsigned long port)
303{ 279{
304 PORT_PDEBUG("--> 0x%02X port 0x%04lX\n", value, port); 280 PORT_PDEBUG("--> 0x%02X port 0x%04lX\n", value, port);
305 outb(value, port); 281 outb(value, port);
306} 282}
307 283
308static void me4000_outl(unsigned long value, unsigned long port) 284static void inline me4000_outl(unsigned long value, unsigned long port)
309{ 285{
310 PORT_PDEBUG("--> 0x%08lX port 0x%04lX\n", value, port); 286 PORT_PDEBUG("--> 0x%08lX port 0x%04lX\n", value, port);
311 outl(value, port); 287 outl(value, port);
312} 288}
313 289
314static unsigned long me4000_inl(unsigned long port) 290static unsigned long inline me4000_inl(unsigned long port)
315{ 291{
316 unsigned long value; 292 unsigned long value;
317 value = inl(port); 293 value = inl(port);
@@ -319,7 +295,7 @@ static unsigned long me4000_inl(unsigned long port)
319 return value; 295 return value;
320} 296}
321 297
322static unsigned char me4000_inb(unsigned long port) 298static unsigned char inline me4000_inb(unsigned long port)
323{ 299{
324 unsigned char value; 300 unsigned char value;
325 value = inb(port); 301 value = inb(port);
@@ -327,102 +303,102 @@ static unsigned char me4000_inb(unsigned long port)
327 return value; 303 return value;
328} 304}
329 305
330struct pci_driver me4000_driver = { 306static struct pci_driver me4000_driver = {
331 .name = ME4000_NAME, 307 .name = ME4000_NAME,
332 .id_table = me4000_pci_table, 308 .id_table = me4000_pci_table,
333 .probe = me4000_probe 309 .probe = me4000_probe
334}; 310};
335 311
336static struct file_operations me4000_ao_fops_sing = { 312static struct file_operations me4000_ao_fops_sing = {
337 owner:THIS_MODULE, 313 .owner = THIS_MODULE,
338 write:me4000_ao_write_sing, 314 .write = me4000_ao_write_sing,
339 ioctl:me4000_ao_ioctl_sing, 315 .ioctl = me4000_ao_ioctl_sing,
340 open:me4000_open, 316 .open = me4000_open,
341 release:me4000_release, 317 .release = me4000_release,
342}; 318};
343 319
344static struct file_operations me4000_ao_fops_wrap = { 320static struct file_operations me4000_ao_fops_wrap = {
345 owner:THIS_MODULE, 321 .owner = THIS_MODULE,
346 write:me4000_ao_write_wrap, 322 .write = me4000_ao_write_wrap,
347 ioctl:me4000_ao_ioctl_wrap, 323 .ioctl = me4000_ao_ioctl_wrap,
348 open:me4000_open, 324 .open = me4000_open,
349 release:me4000_release, 325 .release = me4000_release,
350}; 326};
351 327
352static struct file_operations me4000_ao_fops_cont = { 328static struct file_operations me4000_ao_fops_cont = {
353 owner:THIS_MODULE, 329 .owner = THIS_MODULE,
354 write:me4000_ao_write_cont, 330 .write = me4000_ao_write_cont,
355 poll:me4000_ao_poll_cont, 331 .poll = me4000_ao_poll_cont,
356 ioctl:me4000_ao_ioctl_cont, 332 .ioctl = me4000_ao_ioctl_cont,
357 open:me4000_open, 333 .open = me4000_open,
358 release:me4000_release, 334 .release = me4000_release,
359 fsync:me4000_ao_fsync_cont, 335 .fsync = me4000_ao_fsync_cont,
360}; 336};
361 337
362static struct file_operations me4000_ai_fops_sing = { 338static struct file_operations me4000_ai_fops_sing = {
363 owner:THIS_MODULE, 339 .owner = THIS_MODULE,
364 ioctl:me4000_ai_ioctl_sing, 340 .ioctl = me4000_ai_ioctl_sing,
365 open:me4000_open, 341 .open = me4000_open,
366 release:me4000_release, 342 .release = me4000_release,
367}; 343};
368 344
369static struct file_operations me4000_ai_fops_cont_sw = { 345static struct file_operations me4000_ai_fops_cont_sw = {
370 owner:THIS_MODULE, 346 .owner = THIS_MODULE,
371 read:me4000_ai_read, 347 .read = me4000_ai_read,
372 poll:me4000_ai_poll, 348 .poll = me4000_ai_poll,
373 ioctl:me4000_ai_ioctl_sw, 349 .ioctl = me4000_ai_ioctl_sw,
374 open:me4000_open, 350 .open = me4000_open,
375 release:me4000_release, 351 .release = me4000_release,
376 fasync:me4000_ai_fasync, 352 .fasync = me4000_ai_fasync,
377}; 353};
378 354
379static struct file_operations me4000_ai_fops_cont_et = { 355static struct file_operations me4000_ai_fops_cont_et = {
380 owner:THIS_MODULE, 356 .owner = THIS_MODULE,
381 read:me4000_ai_read, 357 .read = me4000_ai_read,
382 poll:me4000_ai_poll, 358 .poll = me4000_ai_poll,
383 ioctl:me4000_ai_ioctl_ext, 359 .ioctl = me4000_ai_ioctl_ext,
384 open:me4000_open, 360 .open = me4000_open,
385 release:me4000_release, 361 .release = me4000_release,
386}; 362};
387 363
388static struct file_operations me4000_ai_fops_cont_et_value = { 364static struct file_operations me4000_ai_fops_cont_et_value = {
389 owner:THIS_MODULE, 365 .owner = THIS_MODULE,
390 read:me4000_ai_read, 366 .read = me4000_ai_read,
391 poll:me4000_ai_poll, 367 .poll = me4000_ai_poll,
392 ioctl:me4000_ai_ioctl_ext, 368 .ioctl = me4000_ai_ioctl_ext,
393 open:me4000_open, 369 .open = me4000_open,
394 release:me4000_release, 370 .release = me4000_release,
395}; 371};
396 372
397static struct file_operations me4000_ai_fops_cont_et_chanlist = { 373static struct file_operations me4000_ai_fops_cont_et_chanlist = {
398 owner:THIS_MODULE, 374 .owner = THIS_MODULE,
399 read:me4000_ai_read, 375 .read = me4000_ai_read,
400 poll:me4000_ai_poll, 376 .poll = me4000_ai_poll,
401 ioctl:me4000_ai_ioctl_ext, 377 .ioctl = me4000_ai_ioctl_ext,
402 open:me4000_open, 378 .open = me4000_open,
403 release:me4000_release, 379 .release = me4000_release,
404}; 380};
405 381
406static struct file_operations me4000_dio_fops = { 382static struct file_operations me4000_dio_fops = {
407 owner:THIS_MODULE, 383 .owner = THIS_MODULE,
408 ioctl:me4000_dio_ioctl, 384 .ioctl = me4000_dio_ioctl,
409 open:me4000_open, 385 .open = me4000_open,
410 release:me4000_release, 386 .release = me4000_release,
411}; 387};
412 388
413static struct file_operations me4000_cnt_fops = { 389static struct file_operations me4000_cnt_fops = {
414 owner:THIS_MODULE, 390 .owner = THIS_MODULE,
415 ioctl:me4000_cnt_ioctl, 391 .ioctl = me4000_cnt_ioctl,
416 open:me4000_open, 392 .open = me4000_open,
417 release:me4000_release, 393 .release = me4000_release,
418}; 394};
419 395
420static struct file_operations me4000_ext_int_fops = { 396static struct file_operations me4000_ext_int_fops = {
421 owner:THIS_MODULE, 397 .owner = THIS_MODULE,
422 ioctl:me4000_ext_int_ioctl, 398 .ioctl = me4000_ext_int_ioctl,
423 open:me4000_open, 399 .open = me4000_open,
424 release:me4000_release, 400 .release = me4000_release,
425 fasync:me4000_ext_int_fasync, 401 .fasync = me4000_ext_int_fasync,
426}; 402};
427 403
428static struct file_operations *me4000_ao_fops_array[] = { 404static struct file_operations *me4000_ao_fops_array[] = {
@@ -439,9 +415,9 @@ static struct file_operations *me4000_ai_fops_array[] = {
439 &me4000_ai_fops_cont_et_chanlist, // work through one channel list by external trigger 415 &me4000_ai_fops_cont_et_chanlist, // work through one channel list by external trigger
440}; 416};
441 417
442int __init me4000_init_module(void) 418static int __init me4000_init_module(void)
443{ 419{
444 int result = 0; 420 int result;
445 421
446 CALL_PDEBUG("init_module() is executed\n"); 422 CALL_PDEBUG("init_module() is executed\n");
447 423
@@ -533,26 +509,26 @@ int __init me4000_init_module(void)
533 509
534 return 0; 510 return 0;
535 511
536 INIT_ERROR_7: 512INIT_ERROR_7:
537 unregister_chrdev(me4000_ext_int_major_driver_no, ME4000_EXT_INT_NAME); 513 unregister_chrdev(me4000_ext_int_major_driver_no, ME4000_EXT_INT_NAME);
538 514
539 INIT_ERROR_6: 515INIT_ERROR_6:
540 unregister_chrdev(me4000_cnt_major_driver_no, ME4000_CNT_NAME); 516 unregister_chrdev(me4000_cnt_major_driver_no, ME4000_CNT_NAME);
541 517
542 INIT_ERROR_5: 518INIT_ERROR_5:
543 unregister_chrdev(me4000_dio_major_driver_no, ME4000_DIO_NAME); 519 unregister_chrdev(me4000_dio_major_driver_no, ME4000_DIO_NAME);
544 520
545 INIT_ERROR_4: 521INIT_ERROR_4:
546 unregister_chrdev(me4000_ai_major_driver_no, ME4000_AI_NAME); 522 unregister_chrdev(me4000_ai_major_driver_no, ME4000_AI_NAME);
547 523
548 INIT_ERROR_3: 524INIT_ERROR_3:
549 unregister_chrdev(me4000_ao_major_driver_no, ME4000_AO_NAME); 525 unregister_chrdev(me4000_ao_major_driver_no, ME4000_AO_NAME);
550 526
551 INIT_ERROR_2: 527INIT_ERROR_2:
552 pci_unregister_driver(&me4000_driver); 528 pci_unregister_driver(&me4000_driver);
553 clear_board_info_list(); 529 clear_board_info_list();
554 530
555 INIT_ERROR_1: 531INIT_ERROR_1:
556 return result; 532 return result;
557} 533}
558 534
@@ -562,18 +538,18 @@ static void clear_board_info_list(void)
562{ 538{
563 struct list_head *board_p; 539 struct list_head *board_p;
564 struct list_head *dac_p; 540 struct list_head *dac_p;
565 me4000_info_t *board_info; 541 struct me4000_info *board_info;
566 me4000_ao_context_t *ao_context; 542 struct me4000_ao_context *ao_context;
567 543
568 /* Clear context lists */ 544 /* Clear context lists */
569 for (board_p = me4000_board_info_list.next; 545 for (board_p = me4000_board_info_list.next;
570 board_p != &me4000_board_info_list; board_p = board_p->next) { 546 board_p != &me4000_board_info_list; board_p = board_p->next) {
571 board_info = list_entry(board_p, me4000_info_t, list); 547 board_info = list_entry(board_p, struct me4000_info, list);
572 /* Clear analog output context list */ 548 /* Clear analog output context list */
573 while (!list_empty(&board_info->ao_context_list)) { 549 while (!list_empty(&board_info->ao_context_list)) {
574 dac_p = board_info->ao_context_list.next; 550 dac_p = board_info->ao_context_list.next;
575 ao_context = 551 ao_context =
576 list_entry(dac_p, me4000_ao_context_t, list); 552 list_entry(dac_p, struct me4000_ao_context, list);
577 me4000_ao_reset(ao_context); 553 me4000_ao_reset(ao_context);
578 free_irq(ao_context->irq, ao_context); 554 free_irq(ao_context->irq, ao_context);
579 if (ao_context->circ_buf.buf) 555 if (ao_context->circ_buf.buf)
@@ -600,14 +576,14 @@ static void clear_board_info_list(void)
600 /* Clear the board info list */ 576 /* Clear the board info list */
601 while (!list_empty(&me4000_board_info_list)) { 577 while (!list_empty(&me4000_board_info_list)) {
602 board_p = me4000_board_info_list.next; 578 board_p = me4000_board_info_list.next;
603 board_info = list_entry(board_p, me4000_info_t, list); 579 board_info = list_entry(board_p, struct me4000_info, list);
604 pci_release_regions(board_info->pci_dev_p); 580 pci_release_regions(board_info->pci_dev_p);
605 list_del(board_p); 581 list_del(board_p);
606 kfree(board_info); 582 kfree(board_info);
607 } 583 }
608} 584}
609 585
610static int get_registers(struct pci_dev *dev, me4000_info_t * board_info) 586static int get_registers(struct pci_dev *dev, struct me4000_info *board_info)
611{ 587{
612 588
613 /*--------------------------- plx regbase ---------------------------------*/ 589 /*--------------------------- plx regbase ---------------------------------*/
@@ -667,20 +643,20 @@ static int get_registers(struct pci_dev *dev, me4000_info_t * board_info)
667} 643}
668 644
669static int init_board_info(struct pci_dev *pci_dev_p, 645static int init_board_info(struct pci_dev *pci_dev_p,
670 me4000_info_t * board_info) 646 struct me4000_info *board_info)
671{ 647{
672 int i; 648 int i;
673 int result; 649 int result;
674 struct list_head *board_p; 650 struct list_head *board_p;
675 board_info->pci_dev_p = pci_dev_p; 651 board_info->pci_dev_p = pci_dev_p;
676 652
677 for (i = 0; i < ME4000_BOARD_VERSIONS; i++) { 653 for (i = 0; i < ARRAY_SIZE(me4000_boards); i++) {
678 if (me4000_boards[i].device_id == pci_dev_p->device) { 654 if (me4000_boards[i].device_id == pci_dev_p->device) {
679 board_info->board_p = &me4000_boards[i]; 655 board_info->board_p = &me4000_boards[i];
680 break; 656 break;
681 } 657 }
682 } 658 }
683 if (i == ME4000_BOARD_VERSIONS) { 659 if (i == ARRAY_SIZE(me4000_boards)) {
684 printk(KERN_ERR 660 printk(KERN_ERR
685 "ME4000:init_board_info():Device ID not valid\n"); 661 "ME4000:init_board_info():Device ID not valid\n");
686 return -ENODEV; 662 return -ENODEV;
@@ -755,21 +731,21 @@ static int init_board_info(struct pci_dev *pci_dev_p,
755 return 0; 731 return 0;
756} 732}
757 733
758static int alloc_ao_contexts(me4000_info_t * info) 734static int alloc_ao_contexts(struct me4000_info *info)
759{ 735{
760 int i; 736 int i;
761 int err; 737 int err;
762 me4000_ao_context_t *ao_context; 738 struct me4000_ao_context *ao_context;
763 739
764 for (i = 0; i < info->board_p->ao.count; i++) { 740 for (i = 0; i < info->board_p->ao.count; i++) {
765 ao_context = kmalloc(sizeof(me4000_ao_context_t), GFP_KERNEL); 741 ao_context = kzalloc(sizeof(struct me4000_ao_context),
742 GFP_KERNEL);
766 if (!ao_context) { 743 if (!ao_context) {
767 printk(KERN_ERR 744 printk(KERN_ERR
768 "alloc_ao_contexts():Can't get memory for ao context\n"); 745 "alloc_ao_contexts():Can't get memory for ao context\n");
769 release_ao_contexts(info); 746 release_ao_contexts(info);
770 return -ENOMEM; 747 return -ENOMEM;
771 } 748 }
772 memset(ao_context, 0, sizeof(me4000_ao_context_t));
773 749
774 spin_lock_init(&ao_context->use_lock); 750 spin_lock_init(&ao_context->use_lock);
775 spin_lock_init(&ao_context->int_lock); 751 spin_lock_init(&ao_context->int_lock);
@@ -780,15 +756,13 @@ static int alloc_ao_contexts(me4000_info_t * info)
780 if (info->board_p->ao.fifo_count) { 756 if (info->board_p->ao.fifo_count) {
781 /* Allocate circular buffer */ 757 /* Allocate circular buffer */
782 ao_context->circ_buf.buf = 758 ao_context->circ_buf.buf =
783 kmalloc(ME4000_AO_BUFFER_SIZE, GFP_KERNEL); 759 kzalloc(ME4000_AO_BUFFER_SIZE, GFP_KERNEL);
784 if (!ao_context->circ_buf.buf) { 760 if (!ao_context->circ_buf.buf) {
785 printk(KERN_ERR 761 printk(KERN_ERR
786 "alloc_ao_contexts():Can't get circular buffer\n"); 762 "alloc_ao_contexts():Can't get circular buffer\n");
787 release_ao_contexts(info); 763 release_ao_contexts(info);
788 return -ENOMEM; 764 return -ENOMEM;
789 } 765 }
790 memset(ao_context->circ_buf.buf, 0,
791 ME4000_AO_BUFFER_SIZE);
792 766
793 /* Clear the circular buffer */ 767 /* Clear the circular buffer */
794 ao_context->circ_buf.head = 0; 768 ao_context->circ_buf.head = 0;
@@ -872,9 +846,8 @@ static int alloc_ao_contexts(me4000_info_t * info)
872 ME4000_NAME, ao_context); 846 ME4000_NAME, ao_context);
873 if (err) { 847 if (err) {
874 printk(KERN_ERR 848 printk(KERN_ERR
875 "alloc_ao_contexts():Can't get interrupt line"); 849 "%s:Can't get interrupt line", __func__);
876 if (ao_context->circ_buf.buf) 850 kfree(ao_context->circ_buf.buf);
877 kfree(ao_context->circ_buf.buf);
878 kfree(ao_context); 851 kfree(ao_context);
879 release_ao_contexts(info); 852 release_ao_contexts(info);
880 return -ENODEV; 853 return -ENODEV;
@@ -888,35 +861,34 @@ static int alloc_ao_contexts(me4000_info_t * info)
888 return 0; 861 return 0;
889} 862}
890 863
891static void release_ao_contexts(me4000_info_t * board_info) 864static void release_ao_contexts(struct me4000_info *board_info)
892{ 865{
893 struct list_head *dac_p; 866 struct list_head *dac_p;
894 me4000_ao_context_t *ao_context; 867 struct me4000_ao_context *ao_context;
895 868
896 /* Clear analog output context list */ 869 /* Clear analog output context list */
897 while (!list_empty(&board_info->ao_context_list)) { 870 while (!list_empty(&board_info->ao_context_list)) {
898 dac_p = board_info->ao_context_list.next; 871 dac_p = board_info->ao_context_list.next;
899 ao_context = list_entry(dac_p, me4000_ao_context_t, list); 872 ao_context = list_entry(dac_p, struct me4000_ao_context, list);
900 free_irq(ao_context->irq, ao_context); 873 free_irq(ao_context->irq, ao_context);
901 if (ao_context->circ_buf.buf) 874 kfree(ao_context->circ_buf.buf);
902 kfree(ao_context->circ_buf.buf);
903 list_del(dac_p); 875 list_del(dac_p);
904 kfree(ao_context); 876 kfree(ao_context);
905 } 877 }
906} 878}
907 879
908static int alloc_ai_context(me4000_info_t * info) 880static int alloc_ai_context(struct me4000_info *info)
909{ 881{
910 me4000_ai_context_t *ai_context; 882 struct me4000_ai_context *ai_context;
911 883
912 if (info->board_p->ai.count) { 884 if (info->board_p->ai.count) {
913 ai_context = kmalloc(sizeof(me4000_ai_context_t), GFP_KERNEL); 885 ai_context = kzalloc(sizeof(struct me4000_ai_context),
886 GFP_KERNEL);
914 if (!ai_context) { 887 if (!ai_context) {
915 printk(KERN_ERR 888 printk(KERN_ERR
916 "ME4000:alloc_ai_context():Can't get memory for ai context\n"); 889 "ME4000:alloc_ai_context():Can't get memory for ai context\n");
917 return -ENOMEM; 890 return -ENOMEM;
918 } 891 }
919 memset(ai_context, 0, sizeof(me4000_ai_context_t));
920 892
921 info->ai_context = ai_context; 893 info->ai_context = ai_context;
922 894
@@ -958,18 +930,18 @@ static int alloc_ai_context(me4000_info_t * info)
958 return 0; 930 return 0;
959} 931}
960 932
961static int alloc_dio_context(me4000_info_t * info) 933static int alloc_dio_context(struct me4000_info *info)
962{ 934{
963 me4000_dio_context_t *dio_context; 935 struct me4000_dio_context *dio_context;
964 936
965 if (info->board_p->dio.count) { 937 if (info->board_p->dio.count) {
966 dio_context = kmalloc(sizeof(me4000_dio_context_t), GFP_KERNEL); 938 dio_context = kzalloc(sizeof(struct me4000_dio_context),
939 GFP_KERNEL);
967 if (!dio_context) { 940 if (!dio_context) {
968 printk(KERN_ERR 941 printk(KERN_ERR
969 "ME4000:alloc_dio_context():Can't get memory for dio context\n"); 942 "ME4000:alloc_dio_context():Can't get memory for dio context\n");
970 return -ENOMEM; 943 return -ENOMEM;
971 } 944 }
972 memset(dio_context, 0, sizeof(me4000_dio_context_t));
973 945
974 info->dio_context = dio_context; 946 info->dio_context = dio_context;
975 947
@@ -995,18 +967,18 @@ static int alloc_dio_context(me4000_info_t * info)
995 return 0; 967 return 0;
996} 968}
997 969
998static int alloc_cnt_context(me4000_info_t * info) 970static int alloc_cnt_context(struct me4000_info *info)
999{ 971{
1000 me4000_cnt_context_t *cnt_context; 972 struct me4000_cnt_context *cnt_context;
1001 973
1002 if (info->board_p->cnt.count) { 974 if (info->board_p->cnt.count) {
1003 cnt_context = kmalloc(sizeof(me4000_cnt_context_t), GFP_KERNEL); 975 cnt_context = kzalloc(sizeof(struct me4000_cnt_context),
976 GFP_KERNEL);
1004 if (!cnt_context) { 977 if (!cnt_context) {
1005 printk(KERN_ERR 978 printk(KERN_ERR
1006 "ME4000:alloc_cnt_context():Can't get memory for cnt context\n"); 979 "ME4000:alloc_cnt_context():Can't get memory for cnt context\n");
1007 return -ENOMEM; 980 return -ENOMEM;
1008 } 981 }
1009 memset(cnt_context, 0, sizeof(me4000_cnt_context_t));
1010 982
1011 info->cnt_context = cnt_context; 983 info->cnt_context = cnt_context;
1012 984
@@ -1026,19 +998,18 @@ static int alloc_cnt_context(me4000_info_t * info)
1026 return 0; 998 return 0;
1027} 999}
1028 1000
1029static int alloc_ext_int_context(me4000_info_t * info) 1001static int alloc_ext_int_context(struct me4000_info *info)
1030{ 1002{
1031 me4000_ext_int_context_t *ext_int_context; 1003 struct me4000_ext_int_context *ext_int_context;
1032 1004
1033 if (info->board_p->cnt.count) { 1005 if (info->board_p->cnt.count) {
1034 ext_int_context = 1006 ext_int_context =
1035 kmalloc(sizeof(me4000_ext_int_context_t), GFP_KERNEL); 1007 kzalloc(sizeof(struct me4000_ext_int_context), GFP_KERNEL);
1036 if (!ext_int_context) { 1008 if (!ext_int_context) {
1037 printk(KERN_ERR 1009 printk(KERN_ERR
1038 "ME4000:alloc_ext_int_context():Can't get memory for cnt context\n"); 1010 "ME4000:alloc_ext_int_context():Can't get memory for cnt context\n");
1039 return -ENOMEM; 1011 return -ENOMEM;
1040 } 1012 }
1041 memset(ext_int_context, 0, sizeof(me4000_ext_int_context_t));
1042 1013
1043 info->ext_int_context = ext_int_context; 1014 info->ext_int_context = ext_int_context;
1044 1015
@@ -1060,19 +1031,18 @@ static int alloc_ext_int_context(me4000_info_t * info)
1060static int me4000_probe(struct pci_dev *dev, const struct pci_device_id *id) 1031static int me4000_probe(struct pci_dev *dev, const struct pci_device_id *id)
1061{ 1032{
1062 int result = 0; 1033 int result = 0;
1063 me4000_info_t *board_info; 1034 struct me4000_info *board_info;
1064 1035
1065 CALL_PDEBUG("me4000_probe() is executed\n"); 1036 CALL_PDEBUG("me4000_probe() is executed\n");
1066 1037
1067 /* Allocate structure for board context */ 1038 /* Allocate structure for board context */
1068 board_info = kmalloc(sizeof(me4000_info_t), GFP_KERNEL); 1039 board_info = kzalloc(sizeof(struct me4000_info), GFP_KERNEL);
1069 if (!board_info) { 1040 if (!board_info) {
1070 printk(KERN_ERR 1041 printk(KERN_ERR
1071 "ME4000:Can't get memory for board info structure\n"); 1042 "ME4000:Can't get memory for board info structure\n");
1072 result = -ENOMEM; 1043 result = -ENOMEM;
1073 goto PROBE_ERROR_1; 1044 goto PROBE_ERROR_1;
1074 } 1045 }
1075 memset(board_info, 0, sizeof(me4000_info_t));
1076 1046
1077 /* Add to global linked list */ 1047 /* Add to global linked list */
1078 list_add_tail(&board_info->list, &me4000_board_info_list); 1048 list_add_tail(&board_info->list, &me4000_board_info_list);
@@ -1080,70 +1050,70 @@ static int me4000_probe(struct pci_dev *dev, const struct pci_device_id *id)
1080 /* Get the PCI base registers */ 1050 /* Get the PCI base registers */
1081 result = get_registers(dev, board_info); 1051 result = get_registers(dev, board_info);
1082 if (result) { 1052 if (result) {
1083 printk(KERN_ERR "me4000_probe():Cannot get registers\n"); 1053 printk(KERN_ERR "%s:Cannot get registers\n", __func__);
1084 goto PROBE_ERROR_2; 1054 goto PROBE_ERROR_2;
1085 } 1055 }
1086 1056
1087 /* Enable the device */ 1057 /* Enable the device */
1088 result = pci_enable_device(dev); 1058 result = pci_enable_device(dev);
1089 if (result < 0) { 1059 if (result < 0) {
1090 printk(KERN_ERR "me4000_probe():Cannot enable PCI device\n"); 1060 printk(KERN_ERR "%s:Cannot enable PCI device\n", __func__);
1091 goto PROBE_ERROR_2; 1061 goto PROBE_ERROR_2;
1092 } 1062 }
1093 1063
1094 /* Request the PCI register regions */ 1064 /* Request the PCI register regions */
1095 result = pci_request_regions(dev, ME4000_NAME); 1065 result = pci_request_regions(dev, ME4000_NAME);
1096 if (result < 0) { 1066 if (result < 0) {
1097 printk(KERN_ERR "me4000_probe():Cannot request I/O regions\n"); 1067 printk(KERN_ERR "%s:Cannot request I/O regions\n", __func__);
1098 goto PROBE_ERROR_2; 1068 goto PROBE_ERROR_2;
1099 } 1069 }
1100 1070
1101 /* Initialize board info */ 1071 /* Initialize board info */
1102 result = init_board_info(dev, board_info); 1072 result = init_board_info(dev, board_info);
1103 if (result) { 1073 if (result) {
1104 printk(KERN_ERR "me4000_probe():Cannot init baord info\n"); 1074 printk(KERN_ERR "%s:Cannot init baord info\n", __func__);
1105 goto PROBE_ERROR_3; 1075 goto PROBE_ERROR_3;
1106 } 1076 }
1107 1077
1108 /* Download the xilinx firmware */ 1078 /* Download the xilinx firmware */
1109 result = me4000_xilinx_download(board_info); 1079 result = me4000_xilinx_download(board_info);
1110 if (result) { 1080 if (result) {
1111 printk(KERN_ERR "me4000_probe:Can't download firmware\n"); 1081 printk(KERN_ERR "%s:Can't download firmware\n", __func__);
1112 goto PROBE_ERROR_3; 1082 goto PROBE_ERROR_3;
1113 } 1083 }
1114 1084
1115 /* Make a hardware reset */ 1085 /* Make a hardware reset */
1116 result = me4000_reset_board(board_info); 1086 result = me4000_reset_board(board_info);
1117 if (result) { 1087 if (result) {
1118 printk(KERN_ERR "me4000_probe:Can't reset board\n"); 1088 printk(KERN_ERR "%s :Can't reset board\n", __func__);
1119 goto PROBE_ERROR_3; 1089 goto PROBE_ERROR_3;
1120 } 1090 }
1121 1091
1122 /* Allocate analog output context structures */ 1092 /* Allocate analog output context structures */
1123 result = alloc_ao_contexts(board_info); 1093 result = alloc_ao_contexts(board_info);
1124 if (result) { 1094 if (result) {
1125 printk(KERN_ERR "me4000_probe():Cannot allocate ao contexts\n"); 1095 printk(KERN_ERR "%s:Cannot allocate ao contexts\n", __func__);
1126 goto PROBE_ERROR_3; 1096 goto PROBE_ERROR_3;
1127 } 1097 }
1128 1098
1129 /* Allocate analog input context */ 1099 /* Allocate analog input context */
1130 result = alloc_ai_context(board_info); 1100 result = alloc_ai_context(board_info);
1131 if (result) { 1101 if (result) {
1132 printk(KERN_ERR "me4000_probe():Cannot allocate ai context\n"); 1102 printk(KERN_ERR "%s:Cannot allocate ai context\n", __func__);
1133 goto PROBE_ERROR_4; 1103 goto PROBE_ERROR_4;
1134 } 1104 }
1135 1105
1136 /* Allocate digital I/O context */ 1106 /* Allocate digital I/O context */
1137 result = alloc_dio_context(board_info); 1107 result = alloc_dio_context(board_info);
1138 if (result) { 1108 if (result) {
1139 printk(KERN_ERR "me4000_probe():Cannot allocate dio context\n"); 1109 printk(KERN_ERR "%s:Cannot allocate dio context\n", __func__);
1140 goto PROBE_ERROR_5; 1110 goto PROBE_ERROR_5;
1141 } 1111 }
1142 1112
1143 /* Allocate counter context */ 1113 /* Allocate counter context */
1144 result = alloc_cnt_context(board_info); 1114 result = alloc_cnt_context(board_info);
1145 if (result) { 1115 if (result) {
1146 printk(KERN_ERR "me4000_probe():Cannot allocate cnt context\n"); 1116 printk(KERN_ERR "%s:Cannot allocate cnt context\n", __func__);
1147 goto PROBE_ERROR_6; 1117 goto PROBE_ERROR_6;
1148 } 1118 }
1149 1119
@@ -1151,36 +1121,36 @@ static int me4000_probe(struct pci_dev *dev, const struct pci_device_id *id)
1151 result = alloc_ext_int_context(board_info); 1121 result = alloc_ext_int_context(board_info);
1152 if (result) { 1122 if (result) {
1153 printk(KERN_ERR 1123 printk(KERN_ERR
1154 "me4000_probe():Cannot allocate ext_int context\n"); 1124 "%s:Cannot allocate ext_int context\n", __func__);
1155 goto PROBE_ERROR_7; 1125 goto PROBE_ERROR_7;
1156 } 1126 }
1157 1127
1158 return 0; 1128 return 0;
1159 1129
1160 PROBE_ERROR_7: 1130PROBE_ERROR_7:
1161 kfree(board_info->cnt_context); 1131 kfree(board_info->cnt_context);
1162 1132
1163 PROBE_ERROR_6: 1133PROBE_ERROR_6:
1164 kfree(board_info->dio_context); 1134 kfree(board_info->dio_context);
1165 1135
1166 PROBE_ERROR_5: 1136PROBE_ERROR_5:
1167 kfree(board_info->ai_context); 1137 kfree(board_info->ai_context);
1168 1138
1169 PROBE_ERROR_4: 1139PROBE_ERROR_4:
1170 release_ao_contexts(board_info); 1140 release_ao_contexts(board_info);
1171 1141
1172 PROBE_ERROR_3: 1142PROBE_ERROR_3:
1173 pci_release_regions(dev); 1143 pci_release_regions(dev);
1174 1144
1175 PROBE_ERROR_2: 1145PROBE_ERROR_2:
1176 list_del(&board_info->list); 1146 list_del(&board_info->list);
1177 kfree(board_info); 1147 kfree(board_info);
1178 1148
1179 PROBE_ERROR_1: 1149PROBE_ERROR_1:
1180 return result; 1150 return result;
1181} 1151}
1182 1152
1183static int me4000_xilinx_download(me4000_info_t * info) 1153static int me4000_xilinx_download(struct me4000_info *info)
1184{ 1154{
1185 int size = 0; 1155 int size = 0;
1186 u32 value = 0; 1156 u32 value = 0;
@@ -1211,7 +1181,7 @@ static int me4000_xilinx_download(me4000_info_t * info)
1211 /* Wait until /INIT pin is set */ 1181 /* Wait until /INIT pin is set */
1212 udelay(20); 1182 udelay(20);
1213 if (!inl(info->plx_regbase + PLX_INTCSR) & 0x20) { 1183 if (!inl(info->plx_regbase + PLX_INTCSR) & 0x20) {
1214 printk(KERN_ERR "me4000_xilinx_download():Can't init Xilinx\n"); 1184 printk(KERN_ERR "%s:Can't init Xilinx\n", __func__);
1215 return -EIO; 1185 return -EIO;
1216 } 1186 }
1217 1187
@@ -1232,7 +1202,7 @@ static int me4000_xilinx_download(me4000_info_t * info)
1232 /* Check if BUSY flag is low */ 1202 /* Check if BUSY flag is low */
1233 if (inl(info->plx_regbase + PLX_ICR) & 0x20) { 1203 if (inl(info->plx_regbase + PLX_ICR) & 0x20) {
1234 printk(KERN_ERR 1204 printk(KERN_ERR
1235 "me4000_xilinx_download():Xilinx is still busy (idx = %d)\n", 1205 "%s:Xilinx is still busy (idx = %d)\n", __func__,
1236 idx); 1206 idx);
1237 return -EIO; 1207 return -EIO;
1238 } 1208 }
@@ -1246,9 +1216,9 @@ static int me4000_xilinx_download(me4000_info_t * info)
1246 PDEBUG("me4000_xilinx_download():Download was successful\n"); 1216 PDEBUG("me4000_xilinx_download():Download was successful\n");
1247 } else { 1217 } else {
1248 printk(KERN_ERR 1218 printk(KERN_ERR
1249 "ME4000:me4000_xilinx_download():DONE flag is not set\n"); 1219 "ME4000:%s:DONE flag is not set\n", __func__);
1250 printk(KERN_ERR 1220 printk(KERN_ERR
1251 "ME4000:me4000_xilinx_download():Download not succesful\n"); 1221 "ME4000:%s:Download not succesful\n", __func__);
1252 return -EIO; 1222 return -EIO;
1253 } 1223 }
1254 1224
@@ -1260,7 +1230,7 @@ static int me4000_xilinx_download(me4000_info_t * info)
1260 return 0; 1230 return 0;
1261} 1231}
1262 1232
1263static int me4000_reset_board(me4000_info_t * info) 1233static int me4000_reset_board(struct me4000_info *info)
1264{ 1234{
1265 unsigned long icr; 1235 unsigned long icr;
1266 1236
@@ -1314,12 +1284,12 @@ static int me4000_open(struct inode *inode_p, struct file *file_p)
1314 int err = 0; 1284 int err = 0;
1315 int i; 1285 int i;
1316 struct list_head *ptr; 1286 struct list_head *ptr;
1317 me4000_info_t *board_info = NULL; 1287 struct me4000_info *board_info = NULL;
1318 me4000_ao_context_t *ao_context = NULL; 1288 struct me4000_ao_context *ao_context = NULL;
1319 me4000_ai_context_t *ai_context = NULL; 1289 struct me4000_ai_context *ai_context = NULL;
1320 me4000_dio_context_t *dio_context = NULL; 1290 struct me4000_dio_context *dio_context = NULL;
1321 me4000_cnt_context_t *cnt_context = NULL; 1291 struct me4000_cnt_context *cnt_context = NULL;
1322 me4000_ext_int_context_t *ext_int_context = NULL; 1292 struct me4000_ext_int_context *ext_int_context = NULL;
1323 1293
1324 CALL_PDEBUG("me4000_open() is executed\n"); 1294 CALL_PDEBUG("me4000_open() is executed\n");
1325 1295
@@ -1335,7 +1305,7 @@ static int me4000_open(struct inode *inode_p, struct file *file_p)
1335 /* Search for the board context */ 1305 /* Search for the board context */
1336 for (ptr = me4000_board_info_list.next, i = 0; 1306 for (ptr = me4000_board_info_list.next, i = 0;
1337 ptr != &me4000_board_info_list; ptr = ptr->next, i++) { 1307 ptr != &me4000_board_info_list; ptr = ptr->next, i++) {
1338 board_info = list_entry(ptr, me4000_info_t, list); 1308 board_info = list_entry(ptr, struct me4000_info, list);
1339 if (i == board) 1309 if (i == board)
1340 break; 1310 break;
1341 } 1311 }
@@ -1351,7 +1321,8 @@ static int me4000_open(struct inode *inode_p, struct file *file_p)
1351 for (ptr = board_info->ao_context_list.next, i = 0; 1321 for (ptr = board_info->ao_context_list.next, i = 0;
1352 ptr != &board_info->ao_context_list; 1322 ptr != &board_info->ao_context_list;
1353 ptr = ptr->next, i++) { 1323 ptr = ptr->next, i++) {
1354 ao_context = list_entry(ptr, me4000_ao_context_t, list); 1324 ao_context = list_entry(ptr, struct me4000_ao_context,
1325 list);
1355 if (i == dev) 1326 if (i == dev)
1356 break; 1327 break;
1357 } 1328 }
@@ -1415,7 +1386,7 @@ static int me4000_open(struct inode *inode_p, struct file *file_p)
1415 /* Search for the board context */ 1386 /* Search for the board context */
1416 for (ptr = me4000_board_info_list.next, i = 0; 1387 for (ptr = me4000_board_info_list.next, i = 0;
1417 ptr != &me4000_board_info_list; ptr = ptr->next, i++) { 1388 ptr != &me4000_board_info_list; ptr = ptr->next, i++) {
1418 board_info = list_entry(ptr, me4000_info_t, list); 1389 board_info = list_entry(ptr, struct me4000_info, list);
1419 if (i == board) 1390 if (i == board)
1420 break; 1391 break;
1421 } 1392 }
@@ -1469,7 +1440,7 @@ static int me4000_open(struct inode *inode_p, struct file *file_p)
1469 /* Search for the board context */ 1440 /* Search for the board context */
1470 for (ptr = me4000_board_info_list.next; 1441 for (ptr = me4000_board_info_list.next;
1471 ptr != &me4000_board_info_list; ptr = ptr->next) { 1442 ptr != &me4000_board_info_list; ptr = ptr->next) {
1472 board_info = list_entry(ptr, me4000_info_t, list); 1443 board_info = list_entry(ptr, struct me4000_info, list);
1473 if (board_info->board_count == board) 1444 if (board_info->board_count == board)
1474 break; 1445 break;
1475 } 1446 }
@@ -1514,7 +1485,7 @@ static int me4000_open(struct inode *inode_p, struct file *file_p)
1514 /* Search for the board context */ 1485 /* Search for the board context */
1515 for (ptr = me4000_board_info_list.next; 1486 for (ptr = me4000_board_info_list.next;
1516 ptr != &me4000_board_info_list; ptr = ptr->next) { 1487 ptr != &me4000_board_info_list; ptr = ptr->next) {
1517 board_info = list_entry(ptr, me4000_info_t, list); 1488 board_info = list_entry(ptr, struct me4000_info, list);
1518 if (board_info->board_count == board) 1489 if (board_info->board_count == board)
1519 break; 1490 break;
1520 } 1491 }
@@ -1557,7 +1528,7 @@ static int me4000_open(struct inode *inode_p, struct file *file_p)
1557 /* Search for the board context */ 1528 /* Search for the board context */
1558 for (ptr = me4000_board_info_list.next; 1529 for (ptr = me4000_board_info_list.next;
1559 ptr != &me4000_board_info_list; ptr = ptr->next) { 1530 ptr != &me4000_board_info_list; ptr = ptr->next) {
1560 board_info = list_entry(ptr, me4000_info_t, list); 1531 board_info = list_entry(ptr, struct me4000_info, list);
1561 if (board_info->board_count == board) 1532 if (board_info->board_count == board)
1562 break; 1533 break;
1563 } 1534 }
@@ -1613,11 +1584,11 @@ static int me4000_open(struct inode *inode_p, struct file *file_p)
1613 1584
1614static int me4000_release(struct inode *inode_p, struct file *file_p) 1585static int me4000_release(struct inode *inode_p, struct file *file_p)
1615{ 1586{
1616 me4000_ao_context_t *ao_context; 1587 struct me4000_ao_context *ao_context;
1617 me4000_ai_context_t *ai_context; 1588 struct me4000_ai_context *ai_context;
1618 me4000_dio_context_t *dio_context; 1589 struct me4000_dio_context *dio_context;
1619 me4000_cnt_context_t *cnt_context; 1590 struct me4000_cnt_context *cnt_context;
1620 me4000_ext_int_context_t *ext_int_context; 1591 struct me4000_ext_int_context *ext_int_context;
1621 1592
1622 CALL_PDEBUG("me4000_release() is executed\n"); 1593 CALL_PDEBUG("me4000_release() is executed\n");
1623 1594
@@ -1661,9 +1632,6 @@ static int me4000_release(struct inode *inode_p, struct file *file_p)
1661 1632
1662 free_irq(ext_int_context->irq, ext_int_context); 1633 free_irq(ext_int_context->irq, ext_int_context);
1663 1634
1664 /* Delete the fasync structure and free memory */
1665 me4000_ext_int_fasync(0, file_p, 0);
1666
1667 /* Mark as unused */ 1635 /* Mark as unused */
1668 ext_int_context->in_use = 0; 1636 ext_int_context->in_use = 0;
1669 } else { 1637 } else {
@@ -1677,7 +1645,7 @@ static int me4000_release(struct inode *inode_p, struct file *file_p)
1677 1645
1678/*------------------------------- Analog output stuff --------------------------------------*/ 1646/*------------------------------- Analog output stuff --------------------------------------*/
1679 1647
1680static int me4000_ao_prepare(me4000_ao_context_t * ao_context) 1648static int me4000_ao_prepare(struct me4000_ao_context *ao_context)
1681{ 1649{
1682 unsigned long flags; 1650 unsigned long flags;
1683 1651
@@ -1756,7 +1724,7 @@ static int me4000_ao_prepare(me4000_ao_context_t * ao_context)
1756 return 0; 1724 return 0;
1757} 1725}
1758 1726
1759static int me4000_ao_reset(me4000_ao_context_t * ao_context) 1727static int me4000_ao_reset(struct me4000_ao_context *ao_context)
1760{ 1728{
1761 u32 tmp; 1729 u32 tmp;
1762 wait_queue_head_t queue; 1730 wait_queue_head_t queue;
@@ -1777,9 +1745,10 @@ static int me4000_ao_reset(me4000_ao_context_t * ao_context)
1777 tmp |= ME4000_AO_CTRL_BIT_IMMEDIATE_STOP; 1745 tmp |= ME4000_AO_CTRL_BIT_IMMEDIATE_STOP;
1778 me4000_outl(tmp, ao_context->ctrl_reg); 1746 me4000_outl(tmp, ao_context->ctrl_reg);
1779 1747
1780 while (inl(ao_context->status_reg) & ME4000_AO_STATUS_BIT_FSM) { 1748 wait_event_timeout(queue,
1781 sleep_on_timeout(&queue, 1); 1749 (inl(ao_context->status_reg) &
1782 } 1750 ME4000_AO_STATUS_BIT_FSM) == 0,
1751 1);
1783 1752
1784 /* Set to transparent mode */ 1753 /* Set to transparent mode */
1785 me4000_ao_simultaneous_disable(ao_context); 1754 me4000_ao_simultaneous_disable(ao_context);
@@ -1812,9 +1781,10 @@ static int me4000_ao_reset(me4000_ao_context_t * ao_context)
1812 me4000_outl(tmp, ao_context->ctrl_reg); 1781 me4000_outl(tmp, ao_context->ctrl_reg);
1813 spin_unlock_irqrestore(&ao_context->int_lock, flags); 1782 spin_unlock_irqrestore(&ao_context->int_lock, flags);
1814 1783
1815 while (inl(ao_context->status_reg) & ME4000_AO_STATUS_BIT_FSM) { 1784 wait_event_timeout(queue,
1816 sleep_on_timeout(&queue, 1); 1785 (inl(ao_context->status_reg) &
1817 } 1786 ME4000_AO_STATUS_BIT_FSM) == 0,
1787 1);
1818 1788
1819 /* Clear the circular buffer */ 1789 /* Clear the circular buffer */
1820 ao_context->circ_buf.head = 0; 1790 ao_context->circ_buf.head = 0;
@@ -1853,9 +1823,9 @@ static int me4000_ao_reset(me4000_ao_context_t * ao_context)
1853} 1823}
1854 1824
1855static ssize_t me4000_ao_write_sing(struct file *filep, const char *buff, 1825static ssize_t me4000_ao_write_sing(struct file *filep, const char *buff,
1856 size_t cnt, loff_t * offp) 1826 size_t cnt, loff_t *offp)
1857{ 1827{
1858 me4000_ao_context_t *ao_context = filep->private_data; 1828 struct me4000_ao_context *ao_context = filep->private_data;
1859 u32 value; 1829 u32 value;
1860 const u16 *buffer = (const u16 *)buff; 1830 const u16 *buffer = (const u16 *)buff;
1861 1831
@@ -1863,13 +1833,13 @@ static ssize_t me4000_ao_write_sing(struct file *filep, const char *buff,
1863 1833
1864 if (cnt != 2) { 1834 if (cnt != 2) {
1865 printk(KERN_ERR 1835 printk(KERN_ERR
1866 "me4000_ao_write_sing():Write count is not 2\n"); 1836 "%s:Write count is not 2\n", __func__);
1867 return -EINVAL; 1837 return -EINVAL;
1868 } 1838 }
1869 1839
1870 if (get_user(value, buffer)) { 1840 if (get_user(value, buffer)) {
1871 printk(KERN_ERR 1841 printk(KERN_ERR
1872 "me4000_ao_write_sing():Cannot copy data from user\n"); 1842 "%s:Cannot copy data from user\n", __func__);
1873 return -EFAULT; 1843 return -EFAULT;
1874 } 1844 }
1875 1845
@@ -1879,9 +1849,9 @@ static ssize_t me4000_ao_write_sing(struct file *filep, const char *buff,
1879} 1849}
1880 1850
1881static ssize_t me4000_ao_write_wrap(struct file *filep, const char *buff, 1851static ssize_t me4000_ao_write_wrap(struct file *filep, const char *buff,
1882 size_t cnt, loff_t * offp) 1852 size_t cnt, loff_t *offp)
1883{ 1853{
1884 me4000_ao_context_t *ao_context = filep->private_data; 1854 struct me4000_ao_context *ao_context = filep->private_data;
1885 size_t i; 1855 size_t i;
1886 u32 value; 1856 u32 value;
1887 u32 tmp; 1857 u32 tmp;
@@ -1893,13 +1863,13 @@ static ssize_t me4000_ao_write_wrap(struct file *filep, const char *buff,
1893 /* Check if a conversion is already running */ 1863 /* Check if a conversion is already running */
1894 if (inl(ao_context->status_reg) & ME4000_AO_STATUS_BIT_FSM) { 1864 if (inl(ao_context->status_reg) & ME4000_AO_STATUS_BIT_FSM) {
1895 printk(KERN_ERR 1865 printk(KERN_ERR
1896 "ME4000:me4000_ao_write_wrap():There is already a conversion running\n"); 1866 "%s:There is already a conversion running\n", __func__);
1897 return -EBUSY; 1867 return -EBUSY;
1898 } 1868 }
1899 1869
1900 if (count > ME4000_AO_FIFO_COUNT) { 1870 if (count > ME4000_AO_FIFO_COUNT) {
1901 printk(KERN_ERR 1871 printk(KERN_ERR
1902 "me4000_ao_write_wrap():Can't load more than %d values\n", 1872 "%s:Can't load more than %d values\n", __func__,
1903 ME4000_AO_FIFO_COUNT); 1873 ME4000_AO_FIFO_COUNT);
1904 return -ENOSPC; 1874 return -ENOSPC;
1905 } 1875 }
@@ -1914,7 +1884,7 @@ static ssize_t me4000_ao_write_wrap(struct file *filep, const char *buff,
1914 for (i = 0; i < count; i++) { 1884 for (i = 0; i < count; i++) {
1915 if (get_user(value, buffer + i)) { 1885 if (get_user(value, buffer + i)) {
1916 printk(KERN_ERR 1886 printk(KERN_ERR
1917 "me4000_ao_write_single():Cannot copy data from user\n"); 1887 "%s:Cannot copy data from user\n", __func__);
1918 return -EFAULT; 1888 return -EFAULT;
1919 } 1889 }
1920 if (((ao_context->fifo_reg & 0xFF) == ME4000_AO_01_FIFO_REG) 1890 if (((ao_context->fifo_reg & 0xFF) == ME4000_AO_01_FIFO_REG)
@@ -1928,9 +1898,9 @@ static ssize_t me4000_ao_write_wrap(struct file *filep, const char *buff,
1928} 1898}
1929 1899
1930static ssize_t me4000_ao_write_cont(struct file *filep, const char *buff, 1900static ssize_t me4000_ao_write_cont(struct file *filep, const char *buff,
1931 size_t cnt, loff_t * offp) 1901 size_t cnt, loff_t *offp)
1932{ 1902{
1933 me4000_ao_context_t *ao_context = filep->private_data; 1903 struct me4000_ao_context *ao_context = filep->private_data;
1934 const u16 *buffer = (const u16 *)buff; 1904 const u16 *buffer = (const u16 *)buff;
1935 size_t count = cnt / 2; 1905 size_t count = cnt / 2;
1936 unsigned long flags; 1906 unsigned long flags;
@@ -2154,9 +2124,9 @@ static ssize_t me4000_ao_write_cont(struct file *filep, const char *buff,
2154 return 2 * ret; 2124 return 2 * ret;
2155} 2125}
2156 2126
2157static unsigned int me4000_ao_poll_cont(struct file *file_p, poll_table * wait) 2127static unsigned int me4000_ao_poll_cont(struct file *file_p, poll_table *wait)
2158{ 2128{
2159 me4000_ao_context_t *ao_context; 2129 struct me4000_ao_context *ao_context;
2160 unsigned long mask = 0; 2130 unsigned long mask = 0;
2161 2131
2162 CALL_PDEBUG("me4000_ao_poll_cont() is executed\n"); 2132 CALL_PDEBUG("me4000_ao_poll_cont() is executed\n");
@@ -2177,7 +2147,7 @@ static unsigned int me4000_ao_poll_cont(struct file *file_p, poll_table * wait)
2177static int me4000_ao_fsync_cont(struct file *file_p, struct dentry *dentry_p, 2147static int me4000_ao_fsync_cont(struct file *file_p, struct dentry *dentry_p,
2178 int datasync) 2148 int datasync)
2179{ 2149{
2180 me4000_ao_context_t *ao_context; 2150 struct me4000_ao_context *ao_context;
2181 wait_queue_head_t queue; 2151 wait_queue_head_t queue;
2182 2152
2183 CALL_PDEBUG("me4000_ao_fsync_cont() is executed\n"); 2153 CALL_PDEBUG("me4000_ao_fsync_cont() is executed\n");
@@ -2187,15 +2157,19 @@ static int me4000_ao_fsync_cont(struct file *file_p, struct dentry *dentry_p,
2187 2157
2188 while (inl(ao_context->status_reg) & ME4000_AO_STATUS_BIT_FSM) { 2158 while (inl(ao_context->status_reg) & ME4000_AO_STATUS_BIT_FSM) {
2189 interruptible_sleep_on_timeout(&queue, 1); 2159 interruptible_sleep_on_timeout(&queue, 1);
2160 wait_event_interruptible_timeout(queue,
2161 !(inl(ao_context->status_reg) & ME4000_AO_STATUS_BIT_FSM),
2162 1);
2190 if (ao_context->pipe_flag) { 2163 if (ao_context->pipe_flag) {
2191 printk(KERN_ERR 2164 printk(KERN_ERR
2192 "me4000_ao_fsync_cont():Broken pipe detected\n"); 2165 "%s:Broken pipe detected\n", __func__);
2193 return -EPIPE; 2166 return -EPIPE;
2194 } 2167 }
2195 2168
2196 if (signal_pending(current)) { 2169 if (signal_pending(current)) {
2197 printk(KERN_ERR 2170 printk(KERN_ERR
2198 "me4000_ao_fsync_cont():Wait on state machine interrupted\n"); 2171 "%s:Wait on state machine interrupted\n",
2172 __func__);
2199 return -EINTR; 2173 return -EINTR;
2200 } 2174 }
2201 } 2175 }
@@ -2206,7 +2180,7 @@ static int me4000_ao_fsync_cont(struct file *file_p, struct dentry *dentry_p,
2206static int me4000_ao_ioctl_sing(struct inode *inode_p, struct file *file_p, 2180static int me4000_ao_ioctl_sing(struct inode *inode_p, struct file *file_p,
2207 unsigned int service, unsigned long arg) 2181 unsigned int service, unsigned long arg)
2208{ 2182{
2209 me4000_ao_context_t *ao_context; 2183 struct me4000_ao_context *ao_context;
2210 2184
2211 CALL_PDEBUG("me4000_ao_ioctl_sing() is executed\n"); 2185 CALL_PDEBUG("me4000_ao_ioctl_sing() is executed\n");
2212 2186
@@ -2229,7 +2203,7 @@ static int me4000_ao_ioctl_sing(struct inode *inode_p, struct file *file_p,
2229 case ME4000_AO_PRELOAD_UPDATE: 2203 case ME4000_AO_PRELOAD_UPDATE:
2230 return me4000_ao_preload_update(ao_context); 2204 return me4000_ao_preload_update(ao_context);
2231 case ME4000_GET_USER_INFO: 2205 case ME4000_GET_USER_INFO:
2232 return me4000_get_user_info((me4000_user_info_t *) arg, 2206 return me4000_get_user_info((struct me4000_user_info *)arg,
2233 ao_context->board_info); 2207 ao_context->board_info);
2234 case ME4000_AO_SIMULTANEOUS_EX_TRIG: 2208 case ME4000_AO_SIMULTANEOUS_EX_TRIG:
2235 return me4000_ao_simultaneous_ex_trig(ao_context); 2209 return me4000_ao_simultaneous_ex_trig(ao_context);
@@ -2239,8 +2213,9 @@ static int me4000_ao_ioctl_sing(struct inode *inode_p, struct file *file_p,
2239 return me4000_ao_simultaneous_disable(ao_context); 2213 return me4000_ao_simultaneous_disable(ao_context);
2240 case ME4000_AO_SIMULTANEOUS_UPDATE: 2214 case ME4000_AO_SIMULTANEOUS_UPDATE:
2241 return 2215 return
2242 me4000_ao_simultaneous_update((me4000_ao_channel_list_t *) 2216 me4000_ao_simultaneous_update(
2243 arg, ao_context); 2217 (struct me4000_ao_channel_list *)arg,
2218 ao_context);
2244 case ME4000_AO_EX_TRIG_TIMEOUT: 2219 case ME4000_AO_EX_TRIG_TIMEOUT:
2245 return me4000_ao_ex_trig_timeout((unsigned long *)arg, 2220 return me4000_ao_ex_trig_timeout((unsigned long *)arg,
2246 ao_context); 2221 ao_context);
@@ -2258,7 +2233,7 @@ static int me4000_ao_ioctl_sing(struct inode *inode_p, struct file *file_p,
2258static int me4000_ao_ioctl_wrap(struct inode *inode_p, struct file *file_p, 2233static int me4000_ao_ioctl_wrap(struct inode *inode_p, struct file *file_p,
2259 unsigned int service, unsigned long arg) 2234 unsigned int service, unsigned long arg)
2260{ 2235{
2261 me4000_ao_context_t *ao_context; 2236 struct me4000_ao_context *ao_context;
2262 2237
2263 CALL_PDEBUG("me4000_ao_ioctl_wrap() is executed\n"); 2238 CALL_PDEBUG("me4000_ao_ioctl_wrap() is executed\n");
2264 2239
@@ -2287,7 +2262,7 @@ static int me4000_ao_ioctl_wrap(struct inode *inode_p, struct file *file_p,
2287 case ME4000_AO_EX_TRIG_DISABLE: 2262 case ME4000_AO_EX_TRIG_DISABLE:
2288 return me4000_ao_ex_trig_disable(ao_context); 2263 return me4000_ao_ex_trig_disable(ao_context);
2289 case ME4000_GET_USER_INFO: 2264 case ME4000_GET_USER_INFO:
2290 return me4000_get_user_info((me4000_user_info_t *) arg, 2265 return me4000_get_user_info((struct me4000_user_info *)arg,
2291 ao_context->board_info); 2266 ao_context->board_info);
2292 case ME4000_AO_FSM_STATE: 2267 case ME4000_AO_FSM_STATE:
2293 return me4000_ao_fsm_state((int *)arg, ao_context); 2268 return me4000_ao_fsm_state((int *)arg, ao_context);
@@ -2310,7 +2285,7 @@ static int me4000_ao_ioctl_wrap(struct inode *inode_p, struct file *file_p,
2310static int me4000_ao_ioctl_cont(struct inode *inode_p, struct file *file_p, 2285static int me4000_ao_ioctl_cont(struct inode *inode_p, struct file *file_p,
2311 unsigned int service, unsigned long arg) 2286 unsigned int service, unsigned long arg)
2312{ 2287{
2313 me4000_ao_context_t *ao_context; 2288 struct me4000_ao_context *ao_context;
2314 2289
2315 CALL_PDEBUG("me4000_ao_ioctl_cont() is executed\n"); 2290 CALL_PDEBUG("me4000_ao_ioctl_cont() is executed\n");
2316 2291
@@ -2345,7 +2320,7 @@ static int me4000_ao_ioctl_cont(struct inode *inode_p, struct file *file_p,
2345 case ME4000_AO_FSM_STATE: 2320 case ME4000_AO_FSM_STATE:
2346 return me4000_ao_fsm_state((int *)arg, ao_context); 2321 return me4000_ao_fsm_state((int *)arg, ao_context);
2347 case ME4000_GET_USER_INFO: 2322 case ME4000_GET_USER_INFO:
2348 return me4000_get_user_info((me4000_user_info_t *) arg, 2323 return me4000_get_user_info((struct me4000_user_info *)arg,
2349 ao_context->board_info); 2324 ao_context->board_info);
2350 case ME4000_AO_SYNCHRONOUS_EX_TRIG: 2325 case ME4000_AO_SYNCHRONOUS_EX_TRIG:
2351 return me4000_ao_synchronous_ex_trig(ao_context); 2326 return me4000_ao_synchronous_ex_trig(ao_context);
@@ -2362,7 +2337,8 @@ static int me4000_ao_ioctl_cont(struct inode *inode_p, struct file *file_p,
2362 return 0; 2337 return 0;
2363} 2338}
2364 2339
2365static int me4000_ao_start(unsigned long *arg, me4000_ao_context_t * ao_context) 2340static int me4000_ao_start(unsigned long *arg,
2341 struct me4000_ao_context *ao_context)
2366{ 2342{
2367 u32 tmp; 2343 u32 tmp;
2368 wait_queue_head_t queue; 2344 wait_queue_head_t queue;
@@ -2412,7 +2388,7 @@ static int me4000_ao_start(unsigned long *arg, me4000_ao_context_t * ao_context)
2412 return 0; 2388 return 0;
2413} 2389}
2414 2390
2415static int me4000_ao_stop(me4000_ao_context_t * ao_context) 2391static int me4000_ao_stop(struct me4000_ao_context *ao_context)
2416{ 2392{
2417 u32 tmp; 2393 u32 tmp;
2418 wait_queue_head_t queue; 2394 wait_queue_head_t queue;
@@ -2445,7 +2421,7 @@ static int me4000_ao_stop(me4000_ao_context_t * ao_context)
2445 return 0; 2421 return 0;
2446} 2422}
2447 2423
2448static int me4000_ao_immediate_stop(me4000_ao_context_t * ao_context) 2424static int me4000_ao_immediate_stop(struct me4000_ao_context *ao_context)
2449{ 2425{
2450 u32 tmp; 2426 u32 tmp;
2451 wait_queue_head_t queue; 2427 wait_queue_head_t queue;
@@ -2477,8 +2453,8 @@ static int me4000_ao_immediate_stop(me4000_ao_context_t * ao_context)
2477 return 0; 2453 return 0;
2478} 2454}
2479 2455
2480static int me4000_ao_timer_set_divisor(u32 * arg, 2456static int me4000_ao_timer_set_divisor(u32 *arg,
2481 me4000_ao_context_t * ao_context) 2457 struct me4000_ao_context *ao_context)
2482{ 2458{
2483 u32 divisor; 2459 u32 divisor;
2484 u32 tmp; 2460 u32 tmp;
@@ -2518,7 +2494,7 @@ static int me4000_ao_timer_set_divisor(u32 * arg,
2518} 2494}
2519 2495
2520static int me4000_ao_ex_trig_set_edge(int *arg, 2496static int me4000_ao_ex_trig_set_edge(int *arg,
2521 me4000_ao_context_t * ao_context) 2497 struct me4000_ao_context *ao_context)
2522{ 2498{
2523 int mode; 2499 int mode;
2524 u32 tmp; 2500 u32 tmp;
@@ -2569,7 +2545,7 @@ static int me4000_ao_ex_trig_set_edge(int *arg,
2569 return 0; 2545 return 0;
2570} 2546}
2571 2547
2572static int me4000_ao_ex_trig_enable(me4000_ao_context_t * ao_context) 2548static int me4000_ao_ex_trig_enable(struct me4000_ao_context *ao_context)
2573{ 2549{
2574 u32 tmp; 2550 u32 tmp;
2575 unsigned long flags; 2551 unsigned long flags;
@@ -2593,7 +2569,7 @@ static int me4000_ao_ex_trig_enable(me4000_ao_context_t * ao_context)
2593 return 0; 2569 return 0;
2594} 2570}
2595 2571
2596static int me4000_ao_ex_trig_disable(me4000_ao_context_t * ao_context) 2572static int me4000_ao_ex_trig_disable(struct me4000_ao_context *ao_context)
2597{ 2573{
2598 u32 tmp; 2574 u32 tmp;
2599 unsigned long flags; 2575 unsigned long flags;
@@ -2617,7 +2593,7 @@ static int me4000_ao_ex_trig_disable(me4000_ao_context_t * ao_context)
2617 return 0; 2593 return 0;
2618} 2594}
2619 2595
2620static int me4000_ao_simultaneous_disable(me4000_ao_context_t * ao_context) 2596static int me4000_ao_simultaneous_disable(struct me4000_ao_context *ao_context)
2621{ 2597{
2622 u32 tmp; 2598 u32 tmp;
2623 2599
@@ -2643,7 +2619,7 @@ static int me4000_ao_simultaneous_disable(me4000_ao_context_t * ao_context)
2643 return 0; 2619 return 0;
2644} 2620}
2645 2621
2646static int me4000_ao_simultaneous_ex_trig(me4000_ao_context_t * ao_context) 2622static int me4000_ao_simultaneous_ex_trig(struct me4000_ao_context *ao_context)
2647{ 2623{
2648 u32 tmp; 2624 u32 tmp;
2649 2625
@@ -2659,7 +2635,7 @@ static int me4000_ao_simultaneous_ex_trig(me4000_ao_context_t * ao_context)
2659 return 0; 2635 return 0;
2660} 2636}
2661 2637
2662static int me4000_ao_simultaneous_sw(me4000_ao_context_t * ao_context) 2638static int me4000_ao_simultaneous_sw(struct me4000_ao_context *ao_context)
2663{ 2639{
2664 u32 tmp; 2640 u32 tmp;
2665 2641
@@ -2675,13 +2651,13 @@ static int me4000_ao_simultaneous_sw(me4000_ao_context_t * ao_context)
2675 return 0; 2651 return 0;
2676} 2652}
2677 2653
2678static int me4000_ao_preload(me4000_ao_context_t * ao_context) 2654static int me4000_ao_preload(struct me4000_ao_context *ao_context)
2679{ 2655{
2680 CALL_PDEBUG("me4000_ao_preload() is executed\n"); 2656 CALL_PDEBUG("me4000_ao_preload() is executed\n");
2681 return me4000_ao_simultaneous_sw(ao_context); 2657 return me4000_ao_simultaneous_sw(ao_context);
2682} 2658}
2683 2659
2684static int me4000_ao_preload_update(me4000_ao_context_t * ao_context) 2660static int me4000_ao_preload_update(struct me4000_ao_context *ao_context)
2685{ 2661{
2686 u32 tmp; 2662 u32 tmp;
2687 u32 ctrl; 2663 u32 ctrl;
@@ -2705,10 +2681,12 @@ static int me4000_ao_preload_update(me4000_ao_context_t * ao_context)
2705 if (! 2681 if (!
2706 (tmp & 2682 (tmp &
2707 (0x1 << 2683 (0x1 <<
2708 (((me4000_ao_context_t *) entry)->index + 16)))) { 2684 (((struct me4000_ao_context *)entry)->index
2685 + 16)))) {
2709 tmp &= 2686 tmp &=
2710 ~(0x1 << 2687 ~(0x1 <<
2711 (((me4000_ao_context_t *) entry)->index)); 2688 (((struct me4000_ao_context *)entry)->
2689 index));
2712 } 2690 }
2713 } 2691 }
2714 } 2692 }
@@ -2718,18 +2696,19 @@ static int me4000_ao_preload_update(me4000_ao_context_t * ao_context)
2718 return 0; 2696 return 0;
2719} 2697}
2720 2698
2721static int me4000_ao_simultaneous_update(me4000_ao_channel_list_t * arg, 2699static int me4000_ao_simultaneous_update(struct me4000_ao_channel_list *arg,
2722 me4000_ao_context_t * ao_context) 2700 struct me4000_ao_context *ao_context)
2723{ 2701{
2724 int err; 2702 int err;
2725 int i; 2703 int i;
2726 u32 tmp; 2704 u32 tmp;
2727 me4000_ao_channel_list_t channels; 2705 struct me4000_ao_channel_list channels;
2728 2706
2729 CALL_PDEBUG("me4000_ao_simultaneous_update() is executed\n"); 2707 CALL_PDEBUG("me4000_ao_simultaneous_update() is executed\n");
2730 2708
2731 /* Copy data from user */ 2709 /* Copy data from user */
2732 err = copy_from_user(&channels, arg, sizeof(me4000_ao_channel_list_t)); 2710 err = copy_from_user(&channels, arg,
2711 sizeof(struct me4000_ao_channel_list));
2733 if (err) { 2712 if (err) {
2734 printk(KERN_ERR 2713 printk(KERN_ERR
2735 "ME4000:me4000_ao_simultaneous_update():Can't copy command\n"); 2714 "ME4000:me4000_ao_simultaneous_update():Can't copy command\n");
@@ -2737,13 +2716,12 @@ static int me4000_ao_simultaneous_update(me4000_ao_channel_list_t * arg,
2737 } 2716 }
2738 2717
2739 channels.list = 2718 channels.list =
2740 kmalloc(sizeof(unsigned long) * channels.count, GFP_KERNEL); 2719 kzalloc(sizeof(unsigned long) * channels.count, GFP_KERNEL);
2741 if (!channels.list) { 2720 if (!channels.list) {
2742 printk(KERN_ERR 2721 printk(KERN_ERR
2743 "ME4000:me4000_ao_simultaneous_update():Can't get buffer\n"); 2722 "ME4000:me4000_ao_simultaneous_update():Can't get buffer\n");
2744 return -ENOMEM; 2723 return -ENOMEM;
2745 } 2724 }
2746 memset(channels.list, 0, sizeof(unsigned long) * channels.count);
2747 2725
2748 /* Copy channel list from user */ 2726 /* Copy channel list from user */
2749 err = 2727 err =
@@ -2777,7 +2755,7 @@ static int me4000_ao_simultaneous_update(me4000_ao_channel_list_t * arg,
2777 return 0; 2755 return 0;
2778} 2756}
2779 2757
2780static int me4000_ao_synchronous_ex_trig(me4000_ao_context_t * ao_context) 2758static int me4000_ao_synchronous_ex_trig(struct me4000_ao_context *ao_context)
2781{ 2759{
2782 u32 tmp; 2760 u32 tmp;
2783 unsigned long flags; 2761 unsigned long flags;
@@ -2813,7 +2791,7 @@ static int me4000_ao_synchronous_ex_trig(me4000_ao_context_t * ao_context)
2813 return 0; 2791 return 0;
2814} 2792}
2815 2793
2816static int me4000_ao_synchronous_sw(me4000_ao_context_t * ao_context) 2794static int me4000_ao_synchronous_sw(struct me4000_ao_context *ao_context)
2817{ 2795{
2818 u32 tmp; 2796 u32 tmp;
2819 unsigned long flags; 2797 unsigned long flags;
@@ -2848,13 +2826,13 @@ static int me4000_ao_synchronous_sw(me4000_ao_context_t * ao_context)
2848 return 0; 2826 return 0;
2849} 2827}
2850 2828
2851static int me4000_ao_synchronous_disable(me4000_ao_context_t * ao_context) 2829static int me4000_ao_synchronous_disable(struct me4000_ao_context *ao_context)
2852{ 2830{
2853 return me4000_ao_simultaneous_disable(ao_context); 2831 return me4000_ao_simultaneous_disable(ao_context);
2854} 2832}
2855 2833
2856static int me4000_ao_get_free_buffer(unsigned long *arg, 2834static int me4000_ao_get_free_buffer(unsigned long *arg,
2857 me4000_ao_context_t * ao_context) 2835 struct me4000_ao_context *ao_context)
2858{ 2836{
2859 unsigned long c; 2837 unsigned long c;
2860 int err; 2838 int err;
@@ -2864,7 +2842,7 @@ static int me4000_ao_get_free_buffer(unsigned long *arg,
2864 err = copy_to_user(arg, &c, sizeof(unsigned long)); 2842 err = copy_to_user(arg, &c, sizeof(unsigned long));
2865 if (err) { 2843 if (err) {
2866 printk(KERN_ERR 2844 printk(KERN_ERR
2867 "ME4000:me4000_ao_get_free_buffer():Can't copy to user space\n"); 2845 "%s:Can't copy to user space\n", __func__);
2868 return -EFAULT; 2846 return -EFAULT;
2869 } 2847 }
2870 2848
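Several hunks in this patch swap hard-coded function names inside log strings for the %s/__func__ pair, so the message keeps pointing at the right place if a function is ever renamed. A small, self-contained illustration; demo_copy_result is hypothetical, not a function from this driver:

#include <linux/kernel.h>
#include <linux/uaccess.h>

static int demo_copy_result(void __user *dst, const unsigned long *src)
{
        if (copy_to_user(dst, src, sizeof(*src))) {
                /* __func__ expands to "demo_copy_result" at compile time */
                printk(KERN_ERR "%s: can't copy to user space\n", __func__);
                return -EFAULT;
        }
        return 0;
}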
@@ -2872,7 +2850,7 @@ static int me4000_ao_get_free_buffer(unsigned long *arg,
2872} 2850}
2873 2851
2874static int me4000_ao_ex_trig_timeout(unsigned long *arg, 2852static int me4000_ao_ex_trig_timeout(unsigned long *arg,
2875 me4000_ao_context_t * ao_context) 2853 struct me4000_ao_context *ao_context)
2876{ 2854{
2877 u32 tmp; 2855 u32 tmp;
2878 wait_queue_head_t queue; 2856 wait_queue_head_t queue;
@@ -2928,7 +2906,7 @@ static int me4000_ao_ex_trig_timeout(unsigned long *arg,
2928 return 0; 2906 return 0;
2929} 2907}
2930 2908
2931static int me4000_ao_enable_do(me4000_ao_context_t * ao_context) 2909static int me4000_ao_enable_do(struct me4000_ao_context *ao_context)
2932{ 2910{
2933 u32 tmp; 2911 u32 tmp;
2934 unsigned long flags; 2912 unsigned long flags;
@@ -2959,7 +2937,7 @@ static int me4000_ao_enable_do(me4000_ao_context_t * ao_context)
2959 return 0; 2937 return 0;
2960} 2938}
2961 2939
2962static int me4000_ao_disable_do(me4000_ao_context_t * ao_context) 2940static int me4000_ao_disable_do(struct me4000_ao_context *ao_context)
2963{ 2941{
2964 u32 tmp; 2942 u32 tmp;
2965 unsigned long flags; 2943 unsigned long flags;
@@ -2989,7 +2967,7 @@ static int me4000_ao_disable_do(me4000_ao_context_t * ao_context)
2989 return 0; 2967 return 0;
2990} 2968}
2991 2969
2992static int me4000_ao_fsm_state(int *arg, me4000_ao_context_t * ao_context) 2970static int me4000_ao_fsm_state(int *arg, struct me4000_ao_context *ao_context)
2993{ 2971{
2994 unsigned long tmp; 2972 unsigned long tmp;
2995 2973
@@ -3012,9 +2990,9 @@ static int me4000_ao_fsm_state(int *arg, me4000_ao_context_t * ao_context)
3012 return 0; 2990 return 0;
3013} 2991}
3014 2992
3015/*------------------------------- Analog input stuff --------------------------------------*/ 2993/*------------------------- Analog input stuff -------------------------------*/
3016 2994
3017static int me4000_ai_prepare(me4000_ai_context_t * ai_context) 2995static int me4000_ai_prepare(struct me4000_ai_context *ai_context)
3018{ 2996{
3019 wait_queue_head_t queue; 2997 wait_queue_head_t queue;
3020 int err; 2998 int err;
@@ -3057,14 +3035,13 @@ static int me4000_ai_prepare(me4000_ai_context_t * ai_context)
3057 3035
3058 /* Allocate circular buffer */ 3036 /* Allocate circular buffer */
3059 ai_context->circ_buf.buf = 3037 ai_context->circ_buf.buf =
3060 kmalloc(ME4000_AI_BUFFER_SIZE, GFP_KERNEL); 3038 kzalloc(ME4000_AI_BUFFER_SIZE, GFP_KERNEL);
3061 if (!ai_context->circ_buf.buf) { 3039 if (!ai_context->circ_buf.buf) {
3062 printk(KERN_ERR 3040 printk(KERN_ERR
3063 "ME4000:me4000_ai_prepare():Can't get circular buffer\n"); 3041 "ME4000:me4000_ai_prepare():Can't get circular buffer\n");
3064 free_irq(ai_context->irq, ai_context); 3042 free_irq(ai_context->irq, ai_context);
3065 return -ENOMEM; 3043 return -ENOMEM;
3066 } 3044 }
3067 memset(ai_context->circ_buf.buf, 0, ME4000_AI_BUFFER_SIZE);
3068 3045
3069 /* Clear the circular buffer */ 3046 /* Clear the circular buffer */
3070 ai_context->circ_buf.head = 0; 3047 ai_context->circ_buf.head = 0;
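Here, as in me4000_ao_simultaneous_update() further up, a kmalloc() followed by memset(..., 0, ...) is collapsed into a single kzalloc(), which returns already-zeroed memory. A minimal sketch with a made-up buffer size:

#include <linux/slab.h>

#define DEMO_BUFFER_SIZE 4096

static void *demo_alloc_buffer(void)
{
        void *buf;

        /* kzalloc() == kmalloc() + memset(..., 0, ...) in one call */
        buf = kzalloc(DEMO_BUFFER_SIZE, GFP_KERNEL);
        if (!buf)
                return NULL;    /* callers turn this into -ENOMEM */

        return buf;
}

For the sizeof(unsigned long) * channels.count allocation above, kcalloc(channels.count, sizeof(unsigned long), GFP_KERNEL) would give the same zeroing plus an overflow check on the multiplication.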
@@ -3074,7 +3051,7 @@ static int me4000_ai_prepare(me4000_ai_context_t * ai_context)
3074 return 0; 3051 return 0;
3075} 3052}
3076 3053
3077static int me4000_ai_reset(me4000_ai_context_t * ai_context) 3054static int me4000_ai_reset(struct me4000_ai_context *ai_context)
3078{ 3055{
3079 wait_queue_head_t queue; 3056 wait_queue_head_t queue;
3080 u32 tmp; 3057 u32 tmp;
@@ -3139,7 +3116,7 @@ static int me4000_ai_reset(me4000_ai_context_t * ai_context)
3139static int me4000_ai_ioctl_sing(struct inode *inode_p, struct file *file_p, 3116static int me4000_ai_ioctl_sing(struct inode *inode_p, struct file *file_p,
3140 unsigned int service, unsigned long arg) 3117 unsigned int service, unsigned long arg)
3141{ 3118{
3142 me4000_ai_context_t *ai_context; 3119 struct me4000_ai_context *ai_context;
3143 3120
3144 CALL_PDEBUG("me4000_ai_ioctl_sing() is executed\n"); 3121 CALL_PDEBUG("me4000_ai_ioctl_sing() is executed\n");
3145 3122
@@ -3157,16 +3134,17 @@ static int me4000_ai_ioctl_sing(struct inode *inode_p, struct file *file_p,
3157 3134
3158 switch (service) { 3135 switch (service) {
3159 case ME4000_AI_SINGLE: 3136 case ME4000_AI_SINGLE:
3160 return me4000_ai_single((me4000_ai_single_t *) arg, ai_context); 3137 return me4000_ai_single((struct me4000_ai_single *)arg,
3138 ai_context);
3161 case ME4000_AI_EX_TRIG_ENABLE: 3139 case ME4000_AI_EX_TRIG_ENABLE:
3162 return me4000_ai_ex_trig_enable(ai_context); 3140 return me4000_ai_ex_trig_enable(ai_context);
3163 case ME4000_AI_EX_TRIG_DISABLE: 3141 case ME4000_AI_EX_TRIG_DISABLE:
3164 return me4000_ai_ex_trig_disable(ai_context); 3142 return me4000_ai_ex_trig_disable(ai_context);
3165 case ME4000_AI_EX_TRIG_SETUP: 3143 case ME4000_AI_EX_TRIG_SETUP:
3166 return me4000_ai_ex_trig_setup((me4000_ai_trigger_t *) arg, 3144 return me4000_ai_ex_trig_setup((struct me4000_ai_trigger *)arg,
3167 ai_context); 3145 ai_context);
3168 case ME4000_GET_USER_INFO: 3146 case ME4000_GET_USER_INFO:
3169 return me4000_get_user_info((me4000_user_info_t *) arg, 3147 return me4000_get_user_info((struct me4000_user_info *)arg,
3170 ai_context->board_info); 3148 ai_context->board_info);
3171 case ME4000_AI_OFFSET_ENABLE: 3149 case ME4000_AI_OFFSET_ENABLE:
3172 return me4000_ai_offset_enable(ai_context); 3150 return me4000_ai_offset_enable(ai_context);
@@ -3177,9 +3155,11 @@ static int me4000_ai_ioctl_sing(struct inode *inode_p, struct file *file_p,
3177 case ME4000_AI_FULLSCALE_DISABLE: 3155 case ME4000_AI_FULLSCALE_DISABLE:
3178 return me4000_ai_fullscale_disable(ai_context); 3156 return me4000_ai_fullscale_disable(ai_context);
3179 case ME4000_AI_EEPROM_READ: 3157 case ME4000_AI_EEPROM_READ:
3180 return me4000_eeprom_read((me4000_eeprom_t *) arg, ai_context); 3158 return me4000_eeprom_read((struct me4000_eeprom *)arg,
3159 ai_context);
3181 case ME4000_AI_EEPROM_WRITE: 3160 case ME4000_AI_EEPROM_WRITE:
3182 return me4000_eeprom_write((me4000_eeprom_t *) arg, ai_context); 3161 return me4000_eeprom_write((struct me4000_eeprom *)arg,
3162 ai_context);
3183 default: 3163 default:
3184 printk(KERN_ERR 3164 printk(KERN_ERR
3185 "me4000_ai_ioctl_sing():Invalid service number\n"); 3165 "me4000_ai_ioctl_sing():Invalid service number\n");
@@ -3188,10 +3168,10 @@ static int me4000_ai_ioctl_sing(struct inode *inode_p, struct file *file_p,
3188 return 0; 3168 return 0;
3189} 3169}
3190 3170
3191static int me4000_ai_single(me4000_ai_single_t * arg, 3171static int me4000_ai_single(struct me4000_ai_single *arg,
3192 me4000_ai_context_t * ai_context) 3172 struct me4000_ai_context *ai_context)
3193{ 3173{
3194 me4000_ai_single_t cmd; 3174 struct me4000_ai_single cmd;
3195 int err; 3175 int err;
3196 u32 tmp; 3176 u32 tmp;
3197 wait_queue_head_t queue; 3177 wait_queue_head_t queue;
@@ -3202,7 +3182,7 @@ static int me4000_ai_single(me4000_ai_single_t * arg,
3202 init_waitqueue_head(&queue); 3182 init_waitqueue_head(&queue);
3203 3183
3204 /* Copy data from user */ 3184 /* Copy data from user */
3205 err = copy_from_user(&cmd, arg, sizeof(me4000_ai_single_t)); 3185 err = copy_from_user(&cmd, arg, sizeof(struct me4000_ai_single));
3206 if (err) { 3186 if (err) {
3207 printk(KERN_ERR 3187 printk(KERN_ERR
3208 "ME4000:me4000_ai_single():Can't copy from user space\n"); 3188 "ME4000:me4000_ai_single():Can't copy from user space\n");
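The ioctl helpers in this file all follow the same copy-in, validate, act, copy-out shape; these hunks only switch the sizeof expressions from typedef names to struct tags. A minimal sketch of the copy-in step, using a hypothetical demo_cmd structure and sizeof(*cmd) so the size always tracks the type being filled:

#include <linux/uaccess.h>

struct demo_cmd {
        int channel;
        int value;
};

static int demo_fetch_cmd(struct demo_cmd *cmd, const void __user *arg)
{
        /* sizeof(*cmd) follows the type automatically if it ever changes */
        if (copy_from_user(cmd, arg, sizeof(*cmd)))
                return -EFAULT;

        if (cmd->channel < 0)
                return -EINVAL; /* validate before touching hardware */

        return 0;
}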
@@ -3301,7 +3281,7 @@ static int me4000_ai_single(me4000_ai_single_t * arg,
3301 cmd.value = me4000_inl(ai_context->data_reg) & 0xFFFF; 3281 cmd.value = me4000_inl(ai_context->data_reg) & 0xFFFF;
3302 3282
3303 /* Copy result back to user */ 3283 /* Copy result back to user */
3304 err = copy_to_user(arg, &cmd, sizeof(me4000_ai_single_t)); 3284 err = copy_to_user(arg, &cmd, sizeof(struct me4000_ai_single));
3305 if (err) { 3285 if (err) {
3306 printk(KERN_ERR 3286 printk(KERN_ERR
3307 "ME4000:me4000_ai_single():Can't copy to user space\n"); 3287 "ME4000:me4000_ai_single():Can't copy to user space\n");
@@ -3314,7 +3294,7 @@ static int me4000_ai_single(me4000_ai_single_t * arg,
3314static int me4000_ai_ioctl_sw(struct inode *inode_p, struct file *file_p, 3294static int me4000_ai_ioctl_sw(struct inode *inode_p, struct file *file_p,
3315 unsigned int service, unsigned long arg) 3295 unsigned int service, unsigned long arg)
3316{ 3296{
3317 me4000_ai_context_t *ai_context; 3297 struct me4000_ai_context *ai_context;
3318 3298
3319 CALL_PDEBUG("me4000_ai_ioctl_sw() is executed\n"); 3299 CALL_PDEBUG("me4000_ai_ioctl_sw() is executed\n");
3320 3300
@@ -3332,9 +3312,11 @@ static int me4000_ai_ioctl_sw(struct inode *inode_p, struct file *file_p,
3332 3312
3333 switch (service) { 3313 switch (service) {
3334 case ME4000_AI_SC_SETUP: 3314 case ME4000_AI_SC_SETUP:
3335 return me4000_ai_sc_setup((me4000_ai_sc_t *) arg, ai_context); 3315 return me4000_ai_sc_setup((struct me4000_ai_sc *)arg,
3316 ai_context);
3336 case ME4000_AI_CONFIG: 3317 case ME4000_AI_CONFIG:
3337 return me4000_ai_config((me4000_ai_config_t *) arg, ai_context); 3318 return me4000_ai_config((struct me4000_ai_config *)arg,
3319 ai_context);
3338 case ME4000_AI_START: 3320 case ME4000_AI_START:
3339 return me4000_ai_start(ai_context); 3321 return me4000_ai_start(ai_context);
3340 case ME4000_AI_STOP: 3322 case ME4000_AI_STOP:
@@ -3344,19 +3326,20 @@ static int me4000_ai_ioctl_sw(struct inode *inode_p, struct file *file_p,
3344 case ME4000_AI_FSM_STATE: 3326 case ME4000_AI_FSM_STATE:
3345 return me4000_ai_fsm_state((int *)arg, ai_context); 3327 return me4000_ai_fsm_state((int *)arg, ai_context);
3346 case ME4000_GET_USER_INFO: 3328 case ME4000_GET_USER_INFO:
3347 return me4000_get_user_info((me4000_user_info_t *) arg, 3329 return me4000_get_user_info((struct me4000_user_info *)arg,
3348 ai_context->board_info); 3330 ai_context->board_info);
3349 case ME4000_AI_EEPROM_READ: 3331 case ME4000_AI_EEPROM_READ:
3350 return me4000_eeprom_read((me4000_eeprom_t *) arg, ai_context); 3332 return me4000_eeprom_read((struct me4000_eeprom *)arg,
3333 ai_context);
3351 case ME4000_AI_EEPROM_WRITE: 3334 case ME4000_AI_EEPROM_WRITE:
3352 return me4000_eeprom_write((me4000_eeprom_t *) arg, ai_context); 3335 return me4000_eeprom_write((struct me4000_eeprom *)arg,
3336 ai_context);
3353 case ME4000_AI_GET_COUNT_BUFFER: 3337 case ME4000_AI_GET_COUNT_BUFFER:
3354 return me4000_ai_get_count_buffer((unsigned long *)arg, 3338 return me4000_ai_get_count_buffer((unsigned long *)arg,
3355 ai_context); 3339 ai_context);
3356 default: 3340 default:
3357 printk(KERN_ERR 3341 printk(KERN_ERR
3358 "ME4000:me4000_ai_ioctl_sw():Invalid service number %d\n", 3342 "%s:Invalid service number %d\n", __func__, service);
3359 service);
3360 return -ENOTTY; 3343 return -ENOTTY;
3361 } 3344 }
3362 return 0; 3345 return 0;
@@ -3365,7 +3348,7 @@ static int me4000_ai_ioctl_sw(struct inode *inode_p, struct file *file_p,
3365static int me4000_ai_ioctl_ext(struct inode *inode_p, struct file *file_p, 3348static int me4000_ai_ioctl_ext(struct inode *inode_p, struct file *file_p,
3366 unsigned int service, unsigned long arg) 3349 unsigned int service, unsigned long arg)
3367{ 3350{
3368 me4000_ai_context_t *ai_context; 3351 struct me4000_ai_context *ai_context;
3369 3352
3370 CALL_PDEBUG("me4000_ai_ioctl_ext() is executed\n"); 3353 CALL_PDEBUG("me4000_ai_ioctl_ext() is executed\n");
3371 3354
@@ -3383,9 +3366,11 @@ static int me4000_ai_ioctl_ext(struct inode *inode_p, struct file *file_p,
3383 3366
3384 switch (service) { 3367 switch (service) {
3385 case ME4000_AI_SC_SETUP: 3368 case ME4000_AI_SC_SETUP:
3386 return me4000_ai_sc_setup((me4000_ai_sc_t *) arg, ai_context); 3369 return me4000_ai_sc_setup((struct me4000_ai_sc *)arg,
3370 ai_context);
3387 case ME4000_AI_CONFIG: 3371 case ME4000_AI_CONFIG:
3388 return me4000_ai_config((me4000_ai_config_t *) arg, ai_context); 3372 return me4000_ai_config((struct me4000_ai_config *)arg,
3373 ai_context);
3389 case ME4000_AI_START: 3374 case ME4000_AI_START:
3390 return me4000_ai_start_ex((unsigned long *)arg, ai_context); 3375 return me4000_ai_start_ex((unsigned long *)arg, ai_context);
3391 case ME4000_AI_STOP: 3376 case ME4000_AI_STOP:
@@ -3397,20 +3382,19 @@ static int me4000_ai_ioctl_ext(struct inode *inode_p, struct file *file_p,
3397 case ME4000_AI_EX_TRIG_DISABLE: 3382 case ME4000_AI_EX_TRIG_DISABLE:
3398 return me4000_ai_ex_trig_disable(ai_context); 3383 return me4000_ai_ex_trig_disable(ai_context);
3399 case ME4000_AI_EX_TRIG_SETUP: 3384 case ME4000_AI_EX_TRIG_SETUP:
3400 return me4000_ai_ex_trig_setup((me4000_ai_trigger_t *) arg, 3385 return me4000_ai_ex_trig_setup((struct me4000_ai_trigger *)arg,
3401 ai_context); 3386 ai_context);
3402 case ME4000_AI_FSM_STATE: 3387 case ME4000_AI_FSM_STATE:
3403 return me4000_ai_fsm_state((int *)arg, ai_context); 3388 return me4000_ai_fsm_state((int *)arg, ai_context);
3404 case ME4000_GET_USER_INFO: 3389 case ME4000_GET_USER_INFO:
3405 return me4000_get_user_info((me4000_user_info_t *) arg, 3390 return me4000_get_user_info((struct me4000_user_info *)arg,
3406 ai_context->board_info); 3391 ai_context->board_info);
3407 case ME4000_AI_GET_COUNT_BUFFER: 3392 case ME4000_AI_GET_COUNT_BUFFER:
3408 return me4000_ai_get_count_buffer((unsigned long *)arg, 3393 return me4000_ai_get_count_buffer((unsigned long *)arg,
3409 ai_context); 3394 ai_context);
3410 default: 3395 default:
3411 printk(KERN_ERR 3396 printk(KERN_ERR
3412 "ME4000:me4000_ai_ioctl_ext():Invalid service number %d\n", 3397 "%s:Invalid service number %d\n", __func__, service);
3413 service);
3414 return -ENOTTY; 3398 return -ENOTTY;
3415 } 3399 }
3416 return 0; 3400 return 0;
@@ -3418,7 +3402,7 @@ static int me4000_ai_ioctl_ext(struct inode *inode_p, struct file *file_p,
3418 3402
3419static int me4000_ai_fasync(int fd, struct file *file_p, int mode) 3403static int me4000_ai_fasync(int fd, struct file *file_p, int mode)
3420{ 3404{
3421 me4000_ai_context_t *ai_context; 3405 struct me4000_ai_context *ai_context;
3422 3406
3423 CALL_PDEBUG("me4000_ao_fasync_cont() is executed\n"); 3407 CALL_PDEBUG("me4000_ao_fasync_cont() is executed\n");
3424 3408
@@ -3426,10 +3410,10 @@ static int me4000_ai_fasync(int fd, struct file *file_p, int mode)
3426 return fasync_helper(fd, file_p, mode, &ai_context->fasync_p); 3410 return fasync_helper(fd, file_p, mode, &ai_context->fasync_p);
3427} 3411}
3428 3412
3429static int me4000_ai_config(me4000_ai_config_t * arg, 3413static int me4000_ai_config(struct me4000_ai_config *arg,
3430 me4000_ai_context_t * ai_context) 3414 struct me4000_ai_context *ai_context)
3431{ 3415{
3432 me4000_ai_config_t cmd; 3416 struct me4000_ai_config cmd;
3433 u32 *list = NULL; 3417 u32 *list = NULL;
3434 u32 mode; 3418 u32 mode;
3435 int i; 3419 int i;
@@ -3451,7 +3435,7 @@ static int me4000_ai_config(me4000_ai_config_t * arg,
3451 } 3435 }
3452 3436
3453 /* Copy data from user */ 3437 /* Copy data from user */
3454 err = copy_from_user(&cmd, arg, sizeof(me4000_ai_config_t)); 3438 err = copy_from_user(&cmd, arg, sizeof(struct me4000_ai_config));
3455 if (err) { 3439 if (err) {
3456 printk(KERN_ERR 3440 printk(KERN_ERR
3457 "ME4000:me4000_ai_config():Can't copy from user space\n"); 3441 "ME4000:me4000_ai_config():Can't copy from user space\n");
@@ -3671,7 +3655,7 @@ static int me4000_ai_config(me4000_ai_config_t * arg,
3671 3655
3672 return 0; 3656 return 0;
3673 3657
3674 AI_CONFIG_ERR: 3658AI_CONFIG_ERR:
3675 3659
3676 /* Reset the timers */ 3660 /* Reset the timers */
3677 ai_context->chan_timer = 66; 3661 ai_context->chan_timer = 66;
@@ -3699,7 +3683,7 @@ static int me4000_ai_config(me4000_ai_config_t * arg,
3699 3683
3700} 3684}
3701 3685
3702static int ai_common_start(me4000_ai_context_t * ai_context) 3686static int ai_common_start(struct me4000_ai_context *ai_context)
3703{ 3687{
3704 u32 tmp; 3688 u32 tmp;
3705 CALL_PDEBUG("ai_common_start() is executed\n"); 3689 CALL_PDEBUG("ai_common_start() is executed\n");
@@ -3762,7 +3746,7 @@ static int ai_common_start(me4000_ai_context_t * ai_context)
3762 return 0; 3746 return 0;
3763} 3747}
3764 3748
3765static int me4000_ai_start(me4000_ai_context_t * ai_context) 3749static int me4000_ai_start(struct me4000_ai_context *ai_context)
3766{ 3750{
3767 int err; 3751 int err;
3768 CALL_PDEBUG("me4000_ai_start() is executed\n"); 3752 CALL_PDEBUG("me4000_ai_start() is executed\n");
@@ -3779,7 +3763,7 @@ static int me4000_ai_start(me4000_ai_context_t * ai_context)
3779} 3763}
3780 3764
3781static int me4000_ai_start_ex(unsigned long *arg, 3765static int me4000_ai_start_ex(unsigned long *arg,
3782 me4000_ai_context_t * ai_context) 3766 struct me4000_ai_context *ai_context)
3783{ 3767{
3784 int err; 3768 int err;
3785 wait_queue_head_t queue; 3769 wait_queue_head_t queue;
@@ -3834,7 +3818,7 @@ static int me4000_ai_start_ex(unsigned long *arg,
3834 return 0; 3818 return 0;
3835} 3819}
3836 3820
3837static int me4000_ai_stop(me4000_ai_context_t * ai_context) 3821static int me4000_ai_stop(struct me4000_ai_context *ai_context)
3838{ 3822{
3839 wait_queue_head_t queue; 3823 wait_queue_head_t queue;
3840 u32 tmp; 3824 u32 tmp;
@@ -3871,7 +3855,7 @@ static int me4000_ai_stop(me4000_ai_context_t * ai_context)
3871 return 0; 3855 return 0;
3872} 3856}
3873 3857
3874static int me4000_ai_immediate_stop(me4000_ai_context_t * ai_context) 3858static int me4000_ai_immediate_stop(struct me4000_ai_context *ai_context)
3875{ 3859{
3876 wait_queue_head_t queue; 3860 wait_queue_head_t queue;
3877 u32 tmp; 3861 u32 tmp;
@@ -3908,7 +3892,7 @@ static int me4000_ai_immediate_stop(me4000_ai_context_t * ai_context)
3908 return 0; 3892 return 0;
3909} 3893}
3910 3894
3911static int me4000_ai_ex_trig_enable(me4000_ai_context_t * ai_context) 3895static int me4000_ai_ex_trig_enable(struct me4000_ai_context *ai_context)
3912{ 3896{
3913 u32 tmp; 3897 u32 tmp;
3914 unsigned long flags; 3898 unsigned long flags;
@@ -3924,7 +3908,7 @@ static int me4000_ai_ex_trig_enable(me4000_ai_context_t * ai_context)
3924 return 0; 3908 return 0;
3925} 3909}
3926 3910
3927static int me4000_ai_ex_trig_disable(me4000_ai_context_t * ai_context) 3911static int me4000_ai_ex_trig_disable(struct me4000_ai_context *ai_context)
3928{ 3912{
3929 u32 tmp; 3913 u32 tmp;
3930 unsigned long flags; 3914 unsigned long flags;
@@ -3940,10 +3924,10 @@ static int me4000_ai_ex_trig_disable(me4000_ai_context_t * ai_context)
3940 return 0; 3924 return 0;
3941} 3925}
3942 3926
3943static int me4000_ai_ex_trig_setup(me4000_ai_trigger_t * arg, 3927static int me4000_ai_ex_trig_setup(struct me4000_ai_trigger *arg,
3944 me4000_ai_context_t * ai_context) 3928 struct me4000_ai_context *ai_context)
3945{ 3929{
3946 me4000_ai_trigger_t cmd; 3930 struct me4000_ai_trigger cmd;
3947 int err; 3931 int err;
3948 u32 tmp; 3932 u32 tmp;
3949 unsigned long flags; 3933 unsigned long flags;
@@ -3951,7 +3935,7 @@ static int me4000_ai_ex_trig_setup(me4000_ai_trigger_t * arg,
3951 CALL_PDEBUG("me4000_ai_ex_trig_setup() is executed\n"); 3935 CALL_PDEBUG("me4000_ai_ex_trig_setup() is executed\n");
3952 3936
3953 /* Copy data from user */ 3937 /* Copy data from user */
3954 err = copy_from_user(&cmd, arg, sizeof(me4000_ai_trigger_t)); 3938 err = copy_from_user(&cmd, arg, sizeof(struct me4000_ai_trigger));
3955 if (err) { 3939 if (err) {
3956 printk(KERN_ERR 3940 printk(KERN_ERR
3957 "ME4000:me4000_ai_ex_trig_setup():Can't copy from user space\n"); 3941 "ME4000:me4000_ai_ex_trig_setup():Can't copy from user space\n");
@@ -4000,16 +3984,16 @@ static int me4000_ai_ex_trig_setup(me4000_ai_trigger_t * arg,
4000 return 0; 3984 return 0;
4001} 3985}
4002 3986
4003static int me4000_ai_sc_setup(me4000_ai_sc_t * arg, 3987static int me4000_ai_sc_setup(struct me4000_ai_sc *arg,
4004 me4000_ai_context_t * ai_context) 3988 struct me4000_ai_context *ai_context)
4005{ 3989{
4006 me4000_ai_sc_t cmd; 3990 struct me4000_ai_sc cmd;
4007 int err; 3991 int err;
4008 3992
4009 CALL_PDEBUG("me4000_ai_sc_setup() is executed\n"); 3993 CALL_PDEBUG("me4000_ai_sc_setup() is executed\n");
4010 3994
4011 /* Copy data from user */ 3995 /* Copy data from user */
4012 err = copy_from_user(&cmd, arg, sizeof(me4000_ai_sc_t)); 3996 err = copy_from_user(&cmd, arg, sizeof(struct me4000_ai_sc));
4013 if (err) { 3997 if (err) {
4014 printk(KERN_ERR 3998 printk(KERN_ERR
4015 "ME4000:me4000_ai_sc_setup():Can't copy from user space\n"); 3999 "ME4000:me4000_ai_sc_setup():Can't copy from user space\n");
@@ -4023,9 +4007,9 @@ static int me4000_ai_sc_setup(me4000_ai_sc_t * arg,
4023} 4007}
4024 4008
4025static ssize_t me4000_ai_read(struct file *filep, char *buff, size_t cnt, 4009static ssize_t me4000_ai_read(struct file *filep, char *buff, size_t cnt,
4026 loff_t * offp) 4010 loff_t *offp)
4027{ 4011{
4028 me4000_ai_context_t *ai_context = filep->private_data; 4012 struct me4000_ai_context *ai_context = filep->private_data;
4029 s16 *buffer = (s16 *) buff; 4013 s16 *buffer = (s16 *) buff;
4030 size_t count = cnt / 2; 4014 size_t count = cnt / 2;
4031 unsigned long flags; 4015 unsigned long flags;
@@ -4150,9 +4134,9 @@ static ssize_t me4000_ai_read(struct file *filep, char *buff, size_t cnt,
4150 return ret * 2; 4134 return ret * 2;
4151} 4135}
4152 4136
4153static unsigned int me4000_ai_poll(struct file *file_p, poll_table * wait) 4137static unsigned int me4000_ai_poll(struct file *file_p, poll_table *wait)
4154{ 4138{
4155 me4000_ai_context_t *ai_context; 4139 struct me4000_ai_context *ai_context;
4156 unsigned long mask = 0; 4140 unsigned long mask = 0;
4157 4141
4158 CALL_PDEBUG("me4000_ai_poll() is executed\n"); 4142 CALL_PDEBUG("me4000_ai_poll() is executed\n");
@@ -4171,7 +4155,7 @@ static unsigned int me4000_ai_poll(struct file *file_p, poll_table * wait)
4171 return mask; 4155 return mask;
4172} 4156}
4173 4157
4174static int me4000_ai_offset_enable(me4000_ai_context_t * ai_context) 4158static int me4000_ai_offset_enable(struct me4000_ai_context *ai_context)
4175{ 4159{
4176 unsigned long tmp; 4160 unsigned long tmp;
4177 4161
@@ -4184,7 +4168,7 @@ static int me4000_ai_offset_enable(me4000_ai_context_t * ai_context)
4184 return 0; 4168 return 0;
4185} 4169}
4186 4170
4187static int me4000_ai_offset_disable(me4000_ai_context_t * ai_context) 4171static int me4000_ai_offset_disable(struct me4000_ai_context *ai_context)
4188{ 4172{
4189 unsigned long tmp; 4173 unsigned long tmp;
4190 4174
@@ -4197,7 +4181,7 @@ static int me4000_ai_offset_disable(me4000_ai_context_t * ai_context)
4197 return 0; 4181 return 0;
4198} 4182}
4199 4183
4200static int me4000_ai_fullscale_enable(me4000_ai_context_t * ai_context) 4184static int me4000_ai_fullscale_enable(struct me4000_ai_context *ai_context)
4201{ 4185{
4202 unsigned long tmp; 4186 unsigned long tmp;
4203 4187
@@ -4210,7 +4194,7 @@ static int me4000_ai_fullscale_enable(me4000_ai_context_t * ai_context)
4210 return 0; 4194 return 0;
4211} 4195}
4212 4196
4213static int me4000_ai_fullscale_disable(me4000_ai_context_t * ai_context) 4197static int me4000_ai_fullscale_disable(struct me4000_ai_context *ai_context)
4214{ 4198{
4215 unsigned long tmp; 4199 unsigned long tmp;
4216 4200
@@ -4223,7 +4207,7 @@ static int me4000_ai_fullscale_disable(me4000_ai_context_t * ai_context)
4223 return 0; 4207 return 0;
4224} 4208}
4225 4209
4226static int me4000_ai_fsm_state(int *arg, me4000_ai_context_t * ai_context) 4210static int me4000_ai_fsm_state(int *arg, struct me4000_ai_context *ai_context)
4227{ 4211{
4228 unsigned long tmp; 4212 unsigned long tmp;
4229 4213
@@ -4242,7 +4226,7 @@ static int me4000_ai_fsm_state(int *arg, me4000_ai_context_t * ai_context)
4242} 4226}
4243 4227
4244static int me4000_ai_get_count_buffer(unsigned long *arg, 4228static int me4000_ai_get_count_buffer(unsigned long *arg,
4245 me4000_ai_context_t * ai_context) 4229 struct me4000_ai_context *ai_context)
4246{ 4230{
4247 unsigned long c; 4231 unsigned long c;
4248 int err; 4232 int err;
@@ -4252,7 +4236,7 @@ static int me4000_ai_get_count_buffer(unsigned long *arg,
4252 err = copy_to_user(arg, &c, sizeof(unsigned long)); 4236 err = copy_to_user(arg, &c, sizeof(unsigned long));
4253 if (err) { 4237 if (err) {
4254 printk(KERN_ERR 4238 printk(KERN_ERR
4255 "ME4000:me4000_ai_get_count_buffer():Can't copy to user space\n"); 4239 "%s:Can't copy to user space\n", __func__);
4256 return -EFAULT; 4240 return -EFAULT;
4257 } 4241 }
4258 4242
@@ -4261,7 +4245,7 @@ static int me4000_ai_get_count_buffer(unsigned long *arg,
4261 4245
4262/*---------------------------------- EEPROM stuff ---------------------------*/ 4246/*---------------------------------- EEPROM stuff ---------------------------*/
4263 4247
4264static int eeprom_write_cmd(me4000_ai_context_t * ai_context, unsigned long cmd, 4248static int eeprom_write_cmd(struct me4000_ai_context *ai_context, unsigned long cmd,
4265 int length) 4249 int length)
4266{ 4250{
4267 int i; 4251 int i;
@@ -4318,7 +4302,7 @@ static int eeprom_write_cmd(me4000_ai_context_t * ai_context, unsigned long cmd,
4318 return 0; 4302 return 0;
4319} 4303}
4320 4304
4321static unsigned short eeprom_read_cmd(me4000_ai_context_t * ai_context, 4305static unsigned short eeprom_read_cmd(struct me4000_ai_context *ai_context,
4322 unsigned long cmd, int length) 4306 unsigned long cmd, int length)
4323{ 4307{
4324 int i; 4308 int i;
@@ -4397,11 +4381,11 @@ static unsigned short eeprom_read_cmd(me4000_ai_context_t * ai_context,
4397 return id; 4381 return id;
4398} 4382}
4399 4383
4400static int me4000_eeprom_write(me4000_eeprom_t * arg, 4384static int me4000_eeprom_write(struct me4000_eeprom *arg,
4401 me4000_ai_context_t * ai_context) 4385 struct me4000_ai_context *ai_context)
4402{ 4386{
4403 int err; 4387 int err;
4404 me4000_eeprom_t setup; 4388 struct me4000_eeprom setup;
4405 unsigned long cmd; 4389 unsigned long cmd;
4406 unsigned long date_high; 4390 unsigned long date_high;
4407 unsigned long date_low; 4391 unsigned long date_low;
@@ -4594,12 +4578,12 @@ static int me4000_eeprom_write(me4000_eeprom_t * arg,
4594 return 0; 4578 return 0;
4595} 4579}
4596 4580
4597static int me4000_eeprom_read(me4000_eeprom_t * arg, 4581static int me4000_eeprom_read(struct me4000_eeprom *arg,
4598 me4000_ai_context_t * ai_context) 4582 struct me4000_ai_context *ai_context)
4599{ 4583{
4600 int err; 4584 int err;
4601 unsigned long cmd; 4585 unsigned long cmd;
4602 me4000_eeprom_t setup; 4586 struct me4000_eeprom setup;
4603 4587
4604 CALL_PDEBUG("me4000_eeprom_read() is executed\n"); 4588 CALL_PDEBUG("me4000_eeprom_read() is executed\n");
4605 4589
@@ -4687,7 +4671,7 @@ static int me4000_eeprom_read(me4000_eeprom_t * arg,
4687static int me4000_dio_ioctl(struct inode *inode_p, struct file *file_p, 4671static int me4000_dio_ioctl(struct inode *inode_p, struct file *file_p,
4688 unsigned int service, unsigned long arg) 4672 unsigned int service, unsigned long arg)
4689{ 4673{
4690 me4000_dio_context_t *dio_context; 4674 struct me4000_dio_context *dio_context;
4691 4675
4692 CALL_PDEBUG("me4000_dio_ioctl() is executed\n"); 4676 CALL_PDEBUG("me4000_dio_ioctl() is executed\n");
4693 4677
@@ -4704,13 +4688,13 @@ static int me4000_dio_ioctl(struct inode *inode_p, struct file *file_p,
4704 4688
4705 switch (service) { 4689 switch (service) {
4706 case ME4000_DIO_CONFIG: 4690 case ME4000_DIO_CONFIG:
4707 return me4000_dio_config((me4000_dio_config_t *) arg, 4691 return me4000_dio_config((struct me4000_dio_config *)arg,
4708 dio_context); 4692 dio_context);
4709 case ME4000_DIO_SET_BYTE: 4693 case ME4000_DIO_SET_BYTE:
4710 return me4000_dio_set_byte((me4000_dio_byte_t *) arg, 4694 return me4000_dio_set_byte((struct me4000_dio_byte *)arg,
4711 dio_context); 4695 dio_context);
4712 case ME4000_DIO_GET_BYTE: 4696 case ME4000_DIO_GET_BYTE:
4713 return me4000_dio_get_byte((me4000_dio_byte_t *) arg, 4697 return me4000_dio_get_byte((struct me4000_dio_byte *)arg,
4714 dio_context); 4698 dio_context);
4715 case ME4000_DIO_RESET: 4699 case ME4000_DIO_RESET:
4716 return me4000_dio_reset(dio_context); 4700 return me4000_dio_reset(dio_context);
@@ -4723,17 +4707,17 @@ static int me4000_dio_ioctl(struct inode *inode_p, struct file *file_p,
4723 return 0; 4707 return 0;
4724} 4708}
4725 4709
4726static int me4000_dio_config(me4000_dio_config_t * arg, 4710static int me4000_dio_config(struct me4000_dio_config *arg,
4727 me4000_dio_context_t * dio_context) 4711 struct me4000_dio_context *dio_context)
4728{ 4712{
4729 me4000_dio_config_t cmd; 4713 struct me4000_dio_config cmd;
4730 u32 tmp; 4714 u32 tmp;
4731 int err; 4715 int err;
4732 4716
4733 CALL_PDEBUG("me4000_dio_config() is executed\n"); 4717 CALL_PDEBUG("me4000_dio_config() is executed\n");
4734 4718
4735 /* Copy data from user */ 4719 /* Copy data from user */
4736 err = copy_from_user(&cmd, arg, sizeof(me4000_dio_config_t)); 4720 err = copy_from_user(&cmd, arg, sizeof(struct me4000_dio_config));
4737 if (err) { 4721 if (err) {
4738 printk(KERN_ERR 4722 printk(KERN_ERR
4739 "ME4000:me4000_dio_config():Can't copy from user space\n"); 4723 "ME4000:me4000_dio_config():Can't copy from user space\n");
@@ -4964,16 +4948,16 @@ static int me4000_dio_config(me4000_dio_config_t * arg,
4964 return 0; 4948 return 0;
4965} 4949}
4966 4950
4967static int me4000_dio_set_byte(me4000_dio_byte_t * arg, 4951static int me4000_dio_set_byte(struct me4000_dio_byte *arg,
4968 me4000_dio_context_t * dio_context) 4952 struct me4000_dio_context *dio_context)
4969{ 4953{
4970 me4000_dio_byte_t cmd; 4954 struct me4000_dio_byte cmd;
4971 int err; 4955 int err;
4972 4956
4973 CALL_PDEBUG("me4000_dio_set_byte() is executed\n"); 4957 CALL_PDEBUG("me4000_dio_set_byte() is executed\n");
4974 4958
4975 /* Copy data from user */ 4959 /* Copy data from user */
4976 err = copy_from_user(&cmd, arg, sizeof(me4000_dio_byte_t)); 4960 err = copy_from_user(&cmd, arg, sizeof(struct me4000_dio_byte));
4977 if (err) { 4961 if (err) {
4978 printk(KERN_ERR 4962 printk(KERN_ERR
4979 "ME4000:me4000_dio_set_byte():Can't copy from user space\n"); 4963 "ME4000:me4000_dio_set_byte():Can't copy from user space\n");
@@ -5030,16 +5014,16 @@ static int me4000_dio_set_byte(me4000_dio_byte_t * arg,
5030 return 0; 5014 return 0;
5031} 5015}
5032 5016
5033static int me4000_dio_get_byte(me4000_dio_byte_t * arg, 5017static int me4000_dio_get_byte(struct me4000_dio_byte *arg,
5034 me4000_dio_context_t * dio_context) 5018 struct me4000_dio_context *dio_context)
5035{ 5019{
5036 me4000_dio_byte_t cmd; 5020 struct me4000_dio_byte cmd;
5037 int err; 5021 int err;
5038 5022
5039 CALL_PDEBUG("me4000_dio_get_byte() is executed\n"); 5023 CALL_PDEBUG("me4000_dio_get_byte() is executed\n");
5040 5024
5041 /* Copy data from user */ 5025 /* Copy data from user */
5042 err = copy_from_user(&cmd, arg, sizeof(me4000_dio_byte_t)); 5026 err = copy_from_user(&cmd, arg, sizeof(struct me4000_dio_byte));
5043 if (err) { 5027 if (err) {
5044 printk(KERN_ERR 5028 printk(KERN_ERR
5045 "ME4000:me4000_dio_get_byte():Can't copy from user space\n"); 5029 "ME4000:me4000_dio_get_byte():Can't copy from user space\n");
@@ -5070,7 +5054,7 @@ static int me4000_dio_get_byte(me4000_dio_byte_t * arg,
5070 } 5054 }
5071 5055
5072 /* Copy result back to user */ 5056 /* Copy result back to user */
5073 err = copy_to_user(arg, &cmd, sizeof(me4000_dio_byte_t)); 5057 err = copy_to_user(arg, &cmd, sizeof(struct me4000_dio_byte));
5074 if (err) { 5058 if (err) {
5075 printk(KERN_ERR 5059 printk(KERN_ERR
5076 "ME4000:me4000_dio_get_byte():Can't copy to user space\n"); 5060 "ME4000:me4000_dio_get_byte():Can't copy to user space\n");
@@ -5080,7 +5064,7 @@ static int me4000_dio_get_byte(me4000_dio_byte_t * arg,
5080 return 0; 5064 return 0;
5081} 5065}
5082 5066
5083static int me4000_dio_reset(me4000_dio_context_t * dio_context) 5067static int me4000_dio_reset(struct me4000_dio_context *dio_context)
5084{ 5068{
5085 CALL_PDEBUG("me4000_dio_reset() is executed\n"); 5069 CALL_PDEBUG("me4000_dio_reset() is executed\n");
5086 5070
@@ -5101,7 +5085,7 @@ static int me4000_dio_reset(me4000_dio_context_t * dio_context)
5101static int me4000_cnt_ioctl(struct inode *inode_p, struct file *file_p, 5085static int me4000_cnt_ioctl(struct inode *inode_p, struct file *file_p,
5102 unsigned int service, unsigned long arg) 5086 unsigned int service, unsigned long arg)
5103{ 5087{
5104 me4000_cnt_context_t *cnt_context; 5088 struct me4000_cnt_context *cnt_context;
5105 5089
5106 CALL_PDEBUG("me4000_cnt_ioctl() is executed\n"); 5090 CALL_PDEBUG("me4000_cnt_ioctl() is executed\n");
5107 5091
@@ -5118,11 +5102,11 @@ static int me4000_cnt_ioctl(struct inode *inode_p, struct file *file_p,
5118 5102
5119 switch (service) { 5103 switch (service) {
5120 case ME4000_CNT_READ: 5104 case ME4000_CNT_READ:
5121 return me4000_cnt_read((me4000_cnt_t *) arg, cnt_context); 5105 return me4000_cnt_read((struct me4000_cnt *)arg, cnt_context);
5122 case ME4000_CNT_WRITE: 5106 case ME4000_CNT_WRITE:
5123 return me4000_cnt_write((me4000_cnt_t *) arg, cnt_context); 5107 return me4000_cnt_write((struct me4000_cnt *)arg, cnt_context);
5124 case ME4000_CNT_CONFIG: 5108 case ME4000_CNT_CONFIG:
5125 return me4000_cnt_config((me4000_cnt_config_t *) arg, 5109 return me4000_cnt_config((struct me4000_cnt_config *)arg,
5126 cnt_context); 5110 cnt_context);
5127 case ME4000_CNT_RESET: 5111 case ME4000_CNT_RESET:
5128 return me4000_cnt_reset(cnt_context); 5112 return me4000_cnt_reset(cnt_context);
@@ -5135,10 +5119,10 @@ static int me4000_cnt_ioctl(struct inode *inode_p, struct file *file_p,
5135 return 0; 5119 return 0;
5136} 5120}
5137 5121
5138static int me4000_cnt_config(me4000_cnt_config_t * arg, 5122static int me4000_cnt_config(struct me4000_cnt_config *arg,
5139 me4000_cnt_context_t * cnt_context) 5123 struct me4000_cnt_context *cnt_context)
5140{ 5124{
5141 me4000_cnt_config_t cmd; 5125 struct me4000_cnt_config cmd;
5142 u8 counter; 5126 u8 counter;
5143 u8 mode; 5127 u8 mode;
5144 int err; 5128 int err;
@@ -5146,7 +5130,7 @@ static int me4000_cnt_config(me4000_cnt_config_t * arg,
5146 CALL_PDEBUG("me4000_cnt_config() is executed\n"); 5130 CALL_PDEBUG("me4000_cnt_config() is executed\n");
5147 5131
5148 /* Copy data from user */ 5132 /* Copy data from user */
5149 err = copy_from_user(&cmd, arg, sizeof(me4000_cnt_config_t)); 5133 err = copy_from_user(&cmd, arg, sizeof(struct me4000_cnt_config));
5150 if (err) { 5134 if (err) {
5151 printk(KERN_ERR 5135 printk(KERN_ERR
5152 "ME4000:me4000_cnt_config():Can't copy from user space\n"); 5136 "ME4000:me4000_cnt_config():Can't copy from user space\n");
@@ -5204,17 +5188,17 @@ static int me4000_cnt_config(me4000_cnt_config_t * arg,
5204 return 0; 5188 return 0;
5205} 5189}
5206 5190
5207static int me4000_cnt_read(me4000_cnt_t * arg, 5191static int me4000_cnt_read(struct me4000_cnt *arg,
5208 me4000_cnt_context_t * cnt_context) 5192 struct me4000_cnt_context *cnt_context)
5209{ 5193{
5210 me4000_cnt_t cmd; 5194 struct me4000_cnt cmd;
5211 u8 tmp; 5195 u8 tmp;
5212 int err; 5196 int err;
5213 5197
5214 CALL_PDEBUG("me4000_cnt_read() is executed\n"); 5198 CALL_PDEBUG("me4000_cnt_read() is executed\n");
5215 5199
5216 /* Copy data from user */ 5200 /* Copy data from user */
5217 err = copy_from_user(&cmd, arg, sizeof(me4000_cnt_t)); 5201 err = copy_from_user(&cmd, arg, sizeof(struct me4000_cnt));
5218 if (err) { 5202 if (err) {
5219 printk(KERN_ERR 5203 printk(KERN_ERR
5220 "ME4000:me4000_cnt_read():Can't copy from user space\n"); 5204 "ME4000:me4000_cnt_read():Can't copy from user space\n");
@@ -5249,7 +5233,7 @@ static int me4000_cnt_read(me4000_cnt_t * arg,
5249 } 5233 }
5250 5234
5251 /* Copy result back to user */ 5235 /* Copy result back to user */
5252 err = copy_to_user(arg, &cmd, sizeof(me4000_cnt_t)); 5236 err = copy_to_user(arg, &cmd, sizeof(struct me4000_cnt));
5253 if (err) { 5237 if (err) {
5254 printk(KERN_ERR 5238 printk(KERN_ERR
5255 "ME4000:me4000_cnt_read():Can't copy to user space\n"); 5239 "ME4000:me4000_cnt_read():Can't copy to user space\n");
@@ -5259,17 +5243,17 @@ static int me4000_cnt_read(me4000_cnt_t * arg,
5259 return 0; 5243 return 0;
5260} 5244}
5261 5245
5262static int me4000_cnt_write(me4000_cnt_t * arg, 5246static int me4000_cnt_write(struct me4000_cnt *arg,
5263 me4000_cnt_context_t * cnt_context) 5247 struct me4000_cnt_context *cnt_context)
5264{ 5248{
5265 me4000_cnt_t cmd; 5249 struct me4000_cnt cmd;
5266 u8 tmp; 5250 u8 tmp;
5267 int err; 5251 int err;
5268 5252
5269 CALL_PDEBUG("me4000_cnt_write() is executed\n"); 5253 CALL_PDEBUG("me4000_cnt_write() is executed\n");
5270 5254
5271 /* Copy data from user */ 5255 /* Copy data from user */
5272 err = copy_from_user(&cmd, arg, sizeof(me4000_cnt_t)); 5256 err = copy_from_user(&cmd, arg, sizeof(struct me4000_cnt));
5273 if (err) { 5257 if (err) {
5274 printk(KERN_ERR 5258 printk(KERN_ERR
5275 "ME4000:me4000_cnt_write():Can't copy from user space\n"); 5259 "ME4000:me4000_cnt_write():Can't copy from user space\n");
@@ -5306,7 +5290,7 @@ static int me4000_cnt_write(me4000_cnt_t * arg,
5306 return 0; 5290 return 0;
5307} 5291}
5308 5292
5309static int me4000_cnt_reset(me4000_cnt_context_t * cnt_context) 5293static int me4000_cnt_reset(struct me4000_cnt_context *cnt_context)
5310{ 5294{
5311 CALL_PDEBUG("me4000_cnt_reset() is executed\n"); 5295 CALL_PDEBUG("me4000_cnt_reset() is executed\n");
5312 5296
@@ -5333,7 +5317,7 @@ static int me4000_cnt_reset(me4000_cnt_context_t * cnt_context)
5333static int me4000_ext_int_ioctl(struct inode *inode_p, struct file *file_p, 5317static int me4000_ext_int_ioctl(struct inode *inode_p, struct file *file_p,
5334 unsigned int service, unsigned long arg) 5318 unsigned int service, unsigned long arg)
5335{ 5319{
5336 me4000_ext_int_context_t *ext_int_context; 5320 struct me4000_ext_int_context *ext_int_context;
5337 5321
5338 CALL_PDEBUG("me4000_ext_int_ioctl() is executed\n"); 5322 CALL_PDEBUG("me4000_ext_int_ioctl() is executed\n");
5339 5323
@@ -5366,7 +5350,7 @@ static int me4000_ext_int_ioctl(struct inode *inode_p, struct file *file_p,
5366 return 0; 5350 return 0;
5367} 5351}
5368 5352
5369static int me4000_ext_int_enable(me4000_ext_int_context_t * ext_int_context) 5353static int me4000_ext_int_enable(struct me4000_ext_int_context *ext_int_context)
5370{ 5354{
5371 unsigned long tmp; 5355 unsigned long tmp;
5372 5356
@@ -5379,7 +5363,7 @@ static int me4000_ext_int_enable(me4000_ext_int_context_t * ext_int_context)
5379 return 0; 5363 return 0;
5380} 5364}
5381 5365
5382static int me4000_ext_int_disable(me4000_ext_int_context_t * ext_int_context) 5366static int me4000_ext_int_disable(struct me4000_ext_int_context *ext_int_context)
5383{ 5367{
5384 unsigned long tmp; 5368 unsigned long tmp;
5385 5369
@@ -5393,7 +5377,7 @@ static int me4000_ext_int_disable(me4000_ext_int_context_t * ext_int_context)
5393} 5377}
5394 5378
5395static int me4000_ext_int_count(unsigned long *arg, 5379static int me4000_ext_int_count(unsigned long *arg,
5396 me4000_ext_int_context_t * ext_int_context) 5380 struct me4000_ext_int_context *ext_int_context)
5397{ 5381{
5398 5382
5399 CALL_PDEBUG("me4000_ext_int_count() is executed\n"); 5383 CALL_PDEBUG("me4000_ext_int_count() is executed\n");
@@ -5404,10 +5388,10 @@ static int me4000_ext_int_count(unsigned long *arg,
5404 5388
5405/*------------------------------------ General stuff ------------------------------------*/ 5389/*------------------------------------ General stuff ------------------------------------*/
5406 5390
5407static int me4000_get_user_info(me4000_user_info_t * arg, 5391static int me4000_get_user_info(struct me4000_user_info *arg,
5408 me4000_info_t * board_info) 5392 struct me4000_info *board_info)
5409{ 5393{
5410 me4000_user_info_t user_info; 5394 struct me4000_user_info user_info;
5411 5395
5412 CALL_PDEBUG("me4000_get_user_info() is executed\n"); 5396 CALL_PDEBUG("me4000_get_user_info() is executed\n");
5413 5397
@@ -5437,7 +5421,7 @@ static int me4000_get_user_info(me4000_user_info_t * arg,
5437 5421
5438 user_info.cnt_count = board_info->board_p->cnt.count; 5422 user_info.cnt_count = board_info->board_p->cnt.count;
5439 5423
5440 if (copy_to_user(arg, &user_info, sizeof(me4000_user_info_t))) 5424 if (copy_to_user(arg, &user_info, sizeof(struct me4000_user_info)))
5441 return -EFAULT; 5425 return -EFAULT;
5442 5426
5443 return 0; 5427 return 0;
@@ -5448,7 +5432,7 @@ static int me4000_get_user_info(me4000_user_info_t * arg,
5448static int me4000_ext_int_fasync(int fd, struct file *file_ptr, int mode) 5432static int me4000_ext_int_fasync(int fd, struct file *file_ptr, int mode)
5449{ 5433{
5450 int result = 0; 5434 int result = 0;
5451 me4000_ext_int_context_t *ext_int_context; 5435 struct me4000_ext_int_context *ext_int_context;
5452 5436
5453 CALL_PDEBUG("me4000_ext_int_fasync() is executed\n"); 5437 CALL_PDEBUG("me4000_ext_int_fasync() is executed\n");
5454 5438
@@ -5465,7 +5449,7 @@ static irqreturn_t me4000_ao_isr(int irq, void *dev_id)
5465{ 5449{
5466 u32 tmp; 5450 u32 tmp;
5467 u32 value; 5451 u32 value;
5468 me4000_ao_context_t *ao_context; 5452 struct me4000_ao_context *ao_context;
5469 int i; 5453 int i;
5470 int c = 0; 5454 int c = 0;
5471 int c1 = 0; 5455 int c1 = 0;
@@ -5589,7 +5573,7 @@ static irqreturn_t me4000_ao_isr(int irq, void *dev_id)
5589static irqreturn_t me4000_ai_isr(int irq, void *dev_id) 5573static irqreturn_t me4000_ai_isr(int irq, void *dev_id)
5590{ 5574{
5591 u32 tmp; 5575 u32 tmp;
5592 me4000_ai_context_t *ai_context; 5576 struct me4000_ai_context *ai_context;
5593 int i; 5577 int i;
5594 int c = 0; 5578 int c = 0;
5595 int c1 = 0; 5579 int c1 = 0;
@@ -5933,7 +5917,7 @@ static irqreturn_t me4000_ai_isr(int irq, void *dev_id)
5933 5917
5934static irqreturn_t me4000_ext_int_isr(int irq, void *dev_id) 5918static irqreturn_t me4000_ext_int_isr(int irq, void *dev_id)
5935{ 5919{
5936 me4000_ext_int_context_t *ext_int_context; 5920 struct me4000_ext_int_context *ext_int_context;
5937 unsigned long tmp; 5921 unsigned long tmp;
5938 5922
5939 ISR_PDEBUG("me4000_ext_int_isr() is executed\n"); 5923 ISR_PDEBUG("me4000_ext_int_isr() is executed\n");
@@ -5969,10 +5953,10 @@ static irqreturn_t me4000_ext_int_isr(int irq, void *dev_id)
5969 return IRQ_HANDLED; 5953 return IRQ_HANDLED;
5970} 5954}
5971 5955
5972void __exit me4000_module_exit(void) 5956static void __exit me4000_module_exit(void)
5973{ 5957{
5974 struct list_head *board_p; 5958 struct list_head *board_p;
5975 me4000_info_t *board_info; 5959 struct me4000_info *board_info;
5976 5960
5977 CALL_PDEBUG("cleanup_module() is executed\n"); 5961 CALL_PDEBUG("cleanup_module() is executed\n");
5978 5962
@@ -5993,7 +5977,7 @@ void __exit me4000_module_exit(void)
5993 /* Reset the boards */ 5977 /* Reset the boards */
5994 for (board_p = me4000_board_info_list.next; 5978 for (board_p = me4000_board_info_list.next;
5995 board_p != &me4000_board_info_list; board_p = board_p->next) { 5979 board_p != &me4000_board_info_list; board_p = board_p->next) {
5996 board_info = list_entry(board_p, me4000_info_t, list); 5980 board_info = list_entry(board_p, struct me4000_info, list);
5997 me4000_reset_board(board_info); 5981 me4000_reset_board(board_info);
5998 } 5982 }
5999 5983
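The cleanup path above still walks me4000_board_info_list by hand and calls list_entry() on each node; the patch only updates the type argument. The list_for_each_entry() helper expresses the same loop without the explicit pointer bookkeeping. A sketch with hypothetical demo_board types:

#include <linux/list.h>

struct demo_board {
        struct list_head list;
        int number;
};

static LIST_HEAD(demo_board_list);

static void demo_reset_all(void (*reset)(struct demo_board *))
{
        struct demo_board *board;

        /* list_for_each_entry() hides the list_entry() arithmetic */
        list_for_each_entry(board, &demo_board_list, list)
                reset(board);
}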
@@ -6007,7 +5991,7 @@ static int me4000_read_procmem(char *buf, char **start, off_t offset, int count,
6007{ 5991{
6008 int len = 0; 5992 int len = 0;
6009 int limit = count - 1000; 5993 int limit = count - 1000;
6010 me4000_info_t *board_info; 5994 struct me4000_info *board_info;
6011 struct list_head *ptr; 5995 struct list_head *ptr;
6012 5996
6013 len += sprintf(buf + len, "\nME4000 DRIVER VERSION %X.%X.%X\n\n", 5997 len += sprintf(buf + len, "\nME4000 DRIVER VERSION %X.%X.%X\n\n",
@@ -6019,7 +6003,7 @@ static int me4000_read_procmem(char *buf, char **start, off_t offset, int count,
6019 for (ptr = me4000_board_info_list.next; 6003 for (ptr = me4000_board_info_list.next;
6020 (ptr != &me4000_board_info_list) && (len < limit); 6004 (ptr != &me4000_board_info_list) && (len < limit);
6021 ptr = ptr->next) { 6005 ptr = ptr->next) {
6022 board_info = list_entry(ptr, me4000_info_t, list); 6006 board_info = list_entry(ptr, struct me4000_info, list);
6023 6007
6024 len += 6008 len +=
6025 sprintf(buf + len, "Board number %d:\n", 6009 sprintf(buf + len, "Board number %d:\n",
@@ -6029,14 +6013,14 @@ static int me4000_read_procmem(char *buf, char **start, off_t offset, int count,
6029 sprintf(buf + len, "PLX base register = 0x%lX\n", 6013 sprintf(buf + len, "PLX base register = 0x%lX\n",
6030 board_info->plx_regbase); 6014 board_info->plx_regbase);
6031 len += 6015 len +=
6032 sprintf(buf + len, "PLX base register size = 0x%lX\n", 6016 sprintf(buf + len, "PLX base register size = 0x%X\n",
6033 board_info->plx_regbase_size); 6017 (unsigned int)board_info->plx_regbase_size);
6034 len += 6018 len +=
6035 sprintf(buf + len, "ME4000 base register = 0x%lX\n", 6019 sprintf(buf + len, "ME4000 base register = 0x%X\n",
6036 board_info->me4000_regbase); 6020 (unsigned int)board_info->me4000_regbase);
6037 len += 6021 len +=
6038 sprintf(buf + len, "ME4000 base register size = 0x%lX\n", 6022 sprintf(buf + len, "ME4000 base register size = 0x%X\n",
6039 board_info->me4000_regbase_size); 6023 (unsigned int)board_info->me4000_regbase_size);
6040 len += 6024 len +=
6041 sprintf(buf + len, "Serial number = 0x%X\n", 6025 sprintf(buf + len, "Serial number = 0x%X\n",
6042 board_info->serial_no); 6026 board_info->serial_no);
diff --git a/drivers/staging/me4000/me4000.h b/drivers/staging/me4000/me4000.h
index c35e4b9793a0..81c6f4d5e25c 100644
--- a/drivers/staging/me4000/me4000.h
+++ b/drivers/staging/me4000/me4000.h
@@ -329,46 +329,46 @@
329 Circular buffer used for analog input/output reads/writes. 329 Circular buffer used for analog input/output reads/writes.
330 ===========================================================================*/ 330 ===========================================================================*/
331 331
332typedef struct me4000_circ_buf { 332struct me4000_circ_buf {
333 s16 *buf; 333 s16 *buf;
334 int volatile head; 334 int volatile head;
335 int volatile tail; 335 int volatile tail;
336} me4000_circ_buf_t; 336};
337 337
338/*============================================================================= 338/*=============================================================================
339 Information about the hardware capabilities 339 Information about the hardware capabilities
340 ===========================================================================*/ 340 ===========================================================================*/
341 341
342typedef struct me4000_ao_info { 342struct me4000_ao_info {
343 int count; 343 int count;
344 int fifo_count; 344 int fifo_count;
345} me4000_ao_info_t; 345};
346 346
347typedef struct me4000_ai_info { 347struct me4000_ai_info {
348 int count; 348 int count;
349 int sh_count; 349 int sh_count;
350 int diff_count; 350 int diff_count;
351 int ex_trig_analog; 351 int ex_trig_analog;
352} me4000_ai_info_t; 352};
353 353
354typedef struct me4000_dio_info { 354struct me4000_dio_info {
355 int count; 355 int count;
356} me4000_dio_info_t; 356};
357 357
358typedef struct me4000_cnt_info { 358struct me4000_cnt_info {
359 int count; 359 int count;
360} me4000_cnt_info_t; 360};
361 361
362typedef struct me4000_board { 362struct me4000_board {
363 u16 vendor_id; 363 u16 vendor_id;
364 u16 device_id; 364 u16 device_id;
365 me4000_ao_info_t ao; 365 struct me4000_ao_info ao;
366 me4000_ai_info_t ai; 366 struct me4000_ai_info ai;
367 me4000_dio_info_t dio; 367 struct me4000_dio_info dio;
368 me4000_cnt_info_t cnt; 368 struct me4000_cnt_info cnt;
369} me4000_board_t; 369};
370 370
371static me4000_board_t me4000_boards[] = { 371static struct me4000_board me4000_boards[] = {
372 {PCI_VENDOR_ID_MEILHAUS, 0x4610, {0, 0}, {16, 0, 0, 0}, {4}, {3}}, 372 {PCI_VENDOR_ID_MEILHAUS, 0x4610, {0, 0}, {16, 0, 0, 0}, {4}, {3}},
373 373
374 {PCI_VENDOR_ID_MEILHAUS, 0x4650, {0, 0}, {16, 0, 0, 0}, {4}, {0}}, 374 {PCI_VENDOR_ID_MEILHAUS, 0x4650, {0, 0}, {16, 0, 0, 0}, {4}, {0}},
@@ -391,8 +391,6 @@ static me4000_board_t me4000_boards[] = {
391 {0}, 391 {0},
392}; 392};
393 393
394#define ME4000_BOARD_VERSIONS (sizeof(me4000_boards) / sizeof(me4000_board_t) - 1)
395
396/*============================================================================= 394/*=============================================================================
397 PCI device table. 395 PCI device table.
398 This is used by modprobe to translate PCI IDs to drivers. 396 This is used by modprobe to translate PCI IDs to drivers.
@@ -427,19 +425,19 @@ MODULE_DEVICE_TABLE(pci, me4000_pci_table);
427 Global board and subdevice information structures 425 Global board and subdevice information structures
428 ===========================================================================*/ 426 ===========================================================================*/
429 427
430typedef struct me4000_info { 428struct me4000_info {
431 struct list_head list; // List of all detected boards 429 struct list_head list; // List of all detected boards
432 int board_count; // Index of the board after detection 430 int board_count; // Index of the board after detection
433 431
434 unsigned long plx_regbase; // PLX configuration space base address 432 unsigned long plx_regbase; // PLX configuration space base address
435 unsigned long me4000_regbase; // Base address of the ME4000 433 resource_size_t me4000_regbase; // Base address of the ME4000
436 unsigned long timer_regbase; // Base address of the timer circuit 434 resource_size_t timer_regbase; // Base address of the timer circuit
437 unsigned long program_regbase; // Base address to set the program pin for the xilinx 435 resource_size_t program_regbase; // Base address to set the program pin for the xilinx
438 436
439 unsigned long plx_regbase_size; // PLX register set space 437 unsigned long plx_regbase_size; // PLX register set space
440 unsigned long me4000_regbase_size; // ME4000 register set space 438 resource_size_t me4000_regbase_size; // ME4000 register set space
441 unsigned long timer_regbase_size; // Timer circuit register set space 439 resource_size_t timer_regbase_size; // Timer circuit register set space
442 unsigned long program_regbase_size; // Size of program base address of the ME4000 440 resource_size_t program_regbase_size; // Size of program base address of the ME4000
443 441
444 unsigned int serial_no; // Serial number of the board 442 unsigned int serial_no; // Serial number of the board
445 unsigned char hw_revision; // Hardware revision of the board 443 unsigned char hw_revision; // Hardware revision of the board
@@ -451,7 +449,7 @@ typedef struct me4000_info {
451 int pci_func_no; // PCI function number 449 int pci_func_no; // PCI function number
452 struct pci_dev *pci_dev_p; // General PCI information 450 struct pci_dev *pci_dev_p; // General PCI information
453 451
454 me4000_board_t *board_p; // Holds the board capabilities 452 struct me4000_board *board_p; // Holds the board capabilities
455 453
456 unsigned int irq; // IRQ assigned from the PCI BIOS 454 unsigned int irq; // IRQ assigned from the PCI BIOS
457 unsigned int irq_count; // Count of external interrupts 455 unsigned int irq_count; // Count of external interrupts
@@ -464,18 +462,18 @@ typedef struct me4000_info {
464 struct me4000_dio_context *dio_context; // Digital I/O specific context 462 struct me4000_dio_context *dio_context; // Digital I/O specific context
465 struct me4000_cnt_context *cnt_context; // Counter specific context 463 struct me4000_cnt_context *cnt_context; // Counter specific context
466 struct me4000_ext_int_context *ext_int_context; // External interrupt specific context 464 struct me4000_ext_int_context *ext_int_context; // External interrupt specific context
467} me4000_info_t; 465};
468 466
469typedef struct me4000_ao_context { 467struct me4000_ao_context {
470 struct list_head list; // linked list of me4000_ao_context_t 468 struct list_head list; // linked list of me4000_ao_context_t
471 int index; // Index in the list 469 int index; // Index in the list
472 int mode; // Indicates mode (0 = single, 1 = wraparound, 2 = continous) 470 int mode; // Indicates mode (0 = single, 1 = wraparound, 2 = continous)
473 int dac_in_use; // Indicates if already opend 471 int dac_in_use; // Indicates if already opend
474 spinlock_t use_lock; // Guards in_use 472 spinlock_t use_lock; // Guards in_use
475 spinlock_t int_lock; // Used when locking out interrupts 473 spinlock_t int_lock; // Used when locking out interrupts
476 me4000_circ_buf_t circ_buf; // Circular buffer 474 struct me4000_circ_buf circ_buf; // Circular buffer
477 wait_queue_head_t wait_queue; // Wait queue to sleep while blocking write 475 wait_queue_head_t wait_queue; // Wait queue to sleep while blocking write
478 me4000_info_t *board_info; 476 struct me4000_info *board_info;
479 unsigned int irq; // The irq associated with this ADC 477 unsigned int irq; // The irq associated with this ADC
480 int volatile pipe_flag; // Indicates broken pipe set from me4000_ao_isr() 478 int volatile pipe_flag; // Indicates broken pipe set from me4000_ao_isr()
481 unsigned long ctrl_reg; 479 unsigned long ctrl_reg;
@@ -486,9 +484,9 @@ typedef struct me4000_ao_context {
486 unsigned long irq_status_reg; 484 unsigned long irq_status_reg;
487 unsigned long preload_reg; 485 unsigned long preload_reg;
488 struct fasync_struct *fasync_p; // Queue for asynchronous notification 486 struct fasync_struct *fasync_p; // Queue for asynchronous notification
489} me4000_ao_context_t; 487};
490 488
491typedef struct me4000_ai_context { 489struct me4000_ai_context {
492 struct list_head list; // linked list of me4000_ai_info_t 490 struct list_head list; // linked list of me4000_ai_info_t
493 int mode; // Indicates mode 491 int mode; // Indicates mode
494 int in_use; // Indicates if already opend 492 int in_use; // Indicates if already opend
@@ -496,9 +494,9 @@ typedef struct me4000_ai_context {
496 spinlock_t int_lock; // Used when locking out interrupts 494 spinlock_t int_lock; // Used when locking out interrupts
497 int number; // Number of the DAC 495 int number; // Number of the DAC
498 unsigned int irq; // The irq associated with this ADC 496 unsigned int irq; // The irq associated with this ADC
499 me4000_circ_buf_t circ_buf; // Circular buffer 497 struct me4000_circ_buf circ_buf; // Circular buffer
500 wait_queue_head_t wait_queue; // Wait queue to sleep while blocking read 498 wait_queue_head_t wait_queue; // Wait queue to sleep while blocking read
501 me4000_info_t *board_info; 499 struct me4000_info *board_info;
502 500
503 struct fasync_struct *fasync_p; // Queue for asynchronous notification 501 struct fasync_struct *fasync_p; // Queue for asynchronous notification
504 502
@@ -523,48 +521,48 @@ typedef struct me4000_ai_context {
523 unsigned long channel_list_count; 521 unsigned long channel_list_count;
524 unsigned long sample_counter; 522 unsigned long sample_counter;
525 int sample_counter_reload; 523 int sample_counter_reload;
526} me4000_ai_context_t; 524};
527 525
528typedef struct me4000_dio_context { 526struct me4000_dio_context {
529 struct list_head list; // linked list of me4000_dio_context_t 527 struct list_head list; // linked list of me4000_dio_context_t
530 int in_use; // Indicates if already opend 528 int in_use; // Indicates if already opend
531 spinlock_t use_lock; // Guards in_use 529 spinlock_t use_lock; // Guards in_use
532 int number; 530 int number;
533 int dio_count; 531 int dio_count;
534 me4000_info_t *board_info; 532 struct me4000_info *board_info;
535 unsigned long dir_reg; 533 unsigned long dir_reg;
536 unsigned long ctrl_reg; 534 unsigned long ctrl_reg;
537 unsigned long port_0_reg; 535 unsigned long port_0_reg;
538 unsigned long port_1_reg; 536 unsigned long port_1_reg;
539 unsigned long port_2_reg; 537 unsigned long port_2_reg;
540 unsigned long port_3_reg; 538 unsigned long port_3_reg;
541} me4000_dio_context_t; 539};
542 540
543typedef struct me4000_cnt_context { 541struct me4000_cnt_context {
544 struct list_head list; // linked list of me4000_dio_context_t 542 struct list_head list; // linked list of me4000_dio_context_t
545 int in_use; // Indicates if already opend 543 int in_use; // Indicates if already opend
546 spinlock_t use_lock; // Guards in_use 544 spinlock_t use_lock; // Guards in_use
547 int number; 545 int number;
548 int cnt_count; 546 int cnt_count;
549 me4000_info_t *board_info; 547 struct me4000_info *board_info;
550 unsigned long ctrl_reg; 548 unsigned long ctrl_reg;
551 unsigned long counter_0_reg; 549 unsigned long counter_0_reg;
552 unsigned long counter_1_reg; 550 unsigned long counter_1_reg;
553 unsigned long counter_2_reg; 551 unsigned long counter_2_reg;
554} me4000_cnt_context_t; 552};
555 553
556typedef struct me4000_ext_int_context { 554struct me4000_ext_int_context {
557 struct list_head list; // linked list of me4000_dio_context_t 555 struct list_head list; // linked list of me4000_dio_context_t
558 int in_use; // Indicates if already opend 556 int in_use; // Indicates if already opend
559 spinlock_t use_lock; // Guards in_use 557 spinlock_t use_lock; // Guards in_use
560 int number; 558 int number;
561 me4000_info_t *board_info; 559 struct me4000_info *board_info;
562 unsigned int irq; 560 unsigned int irq;
563 unsigned long int_count; 561 unsigned long int_count;
564 struct fasync_struct *fasync_ptr; 562 struct fasync_struct *fasync_ptr;
565 unsigned long ctrl_reg; 563 unsigned long ctrl_reg;
566 unsigned long irq_status_reg; 564 unsigned long irq_status_reg;
567} me4000_ext_int_context_t; 565};
568 566
569#endif 567#endif
570 568
@@ -745,12 +743,12 @@ typedef struct me4000_ext_int_context {
745 General type definitions 743 General type definitions
746 ----------------------------------------------------------------------------*/ 744 ----------------------------------------------------------------------------*/
747 745
748typedef struct me4000_user_info { 746struct me4000_user_info {
749 int board_count; // Index of the board after detection 747 int board_count; // Index of the board after detection
750 unsigned long plx_regbase; // PLX configuration space base address 748 unsigned long plx_regbase; // PLX configuration space base address
751 unsigned long me4000_regbase; // Base address of the ME4000 749 resource_size_t me4000_regbase; // Base address of the ME4000
752 unsigned long plx_regbase_size; // PLX register set space 750 unsigned long plx_regbase_size; // PLX register set space
753 unsigned long me4000_regbase_size; // ME4000 register set space 751 resource_size_t me4000_regbase_size; // ME4000 register set space
754 unsigned long serial_no; // Serial number of the board 752 unsigned long serial_no; // Serial number of the board
755 unsigned char hw_revision; // Hardware revision of the board 753 unsigned char hw_revision; // Hardware revision of the board
756 unsigned short vendor_id; // Meilhaus vendor id (0x1402) 754 unsigned short vendor_id; // Meilhaus vendor id (0x1402)
@@ -773,62 +771,62 @@ typedef struct me4000_user_info {
773 int dio_count; // Count of digital I/O ports 771 int dio_count; // Count of digital I/O ports
774 772
775 int cnt_count; // Count of counters 773 int cnt_count; // Count of counters
776} me4000_user_info_t; 774};
777 775
778/*----------------------------------------------------------------------------- 776/*-----------------------------------------------------------------------------
779 Type definitions for analog output 777 Type definitions for analog output
780 ----------------------------------------------------------------------------*/ 778 ----------------------------------------------------------------------------*/
781 779
782typedef struct me4000_ao_channel_list { 780struct me4000_ao_channel_list {
783 unsigned long count; 781 unsigned long count;
784 unsigned long *list; 782 unsigned long *list;
785} me4000_ao_channel_list_t; 783};
786 784
787/*----------------------------------------------------------------------------- 785/*-----------------------------------------------------------------------------
788 Type definitions for analog input 786 Type definitions for analog input
789 ----------------------------------------------------------------------------*/ 787 ----------------------------------------------------------------------------*/
790 788
791typedef struct me4000_ai_channel_list { 789struct me4000_ai_channel_list {
792 unsigned long count; 790 unsigned long count;
793 unsigned long *list; 791 unsigned long *list;
794} me4000_ai_channel_list_t; 792};
795 793
796typedef struct me4000_ai_timer { 794struct me4000_ai_timer {
797 unsigned long pre_chan; 795 unsigned long pre_chan;
798 unsigned long chan; 796 unsigned long chan;
799 unsigned long scan_low; 797 unsigned long scan_low;
800 unsigned long scan_high; 798 unsigned long scan_high;
801} me4000_ai_timer_t; 799};
802 800
803typedef struct me4000_ai_config { 801struct me4000_ai_config {
804 me4000_ai_timer_t timer; 802 struct me4000_ai_timer timer;
805 me4000_ai_channel_list_t channel_list; 803 struct me4000_ai_channel_list channel_list;
806 int sh; 804 int sh;
807} me4000_ai_config_t; 805};
808 806
809typedef struct me4000_ai_single { 807struct me4000_ai_single {
810 int channel; 808 int channel;
811 int range; 809 int range;
812 int mode; 810 int mode;
813 short value; 811 short value;
814 unsigned long timeout; 812 unsigned long timeout;
815} me4000_ai_single_t; 813};
816 814
817typedef struct me4000_ai_trigger { 815struct me4000_ai_trigger {
818 int mode; 816 int mode;
819 int edge; 817 int edge;
820} me4000_ai_trigger_t; 818};
821 819
822typedef struct me4000_ai_sc { 820struct me4000_ai_sc {
823 unsigned long value; 821 unsigned long value;
824 int reload; 822 int reload;
825} me4000_ai_sc_t; 823};
826 824
827/*----------------------------------------------------------------------------- 825/*-----------------------------------------------------------------------------
828 Type definitions for eeprom 826 Type definitions for eeprom
829 ----------------------------------------------------------------------------*/ 827 ----------------------------------------------------------------------------*/
830 828
831typedef struct me4000_eeprom { 829struct me4000_eeprom {
832 unsigned long date; 830 unsigned long date;
833 short uni_10_offset; 831 short uni_10_offset;
834 short uni_10_fullscale; 832 short uni_10_fullscale;
@@ -842,45 +840,45 @@ typedef struct me4000_eeprom {
842 short diff_10_fullscale; 840 short diff_10_fullscale;
843 short diff_2_5_offset; 841 short diff_2_5_offset;
844 short diff_2_5_fullscale; 842 short diff_2_5_fullscale;
845} me4000_eeprom_t; 843};
846 844
847/*----------------------------------------------------------------------------- 845/*-----------------------------------------------------------------------------
848 Type definitions for digital I/O 846 Type definitions for digital I/O
849 ----------------------------------------------------------------------------*/ 847 ----------------------------------------------------------------------------*/
850 848
851typedef struct me4000_dio_config { 849struct me4000_dio_config {
852 int port; 850 int port;
853 int mode; 851 int mode;
854 int function; 852 int function;
855} me4000_dio_config_t; 853};
856 854
857typedef struct me4000_dio_byte { 855struct me4000_dio_byte {
858 int port; 856 int port;
859 unsigned char byte; 857 unsigned char byte;
860} me4000_dio_byte_t; 858};
861 859
862/*----------------------------------------------------------------------------- 860/*-----------------------------------------------------------------------------
863 Type definitions for counters 861 Type definitions for counters
864 ----------------------------------------------------------------------------*/ 862 ----------------------------------------------------------------------------*/
865 863
866typedef struct me4000_cnt { 864struct me4000_cnt {
867 int counter; 865 int counter;
868 unsigned short value; 866 unsigned short value;
869} me4000_cnt_t; 867};
870 868
871typedef struct me4000_cnt_config { 869struct me4000_cnt_config {
872 int counter; 870 int counter;
873 int mode; 871 int mode;
874} me4000_cnt_config_t; 872};
875 873
876/*----------------------------------------------------------------------------- 874/*-----------------------------------------------------------------------------
877 Type definitions for external interrupt 875 Type definitions for external interrupt
878 ----------------------------------------------------------------------------*/ 876 ----------------------------------------------------------------------------*/
879 877
880typedef struct { 878struct me4000_int {
881 int int1_count; 879 int int1_count;
882 int int2_count; 880 int int2_count;
883} me4000_int_type; 881};
884 882
885/*----------------------------------------------------------------------------- 883/*-----------------------------------------------------------------------------
886 The ioctls of the board 884 The ioctls of the board
@@ -888,7 +886,8 @@ typedef struct {
888 886
889#define ME4000_IOCTL_MAXNR 50 887#define ME4000_IOCTL_MAXNR 50
890#define ME4000_MAGIC 'y' 888#define ME4000_MAGIC 'y'
891#define ME4000_GET_USER_INFO _IOR (ME4000_MAGIC, 0, me4000_user_info_t) 889#define ME4000_GET_USER_INFO _IOR (ME4000_MAGIC, 0, \
890 struct me4000_user_info)
892 891
893#define ME4000_AO_START _IOW (ME4000_MAGIC, 1, unsigned long) 892#define ME4000_AO_START _IOW (ME4000_MAGIC, 1, unsigned long)
894#define ME4000_AO_STOP _IO (ME4000_MAGIC, 2) 893#define ME4000_AO_STOP _IO (ME4000_MAGIC, 2)
@@ -904,25 +903,35 @@ typedef struct {
904#define ME4000_AO_DISABLE_DO _IO (ME4000_MAGIC, 12) 903#define ME4000_AO_DISABLE_DO _IO (ME4000_MAGIC, 12)
905#define ME4000_AO_FSM_STATE _IOR (ME4000_MAGIC, 13, int) 904#define ME4000_AO_FSM_STATE _IOR (ME4000_MAGIC, 13, int)
906 905
907#define ME4000_AI_SINGLE _IOR (ME4000_MAGIC, 14, me4000_ai_single_t) 906#define ME4000_AI_SINGLE _IOR (ME4000_MAGIC, 14, \
907 struct me4000_ai_single)
908#define ME4000_AI_START _IOW (ME4000_MAGIC, 15, unsigned long) 908#define ME4000_AI_START _IOW (ME4000_MAGIC, 15, unsigned long)
909#define ME4000_AI_STOP _IO (ME4000_MAGIC, 16) 909#define ME4000_AI_STOP _IO (ME4000_MAGIC, 16)
910#define ME4000_AI_IMMEDIATE_STOP _IO (ME4000_MAGIC, 17) 910#define ME4000_AI_IMMEDIATE_STOP _IO (ME4000_MAGIC, 17)
911#define ME4000_AI_EX_TRIG_ENABLE _IO (ME4000_MAGIC, 18) 911#define ME4000_AI_EX_TRIG_ENABLE _IO (ME4000_MAGIC, 18)
912#define ME4000_AI_EX_TRIG_DISABLE _IO (ME4000_MAGIC, 19) 912#define ME4000_AI_EX_TRIG_DISABLE _IO (ME4000_MAGIC, 19)
913#define ME4000_AI_EX_TRIG_SETUP _IOW (ME4000_MAGIC, 20, me4000_ai_trigger_t) 913#define ME4000_AI_EX_TRIG_SETUP _IOW (ME4000_MAGIC, 20, \
914#define ME4000_AI_CONFIG _IOW (ME4000_MAGIC, 21, me4000_ai_config_t) 914 struct me4000_ai_trigger)
915#define ME4000_AI_SC_SETUP _IOW (ME4000_MAGIC, 22, me4000_ai_sc_t) 915#define ME4000_AI_CONFIG _IOW (ME4000_MAGIC, 21, \
916 struct me4000_ai_config)
917#define ME4000_AI_SC_SETUP _IOW (ME4000_MAGIC, 22, \
918 struct me4000_ai_sc)
916#define ME4000_AI_FSM_STATE _IOR (ME4000_MAGIC, 23, int) 919#define ME4000_AI_FSM_STATE _IOR (ME4000_MAGIC, 23, int)
917 920
918#define ME4000_DIO_CONFIG _IOW (ME4000_MAGIC, 24, me4000_dio_config_t) 921#define ME4000_DIO_CONFIG _IOW (ME4000_MAGIC, 24, \
919#define ME4000_DIO_GET_BYTE _IOR (ME4000_MAGIC, 25, me4000_dio_byte_t) 922 struct me4000_dio_config)
920#define ME4000_DIO_SET_BYTE _IOW (ME4000_MAGIC, 26, me4000_dio_byte_t) 923#define ME4000_DIO_GET_BYTE _IOR (ME4000_MAGIC, 25, \
924 struct me4000_dio_byte)
925#define ME4000_DIO_SET_BYTE _IOW (ME4000_MAGIC, 26, \
926 struct me4000_dio_byte)
921#define ME4000_DIO_RESET _IO (ME4000_MAGIC, 27) 927#define ME4000_DIO_RESET _IO (ME4000_MAGIC, 27)
922 928
923#define ME4000_CNT_READ _IOR (ME4000_MAGIC, 28, me4000_cnt_t) 929#define ME4000_CNT_READ _IOR (ME4000_MAGIC, 28, \
924#define ME4000_CNT_WRITE _IOW (ME4000_MAGIC, 29, me4000_cnt_t) 930 struct me4000_cnt)
925#define ME4000_CNT_CONFIG _IOW (ME4000_MAGIC, 30, me4000_cnt_config_t) 931#define ME4000_CNT_WRITE _IOW (ME4000_MAGIC, 29, \
932 struct me4000_cnt)
933#define ME4000_CNT_CONFIG _IOW (ME4000_MAGIC, 30, \
934 struct me4000_cnt_config)
926#define ME4000_CNT_RESET _IO (ME4000_MAGIC, 31) 935#define ME4000_CNT_RESET _IO (ME4000_MAGIC, 31)
927 936
928#define ME4000_EXT_INT_DISABLE _IO (ME4000_MAGIC, 32) 937#define ME4000_EXT_INT_DISABLE _IO (ME4000_MAGIC, 32)
@@ -934,13 +943,16 @@ typedef struct {
934#define ME4000_AI_FULLSCALE_ENABLE _IO (ME4000_MAGIC, 37) 943#define ME4000_AI_FULLSCALE_ENABLE _IO (ME4000_MAGIC, 37)
935#define ME4000_AI_FULLSCALE_DISABLE _IO (ME4000_MAGIC, 38) 944#define ME4000_AI_FULLSCALE_DISABLE _IO (ME4000_MAGIC, 38)
936 945
937#define ME4000_AI_EEPROM_READ _IOR (ME4000_MAGIC, 39, me4000_eeprom_t) 946#define ME4000_AI_EEPROM_READ _IOR (ME4000_MAGIC, 39, \
938#define ME4000_AI_EEPROM_WRITE _IOW (ME4000_MAGIC, 40, me4000_eeprom_t) 947 struct me4000_eeprom)
948#define ME4000_AI_EEPROM_WRITE _IOW (ME4000_MAGIC, 40, \
949 struct me4000_eeprom)
939 950
940#define ME4000_AO_SIMULTANEOUS_EX_TRIG _IO (ME4000_MAGIC, 41) 951#define ME4000_AO_SIMULTANEOUS_EX_TRIG _IO (ME4000_MAGIC, 41)
941#define ME4000_AO_SIMULTANEOUS_SW _IO (ME4000_MAGIC, 42) 952#define ME4000_AO_SIMULTANEOUS_SW _IO (ME4000_MAGIC, 42)
942#define ME4000_AO_SIMULTANEOUS_DISABLE _IO (ME4000_MAGIC, 43) 953#define ME4000_AO_SIMULTANEOUS_DISABLE _IO (ME4000_MAGIC, 43)
943#define ME4000_AO_SIMULTANEOUS_UPDATE _IOW (ME4000_MAGIC, 44, me4000_ao_channel_list_t) 954#define ME4000_AO_SIMULTANEOUS_UPDATE _IOW (ME4000_MAGIC, 44, \
955 struct me4000_ao_channel_list)
944 956
945#define ME4000_AO_SYNCHRONOUS_EX_TRIG _IO (ME4000_MAGIC, 45) 957#define ME4000_AO_SYNCHRONOUS_EX_TRIG _IO (ME4000_MAGIC, 45)
946#define ME4000_AO_SYNCHRONOUS_SW _IO (ME4000_MAGIC, 46) 958#define ME4000_AO_SYNCHRONOUS_SW _IO (ME4000_MAGIC, 46)
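(Aside, a minimal sketch rather than part of the patch: _IOR()/_IOW() fold only the size of the argument type into the command number, so spelling the argument as the struct tag instead of the removed typedef leaves the encoded ioctl values untouched wherever the structure layout itself is unchanged. The names below are hypothetical and exist only to illustrate the encoding.)

#include <linux/ioctl.h>

struct example_arg {			/* hypothetical argument type */
	int channel;
	int value;
};

#define EXAMPLE_MAGIC		'y'
/* The command value depends on sizeof(struct example_arg), not on
 * whether the type is written as a typedef or as a struct tag. */
#define EXAMPLE_GET_ARG		_IOR(EXAMPLE_MAGIC, 0, struct example_arg)
#define EXAMPLE_SET_ARG		_IOW(EXAMPLE_MAGIC, 1, struct example_arg)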
diff --git a/drivers/staging/poch/Kconfig b/drivers/staging/poch/Kconfig
new file mode 100644
index 000000000000..b3b33b984a57
--- /dev/null
+++ b/drivers/staging/poch/Kconfig
@@ -0,0 +1,6 @@
1config POCH
2 tristate "Redrapids Pocket Change CardBus support"
3 depends on PCI && UIO
 4	default n
5 ---help---
6 Enable support for Redrapids Pocket Change CardBus devices.
diff --git a/drivers/staging/poch/Makefile b/drivers/staging/poch/Makefile
new file mode 100644
index 000000000000..d2b96805cb9e
--- /dev/null
+++ b/drivers/staging/poch/Makefile
@@ -0,0 +1 @@
obj-$(CONFIG_POCH) += poch.o
diff --git a/drivers/staging/poch/README b/drivers/staging/poch/README
new file mode 100644
index 000000000000..f65e979743ba
--- /dev/null
+++ b/drivers/staging/poch/README
@@ -0,0 +1,7 @@
1TODO:
2 - fix transmit overflows
3 - audit userspace interfaces
4 - get reserved major/minor if needed
5
6Please send patches to Greg Kroah-Hartman <greg@kroah.com> and
7Vijay Kumar <vijaykumar@bravegnu.org> and Jaya Kumar <jayakumar.lkml@gmail.com>
diff --git a/drivers/staging/poch/poch.c b/drivers/staging/poch/poch.c
new file mode 100644
index 000000000000..0e113f9a1581
--- /dev/null
+++ b/drivers/staging/poch/poch.c
@@ -0,0 +1,1425 @@
1/*
2 * User-space DMA and UIO based Redrapids Pocket Change CardBus driver
3 *
4 * Copyright 2008 Vijay Kumar <vijaykumar@bravegnu.org>
5 *
6 * Licensed under GPL version 2 only.
7 */
8
9#include <linux/device.h>
10#include <linux/module.h>
11#include <linux/pci.h>
12#include <linux/uio_driver.h>
13#include <linux/spinlock.h>
14#include <linux/cdev.h>
15#include <linux/delay.h>
16#include <linux/sysfs.h>
17#include <linux/poll.h>
18#include <linux/idr.h>
19#include <linux/interrupt.h>
20#include <linux/init.h>
21#include <linux/ioctl.h>
22#include <linux/io.h>
23
24#include "poch.h"
25
26#include <asm/cacheflush.h>
27
28#ifndef PCI_VENDOR_ID_RRAPIDS
29#define PCI_VENDOR_ID_RRAPIDS 0x17D2
30#endif
31
32#ifndef PCI_DEVICE_ID_RRAPIDS_POCKET_CHANGE
33#define PCI_DEVICE_ID_RRAPIDS_POCKET_CHANGE 0x0351
34#endif
35
36#define POCH_NCHANNELS 2
37
38#define MAX_POCH_CARDS 8
39#define MAX_POCH_DEVICES (MAX_POCH_CARDS * POCH_NCHANNELS)
40
41#define DRV_NAME "poch"
42#define PFX DRV_NAME ": "
43
44/*
45 * BAR0 Bridge Register Definitions
46 */
47
48#define BRIDGE_REV_REG 0x0
49#define BRIDGE_INT_MASK_REG 0x4
50#define BRIDGE_INT_STAT_REG 0x8
51
52#define BRIDGE_INT_ACTIVE (0x1 << 31)
53#define BRIDGE_INT_FPGA (0x1 << 2)
54#define BRIDGE_INT_TEMP_FAIL (0x1 << 1)
55#define BRIDGE_INT_TEMP_WARN (0x1 << 0)
56
57#define BRIDGE_FPGA_RESET_REG 0xC
58
59#define BRIDGE_CARD_POWER_REG 0x10
60#define BRIDGE_CARD_POWER_EN (0x1 << 0)
61#define BRIDGE_CARD_POWER_PROG_DONE (0x1 << 31)
62
63#define BRIDGE_JTAG_REG 0x14
64#define BRIDGE_DMA_GO_REG 0x18
65#define BRIDGE_STAT_0_REG 0x1C
66#define BRIDGE_STAT_1_REG 0x20
67#define BRIDGE_STAT_2_REG 0x24
68#define BRIDGE_STAT_3_REG 0x28
69#define BRIDGE_TEMP_STAT_REG 0x2C
70#define BRIDGE_TEMP_THRESH_REG 0x30
71#define BRIDGE_EEPROM_REVSEL_REG 0x34
72#define BRIDGE_CIS_STRUCT_REG 0x100
73#define BRIDGE_BOARDREV_REG 0x124
74
75/*
76 * BAR1 FPGA Register Definitions
77 */
78
79#define FPGA_IFACE_REV_REG 0x0
80#define FPGA_RX_BLOCK_SIZE_REG 0x8
81#define FPGA_TX_BLOCK_SIZE_REG 0xC
82#define FPGA_RX_BLOCK_COUNT_REG 0x10
83#define FPGA_TX_BLOCK_COUNT_REG 0x14
84#define FPGA_RX_CURR_DMA_BLOCK_REG 0x18
85#define FPGA_TX_CURR_DMA_BLOCK_REG 0x1C
86#define FPGA_RX_GROUP_COUNT_REG 0x20
87#define FPGA_TX_GROUP_COUNT_REG 0x24
88#define FPGA_RX_CURR_GROUP_REG 0x28
89#define FPGA_TX_CURR_GROUP_REG 0x2C
90#define FPGA_RX_CURR_PCI_REG 0x38
91#define FPGA_TX_CURR_PCI_REG 0x3C
92#define FPGA_RX_GROUP0_START_REG 0x40
93#define FPGA_TX_GROUP0_START_REG 0xC0
94#define FPGA_DMA_DESC_1_REG 0x140
95#define FPGA_DMA_DESC_2_REG 0x144
96#define FPGA_DMA_DESC_3_REG 0x148
97#define FPGA_DMA_DESC_4_REG 0x14C
98
99#define FPGA_DMA_INT_STAT_REG 0x150
100#define FPGA_DMA_INT_MASK_REG 0x154
101#define FPGA_DMA_INT_RX (1 << 0)
102#define FPGA_DMA_INT_TX (1 << 1)
103
104#define FPGA_RX_GROUPS_PER_INT_REG 0x158
105#define FPGA_TX_GROUPS_PER_INT_REG 0x15C
106#define FPGA_DMA_ADR_PAGE_REG 0x160
107#define FPGA_FPGA_REV_REG 0x200
108
109#define FPGA_ADC_CLOCK_CTL_REG 0x204
110#define FPGA_ADC_CLOCK_CTL_OSC_EN (0x1 << 3)
111#define FPGA_ADC_CLOCK_LOCAL_CLK (0x1 | FPGA_ADC_CLOCK_CTL_OSC_EN)
 112#define FPGA_ADC_CLOCK_EXT_SAMP_CLK 0x0
113
114#define FPGA_ADC_DAC_EN_REG 0x208
115#define FPGA_ADC_DAC_EN_DAC_OFF (0x1 << 1)
116#define FPGA_ADC_DAC_EN_ADC_OFF (0x1 << 0)
117
118#define FPGA_INT_STAT_REG 0x20C
119#define FPGA_INT_MASK_REG 0x210
120#define FPGA_INT_PLL_UNLOCKED (0x1 << 9)
121#define FPGA_INT_DMA_CORE (0x1 << 8)
122#define FPGA_INT_TX_FF_EMPTY (0x1 << 7)
123#define FPGA_INT_RX_FF_EMPTY (0x1 << 6)
124#define FPGA_INT_TX_FF_OVRFLW (0x1 << 3)
125#define FPGA_INT_RX_FF_OVRFLW (0x1 << 2)
126#define FPGA_INT_TX_ACQ_DONE (0x1 << 1)
127#define FPGA_INT_RX_ACQ_DONE (0x1)
128
129#define FPGA_RX_ADC_CTL_REG 0x214
130#define FPGA_RX_ADC_CTL_CONT_CAP (0x0)
131#define FPGA_RX_ADC_CTL_SNAP_CAP (0x1)
132
133#define FPGA_RX_ARM_REG 0x21C
134
135#define FPGA_DOM_REG 0x224
136#define FPGA_DOM_DCM_RESET (0x1 << 5)
137#define FPGA_DOM_SOFT_RESET (0x1 << 4)
138#define FPGA_DOM_DUAL_M_SG_DMA (0x0)
139#define FPGA_DOM_TARGET_ACCESS (0x1)
140
141#define FPGA_TX_CTL_REG 0x228
142#define FPGA_TX_CTL_FIFO_FLUSH (0x1 << 9)
143#define FPGA_TX_CTL_OUTPUT_ZERO (0x0 << 2)
144#define FPGA_TX_CTL_OUTPUT_CARDBUS (0x1 << 2)
145#define FPGA_TX_CTL_OUTPUT_ADC (0x2 << 2)
146#define FPGA_TX_CTL_OUTPUT_SNAPSHOT (0x3 << 2)
147#define FPGA_TX_CTL_LOOPBACK (0x1 << 0)
148
149#define FPGA_ENDIAN_MODE_REG 0x22C
150#define FPGA_RX_FIFO_COUNT_REG 0x28C
151#define FPGA_TX_ENABLE_REG 0x298
152#define FPGA_TX_TRIGGER_REG 0x29C
153#define FPGA_TX_DATAMEM_COUNT_REG 0x2A8
154#define FPGA_CAP_FIFO_REG 0x300
155#define FPGA_TX_SNAPSHOT_REG 0x8000
156
157/*
158 * Channel Index Definitions
159 */
160
161enum {
162 CHNO_RX_CHANNEL,
163 CHNO_TX_CHANNEL,
164};
165
166struct poch_dev;
167
168enum channel_dir {
169 CHANNEL_DIR_RX,
170 CHANNEL_DIR_TX,
171};
172
173struct poch_group_info {
174 struct page *pg;
175 dma_addr_t dma_addr;
176 unsigned long user_offset;
177};
178
179struct channel_info {
180 unsigned int chno;
181
182 atomic_t sys_block_size;
183 atomic_t sys_group_size;
184 atomic_t sys_group_count;
185
186 enum channel_dir dir;
187
188 unsigned long block_size;
189 unsigned long group_size;
190 unsigned long group_count;
191
192 /* Contains the DMA address and VM offset of each group. */
193 struct poch_group_info *groups;
194
195 /* Contains the header and circular buffer exported to userspace. */
196 spinlock_t group_offsets_lock;
197 struct poch_cbuf_header *header;
198 struct page *header_pg;
199 unsigned long header_size;
200
201 /* Last group indicated as 'complete' to user space. */
202 unsigned int transfer;
203
204 wait_queue_head_t wq;
205
206 union {
207 unsigned int data_available;
208 unsigned int space_available;
209 };
210
211 void __iomem *bridge_iomem;
212 void __iomem *fpga_iomem;
213 spinlock_t *iomem_lock;
214
215 atomic_t free;
216 atomic_t inited;
217
218 /* Error counters */
219 struct poch_counters counters;
220 spinlock_t counters_lock;
221
222 struct device *dev;
223};
224
225struct poch_dev {
226 struct uio_info uio;
227 struct pci_dev *pci_dev;
228 unsigned int nchannels;
229 struct channel_info channels[POCH_NCHANNELS];
230 struct cdev cdev;
231
232 /* Counts the no. of channels that have been opened. On first
233 * open, the card is powered on. On last channel close, the
234 * card is powered off.
235 */
236 atomic_t usage;
237
238 void __iomem *bridge_iomem;
239 void __iomem *fpga_iomem;
240 spinlock_t iomem_lock;
241
242 struct device *dev;
243};
244
245static dev_t poch_first_dev;
246static struct class *poch_cls;
247static DEFINE_IDR(poch_ids);
248
249static ssize_t store_block_size(struct device *dev,
250 struct device_attribute *attr,
251 const char *buf, size_t count)
252{
253 struct channel_info *channel = dev_get_drvdata(dev);
254 unsigned long block_size;
255
256 sscanf(buf, "%lu", &block_size);
257 atomic_set(&channel->sys_block_size, block_size);
258
259 return count;
260}
261static DEVICE_ATTR(block_size, S_IWUSR|S_IWGRP, NULL, store_block_size);
262
263static ssize_t store_group_size(struct device *dev,
264 struct device_attribute *attr,
265 const char *buf, size_t count)
266{
267 struct channel_info *channel = dev_get_drvdata(dev);
268 unsigned long group_size;
269
270 sscanf(buf, "%lu", &group_size);
271 atomic_set(&channel->sys_group_size, group_size);
272
273 return count;
274}
275static DEVICE_ATTR(group_size, S_IWUSR|S_IWGRP, NULL, store_group_size);
276
277static ssize_t store_group_count(struct device *dev,
278 struct device_attribute *attr,
279 const char *buf, size_t count)
280{
281 struct channel_info *channel = dev_get_drvdata(dev);
282 unsigned long group_count;
283
284 sscanf(buf, "%lu", &group_count);
285 atomic_set(&channel->sys_group_count, group_count);
286
287 return count;
288}
289static DEVICE_ATTR(group_count, S_IWUSR|S_IWGRP, NULL, store_group_count);
290
291static ssize_t show_direction(struct device *dev,
292 struct device_attribute *attr, char *buf)
293{
294 struct channel_info *channel = dev_get_drvdata(dev);
295 int len;
296
297 len = sprintf(buf, "%s\n", (channel->dir ? "tx" : "rx"));
298 return len;
299}
300static DEVICE_ATTR(dir, S_IRUSR|S_IRGRP, show_direction, NULL);
301
302static ssize_t show_mmap_size(struct device *dev,
303 struct device_attribute *attr, char *buf)
304{
305 struct channel_info *channel = dev_get_drvdata(dev);
306 int len;
307 unsigned long mmap_size;
308 unsigned long group_pages;
309 unsigned long header_pages;
310 unsigned long total_group_pages;
311
312 /* FIXME: We do not have to add 1, if group_size a multiple of
313 PAGE_SIZE. */
314 group_pages = (channel->group_size / PAGE_SIZE) + 1;
315 header_pages = (channel->header_size / PAGE_SIZE) + 1;
316 total_group_pages = group_pages * channel->group_count;
317
318 mmap_size = (header_pages + total_group_pages) * PAGE_SIZE;
319 len = sprintf(buf, "%lu\n", mmap_size);
320 return len;
321}
322static DEVICE_ATTR(mmap_size, S_IRUSR|S_IRGRP, show_mmap_size, NULL);
323
324static struct device_attribute *poch_class_attrs[] = {
325 &dev_attr_block_size,
326 &dev_attr_group_size,
327 &dev_attr_group_count,
328 &dev_attr_dir,
329 &dev_attr_mmap_size,
330};
331
332static void poch_channel_free_groups(struct channel_info *channel)
333{
334 unsigned long i;
335
336 for (i = 0; i < channel->group_count; i++) {
337 struct poch_group_info *group;
338 unsigned int order;
339
340 group = &channel->groups[i];
341 order = get_order(channel->group_size);
342 if (group->pg)
343 __free_pages(group->pg, order);
344 }
345}
346
347static int poch_channel_alloc_groups(struct channel_info *channel)
348{
349 unsigned long i;
350 unsigned long group_pages;
351 unsigned long header_pages;
352
353 group_pages = (channel->group_size / PAGE_SIZE) + 1;
354 header_pages = (channel->header_size / PAGE_SIZE) + 1;
355
356 for (i = 0; i < channel->group_count; i++) {
357 struct poch_group_info *group;
358 unsigned int order;
359 gfp_t gfp_mask;
360
361 group = &channel->groups[i];
362 order = get_order(channel->group_size);
363
364 /*
365 * __GFP_COMP is required here since we are going to
366 * perform non-linear mapping to userspace. For more
367 * information read the vm_insert_page() function
368 * comments.
369 */
370
371 gfp_mask = GFP_KERNEL | GFP_DMA32 | __GFP_ZERO;
372 group->pg = alloc_pages(gfp_mask, order);
373 if (!group->pg) {
374 poch_channel_free_groups(channel);
375 return -ENOMEM;
376 }
377
378 /* FIXME: This is the physical address not the bus
379 * address! This won't work in architectures that
380 * have an IOMMU. Can we use pci_map_single() for
381 * this?
382 */
383 group->dma_addr = page_to_pfn(group->pg) * PAGE_SIZE;
384 group->user_offset =
385 (header_pages + (i * group_pages)) * PAGE_SIZE;
386
 387 printk(KERN_INFO PFX "%lu: user_offset: 0x%lx dma: 0x%llx\n", i,
 388 group->user_offset, (unsigned long long)group->dma_addr);
389 }
390
391 return 0;
392}
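(Sketch only, not part of the patch: one possible shape of the fix the FIXME above asks about. It assumes the channel can reach its struct pci_dev, which this patch does not plumb through, and it maps each group via the DMA API so the programmed address is a bus address that would also work behind an IOMMU; error checking with dma_mapping_error() is omitted for brevity.)

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static dma_addr_t poch_group_map(struct pci_dev *pdev,
				 struct channel_info *channel,
				 struct poch_group_info *group)
{
	enum dma_data_direction dir = (channel->dir == CHANNEL_DIR_RX) ?
				      DMA_FROM_DEVICE : DMA_TO_DEVICE;

	/* Replaces the raw page_to_pfn() * PAGE_SIZE calculation. */
	return dma_map_page(&pdev->dev, group->pg, 0,
			    channel->group_size, dir);
}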
393
394static void channel_latch_attr(struct channel_info *channel)
395{
396 channel->group_count = atomic_read(&channel->sys_group_count);
397 channel->group_size = atomic_read(&channel->sys_group_size);
398 channel->block_size = atomic_read(&channel->sys_block_size);
399}
400
401/*
402 * Configure DMA group registers
403 */
404static void channel_dma_init(struct channel_info *channel)
405{
406 void __iomem *fpga = channel->fpga_iomem;
407 u32 group_regs_base;
408 u32 group_reg;
409 unsigned int page;
410 unsigned int group_in_page;
411 unsigned long i;
412 u32 block_size_reg;
413 u32 block_count_reg;
414 u32 group_count_reg;
415 u32 groups_per_int_reg;
416 u32 curr_pci_reg;
417
418 if (channel->chno == CHNO_RX_CHANNEL) {
419 group_regs_base = FPGA_RX_GROUP0_START_REG;
420 block_size_reg = FPGA_RX_BLOCK_SIZE_REG;
421 block_count_reg = FPGA_RX_BLOCK_COUNT_REG;
422 group_count_reg = FPGA_RX_GROUP_COUNT_REG;
423 groups_per_int_reg = FPGA_RX_GROUPS_PER_INT_REG;
424 curr_pci_reg = FPGA_RX_CURR_PCI_REG;
425 } else {
426 group_regs_base = FPGA_TX_GROUP0_START_REG;
427 block_size_reg = FPGA_TX_BLOCK_SIZE_REG;
428 block_count_reg = FPGA_TX_BLOCK_COUNT_REG;
429 group_count_reg = FPGA_TX_GROUP_COUNT_REG;
430 groups_per_int_reg = FPGA_TX_GROUPS_PER_INT_REG;
431 curr_pci_reg = FPGA_TX_CURR_PCI_REG;
432 }
433
434 printk(KERN_WARNING "block_size, group_size, group_count\n");
435 iowrite32(channel->block_size, fpga + block_size_reg);
436 iowrite32(channel->group_size / channel->block_size,
437 fpga + block_count_reg);
438 iowrite32(channel->group_count, fpga + group_count_reg);
439 /* FIXME: Hardcoded groups per int. Get it from sysfs? */
440 iowrite32(1, fpga + groups_per_int_reg);
441
442 /* Unlock PCI address? Not defined in the data sheet, but used
443 * in the reference code by Redrapids.
444 */
445 iowrite32(0x1, fpga + curr_pci_reg);
446
447 /* The DMA address page register is shared between the RX and
448 * TX channels, so acquire lock.
449 */
450 spin_lock(channel->iomem_lock);
451 for (i = 0; i < channel->group_count; i++) {
452 page = i / 32;
453 group_in_page = i % 32;
454
455 group_reg = group_regs_base + (group_in_page * 4);
456
457 iowrite32(page, fpga + FPGA_DMA_ADR_PAGE_REG);
458 iowrite32(channel->groups[i].dma_addr, fpga + group_reg);
459 }
460 for (i = 0; i < channel->group_count; i++) {
461 page = i / 32;
462 group_in_page = i % 32;
463
464 group_reg = group_regs_base + (group_in_page * 4);
465
466 iowrite32(page, fpga + FPGA_DMA_ADR_PAGE_REG);
467 printk(KERN_INFO PFX "%ld: read dma_addr: 0x%x\n", i,
468 ioread32(fpga + group_reg));
469 }
470 spin_unlock(channel->iomem_lock);
471
472}
473
474static int poch_channel_alloc_header(struct channel_info *channel)
475{
476 struct poch_cbuf_header *header = channel->header;
477 unsigned long group_offset_size;
478 unsigned long tot_group_offsets_size;
479
 480 /* Allocate memory to hold the header exported to userspace */
481 group_offset_size = sizeof(header->group_offsets[0]);
482 tot_group_offsets_size = group_offset_size * channel->group_count;
483 channel->header_size = sizeof(*header) + tot_group_offsets_size;
484 channel->header_pg = alloc_pages(GFP_KERNEL | __GFP_ZERO,
485 get_order(channel->header_size));
486 if (!channel->header_pg)
487 return -ENOMEM;
488
489 channel->header = page_address(channel->header_pg);
490
491 return 0;
492}
493
494static void poch_channel_free_header(struct channel_info *channel)
495{
496 unsigned int order;
497
498 order = get_order(channel->header_size);
499 __free_pages(channel->header_pg, order);
500}
501
502static void poch_channel_init_header(struct channel_info *channel)
503{
504 int i;
505 struct poch_group_info *groups;
506 s32 *group_offsets;
507
508 channel->header->group_size_bytes = channel->group_size;
509 channel->header->group_count = channel->group_count;
510
511 spin_lock_init(&channel->group_offsets_lock);
512
513 group_offsets = channel->header->group_offsets;
514 groups = channel->groups;
515
516 for (i = 0; i < channel->group_count; i++) {
517 if (channel->dir == CHANNEL_DIR_RX)
518 group_offsets[i] = -1;
519 else
520 group_offsets[i] = groups[i].user_offset;
521 }
522}
523
524static void __poch_channel_clear_counters(struct channel_info *channel)
525{
526 channel->counters.pll_unlock = 0;
527 channel->counters.fifo_empty = 0;
528 channel->counters.fifo_overflow = 0;
529}
530
531static int poch_channel_init(struct channel_info *channel,
532 struct poch_dev *poch_dev)
533{
534 struct pci_dev *pdev = poch_dev->pci_dev;
535 struct device *dev = &pdev->dev;
536 unsigned long alloc_size;
537 int ret;
538
539 printk(KERN_WARNING "channel_latch_attr\n");
540
541 channel_latch_attr(channel);
542
543 channel->transfer = 0;
544
545 /* Allocate memory to hold group information. */
546 alloc_size = channel->group_count * sizeof(struct poch_group_info);
547 channel->groups = kzalloc(alloc_size, GFP_KERNEL);
548 if (!channel->groups) {
549 dev_err(dev, "error allocating memory for group info\n");
550 ret = -ENOMEM;
551 goto out;
552 }
553
554 printk(KERN_WARNING "poch_channel_alloc_groups\n");
555
556 ret = poch_channel_alloc_groups(channel);
557 if (ret) {
558 dev_err(dev, "error allocating groups of order %d\n",
559 get_order(channel->group_size));
560 goto out_free_group_info;
561 }
562
563 ret = poch_channel_alloc_header(channel);
564 if (ret) {
565 dev_err(dev, "error allocating user space header\n");
566 goto out_free_groups;
567 }
568
569 channel->fpga_iomem = poch_dev->fpga_iomem;
570 channel->bridge_iomem = poch_dev->bridge_iomem;
571 channel->iomem_lock = &poch_dev->iomem_lock;
572 spin_lock_init(&channel->counters_lock);
573
574 __poch_channel_clear_counters(channel);
575
576 printk(KERN_WARNING "poch_channel_init_header\n");
577
578 poch_channel_init_header(channel);
579
580 return 0;
581
582 out_free_groups:
583 poch_channel_free_groups(channel);
584 out_free_group_info:
585 kfree(channel->groups);
586 out:
587 return ret;
588}
589
590static int poch_wait_fpga_prog(void __iomem *bridge)
591{
592 unsigned long total_wait;
593 const unsigned long wait_period = 100;
594 /* FIXME: Get the actual timeout */
595 const unsigned long prog_timeo = 10000; /* 10 Seconds */
596 u32 card_power;
597
 598 printk(KERN_WARNING "poch_wait_fpga_prog\n");
599
600 printk(KERN_INFO PFX "programming fpga ...\n");
601 total_wait = 0;
602 while (1) {
603 msleep(wait_period);
604 total_wait += wait_period;
605
606 card_power = ioread32(bridge + BRIDGE_CARD_POWER_REG);
607 if (card_power & BRIDGE_CARD_POWER_PROG_DONE) {
608 printk(KERN_INFO PFX "programming done\n");
609 return 0;
610 }
611 if (total_wait > prog_timeo) {
612 printk(KERN_ERR PFX
613 "timed out while programming FPGA\n");
614 return -EIO;
615 }
616 }
617}
618
619static void poch_card_power_off(struct poch_dev *poch_dev)
620{
621 void __iomem *bridge = poch_dev->bridge_iomem;
622 u32 card_power;
623
624 iowrite32(0, bridge + BRIDGE_INT_MASK_REG);
625 iowrite32(0, bridge + BRIDGE_DMA_GO_REG);
626
627 card_power = ioread32(bridge + BRIDGE_CARD_POWER_REG);
628 iowrite32(card_power & ~BRIDGE_CARD_POWER_EN,
629 bridge + BRIDGE_CARD_POWER_REG);
630}
631
632enum clk_src {
633 CLK_SRC_ON_BOARD,
634 CLK_SRC_EXTERNAL
635};
636
637static void poch_card_clock_on(void __iomem *fpga)
638{
639 /* FIXME: Get this data through sysfs? */
640 enum clk_src clk_src = CLK_SRC_ON_BOARD;
641
642 if (clk_src == CLK_SRC_ON_BOARD) {
643 iowrite32(FPGA_ADC_CLOCK_LOCAL_CLK | FPGA_ADC_CLOCK_CTL_OSC_EN,
644 fpga + FPGA_ADC_CLOCK_CTL_REG);
645 } else if (clk_src == CLK_SRC_EXTERNAL) {
646 iowrite32(FPGA_ADC_CLOCK_EXT_SAMP_CLK,
647 fpga + FPGA_ADC_CLOCK_CTL_REG);
648 }
649}
650
651static int poch_card_power_on(struct poch_dev *poch_dev)
652{
653 void __iomem *bridge = poch_dev->bridge_iomem;
654 void __iomem *fpga = poch_dev->fpga_iomem;
655
656 iowrite32(BRIDGE_CARD_POWER_EN, bridge + BRIDGE_CARD_POWER_REG);
657
658 if (poch_wait_fpga_prog(bridge) != 0) {
659 poch_card_power_off(poch_dev);
660 return -EIO;
661 }
662
663 poch_card_clock_on(fpga);
664
665 /* Sync to new clock, reset state machines, set DMA mode. */
666 iowrite32(FPGA_DOM_DCM_RESET | FPGA_DOM_SOFT_RESET
667 | FPGA_DOM_DUAL_M_SG_DMA, fpga + FPGA_DOM_REG);
668
669 /* FIXME: The time required for sync. needs to be tuned. */
670 msleep(1000);
671
672 return 0;
673}
674
675static void poch_channel_analog_on(struct channel_info *channel)
676{
677 void __iomem *fpga = channel->fpga_iomem;
678 u32 adc_dac_en;
679
680 spin_lock(channel->iomem_lock);
681 adc_dac_en = ioread32(fpga + FPGA_ADC_DAC_EN_REG);
682 switch (channel->chno) {
683 case CHNO_RX_CHANNEL:
684 iowrite32(adc_dac_en & ~FPGA_ADC_DAC_EN_ADC_OFF,
685 fpga + FPGA_ADC_DAC_EN_REG);
686 break;
687 case CHNO_TX_CHANNEL:
688 iowrite32(adc_dac_en & ~FPGA_ADC_DAC_EN_DAC_OFF,
689 fpga + FPGA_ADC_DAC_EN_REG);
690 break;
691 }
692 spin_unlock(channel->iomem_lock);
693}
694
695static int poch_open(struct inode *inode, struct file *filp)
696{
697 struct poch_dev *poch_dev;
698 struct channel_info *channel;
699 void __iomem *bridge;
700 void __iomem *fpga;
701 int chno;
702 int usage;
703 int ret;
704
705 poch_dev = container_of(inode->i_cdev, struct poch_dev, cdev);
706 bridge = poch_dev->bridge_iomem;
707 fpga = poch_dev->fpga_iomem;
708
709 chno = iminor(inode) % poch_dev->nchannels;
710 channel = &poch_dev->channels[chno];
711
712 if (!atomic_dec_and_test(&channel->free)) {
713 atomic_inc(&channel->free);
714 ret = -EBUSY;
715 goto out;
716 }
717
718 usage = atomic_inc_return(&poch_dev->usage);
719
720 printk(KERN_WARNING "poch_card_power_on\n");
721
722 if (usage == 1) {
723 ret = poch_card_power_on(poch_dev);
724 if (ret)
725 goto out_dec_usage;
726 }
727
728 printk(KERN_INFO "CardBus Bridge Revision: %x\n",
729 ioread32(bridge + BRIDGE_REV_REG));
730 printk(KERN_INFO "CardBus Interface Revision: %x\n",
731 ioread32(fpga + FPGA_IFACE_REV_REG));
732
733 channel->chno = chno;
734 filp->private_data = channel;
735
736 printk(KERN_WARNING "poch_channel_init\n");
737
738 ret = poch_channel_init(channel, poch_dev);
739 if (ret)
740 goto out_power_off;
741
742 poch_channel_analog_on(channel);
743
744 printk(KERN_WARNING "channel_dma_init\n");
745
746 channel_dma_init(channel);
747
748 printk(KERN_WARNING "poch_channel_analog_on\n");
749
750 if (usage == 1) {
751 printk(KERN_WARNING "setting up DMA\n");
752
753 /* Initialize DMA Controller. */
754 iowrite32(FPGA_CAP_FIFO_REG, bridge + BRIDGE_STAT_2_REG);
755 iowrite32(FPGA_DMA_DESC_1_REG, bridge + BRIDGE_STAT_3_REG);
756
757 ioread32(fpga + FPGA_DMA_INT_STAT_REG);
758 ioread32(fpga + FPGA_INT_STAT_REG);
759 ioread32(bridge + BRIDGE_INT_STAT_REG);
760
761 /* Initialize Interrupts. FIXME: Enable temperature
 762 * handling. We are enabling both Tx and Rx channel
 763 * interrupts here. Do we need to enable interrupts
 764 * only for the current channel? Anyway, we won't get
765 * the interrupt unless the DMA is activated.
766 */
767 iowrite32(BRIDGE_INT_FPGA, bridge + BRIDGE_INT_MASK_REG);
768 iowrite32(FPGA_INT_DMA_CORE
769 | FPGA_INT_PLL_UNLOCKED
770 | FPGA_INT_TX_FF_EMPTY
771 | FPGA_INT_RX_FF_EMPTY
772 | FPGA_INT_TX_FF_OVRFLW
773 | FPGA_INT_RX_FF_OVRFLW,
774 fpga + FPGA_INT_MASK_REG);
775 iowrite32(FPGA_DMA_INT_RX | FPGA_DMA_INT_TX,
776 fpga + FPGA_DMA_INT_MASK_REG);
777 }
778
779 if (channel->dir == CHANNEL_DIR_TX) {
780 /* Flush TX FIFO and output data from cardbus. */
781 iowrite32(FPGA_TX_CTL_FIFO_FLUSH
782 | FPGA_TX_CTL_OUTPUT_CARDBUS,
783 fpga + FPGA_TX_CTL_REG);
784 }
785
786 atomic_inc(&channel->inited);
787
788 return 0;
789
790 out_power_off:
791 if (usage == 1)
792 poch_card_power_off(poch_dev);
793 out_dec_usage:
794 atomic_dec(&poch_dev->usage);
795 atomic_inc(&channel->free);
796 out:
797 return ret;
798}
799
800static int poch_release(struct inode *inode, struct file *filp)
801{
802 struct channel_info *channel = filp->private_data;
803 struct poch_dev *poch_dev;
804 int usage;
805
806 poch_dev = container_of(inode->i_cdev, struct poch_dev, cdev);
807
808 usage = atomic_dec_return(&poch_dev->usage);
809 if (usage == 0) {
810 printk(KERN_WARNING "poch_card_power_off\n");
811 poch_card_power_off(poch_dev);
812 }
813
814 atomic_dec(&channel->inited);
815 poch_channel_free_header(channel);
816 poch_channel_free_groups(channel);
817 kfree(channel->groups);
818 atomic_inc(&channel->free);
819
820 return 0;
821}
822
823/*
824 * Map the header and the group buffers, to user space.
825 */
826static int poch_mmap(struct file *filp, struct vm_area_struct *vma)
827{
828 struct channel_info *channel = filp->private_data;
829
830 unsigned long start;
831 unsigned long size;
832
833 unsigned long group_pages;
834 unsigned long header_pages;
835 unsigned long total_group_pages;
836
837 int pg_num;
838 struct page *pg;
839
840 int i;
841 int ret;
842
843 printk(KERN_WARNING "poch_mmap\n");
844
845 if (vma->vm_pgoff) {
846 printk(KERN_WARNING PFX "page offset: %lu\n", vma->vm_pgoff);
847 return -EINVAL;
848 }
849
850 group_pages = (channel->group_size / PAGE_SIZE) + 1;
851 header_pages = (channel->header_size / PAGE_SIZE) + 1;
852 total_group_pages = group_pages * channel->group_count;
853
854 size = vma->vm_end - vma->vm_start;
855 if (size != (header_pages + total_group_pages) * PAGE_SIZE) {
856 printk(KERN_WARNING PFX "required %lu bytes\n", size);
857 return -EINVAL;
858 }
859
860 start = vma->vm_start;
861
862 /* FIXME: Cleanup required on failure? */
863 pg = channel->header_pg;
864 for (pg_num = 0; pg_num < header_pages; pg_num++, pg++) {
865 printk(KERN_DEBUG PFX "page_count: %d\n", page_count(pg));
866 printk(KERN_DEBUG PFX "%d: header: 0x%lx\n", pg_num, start);
867 ret = vm_insert_page(vma, start, pg);
868 if (ret) {
869 printk(KERN_DEBUG "vm_insert 1 failed at %lx\n", start);
870 return ret;
871 }
872 start += PAGE_SIZE;
873 }
874
875 for (i = 0; i < channel->group_count; i++) {
876 pg = channel->groups[i].pg;
877 for (pg_num = 0; pg_num < group_pages; pg_num++, pg++) {
878 printk(KERN_DEBUG PFX "%d: group %d: 0x%lx\n",
879 pg_num, i, start);
880 ret = vm_insert_page(vma, start, pg);
881 if (ret) {
882 printk(KERN_DEBUG PFX
883 "vm_insert 2 failed at %d\n", pg_num);
884 return ret;
885 }
886 start += PAGE_SIZE;
887 }
888 }
889
890 return 0;
891}
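(Hypothetical user-space sketch, not part of the patch: the sysfs path and device node below are assumptions, but the size handshake is the one enforced above, i.e. mmap() must be called with offset 0 and with exactly the length exported by the mmap_size attribute, header pages followed by the group buffers.)

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	unsigned long mmap_size;
	FILE *attr;
	void *mem;
	int fd;

	/* Path is an assumption; mmap_size is the attribute defined above. */
	attr = fopen("/sys/class/poch/poch0/ch0/mmap_size", "r");
	if (!attr || fscanf(attr, "%lu", &mmap_size) != 1)
		return 1;
	fclose(attr);

	fd = open("/dev/poch0_rx", O_RDWR);	/* node name is an assumption */
	if (fd < 0)
		return 1;

	/* Header page(s) first, then the per-group buffers. */
	mem = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	return mem == MAP_FAILED ? 1 : 0;
}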
892
893/*
894 * Check whether there is some group that the user space has not
895 * consumed yet. When the user space consumes a group, it sets it to
 896 * -1. Consuming could be reading data in case of RX and filling a
897 * buffer in case of TX.
898 */
899static int poch_channel_available(struct channel_info *channel)
900{
901 int i;
902
903 spin_lock_irq(&channel->group_offsets_lock);
904
905 for (i = 0; i < channel->group_count; i++) {
906 if (channel->dir == CHANNEL_DIR_RX
907 && channel->header->group_offsets[i] == -1) {
908 spin_unlock_irq(&channel->group_offsets_lock);
909 return 1;
910 }
911
912 if (channel->dir == CHANNEL_DIR_TX
913 && channel->header->group_offsets[i] != -1) {
914 spin_unlock_irq(&channel->group_offsets_lock);
915 return 1;
916 }
917 }
918
919 spin_unlock_irq(&channel->group_offsets_lock);
920
921 return 0;
922}
923
924static unsigned int poch_poll(struct file *filp, poll_table *pt)
925{
926 struct channel_info *channel = filp->private_data;
927 unsigned int ret = 0;
928
929 poll_wait(filp, &channel->wq, pt);
930
931 if (poch_channel_available(channel)) {
932 if (channel->dir == CHANNEL_DIR_RX)
933 ret = POLLIN | POLLRDNORM;
934 else
935 ret = POLLOUT | POLLWRNORM;
936 }
937
938 return ret;
939}
940
941static int poch_ioctl(struct inode *inode, struct file *filp,
942 unsigned int cmd, unsigned long arg)
943{
944 struct channel_info *channel = filp->private_data;
945 void __iomem *fpga = channel->fpga_iomem;
946 void __iomem *bridge = channel->bridge_iomem;
947 void __user *argp = (void __user *)arg;
948 struct vm_area_struct *vms;
949 struct poch_counters counters;
950 int ret;
951
952 switch (cmd) {
953 case POCH_IOC_TRANSFER_START:
954 switch (channel->chno) {
955 case CHNO_TX_CHANNEL:
956 printk(KERN_INFO PFX "ioctl: Tx start\n");
957 iowrite32(0x1, fpga + FPGA_TX_TRIGGER_REG);
958 iowrite32(0x1, fpga + FPGA_TX_ENABLE_REG);
959
960 /* FIXME: Does it make sense to do a DMA GO
961 * twice, once in Tx and once in Rx.
962 */
963 iowrite32(0x1, bridge + BRIDGE_DMA_GO_REG);
964 break;
965 case CHNO_RX_CHANNEL:
966 printk(KERN_INFO PFX "ioctl: Rx start\n");
967 iowrite32(0x1, fpga + FPGA_RX_ARM_REG);
968 iowrite32(0x1, bridge + BRIDGE_DMA_GO_REG);
969 break;
970 }
971 break;
972 case POCH_IOC_TRANSFER_STOP:
973 switch (channel->chno) {
974 case CHNO_TX_CHANNEL:
975 printk(KERN_INFO PFX "ioctl: Tx stop\n");
976 iowrite32(0x0, fpga + FPGA_TX_ENABLE_REG);
977 iowrite32(0x0, fpga + FPGA_TX_TRIGGER_REG);
978 iowrite32(0x0, bridge + BRIDGE_DMA_GO_REG);
979 break;
980 case CHNO_RX_CHANNEL:
981 printk(KERN_INFO PFX "ioctl: Rx stop\n");
982 iowrite32(0x0, fpga + FPGA_RX_ARM_REG);
983 iowrite32(0x0, bridge + BRIDGE_DMA_GO_REG);
984 break;
985 }
986 break;
987 case POCH_IOC_GET_COUNTERS:
 988 if (!access_ok(VERIFY_WRITE, argp, sizeof(struct poch_counters)))
989 return -EFAULT;
990
991 spin_lock_irq(&channel->counters_lock);
992 counters = channel->counters;
993 __poch_channel_clear_counters(channel);
994 spin_unlock_irq(&channel->counters_lock);
995
996 ret = copy_to_user(argp, &counters,
997 sizeof(struct poch_counters));
998 if (ret)
 999 return -EFAULT;
1000
1001 break;
1002 case POCH_IOC_SYNC_GROUP_FOR_USER:
1003 case POCH_IOC_SYNC_GROUP_FOR_DEVICE:
1004 vms = find_vma(current->mm, arg);
1005 if (!vms)
1006 /* Address not mapped. */
1007 return -EINVAL;
1008 if (vms->vm_file != filp)
1009 /* Address mapped from different device/file. */
1010 return -EINVAL;
1011
1012 flush_cache_range(vms, arg, arg + channel->group_size);
1013 break;
1014 }
1015 return 0;
1016}
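(Hypothetical user-space sketch: POCH_IOC_* and struct poch_counters come from poch.h, which is not shown in this hunk, and the device node name is an assumption. It drives a receive channel through the start/stop and counter ioctls handled above; POCH_IOC_GET_COUNTERS copies the counters out and the driver clears its own copy in the same call.)

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include "poch.h"

int main(void)
{
	struct poch_counters counters;
	int fd;

	fd = open("/dev/poch0_rx", O_RDWR);	/* node name is an assumption */
	if (fd < 0)
		return 1;

	ioctl(fd, POCH_IOC_TRANSFER_START);	/* arms RX and starts DMA */

	/* ... consume data from the mmap()ed group buffers ... */

	/* Read the accumulated pll_unlock / fifo_empty / fifo_overflow
	 * counts; the driver resets them after copying them out. */
	ioctl(fd, POCH_IOC_GET_COUNTERS, &counters);

	ioctl(fd, POCH_IOC_TRANSFER_STOP);
	close(fd);
	return 0;
}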
1017
1018static struct file_operations poch_fops = {
1019 .owner = THIS_MODULE,
1020 .open = poch_open,
1021 .release = poch_release,
1022 .ioctl = poch_ioctl,
1023 .poll = poch_poll,
1024 .mmap = poch_mmap
1025};
1026
1027static void poch_irq_dma(struct channel_info *channel)
1028{
1029 u32 prev_transfer;
1030 u32 curr_transfer;
1031 long groups_done;
1032 unsigned long i, j;
1033 struct poch_group_info *groups;
1034 s32 *group_offsets;
1035 u32 curr_group_reg;
1036
1037 if (!atomic_read(&channel->inited))
1038 return;
1039
1040 prev_transfer = channel->transfer;
1041
1042 if (channel->chno == CHNO_RX_CHANNEL)
1043 curr_group_reg = FPGA_RX_CURR_GROUP_REG;
1044 else
1045 curr_group_reg = FPGA_TX_CURR_GROUP_REG;
1046
1047 curr_transfer = ioread32(channel->fpga_iomem + curr_group_reg);
1048
1049 groups_done = curr_transfer - prev_transfer;
1050 /* Check wrap over, and handle it. */
1051 if (groups_done <= 0)
1052 groups_done += channel->group_count;
1053
1054 group_offsets = channel->header->group_offsets;
1055 groups = channel->groups;
1056
1057 spin_lock(&channel->group_offsets_lock);
1058
1059 for (i = 0; i < groups_done; i++) {
1060 j = (prev_transfer + i) % channel->group_count;
1061 if (channel->dir == CHANNEL_DIR_RX)
1062 group_offsets[j] = -1;
1063 else
1064 group_offsets[j] = groups[j].user_offset;
1065 }
1066
1067 spin_unlock(&channel->group_offsets_lock);
1068
1069 channel->transfer = curr_transfer;
1070
1071 wake_up_interruptible(&channel->wq);
1072}
1073
1074static irqreturn_t poch_irq_handler(int irq, void *p)
1075{
1076 struct poch_dev *poch_dev = p;
1077 void __iomem *bridge = poch_dev->bridge_iomem;
1078 void __iomem *fpga = poch_dev->fpga_iomem;
1079 struct channel_info *channel_rx = &poch_dev->channels[CHNO_RX_CHANNEL];
1080 struct channel_info *channel_tx = &poch_dev->channels[CHNO_TX_CHANNEL];
1081 u32 bridge_stat;
1082 u32 fpga_stat;
1083 u32 dma_stat;
1084
1085 bridge_stat = ioread32(bridge + BRIDGE_INT_STAT_REG);
1086 fpga_stat = ioread32(fpga + FPGA_INT_STAT_REG);
1087 dma_stat = ioread32(fpga + FPGA_DMA_INT_STAT_REG);
1088
1089 ioread32(fpga + FPGA_DMA_INT_STAT_REG);
1090 ioread32(fpga + FPGA_INT_STAT_REG);
1091 ioread32(bridge + BRIDGE_INT_STAT_REG);
1092
1093 if (bridge_stat & BRIDGE_INT_FPGA) {
1094 if (fpga_stat & FPGA_INT_DMA_CORE) {
1095 if (dma_stat & FPGA_DMA_INT_RX)
1096 poch_irq_dma(channel_rx);
1097 if (dma_stat & FPGA_DMA_INT_TX)
1098 poch_irq_dma(channel_tx);
1099 }
1100 if (fpga_stat & FPGA_INT_PLL_UNLOCKED) {
1101 channel_tx->counters.pll_unlock++;
1102 channel_rx->counters.pll_unlock++;
1103 if (printk_ratelimit())
1104 printk(KERN_WARNING PFX "PLL unlocked\n");
1105 }
1106 if (fpga_stat & FPGA_INT_TX_FF_EMPTY)
1107 channel_tx->counters.fifo_empty++;
1108 if (fpga_stat & FPGA_INT_TX_FF_OVRFLW)
1109 channel_tx->counters.fifo_overflow++;
1110 if (fpga_stat & FPGA_INT_RX_FF_EMPTY)
1111 channel_rx->counters.fifo_empty++;
1112 if (fpga_stat & FPGA_INT_RX_FF_OVRFLW)
1113 channel_rx->counters.fifo_overflow++;
1114
1115 /*
1116 * FIXME: These errors should be notified through the
1117 * poll interface as POLLERR.
1118 */
1119
1120 /* Re-enable interrupts. */
1121 iowrite32(BRIDGE_INT_FPGA, bridge + BRIDGE_INT_MASK_REG);
1122
1123 return IRQ_HANDLED;
1124 }
1125
1126 return IRQ_NONE;
1127}
1128
1129static void poch_class_dev_unregister(struct poch_dev *poch_dev, int id)
1130{
1131 int i, j;
1132 int nattrs;
1133 struct channel_info *channel;
1134 dev_t devno;
1135
1136 if (poch_dev->dev == NULL)
1137 return;
1138
1139 for (i = 0; i < poch_dev->nchannels; i++) {
1140 channel = &poch_dev->channels[i];
1141 devno = poch_first_dev + (id * poch_dev->nchannels) + i;
1142
1143 if (!channel->dev)
1144 continue;
1145
1146 nattrs = sizeof(poch_class_attrs)/sizeof(poch_class_attrs[0]);
1147 for (j = 0; j < nattrs; j++)
1148 device_remove_file(channel->dev, poch_class_attrs[j]);
1149
1150 device_unregister(channel->dev);
1151 }
1152
1153 device_unregister(poch_dev->dev);
1154}
1155
1156static int __devinit poch_class_dev_register(struct poch_dev *poch_dev,
1157 int id)
1158{
1159 struct device *dev = &poch_dev->pci_dev->dev;
1160 int i, j;
1161 int nattrs;
1162 int ret;
1163 struct channel_info *channel;
1164 dev_t devno;
1165
1166 poch_dev->dev = device_create(poch_cls, &poch_dev->pci_dev->dev,
1167 MKDEV(0, 0), NULL, "poch%d", id);
1168 if (IS_ERR(poch_dev->dev)) {
1169		dev_err(dev, "error creating parent class device\n");
1170 ret = PTR_ERR(poch_dev->dev);
1171 poch_dev->dev = NULL;
1172 return ret;
1173 }
1174
1175 for (i = 0; i < poch_dev->nchannels; i++) {
1176 channel = &poch_dev->channels[i];
1177
1178 devno = poch_first_dev + (id * poch_dev->nchannels) + i;
1179 channel->dev = device_create(poch_cls, poch_dev->dev, devno,
1180 NULL, "ch%d", i);
1181 if (IS_ERR(channel->dev)) {
1182			dev_err(dev, "error creating channel class device\n");
1183 ret = PTR_ERR(channel->dev);
1184 channel->dev = NULL;
1185 poch_class_dev_unregister(poch_dev, id);
1186 return ret;
1187 }
1188
1189 dev_set_drvdata(channel->dev, channel);
1190 nattrs = sizeof(poch_class_attrs)/sizeof(poch_class_attrs[0]);
1191 for (j = 0; j < nattrs; j++) {
1192 ret = device_create_file(channel->dev,
1193 poch_class_attrs[j]);
1194 if (ret) {
1195				dev_err(dev, "error creating attribute file\n");
1196 poch_class_dev_unregister(poch_dev, id);
1197 return ret;
1198 }
1199 }
1200 }
1201
1202 return 0;
1203}
1204
1205static int __devinit poch_pci_probe(struct pci_dev *pdev,
1206 const struct pci_device_id *pci_id)
1207{
1208 struct device *dev = &pdev->dev;
1209 struct poch_dev *poch_dev;
1210 struct uio_info *uio;
1211 int ret;
1212 int id;
1213 int i;
1214
1215 poch_dev = kzalloc(sizeof(struct poch_dev), GFP_KERNEL);
1216 if (!poch_dev) {
1217 dev_err(dev, "error allocating priv. data memory\n");
1218 return -ENOMEM;
1219 }
1220
1221 poch_dev->pci_dev = pdev;
1222 uio = &poch_dev->uio;
1223
1224 pci_set_drvdata(pdev, poch_dev);
1225
1226 spin_lock_init(&poch_dev->iomem_lock);
1227
1228 poch_dev->nchannels = POCH_NCHANNELS;
1229 poch_dev->channels[CHNO_RX_CHANNEL].dir = CHANNEL_DIR_RX;
1230 poch_dev->channels[CHNO_TX_CHANNEL].dir = CHANNEL_DIR_TX;
1231
1232 for (i = 0; i < poch_dev->nchannels; i++) {
1233 init_waitqueue_head(&poch_dev->channels[i].wq);
1234 atomic_set(&poch_dev->channels[i].free, 1);
1235 atomic_set(&poch_dev->channels[i].inited, 0);
1236 }
1237
1238 ret = pci_enable_device(pdev);
1239 if (ret) {
1240 dev_err(dev, "error enabling device\n");
1241 goto out_free;
1242 }
1243
1244 ret = pci_request_regions(pdev, "poch");
1245 if (ret) {
1246 dev_err(dev, "error requesting resources\n");
1247 goto out_disable;
1248 }
1249
1250 uio->mem[0].addr = pci_resource_start(pdev, 1);
1251 if (!uio->mem[0].addr) {
1252 dev_err(dev, "invalid BAR1\n");
1253 ret = -ENODEV;
1254 goto out_release;
1255 }
1256
1257 uio->mem[0].size = pci_resource_len(pdev, 1);
1258 uio->mem[0].memtype = UIO_MEM_PHYS;
1259
1260 uio->name = "poch";
1261 uio->version = "0.0.1";
1262 uio->irq = -1;
1263 ret = uio_register_device(dev, uio);
1264 if (ret) {
1265		dev_err(dev, "error registering UIO device: %d\n", ret);
1266 goto out_release;
1267 }
1268
1269 poch_dev->bridge_iomem = ioremap(pci_resource_start(pdev, 0),
1270 pci_resource_len(pdev, 0));
1271 if (poch_dev->bridge_iomem == NULL) {
1272 dev_err(dev, "error mapping bridge (bar0) registers\n");
1273 ret = -ENOMEM;
1274 goto out_uio_unreg;
1275 }
1276
1277 poch_dev->fpga_iomem = ioremap(pci_resource_start(pdev, 1),
1278 pci_resource_len(pdev, 1));
1279 if (poch_dev->fpga_iomem == NULL) {
1280 dev_err(dev, "error mapping fpga (bar1) registers\n");
1281 ret = -ENOMEM;
1282 goto out_bar0_unmap;
1283 }
1284
1285 ret = request_irq(pdev->irq, poch_irq_handler, IRQF_SHARED,
1286 dev->bus_id, poch_dev);
1287 if (ret) {
1288 dev_err(dev, "error requesting IRQ %u\n", pdev->irq);
1289 ret = -ENOMEM;
1290 goto out_bar1_unmap;
1291 }
1292
1293 if (!idr_pre_get(&poch_ids, GFP_KERNEL)) {
1294 dev_err(dev, "error allocating memory ids\n");
1295 ret = -ENOMEM;
1296 goto out_free_irq;
1297 }
1298
1299 idr_get_new(&poch_ids, poch_dev, &id);
1300 if (id >= MAX_POCH_CARDS) {
1301 dev_err(dev, "minors exhausted\n");
1302 ret = -EBUSY;
1303 goto out_free_irq;
1304 }
1305
1306 cdev_init(&poch_dev->cdev, &poch_fops);
1307 poch_dev->cdev.owner = THIS_MODULE;
1308 ret = cdev_add(&poch_dev->cdev,
1309 poch_first_dev + (id * poch_dev->nchannels),
1310 poch_dev->nchannels);
1311 if (ret) {
1312		dev_err(dev, "error registering character device\n");
1313 goto out_idr_remove;
1314 }
1315
1316 ret = poch_class_dev_register(poch_dev, id);
1317 if (ret)
1318 goto out_cdev_del;
1319
1320 return 0;
1321
1322 out_cdev_del:
1323 cdev_del(&poch_dev->cdev);
1324 out_idr_remove:
1325 idr_remove(&poch_ids, id);
1326 out_free_irq:
1327 free_irq(pdev->irq, poch_dev);
1328 out_bar1_unmap:
1329 iounmap(poch_dev->fpga_iomem);
1330 out_bar0_unmap:
1331 iounmap(poch_dev->bridge_iomem);
1332 out_uio_unreg:
1333 uio_unregister_device(uio);
1334 out_release:
1335 pci_release_regions(pdev);
1336 out_disable:
1337 pci_disable_device(pdev);
1338 out_free:
1339 kfree(poch_dev);
1340 return ret;
1341}
1342
1343/*
1344 * FIXME: We do not yet handle the hot unplug case.
1345 */
1346static void poch_pci_remove(struct pci_dev *pdev)
1347{
1348 struct poch_dev *poch_dev = pci_get_drvdata(pdev);
1349 struct uio_info *uio = &poch_dev->uio;
1350 unsigned int minor = MINOR(poch_dev->cdev.dev);
1351 unsigned int id = minor / poch_dev->nchannels;
1352
1353 /* FIXME: unmap fpga_iomem and bridge_iomem */
1354
1355 poch_class_dev_unregister(poch_dev, id);
1356 cdev_del(&poch_dev->cdev);
1357 idr_remove(&poch_ids, id);
1358 free_irq(pdev->irq, poch_dev);
1359 uio_unregister_device(uio);
1360 pci_release_regions(pdev);
1361 pci_disable_device(pdev);
1362 pci_set_drvdata(pdev, NULL);
1363 iounmap(uio->mem[0].internal_addr);
1364
1365 kfree(poch_dev);
1366}
1367
1368static const struct pci_device_id poch_pci_ids[] /* __devinitconst */ = {
1369 { PCI_DEVICE(PCI_VENDOR_ID_RRAPIDS,
1370 PCI_DEVICE_ID_RRAPIDS_POCKET_CHANGE) },
1371 { 0, }
1372};
1373
1374static struct pci_driver poch_pci_driver = {
1375 .name = DRV_NAME,
1376 .id_table = poch_pci_ids,
1377 .probe = poch_pci_probe,
1378 .remove = poch_pci_remove,
1379};
1380
1381static int __init poch_init_module(void)
1382{
1383 int ret = 0;
1384
1385 ret = alloc_chrdev_region(&poch_first_dev, 0,
1386 MAX_POCH_DEVICES, DRV_NAME);
1387 if (ret) {
1388		printk(KERN_ERR PFX "error allocating device numbers\n");
1389 return ret;
1390 }
1391
1392 poch_cls = class_create(THIS_MODULE, "pocketchange");
1393 if (IS_ERR(poch_cls)) {
1394 ret = PTR_ERR(poch_cls);
1395 goto out_unreg_chrdev;
1396 }
1397
1398 ret = pci_register_driver(&poch_pci_driver);
1399 if (ret) {
1400		printk(KERN_ERR PFX "error registering PCI driver\n");
1401 goto out_class_destroy;
1402 }
1403
1404 return 0;
1405
1406 out_class_destroy:
1407 class_destroy(poch_cls);
1408
1409 out_unreg_chrdev:
1410 unregister_chrdev_region(poch_first_dev, MAX_POCH_DEVICES);
1411
1412 return ret;
1413}
1414
1415static void __exit poch_exit_module(void)
1416{
1417 pci_unregister_driver(&poch_pci_driver);
1418 class_destroy(poch_cls);
1419 unregister_chrdev_region(poch_first_dev, MAX_POCH_DEVICES);
1420}
1421
1422module_init(poch_init_module);
1423module_exit(poch_exit_module);
1424
1425MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/poch/poch.h b/drivers/staging/poch/poch.h
new file mode 100644
index 000000000000..51a2d145798e
--- /dev/null
+++ b/drivers/staging/poch/poch.h
@@ -0,0 +1,29 @@
1/*
2 * User-space DMA and UIO based Redrapids Pocket Change CardBus driver
3 *
4 * Copyright 2008 Vijay Kumar <vijaykumar@bravegnu.org>
5 *
6 * Part of userspace API. Should be moved to a header file in
7 * include/linux for final version.
8 *
9 */
10struct poch_cbuf_header {
11 __s32 group_size_bytes;
12 __s32 group_count;
13 __s32 group_offsets[0];
14};
15
16struct poch_counters {
17 __u32 fifo_empty;
18 __u32 fifo_overflow;
19 __u32 pll_unlock;
20};
21
22#define POCH_IOC_NUM '9'
23
24#define POCH_IOC_TRANSFER_START _IO(POCH_IOC_NUM, 0)
25#define POCH_IOC_TRANSFER_STOP _IO(POCH_IOC_NUM, 1)
26#define POCH_IOC_GET_COUNTERS _IOR(POCH_IOC_NUM, 2, \
27 struct poch_counters)
28#define POCH_IOC_SYNC_GROUP_FOR_USER _IO(POCH_IOC_NUM, 3)
29#define POCH_IOC_SYNC_GROUP_FOR_DEVICE _IO(POCH_IOC_NUM, 4)
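
The ioctl definitions above are the driver's entire user-space API. As a minimal, hypothetical sketch (not part of this commit), the following program shows how user space might read and clear one channel's error counters with POCH_IOC_GET_COUNTERS; the device node path /dev/ch0 is an assumption, since the real name depends on how udev maps the "ch%d" class devices the driver creates.

/* Hypothetical user-space example; assumes poch.h is on the include path. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include "poch.h"

int main(void)
{
	struct poch_counters counters;
	int fd = open("/dev/ch0", O_RDWR);	/* assumed node for channel 0 */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* The driver copies the counters out and then zeroes its own copy. */
	if (ioctl(fd, POCH_IOC_GET_COUNTERS, &counters) < 0) {
		perror("POCH_IOC_GET_COUNTERS");
		close(fd);
		return 1;
	}

	printf("fifo_empty=%u fifo_overflow=%u pll_unlock=%u\n",
	       counters.fifo_empty, counters.fifo_overflow,
	       counters.pll_unlock);

	close(fd);
	return 0;
}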
diff --git a/drivers/staging/slicoss/slicoss.c b/drivers/staging/slicoss/slicoss.c
index b61ac4b2db9e..8fa9490b3e2c 100644
--- a/drivers/staging/slicoss/slicoss.c
+++ b/drivers/staging/slicoss/slicoss.c
@@ -54,7 +54,6 @@
54 * IS-NIC driver. 54 * IS-NIC driver.
55 */ 55 */
56 56
57#include <linux/version.h>
58 57
59#define SLIC_DUMP_ENABLED 0 58#define SLIC_DUMP_ENABLED 0
60#define KLUDGE_FOR_4GB_BOUNDARY 1 59#define KLUDGE_FOR_4GB_BOUNDARY 1
@@ -96,17 +95,9 @@
96#include <linux/moduleparam.h> 95#include <linux/moduleparam.h>
97 96
98#include <linux/types.h> 97#include <linux/types.h>
99#include <linux/slab.h>
100#include <linux/delay.h>
101#include <linux/init.h>
102#include <linux/pci.h>
103#include <linux/dma-mapping.h> 98#include <linux/dma-mapping.h>
104#include <linux/netdevice.h>
105#include <linux/etherdevice.h>
106#include <linux/mii.h> 99#include <linux/mii.h>
107#include <linux/if_vlan.h> 100#include <linux/if_vlan.h>
108#include <linux/skbuff.h>
109#include <linux/string.h>
110#include <asm/unaligned.h> 101#include <asm/unaligned.h>
111 102
112#include <linux/ethtool.h> 103#include <linux/ethtool.h>
@@ -275,7 +266,6 @@ static void slic_dbg_register_trace(struct adapter *adapter,
275 card->reg_value[i], card->reg_valueh[i]); 266 card->reg_value[i], card->reg_valueh[i]);
276 } 267 }
277} 268}
278}
279#endif 269#endif
280 270
281static void slic_init_adapter(struct net_device *netdev, 271static void slic_init_adapter(struct net_device *netdev,
@@ -606,6 +596,7 @@ static void __devexit slic_entry_remove(struct pci_dev *pcidev)
606 uint mmio_len = 0; 596 uint mmio_len = 0;
607 struct adapter *adapter = (struct adapter *) netdev_priv(dev); 597 struct adapter *adapter = (struct adapter *) netdev_priv(dev);
608 struct sliccard *card; 598 struct sliccard *card;
599 struct mcast_address *mcaddr, *mlist;
609 600
610 ASSERT(adapter); 601 ASSERT(adapter);
611 DBG_MSG("slicoss: %s ENTER dev[%p] adapter[%p]\n", __func__, dev, 602 DBG_MSG("slicoss: %s ENTER dev[%p] adapter[%p]\n", __func__, dev,
@@ -625,6 +616,13 @@ static void __devexit slic_entry_remove(struct pci_dev *pcidev)
625 DBG_MSG("slicoss: %s iounmap dev->base_addr[%x]\n", __func__, 616 DBG_MSG("slicoss: %s iounmap dev->base_addr[%x]\n", __func__,
626 (uint) dev->base_addr); 617 (uint) dev->base_addr);
627 iounmap((void __iomem *)dev->base_addr); 618 iounmap((void __iomem *)dev->base_addr);
619 /* free multicast addresses */
620 mlist = adapter->mcastaddrs;
621 while (mlist) {
622 mcaddr = mlist;
623 mlist = mlist->next;
624 kfree(mcaddr);
625 }
628 ASSERT(adapter->card); 626 ASSERT(adapter->card);
629 card = adapter->card; 627 card = adapter->card;
630 ASSERT(card->adapters_allocated); 628 ASSERT(card->adapters_allocated);
diff --git a/drivers/staging/sxg/README b/drivers/staging/sxg/README
index 4d1ddbe4c335..d514d1848803 100644
--- a/drivers/staging/sxg/README
+++ b/drivers/staging/sxg/README
@@ -7,6 +7,7 @@ TODO:
7 - remove wrappers 7 - remove wrappers
8 - checkpatch.pl cleanups 8 - checkpatch.pl cleanups
9 - new functionality that the card needs 9 - new functionality that the card needs
10 - remove reliance on x86
10 11
11Please send patches to: 12Please send patches to:
12 Greg Kroah-Hartman <gregkh@suse.de> 13 Greg Kroah-Hartman <gregkh@suse.de>
diff --git a/drivers/staging/sxg/sxg.c b/drivers/staging/sxg/sxg.c
index 6ccbee875ab3..5272a18e2043 100644
--- a/drivers/staging/sxg/sxg.c
+++ b/drivers/staging/sxg/sxg.c
@@ -112,12 +112,16 @@ static bool sxg_mac_filter(p_adapter_t adapter,
112static struct net_device_stats *sxg_get_stats(p_net_device dev); 112static struct net_device_stats *sxg_get_stats(p_net_device dev);
113#endif 113#endif
114 114
115#define XXXTODO 0
116
117#if XXXTODO
115static int sxg_mac_set_address(p_net_device dev, void *ptr); 118static int sxg_mac_set_address(p_net_device dev, void *ptr);
119static void sxg_mcast_set_list(p_net_device dev);
120#endif
116 121
117static void sxg_adapter_set_hwaddr(p_adapter_t adapter); 122static void sxg_adapter_set_hwaddr(p_adapter_t adapter);
118 123
119static void sxg_unmap_mmio_space(p_adapter_t adapter); 124static void sxg_unmap_mmio_space(p_adapter_t adapter);
120static void sxg_mcast_set_mask(p_adapter_t adapter);
121 125
122static int sxg_initialize_adapter(p_adapter_t adapter); 126static int sxg_initialize_adapter(p_adapter_t adapter);
123static void sxg_stock_rcv_buffers(p_adapter_t adapter); 127static void sxg_stock_rcv_buffers(p_adapter_t adapter);
@@ -132,9 +136,6 @@ static int sxg_write_mdio_reg(p_adapter_t adapter,
132 u32 DevAddr, u32 RegAddr, u32 Value); 136 u32 DevAddr, u32 RegAddr, u32 Value);
133static int sxg_read_mdio_reg(p_adapter_t adapter, 137static int sxg_read_mdio_reg(p_adapter_t adapter,
134 u32 DevAddr, u32 RegAddr, u32 *pValue); 138 u32 DevAddr, u32 RegAddr, u32 *pValue);
135static void sxg_mcast_set_list(p_net_device dev);
136
137#define XXXTODO 0
138 139
139static unsigned int sxg_first_init = 1; 140static unsigned int sxg_first_init = 1;
140static char *sxg_banner = 141static char *sxg_banner =
@@ -202,7 +203,7 @@ static void sxg_init_driver(void)
202{ 203{
203 if (sxg_first_init) { 204 if (sxg_first_init) {
204 DBG_ERROR("sxg: %s sxg_first_init set jiffies[%lx]\n", 205 DBG_ERROR("sxg: %s sxg_first_init set jiffies[%lx]\n",
205 __FUNCTION__, jiffies); 206 __func__, jiffies);
206 sxg_first_init = 0; 207 sxg_first_init = 0;
207 spin_lock_init(&sxg_global.driver_lock); 208 spin_lock_init(&sxg_global.driver_lock);
208 } 209 }
@@ -223,7 +224,7 @@ static void sxg_dbg_macaddrs(p_adapter_t adapter)
223 return; 224 return;
224} 225}
225 226
226// SXG Globals 227/* SXG Globals */
227static SXG_DRIVER SxgDriver; 228static SXG_DRIVER SxgDriver;
228 229
229#ifdef ATKDBG 230#ifdef ATKDBG
@@ -250,7 +251,7 @@ static bool sxg_download_microcode(p_adapter_t adapter, SXG_UCODE_SEL UcodeSel)
250 u32 ThisSectionSize; 251 u32 ThisSectionSize;
251 u32 *Instruction = NULL; 252 u32 *Instruction = NULL;
252 u32 BaseAddress, AddressOffset, Address; 253 u32 BaseAddress, AddressOffset, Address;
253// u32 Failure; 254/* u32 Failure; */
254 u32 ValueRead; 255 u32 ValueRead;
255 u32 i; 256 u32 i;
256 u32 numSections = 0; 257 u32 numSections = 0;
@@ -259,10 +260,10 @@ static bool sxg_download_microcode(p_adapter_t adapter, SXG_UCODE_SEL UcodeSel)
259 260
260 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DnldUcod", 261 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DnldUcod",
261 adapter, 0, 0, 0); 262 adapter, 0, 0, 0);
262 DBG_ERROR("sxg: %s ENTER\n", __FUNCTION__); 263 DBG_ERROR("sxg: %s ENTER\n", __func__);
263 264
264 switch (UcodeSel) { 265 switch (UcodeSel) {
265 case SXG_UCODE_SAHARA: // Sahara operational ucode 266 case SXG_UCODE_SAHARA: /* Sahara operational ucode */
266 numSections = SNumSections; 267 numSections = SNumSections;
267 for (i = 0; i < numSections; i++) { 268 for (i = 0; i < numSections; i++) {
268 sectionSize[i] = SSectionSize[i]; 269 sectionSize[i] = SSectionSize[i];
@@ -276,13 +277,13 @@ static bool sxg_download_microcode(p_adapter_t adapter, SXG_UCODE_SEL UcodeSel)
276 } 277 }
277 278
278 DBG_ERROR("sxg: RESET THE CARD\n"); 279 DBG_ERROR("sxg: RESET THE CARD\n");
279 // First, reset the card 280 /* First, reset the card */
280 WRITE_REG(HwRegs->Reset, 0xDEAD, FLUSH); 281 WRITE_REG(HwRegs->Reset, 0xDEAD, FLUSH);
281 282
282 // Download each section of the microcode as specified in 283 /* Download each section of the microcode as specified in */
283 // its download file. The *download.c file is generated using 284 /* its download file. The *download.c file is generated using */
284 // the saharaobjtoc facility which converts the metastep .obj 285 /* the saharaobjtoc facility which converts the metastep .obj */
285 // file to a .c file which contains a two dimentional array. 286 /* file to a .c file which contains a two dimentional array. */
286 for (Section = 0; Section < numSections; Section++) { 287 for (Section = 0; Section < numSections; Section++) {
287 DBG_ERROR("sxg: SECTION # %d\n", Section); 288 DBG_ERROR("sxg: SECTION # %d\n", Section);
288 switch (UcodeSel) { 289 switch (UcodeSel) {
@@ -294,35 +295,35 @@ static bool sxg_download_microcode(p_adapter_t adapter, SXG_UCODE_SEL UcodeSel)
294 break; 295 break;
295 } 296 }
296 BaseAddress = sectionStart[Section]; 297 BaseAddress = sectionStart[Section];
297 ThisSectionSize = sectionSize[Section] / 12; // Size in instructions 298 ThisSectionSize = sectionSize[Section] / 12; /* Size in instructions */
298 for (AddressOffset = 0; AddressOffset < ThisSectionSize; 299 for (AddressOffset = 0; AddressOffset < ThisSectionSize;
299 AddressOffset++) { 300 AddressOffset++) {
300 Address = BaseAddress + AddressOffset; 301 Address = BaseAddress + AddressOffset;
301 ASSERT((Address & ~MICROCODE_ADDRESS_MASK) == 0); 302 ASSERT((Address & ~MICROCODE_ADDRESS_MASK) == 0);
302 // Write instruction bits 31 - 0 303 /* Write instruction bits 31 - 0 */
303 WRITE_REG(HwRegs->UcodeDataLow, *Instruction, FLUSH); 304 WRITE_REG(HwRegs->UcodeDataLow, *Instruction, FLUSH);
304 // Write instruction bits 63-32 305 /* Write instruction bits 63-32 */
305 WRITE_REG(HwRegs->UcodeDataMiddle, *(Instruction + 1), 306 WRITE_REG(HwRegs->UcodeDataMiddle, *(Instruction + 1),
306 FLUSH); 307 FLUSH);
307 // Write instruction bits 95-64 308 /* Write instruction bits 95-64 */
308 WRITE_REG(HwRegs->UcodeDataHigh, *(Instruction + 2), 309 WRITE_REG(HwRegs->UcodeDataHigh, *(Instruction + 2),
309 FLUSH); 310 FLUSH);
310 // Write instruction address with the WRITE bit set 311 /* Write instruction address with the WRITE bit set */
311 WRITE_REG(HwRegs->UcodeAddr, 312 WRITE_REG(HwRegs->UcodeAddr,
312 (Address | MICROCODE_ADDRESS_WRITE), FLUSH); 313 (Address | MICROCODE_ADDRESS_WRITE), FLUSH);
313 // Sahara bug in the ucode download logic - the write to DataLow 314 /* Sahara bug in the ucode download logic - the write to DataLow */
314 // for the next instruction could get corrupted. To avoid this, 315 /* for the next instruction could get corrupted. To avoid this, */
315 // write to DataLow again for this instruction (which may get 316 /* write to DataLow again for this instruction (which may get */
316 // corrupted, but it doesn't matter), then increment the address 317 /* corrupted, but it doesn't matter), then increment the address */
317 // and write the data for the next instruction to DataLow. That 318 /* and write the data for the next instruction to DataLow. That */
318 // write should succeed. 319 /* write should succeed. */
319 WRITE_REG(HwRegs->UcodeDataLow, *Instruction, TRUE); 320 WRITE_REG(HwRegs->UcodeDataLow, *Instruction, TRUE);
320 // Advance 3 u32S to start of next instruction 321 /* Advance 3 u32S to start of next instruction */
321 Instruction += 3; 322 Instruction += 3;
322 } 323 }
323 } 324 }
324 // Now repeat the entire operation reading the instruction back and 325 /* Now repeat the entire operation reading the instruction back and */
325 // checking for parity errors 326 /* checking for parity errors */
326 for (Section = 0; Section < numSections; Section++) { 327 for (Section = 0; Section < numSections; Section++) {
327 DBG_ERROR("sxg: check SECTION # %d\n", Section); 328 DBG_ERROR("sxg: check SECTION # %d\n", Section);
328 switch (UcodeSel) { 329 switch (UcodeSel) {
@@ -334,74 +335,74 @@ static bool sxg_download_microcode(p_adapter_t adapter, SXG_UCODE_SEL UcodeSel)
334 break; 335 break;
335 } 336 }
336 BaseAddress = sectionStart[Section]; 337 BaseAddress = sectionStart[Section];
337 ThisSectionSize = sectionSize[Section] / 12; // Size in instructions 338 ThisSectionSize = sectionSize[Section] / 12; /* Size in instructions */
338 for (AddressOffset = 0; AddressOffset < ThisSectionSize; 339 for (AddressOffset = 0; AddressOffset < ThisSectionSize;
339 AddressOffset++) { 340 AddressOffset++) {
340 Address = BaseAddress + AddressOffset; 341 Address = BaseAddress + AddressOffset;
341 // Write the address with the READ bit set 342 /* Write the address with the READ bit set */
342 WRITE_REG(HwRegs->UcodeAddr, 343 WRITE_REG(HwRegs->UcodeAddr,
343 (Address | MICROCODE_ADDRESS_READ), FLUSH); 344 (Address | MICROCODE_ADDRESS_READ), FLUSH);
344 // Read it back and check parity bit. 345 /* Read it back and check parity bit. */
345 READ_REG(HwRegs->UcodeAddr, ValueRead); 346 READ_REG(HwRegs->UcodeAddr, ValueRead);
346 if (ValueRead & MICROCODE_ADDRESS_PARITY) { 347 if (ValueRead & MICROCODE_ADDRESS_PARITY) {
347 DBG_ERROR("sxg: %s PARITY ERROR\n", 348 DBG_ERROR("sxg: %s PARITY ERROR\n",
348 __FUNCTION__); 349 __func__);
349 350
350 return (FALSE); // Parity error 351 return (FALSE); /* Parity error */
351 } 352 }
352 ASSERT((ValueRead & MICROCODE_ADDRESS_MASK) == Address); 353 ASSERT((ValueRead & MICROCODE_ADDRESS_MASK) == Address);
353 // Read the instruction back and compare 354 /* Read the instruction back and compare */
354 READ_REG(HwRegs->UcodeDataLow, ValueRead); 355 READ_REG(HwRegs->UcodeDataLow, ValueRead);
355 if (ValueRead != *Instruction) { 356 if (ValueRead != *Instruction) {
356 DBG_ERROR("sxg: %s MISCOMPARE LOW\n", 357 DBG_ERROR("sxg: %s MISCOMPARE LOW\n",
357 __FUNCTION__); 358 __func__);
358 return (FALSE); // Miscompare 359 return (FALSE); /* Miscompare */
359 } 360 }
360 READ_REG(HwRegs->UcodeDataMiddle, ValueRead); 361 READ_REG(HwRegs->UcodeDataMiddle, ValueRead);
361 if (ValueRead != *(Instruction + 1)) { 362 if (ValueRead != *(Instruction + 1)) {
362 DBG_ERROR("sxg: %s MISCOMPARE MIDDLE\n", 363 DBG_ERROR("sxg: %s MISCOMPARE MIDDLE\n",
363 __FUNCTION__); 364 __func__);
364 return (FALSE); // Miscompare 365 return (FALSE); /* Miscompare */
365 } 366 }
366 READ_REG(HwRegs->UcodeDataHigh, ValueRead); 367 READ_REG(HwRegs->UcodeDataHigh, ValueRead);
367 if (ValueRead != *(Instruction + 2)) { 368 if (ValueRead != *(Instruction + 2)) {
368 DBG_ERROR("sxg: %s MISCOMPARE HIGH\n", 369 DBG_ERROR("sxg: %s MISCOMPARE HIGH\n",
369 __FUNCTION__); 370 __func__);
370 return (FALSE); // Miscompare 371 return (FALSE); /* Miscompare */
371 } 372 }
372 // Advance 3 u32S to start of next instruction 373 /* Advance 3 u32S to start of next instruction */
373 Instruction += 3; 374 Instruction += 3;
374 } 375 }
375 } 376 }
376 377
377 // Everything OK, Go. 378 /* Everything OK, Go. */
378 WRITE_REG(HwRegs->UcodeAddr, MICROCODE_ADDRESS_GO, FLUSH); 379 WRITE_REG(HwRegs->UcodeAddr, MICROCODE_ADDRESS_GO, FLUSH);
379 380
380 // Poll the CardUp register to wait for microcode to initialize 381 /* Poll the CardUp register to wait for microcode to initialize */
381 // Give up after 10,000 attemps (500ms). 382 /* Give up after 10,000 attemps (500ms). */
382 for (i = 0; i < 10000; i++) { 383 for (i = 0; i < 10000; i++) {
383 udelay(50); 384 udelay(50);
384 READ_REG(adapter->UcodeRegs[0].CardUp, ValueRead); 385 READ_REG(adapter->UcodeRegs[0].CardUp, ValueRead);
385 if (ValueRead == 0xCAFE) { 386 if (ValueRead == 0xCAFE) {
386 DBG_ERROR("sxg: %s BOO YA 0xCAFE\n", __FUNCTION__); 387 DBG_ERROR("sxg: %s BOO YA 0xCAFE\n", __func__);
387 break; 388 break;
388 } 389 }
389 } 390 }
390 if (i == 10000) { 391 if (i == 10000) {
391 DBG_ERROR("sxg: %s TIMEOUT\n", __FUNCTION__); 392 DBG_ERROR("sxg: %s TIMEOUT\n", __func__);
392 393
393 return (FALSE); // Timeout 394 return (FALSE); /* Timeout */
394 } 395 }
395 // Now write the LoadSync register. This is used to 396 /* Now write the LoadSync register. This is used to */
396 // synchronize with the card so it can scribble on the memory 397 /* synchronize with the card so it can scribble on the memory */
397 // that contained 0xCAFE from the "CardUp" step above 398 /* that contained 0xCAFE from the "CardUp" step above */
398 if (UcodeSel == SXG_UCODE_SAHARA) { 399 if (UcodeSel == SXG_UCODE_SAHARA) {
399 WRITE_REG(adapter->UcodeRegs[0].LoadSync, 0, FLUSH); 400 WRITE_REG(adapter->UcodeRegs[0].LoadSync, 0, FLUSH);
400 } 401 }
401 402
402 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDnldUcd", 403 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDnldUcd",
403 adapter, 0, 0, 0); 404 adapter, 0, 0, 0);
404 DBG_ERROR("sxg: %s EXIT\n", __FUNCTION__); 405 DBG_ERROR("sxg: %s EXIT\n", __func__);
405 406
406 return (TRUE); 407 return (TRUE);
407} 408}
@@ -420,29 +421,29 @@ static int sxg_allocate_resources(p_adapter_t adapter)
420 int status; 421 int status;
421 u32 i; 422 u32 i;
422 u32 RssIds, IsrCount; 423 u32 RssIds, IsrCount;
423// PSXG_XMT_RING XmtRing; 424/* PSXG_XMT_RING XmtRing; */
424// PSXG_RCV_RING RcvRing; 425/* PSXG_RCV_RING RcvRing; */
425 426
426 DBG_ERROR("%s ENTER\n", __FUNCTION__); 427 DBG_ERROR("%s ENTER\n", __func__);
427 428
428 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocRes", 429 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocRes",
429 adapter, 0, 0, 0); 430 adapter, 0, 0, 0);
430 431
431 // Windows tells us how many CPUs it plans to use for 432 /* Windows tells us how many CPUs it plans to use for */
432 // RSS 433 /* RSS */
433 RssIds = SXG_RSS_CPU_COUNT(adapter); 434 RssIds = SXG_RSS_CPU_COUNT(adapter);
434 IsrCount = adapter->MsiEnabled ? RssIds : 1; 435 IsrCount = adapter->MsiEnabled ? RssIds : 1;
435 436
436 DBG_ERROR("%s Setup the spinlocks\n", __FUNCTION__); 437 DBG_ERROR("%s Setup the spinlocks\n", __func__);
437 438
438 // Allocate spinlocks and initialize listheads first. 439 /* Allocate spinlocks and initialize listheads first. */
439 spin_lock_init(&adapter->RcvQLock); 440 spin_lock_init(&adapter->RcvQLock);
440 spin_lock_init(&adapter->SglQLock); 441 spin_lock_init(&adapter->SglQLock);
441 spin_lock_init(&adapter->XmtZeroLock); 442 spin_lock_init(&adapter->XmtZeroLock);
442 spin_lock_init(&adapter->Bit64RegLock); 443 spin_lock_init(&adapter->Bit64RegLock);
443 spin_lock_init(&adapter->AdapterLock); 444 spin_lock_init(&adapter->AdapterLock);
444 445
445 DBG_ERROR("%s Setup the lists\n", __FUNCTION__); 446 DBG_ERROR("%s Setup the lists\n", __func__);
446 447
447 InitializeListHead(&adapter->FreeRcvBuffers); 448 InitializeListHead(&adapter->FreeRcvBuffers);
448 InitializeListHead(&adapter->FreeRcvBlocks); 449 InitializeListHead(&adapter->FreeRcvBlocks);
@@ -450,39 +451,39 @@ static int sxg_allocate_resources(p_adapter_t adapter)
450 InitializeListHead(&adapter->FreeSglBuffers); 451 InitializeListHead(&adapter->FreeSglBuffers);
451 InitializeListHead(&adapter->AllSglBuffers); 452 InitializeListHead(&adapter->AllSglBuffers);
452 453
453 // Mark these basic allocations done. This flags essentially 454 /* Mark these basic allocations done. This flags essentially */
454 // tells the SxgFreeResources routine that it can grab spinlocks 455 /* tells the SxgFreeResources routine that it can grab spinlocks */
455 // and reference listheads. 456 /* and reference listheads. */
456 adapter->BasicAllocations = TRUE; 457 adapter->BasicAllocations = TRUE;
457 // Main allocation loop. Start with the maximum supported by 458 /* Main allocation loop. Start with the maximum supported by */
458 // the microcode and back off if memory allocation 459 /* the microcode and back off if memory allocation */
459 // fails. If we hit a minimum, fail. 460 /* fails. If we hit a minimum, fail. */
460 461
461 for (;;) { 462 for (;;) {
462 DBG_ERROR("%s Allocate XmtRings size[%lx]\n", __FUNCTION__, 463 DBG_ERROR("%s Allocate XmtRings size[%x]\n", __func__,
463 (sizeof(SXG_XMT_RING) * 1)); 464 (unsigned int)(sizeof(SXG_XMT_RING) * 1));
464 465
465 // Start with big items first - receive and transmit rings. At the moment 466 /* Start with big items first - receive and transmit rings. At the moment */
466 // I'm going to keep the ring size fixed and adjust the number of 467 /* I'm going to keep the ring size fixed and adjust the number of */
467 // TCBs if we fail. Later we might consider reducing the ring size as well.. 468 /* TCBs if we fail. Later we might consider reducing the ring size as well.. */
468 adapter->XmtRings = pci_alloc_consistent(adapter->pcidev, 469 adapter->XmtRings = pci_alloc_consistent(adapter->pcidev,
469 sizeof(SXG_XMT_RING) * 470 sizeof(SXG_XMT_RING) *
470 1, 471 1,
471 &adapter->PXmtRings); 472 &adapter->PXmtRings);
472 DBG_ERROR("%s XmtRings[%p]\n", __FUNCTION__, adapter->XmtRings); 473 DBG_ERROR("%s XmtRings[%p]\n", __func__, adapter->XmtRings);
473 474
474 if (!adapter->XmtRings) { 475 if (!adapter->XmtRings) {
475 goto per_tcb_allocation_failed; 476 goto per_tcb_allocation_failed;
476 } 477 }
477 memset(adapter->XmtRings, 0, sizeof(SXG_XMT_RING) * 1); 478 memset(adapter->XmtRings, 0, sizeof(SXG_XMT_RING) * 1);
478 479
479 DBG_ERROR("%s Allocate RcvRings size[%lx]\n", __FUNCTION__, 480 DBG_ERROR("%s Allocate RcvRings size[%x]\n", __func__,
480 (sizeof(SXG_RCV_RING) * 1)); 481 (unsigned int)(sizeof(SXG_RCV_RING) * 1));
481 adapter->RcvRings = 482 adapter->RcvRings =
482 pci_alloc_consistent(adapter->pcidev, 483 pci_alloc_consistent(adapter->pcidev,
483 sizeof(SXG_RCV_RING) * 1, 484 sizeof(SXG_RCV_RING) * 1,
484 &adapter->PRcvRings); 485 &adapter->PRcvRings);
485 DBG_ERROR("%s RcvRings[%p]\n", __FUNCTION__, adapter->RcvRings); 486 DBG_ERROR("%s RcvRings[%p]\n", __func__, adapter->RcvRings);
486 if (!adapter->RcvRings) { 487 if (!adapter->RcvRings) {
487 goto per_tcb_allocation_failed; 488 goto per_tcb_allocation_failed;
488 } 489 }
@@ -490,7 +491,7 @@ static int sxg_allocate_resources(p_adapter_t adapter)
490 break; 491 break;
491 492
492 per_tcb_allocation_failed: 493 per_tcb_allocation_failed:
493 // an allocation failed. Free any successful allocations. 494 /* an allocation failed. Free any successful allocations. */
494 if (adapter->XmtRings) { 495 if (adapter->XmtRings) {
495 pci_free_consistent(adapter->pcidev, 496 pci_free_consistent(adapter->pcidev,
496 sizeof(SXG_XMT_RING) * 4096, 497 sizeof(SXG_XMT_RING) * 4096,
@@ -505,22 +506,22 @@ static int sxg_allocate_resources(p_adapter_t adapter)
505 adapter->PRcvRings); 506 adapter->PRcvRings);
506 adapter->RcvRings = NULL; 507 adapter->RcvRings = NULL;
507 } 508 }
508 // Loop around and try again.... 509 /* Loop around and try again.... */
509 } 510 }
510 511
511 DBG_ERROR("%s Initialize RCV ZERO and XMT ZERO rings\n", __FUNCTION__); 512 DBG_ERROR("%s Initialize RCV ZERO and XMT ZERO rings\n", __func__);
512 // Initialize rcv zero and xmt zero rings 513 /* Initialize rcv zero and xmt zero rings */
513 SXG_INITIALIZE_RING(adapter->RcvRingZeroInfo, SXG_RCV_RING_SIZE); 514 SXG_INITIALIZE_RING(adapter->RcvRingZeroInfo, SXG_RCV_RING_SIZE);
514 SXG_INITIALIZE_RING(adapter->XmtRingZeroInfo, SXG_XMT_RING_SIZE); 515 SXG_INITIALIZE_RING(adapter->XmtRingZeroInfo, SXG_XMT_RING_SIZE);
515 516
516 // Sanity check receive data structure format 517 /* Sanity check receive data structure format */
517 ASSERT((adapter->ReceiveBufferSize == SXG_RCV_DATA_BUFFER_SIZE) || 518 ASSERT((adapter->ReceiveBufferSize == SXG_RCV_DATA_BUFFER_SIZE) ||
518 (adapter->ReceiveBufferSize == SXG_RCV_JUMBO_BUFFER_SIZE)); 519 (adapter->ReceiveBufferSize == SXG_RCV_JUMBO_BUFFER_SIZE));
519 ASSERT(sizeof(SXG_RCV_DESCRIPTOR_BLOCK) == 520 ASSERT(sizeof(SXG_RCV_DESCRIPTOR_BLOCK) ==
520 SXG_RCV_DESCRIPTOR_BLOCK_SIZE); 521 SXG_RCV_DESCRIPTOR_BLOCK_SIZE);
521 522
522 // Allocate receive data buffers. We allocate a block of buffers and 523 /* Allocate receive data buffers. We allocate a block of buffers and */
523 // a corresponding descriptor block at once. See sxghw.h:SXG_RCV_BLOCK 524 /* a corresponding descriptor block at once. See sxghw.h:SXG_RCV_BLOCK */
524 for (i = 0; i < SXG_INITIAL_RCV_DATA_BUFFERS; 525 for (i = 0; i < SXG_INITIAL_RCV_DATA_BUFFERS;
525 i += SXG_RCV_DESCRIPTORS_PER_BLOCK) { 526 i += SXG_RCV_DESCRIPTORS_PER_BLOCK) {
526 sxg_allocate_buffer_memory(adapter, 527 sxg_allocate_buffer_memory(adapter,
@@ -528,8 +529,8 @@ static int sxg_allocate_resources(p_adapter_t adapter)
528 ReceiveBufferSize), 529 ReceiveBufferSize),
529 SXG_BUFFER_TYPE_RCV); 530 SXG_BUFFER_TYPE_RCV);
530 } 531 }
531 // NBL resource allocation can fail in the 'AllocateComplete' routine, which 532 /* NBL resource allocation can fail in the 'AllocateComplete' routine, which */
532 // doesn't return status. Make sure we got the number of buffers we requested 533 /* doesn't return status. Make sure we got the number of buffers we requested */
533 if (adapter->FreeRcvBufferCount < SXG_INITIAL_RCV_DATA_BUFFERS) { 534 if (adapter->FreeRcvBufferCount < SXG_INITIAL_RCV_DATA_BUFFERS) {
534 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF6", 535 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF6",
535 adapter, adapter->FreeRcvBufferCount, SXG_MAX_ENTRIES, 536 adapter, adapter->FreeRcvBufferCount, SXG_MAX_ENTRIES,
@@ -537,17 +538,17 @@ static int sxg_allocate_resources(p_adapter_t adapter)
537 return (STATUS_RESOURCES); 538 return (STATUS_RESOURCES);
538 } 539 }
539 540
540 DBG_ERROR("%s Allocate EventRings size[%lx]\n", __FUNCTION__, 541 DBG_ERROR("%s Allocate EventRings size[%x]\n", __func__,
541 (sizeof(SXG_EVENT_RING) * RssIds)); 542 (unsigned int)(sizeof(SXG_EVENT_RING) * RssIds));
542 543
543 // Allocate event queues. 544 /* Allocate event queues. */
544 adapter->EventRings = pci_alloc_consistent(adapter->pcidev, 545 adapter->EventRings = pci_alloc_consistent(adapter->pcidev,
545 sizeof(SXG_EVENT_RING) * 546 sizeof(SXG_EVENT_RING) *
546 RssIds, 547 RssIds,
547 &adapter->PEventRings); 548 &adapter->PEventRings);
548 549
549 if (!adapter->EventRings) { 550 if (!adapter->EventRings) {
550 // Caller will call SxgFreeAdapter to clean up above allocations 551 /* Caller will call SxgFreeAdapter to clean up above allocations */
551 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF8", 552 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF8",
552 adapter, SXG_MAX_ENTRIES, 0, 0); 553 adapter, SXG_MAX_ENTRIES, 0, 0);
553 status = STATUS_RESOURCES; 554 status = STATUS_RESOURCES;
@@ -555,12 +556,12 @@ static int sxg_allocate_resources(p_adapter_t adapter)
555 } 556 }
556 memset(adapter->EventRings, 0, sizeof(SXG_EVENT_RING) * RssIds); 557 memset(adapter->EventRings, 0, sizeof(SXG_EVENT_RING) * RssIds);
557 558
558 DBG_ERROR("%s Allocate ISR size[%x]\n", __FUNCTION__, IsrCount); 559 DBG_ERROR("%s Allocate ISR size[%x]\n", __func__, IsrCount);
559 // Allocate ISR 560 /* Allocate ISR */
560 adapter->Isr = pci_alloc_consistent(adapter->pcidev, 561 adapter->Isr = pci_alloc_consistent(adapter->pcidev,
561 IsrCount, &adapter->PIsr); 562 IsrCount, &adapter->PIsr);
562 if (!adapter->Isr) { 563 if (!adapter->Isr) {
563 // Caller will call SxgFreeAdapter to clean up above allocations 564 /* Caller will call SxgFreeAdapter to clean up above allocations */
564 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF9", 565 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF9",
565 adapter, SXG_MAX_ENTRIES, 0, 0); 566 adapter, SXG_MAX_ENTRIES, 0, 0);
566 status = STATUS_RESOURCES; 567 status = STATUS_RESOURCES;
@@ -568,10 +569,10 @@ static int sxg_allocate_resources(p_adapter_t adapter)
568 } 569 }
569 memset(adapter->Isr, 0, sizeof(u32) * IsrCount); 570 memset(adapter->Isr, 0, sizeof(u32) * IsrCount);
570 571
571 DBG_ERROR("%s Allocate shared XMT ring zero index location size[%lx]\n", 572 DBG_ERROR("%s Allocate shared XMT ring zero index location size[%x]\n",
572 __FUNCTION__, sizeof(u32)); 573 __func__, (unsigned int)sizeof(u32));
573 574
574 // Allocate shared XMT ring zero index location 575 /* Allocate shared XMT ring zero index location */
575 adapter->XmtRingZeroIndex = pci_alloc_consistent(adapter->pcidev, 576 adapter->XmtRingZeroIndex = pci_alloc_consistent(adapter->pcidev,
576 sizeof(u32), 577 sizeof(u32),
577 &adapter-> 578 &adapter->
@@ -587,7 +588,7 @@ static int sxg_allocate_resources(p_adapter_t adapter)
587 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlcResS", 588 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlcResS",
588 adapter, SXG_MAX_ENTRIES, 0, 0); 589 adapter, SXG_MAX_ENTRIES, 0, 0);
589 590
590 DBG_ERROR("%s EXIT\n", __FUNCTION__); 591 DBG_ERROR("%s EXIT\n", __func__);
591 return (STATUS_SUCCESS); 592 return (STATUS_SUCCESS);
592} 593}
593 594
@@ -606,17 +607,17 @@ static void sxg_config_pci(struct pci_dev *pcidev)
606 u16 new_command; 607 u16 new_command;
607 608
608 pci_read_config_word(pcidev, PCI_COMMAND, &pci_command); 609 pci_read_config_word(pcidev, PCI_COMMAND, &pci_command);
609 DBG_ERROR("sxg: %s PCI command[%4.4x]\n", __FUNCTION__, pci_command); 610 DBG_ERROR("sxg: %s PCI command[%4.4x]\n", __func__, pci_command);
610 // Set the command register 611 /* Set the command register */
611 new_command = pci_command | (PCI_COMMAND_MEMORY | // Memory Space Enable 612 new_command = pci_command | (PCI_COMMAND_MEMORY | /* Memory Space Enable */
612 PCI_COMMAND_MASTER | // Bus master enable 613 PCI_COMMAND_MASTER | /* Bus master enable */
613 PCI_COMMAND_INVALIDATE | // Memory write and invalidate 614 PCI_COMMAND_INVALIDATE | /* Memory write and invalidate */
614 PCI_COMMAND_PARITY | // Parity error response 615 PCI_COMMAND_PARITY | /* Parity error response */
615 PCI_COMMAND_SERR | // System ERR 616 PCI_COMMAND_SERR | /* System ERR */
616 PCI_COMMAND_FAST_BACK); // Fast back-to-back 617 PCI_COMMAND_FAST_BACK); /* Fast back-to-back */
617 if (pci_command != new_command) { 618 if (pci_command != new_command) {
618 DBG_ERROR("%s -- Updating PCI COMMAND register %4.4x->%4.4x.\n", 619 DBG_ERROR("%s -- Updating PCI COMMAND register %4.4x->%4.4x.\n",
619 __FUNCTION__, pci_command, new_command); 620 __func__, pci_command, new_command);
620 pci_write_config_word(pcidev, PCI_COMMAND, new_command); 621 pci_write_config_word(pcidev, PCI_COMMAND, new_command);
621 } 622 }
622} 623}
@@ -634,9 +635,9 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
634 ulong mmio_len = 0; 635 ulong mmio_len = 0;
635 636
636 DBG_ERROR("sxg: %s 2.6 VERSION ENTER jiffies[%lx] cpu %d\n", 637 DBG_ERROR("sxg: %s 2.6 VERSION ENTER jiffies[%lx] cpu %d\n",
637 __FUNCTION__, jiffies, smp_processor_id()); 638 __func__, jiffies, smp_processor_id());
638 639
639 // Initialize trace buffer 640 /* Initialize trace buffer */
640#ifdef ATKDBG 641#ifdef ATKDBG
641 SxgTraceBuffer = &LSxgTraceBuffer; 642 SxgTraceBuffer = &LSxgTraceBuffer;
642 SXG_TRACE_INIT(SxgTraceBuffer, TRACE_NOISY); 643 SXG_TRACE_INIT(SxgTraceBuffer, TRACE_NOISY);
@@ -701,11 +702,11 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
701 mmio_start, mmio_len); 702 mmio_start, mmio_len);
702 703
703 memmapped_ioaddr = ioremap(mmio_start, mmio_len); 704 memmapped_ioaddr = ioremap(mmio_start, mmio_len);
704 DBG_ERROR("sxg: %s MEMMAPPED_IOADDR [%p]\n", __FUNCTION__, 705 DBG_ERROR("sxg: %s MEMMAPPED_IOADDR [%p]\n", __func__,
705 memmapped_ioaddr); 706 memmapped_ioaddr);
706 if (!memmapped_ioaddr) { 707 if (!memmapped_ioaddr) {
707 DBG_ERROR("%s cannot remap MMIO region %lx @ %lx\n", 708 DBG_ERROR("%s cannot remap MMIO region %lx @ %lx\n",
708 __FUNCTION__, mmio_len, mmio_start); 709 __func__, mmio_len, mmio_start);
709 goto err_out_free_mmio_region; 710 goto err_out_free_mmio_region;
710 } 711 }
711 712
@@ -727,7 +728,7 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
727 memmapped_ioaddr); 728 memmapped_ioaddr);
728 if (!memmapped_ioaddr) { 729 if (!memmapped_ioaddr) {
729 DBG_ERROR("%s cannot remap MMIO region %lx @ %lx\n", 730 DBG_ERROR("%s cannot remap MMIO region %lx @ %lx\n",
730 __FUNCTION__, mmio_len, mmio_start); 731 __func__, mmio_len, mmio_start);
731 goto err_out_free_mmio_region; 732 goto err_out_free_mmio_region;
732 } 733 }
733 734
@@ -738,13 +739,13 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
738 adapter->UcodeRegs = (void *)memmapped_ioaddr; 739 adapter->UcodeRegs = (void *)memmapped_ioaddr;
739 740
740 adapter->State = SXG_STATE_INITIALIZING; 741 adapter->State = SXG_STATE_INITIALIZING;
741 // Maintain a list of all adapters anchored by 742 /* Maintain a list of all adapters anchored by */
742 // the global SxgDriver structure. 743 /* the global SxgDriver structure. */
743 adapter->Next = SxgDriver.Adapters; 744 adapter->Next = SxgDriver.Adapters;
744 SxgDriver.Adapters = adapter; 745 SxgDriver.Adapters = adapter;
745 adapter->AdapterID = ++SxgDriver.AdapterID; 746 adapter->AdapterID = ++SxgDriver.AdapterID;
746 747
747 // Initialize CRC table used to determine multicast hash 748 /* Initialize CRC table used to determine multicast hash */
748 sxg_mcast_init_crc32(); 749 sxg_mcast_init_crc32();
749 750
750 adapter->JumboEnabled = FALSE; 751 adapter->JumboEnabled = FALSE;
@@ -757,18 +758,18 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
757 adapter->ReceiveBufferSize = SXG_RCV_DATA_BUFFER_SIZE; 758 adapter->ReceiveBufferSize = SXG_RCV_DATA_BUFFER_SIZE;
758 } 759 }
759 760
760// status = SXG_READ_EEPROM(adapter); 761/* status = SXG_READ_EEPROM(adapter); */
761// if (!status) { 762/* if (!status) { */
762// goto sxg_init_bad; 763/* goto sxg_init_bad; */
763// } 764/* } */
764 765
765 DBG_ERROR("sxg: %s ENTER sxg_config_pci\n", __FUNCTION__); 766 DBG_ERROR("sxg: %s ENTER sxg_config_pci\n", __func__);
766 sxg_config_pci(pcidev); 767 sxg_config_pci(pcidev);
767 DBG_ERROR("sxg: %s EXIT sxg_config_pci\n", __FUNCTION__); 768 DBG_ERROR("sxg: %s EXIT sxg_config_pci\n", __func__);
768 769
769 DBG_ERROR("sxg: %s ENTER sxg_init_driver\n", __FUNCTION__); 770 DBG_ERROR("sxg: %s ENTER sxg_init_driver\n", __func__);
770 sxg_init_driver(); 771 sxg_init_driver();
771 DBG_ERROR("sxg: %s EXIT sxg_init_driver\n", __FUNCTION__); 772 DBG_ERROR("sxg: %s EXIT sxg_init_driver\n", __func__);
772 773
773 adapter->vendid = pci_tbl_entry->vendor; 774 adapter->vendid = pci_tbl_entry->vendor;
774 adapter->devid = pci_tbl_entry->device; 775 adapter->devid = pci_tbl_entry->device;
@@ -780,23 +781,23 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
780 adapter->irq = pcidev->irq; 781 adapter->irq = pcidev->irq;
781 adapter->next_netdevice = head_netdevice; 782 adapter->next_netdevice = head_netdevice;
782 head_netdevice = netdev; 783 head_netdevice = netdev;
783// adapter->chipid = chip_idx; 784/* adapter->chipid = chip_idx; */
784 adapter->port = 0; //adapter->functionnumber; 785 adapter->port = 0; /*adapter->functionnumber; */
785 adapter->cardindex = adapter->port; 786 adapter->cardindex = adapter->port;
786 787
787 // Allocate memory and other resources 788 /* Allocate memory and other resources */
788 DBG_ERROR("sxg: %s ENTER sxg_allocate_resources\n", __FUNCTION__); 789 DBG_ERROR("sxg: %s ENTER sxg_allocate_resources\n", __func__);
789 status = sxg_allocate_resources(adapter); 790 status = sxg_allocate_resources(adapter);
790 DBG_ERROR("sxg: %s EXIT sxg_allocate_resources status %x\n", 791 DBG_ERROR("sxg: %s EXIT sxg_allocate_resources status %x\n",
791 __FUNCTION__, status); 792 __func__, status);
792 if (status != STATUS_SUCCESS) { 793 if (status != STATUS_SUCCESS) {
793 goto err_out_unmap; 794 goto err_out_unmap;
794 } 795 }
795 796
796 DBG_ERROR("sxg: %s ENTER sxg_download_microcode\n", __FUNCTION__); 797 DBG_ERROR("sxg: %s ENTER sxg_download_microcode\n", __func__);
797 if (sxg_download_microcode(adapter, SXG_UCODE_SAHARA)) { 798 if (sxg_download_microcode(adapter, SXG_UCODE_SAHARA)) {
798 DBG_ERROR("sxg: %s ENTER sxg_adapter_set_hwaddr\n", 799 DBG_ERROR("sxg: %s ENTER sxg_adapter_set_hwaddr\n",
799 __FUNCTION__); 800 __func__);
800 sxg_adapter_set_hwaddr(adapter); 801 sxg_adapter_set_hwaddr(adapter);
801 } else { 802 } else {
802 adapter->state = ADAPT_FAIL; 803 adapter->state = ADAPT_FAIL;
@@ -819,7 +820,7 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
819#endif 820#endif
820 821
821 strcpy(netdev->name, "eth%d"); 822 strcpy(netdev->name, "eth%d");
822// strcpy(netdev->name, pci_name(pcidev)); 823/* strcpy(netdev->name, pci_name(pcidev)); */
823 if ((err = register_netdev(netdev))) { 824 if ((err = register_netdev(netdev))) {
824 DBG_ERROR("Cannot register net device, aborting. %s\n", 825 DBG_ERROR("Cannot register net device, aborting. %s\n",
825 netdev->name); 826 netdev->name);
@@ -832,11 +833,11 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
832 netdev->dev_addr[1], netdev->dev_addr[2], netdev->dev_addr[3], 833 netdev->dev_addr[1], netdev->dev_addr[2], netdev->dev_addr[3],
833 netdev->dev_addr[4], netdev->dev_addr[5]); 834 netdev->dev_addr[4], netdev->dev_addr[5]);
834 835
835//sxg_init_bad: 836/*sxg_init_bad: */
836 ASSERT(status == FALSE); 837 ASSERT(status == FALSE);
837// sxg_free_adapter(adapter); 838/* sxg_free_adapter(adapter); */
838 839
839 DBG_ERROR("sxg: %s EXIT status[%x] jiffies[%lx] cpu %d\n", __FUNCTION__, 840 DBG_ERROR("sxg: %s EXIT status[%x] jiffies[%lx] cpu %d\n", __func__,
840 status, jiffies, smp_processor_id()); 841 status, jiffies, smp_processor_id());
841 return status; 842 return status;
842 843
@@ -848,7 +849,7 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
848 849
849 err_out_exit_sxg_probe: 850 err_out_exit_sxg_probe:
850 851
851 DBG_ERROR("%s EXIT jiffies[%lx] cpu %d\n", __FUNCTION__, jiffies, 852 DBG_ERROR("%s EXIT jiffies[%lx] cpu %d\n", __func__, jiffies,
852 smp_processor_id()); 853 smp_processor_id());
853 854
854 return -ENODEV; 855 return -ENODEV;
@@ -874,12 +875,12 @@ static void sxg_disable_interrupt(p_adapter_t adapter)
874{ 875{
875 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DisIntr", 876 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DisIntr",
876 adapter, adapter->InterruptsEnabled, 0, 0); 877 adapter, adapter->InterruptsEnabled, 0, 0);
877 // For now, RSS is disabled with line based interrupts 878 /* For now, RSS is disabled with line based interrupts */
878 ASSERT(adapter->RssEnabled == FALSE); 879 ASSERT(adapter->RssEnabled == FALSE);
879 ASSERT(adapter->MsiEnabled == FALSE); 880 ASSERT(adapter->MsiEnabled == FALSE);
880 // 881 /* */
881 // Turn off interrupts by writing to the icr register. 882 /* Turn off interrupts by writing to the icr register. */
882 // 883 /* */
883 WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_DISABLE), TRUE); 884 WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_DISABLE), TRUE);
884 885
885 adapter->InterruptsEnabled = 0; 886 adapter->InterruptsEnabled = 0;
@@ -905,12 +906,12 @@ static void sxg_enable_interrupt(p_adapter_t adapter)
905{ 906{
906 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "EnIntr", 907 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "EnIntr",
907 adapter, adapter->InterruptsEnabled, 0, 0); 908 adapter, adapter->InterruptsEnabled, 0, 0);
908 // For now, RSS is disabled with line based interrupts 909 /* For now, RSS is disabled with line based interrupts */
909 ASSERT(adapter->RssEnabled == FALSE); 910 ASSERT(adapter->RssEnabled == FALSE);
910 ASSERT(adapter->MsiEnabled == FALSE); 911 ASSERT(adapter->MsiEnabled == FALSE);
911 // 912 /* */
912 // Turn on interrupts by writing to the icr register. 913 /* Turn on interrupts by writing to the icr register. */
913 // 914 /* */
914 WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_ENABLE), TRUE); 915 WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_ENABLE), TRUE);
915 916
916 adapter->InterruptsEnabled = 1; 917 adapter->InterruptsEnabled = 1;
@@ -935,29 +936,29 @@ static irqreturn_t sxg_isr(int irq, void *dev_id)
935{ 936{
936 p_net_device dev = (p_net_device) dev_id; 937 p_net_device dev = (p_net_device) dev_id;
937 p_adapter_t adapter = (p_adapter_t) netdev_priv(dev); 938 p_adapter_t adapter = (p_adapter_t) netdev_priv(dev);
938// u32 CpuMask = 0, i; 939/* u32 CpuMask = 0, i; */
939 940
940 adapter->Stats.NumInts++; 941 adapter->Stats.NumInts++;
941 if (adapter->Isr[0] == 0) { 942 if (adapter->Isr[0] == 0) {
942 // The SLIC driver used to experience a number of spurious interrupts 943 /* The SLIC driver used to experience a number of spurious interrupts */
943 // due to the delay associated with the masking of the interrupt 944 /* due to the delay associated with the masking of the interrupt */
944 // (we'd bounce back in here). If we see that again with Sahara, 945 /* (we'd bounce back in here). If we see that again with Sahara, */
945 // add a READ_REG of the Icr register after the WRITE_REG below. 946 /* add a READ_REG of the Icr register after the WRITE_REG below. */
946 adapter->Stats.FalseInts++; 947 adapter->Stats.FalseInts++;
947 return IRQ_NONE; 948 return IRQ_NONE;
948 } 949 }
949 // 950 /* */
950 // Move the Isr contents and clear the value in 951 /* Move the Isr contents and clear the value in */
951 // shared memory, and mask interrupts 952 /* shared memory, and mask interrupts */
952 // 953 /* */
953 adapter->IsrCopy[0] = adapter->Isr[0]; 954 adapter->IsrCopy[0] = adapter->Isr[0];
954 adapter->Isr[0] = 0; 955 adapter->Isr[0] = 0;
955 WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_MASK), TRUE); 956 WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_MASK), TRUE);
956// ASSERT(adapter->IsrDpcsPending == 0); 957/* ASSERT(adapter->IsrDpcsPending == 0); */
957#if XXXTODO // RSS Stuff 958#if XXXTODO /* RSS Stuff */
958 // If RSS is enabled and the ISR specifies 959 /* If RSS is enabled and the ISR specifies */
959 // SXG_ISR_EVENT, then schedule DPC's 960 /* SXG_ISR_EVENT, then schedule DPC's */
960 // based on event queues. 961 /* based on event queues. */
961 if (adapter->RssEnabled && (adapter->IsrCopy[0] & SXG_ISR_EVENT)) { 962 if (adapter->RssEnabled && (adapter->IsrCopy[0] & SXG_ISR_EVENT)) {
962 for (i = 0; 963 for (i = 0;
963 i < adapter->RssSystemInfo->ProcessorInfo.RssCpuCount; 964 i < adapter->RssSystemInfo->ProcessorInfo.RssCpuCount;
@@ -973,8 +974,8 @@ static irqreturn_t sxg_isr(int irq, void *dev_id)
973 } 974 }
974 } 975 }
975 } 976 }
976 // Now, either schedule the CPUs specified by the CpuMask, 977 /* Now, either schedule the CPUs specified by the CpuMask, */
977 // or queue default 978 /* or queue default */
978 if (CpuMask) { 979 if (CpuMask) {
979 *QueueDefault = FALSE; 980 *QueueDefault = FALSE;
980 } else { 981 } else {
@@ -983,9 +984,9 @@ static irqreturn_t sxg_isr(int irq, void *dev_id)
983 } 984 }
984 *TargetCpus = CpuMask; 985 *TargetCpus = CpuMask;
985#endif 986#endif
986 // 987 /* */
987 // There are no DPCs in Linux, so call the handler now 988 /* There are no DPCs in Linux, so call the handler now */
988 // 989 /* */
989 sxg_handle_interrupt(adapter); 990 sxg_handle_interrupt(adapter);
990 991
991 return IRQ_HANDLED; 992 return IRQ_HANDLED;
@@ -993,7 +994,7 @@ static irqreturn_t sxg_isr(int irq, void *dev_id)
993 994
994static void sxg_handle_interrupt(p_adapter_t adapter) 995static void sxg_handle_interrupt(p_adapter_t adapter)
995{ 996{
996// unsigned char RssId = 0; 997/* unsigned char RssId = 0; */
997 u32 NewIsr; 998 u32 NewIsr;
998 999
999 if (adapter->Stats.RcvNoBuffer < 5) { 1000 if (adapter->Stats.RcvNoBuffer < 5) {
@@ -1002,32 +1003,32 @@ static void sxg_handle_interrupt(p_adapter_t adapter)
1002 } 1003 }
1003 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "HndlIntr", 1004 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "HndlIntr",
1004 adapter, adapter->IsrCopy[0], 0, 0); 1005 adapter, adapter->IsrCopy[0], 0, 0);
1005 // For now, RSS is disabled with line based interrupts 1006 /* For now, RSS is disabled with line based interrupts */
1006 ASSERT(adapter->RssEnabled == FALSE); 1007 ASSERT(adapter->RssEnabled == FALSE);
1007 ASSERT(adapter->MsiEnabled == FALSE); 1008 ASSERT(adapter->MsiEnabled == FALSE);
1008 ASSERT(adapter->IsrCopy[0]); 1009 ASSERT(adapter->IsrCopy[0]);
1009///////////////////////////// 1010/*/////////////////////////// */
1010 1011
1011 // Always process the event queue. 1012 /* Always process the event queue. */
1012 sxg_process_event_queue(adapter, 1013 sxg_process_event_queue(adapter,
1013 (adapter->RssEnabled ? /*RssId */ 0 : 0)); 1014 (adapter->RssEnabled ? /*RssId */ 0 : 0));
1014 1015
1015#if XXXTODO // RSS stuff 1016#if XXXTODO /* RSS stuff */
1016 if (--adapter->IsrDpcsPending) { 1017 if (--adapter->IsrDpcsPending) {
1017 // We're done. 1018 /* We're done. */
1018 ASSERT(adapter->RssEnabled); 1019 ASSERT(adapter->RssEnabled);
1019 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DPCsPend", 1020 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DPCsPend",
1020 adapter, 0, 0, 0); 1021 adapter, 0, 0, 0);
1021 return; 1022 return;
1022 } 1023 }
1023#endif 1024#endif
1024 // 1025 /* */
1025 // Last (or only) DPC processes the ISR and clears the interrupt. 1026 /* Last (or only) DPC processes the ISR and clears the interrupt. */
1026 // 1027 /* */
1027 NewIsr = sxg_process_isr(adapter, 0); 1028 NewIsr = sxg_process_isr(adapter, 0);
1028 // 1029 /* */
1029 // Reenable interrupts 1030 /* Reenable interrupts */
1030 // 1031 /* */
1031 adapter->IsrCopy[0] = 0; 1032 adapter->IsrCopy[0] = 0;
1032 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "ClearIsr", 1033 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "ClearIsr",
1033 adapter, NewIsr, 0, 0); 1034 adapter, NewIsr, 0, 0);
@@ -1063,75 +1064,75 @@ static int sxg_process_isr(p_adapter_t adapter, u32 MessageId)
1063 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "ProcIsr", 1064 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "ProcIsr",
1064 adapter, Isr, 0, 0); 1065 adapter, Isr, 0, 0);
1065 1066
1066 // Error 1067 /* Error */
1067 if (Isr & SXG_ISR_ERR) { 1068 if (Isr & SXG_ISR_ERR) {
1068 if (Isr & SXG_ISR_PDQF) { 1069 if (Isr & SXG_ISR_PDQF) {
1069 adapter->Stats.PdqFull++; 1070 adapter->Stats.PdqFull++;
1070 DBG_ERROR("%s: SXG_ISR_ERR PDQF!!\n", __FUNCTION__); 1071 DBG_ERROR("%s: SXG_ISR_ERR PDQF!!\n", __func__);
1071 } 1072 }
1072 // No host buffer 1073 /* No host buffer */
1073 if (Isr & SXG_ISR_RMISS) { 1074 if (Isr & SXG_ISR_RMISS) {
1074 // There is a bunch of code in the SLIC driver which 1075 /* There is a bunch of code in the SLIC driver which */
1075 // attempts to process more receive events per DPC 1076 /* attempts to process more receive events per DPC */
1076 // if we start to fall behind. We'll probably 1077 /* if we start to fall behind. We'll probably */
1077 // need to do something similar here, but hold 1078 /* need to do something similar here, but hold */
1078 // off for now. I don't want to make the code more 1079 /* off for now. I don't want to make the code more */
1079 // complicated than strictly needed. 1080 /* complicated than strictly needed. */
1080 adapter->Stats.RcvNoBuffer++; 1081 adapter->Stats.RcvNoBuffer++;
1081 if (adapter->Stats.RcvNoBuffer < 5) { 1082 if (adapter->Stats.RcvNoBuffer < 5) {
1082 DBG_ERROR("%s: SXG_ISR_ERR RMISS!!\n", 1083 DBG_ERROR("%s: SXG_ISR_ERR RMISS!!\n",
1083 __FUNCTION__); 1084 __func__);
1084 } 1085 }
1085 } 1086 }
1086 // Card crash 1087 /* Card crash */
1087 if (Isr & SXG_ISR_DEAD) { 1088 if (Isr & SXG_ISR_DEAD) {
1088 // Set aside the crash info and set the adapter state to RESET 1089 /* Set aside the crash info and set the adapter state to RESET */
1089 adapter->CrashCpu = 1090 adapter->CrashCpu =
1090 (unsigned char)((Isr & SXG_ISR_CPU) >> 1091 (unsigned char)((Isr & SXG_ISR_CPU) >>
1091 SXG_ISR_CPU_SHIFT); 1092 SXG_ISR_CPU_SHIFT);
1092 adapter->CrashLocation = (ushort) (Isr & SXG_ISR_CRASH); 1093 adapter->CrashLocation = (ushort) (Isr & SXG_ISR_CRASH);
1093 adapter->Dead = TRUE; 1094 adapter->Dead = TRUE;
1094 DBG_ERROR("%s: ISR_DEAD %x, CPU: %d\n", __FUNCTION__, 1095 DBG_ERROR("%s: ISR_DEAD %x, CPU: %d\n", __func__,
1095 adapter->CrashLocation, adapter->CrashCpu); 1096 adapter->CrashLocation, adapter->CrashCpu);
1096 } 1097 }
1097 // Event ring full 1098 /* Event ring full */
1098 if (Isr & SXG_ISR_ERFULL) { 1099 if (Isr & SXG_ISR_ERFULL) {
1099 // Same issue as RMISS, really. This means the 1100 /* Same issue as RMISS, really. This means the */
1100 // host is falling behind the card. Need to increase 1101 /* host is falling behind the card. Need to increase */
1101 // event ring size, process more events per interrupt, 1102 /* event ring size, process more events per interrupt, */
1102 // and/or reduce/remove interrupt aggregation. 1103 /* and/or reduce/remove interrupt aggregation. */
1103 adapter->Stats.EventRingFull++; 1104 adapter->Stats.EventRingFull++;
1104 DBG_ERROR("%s: SXG_ISR_ERR EVENT RING FULL!!\n", 1105 DBG_ERROR("%s: SXG_ISR_ERR EVENT RING FULL!!\n",
1105 __FUNCTION__); 1106 __func__);
1106 } 1107 }
1107 // Transmit drop - no DRAM buffers or XMT error 1108 /* Transmit drop - no DRAM buffers or XMT error */
1108 if (Isr & SXG_ISR_XDROP) { 1109 if (Isr & SXG_ISR_XDROP) {
1109 adapter->Stats.XmtDrops++; 1110 adapter->Stats.XmtDrops++;
1110 adapter->Stats.XmtErrors++; 1111 adapter->Stats.XmtErrors++;
1111 DBG_ERROR("%s: SXG_ISR_ERR XDROP!!\n", __FUNCTION__); 1112 DBG_ERROR("%s: SXG_ISR_ERR XDROP!!\n", __func__);
1112 } 1113 }
1113 } 1114 }
1114 // Slowpath send completions 1115 /* Slowpath send completions */
1115 if (Isr & SXG_ISR_SPSEND) { 1116 if (Isr & SXG_ISR_SPSEND) {
1116 sxg_complete_slow_send(adapter); 1117 sxg_complete_slow_send(adapter);
1117 } 1118 }
1118 // Dump 1119 /* Dump */
1119 if (Isr & SXG_ISR_UPC) { 1120 if (Isr & SXG_ISR_UPC) {
1120 ASSERT(adapter->DumpCmdRunning); // Maybe change when debug is added.. 1121 ASSERT(adapter->DumpCmdRunning); /* Maybe change when debug is added.. */
1121 adapter->DumpCmdRunning = FALSE; 1122 adapter->DumpCmdRunning = FALSE;
1122 } 1123 }
1123 // Link event 1124 /* Link event */
1124 if (Isr & SXG_ISR_LINK) { 1125 if (Isr & SXG_ISR_LINK) {
1125 sxg_link_event(adapter); 1126 sxg_link_event(adapter);
1126 } 1127 }
1127 // Debug - breakpoint hit 1128 /* Debug - breakpoint hit */
1128 if (Isr & SXG_ISR_BREAK) { 1129 if (Isr & SXG_ISR_BREAK) {
1129 // At the moment AGDB isn't written to support interactive 1130 /* At the moment AGDB isn't written to support interactive */
1130 // debug sessions. When it is, this interrupt will be used 1131 /* debug sessions. When it is, this interrupt will be used */
1131 // to signal AGDB that it has hit a breakpoint. For now, ASSERT. 1132 /* to signal AGDB that it has hit a breakpoint. For now, ASSERT. */
1132 ASSERT(0); 1133 ASSERT(0);
1133 } 1134 }
1134 // Heartbeat response 1135 /* Heartbeat response */
1135 if (Isr & SXG_ISR_PING) { 1136 if (Isr & SXG_ISR_PING) {
1136 adapter->PingOutstanding = FALSE; 1137 adapter->PingOutstanding = FALSE;
1137 } 1138 }
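The ISR-decoding hunk above walks one latched ISR word bit by bit (error sub-cases, slow-path send completions, dump, link, breakpoint, heartbeat). The sketch below shows the same decode-by-mask shape in isolation; the ISR_* values and struct stats are invented for the example and do not match the real SXG_ISR_* definitions.

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative ISR bits only; the real SXG_ISR_* values live in the header. */
	#define ISR_ERR    0x01u
	#define ISR_SPSEND 0x02u
	#define ISR_LINK   0x04u
	#define ISR_PING   0x08u

	struct stats { unsigned long errors, link_events; };

	/* Decode a latched ISR word bit by bit, the way sxg_process_isr does. */
	static void process_isr(uint32_t isr, struct stats *s, int *ping_outstanding)
	{
		if (isr & ISR_ERR)
			s->errors++;           /* further decode PDQF/RMISS/DEAD/... here */
		if (isr & ISR_SPSEND)
			puts("complete slow-path sends");
		if (isr & ISR_LINK) {
			s->link_events++;
			puts("handle link event");
		}
		if (isr & ISR_PING)
			*ping_outstanding = 0; /* heartbeat answered */
	}

	int main(void)
	{
		struct stats s = { 0, 0 };
		int ping = 1;

		process_isr(ISR_LINK | ISR_PING, &s, &ping);
		printf("errors=%lu link=%lu ping=%d\n", s.errors, s.link_events, ping);
		return 0;
	}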
@@ -1171,39 +1172,39 @@ static u32 sxg_process_event_queue(p_adapter_t adapter, u32 RssId)
1171 (adapter->State == SXG_STATE_PAUSING) || 1172 (adapter->State == SXG_STATE_PAUSING) ||
1172 (adapter->State == SXG_STATE_PAUSED) || 1173 (adapter->State == SXG_STATE_PAUSED) ||
1173 (adapter->State == SXG_STATE_HALTING)); 1174 (adapter->State == SXG_STATE_HALTING));
1174 // We may still have unprocessed events on the queue if 1175 /* We may still have unprocessed events on the queue if */
1175 // the card crashed. Don't process them. 1176 /* the card crashed. Don't process them. */
1176 if (adapter->Dead) { 1177 if (adapter->Dead) {
1177 return (0); 1178 return (0);
1178 } 1179 }
1179 // In theory there should only be a single processor that 1180 /* In theory there should only be a single processor that */
1180 // accesses this queue, and only at interrupt-DPC time. So 1181 /* accesses this queue, and only at interrupt-DPC time. So */
1181 // we shouldn't need a lock for any of this. 1182 /* we shouldn't need a lock for any of this. */
1182 while (Event->Status & EVENT_STATUS_VALID) { 1183 while (Event->Status & EVENT_STATUS_VALID) {
1183 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "Event", 1184 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "Event",
1184 Event, Event->Code, Event->Status, 1185 Event, Event->Code, Event->Status,
1185 adapter->NextEvent); 1186 adapter->NextEvent);
1186 switch (Event->Code) { 1187 switch (Event->Code) {
1187 case EVENT_CODE_BUFFERS: 1188 case EVENT_CODE_BUFFERS:
1188 ASSERT(!(Event->CommandIndex & 0xFF00)); // SXG_RING_INFO Head & Tail == unsigned char 1189 ASSERT(!(Event->CommandIndex & 0xFF00)); /* SXG_RING_INFO Head & Tail == unsigned char */
1189 // 1190 /* */
1190 sxg_complete_descriptor_blocks(adapter, 1191 sxg_complete_descriptor_blocks(adapter,
1191 Event->CommandIndex); 1192 Event->CommandIndex);
1192 // 1193 /* */
1193 break; 1194 break;
1194 case EVENT_CODE_SLOWRCV: 1195 case EVENT_CODE_SLOWRCV:
1195 --adapter->RcvBuffersOnCard; 1196 --adapter->RcvBuffersOnCard;
1196 if ((skb = sxg_slow_receive(adapter, Event))) { 1197 if ((skb = sxg_slow_receive(adapter, Event))) {
1197 u32 rx_bytes; 1198 u32 rx_bytes;
1198#ifdef LINUX_HANDLES_RCV_INDICATION_LISTS 1199#ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
1199 // Add it to our indication list 1200 /* Add it to our indication list */
1200 SXG_ADD_RCV_PACKET(adapter, skb, prev_skb, 1201 SXG_ADD_RCV_PACKET(adapter, skb, prev_skb,
1201 IndicationList, num_skbs); 1202 IndicationList, num_skbs);
1202 // In Linux, we just pass up each skb to the protocol above at this point, 1203 /* In Linux, we just pass up each skb to the protocol above at this point, */
1203 // there is no capability of an indication list. 1204 /* there is no capability of an indication list. */
1204#else 1205#else
1205// CHECK skb_pull(skb, INIC_RCVBUF_HEADSIZE); 1206/* CHECK skb_pull(skb, INIC_RCVBUF_HEADSIZE); */
1206 rx_bytes = Event->Length; // (rcvbuf->length & IRHDDR_FLEN_MSK); 1207 rx_bytes = Event->Length; /* (rcvbuf->length & IRHDDR_FLEN_MSK); */
1207 skb_put(skb, rx_bytes); 1208 skb_put(skb, rx_bytes);
1208 adapter->stats.rx_packets++; 1209 adapter->stats.rx_packets++;
1209 adapter->stats.rx_bytes += rx_bytes; 1210 adapter->stats.rx_bytes += rx_bytes;
@@ -1218,43 +1219,43 @@ static u32 sxg_process_event_queue(p_adapter_t adapter, u32 RssId)
1218 break; 1219 break;
1219 default: 1220 default:
1220 DBG_ERROR("%s: ERROR Invalid EventCode %d\n", 1221 DBG_ERROR("%s: ERROR Invalid EventCode %d\n",
1221 __FUNCTION__, Event->Code); 1222 __func__, Event->Code);
1222// ASSERT(0); 1223/* ASSERT(0); */
1223 } 1224 }
1224 // See if we need to restock card receive buffers. 1225 /* See if we need to restock card receive buffers. */
1225 // There are two things to note here: 1226 /* There are two things to note here: */
1226 // First - This test is not SMP safe. The 1227 /* First - This test is not SMP safe. The */
1227 // adapter->BuffersOnCard field is protected via atomic interlocked calls, but 1228 /* adapter->BuffersOnCard field is protected via atomic interlocked calls, but */
1228 // we do not protect it with respect to these tests. The only way to do that 1229 /* we do not protect it with respect to these tests. The only way to do that */
1229 // is with a lock, and I don't want to grab a lock every time we adjust the 1230 /* is with a lock, and I don't want to grab a lock every time we adjust the */
1230 // BuffersOnCard count. Instead, we allow the buffer replenishment to be off 1231 /* BuffersOnCard count. Instead, we allow the buffer replenishment to be off */
1231 // once in a while. The worst that can happen is the card is given one 1232 /* once in a while. The worst that can happen is the card is given one */
1232 // more-or-less descriptor block than the arbitrary value we've chosen. 1233 /* more-or-less descriptor block than the arbitrary value we've chosen. */
1233 // No big deal 1234 /* No big deal */
1234 // In short DO NOT ADD A LOCK HERE, OR WHERE RcvBuffersOnCard is adjusted. 1235 /* In short DO NOT ADD A LOCK HERE, OR WHERE RcvBuffersOnCard is adjusted. */
1235 // Second - We expect this test to rarely evaluate to true. We attempt to 1236 /* Second - We expect this test to rarely evaluate to true. We attempt to */
1236 // refill descriptor blocks as they are returned to us 1237 /* refill descriptor blocks as they are returned to us */
1237 // (sxg_complete_descriptor_blocks), so the only time this should evaluate 1238 /* (sxg_complete_descriptor_blocks), so the only time this should evaluate */
1238 // to true is when sxg_complete_descriptor_blocks failed to allocate 1239 /* to true is when sxg_complete_descriptor_blocks failed to allocate */
1239 // receive buffers. 1240 /* receive buffers. */
1240 if (adapter->RcvBuffersOnCard < SXG_RCV_DATA_BUFFERS) { 1241 if (adapter->RcvBuffersOnCard < SXG_RCV_DATA_BUFFERS) {
1241 sxg_stock_rcv_buffers(adapter); 1242 sxg_stock_rcv_buffers(adapter);
1242 } 1243 }
1243 // It's more efficient to just set this to zero. 1244 /* It's more efficient to just set this to zero. */
1244 // But clearing the top bit saves potential debug info... 1245 /* But clearing the top bit saves potential debug info... */
1245 Event->Status &= ~EVENT_STATUS_VALID; 1246 Event->Status &= ~EVENT_STATUS_VALID;
1246 // Advance to the next event 1247 /* Advance to the next event */
1247 SXG_ADVANCE_INDEX(adapter->NextEvent[RssId], EVENT_RING_SIZE); 1248 SXG_ADVANCE_INDEX(adapter->NextEvent[RssId], EVENT_RING_SIZE);
1248 Event = &EventRing->Ring[adapter->NextEvent[RssId]]; 1249 Event = &EventRing->Ring[adapter->NextEvent[RssId]];
1249 EventsProcessed++; 1250 EventsProcessed++;
1250 if (EventsProcessed == EVENT_RING_BATCH) { 1251 if (EventsProcessed == EVENT_RING_BATCH) {
1251 // Release a batch of events back to the card 1252 /* Release a batch of events back to the card */
1252 WRITE_REG(adapter->UcodeRegs[RssId].EventRelease, 1253 WRITE_REG(adapter->UcodeRegs[RssId].EventRelease,
1253 EVENT_RING_BATCH, FALSE); 1254 EVENT_RING_BATCH, FALSE);
1254 EventsProcessed = 0; 1255 EventsProcessed = 0;
1255 // If we've processed our batch limit, break out of the 1256 /* If we've processed our batch limit, break out of the */
1256 // loop and return SXG_ISR_EVENT to arrange for us to 1257 /* loop and return SXG_ISR_EVENT to arrange for us to */
1257 // be called again 1258 /* be called again */
1258 if (Batches++ == EVENT_BATCH_LIMIT) { 1259 if (Batches++ == EVENT_BATCH_LIMIT) {
1259 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, 1260 SXG_TRACE(TRACE_SXG, SxgTraceBuffer,
1260 TRACE_NOISY, "EvtLimit", Batches, 1261 TRACE_NOISY, "EvtLimit", Batches,
@@ -1265,14 +1266,14 @@ static u32 sxg_process_event_queue(p_adapter_t adapter, u32 RssId)
1265 } 1266 }
1266 } 1267 }
1267#ifdef LINUX_HANDLES_RCV_INDICATION_LISTS 1268#ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
1268 // 1269 /* */
1269 // Indicate any received dumb-nic frames 1270 /* Indicate any received dumb-nic frames */
1270 // 1271 /* */
1271 SXG_INDICATE_PACKETS(adapter, IndicationList, num_skbs); 1272 SXG_INDICATE_PACKETS(adapter, IndicationList, num_skbs);
1272#endif 1273#endif
1273 // 1274 /* */
1274 // Release events back to the card. 1275 /* Release events back to the card. */
1275 // 1276 /* */
1276 if (EventsProcessed) { 1277 if (EventsProcessed) {
1277 WRITE_REG(adapter->UcodeRegs[RssId].EventRelease, 1278 WRITE_REG(adapter->UcodeRegs[RssId].EventRelease,
1278 EventsProcessed, FALSE); 1279 EventsProcessed, FALSE);
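The event-queue hunk above drains the ring in batches: clear the valid bit, advance the ring index with wrap, release EVENT_RING_BATCH events back to the card at a time, and bail out after EVENT_BATCH_LIMIT batches so the handler can be rescheduled. Below is a simplified, self-contained version of that loop with small stand-in constants (RING_SIZE, BATCH, BATCH_LIMIT); the real code's post-increment check shifts the cut-off by one, the sketch only keeps the shape.

	#include <stdio.h>

	#define RING_SIZE   8   /* stand-in for EVENT_RING_SIZE */
	#define BATCH       4   /* stand-in for EVENT_RING_BATCH */
	#define BATCH_LIMIT 2   /* stand-in for EVENT_BATCH_LIMIT */

	struct event { int valid; int code; };

	/* Advance a ring index with wrap, like SXG_ADVANCE_INDEX. */
	static unsigned advance(unsigned idx) { return (idx + 1) % RING_SIZE; }

	static void release_to_card(int n) { printf("release %d events\n", n); }

	/* Drain the event ring in batches; return nonzero if the batch limit was
	 * hit and the caller should arrange to be called again. */
	static int drain(struct event *ring, unsigned *next)
	{
		int processed = 0, batches = 0;

		while (ring[*next].valid) {
			ring[*next].valid = 0;          /* hand the slot back */
			*next = advance(*next);
			if (++processed == BATCH) {
				release_to_card(processed);
				processed = 0;
				if (++batches == BATCH_LIMIT)
					return 1;       /* come back later */
			}
		}
		if (processed)
			release_to_card(processed);     /* release the remainder */
		return 0;
	}

	int main(void)
	{
		struct event ring[RING_SIZE] = {
			{1,0},{1,0},{1,0},{1,0},{1,0},{0,0},{0,0},{0,0}
		};
		unsigned next = 0;

		printf("again=%d\n", drain(ring, &next));
		return 0;
	}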
@@ -1299,43 +1300,43 @@ static void sxg_complete_slow_send(p_adapter_t adapter)
1299 u32 *ContextType; 1300 u32 *ContextType;
1300 PSXG_CMD XmtCmd; 1301 PSXG_CMD XmtCmd;
1301 1302
1302 // NOTE - This lock is dropped and regrabbed in this loop. 1303 /* NOTE - This lock is dropped and regrabbed in this loop. */
1303 // This means two different processors can both be running 1304 /* This means two different processors can both be running */
1304 // through this loop. Be *very* careful. 1305 /* through this loop. Be *very* careful. */
1305 spin_lock(&adapter->XmtZeroLock); 1306 spin_lock(&adapter->XmtZeroLock);
1306 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnds", 1307 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnds",
1307 adapter, XmtRingInfo->Head, XmtRingInfo->Tail, 0); 1308 adapter, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
1308 1309
1309 while (XmtRingInfo->Tail != *adapter->XmtRingZeroIndex) { 1310 while (XmtRingInfo->Tail != *adapter->XmtRingZeroIndex) {
1310 // Locate the current Cmd (ring descriptor entry), and 1311 /* Locate the current Cmd (ring descriptor entry), and */
1311 // associated SGL, and advance the tail 1312 /* associated SGL, and advance the tail */
1312 SXG_RETURN_CMD(XmtRing, XmtRingInfo, XmtCmd, ContextType); 1313 SXG_RETURN_CMD(XmtRing, XmtRingInfo, XmtCmd, ContextType);
1313 ASSERT(ContextType); 1314 ASSERT(ContextType);
1314 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnd", 1315 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnd",
1315 XmtRingInfo->Head, XmtRingInfo->Tail, XmtCmd, 0); 1316 XmtRingInfo->Head, XmtRingInfo->Tail, XmtCmd, 0);
1316 // Clear the SGL field. 1317 /* Clear the SGL field. */
1317 XmtCmd->Sgl = 0; 1318 XmtCmd->Sgl = 0;
1318 1319
1319 switch (*ContextType) { 1320 switch (*ContextType) {
1320 case SXG_SGL_DUMB: 1321 case SXG_SGL_DUMB:
1321 { 1322 {
1322 struct sk_buff *skb; 1323 struct sk_buff *skb;
1323 // Dumb-nic send. Command context is the dumb-nic SGL 1324 /* Dumb-nic send. Command context is the dumb-nic SGL */
1324 skb = (struct sk_buff *)ContextType; 1325 skb = (struct sk_buff *)ContextType;
1325 // Complete the send 1326 /* Complete the send */
1326 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, 1327 SXG_TRACE(TRACE_SXG, SxgTraceBuffer,
1327 TRACE_IMPORTANT, "DmSndCmp", skb, 0, 1328 TRACE_IMPORTANT, "DmSndCmp", skb, 0,
1328 0, 0); 1329 0, 0);
1329 ASSERT(adapter->Stats.XmtQLen); 1330 ASSERT(adapter->Stats.XmtQLen);
1330 adapter->Stats.XmtQLen--; // within XmtZeroLock 1331 adapter->Stats.XmtQLen--; /* within XmtZeroLock */
1331 adapter->Stats.XmtOk++; 1332 adapter->Stats.XmtOk++;
1332 // Now drop the lock and complete the send back to 1333 /* Now drop the lock and complete the send back to */
1333 // Microsoft. We need to drop the lock because 1334 /* Microsoft. We need to drop the lock because */
1334 // Microsoft can come back with a chimney send, which 1335 /* Microsoft can come back with a chimney send, which */
1335 // results in a double trip in SxgTcpOutput 1336 /* results in a double trip in SxgTcpOutput */
1336 spin_unlock(&adapter->XmtZeroLock); 1337 spin_unlock(&adapter->XmtZeroLock);
1337 SXG_COMPLETE_DUMB_SEND(adapter, skb); 1338 SXG_COMPLETE_DUMB_SEND(adapter, skb);
1338 // and reacquire.. 1339 /* and reacquire.. */
1339 spin_lock(&adapter->XmtZeroLock); 1340 spin_lock(&adapter->XmtZeroLock);
1340 } 1341 }
1341 break; 1342 break;
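The completion hunk above stresses that the transmit-ring lock is dropped and re-taken around each completion because the upper layer can re-enter the transmit path from the completion callback. The same pattern, reduced to a plain pthread mutex, is sketched below; ring_lock, pending and complete_one are illustrative names, not the driver's.

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER;
	static int pending = 3;   /* completed sends waiting on the ring (illustrative) */

	/* The completion callback may re-enter the transmit path, so it must run
	 * with the ring lock dropped - the rule the comments above describe for
	 * SXG_COMPLETE_DUMB_SEND. */
	static void complete_one(int i) { printf("completed send %d\n", i); }

	static void complete_slow_sends(void)
	{
		pthread_mutex_lock(&ring_lock);
		while (pending > 0) {
			int i = pending--;            /* pop one entry under the lock */
			pthread_mutex_unlock(&ring_lock);
			complete_one(i);              /* call out with the lock dropped */
			pthread_mutex_lock(&ring_lock);
		}
		pthread_mutex_unlock(&ring_lock);
	}

	int main(void) { complete_slow_sends(); return 0; }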
@@ -1371,7 +1372,7 @@ static struct sk_buff *sxg_slow_receive(p_adapter_t adapter, PSXG_EVENT Event)
1371 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "SlowRcv", Event, 1372 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "SlowRcv", Event,
1372 RcvDataBufferHdr, RcvDataBufferHdr->State, 1373 RcvDataBufferHdr, RcvDataBufferHdr->State,
1373 RcvDataBufferHdr->VirtualAddress); 1374 RcvDataBufferHdr->VirtualAddress);
1374 // Drop rcv frames in non-running state 1375 /* Drop rcv frames in non-running state */
1375 switch (adapter->State) { 1376 switch (adapter->State) {
1376 case SXG_STATE_RUNNING: 1377 case SXG_STATE_RUNNING:
1377 break; 1378 break;
@@ -1384,12 +1385,12 @@ static struct sk_buff *sxg_slow_receive(p_adapter_t adapter, PSXG_EVENT Event)
1384 goto drop; 1385 goto drop;
1385 } 1386 }
1386 1387
1387 // Change buffer state to UPSTREAM 1388 /* Change buffer state to UPSTREAM */
1388 RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM; 1389 RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM;
1389 if (Event->Status & EVENT_STATUS_RCVERR) { 1390 if (Event->Status & EVENT_STATUS_RCVERR) {
1390 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "RcvError", 1391 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "RcvError",
1391 Event, Event->Status, Event->HostHandle, 0); 1392 Event, Event->Status, Event->HostHandle, 0);
1392 // XXXTODO - Remove this print later 1393 /* XXXTODO - Remove this print later */
1393 DBG_ERROR("SXG: Receive error %x\n", *(u32 *) 1394 DBG_ERROR("SXG: Receive error %x\n", *(u32 *)
1394 SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr)); 1395 SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr));
1395 sxg_process_rcv_error(adapter, *(u32 *) 1396 sxg_process_rcv_error(adapter, *(u32 *)
@@ -1397,8 +1398,8 @@ static struct sk_buff *sxg_slow_receive(p_adapter_t adapter, PSXG_EVENT Event)
1397 (RcvDataBufferHdr)); 1398 (RcvDataBufferHdr));
1398 goto drop; 1399 goto drop;
1399 } 1400 }
1400#if XXXTODO // VLAN stuff 1401#if XXXTODO /* VLAN stuff */
1401 // If there's a VLAN tag, extract it and validate it 1402 /* If there's a VLAN tag, extract it and validate it */
1402 if (((p_ether_header) (SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr)))-> 1403 if (((p_ether_header) (SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr)))->
1403 EtherType == ETHERTYPE_VLAN) { 1404 EtherType == ETHERTYPE_VLAN) {
1404 if (SxgExtractVlanHeader(adapter, RcvDataBufferHdr, Event) != 1405 if (SxgExtractVlanHeader(adapter, RcvDataBufferHdr, Event) !=
@@ -1411,9 +1412,9 @@ static struct sk_buff *sxg_slow_receive(p_adapter_t adapter, PSXG_EVENT Event)
1411 } 1412 }
1412 } 1413 }
1413#endif 1414#endif
1414 // 1415 /* */
1415 // Dumb-nic frame. See if it passes our mac filter and update stats 1416 /* Dumb-nic frame. See if it passes our mac filter and update stats */
1416 // 1417 /* */
1417 if (!sxg_mac_filter(adapter, (p_ether_header) 1418 if (!sxg_mac_filter(adapter, (p_ether_header)
1418 SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr), 1419 SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr),
1419 Event->Length)) { 1420 Event->Length)) {
@@ -1427,9 +1428,9 @@ static struct sk_buff *sxg_slow_receive(p_adapter_t adapter, PSXG_EVENT Event)
1427 1428
1428 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumbRcv", 1429 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumbRcv",
1429 RcvDataBufferHdr, Packet, Event->Length, 0); 1430 RcvDataBufferHdr, Packet, Event->Length, 0);
1430 // 1431 /* */
1431 // Lastly adjust the receive packet length. 1432 /* Lastly adjust the receive packet length. */
1432 // 1433 /* */
1433 SXG_ADJUST_RCV_PACKET(Packet, RcvDataBufferHdr, Event); 1434 SXG_ADJUST_RCV_PACKET(Packet, RcvDataBufferHdr, Event);
1434 1435
1435 return (Packet); 1436 return (Packet);
@@ -1541,7 +1542,7 @@ static bool sxg_mac_filter(p_adapter_t adapter, p_ether_header EtherHdr,
1541 1542
1542 if (SXG_MULTICAST_PACKET(EtherHdr)) { 1543 if (SXG_MULTICAST_PACKET(EtherHdr)) {
1543 if (SXG_BROADCAST_PACKET(EtherHdr)) { 1544 if (SXG_BROADCAST_PACKET(EtherHdr)) {
1544 // broadcast 1545 /* broadcast */
1545 if (adapter->MacFilter & MAC_BCAST) { 1546 if (adapter->MacFilter & MAC_BCAST) {
1546 adapter->Stats.DumbRcvBcastPkts++; 1547 adapter->Stats.DumbRcvBcastPkts++;
1547 adapter->Stats.DumbRcvBcastBytes += length; 1548 adapter->Stats.DumbRcvBcastBytes += length;
@@ -1550,7 +1551,7 @@ static bool sxg_mac_filter(p_adapter_t adapter, p_ether_header EtherHdr,
1550 return (TRUE); 1551 return (TRUE);
1551 } 1552 }
1552 } else { 1553 } else {
1553 // multicast 1554 /* multicast */
1554 if (adapter->MacFilter & MAC_ALLMCAST) { 1555 if (adapter->MacFilter & MAC_ALLMCAST) {
1555 adapter->Stats.DumbRcvMcastPkts++; 1556 adapter->Stats.DumbRcvMcastPkts++;
1556 adapter->Stats.DumbRcvMcastBytes += length; 1557 adapter->Stats.DumbRcvMcastBytes += length;
@@ -1580,9 +1581,9 @@ static bool sxg_mac_filter(p_adapter_t adapter, p_ether_header EtherHdr,
1580 } 1581 }
1581 } 1582 }
1582 } else if (adapter->MacFilter & MAC_DIRECTED) { 1583 } else if (adapter->MacFilter & MAC_DIRECTED) {
1583 // Not broadcast or multicast. Must be directed at us or 1584 /* Not broadcast or multicast. Must be directed at us or */
1584 // the card is in promiscuous mode. Either way, consider it 1585 /* the card is in promiscuous mode. Either way, consider it */
1585 // ours if MAC_DIRECTED is set 1586 /* ours if MAC_DIRECTED is set */
1586 adapter->Stats.DumbRcvUcastPkts++; 1587 adapter->Stats.DumbRcvUcastPkts++;
1587 adapter->Stats.DumbRcvUcastBytes += length; 1588 adapter->Stats.DumbRcvUcastBytes += length;
1588 adapter->Stats.DumbRcvPkts++; 1589 adapter->Stats.DumbRcvPkts++;
@@ -1590,7 +1591,7 @@ static bool sxg_mac_filter(p_adapter_t adapter, p_ether_header EtherHdr,
1590 return (TRUE); 1591 return (TRUE);
1591 } 1592 }
1592 if (adapter->MacFilter & MAC_PROMISC) { 1593 if (adapter->MacFilter & MAC_PROMISC) {
1593 // Whatever it is, keep it. 1594 /* Whatever it is, keep it. */
1594 adapter->Stats.DumbRcvPkts++; 1595 adapter->Stats.DumbRcvPkts++;
1595 adapter->Stats.DumbRcvBytes += length; 1596 adapter->Stats.DumbRcvBytes += length;
1596 return (TRUE); 1597 return (TRUE);
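The filter hunks above keep or drop a frame in a fixed order: broadcast, multicast, directed, then promiscuous as the catch-all, with a directed frame trusted to be ours because the hardware already matched the unicast address. A condensed version of that decision follows, using made-up F_* flag names rather than the driver's MAC_* constants and skipping the statistics updates.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Filter-mode bits; illustrative stand-ins for the driver's MAC_* flags. */
	#define F_BCAST    0x1u
	#define F_ALLMCAST 0x2u
	#define F_DIRECTED 0x4u
	#define F_PROMISC  0x8u

	static bool is_bcast(const uint8_t *da)
	{
		static const uint8_t b[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
		return memcmp(da, b, 6) == 0;
	}

	static bool is_mcast(const uint8_t *da) { return da[0] & 0x01; }

	/* Keep or drop a frame in the same order as sxg_mac_filter. */
	static bool mac_filter(unsigned filter, const uint8_t *da)
	{
		if (is_bcast(da))
			return filter & (F_BCAST | F_PROMISC);
		if (is_mcast(da))
			return filter & (F_ALLMCAST | F_PROMISC);
		if (filter & F_DIRECTED)
			return true;    /* hardware already matched our unicast address */
		return filter & F_PROMISC;
	}

	int main(void)
	{
		const uint8_t bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
		const uint8_t ucast[6] = { 0x00, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e };

		printf("broadcast kept: %d\n", mac_filter(F_BCAST, bcast));
		printf("unicast kept:   %d\n", mac_filter(F_DIRECTED, ucast));
		return 0;
	}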
@@ -1606,7 +1607,7 @@ static int sxg_register_interrupt(p_adapter_t adapter)
1606 1607
1607 DBG_ERROR 1608 DBG_ERROR
1608 ("sxg: %s AllocAdaptRsrcs adapter[%p] dev->irq[%x] %x\n", 1609 ("sxg: %s AllocAdaptRsrcs adapter[%p] dev->irq[%x] %x\n",
1609 __FUNCTION__, adapter, adapter->netdev->irq, NR_IRQS); 1610 __func__, adapter, adapter->netdev->irq, NR_IRQS);
1610 1611
1611 spin_unlock_irqrestore(&sxg_global.driver_lock, 1612 spin_unlock_irqrestore(&sxg_global.driver_lock,
1612 sxg_global.flags); 1613 sxg_global.flags);
@@ -1625,18 +1626,18 @@ static int sxg_register_interrupt(p_adapter_t adapter)
1625 } 1626 }
1626 adapter->intrregistered = 1; 1627 adapter->intrregistered = 1;
1627 adapter->IntRegistered = TRUE; 1628 adapter->IntRegistered = TRUE;
1628 // Disable RSS with line-based interrupts 1629 /* Disable RSS with line-based interrupts */
1629 adapter->MsiEnabled = FALSE; 1630 adapter->MsiEnabled = FALSE;
1630 adapter->RssEnabled = FALSE; 1631 adapter->RssEnabled = FALSE;
1631 DBG_ERROR("sxg: %s AllocAdaptRsrcs adapter[%p] dev->irq[%x]\n", 1632 DBG_ERROR("sxg: %s AllocAdaptRsrcs adapter[%p] dev->irq[%x]\n",
1632 __FUNCTION__, adapter, adapter->netdev->irq); 1633 __func__, adapter, adapter->netdev->irq);
1633 } 1634 }
1634 return (STATUS_SUCCESS); 1635 return (STATUS_SUCCESS);
1635} 1636}
1636 1637
1637static void sxg_deregister_interrupt(p_adapter_t adapter) 1638static void sxg_deregister_interrupt(p_adapter_t adapter)
1638{ 1639{
1639 DBG_ERROR("sxg: %s ENTER adapter[%p]\n", __FUNCTION__, adapter); 1640 DBG_ERROR("sxg: %s ENTER adapter[%p]\n", __func__, adapter);
1640#if XXXTODO 1641#if XXXTODO
1641 slic_init_cleanup(adapter); 1642 slic_init_cleanup(adapter);
1642#endif 1643#endif
@@ -1651,7 +1652,7 @@ static void sxg_deregister_interrupt(p_adapter_t adapter)
1651 adapter->rcv_broadcasts = 0; 1652 adapter->rcv_broadcasts = 0;
1652 adapter->rcv_multicasts = 0; 1653 adapter->rcv_multicasts = 0;
1653 adapter->rcv_unicasts = 0; 1654 adapter->rcv_unicasts = 0;
1654 DBG_ERROR("sxg: %s EXIT\n", __FUNCTION__); 1655 DBG_ERROR("sxg: %s EXIT\n", __func__);
1655} 1656}
1656 1657
1657/* 1658/*
@@ -1666,7 +1667,7 @@ static int sxg_if_init(p_adapter_t adapter)
1666 int status = 0; 1667 int status = 0;
1667 1668
1668 DBG_ERROR("sxg: %s (%s) ENTER states[%d:%d:%d] flags[%x]\n", 1669 DBG_ERROR("sxg: %s (%s) ENTER states[%d:%d:%d] flags[%x]\n",
1669 __FUNCTION__, adapter->netdev->name, 1670 __func__, adapter->netdev->name,
1670 adapter->queues_initialized, adapter->state, 1671 adapter->queues_initialized, adapter->state,
1671 adapter->linkstate, dev->flags); 1672 adapter->linkstate, dev->flags);
1672 1673
@@ -1680,7 +1681,7 @@ static int sxg_if_init(p_adapter_t adapter)
1680 adapter->devflags_prev = dev->flags; 1681 adapter->devflags_prev = dev->flags;
1681 adapter->macopts = MAC_DIRECTED; 1682 adapter->macopts = MAC_DIRECTED;
1682 if (dev->flags) { 1683 if (dev->flags) {
1683 DBG_ERROR("sxg: %s (%s) Set MAC options: ", __FUNCTION__, 1684 DBG_ERROR("sxg: %s (%s) Set MAC options: ", __func__,
1684 adapter->netdev->name); 1685 adapter->netdev->name);
1685 if (dev->flags & IFF_BROADCAST) { 1686 if (dev->flags & IFF_BROADCAST) {
1686 adapter->macopts |= MAC_BCAST; 1687 adapter->macopts |= MAC_BCAST;
@@ -1713,7 +1714,7 @@ static int sxg_if_init(p_adapter_t adapter)
1713 /* 1714 /*
1714 * clear any pending events, then enable interrupts 1715 * clear any pending events, then enable interrupts
1715 */ 1716 */
1716 DBG_ERROR("sxg: %s ENABLE interrupts(slic)\n", __FUNCTION__); 1717 DBG_ERROR("sxg: %s ENABLE interrupts(slic)\n", __func__);
1717 1718
1718 return (STATUS_SUCCESS); 1719 return (STATUS_SUCCESS);
1719} 1720}
@@ -1724,11 +1725,11 @@ static int sxg_entry_open(p_net_device dev)
1724 int status; 1725 int status;
1725 1726
1726 ASSERT(adapter); 1727 ASSERT(adapter);
1727 DBG_ERROR("sxg: %s adapter->activated[%d]\n", __FUNCTION__, 1728 DBG_ERROR("sxg: %s adapter->activated[%d]\n", __func__,
1728 adapter->activated); 1729 adapter->activated);
1729 DBG_ERROR 1730 DBG_ERROR
1730 ("sxg: %s (%s): [jiffies[%lx] cpu %d] dev[%p] adapt[%p] port[%d]\n", 1731 ("sxg: %s (%s): [jiffies[%lx] cpu %d] dev[%p] adapt[%p] port[%d]\n",
1731 __FUNCTION__, adapter->netdev->name, jiffies, smp_processor_id(), 1732 __func__, adapter->netdev->name, jiffies, smp_processor_id(),
1732 adapter->netdev, adapter, adapter->port); 1733 adapter->netdev, adapter, adapter->port);
1733 1734
1734 netif_stop_queue(adapter->netdev); 1735 netif_stop_queue(adapter->netdev);
@@ -1738,16 +1739,16 @@ static int sxg_entry_open(p_net_device dev)
1738 sxg_global.num_sxg_ports_active++; 1739 sxg_global.num_sxg_ports_active++;
1739 adapter->activated = 1; 1740 adapter->activated = 1;
1740 } 1741 }
1741 // Initialize the adapter 1742 /* Initialize the adapter */
1742 DBG_ERROR("sxg: %s ENTER sxg_initialize_adapter\n", __FUNCTION__); 1743 DBG_ERROR("sxg: %s ENTER sxg_initialize_adapter\n", __func__);
1743 status = sxg_initialize_adapter(adapter); 1744 status = sxg_initialize_adapter(adapter);
1744 DBG_ERROR("sxg: %s EXIT sxg_initialize_adapter status[%x]\n", 1745 DBG_ERROR("sxg: %s EXIT sxg_initialize_adapter status[%x]\n",
1745 __FUNCTION__, status); 1746 __func__, status);
1746 1747
1747 if (status == STATUS_SUCCESS) { 1748 if (status == STATUS_SUCCESS) {
1748 DBG_ERROR("sxg: %s ENTER sxg_if_init\n", __FUNCTION__); 1749 DBG_ERROR("sxg: %s ENTER sxg_if_init\n", __func__);
1749 status = sxg_if_init(adapter); 1750 status = sxg_if_init(adapter);
1750 DBG_ERROR("sxg: %s EXIT sxg_if_init status[%x]\n", __FUNCTION__, 1751 DBG_ERROR("sxg: %s EXIT sxg_if_init status[%x]\n", __func__,
1751 status); 1752 status);
1752 } 1753 }
1753 1754
@@ -1760,12 +1761,12 @@ static int sxg_entry_open(p_net_device dev)
1760 sxg_global.flags); 1761 sxg_global.flags);
1761 return (status); 1762 return (status);
1762 } 1763 }
1763 DBG_ERROR("sxg: %s ENABLE ALL INTERRUPTS\n", __FUNCTION__); 1764 DBG_ERROR("sxg: %s ENABLE ALL INTERRUPTS\n", __func__);
1764 1765
1765 // Enable interrupts 1766 /* Enable interrupts */
1766 SXG_ENABLE_ALL_INTERRUPTS(adapter); 1767 SXG_ENABLE_ALL_INTERRUPTS(adapter);
1767 1768
1768 DBG_ERROR("sxg: %s EXIT\n", __FUNCTION__); 1769 DBG_ERROR("sxg: %s EXIT\n", __func__);
1769 1770
1770 spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags); 1771 spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);
1771 return STATUS_SUCCESS; 1772 return STATUS_SUCCESS;
@@ -1779,27 +1780,27 @@ static void __devexit sxg_entry_remove(struct pci_dev *pcidev)
1779 p_adapter_t adapter = (p_adapter_t) netdev_priv(dev); 1780 p_adapter_t adapter = (p_adapter_t) netdev_priv(dev);
1780 1781
1781 ASSERT(adapter); 1782 ASSERT(adapter);
1782 DBG_ERROR("sxg: %s ENTER dev[%p] adapter[%p]\n", __FUNCTION__, dev, 1783 DBG_ERROR("sxg: %s ENTER dev[%p] adapter[%p]\n", __func__, dev,
1783 adapter); 1784 adapter);
1784 sxg_deregister_interrupt(adapter); 1785 sxg_deregister_interrupt(adapter);
1785 sxg_unmap_mmio_space(adapter); 1786 sxg_unmap_mmio_space(adapter);
1786 DBG_ERROR("sxg: %s unregister_netdev\n", __FUNCTION__); 1787 DBG_ERROR("sxg: %s unregister_netdev\n", __func__);
1787 unregister_netdev(dev); 1788 unregister_netdev(dev);
1788 1789
1789 mmio_start = pci_resource_start(pcidev, 0); 1790 mmio_start = pci_resource_start(pcidev, 0);
1790 mmio_len = pci_resource_len(pcidev, 0); 1791 mmio_len = pci_resource_len(pcidev, 0);
1791 1792
1792 DBG_ERROR("sxg: %s rel_region(0) start[%x] len[%x]\n", __FUNCTION__, 1793 DBG_ERROR("sxg: %s rel_region(0) start[%x] len[%x]\n", __func__,
1793 mmio_start, mmio_len); 1794 mmio_start, mmio_len);
1794 release_mem_region(mmio_start, mmio_len); 1795 release_mem_region(mmio_start, mmio_len);
1795 1796
1796 DBG_ERROR("sxg: %s iounmap dev->base_addr[%x]\n", __FUNCTION__, 1797 DBG_ERROR("sxg: %s iounmap dev->base_addr[%x]\n", __func__,
1797 (unsigned int)dev->base_addr); 1798 (unsigned int)dev->base_addr);
1798 iounmap((char *)dev->base_addr); 1799 iounmap((char *)dev->base_addr);
1799 1800
1800 DBG_ERROR("sxg: %s deallocate device\n", __FUNCTION__); 1801 DBG_ERROR("sxg: %s deallocate device\n", __func__);
1801 kfree(dev); 1802 kfree(dev);
1802 DBG_ERROR("sxg: %s EXIT\n", __FUNCTION__); 1803 DBG_ERROR("sxg: %s EXIT\n", __func__);
1803} 1804}
1804 1805
1805static int sxg_entry_halt(p_net_device dev) 1806static int sxg_entry_halt(p_net_device dev)
@@ -1807,17 +1808,17 @@ static int sxg_entry_halt(p_net_device dev)
1807 p_adapter_t adapter = (p_adapter_t) netdev_priv(dev); 1808 p_adapter_t adapter = (p_adapter_t) netdev_priv(dev);
1808 1809
1809 spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags); 1810 spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
1810 DBG_ERROR("sxg: %s (%s) ENTER\n", __FUNCTION__, dev->name); 1811 DBG_ERROR("sxg: %s (%s) ENTER\n", __func__, dev->name);
1811 1812
1812 netif_stop_queue(adapter->netdev); 1813 netif_stop_queue(adapter->netdev);
1813 adapter->state = ADAPT_DOWN; 1814 adapter->state = ADAPT_DOWN;
1814 adapter->linkstate = LINK_DOWN; 1815 adapter->linkstate = LINK_DOWN;
1815 adapter->devflags_prev = 0; 1816 adapter->devflags_prev = 0;
1816 DBG_ERROR("sxg: %s (%s) set adapter[%p] state to ADAPT_DOWN(%d)\n", 1817 DBG_ERROR("sxg: %s (%s) set adapter[%p] state to ADAPT_DOWN(%d)\n",
1817 __FUNCTION__, dev->name, adapter, adapter->state); 1818 __func__, dev->name, adapter, adapter->state);
1818 1819
1819 DBG_ERROR("sxg: %s (%s) EXIT\n", __FUNCTION__, dev->name); 1820 DBG_ERROR("sxg: %s (%s) EXIT\n", __func__, dev->name);
1820 DBG_ERROR("sxg: %s EXIT\n", __FUNCTION__); 1821 DBG_ERROR("sxg: %s EXIT\n", __func__);
1821 spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags); 1822 spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);
1822 return (STATUS_SUCCESS); 1823 return (STATUS_SUCCESS);
1823} 1824}
@@ -1825,11 +1826,11 @@ static int sxg_entry_halt(p_net_device dev)
1825static int sxg_ioctl(p_net_device dev, struct ifreq *rq, int cmd) 1826static int sxg_ioctl(p_net_device dev, struct ifreq *rq, int cmd)
1826{ 1827{
1827 ASSERT(rq); 1828 ASSERT(rq);
1828// DBG_ERROR("sxg: %s cmd[%x] rq[%p] dev[%p]\n", __FUNCTION__, cmd, rq, dev); 1829/* DBG_ERROR("sxg: %s cmd[%x] rq[%p] dev[%p]\n", __func__, cmd, rq, dev); */
1829 switch (cmd) { 1830 switch (cmd) {
1830 case SIOCSLICSETINTAGG: 1831 case SIOCSLICSETINTAGG:
1831 { 1832 {
1832// p_adapter_t adapter = (p_adapter_t) netdev_priv(dev); 1833/* p_adapter_t adapter = (p_adapter_t) netdev_priv(dev); */
1833 u32 data[7]; 1834 u32 data[7];
1834 u32 intagg; 1835 u32 intagg;
1835 1836
@@ -1841,12 +1842,12 @@ static int sxg_ioctl(p_net_device dev, struct ifreq *rq, int cmd)
1841 intagg = data[0]; 1842 intagg = data[0];
1842 printk(KERN_EMERG 1843 printk(KERN_EMERG
1843 "%s: set interrupt aggregation to %d\n", 1844 "%s: set interrupt aggregation to %d\n",
1844 __FUNCTION__, intagg); 1845 __func__, intagg);
1845 return 0; 1846 return 0;
1846 } 1847 }
1847 1848
1848 default: 1849 default:
1849// DBG_ERROR("sxg: %s UNSUPPORTED[%x]\n", __FUNCTION__, cmd); 1850/* DBG_ERROR("sxg: %s UNSUPPORTED[%x]\n", __func__, cmd); */
1850 return -EOPNOTSUPP; 1851 return -EOPNOTSUPP;
1851 } 1852 }
1852 return 0; 1853 return 0;
@@ -1870,15 +1871,15 @@ static int sxg_send_packets(struct sk_buff *skb, p_net_device dev)
1870 p_adapter_t adapter = (p_adapter_t) netdev_priv(dev); 1871 p_adapter_t adapter = (p_adapter_t) netdev_priv(dev);
1871 u32 status = STATUS_SUCCESS; 1872 u32 status = STATUS_SUCCESS;
1872 1873
1873 DBG_ERROR("sxg: %s ENTER sxg_send_packets skb[%p]\n", __FUNCTION__, 1874 DBG_ERROR("sxg: %s ENTER sxg_send_packets skb[%p]\n", __func__,
1874 skb); 1875 skb);
1875 // Check the adapter state 1876 /* Check the adapter state */
1876 switch (adapter->State) { 1877 switch (adapter->State) {
1877 case SXG_STATE_INITIALIZING: 1878 case SXG_STATE_INITIALIZING:
1878 case SXG_STATE_HALTED: 1879 case SXG_STATE_HALTED:
1879 case SXG_STATE_SHUTDOWN: 1880 case SXG_STATE_SHUTDOWN:
1880 ASSERT(0); // unexpected 1881 ASSERT(0); /* unexpected */
1881 // fall through 1882 /* fall through */
1882 case SXG_STATE_RESETTING: 1883 case SXG_STATE_RESETTING:
1883 case SXG_STATE_SLEEP: 1884 case SXG_STATE_SLEEP:
1884 case SXG_STATE_BOOTDIAG: 1885 case SXG_STATE_BOOTDIAG:
@@ -1898,23 +1899,23 @@ static int sxg_send_packets(struct sk_buff *skb, p_net_device dev)
1898 if (status != STATUS_SUCCESS) { 1899 if (status != STATUS_SUCCESS) {
1899 goto xmit_fail; 1900 goto xmit_fail;
1900 } 1901 }
1901 // send a packet 1902 /* send a packet */
1902 status = sxg_transmit_packet(adapter, skb); 1903 status = sxg_transmit_packet(adapter, skb);
1903 if (status == STATUS_SUCCESS) { 1904 if (status == STATUS_SUCCESS) {
1904 goto xmit_done; 1905 goto xmit_done;
1905 } 1906 }
1906 1907
1907 xmit_fail: 1908 xmit_fail:
1908 // reject & complete all the packets if they can't be sent 1909 /* reject & complete all the packets if they can't be sent */
1909 if (status != STATUS_SUCCESS) { 1910 if (status != STATUS_SUCCESS) {
1910#if XXXTODO 1911#if XXXTODO
1911// sxg_send_packets_fail(adapter, skb, status); 1912/* sxg_send_packets_fail(adapter, skb, status); */
1912#else 1913#else
1913 SXG_DROP_DUMB_SEND(adapter, skb); 1914 SXG_DROP_DUMB_SEND(adapter, skb);
1914 adapter->stats.tx_dropped++; 1915 adapter->stats.tx_dropped++;
1915#endif 1916#endif
1916 } 1917 }
1917 DBG_ERROR("sxg: %s EXIT sxg_send_packets status[%x]\n", __FUNCTION__, 1918 DBG_ERROR("sxg: %s EXIT sxg_send_packets status[%x]\n", __func__,
1918 status); 1919 status);
1919 1920
1920 xmit_done: 1921 xmit_done:
@@ -1940,12 +1941,12 @@ static int sxg_transmit_packet(p_adapter_t adapter, struct sk_buff *skb)
1940 void *SglBuffer; 1941 void *SglBuffer;
1941 u32 SglBufferLength; 1942 u32 SglBufferLength;
1942 1943
1943 // The vast majority of work is done in the shared 1944 /* The vast majority of work is done in the shared */
1944 // sxg_dumb_sgl routine. 1945 /* sxg_dumb_sgl routine. */
1945 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSend", 1946 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSend",
1946 adapter, skb, 0, 0); 1947 adapter, skb, 0, 0);
1947 1948
1948 // Allocate a SGL buffer 1949 /* Allocate a SGL buffer */
1949 SXG_GET_SGL_BUFFER(adapter, SxgSgl); 1950 SXG_GET_SGL_BUFFER(adapter, SxgSgl);
1950 if (!SxgSgl) { 1951 if (!SxgSgl) {
1951 adapter->Stats.NoSglBuf++; 1952 adapter->Stats.NoSglBuf++;
@@ -1963,9 +1964,9 @@ static int sxg_transmit_packet(p_adapter_t adapter, struct sk_buff *skb)
1963 SxgSgl->DumbPacket = skb; 1964 SxgSgl->DumbPacket = skb;
1964 pSgl = NULL; 1965 pSgl = NULL;
1965 1966
1966 // Call the common sxg_dumb_sgl routine to complete the send. 1967 /* Call the common sxg_dumb_sgl routine to complete the send. */
1967 sxg_dumb_sgl(pSgl, SxgSgl); 1968 sxg_dumb_sgl(pSgl, SxgSgl);
1968 // Return success; sxg_dumb_sgl (or something later) will complete it. 1969 /* Return success; sxg_dumb_sgl (or something later) will complete it. */
1969 return (STATUS_SUCCESS); 1970 return (STATUS_SUCCESS);
1970} 1971}
1971 1972
@@ -1983,39 +1984,39 @@ static void sxg_dumb_sgl(PSCATTER_GATHER_LIST pSgl, PSXG_SCATTER_GATHER SxgSgl)
1983{ 1984{
1984 p_adapter_t adapter = SxgSgl->adapter; 1985 p_adapter_t adapter = SxgSgl->adapter;
1985 struct sk_buff *skb = SxgSgl->DumbPacket; 1986 struct sk_buff *skb = SxgSgl->DumbPacket;
1986 // For now, all dumb-nic sends go on RSS queue zero 1987 /* For now, all dumb-nic sends go on RSS queue zero */
1987 PSXG_XMT_RING XmtRing = &adapter->XmtRings[0]; 1988 PSXG_XMT_RING XmtRing = &adapter->XmtRings[0];
1988 PSXG_RING_INFO XmtRingInfo = &adapter->XmtRingZeroInfo; 1989 PSXG_RING_INFO XmtRingInfo = &adapter->XmtRingZeroInfo;
1989 PSXG_CMD XmtCmd = NULL; 1990 PSXG_CMD XmtCmd = NULL;
1990// u32 Index = 0; 1991/* u32 Index = 0; */
1991 u32 DataLength = skb->len; 1992 u32 DataLength = skb->len;
1992// unsigned int BufLen; 1993/* unsigned int BufLen; */
1993// u32 SglOffset; 1994/* u32 SglOffset; */
1994 u64 phys_addr; 1995 u64 phys_addr;
1995 1996
1996 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSgl", 1997 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSgl",
1997 pSgl, SxgSgl, 0, 0); 1998 pSgl, SxgSgl, 0, 0);
1998 1999
1999 // Set aside a pointer to the sgl 2000 /* Set aside a pointer to the sgl */
2000 SxgSgl->pSgl = pSgl; 2001 SxgSgl->pSgl = pSgl;
2001 2002
2002 // Sanity check that our SGL format is as we expect. 2003 /* Sanity check that our SGL format is as we expect. */
2003 ASSERT(sizeof(SXG_X64_SGE) == sizeof(SCATTER_GATHER_ELEMENT)); 2004 ASSERT(sizeof(SXG_X64_SGE) == sizeof(SCATTER_GATHER_ELEMENT));
2004 // Shouldn't be a vlan tag on this frame 2005 /* Shouldn't be a vlan tag on this frame */
2005 ASSERT(SxgSgl->VlanTag.VlanTci == 0); 2006 ASSERT(SxgSgl->VlanTag.VlanTci == 0);
2006 ASSERT(SxgSgl->VlanTag.VlanTpid == 0); 2007 ASSERT(SxgSgl->VlanTag.VlanTpid == 0);
2007 2008
2008 // From here below we work with the SGL placed in our 2009 /* From here below we work with the SGL placed in our */
2009 // buffer. 2010 /* buffer. */
2010 2011
2011 SxgSgl->Sgl.NumberOfElements = 1; 2012 SxgSgl->Sgl.NumberOfElements = 1;
2012 2013
2013 // Grab the spinlock and acquire a command 2014 /* Grab the spinlock and acquire a command */
2014 spin_lock(&adapter->XmtZeroLock); 2015 spin_lock(&adapter->XmtZeroLock);
2015 SXG_GET_CMD(XmtRing, XmtRingInfo, XmtCmd, SxgSgl); 2016 SXG_GET_CMD(XmtRing, XmtRingInfo, XmtCmd, SxgSgl);
2016 if (XmtCmd == NULL) { 2017 if (XmtCmd == NULL) {
2017 // Call sxg_complete_slow_send to see if we can 2018 /* Call sxg_complete_slow_send to see if we can */
2018 // free up any XmtRingZero entries and then try again 2019 /* free up any XmtRingZero entries and then try again */
2019 spin_unlock(&adapter->XmtZeroLock); 2020 spin_unlock(&adapter->XmtZeroLock);
2020 sxg_complete_slow_send(adapter); 2021 sxg_complete_slow_send(adapter);
2021 spin_lock(&adapter->XmtZeroLock); 2022 spin_lock(&adapter->XmtZeroLock);
@@ -2027,10 +2028,10 @@ static void sxg_dumb_sgl(PSCATTER_GATHER_LIST pSgl, PSXG_SCATTER_GATHER SxgSgl)
2027 } 2028 }
2028 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbCmd", 2029 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbCmd",
2029 XmtCmd, XmtRingInfo->Head, XmtRingInfo->Tail, 0); 2030 XmtCmd, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
2030 // Update stats 2031 /* Update stats */
2031 adapter->Stats.DumbXmtPkts++; 2032 adapter->Stats.DumbXmtPkts++;
2032 adapter->Stats.DumbXmtBytes += DataLength; 2033 adapter->Stats.DumbXmtBytes += DataLength;
2033#if XXXTODO // Stats stuff 2034#if XXXTODO /* Stats stuff */
2034 if (SXG_MULTICAST_PACKET(EtherHdr)) { 2035 if (SXG_MULTICAST_PACKET(EtherHdr)) {
2035 if (SXG_BROADCAST_PACKET(EtherHdr)) { 2036 if (SXG_BROADCAST_PACKET(EtherHdr)) {
2036 adapter->Stats.DumbXmtBcastPkts++; 2037 adapter->Stats.DumbXmtBcastPkts++;
@@ -2044,8 +2045,8 @@ static void sxg_dumb_sgl(PSCATTER_GATHER_LIST pSgl, PSXG_SCATTER_GATHER SxgSgl)
2044 adapter->Stats.DumbXmtUcastBytes += DataLength; 2045 adapter->Stats.DumbXmtUcastBytes += DataLength;
2045 } 2046 }
2046#endif 2047#endif
2047 // Fill in the command 2048 /* Fill in the command */
2048 // Copy out the first SGE to the command and adjust for offset 2049 /* Copy out the first SGE to the command and adjust for offset */
2049 phys_addr = 2050 phys_addr =
2050 pci_map_single(adapter->pcidev, skb->data, skb->len, 2051 pci_map_single(adapter->pcidev, skb->data, skb->len,
2051 PCI_DMA_TODEVICE); 2052 PCI_DMA_TODEVICE);
@@ -2053,54 +2054,54 @@ static void sxg_dumb_sgl(PSCATTER_GATHER_LIST pSgl, PSXG_SCATTER_GATHER SxgSgl)
2053 XmtCmd->Buffer.FirstSgeAddress = XmtCmd->Buffer.FirstSgeAddress << 32; 2054 XmtCmd->Buffer.FirstSgeAddress = XmtCmd->Buffer.FirstSgeAddress << 32;
2054 XmtCmd->Buffer.FirstSgeAddress = 2055 XmtCmd->Buffer.FirstSgeAddress =
2055 XmtCmd->Buffer.FirstSgeAddress | SXG_GET_ADDR_LOW(phys_addr); 2056 XmtCmd->Buffer.FirstSgeAddress | SXG_GET_ADDR_LOW(phys_addr);
2056// XmtCmd->Buffer.FirstSgeAddress = SxgSgl->Sgl.Elements[Index].Address; 2057/* XmtCmd->Buffer.FirstSgeAddress = SxgSgl->Sgl.Elements[Index].Address; */
2057// XmtCmd->Buffer.FirstSgeAddress.LowPart += MdlOffset; 2058/* XmtCmd->Buffer.FirstSgeAddress.LowPart += MdlOffset; */
2058 XmtCmd->Buffer.FirstSgeLength = DataLength; 2059 XmtCmd->Buffer.FirstSgeLength = DataLength;
2059 // Set a pointer to the remaining SGL entries 2060 /* Set a pointer to the remaining SGL entries */
2060// XmtCmd->Sgl = SxgSgl->PhysicalAddress; 2061/* XmtCmd->Sgl = SxgSgl->PhysicalAddress; */
2061 // Advance the physical address of the SxgSgl structure to 2062 /* Advance the physical address of the SxgSgl structure to */
2062 // the second SGE 2063 /* the second SGE */
2063// SglOffset = (u32)((u32 *)(&SxgSgl->Sgl.Elements[Index+1]) - 2064/* SglOffset = (u32)((u32 *)(&SxgSgl->Sgl.Elements[Index+1]) - */
2064// (u32 *)SxgSgl); 2065/* (u32 *)SxgSgl); */
2065// XmtCmd->Sgl.LowPart += SglOffset; 2066/* XmtCmd->Sgl.LowPart += SglOffset; */
2066 XmtCmd->Buffer.SgeOffset = 0; 2067 XmtCmd->Buffer.SgeOffset = 0;
2067 // Note - TotalLength might be overwritten with MSS below.. 2068 /* Note - TotalLength might be overwritten with MSS below.. */
2068 XmtCmd->Buffer.TotalLength = DataLength; 2069 XmtCmd->Buffer.TotalLength = DataLength;
2069 XmtCmd->SgEntries = 1; //(ushort)(SxgSgl->Sgl.NumberOfElements - Index); 2070 XmtCmd->SgEntries = 1; /*(ushort)(SxgSgl->Sgl.NumberOfElements - Index); */
2070 XmtCmd->Flags = 0; 2071 XmtCmd->Flags = 0;
2071 // 2072 /* */
2072 // Advance transmit cmd descriptor by 1. 2073 /* Advance transmit cmd descriptor by 1. */
2073 // NOTE - See comments in SxgTcpOutput where we write 2074 /* NOTE - See comments in SxgTcpOutput where we write */
2074 // to the XmtCmd register regarding CPU ID values and/or 2075 /* to the XmtCmd register regarding CPU ID values and/or */
2075 // multiple commands. 2076 /* multiple commands. */
2076 // 2077 /* */
2077 // 2078 /* */
2078 WRITE_REG(adapter->UcodeRegs[0].XmtCmd, 1, TRUE); 2079 WRITE_REG(adapter->UcodeRegs[0].XmtCmd, 1, TRUE);
2079 // 2080 /* */
2080 // 2081 /* */
2081 adapter->Stats.XmtQLen++; // Stats within lock 2082 adapter->Stats.XmtQLen++; /* Stats within lock */
2082 spin_unlock(&adapter->XmtZeroLock); 2083 spin_unlock(&adapter->XmtZeroLock);
2083 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDumSgl2", 2084 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDumSgl2",
2084 XmtCmd, pSgl, SxgSgl, 0); 2085 XmtCmd, pSgl, SxgSgl, 0);
2085 return; 2086 return;
2086 2087
2087 abortcmd: 2088 abortcmd:
2088 // NOTE - Only jump to this label AFTER grabbing the 2089 /* NOTE - Only jump to this label AFTER grabbing the */
2089 // XmtZeroLock, and DO NOT DROP IT between the 2090 /* XmtZeroLock, and DO NOT DROP IT between the */
2090 // command allocation and the following abort. 2091 /* command allocation and the following abort. */
2091 if (XmtCmd) { 2092 if (XmtCmd) {
2092 SXG_ABORT_CMD(XmtRingInfo); 2093 SXG_ABORT_CMD(XmtRingInfo);
2093 } 2094 }
2094 spin_unlock(&adapter->XmtZeroLock); 2095 spin_unlock(&adapter->XmtZeroLock);
2095 2096
2096// failsgl: 2097/* failsgl: */
2097 // Jump to this label if failure occurs before the 2098 /* Jump to this label if failure occurs before the */
2098 // XmtZeroLock is grabbed 2099 /* XmtZeroLock is grabbed */
2099 adapter->Stats.XmtErrors++; 2100 adapter->Stats.XmtErrors++;
2100 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumSGFal", 2101 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumSGFal",
2101 pSgl, SxgSgl, XmtRingInfo->Head, XmtRingInfo->Tail); 2102 pSgl, SxgSgl, XmtRingInfo->Head, XmtRingInfo->Tail);
2102 2103
2103 SXG_COMPLETE_DUMB_SEND(adapter, SxgSgl->DumbPacket); // SxgSgl->DumbPacket is the skb 2104 SXG_COMPLETE_DUMB_SEND(adapter, SxgSgl->DumbPacket); /* SxgSgl->DumbPacket is the skb */
2104} 2105}
2105 2106
2106/*************************************************************** 2107/***************************************************************
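The sxg_dumb_sgl hunk above maps the skb, then rebuilds the 64-bit DMA address in the command descriptor by writing the high half, shifting it up and OR-ing in the low half. A small stand-alone illustration of that split and recombination is given below; xmt_cmd, addr_high and addr_low are trimmed stand-ins for the real SXG_CMD layout and the SXG_GET_ADDR_HIGH/LOW macros.

	#include <stdint.h>
	#include <stdio.h>

	/* Split a DMA address into the two 32-bit halves the descriptor wants. */
	static uint32_t addr_high(uint64_t a) { return (uint32_t)(a >> 32); }
	static uint32_t addr_low(uint64_t a)  { return (uint32_t)(a & 0xffffffffu); }

	struct xmt_cmd {                 /* trimmed-down stand-in for SXG_CMD */
		uint64_t first_sge_address;
		uint32_t first_sge_length;
		uint16_t sg_entries;
	};

	int main(void)
	{
		uint64_t phys = 0x0000000123456780ull; /* pretend pci_map_single() result */
		struct xmt_cmd cmd;

		/* Same recombination the driver does: high half, shift, OR in low half. */
		cmd.first_sge_address = addr_high(phys);
		cmd.first_sge_address <<= 32;
		cmd.first_sge_address |= addr_low(phys);
		cmd.first_sge_length = 1514;           /* frame length for a single SGE */
		cmd.sg_entries = 1;

		printf("sge addr %#llx len %u\n",
		       (unsigned long long)cmd.first_sge_address, cmd.first_sge_length);
		return 0;
	}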
@@ -2127,122 +2128,122 @@ static int sxg_initialize_link(p_adapter_t adapter)
2127 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "InitLink", 2128 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "InitLink",
2128 adapter, 0, 0, 0); 2129 adapter, 0, 0, 0);
2129 2130
2130 // Reset PHY and XGXS module 2131 /* Reset PHY and XGXS module */
2131 WRITE_REG(HwRegs->LinkStatus, LS_SERDES_POWER_DOWN, TRUE); 2132 WRITE_REG(HwRegs->LinkStatus, LS_SERDES_POWER_DOWN, TRUE);
2132 2133
2133 // Reset transmit configuration register 2134 /* Reset transmit configuration register */
2134 WRITE_REG(HwRegs->XmtConfig, XMT_CONFIG_RESET, TRUE); 2135 WRITE_REG(HwRegs->XmtConfig, XMT_CONFIG_RESET, TRUE);
2135 2136
2136 // Reset receive configuration register 2137 /* Reset receive configuration register */
2137 WRITE_REG(HwRegs->RcvConfig, RCV_CONFIG_RESET, TRUE); 2138 WRITE_REG(HwRegs->RcvConfig, RCV_CONFIG_RESET, TRUE);
2138 2139
2139 // Reset all MAC modules 2140 /* Reset all MAC modules */
2140 WRITE_REG(HwRegs->MacConfig0, AXGMAC_CFG0_SUB_RESET, TRUE); 2141 WRITE_REG(HwRegs->MacConfig0, AXGMAC_CFG0_SUB_RESET, TRUE);
2141 2142
2142 // Link address 0 2143 /* Link address 0 */
2143 // XXXTODO - This assumes the MAC address (0a:0b:0c:0d:0e:0f) 2144 /* XXXTODO - This assumes the MAC address (0a:0b:0c:0d:0e:0f) */
2144 // is stored with the first nibble (0a) in the byte 0 2145 /* is stored with the first nibble (0a) in the byte 0 */
2145 // of the Mac address. Possibly reverse? 2146 /* of the Mac address. Possibly reverse? */
2146 Value = *(u32 *) adapter->MacAddr; 2147 Value = *(u32 *) adapter->MacAddr;
2147 WRITE_REG(HwRegs->LinkAddress0Low, Value, TRUE); 2148 WRITE_REG(HwRegs->LinkAddress0Low, Value, TRUE);
2148 // also write the MAC address to the MAC. Endian is reversed. 2149 /* also write the MAC address to the MAC. Endian is reversed. */
2149 WRITE_REG(HwRegs->MacAddressLow, ntohl(Value), TRUE); 2150 WRITE_REG(HwRegs->MacAddressLow, ntohl(Value), TRUE);
2150 Value = (*(u16 *) & adapter->MacAddr[4] & 0x0000FFFF); 2151 Value = (*(u16 *) & adapter->MacAddr[4] & 0x0000FFFF);
2151 WRITE_REG(HwRegs->LinkAddress0High, Value | LINK_ADDRESS_ENABLE, TRUE); 2152 WRITE_REG(HwRegs->LinkAddress0High, Value | LINK_ADDRESS_ENABLE, TRUE);
2152 // endian swap for the MAC (put high bytes in bits [31:16], swapped) 2153 /* endian swap for the MAC (put high bytes in bits [31:16], swapped) */
2153 Value = ntohl(Value); 2154 Value = ntohl(Value);
2154 WRITE_REG(HwRegs->MacAddressHigh, Value, TRUE); 2155 WRITE_REG(HwRegs->MacAddressHigh, Value, TRUE);
2155 // Link address 1 2156 /* Link address 1 */
2156 WRITE_REG(HwRegs->LinkAddress1Low, 0, TRUE); 2157 WRITE_REG(HwRegs->LinkAddress1Low, 0, TRUE);
2157 WRITE_REG(HwRegs->LinkAddress1High, 0, TRUE); 2158 WRITE_REG(HwRegs->LinkAddress1High, 0, TRUE);
2158 // Link address 2 2159 /* Link address 2 */
2159 WRITE_REG(HwRegs->LinkAddress2Low, 0, TRUE); 2160 WRITE_REG(HwRegs->LinkAddress2Low, 0, TRUE);
2160 WRITE_REG(HwRegs->LinkAddress2High, 0, TRUE); 2161 WRITE_REG(HwRegs->LinkAddress2High, 0, TRUE);
2161 // Link address 3 2162 /* Link address 3 */
2162 WRITE_REG(HwRegs->LinkAddress3Low, 0, TRUE); 2163 WRITE_REG(HwRegs->LinkAddress3Low, 0, TRUE);
2163 WRITE_REG(HwRegs->LinkAddress3High, 0, TRUE); 2164 WRITE_REG(HwRegs->LinkAddress3High, 0, TRUE);
2164 2165
2165 // Enable MAC modules 2166 /* Enable MAC modules */
2166 WRITE_REG(HwRegs->MacConfig0, 0, TRUE); 2167 WRITE_REG(HwRegs->MacConfig0, 0, TRUE);
2167 2168
2168 // Configure MAC 2169 /* Configure MAC */
2169 WRITE_REG(HwRegs->MacConfig1, (AXGMAC_CFG1_XMT_PAUSE | // Allow sending of pause 2170 WRITE_REG(HwRegs->MacConfig1, (AXGMAC_CFG1_XMT_PAUSE | /* Allow sending of pause */
2170 AXGMAC_CFG1_XMT_EN | // Enable XMT 2171 AXGMAC_CFG1_XMT_EN | /* Enable XMT */
2171 AXGMAC_CFG1_RCV_PAUSE | // Enable detection of pause 2172 AXGMAC_CFG1_RCV_PAUSE | /* Enable detection of pause */
2172 AXGMAC_CFG1_RCV_EN | // Enable receive 2173 AXGMAC_CFG1_RCV_EN | /* Enable receive */
2173 AXGMAC_CFG1_SHORT_ASSERT | // short frame detection 2174 AXGMAC_CFG1_SHORT_ASSERT | /* short frame detection */
2174 AXGMAC_CFG1_CHECK_LEN | // Verify frame length 2175 AXGMAC_CFG1_CHECK_LEN | /* Verify frame length */
2175 AXGMAC_CFG1_GEN_FCS | // Generate FCS 2176 AXGMAC_CFG1_GEN_FCS | /* Generate FCS */
2176 AXGMAC_CFG1_PAD_64), // Pad frames to 64 bytes 2177 AXGMAC_CFG1_PAD_64), /* Pad frames to 64 bytes */
2177 TRUE); 2178 TRUE);
2178 2179
2179 // Set AXGMAC max frame length if jumbo. Not needed for standard MTU 2180 /* Set AXGMAC max frame length if jumbo. Not needed for standard MTU */
2180 if (adapter->JumboEnabled) { 2181 if (adapter->JumboEnabled) {
2181 WRITE_REG(HwRegs->MacMaxFrameLen, AXGMAC_MAXFRAME_JUMBO, TRUE); 2182 WRITE_REG(HwRegs->MacMaxFrameLen, AXGMAC_MAXFRAME_JUMBO, TRUE);
2182 } 2183 }
2183 // AMIIM Configuration Register - 2184 /* AMIIM Configuration Register - */
2184 // The value placed in the AXGMAC_AMIIM_CFG_HALF_CLOCK portion 2185 /* The value placed in the AXGMAC_AMIIM_CFG_HALF_CLOCK portion */
2185 // (bottom bits) of this register is used to determine the 2186 /* (bottom bits) of this register is used to determine the */
2186 // MDC frequency as specified in the A-XGMAC Design Document. 2187 /* MDC frequency as specified in the A-XGMAC Design Document. */
2187 // This value must not be zero. The following value (62 or 0x3E) 2188 /* This value must not be zero. The following value (62 or 0x3E) */
2188 // is based on our MAC transmit clock frequency (MTCLK) of 312.5 MHz. 2189 /* is based on our MAC transmit clock frequency (MTCLK) of 312.5 MHz. */
2189 // Given a maximum MDIO clock frequency of 2.5 MHz (see the PHY spec), 2190 /* Given a maximum MDIO clock frequency of 2.5 MHz (see the PHY spec), */
2190 // we get: 312.5/(2*(X+1)) < 2.5 ==> X = 62. 2191 /* we get: 312.5/(2*(X+1)) < 2.5 ==> X = 62. */
2191 // This value happens to be the default value for this register, 2192 /* This value happens to be the default value for this register, */
2192 // so we really don't have to do this. 2193 /* so we really don't have to do this. */
2193 WRITE_REG(HwRegs->MacAmiimConfig, 0x0000003E, TRUE); 2194 WRITE_REG(HwRegs->MacAmiimConfig, 0x0000003E, TRUE);
2194 2195
2195 // Power up and enable PHY and XAUI/XGXS/Serdes logic 2196 /* Power up and enable PHY and XAUI/XGXS/Serdes logic */
2196 WRITE_REG(HwRegs->LinkStatus, 2197 WRITE_REG(HwRegs->LinkStatus,
2197 (LS_PHY_CLR_RESET | 2198 (LS_PHY_CLR_RESET |
2198 LS_XGXS_ENABLE | 2199 LS_XGXS_ENABLE |
2199 LS_XGXS_CTL | LS_PHY_CLK_EN | LS_ATTN_ALARM), TRUE); 2200 LS_XGXS_CTL | LS_PHY_CLK_EN | LS_ATTN_ALARM), TRUE);
2200 DBG_ERROR("After Power Up and enable PHY in sxg_initialize_link\n"); 2201 DBG_ERROR("After Power Up and enable PHY in sxg_initialize_link\n");
2201 2202
2202 // Per information given by Aeluros, wait 100 ms after removing reset. 2203 /* Per information given by Aeluros, wait 100 ms after removing reset. */
2203 // It's not enough to wait for the self-clearing reset bit in reg 0 to clear. 2204 /* It's not enough to wait for the self-clearing reset bit in reg 0 to clear. */
2204 mdelay(100); 2205 mdelay(100);
2205 2206
2206 // Verify the PHY has come up by checking that the Reset bit has cleared. 2207 /* Verify the PHY has come up by checking that the Reset bit has cleared. */
2207 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, // PHY PMA/PMD module 2208 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */
2208 PHY_PMA_CONTROL1, // PMA/PMD control register 2209 PHY_PMA_CONTROL1, /* PMA/PMD control register */
2209 &Value); 2210 &Value);
2210 if (status != STATUS_SUCCESS) 2211 if (status != STATUS_SUCCESS)
2211 return (STATUS_FAILURE); 2212 return (STATUS_FAILURE);
2212 if (Value & PMA_CONTROL1_RESET) // reset complete if bit is 0 2213 if (Value & PMA_CONTROL1_RESET) /* reset complete if bit is 0 */
2213 return (STATUS_FAILURE); 2214 return (STATUS_FAILURE);
2214 2215
2215 // The SERDES should be initialized by now - confirm 2216 /* The SERDES should be initialized by now - confirm */
2216 READ_REG(HwRegs->LinkStatus, Value); 2217 READ_REG(HwRegs->LinkStatus, Value);
2217 if (Value & LS_SERDES_DOWN) // verify SERDES is initialized 2218 if (Value & LS_SERDES_DOWN) /* verify SERDES is initialized */
2218 return (STATUS_FAILURE); 2219 return (STATUS_FAILURE);
2219 2220
2220 // The XAUI link should also be up - confirm 2221 /* The XAUI link should also be up - confirm */
2221 if (!(Value & LS_XAUI_LINK_UP)) // verify XAUI link is up 2222 if (!(Value & LS_XAUI_LINK_UP)) /* verify XAUI link is up */
2222 return (STATUS_FAILURE); 2223 return (STATUS_FAILURE);
2223 2224
2224 // Initialize the PHY 2225 /* Initialize the PHY */
2225 status = sxg_phy_init(adapter); 2226 status = sxg_phy_init(adapter);
2226 if (status != STATUS_SUCCESS) 2227 if (status != STATUS_SUCCESS)
2227 return (STATUS_FAILURE); 2228 return (STATUS_FAILURE);
2228 2229
2229 // Enable the Link Alarm 2230 /* Enable the Link Alarm */
2230 status = sxg_write_mdio_reg(adapter, MIIM_DEV_PHY_PMA, // PHY PMA/PMD module 2231 status = sxg_write_mdio_reg(adapter, MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */
2231 LASI_CONTROL, // LASI control register 2232 LASI_CONTROL, /* LASI control register */
2232 LASI_CTL_LS_ALARM_ENABLE); // enable link alarm bit 2233 LASI_CTL_LS_ALARM_ENABLE); /* enable link alarm bit */
2233 if (status != STATUS_SUCCESS) 2234 if (status != STATUS_SUCCESS)
2234 return (STATUS_FAILURE); 2235 return (STATUS_FAILURE);
2235 2236
2236 // XXXTODO - temporary - verify bit is set 2237 /* XXXTODO - temporary - verify bit is set */
2237 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, // PHY PMA/PMD module 2238 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */
2238 LASI_CONTROL, // LASI control register 2239 LASI_CONTROL, /* LASI control register */
2239 &Value); 2240 &Value);
2240 if (status != STATUS_SUCCESS) 2241 if (status != STATUS_SUCCESS)
2241 return (STATUS_FAILURE); 2242 return (STATUS_FAILURE);
2242 if (!(Value & LASI_CTL_LS_ALARM_ENABLE)) { 2243 if (!(Value & LASI_CTL_LS_ALARM_ENABLE)) {
2243 DBG_ERROR("Error! LASI Control Alarm Enable bit not set!\n"); 2244 DBG_ERROR("Error! LASI Control Alarm Enable bit not set!\n");
2244 } 2245 }
2245 // Enable receive 2246 /* Enable receive */
2246 MaxFrame = adapter->JumboEnabled ? JUMBOMAXFRAME : ETHERMAXFRAME; 2247 MaxFrame = adapter->JumboEnabled ? JUMBOMAXFRAME : ETHERMAXFRAME;
2247 ConfigData = (RCV_CONFIG_ENABLE | 2248 ConfigData = (RCV_CONFIG_ENABLE |
2248 RCV_CONFIG_ENPARSE | 2249 RCV_CONFIG_ENPARSE |
@@ -2256,7 +2257,7 @@ static int sxg_initialize_link(p_adapter_t adapter)
2256 2257
2257 WRITE_REG(HwRegs->XmtConfig, XMT_CONFIG_ENABLE, TRUE); 2258 WRITE_REG(HwRegs->XmtConfig, XMT_CONFIG_ENABLE, TRUE);
2258 2259
2259 // Mark the link as down. We'll get a link event when it comes up. 2260 /* Mark the link as down. We'll get a link event when it comes up. */
2260 sxg_link_state(adapter, SXG_LINK_DOWN); 2261 sxg_link_state(adapter, SXG_LINK_DOWN);
2261 2262
2262 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XInitLnk", 2263 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XInitLnk",
@@ -2279,35 +2280,35 @@ static int sxg_phy_init(p_adapter_t adapter)
2279 PPHY_UCODE p; 2280 PPHY_UCODE p;
2280 int status; 2281 int status;
2281 2282
2282 DBG_ERROR("ENTER %s\n", __FUNCTION__); 2283 DBG_ERROR("ENTER %s\n", __func__);
2283 2284
2284 // Read a register to identify the PHY type 2285 /* Read a register to identify the PHY type */
2285 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, // PHY PMA/PMD module 2286 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */
2286 0xC205, // PHY ID register (?) 2287 0xC205, /* PHY ID register (?) */
2287 &Value); // XXXTODO - add def 2288 &Value); /* XXXTODO - add def */
2288 if (status != STATUS_SUCCESS) 2289 if (status != STATUS_SUCCESS)
2289 return (STATUS_FAILURE); 2290 return (STATUS_FAILURE);
2290 2291
2291 if (Value == 0x0012) { // 0x0012 == AEL2005C PHY(?) - XXXTODO - add def 2292 if (Value == 0x0012) { /* 0x0012 == AEL2005C PHY(?) - XXXTODO - add def */
2292 DBG_ERROR 2293 DBG_ERROR
2293 ("AEL2005C PHY detected. Downloading PHY microcode.\n"); 2294 ("AEL2005C PHY detected. Downloading PHY microcode.\n");
2294 2295
2295 // Initialize AEL2005C PHY and download PHY microcode 2296 /* Initialize AEL2005C PHY and download PHY microcode */
2296 for (p = PhyUcode; p->Addr != 0xFFFF; p++) { 2297 for (p = PhyUcode; p->Addr != 0xFFFF; p++) {
2297 if (p->Addr == 0) { 2298 if (p->Addr == 0) {
2298 // if address == 0, data == sleep time in ms 2299 /* if address == 0, data == sleep time in ms */
2299 mdelay(p->Data); 2300 mdelay(p->Data);
2300 } else { 2301 } else {
2301 // write the given data to the specified address 2302 /* write the given data to the specified address */
2302 status = sxg_write_mdio_reg(adapter, MIIM_DEV_PHY_PMA, // PHY PMA/PMD module 2303 status = sxg_write_mdio_reg(adapter, MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */
2303 p->Addr, // PHY address 2304 p->Addr, /* PHY address */
2304 p->Data); // PHY data 2305 p->Data); /* PHY data */
2305 if (status != STATUS_SUCCESS) 2306 if (status != STATUS_SUCCESS)
2306 return (STATUS_FAILURE); 2307 return (STATUS_FAILURE);
2307 } 2308 }
2308 } 2309 }
2309 } 2310 }
2310 DBG_ERROR("EXIT %s\n", __FUNCTION__); 2311 DBG_ERROR("EXIT %s\n", __func__);
2311 2312
2312 return (STATUS_SUCCESS); 2313 return (STATUS_SUCCESS);
2313} 2314}
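The download loop above walks a flat table of {Addr, Data} pairs: an Addr of 0xFFFF terminates the table, and an Addr of 0 turns the entry into a delay, with Data read as a sleep time in milliseconds. A minimal sketch of such a table, with an assumed entry layout and placeholder values rather than the real AEL2005C microcode, looks like this:

/* Sketch only: entry layout inferred from the download loop above; the real
 * PHY_UCODE definition and the AEL2005C values live in the driver headers. */
struct phy_ucode_entry {
	u32 Addr;	/* MDIO register address; 0 = delay entry, 0xFFFF = end of table */
	u32 Data;	/* register value, or sleep time in ms when Addr == 0 */
};

static const struct phy_ucode_entry phy_ucode_sketch[] = {
	{ 0xC017, 0xFEB0 },	/* illustrative register write */
	{ 0x0000, 0x0032 },	/* Addr 0: sleep 50 ms before the next write */
	{ 0xC013, 0xF341 },	/* another illustrative register write */
	{ 0xFFFF, 0x0000 },	/* terminator: stops the download loop */
};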
@@ -2330,42 +2331,42 @@ static void sxg_link_event(p_adapter_t adapter)
2330 2331
2331 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "LinkEvnt", 2332 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "LinkEvnt",
2332 adapter, 0, 0, 0); 2333 adapter, 0, 0, 0);
2333 DBG_ERROR("ENTER %s\n", __FUNCTION__); 2334 DBG_ERROR("ENTER %s\n", __func__);
2334 2335
2335 // Check the Link Status register. We should have a Link Alarm. 2336 /* Check the Link Status register. We should have a Link Alarm. */
2336 READ_REG(HwRegs->LinkStatus, Value); 2337 READ_REG(HwRegs->LinkStatus, Value);
2337 if (Value & LS_LINK_ALARM) { 2338 if (Value & LS_LINK_ALARM) {
2338 // We got a Link Status alarm. First, pause to let the 2339 /* We got a Link Status alarm. First, pause to let the */
2339 // link state settle (it can bounce a number of times) 2340 /* link state settle (it can bounce a number of times) */
2340 mdelay(10); 2341 mdelay(10);
2341 2342
2342 // Now clear the alarm by reading the LASI status register. 2343 /* Now clear the alarm by reading the LASI status register. */
2343 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, // PHY PMA/PMD module 2344 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */
2344 LASI_STATUS, // LASI status register 2345 LASI_STATUS, /* LASI status register */
2345 &Value); 2346 &Value);
2346 if (status != STATUS_SUCCESS) { 2347 if (status != STATUS_SUCCESS) {
2347 DBG_ERROR("Error reading LASI Status MDIO register!\n"); 2348 DBG_ERROR("Error reading LASI Status MDIO register!\n");
2348 sxg_link_state(adapter, SXG_LINK_DOWN); 2349 sxg_link_state(adapter, SXG_LINK_DOWN);
2349// ASSERT(0); 2350/* ASSERT(0); */
2350 } 2351 }
2351 ASSERT(Value & LASI_STATUS_LS_ALARM); 2352 ASSERT(Value & LASI_STATUS_LS_ALARM);
2352 2353
2353 // Now get and set the link state 2354 /* Now get and set the link state */
2354 LinkState = sxg_get_link_state(adapter); 2355 LinkState = sxg_get_link_state(adapter);
2355 sxg_link_state(adapter, LinkState); 2356 sxg_link_state(adapter, LinkState);
2356 DBG_ERROR("SXG: Link Alarm occurred. Link is %s\n", 2357 DBG_ERROR("SXG: Link Alarm occurred. Link is %s\n",
2357 ((LinkState == SXG_LINK_UP) ? "UP" : "DOWN")); 2358 ((LinkState == SXG_LINK_UP) ? "UP" : "DOWN"));
2358 } else { 2359 } else {
2359 // XXXTODO - Assuming Link Attention is only being generated for the 2360 /* XXXTODO - Assuming Link Attention is only being generated for the */
2360 // Link Alarm pin (and not for a XAUI Link Status change), then it's 2361 /* Link Alarm pin (and not for a XAUI Link Status change), then it's */
2361 // impossible to get here. Yet we've gotten here twice (under extreme 2362 /* impossible to get here. Yet we've gotten here twice (under extreme */
2362 // conditions - bouncing the link up and down many times a second). 2363 /* conditions - bouncing the link up and down many times a second). */
2363 // Needs further investigation. 2364 /* Needs further investigation. */
2364 DBG_ERROR("SXG: sxg_link_event: Can't get here!\n"); 2365 DBG_ERROR("SXG: sxg_link_event: Can't get here!\n");
2365 DBG_ERROR("SXG: Link Status == 0x%08X.\n", Value); 2366 DBG_ERROR("SXG: Link Status == 0x%08X.\n", Value);
2366// ASSERT(0); 2367/* ASSERT(0); */
2367 } 2368 }
2368 DBG_ERROR("EXIT %s\n", __FUNCTION__); 2369 DBG_ERROR("EXIT %s\n", __func__);
2369 2370
2370} 2371}
2371 2372
@@ -2383,50 +2384,50 @@ static SXG_LINK_STATE sxg_get_link_state(p_adapter_t adapter)
2383 int status; 2384 int status;
2384 u32 Value; 2385 u32 Value;
2385 2386
2386 DBG_ERROR("ENTER %s\n", __FUNCTION__); 2387 DBG_ERROR("ENTER %s\n", __func__);
2387 2388
2388 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "GetLink", 2389 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "GetLink",
2389 adapter, 0, 0, 0); 2390 adapter, 0, 0, 0);
2390 2391
2391 // Per the Xenpak spec (and the IEEE 10Gb spec?), the link is up if 2392 /* Per the Xenpak spec (and the IEEE 10Gb spec?), the link is up if */
2392 // the following 3 bits (from 3 different MDIO registers) are all true. 2393 /* the following 3 bits (from 3 different MDIO registers) are all true. */
2393 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, // PHY PMA/PMD module 2394 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */
2394 PHY_PMA_RCV_DET, // PMA/PMD Receive Signal Detect register 2395 PHY_PMA_RCV_DET, /* PMA/PMD Receive Signal Detect register */
2395 &Value); 2396 &Value);
2396 if (status != STATUS_SUCCESS) 2397 if (status != STATUS_SUCCESS)
2397 goto bad; 2398 goto bad;
2398 2399
2399 // If PMA/PMD receive signal detect is 0, then the link is down 2400 /* If PMA/PMD receive signal detect is 0, then the link is down */
2400 if (!(Value & PMA_RCV_DETECT)) 2401 if (!(Value & PMA_RCV_DETECT))
2401 return (SXG_LINK_DOWN); 2402 return (SXG_LINK_DOWN);
2402 2403
2403 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PCS, // PHY PCS module 2404 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PCS, /* PHY PCS module */
2404 PHY_PCS_10G_STATUS1, // PCS 10GBASE-R Status 1 register 2405 PHY_PCS_10G_STATUS1, /* PCS 10GBASE-R Status 1 register */
2405 &Value); 2406 &Value);
2406 if (status != STATUS_SUCCESS) 2407 if (status != STATUS_SUCCESS)
2407 goto bad; 2408 goto bad;
2408 2409
2409 // If PCS is not locked to receive blocks, then the link is down 2410 /* If PCS is not locked to receive blocks, then the link is down */
2410 if (!(Value & PCS_10B_BLOCK_LOCK)) 2411 if (!(Value & PCS_10B_BLOCK_LOCK))
2411 return (SXG_LINK_DOWN); 2412 return (SXG_LINK_DOWN);
2412 2413
2413 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_XS, // PHY XS module 2414 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_XS, /* PHY XS module */
2414 PHY_XS_LANE_STATUS, // XS Lane Status register 2415 PHY_XS_LANE_STATUS, /* XS Lane Status register */
2415 &Value); 2416 &Value);
2416 if (status != STATUS_SUCCESS) 2417 if (status != STATUS_SUCCESS)
2417 goto bad; 2418 goto bad;
2418 2419
2419 // If XS transmit lanes are not aligned, then the link is down 2420 /* If XS transmit lanes are not aligned, then the link is down */
2420 if (!(Value & XS_LANE_ALIGN)) 2421 if (!(Value & XS_LANE_ALIGN))
2421 return (SXG_LINK_DOWN); 2422 return (SXG_LINK_DOWN);
2422 2423
2423 // All 3 bits are true, so the link is up 2424 /* All 3 bits are true, so the link is up */
2424 DBG_ERROR("EXIT %s\n", __FUNCTION__); 2425 DBG_ERROR("EXIT %s\n", __func__);
2425 2426
2426 return (SXG_LINK_UP); 2427 return (SXG_LINK_UP);
2427 2428
2428 bad: 2429 bad:
2429 // An error occurred reading an MDIO register. This shouldn't happen. 2430 /* An error occurred reading an MDIO register. This shouldn't happen. */
2430 DBG_ERROR("Error reading an MDIO register!\n"); 2431 DBG_ERROR("Error reading an MDIO register!\n");
2431 ASSERT(0); 2432 ASSERT(0);
2432 return (SXG_LINK_DOWN); 2433 return (SXG_LINK_DOWN);
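The same three checks could be expressed in a table-driven form. The sketch below reuses sxg_read_mdio_reg and the register/bit constants already used above and, like the original, reports link-down if any bit is clear; the only shortcut is that an MDIO read failure is folded into the link-down return instead of taking the ASSERT in the bad: path.

/* Sketch only: table-driven equivalent of the three link-up checks above. */
static const struct {
	u32 Dev;	/* MDIO device: PMA/PMD, PCS or XS */
	u32 Reg;	/* register within that device */
	u32 Bit;	/* bit that must be set for the link to be up */
} LinkChecks[] = {
	{ MIIM_DEV_PHY_PMA, PHY_PMA_RCV_DET,     PMA_RCV_DETECT     },
	{ MIIM_DEV_PHY_PCS, PHY_PCS_10G_STATUS1, PCS_10B_BLOCK_LOCK },
	{ MIIM_DEV_PHY_XS,  PHY_XS_LANE_STATUS,  XS_LANE_ALIGN      },
};

static SXG_LINK_STATE sxg_get_link_state_sketch(p_adapter_t adapter)
{
	u32 Value;
	int i;

	for (i = 0; i < ARRAY_SIZE(LinkChecks); i++) {
		if (sxg_read_mdio_reg(adapter, LinkChecks[i].Dev,
				      LinkChecks[i].Reg, &Value) != STATUS_SUCCESS)
			return (SXG_LINK_DOWN);	/* read failure treated as link down */
		if (!(Value & LinkChecks[i].Bit))
			return (SXG_LINK_DOWN);	/* any clear bit means the link is down */
	}
	return (SXG_LINK_UP);			/* all three bits set */
}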
@@ -2437,11 +2438,11 @@ static void sxg_indicate_link_state(p_adapter_t adapter,
2437{ 2438{
2438 if (adapter->LinkState == SXG_LINK_UP) { 2439 if (adapter->LinkState == SXG_LINK_UP) {
2439 DBG_ERROR("%s: LINK now UP, call netif_start_queue\n", 2440 DBG_ERROR("%s: LINK now UP, call netif_start_queue\n",
2440 __FUNCTION__); 2441 __func__);
2441 netif_start_queue(adapter->netdev); 2442 netif_start_queue(adapter->netdev);
2442 } else { 2443 } else {
2443 DBG_ERROR("%s: LINK now DOWN, call netif_stop_queue\n", 2444 DBG_ERROR("%s: LINK now DOWN, call netif_stop_queue\n",
2444 __FUNCTION__); 2445 __func__);
2445 netif_stop_queue(adapter->netdev); 2446 netif_stop_queue(adapter->netdev);
2446 } 2447 }
2447} 2448}
@@ -2464,23 +2465,23 @@ static void sxg_link_state(p_adapter_t adapter, SXG_LINK_STATE LinkState)
2464 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "LnkINDCT", 2465 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "LnkINDCT",
2465 adapter, LinkState, adapter->LinkState, adapter->State); 2466 adapter, LinkState, adapter->LinkState, adapter->State);
2466 2467
2467 DBG_ERROR("ENTER %s\n", __FUNCTION__); 2468 DBG_ERROR("ENTER %s\n", __func__);
2468 2469
2469 // Hold the adapter lock during this routine. Maybe move 2470 /* Hold the adapter lock during this routine. Maybe move */
2470 // the lock to the caller. 2471 /* the lock to the caller. */
2471 spin_lock(&adapter->AdapterLock); 2472 spin_lock(&adapter->AdapterLock);
2472 if (LinkState == adapter->LinkState) { 2473 if (LinkState == adapter->LinkState) {
2473 // Nothing changed.. 2474 /* Nothing changed.. */
2474 spin_unlock(&adapter->AdapterLock); 2475 spin_unlock(&adapter->AdapterLock);
2475 DBG_ERROR("EXIT #0 %s\n", __FUNCTION__); 2476 DBG_ERROR("EXIT #0 %s\n", __func__);
2476 return; 2477 return;
2477 } 2478 }
2478 // Save the adapter state 2479 /* Save the adapter state */
2479 adapter->LinkState = LinkState; 2480 adapter->LinkState = LinkState;
2480 2481
2481 // Drop the lock and indicate link state 2482 /* Drop the lock and indicate link state */
2482 spin_unlock(&adapter->AdapterLock); 2483 spin_unlock(&adapter->AdapterLock);
2483 DBG_ERROR("EXIT #1 %s\n", __FUNCTION__); 2484 DBG_ERROR("EXIT #1 %s\n", __func__);
2484 2485
2485 sxg_indicate_link_state(adapter, LinkState); 2486 sxg_indicate_link_state(adapter, LinkState);
2486} 2487}
@@ -2501,76 +2502,76 @@ static int sxg_write_mdio_reg(p_adapter_t adapter,
2501 u32 DevAddr, u32 RegAddr, u32 Value) 2502 u32 DevAddr, u32 RegAddr, u32 Value)
2502{ 2503{
2503 PSXG_HW_REGS HwRegs = adapter->HwRegs; 2504 PSXG_HW_REGS HwRegs = adapter->HwRegs;
2504 u32 AddrOp; // Address operation (written to MIIM field reg) 2505 u32 AddrOp; /* Address operation (written to MIIM field reg) */
2505 u32 WriteOp; // Write operation (written to MIIM field reg) 2506 u32 WriteOp; /* Write operation (written to MIIM field reg) */
2506 u32 Cmd; // Command (written to MIIM command reg) 2507 u32 Cmd; /* Command (written to MIIM command reg) */
2507 u32 ValueRead; 2508 u32 ValueRead;
2508 u32 Timeout; 2509 u32 Timeout;
2509 2510
2510// DBG_ERROR("ENTER %s\n", __FUNCTION__); 2511/* DBG_ERROR("ENTER %s\n", __func__); */
2511 2512
2512 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "WrtMDIO", 2513 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "WrtMDIO",
2513 adapter, 0, 0, 0); 2514 adapter, 0, 0, 0);
2514 2515
2515 // Ensure values don't exceed field width 2516 /* Ensure values don't exceed field width */
2516 DevAddr &= 0x001F; // 5-bit field 2517 DevAddr &= 0x001F; /* 5-bit field */
2517 RegAddr &= 0xFFFF; // 16-bit field 2518 RegAddr &= 0xFFFF; /* 16-bit field */
2518 Value &= 0xFFFF; // 16-bit field 2519 Value &= 0xFFFF; /* 16-bit field */
2519 2520
2520 // Set MIIM field register bits for an MIIM address operation 2521 /* Set MIIM field register bits for an MIIM address operation */
2521 AddrOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) | 2522 AddrOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
2522 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) | 2523 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
2523 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) | 2524 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
2524 (MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT) | RegAddr; 2525 (MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT) | RegAddr;
2525 2526
2526 // Set MIIM field register bits for an MIIM write operation 2527 /* Set MIIM field register bits for an MIIM write operation */
2527 WriteOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) | 2528 WriteOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
2528 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) | 2529 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
2529 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) | 2530 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
2530 (MIIM_OP_WRITE << AXGMAC_AMIIM_FIELD_OP_SHIFT) | Value; 2531 (MIIM_OP_WRITE << AXGMAC_AMIIM_FIELD_OP_SHIFT) | Value;
2531 2532
2532 // Set MIIM command register bits to execute an MIIM command 2533 /* Set MIIM command register bits to execute an MIIM command */
2533 Cmd = AXGMAC_AMIIM_CMD_START | AXGMAC_AMIIM_CMD_10G_OPERATION; 2534 Cmd = AXGMAC_AMIIM_CMD_START | AXGMAC_AMIIM_CMD_10G_OPERATION;
2534 2535
2535 // Reset the command register command bit (in case it's not 0) 2536 /* Reset the command register command bit (in case it's not 0) */
2536 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE); 2537 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
2537 2538
2538 // MIIM write to set the address of the specified MDIO register 2539 /* MIIM write to set the address of the specified MDIO register */
2539 WRITE_REG(HwRegs->MacAmiimField, AddrOp, TRUE); 2540 WRITE_REG(HwRegs->MacAmiimField, AddrOp, TRUE);
2540 2541
2541 // Write to MIIM Command Register to execute the address operation 2542 /* Write to MIIM Command Register to execute the address operation */
2542 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE); 2543 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
2543 2544
2544 // Poll AMIIM Indicator register to wait for completion 2545 /* Poll AMIIM Indicator register to wait for completion */
2545 Timeout = SXG_LINK_TIMEOUT; 2546 Timeout = SXG_LINK_TIMEOUT;
2546 do { 2547 do {
2547 udelay(100); // Timeout in 100us units 2548 udelay(100); /* Timeout in 100us units */
2548 READ_REG(HwRegs->MacAmiimIndicator, ValueRead); 2549 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
2549 if (--Timeout == 0) { 2550 if (--Timeout == 0) {
2550 return (STATUS_FAILURE); 2551 return (STATUS_FAILURE);
2551 } 2552 }
2552 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY); 2553 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
2553 2554
2554 // Reset the command register command bit 2555 /* Reset the command register command bit */
2555 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE); 2556 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
2556 2557
2557 // MIIM write to set up an MDIO write operation 2558 /* MIIM write to set up an MDIO write operation */
2558 WRITE_REG(HwRegs->MacAmiimField, WriteOp, TRUE); 2559 WRITE_REG(HwRegs->MacAmiimField, WriteOp, TRUE);
2559 2560
2560 // Write to MIIM Command Register to execute the write operation 2561 /* Write to MIIM Command Register to execute the write operation */
2561 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE); 2562 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
2562 2563
2563 // Poll AMIIM Indicator register to wait for completion 2564 /* Poll AMIIM Indicator register to wait for completion */
2564 Timeout = SXG_LINK_TIMEOUT; 2565 Timeout = SXG_LINK_TIMEOUT;
2565 do { 2566 do {
2566 udelay(100); // Timeout in 100us units 2567 udelay(100); /* Timeout in 100us units */
2567 READ_REG(HwRegs->MacAmiimIndicator, ValueRead); 2568 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
2568 if (--Timeout == 0) { 2569 if (--Timeout == 0) {
2569 return (STATUS_FAILURE); 2570 return (STATUS_FAILURE);
2570 } 2571 }
2571 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY); 2572 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
2572 2573
2573// DBG_ERROR("EXIT %s\n", __FUNCTION__); 2574/* DBG_ERROR("EXIT %s\n", __func__); */
2574 2575
2575 return (STATUS_SUCCESS); 2576 return (STATUS_SUCCESS);
2576} 2577}
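The address and write phases above each finish with the same poll-until-idle loop on the AMIIM indicator register, and the loop appears twice more in sxg_read_mdio_reg below. A small helper built from the same macros and constants (a sketch, not a function in this driver) would factor it out:

/* Sketch only: common AMIIM completion poll shared by the MDIO access paths. */
static int sxg_wait_amiim_idle(p_adapter_t adapter)
{
	PSXG_HW_REGS HwRegs = adapter->HwRegs;
	u32 ValueRead;
	u32 Timeout = SXG_LINK_TIMEOUT;

	do {
		udelay(100);			/* timeout counted in 100us units */
		READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
		if (--Timeout == 0)
			return (STATUS_FAILURE);	/* hardware never cleared BUSY */
	} while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);

	return (STATUS_SUCCESS);
}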
@@ -2591,110 +2592,78 @@ static int sxg_read_mdio_reg(p_adapter_t adapter,
2591 u32 DevAddr, u32 RegAddr, u32 *pValue) 2592 u32 DevAddr, u32 RegAddr, u32 *pValue)
2592{ 2593{
2593 PSXG_HW_REGS HwRegs = adapter->HwRegs; 2594 PSXG_HW_REGS HwRegs = adapter->HwRegs;
2594 u32 AddrOp; // Address operation (written to MIIM field reg) 2595 u32 AddrOp; /* Address operation (written to MIIM field reg) */
2595 u32 ReadOp; // Read operation (written to MIIM field reg) 2596 u32 ReadOp; /* Read operation (written to MIIM field reg) */
2596 u32 Cmd; // Command (written to MIIM command reg) 2597 u32 Cmd; /* Command (written to MIIM command reg) */
2597 u32 ValueRead; 2598 u32 ValueRead;
2598 u32 Timeout; 2599 u32 Timeout;
2599 2600
2600 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "WrtMDIO", 2601 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "WrtMDIO",
2601 adapter, 0, 0, 0); 2602 adapter, 0, 0, 0);
2602// DBG_ERROR("ENTER %s\n", __FUNCTION__); 2603/* DBG_ERROR("ENTER %s\n", __func__); */
2603 2604
2604 // Ensure values don't exceed field width 2605 /* Ensure values don't exceed field width */
2605 DevAddr &= 0x001F; // 5-bit field 2606 DevAddr &= 0x001F; /* 5-bit field */
2606 RegAddr &= 0xFFFF; // 16-bit field 2607 RegAddr &= 0xFFFF; /* 16-bit field */
2607 2608
2608 // Set MIIM field register bits for an MIIM address operation 2609 /* Set MIIM field register bits for an MIIM address operation */
2609 AddrOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) | 2610 AddrOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
2610 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) | 2611 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
2611 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) | 2612 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
2612 (MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT) | RegAddr; 2613 (MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT) | RegAddr;
2613 2614
2614 // Set MIIM field register bits for an MIIM read operation 2615 /* Set MIIM field register bits for an MIIM read operation */
2615 ReadOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) | 2616 ReadOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
2616 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) | 2617 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
2617 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) | 2618 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
2618 (MIIM_OP_READ << AXGMAC_AMIIM_FIELD_OP_SHIFT); 2619 (MIIM_OP_READ << AXGMAC_AMIIM_FIELD_OP_SHIFT);
2619 2620
2620 // Set MIIM command register bits to execute an MIIM command 2621 /* Set MIIM command register bits to execute an MIIM command */
2621 Cmd = AXGMAC_AMIIM_CMD_START | AXGMAC_AMIIM_CMD_10G_OPERATION; 2622 Cmd = AXGMAC_AMIIM_CMD_START | AXGMAC_AMIIM_CMD_10G_OPERATION;
2622 2623
2623 // Reset the command register command bit (in case it's not 0) 2624 /* Reset the command register command bit (in case it's not 0) */
2624 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE); 2625 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
2625 2626
2626 // MIIM write to set the address of the specified MDIO register 2627 /* MIIM write to set the address of the specified MDIO register */
2627 WRITE_REG(HwRegs->MacAmiimField, AddrOp, TRUE); 2628 WRITE_REG(HwRegs->MacAmiimField, AddrOp, TRUE);
2628 2629
2629 // Write to MIIM Command Register to execute the address operation 2630 /* Write to MIIM Command Register to execute the address operation */
2630 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE); 2631 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
2631 2632
2632 // Poll AMIIM Indicator register to wait for completion 2633 /* Poll AMIIM Indicator register to wait for completion */
2633 Timeout = SXG_LINK_TIMEOUT; 2634 Timeout = SXG_LINK_TIMEOUT;
2634 do { 2635 do {
2635 udelay(100); // Timeout in 100us units 2636 udelay(100); /* Timeout in 100us units */
2636 READ_REG(HwRegs->MacAmiimIndicator, ValueRead); 2637 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
2637 if (--Timeout == 0) { 2638 if (--Timeout == 0) {
2638 return (STATUS_FAILURE); 2639 return (STATUS_FAILURE);
2639 } 2640 }
2640 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY); 2641 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
2641 2642
2642 // Reset the command register command bit 2643 /* Reset the command register command bit */
2643 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE); 2644 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
2644 2645
2645 // MIIM write to set up an MDIO register read operation 2646 /* MIIM write to set up an MDIO register read operation */
2646 WRITE_REG(HwRegs->MacAmiimField, ReadOp, TRUE); 2647 WRITE_REG(HwRegs->MacAmiimField, ReadOp, TRUE);
2647 2648
2648 // Write to MIIM Command Register to execute the read operation 2649 /* Write to MIIM Command Register to execute the read operation */
2649 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE); 2650 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
2650 2651
2651 // Poll AMIIM Indicator register to wait for completion 2652 /* Poll AMIIM Indicator register to wait for completion */
2652 Timeout = SXG_LINK_TIMEOUT; 2653 Timeout = SXG_LINK_TIMEOUT;
2653 do { 2654 do {
2654 udelay(100); // Timeout in 100us units 2655 udelay(100); /* Timeout in 100us units */
2655 READ_REG(HwRegs->MacAmiimIndicator, ValueRead); 2656 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
2656 if (--Timeout == 0) { 2657 if (--Timeout == 0) {
2657 return (STATUS_FAILURE); 2658 return (STATUS_FAILURE);
2658 } 2659 }
2659 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY); 2660 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
2660 2661
2661 // Read the MDIO register data back from the field register 2662 /* Read the MDIO register data back from the field register */
2662 READ_REG(HwRegs->MacAmiimField, *pValue); 2663 READ_REG(HwRegs->MacAmiimField, *pValue);
2663 *pValue &= 0xFFFF; // data is in the lower 16 bits 2664 *pValue &= 0xFFFF; /* data is in the lower 16 bits */
2664 2665
2665// DBG_ERROR("EXIT %s\n", __FUNCTION__); 2666/* DBG_ERROR("EXIT %s\n", __func__); */
2666
2667 return (STATUS_SUCCESS);
2668}
2669
2670/*
2671 * Allocate a mcast_address structure to hold the multicast address.
2672 * Link it in.
2673 */
2674static int sxg_mcast_add_list(p_adapter_t adapter, char *address)
2675{
2676 p_mcast_address_t mcaddr, mlist;
2677 bool equaladdr;
2678
2679 /* Check to see if it already exists */
2680 mlist = adapter->mcastaddrs;
2681 while (mlist) {
2682 ETHER_EQ_ADDR(mlist->address, address, equaladdr);
2683 if (equaladdr) {
2684 return (STATUS_SUCCESS);
2685 }
2686 mlist = mlist->next;
2687 }
2688
2689 /* Doesn't already exist. Allocate a structure to hold it */
2690 mcaddr = kmalloc(sizeof(mcast_address_t), GFP_ATOMIC);
2691 if (mcaddr == NULL)
2692 return 1;
2693
2694 memcpy(mcaddr->address, address, 6);
2695
2696 mcaddr->next = adapter->mcastaddrs;
2697 adapter->mcastaddrs = mcaddr;
2698 2667
2699 return (STATUS_SUCCESS); 2668 return (STATUS_SUCCESS);
2700} 2669}
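Taken together, sxg_read_mdio_reg and sxg_write_mdio_reg give a simple read-modify-write primitive for PHY registers. The helper below is a sketch only (sxg_mdio_set_bits is not a function in this driver); used with LASI_CONTROL and LASI_CTL_LS_ALARM_ENABLE it would set the alarm-enable bit while preserving any other control bits, rather than overwriting the whole register as sxg_initialize_link does.

/* Sketch only: read-modify-write of an MDIO register via the helpers above. */
static int sxg_mdio_set_bits(p_adapter_t adapter, u32 DevAddr, u32 RegAddr, u32 Bits)
{
	u32 Value;

	if (sxg_read_mdio_reg(adapter, DevAddr, RegAddr, &Value) != STATUS_SUCCESS)
		return (STATUS_FAILURE);

	Value |= Bits;			/* set the requested bits, keep the rest */

	return sxg_write_mdio_reg(adapter, DevAddr, RegAddr, Value);
}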
@@ -2710,7 +2679,6 @@ static int sxg_mcast_add_list(p_adapter_t adapter, char *address)
2710 * 2679 *
2711 */ 2680 */
2712static u32 sxg_crc_table[256]; /* Table of CRC's for all possible byte values */ 2681static u32 sxg_crc_table[256]; /* Table of CRC's for all possible byte values */
2713static u32 sxg_crc_init; /* Is table initialized */
2714 2682
2715/* 2683/*
2716 * Construct the CRC32 table 2684 * Construct the CRC32 table
@@ -2737,6 +2705,8 @@ static void sxg_mcast_init_crc32(void)
2737 } 2705 }
2738} 2706}
2739 2707
2708#if XXXTODO
2709static u32 sxg_crc_init; /* Is table initialized */
2740/* 2710/*
2741 * Return the MAC hash as described above. 2711 * Return the MAC hash as described above.
2742 */ 2712 */
@@ -2765,6 +2735,74 @@ static unsigned char sxg_mcast_get_mac_hash(char *macaddr)
2765 return (machash); 2735 return (machash);
2766} 2736}
2767 2737
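The hash returned above selects one bit in the adapter's multicast hash mask, which sxg_mcast_set_mask (below) then splits across the McastLow/McastHigh registers. Roughly, the mapping looks like the sketch that follows; the group-address guard is added here for illustration, and MulticastMask is assumed to be a u64, as the %llx format used in sxg_mcast_set_mask suggests.

/* Sketch only: fold a multicast MAC address into the 64-bit hash mask. */
static void sxg_mcast_hash_into_mask(p_adapter_t adapter, char *address)
{
	unsigned char machash;

	if (!(address[0] & 0x01))
		return;				/* not a group (multicast) address */

	machash = sxg_mcast_get_mac_hash(address);	/* 0..63 */
	adapter->MulticastMask |= (u64)1 << machash;
}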
2738static void sxg_mcast_set_mask(p_adapter_t adapter)
2739{
2740 PSXG_UCODE_REGS sxg_regs = adapter->UcodeRegs;
2741
2742 DBG_ERROR("%s ENTER (%s) macopts[%x] mask[%llx]\n", __func__,
2743 adapter->netdev->name, (unsigned int)adapter->MacFilter,
2744 adapter->MulticastMask);
2745
2746 if (adapter->MacFilter & (MAC_ALLMCAST | MAC_PROMISC)) {
2747 /* Turn on all multicast addresses. We have to do this for promiscuous
2748 * mode as well as ALLMCAST mode. It saves the Microcode from having
2749 * to keep state about the MAC configuration.
2750 */
2751/* DBG_ERROR("sxg: %s macopts = MAC_ALLMCAST | MAC_PROMISC\n SLUT MODE!!!\n",__func__); */
2752 WRITE_REG(sxg_regs->McastLow, 0xFFFFFFFF, FLUSH);
2753 WRITE_REG(sxg_regs->McastHigh, 0xFFFFFFFF, FLUSH);
2754/* DBG_ERROR("%s (%s) WRITE to slic_regs slic_mcastlow&high 0xFFFFFFFF\n",__func__, adapter->netdev->name); */
2755
2756 } else {
2757 /* Commit our multicast mask to the SLIC by writing to the multicast
2758 * address mask registers
2759 */
2760 DBG_ERROR("%s (%s) WRITE mcastlow[%lx] mcasthigh[%lx]\n",
2761 __func__, adapter->netdev->name,
2762 ((ulong) (adapter->MulticastMask & 0xFFFFFFFF)),
2763 ((ulong)
2764 ((adapter->MulticastMask >> 32) & 0xFFFFFFFF)));
2765
2766 WRITE_REG(sxg_regs->McastLow,
2767 (u32) (adapter->MulticastMask & 0xFFFFFFFF), FLUSH);
2768 WRITE_REG(sxg_regs->McastHigh,
2769 (u32) ((adapter->
2770 MulticastMask >> 32) & 0xFFFFFFFF), FLUSH);
2771 }
2772}
2773
2774/*
2775 * Allocate a mcast_address structure to hold the multicast address.
2776 * Link it in.
2777 */
2778static int sxg_mcast_add_list(p_adapter_t adapter, char *address)
2779{
2780 p_mcast_address_t mcaddr, mlist;
2781 bool equaladdr;
2782
2783 /* Check to see if it already exists */
2784 mlist = adapter->mcastaddrs;
2785 while (mlist) {
2786 ETHER_EQ_ADDR(mlist->address, address, equaladdr);
2787 if (equaladdr) {
2788 return (STATUS_SUCCESS);
2789 }
2790 mlist = mlist->next;
2791 }
2792
2793 /* Doesn't already exist. Allocate a structure to hold it */
2794 mcaddr = kmalloc(sizeof(mcast_address_t), GFP_ATOMIC);
2795 if (mcaddr == NULL)
2796 return 1;
2797
2798 memcpy(mcaddr->address, address, 6);
2799
2800 mcaddr->next = adapter->mcastaddrs;
2801 adapter->mcastaddrs = mcaddr;
2802
2803 return (STATUS_SUCCESS);
2804}
2805
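A caller that wants both the exact-match list kept by sxg_mcast_add_list and the hardware hash mask would combine the helpers roughly as below. This is a sketch with a caller-supplied address array; the real entry point is sxg_mcast_set_list further down, which walks the net_device multicast list instead.

/* Sketch only: track a set of multicast addresses and program the hash mask. */
static int sxg_mcast_program(p_adapter_t adapter, char addresses[][6], int count)
{
	int i;

	adapter->MulticastMask = 0;		/* rebuild the mask from scratch */

	for (i = 0; i < count; i++) {
		if (sxg_mcast_add_list(adapter, addresses[i]) != STATUS_SUCCESS)
			return (STATUS_FAILURE);	/* allocation failed */
		sxg_mcast_set_bit(adapter, addresses[i]);
	}

	sxg_mcast_set_mask(adapter);		/* commit the mask to the hardware */
	return (STATUS_SUCCESS);
}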
2768static void sxg_mcast_set_bit(p_adapter_t adapter, char *address) 2806static void sxg_mcast_set_bit(p_adapter_t adapter, char *address)
2769{ 2807{
2770 unsigned char crcpoly; 2808 unsigned char crcpoly;
@@ -2783,7 +2821,6 @@ static void sxg_mcast_set_bit(p_adapter_t adapter, char *address)
2783 2821
2784static void sxg_mcast_set_list(p_net_device dev) 2822static void sxg_mcast_set_list(p_net_device dev)
2785{ 2823{
2786#if XXXTODO
2787 p_adapter_t adapter = (p_adapter_t) netdev_priv(dev); 2824 p_adapter_t adapter = (p_adapter_t) netdev_priv(dev);
2788 int status = STATUS_SUCCESS; 2825 int status = STATUS_SUCCESS;
2789 int i; 2826 int i;
@@ -2809,7 +2846,7 @@ static void sxg_mcast_set_list(p_net_device dev)
2809 } 2846 }
2810 2847
2811 DBG_ERROR("%s a->devflags_prev[%x] dev->flags[%x] status[%x]\n", 2848 DBG_ERROR("%s a->devflags_prev[%x] dev->flags[%x] status[%x]\n",
2812 __FUNCTION__, adapter->devflags_prev, dev->flags, status); 2849 __func__, adapter->devflags_prev, dev->flags, status);
2813 if (adapter->devflags_prev != dev->flags) { 2850 if (adapter->devflags_prev != dev->flags) {
2814 adapter->macopts = MAC_DIRECTED; 2851 adapter->macopts = MAC_DIRECTED;
2815 if (dev->flags) { 2852 if (dev->flags) {
@@ -2828,60 +2865,24 @@ static void sxg_mcast_set_list(p_net_device dev)
2828 } 2865 }
2829 adapter->devflags_prev = dev->flags; 2866 adapter->devflags_prev = dev->flags;
2830 DBG_ERROR("%s call sxg_config_set adapter->macopts[%x]\n", 2867 DBG_ERROR("%s call sxg_config_set adapter->macopts[%x]\n",
2831 __FUNCTION__, adapter->macopts); 2868 __func__, adapter->macopts);
2832 sxg_config_set(adapter, TRUE); 2869 sxg_config_set(adapter, TRUE);
2833 } else { 2870 } else {
2834 if (status == STATUS_SUCCESS) { 2871 if (status == STATUS_SUCCESS) {
2835 sxg_mcast_set_mask(adapter); 2872 sxg_mcast_set_mask(adapter);
2836 } 2873 }
2837 } 2874 }
2838#endif
2839 return; 2875 return;
2840} 2876}
2841 2877#endif
2842static void sxg_mcast_set_mask(p_adapter_t adapter)
2843{
2844 PSXG_UCODE_REGS sxg_regs = adapter->UcodeRegs;
2845
2846 DBG_ERROR("%s ENTER (%s) macopts[%x] mask[%llx]\n", __FUNCTION__,
2847 adapter->netdev->name, (unsigned int)adapter->MacFilter,
2848 adapter->MulticastMask);
2849
2850 if (adapter->MacFilter & (MAC_ALLMCAST | MAC_PROMISC)) {
2851 /* Turn on all multicast addresses. We have to do this for promiscuous
2852 * mode as well as ALLMCAST mode. It saves the Microcode from having
2853 * to keep state about the MAC configuration.
2854 */
2855// DBG_ERROR("sxg: %s macopts = MAC_ALLMCAST | MAC_PROMISC\n SLUT MODE!!!\n",__FUNCTION__);
2856 WRITE_REG(sxg_regs->McastLow, 0xFFFFFFFF, FLUSH);
2857 WRITE_REG(sxg_regs->McastHigh, 0xFFFFFFFF, FLUSH);
2858// DBG_ERROR("%s (%s) WRITE to slic_regs slic_mcastlow&high 0xFFFFFFFF\n",__FUNCTION__, adapter->netdev->name);
2859
2860 } else {
2861 /* Commit our multicast mask to the SLIC by writing to the multicast
2862 * address mask registers
2863 */
2864 DBG_ERROR("%s (%s) WRITE mcastlow[%lx] mcasthigh[%lx]\n",
2865 __FUNCTION__, adapter->netdev->name,
2866 ((ulong) (adapter->MulticastMask & 0xFFFFFFFF)),
2867 ((ulong)
2868 ((adapter->MulticastMask >> 32) & 0xFFFFFFFF)));
2869
2870 WRITE_REG(sxg_regs->McastLow,
2871 (u32) (adapter->MulticastMask & 0xFFFFFFFF), FLUSH);
2872 WRITE_REG(sxg_regs->McastHigh,
2873 (u32) ((adapter->
2874 MulticastMask >> 32) & 0xFFFFFFFF), FLUSH);
2875 }
2876}
2877 2878
2878static void sxg_unmap_mmio_space(p_adapter_t adapter) 2879static void sxg_unmap_mmio_space(p_adapter_t adapter)
2879{ 2880{
2880#if LINUX_FREES_ADAPTER_RESOURCES 2881#if LINUX_FREES_ADAPTER_RESOURCES
2881// if (adapter->Regs) { 2882/* if (adapter->Regs) { */
2882// iounmap(adapter->Regs); 2883/* iounmap(adapter->Regs); */
2883// } 2884/* } */
2884// adapter->slic_regs = NULL; 2885/* adapter->slic_regs = NULL; */
2885#endif 2886#endif
2886} 2887}
2887 2888
@@ -2909,8 +2910,8 @@ void SxgFreeResources(p_adapter_t adapter)
2909 IsrCount = adapter->MsiEnabled ? RssIds : 1; 2910 IsrCount = adapter->MsiEnabled ? RssIds : 1;
2910 2911
2911 if (adapter->BasicAllocations == FALSE) { 2912 if (adapter->BasicAllocations == FALSE) {
2912 // No allocations have been made, including spinlocks, 2913 /* No allocations have been made, including spinlocks, */
2913 // or listhead initializations. Return. 2914 /* or listhead initializations. Return. */
2914 return; 2915 return;
2915 } 2916 }
2916 2917
@@ -2920,7 +2921,7 @@ void SxgFreeResources(p_adapter_t adapter)
2920 if (!(IsListEmpty(&adapter->AllSglBuffers))) { 2921 if (!(IsListEmpty(&adapter->AllSglBuffers))) {
2921 SxgFreeSglBuffers(adapter); 2922 SxgFreeSglBuffers(adapter);
2922 } 2923 }
2923 // Free event queues. 2924 /* Free event queues. */
2924 if (adapter->EventRings) { 2925 if (adapter->EventRings) {
2925 pci_free_consistent(adapter->pcidev, 2926 pci_free_consistent(adapter->pcidev,
2926 sizeof(SXG_EVENT_RING) * RssIds, 2927 sizeof(SXG_EVENT_RING) * RssIds,
@@ -2947,17 +2948,17 @@ void SxgFreeResources(p_adapter_t adapter)
2947 SXG_FREE_PACKET_POOL(adapter->PacketPoolHandle); 2948 SXG_FREE_PACKET_POOL(adapter->PacketPoolHandle);
2948 SXG_FREE_BUFFER_POOL(adapter->BufferPoolHandle); 2949 SXG_FREE_BUFFER_POOL(adapter->BufferPoolHandle);
2949 2950
2950 // Unmap register spaces 2951 /* Unmap register spaces */
2951 SxgUnmapResources(adapter); 2952 SxgUnmapResources(adapter);
2952 2953
2953 // Deregister DMA 2954 /* Deregister DMA */
2954 if (adapter->DmaHandle) { 2955 if (adapter->DmaHandle) {
2955 SXG_DEREGISTER_DMA(adapter->DmaHandle); 2956 SXG_DEREGISTER_DMA(adapter->DmaHandle);
2956 } 2957 }
2957 // Deregister interrupt 2958 /* Deregister interrupt */
2958 SxgDeregisterInterrupt(adapter); 2959 SxgDeregisterInterrupt(adapter);
2959 2960
2960 // Possibly free system info (5.2 only) 2961 /* Possibly free system info (5.2 only) */
2961 SXG_RELEASE_SYSTEM_INFO(adapter); 2962 SXG_RELEASE_SYSTEM_INFO(adapter);
2962 2963
2963 SxgDiagFreeResources(adapter); 2964 SxgDiagFreeResources(adapter);
@@ -3047,23 +3048,23 @@ static int sxg_allocate_buffer_memory(p_adapter_t adapter,
3047 3048
3048 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocMem", 3049 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocMem",
3049 adapter, Size, BufferType, 0); 3050 adapter, Size, BufferType, 0);
3050 // Grab the adapter lock and check the state. 3051 /* Grab the adapter lock and check the state. */
3051 // If we're in anything other than INITIALIZING or 3052 /* If we're in anything other than INITIALIZING or */
3052 // RUNNING state, fail. This is to prevent 3053 /* RUNNING state, fail. This is to prevent */
3053 // allocations in an improper driver state 3054 /* allocations in an improper driver state */
3054 spin_lock(&adapter->AdapterLock); 3055 spin_lock(&adapter->AdapterLock);
3055 3056
3056 // Increment the AllocationsPending count while holding 3057 /* Increment the AllocationsPending count while holding */
3057 // the lock. Pause processing relies on this 3058 /* the lock. Pause processing relies on this */
3058 ++adapter->AllocationsPending; 3059 ++adapter->AllocationsPending;
3059 spin_unlock(&adapter->AdapterLock); 3060 spin_unlock(&adapter->AdapterLock);
3060 3061
3061 // At initialization time allocate resources synchronously. 3062 /* At initialization time allocate resources synchronously. */
3062 Buffer = pci_alloc_consistent(adapter->pcidev, Size, &pBuffer); 3063 Buffer = pci_alloc_consistent(adapter->pcidev, Size, &pBuffer);
3063 if (Buffer == NULL) { 3064 if (Buffer == NULL) {
3064 spin_lock(&adapter->AdapterLock); 3065 spin_lock(&adapter->AdapterLock);
3065 // Decrement the AllocationsPending count while holding 3066 /* Decrement the AllocationsPending count while holding */
3066 // the lock. Pause processing relies on this 3067 /* the lock. Pause processing relies on this */
3067 --adapter->AllocationsPending; 3068 --adapter->AllocationsPending;
3068 spin_unlock(&adapter->AdapterLock); 3069 spin_unlock(&adapter->AdapterLock);
3069 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlcMemF1", 3070 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlcMemF1",
@@ -3113,10 +3114,10 @@ static void sxg_allocate_rcvblock_complete(p_adapter_t adapter,
3113 ASSERT((BufferSize == SXG_RCV_DATA_BUFFER_SIZE) || 3114 ASSERT((BufferSize == SXG_RCV_DATA_BUFFER_SIZE) ||
3114 (BufferSize == SXG_RCV_JUMBO_BUFFER_SIZE)); 3115 (BufferSize == SXG_RCV_JUMBO_BUFFER_SIZE));
3115 ASSERT(Length == SXG_RCV_BLOCK_SIZE(BufferSize)); 3116 ASSERT(Length == SXG_RCV_BLOCK_SIZE(BufferSize));
3116 // First, initialize the contained pool of receive data 3117 /* First, initialize the contained pool of receive data */
3117 // buffers. This initialization requires NBL/NB/MDL allocations, 3118 /* buffers. This initialization requires NBL/NB/MDL allocations, */
3118 // If any of them fail, free the block and return without 3119 /* If any of them fail, free the block and return without */
3119 // queueing the shared memory 3120 /* queueing the shared memory */
3120 RcvDataBuffer = RcvBlock; 3121 RcvDataBuffer = RcvBlock;
3121#if 0 3122#if 0
3122 for (i = 0, Paddr = *PhysicalAddress; 3123 for (i = 0, Paddr = *PhysicalAddress;
@@ -3126,14 +3127,14 @@ static void sxg_allocate_rcvblock_complete(p_adapter_t adapter,
3126 for (i = 0, Paddr = PhysicalAddress; 3127 for (i = 0, Paddr = PhysicalAddress;
3127 i < SXG_RCV_DESCRIPTORS_PER_BLOCK; 3128 i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
3128 i++, Paddr += BufferSize, RcvDataBuffer += BufferSize) { 3129 i++, Paddr += BufferSize, RcvDataBuffer += BufferSize) {
3129 // 3130 /* */
3130 RcvDataBufferHdr = 3131 RcvDataBufferHdr =
3131 (PSXG_RCV_DATA_BUFFER_HDR) (RcvDataBuffer + 3132 (PSXG_RCV_DATA_BUFFER_HDR) (RcvDataBuffer +
3132 SXG_RCV_DATA_BUFFER_HDR_OFFSET 3133 SXG_RCV_DATA_BUFFER_HDR_OFFSET
3133 (BufferSize)); 3134 (BufferSize));
3134 RcvDataBufferHdr->VirtualAddress = RcvDataBuffer; 3135 RcvDataBufferHdr->VirtualAddress = RcvDataBuffer;
3135 RcvDataBufferHdr->PhysicalAddress = Paddr; 3136 RcvDataBufferHdr->PhysicalAddress = Paddr;
3136 RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM; // For FREE macro assertion 3137 RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM; /* For FREE macro assertion */
3137 RcvDataBufferHdr->Size = 3138 RcvDataBufferHdr->Size =
3138 SXG_RCV_BUFFER_DATA_SIZE(BufferSize); 3139 SXG_RCV_BUFFER_DATA_SIZE(BufferSize);
3139 3140
@@ -3143,8 +3144,8 @@ static void sxg_allocate_rcvblock_complete(p_adapter_t adapter,
3143 3144
3144 } 3145 }
3145 3146
3146 // Place this entire block of memory on the AllRcvBlocks queue so it can be 3147 /* Place this entire block of memory on the AllRcvBlocks queue so it can be */
3147 // freed later 3148 /* freed later */
3148 RcvBlockHdr = 3149 RcvBlockHdr =
3149 (PSXG_RCV_BLOCK_HDR) ((unsigned char *)RcvBlock + 3150 (PSXG_RCV_BLOCK_HDR) ((unsigned char *)RcvBlock +
3150 SXG_RCV_BLOCK_HDR_OFFSET(BufferSize)); 3151 SXG_RCV_BLOCK_HDR_OFFSET(BufferSize));
@@ -3155,7 +3156,7 @@ static void sxg_allocate_rcvblock_complete(p_adapter_t adapter,
3155 InsertTailList(&adapter->AllRcvBlocks, &RcvBlockHdr->AllList); 3156 InsertTailList(&adapter->AllRcvBlocks, &RcvBlockHdr->AllList);
3156 spin_unlock(&adapter->RcvQLock); 3157 spin_unlock(&adapter->RcvQLock);
3157 3158
3158 // Now free the contained receive data buffers that we initialized above 3159 /* Now free the contained receive data buffers that we initialized above */
3159 RcvDataBuffer = RcvBlock; 3160 RcvDataBuffer = RcvBlock;
3160 for (i = 0, Paddr = PhysicalAddress; 3161 for (i = 0, Paddr = PhysicalAddress;
3161 i < SXG_RCV_DESCRIPTORS_PER_BLOCK; 3162 i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
@@ -3168,7 +3169,7 @@ static void sxg_allocate_rcvblock_complete(p_adapter_t adapter,
3168 spin_unlock(&adapter->RcvQLock); 3169 spin_unlock(&adapter->RcvQLock);
3169 } 3170 }
3170 3171
3171 // Locate the descriptor block and put it on a separate free queue 3172 /* Locate the descriptor block and put it on a separate free queue */
3172 RcvDescriptorBlock = 3173 RcvDescriptorBlock =
3173 (PSXG_RCV_DESCRIPTOR_BLOCK) ((unsigned char *)RcvBlock + 3174 (PSXG_RCV_DESCRIPTOR_BLOCK) ((unsigned char *)RcvBlock +
3174 SXG_RCV_DESCRIPTOR_BLOCK_OFFSET 3175 SXG_RCV_DESCRIPTOR_BLOCK_OFFSET
@@ -3186,7 +3187,7 @@ static void sxg_allocate_rcvblock_complete(p_adapter_t adapter,
3186 adapter, RcvBlock, Length, 0); 3187 adapter, RcvBlock, Length, 0);
3187 return; 3188 return;
3188 fail: 3189 fail:
3189 // Free any allocated resources 3190 /* Free any allocated resources */
3190 if (RcvBlock) { 3191 if (RcvBlock) {
3191 RcvDataBuffer = RcvBlock; 3192 RcvDataBuffer = RcvBlock;
3192 for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK; 3193 for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
@@ -3200,7 +3201,7 @@ static void sxg_allocate_rcvblock_complete(p_adapter_t adapter,
3200 pci_free_consistent(adapter->pcidev, 3201 pci_free_consistent(adapter->pcidev,
3201 Length, RcvBlock, PhysicalAddress); 3202 Length, RcvBlock, PhysicalAddress);
3202 } 3203 }
3203 DBG_ERROR("%s: OUT OF RESOURCES\n", __FUNCTION__); 3204 DBG_ERROR("%s: OUT OF RESOURCES\n", __func__);
3204 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "RcvAFail", 3205 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "RcvAFail",
3205 adapter, adapter->FreeRcvBufferCount, 3206 adapter, adapter->FreeRcvBufferCount,
3206 adapter->FreeRcvBlockCount, adapter->AllRcvBlockCount); 3207 adapter->FreeRcvBlockCount, adapter->AllRcvBlockCount);
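Pieced together from the offsets used above, one receive block is a single pci_alloc_consistent region carved up as sketched below; the exact padding and header placement come from the SXG_RCV_*_OFFSET macros in sxg.h, so the picture is approximate.

/*
 * Sketch of one receive block (single DMA allocation of SXG_RCV_BLOCK_SIZE):
 *
 *   +------------------------------------------------------------------+
 *   | data buffer 0 ... data buffer N-1, N = SXG_RCV_DESCRIPTORS_PER_BLOCK
 *   |   each followed at SXG_RCV_DATA_BUFFER_HDR_OFFSET by its
 *   |   SXG_RCV_DATA_BUFFER_HDR (virtual/physical address, state, size)
 *   +------------------------------------------------------------------+
 *   | SXG_RCV_BLOCK_HDR at SXG_RCV_BLOCK_HDR_OFFSET
 *   |   links the whole block on adapter->AllRcvBlocks for later freeing
 *   +------------------------------------------------------------------+
 *   | SXG_RCV_DESCRIPTOR_BLOCK at SXG_RCV_DESCRIPTOR_BLOCK_OFFSET
 *   |   the descriptors handed to the card via ring zero
 *   +------------------------------------------------------------------+
 */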
@@ -3230,7 +3231,7 @@ static void sxg_allocate_sgl_buffer_complete(p_adapter_t adapter,
3230 adapter->AllSglBufferCount++; 3231 adapter->AllSglBufferCount++;
3231 memset(SxgSgl, 0, sizeof(SXG_SCATTER_GATHER)); 3232 memset(SxgSgl, 0, sizeof(SXG_SCATTER_GATHER));
3232 SxgSgl->PhysicalAddress = PhysicalAddress; /* *PhysicalAddress; */ 3233 SxgSgl->PhysicalAddress = PhysicalAddress; /* *PhysicalAddress; */
3233 SxgSgl->adapter = adapter; // Initialize backpointer once 3234 SxgSgl->adapter = adapter; /* Initialize backpointer once */
3234 InsertTailList(&adapter->AllSglBuffers, &SxgSgl->AllList); 3235 InsertTailList(&adapter->AllSglBuffers, &SxgSgl->AllList);
3235 spin_unlock(&adapter->SglQLock); 3236 spin_unlock(&adapter->SglQLock);
3236 SxgSgl->State = SXG_BUFFER_BUSY; 3237 SxgSgl->State = SXG_BUFFER_BUSY;
@@ -3244,14 +3245,14 @@ static unsigned char temp_mac_address[6] =
3244 3245
3245static void sxg_adapter_set_hwaddr(p_adapter_t adapter) 3246static void sxg_adapter_set_hwaddr(p_adapter_t adapter)
3246{ 3247{
3247// DBG_ERROR ("%s ENTER card->config_set[%x] port[%d] physport[%d] funct#[%d]\n", __FUNCTION__, 3248/* DBG_ERROR ("%s ENTER card->config_set[%x] port[%d] physport[%d] funct#[%d]\n", __func__, */
3248// card->config_set, adapter->port, adapter->physport, adapter->functionnumber); 3249/* card->config_set, adapter->port, adapter->physport, adapter->functionnumber); */
3249// 3250/* */
3250// sxg_dbg_macaddrs(adapter); 3251/* sxg_dbg_macaddrs(adapter); */
3251 3252
3252 memcpy(adapter->macaddr, temp_mac_address, sizeof(SXG_CONFIG_MAC)); 3253 memcpy(adapter->macaddr, temp_mac_address, sizeof(SXG_CONFIG_MAC));
3253// DBG_ERROR ("%s AFTER copying from config.macinfo into currmacaddr\n", __FUNCTION__); 3254/* DBG_ERROR ("%s AFTER copying from config.macinfo into currmacaddr\n", __func__); */
3254// sxg_dbg_macaddrs(adapter); 3255/* sxg_dbg_macaddrs(adapter); */
3255 if (!(adapter->currmacaddr[0] || 3256 if (!(adapter->currmacaddr[0] ||
3256 adapter->currmacaddr[1] || 3257 adapter->currmacaddr[1] ||
3257 adapter->currmacaddr[2] || 3258 adapter->currmacaddr[2] ||
@@ -3262,18 +3263,18 @@ static void sxg_adapter_set_hwaddr(p_adapter_t adapter)
3262 if (adapter->netdev) { 3263 if (adapter->netdev) {
3263 memcpy(adapter->netdev->dev_addr, adapter->currmacaddr, 6); 3264 memcpy(adapter->netdev->dev_addr, adapter->currmacaddr, 6);
3264 } 3265 }
3265// DBG_ERROR ("%s EXIT port %d\n", __FUNCTION__, adapter->port); 3266/* DBG_ERROR ("%s EXIT port %d\n", __func__, adapter->port); */
3266 sxg_dbg_macaddrs(adapter); 3267 sxg_dbg_macaddrs(adapter);
3267 3268
3268} 3269}
3269 3270
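The byte-by-byte test on currmacaddr above is the usual "no MAC programmed yet" check. With <linux/etherdevice.h> the same function can be written as the sketch below; it assumes currmacaddr and macaddr are 6-byte arrays and that the elided branch simply copies macaddr into currmacaddr, which is what the surrounding code suggests.

/* Sketch only: same logic using the etherdevice helper. */
#include <linux/etherdevice.h>

static void sxg_adapter_set_hwaddr_sketch(p_adapter_t adapter)
{
	memcpy(adapter->macaddr, temp_mac_address, sizeof(SXG_CONFIG_MAC));

	if (is_zero_ether_addr(adapter->currmacaddr))
		memcpy(adapter->currmacaddr, adapter->macaddr, 6);

	if (adapter->netdev)
		memcpy(adapter->netdev->dev_addr, adapter->currmacaddr, 6);

	sxg_dbg_macaddrs(adapter);
}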
3271#if XXXTODO
3270static int sxg_mac_set_address(p_net_device dev, void *ptr) 3272static int sxg_mac_set_address(p_net_device dev, void *ptr)
3271{ 3273{
3272#if XXXTODO
3273 p_adapter_t adapter = (p_adapter_t) netdev_priv(dev); 3274 p_adapter_t adapter = (p_adapter_t) netdev_priv(dev);
3274 struct sockaddr *addr = ptr; 3275 struct sockaddr *addr = ptr;
3275 3276
3276 DBG_ERROR("%s ENTER (%s)\n", __FUNCTION__, adapter->netdev->name); 3277 DBG_ERROR("%s ENTER (%s)\n", __func__, adapter->netdev->name);
3277 3278
3278 if (netif_running(dev)) { 3279 if (netif_running(dev)) {
3279 return -EBUSY; 3280 return -EBUSY;
@@ -3282,22 +3283,22 @@ static int sxg_mac_set_address(p_net_device dev, void *ptr)
3282 return -EBUSY; 3283 return -EBUSY;
3283 } 3284 }
3284 DBG_ERROR("sxg: %s (%s) curr %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n", 3285 DBG_ERROR("sxg: %s (%s) curr %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
3285 __FUNCTION__, adapter->netdev->name, adapter->currmacaddr[0], 3286 __func__, adapter->netdev->name, adapter->currmacaddr[0],
3286 adapter->currmacaddr[1], adapter->currmacaddr[2], 3287 adapter->currmacaddr[1], adapter->currmacaddr[2],
3287 adapter->currmacaddr[3], adapter->currmacaddr[4], 3288 adapter->currmacaddr[3], adapter->currmacaddr[4],
3288 adapter->currmacaddr[5]); 3289 adapter->currmacaddr[5]);
3289 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 3290 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3290 memcpy(adapter->currmacaddr, addr->sa_data, dev->addr_len); 3291 memcpy(adapter->currmacaddr, addr->sa_data, dev->addr_len);
3291 DBG_ERROR("sxg: %s (%s) new %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n", 3292 DBG_ERROR("sxg: %s (%s) new %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
3292 __FUNCTION__, adapter->netdev->name, adapter->currmacaddr[0], 3293 __func__, adapter->netdev->name, adapter->currmacaddr[0],
3293 adapter->currmacaddr[1], adapter->currmacaddr[2], 3294 adapter->currmacaddr[1], adapter->currmacaddr[2],
3294 adapter->currmacaddr[3], adapter->currmacaddr[4], 3295 adapter->currmacaddr[3], adapter->currmacaddr[4],
3295 adapter->currmacaddr[5]); 3296 adapter->currmacaddr[5]);
3296 3297
3297 sxg_config_set(adapter, TRUE); 3298 sxg_config_set(adapter, TRUE);
3298#endif
3299 return 0; 3299 return 0;
3300} 3300}
3301#endif
3301 3302
3302/*****************************************************************************/ 3303/*****************************************************************************/
3303/************* SXG DRIVER FUNCTIONS (below) ********************************/ 3304/************* SXG DRIVER FUNCTIONS (below) ********************************/
@@ -3321,77 +3322,77 @@ static int sxg_initialize_adapter(p_adapter_t adapter)
3321 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "InitAdpt", 3322 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "InitAdpt",
3322 adapter, 0, 0, 0); 3323 adapter, 0, 0, 0);
3323 3324
3324 RssIds = 1; // XXXTODO SXG_RSS_CPU_COUNT(adapter); 3325 RssIds = 1; /* XXXTODO SXG_RSS_CPU_COUNT(adapter); */
3325 IsrCount = adapter->MsiEnabled ? RssIds : 1; 3326 IsrCount = adapter->MsiEnabled ? RssIds : 1;
3326 3327
3327 // Sanity check SXG_UCODE_REGS structure definition to 3328 /* Sanity check SXG_UCODE_REGS structure definition to */
3328 // make sure the length is correct 3329 /* make sure the length is correct */
3329 ASSERT(sizeof(SXG_UCODE_REGS) == SXG_REGISTER_SIZE_PER_CPU); 3330 ASSERT(sizeof(SXG_UCODE_REGS) == SXG_REGISTER_SIZE_PER_CPU);
3330 3331
3331 // Disable interrupts 3332 /* Disable interrupts */
3332 SXG_DISABLE_ALL_INTERRUPTS(adapter); 3333 SXG_DISABLE_ALL_INTERRUPTS(adapter);
3333 3334
3334 // Set MTU 3335 /* Set MTU */
3335 ASSERT((adapter->FrameSize == ETHERMAXFRAME) || 3336 ASSERT((adapter->FrameSize == ETHERMAXFRAME) ||
3336 (adapter->FrameSize == JUMBOMAXFRAME)); 3337 (adapter->FrameSize == JUMBOMAXFRAME));
3337 WRITE_REG(adapter->UcodeRegs[0].LinkMtu, adapter->FrameSize, TRUE); 3338 WRITE_REG(adapter->UcodeRegs[0].LinkMtu, adapter->FrameSize, TRUE);
3338 3339
3339 // Set event ring base address and size 3340 /* Set event ring base address and size */
3340 WRITE_REG64(adapter, 3341 WRITE_REG64(adapter,
3341 adapter->UcodeRegs[0].EventBase, adapter->PEventRings, 0); 3342 adapter->UcodeRegs[0].EventBase, adapter->PEventRings, 0);
3342 WRITE_REG(adapter->UcodeRegs[0].EventSize, EVENT_RING_SIZE, TRUE); 3343 WRITE_REG(adapter->UcodeRegs[0].EventSize, EVENT_RING_SIZE, TRUE);
3343 3344
3344 // Per-ISR initialization 3345 /* Per-ISR initialization */
3345 for (i = 0; i < IsrCount; i++) { 3346 for (i = 0; i < IsrCount; i++) {
3346 u64 Addr; 3347 u64 Addr;
3347 // Set interrupt status pointer 3348 /* Set interrupt status pointer */
3348 Addr = adapter->PIsr + (i * sizeof(u32)); 3349 Addr = adapter->PIsr + (i * sizeof(u32));
3349 WRITE_REG64(adapter, adapter->UcodeRegs[i].Isp, Addr, i); 3350 WRITE_REG64(adapter, adapter->UcodeRegs[i].Isp, Addr, i);
3350 } 3351 }
3351 3352
3352 // XMT ring zero index 3353 /* XMT ring zero index */
3353 WRITE_REG64(adapter, 3354 WRITE_REG64(adapter,
3354 adapter->UcodeRegs[0].SPSendIndex, 3355 adapter->UcodeRegs[0].SPSendIndex,
3355 adapter->PXmtRingZeroIndex, 0); 3356 adapter->PXmtRingZeroIndex, 0);
3356 3357
3357 // Per-RSS initialization 3358 /* Per-RSS initialization */
3358 for (i = 0; i < RssIds; i++) { 3359 for (i = 0; i < RssIds; i++) {
3359 // Release all event ring entries to the Microcode 3360 /* Release all event ring entries to the Microcode */
3360 WRITE_REG(adapter->UcodeRegs[i].EventRelease, EVENT_RING_SIZE, 3361 WRITE_REG(adapter->UcodeRegs[i].EventRelease, EVENT_RING_SIZE,
3361 TRUE); 3362 TRUE);
3362 } 3363 }
3363 3364
3364 // Transmit ring base and size 3365 /* Transmit ring base and size */
3365 WRITE_REG64(adapter, 3366 WRITE_REG64(adapter,
3366 adapter->UcodeRegs[0].XmtBase, adapter->PXmtRings, 0); 3367 adapter->UcodeRegs[0].XmtBase, adapter->PXmtRings, 0);
3367 WRITE_REG(adapter->UcodeRegs[0].XmtSize, SXG_XMT_RING_SIZE, TRUE); 3368 WRITE_REG(adapter->UcodeRegs[0].XmtSize, SXG_XMT_RING_SIZE, TRUE);
3368 3369
3369 // Receive ring base and size 3370 /* Receive ring base and size */
3370 WRITE_REG64(adapter, 3371 WRITE_REG64(adapter,
3371 adapter->UcodeRegs[0].RcvBase, adapter->PRcvRings, 0); 3372 adapter->UcodeRegs[0].RcvBase, adapter->PRcvRings, 0);
3372 WRITE_REG(adapter->UcodeRegs[0].RcvSize, SXG_RCV_RING_SIZE, TRUE); 3373 WRITE_REG(adapter->UcodeRegs[0].RcvSize, SXG_RCV_RING_SIZE, TRUE);
3373 3374
3374 // Populate the card with receive buffers 3375 /* Populate the card with receive buffers */
3375 sxg_stock_rcv_buffers(adapter); 3376 sxg_stock_rcv_buffers(adapter);
3376 3377
3377 // Initialize checksum offload capabilities. At the moment 3378 /* Initialize checksum offload capabilities. At the moment */
3378 // we always enable IP and TCP receive checksums on the card. 3379 /* we always enable IP and TCP receive checksums on the card. */
3379 // Depending on the checksum configuration specified by the 3380 /* Depending on the checksum configuration specified by the */
3380 // user, we can choose to report or ignore the checksum 3381 /* user, we can choose to report or ignore the checksum */
3381 // information provided by the card. 3382 /* information provided by the card. */
3382 WRITE_REG(adapter->UcodeRegs[0].ReceiveChecksum, 3383 WRITE_REG(adapter->UcodeRegs[0].ReceiveChecksum,
3383 SXG_RCV_TCP_CSUM_ENABLED | SXG_RCV_IP_CSUM_ENABLED, TRUE); 3384 SXG_RCV_TCP_CSUM_ENABLED | SXG_RCV_IP_CSUM_ENABLED, TRUE);
3384 3385
3385 // Initialize the MAC, XAUI 3386 /* Initialize the MAC, XAUI */
3386 DBG_ERROR("sxg: %s ENTER sxg_initialize_link\n", __FUNCTION__); 3387 DBG_ERROR("sxg: %s ENTER sxg_initialize_link\n", __func__);
3387 status = sxg_initialize_link(adapter); 3388 status = sxg_initialize_link(adapter);
3388 DBG_ERROR("sxg: %s EXIT sxg_initialize_link status[%x]\n", __FUNCTION__, 3389 DBG_ERROR("sxg: %s EXIT sxg_initialize_link status[%x]\n", __func__,
3389 status); 3390 status);
3390 if (status != STATUS_SUCCESS) { 3391 if (status != STATUS_SUCCESS) {
3391 return (status); 3392 return (status);
3392 } 3393 }
3393 // Initialize Dead to FALSE. 3394 /* Initialize Dead to FALSE. */
3394 // SlicCheckForHang or SlicDumpThread will take it from here. 3395 /* SlicCheckForHang or SlicDumpThread will take it from here. */
3395 adapter->Dead = FALSE; 3396 adapter->Dead = FALSE;
3396 adapter->PingOutstanding = FALSE; 3397 adapter->PingOutstanding = FALSE;
3397 3398
@@ -3428,14 +3429,14 @@ static int sxg_fill_descriptor_block(p_adapter_t adapter,
3428 3429
3429 ASSERT(RcvDescriptorBlockHdr); 3430 ASSERT(RcvDescriptorBlockHdr);
3430 3431
3431 // If we don't have the resources to fill the descriptor block, 3432 /* If we don't have the resources to fill the descriptor block, */
3432 // return failure 3433 /* return failure */
3433 if ((adapter->FreeRcvBufferCount < SXG_RCV_DESCRIPTORS_PER_BLOCK) || 3434 if ((adapter->FreeRcvBufferCount < SXG_RCV_DESCRIPTORS_PER_BLOCK) ||
3434 SXG_RING_FULL(RcvRingInfo)) { 3435 SXG_RING_FULL(RcvRingInfo)) {
3435 adapter->Stats.NoMem++; 3436 adapter->Stats.NoMem++;
3436 return (STATUS_FAILURE); 3437 return (STATUS_FAILURE);
3437 } 3438 }
3438 // Get a ring descriptor command 3439 /* Get a ring descriptor command */
3439 SXG_GET_CMD(RingZero, 3440 SXG_GET_CMD(RingZero,
3440 RcvRingInfo, RingDescriptorCmd, RcvDescriptorBlockHdr); 3441 RcvRingInfo, RingDescriptorCmd, RcvDescriptorBlockHdr);
3441 ASSERT(RingDescriptorCmd); 3442 ASSERT(RingDescriptorCmd);
@@ -3443,7 +3444,7 @@ static int sxg_fill_descriptor_block(p_adapter_t adapter,
3443 RcvDescriptorBlock = 3444 RcvDescriptorBlock =
3444 (PSXG_RCV_DESCRIPTOR_BLOCK) RcvDescriptorBlockHdr->VirtualAddress; 3445 (PSXG_RCV_DESCRIPTOR_BLOCK) RcvDescriptorBlockHdr->VirtualAddress;
3445 3446
3446 // Fill in the descriptor block 3447 /* Fill in the descriptor block */
3447 for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK; i++) { 3448 for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK; i++) {
3448 SXG_GET_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr); 3449 SXG_GET_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
3449 ASSERT(RcvDataBufferHdr); 3450 ASSERT(RcvDataBufferHdr);
@@ -3454,13 +3455,13 @@ static int sxg_fill_descriptor_block(p_adapter_t adapter,
3454 RcvDescriptorBlock->Descriptors[i].PhysicalAddress = 3455 RcvDescriptorBlock->Descriptors[i].PhysicalAddress =
3455 RcvDataBufferHdr->PhysicalAddress; 3456 RcvDataBufferHdr->PhysicalAddress;
3456 } 3457 }
3457 // Add the descriptor block to receive descriptor ring 0 3458 /* Add the descriptor block to receive descriptor ring 0 */
3458 RingDescriptorCmd->Sgl = RcvDescriptorBlockHdr->PhysicalAddress; 3459 RingDescriptorCmd->Sgl = RcvDescriptorBlockHdr->PhysicalAddress;
3459 3460
3460 // RcvBuffersOnCard is not protected via the receive lock (see 3461 /* RcvBuffersOnCard is not protected via the receive lock (see */
3461 // sxg_process_event_queue) We don't want to grap a lock every time a 3462 /* sxg_process_event_queue) We don't want to grap a lock every time a */
3462 // buffer is returned to us, so we use atomic interlocked functions 3463 /* buffer is returned to us, so we use atomic interlocked functions */
3463 // instead. 3464 /* instead. */
3464 adapter->RcvBuffersOnCard += SXG_RCV_DESCRIPTORS_PER_BLOCK; 3465 adapter->RcvBuffersOnCard += SXG_RCV_DESCRIPTORS_PER_BLOCK;
3465 3466
3466 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DscBlk", 3467 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DscBlk",
@@ -3490,10 +3491,10 @@ static void sxg_stock_rcv_buffers(p_adapter_t adapter)
3490 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "StockBuf", 3491 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "StockBuf",
3491 adapter, adapter->RcvBuffersOnCard, 3492 adapter, adapter->RcvBuffersOnCard,
3492 adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount); 3493 adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
3493 // First, see if we've got less than our minimum threshold of 3494 /* First, see if we've got less than our minimum threshold of */
3494 // receive buffers, there isn't an allocation in progress, and 3495 /* receive buffers, there isn't an allocation in progress, and */
3495 // we haven't exceeded our maximum.. get another block of buffers 3496 /* we haven't exceeded our maximum.. get another block of buffers */
3496 // None of this needs to be SMP safe. It's round numbers. 3497 /* None of this needs to be SMP safe. It's round numbers. */
3497 if ((adapter->FreeRcvBufferCount < SXG_MIN_RCV_DATA_BUFFERS) && 3498 if ((adapter->FreeRcvBufferCount < SXG_MIN_RCV_DATA_BUFFERS) &&
3498 (adapter->AllRcvBlockCount < SXG_MAX_RCV_BLOCKS) && 3499 (adapter->AllRcvBlockCount < SXG_MAX_RCV_BLOCKS) &&
3499 (adapter->AllocationsPending == 0)) { 3500 (adapter->AllocationsPending == 0)) {
@@ -3502,12 +3503,12 @@ static void sxg_stock_rcv_buffers(p_adapter_t adapter)
3502 ReceiveBufferSize), 3503 ReceiveBufferSize),
3503 SXG_BUFFER_TYPE_RCV); 3504 SXG_BUFFER_TYPE_RCV);
3504 } 3505 }
3505 // Now grab the RcvQLock lock and proceed 3506 /* Now grab the RcvQLock lock and proceed */
3506 spin_lock(&adapter->RcvQLock); 3507 spin_lock(&adapter->RcvQLock);
3507 while (adapter->RcvBuffersOnCard < SXG_RCV_DATA_BUFFERS) { 3508 while (adapter->RcvBuffersOnCard < SXG_RCV_DATA_BUFFERS) {
3508 PLIST_ENTRY _ple; 3509 PLIST_ENTRY _ple;
3509 3510
3510 // Get a descriptor block 3511 /* Get a descriptor block */
3511 RcvDescriptorBlockHdr = NULL; 3512 RcvDescriptorBlockHdr = NULL;
3512 if (adapter->FreeRcvBlockCount) { 3513 if (adapter->FreeRcvBlockCount) {
3513 _ple = RemoveHeadList(&adapter->FreeRcvBlocks); 3514 _ple = RemoveHeadList(&adapter->FreeRcvBlocks);
@@ -3519,14 +3520,14 @@ static void sxg_stock_rcv_buffers(p_adapter_t adapter)
3519 } 3520 }
3520 3521
3521 if (RcvDescriptorBlockHdr == NULL) { 3522 if (RcvDescriptorBlockHdr == NULL) {
3522 // Bail out.. 3523 /* Bail out.. */
3523 adapter->Stats.NoMem++; 3524 adapter->Stats.NoMem++;
3524 break; 3525 break;
3525 } 3526 }
3526 // Fill in the descriptor block and give it to the card 3527 /* Fill in the descriptor block and give it to the card */
3527 if (sxg_fill_descriptor_block(adapter, RcvDescriptorBlockHdr) == 3528 if (sxg_fill_descriptor_block(adapter, RcvDescriptorBlockHdr) ==
3528 STATUS_FAILURE) { 3529 STATUS_FAILURE) {
3529 // Free the descriptor block 3530 /* Free the descriptor block */
3530 SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter, 3531 SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter,
3531 RcvDescriptorBlockHdr); 3532 RcvDescriptorBlockHdr);
3532 break; 3533 break;
@@ -3560,15 +3561,15 @@ static void sxg_complete_descriptor_blocks(p_adapter_t adapter,
3560 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpRBlks", 3561 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpRBlks",
3561 adapter, Index, RcvRingInfo->Head, RcvRingInfo->Tail); 3562 adapter, Index, RcvRingInfo->Head, RcvRingInfo->Tail);
3562 3563
3563 // Now grab the RcvQLock lock and proceed 3564 /* Now grab the RcvQLock lock and proceed */
3564 spin_lock(&adapter->RcvQLock); 3565 spin_lock(&adapter->RcvQLock);
3565 ASSERT(Index != RcvRingInfo->Tail); 3566 ASSERT(Index != RcvRingInfo->Tail);
3566 while (RcvRingInfo->Tail != Index) { 3567 while (RcvRingInfo->Tail != Index) {
3567 // 3568 /* */
3568 // Locate the current Cmd (ring descriptor entry), and 3569 /* Locate the current Cmd (ring descriptor entry), and */
3569 // associated receive descriptor block, and advance 3570 /* associated receive descriptor block, and advance */
3570 // the tail 3571 /* the tail */
3571 // 3572 /* */
3572 SXG_RETURN_CMD(RingZero, 3573 SXG_RETURN_CMD(RingZero,
3573 RcvRingInfo, 3574 RcvRingInfo,
3574 RingDescriptorCmd, RcvDescriptorBlockHdr); 3575 RingDescriptorCmd, RcvDescriptorBlockHdr);
@@ -3576,12 +3577,12 @@ static void sxg_complete_descriptor_blocks(p_adapter_t adapter,
3576 RcvRingInfo->Head, RcvRingInfo->Tail, 3577 RcvRingInfo->Head, RcvRingInfo->Tail,
3577 RingDescriptorCmd, RcvDescriptorBlockHdr); 3578 RingDescriptorCmd, RcvDescriptorBlockHdr);
3578 3579
3579 // Clear the SGL field 3580 /* Clear the SGL field */
3580 RingDescriptorCmd->Sgl = 0; 3581 RingDescriptorCmd->Sgl = 0;
3581 // Attempt to refill it and hand it right back to the 3582 /* Attempt to refill it and hand it right back to the */
3582 // card. If we fail to refill it, free the descriptor block 3583 /* card. If we fail to refill it, free the descriptor block */
3583 // header. The card will be restocked later via the 3584 /* header. The card will be restocked later via the */
3584 // RcvBuffersOnCard test 3585 /* RcvBuffersOnCard test */
3585 if (sxg_fill_descriptor_block(adapter, RcvDescriptorBlockHdr) == 3586 if (sxg_fill_descriptor_block(adapter, RcvDescriptorBlockHdr) ==
3586 STATUS_FAILURE) { 3587 STATUS_FAILURE) {
3587 SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter, 3588 SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter,
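The hunks above only convert comments, but those comments also spell out the restocking policy: allocate another descriptor block only when the free-buffer count has dropped below SXG_MIN_RCV_DATA_BUFFERS, nothing is already pending, and SXG_MAX_RCV_BLOCKS has not been reached, then top the card back up toward SXG_RCV_DATA_BUFFERS under RcvQLock. The stand-alone sketch below restates that test with the constants this patch touches in sxghif.h; struct rcv_counters and need_more_rcv_blocks() are hypothetical stand-ins for illustration, not the driver's adapter structure or code.

/*
 * Minimal model of the "round numbers" restocking test in
 * sxg_stock_rcv_buffers(), using the thresholds defined in sxghif.h.
 */
#include <stdbool.h>
#include <stdio.h>

#define SXG_RCV_DATA_BUFFERS          4096  /* amount to give to the card */
#define SXG_MIN_RCV_DATA_BUFFERS      2048  /* restock trigger */
#define SXG_MAX_RCV_BLOCKS            128   /* upper bound on blocks */
#define SXG_RCV_DESCRIPTORS_PER_BLOCK 128   /* buffers added per block */

struct rcv_counters {                       /* hypothetical stand-in */
	unsigned int FreeRcvBufferCount;
	unsigned int AllRcvBlockCount;
	unsigned int AllocationsPending;
	unsigned int RcvBuffersOnCard;
};

/* Allocate another block only when we are short on free buffers,
 * nothing is already in flight, and the block limit is not reached. */
static bool need_more_rcv_blocks(const struct rcv_counters *c)
{
	return c->FreeRcvBufferCount < SXG_MIN_RCV_DATA_BUFFERS &&
	       c->AllRcvBlockCount   < SXG_MAX_RCV_BLOCKS &&
	       c->AllocationsPending == 0;
}

int main(void)
{
	struct rcv_counters c = { 1500, 64, 0, 3800 };

	if (need_more_rcv_blocks(&c))
		printf("allocate one more descriptor block (+%d buffers)\n",
		       SXG_RCV_DESCRIPTORS_PER_BLOCK);

	/* The stocking loop then tops the card back up to the target. */
	while (c.RcvBuffersOnCard < SXG_RCV_DATA_BUFFERS)
		c.RcvBuffersOnCard += SXG_RCV_DESCRIPTORS_PER_BLOCK;

	printf("buffers on card: %u\n", c.RcvBuffersOnCard);
	return 0;
}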
diff --git a/drivers/staging/sxg/sxg_os.h b/drivers/staging/sxg/sxg_os.h
index 26fb0ffafa5c..01182689aaba 100644
--- a/drivers/staging/sxg/sxg_os.h
+++ b/drivers/staging/sxg/sxg_os.h
@@ -44,7 +44,6 @@
44#define FALSE (0) 44#define FALSE (0)
45#define TRUE (1) 45#define TRUE (1)
46 46
47
48typedef struct _LIST_ENTRY { 47typedef struct _LIST_ENTRY {
49 struct _LIST_ENTRY *nle_flink; 48 struct _LIST_ENTRY *nle_flink;
50 struct _LIST_ENTRY *nle_blink; 49 struct _LIST_ENTRY *nle_blink;
@@ -69,35 +68,32 @@ typedef struct _LIST_ENTRY {
69 68
70/* These two have to be inlined since they return things. */ 69/* These two have to be inlined since they return things. */
71 70
72static __inline PLIST_ENTRY 71static __inline PLIST_ENTRY RemoveHeadList(list_entry * l)
73RemoveHeadList(list_entry *l)
74{ 72{
75 list_entry *f; 73 list_entry *f;
76 list_entry *e; 74 list_entry *e;
77 75
78 e = l->nle_flink; 76 e = l->nle_flink;
79 f = e->nle_flink; 77 f = e->nle_flink;
80 l->nle_flink = f; 78 l->nle_flink = f;
81 f->nle_blink = l; 79 f->nle_blink = l;
82 80
83 return (e); 81 return (e);
84} 82}
85 83
86static __inline PLIST_ENTRY 84static __inline PLIST_ENTRY RemoveTailList(list_entry * l)
87RemoveTailList(list_entry *l)
88{ 85{
89 list_entry *b; 86 list_entry *b;
90 list_entry *e; 87 list_entry *e;
91 88
92 e = l->nle_blink; 89 e = l->nle_blink;
93 b = e->nle_blink; 90 b = e->nle_blink;
94 l->nle_blink = b; 91 l->nle_blink = b;
95 b->nle_flink = l; 92 b->nle_flink = l;
96 93
97 return (e); 94 return (e);
98} 95}
99 96
100
101#define InsertTailList(l, e) \ 97#define InsertTailList(l, e) \
102 do { \ 98 do { \
103 list_entry *b; \ 99 list_entry *b; \
@@ -120,7 +116,6 @@ RemoveTailList(list_entry *l)
120 (l)->nle_flink = (e); \ 116 (l)->nle_flink = (e); \
121 } while (0) 117 } while (0)
122 118
123
124#define ATK_DEBUG 1 119#define ATK_DEBUG 1
125 120
126#if ATK_DEBUG 121#if ATK_DEBUG
@@ -133,7 +128,6 @@ RemoveTailList(list_entry *l)
133#define SLIC_TIMESTAMP(value) 128#define SLIC_TIMESTAMP(value)
134#endif 129#endif
135 130
136
137/****************** SXG DEFINES *****************************************/ 131/****************** SXG DEFINES *****************************************/
138 132
139#ifdef ATKDBG 133#ifdef ATKDBG
@@ -150,5 +144,4 @@ RemoveTailList(list_entry *l)
150#define WRITE_REG64(a,reg,value,cpu) sxg_reg64_write((a),(&reg),(value),(cpu)) 144#define WRITE_REG64(a,reg,value,cpu) sxg_reg64_write((a),(&reg),(value),(cpu))
151#define READ_REG(reg,value) (value) = readl((void __iomem *)(&reg)) 145#define READ_REG(reg,value) (value) = readl((void __iomem *)(&reg))
152 146
153#endif /* _SLIC_OS_SPECIFIC_H_ */ 147#endif /* _SLIC_OS_SPECIFIC_H_ */
154
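sxg_os.h keeps Windows-style LIST_ENTRY helpers: RemoveHeadList()/RemoveTailList() unlink the entry next to the sentinel and return it, and InsertTailList() links a new entry in front of the sentinel. A self-contained sketch of the pattern follows; InitializeListHead() and the InsertTailList() function are simplified local re-implementations written for the demo (the header's own macro is only partially visible in this hunk), and struct block is a hypothetical container used to show the offsetof() recovery trick.

/*
 * Stand-alone illustration of the LIST_ENTRY pattern: the sentinel is
 * initialised to point at itself, entries embed the link field, and the
 * caller recovers the containing structure from the returned link.
 */
#include <stdio.h>
#include <stddef.h>

typedef struct _LIST_ENTRY {
	struct _LIST_ENTRY *nle_flink;	/* forward link */
	struct _LIST_ENTRY *nle_blink;	/* backward link */
} list_entry, LIST_ENTRY, *PLIST_ENTRY;

static void InitializeListHead(list_entry *l)
{
	l->nle_flink = l->nle_blink = l;	/* empty list: sentinel points at itself */
}

static void InsertTailList(list_entry *l, list_entry *e)
{
	list_entry *b = l->nle_blink;		/* old tail */

	e->nle_flink = l;
	e->nle_blink = b;
	b->nle_flink = e;
	l->nle_blink = e;
}

static PLIST_ENTRY RemoveHeadList(list_entry *l)
{
	list_entry *e = l->nle_flink;		/* first real entry */
	list_entry *f = e->nle_flink;

	l->nle_flink = f;
	f->nle_blink = l;
	return e;
}

struct block {				/* hypothetical queued object */
	LIST_ENTRY FreeList;		/* embedded link, as in the driver */
	int id;
};

int main(void)
{
	LIST_ENTRY free_blocks;
	struct block a = { .id = 1 }, b = { .id = 2 };

	InitializeListHead(&free_blocks);
	InsertTailList(&free_blocks, &a.FreeList);
	InsertTailList(&free_blocks, &b.FreeList);

	/* Pop the head and recover the containing structure, the same
	 * pattern used when descriptor blocks come off FreeRcvBlocks. */
	PLIST_ENTRY ple = RemoveHeadList(&free_blocks);
	struct block *first = (struct block *)((char *)ple -
					       offsetof(struct block, FreeList));
	printf("popped block %d\n", first->id);	/* prints 1 (FIFO order) */
	return 0;
}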
diff --git a/drivers/staging/sxg/sxgdbg.h b/drivers/staging/sxg/sxgdbg.h
index cfb6c7c77a9e..4522b8d71495 100644
--- a/drivers/staging/sxg/sxgdbg.h
+++ b/drivers/staging/sxg/sxgdbg.h
@@ -58,7 +58,7 @@
58 { \ 58 { \
59 if (!(a)) { \ 59 if (!(a)) { \
60 DBG_ERROR("ASSERT() Failure: file %s, function %s line %d\n",\ 60 DBG_ERROR("ASSERT() Failure: file %s, function %s line %d\n",\
61 __FILE__, __FUNCTION__, __LINE__); \ 61 __FILE__, __func__, __LINE__); \
62 } \ 62 } \
63 } 63 }
64#endif 64#endif
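The sxgdbg.h hunk swaps the GCC-specific __FUNCTION__ for the standard C99 predefined identifier __func__ in the assertion message. Below is a minimal sketch of the same pattern, with the driver's DBG_ERROR() replaced by a plain fprintf() since that macro is not part of this hunk and MY_ASSERT standing in for the driver's ASSERT.

/*
 * Minimal sketch of the ASSERT-style failure message after the
 * __FUNCTION__ -> __func__ change.
 */
#include <stdio.h>

#define MY_ASSERT(a)							\
	do {								\
		if (!(a))						\
			fprintf(stderr,					\
				"ASSERT() Failure: file %s, function %s line %d\n", \
				__FILE__, __func__, __LINE__);		\
	} while (0)

int main(void)
{
	MY_ASSERT(1 + 1 == 2);	/* passes, prints nothing */
	MY_ASSERT(0);		/* prints the failure message for main() */
	return 0;
}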
diff --git a/drivers/staging/sxg/sxghif.h b/drivers/staging/sxg/sxghif.h
index ed26ceaa1315..88bffbaa3be8 100644
--- a/drivers/staging/sxg/sxghif.h
+++ b/drivers/staging/sxg/sxghif.h
@@ -14,119 +14,119 @@
14 *******************************************************************************/ 14 *******************************************************************************/
15typedef struct _SXG_UCODE_REGS { 15typedef struct _SXG_UCODE_REGS {
16 // Address 0 - 0x3F = Command codes 0-15 for TCB 0. Excode 0 16 // Address 0 - 0x3F = Command codes 0-15 for TCB 0. Excode 0
17 u32 Icr; // Code = 0 (extended), ExCode = 0 - Int control 17 u32 Icr; // Code = 0 (extended), ExCode = 0 - Int control
18 u32 RsvdReg1; // Code = 1 - TOE -NA 18 u32 RsvdReg1; // Code = 1 - TOE -NA
19 u32 RsvdReg2; // Code = 2 - TOE -NA 19 u32 RsvdReg2; // Code = 2 - TOE -NA
20 u32 RsvdReg3; // Code = 3 - TOE -NA 20 u32 RsvdReg3; // Code = 3 - TOE -NA
21 u32 RsvdReg4; // Code = 4 - TOE -NA 21 u32 RsvdReg4; // Code = 4 - TOE -NA
22 u32 RsvdReg5; // Code = 5 - TOE -NA 22 u32 RsvdReg5; // Code = 5 - TOE -NA
23 u32 CardUp; // Code = 6 - Microcode initialized when 1 23 u32 CardUp; // Code = 6 - Microcode initialized when 1
24 u32 RsvdReg7; // Code = 7 - TOE -NA 24 u32 RsvdReg7; // Code = 7 - TOE -NA
25 u32 CodeNotUsed[8]; // Codes 8-15 not used. ExCode = 0 25 u32 CodeNotUsed[8]; // Codes 8-15 not used. ExCode = 0
26 // This brings us to ExCode 1 at address 0x40 = Interrupt status pointer 26 // This brings us to ExCode 1 at address 0x40 = Interrupt status pointer
27 u32 Isp; // Code = 0 (extended), ExCode = 1 27 u32 Isp; // Code = 0 (extended), ExCode = 1
28 u32 PadEx1[15]; // Codes 1-15 not used with extended codes 28 u32 PadEx1[15]; // Codes 1-15 not used with extended codes
29 // ExCode 2 = Interrupt Status Register 29 // ExCode 2 = Interrupt Status Register
30 u32 Isr; // Code = 0 (extended), ExCode = 2 30 u32 Isr; // Code = 0 (extended), ExCode = 2
31 u32 PadEx2[15]; 31 u32 PadEx2[15];
32 // ExCode 3 = Event base register. Location of event rings 32 // ExCode 3 = Event base register. Location of event rings
33 u32 EventBase; // Code = 0 (extended), ExCode = 3 33 u32 EventBase; // Code = 0 (extended), ExCode = 3
34 u32 PadEx3[15]; 34 u32 PadEx3[15];
35 // ExCode 4 = Event ring size 35 // ExCode 4 = Event ring size
36 u32 EventSize; // Code = 0 (extended), ExCode = 4 36 u32 EventSize; // Code = 0 (extended), ExCode = 4
37 u32 PadEx4[15]; 37 u32 PadEx4[15];
38 // ExCode 5 = TCB Buffers base address 38 // ExCode 5 = TCB Buffers base address
39 u32 TcbBase; // Code = 0 (extended), ExCode = 5 39 u32 TcbBase; // Code = 0 (extended), ExCode = 5
40 u32 PadEx5[15]; 40 u32 PadEx5[15];
41 // ExCode 6 = TCB Composite Buffers base address 41 // ExCode 6 = TCB Composite Buffers base address
42 u32 TcbCompBase; // Code = 0 (extended), ExCode = 6 42 u32 TcbCompBase; // Code = 0 (extended), ExCode = 6
43 u32 PadEx6[15]; 43 u32 PadEx6[15];
44 // ExCode 7 = Transmit ring base address 44 // ExCode 7 = Transmit ring base address
45 u32 XmtBase; // Code = 0 (extended), ExCode = 7 45 u32 XmtBase; // Code = 0 (extended), ExCode = 7
46 u32 PadEx7[15]; 46 u32 PadEx7[15];
47 // ExCode 8 = Transmit ring size 47 // ExCode 8 = Transmit ring size
48 u32 XmtSize; // Code = 0 (extended), ExCode = 8 48 u32 XmtSize; // Code = 0 (extended), ExCode = 8
49 u32 PadEx8[15]; 49 u32 PadEx8[15];
50 // ExCode 9 = Receive ring base address 50 // ExCode 9 = Receive ring base address
51 u32 RcvBase; // Code = 0 (extended), ExCode = 9 51 u32 RcvBase; // Code = 0 (extended), ExCode = 9
52 u32 PadEx9[15]; 52 u32 PadEx9[15];
53 // ExCode 10 = Receive ring size 53 // ExCode 10 = Receive ring size
54 u32 RcvSize; // Code = 0 (extended), ExCode = 10 54 u32 RcvSize; // Code = 0 (extended), ExCode = 10
55 u32 PadEx10[15]; 55 u32 PadEx10[15];
56 // ExCode 11 = Read EEPROM Config 56 // ExCode 11 = Read EEPROM Config
57 u32 Config; // Code = 0 (extended), ExCode = 11 57 u32 Config; // Code = 0 (extended), ExCode = 11
58 u32 PadEx11[15]; 58 u32 PadEx11[15];
59 // ExCode 12 = Multicast bits 31:0 59 // ExCode 12 = Multicast bits 31:0
60 u32 McastLow; // Code = 0 (extended), ExCode = 12 60 u32 McastLow; // Code = 0 (extended), ExCode = 12
61 u32 PadEx12[15]; 61 u32 PadEx12[15];
62 // ExCode 13 = Multicast bits 63:32 62 // ExCode 13 = Multicast bits 63:32
63 u32 McastHigh; // Code = 0 (extended), ExCode = 13 63 u32 McastHigh; // Code = 0 (extended), ExCode = 13
64 u32 PadEx13[15]; 64 u32 PadEx13[15];
65 // ExCode 14 = Ping 65 // ExCode 14 = Ping
66 u32 Ping; // Code = 0 (extended), ExCode = 14 66 u32 Ping; // Code = 0 (extended), ExCode = 14
67 u32 PadEx14[15]; 67 u32 PadEx14[15];
68 // ExCode 15 = Link MTU 68 // ExCode 15 = Link MTU
69 u32 LinkMtu; // Code = 0 (extended), ExCode = 15 69 u32 LinkMtu; // Code = 0 (extended), ExCode = 15
70 u32 PadEx15[15]; 70 u32 PadEx15[15];
71 // ExCode 16 = Download synchronization 71 // ExCode 16 = Download synchronization
72 u32 LoadSync; // Code = 0 (extended), ExCode = 16 72 u32 LoadSync; // Code = 0 (extended), ExCode = 16
73 u32 PadEx16[15]; 73 u32 PadEx16[15];
74 // ExCode 17 = Upper DRAM address bits on 32-bit systems 74 // ExCode 17 = Upper DRAM address bits on 32-bit systems
75 u32 Upper; // Code = 0 (extended), ExCode = 17 75 u32 Upper; // Code = 0 (extended), ExCode = 17
76 u32 PadEx17[15]; 76 u32 PadEx17[15];
77 // ExCode 18 = Slowpath Send Index Address 77 // ExCode 18 = Slowpath Send Index Address
78 u32 SPSendIndex; // Code = 0 (extended), ExCode = 18 78 u32 SPSendIndex; // Code = 0 (extended), ExCode = 18
79 u32 PadEx18[15]; 79 u32 PadEx18[15];
80 u32 RsvdXF; // Code = 0 (extended), ExCode = 19 80 u32 RsvdXF; // Code = 0 (extended), ExCode = 19
81 u32 PadEx19[15]; 81 u32 PadEx19[15];
82 // ExCode 20 = Aggregation 82 // ExCode 20 = Aggregation
83 u32 Aggregation; // Code = 0 (extended), ExCode = 20 83 u32 Aggregation; // Code = 0 (extended), ExCode = 20
84 u32 PadEx20[15]; 84 u32 PadEx20[15];
85 // ExCode 21 = Receive MDL push timer 85 // ExCode 21 = Receive MDL push timer
86 u32 PushTicks; // Code = 0 (extended), ExCode = 21 86 u32 PushTicks; // Code = 0 (extended), ExCode = 21
87 u32 PadEx21[15]; 87 u32 PadEx21[15];
88 // ExCode 22 = TOE NA 88 // ExCode 22 = TOE NA
89 u32 AckFrequency; // Code = 0 (extended), ExCode = 22 89 u32 AckFrequency; // Code = 0 (extended), ExCode = 22
90 u32 PadEx22[15]; 90 u32 PadEx22[15];
91 // ExCode 23 = TOE NA 91 // ExCode 23 = TOE NA
92 u32 RsvdReg23; 92 u32 RsvdReg23;
93 u32 PadEx23[15]; 93 u32 PadEx23[15];
94 // ExCode 24 = TOE NA 94 // ExCode 24 = TOE NA
95 u32 RsvdReg24; 95 u32 RsvdReg24;
96 u32 PadEx24[15]; 96 u32 PadEx24[15];
97 // ExCode 25 = TOE NA 97 // ExCode 25 = TOE NA
98 u32 RsvdReg25; // Code = 0 (extended), ExCode = 25 98 u32 RsvdReg25; // Code = 0 (extended), ExCode = 25
99 u32 PadEx25[15]; 99 u32 PadEx25[15];
100 // ExCode 26 = Receive checksum requirements 100 // ExCode 26 = Receive checksum requirements
101 u32 ReceiveChecksum; // Code = 0 (extended), ExCode = 26 101 u32 ReceiveChecksum; // Code = 0 (extended), ExCode = 26
102 u32 PadEx26[15]; 102 u32 PadEx26[15];
103 // ExCode 27 = RSS Requirements 103 // ExCode 27 = RSS Requirements
104 u32 Rss; // Code = 0 (extended), ExCode = 27 104 u32 Rss; // Code = 0 (extended), ExCode = 27
105 u32 PadEx27[15]; 105 u32 PadEx27[15];
106 // ExCode 28 = RSS Table 106 // ExCode 28 = RSS Table
107 u32 RssTable; // Code = 0 (extended), ExCode = 28 107 u32 RssTable; // Code = 0 (extended), ExCode = 28
108 u32 PadEx28[15]; 108 u32 PadEx28[15];
109 // ExCode 29 = Event ring release entries 109 // ExCode 29 = Event ring release entries
110 u32 EventRelease; // Code = 0 (extended), ExCode = 29 110 u32 EventRelease; // Code = 0 (extended), ExCode = 29
111 u32 PadEx29[15]; 111 u32 PadEx29[15];
112 // ExCode 30 = Number of receive bufferlist commands on ring 0 112 // ExCode 30 = Number of receive bufferlist commands on ring 0
113 u32 RcvCmd; // Code = 0 (extended), ExCode = 30 113 u32 RcvCmd; // Code = 0 (extended), ExCode = 30
114 u32 PadEx30[15]; 114 u32 PadEx30[15];
115 // ExCode 31 = slowpath transmit command - Data[31:0] = 1 115 // ExCode 31 = slowpath transmit command - Data[31:0] = 1
116 u32 XmtCmd; // Code = 0 (extended), ExCode = 31 116 u32 XmtCmd; // Code = 0 (extended), ExCode = 31
117 u32 PadEx31[15]; 117 u32 PadEx31[15];
118 // ExCode 32 = Dump command 118 // ExCode 32 = Dump command
119 u32 DumpCmd; // Code = 0 (extended), ExCode = 32 119 u32 DumpCmd; // Code = 0 (extended), ExCode = 32
120 u32 PadEx32[15]; 120 u32 PadEx32[15];
121 // ExCode 33 = Debug command 121 // ExCode 33 = Debug command
122 u32 DebugCmd; // Code = 0 (extended), ExCode = 33 122 u32 DebugCmd; // Code = 0 (extended), ExCode = 33
123 u32 PadEx33[15]; 123 u32 PadEx33[15];
124 // There are 128 possible extended commands - each of account for 16 124 // There are 128 possible extended commands - each of account for 16
125 // words (including the non-relevent base command codes 1-15). 125 // words (including the non-relevent base command codes 1-15).
126 // Pad for the remainder of these here to bring us to the next CPU 126 // Pad for the remainder of these here to bring us to the next CPU
127 // base. As extended codes are added, reduce the first array value in 127 // base. As extended codes are added, reduce the first array value in
128 // the following field 128 // the following field
129 u32 PadToNextCpu[94][16]; // 94 = 128 - 34 (34 = Excodes 0 - 33) 129 u32 PadToNextCpu[94][16]; // 94 = 128 - 34 (34 = Excodes 0 - 33)
130} SXG_UCODE_REGS, *PSXG_UCODE_REGS; 130} SXG_UCODE_REGS, *PSXG_UCODE_REGS;
131 131
132// Interrupt control register (0) values 132// Interrupt control register (0) values
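The SXG_UCODE_REGS comments above describe a fixed addressing scheme: every extended code owns 16 32-bit words, so ExCode 1 (Isp) starts at 0x40, and padding the unused 94 of 128 codes (PadToNextCpu[94][16]) fills out one CPU's register window. The short check below recomputes those figures from the comments; it uses plain <stdint.h> types and no driver headers, and the 0x2000 per-CPU total is the same value sxghw.h later names SXG_REGISTER_SIZE_PER_CPU.

/*
 * Arithmetic check of the ExCode layout: offset of ExCode n is
 * n * 16 words * 4 bytes, and 128 codes fill 0x2000 bytes per CPU.
 */
#include <stdio.h>
#include <stdint.h>

#define WORDS_PER_EXCODE 16
#define TOTAL_EXCODES    128
#define USED_EXCODES     34		/* ExCodes 0 - 33 */

int main(void)
{
	unsigned int stride = WORDS_PER_EXCODE * (unsigned int)sizeof(uint32_t); /* 64 bytes */

	printf("ExCode 1 (Isp) offset:  0x%02x\n", 1 * stride);	/* 0x40, as documented */
	printf("ExCode 2 (Isr) offset:  0x%02x\n", 2 * stride);	/* 0x80 */
	printf("PadToNextCpu rows:      %d\n", TOTAL_EXCODES - USED_EXCODES);	/* 94 */
	printf("per-CPU register bytes: 0x%04x\n", TOTAL_EXCODES * stride);	/* 0x2000 */
	return 0;
}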
@@ -141,7 +141,7 @@ typedef struct _SXG_UCODE_REGS {
141 141
142// The Microcode supports up to 16 RSS queues 142// The Microcode supports up to 16 RSS queues
143#define SXG_MAX_RSS 16 143#define SXG_MAX_RSS 16
144#define SXG_MAX_RSS_TABLE_SIZE 256 // 256-byte max 144#define SXG_MAX_RSS_TABLE_SIZE 256 // 256-byte max
145 145
146#define SXG_RSS_TCP6 0x00000001 // RSS TCP over IPv6 146#define SXG_RSS_TCP6 0x00000001 // RSS TCP over IPv6
147#define SXG_RSS_TCP4 0x00000002 // RSS TCP over IPv4 147#define SXG_RSS_TCP4 0x00000002 // RSS TCP over IPv4
@@ -170,16 +170,16 @@ typedef struct _SXG_UCODE_REGS {
170 * SXG_UCODE_REGS definition above 170 * SXG_UCODE_REGS definition above
171 */ 171 */
172typedef struct _SXG_TCB_REGS { 172typedef struct _SXG_TCB_REGS {
173 u32 ExCode; /* Extended codes - see SXG_UCODE_REGS */ 173 u32 ExCode; /* Extended codes - see SXG_UCODE_REGS */
174 u32 Xmt; /* Code = 1 - # of Xmt descriptors added to ring */ 174 u32 Xmt; /* Code = 1 - # of Xmt descriptors added to ring */
175 u32 Rcv; /* Code = 2 - # of Rcv descriptors added to ring */ 175 u32 Rcv; /* Code = 2 - # of Rcv descriptors added to ring */
176 u32 Rsvd1; /* Code = 3 - TOE NA */ 176 u32 Rsvd1; /* Code = 3 - TOE NA */
177 u32 Rsvd2; /* Code = 4 - TOE NA */ 177 u32 Rsvd2; /* Code = 4 - TOE NA */
178 u32 Rsvd3; /* Code = 5 - TOE NA */ 178 u32 Rsvd3; /* Code = 5 - TOE NA */
179 u32 Invalid; /* Code = 6 - Reserved for "CardUp" see above */ 179 u32 Invalid; /* Code = 6 - Reserved for "CardUp" see above */
180 u32 Rsvd4; /* Code = 7 - TOE NA */ 180 u32 Rsvd4; /* Code = 7 - TOE NA */
181 u32 Rsvd5; /* Code = 8 - TOE NA */ 181 u32 Rsvd5; /* Code = 8 - TOE NA */
182 u32 Pad[7]; /* Codes 8-15 - Not used. */ 182 u32 Pad[7]; /* Codes 8-15 - Not used. */
183} SXG_TCB_REGS, *PSXG_TCB_REGS; 183} SXG_TCB_REGS, *PSXG_TCB_REGS;
184 184
185/*************************************************************************** 185/***************************************************************************
@@ -273,27 +273,27 @@ typedef struct _SXG_TCB_REGS {
273 */ 273 */
274#pragma pack(push, 1) 274#pragma pack(push, 1)
275typedef struct _SXG_EVENT { 275typedef struct _SXG_EVENT {
276 u32 Pad[1]; // not used 276 u32 Pad[1]; // not used
277 u32 SndUna; // SndUna value 277 u32 SndUna; // SndUna value
278 u32 Resid; // receive MDL resid 278 u32 Resid; // receive MDL resid
279 union { 279 union {
280 void * HostHandle; // Receive host handle 280 void *HostHandle; // Receive host handle
281 u32 Rsvd1; // TOE NA 281 u32 Rsvd1; // TOE NA
282 struct { 282 struct {
283 u32 NotUsed; 283 u32 NotUsed;
284 u32 Rsvd2; // TOE NA 284 u32 Rsvd2; // TOE NA
285 } Flush; 285 } Flush;
286 }; 286 };
287 u32 Toeplitz; // RSS Toeplitz hash 287 u32 Toeplitz; // RSS Toeplitz hash
288 union { 288 union {
289 ushort Rsvd3; // TOE NA 289 ushort Rsvd3; // TOE NA
290 ushort HdrOffset; // Slowpath 290 ushort HdrOffset; // Slowpath
291 }; 291 };
292 ushort Length; // 292 ushort Length; //
293 unsigned char Rsvd4; // TOE NA 293 unsigned char Rsvd4; // TOE NA
294 unsigned char Code; // Event code 294 unsigned char Code; // Event code
295 unsigned char CommandIndex; // New ring index 295 unsigned char CommandIndex; // New ring index
296 unsigned char Status; // Event status 296 unsigned char Status; // Event status
297} SXG_EVENT, *PSXG_EVENT; 297} SXG_EVENT, *PSXG_EVENT;
298#pragma pack(pop) 298#pragma pack(pop)
299 299
@@ -318,12 +318,12 @@ typedef struct _SXG_EVENT {
318// Event ring 318// Event ring
319// Size must be power of 2, between 128 and 16k 319// Size must be power of 2, between 128 and 16k
320#define EVENT_RING_SIZE 4096 // ?? 320#define EVENT_RING_SIZE 4096 // ??
321#define EVENT_RING_BATCH 16 // Hand entries back 16 at a time. 321#define EVENT_RING_BATCH 16 // Hand entries back 16 at a time.
322#define EVENT_BATCH_LIMIT 256 // Stop processing events after 256 (16 * 16) 322#define EVENT_BATCH_LIMIT 256 // Stop processing events after 256 (16 * 16)
323 323
324typedef struct _SXG_EVENT_RING { 324typedef struct _SXG_EVENT_RING {
325 SXG_EVENT Ring[EVENT_RING_SIZE]; 325 SXG_EVENT Ring[EVENT_RING_SIZE];
326}SXG_EVENT_RING, *PSXG_EVENT_RING; 326} SXG_EVENT_RING, *PSXG_EVENT_RING;
327 327
328/*************************************************************************** 328/***************************************************************************
329 * 329 *
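The event-ring constants above encode two relationships: the ring size must be a power of two between 128 and 16k entries, and the per-interrupt limit of 256 events is 16 batches of EVENT_RING_BATCH entries. A tiny compile-and-run check of that arithmetic, written as stand-alone assertions rather than driver code:

/* Sanity checks for the documented event-ring constraints. */
#include <assert.h>
#include <stdio.h>

#define EVENT_RING_SIZE   4096
#define EVENT_RING_BATCH  16
#define EVENT_BATCH_LIMIT 256

int main(void)
{
	/* power of two, within the documented 128..16k range */
	assert((EVENT_RING_SIZE & (EVENT_RING_SIZE - 1)) == 0);
	assert(EVENT_RING_SIZE >= 128 && EVENT_RING_SIZE <= 16384);

	/* the batch limit is 16 batches of 16 entries */
	assert(EVENT_BATCH_LIMIT == EVENT_RING_BATCH * EVENT_RING_BATCH);

	printf("event ring checks pass\n");
	return 0;
}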
@@ -341,7 +341,7 @@ typedef struct _SXG_EVENT_RING {
341#define SXG_TCB_PER_BUCKET 16 341#define SXG_TCB_PER_BUCKET 16
342#define SXG_TCB_BUCKET_MASK 0xFF0 // Bucket portion of TCB ID 342#define SXG_TCB_BUCKET_MASK 0xFF0 // Bucket portion of TCB ID
343#define SXG_TCB_ELEMENT_MASK 0x00F // Element within bucket 343#define SXG_TCB_ELEMENT_MASK 0x00F // Element within bucket
344#define SXG_TCB_BUCKETS 256 // 256 * 16 = 4k 344#define SXG_TCB_BUCKETS 256 // 256 * 16 = 4k
345 345
346#define SXG_TCB_BUFFER_SIZE 512 // ASSERT format is correct 346#define SXG_TCB_BUFFER_SIZE 512 // ASSERT format is correct
347 347
@@ -368,7 +368,6 @@ typedef struct _SXG_EVENT_RING {
368 &(_TcpObject)->CompBuffer->Frame.HasVlan.TcpIp6.Ip : \ 368 &(_TcpObject)->CompBuffer->Frame.HasVlan.TcpIp6.Ip : \
369 &(_TcpObject)->CompBuffer->Frame.NoVlan.TcpIp6.Ip 369 &(_TcpObject)->CompBuffer->Frame.NoVlan.TcpIp6.Ip
370 370
371
372#if DBG 371#if DBG
373// Horrible kludge to distinguish dumb-nic, slowpath, and 372// Horrible kludge to distinguish dumb-nic, slowpath, and
374// fastpath traffic. Decrement the HopLimit by one 373// fastpath traffic. Decrement the HopLimit by one
@@ -396,16 +395,16 @@ typedef struct _SXG_EVENT_RING {
396 * Receive and transmit rings 395 * Receive and transmit rings
397 ***************************************************************************/ 396 ***************************************************************************/
398#define SXG_MAX_RING_SIZE 256 397#define SXG_MAX_RING_SIZE 256
399#define SXG_XMT_RING_SIZE 128 // Start with 128 398#define SXG_XMT_RING_SIZE 128 // Start with 128
400#define SXG_RCV_RING_SIZE 128 // Start with 128 399#define SXG_RCV_RING_SIZE 128 // Start with 128
401#define SXG_MAX_ENTRIES 4096 400#define SXG_MAX_ENTRIES 4096
402 401
403// Structure and macros to manage a ring 402// Structure and macros to manage a ring
404typedef struct _SXG_RING_INFO { 403typedef struct _SXG_RING_INFO {
405 unsigned char Head; // Where we add entries - Note unsigned char:RING_SIZE 404 unsigned char Head; // Where we add entries - Note unsigned char:RING_SIZE
406 unsigned char Tail; // Where we pull off completed entries 405 unsigned char Tail; // Where we pull off completed entries
407 ushort Size; // Ring size - Must be multiple of 2 406 ushort Size; // Ring size - Must be multiple of 2
408 void * Context[SXG_MAX_RING_SIZE]; // Shadow ring 407 void *Context[SXG_MAX_RING_SIZE]; // Shadow ring
409} SXG_RING_INFO, *PSXG_RING_INFO; 408} SXG_RING_INFO, *PSXG_RING_INFO;
410 409
411#define SXG_INITIALIZE_RING(_ring, _size) { \ 410#define SXG_INITIALIZE_RING(_ring, _size) { \
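SXG_RING_INFO pairs a hardware ring with a shadow Context[] array: Head marks where new commands are added, Tail where completed ones are pulled off, and the shadow slot carries the host pointer (an SGL or descriptor block header) that the completion path needs back. The sketch below models that bookkeeping; the power-of-two wrap is an assumption suggested by the "must be multiple of 2" comment and the 128-entry rings, and ring_submit()/ring_complete() are illustrative helpers, not the driver's SXG_* macros.

/*
 * Illustrative head/tail bookkeeping with a shadow context ring,
 * assuming a power-of-two Size so the index wraps with a mask.
 */
#include <stdio.h>

#define RING_SIZE 128

struct ring_info {
	unsigned char Head;		/* next free slot */
	unsigned char Tail;		/* oldest outstanding slot */
	unsigned short Size;
	void *Context[RING_SIZE];	/* shadow ring of host pointers */
};

static int ring_submit(struct ring_info *r, void *ctx)
{
	unsigned char next = (unsigned char)((r->Head + 1) & (r->Size - 1));

	if (next == r->Tail)		/* full: one slot kept unused */
		return -1;
	r->Context[r->Head] = ctx;
	r->Head = next;
	return 0;
}

static void *ring_complete(struct ring_info *r)
{
	void *ctx;

	if (r->Tail == r->Head)		/* nothing outstanding */
		return NULL;
	ctx = r->Context[r->Tail];
	r->Context[r->Tail] = NULL;
	r->Tail = (unsigned char)((r->Tail + 1) & (r->Size - 1));
	return ctx;
}

int main(void)
{
	struct ring_info ring = { .Size = RING_SIZE };
	int block = 42;

	ring_submit(&ring, &block);
	printf("completed ctx: %d\n", *(int *)ring_complete(&ring));
	return 0;
}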
@@ -483,40 +482,40 @@ typedef struct _SXG_RING_INFO {
483 */ 482 */
484#pragma pack(push, 1) 483#pragma pack(push, 1)
485typedef struct _SXG_CMD { 484typedef struct _SXG_CMD {
486 dma_addr_t Sgl; // Physical address of SGL 485 dma_addr_t Sgl; // Physical address of SGL
487 union { 486 union {
488 struct { 487 struct {
489 dma64_addr_t FirstSgeAddress;// Address of first SGE 488 dma64_addr_t FirstSgeAddress; // Address of first SGE
490 u32 FirstSgeLength; // Length of first SGE 489 u32 FirstSgeLength; // Length of first SGE
491 union { 490 union {
492 u32 Rsvd1; // TOE NA 491 u32 Rsvd1; // TOE NA
493 u32 SgeOffset; // Slowpath - 2nd SGE offset 492 u32 SgeOffset; // Slowpath - 2nd SGE offset
494 u32 Resid; // MDL completion - clobbers update 493 u32 Resid; // MDL completion - clobbers update
495 }; 494 };
496 union { 495 union {
497 u32 TotalLength; // Total transfer length 496 u32 TotalLength; // Total transfer length
498 u32 Mss; // LSO MSS 497 u32 Mss; // LSO MSS
499 }; 498 };
500 } Buffer; 499 } Buffer;
501 }; 500 };
502 union { 501 union {
503 struct { 502 struct {
504 unsigned char Flags:4; // slowpath flags 503 unsigned char Flags:4; // slowpath flags
505 unsigned char IpHl:4; // Ip header length (>>2) 504 unsigned char IpHl:4; // Ip header length (>>2)
506 unsigned char MacLen; // Mac header len 505 unsigned char MacLen; // Mac header len
507 } CsumFlags; 506 } CsumFlags;
508 struct { 507 struct {
509 ushort Flags:4; // slowpath flags 508 ushort Flags:4; // slowpath flags
510 ushort TcpHdrOff:7; // TCP 509 ushort TcpHdrOff:7; // TCP
511 ushort MacLen:5; // Mac header len 510 ushort MacLen:5; // Mac header len
512 } LsoFlags; 511 } LsoFlags;
513 ushort Flags; // flags 512 ushort Flags; // flags
514 }; 513 };
515 union { 514 union {
516 ushort SgEntries; // SG entry count including first sge 515 ushort SgEntries; // SG entry count including first sge
517 struct { 516 struct {
518 unsigned char Status; // Copied from event status 517 unsigned char Status; // Copied from event status
519 unsigned char NotUsed; 518 unsigned char NotUsed;
520 } Status; 519 } Status;
521 }; 520 };
522} SXG_CMD, *PSXG_CMD; 521} SXG_CMD, *PSXG_CMD;
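The union at the end of SXG_CMD overlays three 16-bit views of the same field: 4+4+8 bits for checksum offload, 4+7+5 bits for LSO, or a raw ushort. The sketch below only verifies that the packed bit-field layouts really come to two bytes each; exact bit ordering is compiler-defined, so this is a sizing sanity check rather than a statement about the wire format.

/* Check that the CsumFlags / LsoFlags views both pack into 16 bits. */
#include <assert.h>
#include <stdio.h>

#pragma pack(push, 1)
struct csum_flags {
	unsigned char Flags:4;		/* slowpath flags */
	unsigned char IpHl:4;		/* IP header length (>>2) */
	unsigned char MacLen;		/* MAC header length */
};

struct lso_flags {
	unsigned short Flags:4;		/* slowpath flags */
	unsigned short TcpHdrOff:7;	/* TCP header offset */
	unsigned short MacLen:5;	/* MAC header length */
};
#pragma pack(pop)

int main(void)
{
	/* 4 + 4 + 8 bits and 4 + 7 + 5 bits both occupy two bytes */
	printf("sizeof(csum_flags) = %zu\n", sizeof(struct csum_flags)); /* 2 */
	printf("sizeof(lso_flags)  = %zu\n", sizeof(struct lso_flags));  /* 2 */
	assert(sizeof(struct csum_flags) == 2);
	assert(sizeof(struct lso_flags) == 2);
	return 0;
}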
@@ -524,8 +523,8 @@ typedef struct _SXG_CMD {
524 523
525#pragma pack(push, 1) 524#pragma pack(push, 1)
526typedef struct _VLAN_HDR { 525typedef struct _VLAN_HDR {
527 ushort VlanTci; 526 ushort VlanTci;
528 ushort VlanTpid; 527 ushort VlanTpid;
529} VLAN_HDR, *PVLAN_HDR; 528} VLAN_HDR, *PVLAN_HDR;
530#pragma pack(pop) 529#pragma pack(pop)
531 530
@@ -561,16 +560,16 @@ typedef struct _VLAN_HDR {
561 * 560 *
562 */ 561 */
563// Slowpath CMD flags 562// Slowpath CMD flags
564#define SXG_SLOWCMD_CSUM_IP 0x01 // Checksum IP 563#define SXG_SLOWCMD_CSUM_IP 0x01 // Checksum IP
565#define SXG_SLOWCMD_CSUM_TCP 0x02 // Checksum TCP 564#define SXG_SLOWCMD_CSUM_TCP 0x02 // Checksum TCP
566#define SXG_SLOWCMD_LSO 0x04 // Large segment send 565#define SXG_SLOWCMD_LSO 0x04 // Large segment send
567 566
568typedef struct _SXG_XMT_RING { 567typedef struct _SXG_XMT_RING {
569 SXG_CMD Descriptors[SXG_XMT_RING_SIZE]; 568 SXG_CMD Descriptors[SXG_XMT_RING_SIZE];
570} SXG_XMT_RING, *PSXG_XMT_RING; 569} SXG_XMT_RING, *PSXG_XMT_RING;
571 570
572typedef struct _SXG_RCV_RING { 571typedef struct _SXG_RCV_RING {
573 SXG_CMD Descriptors[SXG_RCV_RING_SIZE]; 572 SXG_CMD Descriptors[SXG_RCV_RING_SIZE];
574} SXG_RCV_RING, *PSXG_RCV_RING; 573} SXG_RCV_RING, *PSXG_RCV_RING;
575 574
576/*************************************************************************** 575/***************************************************************************
@@ -578,8 +577,8 @@ typedef struct _SXG_RCV_RING {
578 * shared memory allocation 577 * shared memory allocation
579 ***************************************************************************/ 578 ***************************************************************************/
580typedef enum { 579typedef enum {
581 SXG_BUFFER_TYPE_RCV, // Receive buffer 580 SXG_BUFFER_TYPE_RCV, // Receive buffer
582 SXG_BUFFER_TYPE_SGL // SGL buffer 581 SXG_BUFFER_TYPE_SGL // SGL buffer
583} SXG_BUFFER_TYPE; 582} SXG_BUFFER_TYPE;
584 583
585// State for SXG buffers 584// State for SXG buffers
@@ -668,60 +667,60 @@ typedef enum {
668#define SXG_RCV_DATA_BUFFERS 4096 // Amount to give to the card 667#define SXG_RCV_DATA_BUFFERS 4096 // Amount to give to the card
669#define SXG_INITIAL_RCV_DATA_BUFFERS 8192 // Initial pool of buffers 668#define SXG_INITIAL_RCV_DATA_BUFFERS 8192 // Initial pool of buffers
670#define SXG_MIN_RCV_DATA_BUFFERS 2048 // Minimum amount and when to get more 669#define SXG_MIN_RCV_DATA_BUFFERS 2048 // Minimum amount and when to get more
671#define SXG_MAX_RCV_BLOCKS 128 // = 16384 receive buffers 670#define SXG_MAX_RCV_BLOCKS 128 // = 16384 receive buffers
672 671
673// Receive buffer header 672// Receive buffer header
674typedef struct _SXG_RCV_DATA_BUFFER_HDR { 673typedef struct _SXG_RCV_DATA_BUFFER_HDR {
675 dma_addr_t PhysicalAddress; // Buffer physical address 674 dma_addr_t PhysicalAddress; // Buffer physical address
676 // Note - DO NOT USE the VirtualAddress field to locate data. 675 // Note - DO NOT USE the VirtualAddress field to locate data.
677 // Use the sxg.h:SXG_RECEIVE_DATA_LOCATION macro instead. 676 // Use the sxg.h:SXG_RECEIVE_DATA_LOCATION macro instead.
678 void *VirtualAddress; // Start of buffer 677 void *VirtualAddress; // Start of buffer
679 LIST_ENTRY FreeList; // Free queue of buffers 678 LIST_ENTRY FreeList; // Free queue of buffers
680 struct _SXG_RCV_DATA_BUFFER_HDR *Next; // Fastpath data buffer queue 679 struct _SXG_RCV_DATA_BUFFER_HDR *Next; // Fastpath data buffer queue
681 u32 Size; // Buffer size 680 u32 Size; // Buffer size
682 u32 ByteOffset; // See SXG_RESTORE_MDL_OFFSET 681 u32 ByteOffset; // See SXG_RESTORE_MDL_OFFSET
683 unsigned char State; // See SXG_BUFFER state above 682 unsigned char State; // See SXG_BUFFER state above
684 unsigned char Status; // Event status (to log PUSH) 683 unsigned char Status; // Event status (to log PUSH)
685 struct sk_buff * skb; // Double mapped (nbl and pkt) 684 struct sk_buff *skb; // Double mapped (nbl and pkt)
686} SXG_RCV_DATA_BUFFER_HDR, *PSXG_RCV_DATA_BUFFER_HDR; 685} SXG_RCV_DATA_BUFFER_HDR, *PSXG_RCV_DATA_BUFFER_HDR;
687 686
688// SxgSlowReceive uses the PACKET (skb) contained 687// SxgSlowReceive uses the PACKET (skb) contained
689// in the SXG_RCV_DATA_BUFFER_HDR when indicating dumb-nic data 688// in the SXG_RCV_DATA_BUFFER_HDR when indicating dumb-nic data
690#define SxgDumbRcvPacket skb 689#define SxgDumbRcvPacket skb
691 690
692#define SXG_RCV_DATA_HDR_SIZE 256 // Space for SXG_RCV_DATA_BUFFER_HDR 691#define SXG_RCV_DATA_HDR_SIZE 256 // Space for SXG_RCV_DATA_BUFFER_HDR
693#define SXG_RCV_DATA_BUFFER_SIZE 2048 // Non jumbo = 2k including HDR 692#define SXG_RCV_DATA_BUFFER_SIZE 2048 // Non jumbo = 2k including HDR
694#define SXG_RCV_JUMBO_BUFFER_SIZE 10240 // jumbo = 10k including HDR 693#define SXG_RCV_JUMBO_BUFFER_SIZE 10240 // jumbo = 10k including HDR
695 694
696// Receive data descriptor 695// Receive data descriptor
697typedef struct _SXG_RCV_DATA_DESCRIPTOR { 696typedef struct _SXG_RCV_DATA_DESCRIPTOR {
698 union { 697 union {
699 struct sk_buff * VirtualAddress; // Host handle 698 struct sk_buff *VirtualAddress; // Host handle
700 u64 ForceTo8Bytes; // Force x86 to 8-byte boundary 699 u64 ForceTo8Bytes; // Force x86 to 8-byte boundary
701 }; 700 };
702 dma_addr_t PhysicalAddress; 701 dma_addr_t PhysicalAddress;
703} SXG_RCV_DATA_DESCRIPTOR, *PSXG_RCV_DATA_DESCRIPTOR; 702} SXG_RCV_DATA_DESCRIPTOR, *PSXG_RCV_DATA_DESCRIPTOR;
704 703
705// Receive descriptor block 704// Receive descriptor block
706#define SXG_RCV_DESCRIPTORS_PER_BLOCK 128 705#define SXG_RCV_DESCRIPTORS_PER_BLOCK 128
707#define SXG_RCV_DESCRIPTOR_BLOCK_SIZE 2048 // For sanity check 706#define SXG_RCV_DESCRIPTOR_BLOCK_SIZE 2048 // For sanity check
708typedef struct _SXG_RCV_DESCRIPTOR_BLOCK { 707typedef struct _SXG_RCV_DESCRIPTOR_BLOCK {
709 SXG_RCV_DATA_DESCRIPTOR Descriptors[SXG_RCV_DESCRIPTORS_PER_BLOCK]; 708 SXG_RCV_DATA_DESCRIPTOR Descriptors[SXG_RCV_DESCRIPTORS_PER_BLOCK];
710} SXG_RCV_DESCRIPTOR_BLOCK, *PSXG_RCV_DESCRIPTOR_BLOCK; 709} SXG_RCV_DESCRIPTOR_BLOCK, *PSXG_RCV_DESCRIPTOR_BLOCK;
711 710
712// Receive descriptor block header 711// Receive descriptor block header
713typedef struct _SXG_RCV_DESCRIPTOR_BLOCK_HDR { 712typedef struct _SXG_RCV_DESCRIPTOR_BLOCK_HDR {
714 void * VirtualAddress; // Start of 2k buffer 713 void *VirtualAddress; // Start of 2k buffer
715 dma_addr_t PhysicalAddress; // ..and it's physical address 714 dma_addr_t PhysicalAddress; // ..and it's physical address
716 LIST_ENTRY FreeList; // Free queue of descriptor blocks 715 LIST_ENTRY FreeList; // Free queue of descriptor blocks
717 unsigned char State; // See SXG_BUFFER state above 716 unsigned char State; // See SXG_BUFFER state above
718} SXG_RCV_DESCRIPTOR_BLOCK_HDR, *PSXG_RCV_DESCRIPTOR_BLOCK_HDR; 717} SXG_RCV_DESCRIPTOR_BLOCK_HDR, *PSXG_RCV_DESCRIPTOR_BLOCK_HDR;
719 718
720// Receive block header 719// Receive block header
721typedef struct _SXG_RCV_BLOCK_HDR { 720typedef struct _SXG_RCV_BLOCK_HDR {
722 void * VirtualAddress; // Start of virtual memory 721 void *VirtualAddress; // Start of virtual memory
723 dma_addr_t PhysicalAddress; // ..and it's physical address 722 dma_addr_t PhysicalAddress; // ..and it's physical address
724 LIST_ENTRY AllList; // Queue of all SXG_RCV_BLOCKS 723 LIST_ENTRY AllList; // Queue of all SXG_RCV_BLOCKS
725} SXG_RCV_BLOCK_HDR, *PSXG_RCV_BLOCK_HDR; 724} SXG_RCV_BLOCK_HDR, *PSXG_RCV_BLOCK_HDR;
726 725
727// Macros to determine data structure offsets into receive block 726// Macros to determine data structure offsets into receive block
@@ -747,8 +746,8 @@ typedef struct _SXG_RCV_BLOCK_HDR {
747// Use the miniport reserved portion of the NBL to locate 746// Use the miniport reserved portion of the NBL to locate
748// our SXG_RCV_DATA_BUFFER_HDR structure. 747// our SXG_RCV_DATA_BUFFER_HDR structure.
749typedef struct _SXG_RCV_NBL_RESERVED { 748typedef struct _SXG_RCV_NBL_RESERVED {
750 PSXG_RCV_DATA_BUFFER_HDR RcvDataBufferHdr; 749 PSXG_RCV_DATA_BUFFER_HDR RcvDataBufferHdr;
751 void * Available; 750 void *Available;
752} SXG_RCV_NBL_RESERVED, *PSXG_RCV_NBL_RESERVED; 751} SXG_RCV_NBL_RESERVED, *PSXG_RCV_NBL_RESERVED;
753 752
754#define SXG_RCV_NBL_BUFFER_HDR(_NBL) (((PSXG_RCV_NBL_RESERVED)NET_BUFFER_LIST_MINIPORT_RESERVED(_NBL))->RcvDataBufferHdr) 753#define SXG_RCV_NBL_BUFFER_HDR(_NBL) (((PSXG_RCV_NBL_RESERVED)NET_BUFFER_LIST_MINIPORT_RESERVED(_NBL))->RcvDataBufferHdr)
@@ -760,12 +759,11 @@ typedef struct _SXG_RCV_NBL_RESERVED {
760#define SXG_MIN_SGL_BUFFERS 2048 // Minimum amount and when to get more 759#define SXG_MIN_SGL_BUFFERS 2048 // Minimum amount and when to get more
761#define SXG_MAX_SGL_BUFFERS 16384 // Maximum to allocate (note ADAPT:ushort) 760#define SXG_MAX_SGL_BUFFERS 16384 // Maximum to allocate (note ADAPT:ushort)
762 761
763
764// Self identifying structure type 762// Self identifying structure type
765typedef enum _SXG_SGL_TYPE { 763typedef enum _SXG_SGL_TYPE {
766 SXG_SGL_DUMB, // Dumb NIC SGL 764 SXG_SGL_DUMB, // Dumb NIC SGL
767 SXG_SGL_SLOW, // Slowpath protocol header - see below 765 SXG_SGL_SLOW, // Slowpath protocol header - see below
768 SXG_SGL_CHIMNEY // Chimney offload SGL 766 SXG_SGL_CHIMNEY // Chimney offload SGL
769} SXG_SGL_TYPE, PSXG_SGL_TYPE; 767} SXG_SGL_TYPE, PSXG_SGL_TYPE;
770 768
771// Note - the description below is Microsoft specific 769// Note - the description below is Microsoft specific
@@ -774,14 +772,14 @@ typedef enum _SXG_SGL_TYPE {
774// for the SCATTER_GATHER_LIST portion of the SXG_SCATTER_GATHER data structure. 772// for the SCATTER_GATHER_LIST portion of the SXG_SCATTER_GATHER data structure.
775// The following considerations apply when setting this value: 773// The following considerations apply when setting this value:
776// - First, the Sahara card is designed to read the Microsoft SGL structure 774// - First, the Sahara card is designed to read the Microsoft SGL structure
777// straight out of host memory. This means that the SGL must reside in 775// straight out of host memory. This means that the SGL must reside in
778// shared memory. If the length here is smaller than the SGL for the 776// shared memory. If the length here is smaller than the SGL for the
779// NET_BUFFER, then NDIS will allocate its own buffer. The buffer 777// NET_BUFFER, then NDIS will allocate its own buffer. The buffer
780// that NDIS allocates is not in shared memory, so when this happens, 778// that NDIS allocates is not in shared memory, so when this happens,
781// the SGL will need to be copied to a set of SXG_SCATTER_GATHER buffers. 779// the SGL will need to be copied to a set of SXG_SCATTER_GATHER buffers.
782// In other words.. we don't want this value to be too small. 780// In other words.. we don't want this value to be too small.
783// - On the other hand.. we're allocating up to 16k of these things. If 781// - On the other hand.. we're allocating up to 16k of these things. If
784// we make this too big, we start to consume a ton of memory.. 782// we make this too big, we start to consume a ton of memory..
785// At the moment, I'm going to limit the number of SG entries to 150. 783// At the moment, I'm going to limit the number of SG entries to 150.
786// If each entry maps roughly 4k, then this should cover roughly 600kB 784// If each entry maps roughly 4k, then this should cover roughly 600kB
787// NET_BUFFERs. Furthermore, since each entry is 24 bytes, the total 785// NET_BUFFERs. Furthermore, since each entry is 24 bytes, the total
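The comment above reasons about SXG_SGL_ENTRIES numerically: roughly 150 entries of ~4k each cover a ~600 kB NET_BUFFER, and at 24 bytes per entry the per-SGL element array stays small even with up to 16k SGL buffers allocated. A worked version of that arithmetic follows; the value 150 is taken from the comment (the actual SXG_SGL_ENTRIES definition lies outside this hunk), and the worst-case total is simply the product of those numbers, shown to illustrate the memory trade-off being described.

/* Worked SGL sizing arithmetic, using only figures from the comment. */
#include <stdio.h>

#define SXG_SGL_ENTRIES      150	/* assumed from the comment above */
#define SGE_BYTES            24	/* dma64 addr + len + pad + reserved */
#define PAGE_BYTES           4096	/* "each entry maps roughly 4k" */
#define SXG_MAX_SGL_BUFFERS  16384

int main(void)
{
	unsigned long per_sgl   = (unsigned long)SXG_SGL_ENTRIES * SGE_BYTES;
	unsigned long coverage  = (unsigned long)SXG_SGL_ENTRIES * PAGE_BYTES;
	unsigned long worst_all = per_sgl * SXG_MAX_SGL_BUFFERS;

	printf("one NET_BUFFER covered:  ~%lu kB\n", coverage / 1024);	/* ~600 kB */
	printf("element array per SGL:   %lu bytes\n", per_sgl);	/* 3600 */
	printf("worst case for all SGLs: ~%lu MB\n", worst_all >> 20);	/* ~56 MB */
	return 0;
}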
@@ -801,24 +799,23 @@ typedef enum _SXG_SGL_TYPE {
801// the SGL. The following structure defines an x64 799// the SGL. The following structure defines an x64
802// formatted SGL entry 800// formatted SGL entry
803typedef struct _SXG_X64_SGE { 801typedef struct _SXG_X64_SGE {
804 dma64_addr_t Address; // same as wdm.h 802 dma64_addr_t Address; // same as wdm.h
805 u32 Length; // same as wdm.h 803 u32 Length; // same as wdm.h
806 u32 CompilerPad;// The compiler pads to 8-bytes 804 u32 CompilerPad; // The compiler pads to 8-bytes
807 u64 Reserved; // u32 * in wdm.h. Force to 8 bytes 805 u64 Reserved; // u32 * in wdm.h. Force to 8 bytes
808} SXG_X64_SGE, *PSXG_X64_SGE; 806} SXG_X64_SGE, *PSXG_X64_SGE;
809 807
810typedef struct _SCATTER_GATHER_ELEMENT { 808typedef struct _SCATTER_GATHER_ELEMENT {
811 dma64_addr_t Address; // same as wdm.h 809 dma64_addr_t Address; // same as wdm.h
812 u32 Length; // same as wdm.h 810 u32 Length; // same as wdm.h
813 u32 CompilerPad;// The compiler pads to 8-bytes 811 u32 CompilerPad; // The compiler pads to 8-bytes
814 u64 Reserved; // u32 * in wdm.h. Force to 8 bytes 812 u64 Reserved; // u32 * in wdm.h. Force to 8 bytes
815} SCATTER_GATHER_ELEMENT, *PSCATTER_GATHER_ELEMENT; 813} SCATTER_GATHER_ELEMENT, *PSCATTER_GATHER_ELEMENT;
816 814
817
818typedef struct _SCATTER_GATHER_LIST { 815typedef struct _SCATTER_GATHER_LIST {
819 u32 NumberOfElements; 816 u32 NumberOfElements;
820 u32 * Reserved; 817 u32 *Reserved;
821 SCATTER_GATHER_ELEMENT Elements[]; 818 SCATTER_GATHER_ELEMENT Elements[];
822} SCATTER_GATHER_LIST, *PSCATTER_GATHER_LIST; 819} SCATTER_GATHER_LIST, *PSCATTER_GATHER_LIST;
823 820
824// The card doesn't care about anything except elements, so 821// The card doesn't care about anything except elements, so
@@ -826,26 +823,26 @@ typedef struct _SCATTER_GATHER_LIST {
826// SGL structure. But redefine from wdm.h:SCATTER_GATHER_LIST so 823// SGL structure. But redefine from wdm.h:SCATTER_GATHER_LIST so
827// we can specify SXG_X64_SGE and define a fixed number of elements 824// we can specify SXG_X64_SGE and define a fixed number of elements
828typedef struct _SXG_X64_SGL { 825typedef struct _SXG_X64_SGL {
829 u32 NumberOfElements; 826 u32 NumberOfElements;
830 u32 * Reserved; 827 u32 *Reserved;
831 SXG_X64_SGE Elements[SXG_SGL_ENTRIES]; 828 SXG_X64_SGE Elements[SXG_SGL_ENTRIES];
832} SXG_X64_SGL, *PSXG_X64_SGL; 829} SXG_X64_SGL, *PSXG_X64_SGL;
833 830
834typedef struct _SXG_SCATTER_GATHER { 831typedef struct _SXG_SCATTER_GATHER {
835 SXG_SGL_TYPE Type; // FIRST! Dumb-nic or offload 832 SXG_SGL_TYPE Type; // FIRST! Dumb-nic or offload
836 void * adapter; // Back pointer to adapter 833 void *adapter; // Back pointer to adapter
837 LIST_ENTRY FreeList; // Free SXG_SCATTER_GATHER blocks 834 LIST_ENTRY FreeList; // Free SXG_SCATTER_GATHER blocks
838 LIST_ENTRY AllList; // All SXG_SCATTER_GATHER blocks 835 LIST_ENTRY AllList; // All SXG_SCATTER_GATHER blocks
839 dma_addr_t PhysicalAddress;// physical address 836 dma_addr_t PhysicalAddress; // physical address
840 unsigned char State; // See SXG_BUFFER state above 837 unsigned char State; // See SXG_BUFFER state above
841 unsigned char CmdIndex; // Command ring index 838 unsigned char CmdIndex; // Command ring index
842 struct sk_buff * DumbPacket; // Associated Packet 839 struct sk_buff *DumbPacket; // Associated Packet
843 u32 Direction; // For asynchronous completions 840 u32 Direction; // For asynchronous completions
844 u32 CurOffset; // Current SGL offset 841 u32 CurOffset; // Current SGL offset
845 u32 SglRef; // SGL reference count 842 u32 SglRef; // SGL reference count
846 VLAN_HDR VlanTag; // VLAN tag to be inserted into SGL 843 VLAN_HDR VlanTag; // VLAN tag to be inserted into SGL
847 PSCATTER_GATHER_LIST pSgl; // SGL Addr. Possibly &Sgl 844 PSCATTER_GATHER_LIST pSgl; // SGL Addr. Possibly &Sgl
848 SXG_X64_SGL Sgl; // SGL handed to card 845 SXG_X64_SGL Sgl; // SGL handed to card
849} SXG_SCATTER_GATHER, *PSXG_SCATTER_GATHER; 846} SXG_SCATTER_GATHER, *PSXG_SCATTER_GATHER;
850 847
851#if defined(CONFIG_X86_64) 848#if defined(CONFIG_X86_64)
@@ -856,6 +853,5 @@ typedef struct _SXG_SCATTER_GATHER {
856#define SXG_SGL_BUFFER(_SxgSgl) NULL 853#define SXG_SGL_BUFFER(_SxgSgl) NULL
857#define SXG_SGL_BUF_SIZE 0 854#define SXG_SGL_BUF_SIZE 0
858#else 855#else
859 Stop Compilation; 856Stop Compilation;
860#endif 857#endif
861
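SXG_X64_SGE mirrors the Windows SCATTER_GATHER_ELEMENT layout: a 64-bit address, a 32-bit length, an explicit CompilerPad, and a Reserved field widened to 64 bits so every entry is 24 bytes with natural alignment. The layout check below models the structure with <stdint.h> types, since it is meant to build outside the kernel; it illustrates the sizing, it is not the driver's definition.

/* Layout check: 8 + 4 + 4 + 8 bytes per SGE, Reserved at offset 16. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

typedef struct sge_model {
	uint64_t Address;	/* dma64_addr_t in the driver */
	uint32_t Length;
	uint32_t CompilerPad;	/* keeps Reserved 8-byte aligned */
	uint64_t Reserved;	/* u32 * in wdm.h, widened to 8 bytes */
} sge_model;

int main(void)
{
	printf("sizeof(sge_model)      = %zu\n", sizeof(sge_model));		/* 24 */
	printf("offsetof(.., Reserved) = %zu\n", offsetof(sge_model, Reserved)); /* 16 */
	return 0;
}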
diff --git a/drivers/staging/sxg/sxghw.h b/drivers/staging/sxg/sxghw.h
index 8f4f6effdd98..2222ae91fd97 100644
--- a/drivers/staging/sxg/sxghw.h
+++ b/drivers/staging/sxg/sxghw.h
@@ -13,11 +13,11 @@
13/******************************************************************************* 13/*******************************************************************************
14 * Configuration space 14 * Configuration space
15 *******************************************************************************/ 15 *******************************************************************************/
16// PCI Vendor ID 16/* PCI Vendor ID */
17#define SXG_VENDOR_ID 0x139A // Alacritech's Vendor ID 17#define SXG_VENDOR_ID 0x139A /* Alacritech's Vendor ID */
18 18
19// PCI Device ID 19// PCI Device ID
20#define SXG_DEVICE_ID 0x0009 // Sahara Device ID 20#define SXG_DEVICE_ID 0x0009 /* Sahara Device ID */
21 21
22// 22//
23// Subsystem IDs. 23// Subsystem IDs.
@@ -141,7 +141,7 @@ typedef struct _SXG_HW_REGS {
141#define SXG_REGISTER_SIZE_PER_CPU 0x00002000 // Used to sanity check UCODE_REGS structure 141#define SXG_REGISTER_SIZE_PER_CPU 0x00002000 // Used to sanity check UCODE_REGS structure
142 142
143// Sahara receive sequencer status values 143// Sahara receive sequencer status values
144#define SXG_RCV_STATUS_ATTN 0x80000000 // Attention 144#define SXG_RCV_STATUS_ATTN 0x80000000 // Attention
145#define SXG_RCV_STATUS_TRANSPORT_MASK 0x3F000000 // Transport mask 145#define SXG_RCV_STATUS_TRANSPORT_MASK 0x3F000000 // Transport mask
146#define SXG_RCV_STATUS_TRANSPORT_ERROR 0x20000000 // Transport error 146#define SXG_RCV_STATUS_TRANSPORT_ERROR 0x20000000 // Transport error
147#define SXG_RCV_STATUS_TRANSPORT_CSUM 0x23000000 // Transport cksum error 147#define SXG_RCV_STATUS_TRANSPORT_CSUM 0x23000000 // Transport cksum error
@@ -156,9 +156,9 @@ typedef struct _SXG_HW_REGS {
156#define SXG_RCV_STATUS_TRANSPORT_FTP 0x03000000 // Transport FTP 156#define SXG_RCV_STATUS_TRANSPORT_FTP 0x03000000 // Transport FTP
157#define SXG_RCV_STATUS_TRANSPORT_HTTP 0x02000000 // Transport HTTP 157#define SXG_RCV_STATUS_TRANSPORT_HTTP 0x02000000 // Transport HTTP
158#define SXG_RCV_STATUS_TRANSPORT_SMB 0x01000000 // Transport SMB 158#define SXG_RCV_STATUS_TRANSPORT_SMB 0x01000000 // Transport SMB
159#define SXG_RCV_STATUS_NETWORK_MASK 0x00FF0000 // Network mask 159#define SXG_RCV_STATUS_NETWORK_MASK 0x00FF0000 // Network mask
160#define SXG_RCV_STATUS_NETWORK_ERROR 0x00800000 // Network error 160#define SXG_RCV_STATUS_NETWORK_ERROR 0x00800000 // Network error
161#define SXG_RCV_STATUS_NETWORK_CSUM 0x00830000 // Network cksum error 161#define SXG_RCV_STATUS_NETWORK_CSUM 0x00830000 // Network cksum error
162#define SXG_RCV_STATUS_NETWORK_UFLOW 0x00820000 // Network underflow error 162#define SXG_RCV_STATUS_NETWORK_UFLOW 0x00820000 // Network underflow error
163#define SXG_RCV_STATUS_NETWORK_HDRLEN 0x00800000 // Network header length 163#define SXG_RCV_STATUS_NETWORK_HDRLEN 0x00800000 // Network header length
164#define SXG_RCV_STATUS_NETWORK_OFLOW 0x00400000 // Network overflow detected 164#define SXG_RCV_STATUS_NETWORK_OFLOW 0x00400000 // Network overflow detected
@@ -167,67 +167,67 @@ typedef struct _SXG_HW_REGS {
167#define SXG_RCV_STATUS_NETWORK_OFFSET 0x00080000 // Network offset detected 167#define SXG_RCV_STATUS_NETWORK_OFFSET 0x00080000 // Network offset detected
168#define SXG_RCV_STATUS_NETWORK_FRAGMENT 0x00040000 // Network fragment detected 168#define SXG_RCV_STATUS_NETWORK_FRAGMENT 0x00040000 // Network fragment detected
169#define SXG_RCV_STATUS_NETWORK_TRANS_MASK 0x00030000 // Network transport type mask 169#define SXG_RCV_STATUS_NETWORK_TRANS_MASK 0x00030000 // Network transport type mask
170#define SXG_RCV_STATUS_NETWORK_UDP 0x00020000 // UDP 170#define SXG_RCV_STATUS_NETWORK_UDP 0x00020000 // UDP
171#define SXG_RCV_STATUS_NETWORK_TCP 0x00010000 // TCP 171#define SXG_RCV_STATUS_NETWORK_TCP 0x00010000 // TCP
172#define SXG_RCV_STATUS_IPONLY 0x00008000 // IP-only not TCP 172#define SXG_RCV_STATUS_IPONLY 0x00008000 // IP-only not TCP
173#define SXG_RCV_STATUS_PKT_PRI 0x00006000 // Receive priority 173#define SXG_RCV_STATUS_PKT_PRI 0x00006000 // Receive priority
174#define SXG_RCV_STATUS_PKT_PRI_SHFT 13 // Receive priority shift 174#define SXG_RCV_STATUS_PKT_PRI_SHFT 13 // Receive priority shift
175#define SXG_RCV_STATUS_PARITY 0x00001000 // MAC Receive RAM parity error 175#define SXG_RCV_STATUS_PARITY 0x00001000 // MAC Receive RAM parity error
176#define SXG_RCV_STATUS_ADDRESS_MASK 0x00000F00 // Link address detection mask 176#define SXG_RCV_STATUS_ADDRESS_MASK 0x00000F00 // Link address detection mask
177#define SXG_RCV_STATUS_ADDRESS_D 0x00000B00 // Link address D 177#define SXG_RCV_STATUS_ADDRESS_D 0x00000B00 // Link address D
178#define SXG_RCV_STATUS_ADDRESS_C 0x00000A00 // Link address C 178#define SXG_RCV_STATUS_ADDRESS_C 0x00000A00 // Link address C
179#define SXG_RCV_STATUS_ADDRESS_B 0x00000900 // Link address B 179#define SXG_RCV_STATUS_ADDRESS_B 0x00000900 // Link address B
180#define SXG_RCV_STATUS_ADDRESS_A 0x00000800 // Link address A 180#define SXG_RCV_STATUS_ADDRESS_A 0x00000800 // Link address A
181#define SXG_RCV_STATUS_ADDRESS_BCAST 0x00000300 // Link address broadcast 181#define SXG_RCV_STATUS_ADDRESS_BCAST 0x00000300 // Link address broadcast
182#define SXG_RCV_STATUS_ADDRESS_MCAST 0x00000200 // Link address multicast 182#define SXG_RCV_STATUS_ADDRESS_MCAST 0x00000200 // Link address multicast
183#define SXG_RCV_STATUS_ADDRESS_CMCAST 0x00000100 // Link control multicast 183#define SXG_RCV_STATUS_ADDRESS_CMCAST 0x00000100 // Link control multicast
184#define SXG_RCV_STATUS_LINK_MASK 0x000000FF // Link status mask 184#define SXG_RCV_STATUS_LINK_MASK 0x000000FF // Link status mask
185#define SXG_RCV_STATUS_LINK_ERROR 0x00000080 // Link error 185#define SXG_RCV_STATUS_LINK_ERROR 0x00000080 // Link error
186#define SXG_RCV_STATUS_LINK_MASK 0x000000FF // Link status mask 186#define SXG_RCV_STATUS_LINK_MASK 0x000000FF // Link status mask
187#define SXG_RCV_STATUS_LINK_PARITY 0x00000087 // RcvMacQ parity error 187#define SXG_RCV_STATUS_LINK_PARITY 0x00000087 // RcvMacQ parity error
188#define SXG_RCV_STATUS_LINK_EARLY 0x00000086 // Data early 188#define SXG_RCV_STATUS_LINK_EARLY 0x00000086 // Data early
189#define SXG_RCV_STATUS_LINK_BUFOFLOW 0x00000085 // Buffer overflow 189#define SXG_RCV_STATUS_LINK_BUFOFLOW 0x00000085 // Buffer overflow
190#define SXG_RCV_STATUS_LINK_CODE 0x00000084 // Link code error 190#define SXG_RCV_STATUS_LINK_CODE 0x00000084 // Link code error
191#define SXG_RCV_STATUS_LINK_DRIBBLE 0x00000083 // Dribble nibble 191#define SXG_RCV_STATUS_LINK_DRIBBLE 0x00000083 // Dribble nibble
192#define SXG_RCV_STATUS_LINK_CRC 0x00000082 // CRC error 192#define SXG_RCV_STATUS_LINK_CRC 0x00000082 // CRC error
193#define SXG_RCV_STATUS_LINK_OFLOW 0x00000081 // Link overflow 193#define SXG_RCV_STATUS_LINK_OFLOW 0x00000081 // Link overflow
194#define SXG_RCV_STATUS_LINK_UFLOW 0x00000080 // Link underflow 194#define SXG_RCV_STATUS_LINK_UFLOW 0x00000080 // Link underflow
195#define SXG_RCV_STATUS_LINK_8023 0x00000020 // 802.3 195#define SXG_RCV_STATUS_LINK_8023 0x00000020 // 802.3
196#define SXG_RCV_STATUS_LINK_SNAP 0x00000010 // Snap 196#define SXG_RCV_STATUS_LINK_SNAP 0x00000010 // Snap
197#define SXG_RCV_STATUS_LINK_VLAN 0x00000008 // VLAN 197#define SXG_RCV_STATUS_LINK_VLAN 0x00000008 // VLAN
198#define SXG_RCV_STATUS_LINK_TYPE_MASK 0x00000007 // Network type mask 198#define SXG_RCV_STATUS_LINK_TYPE_MASK 0x00000007 // Network type mask
199#define SXG_RCV_STATUS_LINK_CONTROL 0x00000003 // Control packet 199#define SXG_RCV_STATUS_LINK_CONTROL 0x00000003 // Control packet
200#define SXG_RCV_STATUS_LINK_IPV6 0x00000002 // IPv6 packet 200#define SXG_RCV_STATUS_LINK_IPV6 0x00000002 // IPv6 packet
201#define SXG_RCV_STATUS_LINK_IPV4 0x00000001 // IPv4 packet 201#define SXG_RCV_STATUS_LINK_IPV4 0x00000001 // IPv4 packet
202 202
203/*************************************************************************** 203/***************************************************************************
204 * Sahara receive and transmit configuration registers 204 * Sahara receive and transmit configuration registers
205 ***************************************************************************/ 205 ***************************************************************************/
206#define RCV_CONFIG_RESET 0x80000000 // RcvConfig register reset 206#define RCV_CONFIG_RESET 0x80000000 // RcvConfig register reset
207#define RCV_CONFIG_ENABLE 0x40000000 // Enable the receive logic 207#define RCV_CONFIG_ENABLE 0x40000000 // Enable the receive logic
208#define RCV_CONFIG_ENPARSE 0x20000000 // Enable the receive parser 208#define RCV_CONFIG_ENPARSE 0x20000000 // Enable the receive parser
209#define RCV_CONFIG_SOCKET 0x10000000 // Enable the socket detector 209#define RCV_CONFIG_SOCKET 0x10000000 // Enable the socket detector
210#define RCV_CONFIG_RCVBAD 0x08000000 // Receive all bad frames 210#define RCV_CONFIG_RCVBAD 0x08000000 // Receive all bad frames
211#define RCV_CONFIG_CONTROL 0x04000000 // Receive all control frames 211#define RCV_CONFIG_CONTROL 0x04000000 // Receive all control frames
212#define RCV_CONFIG_RCVPAUSE 0x02000000 // Enable pause transmit when attn 212#define RCV_CONFIG_RCVPAUSE 0x02000000 // Enable pause transmit when attn
213#define RCV_CONFIG_TZIPV6 0x01000000 // Include TCP port w/ IPv6 toeplitz 213#define RCV_CONFIG_TZIPV6 0x01000000 // Include TCP port w/ IPv6 toeplitz
214#define RCV_CONFIG_TZIPV4 0x00800000 // Include TCP port w/ IPv4 toeplitz 214#define RCV_CONFIG_TZIPV4 0x00800000 // Include TCP port w/ IPv4 toeplitz
215#define RCV_CONFIG_FLUSH 0x00400000 // Flush buffers 215#define RCV_CONFIG_FLUSH 0x00400000 // Flush buffers
216#define RCV_CONFIG_PRIORITY_MASK 0x00300000 // Priority level 216#define RCV_CONFIG_PRIORITY_MASK 0x00300000 // Priority level
217#define RCV_CONFIG_HASH_MASK 0x00030000 // Hash depth 217#define RCV_CONFIG_HASH_MASK 0x00030000 // Hash depth
218#define RCV_CONFIG_HASH_8 0x00000000 // Hash depth 8 218#define RCV_CONFIG_HASH_8 0x00000000 // Hash depth 8
219#define RCV_CONFIG_HASH_16 0x00010000 // Hash depth 16 219#define RCV_CONFIG_HASH_16 0x00010000 // Hash depth 16
220#define RCV_CONFIG_HASH_4 0x00020000 // Hash depth 4 220#define RCV_CONFIG_HASH_4 0x00020000 // Hash depth 4
221#define RCV_CONFIG_HASH_2 0x00030000 // Hash depth 2 221#define RCV_CONFIG_HASH_2 0x00030000 // Hash depth 2
222#define RCV_CONFIG_BUFLEN_MASK 0x0000FFF0 // Buffer length bits 15:4. ie multiple of 16. 222#define RCV_CONFIG_BUFLEN_MASK 0x0000FFF0 // Buffer length bits 15:4. ie multiple of 16.
223#define RCV_CONFIG_SKT_DIS 0x00000008 // Disable socket detection on attn 223#define RCV_CONFIG_SKT_DIS 0x00000008 // Disable socket detection on attn
224// Macro to determine RCV_CONFIG_BUFLEN based on maximum frame size. 224// Macro to determine RCV_CONFIG_BUFLEN based on maximum frame size.
225// We add 18 bytes for Sahara receive status and padding, plus 4 bytes for CRC, 225// We add 18 bytes for Sahara receive status and padding, plus 4 bytes for CRC,
226// and round up to nearest 16 byte boundary 226// and round up to nearest 16 byte boundary
227#define RCV_CONFIG_BUFSIZE(_MaxFrame) ((((_MaxFrame) + 22) + 15) & RCV_CONFIG_BUFLEN_MASK) 227#define RCV_CONFIG_BUFSIZE(_MaxFrame) ((((_MaxFrame) + 22) + 15) & RCV_CONFIG_BUFLEN_MASK)
228 228
229#define XMT_CONFIG_RESET 0x80000000 // XmtConfig register reset 229#define XMT_CONFIG_RESET 0x80000000 // XmtConfig register reset
230#define XMT_CONFIG_ENABLE 0x40000000 // Enable transmit logic 230#define XMT_CONFIG_ENABLE 0x40000000 // Enable transmit logic
231#define XMT_CONFIG_MAC_PARITY 0x20000000 // Inhibit MAC RAM parity error 231#define XMT_CONFIG_MAC_PARITY 0x20000000 // Inhibit MAC RAM parity error
232#define XMT_CONFIG_BUF_PARITY 0x10000000 // Inhibit D2F buffer parity error 232#define XMT_CONFIG_BUF_PARITY 0x10000000 // Inhibit D2F buffer parity error
233#define XMT_CONFIG_MEM_PARITY 0x08000000 // Inhibit 1T SRAM parity error 233#define XMT_CONFIG_MEM_PARITY 0x08000000 // Inhibit 1T SRAM parity error
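RCV_CONFIG_BUFSIZE() above folds the comment's arithmetic into one expression: add 18 bytes of Sahara receive status/padding plus 4 bytes of CRC (the +22), then round up to the next 16-byte boundary via +15 and the BUFLEN mask. A worked example follows; the 1518 and 9014 byte frame sizes are just illustrative inputs, not values taken from the driver.

/* Worked example of the receive buffer length calculation. */
#include <stdio.h>

#define RCV_CONFIG_BUFLEN_MASK 0x0000FFF0
#define RCV_CONFIG_BUFSIZE(_MaxFrame) \
	((((_MaxFrame) + 22) + 15) & RCV_CONFIG_BUFLEN_MASK)

int main(void)
{
	printf("standard frame 1518 -> buffer %d\n", RCV_CONFIG_BUFSIZE(1518)); /* 1552 */
	printf("jumbo frame    9014 -> buffer %d\n", RCV_CONFIG_BUFSIZE(9014)); /* 9040 */
	return 0;
}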
@@ -249,9 +249,9 @@ typedef struct _SXG_HW_REGS {
249 249
250// A-XGMAC Configuration Register 1 250// A-XGMAC Configuration Register 1
251#define AXGMAC_CFG1_XMT_PAUSE 0x80000000 // Allow the sending of Pause frames 251#define AXGMAC_CFG1_XMT_PAUSE 0x80000000 // Allow the sending of Pause frames
252#define AXGMAC_CFG1_XMT_EN 0x40000000 // Enable transmit 252#define AXGMAC_CFG1_XMT_EN 0x40000000 // Enable transmit
253#define AXGMAC_CFG1_RCV_PAUSE 0x20000000 // Allow the detection of Pause frames 253#define AXGMAC_CFG1_RCV_PAUSE 0x20000000 // Allow the detection of Pause frames
254#define AXGMAC_CFG1_RCV_EN 0x10000000 // Enable receive 254#define AXGMAC_CFG1_RCV_EN 0x10000000 // Enable receive
255#define AXGMAC_CFG1_XMT_STATE 0x04000000 // Current transmit state - READ ONLY 255#define AXGMAC_CFG1_XMT_STATE 0x04000000 // Current transmit state - READ ONLY
256#define AXGMAC_CFG1_RCV_STATE 0x01000000 // Current receive state - READ ONLY 256#define AXGMAC_CFG1_RCV_STATE 0x01000000 // Current receive state - READ ONLY
257#define AXGMAC_CFG1_XOFF_SHORT 0x00001000 // Only pause for 64 slot on XOFF 257#define AXGMAC_CFG1_XOFF_SHORT 0x00001000 // Only pause for 64 slot on XOFF
@@ -262,24 +262,24 @@ typedef struct _SXG_HW_REGS {
262#define AXGMAC_CFG1_RCV_FCS2 0x00000200 // Delay receive FCS 2 4-byte words 262#define AXGMAC_CFG1_RCV_FCS2 0x00000200 // Delay receive FCS 2 4-byte words
263#define AXGMAC_CFG1_RCV_FCS3 0x00000300 // Delay receive FCS 3 4-byte words 263#define AXGMAC_CFG1_RCV_FCS3 0x00000300 // Delay receive FCS 3 4-byte words
264#define AXGMAC_CFG1_PKT_OVERRIDE 0x00000080 // Per-packet override enable 264#define AXGMAC_CFG1_PKT_OVERRIDE 0x00000080 // Per-packet override enable
265#define AXGMAC_CFG1_SWAP 0x00000040 // Byte swap enable 265#define AXGMAC_CFG1_SWAP 0x00000040 // Byte swap enable
266#define AXGMAC_CFG1_SHORT_ASSERT 0x00000020 // ASSERT srdrpfrm on short frame (<64) 266#define AXGMAC_CFG1_SHORT_ASSERT 0x00000020 // ASSERT srdrpfrm on short frame (<64)
267#define AXGMAC_CFG1_RCV_STRICT 0x00000010 // RCV only 802.3AE when CLEAR 267#define AXGMAC_CFG1_RCV_STRICT 0x00000010 // RCV only 802.3AE when CLEAR
268#define AXGMAC_CFG1_CHECK_LEN 0x00000008 // Verify frame length 268#define AXGMAC_CFG1_CHECK_LEN 0x00000008 // Verify frame length
269#define AXGMAC_CFG1_GEN_FCS 0x00000004 // Generate FCS 269#define AXGMAC_CFG1_GEN_FCS 0x00000004 // Generate FCS
270#define AXGMAC_CFG1_PAD_MASK 0x00000003 // Mask for pad bits 270#define AXGMAC_CFG1_PAD_MASK 0x00000003 // Mask for pad bits
271#define AXGMAC_CFG1_PAD_64 0x00000001 // Pad frames to 64 bytes 271#define AXGMAC_CFG1_PAD_64 0x00000001 // Pad frames to 64 bytes
272#define AXGMAC_CFG1_PAD_VLAN 0x00000002 // Detect VLAN and pad to 68 bytes 272#define AXGMAC_CFG1_PAD_VLAN 0x00000002 // Detect VLAN and pad to 68 bytes
273#define AXGMAC_CFG1_PAD_68 0x00000003 // Pad to 68 bytes 273#define AXGMAC_CFG1_PAD_68 0x00000003 // Pad to 68 bytes
274 274
275// A-XGMAC Configuration Register 2 275// A-XGMAC Configuration Register 2
276#define AXGMAC_CFG2_GEN_PAUSE 0x80000000 // Generate single pause frame (test) 276#define AXGMAC_CFG2_GEN_PAUSE 0x80000000 // Generate single pause frame (test)
277#define AXGMAC_CFG2_LF_MANUAL 0x08000000 // Manual link fault sequence 277#define AXGMAC_CFG2_LF_MANUAL 0x08000000 // Manual link fault sequence
278#define AXGMAC_CFG2_LF_AUTO 0x04000000 // Auto link fault sequence 278#define AXGMAC_CFG2_LF_AUTO 0x04000000 // Auto link fault sequence
279#define AXGMAC_CFG2_LF_REMOTE 0x02000000 // Remote link fault (READ ONLY) 279#define AXGMAC_CFG2_LF_REMOTE 0x02000000 // Remote link fault (READ ONLY)
280#define AXGMAC_CFG2_LF_LOCAL 0x01000000 // Local link fault (READ ONLY) 280#define AXGMAC_CFG2_LF_LOCAL 0x01000000 // Local link fault (READ ONLY)
281#define AXGMAC_CFG2_IPG_MASK 0x001F0000 // Inter packet gap 281#define AXGMAC_CFG2_IPG_MASK 0x001F0000 // Inter packet gap
282#define AXGMAC_CFG2_IPG_SHIFT 16 282#define AXGMAC_CFG2_IPG_SHIFT 16
283#define AXGMAC_CFG2_PAUSE_XMT 0x00008000 // Pause transmit module 283#define AXGMAC_CFG2_PAUSE_XMT 0x00008000 // Pause transmit module
284#define AXGMAC_CFG2_IPG_EXTEN 0x00000020 // Enable IPG extension algorithm 284#define AXGMAC_CFG2_IPG_EXTEN 0x00000020 // Enable IPG extension algorithm
285#define AXGMAC_CFG2_IPGEX_MASK 0x0000001F // IPG extension 285#define AXGMAC_CFG2_IPGEX_MASK 0x0000001F // IPG extension
@@ -299,9 +299,9 @@ typedef struct _SXG_HW_REGS {
299#define AXGMAC_SARHIGH_OCTET_SIX 0x00FF0000 // Sixth octet 299#define AXGMAC_SARHIGH_OCTET_SIX 0x00FF0000 // Sixth octet
300 300
301// A-XGMAC Maximum frame length register 301// A-XGMAC Maximum frame length register
302#define AXGMAC_MAXFRAME_XMT 0x3FFF0000 // Maximum transmit frame length 302#define AXGMAC_MAXFRAME_XMT 0x3FFF0000 // Maximum transmit frame length
303#define AXGMAC_MAXFRAME_XMT_SHIFT 16 303#define AXGMAC_MAXFRAME_XMT_SHIFT 16
304#define AXGMAC_MAXFRAME_RCV 0x0000FFFF // Maximum receive frame length 304#define AXGMAC_MAXFRAME_RCV 0x0000FFFF // Maximum receive frame length
305// This register doesn't need to be written for standard MTU. 305// This register doesn't need to be written for standard MTU.
306// For jumbo, I'll just statically define the value here. This 306// For jumbo, I'll just statically define the value here. This
307// value sets the receive byte count to 9036 (0x234C) and the 307// value sets the receive byte count to 9036 (0x234C) and the
@@ -324,34 +324,34 @@ typedef struct _SXG_HW_REGS {
324 324
325// A-XGMAC AMIIM Field Register 325// A-XGMAC AMIIM Field Register
326#define AXGMAC_AMIIM_FIELD_ST 0xC0000000 // 2-bit ST field 326#define AXGMAC_AMIIM_FIELD_ST 0xC0000000 // 2-bit ST field
327#define AXGMAC_AMIIM_FIELD_ST_SHIFT 30 327#define AXGMAC_AMIIM_FIELD_ST_SHIFT 30
328#define AXGMAC_AMIIM_FIELD_OP 0x30000000 // 2-bit OP field 328#define AXGMAC_AMIIM_FIELD_OP 0x30000000 // 2-bit OP field
329#define AXGMAC_AMIIM_FIELD_OP_SHIFT 28 329#define AXGMAC_AMIIM_FIELD_OP_SHIFT 28
330#define AXGMAC_AMIIM_FIELD_PORT_ADDR 0x0F800000 // Port address field (hstphyadx in spec) 330#define AXGMAC_AMIIM_FIELD_PORT_ADDR 0x0F800000 // Port address field (hstphyadx in spec)
331#define AXGMAC_AMIIM_FIELD_PORT_SHIFT 23 331#define AXGMAC_AMIIM_FIELD_PORT_SHIFT 23
332#define AXGMAC_AMIIM_FIELD_DEV_ADDR 0x007C0000 // Device address field (hstregadx in spec) 332#define AXGMAC_AMIIM_FIELD_DEV_ADDR 0x007C0000 // Device address field (hstregadx in spec)
333#define AXGMAC_AMIIM_FIELD_DEV_SHIFT 18 333#define AXGMAC_AMIIM_FIELD_DEV_SHIFT 18
334#define AXGMAC_AMIIM_FIELD_TA 0x00030000 // 2-bit TA field 334#define AXGMAC_AMIIM_FIELD_TA 0x00030000 // 2-bit TA field
335#define AXGMAC_AMIIM_FIELD_TA_SHIFT 16 335#define AXGMAC_AMIIM_FIELD_TA_SHIFT 16
336#define AXGMAC_AMIIM_FIELD_DATA 0x0000FFFF // Data field 336#define AXGMAC_AMIIM_FIELD_DATA 0x0000FFFF // Data field
337 337
338// Values for the AXGMAC_AMIIM_FIELD_OP field in the A-XGMAC AMIIM Field Register 338// Values for the AXGMAC_AMIIM_FIELD_OP field in the A-XGMAC AMIIM Field Register
339#define MIIM_OP_ADDR 0 // MIIM Address set operation 339#define MIIM_OP_ADDR 0 // MIIM Address set operation
340#define MIIM_OP_WRITE 1 // MIIM Write register operation 340#define MIIM_OP_WRITE 1 // MIIM Write register operation
341#define MIIM_OP_READ 2 // MIIM Read register operation 341#define MIIM_OP_READ 2 // MIIM Read register operation
342#define MIIM_OP_ADDR_SHIFT (MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT) 342#define MIIM_OP_ADDR_SHIFT (MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT)
343 343
344// Values for the AXGMAC_AMIIM_FIELD_PORT_ADDR field in the A-XGMAC AMIIM Field Register 344// Values for the AXGMAC_AMIIM_FIELD_PORT_ADDR field in the A-XGMAC AMIIM Field Register
345#define MIIM_PORT_NUM 1 // All Sahara MIIM modules use port 1 345#define MIIM_PORT_NUM 1 // All Sahara MIIM modules use port 1
346 346
347// Values for the AXGMAC_AMIIM_FIELD_DEV_ADDR field in the A-XGMAC AMIIM Field Register 347// Values for the AXGMAC_AMIIM_FIELD_DEV_ADDR field in the A-XGMAC AMIIM Field Register
348#define MIIM_DEV_PHY_PMA 1 // PHY PMA/PMD module MIIM device number 348#define MIIM_DEV_PHY_PMA 1 // PHY PMA/PMD module MIIM device number
349#define MIIM_DEV_PHY_PCS 3 // PHY PCS module MIIM device number 349#define MIIM_DEV_PHY_PCS 3 // PHY PCS module MIIM device number
350#define MIIM_DEV_PHY_XS 4 // PHY XS module MIIM device number 350#define MIIM_DEV_PHY_XS 4 // PHY XS module MIIM device number
351#define MIIM_DEV_XGXS 5 // XGXS MIIM device number 351#define MIIM_DEV_XGXS 5 // XGXS MIIM device number
352 352
353// Values for the AXGMAC_AMIIM_FIELD_TA field in the A-XGMAC AMIIM Field Register 353// Values for the AXGMAC_AMIIM_FIELD_TA field in the A-XGMAC AMIIM Field Register
354#define MIIM_TA_10GB 2 // set to 2 for 10 GB operation 354#define MIIM_TA_10GB 2 // set to 2 for 10 GB operation
355 355
356// A-XGMAC AMIIM Configuration Register 356// A-XGMAC AMIIM Configuration Register
357#define AXGMAC_AMIIM_CFG_NOPREAM 0x00000080 // Bypass preamble of mngmt frame 357#define AXGMAC_AMIIM_CFG_NOPREAM 0x00000080 // Bypass preamble of mngmt frame
@@ -365,25 +365,25 @@ typedef struct _SXG_HW_REGS {
365#define AXGMAC_AMIIM_INDC_BUSY 0x00000001 // Set until cmd operation complete 365#define AXGMAC_AMIIM_INDC_BUSY 0x00000001 // Set until cmd operation complete
366 366
367// Link Status and Control Register 367// Link Status and Control Register
368#define LS_PHY_CLR_RESET 0x80000000 // Clear reset signal to PHY 368#define LS_PHY_CLR_RESET 0x80000000 // Clear reset signal to PHY
369#define LS_SERDES_POWER_DOWN 0x40000000 // Power down the Sahara Serdes 369#define LS_SERDES_POWER_DOWN 0x40000000 // Power down the Sahara Serdes
370#define LS_XGXS_ENABLE 0x20000000 // Enable the XAUI XGXS logic 370#define LS_XGXS_ENABLE 0x20000000 // Enable the XAUI XGXS logic
371#define LS_XGXS_CTL 0x10000000 // Hold XAUI XGXS logic reset until Serdes is up 371#define LS_XGXS_CTL 0x10000000 // Hold XAUI XGXS logic reset until Serdes is up
372#define LS_SERDES_DOWN 0x08000000 // When 0, XAUI Serdes is up and initialization is complete 372#define LS_SERDES_DOWN 0x08000000 // When 0, XAUI Serdes is up and initialization is complete
373#define LS_TRACE_DOWN 0x04000000 // When 0, Trace Serdes is up and initialization is complete 373#define LS_TRACE_DOWN 0x04000000 // When 0, Trace Serdes is up and initialization is complete
374#define LS_PHY_CLK_25MHZ 0x02000000 // Set PHY clock to 25 MHz (else 156.125 MHz) 374#define LS_PHY_CLK_25MHZ 0x02000000 // Set PHY clock to 25 MHz (else 156.125 MHz)
375#define LS_PHY_CLK_EN 0x01000000 // Enable clock to PHY 375#define LS_PHY_CLK_EN 0x01000000 // Enable clock to PHY
376#define LS_XAUI_LINK_UP 0x00000010 // XAUI link is up 376#define LS_XAUI_LINK_UP 0x00000010 // XAUI link is up
377#define LS_XAUI_LINK_CHNG 0x00000008 // XAUI link status has changed 377#define LS_XAUI_LINK_CHNG 0x00000008 // XAUI link status has changed
378#define LS_LINK_ALARM 0x00000004 // Link alarm pin 378#define LS_LINK_ALARM 0x00000004 // Link alarm pin
379#define LS_ATTN_CTRL_MASK 0x00000003 // Mask link attention control bits 379#define LS_ATTN_CTRL_MASK 0x00000003 // Mask link attention control bits
380#define LS_ATTN_ALARM 0x00000000 // 00 => Attn on link alarm 380#define LS_ATTN_ALARM 0x00000000 // 00 => Attn on link alarm
381#define LS_ATTN_ALARM_OR_STAT_CHNG 0x00000001 // 01 => Attn on link alarm or status change 381#define LS_ATTN_ALARM_OR_STAT_CHNG 0x00000001 // 01 => Attn on link alarm or status change
382#define LS_ATTN_STAT_CHNG 0x00000002 // 10 => Attn on link status change 382#define LS_ATTN_STAT_CHNG 0x00000002 // 10 => Attn on link status change
383#define LS_ATTN_NONE 0x00000003 // 11 => no Attn 383#define LS_ATTN_NONE 0x00000003 // 11 => no Attn
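The two attention-control bits would typically be updated with a read-modify-write; the sketch below is hypothetical (the register accessors and the LinkStatus field name are assumptions), and only the LS_* masks come from the definitions above.

	/* Hypothetical sketch: request link attention on alarm or status change. */
	u32 ls;

	ls  = READ_REG(HwRegs->LinkStatus);     /* hypothetical register accessor */
	ls &= ~LS_ATTN_CTRL_MASK;               /* clear the 2-bit control field  */
	ls |= LS_ATTN_ALARM_OR_STAT_CHNG;       /* 01 => attn on alarm or change  */
	WRITE_REG(HwRegs->LinkStatus, ls);      /* hypothetical register accessor */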
384 384
385// Link Address High Registers 385// Link Address High Registers
386#define LINK_ADDR_ENABLE 0x80000000 // Enable this link address 386#define LINK_ADDR_ENABLE 0x80000000 // Enable this link address
387 387
388 388
389/*************************************************************************** 389/***************************************************************************
@@ -396,7 +396,7 @@ typedef struct _SXG_HW_REGS {
396#define XGXS_ADDRESS_STATUS1 0x0001 // XS Status 1 396#define XGXS_ADDRESS_STATUS1 0x0001 // XS Status 1
397#define XGXS_ADDRESS_DEVID_LOW 0x0002 // XS Device ID (low) 397#define XGXS_ADDRESS_DEVID_LOW 0x0002 // XS Device ID (low)
398#define XGXS_ADDRESS_DEVID_HIGH 0x0003 // XS Device ID (high) 398#define XGXS_ADDRESS_DEVID_HIGH 0x0003 // XS Device ID (high)
399#define XGXS_ADDRESS_SPEED 0x0004 // XS Speed ability 399#define XGXS_ADDRESS_SPEED 0x0004 // XS Speed ability
400#define XGXS_ADDRESS_DEV_LOW 0x0005 // XS Devices in package 400#define XGXS_ADDRESS_DEV_LOW 0x0005 // XS Devices in package
401#define XGXS_ADDRESS_DEV_HIGH 0x0006 // XS Devices in package 401#define XGXS_ADDRESS_DEV_HIGH 0x0006 // XS Devices in package
402#define XGXS_ADDRESS_STATUS2 0x0008 // XS Status 2 402#define XGXS_ADDRESS_STATUS2 0x0008 // XS Status 2
@@ -410,27 +410,27 @@ typedef struct _SXG_HW_REGS {
410#define XGXS_ADDRESS_RESET_HI2 0x8003 // Vendor-Specific Reset Hi 2 410#define XGXS_ADDRESS_RESET_HI2 0x8003 // Vendor-Specific Reset Hi 2
411 411
412// XS Control 1 register bit definitions 412// XS Control 1 register bit definitions
413#define XGXS_CONTROL1_RESET 0x8000 // Reset - self clearing 413#define XGXS_CONTROL1_RESET 0x8000 // Reset - self clearing
414#define XGXS_CONTROL1_LOOPBACK 0x4000 // Enable loopback 414#define XGXS_CONTROL1_LOOPBACK 0x4000 // Enable loopback
415#define XGXS_CONTROL1_SPEED1 0x2000 // 0 = unspecified, 1 = 10Gb+ 415#define XGXS_CONTROL1_SPEED1 0x2000 // 0 = unspecified, 1 = 10Gb+
416#define XGXS_CONTROL1_LOWPOWER 0x0400 // 1 = Low power mode 416#define XGXS_CONTROL1_LOWPOWER 0x0400 // 1 = Low power mode
417#define XGXS_CONTROL1_SPEED2 0x0040 // Same as SPEED1 (?) 417#define XGXS_CONTROL1_SPEED2 0x0040 // Same as SPEED1 (?)
418#define XGXS_CONTROL1_SPEED 0x003C // Everything reserved except zero (?) 418#define XGXS_CONTROL1_SPEED 0x003C // Everything reserved except zero (?)
419 419
420// XS Status 1 register bit definitions 420// XS Status 1 register bit definitions
421#define XGXS_STATUS1_FAULT 0x0080 // Fault detected 421#define XGXS_STATUS1_FAULT 0x0080 // Fault detected
422#define XGXS_STATUS1_LINK 0x0004 // 1 = Link up 422#define XGXS_STATUS1_LINK 0x0004 // 1 = Link up
423#define XGXS_STATUS1_LOWPOWER 0x0002 // 1 = Low power supported 423#define XGXS_STATUS1_LOWPOWER 0x0002 // 1 = Low power supported
424 424
425// XS Speed register bit definitions 425// XS Speed register bit definitions
426#define XGXS_SPEED_10G 0x0001 // 1 = 10G capable 426#define XGXS_SPEED_10G 0x0001 // 1 = 10G capable
427 427
428// XS Devices register bit definitions 428// XS Devices register bit definitions
429#define XGXS_DEVICES_DTE 0x0020 // DTE XS Present 429#define XGXS_DEVICES_DTE 0x0020 // DTE XS Present
430#define XGXS_DEVICES_PHY 0x0010 // PHY XS Present 430#define XGXS_DEVICES_PHY 0x0010 // PHY XS Present
431#define XGXS_DEVICES_PCS 0x0008 // PCS Present 431#define XGXS_DEVICES_PCS 0x0008 // PCS Present
432#define XGXS_DEVICES_WIS 0x0004 // WIS Present 432#define XGXS_DEVICES_WIS 0x0004 // WIS Present
433#define XGXS_DEVICES_PMD 0x0002 // PMD/PMA Present 433#define XGXS_DEVICES_PMD 0x0002 // PMD/PMA Present
434#define XGXS_DEVICES_CLAUSE22 0x0001 // Clause 22 registers present 434#define XGXS_DEVICES_CLAUSE22 0x0001 // Clause 22 registers present
435 435
436// XS Devices High register bit definitions 436// XS Devices High register bit definitions
@@ -444,18 +444,18 @@ typedef struct _SXG_HW_REGS {
444#define XGXS_STATUS2_RCV_FAULT 0x0400 // Receive fault 444#define XGXS_STATUS2_RCV_FAULT 0x0400 // Receive fault
445 445
446// XS Package ID High register bit definitions 446// XS Package ID High register bit definitions
447#define XGXS_PKGID_HIGH_ORG 0xFC00 // Organizationally Unique 447#define XGXS_PKGID_HIGH_ORG 0xFC00 // Organizationally Unique
448#define XGXS_PKGID_HIGH_MFG 0x03F0 // Manufacturer Model 448#define XGXS_PKGID_HIGH_MFG 0x03F0 // Manufacturer Model
449#define XGXS_PKGID_HIGH_REV 0x000F // Revision Number 449#define XGXS_PKGID_HIGH_REV 0x000F // Revision Number
450 450
451// XS Lane Status register bit definitions 451// XS Lane Status register bit definitions
452#define XGXS_LANE_PHY 0x1000 // PHY/DTE lane alignment status 452#define XGXS_LANE_PHY 0x1000 // PHY/DTE lane alignment status
453#define XGXS_LANE_PATTERN 0x0800 // Pattern testing ability 453#define XGXS_LANE_PATTERN 0x0800 // Pattern testing ability
454#define XGXS_LANE_LOOPBACK 0x0400 // PHY loopback ability 454#define XGXS_LANE_LOOPBACK 0x0400 // PHY loopback ability
455#define XGXS_LANE_SYNC3 0x0008 // Lane 3 sync 455#define XGXS_LANE_SYNC3 0x0008 // Lane 3 sync
456#define XGXS_LANE_SYNC2 0x0004 // Lane 2 sync 456#define XGXS_LANE_SYNC2 0x0004 // Lane 2 sync
457#define XGXS_LANE_SYNC1 0x0002 // Lane 1 sync 457#define XGXS_LANE_SYNC1 0x0002 // Lane 1 sync
458#define XGXS_LANE_SYNC0 0x0001 // Lane 0 sync 458#define XGXS_LANE_SYNC0 0x0001 // Lane 0 sync
459 459
460// XS Test Control register bit definitions 460// XS Test Control register bit definitions
461#define XGXS_TEST_PATTERN_ENABLE 0x0004 // Test pattern enabled 461#define XGXS_TEST_PATTERN_ENABLE 0x0004 // Test pattern enabled
@@ -473,10 +473,10 @@ typedef struct _SXG_HW_REGS {
473// LASI (Link Alarm Status Interrupt) Registers (located in MIIM_DEV_PHY_PMA device) 473// LASI (Link Alarm Status Interrupt) Registers (located in MIIM_DEV_PHY_PMA device)
474#define LASI_RX_ALARM_CONTROL 0x9000 // LASI RX_ALARM Control 474#define LASI_RX_ALARM_CONTROL 0x9000 // LASI RX_ALARM Control
475#define LASI_TX_ALARM_CONTROL 0x9001 // LASI TX_ALARM Control 475#define LASI_TX_ALARM_CONTROL 0x9001 // LASI TX_ALARM Control
476#define LASI_CONTROL 0x9002 // LASI Control 476#define LASI_CONTROL 0x9002 // LASI Control
477#define LASI_RX_ALARM_STATUS 0x9003 // LASI RX_ALARM Status 477#define LASI_RX_ALARM_STATUS 0x9003 // LASI RX_ALARM Status
478#define LASI_TX_ALARM_STATUS 0x9004 // LASI TX_ALARM Status 478#define LASI_TX_ALARM_STATUS 0x9004 // LASI TX_ALARM Status
479#define LASI_STATUS 0x9005 // LASI Status 479#define LASI_STATUS 0x9005 // LASI Status
480 480
481// LASI_CONTROL bit definitions 481// LASI_CONTROL bit definitions
482#define LASI_CTL_RX_ALARM_ENABLE 0x0004 // Enable RX_ALARM interrupts 482#define LASI_CTL_RX_ALARM_ENABLE 0x0004 // Enable RX_ALARM interrupts
@@ -489,34 +489,34 @@ typedef struct _SXG_HW_REGS {
489#define LASI_STATUS_LS_ALARM 0x0001 // Link Status 489#define LASI_STATUS_LS_ALARM 0x0001 // Link Status
490 490
491// PHY registers - PMA/PMD (device 1) 491// PHY registers - PMA/PMD (device 1)
492#define PHY_PMA_CONTROL1 0x0000 // PMA/PMD Control 1 492#define PHY_PMA_CONTROL1 0x0000 // PMA/PMD Control 1
493#define PHY_PMA_STATUS1 0x0001 // PMA/PMD Status 1 493#define PHY_PMA_STATUS1 0x0001 // PMA/PMD Status 1
494#define PHY_PMA_RCV_DET 0x000A // PMA/PMD Receive Signal Detect 494#define PHY_PMA_RCV_DET 0x000A // PMA/PMD Receive Signal Detect
495 // other PMA/PMD registers exist and can be defined as needed 495 // other PMA/PMD registers exist and can be defined as needed
496 496
497// PHY registers - PCS (device 3) 497// PHY registers - PCS (device 3)
498#define PHY_PCS_CONTROL1 0x0000 // PCS Control 1 498#define PHY_PCS_CONTROL1 0x0000 // PCS Control 1
499#define PHY_PCS_STATUS1 0x0001 // PCS Status 1 499#define PHY_PCS_STATUS1 0x0001 // PCS Status 1
500#define PHY_PCS_10G_STATUS1 0x0020 // PCS 10GBASE-R Status 1 500#define PHY_PCS_10G_STATUS1 0x0020 // PCS 10GBASE-R Status 1
501 // other PCS registers exist and can be defined as needed 501 // other PCS registers exist and can be defined as needed
502 502
503// PHY registers - XS (device 4) 503// PHY registers - XS (device 4)
504#define PHY_XS_CONTROL1 0x0000 // XS Control 1 504#define PHY_XS_CONTROL1 0x0000 // XS Control 1
505#define PHY_XS_STATUS1 0x0001 // XS Status 1 505#define PHY_XS_STATUS1 0x0001 // XS Status 1
506#define PHY_XS_LANE_STATUS 0x0018 // XS Lane Status 506#define PHY_XS_LANE_STATUS 0x0018 // XS Lane Status
507 // other XS registers exist and can be defined as needed 507 // other XS registers exist and can be defined as needed
508 508
509// PHY_PMA_CONTROL1 register bit definitions 509// PHY_PMA_CONTROL1 register bit definitions
510#define PMA_CONTROL1_RESET 0x8000 // PMA/PMD reset 510#define PMA_CONTROL1_RESET 0x8000 // PMA/PMD reset
511 511
512// PHY_PMA_RCV_DET register bit definitions 512// PHY_PMA_RCV_DET register bit definitions
513#define PMA_RCV_DETECT 0x0001 // PMA/PMD receive signal detect 513#define PMA_RCV_DETECT 0x0001 // PMA/PMD receive signal detect
514 514
515// PHY_PCS_10G_STATUS1 register bit definitions 515// PHY_PCS_10G_STATUS1 register bit definitions
516#define PCS_10B_BLOCK_LOCK 0x0001 // PCS 10GBASE-R locked to receive blocks 516#define PCS_10B_BLOCK_LOCK 0x0001 // PCS 10GBASE-R locked to receive blocks
517 517
518// PHY_XS_LANE_STATUS register bit definitions 518// PHY_XS_LANE_STATUS register bit definitions
519#define XS_LANE_ALIGN 0x1000 // XS transmit lanes aligned 519#define XS_LANE_ALIGN 0x1000 // XS transmit lanes aligned
520 520
521// PHY Microcode download data structure 521// PHY Microcode download data structure
522typedef struct _PHY_UCODE { 522typedef struct _PHY_UCODE {
@@ -558,8 +558,8 @@ typedef struct _XMT_DESC {
558 // command codes 558 // command codes
559#define XMT_DESC_CMD_RAW_SEND 0 // raw send descriptor 559#define XMT_DESC_CMD_RAW_SEND 0 // raw send descriptor
560#define XMT_DESC_CMD_CSUM_INSERT 1 // checksum insert descriptor 560#define XMT_DESC_CMD_CSUM_INSERT 1 // checksum insert descriptor
561#define XMT_DESC_CMD_FORMAT 2 // format descriptor 561#define XMT_DESC_CMD_FORMAT 2 // format descriptor
562#define XMT_DESC_CMD_PRIME 3 // prime descriptor 562#define XMT_DESC_CMD_PRIME 3 // prime descriptor
563#define XMT_DESC_CMD_CODE_SHFT 6 // command code shift (shift to bits [31:30] in word 0) 563#define XMT_DESC_CMD_CODE_SHFT 6 // command code shift (shift to bits [31:30] in word 0)
564 // shifted command codes 564 // shifted command codes
565#define XMT_RAW_SEND (XMT_DESC_CMD_RAW_SEND << XMT_DESC_CMD_CODE_SHFT) 565#define XMT_RAW_SEND (XMT_DESC_CMD_RAW_SEND << XMT_DESC_CMD_CODE_SHFT)
@@ -569,22 +569,22 @@ typedef struct _XMT_DESC {
569 569
570// XMT_DESC Control Byte (XmtCtl) definitions 570// XMT_DESC Control Byte (XmtCtl) definitions
571// NOTE: These bits do not work on Sahara (Rev A)! 571// NOTE: These bits do not work on Sahara (Rev A)!
572#define XMT_CTL_PAUSE_FRAME 0x80 // current frame is a pause control frame (for statistics) 572#define XMT_CTL_PAUSE_FRAME 0x80 // current frame is a pause control frame (for statistics)
573#define XMT_CTL_CONTROL_FRAME 0x40 // current frame is a control frame (for statistics) 573#define XMT_CTL_CONTROL_FRAME 0x40 // current frame is a control frame (for statistics)
574#define XMT_CTL_PER_PKT_QUAL 0x20 // per packet qualifier 574#define XMT_CTL_PER_PKT_QUAL 0x20 // per packet qualifier
575#define XMT_CTL_PAD_MODE_NONE 0x00 // do not pad frame 575#define XMT_CTL_PAD_MODE_NONE 0x00 // do not pad frame
576#define XMT_CTL_PAD_MODE_64 0x08 // pad frame to 64 bytes 576#define XMT_CTL_PAD_MODE_64 0x08 // pad frame to 64 bytes
577#define XMT_CTL_PAD_MODE_VLAN_68 0x10 // pad frame to 64 bytes, and VLAN frames to 68 bytes 577#define XMT_CTL_PAD_MODE_VLAN_68 0x10 // pad frame to 64 bytes, and VLAN frames to 68 bytes
578#define XMT_CTL_PAD_MODE_68 0x18 // pad frame to 68 bytes 578#define XMT_CTL_PAD_MODE_68 0x18 // pad frame to 68 bytes
579#define XMT_CTL_GEN_FCS 0x04 // generate FCS (CRC) for this frame 579#define XMT_CTL_GEN_FCS 0x04 // generate FCS (CRC) for this frame
580#define XMT_CTL_DELAY_FCS_0 0x00 // do not delay FCS calculation 580#define XMT_CTL_DELAY_FCS_0 0x00 // do not delay FCS calculation
581#define XMT_CTL_DELAY_FCS_1 0x01 // delay FCS calculation by 1 (4-byte) word 581#define XMT_CTL_DELAY_FCS_1 0x01 // delay FCS calculation by 1 (4-byte) word
582#define XMT_CTL_DELAY_FCS_2 0x02 // delay FCS calculation by 2 (4-byte) words 582#define XMT_CTL_DELAY_FCS_2 0x02 // delay FCS calculation by 2 (4-byte) words
583#define XMT_CTL_DELAY_FCS_3 0x03 // delay FCS calculation by 3 (4-byte) words 583#define XMT_CTL_DELAY_FCS_3 0x03 // delay FCS calculation by 3 (4-byte) words
584 584
585// XMT_DESC XmtBufId definition 585// XMT_DESC XmtBufId definition
586#define XMT_BUF_ID_SHFT 8 // The Xmt buffer ID is formed by dividing 586#define XMT_BUF_ID_SHFT 8 // The Xmt buffer ID is formed by dividing
587 // the buffer (DRAM) address by 256 (or << 8) 587 // the buffer (DRAM) address by 256 (or << 8)
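A one-line illustration of the relationship the comment describes (the variable names here are hypothetical):

	/* Hypothetical: a transmit buffer ID is the DRAM buffer address divided
	 * by 256, i.e. shifted right by XMT_BUF_ID_SHFT; the receive side below
	 * does the same with RCV_BUF_ID_SHFT (divide by 32). */
	u32 XmtBufId = DramAddress >> XMT_BUF_ID_SHFT;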
588 588
589/***************************************************************************** 589/*****************************************************************************
590 * Receiver Sequencer Definitions 590 * Receiver Sequencer Definitions
@@ -594,8 +594,8 @@ typedef struct _XMT_DESC {
594#define RCV_EVTQ_RBFID_MASK 0x0000FFFF // bit mask for the Receive Buffer ID 594#define RCV_EVTQ_RBFID_MASK 0x0000FFFF // bit mask for the Receive Buffer ID
595 595
596// Receive Buffer ID definition 596// Receive Buffer ID definition
597#define RCV_BUF_ID_SHFT 5 // The Rcv buffer ID is formed by dividing 597#define RCV_BUF_ID_SHFT 5 // The Rcv buffer ID is formed by dividing
598 // the buffer (DRAM) address by 32 (or << 5) 598 // the buffer (DRAM) address by 32 (or << 5)
599 599
600// Format of the 18 byte Receive Buffer returned by the 600// Format of the 18 byte Receive Buffer returned by the
601// Receive Sequencer for received packets 601// Receive Sequencer for received packets
@@ -623,48 +623,48 @@ typedef struct _RCV_BUF_HDR {
623 * Queue definitions 623 * Queue definitions
624 *****************************************************************************/ 624 *****************************************************************************/
625 625
626// Ingress (read only) queue numbers 626/* Ingress (read only) queue numbers */
627#define PXY_BUF_Q 0 // Proxy Buffer Queue 627#define PXY_BUF_Q 0 /* Proxy Buffer Queue */
628#define HST_EVT_Q 1 // Host Event Queue 628#define HST_EVT_Q 1 /* Host Event Queue */
629#define XMT_BUF_Q 2 // Transmit Buffer Queue 629#define XMT_BUF_Q 2 /* Transmit Buffer Queue */
630#define SKT_EVL_Q 3 // RcvSqr Socket Event Low Priority Queue 630#define SKT_EVL_Q 3 /* RcvSqr Socket Event Low Priority Queue */
631#define RCV_EVL_Q 4 // RcvSqr Rcv Event Low Priority Queue 631#define RCV_EVL_Q 4 /* RcvSqr Rcv Event Low Priority Queue */
632#define SKT_EVH_Q 5 // RcvSqr Socket Event High Priority Queue 632#define SKT_EVH_Q 5 /* RcvSqr Socket Event High Priority Queue */
633#define RCV_EVH_Q 6 // RcvSqr Rcv Event High Priority Queue 633#define RCV_EVH_Q 6 /* RcvSqr Rcv Event High Priority Queue */
634#define DMA_RSP_Q 7 // Dma Response Queue - one per CPU context 634#define DMA_RSP_Q 7 /* Dma Response Queue - one per CPU context */
635// Local (read/write) queue numbers 635/* Local (read/write) queue numbers */
636#define LOCAL_A_Q 8 // Spare local Queue 636#define LOCAL_A_Q 8 /* Spare local Queue */
637#define LOCAL_B_Q 9 // Spare local Queue 637#define LOCAL_B_Q 9 /* Spare local Queue */
638#define LOCAL_C_Q 10 // Spare local Queue 638#define LOCAL_C_Q 10 /* Spare local Queue */
639#define FSM_EVT_Q 11 // Finite-State-Machine Event Queue 639#define FSM_EVT_Q 11 /* Finite-State-Machine Event Queue */
640#define SBF_PAL_Q 12 // System Buffer Physical Address (low) Queue 640#define SBF_PAL_Q 12 /* System Buffer Physical Address (low) Queue */
641#define SBF_PAH_Q 13 // System Buffer Physical Address (high) Queue 641#define SBF_PAH_Q 13 /* System Buffer Physical Address (high) Queue */
642#define SBF_VAL_Q 14 // System Buffer Virtual Address (low) Queue 642#define SBF_VAL_Q 14 /* System Buffer Virtual Address (low) Queue */
643#define SBF_VAH_Q 15 // System Buffer Virtual Address (high) Queue 643#define SBF_VAH_Q 15 /* System Buffer Virtual Address (high) Queue */
644// Egress (write only) queue numbers 644/* Egress (write only) queue numbers */
645#define H2G_CMD_Q 16 // Host to GlbRam DMA Command Queue 645#define H2G_CMD_Q 16 /* Host to GlbRam DMA Command Queue */
646#define H2D_CMD_Q 17 // Host to DRAM DMA Command Queue 646#define H2D_CMD_Q 17 /* Host to DRAM DMA Command Queue */
647#define G2H_CMD_Q 18 // GlbRam to Host DMA Command Queue 647#define G2H_CMD_Q 18 /* GlbRam to Host DMA Command Queue */
648#define G2D_CMD_Q 19 // GlbRam to DRAM DMA Command Queue 648#define G2D_CMD_Q 19 /* GlbRam to DRAM DMA Command Queue */
649#define D2H_CMD_Q 20 // DRAM to Host DMA Command Queue 649#define D2H_CMD_Q 20 /* DRAM to Host DMA Command Queue */
650#define D2G_CMD_Q 21 // DRAM to GlbRam DMA Command Queue 650#define D2G_CMD_Q 21 /* DRAM to GlbRam DMA Command Queue */
651#define D2D_CMD_Q 22 // DRAM to DRAM DMA Command Queue 651#define D2D_CMD_Q 22 /* DRAM to DRAM DMA Command Queue */
652#define PXL_CMD_Q 23 // Low Priority Proxy Command Queue 652#define PXL_CMD_Q 23 /* Low Priority Proxy Command Queue */
653#define PXH_CMD_Q 24 // High Priority Proxy Command Queue 653#define PXH_CMD_Q 24 /* High Priority Proxy Command Queue */
654#define RSQ_CMD_Q 25 // Receive Sequencer Command Queue 654#define RSQ_CMD_Q 25 /* Receive Sequencer Command Queue */
655#define RCV_BUF_Q 26 // Receive Buffer Queue 655#define RCV_BUF_Q 26 /* Receive Buffer Queue */
656 656
657// Bit definitions for the Proxy Command queues (PXL_CMD_Q and PXH_CMD_Q) 657/* Bit definitions for the Proxy Command queues (PXL_CMD_Q and PXH_CMD_Q) */
658#define PXY_COPY_EN 0x00200000 // enable copy of xmt descriptor to xmt command queue 658#define PXY_COPY_EN 0x00200000 /* enable copy of xmt descriptor to xmt command queue */
659#define PXY_SIZE_16 0x00000000 // copy 16 bytes 659#define PXY_SIZE_16 0x00000000 /* copy 16 bytes */
660#define PXY_SIZE_32 0x00100000 // copy 32 bytes 660#define PXY_SIZE_32 0x00100000 /* copy 32 bytes */
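A small illustration of how these proxy-command bits are meant to be combined (the base command value is hypothetical):

	/* Hypothetical sketch: a proxy command asking the hardware to copy
	 * 32 bytes of the transmit descriptor onto the command queue. */
	u32 ProxyCmd = BaseProxyCommand | PXY_COPY_EN | PXY_SIZE_32;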
661 661
662/***************************************************************************** 662/*****************************************************************************
663 * SXG EEPROM/Flash Configuration Definitions 663 * SXG EEPROM/Flash Configuration Definitions
664 *****************************************************************************/ 664 *****************************************************************************/
665#pragma pack(push, 1) 665#pragma pack(push, 1)
666 666
667// 667/* */
668typedef struct _HW_CFG_DATA { 668typedef struct _HW_CFG_DATA {
669 ushort Addr; 669 ushort Addr;
670 union { 670 union {
@@ -673,22 +673,22 @@ typedef struct _HW_CFG_DATA {
673 }; 673 };
674} HW_CFG_DATA, *PHW_CFG_DATA; 674} HW_CFG_DATA, *PHW_CFG_DATA;
675 675
676// 676/* */
677#define NUM_HW_CFG_ENTRIES ((128/sizeof(HW_CFG_DATA)) - 4) 677#define NUM_HW_CFG_ENTRIES ((128/sizeof(HW_CFG_DATA)) - 4)
678 678
679// MAC address 679/* MAC address */
680typedef struct _SXG_CONFIG_MAC { 680typedef struct _SXG_CONFIG_MAC {
681 unsigned char MacAddr[6]; // MAC Address 681 unsigned char MacAddr[6]; /* MAC Address */
682} SXG_CONFIG_MAC, *PSXG_CONFIG_MAC; 682} SXG_CONFIG_MAC, *PSXG_CONFIG_MAC;
683 683
684// 684/* */
685typedef struct _ATK_FRU { 685typedef struct _ATK_FRU {
686 unsigned char PartNum[6]; 686 unsigned char PartNum[6];
687 unsigned char Revision[2]; 687 unsigned char Revision[2];
688 unsigned char Serial[14]; 688 unsigned char Serial[14];
689} ATK_FRU, *PATK_FRU; 689} ATK_FRU, *PATK_FRU;
690 690
691// OEM FRU Format types 691/* OEM FRU Format types */
692#define ATK_FRU_FORMAT 0x0000 692#define ATK_FRU_FORMAT 0x0000
693#define CPQ_FRU_FORMAT 0x0001 693#define CPQ_FRU_FORMAT 0x0001
694#define DELL_FRU_FORMAT 0x0002 694#define DELL_FRU_FORMAT 0x0002
@@ -697,24 +697,24 @@ typedef struct _ATK_FRU {
697#define EMC_FRU_FORMAT 0x0005 697#define EMC_FRU_FORMAT 0x0005
698#define NO_FRU_FORMAT 0xFFFF 698#define NO_FRU_FORMAT 0xFFFF
699 699
700// EEPROM/Flash Format 700/* EEPROM/Flash Format */
701typedef struct _SXG_CONFIG { 701typedef struct _SXG_CONFIG {
702 // 702 /* */
703 // Section 1 (128 bytes) 703 /* Section 1 (128 bytes) */
704 // 704 /* */
705 ushort MagicWord; // EEPROM/FLASH Magic code 'A5A5' 705 ushort MagicWord; /* EEPROM/FLASH Magic code 'A5A5' */
706 ushort SpiClks; // SPI bus clock dividers 706 ushort SpiClks; /* SPI bus clock dividers */
707 HW_CFG_DATA HwCfg[NUM_HW_CFG_ENTRIES]; 707 HW_CFG_DATA HwCfg[NUM_HW_CFG_ENTRIES];
708 // 708 /* */
709 // 709 /* */
710 // 710 /* */
711 ushort Version; // EEPROM format version 711 ushort Version; /* EEPROM format version */
712 SXG_CONFIG_MAC MacAddr[4]; // space for 4 MAC addresses 712 SXG_CONFIG_MAC MacAddr[4]; /* space for 4 MAC addresses */
713 ATK_FRU AtkFru; // FRU information 713 ATK_FRU AtkFru; /* FRU information */
714 ushort OemFruFormat; // OEM FRU format type 714 ushort OemFruFormat; /* OEM FRU format type */
715 unsigned char OemFru[76]; // OEM FRU information (optional) 715 unsigned char OemFru[76]; /* OEM FRU information (optional) */
716 ushort Checksum; // Checksum of section 2 716 ushort Checksum; /* Checksum of section 2 */
717 // CS info XXXTODO 717 /* CS info XXXTODO */
718} SXG_CONFIG, *PSXG_CONFIG; 718} SXG_CONFIG, *PSXG_CONFIG;
719#pragma pack(pop) 719#pragma pack(pop)
720 720
@@ -723,12 +723,12 @@ typedef struct _SXG_CONFIG {
723 *****************************************************************************/ 723 *****************************************************************************/
724 724
725// Sahara (ASIC level) defines 725// Sahara (ASIC level) defines
726#define SAHARA_GRAM_SIZE 0x020000 // GRAM size - 128 KB 726#define SAHARA_GRAM_SIZE 0x020000 // GRAM size - 128 KB
727#define SAHARA_DRAM_SIZE 0x200000 // DRAM size - 2 MB 727#define SAHARA_DRAM_SIZE 0x200000 // DRAM size - 2 MB
728#define SAHARA_QRAM_SIZE 0x004000 // QRAM size - 16K entries (64 KB) 728#define SAHARA_QRAM_SIZE 0x004000 // QRAM size - 16K entries (64 KB)
729#define SAHARA_WCS_SIZE 0x002000 // WCS - 8K instructions (x 108 bits) 729#define SAHARA_WCS_SIZE 0x002000 // WCS - 8K instructions (x 108 bits)
730 730
731// Arabia (board level) defines 731// Arabia (board level) defines
732#define FLASH_SIZE 0x080000 // 512 KB (4 Mb) 732#define FLASH_SIZE 0x080000 // 512 KB (4 Mb)
733#define EEPROM_SIZE_XFMR 512 // true EEPROM size (bytes), including xfmr area 733#define EEPROM_SIZE_XFMR 512 // true EEPROM size (bytes), including xfmr area
734#define EEPROM_SIZE_NO_XFMR 256 // EEPROM size excluding xfmr area 734#define EEPROM_SIZE_NO_XFMR 256 // EEPROM size excluding xfmr area
diff --git a/drivers/staging/sxg/sxgphycode.h b/drivers/staging/sxg/sxgphycode.h
index 26b36c81eb1a..8dbaeda7eca4 100644
--- a/drivers/staging/sxg/sxgphycode.h
+++ b/drivers/staging/sxg/sxgphycode.h
@@ -34,7 +34,7 @@ static PHY_UCODE PhyUcode[] = {
34 */ 34 */
35 /* Addr, Data */ 35 /* Addr, Data */
36 {0xc017, 0xfeb0}, /* flip RX_LOS polarity (mandatory */ 36 {0xc017, 0xfeb0}, /* flip RX_LOS polarity (mandatory */
37 /* patch for SFP+ applications) */ 37 /* patch for SFP+ applications) */
38 {0xC001, 0x0428}, /* flip RX serial polarity */ 38 {0xC001, 0x0428}, /* flip RX serial polarity */
39 39
40 {0xc013, 0xf341}, /* invert lxmit clock (mandatory patch) */ 40 {0xc013, 0xf341}, /* invert lxmit clock (mandatory patch) */
@@ -43,7 +43,7 @@ static PHY_UCODE PhyUcode[] = {
43 {0xc210, 0x8000}, /* reset datapath (mandatory patch) */ 43 {0xc210, 0x8000}, /* reset datapath (mandatory patch) */
44 {0xc210, 0x0000}, /* reset datapath (mandatory patch) */ 44 {0xc210, 0x0000}, /* reset datapath (mandatory patch) */
45 {0x0000, 0x0032}, /* wait for 50ms for datapath reset to */ 45 {0x0000, 0x0032}, /* wait for 50ms for datapath reset to */
46 /* complete. (mandatory patch) */ 46 /* complete. (mandatory patch) */
47 47
48 /* Configure the LEDs */ 48 /* Configure the LEDs */
49 {0xc214, 0x0099}, /* configure the LED drivers */ 49 {0xc214, 0x0099}, /* configure the LED drivers */
@@ -52,15 +52,15 @@ static PHY_UCODE PhyUcode[] = {
52 52
53 /* Transceiver-specific MDIO Patches: */ 53 /* Transceiver-specific MDIO Patches: */
54 {0xc010, 0x448a}, /* (bit 14) mask out high BER input from the */ 54 {0xc010, 0x448a}, /* (bit 14) mask out high BER input from the */
55 /* LOS signal in 1.000A */ 55 /* LOS signal in 1.000A */
56 /* (mandatory patch for SR code)*/ 56 /* (mandatory patch for SR code) */
57 {0xc003, 0x0181}, /* (bit 7) enable the CDR inc setting in */ 57 {0xc003, 0x0181}, /* (bit 7) enable the CDR inc setting in */
58 /* 1.C005 (mandatory patch for SR code) */ 58 /* 1.C005 (mandatory patch for SR code) */
59 59
60 /* Transceiver-specific Microcontroller Initialization: */ 60 /* Transceiver-specific Microcontroller Initialization: */
61 {0xc04a, 0x5200}, /* activate microcontroller and pause */ 61 {0xc04a, 0x5200}, /* activate microcontroller and pause */
62 {0x0000, 0x0032}, /* wait 50ms for microcontroller before */ 62 {0x0000, 0x0032}, /* wait 50ms for microcontroller before */
63 /* writing in code. */ 63 /* writing in code. */
64 64
65 /* code block starts here: */ 65 /* code block starts here: */
66 {0xcc00, 0x2009}, 66 {0xcc00, 0x2009},
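The table continues beyond the hunk shown here. As a sketch of how such an (address, data) table is typically consumed (this is an illustration, not the driver's actual download routine; the Addr/Data member names and the write_phy_register() helper are assumptions):

	/* Hypothetical sketch: walk the PhyUcode[] table, treating an address of
	 * 0x0000 as a sentinel meaning "delay <data> milliseconds" (0x0032 = 50 ms)
	 * and writing every other entry to the PHY. */
	int i;

	for (i = 0; i < ARRAY_SIZE(PhyUcode); i++) {
		if (PhyUcode[i].Addr == 0x0000)
			mdelay(PhyUcode[i].Data);            /* e.g. the 50 ms waits above */
		else
			write_phy_register(PhyUcode[i].Addr, /* hypothetical helper        */
					   PhyUcode[i].Data);
	}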
diff --git a/drivers/staging/usbip/Kconfig b/drivers/staging/usbip/Kconfig
index 7426235ccc44..217fb7e62c2f 100644
--- a/drivers/staging/usbip/Kconfig
+++ b/drivers/staging/usbip/Kconfig
@@ -1,6 +1,6 @@
1config USB_IP_COMMON 1config USB_IP_COMMON
2 tristate "USB IP support (EXPERIMENTAL)" 2 tristate "USB IP support (EXPERIMENTAL)"
3 depends on USB && EXPERIMENTAL 3 depends on USB && NET && EXPERIMENTAL
4 default N 4 default N
5 ---help--- 5 ---help---
6 This enables pushing USB packets over IP to allow remote 6 This enables pushing USB packets over IP to allow remote
diff --git a/drivers/staging/usbip/usbip_common.c b/drivers/staging/usbip/usbip_common.c
index e64918f42ff7..72e209276ea7 100644
--- a/drivers/staging/usbip/usbip_common.c
+++ b/drivers/staging/usbip/usbip_common.c
@@ -221,7 +221,7 @@ static void usbip_dump_request_type(__u8 rt)
221static void usbip_dump_usb_ctrlrequest(struct usb_ctrlrequest *cmd) 221static void usbip_dump_usb_ctrlrequest(struct usb_ctrlrequest *cmd)
222{ 222{
223 if (!cmd) { 223 if (!cmd) {
224 printk(" %s : null pointer\n", __FUNCTION__); 224 printk(" %s : null pointer\n", __func__);
225 return; 225 return;
226 } 226 }
227 227
diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
index 933ccaf50afb..58e3995d0e2c 100644
--- a/drivers/staging/usbip/vhci_rx.c
+++ b/drivers/staging/usbip/vhci_rx.c
@@ -202,7 +202,7 @@ static void vhci_rx_pdu(struct usbip_device *ud)
202 ret = usbip_xmit(0, ud->tcp_socket, (char *) &pdu, sizeof(pdu), 0); 202 ret = usbip_xmit(0, ud->tcp_socket, (char *) &pdu, sizeof(pdu), 0);
203 if (ret != sizeof(pdu)) { 203 if (ret != sizeof(pdu)) {
204 uerr("receiving pdu failed! size is %d, should be %d\n", 204 uerr("receiving pdu failed! size is %d, should be %d\n",
205 ret, sizeof(pdu)); 205 ret, (unsigned int)sizeof(pdu));
206 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP); 206 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
207 return; 207 return;
208 } 208 }
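A side note on the cast above: sizeof() yields a size_t, which does not match the %d conversion specifier, presumably the compiler warning this hunk silences. Shown purely as an alternative (not the fix applied here), the %zu length modifier prints a size_t directly:

	/* illustration only: %zu avoids the cast entirely */
	uerr("receiving pdu failed! size is %d, should be %zu\n", ret, sizeof(pdu));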
diff --git a/drivers/staging/winbond/Kconfig b/drivers/staging/winbond/Kconfig
index 10d72bec88a9..425219ed7ab9 100644
--- a/drivers/staging/winbond/Kconfig
+++ b/drivers/staging/winbond/Kconfig
@@ -1,6 +1,6 @@
1config W35UND 1config W35UND
2 tristate "Winbond driver" 2 tristate "Winbond driver"
3 depends on MAC80211 && WLAN_80211 && EXPERIMENTAL && !4KSTACKS 3 depends on MAC80211 && WLAN_80211 && USB && EXPERIMENTAL && !4KSTACKS
4 default n 4 default n
5 ---help--- 5 ---help---
6 This is a highly experimental driver for the Winbond wifi card on some Kohjinsha notebooks 6 This is a highly experimental driver for the Winbond wifi card on some Kohjinsha notebooks
diff --git a/drivers/staging/winbond/README b/drivers/staging/winbond/README
index 707b6b354dc5..cb944e4bf174 100644
--- a/drivers/staging/winbond/README
+++ b/drivers/staging/winbond/README
@@ -5,6 +5,7 @@ TODO:
5 - remove typedefs 5 - remove typedefs
6 - remove unused ioctls 6 - remove unused ioctls
7 - use cfg80211 for regulatory stuff 7 - use cfg80211 for regulatory stuff
8 - fix 4k stack problems
8 9
9Please send patches to Greg Kroah-Hartman <greg@kroah.com> and 10Please send patches to Greg Kroah-Hartman <greg@kroah.com> and
10Pavel Machek <pavel@suse.cz> 11Pavel Machek <pavel@suse.cz>
diff --git a/drivers/staging/winbond/bss_f.h b/drivers/staging/winbond/bss_f.h
index c957bc94f08d..013183153993 100644
--- a/drivers/staging/winbond/bss_f.h
+++ b/drivers/staging/winbond/bss_f.h
@@ -24,7 +24,7 @@ void DesiredRate2InfoElement(PWB32_ADAPTER Adapter, u8 *addr, u16 *iFildOffset,
24 u8 *pBasicRateSet, u8 BasicRateCount, 24 u8 *pBasicRateSet, u8 BasicRateCount,
25 u8 *pOperationRateSet, u8 OperationRateCount); 25 u8 *pOperationRateSet, u8 OperationRateCount);
26void BSSAddIBSSdata(PWB32_ADAPTER Adapter, PWB_BSSDESCRIPTION psDesData); 26void BSSAddIBSSdata(PWB32_ADAPTER Adapter, PWB_BSSDESCRIPTION psDesData);
27unsigned char boCmpMacAddr( PUCHAR, PUCHAR ); 27unsigned char boCmpMacAddr( u8 *, u8 *);
28unsigned char boCmpSSID(struct SSID_Element *psSSID1, struct SSID_Element *psSSID2); 28unsigned char boCmpSSID(struct SSID_Element *psSSID1, struct SSID_Element *psSSID2);
29u16 wBSSfindSSID(PWB32_ADAPTER Adapter, struct SSID_Element *psSsid); 29u16 wBSSfindSSID(PWB32_ADAPTER Adapter, struct SSID_Element *psSsid);
30u16 wRoamingQuery(PWB32_ADAPTER Adapter); 30u16 wRoamingQuery(PWB32_ADAPTER Adapter);
@@ -42,11 +42,11 @@ void RateReSortForSRate(PWB32_ADAPTER Adapter, u8 *RateArray, u8 num);
42void Assemble_IE(PWB32_ADAPTER Adapter, u16 wBssIdx); 42void Assemble_IE(PWB32_ADAPTER Adapter, u16 wBssIdx);
43void SetMaxTxRate(PWB32_ADAPTER Adapter); 43void SetMaxTxRate(PWB32_ADAPTER Adapter);
44 44
45void CreateWpaIE(PWB32_ADAPTER Adapter, u16* iFildOffset, PUCHAR msg, struct Management_Frame* msgHeader, 45void CreateWpaIE(PWB32_ADAPTER Adapter, u16* iFildOffset, u8 *msg, struct Management_Frame* msgHeader,
46 struct Association_Request_Frame_Body* msgBody, u16 iMSindex); //added by WS 05/14/05 46 struct Association_Request_Frame_Body* msgBody, u16 iMSindex); //added by WS 05/14/05
47 47
48#ifdef _WPA2_ 48#ifdef _WPA2_
49void CreateRsnIE(PWB32_ADAPTER Adapter, u16* iFildOffset, PUCHAR msg, struct Management_Frame* msgHeader, 49void CreateRsnIE(PWB32_ADAPTER Adapter, u16* iFildOffset, u8 *msg, struct Management_Frame* msgHeader,
50 struct Association_Request_Frame_Body* msgBody, u16 iMSindex);//added by WS 05/14/05 50 struct Association_Request_Frame_Body* msgBody, u16 iMSindex);//added by WS 05/14/05
51 51
52u16 SearchPmkid(PWB32_ADAPTER Adapter, struct Management_Frame* msgHeader, 52u16 SearchPmkid(PWB32_ADAPTER Adapter, struct Management_Frame* msgHeader,
diff --git a/drivers/staging/winbond/ds_tkip.h b/drivers/staging/winbond/ds_tkip.h
index 29e5055b45a1..6841d66e7e8c 100644
--- a/drivers/staging/winbond/ds_tkip.h
+++ b/drivers/staging/winbond/ds_tkip.h
@@ -25,9 +25,9 @@ typedef struct tkip
25 s32 bytes_in_M; // # bytes in M 25 s32 bytes_in_M; // # bytes in M
26} tkip_t; 26} tkip_t;
27 27
28//void _append_data( PUCHAR pData, u16 size, tkip_t *p ); 28//void _append_data( u8 *pData, u16 size, tkip_t *p );
29void Mds_MicGet( void* Adapter, void* pRxLayer1, PUCHAR pKey, PUCHAR pMic ); 29void Mds_MicGet( void* Adapter, void* pRxLayer1, u8 *pKey, u8 *pMic );
30void Mds_MicFill( void* Adapter, void* pDes, PUCHAR XmitBufAddress ); 30void Mds_MicFill( void* Adapter, void* pDes, u8 *XmitBufAddress );
31 31
32 32
33 33
diff --git a/drivers/staging/winbond/linux/common.h b/drivers/staging/winbond/linux/common.h
index 6b00bad74f78..712a86cfa68b 100644
--- a/drivers/staging/winbond/linux/common.h
+++ b/drivers/staging/winbond/linux/common.h
@@ -39,14 +39,6 @@
39// Common type definition 39// Common type definition
40//=============================================================== 40//===============================================================
41 41
42typedef u8* PUCHAR;
43typedef s8* PCHAR;
44typedef u8* PBOOLEAN;
45typedef u16* PUSHORT;
46typedef u32* PULONG;
47typedef s16* PSHORT;
48
49
50//=========================================== 42//===========================================
51#define IGNORE 2 43#define IGNORE 2
52#define SUCCESS 1 44#define SUCCESS 1
@@ -110,16 +102,9 @@ typedef struct urb * PURB;
110#define OS_ATOMIC_READ( _A, _V ) _V 102#define OS_ATOMIC_READ( _A, _V ) _V
111#define OS_ATOMIC_INC( _A, _V ) EncapAtomicInc( _A, (void*)_V ) 103#define OS_ATOMIC_INC( _A, _V ) EncapAtomicInc( _A, (void*)_V )
112#define OS_ATOMIC_DEC( _A, _V ) EncapAtomicDec( _A, (void*)_V ) 104#define OS_ATOMIC_DEC( _A, _V ) EncapAtomicDec( _A, (void*)_V )
113#define OS_MEMORY_CLEAR( _A, _S ) memset( (PUCHAR)_A,0,_S) 105#define OS_MEMORY_CLEAR( _A, _S ) memset( (u8 *)_A,0,_S)
114#define OS_MEMORY_COMPARE( _A, _B, _S ) (memcmp(_A,_B,_S)? 0 : 1) // Reversed from NDIS: 1 means the same, 0 means different 106#define OS_MEMORY_COMPARE( _A, _B, _S ) (memcmp(_A,_B,_S)? 0 : 1) // Reversed from NDIS: 1 means the same, 0 means different
115 107
116
117#define OS_SPIN_LOCK spinlock_t
118#define OS_SPIN_LOCK_ALLOCATE( _S ) spin_lock_init( _S );
119#define OS_SPIN_LOCK_FREE( _S )
120#define OS_SPIN_LOCK_ACQUIRED( _S ) spin_lock_irq( _S )
121#define OS_SPIN_LOCK_RELEASED( _S ) spin_unlock_irq( _S );
122
123#define OS_TIMER struct timer_list 108#define OS_TIMER struct timer_list
124#define OS_TIMER_INITIAL( _T, _F, _P ) \ 109#define OS_TIMER_INITIAL( _T, _F, _P ) \
125{ \ 110{ \
diff --git a/drivers/staging/winbond/linux/wb35reg.c b/drivers/staging/winbond/linux/wb35reg.c
index 2c0b454e8cad..ebb6db5438a4 100644
--- a/drivers/staging/winbond/linux/wb35reg.c
+++ b/drivers/staging/winbond/linux/wb35reg.c
@@ -10,7 +10,7 @@ extern void phy_calibration_winbond(hw_data_t *phw_data, u32 frequency);
10// Flag : AUTO_INCREMENT - RegisterNo will auto increment 4 10// Flag : AUTO_INCREMENT - RegisterNo will auto increment 4
11// NO_INCREMENT - Function will write data into the same register 11// NO_INCREMENT - Function will write data into the same register
12unsigned char 12unsigned char
13Wb35Reg_BurstWrite(phw_data_t pHwData, u16 RegisterNo, PULONG pRegisterData, u8 NumberOfData, u8 Flag) 13Wb35Reg_BurstWrite(phw_data_t pHwData, u16 RegisterNo, u32 * pRegisterData, u8 NumberOfData, u8 Flag)
14{ 14{
15 PWB35REG pWb35Reg = &pHwData->Wb35Reg; 15 PWB35REG pWb35Reg = &pHwData->Wb35Reg;
16 PURB pUrb = NULL; 16 PURB pUrb = NULL;
@@ -30,13 +30,13 @@ Wb35Reg_BurstWrite(phw_data_t pHwData, u16 RegisterNo, PULONG pRegisterData, u8
30 if( pUrb && pRegQueue ) { 30 if( pUrb && pRegQueue ) {
31 pRegQueue->DIRECT = 2;// burst write register 31 pRegQueue->DIRECT = 2;// burst write register
32 pRegQueue->INDEX = RegisterNo; 32 pRegQueue->INDEX = RegisterNo;
33 pRegQueue->pBuffer = (PULONG)((PUCHAR)pRegQueue + sizeof(REG_QUEUE)); 33 pRegQueue->pBuffer = (u32 *)((u8 *)pRegQueue + sizeof(REG_QUEUE));
34 memcpy( pRegQueue->pBuffer, pRegisterData, DataSize ); 34 memcpy( pRegQueue->pBuffer, pRegisterData, DataSize );
35 //the function for reversing register data from little endian to big endian 35 //the function for reversing register data from little endian to big endian
36 for( i=0; i<NumberOfData ; i++ ) 36 for( i=0; i<NumberOfData ; i++ )
37 pRegQueue->pBuffer[i] = cpu_to_le32( pRegQueue->pBuffer[i] ); 37 pRegQueue->pBuffer[i] = cpu_to_le32( pRegQueue->pBuffer[i] );
38 38
39 dr = (struct usb_ctrlrequest *)((PUCHAR)pRegQueue + sizeof(REG_QUEUE) + DataSize); 39 dr = (struct usb_ctrlrequest *)((u8 *)pRegQueue + sizeof(REG_QUEUE) + DataSize);
40 dr->bRequestType = USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE; 40 dr->bRequestType = USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE;
41 dr->bRequest = 0x04; // USB or vendor-defined request code, burst mode 41 dr->bRequest = 0x04; // USB or vendor-defined request code, burst mode
42 dr->wValue = cpu_to_le16( Flag ); // 0: Register number auto-increment, 1: No auto increment 42 dr->wValue = cpu_to_le16( Flag ); // 0: Register number auto-increment, 1: No auto increment
@@ -46,14 +46,14 @@ Wb35Reg_BurstWrite(phw_data_t pHwData, u16 RegisterNo, PULONG pRegisterData, u8
46 pRegQueue->pUsbReq = dr; 46 pRegQueue->pUsbReq = dr;
47 pRegQueue->pUrb = pUrb; 47 pRegQueue->pUrb = pUrb;
48 48
49 OS_SPIN_LOCK_ACQUIRED( &pWb35Reg->EP0VM_spin_lock ); 49 spin_lock_irq( &pWb35Reg->EP0VM_spin_lock );
50 if (pWb35Reg->pRegFirst == NULL) 50 if (pWb35Reg->pRegFirst == NULL)
51 pWb35Reg->pRegFirst = pRegQueue; 51 pWb35Reg->pRegFirst = pRegQueue;
52 else 52 else
53 pWb35Reg->pRegLast->Next = pRegQueue; 53 pWb35Reg->pRegLast->Next = pRegQueue;
54 pWb35Reg->pRegLast = pRegQueue; 54 pWb35Reg->pRegLast = pRegQueue;
55 55
56 OS_SPIN_LOCK_RELEASED( &pWb35Reg->EP0VM_spin_lock ); 56 spin_unlock_irq( &pWb35Reg->EP0VM_spin_lock );
57 57
58 // Start EP0VM 58 // Start EP0VM
59 Wb35Reg_EP0VM_start(pHwData); 59 Wb35Reg_EP0VM_start(pHwData);
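The changes in this file follow a single mechanical pattern: the driver's private OS_SPIN_LOCK_* wrappers are replaced by the kernel's native spinlock calls. Summarised as a sketch (each substitution appears in the hunks of this file):

	/* OS_SPIN_LOCK_ALLOCATE(&l)  ->  spin_lock_init(&l)
	 * OS_SPIN_LOCK_ACQUIRED(&l)  ->  spin_lock_irq(&l)
	 * OS_SPIN_LOCK_RELEASED(&l)  ->  spin_unlock_irq(&l)
	 * OS_SPIN_LOCK_FREE(&l)      ->  no native call needed; the line is simply dropped
	 */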
@@ -181,7 +181,7 @@ Wb35Reg_Write( phw_data_t pHwData, u16 RegisterNo, u32 RegisterValue )
181 pRegQueue->INDEX = RegisterNo; 181 pRegQueue->INDEX = RegisterNo;
182 pRegQueue->VALUE = cpu_to_le32(RegisterValue); 182 pRegQueue->VALUE = cpu_to_le32(RegisterValue);
183 pRegQueue->RESERVED_VALID = FALSE; 183 pRegQueue->RESERVED_VALID = FALSE;
184 dr = (struct usb_ctrlrequest *)((PUCHAR)pRegQueue + sizeof(REG_QUEUE)); 184 dr = (struct usb_ctrlrequest *)((u8 *)pRegQueue + sizeof(REG_QUEUE));
185 dr->bRequestType = USB_TYPE_VENDOR|USB_DIR_OUT |USB_RECIP_DEVICE; 185 dr->bRequestType = USB_TYPE_VENDOR|USB_DIR_OUT |USB_RECIP_DEVICE;
186 dr->bRequest = 0x03; // USB or vendor-defined request code, burst mode 186 dr->bRequest = 0x03; // USB or vendor-defined request code, burst mode
187 dr->wValue = cpu_to_le16(0x0); 187 dr->wValue = cpu_to_le16(0x0);
@@ -193,14 +193,14 @@ Wb35Reg_Write( phw_data_t pHwData, u16 RegisterNo, u32 RegisterValue )
193 pRegQueue->pUsbReq = dr; 193 pRegQueue->pUsbReq = dr;
194 pRegQueue->pUrb = pUrb; 194 pRegQueue->pUrb = pUrb;
195 195
196 OS_SPIN_LOCK_ACQUIRED(&pWb35Reg->EP0VM_spin_lock ); 196 spin_lock_irq(&pWb35Reg->EP0VM_spin_lock );
197 if (pWb35Reg->pRegFirst == NULL) 197 if (pWb35Reg->pRegFirst == NULL)
198 pWb35Reg->pRegFirst = pRegQueue; 198 pWb35Reg->pRegFirst = pRegQueue;
199 else 199 else
200 pWb35Reg->pRegLast->Next = pRegQueue; 200 pWb35Reg->pRegLast->Next = pRegQueue;
201 pWb35Reg->pRegLast = pRegQueue; 201 pWb35Reg->pRegLast = pRegQueue;
202 202
203 OS_SPIN_LOCK_RELEASED( &pWb35Reg->EP0VM_spin_lock ); 203 spin_unlock_irq( &pWb35Reg->EP0VM_spin_lock );
204 204
205 // Start EP0VM 205 // Start EP0VM
206 Wb35Reg_EP0VM_start(pHwData); 206 Wb35Reg_EP0VM_start(pHwData);
@@ -220,7 +220,7 @@ Wb35Reg_Write( phw_data_t pHwData, u16 RegisterNo, u32 RegisterValue )
220// FALSE : register not support 220// FALSE : register not support
221unsigned char 221unsigned char
222Wb35Reg_WriteWithCallbackValue( phw_data_t pHwData, u16 RegisterNo, u32 RegisterValue, 222Wb35Reg_WriteWithCallbackValue( phw_data_t pHwData, u16 RegisterNo, u32 RegisterValue,
223 PCHAR pValue, s8 Len) 223 s8 *pValue, s8 Len)
224{ 224{
225 PWB35REG pWb35Reg = &pHwData->Wb35Reg; 225 PWB35REG pWb35Reg = &pHwData->Wb35Reg;
226 struct usb_ctrlrequest *dr; 226 struct usb_ctrlrequest *dr;
@@ -243,7 +243,7 @@ Wb35Reg_WriteWithCallbackValue( phw_data_t pHwData, u16 RegisterNo, u32 Register
243 //NOTE : Users must guarantee the size of value will not exceed the buffer size. 243 //NOTE : Users must guarantee the size of value will not exceed the buffer size.
244 memcpy(pRegQueue->RESERVED, pValue, Len); 244 memcpy(pRegQueue->RESERVED, pValue, Len);
245 pRegQueue->RESERVED_VALID = TRUE; 245 pRegQueue->RESERVED_VALID = TRUE;
246 dr = (struct usb_ctrlrequest *)((PUCHAR)pRegQueue + sizeof(REG_QUEUE)); 246 dr = (struct usb_ctrlrequest *)((u8 *)pRegQueue + sizeof(REG_QUEUE));
247 dr->bRequestType = USB_TYPE_VENDOR|USB_DIR_OUT |USB_RECIP_DEVICE; 247 dr->bRequestType = USB_TYPE_VENDOR|USB_DIR_OUT |USB_RECIP_DEVICE;
248 dr->bRequest = 0x03; // USB or vendor-defined request code, burst mode 248 dr->bRequest = 0x03; // USB or vendor-defined request code, burst mode
249 dr->wValue = cpu_to_le16(0x0); 249 dr->wValue = cpu_to_le16(0x0);
@@ -254,14 +254,14 @@ Wb35Reg_WriteWithCallbackValue( phw_data_t pHwData, u16 RegisterNo, u32 Register
254 pRegQueue->Next = NULL; 254 pRegQueue->Next = NULL;
255 pRegQueue->pUsbReq = dr; 255 pRegQueue->pUsbReq = dr;
256 pRegQueue->pUrb = pUrb; 256 pRegQueue->pUrb = pUrb;
257 OS_SPIN_LOCK_ACQUIRED (&pWb35Reg->EP0VM_spin_lock ); 257 spin_lock_irq (&pWb35Reg->EP0VM_spin_lock );
258 if( pWb35Reg->pRegFirst == NULL ) 258 if( pWb35Reg->pRegFirst == NULL )
259 pWb35Reg->pRegFirst = pRegQueue; 259 pWb35Reg->pRegFirst = pRegQueue;
260 else 260 else
261 pWb35Reg->pRegLast->Next = pRegQueue; 261 pWb35Reg->pRegLast->Next = pRegQueue;
262 pWb35Reg->pRegLast = pRegQueue; 262 pWb35Reg->pRegLast = pRegQueue;
263 263
264 OS_SPIN_LOCK_RELEASED ( &pWb35Reg->EP0VM_spin_lock ); 264 spin_unlock_irq ( &pWb35Reg->EP0VM_spin_lock );
265 265
266 // Start EP0VM 266 // Start EP0VM
267 Wb35Reg_EP0VM_start(pHwData); 267 Wb35Reg_EP0VM_start(pHwData);
@@ -278,10 +278,10 @@ Wb35Reg_WriteWithCallbackValue( phw_data_t pHwData, u16 RegisterNo, u32 Register
278// FALSE : register not support 278// FALSE : register not support
279// pRegisterValue : It must be a resident buffer due to asynchronous read register. 279// pRegisterValue : It must be a resident buffer due to asynchronous read register.
280unsigned char 280unsigned char
281Wb35Reg_ReadSync( phw_data_t pHwData, u16 RegisterNo, PULONG pRegisterValue ) 281Wb35Reg_ReadSync( phw_data_t pHwData, u16 RegisterNo, u32 * pRegisterValue )
282{ 282{
283 PWB35REG pWb35Reg = &pHwData->Wb35Reg; 283 PWB35REG pWb35Reg = &pHwData->Wb35Reg;
284 PULONG pltmp = pRegisterValue; 284 u32 * pltmp = pRegisterValue;
285 int ret = -1; 285 int ret = -1;
286 286
287 // Module shutdown 287 // Module shutdown
@@ -327,7 +327,7 @@ Wb35Reg_ReadSync( phw_data_t pHwData, u16 RegisterNo, PULONG pRegisterValue
327// FALSE : register not support 327// FALSE : register not support
328// pRegisterValue : It must be a resident buffer due to asynchronous read register. 328// pRegisterValue : It must be a resident buffer due to asynchronous read register.
329unsigned char 329unsigned char
330Wb35Reg_Read(phw_data_t pHwData, u16 RegisterNo, PULONG pRegisterValue ) 330Wb35Reg_Read(phw_data_t pHwData, u16 RegisterNo, u32 * pRegisterValue )
331{ 331{
332 PWB35REG pWb35Reg = &pHwData->Wb35Reg; 332 PWB35REG pWb35Reg = &pHwData->Wb35Reg;
333 struct usb_ctrlrequest * dr; 333 struct usb_ctrlrequest * dr;
@@ -348,7 +348,7 @@ Wb35Reg_Read(phw_data_t pHwData, u16 RegisterNo, PULONG pRegisterValue )
348 pRegQueue->DIRECT = 0;// read register 348 pRegQueue->DIRECT = 0;// read register
349 pRegQueue->INDEX = RegisterNo; 349 pRegQueue->INDEX = RegisterNo;
350 pRegQueue->pBuffer = pRegisterValue; 350 pRegQueue->pBuffer = pRegisterValue;
351 dr = (struct usb_ctrlrequest *)((PUCHAR)pRegQueue + sizeof(REG_QUEUE)); 351 dr = (struct usb_ctrlrequest *)((u8 *)pRegQueue + sizeof(REG_QUEUE));
352 dr->bRequestType = USB_TYPE_VENDOR|USB_RECIP_DEVICE|USB_DIR_IN; 352 dr->bRequestType = USB_TYPE_VENDOR|USB_RECIP_DEVICE|USB_DIR_IN;
353 dr->bRequest = 0x01; // USB or vendor-defined request code, burst mode 353 dr->bRequest = 0x01; // USB or vendor-defined request code, burst mode
354 dr->wValue = cpu_to_le16(0x0); 354 dr->wValue = cpu_to_le16(0x0);
@@ -359,14 +359,14 @@ Wb35Reg_Read(phw_data_t pHwData, u16 RegisterNo, PULONG pRegisterValue )
359 pRegQueue->Next = NULL; 359 pRegQueue->Next = NULL;
360 pRegQueue->pUsbReq = dr; 360 pRegQueue->pUsbReq = dr;
361 pRegQueue->pUrb = pUrb; 361 pRegQueue->pUrb = pUrb;
362 OS_SPIN_LOCK_ACQUIRED ( &pWb35Reg->EP0VM_spin_lock ); 362 spin_lock_irq ( &pWb35Reg->EP0VM_spin_lock );
363 if( pWb35Reg->pRegFirst == NULL ) 363 if( pWb35Reg->pRegFirst == NULL )
364 pWb35Reg->pRegFirst = pRegQueue; 364 pWb35Reg->pRegFirst = pRegQueue;
365 else 365 else
366 pWb35Reg->pRegLast->Next = pRegQueue; 366 pWb35Reg->pRegLast->Next = pRegQueue;
367 pWb35Reg->pRegLast = pRegQueue; 367 pWb35Reg->pRegLast = pRegQueue;
368 368
369 OS_SPIN_LOCK_RELEASED( &pWb35Reg->EP0VM_spin_lock ); 369 spin_unlock_irq( &pWb35Reg->EP0VM_spin_lock );
370 370
371 // Start EP0VM 371 // Start EP0VM
372 Wb35Reg_EP0VM_start( pHwData ); 372 Wb35Reg_EP0VM_start( pHwData );
@@ -399,7 +399,7 @@ Wb35Reg_EP0VM(phw_data_t pHwData )
399 PWB35REG pWb35Reg = &pHwData->Wb35Reg; 399 PWB35REG pWb35Reg = &pHwData->Wb35Reg;
400 PURB pUrb; 400 PURB pUrb;
401 struct usb_ctrlrequest *dr; 401 struct usb_ctrlrequest *dr;
402 PULONG pBuffer; 402 u32 * pBuffer;
403 int ret = -1; 403 int ret = -1;
404 PREG_QUEUE pRegQueue; 404 PREG_QUEUE pRegQueue;
405 405
@@ -411,9 +411,9 @@ Wb35Reg_EP0VM(phw_data_t pHwData )
411 goto cleanup; 411 goto cleanup;
412 412
413 // Get the register data and send to USB through Irp 413 // Get the register data and send to USB through Irp
414 OS_SPIN_LOCK_ACQUIRED( &pWb35Reg->EP0VM_spin_lock ); 414 spin_lock_irq( &pWb35Reg->EP0VM_spin_lock );
415 pRegQueue = pWb35Reg->pRegFirst; 415 pRegQueue = pWb35Reg->pRegFirst;
416 OS_SPIN_LOCK_RELEASED( &pWb35Reg->EP0VM_spin_lock ); 416 spin_unlock_irq( &pWb35Reg->EP0VM_spin_lock );
417 417
418 if (!pRegQueue) 418 if (!pRegQueue)
419 goto cleanup; 419 goto cleanup;
@@ -429,7 +429,7 @@ Wb35Reg_EP0VM(phw_data_t pHwData )
429 429
430 usb_fill_control_urb( pUrb, pHwData->WbUsb.udev, 430 usb_fill_control_urb( pUrb, pHwData->WbUsb.udev,
431 REG_DIRECTION(pHwData->WbUsb.udev,pRegQueue), 431 REG_DIRECTION(pHwData->WbUsb.udev,pRegQueue),
432 (PUCHAR)dr,pBuffer,cpu_to_le16(dr->wLength), 432 (u8 *)dr,pBuffer,cpu_to_le16(dr->wLength),
433 Wb35Reg_EP0VM_complete, (void*)pHwData); 433 Wb35Reg_EP0VM_complete, (void*)pHwData);
434 434
435 pWb35Reg->EP0vm_state = VM_RUNNING; 435 pWb35Reg->EP0vm_state = VM_RUNNING;
@@ -468,12 +468,12 @@ Wb35Reg_EP0VM_complete(PURB pUrb)
468 OS_ATOMIC_DEC( pHwData->Adapter, &pWb35Reg->RegFireCount ); 468 OS_ATOMIC_DEC( pHwData->Adapter, &pWb35Reg->RegFireCount );
469 } else { 469 } else {
470 // Complete to send, remove the URB from the first 470 // Complete to send, remove the URB from the first
471 OS_SPIN_LOCK_ACQUIRED( &pWb35Reg->EP0VM_spin_lock ); 471 spin_lock_irq( &pWb35Reg->EP0VM_spin_lock );
472 pRegQueue = pWb35Reg->pRegFirst; 472 pRegQueue = pWb35Reg->pRegFirst;
473 if (pRegQueue == pWb35Reg->pRegLast) 473 if (pRegQueue == pWb35Reg->pRegLast)
474 pWb35Reg->pRegLast = NULL; 474 pWb35Reg->pRegLast = NULL;
475 pWb35Reg->pRegFirst = pWb35Reg->pRegFirst->Next; 475 pWb35Reg->pRegFirst = pWb35Reg->pRegFirst->Next;
476 OS_SPIN_LOCK_RELEASED( &pWb35Reg->EP0VM_spin_lock ); 476 spin_unlock_irq( &pWb35Reg->EP0VM_spin_lock );
477 477
478 if (pWb35Reg->EP0VM_status) { 478 if (pWb35Reg->EP0VM_status) {
479#ifdef _PE_REG_DUMP_ 479#ifdef _PE_REG_DUMP_
@@ -513,7 +513,7 @@ Wb35Reg_destroy(phw_data_t pHwData)
513 OS_SLEEP(10000); // Delay for waiting function enter 940623.1.b 513 OS_SLEEP(10000); // Delay for waiting function enter 940623.1.b
514 514
515 // Release all the data in RegQueue 515 // Release all the data in RegQueue
516 OS_SPIN_LOCK_ACQUIRED( &pWb35Reg->EP0VM_spin_lock ); 516 spin_lock_irq( &pWb35Reg->EP0VM_spin_lock );
517 pRegQueue = pWb35Reg->pRegFirst; 517 pRegQueue = pWb35Reg->pRegFirst;
518 while (pRegQueue) { 518 while (pRegQueue) {
519 if (pRegQueue == pWb35Reg->pRegLast) 519 if (pRegQueue == pWb35Reg->pRegLast)
@@ -521,7 +521,7 @@ Wb35Reg_destroy(phw_data_t pHwData)
521 pWb35Reg->pRegFirst = pWb35Reg->pRegFirst->Next; 521 pWb35Reg->pRegFirst = pWb35Reg->pRegFirst->Next;
522 522
523 pUrb = pRegQueue->pUrb; 523 pUrb = pRegQueue->pUrb;
524 OS_SPIN_LOCK_RELEASED( &pWb35Reg->EP0VM_spin_lock ); 524 spin_unlock_irq( &pWb35Reg->EP0VM_spin_lock );
525 if (pUrb) { 525 if (pUrb) {
526 usb_free_urb(pUrb); 526 usb_free_urb(pUrb);
527 kfree(pRegQueue); 527 kfree(pRegQueue);
@@ -530,14 +530,11 @@ Wb35Reg_destroy(phw_data_t pHwData)
530 WBDEBUG(("EP0 queue release error\n")); 530 WBDEBUG(("EP0 queue release error\n"));
531 #endif 531 #endif
532 } 532 }
533 OS_SPIN_LOCK_ACQUIRED( &pWb35Reg->EP0VM_spin_lock ); 533 spin_lock_irq( &pWb35Reg->EP0VM_spin_lock );
534 534
535 pRegQueue = pWb35Reg->pRegFirst; 535 pRegQueue = pWb35Reg->pRegFirst;
536 } 536 }
537 OS_SPIN_LOCK_RELEASED( &pWb35Reg->EP0VM_spin_lock ); 537 spin_unlock_irq( &pWb35Reg->EP0VM_spin_lock );
538
539 // Free resource
540 OS_SPIN_LOCK_FREE( &pWb35Reg->EP0VM_spin_lock );
541} 538}
542 539
543//==================================================================================== 540//====================================================================================
@@ -550,7 +547,7 @@ unsigned char Wb35Reg_initial(phw_data_t pHwData)
550 u32 SoftwareSet, VCO_trim, TxVga, Region_ScanInterval; 547 u32 SoftwareSet, VCO_trim, TxVga, Region_ScanInterval;
551 548
552 // Spin lock is acquired for read and write IRP command 549 // Spin lock is acquired for read and write IRP command
553 OS_SPIN_LOCK_ALLOCATE( &pWb35Reg->EP0VM_spin_lock ); 550 spin_lock_init( &pWb35Reg->EP0VM_spin_lock );
554 551
555 // Getting RF module type from EEPROM ------------------------------------ 552 // Getting RF module type from EEPROM ------------------------------------
556 Wb35Reg_WriteSync( pHwData, 0x03b4, 0x080d0000 ); // Start EEPROM access + Read + address(0x0d) 553 Wb35Reg_WriteSync( pHwData, 0x03b4, 0x080d0000 ); // Start EEPROM access + Read + address(0x0d)
@@ -655,7 +652,7 @@ unsigned char Wb35Reg_initial(phw_data_t pHwData)
655// version in _GENREQ.ASM of the DWB NE1000/2000 driver. 652// version in _GENREQ.ASM of the DWB NE1000/2000 driver.
656//================================================================================== 653//==================================================================================
657u32 654u32
658CardComputeCrc(PUCHAR Buffer, u32 Length) 655CardComputeCrc(u8 * Buffer, u32 Length)
659{ 656{
660 u32 Crc, Carry; 657 u32 Crc, Carry;
661 u32 i, j; 658 u32 i, j;
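The wb35reg.c hunks above swap the driver's private OS_SPIN_LOCK_* wrappers for the kernel's own spinlock primitives: spin_lock_init() where the lock used to be "allocated", spin_lock_irq()/spin_unlock_irq() around every queue walk, and no teardown call at all, since a spinlock_t owns no resources. A minimal sketch of that pattern on a hypothetical singly linked queue (the demo_* names are illustrative, not the driver's):

#include <linux/spinlock.h>
#include <linux/slab.h>

struct demo_entry {
	struct demo_entry *next;
};

struct demo_queue {
	spinlock_t lock;		/* replaces OS_SPIN_LOCK; no free step needed */
	struct demo_entry *first;
};

static void demo_queue_init(struct demo_queue *q)
{
	spin_lock_init(&q->lock);	/* was OS_SPIN_LOCK_ALLOCATE() */
	q->first = NULL;
}

static void demo_queue_drain(struct demo_queue *q)
{
	struct demo_entry *e;

	spin_lock_irq(&q->lock);	/* was OS_SPIN_LOCK_ACQUIRED() */
	while ((e = q->first) != NULL) {
		q->first = e->next;
		/* drop the lock before freeing the entry, as the destroy path above does */
		spin_unlock_irq(&q->lock);
		kfree(e);
		spin_lock_irq(&q->lock);
	}
	spin_unlock_irq(&q->lock);	/* was OS_SPIN_LOCK_RELEASED(); nothing to free afterwards */
}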
diff --git a/drivers/staging/winbond/linux/wb35reg_f.h b/drivers/staging/winbond/linux/wb35reg_f.h
index 38e2906b51a7..3006cfe99ccd 100644
--- a/drivers/staging/winbond/linux/wb35reg_f.h
+++ b/drivers/staging/winbond/linux/wb35reg_f.h
@@ -29,16 +29,16 @@ void EEPROMTxVgaAdjust( phw_data_t pHwData ); // 20060619.5 Add
29 29
30void Wb35Reg_destroy( phw_data_t pHwData ); 30void Wb35Reg_destroy( phw_data_t pHwData );
31 31
32unsigned char Wb35Reg_Read( phw_data_t pHwData, u16 RegisterNo, PULONG pRegisterValue ); 32unsigned char Wb35Reg_Read( phw_data_t pHwData, u16 RegisterNo, u32 * pRegisterValue );
33unsigned char Wb35Reg_ReadSync( phw_data_t pHwData, u16 RegisterNo, PULONG pRegisterValue ); 33unsigned char Wb35Reg_ReadSync( phw_data_t pHwData, u16 RegisterNo, u32 * pRegisterValue );
34unsigned char Wb35Reg_Write( phw_data_t pHwData, u16 RegisterNo, u32 RegisterValue ); 34unsigned char Wb35Reg_Write( phw_data_t pHwData, u16 RegisterNo, u32 RegisterValue );
35unsigned char Wb35Reg_WriteSync( phw_data_t pHwData, u16 RegisterNo, u32 RegisterValue ); 35unsigned char Wb35Reg_WriteSync( phw_data_t pHwData, u16 RegisterNo, u32 RegisterValue );
36unsigned char Wb35Reg_WriteWithCallbackValue( phw_data_t pHwData, 36unsigned char Wb35Reg_WriteWithCallbackValue( phw_data_t pHwData,
37 u16 RegisterNo, 37 u16 RegisterNo,
38 u32 RegisterValue, 38 u32 RegisterValue,
39 PCHAR pValue, 39 s8 *pValue,
40 s8 Len); 40 s8 Len);
41unsigned char Wb35Reg_BurstWrite( phw_data_t pHwData, u16 RegisterNo, PULONG pRegisterData, u8 NumberOfData, u8 Flag ); 41unsigned char Wb35Reg_BurstWrite( phw_data_t pHwData, u16 RegisterNo, u32 * pRegisterData, u8 NumberOfData, u8 Flag );
42 42
43void Wb35Reg_EP0VM( phw_data_t pHwData ); 43void Wb35Reg_EP0VM( phw_data_t pHwData );
44void Wb35Reg_EP0VM_start( phw_data_t pHwData ); 44void Wb35Reg_EP0VM_start( phw_data_t pHwData );
@@ -47,7 +47,7 @@ void Wb35Reg_EP0VM_complete( PURB pUrb );
47u32 BitReverse( u32 dwData, u32 DataLength); 47u32 BitReverse( u32 dwData, u32 DataLength);
48 48
49void CardGetMulticastBit( u8 Address[MAC_ADDR_LENGTH], u8 *Byte, u8 *Value ); 49void CardGetMulticastBit( u8 Address[MAC_ADDR_LENGTH], u8 *Byte, u8 *Value );
50u32 CardComputeCrc( PUCHAR Buffer, u32 Length ); 50u32 CardComputeCrc( u8 * Buffer, u32 Length );
51 51
52void Wb35Reg_phy_calibration( phw_data_t pHwData ); 52void Wb35Reg_phy_calibration( phw_data_t pHwData );
53void Wb35Reg_Update( phw_data_t pHwData, u16 RegisterNo, u32 RegisterValue ); 53void Wb35Reg_Update( phw_data_t pHwData, u16 RegisterNo, u32 RegisterValue );
diff --git a/drivers/staging/winbond/linux/wb35reg_s.h b/drivers/staging/winbond/linux/wb35reg_s.h
index a7595b1e7336..8b35b93f7f02 100644
--- a/drivers/staging/winbond/linux/wb35reg_s.h
+++ b/drivers/staging/winbond/linux/wb35reg_s.h
@@ -75,7 +75,7 @@ typedef struct _REG_QUEUE
75 union 75 union
76 { 76 {
77 u32 VALUE; 77 u32 VALUE;
78 PULONG pBuffer; 78 u32 * pBuffer;
79 }; 79 };
80 u8 RESERVED[4];// space reserved for communication 80 u8 RESERVED[4];// space reserved for communication
81 81
@@ -143,7 +143,7 @@ typedef struct _WB35REG
143 //------------------- 143 //-------------------
144 // VM 144 // VM
145 //------------------- 145 //-------------------
146 OS_SPIN_LOCK EP0VM_spin_lock; // 4B 146 spinlock_t EP0VM_spin_lock; // 4B
147 u32 EP0VM_status;//$$ 147 u32 EP0VM_status;//$$
148 PREG_QUEUE pRegFirst; 148 PREG_QUEUE pRegFirst;
149 PREG_QUEUE pRegLast; 149 PREG_QUEUE pRegLast;
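The header and struct updates continue the same typing cleanup: the NDIS-flavoured pointer typedefs (PUCHAR, PULONG, PCHAR, PUSHORT) are spelled out as the kernel's fixed-width types, so the width of the pointed-to object is visible at every call site. A small sketch of what that looks like for typical prototypes (the demo_* names are made up for illustration; the commented-out forms show the old style):

#include <linux/types.h>

/* formerly: unsigned char demo_reg_read(u16 reg, PULONG value); */
unsigned char demo_reg_read(u16 reg, u32 *value);

/* formerly: u32 demo_crc(PUCHAR buffer, u32 length); */
u32 demo_crc(u8 *buffer, u32 length);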
diff --git a/drivers/staging/winbond/linux/wb35rx.c b/drivers/staging/winbond/linux/wb35rx.c
index 26157eb3d5a2..b4b9f5f371d9 100644
--- a/drivers/staging/winbond/linux/wb35rx.c
+++ b/drivers/staging/winbond/linux/wb35rx.c
@@ -27,7 +27,7 @@ void Wb35Rx_start(phw_data_t pHwData)
27void Wb35Rx( phw_data_t pHwData ) 27void Wb35Rx( phw_data_t pHwData )
28{ 28{
29 PWB35RX pWb35Rx = &pHwData->Wb35Rx; 29 PWB35RX pWb35Rx = &pHwData->Wb35Rx;
30 PUCHAR pRxBufferAddress; 30 u8 * pRxBufferAddress;
31 PURB pUrb = (PURB)pWb35Rx->RxUrb; 31 PURB pUrb = (PURB)pWb35Rx->RxUrb;
32 int retv; 32 int retv;
33 u32 RxBufferId; 33 u32 RxBufferId;
@@ -35,51 +35,50 @@ void Wb35Rx( phw_data_t pHwData )
35 // 35 //
36 // Issuing URB 36 // Issuing URB
37 // 37 //
38 do { 38 if (pHwData->SurpriseRemove || pHwData->HwStop)
39 if (pHwData->SurpriseRemove || pHwData->HwStop) 39 goto error;
40 break;
41 40
42 if (pWb35Rx->rx_halt) 41 if (pWb35Rx->rx_halt)
43 break; 42 goto error;
44 43
45 // Get RxBuffer's ID 44 // Get RxBuffer's ID
46 RxBufferId = pWb35Rx->RxBufferId; 45 RxBufferId = pWb35Rx->RxBufferId;
47 if (!pWb35Rx->RxOwner[RxBufferId]) { 46 if (!pWb35Rx->RxOwner[RxBufferId]) {
48 // It's impossible to run here. 47 // It's impossible to run here.
49 #ifdef _PE_RX_DUMP_ 48 #ifdef _PE_RX_DUMP_
50 WBDEBUG(("Rx driver fifo unavailable\n")); 49 WBDEBUG(("Rx driver fifo unavailable\n"));
51 #endif 50 #endif
52 break; 51 goto error;
53 } 52 }
54 53
55 // Update buffer point, then start to bulkin the data from USB 54 // Update buffer point, then start to bulkin the data from USB
56 pWb35Rx->RxBufferId++; 55 pWb35Rx->RxBufferId++;
57 pWb35Rx->RxBufferId %= MAX_USB_RX_BUFFER_NUMBER; 56 pWb35Rx->RxBufferId %= MAX_USB_RX_BUFFER_NUMBER;
58 57
59 pWb35Rx->CurrentRxBufferId = RxBufferId; 58 pWb35Rx->CurrentRxBufferId = RxBufferId;
60 59
61 if (1 != OS_MEMORY_ALLOC((void* *)&pWb35Rx->pDRx, MAX_USB_RX_BUFFER)) { 60 if (1 != OS_MEMORY_ALLOC((void* *)&pWb35Rx->pDRx, MAX_USB_RX_BUFFER)) {
62 printk("w35und: Rx memory alloc failed\n"); 61 printk("w35und: Rx memory alloc failed\n");
63 break; 62 goto error;
64 } 63 }
65 pRxBufferAddress = pWb35Rx->pDRx; 64 pRxBufferAddress = pWb35Rx->pDRx;
66 65
67 usb_fill_bulk_urb(pUrb, pHwData->WbUsb.udev, 66 usb_fill_bulk_urb(pUrb, pHwData->WbUsb.udev,
68 usb_rcvbulkpipe(pHwData->WbUsb.udev, 3), 67 usb_rcvbulkpipe(pHwData->WbUsb.udev, 3),
69 pRxBufferAddress, MAX_USB_RX_BUFFER, 68 pRxBufferAddress, MAX_USB_RX_BUFFER,
70 Wb35Rx_Complete, pHwData); 69 Wb35Rx_Complete, pHwData);
71 70
72 pWb35Rx->EP3vm_state = VM_RUNNING; 71 pWb35Rx->EP3vm_state = VM_RUNNING;
73 72
74 retv = wb_usb_submit_urb(pUrb); 73 retv = wb_usb_submit_urb(pUrb);
75 74
76 if (retv != 0) { 75 if (retv != 0) {
77 printk("Rx URB sending error\n"); 76 printk("Rx URB sending error\n");
78 break; 77 goto error;
79 } 78 }
80 return; 79 return;
81 } while(FALSE);
82 80
81error:
83 // VM stop 82 // VM stop
84 pWb35Rx->EP3vm_state = VM_STOP; 83 pWb35Rx->EP3vm_state = VM_STOP;
85 OS_ATOMIC_DEC( pHwData->Adapter, &pWb35Rx->RxFireCounter ); 84 OS_ATOMIC_DEC( pHwData->Adapter, &pWb35Rx->RxFireCounter );
@@ -89,7 +88,7 @@ void Wb35Rx_Complete(PURB pUrb)
89{ 88{
90 phw_data_t pHwData = pUrb->context; 89 phw_data_t pHwData = pUrb->context;
91 PWB35RX pWb35Rx = &pHwData->Wb35Rx; 90 PWB35RX pWb35Rx = &pHwData->Wb35Rx;
92 PUCHAR pRxBufferAddress; 91 u8 * pRxBufferAddress;
93 u32 SizeCheck; 92 u32 SizeCheck;
94 u16 BulkLength; 93 u16 BulkLength;
95 u32 RxBufferId; 94 u32 RxBufferId;
@@ -99,65 +98,63 @@ void Wb35Rx_Complete(PURB pUrb)
99 pWb35Rx->EP3vm_state = VM_COMPLETED; 98 pWb35Rx->EP3vm_state = VM_COMPLETED;
100 pWb35Rx->EP3VM_status = pUrb->status;//Store the last result of Irp 99 pWb35Rx->EP3VM_status = pUrb->status;//Store the last result of Irp
101 100
102 do { 101 RxBufferId = pWb35Rx->CurrentRxBufferId;
103 RxBufferId = pWb35Rx->CurrentRxBufferId;
104 102
105 pRxBufferAddress = pWb35Rx->pDRx; 103 pRxBufferAddress = pWb35Rx->pDRx;
106 BulkLength = (u16)pUrb->actual_length; 104 BulkLength = (u16)pUrb->actual_length;
107 105
108 // The IRP is completed 106 // The IRP is completed
109 pWb35Rx->EP3vm_state = VM_COMPLETED; 107 pWb35Rx->EP3vm_state = VM_COMPLETED;
110 108
111 if (pHwData->SurpriseRemove || pHwData->HwStop) // Must be here, or RxBufferId is invalid 109 if (pHwData->SurpriseRemove || pHwData->HwStop) // Must be here, or RxBufferId is invalid
112 break; 110 goto error;
113 111
114 if (pWb35Rx->rx_halt) 112 if (pWb35Rx->rx_halt)
115 break; 113 goto error;
116 114
117 // Start to process the data only in successful condition 115 // Start to process the data only in successful condition
118 pWb35Rx->RxOwner[ RxBufferId ] = 0; // Set the owner to driver 116 pWb35Rx->RxOwner[ RxBufferId ] = 0; // Set the owner to driver
119 R00.value = le32_to_cpu(*(PULONG)pRxBufferAddress); 117 R00.value = le32_to_cpu(*(u32 *)pRxBufferAddress);
120 118
121 // The URB is completed, check the result 119 // The URB is completed, check the result
122 if (pWb35Rx->EP3VM_status != 0) { 120 if (pWb35Rx->EP3VM_status != 0) {
123 #ifdef _PE_USB_STATE_DUMP_ 121 #ifdef _PE_USB_STATE_DUMP_
124 WBDEBUG(("EP3 IoCompleteRoutine return error\n")); 122 WBDEBUG(("EP3 IoCompleteRoutine return error\n"));
125 DebugUsbdStatusInformation( pWb35Rx->EP3VM_status ); 123 DebugUsbdStatusInformation( pWb35Rx->EP3VM_status );
126 #endif 124 #endif
127 pWb35Rx->EP3vm_state = VM_STOP; 125 pWb35Rx->EP3vm_state = VM_STOP;
128 break; 126 goto error;
129 } 127 }
130 128
131 // 20060220 For recovering. check if operating in single USB mode 129 // 20060220 For recovering. check if operating in single USB mode
132 if (!HAL_USB_MODE_BURST(pHwData)) { 130 if (!HAL_USB_MODE_BURST(pHwData)) {
133 SizeCheck = R00.R00_receive_byte_count; //20060926 anson's endian 131 SizeCheck = R00.R00_receive_byte_count; //20060926 anson's endian
134 if ((SizeCheck & 0x03) > 0) 132 if ((SizeCheck & 0x03) > 0)
135 SizeCheck -= 4; 133 SizeCheck -= 4;
136 SizeCheck = (SizeCheck + 3) & ~0x03; 134 SizeCheck = (SizeCheck + 3) & ~0x03;
137 SizeCheck += 12; // 8 + 4 badbeef 135 SizeCheck += 12; // 8 + 4 badbeef
138 if ((BulkLength > 1600) || 136 if ((BulkLength > 1600) ||
139 (SizeCheck > 1600) || 137 (SizeCheck > 1600) ||
140 (BulkLength != SizeCheck) || 138 (BulkLength != SizeCheck) ||
141 (BulkLength == 0)) { // Add for fail Urb 139 (BulkLength == 0)) { // Add for fail Urb
142 pWb35Rx->EP3vm_state = VM_STOP; 140 pWb35Rx->EP3vm_state = VM_STOP;
143 pWb35Rx->Ep3ErrorCount2++; 141 pWb35Rx->Ep3ErrorCount2++;
144 }
145 } 142 }
143 }
146 144
147 // Indicating the receiving data 145 // Indicating the receiving data
148 pWb35Rx->ByteReceived += BulkLength; 146 pWb35Rx->ByteReceived += BulkLength;
149 pWb35Rx->RxBufferSize[ RxBufferId ] = BulkLength; 147 pWb35Rx->RxBufferSize[ RxBufferId ] = BulkLength;
150
151 if (!pWb35Rx->RxOwner[ RxBufferId ])
152 Wb35Rx_indicate(pHwData);
153 148
154 kfree(pWb35Rx->pDRx); 149 if (!pWb35Rx->RxOwner[ RxBufferId ])
155 // Do the next receive 150 Wb35Rx_indicate(pHwData);
156 Wb35Rx(pHwData);
157 return;
158 151
159 } while(FALSE); 152 kfree(pWb35Rx->pDRx);
153 // Do the next receive
154 Wb35Rx(pHwData);
155 return;
160 156
157error:
161 pWb35Rx->RxOwner[ RxBufferId ] = 1; // Set the owner to hardware 158 pWb35Rx->RxOwner[ RxBufferId ] = 1; // Set the owner to hardware
162 OS_ATOMIC_DEC( pHwData->Adapter, &pWb35Rx->RxFireCounter ); 159 OS_ATOMIC_DEC( pHwData->Adapter, &pWb35Rx->RxFireCounter );
163 pWb35Rx->EP3vm_state = VM_STOP; 160 pWb35Rx->EP3vm_state = VM_STOP;
@@ -223,7 +220,7 @@ void Wb35Rx_reset_descriptor( phw_data_t pHwData )
223 220
224void Wb35Rx_adjust(PDESCRIPTOR pRxDes) 221void Wb35Rx_adjust(PDESCRIPTOR pRxDes)
225{ 222{
226 PULONG pRxBufferAddress; 223 u32 * pRxBufferAddress;
227 u32 DecryptionMethod; 224 u32 DecryptionMethod;
228 u32 i; 225 u32 i;
229 u16 BufferSize; 226 u16 BufferSize;
@@ -264,7 +261,7 @@ u16 Wb35Rx_indicate(phw_data_t pHwData)
264{ 261{
265 DESCRIPTOR RxDes; 262 DESCRIPTOR RxDes;
266 PWB35RX pWb35Rx = &pHwData->Wb35Rx; 263 PWB35RX pWb35Rx = &pHwData->Wb35Rx;
267 PUCHAR pRxBufferAddress; 264 u8 * pRxBufferAddress;
268 u16 PacketSize; 265 u16 PacketSize;
269 u16 stmp, BufferSize, stmp2 = 0; 266 u16 stmp, BufferSize, stmp2 = 0;
270 u32 RxBufferId; 267 u32 RxBufferId;
@@ -283,13 +280,13 @@ u16 Wb35Rx_indicate(phw_data_t pHwData)
283 280
284 // Parse the bulkin buffer 281 // Parse the bulkin buffer
285 while (BufferSize >= 4) { 282 while (BufferSize >= 4) {
286 if ((cpu_to_le32(*(PULONG)pRxBufferAddress) & 0x0fffffff) == RX_END_TAG) //Is ending? 921002.9.a 283 if ((cpu_to_le32(*(u32 *)pRxBufferAddress) & 0x0fffffff) == RX_END_TAG) //Is ending? 921002.9.a
287 break; 284 break;
288 285
289 // Get the R00 R01 first 286 // Get the R00 R01 first
290 RxDes.R00.value = le32_to_cpu(*(PULONG)pRxBufferAddress); 287 RxDes.R00.value = le32_to_cpu(*(u32 *)pRxBufferAddress);
291 PacketSize = (u16)RxDes.R00.R00_receive_byte_count; 288 PacketSize = (u16)RxDes.R00.R00_receive_byte_count;
292 RxDes.R01.value = le32_to_cpu(*((PULONG)(pRxBufferAddress+4))); 289 RxDes.R01.value = le32_to_cpu(*((u32 *)(pRxBufferAddress+4)));
293 // For new DMA 4k 290 // For new DMA 4k
294 if ((PacketSize & 0x03) > 0) 291 if ((PacketSize & 0x03) > 0)
295 PacketSize -= 4; 292 PacketSize -= 4;
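Wb35Rx() and Wb35Rx_Complete() above also drop the do { ... break; } while (FALSE) construct in favour of a single error label, the usual kernel idiom for one-exit failure handling. A self-contained sketch of the transformation (the demo_* names and DEMO_VM_STOP are placeholders standing in for the driver's state machine and wb_usb_submit_urb()):

struct demo_dev {
	int halted;
	int state;
};

#define DEMO_VM_STOP	0		/* placeholder for the driver's VM_STOP */

static int demo_submit(struct demo_dev *d)
{
	return 0;			/* stand-in for wb_usb_submit_urb() */
}

/* before: faked single exit with a one-shot loop */
static void demo_old(struct demo_dev *d)
{
	do {
		if (d->halted)
			break;
		if (demo_submit(d) != 0)
			break;
		return;			/* success path */
	} while (0);

	d->state = DEMO_VM_STOP;	/* shared failure handling */
}

/* after: the same flow with an explicit label */
static void demo_new(struct demo_dev *d)
{
	if (d->halted)
		goto error;
	if (demo_submit(d) != 0)
		goto error;
	return;				/* success path */

error:
	d->state = DEMO_VM_STOP;	/* shared failure handling */
}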
diff --git a/drivers/staging/winbond/linux/wb35rx_s.h b/drivers/staging/winbond/linux/wb35rx_s.h
index 53b831fdeb78..b90c269e6adb 100644
--- a/drivers/staging/winbond/linux/wb35rx_s.h
+++ b/drivers/staging/winbond/linux/wb35rx_s.h
@@ -41,7 +41,7 @@ typedef struct _WB35RX
41 u32 Ep3ErrorCount2; // 20060625.1 Usbd for Rx DMA error count 41 u32 Ep3ErrorCount2; // 20060625.1 Usbd for Rx DMA error count
42 42
43 int EP3VM_status; 43 int EP3VM_status;
44 PUCHAR pDRx; 44 u8 * pDRx;
45 45
46} WB35RX, *PWB35RX; 46} WB35RX, *PWB35RX;
47 47
diff --git a/drivers/staging/winbond/linux/wb35tx.c b/drivers/staging/winbond/linux/wb35tx.c
index cf19c3bc524a..ba9d51244e29 100644
--- a/drivers/staging/winbond/linux/wb35tx.c
+++ b/drivers/staging/winbond/linux/wb35tx.c
@@ -12,7 +12,7 @@
12 12
13 13
14unsigned char 14unsigned char
15Wb35Tx_get_tx_buffer(phw_data_t pHwData, PUCHAR *pBuffer ) 15Wb35Tx_get_tx_buffer(phw_data_t pHwData, u8 **pBuffer)
16{ 16{
17 PWB35TX pWb35Tx = &pHwData->Wb35Tx; 17 PWB35TX pWb35Tx = &pHwData->Wb35Tx;
18 18
@@ -37,7 +37,7 @@ void Wb35Tx(phw_data_t pHwData)
37{ 37{
38 PWB35TX pWb35Tx = &pHwData->Wb35Tx; 38 PWB35TX pWb35Tx = &pHwData->Wb35Tx;
39 PADAPTER Adapter = pHwData->Adapter; 39 PADAPTER Adapter = pHwData->Adapter;
40 PUCHAR pTxBufferAddress; 40 u8 *pTxBufferAddress;
41 PMDS pMds = &Adapter->Mds; 41 PMDS pMds = &Adapter->Mds;
42 struct urb * pUrb = (struct urb *)pWb35Tx->Tx4Urb; 42 struct urb * pUrb = (struct urb *)pWb35Tx->Tx4Urb;
43 int retv; 43 int retv;
@@ -100,25 +100,24 @@ void Wb35Tx_complete(struct urb * pUrb)
100 pWb35Tx->TxSendIndex++; 100 pWb35Tx->TxSendIndex++;
101 pWb35Tx->TxSendIndex %= MAX_USB_TX_BUFFER_NUMBER; 101 pWb35Tx->TxSendIndex %= MAX_USB_TX_BUFFER_NUMBER;
102 102
103 do { 103 if (pHwData->SurpriseRemove || pHwData->HwStop) // Let WbWlanHalt to handle surprise remove
104 if (pHwData->SurpriseRemove || pHwData->HwStop) // Let WbWlanHalt to handle surprise remove 104 goto error;
105 break;
106 105
107 if (pWb35Tx->tx_halt) 106 if (pWb35Tx->tx_halt)
108 break; 107 goto error;
109 108
110 // The URB is completed, check the result 109 // The URB is completed, check the result
111 if (pWb35Tx->EP4VM_status != 0) { 110 if (pWb35Tx->EP4VM_status != 0) {
112 printk("URB submission failed\n"); 111 printk("URB submission failed\n");
113 pWb35Tx->EP4vm_state = VM_STOP; 112 pWb35Tx->EP4vm_state = VM_STOP;
114 break; // Exit while(FALSE); 113 goto error;
115 } 114 }
116 115
117 Mds_Tx(Adapter); 116 Mds_Tx(Adapter);
118 Wb35Tx(pHwData); 117 Wb35Tx(pHwData);
119 return; 118 return;
120 } while(FALSE);
121 119
120error:
122 OS_ATOMIC_DEC( pHwData->Adapter, &pWb35Tx->TxFireCounter ); 121 OS_ATOMIC_DEC( pHwData->Adapter, &pWb35Tx->TxFireCounter );
123 pWb35Tx->EP4vm_state = VM_STOP; 122 pWb35Tx->EP4vm_state = VM_STOP;
124} 123}
@@ -225,36 +224,33 @@ void Wb35Tx_EP2VM(phw_data_t pHwData)
225{ 224{
226 PWB35TX pWb35Tx = &pHwData->Wb35Tx; 225 PWB35TX pWb35Tx = &pHwData->Wb35Tx;
227 struct urb * pUrb = (struct urb *)pWb35Tx->Tx2Urb; 226 struct urb * pUrb = (struct urb *)pWb35Tx->Tx2Urb;
228 PULONG pltmp = (PULONG)pWb35Tx->EP2_buf; 227 u32 * pltmp = (u32 *)pWb35Tx->EP2_buf;
229 int retv; 228 int retv;
230 229
231 do { 230 if (pHwData->SurpriseRemove || pHwData->HwStop)
232 if (pHwData->SurpriseRemove || pHwData->HwStop) 231 goto error;
233 break;
234
235 if (pWb35Tx->tx_halt)
236 break;
237
238 //
239 // Issuing URB
240 //
241 usb_fill_int_urb( pUrb, pHwData->WbUsb.udev, usb_rcvintpipe(pHwData->WbUsb.udev,2),
242 pltmp, MAX_INTERRUPT_LENGTH, Wb35Tx_EP2VM_complete, pHwData, 32);
243 232
244 pWb35Tx->EP2vm_state = VM_RUNNING; 233 if (pWb35Tx->tx_halt)
245 retv = wb_usb_submit_urb( pUrb ); 234 goto error;
246 235
247 if(retv < 0) { 236 //
248 #ifdef _PE_TX_DUMP_ 237 // Issuing URB
249 WBDEBUG(("EP2 Tx Irp sending error\n")); 238 //
250 #endif 239 usb_fill_int_urb( pUrb, pHwData->WbUsb.udev, usb_rcvintpipe(pHwData->WbUsb.udev,2),
251 break; 240 pltmp, MAX_INTERRUPT_LENGTH, Wb35Tx_EP2VM_complete, pHwData, 32);
252 }
253 241
254 return; 242 pWb35Tx->EP2vm_state = VM_RUNNING;
243 retv = wb_usb_submit_urb( pUrb );
255 244
256 } while(FALSE); 245 if (retv < 0) {
246 #ifdef _PE_TX_DUMP_
247 WBDEBUG(("EP2 Tx Irp sending error\n"));
248 #endif
249 goto error;
250 }
257 251
252 return;
253error:
258 pWb35Tx->EP2vm_state = VM_STOP; 254 pWb35Tx->EP2vm_state = VM_STOP;
259 OS_ATOMIC_DEC( pHwData->Adapter, &pWb35Tx->TxResultCount ); 255 OS_ATOMIC_DEC( pHwData->Adapter, &pWb35Tx->TxResultCount );
260} 256}
@@ -266,7 +262,7 @@ void Wb35Tx_EP2VM_complete(struct urb * pUrb)
266 T02_DESCRIPTOR T02, TSTATUS; 262 T02_DESCRIPTOR T02, TSTATUS;
267 PADAPTER Adapter = (PADAPTER)pHwData->Adapter; 263 PADAPTER Adapter = (PADAPTER)pHwData->Adapter;
268 PWB35TX pWb35Tx = &pHwData->Wb35Tx; 264 PWB35TX pWb35Tx = &pHwData->Wb35Tx;
269 PULONG pltmp = (PULONG)pWb35Tx->EP2_buf; 265 u32 * pltmp = (u32 *)pWb35Tx->EP2_buf;
270 u32 i; 266 u32 i;
271 u16 InterruptInLength; 267 u16 InterruptInLength;
272 268
@@ -275,38 +271,36 @@ void Wb35Tx_EP2VM_complete(struct urb * pUrb)
275 pWb35Tx->EP2vm_state = VM_COMPLETED; 271 pWb35Tx->EP2vm_state = VM_COMPLETED;
276 pWb35Tx->EP2VM_status = pUrb->status; 272 pWb35Tx->EP2VM_status = pUrb->status;
277 273
278 do { 274 // For Linux 2.4. Interrupt will always trigger
279 // For Linux 2.4. Interrupt will always trigger 275 if (pHwData->SurpriseRemove || pHwData->HwStop) // Let WbWlanHalt to handle surprise remove
280 if( pHwData->SurpriseRemove || pHwData->HwStop ) // Let WbWlanHalt to handle surprise remove 276 goto error;
281 break; 277
282 278 if (pWb35Tx->tx_halt)
283 if( pWb35Tx->tx_halt ) 279 goto error;
284 break; 280
285 281 //The Urb is completed, check the result
286 //The Urb is completed, check the result 282 if (pWb35Tx->EP2VM_status != 0) {
287 if (pWb35Tx->EP2VM_status != 0) { 283 WBDEBUG(("EP2 IoCompleteRoutine return error\n"));
288 WBDEBUG(("EP2 IoCompleteRoutine return error\n")); 284 pWb35Tx->EP2vm_state= VM_STOP;
289 pWb35Tx->EP2vm_state= VM_STOP; 285 goto error;
290 break; // Exit while(FALSE); 286 }
291 }
292
293 // Update the Tx result
294 InterruptInLength = pUrb->actual_length;
295 // Modify for minimum memory access and DWORD alignment.
296 T02.value = cpu_to_le32(pltmp[0]) >> 8; // [31:8] -> [24:0]
297 InterruptInLength -= 1;// 20051221.1.c Modify the follow for more stable
298 InterruptInLength >>= 2; // InterruptInLength/4
299 for (i=1; i<=InterruptInLength; i++) {
300 T02.value |= ((cpu_to_le32(pltmp[i]) & 0xff) << 24);
301
302 TSTATUS.value = T02.value; //20061009 anson's endian
303 Mds_SendComplete( Adapter, &TSTATUS );
304 T02.value = cpu_to_le32(pltmp[i]) >> 8;
305 }
306
307 return;
308 } while(FALSE);
309 287
288 // Update the Tx result
289 InterruptInLength = pUrb->actual_length;
290 // Modify for minimum memory access and DWORD alignment.
291 T02.value = cpu_to_le32(pltmp[0]) >> 8; // [31:8] -> [24:0]
292 InterruptInLength -= 1;// 20051221.1.c Modify the follow for more stable
293 InterruptInLength >>= 2; // InterruptInLength/4
294 for (i = 1; i <= InterruptInLength; i++) {
295 T02.value |= ((cpu_to_le32(pltmp[i]) & 0xff) << 24);
296
297 TSTATUS.value = T02.value; //20061009 anson's endian
298 Mds_SendComplete( Adapter, &TSTATUS );
299 T02.value = cpu_to_le32(pltmp[i]) >> 8;
300 }
301
302 return;
303error:
310 OS_ATOMIC_DEC( pHwData->Adapter, &pWb35Tx->TxResultCount ); 304 OS_ATOMIC_DEC( pHwData->Adapter, &pWb35Tx->TxResultCount );
311 pWb35Tx->EP2vm_state = VM_STOP; 305 pWb35Tx->EP2vm_state = VM_STOP;
312} 306}
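The Wb35Tx_EP2VM() rewrite keeps the same interrupt-URB handling and only flattens the control flow; underneath it is the standard fill-and-submit sequence for an interrupt-IN endpoint. A hedged sketch of that sequence, using usb_submit_urb() directly rather than the driver's wb_usb_submit_urb() wrapper (endpoint 2 and the interval of 32 come from the hunk above; the other names are placeholders):

#include <linux/usb.h>
#include <linux/slab.h>

/* udev/urb/buf/complete_fn/ctx stand in for the driver's own device,
 * EP2 buffer and completion handler. */
static int demo_start_int_in(struct usb_device *udev, struct urb *urb,
			     void *buf, int len,
			     usb_complete_t complete_fn, void *ctx)
{
	usb_fill_int_urb(urb, udev,
			 usb_rcvintpipe(udev, 2),	/* interrupt-IN endpoint 2 */
			 buf, len,
			 complete_fn, ctx,
			 32);				/* polling interval */

	/* a negative return means the submit failed and the caller stops its state machine */
	return usb_submit_urb(urb, GFP_ATOMIC);
}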
diff --git a/drivers/staging/winbond/linux/wb35tx_f.h b/drivers/staging/winbond/linux/wb35tx_f.h
index 7705a8454dcb..107b12918137 100644
--- a/drivers/staging/winbond/linux/wb35tx_f.h
+++ b/drivers/staging/winbond/linux/wb35tx_f.h
@@ -3,7 +3,7 @@
3//==================================== 3//====================================
4unsigned char Wb35Tx_initial( phw_data_t pHwData ); 4unsigned char Wb35Tx_initial( phw_data_t pHwData );
5void Wb35Tx_destroy( phw_data_t pHwData ); 5void Wb35Tx_destroy( phw_data_t pHwData );
6unsigned char Wb35Tx_get_tx_buffer( phw_data_t pHwData, PUCHAR *pBuffer ); 6unsigned char Wb35Tx_get_tx_buffer( phw_data_t pHwData, u8 **pBuffer );
7 7
8void Wb35Tx_EP2VM( phw_data_t pHwData ); 8void Wb35Tx_EP2VM( phw_data_t pHwData );
9void Wb35Tx_EP2VM_start( phw_data_t pHwData ); 9void Wb35Tx_EP2VM_start( phw_data_t pHwData );
diff --git a/drivers/staging/winbond/linux/wbusb.c b/drivers/staging/winbond/linux/wbusb.c
index cbad5fb05959..f4a7875f2389 100644
--- a/drivers/staging/winbond/linux/wbusb.c
+++ b/drivers/staging/winbond/linux/wbusb.c
@@ -6,42 +6,29 @@
6#include "sysdef.h" 6#include "sysdef.h"
7#include <net/mac80211.h> 7#include <net/mac80211.h>
8 8
9 9MODULE_AUTHOR(DRIVER_AUTHOR);
10MODULE_AUTHOR( DRIVER_AUTHOR ); 10MODULE_DESCRIPTION(DRIVER_DESC);
11MODULE_DESCRIPTION( DRIVER_DESC );
12MODULE_LICENSE("GPL"); 11MODULE_LICENSE("GPL");
13MODULE_VERSION("0.1"); 12MODULE_VERSION("0.1");
14 13
15 14static struct usb_device_id wb35_table[] __devinitdata = {
16//============================================================ 15 {USB_DEVICE(0x0416, 0x0035)},
17// vendor ID and product ID can into here for others 16 {USB_DEVICE(0x18E8, 0x6201)},
18//============================================================ 17 {USB_DEVICE(0x18E8, 0x6206)},
19static struct usb_device_id Id_Table[] = 18 {USB_DEVICE(0x18E8, 0x6217)},
20{ 19 {USB_DEVICE(0x18E8, 0x6230)},
21 {USB_DEVICE( 0x0416, 0x0035 )}, 20 {USB_DEVICE(0x18E8, 0x6233)},
22 {USB_DEVICE( 0x18E8, 0x6201 )}, 21 {USB_DEVICE(0x1131, 0x2035)},
23 {USB_DEVICE( 0x18E8, 0x6206 )}, 22 { 0, }
24 {USB_DEVICE( 0x18E8, 0x6217 )},
25 {USB_DEVICE( 0x18E8, 0x6230 )},
26 {USB_DEVICE( 0x18E8, 0x6233 )},
27 {USB_DEVICE( 0x1131, 0x2035 )},
28 { }
29}; 23};
30 24
31MODULE_DEVICE_TABLE(usb, Id_Table); 25MODULE_DEVICE_TABLE(usb, wb35_table);
32 26
33static struct usb_driver wb35_driver = { 27static struct ieee80211_rate wbsoft_rates[] = {
34 .name = "w35und",
35 .probe = wb35_probe,
36 .disconnect = wb35_disconnect,
37 .id_table = Id_Table,
38};
39
40static const struct ieee80211_rate wbsoft_rates[] = {
41 { .bitrate = 10, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 28 { .bitrate = 10, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
42}; 29};
43 30
44static const struct ieee80211_channel wbsoft_channels[] = { 31static struct ieee80211_channel wbsoft_channels[] = {
45 { .center_freq = 2412}, 32 { .center_freq = 2412},
46}; 33};
47 34
@@ -62,9 +49,22 @@ static void wbsoft_remove_interface(struct ieee80211_hw *dev,
62 printk("wbsoft_remove interface called\n"); 49 printk("wbsoft_remove interface called\n");
63} 50}
64 51
65static int wbsoft_nop(void) 52static void wbsoft_stop(struct ieee80211_hw *hw)
53{
54 printk(KERN_INFO "%s called\n", __func__);
55}
56
57static int wbsoft_get_stats(struct ieee80211_hw *hw,
58 struct ieee80211_low_level_stats *stats)
66{ 59{
67 printk("wbsoft_nop called\n"); 60 printk(KERN_INFO "%s called\n", __func__);
61 return 0;
62}
63
64static int wbsoft_get_tx_stats(struct ieee80211_hw *hw,
65 struct ieee80211_tx_queue_stats *stats)
66{
67 printk(KERN_INFO "%s called\n", __func__);
68 return 0; 68 return 0;
69} 69}
70 70
@@ -105,8 +105,7 @@ static void wbsoft_configure_filter(struct ieee80211_hw *dev,
105 *total_flags = new_flags; 105 *total_flags = new_flags;
106} 106}
107 107
108static int wbsoft_tx(struct ieee80211_hw *dev, struct sk_buff *skb, 108static int wbsoft_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
109 struct ieee80211_tx_control *control)
110{ 109{
111 char *buffer = kmalloc(skb->len, GFP_ATOMIC); 110 char *buffer = kmalloc(skb->len, GFP_ATOMIC);
112 printk("Sending frame %d bytes\n", skb->len); 111 printk("Sending frame %d bytes\n", skb->len);
@@ -136,7 +135,7 @@ static int wbsoft_config(struct ieee80211_hw *dev, struct ieee80211_conf *conf)
136 hal_set_current_channel(&my_adapter->sHwData, ch); 135 hal_set_current_channel(&my_adapter->sHwData, ch);
137 hal_set_beacon_period(&my_adapter->sHwData, conf->beacon_int); 136 hal_set_beacon_period(&my_adapter->sHwData, conf->beacon_int);
138// hal_set_cap_info(&my_adapter->sHwData, ?? ); 137// hal_set_cap_info(&my_adapter->sHwData, ?? );
139// hal_set_ssid(phw_data_t pHwData, PUCHAR pssid, u8 ssid_len); ?? 138// hal_set_ssid(phw_data_t pHwData, u8 * pssid, u8 ssid_len); ??
140 hal_set_accept_broadcast(&my_adapter->sHwData, 1); 139 hal_set_accept_broadcast(&my_adapter->sHwData, 1);
141 hal_set_accept_promiscuous(&my_adapter->sHwData, 1); 140 hal_set_accept_promiscuous(&my_adapter->sHwData, 1);
142 hal_set_accept_multicast(&my_adapter->sHwData, 1); 141 hal_set_accept_multicast(&my_adapter->sHwData, 1);
@@ -148,7 +147,7 @@ static int wbsoft_config(struct ieee80211_hw *dev, struct ieee80211_conf *conf)
148 147
149// hal_start_bss(&my_adapter->sHwData, WLAN_BSSTYPE_INFRASTRUCTURE); ?? 148// hal_start_bss(&my_adapter->sHwData, WLAN_BSSTYPE_INFRASTRUCTURE); ??
150 149
151//void hal_set_rates(phw_data_t pHwData, PUCHAR pbss_rates, 150//void hal_set_rates(phw_data_t pHwData, u8 * pbss_rates,
152// u8 length, unsigned char basic_rate_set) 151// u8 length, unsigned char basic_rate_set)
153 152
154 return 0; 153 return 0;
@@ -171,14 +170,14 @@ static u64 wbsoft_get_tsf(struct ieee80211_hw *dev)
171static const struct ieee80211_ops wbsoft_ops = { 170static const struct ieee80211_ops wbsoft_ops = {
172 .tx = wbsoft_tx, 171 .tx = wbsoft_tx,
173 .start = wbsoft_start, /* Start can be pretty much empty as we do WbWLanInitialize() during probe? */ 172 .start = wbsoft_start, /* Start can be pretty much empty as we do WbWLanInitialize() during probe? */
174 .stop = wbsoft_nop, 173 .stop = wbsoft_stop,
175 .add_interface = wbsoft_add_interface, 174 .add_interface = wbsoft_add_interface,
176 .remove_interface = wbsoft_remove_interface, 175 .remove_interface = wbsoft_remove_interface,
177 .config = wbsoft_config, 176 .config = wbsoft_config,
178 .config_interface = wbsoft_config_interface, 177 .config_interface = wbsoft_config_interface,
179 .configure_filter = wbsoft_configure_filter, 178 .configure_filter = wbsoft_configure_filter,
180 .get_stats = wbsoft_nop, 179 .get_stats = wbsoft_get_stats,
181 .get_tx_stats = wbsoft_nop, 180 .get_tx_stats = wbsoft_get_tx_stats,
182 .get_tsf = wbsoft_get_tsf, 181 .get_tsf = wbsoft_get_tsf,
183// conf_tx: hal_set_cwmin()/hal_set_cwmax; 182// conf_tx: hal_set_cwmin()/hal_set_cwmax;
184}; 183};
@@ -187,21 +186,6 @@ struct wbsoft_priv {
187}; 186};
188 187
189 188
190int __init wb35_init(void)
191{
192 printk("[w35und]driver init\n");
193 return usb_register(&wb35_driver);
194}
195
196void __exit wb35_exit(void)
197{
198 printk("[w35und]driver exit\n");
199 usb_deregister( &wb35_driver );
200}
201
202module_init(wb35_init);
203module_exit(wb35_exit);
204
205// Usb kernel subsystem will call this function when a new device is plugged into. 189// Usb kernel subsystem will call this function when a new device is plugged into.
206int wb35_probe(struct usb_interface *intf, const struct usb_device_id *id_table) 190int wb35_probe(struct usb_interface *intf, const struct usb_device_id *id_table)
207{ 191{
@@ -210,7 +194,7 @@ int wb35_probe(struct usb_interface *intf, const struct usb_device_id *id_table)
210 PWBUSB pWbUsb; 194 PWBUSB pWbUsb;
211 struct usb_host_interface *interface; 195 struct usb_host_interface *interface;
212 struct usb_endpoint_descriptor *endpoint; 196 struct usb_endpoint_descriptor *endpoint;
213 int i, ret = -1; 197 int ret = -1;
214 u32 ltmp; 198 u32 ltmp;
215 struct usb_device *udev = interface_to_usbdev(intf); 199 struct usb_device *udev = interface_to_usbdev(intf);
216 200
@@ -218,114 +202,95 @@ int wb35_probe(struct usb_interface *intf, const struct usb_device_id *id_table)
218 202
219 printk("[w35und]wb35_probe ->\n"); 203 printk("[w35und]wb35_probe ->\n");
220 204
221 do { 205 // 20060630.2 Check the device if it already be opened
222 for (i=0; i<(sizeof(Id_Table)/sizeof(struct usb_device_id)); i++ ) { 206 ret = usb_control_msg(udev, usb_rcvctrlpipe( udev, 0 ),
223 if ((udev->descriptor.idVendor == Id_Table[i].idVendor) && 207 0x01, USB_TYPE_VENDOR|USB_RECIP_DEVICE|USB_DIR_IN,
224 (udev->descriptor.idProduct == Id_Table[i].idProduct)) { 208 0x0, 0x400, &ltmp, 4, HZ*100 );
225 printk("[w35und]Found supported hardware\n"); 209 if (ret < 0)
226 break; 210 goto error;
227 }
228 }
229 if ((i == (sizeof(Id_Table)/sizeof(struct usb_device_id)))) {
230 #ifdef _PE_USB_INI_DUMP_
231 WBDEBUG(("[w35und] This is not the one we are interested about\n"));
232 #endif
233 return -ENODEV;
234 }
235
236 // 20060630.2 Check the device if it already be opened
237 ret = usb_control_msg(udev, usb_rcvctrlpipe( udev, 0 ),
238 0x01, USB_TYPE_VENDOR|USB_RECIP_DEVICE|USB_DIR_IN,
239 0x0, 0x400, &ltmp, 4, HZ*100 );
240 if( ret < 0 )
241 break;
242 211
243 ltmp = cpu_to_le32(ltmp); 212 ltmp = cpu_to_le32(ltmp);
244 if (ltmp) // Is already initialized? 213 if (ltmp) // Is already initialized?
245 break; 214 goto error;
246 215
216 Adapter = kzalloc(sizeof(ADAPTER), GFP_KERNEL);
247 217
248 Adapter = kzalloc(sizeof(ADAPTER), GFP_KERNEL); 218 my_adapter = Adapter;
219 pWbLinux = &Adapter->WbLinux;
220 pWbUsb = &Adapter->sHwData.WbUsb;
221 pWbUsb->udev = udev;
249 222
250 my_adapter = Adapter; 223 interface = intf->cur_altsetting;
251 pWbLinux = &Adapter->WbLinux; 224 endpoint = &interface->endpoint[0].desc;
252 pWbUsb = &Adapter->sHwData.WbUsb;
253 pWbUsb->udev = udev;
254 225
255 interface = intf->cur_altsetting; 226 if (endpoint[2].wMaxPacketSize == 512) {
256 endpoint = &interface->endpoint[0].desc; 227 printk("[w35und] Working on USB 2.0\n");
257 228 pWbUsb->IsUsb20 = 1;
258 if (endpoint[2].wMaxPacketSize == 512) { 229 }
259 printk("[w35und] Working on USB 2.0\n");
260 pWbUsb->IsUsb20 = 1;
261 }
262
263 if (!WbWLanInitialize(Adapter)) {
264 printk("[w35und]WbWLanInitialize fail\n");
265 break;
266 }
267 230
268 { 231 if (!WbWLanInitialize(Adapter)) {
269 struct wbsoft_priv *priv; 232 printk("[w35und]WbWLanInitialize fail\n");
270 struct ieee80211_hw *dev; 233 goto error;
271 int res; 234 }
272 235
273 dev = ieee80211_alloc_hw(sizeof(*priv), &wbsoft_ops); 236 {
237 struct wbsoft_priv *priv;
238 struct ieee80211_hw *dev;
239 static struct ieee80211_supported_band band;
240 int res;
274 241
275 if (!dev) { 242 dev = ieee80211_alloc_hw(sizeof(*priv), &wbsoft_ops);
276 printk("w35und: ieee80211 alloc failed\n" );
277 BUG();
278 }
279 243
280 my_dev = dev; 244 if (!dev) {
245 printk("w35und: ieee80211 alloc failed\n" );
246 BUG();
247 }
281 248
282 SET_IEEE80211_DEV(dev, &udev->dev); 249 my_dev = dev;
283 {
284 phw_data_t pHwData = &Adapter->sHwData;
285 unsigned char dev_addr[MAX_ADDR_LEN];
286 hal_get_permanent_address(pHwData, dev_addr);
287 SET_IEEE80211_PERM_ADDR(dev, dev_addr);
288 }
289 250
251 SET_IEEE80211_DEV(dev, &udev->dev);
252 {
253 phw_data_t pHwData = &Adapter->sHwData;
254 unsigned char dev_addr[MAX_ADDR_LEN];
255 hal_get_permanent_address(pHwData, dev_addr);
256 SET_IEEE80211_PERM_ADDR(dev, dev_addr);
257 }
290 258
291 dev->extra_tx_headroom = 12; /* FIXME */
292 dev->flags = 0;
293 259
294 dev->channel_change_time = 1000; 260 dev->extra_tx_headroom = 12; /* FIXME */
295// dev->max_rssi = 100; 261 dev->flags = 0;
296 262
297 dev->queues = 1; 263 dev->channel_change_time = 1000;
264// dev->max_rssi = 100;
298 265
299 static struct ieee80211_supported_band band; 266 dev->queues = 1;
300 267
301 band.channels = wbsoft_channels; 268 band.channels = wbsoft_channels;
302 band.n_channels = ARRAY_SIZE(wbsoft_channels); 269 band.n_channels = ARRAY_SIZE(wbsoft_channels);
303 band.bitrates = wbsoft_rates; 270 band.bitrates = wbsoft_rates;
304 band.n_bitrates = ARRAY_SIZE(wbsoft_rates); 271 band.n_bitrates = ARRAY_SIZE(wbsoft_rates);
305 272
306 dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &band; 273 dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &band;
307#if 0 274#if 0
308 wbsoft_modes[0].num_channels = 1; 275 wbsoft_modes[0].num_channels = 1;
309 wbsoft_modes[0].channels = wbsoft_channels; 276 wbsoft_modes[0].channels = wbsoft_channels;
310 wbsoft_modes[0].mode = MODE_IEEE80211B; 277 wbsoft_modes[0].mode = MODE_IEEE80211B;
311 wbsoft_modes[0].num_rates = ARRAY_SIZE(wbsoft_rates); 278 wbsoft_modes[0].num_rates = ARRAY_SIZE(wbsoft_rates);
312 wbsoft_modes[0].rates = wbsoft_rates; 279 wbsoft_modes[0].rates = wbsoft_rates;
313 280
314 res = ieee80211_register_hwmode(dev, &wbsoft_modes[0]); 281 res = ieee80211_register_hwmode(dev, &wbsoft_modes[0]);
315 BUG_ON(res); 282 BUG_ON(res);
316#endif 283#endif
317 284
318 res = ieee80211_register_hw(dev); 285 res = ieee80211_register_hw(dev);
319 BUG_ON(res); 286 BUG_ON(res);
320 } 287 }
321
322 usb_set_intfdata( intf, Adapter );
323
324 printk("[w35und] _probe OK\n");
325 return 0;
326 288
327 } while(FALSE); 289 usb_set_intfdata( intf, Adapter );
328 290
291 printk("[w35und] _probe OK\n");
292 return 0;
293error:
329 return -ENOMEM; 294 return -ENOMEM;
330} 295}
331 296
@@ -401,4 +366,22 @@ void wb35_disconnect(struct usb_interface *intf)
401 366
402} 367}
403 368
369static struct usb_driver wb35_driver = {
370 .name = "w35und",
371 .id_table = wb35_table,
372 .probe = wb35_probe,
373 .disconnect = wb35_disconnect,
374};
404 375
376static int __init wb35_init(void)
377{
378 return usb_register(&wb35_driver);
379}
380
381static void __exit wb35_exit(void)
382{
383 usb_deregister(&wb35_driver);
384}
385
386module_init(wb35_init);
387module_exit(wb35_exit);
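The wbusb.c restructuring above moves the registration boilerplate to the end of the file: a usb_device_id table exported with MODULE_DEVICE_TABLE() so hotplug can match the hardware, a struct usb_driver tying it to probe/disconnect, and module_init()/module_exit() wrappers around usb_register()/usb_deregister(). A minimal self-contained sketch of that boilerplate (the demo names and the 0x1234/0x5678 IDs are placeholders, not the w35und values):

#include <linux/module.h>
#include <linux/usb.h>

static struct usb_device_id demo_table[] = {
	{ USB_DEVICE(0x1234, 0x5678) },	/* placeholder VID/PID */
	{ }				/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, demo_table);

static int demo_probe(struct usb_interface *intf,
		      const struct usb_device_id *id)
{
	return 0;			/* claim the interface */
}

static void demo_disconnect(struct usb_interface *intf)
{
}

static struct usb_driver demo_driver = {
	.name		= "demo_usb",
	.id_table	= demo_table,
	.probe		= demo_probe,
	.disconnect	= demo_disconnect,
};

static int __init demo_init(void)
{
	return usb_register(&demo_driver);
}

static void __exit demo_exit(void)
{
	usb_deregister(&demo_driver);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");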
diff --git a/drivers/staging/winbond/mds.c b/drivers/staging/winbond/mds.c
index 8ce6389c4135..f1de813f9c76 100644
--- a/drivers/staging/winbond/mds.c
+++ b/drivers/staging/winbond/mds.c
@@ -40,7 +40,7 @@ Mds_Tx(PADAPTER Adapter)
40 PMDS pMds = &Adapter->Mds; 40 PMDS pMds = &Adapter->Mds;
41 DESCRIPTOR TxDes; 41 DESCRIPTOR TxDes;
42 PDESCRIPTOR pTxDes = &TxDes; 42 PDESCRIPTOR pTxDes = &TxDes;
43 PUCHAR XmitBufAddress; 43 u8 *XmitBufAddress;
44 u16 XmitBufSize, PacketSize, stmp, CurrentSize, FragmentThreshold; 44 u16 XmitBufSize, PacketSize, stmp, CurrentSize, FragmentThreshold;
45 u8 FillIndex, TxDesIndex, FragmentCount, FillCount; 45 u8 FillIndex, TxDesIndex, FragmentCount, FillCount;
46 unsigned char BufferFilled = FALSE, MICAdd = 0; 46 unsigned char BufferFilled = FALSE, MICAdd = 0;
@@ -90,7 +90,7 @@ Mds_Tx(PADAPTER Adapter)
90 BufferFilled = TRUE; 90 BufferFilled = TRUE;
91 91
92 /* Leaves first u8 intact */ 92 /* Leaves first u8 intact */
93 memset((PUCHAR)pTxDes + 1, 0, sizeof(DESCRIPTOR) - 1); 93 memset((u8 *)pTxDes + 1, 0, sizeof(DESCRIPTOR) - 1);
94 94
95 TxDesIndex = pMds->TxDesIndex;//Get the current ID 95 TxDesIndex = pMds->TxDesIndex;//Get the current ID
96 pTxDes->Descriptor_ID = TxDesIndex; 96 pTxDes->Descriptor_ID = TxDesIndex;
@@ -229,10 +229,10 @@ Mds_SendComplete(PADAPTER Adapter, PT02_DESCRIPTOR pT02)
229} 229}
230 230
231void 231void
232Mds_HeaderCopy(PADAPTER Adapter, PDESCRIPTOR pDes, PUCHAR TargetBuffer) 232Mds_HeaderCopy(PADAPTER Adapter, PDESCRIPTOR pDes, u8 *TargetBuffer)
233{ 233{
234 PMDS pMds = &Adapter->Mds; 234 PMDS pMds = &Adapter->Mds;
235 PUCHAR src_buffer = pDes->buffer_address[0];//931130.5.g 235 u8 *src_buffer = pDes->buffer_address[0];//931130.5.g
236 PT00_DESCRIPTOR pT00; 236 PT00_DESCRIPTOR pT00;
237 PT01_DESCRIPTOR pT01; 237 PT01_DESCRIPTOR pT01;
238 u16 stmp; 238 u16 stmp;
@@ -276,7 +276,7 @@ Mds_HeaderCopy(PADAPTER Adapter, PDESCRIPTOR pDes, PUCHAR TargetBuffer)
276 // 276 //
277 // Set tx rate 277 // Set tx rate
278 // 278 //
279 stmp = *(PUSHORT)(TargetBuffer+30); // 2n alignment address 279 stmp = *(u16 *)(TargetBuffer+30); // 2n alignment address
280 280
281 //Use basic rate 281 //Use basic rate
282 ctmp1 = ctmpf = CURRENT_TX_RATE_FOR_MNG; 282 ctmp1 = ctmpf = CURRENT_TX_RATE_FOR_MNG;
@@ -326,11 +326,13 @@ Mds_HeaderCopy(PADAPTER Adapter, PDESCRIPTOR pDes, PUCHAR TargetBuffer)
326 326
327// The function return the 4n size of usb pk 327// The function return the 4n size of usb pk
328u16 328u16
329Mds_BodyCopy(PADAPTER Adapter, PDESCRIPTOR pDes, PUCHAR TargetBuffer) 329Mds_BodyCopy(PADAPTER Adapter, PDESCRIPTOR pDes, u8 *TargetBuffer)
330{ 330{
331 PT00_DESCRIPTOR pT00; 331 PT00_DESCRIPTOR pT00;
332 PMDS pMds = &Adapter->Mds; 332 PMDS pMds = &Adapter->Mds;
333 PUCHAR buffer, src_buffer, pctmp; 333 u8 *buffer;
334 u8 *src_buffer;
335 u8 *pctmp;
334 u16 Size = 0; 336 u16 Size = 0;
335 u16 SizeLeft, CopySize, CopyLeft, stmp; 337 u16 SizeLeft, CopySize, CopyLeft, stmp;
336 u8 buf_index, FragmentCount = 0; 338 u8 buf_index, FragmentCount = 0;
@@ -354,7 +356,7 @@ Mds_BodyCopy(PADAPTER Adapter, PDESCRIPTOR pDes, PUCHAR TargetBuffer)
354 SizeLeft -= CopySize; 356 SizeLeft -= CopySize;
355 357
356 // 1 Byte operation 358 // 1 Byte operation
357 pctmp = (PUCHAR)( buffer + 8 + DOT_11_SEQUENCE_OFFSET ); 359 pctmp = (u8 *)( buffer + 8 + DOT_11_SEQUENCE_OFFSET );
358 *pctmp &= 0xf0; 360 *pctmp &= 0xf0;
359 *pctmp |= FragmentCount;//931130.5.m 361 *pctmp |= FragmentCount;//931130.5.m
360 if( !FragmentCount ) 362 if( !FragmentCount )
@@ -379,7 +381,7 @@ Mds_BodyCopy(PADAPTER Adapter, PDESCRIPTOR pDes, PUCHAR TargetBuffer)
379 buf_index++; 381 buf_index++;
380 buf_index %= MAX_DESCRIPTOR_BUFFER_INDEX; 382 buf_index %= MAX_DESCRIPTOR_BUFFER_INDEX;
381 } else { 383 } else {
382 PUCHAR pctmp = pDes->buffer_address[buf_index]; 384 u8 *pctmp = pDes->buffer_address[buf_index];
383 pctmp += CopySize; 385 pctmp += CopySize;
384 pDes->buffer_address[buf_index] = pctmp; 386 pDes->buffer_address[buf_index] = pctmp;
385 pDes->buffer_size[buf_index] -= CopySize; 387 pDes->buffer_size[buf_index] -= CopySize;
@@ -419,7 +421,7 @@ Mds_BodyCopy(PADAPTER Adapter, PDESCRIPTOR pDes, PUCHAR TargetBuffer)
419 421
420 pT00->T00_last_mpdu = 1; 422 pT00->T00_last_mpdu = 1;
421 pT00->T00_IsLastMpdu = 1; 423 pT00->T00_IsLastMpdu = 1;
422 buffer = (PUCHAR)pT00 + 8; // +8 for USB hdr 424 buffer = (u8 *)pT00 + 8; // +8 for USB hdr
423 buffer[1] &= ~0x04; // Clear more frag bit of 802.11 frame control 425 buffer[1] &= ~0x04; // Clear more frag bit of 802.11 frame control
424 pDes->FragmentCount = FragmentCount; // Update the correct fragment number 426 pDes->FragmentCount = FragmentCount; // Update the correct fragment number
425 return Size; 427 return Size;
@@ -427,7 +429,7 @@ Mds_BodyCopy(PADAPTER Adapter, PDESCRIPTOR pDes, PUCHAR TargetBuffer)
427 429
428 430
429void 431void
430Mds_DurationSet( PADAPTER Adapter, PDESCRIPTOR pDes, PUCHAR buffer ) 432Mds_DurationSet( PADAPTER Adapter, PDESCRIPTOR pDes, u8 *buffer )
431{ 433{
432 PT00_DESCRIPTOR pT00; 434 PT00_DESCRIPTOR pT00;
433 PT01_DESCRIPTOR pT01; 435 PT01_DESCRIPTOR pT01;
@@ -435,7 +437,7 @@ Mds_DurationSet( PADAPTER Adapter, PDESCRIPTOR pDes, PUCHAR buffer )
435 u8 Rate, i; 437 u8 Rate, i;
436 unsigned char CTS_on = FALSE, RTS_on = FALSE; 438 unsigned char CTS_on = FALSE, RTS_on = FALSE;
437 PT00_DESCRIPTOR pNextT00; 439 PT00_DESCRIPTOR pNextT00;
438 u16 BodyLen; 440 u16 BodyLen = 0;
439 unsigned char boGroupAddr = FALSE; 441 unsigned char boGroupAddr = FALSE;
440 442
441 443
@@ -574,7 +576,7 @@ Mds_DurationSet( PADAPTER Adapter, PDESCRIPTOR pDes, PUCHAR buffer )
574 DEFAULT_SIFSTIME*3 ); 576 DEFAULT_SIFSTIME*3 );
575 } 577 }
576 578
577 ((PUSHORT)buffer)[5] = cpu_to_le16(Duration);// 4 USHOR for skip 8B USB, 2USHORT=FC + Duration 579 ((u16 *)buffer)[5] = cpu_to_le16(Duration);// 4 USHOR for skip 8B USB, 2USHORT=FC + Duration
578 580
579 //----20061009 add by anson's endian 581 //----20061009 add by anson's endian
580 pNextT00->value = cpu_to_le32(pNextT00->value); 582 pNextT00->value = cpu_to_le32(pNextT00->value);
@@ -615,7 +617,7 @@ Mds_DurationSet( PADAPTER Adapter, PDESCRIPTOR pDes, PUCHAR buffer )
615 } 617 }
616 } 618 }
617 619
618 ((PUSHORT)buffer)[5] = cpu_to_le16(Duration);// 4 USHOR for skip 8B USB, 2USHORT=FC + Duration 620 ((u16 *)buffer)[5] = cpu_to_le16(Duration);// 4 USHOR for skip 8B USB, 2USHORT=FC + Duration
619 pT00->value = cpu_to_le32(pT00->value); 621 pT00->value = cpu_to_le32(pT00->value);
620 pT01->value = cpu_to_le32(pT01->value); 622 pT01->value = cpu_to_le32(pT01->value);
621 //--end 20061009 add 623 //--end 20061009 add
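The Mds_DurationSet() hunks keep the cast-based store and only change the cast type; the field written is the 16-bit duration at byte offset 10 of the Tx buffer (8-byte USB header plus the 2-byte frame control, per the comment above), little-endian on the wire. Where alignment of that offset cannot be assumed, the kernel's unaligned helpers are a common alternative to the cast; a small sketch under that assumption (the demo_* name is illustrative):

#include <linux/types.h>
#include <asm/unaligned.h>

/* Write the 16-bit duration field at byte offset 10 of the Tx buffer
 * (same bytes as ((u16 *)buffer)[5] = cpu_to_le16(duration) when the
 * buffer is 2-byte aligned). */
static void demo_set_duration(u8 *buffer, u16 duration)
{
	put_unaligned_le16(duration, buffer + 10);
}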
diff --git a/drivers/staging/winbond/mds_f.h b/drivers/staging/winbond/mds_f.h
index 651188be1065..7a682d4cfbdc 100644
--- a/drivers/staging/winbond/mds_f.h
+++ b/drivers/staging/winbond/mds_f.h
@@ -1,9 +1,9 @@
1unsigned char Mds_initial( PADAPTER Adapter ); 1unsigned char Mds_initial( PADAPTER Adapter );
2void Mds_Destroy( PADAPTER Adapter ); 2void Mds_Destroy( PADAPTER Adapter );
3void Mds_Tx( PADAPTER Adapter ); 3void Mds_Tx( PADAPTER Adapter );
4void Mds_HeaderCopy( PADAPTER Adapter, PDESCRIPTOR pDes, PUCHAR TargetBuffer ); 4void Mds_HeaderCopy( PADAPTER Adapter, PDESCRIPTOR pDes, u8 *TargetBuffer );
5u16 Mds_BodyCopy( PADAPTER Adapter, PDESCRIPTOR pDes, PUCHAR TargetBuffer ); 5u16 Mds_BodyCopy( PADAPTER Adapter, PDESCRIPTOR pDes, u8 *TargetBuffer );
6void Mds_DurationSet( PADAPTER Adapter, PDESCRIPTOR pDes, PUCHAR TargetBuffer ); 6void Mds_DurationSet( PADAPTER Adapter, PDESCRIPTOR pDes, u8 *TargetBuffer );
7void Mds_SendComplete( PADAPTER Adapter, PT02_DESCRIPTOR pT02 ); 7void Mds_SendComplete( PADAPTER Adapter, PT02_DESCRIPTOR pT02 );
8void Mds_MpduProcess( PADAPTER Adapter, PDESCRIPTOR pRxDes ); 8void Mds_MpduProcess( PADAPTER Adapter, PDESCRIPTOR pRxDes );
9void Mds_reset_descriptor( PADAPTER Adapter ); 9void Mds_reset_descriptor( PADAPTER Adapter );
diff --git a/drivers/staging/winbond/mds_s.h b/drivers/staging/winbond/mds_s.h
index 4738279d5f39..9df2e0936bf8 100644
--- a/drivers/staging/winbond/mds_s.h
+++ b/drivers/staging/winbond/mds_s.h
@@ -86,7 +86,7 @@ typedef struct _MDS
86{ 86{
87 // For Tx usage 87 // For Tx usage
88 u8 TxOwner[ ((MAX_USB_TX_BUFFER_NUMBER + 3) & ~0x03) ]; 88 u8 TxOwner[ ((MAX_USB_TX_BUFFER_NUMBER + 3) & ~0x03) ];
89 PUCHAR pTxBuffer; 89 u8 *pTxBuffer;
90 u16 TxBufferSize[ ((MAX_USB_TX_BUFFER_NUMBER + 1) & ~0x01) ]; 90 u16 TxBufferSize[ ((MAX_USB_TX_BUFFER_NUMBER + 1) & ~0x01) ];
91 u8 TxDesFrom[ ((MAX_USB_TX_DESCRIPTOR + 3) & ~0x03) ];//931130.4.u // 1: MLME 2: NDIS control 3: NDIS data 91 u8 TxDesFrom[ ((MAX_USB_TX_DESCRIPTOR + 3) & ~0x03) ];//931130.4.u // 1: MLME 2: NDIS control 3: NDIS data
92 u8 TxCountInBuffer[ ((MAX_USB_TX_DESCRIPTOR + 3) & ~0x03) ]; // 20060928 92 u8 TxCountInBuffer[ ((MAX_USB_TX_DESCRIPTOR + 3) & ~0x03) ]; // 20060928
@@ -103,7 +103,7 @@ typedef struct _MDS
103 u16 TxResult[ ((MAX_USB_TX_DESCRIPTOR + 1) & ~0x01) ];//Collect the sending result of Mpdu 103 u16 TxResult[ ((MAX_USB_TX_DESCRIPTOR + 1) & ~0x01) ];//Collect the sending result of Mpdu
104 104
105 u8 MicRedundant[8]; // For tmp use 105 u8 MicRedundant[8]; // For tmp use
106 PUCHAR MicWriteAddress[2]; //The start address to fill the Mic, use 2 point due to Mic maybe fragment 106 u8 *MicWriteAddress[2]; //The start address to fill the Mic, use 2 point due to Mic maybe fragment
107 107
108 u16 MicWriteSize[2]; //931130.4.x 108 u16 MicWriteSize[2]; //931130.4.x
109 109
@@ -144,7 +144,7 @@ typedef struct _MDS
144 144
145typedef struct _RxBuffer 145typedef struct _RxBuffer
146{ 146{
147 PUCHAR pBufferAddress; // Pointer the received data buffer. 147 u8 * pBufferAddress; // Pointer the received data buffer.
148 u16 BufferSize; 148 u16 BufferSize;
149 u8 RESERVED; 149 u8 RESERVED;
150 u8 BufferIndex;// Only 1 byte 150 u8 BufferIndex;// Only 1 byte
@@ -176,7 +176,7 @@ typedef struct _RXLAYER1
176 ///////////////////////////////////////////////////////////////////////////////////////////// 176 /////////////////////////////////////////////////////////////////////////////////////////////
177 // For brand-new Rx system 177 // For brand-new Rx system
178 u8 ReservedBuffer[ 2400 ];//If Buffer ID is reserved one, it must copy the data into this area 178 u8 ReservedBuffer[ 2400 ];//If Buffer ID is reserved one, it must copy the data into this area
179 PUCHAR ReservedBufferPoint;// Point to the next availabe address of reserved buffer 179 u8 *ReservedBufferPoint;// Point to the next availabe address of reserved buffer
180 180
181}RXLAYER1, * PRXLAYER1; 181}RXLAYER1, * PRXLAYER1;
182 182
diff --git a/drivers/staging/winbond/mlme_s.h b/drivers/staging/winbond/mlme_s.h
index 58094f61c032..039fd408ba62 100644
--- a/drivers/staging/winbond/mlme_s.h
+++ b/drivers/staging/winbond/mlme_s.h
@@ -125,12 +125,12 @@
125typedef struct _MLME_FRAME 125typedef struct _MLME_FRAME
126{ 126{
127 //NDIS_PACKET MLME_Packet; 127 //NDIS_PACKET MLME_Packet;
128 PCHAR pMMPDU; 128 s8 * pMMPDU;
129 u16 len; 129 u16 len;
130 u8 DataType; 130 u8 DataType;
131 u8 IsInUsed; 131 u8 IsInUsed;
132 132
133 OS_SPIN_LOCK MLMESpinLock; 133 spinlock_t MLMESpinLock;
134 134
135 u8 TxMMPDU[MAX_NUM_TX_MMPDU][MAX_MMPDU_SIZE]; 135 u8 TxMMPDU[MAX_NUM_TX_MMPDU][MAX_MMPDU_SIZE];
136 u8 TxMMPDUInUse[ (MAX_NUM_TX_MMPDU+3) & ~0x03 ]; 136 u8 TxMMPDUInUse[ (MAX_NUM_TX_MMPDU+3) & ~0x03 ];
diff --git a/drivers/staging/winbond/mlmetxrx.c b/drivers/staging/winbond/mlmetxrx.c
index 46b091e96794..e8533b8d1976 100644
--- a/drivers/staging/winbond/mlmetxrx.c
+++ b/drivers/staging/winbond/mlmetxrx.c
@@ -113,13 +113,13 @@ MLME_GetNextPacket(PADAPTER Adapter, PDESCRIPTOR pDes)
113 pDes->Type = Adapter->sMlmeFrame.DataType; 113 pDes->Type = Adapter->sMlmeFrame.DataType;
114} 114}
115 115
116void MLMEfreeMMPDUBuffer(PWB32_ADAPTER Adapter, PCHAR pData) 116void MLMEfreeMMPDUBuffer(PWB32_ADAPTER Adapter, s8 *pData)
117{ 117{
118 int i; 118 int i;
119 119
120 // Reclaim the data buffer 120 // Reclaim the data buffer
121 for (i = 0; i < MAX_NUM_TX_MMPDU; i++) { 121 for (i = 0; i < MAX_NUM_TX_MMPDU; i++) {
122 if (pData == (PCHAR)&(Adapter->sMlmeFrame.TxMMPDU[i])) 122 if (pData == (s8 *)&(Adapter->sMlmeFrame.TxMMPDU[i]))
123 break; 123 break;
124 } 124 }
125 if (Adapter->sMlmeFrame.TxMMPDUInUse[i]) 125 if (Adapter->sMlmeFrame.TxMMPDUInUse[i])
diff --git a/drivers/staging/winbond/mlmetxrx_f.h b/drivers/staging/winbond/mlmetxrx_f.h
index d74e225be215..24cd5f308d9f 100644
--- a/drivers/staging/winbond/mlmetxrx_f.h
+++ b/drivers/staging/winbond/mlmetxrx_f.h
@@ -20,7 +20,7 @@ MLMEGetMMPDUBuffer(
20 PWB32_ADAPTER Adapter 20 PWB32_ADAPTER Adapter
21 ); 21 );
22 22
23void MLMEfreeMMPDUBuffer( PWB32_ADAPTER Adapter, PCHAR pData); 23void MLMEfreeMMPDUBuffer( PWB32_ADAPTER Adapter, s8 * pData);
24 24
25void MLME_GetNextPacket( PADAPTER Adapter, PDESCRIPTOR pDes ); 25void MLME_GetNextPacket( PADAPTER Adapter, PDESCRIPTOR pDes );
26u8 MLMESendFrame( PWB32_ADAPTER Adapter, 26u8 MLMESendFrame( PWB32_ADAPTER Adapter,
@@ -42,7 +42,7 @@ MLMERcvFrame(
42void 42void
43MLMEReturnPacket( 43MLMEReturnPacket(
44 PWB32_ADAPTER Adapter, 44 PWB32_ADAPTER Adapter,
45 PUCHAR pRxBufer 45 u8 * pRxBufer
46 ); 46 );
47#ifdef _IBSS_BEACON_SEQ_STICK_ 47#ifdef _IBSS_BEACON_SEQ_STICK_
48s8 SendBCNullData(PWB32_ADAPTER Adapter, u16 wIdx); 48s8 SendBCNullData(PWB32_ADAPTER Adapter, u16 wIdx);
diff --git a/drivers/staging/winbond/reg.c b/drivers/staging/winbond/reg.c
index b475c7a7c424..57af5b831509 100644
--- a/drivers/staging/winbond/reg.c
+++ b/drivers/staging/winbond/reg.c
@@ -922,16 +922,16 @@ Uxx_ReadEthernetAddress( phw_data_t pHwData )
922 // Only unplug and plug again can make hardware read EEPROM again. 20060727 922 // Only unplug and plug again can make hardware read EEPROM again. 20060727
923 Wb35Reg_WriteSync( pHwData, 0x03b4, 0x08000000 ); // Start EEPROM access + Read + address(0x0d) 923 Wb35Reg_WriteSync( pHwData, 0x03b4, 0x08000000 ); // Start EEPROM access + Read + address(0x0d)
924 Wb35Reg_ReadSync( pHwData, 0x03b4, &ltmp ); 924 Wb35Reg_ReadSync( pHwData, 0x03b4, &ltmp );
925 *(PUSHORT)pHwData->PermanentMacAddress = cpu_to_le16((u16)ltmp); //20060926 anson's endian 925 *(u16 *)pHwData->PermanentMacAddress = cpu_to_le16((u16)ltmp); //20060926 anson's endian
926 Wb35Reg_WriteSync( pHwData, 0x03b4, 0x08010000 ); // Start EEPROM access + Read + address(0x0d) 926 Wb35Reg_WriteSync( pHwData, 0x03b4, 0x08010000 ); // Start EEPROM access + Read + address(0x0d)
927 Wb35Reg_ReadSync( pHwData, 0x03b4, &ltmp ); 927 Wb35Reg_ReadSync( pHwData, 0x03b4, &ltmp );
928 *(PUSHORT)(pHwData->PermanentMacAddress + 2) = cpu_to_le16((u16)ltmp); //20060926 anson's endian 928 *(u16 *)(pHwData->PermanentMacAddress + 2) = cpu_to_le16((u16)ltmp); //20060926 anson's endian
929 Wb35Reg_WriteSync( pHwData, 0x03b4, 0x08020000 ); // Start EEPROM access + Read + address(0x0d) 929 Wb35Reg_WriteSync( pHwData, 0x03b4, 0x08020000 ); // Start EEPROM access + Read + address(0x0d)
930 Wb35Reg_ReadSync( pHwData, 0x03b4, &ltmp ); 930 Wb35Reg_ReadSync( pHwData, 0x03b4, &ltmp );
931 *(PUSHORT)(pHwData->PermanentMacAddress + 4) = cpu_to_le16((u16)ltmp); //20060926 anson's endian 931 *(u16 *)(pHwData->PermanentMacAddress + 4) = cpu_to_le16((u16)ltmp); //20060926 anson's endian
932 *(PUSHORT)(pHwData->PermanentMacAddress + 6) = 0; 932 *(u16 *)(pHwData->PermanentMacAddress + 6) = 0;
933 Wb35Reg_WriteSync( pHwData, 0x03e8, cpu_to_le32(*(PULONG)pHwData->PermanentMacAddress) ); //20060926 anson's endian 933 Wb35Reg_WriteSync( pHwData, 0x03e8, cpu_to_le32(*(u32 *)pHwData->PermanentMacAddress) ); //20060926 anson's endian
934 Wb35Reg_WriteSync( pHwData, 0x03ec, cpu_to_le32(*(PULONG)(pHwData->PermanentMacAddress+4)) ); //20060926 anson's endian 934 Wb35Reg_WriteSync( pHwData, 0x03ec, cpu_to_le32(*(u32 *)(pHwData->PermanentMacAddress+4)) ); //20060926 anson's endian
935} 935}
936 936
937 937
@@ -1038,7 +1038,7 @@ void
1038RFSynthesizer_initial(phw_data_t pHwData) 1038RFSynthesizer_initial(phw_data_t pHwData)
1039{ 1039{
1040 u32 altmp[32]; 1040 u32 altmp[32];
1041 PULONG pltmp = altmp; 1041 u32 * pltmp = altmp;
1042 u32 ltmp; 1042 u32 ltmp;
1043 u8 number=0x00; // The number of register vale 1043 u8 number=0x00; // The number of register vale
1044 u8 i; 1044 u8 i;
@@ -2358,11 +2358,11 @@ void Mxx_initial( phw_data_t pHwData )
2358 pltmp[2] = pWb35Reg->M2C_MacControl; 2358 pltmp[2] = pWb35Reg->M2C_MacControl;
2359 2359
2360 // M30 BSSID 2360 // M30 BSSID
2361 pltmp[3] = *(PULONG)pHwData->bssid; 2361 pltmp[3] = *(u32 *)pHwData->bssid;
2362 2362
2363 // M34 2363 // M34
2364 pHwData->AID = DEFAULT_AID; 2364 pHwData->AID = DEFAULT_AID;
2365 tmp = *(PUSHORT)(pHwData->bssid+4); 2365 tmp = *(u16 *)(pHwData->bssid+4);
2366 tmp |= DEFAULT_AID << 16; 2366 tmp |= DEFAULT_AID << 16;
2367 pltmp[4] = tmp; 2367 pltmp[4] = tmp;
2368 2368
@@ -2428,7 +2428,7 @@ void GetTxVgaFromEEPROM( phw_data_t pHwData )
2428{ 2428{
2429 u32 i, j, ltmp; 2429 u32 i, j, ltmp;
2430 u16 Value[MAX_TXVGA_EEPROM]; 2430 u16 Value[MAX_TXVGA_EEPROM];
2431 PUCHAR pctmp; 2431 u8 *pctmp;
2432 u8 ctmp=0; 2432 u8 ctmp=0;
2433 2433
2434 // Get the entire TxVga setting in EEPROM 2434 // Get the entire TxVga setting in EEPROM
@@ -2441,7 +2441,7 @@ void GetTxVgaFromEEPROM( phw_data_t pHwData )
2441 } 2441 }
2442 2442
2443 // Adjust the filed which fills with reserved value. 2443 // Adjust the filed which fills with reserved value.
2444 pctmp = (PUCHAR)Value; 2444 pctmp = (u8 *)Value;
2445 for( i=0; i<(MAX_TXVGA_EEPROM*2); i++ ) 2445 for( i=0; i<(MAX_TXVGA_EEPROM*2); i++ )
2446 { 2446 {
2447 if( pctmp[i] != 0xff ) 2447 if( pctmp[i] != 0xff )
@@ -2480,7 +2480,7 @@ void GetTxVgaFromEEPROM( phw_data_t pHwData )
2480// This function will use default TxVgaSettingInEEPROM data to calculate new TxVga. 2480// This function will use default TxVgaSettingInEEPROM data to calculate new TxVga.
2481void EEPROMTxVgaAdjust( phw_data_t pHwData ) // 20060619.5 Add 2481void EEPROMTxVgaAdjust( phw_data_t pHwData ) // 20060619.5 Add
2482{ 2482{
2483 PUCHAR pTxVga = pHwData->TxVgaSettingInEEPROM; 2483 u8 * pTxVga = pHwData->TxVgaSettingInEEPROM;
2484 s16 i, stmp; 2484 s16 i, stmp;
2485 2485
2486 //-- 2.4G -- 20060704.2 Request from Tiger 2486 //-- 2.4G -- 20060704.2 Request from Tiger
diff --git a/drivers/staging/winbond/sme_api.c b/drivers/staging/winbond/sme_api.c
index 40e93b7600eb..31c9673ea865 100644
--- a/drivers/staging/winbond/sme_api.c
+++ b/drivers/staging/winbond/sme_api.c
@@ -10,4 +10,5 @@
10s8 sme_get_rssi(void *pcore_data, s32 *prssi) 10s8 sme_get_rssi(void *pcore_data, s32 *prssi)
11{ 11{
12 BUG(); 12 BUG();
13 return 0;
13} 14}
diff --git a/drivers/staging/winbond/sme_api.h b/drivers/staging/winbond/sme_api.h
index 016b225ca4a4..745eb376bc70 100644
--- a/drivers/staging/winbond/sme_api.h
+++ b/drivers/staging/winbond/sme_api.h
@@ -208,7 +208,7 @@ s8 sme_set_tx_antenna(void *pcore_data, u32 TxAntenna);
208s8 sme_set_IBSS_chan(void *pcore_data, ChanInfo chan); 208s8 sme_set_IBSS_chan(void *pcore_data, ChanInfo chan);
209 209
210//20061108 WPS 210//20061108 WPS
211s8 sme_set_IE_append(void *pcore_data, PUCHAR buffer, u16 buf_len); 211s8 sme_set_IE_append(void *pcore_data, u8 *buffer, u16 buf_len);
212 212
213 213
214 214
diff --git a/drivers/staging/winbond/wbhal.c b/drivers/staging/winbond/wbhal.c
index daf442247558..5d68ecec34c7 100644
--- a/drivers/staging/winbond/wbhal.c
+++ b/drivers/staging/winbond/wbhal.c
@@ -1,13 +1,13 @@
1#include "os_common.h" 1#include "os_common.h"
2 2
3void hal_get_ethernet_address( phw_data_t pHwData, PUCHAR current_address ) 3void hal_get_ethernet_address( phw_data_t pHwData, u8 *current_address )
4{ 4{
5 if( pHwData->SurpriseRemove ) return; 5 if( pHwData->SurpriseRemove ) return;
6 6
7 memcpy( current_address, pHwData->CurrentMacAddress, ETH_LENGTH_OF_ADDRESS ); 7 memcpy( current_address, pHwData->CurrentMacAddress, ETH_LENGTH_OF_ADDRESS );
8} 8}
9 9
10void hal_set_ethernet_address( phw_data_t pHwData, PUCHAR current_address ) 10void hal_set_ethernet_address( phw_data_t pHwData, u8 *current_address )
11{ 11{
12 u32 ltmp[2]; 12 u32 ltmp[2];
13 13
@@ -15,13 +15,13 @@ void hal_set_ethernet_address( phw_data_t pHwData, PUCHAR current_address )
15 15
16 memcpy( pHwData->CurrentMacAddress, current_address, ETH_LENGTH_OF_ADDRESS ); 16 memcpy( pHwData->CurrentMacAddress, current_address, ETH_LENGTH_OF_ADDRESS );
17 17
18 ltmp[0]= cpu_to_le32( *(PULONG)pHwData->CurrentMacAddress ); 18 ltmp[0]= cpu_to_le32( *(u32 *)pHwData->CurrentMacAddress );
19 ltmp[1]= cpu_to_le32( *(PULONG)(pHwData->CurrentMacAddress + 4) ) & 0xffff; 19 ltmp[1]= cpu_to_le32( *(u32 *)(pHwData->CurrentMacAddress + 4) ) & 0xffff;
20 20
21 Wb35Reg_BurstWrite( pHwData, 0x03e8, ltmp, 2, AUTO_INCREMENT ); 21 Wb35Reg_BurstWrite( pHwData, 0x03e8, ltmp, 2, AUTO_INCREMENT );
22} 22}
23 23
24void hal_get_permanent_address( phw_data_t pHwData, PUCHAR pethernet_address ) 24void hal_get_permanent_address( phw_data_t pHwData, u8 *pethernet_address )
25{ 25{
26 if( pHwData->SurpriseRemove ) return; 26 if( pHwData->SurpriseRemove ) return;
27 27
@@ -89,7 +89,7 @@ void hal_halt(phw_data_t pHwData, void *ppa_data)
89} 89}
90 90
91//--------------------------------------------------------------------------------------------------- 91//---------------------------------------------------------------------------------------------------
92void hal_set_rates(phw_data_t pHwData, PUCHAR pbss_rates, 92void hal_set_rates(phw_data_t pHwData, u8 *pbss_rates,
93 u8 length, unsigned char basic_rate_set) 93 u8 length, unsigned char basic_rate_set)
94{ 94{
95 PWB35REG pWb35Reg = &pHwData->Wb35Reg; 95 PWB35REG pWb35Reg = &pHwData->Wb35Reg;
@@ -158,13 +158,13 @@ void hal_set_rates(phw_data_t pHwData, PUCHAR pbss_rates,
158 // Fill data into support rate until buffer full 158 // Fill data into support rate until buffer full
159 //---20060926 add by anson's endian 159 //---20060926 add by anson's endian
160 for (i=0; i<4; i++) 160 for (i=0; i<4; i++)
161 *(PULONG)(SupportedRate+(i<<2)) = cpu_to_le32( *(PULONG)(SupportedRate+(i<<2)) ); 161 *(u32 *)(SupportedRate+(i<<2)) = cpu_to_le32( *(u32 *)(SupportedRate+(i<<2)) );
162 //--- end 20060926 add by anson's endian 162 //--- end 20060926 add by anson's endian
163 Wb35Reg_BurstWrite( pHwData,0x087c, (PULONG)SupportedRate, 4, AUTO_INCREMENT ); 163 Wb35Reg_BurstWrite( pHwData,0x087c, (u32 *)SupportedRate, 4, AUTO_INCREMENT );
164 pWb35Reg->M7C_MacControl = ((PULONG)SupportedRate)[0]; 164 pWb35Reg->M7C_MacControl = ((u32 *)SupportedRate)[0];
165 pWb35Reg->M80_MacControl = ((PULONG)SupportedRate)[1]; 165 pWb35Reg->M80_MacControl = ((u32 *)SupportedRate)[1];
166 pWb35Reg->M84_MacControl = ((PULONG)SupportedRate)[2]; 166 pWb35Reg->M84_MacControl = ((u32 *)SupportedRate)[2];
167 pWb35Reg->M88_MacControl = ((PULONG)SupportedRate)[3]; 167 pWb35Reg->M88_MacControl = ((u32 *)SupportedRate)[3];
168 168
169 // Fill length 169 // Fill length
170 tmp = Count1<<28 | Count2<<24; 170 tmp = Count1<<28 | Count2<<24;
@@ -206,7 +206,7 @@ void hal_set_current_channel_ex( phw_data_t pHwData, ChanInfo channel )
206 pWb35Reg->M28_MacControl &= ~0xff; // Clean channel information field 206 pWb35Reg->M28_MacControl &= ~0xff; // Clean channel information field
207 pWb35Reg->M28_MacControl |= channel.ChanNo; 207 pWb35Reg->M28_MacControl |= channel.ChanNo;
208 Wb35Reg_WriteWithCallbackValue( pHwData, 0x0828, pWb35Reg->M28_MacControl, 208 Wb35Reg_WriteWithCallbackValue( pHwData, 0x0828, pWb35Reg->M28_MacControl,
209 (PCHAR)&channel, sizeof(ChanInfo)); 209 (s8 *)&channel, sizeof(ChanInfo));
210} 210}
211//--------------------------------------------------------------------------------------------------- 211//---------------------------------------------------------------------------------------------------
212void hal_set_current_channel( phw_data_t pHwData, ChanInfo channel ) 212void hal_set_current_channel( phw_data_t pHwData, ChanInfo channel )
@@ -277,7 +277,7 @@ void hal_set_accept_beacon( phw_data_t pHwData, u8 enable )
277 Wb35Reg_Write( pHwData, 0x0800, pWb35Reg->M00_MacControl ); 277 Wb35Reg_Write( pHwData, 0x0800, pWb35Reg->M00_MacControl );
278} 278}
279//--------------------------------------------------------------------------------------------------- 279//---------------------------------------------------------------------------------------------------
280void hal_set_multicast_address( phw_data_t pHwData, PUCHAR address, u8 number ) 280void hal_set_multicast_address( phw_data_t pHwData, u8 *address, u8 number )
281{ 281{
282 PWB35REG pWb35Reg = &pHwData->Wb35Reg; 282 PWB35REG pWb35Reg = &pHwData->Wb35Reg;
283 u8 Byte, Bit; 283 u8 Byte, Bit;
@@ -297,7 +297,7 @@ void hal_set_multicast_address( phw_data_t pHwData, PUCHAR address, u8 number )
297 } 297 }
298 298
299 // Updating register 299 // Updating register
300 Wb35Reg_BurstWrite( pHwData, 0x0804, (PULONG)pWb35Reg->Multicast, 2, AUTO_INCREMENT ); 300 Wb35Reg_BurstWrite( pHwData, 0x0804, (u32 *)pWb35Reg->Multicast, 2, AUTO_INCREMENT );
301} 301}
302//--------------------------------------------------------------------------------------------------- 302//---------------------------------------------------------------------------------------------------
303u8 hal_get_accept_beacon( phw_data_t pHwData ) 303u8 hal_get_accept_beacon( phw_data_t pHwData )
@@ -806,7 +806,7 @@ u8 hal_get_hw_radio_off( phw_data_t pHwData )
806 } 806 }
807} 807}
808 808
809unsigned char hal_get_dxx_reg( phw_data_t pHwData, u16 number, PULONG pValue ) 809unsigned char hal_get_dxx_reg( phw_data_t pHwData, u16 number, u32 * pValue )
810{ 810{
811 if( number < 0x1000 ) 811 if( number < 0x1000 )
812 number += 0x1000; 812 number += 0x1000;
diff --git a/drivers/staging/winbond/wbhal_f.h b/drivers/staging/winbond/wbhal_f.h
index fe25f97af724..ea9531ac8474 100644
--- a/drivers/staging/winbond/wbhal_f.h
+++ b/drivers/staging/winbond/wbhal_f.h
@@ -16,23 +16,23 @@
16//==================================================================================== 16//====================================================================================
17// Function declaration 17// Function declaration
18//==================================================================================== 18//====================================================================================
19void hal_remove_mapping_key( phw_data_t pHwData, PUCHAR pmac_addr ); 19void hal_remove_mapping_key( phw_data_t pHwData, u8 *pmac_addr );
20void hal_remove_default_key( phw_data_t pHwData, u32 index ); 20void hal_remove_default_key( phw_data_t pHwData, u32 index );
21unsigned char hal_set_mapping_key( phw_data_t Adapter, PUCHAR pmac_addr, u8 null_key, u8 wep_on, PUCHAR ptx_tsc, PUCHAR prx_tsc, u8 key_type, u8 key_len, PUCHAR pkey_data ); 21unsigned char hal_set_mapping_key( phw_data_t Adapter, u8 *pmac_addr, u8 null_key, u8 wep_on, u8 *ptx_tsc, u8 *prx_tsc, u8 key_type, u8 key_len, u8 *pkey_data );
22unsigned char hal_set_default_key( phw_data_t Adapter, u8 index, u8 null_key, u8 wep_on, PUCHAR ptx_tsc, PUCHAR prx_tsc, u8 key_type, u8 key_len, PUCHAR pkey_data ); 22unsigned char hal_set_default_key( phw_data_t Adapter, u8 index, u8 null_key, u8 wep_on, u8 *ptx_tsc, u8 *prx_tsc, u8 key_type, u8 key_len, u8 *pkey_data );
23void hal_clear_all_default_key( phw_data_t pHwData ); 23void hal_clear_all_default_key( phw_data_t pHwData );
24void hal_clear_all_group_key( phw_data_t pHwData ); 24void hal_clear_all_group_key( phw_data_t pHwData );
25void hal_clear_all_mapping_key( phw_data_t pHwData ); 25void hal_clear_all_mapping_key( phw_data_t pHwData );
26void hal_clear_all_key( phw_data_t pHwData ); 26void hal_clear_all_key( phw_data_t pHwData );
27void hal_get_ethernet_address( phw_data_t pHwData, PUCHAR current_address ); 27void hal_get_ethernet_address( phw_data_t pHwData, u8 *current_address );
28void hal_set_ethernet_address( phw_data_t pHwData, PUCHAR current_address ); 28void hal_set_ethernet_address( phw_data_t pHwData, u8 *current_address );
29void hal_get_permanent_address( phw_data_t pHwData, PUCHAR pethernet_address ); 29void hal_get_permanent_address( phw_data_t pHwData, u8 *pethernet_address );
30unsigned char hal_init_hardware( phw_data_t pHwData, PADAPTER Adapter ); 30unsigned char hal_init_hardware( phw_data_t pHwData, PADAPTER Adapter );
31void hal_set_power_save_mode( phw_data_t pHwData, unsigned char power_save, unsigned char wakeup, unsigned char dtim ); 31void hal_set_power_save_mode( phw_data_t pHwData, unsigned char power_save, unsigned char wakeup, unsigned char dtim );
32void hal_get_power_save_mode( phw_data_t pHwData, PBOOLEAN pin_pwr_save ); 32void hal_get_power_save_mode( phw_data_t pHwData, u8 *pin_pwr_save );
33void hal_set_slot_time( phw_data_t pHwData, u8 type ); 33void hal_set_slot_time( phw_data_t pHwData, u8 type );
34#define hal_set_atim_window( _A, _ATM ) 34#define hal_set_atim_window( _A, _ATM )
35void hal_set_rates( phw_data_t pHwData, PUCHAR pbss_rates, u8 length, unsigned char basic_rate_set ); 35void hal_set_rates( phw_data_t pHwData, u8 *pbss_rates, u8 length, unsigned char basic_rate_set );
36#define hal_set_basic_rates( _A, _R, _L ) hal_set_rates( _A, _R, _L, TRUE ) 36#define hal_set_basic_rates( _A, _R, _L ) hal_set_rates( _A, _R, _L, TRUE )
37#define hal_set_op_rates( _A, _R, _L ) hal_set_rates( _A, _R, _L, FALSE ) 37#define hal_set_op_rates( _A, _R, _L ) hal_set_rates( _A, _R, _L, FALSE )
38void hal_start_bss( phw_data_t pHwData, u8 mac_op_mode ); 38void hal_start_bss( phw_data_t pHwData, u8 mac_op_mode );
@@ -40,19 +40,19 @@ void hal_join_request( phw_data_t pHwData, u8 bss_type ); // 0:BSS STA 1:IBSS
40void hal_stop_sync_bss( phw_data_t pHwData ); 40void hal_stop_sync_bss( phw_data_t pHwData );
41void hal_resume_sync_bss( phw_data_t pHwData); 41void hal_resume_sync_bss( phw_data_t pHwData);
42void hal_set_aid( phw_data_t pHwData, u16 aid ); 42void hal_set_aid( phw_data_t pHwData, u16 aid );
43void hal_set_bssid( phw_data_t pHwData, PUCHAR pbssid ); 43void hal_set_bssid( phw_data_t pHwData, u8 *pbssid );
44void hal_get_bssid( phw_data_t pHwData, PUCHAR pbssid ); 44void hal_get_bssid( phw_data_t pHwData, u8 *pbssid );
45void hal_set_beacon_period( phw_data_t pHwData, u16 beacon_period ); 45void hal_set_beacon_period( phw_data_t pHwData, u16 beacon_period );
46void hal_set_listen_interval( phw_data_t pHwData, u16 listen_interval ); 46void hal_set_listen_interval( phw_data_t pHwData, u16 listen_interval );
47void hal_set_cap_info( phw_data_t pHwData, u16 capability_info ); 47void hal_set_cap_info( phw_data_t pHwData, u16 capability_info );
48void hal_set_ssid( phw_data_t pHwData, PUCHAR pssid, u8 ssid_len ); 48void hal_set_ssid( phw_data_t pHwData, u8 *pssid, u8 ssid_len );
49void hal_set_current_channel( phw_data_t pHwData, ChanInfo channel ); 49void hal_set_current_channel( phw_data_t pHwData, ChanInfo channel );
50void hal_set_current_channel_ex( phw_data_t pHwData, ChanInfo channel ); 50void hal_set_current_channel_ex( phw_data_t pHwData, ChanInfo channel );
51void hal_get_current_channel( phw_data_t pHwData, ChanInfo *channel ); 51void hal_get_current_channel( phw_data_t pHwData, ChanInfo *channel );
52void hal_set_accept_broadcast( phw_data_t pHwData, u8 enable ); 52void hal_set_accept_broadcast( phw_data_t pHwData, u8 enable );
53void hal_set_accept_multicast( phw_data_t pHwData, u8 enable ); 53void hal_set_accept_multicast( phw_data_t pHwData, u8 enable );
54void hal_set_accept_beacon( phw_data_t pHwData, u8 enable ); 54void hal_set_accept_beacon( phw_data_t pHwData, u8 enable );
55void hal_set_multicast_address( phw_data_t pHwData, PUCHAR address, u8 number ); 55void hal_set_multicast_address( phw_data_t pHwData, u8 *address, u8 number );
56u8 hal_get_accept_beacon( phw_data_t pHwData ); 56u8 hal_get_accept_beacon( phw_data_t pHwData );
57void hal_stop( phw_data_t pHwData ); 57void hal_stop( phw_data_t pHwData );
58void hal_halt( phw_data_t pHwData, void *ppa_data ); 58void hal_halt( phw_data_t pHwData, void *ppa_data );
@@ -97,7 +97,7 @@ void hal_surprise_remove( phw_data_t pHwData );
97 97
98 98
99void hal_rate_change( phw_data_t pHwData ); // Notify the HAL rate is changing 20060613.1 99void hal_rate_change( phw_data_t pHwData ); // Notify the HAL rate is changing 20060613.1
100unsigned char hal_get_dxx_reg( phw_data_t pHwData, u16 number, PULONG pValue ); 100unsigned char hal_get_dxx_reg( phw_data_t pHwData, u16 number, u32 * pValue );
101unsigned char hal_set_dxx_reg( phw_data_t pHwData, u16 number, u32 value ); 101unsigned char hal_set_dxx_reg( phw_data_t pHwData, u16 number, u32 value );
102#define hal_get_time_count( _P ) (_P->time_count/10) // return 100ms count 102#define hal_get_time_count( _P ) (_P->time_count/10) // return 100ms count
103#define hal_detect_error( _P ) (_P->WbUsb.DetectCount) 103#define hal_detect_error( _P ) (_P->WbUsb.DetectCount)
@@ -116,7 +116,7 @@ unsigned char hal_idle( phw_data_t pHwData );
116#define pa_stall_execution( _A ) //OS_SLEEP( 1 ) 116#define pa_stall_execution( _A ) //OS_SLEEP( 1 )
117#define hw_get_cxx_reg( _A, _B, _C ) 117#define hw_get_cxx_reg( _A, _B, _C )
118#define hw_set_cxx_reg( _A, _B, _C ) 118#define hw_set_cxx_reg( _A, _B, _C )
119#define hw_get_dxx_reg( _A, _B, _C ) hal_get_dxx_reg( _A, _B, (PULONG)_C ) 119#define hw_get_dxx_reg( _A, _B, _C ) hal_get_dxx_reg( _A, _B, (u32 *)_C )
120#define hw_set_dxx_reg( _A, _B, _C ) hal_set_dxx_reg( _A, _B, (u32)_C ) 120#define hw_set_dxx_reg( _A, _B, _C ) hal_set_dxx_reg( _A, _B, (u32)_C )
121 121
122 122
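The winbond hunks above all follow one pattern: the Windows-style pointer aliases (PUCHAR, PULONG, PCHAR, PBOOLEAN) are spelled out as the kernel's fixed-width types, so the pointee width is visible at every call site. A minimal sketch of the idea; the demo_* prototype is hypothetical, not a winbond symbol:

	#include <linux/types.h>                 /* u8 */

	typedef unsigned char *PUCHAR;           /* the alias being removed */

	void demo_set_bssid_old(void *hw, PUCHAR pbssid);  /* before: width hidden */
	void demo_set_bssid_new(void *hw, u8 *pbssid);     /* after: explicitly bytes */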
diff --git a/drivers/staging/winbond/wbhal_s.h b/drivers/staging/winbond/wbhal_s.h
index 5b862ff357bd..2ee3f0fc1ad8 100644
--- a/drivers/staging/winbond/wbhal_s.h
+++ b/drivers/staging/winbond/wbhal_s.h
@@ -461,7 +461,7 @@ typedef struct _HW_DATA_T
461 //===================================================================== 461 //=====================================================================
462 // Definition for 802.11 462 // Definition for 802.11
463 //===================================================================== 463 //=====================================================================
464 PUCHAR bssid_pointer; // Used by hal_get_bssid for return value 464 u8 *bssid_pointer; // Used by hal_get_bssid for return value
465 u8 bssid[8];// Only 6 byte will be used. 8 byte is required for read buffer 465 u8 bssid[8];// Only 6 byte will be used. 8 byte is required for read buffer
466 u8 ssid[32];// maximum ssid length is 32 byte 466 u8 ssid[32];// maximum ssid length is 32 byte
467 467
@@ -486,7 +486,7 @@ typedef struct _HW_DATA_T
486 u32 CurrentRadioSw; // 20060320.2 0:On 1:Off 486 u32 CurrentRadioSw; // 20060320.2 0:On 1:Off
487 u32 CurrentRadioHw; // 20060825 0:On 1:Off 487 u32 CurrentRadioHw; // 20060825 0:On 1:Off
488 488
489 PUCHAR power_save_point; // Used by hal_get_power_save_mode for return value 489 u8 *power_save_point; // Used by hal_get_power_save_mode for return value
490 u8 cwmin; 490 u8 cwmin;
491 u8 desired_power_save; 491 u8 desired_power_save;
492 u8 dtim;// Is running dtim 492 u8 dtim;// Is running dtim
diff --git a/drivers/staging/winbond/wblinux.c b/drivers/staging/winbond/wblinux.c
index 2eade5a47b19..4ed45e488318 100644
--- a/drivers/staging/winbond/wblinux.c
+++ b/drivers/staging/winbond/wblinux.c
@@ -25,11 +25,11 @@ EncapAtomicInc(PADAPTER Adapter, void* pAtomic)
25{ 25{
26 PWBLINUX pWbLinux = &Adapter->WbLinux; 26 PWBLINUX pWbLinux = &Adapter->WbLinux;
27 u32 ltmp; 27 u32 ltmp;
28 PULONG pltmp = (PULONG)pAtomic; 28 u32 * pltmp = (u32 *)pAtomic;
29 OS_SPIN_LOCK_ACQUIRED( &pWbLinux->AtomicSpinLock ); 29 spin_lock_irq( &pWbLinux->AtomicSpinLock );
30 (*pltmp)++; 30 (*pltmp)++;
31 ltmp = (*pltmp); 31 ltmp = (*pltmp);
32 OS_SPIN_LOCK_RELEASED( &pWbLinux->AtomicSpinLock ); 32 spin_unlock_irq( &pWbLinux->AtomicSpinLock );
33 return ltmp; 33 return ltmp;
34} 34}
35 35
@@ -38,11 +38,11 @@ EncapAtomicDec(PADAPTER Adapter, void* pAtomic)
38{ 38{
39 PWBLINUX pWbLinux = &Adapter->WbLinux; 39 PWBLINUX pWbLinux = &Adapter->WbLinux;
40 u32 ltmp; 40 u32 ltmp;
41 PULONG pltmp = (PULONG)pAtomic; 41 u32 * pltmp = (u32 *)pAtomic;
42 OS_SPIN_LOCK_ACQUIRED( &pWbLinux->AtomicSpinLock ); 42 spin_lock_irq( &pWbLinux->AtomicSpinLock );
43 (*pltmp)--; 43 (*pltmp)--;
44 ltmp = (*pltmp); 44 ltmp = (*pltmp);
45 OS_SPIN_LOCK_RELEASED( &pWbLinux->AtomicSpinLock ); 45 spin_unlock_irq( &pWbLinux->AtomicSpinLock );
46 return ltmp; 46 return ltmp;
47} 47}
48 48
@@ -51,8 +51,8 @@ WBLINUX_Initial(PADAPTER Adapter)
51{ 51{
52 PWBLINUX pWbLinux = &Adapter->WbLinux; 52 PWBLINUX pWbLinux = &Adapter->WbLinux;
53 53
54 OS_SPIN_LOCK_ALLOCATE( &pWbLinux->SpinLock ); 54 spin_lock_init( &pWbLinux->SpinLock );
55 OS_SPIN_LOCK_ALLOCATE( &pWbLinux->AtomicSpinLock ); 55 spin_lock_init( &pWbLinux->AtomicSpinLock );
56 return TRUE; 56 return TRUE;
57} 57}
58 58
@@ -79,7 +79,6 @@ void
79WBLINUX_Destroy(PADAPTER Adapter) 79WBLINUX_Destroy(PADAPTER Adapter)
80{ 80{
81 WBLINUX_stop( Adapter ); 81 WBLINUX_stop( Adapter );
82 OS_SPIN_LOCK_FREE( &pWbNdis->SpinLock );
83#ifdef _PE_USB_INI_DUMP_ 82#ifdef _PE_USB_INI_DUMP_
84 WBDEBUG(("[w35und] unregister_netdev!\n")); 83 WBDEBUG(("[w35und] unregister_netdev!\n"));
85#endif 84#endif
@@ -142,119 +141,118 @@ unsigned char
142WbWLanInitialize(PADAPTER Adapter) 141WbWLanInitialize(PADAPTER Adapter)
143{ 142{
144 phw_data_t pHwData; 143 phw_data_t pHwData;
145 PUCHAR pMacAddr, pMacAddr2; 144 u8 *pMacAddr;
145 u8 *pMacAddr2;
146 u32 InitStep = 0; 146 u32 InitStep = 0;
147 u8 EEPROM_region; 147 u8 EEPROM_region;
148 u8 HwRadioOff; 148 u8 HwRadioOff;
149 149
150 do { 150 //
151 // 151 // Setting default value for Linux
152 // Setting default value for Linux 152 //
153 // 153 Adapter->sLocalPara.region_INF = REGION_AUTO;
154 Adapter->sLocalPara.region_INF = REGION_AUTO; 154 Adapter->sLocalPara.TxRateMode = RATE_AUTO;
155 Adapter->sLocalPara.TxRateMode = RATE_AUTO; 155 psLOCAL->bMacOperationMode = MODE_802_11_BG; // B/G mode
156 psLOCAL->bMacOperationMode = MODE_802_11_BG; // B/G mode 156 Adapter->Mds.TxRTSThreshold = DEFAULT_RTSThreshold;
157 Adapter->Mds.TxRTSThreshold = DEFAULT_RTSThreshold; 157 Adapter->Mds.TxFragmentThreshold = DEFAULT_FRAGMENT_THRESHOLD;
158 Adapter->Mds.TxFragmentThreshold = DEFAULT_FRAGMENT_THRESHOLD; 158 hal_set_phy_type( &Adapter->sHwData, RF_WB_242_1 );
159 hal_set_phy_type( &Adapter->sHwData, RF_WB_242_1 ); 159 Adapter->sLocalPara.MTUsize = MAX_ETHERNET_PACKET_SIZE;
160 Adapter->sLocalPara.MTUsize = MAX_ETHERNET_PACKET_SIZE; 160 psLOCAL->bPreambleMode = AUTO_MODE;
161 psLOCAL->bPreambleMode = AUTO_MODE; 161 Adapter->sLocalPara.RadioOffStatus.boSwRadioOff = FALSE;
162 Adapter->sLocalPara.RadioOffStatus.boSwRadioOff = FALSE; 162 pHwData = &Adapter->sHwData;
163 pHwData = &Adapter->sHwData; 163 hal_set_phy_type( pHwData, RF_DECIDE_BY_INF );
164 hal_set_phy_type( pHwData, RF_DECIDE_BY_INF ); 164
165 165 //
166 // 166 // Initial each module and variable
167 // Initial each module and variable 167 //
168 // 168 if (!WBLINUX_Initial(Adapter)) {
169 if (!WBLINUX_Initial(Adapter)) {
170#ifdef _PE_USB_INI_DUMP_ 169#ifdef _PE_USB_INI_DUMP_
171 WBDEBUG(("[w35und]WBNDIS initialization failed\n")); 170 WBDEBUG(("[w35und]WBNDIS initialization failed\n"));
172#endif 171#endif
173 break; 172 goto error;
174 } 173 }
175 174
176 // Initial Software variable 175 // Initial Software variable
177 Adapter->sLocalPara.ShutDowned = FALSE; 176 Adapter->sLocalPara.ShutDowned = FALSE;
178 177
179 //added by ws for wep key error detection 178 //added by ws for wep key error detection
180 Adapter->sLocalPara.bWepKeyError= FALSE; 179 Adapter->sLocalPara.bWepKeyError= FALSE;
181 Adapter->sLocalPara.bToSelfPacketReceived = FALSE; 180 Adapter->sLocalPara.bToSelfPacketReceived = FALSE;
182 Adapter->sLocalPara.WepKeyDetectTimerCount= 2 * 100; /// 2 seconds 181 Adapter->sLocalPara.WepKeyDetectTimerCount= 2 * 100; /// 2 seconds
183 182
184 // Initial USB hal 183 // Initial USB hal
185 InitStep = 1; 184 InitStep = 1;
186 pHwData = &Adapter->sHwData; 185 pHwData = &Adapter->sHwData;
187 if (!hal_init_hardware(pHwData, Adapter)) 186 if (!hal_init_hardware(pHwData, Adapter))
188 break; 187 goto error;
189 188
190 EEPROM_region = hal_get_region_from_EEPROM( pHwData ); 189 EEPROM_region = hal_get_region_from_EEPROM( pHwData );
191 if (EEPROM_region != REGION_AUTO) 190 if (EEPROM_region != REGION_AUTO)
192 psLOCAL->region = EEPROM_region; 191 psLOCAL->region = EEPROM_region;
193 else { 192 else {
194 if (psLOCAL->region_INF != REGION_AUTO) 193 if (psLOCAL->region_INF != REGION_AUTO)
195 psLOCAL->region = psLOCAL->region_INF; 194 psLOCAL->region = psLOCAL->region_INF;
196 else 195 else
197 psLOCAL->region = REGION_USA; //default setting 196 psLOCAL->region = REGION_USA; //default setting
198 } 197 }
199 198
200 // Get Software setting flag from hal 199 // Get Software setting flag from hal
201 Adapter->sLocalPara.boAntennaDiversity = FALSE; 200 Adapter->sLocalPara.boAntennaDiversity = FALSE;
202 if (hal_software_set(pHwData) & 0x00000001) 201 if (hal_software_set(pHwData) & 0x00000001)
203 Adapter->sLocalPara.boAntennaDiversity = TRUE; 202 Adapter->sLocalPara.boAntennaDiversity = TRUE;
204 203
205 // 204 //
206 // For TS module 205 // For TS module
207 // 206 //
208 InitStep = 2; 207 InitStep = 2;
209 208
210 // For MDS module 209 // For MDS module
211 InitStep = 3; 210 InitStep = 3;
212 Mds_initial(Adapter); 211 Mds_initial(Adapter);
213 212
214 //======================================= 213 //=======================================
215 // Initialize the SME, SCAN, MLME, ROAM 214 // Initialize the SME, SCAN, MLME, ROAM
216 //======================================= 215 //=======================================
217 InitStep = 4; 216 InitStep = 4;
218 InitStep = 5; 217 InitStep = 5;
219 InitStep = 6; 218 InitStep = 6;
220 219
221 // If no user-defined address in the registry, use the address "burned" on the NIC instead. 220 // If no user-defined address in the registry, use the address "burned" on the NIC instead.

222 pMacAddr = Adapter->sLocalPara.ThisMacAddress; 221 pMacAddr = Adapter->sLocalPara.ThisMacAddress;
223 pMacAddr2 = Adapter->sLocalPara.PermanentAddress; 222 pMacAddr2 = Adapter->sLocalPara.PermanentAddress;
224 hal_get_permanent_address( pHwData, Adapter->sLocalPara.PermanentAddress );// Reading ethernet address from EEPROM 223 hal_get_permanent_address( pHwData, Adapter->sLocalPara.PermanentAddress );// Reading ethernet address from EEPROM
225 if (OS_MEMORY_COMPARE(pMacAddr, "\x00\x00\x00\x00\x00\x00", MAC_ADDR_LENGTH )) // Is equal 224 if (OS_MEMORY_COMPARE(pMacAddr, "\x00\x00\x00\x00\x00\x00", MAC_ADDR_LENGTH )) // Is equal
226 { 225 {
227 memcpy( pMacAddr, pMacAddr2, MAC_ADDR_LENGTH ); 226 memcpy( pMacAddr, pMacAddr2, MAC_ADDR_LENGTH );
228 } else { 227 } else {
229 // Set the user define MAC address 228 // Set the user define MAC address
230 hal_set_ethernet_address( pHwData, Adapter->sLocalPara.ThisMacAddress ); 229 hal_set_ethernet_address( pHwData, Adapter->sLocalPara.ThisMacAddress );
231 } 230 }
232 231
233 //get current antenna 232 //get current antenna
234 psLOCAL->bAntennaNo = hal_get_antenna_number(pHwData); 233 psLOCAL->bAntennaNo = hal_get_antenna_number(pHwData);
235#ifdef _PE_STATE_DUMP_ 234#ifdef _PE_STATE_DUMP_
236 WBDEBUG(("Driver init, antenna no = %d\n", psLOCAL->bAntennaNo)); 235 WBDEBUG(("Driver init, antenna no = %d\n", psLOCAL->bAntennaNo));
237#endif 236#endif
238 hal_get_hw_radio_off( pHwData ); 237 hal_get_hw_radio_off( pHwData );
239 238
240 // Waiting for HAL setting OK 239 // Waiting for HAL setting OK
241 while (!hal_idle(pHwData)) 240 while (!hal_idle(pHwData))
242 OS_SLEEP(10000); 241 OS_SLEEP(10000);
243 242
244 MTO_Init(Adapter); 243 MTO_Init(Adapter);
245 244
246 HwRadioOff = hal_get_hw_radio_off( pHwData ); 245 HwRadioOff = hal_get_hw_radio_off( pHwData );
247 psLOCAL->RadioOffStatus.boHwRadioOff = !!HwRadioOff; 246 psLOCAL->RadioOffStatus.boHwRadioOff = !!HwRadioOff;
248 247
249 hal_set_radio_mode( pHwData, (unsigned char)(psLOCAL->RadioOffStatus.boSwRadioOff || psLOCAL->RadioOffStatus.boHwRadioOff) ); 248 hal_set_radio_mode( pHwData, (unsigned char)(psLOCAL->RadioOffStatus.boSwRadioOff || psLOCAL->RadioOffStatus.boHwRadioOff) );
250 249
251 hal_driver_init_OK(pHwData) = 1; // Notify hal that the driver is ready now. 250 hal_driver_init_OK(pHwData) = 1; // Notify hal that the driver is ready now.
252 //set a tx power for reference..... 251 //set a tx power for reference.....
253// sme_set_tx_power_level(Adapter, 12); FIXME? 252// sme_set_tx_power_level(Adapter, 12); FIXME?
254 return TRUE; 253 return TRUE;
255 }
256 while(FALSE);
257 254
255error:
258 switch (InitStep) { 256 switch (InitStep) {
259 case 5: 257 case 5:
260 case 4: 258 case 4:
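The WbWLanInitialize() hunk above replaces the fake do { ... break; ... } while (FALSE) loop with straight-line code that jumps to a single error label, while keeping the InitStep-based unwinding. A control-flow sketch of that rewrite, assuming hypothetical demo_* helpers rather than the real winbond functions:

	struct demo_adapter { int opaque; };

	static int demo_init_os(struct demo_adapter *ad)  { return 1; }
	static int demo_init_hw(struct demo_adapter *ad)  { return 0; } /* force the error path */
	static void demo_release_os(struct demo_adapter *ad) { }

	static int demo_initialize(struct demo_adapter *ad)
	{
		int step = 0;

		if (!demo_init_os(ad))
			goto error;              /* was: break; out of do{}while(FALSE) */
		step = 1;

		if (!demo_init_hw(ad))
			goto error;              /* was: break; */
		step = 2;

		return 1;                        /* TRUE: everything came up */

	error:
		switch (step) {                  /* unwind only what succeeded */
		case 1:
			demo_release_os(ad);
		case 0:
			break;
		}
		return 0;                        /* FALSE */
	}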
diff --git a/drivers/staging/winbond/wblinux_s.h b/drivers/staging/winbond/wblinux_s.h
index 97e9167ab839..fd2bb43bf3cf 100644
--- a/drivers/staging/winbond/wblinux_s.h
+++ b/drivers/staging/winbond/wblinux_s.h
@@ -24,8 +24,8 @@
24 24
25typedef struct _WBLINUX 25typedef struct _WBLINUX
26{ 26{
27 OS_SPIN_LOCK AtomicSpinLock; 27 spinlock_t AtomicSpinLock;
28 OS_SPIN_LOCK SpinLock; 28 spinlock_t SpinLock;
29 u32 shutdown; 29 u32 shutdown;
30 30
31 OS_ATOMIC ThreadCount; 31 OS_ATOMIC ThreadCount;
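Together with the wblinux.c hunks, this drops the driver's OS_SPIN_LOCK wrappers in favour of the native spinlock API: spinlock_t fields initialised with spin_lock_init() and protected with spin_lock_irq()/spin_unlock_irq(). A minimal sketch of the idiom, using hypothetical demo_* names:

	#include <linux/spinlock.h>
	#include <linux/types.h>

	struct demo_state {
		spinlock_t lock;                 /* was: OS_SPIN_LOCK */
		u32 counter;
	};

	static void demo_init(struct demo_state *st)
	{
		spin_lock_init(&st->lock);       /* replaces OS_SPIN_LOCK_ALLOCATE */
	}

	static u32 demo_inc(struct demo_state *st)
	{
		u32 val;

		spin_lock_irq(&st->lock);        /* replaces OS_SPIN_LOCK_ACQUIRED */
		val = ++st->counter;
		spin_unlock_irq(&st->lock);      /* replaces OS_SPIN_LOCK_RELEASED */
		return val;
	}

A lock-protected counter like EncapAtomicInc()/EncapAtomicDec() could arguably be an atomic_t with atomic_inc_return()/atomic_dec_return() instead, but the patch keeps the spinlock version and only swaps the wrapper macros.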
diff --git a/drivers/staging/wlan-ng/Kconfig b/drivers/staging/wlan-ng/Kconfig
index 10b1f0f634d3..2425d860dcaf 100644
--- a/drivers/staging/wlan-ng/Kconfig
+++ b/drivers/staging/wlan-ng/Kconfig
@@ -1,6 +1,6 @@
1config PRISM2_USB 1config PRISM2_USB
2 tristate "Prism2.5 USB driver" 2 tristate "Prism2.5 USB driver"
3 depends on USB 3 depends on WLAN_80211 && USB
4 default n 4 default n
5 ---help--- 5 ---help---
6 This is the wlan-ng prism 2.5 USB driver for a wide range of 6 This is the wlan-ng prism 2.5 USB driver for a wide range of
diff --git a/drivers/staging/wlan-ng/hfa384x.h b/drivers/staging/wlan-ng/hfa384x.h
index a2054639d24b..0dfb8ce9aae7 100644
--- a/drivers/staging/wlan-ng/hfa384x.h
+++ b/drivers/staging/wlan-ng/hfa384x.h
@@ -824,7 +824,7 @@ PD Record codes
824#define HFA384x_CMD_MACPORT_SET(value) ((UINT16)HFA384x_CMD_AINFO_SET(value)) 824#define HFA384x_CMD_MACPORT_SET(value) ((UINT16)HFA384x_CMD_AINFO_SET(value))
825#define HFA384x_CMD_ISRECL(value) ((UINT16)(HFA384x_CMD_AINFO_GET((UINT16)(value) & HFA384x_CMD_RECL))) 825#define HFA384x_CMD_ISRECL(value) ((UINT16)(HFA384x_CMD_AINFO_GET((UINT16)(value) & HFA384x_CMD_RECL)))
826#define HFA384x_CMD_RECL_SET(value) ((UINT16)HFA384x_CMD_AINFO_SET(value)) 826#define HFA384x_CMD_RECL_SET(value) ((UINT16)HFA384x_CMD_AINFO_SET(value))
827#define HFA384x_CMD_QOS_GET(value) ((UINT16((((UINT16)(value))&((UINT16)0x3000)) >> 12)) 827#define HFA384x_CMD_QOS_GET(value) ((UINT16)((((UINT16)(value))&((UINT16)0x3000)) >> 12))
828#define HFA384x_CMD_QOS_SET(value) ((UINT16)((((UINT16)(value)) << 12) & 0x3000)) 828#define HFA384x_CMD_QOS_SET(value) ((UINT16)((((UINT16)(value)) << 12) & 0x3000))
829#define HFA384x_CMD_ISWRITE(value) ((UINT16)(HFA384x_CMD_AINFO_GET((UINT16)(value) & HFA384x_CMD_WRITE))) 829#define HFA384x_CMD_ISWRITE(value) ((UINT16)(HFA384x_CMD_AINFO_GET((UINT16)(value) & HFA384x_CMD_WRITE)))
830#define HFA384x_CMD_WRITE_SET(value) ((UINT16)HFA384x_CMD_AINFO_SET((UINT16)value)) 830#define HFA384x_CMD_WRITE_SET(value) ((UINT16)HFA384x_CMD_AINFO_SET((UINT16)value))
diff --git a/drivers/staging/wlan-ng/p80211wep.c b/drivers/staging/wlan-ng/p80211wep.c
index 53fe2985971f..11a50c7fbfc8 100644
--- a/drivers/staging/wlan-ng/p80211wep.c
+++ b/drivers/staging/wlan-ng/p80211wep.c
@@ -64,7 +64,6 @@
64/*================================================================*/ 64/*================================================================*/
65/* Project Includes */ 65/* Project Includes */
66 66
67#include "version.h"
68#include "p80211hdr.h" 67#include "p80211hdr.h"
69#include "p80211types.h" 68#include "p80211types.h"
70#include "p80211msg.h" 69#include "p80211msg.h"
diff --git a/drivers/staging/wlan-ng/prism2mib.c b/drivers/staging/wlan-ng/prism2mib.c
index 268fd9bba1ef..eac06f793d81 100644
--- a/drivers/staging/wlan-ng/prism2mib.c
+++ b/drivers/staging/wlan-ng/prism2mib.c
@@ -90,8 +90,6 @@
90#include <linux/usb.h> 90#include <linux/usb.h>
91//#endif 91//#endif
92 92
93#include "wlan_compat.h"
94
95/*================================================================*/ 93/*================================================================*/
96/* Project Includes */ 94/* Project Includes */
97 95
diff --git a/drivers/staging/wlan-ng/wlan_compat.h b/drivers/staging/wlan-ng/wlan_compat.h
index 17026570708f..59dfa8f84cbe 100644
--- a/drivers/staging/wlan-ng/wlan_compat.h
+++ b/drivers/staging/wlan-ng/wlan_compat.h
@@ -245,11 +245,11 @@ typedef int64_t INT64;
245# define preempt_count() (0UL) 245# define preempt_count() (0UL)
246#endif 246#endif
247 247
248#define WLAN_LOG_ERROR(x,args...) printk(KERN_ERR "%s: " x , __FUNCTION__ , ##args); 248#define WLAN_LOG_ERROR(x,args...) printk(KERN_ERR "%s: " x , __func__ , ##args);
249 249
250#define WLAN_LOG_WARNING(x,args...) printk(KERN_WARNING "%s: " x , __FUNCTION__ , ##args); 250#define WLAN_LOG_WARNING(x,args...) printk(KERN_WARNING "%s: " x , __func__ , ##args);
251 251
252#define WLAN_LOG_NOTICE(x,args...) printk(KERN_NOTICE "%s: " x , __FUNCTION__ , ##args); 252#define WLAN_LOG_NOTICE(x,args...) printk(KERN_NOTICE "%s: " x , __func__ , ##args);
253 253
254#define WLAN_LOG_INFO(args... ) printk(KERN_INFO args) 254#define WLAN_LOG_INFO(args... ) printk(KERN_INFO args)
255 255
@@ -265,7 +265,7 @@ typedef int64_t INT64;
265 #define DBFENTER { if ( WLAN_DBVAR >= 5 ){ WLAN_LOG_DEBUG(3,"---->\n"); } } 265 #define DBFENTER { if ( WLAN_DBVAR >= 5 ){ WLAN_LOG_DEBUG(3,"---->\n"); } }
266 #define DBFEXIT { if ( WLAN_DBVAR >= 5 ){ WLAN_LOG_DEBUG(3,"<----\n"); } } 266 #define DBFEXIT { if ( WLAN_DBVAR >= 5 ){ WLAN_LOG_DEBUG(3,"<----\n"); } }
267 267
268 #define WLAN_LOG_DEBUG(l,x,args...) if ( WLAN_DBVAR >= (l)) printk(KERN_DEBUG "%s(%lu): " x , __FUNCTION__, (preempt_count() & PREEMPT_MASK), ##args ); 268 #define WLAN_LOG_DEBUG(l,x,args...) if ( WLAN_DBVAR >= (l)) printk(KERN_DEBUG "%s(%lu): " x , __func__, (preempt_count() & PREEMPT_MASK), ##args );
269#else 269#else
270 #define WLAN_ASSERT(c) 270 #define WLAN_ASSERT(c)
271 #define WLAN_HEX_DUMP( l, s, p, n) 271 #define WLAN_HEX_DUMP( l, s, p, n)
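The wlan_compat.h change swaps __FUNCTION__, a GCC extension, for __func__, the C99 predefined identifier, in the logging macros; the expansion is otherwise unchanged. A tiny self-contained illustration with a hypothetical DEMO_LOG macro:

	#include <stdio.h>

	#define DEMO_LOG(x, args...)  printf("%s: " x, __func__, ##args)

	static void probe_device(void)
	{
		DEMO_LOG("found %d ports\n", 2);   /* -> "probe_device: found 2 ports" */
	}

	int main(void)
	{
		probe_device();
		return 0;
	}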
diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c
index 41b6530b8f25..a913efc69669 100644
--- a/drivers/telephony/ixj.c
+++ b/drivers/telephony/ixj.c
@@ -2328,7 +2328,6 @@ static int ixj_release(struct inode *inode, struct file *file_p)
2328 j->rec_codec = j->play_codec = 0; 2328 j->rec_codec = j->play_codec = 0;
2329 j->rec_frame_size = j->play_frame_size = 0; 2329 j->rec_frame_size = j->play_frame_size = 0;
2330 j->flags.cidsent = j->flags.cidring = 0; 2330 j->flags.cidsent = j->flags.cidring = 0;
2331 ixj_fasync(-1, file_p, 0); /* remove from list of async notification */
2332 2331
2333 if(j->cardtype == QTI_LINEJACK && !j->readers && !j->writers) { 2332 if(j->cardtype == QTI_LINEJACK && !j->readers && !j->writers) {
2334 ixj_set_port(j, PORT_PSTN); 2333 ixj_set_port(j, PORT_PSTN);
diff --git a/drivers/telephony/phonedev.c b/drivers/telephony/phonedev.c
index 37caf4d69037..b52cc830c0b4 100644
--- a/drivers/telephony/phonedev.c
+++ b/drivers/telephony/phonedev.c
@@ -8,7 +8,7 @@
8 * as published by the Free Software Foundation; either version 8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 * 10 *
11 * Author: Alan Cox, <alan@redhat.com> 11 * Author: Alan Cox, <alan@lxorguk.ukuu.org.uk>
12 * 12 *
13 * Fixes: Mar 01 2000 Thomas Sparr, <thomas.l.sparr@telia.com> 13 * Fixes: Mar 01 2000 Thomas Sparr, <thomas.l.sparr@telia.com>
14 * phone_register_device now works with unit!=PHONE_UNIT_ANY 14 * phone_register_device now works with unit!=PHONE_UNIT_ANY
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index f9b4647255aa..2d2440cd57a9 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -367,9 +367,6 @@ static int uio_release(struct inode *inode, struct file *filep)
367 ret = idev->info->release(idev->info, inode); 367 ret = idev->info->release(idev->info, inode);
368 368
369 module_put(idev->owner); 369 module_put(idev->owner);
370
371 if (filep->f_flags & FASYNC)
372 ret = uio_fasync(-1, filep, 0);
373 kfree(listener); 370 kfree(listener);
374 return ret; 371 return ret;
375} 372}
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index bcefbddeba50..289d81adfb9c 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -36,7 +36,8 @@ config USB_ARCH_HAS_OHCI
36 default y if PXA3xx 36 default y if PXA3xx
37 default y if ARCH_EP93XX 37 default y if ARCH_EP93XX
38 default y if ARCH_AT91 38 default y if ARCH_AT91
39 default y if ARCH_PNX4008 39 default y if ARCH_PNX4008 && I2C
40 default y if MFD_TC6393XB
40 # PPC: 41 # PPC:
41 default y if STB03xxx 42 default y if STB03xxx
42 default y if PPC_MPC52xx 43 default y if PPC_MPC52xx
@@ -97,6 +98,8 @@ source "drivers/usb/core/Kconfig"
97 98
98source "drivers/usb/mon/Kconfig" 99source "drivers/usb/mon/Kconfig"
99 100
101source "drivers/usb/wusbcore/Kconfig"
102
100source "drivers/usb/host/Kconfig" 103source "drivers/usb/host/Kconfig"
101 104
102source "drivers/usb/musb/Kconfig" 105source "drivers/usb/musb/Kconfig"
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index a419c42e880e..8b7c419b876e 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -16,9 +16,12 @@ obj-$(CONFIG_USB_UHCI_HCD) += host/
16obj-$(CONFIG_USB_SL811_HCD) += host/ 16obj-$(CONFIG_USB_SL811_HCD) += host/
17obj-$(CONFIG_USB_U132_HCD) += host/ 17obj-$(CONFIG_USB_U132_HCD) += host/
18obj-$(CONFIG_USB_R8A66597_HCD) += host/ 18obj-$(CONFIG_USB_R8A66597_HCD) += host/
19obj-$(CONFIG_USB_HWA_HCD) += host/
19 20
20obj-$(CONFIG_USB_C67X00_HCD) += c67x00/ 21obj-$(CONFIG_USB_C67X00_HCD) += c67x00/
21 22
23obj-$(CONFIG_USB_WUSB) += wusbcore/
24
22obj-$(CONFIG_USB_ACM) += class/ 25obj-$(CONFIG_USB_ACM) += class/
23obj-$(CONFIG_USB_PRINTER) += class/ 26obj-$(CONFIG_USB_PRINTER) += class/
24 27
diff --git a/drivers/usb/atm/speedtch.c b/drivers/usb/atm/speedtch.c
index 76fce44c2f9a..3e862401a638 100644
--- a/drivers/usb/atm/speedtch.c
+++ b/drivers/usb/atm/speedtch.c
@@ -722,6 +722,16 @@ static void speedtch_atm_stop(struct usbatm_data *usbatm, struct atm_dev *atm_de
722 flush_scheduled_work(); 722 flush_scheduled_work();
723} 723}
724 724
725static int speedtch_pre_reset(struct usb_interface *intf)
726{
727 return 0;
728}
729
730static int speedtch_post_reset(struct usb_interface *intf)
731{
732 return 0;
733}
734
725 735
726/********** 736/**********
727** USB ** 737** USB **
@@ -740,6 +750,8 @@ static struct usb_driver speedtch_usb_driver = {
740 .name = speedtch_driver_name, 750 .name = speedtch_driver_name,
741 .probe = speedtch_usb_probe, 751 .probe = speedtch_usb_probe,
742 .disconnect = usbatm_usb_disconnect, 752 .disconnect = usbatm_usb_disconnect,
753 .pre_reset = speedtch_pre_reset,
754 .post_reset = speedtch_post_reset,
743 .id_table = speedtch_usb_ids 755 .id_table = speedtch_usb_ids
744}; 756};
745 757
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index fab23ee8702b..d50a99f70aee 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -158,16 +158,12 @@ static int acm_wb_is_avail(struct acm *acm)
158} 158}
159 159
160/* 160/*
161 * Finish write. 161 * Finish write. Caller must hold acm->write_lock
162 */ 162 */
163static void acm_write_done(struct acm *acm, struct acm_wb *wb) 163static void acm_write_done(struct acm *acm, struct acm_wb *wb)
164{ 164{
165 unsigned long flags;
166
167 spin_lock_irqsave(&acm->write_lock, flags);
168 wb->use = 0; 165 wb->use = 0;
169 acm->transmitting--; 166 acm->transmitting--;
170 spin_unlock_irqrestore(&acm->write_lock, flags);
171} 167}
172 168
173/* 169/*
@@ -482,6 +478,7 @@ static void acm_write_bulk(struct urb *urb)
482{ 478{
483 struct acm_wb *wb = urb->context; 479 struct acm_wb *wb = urb->context;
484 struct acm *acm = wb->instance; 480 struct acm *acm = wb->instance;
481 unsigned long flags;
485 482
486 if (verbose || urb->status 483 if (verbose || urb->status
487 || (urb->actual_length != urb->transfer_buffer_length)) 484 || (urb->actual_length != urb->transfer_buffer_length))
@@ -490,7 +487,9 @@ static void acm_write_bulk(struct urb *urb)
490 urb->transfer_buffer_length, 487 urb->transfer_buffer_length,
491 urb->status); 488 urb->status);
492 489
490 spin_lock_irqsave(&acm->write_lock, flags);
493 acm_write_done(acm, wb); 491 acm_write_done(acm, wb);
492 spin_unlock_irqrestore(&acm->write_lock, flags);
494 if (ACM_READY(acm)) 493 if (ACM_READY(acm))
495 schedule_work(&acm->work); 494 schedule_work(&acm->work);
496 else 495 else
@@ -849,9 +848,10 @@ static void acm_write_buffers_free(struct acm *acm)
849{ 848{
850 int i; 849 int i;
851 struct acm_wb *wb; 850 struct acm_wb *wb;
851 struct usb_device *usb_dev = interface_to_usbdev(acm->control);
852 852
853 for (wb = &acm->wb[0], i = 0; i < ACM_NW; i++, wb++) { 853 for (wb = &acm->wb[0], i = 0; i < ACM_NW; i++, wb++) {
854 usb_buffer_free(acm->dev, acm->writesize, wb->buf, wb->dmah); 854 usb_buffer_free(usb_dev, acm->writesize, wb->buf, wb->dmah);
855 } 855 }
856} 856}
857 857
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 7429f70b9d06..5a8ecc045e3f 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -42,6 +42,8 @@ static struct usb_device_id wdm_ids[] = {
42 { } 42 { }
43}; 43};
44 44
45MODULE_DEVICE_TABLE (usb, wdm_ids);
46
45#define WDM_MINOR_BASE 176 47#define WDM_MINOR_BASE 176
46 48
47 49
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 543811f6e6e8..43a863c5cc43 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -51,6 +51,7 @@ static struct usb_device_id usbtmc_devices[] = {
51 { USB_INTERFACE_INFO(USB_CLASS_APP_SPEC, 3, 0), }, 51 { USB_INTERFACE_INFO(USB_CLASS_APP_SPEC, 3, 0), },
52 { 0, } /* terminating entry */ 52 { 0, } /* terminating entry */
53}; 53};
54MODULE_DEVICE_TABLE(usb, usbtmc_devices);
54 55
55/* 56/*
56 * This structure is the capabilities for the device 57 * This structure is the capabilities for the device
@@ -133,7 +134,7 @@ static int usbtmc_release(struct inode *inode, struct file *file)
133 134
134static int usbtmc_ioctl_abort_bulk_in(struct usbtmc_device_data *data) 135static int usbtmc_ioctl_abort_bulk_in(struct usbtmc_device_data *data)
135{ 136{
136 char *buffer; 137 u8 *buffer;
137 struct device *dev; 138 struct device *dev;
138 int rv; 139 int rv;
139 int n; 140 int n;
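Both the cdc-wdm and usbtmc hunks add MODULE_DEVICE_TABLE(usb, ...) for ID tables that already existed. Without it the table stays internal to the driver, so udev/modprobe cannot match a newly plugged device against modules.alias and autoload the module. A minimal sketch, with a hypothetical demo_ids table that mirrors the usbtmc class match above:

	#include <linux/module.h>
	#include <linux/usb.h>

	static struct usb_device_id demo_ids[] = {
		{ USB_INTERFACE_INFO(USB_CLASS_APP_SPEC, 3, 0) },
		{ }                                /* terminating entry */
	};
	MODULE_DEVICE_TABLE(usb, demo_ids);        /* exported for hotplug autoloading */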
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index e935be7eb468..8c081308b0e2 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -279,7 +279,9 @@ static int usb_unbind_interface(struct device *dev)
279 * altsetting means creating new endpoint device entries). 279 * altsetting means creating new endpoint device entries).
280 * When either of these happens, defer the Set-Interface. 280 * When either of these happens, defer the Set-Interface.
281 */ 281 */
282 if (!error && intf->dev.power.status == DPM_ON) 282 if (intf->cur_altsetting->desc.bAlternateSetting == 0)
283 ; /* Already in altsetting 0 so skip Set-Interface */
284 else if (!error && intf->dev.power.status == DPM_ON)
283 usb_set_interface(udev, intf->altsetting[0]. 285 usb_set_interface(udev, intf->altsetting[0].
284 desc.bInterfaceNumber, 0); 286 desc.bInterfaceNumber, 0);
285 else 287 else
@@ -1610,7 +1612,8 @@ int usb_external_resume_device(struct usb_device *udev)
1610 status = usb_resume_both(udev); 1612 status = usb_resume_both(udev);
1611 udev->last_busy = jiffies; 1613 udev->last_busy = jiffies;
1612 usb_pm_unlock(udev); 1614 usb_pm_unlock(udev);
1613 do_unbind_rebind(udev, DO_REBIND); 1615 if (status == 0)
1616 do_unbind_rebind(udev, DO_REBIND);
1614 1617
1615 /* Now that the device is awake, we can start trying to autosuspend 1618 /* Now that the device is awake, we can start trying to autosuspend
1616 * it again. */ 1619 * it again. */
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index fc9018e72a09..e1b42626d04d 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -106,6 +106,9 @@ static DEFINE_SPINLOCK(hcd_root_hub_lock);
106/* used when updating an endpoint's URB list */ 106/* used when updating an endpoint's URB list */
107static DEFINE_SPINLOCK(hcd_urb_list_lock); 107static DEFINE_SPINLOCK(hcd_urb_list_lock);
108 108
109/* used to protect against unlinking URBs after the device is gone */
110static DEFINE_SPINLOCK(hcd_urb_unlink_lock);
111
109/* wait queue for synchronous unlinks */ 112/* wait queue for synchronous unlinks */
110DECLARE_WAIT_QUEUE_HEAD(usb_kill_urb_queue); 113DECLARE_WAIT_QUEUE_HEAD(usb_kill_urb_queue);
111 114
@@ -1376,10 +1379,25 @@ static int unlink1(struct usb_hcd *hcd, struct urb *urb, int status)
1376int usb_hcd_unlink_urb (struct urb *urb, int status) 1379int usb_hcd_unlink_urb (struct urb *urb, int status)
1377{ 1380{
1378 struct usb_hcd *hcd; 1381 struct usb_hcd *hcd;
1379 int retval; 1382 int retval = -EIDRM;
1383 unsigned long flags;
1380 1384
1381 hcd = bus_to_hcd(urb->dev->bus); 1385 /* Prevent the device and bus from going away while
1382 retval = unlink1(hcd, urb, status); 1386 * the unlink is carried out. If they are already gone
1387 * then urb->use_count must be 0, since disconnected
1388 * devices can't have any active URBs.
1389 */
1390 spin_lock_irqsave(&hcd_urb_unlink_lock, flags);
1391 if (atomic_read(&urb->use_count) > 0) {
1392 retval = 0;
1393 usb_get_dev(urb->dev);
1394 }
1395 spin_unlock_irqrestore(&hcd_urb_unlink_lock, flags);
1396 if (retval == 0) {
1397 hcd = bus_to_hcd(urb->dev->bus);
1398 retval = unlink1(hcd, urb, status);
1399 usb_put_dev(urb->dev);
1400 }
1383 1401
1384 if (retval == 0) 1402 if (retval == 0)
1385 retval = -EINPROGRESS; 1403 retval = -EINPROGRESS;
@@ -1528,6 +1546,17 @@ void usb_hcd_disable_endpoint(struct usb_device *udev,
1528 hcd->driver->endpoint_disable(hcd, ep); 1546 hcd->driver->endpoint_disable(hcd, ep);
1529} 1547}
1530 1548
1549/* Protect against drivers that try to unlink URBs after the device
1550 * is gone, by waiting until all unlinks for @udev are finished.
1551 * Since we don't currently track URBs by device, simply wait until
1552 * nothing is running in the locked region of usb_hcd_unlink_urb().
1553 */
1554void usb_hcd_synchronize_unlinks(struct usb_device *udev)
1555{
1556 spin_lock_irq(&hcd_urb_unlink_lock);
1557 spin_unlock_irq(&hcd_urb_unlink_lock);
1558}
1559
1531/*-------------------------------------------------------------------------*/ 1560/*-------------------------------------------------------------------------*/
1532 1561
1533/* called in any context */ 1562/* called in any context */
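usb_hcd_synchronize_unlinks() above relies on an empty lock/unlock pair as a barrier: once it can acquire hcd_urb_unlink_lock, no CPU is still inside the locked region of usb_hcd_unlink_urb(), so every unlink that started before the disconnect has either finished or safely holds its device reference. A generic sketch of that pattern, with hypothetical demo_* names:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(demo_pending_lock);

	static void demo_begin_operation(void)
	{
		spin_lock_irq(&demo_pending_lock);
		/* ... short region that must not overlap demo_wait_for_operations() ... */
		spin_unlock_irq(&demo_pending_lock);
	}

	static void demo_wait_for_operations(void)
	{
		/* Acquiring and immediately releasing the lock guarantees that any
		 * demo_begin_operation() that entered its region earlier has left it. */
		spin_lock_irq(&demo_pending_lock);
		spin_unlock_irq(&demo_pending_lock);
	}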
diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
index 2dcde61c465e..9465e70f4dd0 100644
--- a/drivers/usb/core/hcd.h
+++ b/drivers/usb/core/hcd.h
@@ -232,6 +232,7 @@ extern void usb_hcd_flush_endpoint(struct usb_device *udev,
232 struct usb_host_endpoint *ep); 232 struct usb_host_endpoint *ep);
233extern void usb_hcd_disable_endpoint(struct usb_device *udev, 233extern void usb_hcd_disable_endpoint(struct usb_device *udev,
234 struct usb_host_endpoint *ep); 234 struct usb_host_endpoint *ep);
235extern void usb_hcd_synchronize_unlinks(struct usb_device *udev);
235extern int usb_hcd_get_frame_number(struct usb_device *udev); 236extern int usb_hcd_get_frame_number(struct usb_device *udev);
236 237
237extern struct usb_hcd *usb_create_hcd(const struct hc_driver *driver, 238extern struct usb_hcd *usb_create_hcd(const struct hc_driver *driver,
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index d73ce262c365..b19cbfcd51da 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -659,6 +659,9 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
659 PREPARE_DELAYED_WORK(&hub->init_work, hub_init_func2); 659 PREPARE_DELAYED_WORK(&hub->init_work, hub_init_func2);
660 schedule_delayed_work(&hub->init_work, 660 schedule_delayed_work(&hub->init_work,
661 msecs_to_jiffies(delay)); 661 msecs_to_jiffies(delay));
662
663 /* Suppress autosuspend until init is done */
664 to_usb_interface(hub->intfdev)->pm_usage_cnt = 1;
662 return; /* Continues at init2: below */ 665 return; /* Continues at init2: below */
663 } else { 666 } else {
664 hub_power_on(hub, true); 667 hub_power_on(hub, true);
@@ -1429,6 +1432,7 @@ void usb_disconnect(struct usb_device **pdev)
1429 */ 1432 */
1430 dev_dbg (&udev->dev, "unregistering device\n"); 1433 dev_dbg (&udev->dev, "unregistering device\n");
1431 usb_disable_device(udev, 0); 1434 usb_disable_device(udev, 0);
1435 usb_hcd_synchronize_unlinks(udev);
1432 1436
1433 usb_unlock_device(udev); 1437 usb_unlock_device(udev);
1434 1438
@@ -3504,7 +3508,7 @@ int usb_reset_device(struct usb_device *udev)
3504 USB_INTERFACE_BOUND) 3508 USB_INTERFACE_BOUND)
3505 rebind = 1; 3509 rebind = 1;
3506 } 3510 }
3507 if (rebind) 3511 if (ret == 0 && rebind)
3508 usb_rebind_intf(cintf); 3512 usb_rebind_intf(cintf);
3509 } 3513 }
3510 } 3514 }
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 887738577b28..6d1048faf08e 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1091,6 +1091,7 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
1091 continue; 1091 continue;
1092 dev_dbg(&dev->dev, "unregistering interface %s\n", 1092 dev_dbg(&dev->dev, "unregistering interface %s\n",
1093 dev_name(&interface->dev)); 1093 dev_name(&interface->dev));
1094 interface->unregistering = 1;
1094 usb_remove_sysfs_intf_files(interface); 1095 usb_remove_sysfs_intf_files(interface);
1095 device_del(&interface->dev); 1096 device_del(&interface->dev);
1096 } 1097 }
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index f66fba11fbd5..4fb65fdc9dc3 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -840,7 +840,7 @@ int usb_create_sysfs_intf_files(struct usb_interface *intf)
840 struct usb_host_interface *alt = intf->cur_altsetting; 840 struct usb_host_interface *alt = intf->cur_altsetting;
841 int retval; 841 int retval;
842 842
843 if (intf->sysfs_files_created) 843 if (intf->sysfs_files_created || intf->unregistering)
844 return 0; 844 return 0;
845 845
846 /* The interface string may be present in some altsettings 846 /* The interface string may be present in some altsettings
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index f2638009a464..1f68af9db3f7 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -85,8 +85,8 @@ EXPORT_SYMBOL_GPL(usb_alloc_urb);
85 * Must be called when a user of a urb is finished with it. When the last user 85 * Must be called when a user of a urb is finished with it. When the last user
86 * of the urb calls this function, the memory of the urb is freed. 86 * of the urb calls this function, the memory of the urb is freed.
87 * 87 *
88 * Note: The transfer buffer associated with the urb is not freed, that must be 88 * Note: The transfer buffer associated with the urb is not freed unless the
89 * done elsewhere. 89 * URB_FREE_BUFFER transfer flag is set.
90 */ 90 */
91void usb_free_urb(struct urb *urb) 91void usb_free_urb(struct urb *urb)
92{ 92{
@@ -474,6 +474,12 @@ EXPORT_SYMBOL_GPL(usb_submit_urb);
474 * indicating that the request has been canceled (rather than any other 474 * indicating that the request has been canceled (rather than any other
475 * code). 475 * code).
476 * 476 *
477 * Drivers should not call this routine or related routines, such as
478 * usb_kill_urb() or usb_unlink_anchored_urbs(), after their disconnect
479 * method has returned. The disconnect function should synchronize with
480 * a driver's I/O routines to insure that all URB-related activity has
481 * completed before it returns.
482 *
477 * This request is always asynchronous. Success is indicated by 483 * This request is always asynchronous. Success is indicated by
478 * returning -EINPROGRESS, at which time the URB will probably not yet 484 * returning -EINPROGRESS, at which time the URB will probably not yet
479 * have been given back to the device driver. When it is eventually 485 * have been given back to the device driver. When it is eventually
@@ -550,6 +556,9 @@ EXPORT_SYMBOL_GPL(usb_unlink_urb);
550 * This routine may not be used in an interrupt context (such as a bottom 556 * This routine may not be used in an interrupt context (such as a bottom
551 * half or a completion handler), or when holding a spinlock, or in other 557 * half or a completion handler), or when holding a spinlock, or in other
552 * situations where the caller can't schedule(). 558 * situations where the caller can't schedule().
559 *
560 * This routine should not be called by a driver after its disconnect
561 * method has returned.
553 */ 562 */
554void usb_kill_urb(struct urb *urb) 563void usb_kill_urb(struct urb *urb)
555{ 564{
@@ -588,6 +597,9 @@ EXPORT_SYMBOL_GPL(usb_kill_urb);
588 * This routine may not be used in an interrupt context (such as a bottom 597 * This routine may not be used in an interrupt context (such as a bottom
589 * half or a completion handler), or when holding a spinlock, or in other 598 * half or a completion handler), or when holding a spinlock, or in other
590 * situations where the caller can't schedule(). 599 * situations where the caller can't schedule().
600 *
601 * This routine should not be called by a driver after its disconnect
602 * method has returned.
591 */ 603 */
592void usb_poison_urb(struct urb *urb) 604void usb_poison_urb(struct urb *urb)
593{ 605{
@@ -622,6 +634,9 @@ EXPORT_SYMBOL_GPL(usb_unpoison_urb);
622 * 634 *
623 * this allows all outstanding URBs to be killed starting 635 * this allows all outstanding URBs to be killed starting
624 * from the back of the queue 636 * from the back of the queue
637 *
638 * This routine should not be called by a driver after its disconnect
639 * method has returned.
625 */ 640 */
626void usb_kill_anchored_urbs(struct usb_anchor *anchor) 641void usb_kill_anchored_urbs(struct usb_anchor *anchor)
627{ 642{
@@ -651,6 +666,9 @@ EXPORT_SYMBOL_GPL(usb_kill_anchored_urbs);
651 * this allows all outstanding URBs to be poisoned starting 666 * this allows all outstanding URBs to be poisoned starting
652 * from the back of the queue. Newly added URBs will also be 667 * from the back of the queue. Newly added URBs will also be
653 * poisoned 668 * poisoned
669 *
670 * This routine should not be called by a driver after its disconnect
671 * method has returned.
654 */ 672 */
655void usb_poison_anchored_urbs(struct usb_anchor *anchor) 673void usb_poison_anchored_urbs(struct usb_anchor *anchor)
656{ 674{
@@ -672,6 +690,7 @@ void usb_poison_anchored_urbs(struct usb_anchor *anchor)
672 spin_unlock_irq(&anchor->lock); 690 spin_unlock_irq(&anchor->lock);
673} 691}
674EXPORT_SYMBOL_GPL(usb_poison_anchored_urbs); 692EXPORT_SYMBOL_GPL(usb_poison_anchored_urbs);
693
675/** 694/**
676 * usb_unlink_anchored_urbs - asynchronously cancel transfer requests en masse 695 * usb_unlink_anchored_urbs - asynchronously cancel transfer requests en masse
677 * @anchor: anchor the requests are bound to 696 * @anchor: anchor the requests are bound to
@@ -680,6 +699,9 @@ EXPORT_SYMBOL_GPL(usb_poison_anchored_urbs);
680 * from the back of the queue. This function is asynchronous. 699 * from the back of the queue. This function is asynchronous.
681 * The unlinking is just triggered. It may happen after this 700 * The unlinking is just triggered. It may happen after this
682 * function has returned. 701 * function has returned.
702 *
703 * This routine should not be called by a driver after its disconnect
704 * method has returned.
683 */ 705 */
684void usb_unlink_anchored_urbs(struct usb_anchor *anchor) 706void usb_unlink_anchored_urbs(struct usb_anchor *anchor)
685{ 707{
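The new kernel-doc text above spells out the rule: a driver may not call usb_kill_urb(), usb_unlink_urb() or their anchored variants once its disconnect() method has returned; disconnect() itself must synchronize with the driver's I/O paths so all URB activity is finished first. A hedged sketch of a disconnect handler that follows the rule by draining an anchor; the demo_* driver is hypothetical:

	#include <linux/slab.h>
	#include <linux/usb.h>

	struct demo_dev {
		struct usb_anchor submitted;       /* every submitted URB is anchored here */
	};

	static void demo_disconnect(struct usb_interface *intf)
	{
		struct demo_dev *dev = usb_get_intfdata(intf);

		usb_set_intfdata(intf, NULL);
		/* Cancel all outstanding I/O synchronously *before* returning;
		 * per the comments above, no URB cancellation may happen after
		 * disconnect() has returned. */
		usb_kill_anchored_urbs(&dev->submitted);
		kfree(dev);
	}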
diff --git a/drivers/usb/gadget/config.c b/drivers/usb/gadget/config.c
index 1ca1c326392a..e1191b9a316a 100644
--- a/drivers/usb/gadget/config.c
+++ b/drivers/usb/gadget/config.c
@@ -168,7 +168,7 @@ usb_copy_descriptors(struct usb_descriptor_header **src)
168 * usb_find_endpoint - find a copy of an endpoint descriptor 168 * usb_find_endpoint - find a copy of an endpoint descriptor
169 * @src: original vector of descriptors 169 * @src: original vector of descriptors
170 * @copy: copy of @src 170 * @copy: copy of @src
171 * @ep: endpoint descriptor found in @src 171 * @match: endpoint descriptor found in @src
172 * 172 *
173 * This returns the copy of the @match descriptor made for @copy. Its 173 * This returns the copy of the @match descriptor made for @copy. Its
174 * intended use is to help remembering the endpoint descriptor to use 174 * intended use is to help remembering the endpoint descriptor to use
diff --git a/drivers/usb/gadget/f_acm.c b/drivers/usb/gadget/f_acm.c
index 5ee1590b8e9c..c1d34df0b157 100644
--- a/drivers/usb/gadget/f_acm.c
+++ b/drivers/usb/gadget/f_acm.c
@@ -463,7 +463,11 @@ static int acm_cdc_notify(struct f_acm *acm, u8 type, u16 value,
463 notify->wLength = cpu_to_le16(length); 463 notify->wLength = cpu_to_le16(length);
464 memcpy(buf, data, length); 464 memcpy(buf, data, length);
465 465
466 /* ep_queue() can complete immediately if it fills the fifo... */
467 spin_unlock(&acm->lock);
466 status = usb_ep_queue(ep, req, GFP_ATOMIC); 468 status = usb_ep_queue(ep, req, GFP_ATOMIC);
469 spin_lock(&acm->lock);
470
467 if (status < 0) { 471 if (status < 0) {
468 ERROR(acm->port.func.config->cdev, 472 ERROR(acm->port.func.config->cdev,
469 "acm ttyGS%d can't notify serial state, %d\n", 473 "acm ttyGS%d can't notify serial state, %d\n",
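The f_acm hunk drops acm->lock around usb_ep_queue() because the request's completion handler can be invoked synchronously from inside the queue call when it fills the FIFO, and that handler takes acm->lock itself, so queueing with the lock held risks a self-deadlock. A sketch of the unlock/call/relock shape, assuming a hypothetical demo_port that is entered and left with its lock held, like acm_cdc_notify():

	#include <linux/spinlock.h>
	#include <linux/usb/gadget.h>

	struct demo_port {
		spinlock_t lock;
	};

	/* Called -- and returns -- with dev->lock held. */
	static int demo_notify(struct demo_port *dev, struct usb_ep *ep,
			       struct usb_request *req)
	{
		int status;

		/* The completion handler may run from inside usb_ep_queue()
		 * and takes dev->lock itself, so drop the lock around the call. */
		spin_unlock(&dev->lock);
		status = usb_ep_queue(ep, req, GFP_ATOMIC);
		spin_lock(&dev->lock);

		return status;
	}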
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
index 659b3d9671c4..3a8bb53fc473 100644
--- a/drivers/usb/gadget/f_rndis.c
+++ b/drivers/usb/gadget/f_rndis.c
@@ -172,7 +172,6 @@ static struct usb_interface_descriptor rndis_data_intf __initdata = {
172 .bDescriptorType = USB_DT_INTERFACE, 172 .bDescriptorType = USB_DT_INTERFACE,
173 173
174 /* .bInterfaceNumber = DYNAMIC */ 174 /* .bInterfaceNumber = DYNAMIC */
175 .bAlternateSetting = 1,
176 .bNumEndpoints = 2, 175 .bNumEndpoints = 2,
177 .bInterfaceClass = USB_CLASS_CDC_DATA, 176 .bInterfaceClass = USB_CLASS_CDC_DATA,
178 .bInterfaceSubClass = 0, 177 .bInterfaceSubClass = 0,
@@ -303,7 +302,7 @@ static void rndis_response_available(void *_rndis)
303 __le32 *data = req->buf; 302 __le32 *data = req->buf;
304 int status; 303 int status;
305 304
306 if (atomic_inc_return(&rndis->notify_count)) 305 if (atomic_inc_return(&rndis->notify_count) != 1)
307 return; 306 return;
308 307
309 /* Send RNDIS RESPONSE_AVAILABLE notification; a 308 /* Send RNDIS RESPONSE_AVAILABLE notification; a
@@ -652,6 +651,8 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
652 fs_in_desc.bEndpointAddress; 651 fs_in_desc.bEndpointAddress;
653 hs_out_desc.bEndpointAddress = 652 hs_out_desc.bEndpointAddress =
654 fs_out_desc.bEndpointAddress; 653 fs_out_desc.bEndpointAddress;
654 hs_notify_desc.bEndpointAddress =
655 fs_notify_desc.bEndpointAddress;
655 656
656 /* copy descriptors, and track endpoint copies */ 657 /* copy descriptors, and track endpoint copies */
657 f->hs_descriptors = usb_copy_descriptors(eth_hs_function); 658 f->hs_descriptors = usb_copy_descriptors(eth_hs_function);
@@ -663,6 +664,8 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
663 f->hs_descriptors, &hs_in_desc); 664 f->hs_descriptors, &hs_in_desc);
664 rndis->hs.out = usb_find_endpoint(eth_hs_function, 665 rndis->hs.out = usb_find_endpoint(eth_hs_function,
665 f->hs_descriptors, &hs_out_desc); 666 f->hs_descriptors, &hs_out_desc);
667 rndis->hs.notify = usb_find_endpoint(eth_hs_function,
668 f->hs_descriptors, &hs_notify_desc);
666 } 669 }
667 670
668 rndis->port.open = rndis_open; 671 rndis->port.open = rndis_open;
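In the f_rndis hunk, atomic_inc_return() yields the new, always positive value, so the old test "if (atomic_inc_return(&rndis->notify_count))" appears to have returned early on every call and the RESPONSE_AVAILABLE notification below it could never be queued from this path. The "!= 1" form sends the notification only on the 0 -> 1 transition and coalesces further responses until the count drains. A small self-contained illustration of that transition test (a plain counter stands in for the atomic_t):

	#include <stdio.h>

	static int notify_count;

	static void response_available(void)
	{
		if (++notify_count != 1)   /* broken version: if (++notify_count) */
			return;            /* a notification is already in flight */
		printf("queue RESPONSE_AVAILABLE\n");
	}

	int main(void)
	{
		response_available();      /* queues the notification */
		response_available();      /* coalesced */
		response_available();      /* coalesced */
		return 0;
	}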
diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c
index 1fe8b44787b3..b3408ff39fba 100644
--- a/drivers/usb/gadget/fsl_qe_udc.c
+++ b/drivers/usb/gadget/fsl_qe_udc.c
@@ -2363,6 +2363,9 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
2363 nuke(loop_ep, -ESHUTDOWN); 2363 nuke(loop_ep, -ESHUTDOWN);
2364 spin_unlock_irqrestore(&udc_controller->lock, flags); 2364 spin_unlock_irqrestore(&udc_controller->lock, flags);
2365 2365
2366 /* report disconnect; the controller is already quiesced */
2367 driver->disconnect(&udc_controller->gadget);
2368
2366 /* unbind gadget and unhook driver. */ 2369 /* unbind gadget and unhook driver. */
2367 driver->unbind(&udc_controller->gadget); 2370 driver->unbind(&udc_controller->gadget);
2368 udc_controller->gadget.dev.driver = NULL; 2371 udc_controller->gadget.dev.driver = NULL;
diff --git a/drivers/usb/gadget/fsl_usb2_udc.c b/drivers/usb/gadget/fsl_usb2_udc.c
index 091bb55c9aa7..f3c6703cffda 100644
--- a/drivers/usb/gadget/fsl_usb2_udc.c
+++ b/drivers/usb/gadget/fsl_usb2_udc.c
@@ -1836,6 +1836,9 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
1836 nuke(loop_ep, -ESHUTDOWN); 1836 nuke(loop_ep, -ESHUTDOWN);
1837 spin_unlock_irqrestore(&udc_controller->lock, flags); 1837 spin_unlock_irqrestore(&udc_controller->lock, flags);
1838 1838
1839 /* report disconnect; the controller is already quiesced */
1840 driver->disconnect(&udc_controller->gadget);
1841
1839 /* unbind gadget and unhook driver. */ 1842 /* unbind gadget and unhook driver. */
1840 driver->unbind(&udc_controller->gadget); 1843 driver->unbind(&udc_controller->gadget);
1841 udc_controller->gadget.dev.driver = NULL; 1844 udc_controller->gadget.dev.driver = NULL;
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index f4585d3e90d7..eeb26c0f88e5 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -1251,7 +1251,6 @@ dev_release (struct inode *inode, struct file *fd)
1251 * alternatively, all host requests will time out. 1251 * alternatively, all host requests will time out.
1252 */ 1252 */
1253 1253
1254 fasync_helper (-1, fd, 0, &dev->fasync);
1255 kfree (dev->buf); 1254 kfree (dev->buf);
1256 dev->buf = NULL; 1255 dev->buf = NULL;
1257 put_dev (dev); 1256 put_dev (dev);
diff --git a/drivers/usb/gadget/pxa25x_udc.c b/drivers/usb/gadget/pxa25x_udc.c
index da6e93c201d2..2dbc0db0b46c 100644
--- a/drivers/usb/gadget/pxa25x_udc.c
+++ b/drivers/usb/gadget/pxa25x_udc.c
@@ -141,7 +141,11 @@ static int is_vbus_present(void)
141 141
142 if (mach->gpio_vbus) { 142 if (mach->gpio_vbus) {
143 int value = gpio_get_value(mach->gpio_vbus); 143 int value = gpio_get_value(mach->gpio_vbus);
144 return mach->gpio_vbus_inverted ? !value : value; 144
145 if (mach->gpio_vbus_inverted)
146 return !value;
147 else
148 return !!value;
145 } 149 }
146 if (mach->udc_is_connected) 150 if (mach->udc_is_connected)
147 return mach->udc_is_connected(); 151 return mach->udc_is_connected();
@@ -982,7 +986,7 @@ static int pxa25x_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
982 struct pxa25x_udc *udc; 986 struct pxa25x_udc *udc;
983 987
984 udc = container_of(_gadget, struct pxa25x_udc, gadget); 988 udc = container_of(_gadget, struct pxa25x_udc, gadget);
985 udc->vbus = (is_active != 0); 989 udc->vbus = is_active;
986 DMSG("vbus %s\n", is_active ? "supplied" : "inactive"); 990 DMSG("vbus %s\n", is_active ? "supplied" : "inactive");
987 pullup(udc); 991 pullup(udc);
988 return 0; 992 return 0;
@@ -1399,12 +1403,8 @@ lubbock_vbus_irq(int irq, void *_dev)
1399static irqreturn_t udc_vbus_irq(int irq, void *_dev) 1403static irqreturn_t udc_vbus_irq(int irq, void *_dev)
1400{ 1404{
1401 struct pxa25x_udc *dev = _dev; 1405 struct pxa25x_udc *dev = _dev;
1402 int vbus = gpio_get_value(dev->mach->gpio_vbus);
1403 1406
1404 if (dev->mach->gpio_vbus_inverted) 1407 pxa25x_udc_vbus_session(&dev->gadget, is_vbus_present());
1405 vbus = !vbus;
1406
1407 pxa25x_udc_vbus_session(&dev->gadget, vbus);
1408 return IRQ_HANDLED; 1408 return IRQ_HANDLED;
1409} 1409}
1410 1410
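The pxa25x hunks above make is_vbus_present() return !!value instead of the raw GPIO reading, and then let pxa25x_udc_vbus_session() store the value directly (udc->vbus = is_active). The normalization matters because, in this driver, vbus is a one-bit flag, so any value with bit 0 clear would otherwise be truncated to zero. A standalone illustration of the truncation hazard (plain C, assuming a one-bit bitfield like the driver's):

    #include <stdio.h>

    struct fake_udc {
            unsigned vbus:1;        /* stand-in for the driver's 1-bit flag */
    };

    int main(void)
    {
            struct fake_udc udc = { 0 };
            int raw = 2;            /* e.g. a GPIO read with only bit 1 set */

            udc.vbus = raw;         /* truncated to the low bit: stores 0   */
            printf("raw assign : %u\n", (unsigned)udc.vbus);

            udc.vbus = !!raw;       /* normalized to 0/1: stores 1          */
            printf("normalized : %u\n", (unsigned)udc.vbus);
            return 0;
    }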
diff --git a/drivers/usb/gadget/pxa27x_udc.c b/drivers/usb/gadget/pxa27x_udc.c
index bcf375ca3d72..caa37c95802c 100644
--- a/drivers/usb/gadget/pxa27x_udc.c
+++ b/drivers/usb/gadget/pxa27x_udc.c
@@ -650,7 +650,7 @@ pxa_ep_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
650 struct pxa27x_request *req; 650 struct pxa27x_request *req;
651 651
652 req = kzalloc(sizeof *req, gfp_flags); 652 req = kzalloc(sizeof *req, gfp_flags);
653 if (!req || !_ep) 653 if (!req)
654 return NULL; 654 return NULL;
655 655
656 INIT_LIST_HEAD(&req->queue); 656 INIT_LIST_HEAD(&req->queue);
diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c
index 48f51b12d2e2..00ba06b44752 100644
--- a/drivers/usb/gadget/s3c2410_udc.c
+++ b/drivers/usb/gadget/s3c2410_udc.c
@@ -1894,11 +1894,8 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
1894 udc->regs_info = debugfs_create_file("registers", S_IRUGO, 1894 udc->regs_info = debugfs_create_file("registers", S_IRUGO,
1895 s3c2410_udc_debugfs_root, 1895 s3c2410_udc_debugfs_root,
1896 udc, &s3c2410_udc_debugfs_fops); 1896 udc, &s3c2410_udc_debugfs_fops);
1897 if (IS_ERR(udc->regs_info)) { 1897 if (!udc->regs_info)
1898 dev_warn(dev, "debugfs file creation failed %ld\n", 1898 dev_warn(dev, "debugfs file creation failed\n");
1899 PTR_ERR(udc->regs_info));
1900 udc->regs_info = NULL;
1901 }
1902 } 1899 }
1903 1900
1904 dev_dbg(dev, "probe ok\n"); 1901 dev_dbg(dev, "probe ok\n");
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 228797e54f9c..f3a75a929e0a 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -110,35 +110,23 @@ config USB_ISP116X_HCD
110 110
111config USB_ISP1760_HCD 111config USB_ISP1760_HCD
112 tristate "ISP 1760 HCD support" 112 tristate "ISP 1760 HCD support"
113 depends on USB && EXPERIMENTAL 113 depends on USB && EXPERIMENTAL && (PCI || PPC_OF)
114 ---help--- 114 ---help---
115 The ISP1760 chip is a USB 2.0 host controller. 115 The ISP1760 chip is a USB 2.0 host controller.
116 116
117 This driver does not support isochronous transfers or OTG. 117 This driver does not support isochronous transfers or OTG.
 118 This USB controller is usually attached to a non-DMA-Master
 119 capable bus. NXP's eval kit ships this chip on a PCI card
 120 where the chip itself sits behind a PLB to simulate such
 121 a bus.
118 122
119 To compile this driver as a module, choose M here: the 123 To compile this driver as a module, choose M here: the
120 module will be called isp1760-hcd. 124 module will be called isp1760.
121
122config USB_ISP1760_PCI
123 bool "Support for the PCI bus"
124 depends on USB_ISP1760_HCD && PCI
125 ---help---
126 Enables support for the device present on the PCI bus.
127 This should only be required if you happen to have the eval kit from
128 NXP and you are going to test it.
129
130config USB_ISP1760_OF
131 bool "Support for the OF platform bus"
132 depends on USB_ISP1760_HCD && PPC_OF
133 ---help---
134 Enables support for the device present on the PowerPC
135 OpenFirmware platform bus.
136 125
137config USB_OHCI_HCD 126config USB_OHCI_HCD
138 tristate "OHCI HCD support" 127 tristate "OHCI HCD support"
139 depends on USB && USB_ARCH_HAS_OHCI 128 depends on USB && USB_ARCH_HAS_OHCI
140 select ISP1301_OMAP if MACH_OMAP_H2 || MACH_OMAP_H3 129 select ISP1301_OMAP if MACH_OMAP_H2 || MACH_OMAP_H3
141 select I2C if ARCH_PNX4008
142 ---help--- 130 ---help---
143 The Open Host Controller Interface (OHCI) is a standard for accessing 131 The Open Host Controller Interface (OHCI) is a standard for accessing
144 USB 1.1 host controller hardware. It does more in hardware than Intel's 132 USB 1.1 host controller hardware. It does more in hardware than Intel's
@@ -305,3 +293,31 @@ config SUPERH_ON_CHIP_R8A66597
305 help 293 help
306 This driver enables support for the on-chip R8A66597 in the 294 This driver enables support for the on-chip R8A66597 in the
307 SH7366 and SH7723 processors. 295 SH7366 and SH7723 processors.
296
297config USB_WHCI_HCD
298 tristate "Wireless USB Host Controller Interface (WHCI) driver (EXPERIMENTAL)"
299 depends on EXPERIMENTAL
300 depends on PCI && USB
301 select USB_WUSB
302 select UWB_WHCI
303 help
304 A driver for PCI-based Wireless USB Host Controllers that are
305 compliant with the WHCI specification.
306
 307 To compile this driver as a module, choose M here: the module
308 will be called "whci-hcd".
309
310config USB_HWA_HCD
311 tristate "Host Wire Adapter (HWA) driver (EXPERIMENTAL)"
312 depends on EXPERIMENTAL
313 depends on USB
314 select USB_WUSB
315 select UWB_HWA
316 help
317 This driver enables you to connect Wireless USB devices to
318 your system using a Host Wire Adaptor USB dongle. This is an
319 UWB Radio Controller and WUSB Host Controller connected to
320 your machine via USB (specified in WUSB1.0).
321
 322 To compile this driver as a module, choose M here: the module
323 will be called "hwa-hc".
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index f1edda2dcfde..23be22224044 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -8,6 +8,8 @@ endif
8 8
9isp1760-objs := isp1760-hcd.o isp1760-if.o 9isp1760-objs := isp1760-hcd.o isp1760-if.o
10 10
11obj-$(CONFIG_USB_WHCI_HCD) += whci/
12
11obj-$(CONFIG_PCI) += pci-quirks.o 13obj-$(CONFIG_PCI) += pci-quirks.o
12 14
13obj-$(CONFIG_USB_EHCI_HCD) += ehci-hcd.o 15obj-$(CONFIG_USB_EHCI_HCD) += ehci-hcd.o
@@ -19,3 +21,4 @@ obj-$(CONFIG_USB_SL811_CS) += sl811_cs.o
19obj-$(CONFIG_USB_U132_HCD) += u132-hcd.o 21obj-$(CONFIG_USB_U132_HCD) += u132-hcd.o
20obj-$(CONFIG_USB_R8A66597_HCD) += r8a66597-hcd.o 22obj-$(CONFIG_USB_R8A66597_HCD) += r8a66597-hcd.o
21obj-$(CONFIG_USB_ISP1760_HCD) += isp1760.o 23obj-$(CONFIG_USB_ISP1760_HCD) += isp1760.o
24obj-$(CONFIG_USB_HWA_HCD) += hwa-hc.o
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 15a803b206b8..4725d15d096f 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -643,7 +643,7 @@ static int ehci_run (struct usb_hcd *hcd)
643static irqreturn_t ehci_irq (struct usb_hcd *hcd) 643static irqreturn_t ehci_irq (struct usb_hcd *hcd)
644{ 644{
645 struct ehci_hcd *ehci = hcd_to_ehci (hcd); 645 struct ehci_hcd *ehci = hcd_to_ehci (hcd);
646 u32 status, pcd_status = 0, cmd; 646 u32 status, masked_status, pcd_status = 0, cmd;
647 int bh; 647 int bh;
648 648
649 spin_lock (&ehci->lock); 649 spin_lock (&ehci->lock);
@@ -656,14 +656,14 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
656 goto dead; 656 goto dead;
657 } 657 }
658 658
659 status &= INTR_MASK; 659 masked_status = status & INTR_MASK;
660 if (!status) { /* irq sharing? */ 660 if (!masked_status) { /* irq sharing? */
661 spin_unlock(&ehci->lock); 661 spin_unlock(&ehci->lock);
662 return IRQ_NONE; 662 return IRQ_NONE;
663 } 663 }
664 664
665 /* clear (just) interrupts */ 665 /* clear (just) interrupts */
666 ehci_writel(ehci, status, &ehci->regs->status); 666 ehci_writel(ehci, masked_status, &ehci->regs->status);
667 cmd = ehci_readl(ehci, &ehci->regs->command); 667 cmd = ehci_readl(ehci, &ehci->regs->command);
668 bh = 0; 668 bh = 0;
669 669
@@ -734,18 +734,17 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
734 734
735 /* PCI errors [4.15.2.4] */ 735 /* PCI errors [4.15.2.4] */
736 if (unlikely ((status & STS_FATAL) != 0)) { 736 if (unlikely ((status & STS_FATAL) != 0)) {
737 ehci_err(ehci, "fatal error\n");
737 dbg_cmd(ehci, "fatal", cmd); 738 dbg_cmd(ehci, "fatal", cmd);
738 dbg_status(ehci, "fatal", status); 739 dbg_status(ehci, "fatal", status);
739 if (status & STS_HALT) { 740 ehci_halt(ehci);
740 ehci_err (ehci, "fatal error\n");
741dead: 741dead:
742 ehci_reset (ehci); 742 ehci_reset(ehci);
743 ehci_writel(ehci, 0, &ehci->regs->configured_flag); 743 ehci_writel(ehci, 0, &ehci->regs->configured_flag);
744 /* generic layer kills/unlinks all urbs, then 744 /* generic layer kills/unlinks all urbs, then
745 * uses ehci_stop to clean up the rest 745 * uses ehci_stop to clean up the rest
746 */ 746 */
747 bh = 1; 747 bh = 1;
748 }
749 } 748 }
750 749
751 if (bh) 750 if (bh)
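The ehci_irq() hunks above keep the full status word for diagnostics while dispatching and acknowledging only the bits in INTR_MASK, and on STS_FATAL they now log, halt and reset unconditionally instead of only when STS_HALT was already set. A compilable sketch of the underlying read/mask/ack-only-what-you-handle pattern; read_reg(), write_reg(), handle_events() and ENABLED_IRQS are placeholders, not the EHCI definitions:

    #include <stdint.h>

    #define ENABLED_IRQS 0x3f                        /* placeholder mask */

    /* Hypothetical MMIO accessors standing in for ehci_readl()/ehci_writel(). */
    static uint32_t read_reg(volatile uint32_t *reg)              { return *reg; }
    static void     write_reg(volatile uint32_t *reg, uint32_t v) { *reg = v; }
    static void     handle_events(uint32_t status)                { (void)status; }

    /* Returns 1 if the interrupt was ours, 0 if it belongs to a sharer. */
    static int example_irq(volatile uint32_t *status_reg)
    {
            uint32_t status, masked_status;

            status = read_reg(status_reg);          /* full status word     */
            masked_status = status & ENABLED_IRQS;  /* only bits we handle  */
            if (!masked_status)
                    return 0;                       /* shared IRQ, not ours */

            /* Acknowledge only the handled bits; the unmasked 'status' is
             * still available for reporting (e.g. a fatal-error path). */
            write_reg(status_reg, masked_status);

            handle_events(status);
            return 1;
    }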
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index c46a58f9181d..36864f958444 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -66,6 +66,8 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
66{ 66{
67 struct ehci_hcd *ehci = hcd_to_ehci(hcd); 67 struct ehci_hcd *ehci = hcd_to_ehci(hcd);
68 struct pci_dev *pdev = to_pci_dev(hcd->self.controller); 68 struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
69 struct pci_dev *p_smbus;
70 u8 rev;
69 u32 temp; 71 u32 temp;
70 int retval; 72 int retval;
71 73
@@ -166,6 +168,28 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
166 pci_write_config_byte(pdev, 0x4b, tmp | 0x20); 168 pci_write_config_byte(pdev, 0x4b, tmp | 0x20);
167 } 169 }
168 break; 170 break;
171 case PCI_VENDOR_ID_ATI:
172 /* SB600 and old version of SB700 have a bug in EHCI controller,
173 * which causes usb devices lose response in some cases.
174 */
175 if ((pdev->device == 0x4386) || (pdev->device == 0x4396)) {
176 p_smbus = pci_get_device(PCI_VENDOR_ID_ATI,
177 PCI_DEVICE_ID_ATI_SBX00_SMBUS,
178 NULL);
179 if (!p_smbus)
180 break;
181 rev = p_smbus->revision;
182 if ((pdev->device == 0x4386) || (rev == 0x3a)
183 || (rev == 0x3b)) {
184 u8 tmp;
185 ehci_info(ehci, "applying AMD SB600/SB700 USB "
186 "freeze workaround\n");
187 pci_read_config_byte(pdev, 0x53, &tmp);
188 pci_write_config_byte(pdev, 0x53, tmp | (1<<3));
189 }
190 pci_dev_put(p_smbus);
191 }
192 break;
169 } 193 }
170 194
171 ehci_reset(ehci); 195 ehci_reset(ehci);
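The new PCI_VENDOR_ID_ATI case gates the SB600/SB700 EHCI freeze workaround on the EHCI device ID and, for SB700, on the revision of the companion SMBus device. One detail worth noting: pci_get_device() takes a reference on the device it returns, which must be dropped with pci_dev_put(). A hedged kernel-style sketch of that lookup pattern; EXAMPLE_VENDOR and EXAMPLE_COMPANION are hypothetical constants, not the SBx00 IDs:

    #include <linux/pci.h>

    /* Sketch: decide whether to apply a quirk based on a companion
     * device's revision; IDs and revisions here are placeholders. */
    static bool companion_needs_quirk(void)
    {
            struct pci_dev *companion;
            bool apply = false;

            companion = pci_get_device(EXAMPLE_VENDOR, EXAMPLE_COMPANION, NULL);
            if (!companion)
                    return false;                   /* companion not present    */

            if (companion->revision == 0x3a || companion->revision == 0x3b)
                    apply = true;                   /* example affected steppings */

            pci_dev_put(companion);                 /* drop pci_get_device() ref */
            return apply;
    }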
diff --git a/drivers/usb/host/ehci-ps3.c b/drivers/usb/host/ehci-ps3.c
index 0eba894bcb01..9c9da35abc6c 100644
--- a/drivers/usb/host/ehci-ps3.c
+++ b/drivers/usb/host/ehci-ps3.c
@@ -205,6 +205,7 @@ static int ps3_ehci_remove(struct ps3_system_bus_device *dev)
205 205
206 tmp = hcd->irq; 206 tmp = hcd->irq;
207 207
208 ehci_shutdown(hcd);
208 usb_remove_hcd(hcd); 209 usb_remove_hcd(hcd);
209 210
210 ps3_system_bus_set_driver_data(dev, NULL); 211 ps3_system_bus_set_driver_data(dev, NULL);
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index 4a0c5a78b2ed..a081ee65bde6 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -918,7 +918,7 @@ iso_stream_init (
918 */ 918 */
919 stream->usecs = HS_USECS_ISO (maxp); 919 stream->usecs = HS_USECS_ISO (maxp);
920 bandwidth = stream->usecs * 8; 920 bandwidth = stream->usecs * 8;
921 bandwidth /= 1 << (interval - 1); 921 bandwidth /= interval;
922 922
923 } else { 923 } else {
924 u32 addr; 924 u32 addr;
@@ -951,7 +951,7 @@ iso_stream_init (
951 } else 951 } else
952 stream->raw_mask = smask_out [hs_transfers - 1]; 952 stream->raw_mask = smask_out [hs_transfers - 1];
953 bandwidth = stream->usecs + stream->c_usecs; 953 bandwidth = stream->usecs + stream->c_usecs;
954 bandwidth /= 1 << (interval + 2); 954 bandwidth /= interval << 3;
955 955
956 /* stream->splits gets created from raw_mask later */ 956 /* stream->splits gets created from raw_mask later */
957 stream->address = cpu_to_hc32(ehci, addr); 957 stream->address = cpu_to_hc32(ehci, addr);
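The two bandwidth lines above stop treating 'interval' as an encoded bInterval exponent (dividing by a power of two) and divide by the interval value itself, with '<< 3' converting frames to microframes in the split-transaction case. This reads as if callers now pass the decoded period; that interpretation is an assumption taken from the new code rather than stated in the hunk. A small standalone demonstration of why the divisor had to change once the value is already decoded:

    #include <stdio.h>

    int main(void)
    {
            unsigned usecs = 25;        /* example bus time per transaction */
            unsigned interval = 8;      /* decoded period, in microframes   */

            unsigned old_div = 1u << (interval - 1);  /* treats 8 as exponent */
            unsigned new_div = interval;              /* treats 8 as period   */

            /* old: 200 / 128 = 1, far too small; new: 200 / 8 = 25 */
            printf("old: %u\n", usecs * 8 / old_div);
            printf("new: %u\n", usecs * 8 / new_div);
            return 0;
    }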
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index b11798d17ae5..c7d4b5a06bdb 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -183,16 +183,14 @@ timer_action (struct ehci_hcd *ehci, enum ehci_timer_action action)
183 * the async ring; just the I/O watchdog. Note that if a 183 * the async ring; just the I/O watchdog. Note that if a
184 * SHRINK were pending, OFF would never be requested. 184 * SHRINK were pending, OFF would never be requested.
185 */ 185 */
186 enum ehci_timer_action oldactions = ehci->actions; 186 if (timer_pending(&ehci->watchdog)
187 && ((BIT(TIMER_ASYNC_SHRINK) | BIT(TIMER_ASYNC_OFF))
188 & ehci->actions))
189 return;
187 190
188 if (!test_and_set_bit (action, &ehci->actions)) { 191 if (!test_and_set_bit (action, &ehci->actions)) {
189 unsigned long t; 192 unsigned long t;
190 193
191 if (timer_pending(&ehci->watchdog)
192 && ((BIT(TIMER_ASYNC_SHRINK) | BIT(TIMER_ASYNC_OFF))
193 & oldactions))
194 return;
195
196 switch (action) { 194 switch (action) {
197 case TIMER_IO_WATCHDOG: 195 case TIMER_IO_WATCHDOG:
198 t = EHCI_IO_JIFFIES; 196 t = EHCI_IO_JIFFIES;
@@ -208,7 +206,7 @@ timer_action (struct ehci_hcd *ehci, enum ehci_timer_action action)
208 t = DIV_ROUND_UP(EHCI_SHRINK_FRAMES * HZ, 1000) + 1; 206 t = DIV_ROUND_UP(EHCI_SHRINK_FRAMES * HZ, 1000) + 1;
209 break; 207 break;
210 } 208 }
211 mod_timer(&ehci->watchdog, round_jiffies(t + jiffies)); 209 mod_timer(&ehci->watchdog, t + jiffies);
212 } 210 }
213} 211}
214 212
diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
new file mode 100644
index 000000000000..64be4d88df11
--- /dev/null
+++ b/drivers/usb/host/hwa-hc.c
@@ -0,0 +1,925 @@
1/*
2 * Host Wire Adapter:
3 * Driver glue, HWA-specific functions, bridges to WAHC and WUSBHC
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * The HWA driver is a simple layer that forwards requests to the WAHC
 24 * (Wire Adapter Host Controller) or WUSBHC (Wireless USB Host
25 * Controller) layers.
26 *
27 * Host Wire Adapter is the 'WUSB 1.0 standard' name for Wireless-USB
28 * Host Controller that is connected to your system via USB (a USB
 29 * dongle that implements a USB host...). There is also a Device Wire
 30 * Adapter, DWA (Wireless USB hub), that uses the same mechanism for
31 * transferring data (it is after all a USB host connected via
32 * Wireless USB), we have a common layer called Wire Adapter Host
33 * Controller that does all the hard work. The WUSBHC (Wireless USB
34 * Host Controller) is the part common to WUSB Host Controllers, the
35 * HWA and the PCI-based one, that is implemented following the WHCI
36 * spec. All these layers are implemented in ../wusbcore.
37 *
38 * The main functions are hwahc_op_urb_{en,de}queue(), that pass the
39 * job of converting a URB to a Wire Adapter
40 *
41 * Entry points:
42 *
43 * hwahc_driver_*() Driver initialization, registration and
44 * teardown.
45 *
46 * hwahc_probe() New device came up, create an instance for
47 * it [from device enumeration].
48 *
49 * hwahc_disconnect() Remove device instance [from device
50 * enumeration].
51 *
52 * [__]hwahc_op_*() Host-Wire-Adaptor specific functions for
53 * starting/stopping/etc (some might be made also
54 * DWA).
55 */
56#include <linux/kernel.h>
57#include <linux/version.h>
58#include <linux/init.h>
59#include <linux/module.h>
60#include <linux/workqueue.h>
61#include <linux/wait.h>
62#include <linux/completion.h>
63#include "../wusbcore/wa-hc.h"
64#include "../wusbcore/wusbhc.h"
65
66#define D_LOCAL 0
67#include <linux/uwb/debug.h>
68
69struct hwahc {
70 struct wusbhc wusbhc; /* has to be 1st */
71 struct wahc wa;
72 u8 buffer[16]; /* for misc usb transactions */
73};
74
75/**
76 * FIXME should be wusbhc
77 *
78 * NOTE: we need to cache the Cluster ID because later...there is no
79 * way to get it :)
80 */
81static int __hwahc_set_cluster_id(struct hwahc *hwahc, u8 cluster_id)
82{
83 int result;
84 struct wusbhc *wusbhc = &hwahc->wusbhc;
85 struct wahc *wa = &hwahc->wa;
86 struct device *dev = &wa->usb_iface->dev;
87
88 result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
89 WUSB_REQ_SET_CLUSTER_ID,
90 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
91 cluster_id,
92 wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
93 NULL, 0, 1000 /* FIXME: arbitrary */);
94 if (result < 0)
95 dev_err(dev, "Cannot set WUSB Cluster ID to 0x%02x: %d\n",
96 cluster_id, result);
97 else
98 wusbhc->cluster_id = cluster_id;
99 dev_info(dev, "Wireless USB Cluster ID set to 0x%02x\n", cluster_id);
100 return result;
101}
102
103static int __hwahc_op_set_num_dnts(struct wusbhc *wusbhc, u8 interval, u8 slots)
104{
105 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
106 struct wahc *wa = &hwahc->wa;
107
108 return usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
109 WUSB_REQ_SET_NUM_DNTS,
110 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
111 interval << 8 | slots,
112 wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
113 NULL, 0, 1000 /* FIXME: arbitrary */);
114}
115
116/*
117 * Reset a WUSB host controller and wait for it to complete doing it.
118 *
119 * @usb_hcd: Pointer to WUSB Host Controller instance.
120 *
121 */
122static int hwahc_op_reset(struct usb_hcd *usb_hcd)
123{
124 int result;
125 struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
126 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
127 struct device *dev = &hwahc->wa.usb_iface->dev;
128
129 d_fnstart(4, dev, "(hwahc %p)\n", hwahc);
130 mutex_lock(&wusbhc->mutex);
131 wa_nep_disarm(&hwahc->wa);
132 result = __wa_set_feature(&hwahc->wa, WA_RESET);
133 if (result < 0) {
134 dev_err(dev, "error commanding HC to reset: %d\n", result);
135 goto error_unlock;
136 }
137 d_printf(3, dev, "reset: waiting for device to change state\n");
138 result = __wa_wait_status(&hwahc->wa, WA_STATUS_RESETTING, 0);
139 if (result < 0) {
140 dev_err(dev, "error waiting for HC to reset: %d\n", result);
141 goto error_unlock;
142 }
143error_unlock:
144 mutex_unlock(&wusbhc->mutex);
145 d_fnend(4, dev, "(hwahc %p) = %d\n", hwahc, result);
146 return result;
147}
148
149/*
150 * FIXME: break this function up
151 */
152static int hwahc_op_start(struct usb_hcd *usb_hcd)
153{
154 u8 addr;
155 int result;
156 struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
157 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
158 struct device *dev = &hwahc->wa.usb_iface->dev;
159
160 /* Set up a Host Info WUSB Information Element */
161 d_fnstart(4, dev, "(hwahc %p)\n", hwahc);
162 result = -ENOSPC;
163 mutex_lock(&wusbhc->mutex);
164 /* Start the numbering from the top so that the bottom
165 * range of the unauth addr space is used for devices,
166 * the top for HCs; use 0xfe - RC# */
167 addr = wusb_cluster_id_get();
168 if (addr == 0)
169 goto error_cluster_id_get;
170 result = __hwahc_set_cluster_id(hwahc, addr);
171 if (result < 0)
172 goto error_set_cluster_id;
173
174 result = wa_nep_arm(&hwahc->wa, GFP_KERNEL);
175 if (result < 0) {
176 dev_err(dev, "cannot listen to notifications: %d\n", result);
177 goto error_stop;
178 }
179 usb_hcd->uses_new_polling = 1;
180 usb_hcd->poll_rh = 1;
181 usb_hcd->state = HC_STATE_RUNNING;
182 result = 0;
183out:
184 mutex_unlock(&wusbhc->mutex);
185 d_fnend(4, dev, "(hwahc %p) = %d\n", hwahc, result);
186 return result;
187
188error_stop:
189 __wa_stop(&hwahc->wa);
190error_set_cluster_id:
191 wusb_cluster_id_put(wusbhc->cluster_id);
192error_cluster_id_get:
193 goto out;
194
195}
196
197/*
198 * FIXME: break this function up
199 */
200static int __hwahc_op_wusbhc_start(struct wusbhc *wusbhc)
201{
202 int result;
203 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
204 struct device *dev = &hwahc->wa.usb_iface->dev;
205
206 /* Set up a Host Info WUSB Information Element */
207 d_fnstart(4, dev, "(hwahc %p)\n", hwahc);
208 result = -ENOSPC;
209
210 result = __wa_set_feature(&hwahc->wa, WA_ENABLE);
211 if (result < 0) {
212 dev_err(dev, "error commanding HC to start: %d\n", result);
213 goto error_stop;
214 }
215 result = __wa_wait_status(&hwahc->wa, WA_ENABLE, WA_ENABLE);
216 if (result < 0) {
217 dev_err(dev, "error waiting for HC to start: %d\n", result);
218 goto error_stop;
219 }
220 result = 0;
221out:
222 d_fnend(4, dev, "(hwahc %p) = %d\n", hwahc, result);
223 return result;
224
225error_stop:
226 result = __wa_clear_feature(&hwahc->wa, WA_ENABLE);
227 goto out;
228}
229
230static int hwahc_op_suspend(struct usb_hcd *usb_hcd, pm_message_t msg)
231{
232 struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
233 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
234 dev_err(wusbhc->dev, "%s (%p [%p], 0x%lx) UNIMPLEMENTED\n", __func__,
235 usb_hcd, hwahc, *(unsigned long *) &msg);
236 return -ENOSYS;
237}
238
239static int hwahc_op_resume(struct usb_hcd *usb_hcd)
240{
241 struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
242 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
243
244 dev_err(wusbhc->dev, "%s (%p [%p]) UNIMPLEMENTED\n", __func__,
245 usb_hcd, hwahc);
246 return -ENOSYS;
247}
248
249static void __hwahc_op_wusbhc_stop(struct wusbhc *wusbhc)
250{
251 int result;
252 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
253 struct device *dev = &hwahc->wa.usb_iface->dev;
254
255 d_fnstart(4, dev, "(hwahc %p)\n", hwahc);
256 /* Nothing for now */
257 d_fnend(4, dev, "(hwahc %p) = %d\n", hwahc, result);
258 return;
259}
260
261/*
262 * No need to abort pipes, as when this is called, all the children
 263 * have been disconnected and that has done it [through
264 * usb_disable_interface() -> usb_disable_endpoint() ->
 265 * hwahc_op_ep_disable() -> rpipe_ep_disable()].
266 */
267static void hwahc_op_stop(struct usb_hcd *usb_hcd)
268{
269 int result;
270 struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
271 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
272 struct wahc *wa = &hwahc->wa;
273 struct device *dev = &wa->usb_iface->dev;
274
275 d_fnstart(4, dev, "(hwahc %p)\n", hwahc);
276 mutex_lock(&wusbhc->mutex);
277 wusbhc_stop(wusbhc);
278 wa_nep_disarm(&hwahc->wa);
279 result = __wa_stop(&hwahc->wa);
280 wusb_cluster_id_put(wusbhc->cluster_id);
281 mutex_unlock(&wusbhc->mutex);
282 d_fnend(4, dev, "(hwahc %p) = %d\n", hwahc, result);
283 return;
284}
285
286static int hwahc_op_get_frame_number(struct usb_hcd *usb_hcd)
287{
288 struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
289 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
290
291 dev_err(wusbhc->dev, "%s (%p [%p]) UNIMPLEMENTED\n", __func__,
292 usb_hcd, hwahc);
293 return -ENOSYS;
294}
295
296static int hwahc_op_urb_enqueue(struct usb_hcd *usb_hcd, struct urb *urb,
297 gfp_t gfp)
298{
299 struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
300 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
301
302 return wa_urb_enqueue(&hwahc->wa, urb->ep, urb, gfp);
303}
304
305static int hwahc_op_urb_dequeue(struct usb_hcd *usb_hcd, struct urb *urb,
306 int status)
307{
308 struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
309 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
310
311 return wa_urb_dequeue(&hwahc->wa, urb);
312}
313
314/*
315 * Release resources allocated for an endpoint
316 *
317 * If there is an associated rpipe to this endpoint, go ahead and put it.
318 */
319static void hwahc_op_endpoint_disable(struct usb_hcd *usb_hcd,
320 struct usb_host_endpoint *ep)
321{
322 struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
323 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
324
325 rpipe_ep_disable(&hwahc->wa, ep);
326}
327
328/*
329 * Set the UWB MAS allocation for the WUSB cluster
330 *
331 * @stream_index: stream to use (-1 for cancelling the allocation)
332 * @mas: mas bitmap to use
333 */
334static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
335 const struct uwb_mas_bm *mas)
336{
337 int result;
338 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
339 struct wahc *wa = &hwahc->wa;
340 struct device *dev = &wa->usb_iface->dev;
341 u8 mas_le[UWB_NUM_MAS/8];
342
343 /* Set the stream index */
344 result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
345 WUSB_REQ_SET_STREAM_IDX,
346 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
347 stream_index,
348 wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
349 NULL, 0, 1000 /* FIXME: arbitrary */);
350 if (result < 0) {
351 dev_err(dev, "Cannot set WUSB stream index: %d\n", result);
352 goto out;
353 }
354 uwb_mas_bm_copy_le(mas_le, mas);
355 /* Set the MAS allocation */
356 result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
357 WUSB_REQ_SET_WUSB_MAS,
358 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
359 0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
360 mas_le, 32, 1000 /* FIXME: arbitrary */);
361 if (result < 0)
362 dev_err(dev, "Cannot set WUSB MAS allocation: %d\n", result);
363out:
364 return result;
365}
366
367/*
368 * Add an IE to the host's MMC
369 *
370 * @interval: See WUSB1.0[8.5.3.1]
371 * @repeat_cnt: See WUSB1.0[8.5.3.1]
372 * @handle: See WUSB1.0[8.5.3.1]
373 * @wuie: Pointer to the header of the WUSB IE data to add.
374 * MUST BE allocated in a kmalloc buffer (no stack or
375 * vmalloc).
376 *
 377 * NOTE: the format of the WUSB IEs for MMCs is different from the
378 * normal MBOA MAC IEs (IE Id + Length in MBOA MAC vs. Length +
379 * Id in WUSB IEs). Standards...you gotta love'em.
380 */
381static int __hwahc_op_mmcie_add(struct wusbhc *wusbhc, u8 interval,
382 u8 repeat_cnt, u8 handle,
383 struct wuie_hdr *wuie)
384{
385 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
386 struct wahc *wa = &hwahc->wa;
387 u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber;
388
389 return usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
390 WUSB_REQ_ADD_MMC_IE,
391 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
392 interval << 8 | repeat_cnt,
393 handle << 8 | iface_no,
394 wuie, wuie->bLength, 1000 /* FIXME: arbitrary */);
395}
396
397/*
 398 * Remove an IE from the host's MMC
399 *
400 * @handle: See WUSB1.0[8.5.3.1]
401 */
402static int __hwahc_op_mmcie_rm(struct wusbhc *wusbhc, u8 handle)
403{
404 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
405 struct wahc *wa = &hwahc->wa;
406 u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber;
407 return usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
408 WUSB_REQ_REMOVE_MMC_IE,
409 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
410 0, handle << 8 | iface_no,
411 NULL, 0, 1000 /* FIXME: arbitrary */);
412}
413
414/*
415 * Update device information for a given fake port
416 *
417 * @port_idx: Fake port to which device is connected (wusbhc index, not
418 * USB port number).
419 */
420static int __hwahc_op_dev_info_set(struct wusbhc *wusbhc,
421 struct wusb_dev *wusb_dev)
422{
423 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
424 struct wahc *wa = &hwahc->wa;
425 u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber;
426 struct hwa_dev_info *dev_info;
427 int ret;
428
429 /* fill out the Device Info buffer and send it */
430 dev_info = kzalloc(sizeof(struct hwa_dev_info), GFP_KERNEL);
431 if (!dev_info)
432 return -ENOMEM;
433 uwb_mas_bm_copy_le(dev_info->bmDeviceAvailability,
434 &wusb_dev->availability);
435 dev_info->bDeviceAddress = wusb_dev->addr;
436
437 /*
438 * If the descriptors haven't been read yet, use a default PHY
439 * rate of 53.3 Mbit/s only. The correct value will be used
440 * when this will be called again as part of the
441 * authentication process (which occurs after the descriptors
442 * have been read).
443 */
444 if (wusb_dev->wusb_cap_descr)
445 dev_info->wPHYRates = wusb_dev->wusb_cap_descr->wPHYRates;
446 else
447 dev_info->wPHYRates = cpu_to_le16(USB_WIRELESS_PHY_53);
448
449 ret = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
450 WUSB_REQ_SET_DEV_INFO,
451 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
452 0, wusb_dev->port_idx << 8 | iface_no,
453 dev_info, sizeof(struct hwa_dev_info),
454 1000 /* FIXME: arbitrary */);
455 kfree(dev_info);
456 return ret;
457}
458
459/*
460 * Set host's idea of which encryption (and key) method to use when
 461 * talking to a device on a given port.
462 *
463 * If key is NULL, it means disable encryption for that "virtual port"
464 * (used when we disconnect).
465 */
466static int __hwahc_dev_set_key(struct wusbhc *wusbhc, u8 port_idx, u32 tkid,
467 const void *key, size_t key_size,
468 u8 key_idx)
469{
470 int result = -ENOMEM;
471 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
472 struct wahc *wa = &hwahc->wa;
473 u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber;
474 struct usb_key_descriptor *keyd;
475 size_t keyd_len;
476
477 keyd_len = sizeof(*keyd) + key_size;
478 keyd = kzalloc(keyd_len, GFP_KERNEL);
479 if (keyd == NULL)
480 return -ENOMEM;
481
482 keyd->bLength = keyd_len;
483 keyd->bDescriptorType = USB_DT_KEY;
484 keyd->tTKID[0] = (tkid >> 0) & 0xff;
485 keyd->tTKID[1] = (tkid >> 8) & 0xff;
486 keyd->tTKID[2] = (tkid >> 16) & 0xff;
487 memcpy(keyd->bKeyData, key, key_size);
488
489 result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
490 USB_REQ_SET_DESCRIPTOR,
491 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
492 USB_DT_KEY << 8 | key_idx,
493 port_idx << 8 | iface_no,
494 keyd, keyd_len, 1000 /* FIXME: arbitrary */);
495
496 memset(keyd, 0, sizeof(*keyd)); /* clear keys etc. */
497 kfree(keyd);
498 return result;
499}
500
501/*
502 * Set host's idea of which encryption (and key) method to use when
 503 * talking to a device on a given port.
504 *
505 * If key is NULL, it means disable encryption for that "virtual port"
506 * (used when we disconnect).
507 */
508static int __hwahc_op_set_ptk(struct wusbhc *wusbhc, u8 port_idx, u32 tkid,
509 const void *key, size_t key_size)
510{
511 int result = -ENOMEM;
512 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
513 struct wahc *wa = &hwahc->wa;
514 u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber;
515 u8 encryption_value;
516
517 /* Tell the host which key to use to talk to the device */
518 if (key) {
519 u8 key_idx = wusb_key_index(0, WUSB_KEY_INDEX_TYPE_PTK,
520 WUSB_KEY_INDEX_ORIGINATOR_HOST);
521
522 result = __hwahc_dev_set_key(wusbhc, port_idx, tkid,
523 key, key_size, key_idx);
524 if (result < 0)
525 goto error_set_key;
526 encryption_value = wusbhc->ccm1_etd->bEncryptionValue;
527 } else {
528 /* FIXME: this should come from wusbhc->etd[UNSECURE].value */
529 encryption_value = 0;
530 }
531
 532 /* Set the encryption type for communicating with the device */
533 result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
534 USB_REQ_SET_ENCRYPTION,
535 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
536 encryption_value, port_idx << 8 | iface_no,
537 NULL, 0, 1000 /* FIXME: arbitrary */);
538 if (result < 0)
539 dev_err(wusbhc->dev, "Can't set host's WUSB encryption for "
540 "port index %u to %s (value %d): %d\n", port_idx,
541 wusb_et_name(wusbhc->ccm1_etd->bEncryptionType),
542 wusbhc->ccm1_etd->bEncryptionValue, result);
543error_set_key:
544 return result;
545}
546
547/*
548 * Set host's GTK key
549 */
550static int __hwahc_op_set_gtk(struct wusbhc *wusbhc, u32 tkid,
551 const void *key, size_t key_size)
552{
553 u8 key_idx = wusb_key_index(0, WUSB_KEY_INDEX_TYPE_GTK,
554 WUSB_KEY_INDEX_ORIGINATOR_HOST);
555
556 return __hwahc_dev_set_key(wusbhc, 0, tkid, key, key_size, key_idx);
557}
558
559/*
560 * Get the Wire Adapter class-specific descriptor
561 *
562 * NOTE: this descriptor comes with the big bundled configuration
 563 * descriptor that includes the interface and endpoint descriptors, so
564 * we just look for it in the cached copy kept by the USB stack.
565 *
566 * NOTE2: We convert LE fields to CPU order.
567 */
568static int wa_fill_descr(struct wahc *wa)
569{
570 int result;
571 struct device *dev = &wa->usb_iface->dev;
572 char *itr;
573 struct usb_device *usb_dev = wa->usb_dev;
574 struct usb_descriptor_header *hdr;
575 struct usb_wa_descriptor *wa_descr;
576 size_t itr_size, actconfig_idx;
577
578 actconfig_idx = (usb_dev->actconfig - usb_dev->config) /
579 sizeof(usb_dev->config[0]);
580 itr = usb_dev->rawdescriptors[actconfig_idx];
581 itr_size = le16_to_cpu(usb_dev->actconfig->desc.wTotalLength);
582 while (itr_size >= sizeof(*hdr)) {
583 hdr = (struct usb_descriptor_header *) itr;
584 d_printf(3, dev, "Extra device descriptor: "
585 "type %02x/%u bytes @ %zu (%zu left)\n",
586 hdr->bDescriptorType, hdr->bLength,
587 (itr - usb_dev->rawdescriptors[actconfig_idx]),
588 itr_size);
589 if (hdr->bDescriptorType == USB_DT_WIRE_ADAPTER)
590 goto found;
591 itr += hdr->bLength;
592 itr_size -= hdr->bLength;
593 }
594 dev_err(dev, "cannot find Wire Adapter Class descriptor\n");
595 return -ENODEV;
596
597found:
598 result = -EINVAL;
599 if (hdr->bLength > itr_size) { /* is it available? */
600 dev_err(dev, "incomplete Wire Adapter Class descriptor "
601 "(%zu bytes left, %u needed)\n",
602 itr_size, hdr->bLength);
603 goto error;
604 }
605 if (hdr->bLength < sizeof(*wa->wa_descr)) {
606 dev_err(dev, "short Wire Adapter Class descriptor\n");
607 goto error;
608 }
609 wa->wa_descr = wa_descr = (struct usb_wa_descriptor *) hdr;
610 /* Make LE fields CPU order */
611 wa_descr->bcdWAVersion = le16_to_cpu(wa_descr->bcdWAVersion);
612 wa_descr->wNumRPipes = le16_to_cpu(wa_descr->wNumRPipes);
613 wa_descr->wRPipeMaxBlock = le16_to_cpu(wa_descr->wRPipeMaxBlock);
614 if (wa_descr->bcdWAVersion > 0x0100)
615 dev_warn(dev, "Wire Adapter v%d.%d newer than groked v1.0\n",
616 wa_descr->bcdWAVersion & 0xff00 >> 8,
617 wa_descr->bcdWAVersion & 0x00ff);
618 result = 0;
619error:
620 return result;
621}
622
623static struct hc_driver hwahc_hc_driver = {
624 .description = "hwa-hcd",
625 .product_desc = "Wireless USB HWA host controller",
626 .hcd_priv_size = sizeof(struct hwahc) - sizeof(struct usb_hcd),
627 .irq = NULL, /* FIXME */
628 .flags = HCD_USB2, /* FIXME */
629 .reset = hwahc_op_reset,
630 .start = hwahc_op_start,
631 .pci_suspend = hwahc_op_suspend,
632 .pci_resume = hwahc_op_resume,
633 .stop = hwahc_op_stop,
634 .get_frame_number = hwahc_op_get_frame_number,
635 .urb_enqueue = hwahc_op_urb_enqueue,
636 .urb_dequeue = hwahc_op_urb_dequeue,
637 .endpoint_disable = hwahc_op_endpoint_disable,
638
639 .hub_status_data = wusbhc_rh_status_data,
640 .hub_control = wusbhc_rh_control,
641 .bus_suspend = wusbhc_rh_suspend,
642 .bus_resume = wusbhc_rh_resume,
643 .start_port_reset = wusbhc_rh_start_port_reset,
644};
645
646static int hwahc_security_create(struct hwahc *hwahc)
647{
648 int result;
649 struct wusbhc *wusbhc = &hwahc->wusbhc;
650 struct usb_device *usb_dev = hwahc->wa.usb_dev;
651 struct device *dev = &usb_dev->dev;
652 struct usb_security_descriptor *secd;
653 struct usb_encryption_descriptor *etd;
654 void *itr, *top;
655 size_t itr_size, needed, bytes;
656 u8 index;
657 char buf[64];
658
659 /* Find the host's security descriptors in the config descr bundle */
660 index = (usb_dev->actconfig - usb_dev->config) /
661 sizeof(usb_dev->config[0]);
662 itr = usb_dev->rawdescriptors[index];
663 itr_size = le16_to_cpu(usb_dev->actconfig->desc.wTotalLength);
664 top = itr + itr_size;
665 result = __usb_get_extra_descriptor(usb_dev->rawdescriptors[index],
666 le16_to_cpu(usb_dev->actconfig->desc.wTotalLength),
667 USB_DT_SECURITY, (void **) &secd);
668 if (result == -1) {
669 dev_warn(dev, "BUG? WUSB host has no security descriptors\n");
670 return 0;
671 }
672 needed = sizeof(*secd);
673 if (top - (void *)secd < needed) {
674 dev_err(dev, "BUG? Not enough data to process security "
675 "descriptor header (%zu bytes left vs %zu needed)\n",
676 top - (void *) secd, needed);
677 return 0;
678 }
679 needed = le16_to_cpu(secd->wTotalLength);
680 if (top - (void *)secd < needed) {
681 dev_err(dev, "BUG? Not enough data to process security "
682 "descriptors (%zu bytes left vs %zu needed)\n",
683 top - (void *) secd, needed);
684 return 0;
685 }
686 /* Walk over the sec descriptors and store CCM1's on wusbhc */
687 itr = (void *) secd + sizeof(*secd);
688 top = (void *) secd + le16_to_cpu(secd->wTotalLength);
689 index = 0;
690 bytes = 0;
691 while (itr < top) {
692 etd = itr;
693 if (top - itr < sizeof(*etd)) {
694 dev_err(dev, "BUG: bad host security descriptor; "
695 "not enough data (%zu vs %zu left)\n",
696 top - itr, sizeof(*etd));
697 break;
698 }
699 if (etd->bLength < sizeof(*etd)) {
700 dev_err(dev, "BUG: bad host encryption descriptor; "
701 "descriptor is too short "
702 "(%zu vs %zu needed)\n",
703 (size_t)etd->bLength, sizeof(*etd));
704 break;
705 }
706 itr += etd->bLength;
707 bytes += snprintf(buf + bytes, sizeof(buf) - bytes,
708 "%s (0x%02x) ",
709 wusb_et_name(etd->bEncryptionType),
710 etd->bEncryptionValue);
711 wusbhc->ccm1_etd = etd;
712 }
713 dev_info(dev, "supported encryption types: %s\n", buf);
714 if (wusbhc->ccm1_etd == NULL) {
715 dev_err(dev, "E: host doesn't support CCM-1 crypto\n");
716 return 0;
717 }
718 /* Pretty print what we support */
719 return 0;
720}
721
722static void hwahc_security_release(struct hwahc *hwahc)
723{
724 /* nothing to do here so far... */
725}
726
727static int hwahc_create(struct hwahc *hwahc, struct usb_interface *iface)
728{
729 int result;
730 struct device *dev = &iface->dev;
731 struct wusbhc *wusbhc = &hwahc->wusbhc;
732 struct wahc *wa = &hwahc->wa;
733 struct usb_device *usb_dev = interface_to_usbdev(iface);
734
735 wa->usb_dev = usb_get_dev(usb_dev); /* bind the USB device */
736 wa->usb_iface = usb_get_intf(iface);
737 wusbhc->dev = dev;
738 wusbhc->uwb_rc = uwb_rc_get_by_grandpa(iface->dev.parent);
739 if (wusbhc->uwb_rc == NULL) {
740 result = -ENODEV;
741 dev_err(dev, "Cannot get associated UWB Host Controller\n");
742 goto error_rc_get;
743 }
744 result = wa_fill_descr(wa); /* Get the device descriptor */
745 if (result < 0)
746 goto error_fill_descriptor;
747 if (wa->wa_descr->bNumPorts > USB_MAXCHILDREN) {
748 dev_err(dev, "FIXME: USB_MAXCHILDREN too low for WUSB "
749 "adapter (%u ports)\n", wa->wa_descr->bNumPorts);
750 wusbhc->ports_max = USB_MAXCHILDREN;
751 } else {
752 wusbhc->ports_max = wa->wa_descr->bNumPorts;
753 }
754 wusbhc->mmcies_max = wa->wa_descr->bNumMMCIEs;
755 wusbhc->start = __hwahc_op_wusbhc_start;
756 wusbhc->stop = __hwahc_op_wusbhc_stop;
757 wusbhc->mmcie_add = __hwahc_op_mmcie_add;
758 wusbhc->mmcie_rm = __hwahc_op_mmcie_rm;
759 wusbhc->dev_info_set = __hwahc_op_dev_info_set;
760 wusbhc->bwa_set = __hwahc_op_bwa_set;
761 wusbhc->set_num_dnts = __hwahc_op_set_num_dnts;
762 wusbhc->set_ptk = __hwahc_op_set_ptk;
763 wusbhc->set_gtk = __hwahc_op_set_gtk;
764 result = hwahc_security_create(hwahc);
765 if (result < 0) {
766 dev_err(dev, "Can't initialize security: %d\n", result);
767 goto error_security_create;
768 }
769 wa->wusb = wusbhc; /* FIXME: ugly, need to fix */
770 result = wusbhc_create(&hwahc->wusbhc);
771 if (result < 0) {
772 dev_err(dev, "Can't create WUSB HC structures: %d\n", result);
773 goto error_wusbhc_create;
774 }
775 result = wa_create(&hwahc->wa, iface);
776 if (result < 0)
777 goto error_wa_create;
778 return 0;
779
780error_wa_create:
781 wusbhc_destroy(&hwahc->wusbhc);
782error_wusbhc_create:
783 /* WA Descr fill allocs no resources */
784error_security_create:
785error_fill_descriptor:
786 uwb_rc_put(wusbhc->uwb_rc);
787error_rc_get:
788 usb_put_intf(iface);
789 usb_put_dev(usb_dev);
790 return result;
791}
792
793static void hwahc_destroy(struct hwahc *hwahc)
794{
795 struct wusbhc *wusbhc = &hwahc->wusbhc;
796
797 d_fnstart(1, NULL, "(hwahc %p)\n", hwahc);
798 mutex_lock(&wusbhc->mutex);
799 __wa_destroy(&hwahc->wa);
800 wusbhc_destroy(&hwahc->wusbhc);
801 hwahc_security_release(hwahc);
802 hwahc->wusbhc.dev = NULL;
803 uwb_rc_put(wusbhc->uwb_rc);
804 usb_put_intf(hwahc->wa.usb_iface);
805 usb_put_dev(hwahc->wa.usb_dev);
806 mutex_unlock(&wusbhc->mutex);
807 d_fnend(1, NULL, "(hwahc %p) = void\n", hwahc);
808}
809
810static void hwahc_init(struct hwahc *hwahc)
811{
812 wa_init(&hwahc->wa);
813}
814
815static int hwahc_probe(struct usb_interface *usb_iface,
816 const struct usb_device_id *id)
817{
818 int result;
819 struct usb_hcd *usb_hcd;
820 struct wusbhc *wusbhc;
821 struct hwahc *hwahc;
822 struct device *dev = &usb_iface->dev;
823
824 d_fnstart(4, dev, "(%p, %p)\n", usb_iface, id);
825 result = -ENOMEM;
826 usb_hcd = usb_create_hcd(&hwahc_hc_driver, &usb_iface->dev, "wusb-hwa");
827 if (usb_hcd == NULL) {
828 dev_err(dev, "unable to allocate instance\n");
829 goto error_alloc;
830 }
831 usb_hcd->wireless = 1;
832 usb_hcd->flags |= HCD_FLAG_SAW_IRQ;
833 wusbhc = usb_hcd_to_wusbhc(usb_hcd);
834 hwahc = container_of(wusbhc, struct hwahc, wusbhc);
835 hwahc_init(hwahc);
836 result = hwahc_create(hwahc, usb_iface);
837 if (result < 0) {
838 dev_err(dev, "Cannot initialize internals: %d\n", result);
839 goto error_hwahc_create;
840 }
841 result = usb_add_hcd(usb_hcd, 0, 0);
842 if (result < 0) {
843 dev_err(dev, "Cannot add HCD: %d\n", result);
844 goto error_add_hcd;
845 }
846 result = wusbhc_b_create(&hwahc->wusbhc);
847 if (result < 0) {
848 dev_err(dev, "Cannot setup phase B of WUSBHC: %d\n", result);
849 goto error_wusbhc_b_create;
850 }
851 d_fnend(4, dev, "(%p, %p) = 0\n", usb_iface, id);
852 return 0;
853
854error_wusbhc_b_create:
855 usb_remove_hcd(usb_hcd);
856error_add_hcd:
857 hwahc_destroy(hwahc);
858error_hwahc_create:
859 usb_put_hcd(usb_hcd);
860error_alloc:
861 d_fnend(4, dev, "(%p, %p) = %d\n", usb_iface, id, result);
862 return result;
863}
864
865static void hwahc_disconnect(struct usb_interface *usb_iface)
866{
867 struct usb_hcd *usb_hcd;
868 struct wusbhc *wusbhc;
869 struct hwahc *hwahc;
870
871 usb_hcd = usb_get_intfdata(usb_iface);
872 wusbhc = usb_hcd_to_wusbhc(usb_hcd);
873 hwahc = container_of(wusbhc, struct hwahc, wusbhc);
874
875 d_fnstart(1, NULL, "(hwahc %p [usb_iface %p])\n", hwahc, usb_iface);
876 wusbhc_b_destroy(&hwahc->wusbhc);
877 usb_remove_hcd(usb_hcd);
878 hwahc_destroy(hwahc);
879 usb_put_hcd(usb_hcd);
880 d_fnend(1, NULL, "(hwahc %p [usb_iface %p]) = void\n", hwahc,
881 usb_iface);
882}
883
 884/** USB device IDs that we handle */
885static struct usb_device_id hwahc_id_table[] = {
886 /* FIXME: use class labels for this */
887 { USB_INTERFACE_INFO(0xe0, 0x02, 0x01), },
888 {},
889};
890MODULE_DEVICE_TABLE(usb, hwahc_id_table);
891
892static struct usb_driver hwahc_driver = {
893 .name = "hwa-hc",
894 .probe = hwahc_probe,
895 .disconnect = hwahc_disconnect,
896 .id_table = hwahc_id_table,
897};
898
899static int __init hwahc_driver_init(void)
900{
901 int result;
902 result = usb_register(&hwahc_driver);
903 if (result < 0) {
904 printk(KERN_ERR "WA-CDS: Cannot register USB driver: %d\n",
905 result);
906 goto error_usb_register;
907 }
908 return 0;
909
910error_usb_register:
911 return result;
912
913}
914module_init(hwahc_driver_init);
915
916static void __exit hwahc_driver_exit(void)
917{
918 usb_deregister(&hwahc_driver);
919}
920module_exit(hwahc_driver_exit);
921
922
923MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
924MODULE_DESCRIPTION("Host Wired Adapter USB Host Control Driver");
925MODULE_LICENSE("GPL");
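The hwa-hc.c glue added above leans on one layering trick described in its header comment: struct hwahc embeds struct wusbhc as its first member, the usb_hcd private area is treated as that wusbhc, and container_of() then recovers the enclosing hwahc, so every hc_driver and wusbhc callback can reach the wire-adapter state. A self-contained sketch of the embedding pattern with generic names (the macro below is a simplified stand-in for the kernel's container_of, not its actual definition):

    #include <stddef.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct core {                           /* plays the wusbhc role */
            int core_state;
    };

    struct glue {                           /* plays the hwahc role  */
            struct core core;               /* kept first, like wusbhc in hwahc */
            int glue_state;
    };

    /* A callback that is handed only the embedded core can still
     * reach the enclosing glue object. */
    static void op_on_core(struct core *c)
    {
            struct glue *g = container_of(c, struct glue, core);
            g->glue_state++;
    }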
diff --git a/drivers/usb/host/isp1760-if.c b/drivers/usb/host/isp1760-if.c
index af849f596135..b87ca7cf4b37 100644
--- a/drivers/usb/host/isp1760-if.c
+++ b/drivers/usb/host/isp1760-if.c
@@ -14,16 +14,16 @@
14#include "../core/hcd.h" 14#include "../core/hcd.h"
15#include "isp1760-hcd.h" 15#include "isp1760-hcd.h"
16 16
17#ifdef CONFIG_USB_ISP1760_OF 17#ifdef CONFIG_PPC_OF
18#include <linux/of.h> 18#include <linux/of.h>
19#include <linux/of_platform.h> 19#include <linux/of_platform.h>
20#endif 20#endif
21 21
22#ifdef CONFIG_USB_ISP1760_PCI 22#ifdef CONFIG_PCI
23#include <linux/pci.h> 23#include <linux/pci.h>
24#endif 24#endif
25 25
26#ifdef CONFIG_USB_ISP1760_OF 26#ifdef CONFIG_PPC_OF
27static int of_isp1760_probe(struct of_device *dev, 27static int of_isp1760_probe(struct of_device *dev,
28 const struct of_device_id *match) 28 const struct of_device_id *match)
29{ 29{
@@ -128,7 +128,7 @@ static struct of_platform_driver isp1760_of_driver = {
128}; 128};
129#endif 129#endif
130 130
131#ifdef CONFIG_USB_ISP1760_PCI 131#ifdef CONFIG_PCI
132static u32 nxp_pci_io_base; 132static u32 nxp_pci_io_base;
133static u32 iolength; 133static u32 iolength;
134static u32 pci_mem_phy0; 134static u32 pci_mem_phy0;
@@ -288,28 +288,28 @@ static struct pci_driver isp1761_pci_driver = {
288 288
289static int __init isp1760_init(void) 289static int __init isp1760_init(void)
290{ 290{
291 int ret = -ENODEV; 291 int ret;
292 292
293 init_kmem_once(); 293 init_kmem_once();
294 294
295#ifdef CONFIG_USB_ISP1760_OF 295#ifdef CONFIG_PPC_OF
296 ret = of_register_platform_driver(&isp1760_of_driver); 296 ret = of_register_platform_driver(&isp1760_of_driver);
297 if (ret) { 297 if (ret) {
298 deinit_kmem_cache(); 298 deinit_kmem_cache();
299 return ret; 299 return ret;
300 } 300 }
301#endif 301#endif
302#ifdef CONFIG_USB_ISP1760_PCI 302#ifdef CONFIG_PCI
303 ret = pci_register_driver(&isp1761_pci_driver); 303 ret = pci_register_driver(&isp1761_pci_driver);
304 if (ret) 304 if (ret)
305 goto unreg_of; 305 goto unreg_of;
306#endif 306#endif
307 return ret; 307 return ret;
308 308
309#ifdef CONFIG_USB_ISP1760_PCI 309#ifdef CONFIG_PCI
310unreg_of: 310unreg_of:
311#endif 311#endif
312#ifdef CONFIG_USB_ISP1760_OF 312#ifdef CONFIG_PPC_OF
313 of_unregister_platform_driver(&isp1760_of_driver); 313 of_unregister_platform_driver(&isp1760_of_driver);
314#endif 314#endif
315 deinit_kmem_cache(); 315 deinit_kmem_cache();
@@ -319,10 +319,10 @@ module_init(isp1760_init);
319 319
320static void __exit isp1760_exit(void) 320static void __exit isp1760_exit(void)
321{ 321{
322#ifdef CONFIG_USB_ISP1760_OF 322#ifdef CONFIG_PPC_OF
323 of_unregister_platform_driver(&isp1760_of_driver); 323 of_unregister_platform_driver(&isp1760_of_driver);
324#endif 324#endif
325#ifdef CONFIG_USB_ISP1760_PCI 325#ifdef CONFIG_PCI
326 pci_unregister_driver(&isp1761_pci_driver); 326 pci_unregister_driver(&isp1761_pci_driver);
327#endif 327#endif
328 deinit_kmem_cache(); 328 deinit_kmem_cache();
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 8647dab0d7f9..8aa3f4556a32 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -1075,12 +1075,18 @@ MODULE_LICENSE ("GPL");
1075#define SM501_OHCI_DRIVER ohci_hcd_sm501_driver 1075#define SM501_OHCI_DRIVER ohci_hcd_sm501_driver
1076#endif 1076#endif
1077 1077
1078#ifdef CONFIG_MFD_TC6393XB
1079#include "ohci-tmio.c"
1080#define TMIO_OHCI_DRIVER ohci_hcd_tmio_driver
1081#endif
1082
1078#if !defined(PCI_DRIVER) && \ 1083#if !defined(PCI_DRIVER) && \
1079 !defined(PLATFORM_DRIVER) && \ 1084 !defined(PLATFORM_DRIVER) && \
1080 !defined(OF_PLATFORM_DRIVER) && \ 1085 !defined(OF_PLATFORM_DRIVER) && \
1081 !defined(SA1111_DRIVER) && \ 1086 !defined(SA1111_DRIVER) && \
1082 !defined(PS3_SYSTEM_BUS_DRIVER) && \ 1087 !defined(PS3_SYSTEM_BUS_DRIVER) && \
1083 !defined(SM501_OHCI_DRIVER) && \ 1088 !defined(SM501_OHCI_DRIVER) && \
1089 !defined(TMIO_OHCI_DRIVER) && \
1084 !defined(SSB_OHCI_DRIVER) 1090 !defined(SSB_OHCI_DRIVER)
1085#error "missing bus glue for ohci-hcd" 1091#error "missing bus glue for ohci-hcd"
1086#endif 1092#endif
@@ -1147,13 +1153,25 @@ static int __init ohci_hcd_mod_init(void)
1147 goto error_sm501; 1153 goto error_sm501;
1148#endif 1154#endif
1149 1155
1156#ifdef TMIO_OHCI_DRIVER
1157 retval = platform_driver_register(&TMIO_OHCI_DRIVER);
1158 if (retval < 0)
1159 goto error_tmio;
1160#endif
1161
1150 return retval; 1162 return retval;
1151 1163
1152 /* Error path */ 1164 /* Error path */
1165#ifdef TMIO_OHCI_DRIVER
1166 platform_driver_unregister(&TMIO_OHCI_DRIVER);
1167 error_tmio:
1168#endif
1153#ifdef SM501_OHCI_DRIVER 1169#ifdef SM501_OHCI_DRIVER
1170 platform_driver_unregister(&SM501_OHCI_DRIVER);
1154 error_sm501: 1171 error_sm501:
1155#endif 1172#endif
1156#ifdef SSB_OHCI_DRIVER 1173#ifdef SSB_OHCI_DRIVER
1174 ssb_driver_unregister(&SSB_OHCI_DRIVER);
1157 error_ssb: 1175 error_ssb:
1158#endif 1176#endif
1159#ifdef PCI_DRIVER 1177#ifdef PCI_DRIVER
@@ -1189,6 +1207,9 @@ module_init(ohci_hcd_mod_init);
1189 1207
1190static void __exit ohci_hcd_mod_exit(void) 1208static void __exit ohci_hcd_mod_exit(void)
1191{ 1209{
1210#ifdef TMIO_OHCI_DRIVER
1211 platform_driver_unregister(&TMIO_OHCI_DRIVER);
1212#endif
1192#ifdef SM501_OHCI_DRIVER 1213#ifdef SM501_OHCI_DRIVER
1193 platform_driver_unregister(&SM501_OHCI_DRIVER); 1214 platform_driver_unregister(&SM501_OHCI_DRIVER);
1194#endif 1215#endif
diff --git a/drivers/usb/host/ohci-ps3.c b/drivers/usb/host/ohci-ps3.c
index 2089d8a46c4b..3c1a3b5f89f1 100644
--- a/drivers/usb/host/ohci-ps3.c
+++ b/drivers/usb/host/ohci-ps3.c
@@ -192,7 +192,7 @@ fail_start:
192 return result; 192 return result;
193} 193}
194 194
195static int ps3_ohci_remove (struct ps3_system_bus_device *dev) 195static int ps3_ohci_remove(struct ps3_system_bus_device *dev)
196{ 196{
197 unsigned int tmp; 197 unsigned int tmp;
198 struct usb_hcd *hcd = 198 struct usb_hcd *hcd =
@@ -205,6 +205,7 @@ static int ps3_ohci_remove (struct ps3_system_bus_device *dev)
205 205
206 tmp = hcd->irq; 206 tmp = hcd->irq;
207 207
208 ohci_shutdown(hcd);
208 usb_remove_hcd(hcd); 209 usb_remove_hcd(hcd);
209 210
210 ps3_system_bus_set_driver_data(dev, NULL); 211 ps3_system_bus_set_driver_data(dev, NULL);
diff --git a/drivers/usb/host/ohci-tmio.c b/drivers/usb/host/ohci-tmio.c
new file mode 100644
index 000000000000..f9f134af0bd1
--- /dev/null
+++ b/drivers/usb/host/ohci-tmio.c
@@ -0,0 +1,376 @@
1/*
2 * OHCI HCD(Host Controller Driver) for USB.
3 *
4 *(C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
5 *(C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
6 *(C) Copyright 2002 Hewlett-Packard Company
7 *
8 * Bus glue for Toshiba Mobile IO(TMIO) Controller's OHCI core
9 * (C) Copyright 2005 Chris Humbert <mahadri-usb@drigon.com>
10 * (C) Copyright 2007, 2008 Dmitry Baryshkov <dbaryshkov@gmail.com>
11 *
12 * This is known to work with the following variants:
13 * TC6393XB revision 3 (32kB SRAM)
14 *
15 * The TMIO's OHCI core DMAs through a small internal buffer that
16 * is directly addressable by the CPU.
17 *
18 * Written from sparse documentation from Toshiba and Sharp's driver
19 * for the 2.4 kernel,
20 * usb-ohci-tc6393.c(C) Copyright 2004 Lineo Solutions, Inc.
21 *
22 * This program is free software; you can redistribute it and/or modify
23 * it under the terms of the GNU General Public License version 2 as
24 * published by the Free Software Foundation.
25 */
26
27/*#include <linux/fs.h>
28#include <linux/mount.h>
29#include <linux/pagemap.h>
30#include <linux/init.h>
31#include <linux/namei.h>
32#include <linux/sched.h>*/
33#include <linux/platform_device.h>
34#include <linux/mfd/core.h>
35#include <linux/mfd/tmio.h>
36#include <linux/dma-mapping.h>
37
38/*-------------------------------------------------------------------------*/
39
40/*
41 * USB Host Controller Configuration Register
42 */
43#define CCR_REVID 0x08 /* b Revision ID */
44#define CCR_BASE 0x10 /* l USB Control Register Base Address Low */
45#define CCR_ILME 0x40 /* b Internal Local Memory Enable */
46#define CCR_PM 0x4c /* w Power Management */
47#define CCR_INTC 0x50 /* b INT Control */
48#define CCR_LMW1L 0x54 /* w Local Memory Window 1 LMADRS Low */
49#define CCR_LMW1H 0x56 /* w Local Memory Window 1 LMADRS High */
50#define CCR_LMW1BL 0x58 /* w Local Memory Window 1 Base Address Low */
51#define CCR_LMW1BH 0x5A /* w Local Memory Window 1 Base Address High */
52#define CCR_LMW2L 0x5C /* w Local Memory Window 2 LMADRS Low */
53#define CCR_LMW2H 0x5E /* w Local Memory Window 2 LMADRS High */
54#define CCR_LMW2BL 0x60 /* w Local Memory Window 2 Base Address Low */
55#define CCR_LMW2BH 0x62 /* w Local Memory Window 2 Base Address High */
56#define CCR_MISC 0xFC /* b MISC */
57
58#define CCR_PM_GKEN 0x0001
59#define CCR_PM_CKRNEN 0x0002
60#define CCR_PM_USBPW1 0x0004
61#define CCR_PM_USBPW2 0x0008
62#define CCR_PM_USBPW3 0x0008
63#define CCR_PM_PMEE 0x0100
64#define CCR_PM_PMES 0x8000
65
66/*-------------------------------------------------------------------------*/
67
68struct tmio_hcd {
69 void __iomem *ccr;
70 spinlock_t lock; /* protects RMW cycles */
71};
72
73#define hcd_to_tmio(hcd) ((struct tmio_hcd *)(hcd_to_ohci(hcd) + 1))
74
75/*-------------------------------------------------------------------------*/
76
77static void tmio_write_pm(struct platform_device *dev)
78{
79 struct usb_hcd *hcd = platform_get_drvdata(dev);
80 struct tmio_hcd *tmio = hcd_to_tmio(hcd);
81 u16 pm;
82 unsigned long flags;
83
84 spin_lock_irqsave(&tmio->lock, flags);
85
86 pm = CCR_PM_GKEN | CCR_PM_CKRNEN |
87 CCR_PM_PMEE | CCR_PM_PMES;
88
89 tmio_iowrite16(pm, tmio->ccr + CCR_PM);
90 spin_unlock_irqrestore(&tmio->lock, flags);
91}
92
93static void tmio_stop_hc(struct platform_device *dev)
94{
95 struct usb_hcd *hcd = platform_get_drvdata(dev);
96 struct ohci_hcd *ohci = hcd_to_ohci(hcd);
97 struct tmio_hcd *tmio = hcd_to_tmio(hcd);
98 u16 pm;
99
100 pm = CCR_PM_GKEN | CCR_PM_CKRNEN;
101 switch (ohci->num_ports) {
102 default:
103 dev_err(&dev->dev, "Unsupported amount of ports: %d\n", ohci->num_ports);
104 case 3:
105 pm |= CCR_PM_USBPW3;
106 case 2:
107 pm |= CCR_PM_USBPW2;
108 case 1:
109 pm |= CCR_PM_USBPW1;
110 }
111 tmio_iowrite8(0, tmio->ccr + CCR_INTC);
112 tmio_iowrite8(0, tmio->ccr + CCR_ILME);
113 tmio_iowrite16(0, tmio->ccr + CCR_BASE);
114 tmio_iowrite16(0, tmio->ccr + CCR_BASE + 2);
115 tmio_iowrite16(pm, tmio->ccr + CCR_PM);
116}
117
118static void tmio_start_hc(struct platform_device *dev)
119{
120 struct usb_hcd *hcd = platform_get_drvdata(dev);
121 struct tmio_hcd *tmio = hcd_to_tmio(hcd);
122 unsigned long base = hcd->rsrc_start;
123
124 tmio_write_pm(dev);
125 tmio_iowrite16(base, tmio->ccr + CCR_BASE);
126 tmio_iowrite16(base >> 16, tmio->ccr + CCR_BASE + 2);
127 tmio_iowrite8(1, tmio->ccr + CCR_ILME);
128 tmio_iowrite8(2, tmio->ccr + CCR_INTC);
129
130 dev_info(&dev->dev, "revision %d @ 0x%08llx, irq %d\n",
131 tmio_ioread8(tmio->ccr + CCR_REVID), hcd->rsrc_start, hcd->irq);
132}
133
134static int ohci_tmio_start(struct usb_hcd *hcd)
135{
136 struct ohci_hcd *ohci = hcd_to_ohci(hcd);
137 int ret;
138
139 if ((ret = ohci_init(ohci)) < 0)
140 return ret;
141
142 if ((ret = ohci_run(ohci)) < 0) {
143 err("can't start %s", hcd->self.bus_name);
144 ohci_stop(hcd);
145 return ret;
146 }
147
148 return 0;
149}
150
151static const struct hc_driver ohci_tmio_hc_driver = {
152 .description = hcd_name,
153 .product_desc = "TMIO OHCI USB Host Controller",
154 .hcd_priv_size = sizeof(struct ohci_hcd) + sizeof (struct tmio_hcd),
155
156 /* generic hardware linkage */
157 .irq = ohci_irq,
158 .flags = HCD_USB11 | HCD_MEMORY | HCD_LOCAL_MEM,
159
160 /* basic lifecycle operations */
161 .start = ohci_tmio_start,
162 .stop = ohci_stop,
163 .shutdown = ohci_shutdown,
164
165 /* managing i/o requests and associated device resources */
166 .urb_enqueue = ohci_urb_enqueue,
167 .urb_dequeue = ohci_urb_dequeue,
168 .endpoint_disable = ohci_endpoint_disable,
169
170 /* scheduling support */
171 .get_frame_number = ohci_get_frame,
172
173 /* root hub support */
174 .hub_status_data = ohci_hub_status_data,
175 .hub_control = ohci_hub_control,
176#ifdef CONFIG_PM
177 .bus_suspend = ohci_bus_suspend,
178 .bus_resume = ohci_bus_resume,
179#endif
180 .start_port_reset = ohci_start_port_reset,
181};
182
183/*-------------------------------------------------------------------------*/
184static struct platform_driver ohci_hcd_tmio_driver;
185
186static int __devinit ohci_hcd_tmio_drv_probe(struct platform_device *dev)
187{
188 struct mfd_cell *cell = dev->dev.platform_data;
189 struct resource *regs = platform_get_resource(dev, IORESOURCE_MEM, 0);
190 struct resource *config = platform_get_resource(dev, IORESOURCE_MEM, 1);
191 struct resource *sram = platform_get_resource(dev, IORESOURCE_MEM, 2);
192 int irq = platform_get_irq(dev, 0);
193 struct tmio_hcd *tmio;
194 struct ohci_hcd *ohci;
195 struct usb_hcd *hcd;
196 int ret;
197
198 if (usb_disabled())
199 return -ENODEV;
200
201 if (!cell)
202 return -EINVAL;
203
204 hcd = usb_create_hcd(&ohci_tmio_hc_driver, &dev->dev, dev->dev.bus_id);
205 if (!hcd) {
206 ret = -ENOMEM;
207 goto err_usb_create_hcd;
208 }
209
210 hcd->rsrc_start = regs->start;
211 hcd->rsrc_len = regs->end - regs->start + 1;
212
213 tmio = hcd_to_tmio(hcd);
214
215 spin_lock_init(&tmio->lock);
216
217 tmio->ccr = ioremap(config->start, config->end - config->start + 1);
218 if (!tmio->ccr) {
219 ret = -ENOMEM;
220 goto err_ioremap_ccr;
221 }
222
223 hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
224 if (!hcd->regs) {
225 ret = -ENOMEM;
226 goto err_ioremap_regs;
227 }
228
229 if (!dma_declare_coherent_memory(&dev->dev, sram->start,
230 sram->start,
231 sram->end - sram->start + 1,
232 DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE)) {
233 ret = -EBUSY;
234 goto err_dma_declare;
235 }
236
237 if (cell->enable) {
238 ret = cell->enable(dev);
239 if (ret)
240 goto err_enable;
241 }
242
243 tmio_start_hc(dev);
244 ohci = hcd_to_ohci(hcd);
245 ohci_hcd_init(ohci);
246
247 ret = usb_add_hcd(hcd, irq, IRQF_DISABLED);
248 if (ret)
249 goto err_add_hcd;
250
251 return 0;
255
256err_add_hcd:
257 tmio_stop_hc(dev);
258 if (cell->disable)
259 cell->disable(dev);
260err_enable:
261 dma_release_declared_memory(&dev->dev);
262err_dma_declare:
263 iounmap(hcd->regs);
264err_ioremap_regs:
265 iounmap(tmio->ccr);
266err_ioremap_ccr:
267 usb_put_hcd(hcd);
268err_usb_create_hcd:
269
270 return ret;
271}
272
273static int __devexit ohci_hcd_tmio_drv_remove(struct platform_device *dev)
274{
275 struct usb_hcd *hcd = platform_get_drvdata(dev);
276 struct tmio_hcd *tmio = hcd_to_tmio(hcd);
277 struct mfd_cell *cell = dev->dev.platform_data;
278
279 usb_remove_hcd(hcd);
280 tmio_stop_hc(dev);
281 if (cell->disable)
282 cell->disable(dev);
283 dma_release_declared_memory(&dev->dev);
284 iounmap(hcd->regs);
285 iounmap(tmio->ccr);
286 usb_put_hcd(hcd);
287
288 platform_set_drvdata(dev, NULL);
289
290 return 0;
291}
292
293#ifdef CONFIG_PM
294static int ohci_hcd_tmio_drv_suspend(struct platform_device *dev, pm_message_t state)
295{
296 struct mfd_cell *cell = dev->dev.platform_data;
297 struct usb_hcd *hcd = platform_get_drvdata(dev);
298 struct ohci_hcd *ohci = hcd_to_ohci(hcd);
299 struct tmio_hcd *tmio = hcd_to_tmio(hcd);
300 unsigned long flags;
301 u8 misc;
302 int ret;
303
304 if (time_before(jiffies, ohci->next_statechange))
305 msleep(5);
306 ohci->next_statechange = jiffies;
307
308 spin_lock_irqsave(&tmio->lock, flags);
309
310 misc = tmio_ioread8(tmio->ccr + CCR_MISC);
311 misc |= 1 << 3; /* USSUSP */
312 tmio_iowrite8(misc, tmio->ccr + CCR_MISC);
313
314 spin_unlock_irqrestore(&tmio->lock, flags);
315
316 if (cell->suspend) {
317 ret = cell->suspend(dev);
318 if (ret)
319 return ret;
320 }
321
322 hcd->state = HC_STATE_SUSPENDED;
323
324 return 0;
325}
326
327static int ohci_hcd_tmio_drv_resume(struct platform_device *dev)
328{
329 struct mfd_cell *cell = dev->dev.platform_data;
330 struct usb_hcd *hcd = platform_get_drvdata(dev);
331 struct ohci_hcd *ohci = hcd_to_ohci(hcd);
332 struct tmio_hcd *tmio = hcd_to_tmio(hcd);
333 unsigned long flags;
334 u8 misc;
335 int ret;
336
337 if (time_before(jiffies, ohci->next_statechange))
338 msleep(5);
339 ohci->next_statechange = jiffies;
340
341 if (cell->resume) {
342 ret = cell->resume(dev);
343 if (ret)
344 return ret;
345 }
346
347 tmio_start_hc(dev);
348
349 spin_lock_irqsave(&tmio->lock, flags);
350
351 misc = tmio_ioread8(tmio->ccr + CCR_MISC);
352 misc &= ~(1 << 3); /* USSUSP */
353 tmio_iowrite8(misc, tmio->ccr + CCR_MISC);
354
355 spin_unlock_irqrestore(&tmio->lock, flags);
356
357 ohci_finish_controller_resume(hcd);
358
359 return 0;
360}
361#else
362#define ohci_hcd_tmio_drv_suspend NULL
363#define ohci_hcd_tmio_drv_resume NULL
364#endif
365
366static struct platform_driver ohci_hcd_tmio_driver = {
367 .probe = ohci_hcd_tmio_drv_probe,
368 .remove = __devexit_p(ohci_hcd_tmio_drv_remove),
369 .shutdown = usb_hcd_platform_shutdown,
370 .suspend = ohci_hcd_tmio_drv_suspend,
371 .resume = ohci_hcd_tmio_drv_resume,
372 .driver = {
373 .name = "tmio-ohci",
374 .owner = THIS_MODULE,
375 },
376};
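
For context: bus-glue files such as ohci-tmio.c are conventionally compiled by being #included from ohci-hcd.c, which registers the platform driver during module init. The fragment below is only a sketch of that convention; the config symbol and macro name are illustrative assumptions, not taken from this patch.

/* Sketch only -- the config symbol and macro name are assumptions. */
#ifdef CONFIG_MFD_TC6393XB
#include "ohci-tmio.c"
#define PLATFORM_DRIVER		ohci_hcd_tmio_driver
#endif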
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index c18d8790c410..2376f24f3c83 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -1763,11 +1763,12 @@ static void r8a66597_timer(unsigned long _r8a66597)
 {
 	struct r8a66597 *r8a66597 = (struct r8a66597 *)_r8a66597;
 	unsigned long flags;
+	int port;
 
 	spin_lock_irqsave(&r8a66597->lock, flags);
 
-	r8a66597_root_hub_control(r8a66597, 0);
-	r8a66597_root_hub_control(r8a66597, 1);
+	for (port = 0; port < R8A66597_MAX_ROOT_HUB; port++)
+		r8a66597_root_hub_control(r8a66597, port);
 
 	spin_unlock_irqrestore(&r8a66597->lock, flags);
 }
diff --git a/drivers/usb/host/whci/Kbuild b/drivers/usb/host/whci/Kbuild
new file mode 100644
index 000000000000..26a3871ea0f9
--- /dev/null
+++ b/drivers/usb/host/whci/Kbuild
@@ -0,0 +1,11 @@
1obj-$(CONFIG_USB_WHCI_HCD) += whci-hcd.o
2
3whci-hcd-y := \
4 asl.o \
5 hcd.o \
6 hw.o \
7 init.o \
8 int.o \
9 pzl.o \
10 qset.o \
11 wusb.o
diff --git a/drivers/usb/host/whci/asl.c b/drivers/usb/host/whci/asl.c
new file mode 100644
index 000000000000..4d7078e50572
--- /dev/null
+++ b/drivers/usb/host/whci/asl.c
@@ -0,0 +1,367 @@
1/*
2 * Wireless Host Controller (WHC) asynchronous schedule management.
3 *
4 * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License version
8 * 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18#include <linux/kernel.h>
19#include <linux/dma-mapping.h>
20#include <linux/uwb/umc.h>
21#include <linux/usb.h>
22#define D_LOCAL 0
23#include <linux/uwb/debug.h>
24
25#include "../../wusbcore/wusbhc.h"
26
27#include "whcd.h"
28
29#if D_LOCAL >= 4
30static void dump_asl(struct whc *whc, const char *tag)
31{
32 struct device *dev = &whc->umc->dev;
33 struct whc_qset *qset;
34
35 d_printf(4, dev, "ASL %s\n", tag);
36
37 list_for_each_entry(qset, &whc->async_list, list_node) {
38 dump_qset(qset, dev);
39 }
40}
41#else
42static inline void dump_asl(struct whc *whc, const char *tag)
43{
44}
45#endif
46
47
48static void qset_get_next_prev(struct whc *whc, struct whc_qset *qset,
49 struct whc_qset **next, struct whc_qset **prev)
50{
51 struct list_head *n, *p;
52
53 BUG_ON(list_empty(&whc->async_list));
54
55 n = qset->list_node.next;
56 if (n == &whc->async_list)
57 n = n->next;
58 p = qset->list_node.prev;
59 if (p == &whc->async_list)
60 p = p->prev;
61
62 *next = container_of(n, struct whc_qset, list_node);
63 *prev = container_of(p, struct whc_qset, list_node);
64
65}
66
67static void asl_qset_insert_begin(struct whc *whc, struct whc_qset *qset)
68{
69 list_move(&qset->list_node, &whc->async_list);
70 qset->in_sw_list = true;
71}
72
73static void asl_qset_insert(struct whc *whc, struct whc_qset *qset)
74{
75 struct whc_qset *next, *prev;
76
77 qset_clear(whc, qset);
78
79 /* Link into ASL. */
80 qset_get_next_prev(whc, qset, &next, &prev);
81 whc_qset_set_link_ptr(&qset->qh.link, next->qset_dma);
82 whc_qset_set_link_ptr(&prev->qh.link, qset->qset_dma);
83 qset->in_hw_list = true;
84}
85
86static void asl_qset_remove(struct whc *whc, struct whc_qset *qset)
87{
88 struct whc_qset *prev, *next;
89
90 qset_get_next_prev(whc, qset, &next, &prev);
91
92 list_move(&qset->list_node, &whc->async_removed_list);
93 qset->in_sw_list = false;
94
95 /*
96 * No more qsets in the ASL? The caller must stop the ASL as
97 * it's no longer valid.
98 */
99 if (list_empty(&whc->async_list))
100 return;
101
102 /* Remove from ASL. */
103 whc_qset_set_link_ptr(&prev->qh.link, next->qset_dma);
104 qset->in_hw_list = false;
105}
106
107/**
108 * process_qset - process any recently inactivated or halted qTDs in a
109 * qset.
110 *
111 * After inactive qTDs are removed, new qTDs can be added if the
112 * urb queue still contains URBs.
113 *
114 * Returns any additional WUSBCMD bits for the ASL sync command (i.e.,
115 * WUSBCMD_ASYNC_QSET_RM if a halted qset was removed).
116 */
117static uint32_t process_qset(struct whc *whc, struct whc_qset *qset)
118{
119 enum whc_update update = 0;
120 uint32_t status = 0;
121
122 while (qset->ntds) {
123 struct whc_qtd *td;
124 int t;
125
126 t = qset->td_start;
127 td = &qset->qtd[qset->td_start];
128 status = le32_to_cpu(td->status);
129
130 /*
131 * Nothing to do with a still active qTD.
132 */
133 if (status & QTD_STS_ACTIVE)
134 break;
135
136 if (status & QTD_STS_HALTED) {
137 /* Ug, an error. */
138 process_halted_qtd(whc, qset, td);
139 goto done;
140 }
141
142 /* Mmm, a completed qTD. */
143 process_inactive_qtd(whc, qset, td);
144 }
145
146 update |= qset_add_qtds(whc, qset);
147
148done:
149 /*
150 * Remove this qset from the ASL if requested, but only if it
151 * has no qTDs.
152 */
153 if (qset->remove && qset->ntds == 0) {
154 asl_qset_remove(whc, qset);
155 update |= WHC_UPDATE_REMOVED;
156 }
157 return update;
158}
159
160void asl_start(struct whc *whc)
161{
162 struct whc_qset *qset;
163
164 qset = list_first_entry(&whc->async_list, struct whc_qset, list_node);
165
166 le_writeq(qset->qset_dma | QH_LINK_NTDS(8), whc->base + WUSBASYNCLISTADDR);
167
168 whc_write_wusbcmd(whc, WUSBCMD_ASYNC_EN, WUSBCMD_ASYNC_EN);
169 whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
170 WUSBSTS_ASYNC_SCHED, WUSBSTS_ASYNC_SCHED,
171 1000, "start ASL");
172}
173
174void asl_stop(struct whc *whc)
175{
176 whc_write_wusbcmd(whc, WUSBCMD_ASYNC_EN, 0);
177 whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
178 WUSBSTS_ASYNC_SCHED, 0,
179 1000, "stop ASL");
180}
181
182void asl_update(struct whc *whc, uint32_t wusbcmd)
183{
184 whc_write_wusbcmd(whc, wusbcmd, wusbcmd);
185 wait_event(whc->async_list_wq,
186 (le_readl(whc->base + WUSBCMD) & WUSBCMD_ASYNC_UPDATED) == 0);
187}
188
189/**
190 * scan_async_work - scan the ASL for qsets to process.
191 *
192 * Process each qset in the ASL in turn and then signal the WHC that
193 * the ASL has been updated.
194 *
195 * Then start, stop or update the asynchronous schedule as required.
196 */
197void scan_async_work(struct work_struct *work)
198{
199 struct whc *whc = container_of(work, struct whc, async_work);
200 struct whc_qset *qset, *t;
201 enum whc_update update = 0;
202
203 spin_lock_irq(&whc->lock);
204
205 dump_asl(whc, "before processing");
206
207 /*
208 * Traverse the software list backwards so new qsets can be
209 * safely inserted into the ASL without making it non-circular.
210 */
211 list_for_each_entry_safe_reverse(qset, t, &whc->async_list, list_node) {
212 if (!qset->in_hw_list) {
213 asl_qset_insert(whc, qset);
214 update |= WHC_UPDATE_ADDED;
215 }
216
217 update |= process_qset(whc, qset);
218 }
219
220 dump_asl(whc, "after processing");
221
222 spin_unlock_irq(&whc->lock);
223
224 if (update) {
225 uint32_t wusbcmd = WUSBCMD_ASYNC_UPDATED | WUSBCMD_ASYNC_SYNCED_DB;
226 if (update & WHC_UPDATE_REMOVED)
227 wusbcmd |= WUSBCMD_ASYNC_QSET_RM;
228 asl_update(whc, wusbcmd);
229 }
230
231 /*
232 * Now that the ASL is updated, complete the removal of any
233 * removed qsets.
234 */
235 spin_lock(&whc->lock);
236
237 list_for_each_entry_safe(qset, t, &whc->async_removed_list, list_node) {
238 qset_remove_complete(whc, qset);
239 }
240
241 spin_unlock(&whc->lock);
242}
243
244/**
245 * asl_urb_enqueue - queue an URB onto the asynchronous list (ASL).
246 * @whc: the WHCI host controller
247 * @urb: the URB to enqueue
248 * @mem_flags: flags for any memory allocations
249 *
250 * The qset for the endpoint is obtained and the urb queued on to it.
251 *
252 * Work is scheduled to update the hardware's view of the ASL.
253 */
254int asl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags)
255{
256 struct whc_qset *qset;
257 int err;
258 unsigned long flags;
259
260 spin_lock_irqsave(&whc->lock, flags);
261
262 qset = get_qset(whc, urb, GFP_ATOMIC);
263 if (qset == NULL)
264 err = -ENOMEM;
265 else
266 err = qset_add_urb(whc, qset, urb, GFP_ATOMIC);
267 if (!err) {
268 usb_hcd_link_urb_to_ep(&whc->wusbhc.usb_hcd, urb);
269 if (!qset->in_sw_list)
270 asl_qset_insert_begin(whc, qset);
271 }
272
273 spin_unlock_irqrestore(&whc->lock, flags);
274
275 if (!err)
276 queue_work(whc->workqueue, &whc->async_work);
277
278 return 0;
279}
280
281/**
282 * asl_urb_dequeue - remove an URB (qset) from the async list.
283 * @whc: the WHCI host controller
284 * @urb: the URB to dequeue
285 * @status: the current status of the URB
286 *
287 * URBs that do not yet have qTDs can simply be removed from the software
288 * queue, otherwise the qset must be removed from the ASL so the qTDs
289 * can be removed.
290 */
291int asl_urb_dequeue(struct whc *whc, struct urb *urb, int status)
292{
293 struct whc_urb *wurb = urb->hcpriv;
294 struct whc_qset *qset = wurb->qset;
295 struct whc_std *std, *t;
296 int ret;
297 unsigned long flags;
298
299 spin_lock_irqsave(&whc->lock, flags);
300
301 ret = usb_hcd_check_unlink_urb(&whc->wusbhc.usb_hcd, urb, status);
302 if (ret < 0)
303 goto out;
304
305 list_for_each_entry_safe(std, t, &qset->stds, list_node) {
306 if (std->urb == urb)
307 qset_free_std(whc, std);
308 else
309 std->qtd = NULL; /* so this std is re-added when the qset is */
310 }
311
312 asl_qset_remove(whc, qset);
313 wurb->status = status;
314 wurb->is_async = true;
315 queue_work(whc->workqueue, &wurb->dequeue_work);
316
317out:
318 spin_unlock_irqrestore(&whc->lock, flags);
319
320 return ret;
321}
322
323/**
324 * asl_qset_delete - delete a qset from the ASL
325 */
326void asl_qset_delete(struct whc *whc, struct whc_qset *qset)
327{
328 qset->remove = 1;
329 queue_work(whc->workqueue, &whc->async_work);
330 qset_delete(whc, qset);
331}
332
333/**
334 * asl_init - initialize the asynchronous schedule list
335 *
336 * A dummy qset with no qTDs is added to the ASL to simplify removing
337 * qsets (no need to stop the ASL when the last qset is removed).
338 */
339int asl_init(struct whc *whc)
340{
341 struct whc_qset *qset;
342
343 qset = qset_alloc(whc, GFP_KERNEL);
344 if (qset == NULL)
345 return -ENOMEM;
346
347 asl_qset_insert_begin(whc, qset);
348 asl_qset_insert(whc, qset);
349
350 return 0;
351}
352
353/**
354 * asl_clean_up - free ASL resources
355 *
356 * The ASL is stopped and empty except for the dummy qset.
357 */
358void asl_clean_up(struct whc *whc)
359{
360 struct whc_qset *qset;
361
362 if (!list_empty(&whc->async_list)) {
363 qset = list_first_entry(&whc->async_list, struct whc_qset, list_node);
364 list_del(&qset->list_node);
365 qset_free(whc, qset);
366 }
367}
diff --git a/drivers/usb/host/whci/hcd.c b/drivers/usb/host/whci/hcd.c
new file mode 100644
index 000000000000..ef3ad4dca945
--- /dev/null
+++ b/drivers/usb/host/whci/hcd.c
@@ -0,0 +1,339 @@
1/*
2 * Wireless Host Controller (WHC) driver.
3 *
4 * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License version
8 * 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18#include <linux/version.h>
19#include <linux/kernel.h>
20#include <linux/init.h>
21#include <linux/uwb/umc.h>
22
23#include "../../wusbcore/wusbhc.h"
24
25#include "whcd.h"
26
27/*
28 * One time initialization.
29 *
30 * Nothing to do here.
31 */
32static int whc_reset(struct usb_hcd *usb_hcd)
33{
34 return 0;
35}
36
37/*
38 * Start the wireless host controller.
39 *
40 * Start device notification.
41 *
42 * Put hc into run state, set DNTS parameters.
43 */
44static int whc_start(struct usb_hcd *usb_hcd)
45{
46 struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
47 struct whc *whc = wusbhc_to_whc(wusbhc);
48 u8 bcid;
49 int ret;
50
51 mutex_lock(&wusbhc->mutex);
52
53 le_writel(WUSBINTR_GEN_CMD_DONE
54 | WUSBINTR_HOST_ERR
55 | WUSBINTR_ASYNC_SCHED_SYNCED
56 | WUSBINTR_DNTS_INT
57 | WUSBINTR_ERR_INT
58 | WUSBINTR_INT,
59 whc->base + WUSBINTR);
60
61 /* set cluster ID */
62 bcid = wusb_cluster_id_get();
63 ret = whc_set_cluster_id(whc, bcid);
64 if (ret < 0)
65 goto out;
66 wusbhc->cluster_id = bcid;
67
68 /* start HC */
69 whc_write_wusbcmd(whc, WUSBCMD_RUN, WUSBCMD_RUN);
70
71 usb_hcd->uses_new_polling = 1;
72 usb_hcd->poll_rh = 1;
73 usb_hcd->state = HC_STATE_RUNNING;
74
75out:
76 mutex_unlock(&wusbhc->mutex);
77 return ret;
78}
79
80
81/*
82 * Stop the wireless host controller.
83 *
84 * Stop device notification.
85 *
86 * Wait for pending transfer to stop? Put hc into stop state?
87 */
88static void whc_stop(struct usb_hcd *usb_hcd)
89{
90 struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
91 struct whc *whc = wusbhc_to_whc(wusbhc);
92
93 mutex_lock(&wusbhc->mutex);
94
95 wusbhc_stop(wusbhc);
96
97 /* stop HC */
98 le_writel(0, whc->base + WUSBINTR);
99 whc_write_wusbcmd(whc, WUSBCMD_RUN, 0);
100 whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
101 WUSBSTS_HCHALTED, WUSBSTS_HCHALTED,
102 100, "HC to halt");
103
104 wusb_cluster_id_put(wusbhc->cluster_id);
105
106 mutex_unlock(&wusbhc->mutex);
107}
108
109static int whc_get_frame_number(struct usb_hcd *usb_hcd)
110{
111 /* Frame numbers are not applicable to WUSB. */
112 return -ENOSYS;
113}
114
115
116/*
117 * Queue an URB to the ASL or PZL
118 */
119static int whc_urb_enqueue(struct usb_hcd *usb_hcd, struct urb *urb,
120 gfp_t mem_flags)
121{
122 struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
123 struct whc *whc = wusbhc_to_whc(wusbhc);
124 int ret;
125
126 switch (usb_pipetype(urb->pipe)) {
127 case PIPE_INTERRUPT:
128 ret = pzl_urb_enqueue(whc, urb, mem_flags);
129 break;
130 case PIPE_ISOCHRONOUS:
131 dev_err(&whc->umc->dev, "isochronous transfers unsupported\n");
132 ret = -ENOTSUPP;
133 break;
134 case PIPE_CONTROL:
135 case PIPE_BULK:
136 default:
137 ret = asl_urb_enqueue(whc, urb, mem_flags);
138 break;
139 }
140
141 return ret;
142}
143
144/*
145 * Remove a queued URB from the ASL or PZL.
146 */
147static int whc_urb_dequeue(struct usb_hcd *usb_hcd, struct urb *urb, int status)
148{
149 struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
150 struct whc *whc = wusbhc_to_whc(wusbhc);
151 int ret;
152
153 switch (usb_pipetype(urb->pipe)) {
154 case PIPE_INTERRUPT:
155 ret = pzl_urb_dequeue(whc, urb, status);
156 break;
157 case PIPE_ISOCHRONOUS:
158 ret = -ENOTSUPP;
159 break;
160 case PIPE_CONTROL:
161 case PIPE_BULK:
162 default:
163 ret = asl_urb_dequeue(whc, urb, status);
164 break;
165 }
166
167 return ret;
168}
169
170/*
171 * Wait for all URBs to the endpoint to be completed, then delete the
172 * qset.
173 */
174static void whc_endpoint_disable(struct usb_hcd *usb_hcd,
175 struct usb_host_endpoint *ep)
176{
177 struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
178 struct whc *whc = wusbhc_to_whc(wusbhc);
179 struct whc_qset *qset;
180
181 qset = ep->hcpriv;
182 if (qset) {
183 ep->hcpriv = NULL;
184 if (usb_endpoint_xfer_bulk(&ep->desc)
185 || usb_endpoint_xfer_control(&ep->desc))
186 asl_qset_delete(whc, qset);
187 else
188 pzl_qset_delete(whc, qset);
189 }
190}
191
192static struct hc_driver whc_hc_driver = {
193 .description = "whci-hcd",
194 .product_desc = "Wireless host controller",
195 .hcd_priv_size = sizeof(struct whc) - sizeof(struct usb_hcd),
196 .irq = whc_int_handler,
197 .flags = HCD_USB2,
198
199 .reset = whc_reset,
200 .start = whc_start,
201 .stop = whc_stop,
202 .get_frame_number = whc_get_frame_number,
203 .urb_enqueue = whc_urb_enqueue,
204 .urb_dequeue = whc_urb_dequeue,
205 .endpoint_disable = whc_endpoint_disable,
206
207 .hub_status_data = wusbhc_rh_status_data,
208 .hub_control = wusbhc_rh_control,
209 .bus_suspend = wusbhc_rh_suspend,
210 .bus_resume = wusbhc_rh_resume,
211 .start_port_reset = wusbhc_rh_start_port_reset,
212};
213
214static int whc_probe(struct umc_dev *umc)
215{
216 int ret = -ENOMEM;
217 struct usb_hcd *usb_hcd;
218 struct wusbhc *wusbhc = NULL;
219 struct whc *whc = NULL;
220 struct device *dev = &umc->dev;
221
222 usb_hcd = usb_create_hcd(&whc_hc_driver, dev, "whci");
223 if (usb_hcd == NULL) {
224 dev_err(dev, "unable to create hcd\n");
225 goto error;
226 }
227
228 usb_hcd->wireless = 1;
229
230 wusbhc = usb_hcd_to_wusbhc(usb_hcd);
231 whc = wusbhc_to_whc(wusbhc);
232 whc->umc = umc;
233
234 ret = whc_init(whc);
235 if (ret)
236 goto error;
237
238 wusbhc->dev = dev;
239 wusbhc->uwb_rc = uwb_rc_get_by_grandpa(umc->dev.parent);
240 if (!wusbhc->uwb_rc) {
241 ret = -ENODEV;
242 dev_err(dev, "cannot get radio controller\n");
243 goto error;
244 }
245
246 if (whc->n_devices > USB_MAXCHILDREN) {
247 dev_warn(dev, "USB_MAXCHILDREN too low for WUSB adapter (%u ports)\n",
248 whc->n_devices);
249 wusbhc->ports_max = USB_MAXCHILDREN;
250 } else
251 wusbhc->ports_max = whc->n_devices;
252 wusbhc->mmcies_max = whc->n_mmc_ies;
253 wusbhc->start = whc_wusbhc_start;
254 wusbhc->stop = whc_wusbhc_stop;
255 wusbhc->mmcie_add = whc_mmcie_add;
256 wusbhc->mmcie_rm = whc_mmcie_rm;
257 wusbhc->dev_info_set = whc_dev_info_set;
258 wusbhc->bwa_set = whc_bwa_set;
259 wusbhc->set_num_dnts = whc_set_num_dnts;
260 wusbhc->set_ptk = whc_set_ptk;
261 wusbhc->set_gtk = whc_set_gtk;
262
263 ret = wusbhc_create(wusbhc);
264 if (ret)
265 goto error_wusbhc_create;
266
267 ret = usb_add_hcd(usb_hcd, whc->umc->irq, IRQF_SHARED);
268 if (ret) {
269 dev_err(dev, "cannot add HCD: %d\n", ret);
270 goto error_usb_add_hcd;
271 }
272
273 ret = wusbhc_b_create(wusbhc);
274 if (ret) {
275 dev_err(dev, "WUSBHC phase B setup failed: %d\n", ret);
276 goto error_wusbhc_b_create;
277 }
278
279 return 0;
280
281error_wusbhc_b_create:
282 usb_remove_hcd(usb_hcd);
283error_usb_add_hcd:
284 wusbhc_destroy(wusbhc);
285error_wusbhc_create:
286 uwb_rc_put(wusbhc->uwb_rc);
287error:
288 if (whc) whc_clean_up(whc);
289 if (usb_hcd)
290 usb_put_hcd(usb_hcd);
291 return ret;
292}
293
294
295static void whc_remove(struct umc_dev *umc)
296{
297 struct usb_hcd *usb_hcd = dev_get_drvdata(&umc->dev);
298 struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
299 struct whc *whc = wusbhc_to_whc(wusbhc);
300
301 if (usb_hcd) {
302 wusbhc_b_destroy(wusbhc);
303 usb_remove_hcd(usb_hcd);
304 wusbhc_destroy(wusbhc);
305 uwb_rc_put(wusbhc->uwb_rc);
306 whc_clean_up(whc);
307 usb_put_hcd(usb_hcd);
308 }
309}
310
311static struct umc_driver whci_hc_driver = {
312 .name = "whci-hcd",
313 .cap_id = UMC_CAP_ID_WHCI_WUSB_HC,
314 .probe = whc_probe,
315 .remove = whc_remove,
316};
317
318static int __init whci_hc_driver_init(void)
319{
320 return umc_driver_register(&whci_hc_driver);
321}
322module_init(whci_hc_driver_init);
323
324static void __exit whci_hc_driver_exit(void)
325{
326 umc_driver_unregister(&whci_hc_driver);
327}
328module_exit(whci_hc_driver_exit);
329
330/* PCI device IDs that we handle (so the module gets autoloaded) */
331static struct pci_device_id whci_hcd_id_table[] = {
332 { PCI_DEVICE_CLASS(PCI_CLASS_WIRELESS_WHCI, ~0) },
333 { /* empty last entry */ }
334};
335MODULE_DEVICE_TABLE(pci, whci_hcd_id_table);
336
337MODULE_DESCRIPTION("WHCI Wireless USB host controller driver");
338MODULE_AUTHOR("Cambridge Silicon Radio Ltd.");
339MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/whci/hw.c b/drivers/usb/host/whci/hw.c
new file mode 100644
index 000000000000..ac86e59c1225
--- /dev/null
+++ b/drivers/usb/host/whci/hw.c
@@ -0,0 +1,87 @@
1/*
2 * Wireless Host Controller (WHC) hardware access helpers.
3 *
4 * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License version
8 * 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18#include <linux/kernel.h>
19#include <linux/dma-mapping.h>
20#include <linux/uwb/umc.h>
21
22#include "../../wusbcore/wusbhc.h"
23
24#include "whcd.h"
25
26void whc_write_wusbcmd(struct whc *whc, u32 mask, u32 val)
27{
28 unsigned long flags;
29 u32 cmd;
30
31 spin_lock_irqsave(&whc->lock, flags);
32
33 cmd = le_readl(whc->base + WUSBCMD);
34 cmd = (cmd & ~mask) | val;
35 le_writel(cmd, whc->base + WUSBCMD);
36
37 spin_unlock_irqrestore(&whc->lock, flags);
38}
39
40/**
41 * whc_do_gencmd - start a generic command via the WUSBGENCMDSTS register
42 * @whc: the WHCI HC
43 * @cmd: command to start.
44 * @params: parameters for the command (the WUSBGENCMDPARAMS register value).
45 * @addr: pointer to any data for the command (may be NULL).
46 * @len: length of the data (if any).
47 */
48int whc_do_gencmd(struct whc *whc, u32 cmd, u32 params, void *addr, size_t len)
49{
50 unsigned long flags;
51 dma_addr_t dma_addr;
52 int t;
53
54 mutex_lock(&whc->mutex);
55
56 /* Wait for previous command to complete. */
57 t = wait_event_timeout(whc->cmd_wq,
58 (le_readl(whc->base + WUSBGENCMDSTS) & WUSBGENCMDSTS_ACTIVE) == 0,
59 msecs_to_jiffies(WHC_GENCMD_TIMEOUT_MS));
60 if (t == 0) {
61 dev_err(&whc->umc->dev, "generic command timeout (%04x/%04x)\n",
62 le_readl(whc->base + WUSBGENCMDSTS),
63 le_readl(whc->base + WUSBGENCMDPARAMS));
64 mutex_unlock(&whc->mutex);
65 return -ETIMEDOUT;
66 }
66
67 if (addr) {
68 memcpy(whc->gen_cmd_buf, addr, len);
69 dma_addr = whc->gen_cmd_buf_dma;
70 } else
71 dma_addr = 0;
72
73 /* Poke registers to start cmd. */
74 spin_lock_irqsave(&whc->lock, flags);
75
76 le_writel(params, whc->base + WUSBGENCMDPARAMS);
77 le_writeq(dma_addr, whc->base + WUSBGENADDR);
78
79 le_writel(WUSBGENCMDSTS_ACTIVE | WUSBGENCMDSTS_IOC | cmd,
80 whc->base + WUSBGENCMDSTS);
81
82 spin_unlock_irqrestore(&whc->lock, flags);
83
84 mutex_unlock(&whc->mutex);
85
86 return 0;
87}
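
whc_do_gencmd() is the driver's building block for one-shot controller commands: it serialises callers with whc->mutex, waits for the previous command to finish, copies any payload into the DMA-coherent gen_cmd_buf and starts the command through WUSBGENCMDSTS. A minimal usage sketch follows; WHCI_CMD_EXAMPLE and whc_example_cmd() are hypothetical names used only for illustration.

/* Hypothetical caller, for illustration only. */
static int whc_example_cmd(struct whc *whc, void *payload, size_t len)
{
	/* No register parameters; the payload (if any) goes via gen_cmd_buf. */
	return whc_do_gencmd(whc, WHCI_CMD_EXAMPLE, 0, payload, len);
}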
diff --git a/drivers/usb/host/whci/init.c b/drivers/usb/host/whci/init.c
new file mode 100644
index 000000000000..34a783cb0133
--- /dev/null
+++ b/drivers/usb/host/whci/init.c
@@ -0,0 +1,188 @@
1/*
2 * Wireless Host Controller (WHC) initialization.
3 *
4 * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License version
8 * 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18#include <linux/kernel.h>
19#include <linux/dma-mapping.h>
20#include <linux/uwb/umc.h>
21
22#include "../../wusbcore/wusbhc.h"
23
24#include "whcd.h"
25
26/*
27 * Reset the host controller.
28 */
29static void whc_hw_reset(struct whc *whc)
30{
31 le_writel(WUSBCMD_WHCRESET, whc->base + WUSBCMD);
32 whci_wait_for(&whc->umc->dev, whc->base + WUSBCMD, WUSBCMD_WHCRESET, 0,
33 100, "reset");
34}
35
36static void whc_hw_init_di_buf(struct whc *whc)
37{
38 int d;
39
40 /* Disable all entries in the Device Information buffer. */
41 for (d = 0; d < whc->n_devices; d++)
42 whc->di_buf[d].addr_sec_info = WHC_DI_DISABLE;
43
44 le_writeq(whc->di_buf_dma, whc->base + WUSBDEVICEINFOADDR);
45}
46
47static void whc_hw_init_dn_buf(struct whc *whc)
48{
49 /* Clear the Device Notification buffer to ensure the V (valid)
50 * bits are clear. */
51 memset(whc->dn_buf, 0, 4096);
52
53 le_writeq(whc->dn_buf_dma, whc->base + WUSBDNTSBUFADDR);
54}
55
56int whc_init(struct whc *whc)
57{
58 u32 whcsparams;
59 int ret, i;
60 resource_size_t start, len;
61
62 spin_lock_init(&whc->lock);
63 mutex_init(&whc->mutex);
64 init_waitqueue_head(&whc->cmd_wq);
65 init_waitqueue_head(&whc->async_list_wq);
66 init_waitqueue_head(&whc->periodic_list_wq);
67 whc->workqueue = create_singlethread_workqueue(dev_name(&whc->umc->dev));
68 if (whc->workqueue == NULL) {
69 ret = -ENOMEM;
70 goto error;
71 }
72 INIT_WORK(&whc->dn_work, whc_dn_work);
73
74 INIT_WORK(&whc->async_work, scan_async_work);
75 INIT_LIST_HEAD(&whc->async_list);
76 INIT_LIST_HEAD(&whc->async_removed_list);
77
78 INIT_WORK(&whc->periodic_work, scan_periodic_work);
79 for (i = 0; i < 5; i++)
80 INIT_LIST_HEAD(&whc->periodic_list[i]);
81 INIT_LIST_HEAD(&whc->periodic_removed_list);
82
83 /* Map HC registers. */
84 start = whc->umc->resource.start;
85 len = whc->umc->resource.end - start + 1;
86 if (!request_mem_region(start, len, "whci-hc")) {
87 dev_err(&whc->umc->dev, "can't request HC region\n");
88 ret = -EBUSY;
89 goto error;
90 }
91 whc->base_phys = start;
92 whc->base = ioremap(start, len);
93 if (!whc->base) {
94 dev_err(&whc->umc->dev, "ioremap\n");
95 ret = -ENOMEM;
96 goto error;
97 }
98
99 whc_hw_reset(whc);
100
101 /* Read maximum number of devices, keys and MMC IEs. */
102 whcsparams = le_readl(whc->base + WHCSPARAMS);
103 whc->n_devices = WHCSPARAMS_TO_N_DEVICES(whcsparams);
104 whc->n_keys = WHCSPARAMS_TO_N_KEYS(whcsparams);
105 whc->n_mmc_ies = WHCSPARAMS_TO_N_MMC_IES(whcsparams);
106
107 dev_dbg(&whc->umc->dev, "N_DEVICES = %d, N_KEYS = %d, N_MMC_IES = %d\n",
108 whc->n_devices, whc->n_keys, whc->n_mmc_ies);
109
110 whc->qset_pool = dma_pool_create("qset", &whc->umc->dev,
111 sizeof(struct whc_qset), 64, 0);
112 if (whc->qset_pool == NULL) {
113 ret = -ENOMEM;
114 goto error;
115 }
116
117 ret = asl_init(whc);
118 if (ret < 0)
119 goto error;
120 ret = pzl_init(whc);
121 if (ret < 0)
122 goto error;
123
124 /* Allocate and initialize a buffer for generic commands, the
125 Device Information buffer, and the Device Notification
126 buffer. */
127
128 whc->gen_cmd_buf = dma_alloc_coherent(&whc->umc->dev, WHC_GEN_CMD_DATA_LEN,
129 &whc->gen_cmd_buf_dma, GFP_KERNEL);
130 if (whc->gen_cmd_buf == NULL) {
131 ret = -ENOMEM;
132 goto error;
133 }
134
135 whc->dn_buf = dma_alloc_coherent(&whc->umc->dev,
136 sizeof(struct dn_buf_entry) * WHC_N_DN_ENTRIES,
137 &whc->dn_buf_dma, GFP_KERNEL);
138 if (!whc->dn_buf) {
139 ret = -ENOMEM;
140 goto error;
141 }
142 whc_hw_init_dn_buf(whc);
143
144 whc->di_buf = dma_alloc_coherent(&whc->umc->dev,
145 sizeof(struct di_buf_entry) * whc->n_devices,
146 &whc->di_buf_dma, GFP_KERNEL);
147 if (!whc->di_buf) {
148 ret = -ENOMEM;
149 goto error;
150 }
151 whc_hw_init_di_buf(whc);
152
153 return 0;
154
155error:
156 whc_clean_up(whc);
157 return ret;
158}
159
160void whc_clean_up(struct whc *whc)
161{
162 resource_size_t len;
163
164 if (whc->di_buf)
165 dma_free_coherent(&whc->umc->dev, sizeof(struct di_buf_entry) * whc->n_devices,
166 whc->di_buf, whc->di_buf_dma);
167 if (whc->dn_buf)
168 dma_free_coherent(&whc->umc->dev, sizeof(struct dn_buf_entry) * WHC_N_DN_ENTRIES,
169 whc->dn_buf, whc->dn_buf_dma);
170 if (whc->gen_cmd_buf)
171 dma_free_coherent(&whc->umc->dev, WHC_GEN_CMD_DATA_LEN,
172 whc->gen_cmd_buf, whc->gen_cmd_buf_dma);
173
174 pzl_clean_up(whc);
175 asl_clean_up(whc);
176
177 if (whc->qset_pool)
178 dma_pool_destroy(whc->qset_pool);
179
180 len = whc->umc->resource.end - whc->umc->resource.start + 1;
181 if (whc->base)
182 iounmap(whc->base);
183 if (whc->base_phys)
184 release_mem_region(whc->base_phys, len);
185
186 if (whc->workqueue)
187 destroy_workqueue(whc->workqueue);
188}
diff --git a/drivers/usb/host/whci/int.c b/drivers/usb/host/whci/int.c
new file mode 100644
index 000000000000..fce01174aa9b
--- /dev/null
+++ b/drivers/usb/host/whci/int.c
@@ -0,0 +1,95 @@
1/*
2 * Wireless Host Controller (WHC) interrupt handling.
3 *
4 * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License version
8 * 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18#include <linux/version.h>
19#include <linux/kernel.h>
20#include <linux/init.h>
21#include <linux/uwb/umc.h>
22
23#include "../../wusbcore/wusbhc.h"
24
25#include "whcd.h"
26
27static void transfer_done(struct whc *whc)
28{
29 queue_work(whc->workqueue, &whc->async_work);
30 queue_work(whc->workqueue, &whc->periodic_work);
31}
32
33irqreturn_t whc_int_handler(struct usb_hcd *hcd)
34{
35 struct wusbhc *wusbhc = usb_hcd_to_wusbhc(hcd);
36 struct whc *whc = wusbhc_to_whc(wusbhc);
37 u32 sts;
38
39 sts = le_readl(whc->base + WUSBSTS);
40 if (!(sts & WUSBSTS_INT_MASK))
41 return IRQ_NONE;
42 le_writel(sts & WUSBSTS_INT_MASK, whc->base + WUSBSTS);
43
44 if (sts & WUSBSTS_GEN_CMD_DONE)
45 wake_up(&whc->cmd_wq);
46
47 if (sts & WUSBSTS_HOST_ERR)
48 dev_err(&whc->umc->dev, "FIXME: host system error\n");
49
50 if (sts & WUSBSTS_ASYNC_SCHED_SYNCED)
51 wake_up(&whc->async_list_wq);
52
53 if (sts & WUSBSTS_PERIODIC_SCHED_SYNCED)
54 wake_up(&whc->periodic_list_wq);
55
56 if (sts & WUSBSTS_DNTS_INT)
57 queue_work(whc->workqueue, &whc->dn_work);
58
59 /*
60 * A transfer completed (see [WHCI] section 4.7.1.2 for when
61 * this occurs).
62 */
63 if (sts & (WUSBSTS_INT | WUSBSTS_ERR_INT))
64 transfer_done(whc);
65
66 return IRQ_HANDLED;
67}
68
69static int process_dn_buf(struct whc *whc)
70{
71 struct wusbhc *wusbhc = &whc->wusbhc;
72 struct dn_buf_entry *dn;
73 int processed = 0;
74
75 for (dn = whc->dn_buf; dn < whc->dn_buf + WHC_N_DN_ENTRIES; dn++) {
76 if (dn->status & WHC_DN_STATUS_VALID) {
77 wusbhc_handle_dn(wusbhc, dn->src_addr,
78 (struct wusb_dn_hdr *)dn->dn_data,
79 dn->msg_size);
80 dn->status &= ~WHC_DN_STATUS_VALID;
81 processed++;
82 }
83 }
84 return processed;
85}
86
87void whc_dn_work(struct work_struct *work)
88{
89 struct whc *whc = container_of(work, struct whc, dn_work);
90 int processed;
91
92 do {
93 processed = process_dn_buf(whc);
94 } while (processed);
95}
diff --git a/drivers/usb/host/whci/pzl.c b/drivers/usb/host/whci/pzl.c
new file mode 100644
index 000000000000..8d62df0c330b
--- /dev/null
+++ b/drivers/usb/host/whci/pzl.c
@@ -0,0 +1,398 @@
1/*
2 * Wireless Host Controller (WHC) periodic schedule management.
3 *
4 * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License version
8 * 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18#include <linux/kernel.h>
19#include <linux/dma-mapping.h>
20#include <linux/uwb/umc.h>
21#include <linux/usb.h>
22#define D_LOCAL 0
23#include <linux/uwb/debug.h>
24
25#include "../../wusbcore/wusbhc.h"
26
27#include "whcd.h"
28
29#if D_LOCAL >= 4
30static void dump_pzl(struct whc *whc, const char *tag)
31{
32 struct device *dev = &whc->umc->dev;
33 struct whc_qset *qset;
34 int period = 0;
35
36 d_printf(4, dev, "PZL %s\n", tag);
37
38 for (period = 0; period < 5; period++) {
39 d_printf(4, dev, "Period %d\n", period);
40 list_for_each_entry(qset, &whc->periodic_list[period], list_node) {
41 dump_qset(qset, dev);
42 }
43 }
44}
45#else
46static inline void dump_pzl(struct whc *whc, const char *tag)
47{
48}
49#endif
50
51static void update_pzl_pointers(struct whc *whc, int period, u64 addr)
52{
53 switch (period) {
54 case 0:
55 whc_qset_set_link_ptr(&whc->pz_list[0], addr);
56 whc_qset_set_link_ptr(&whc->pz_list[2], addr);
57 whc_qset_set_link_ptr(&whc->pz_list[4], addr);
58 whc_qset_set_link_ptr(&whc->pz_list[6], addr);
59 whc_qset_set_link_ptr(&whc->pz_list[8], addr);
60 whc_qset_set_link_ptr(&whc->pz_list[10], addr);
61 whc_qset_set_link_ptr(&whc->pz_list[12], addr);
62 whc_qset_set_link_ptr(&whc->pz_list[14], addr);
63 break;
64 case 1:
65 whc_qset_set_link_ptr(&whc->pz_list[1], addr);
66 whc_qset_set_link_ptr(&whc->pz_list[5], addr);
67 whc_qset_set_link_ptr(&whc->pz_list[9], addr);
68 whc_qset_set_link_ptr(&whc->pz_list[13], addr);
69 break;
70 case 2:
71 whc_qset_set_link_ptr(&whc->pz_list[3], addr);
72 whc_qset_set_link_ptr(&whc->pz_list[11], addr);
73 break;
74 case 3:
75 whc_qset_set_link_ptr(&whc->pz_list[7], addr);
76 break;
77 case 4:
78 whc_qset_set_link_ptr(&whc->pz_list[15], addr);
79 break;
80 }
81}
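
update_pzl_pointers() lists, per period, which of the 16 periodic zone list slots must point at the given qset: period 0 owns every second slot, period 1 every fourth, and so on, so shorter periods are reached more often. Purely as an illustration (not part of the driver, helper name invented), the same assignment can be written as a loop, assuming slot i serves the period equal to its number of trailing one bits, capped at 4, which matches the explicit cases above.

/* Illustrative equivalent of update_pzl_pointers(); not part of the driver. */
static void update_pzl_pointers_sketch(struct whc *whc, int period, u64 addr)
{
	int i;

	for (i = 0; i < 16; i++) {
		/* ffs(~i) - 1 == number of trailing 1 bits in i. */
		int p = min(ffs(~i) - 1, 4);

		if (p == period)
			whc_qset_set_link_ptr(&whc->pz_list[i], addr);
	}
}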
82
83/*
84 * Return the 'period' to use for this qset. The minimum interval for
85 * the endpoint is used so that, whatever URBs are submitted, the
86 * device is polled often enough.
87 */
88static int qset_get_period(struct whc *whc, struct whc_qset *qset)
89{
90 uint8_t bInterval = qset->ep->desc.bInterval;
91
92 if (bInterval < 6)
93 bInterval = 6;
94 if (bInterval > 10)
95 bInterval = 10;
96 return bInterval - 6;
97}
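
As a worked example derived from the clamping above (illustration only), bInterval values map onto the five PZL periods as follows:

/*
 * bInterval <= 6  -> period 0  (linked from 8 of the 16 PZL slots)
 * bInterval == 7  -> period 1  (4 slots)
 * bInterval == 8  -> period 2  (2 slots)
 * bInterval == 9  -> period 3  (1 slot)
 * bInterval >= 10 -> period 4  (1 slot)
 */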
98
99static void qset_insert_in_sw_list(struct whc *whc, struct whc_qset *qset)
100{
101 int period;
102
103 period = qset_get_period(whc, qset);
104
105 qset_clear(whc, qset);
106 list_move(&qset->list_node, &whc->periodic_list[period]);
107 qset->in_sw_list = true;
108}
109
110static void pzl_qset_remove(struct whc *whc, struct whc_qset *qset)
111{
112 list_move(&qset->list_node, &whc->periodic_removed_list);
113 qset->in_hw_list = false;
114 qset->in_sw_list = false;
115}
116
117/**
118 * pzl_process_qset - process any recently inactivated or halted qTDs
119 * in a qset.
120 *
121 * After inactive qTDs are removed, new qTDs can be added if the
122 * urb queue still contains URBs.
123 *
124 * Returns the schedule updates required.
125 */
126static enum whc_update pzl_process_qset(struct whc *whc, struct whc_qset *qset)
127{
128 enum whc_update update = 0;
129 uint32_t status = 0;
130
131 while (qset->ntds) {
132 struct whc_qtd *td;
133 int t;
134
135 t = qset->td_start;
136 td = &qset->qtd[qset->td_start];
137 status = le32_to_cpu(td->status);
138
139 /*
140 * Nothing to do with a still active qTD.
141 */
142 if (status & QTD_STS_ACTIVE)
143 break;
144
145 if (status & QTD_STS_HALTED) {
146 /* Ug, an error. */
147 process_halted_qtd(whc, qset, td);
148 goto done;
149 }
150
151 /* Mmm, a completed qTD. */
152 process_inactive_qtd(whc, qset, td);
153 }
154
155 update |= qset_add_qtds(whc, qset);
156
157done:
158 /*
159 * If there are no qTDs in this qset, remove it from the PZL.
160 */
161 if (qset->remove && qset->ntds == 0) {
162 pzl_qset_remove(whc, qset);
163 update |= WHC_UPDATE_REMOVED;
164 }
165
166 return update;
167}
168
169/**
170 * pzl_start - start the periodic schedule
171 * @whc: the WHCI host controller
172 *
173 * The PZL must be valid (e.g., all entries in the list should have
174 * the T bit set).
175 */
176void pzl_start(struct whc *whc)
177{
178 le_writeq(whc->pz_list_dma, whc->base + WUSBPERIODICLISTBASE);
179
180 whc_write_wusbcmd(whc, WUSBCMD_PERIODIC_EN, WUSBCMD_PERIODIC_EN);
181 whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
182 WUSBSTS_PERIODIC_SCHED, WUSBSTS_PERIODIC_SCHED,
183 1000, "start PZL");
184}
185
186/**
187 * pzl_stop - stop the periodic schedule
188 * @whc: the WHCI host controller
189 */
190void pzl_stop(struct whc *whc)
191{
192 whc_write_wusbcmd(whc, WUSBCMD_PERIODIC_EN, 0);
193 whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
194 WUSBSTS_PERIODIC_SCHED, 0,
195 1000, "stop PZL");
196}
197
198void pzl_update(struct whc *whc, uint32_t wusbcmd)
199{
200 whc_write_wusbcmd(whc, wusbcmd, wusbcmd);
201 wait_event(whc->periodic_list_wq,
202 (le_readl(whc->base + WUSBCMD) & WUSBCMD_PERIODIC_UPDATED) == 0);
203}
204
205static void update_pzl_hw_view(struct whc *whc)
206{
207 struct whc_qset *qset, *t;
208 int period;
209 u64 tmp_qh = 0;
210
211 for (period = 0; period < 5; period++) {
212 list_for_each_entry_safe(qset, t, &whc->periodic_list[period], list_node) {
213 whc_qset_set_link_ptr(&qset->qh.link, tmp_qh);
214 tmp_qh = qset->qset_dma;
215 qset->in_hw_list = true;
216 }
217 update_pzl_pointers(whc, period, tmp_qh);
218 }
219}
220
221/**
222 * scan_periodic_work - scan the PZL for qsets to process.
223 *
224 * Process each qset in the PZL in turn and then signal the WHC that
225 * the PZL has been updated.
226 *
227 * Then start, stop or update the periodic schedule as required.
228 */
229void scan_periodic_work(struct work_struct *work)
230{
231 struct whc *whc = container_of(work, struct whc, periodic_work);
232 struct whc_qset *qset, *t;
233 enum whc_update update = 0;
234 int period;
235
236 spin_lock_irq(&whc->lock);
237
238 dump_pzl(whc, "before processing");
239
240 for (period = 4; period >= 0; period--) {
241 list_for_each_entry_safe(qset, t, &whc->periodic_list[period], list_node) {
242 if (!qset->in_hw_list)
243 update |= WHC_UPDATE_ADDED;
244 update |= pzl_process_qset(whc, qset);
245 }
246 }
247
248 if (update & (WHC_UPDATE_ADDED | WHC_UPDATE_REMOVED))
249 update_pzl_hw_view(whc);
250
251 dump_pzl(whc, "after processing");
252
253 spin_unlock_irq(&whc->lock);
254
255 if (update) {
256 uint32_t wusbcmd = WUSBCMD_PERIODIC_UPDATED | WUSBCMD_PERIODIC_SYNCED_DB;
257 if (update & WHC_UPDATE_REMOVED)
258 wusbcmd |= WUSBCMD_PERIODIC_QSET_RM;
259 pzl_update(whc, wusbcmd);
260 }
261
262 /*
263 * Now that the PZL is updated, complete the removal of any
264 * removed qsets.
265 */
266 spin_lock(&whc->lock);
267
268 list_for_each_entry_safe(qset, t, &whc->periodic_removed_list, list_node) {
269 qset_remove_complete(whc, qset);
270 }
271
272 spin_unlock(&whc->lock);
273}
274
275/**
276 * pzl_urb_enqueue - queue an URB onto the periodic list (PZL)
277 * @whc: the WHCI host controller
278 * @urb: the URB to enqueue
279 * @mem_flags: flags for any memory allocations
280 *
281 * The qset for the endpoint is obtained and the urb queued on to it.
282 *
283 * Work is scheduled to update the hardware's view of the PZL.
284 */
285int pzl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags)
286{
287 struct whc_qset *qset;
288 int err;
289 unsigned long flags;
290
291 spin_lock_irqsave(&whc->lock, flags);
292
293 qset = get_qset(whc, urb, GFP_ATOMIC);
294 if (qset == NULL)
295 err = -ENOMEM;
296 else
297 err = qset_add_urb(whc, qset, urb, GFP_ATOMIC);
298 if (!err) {
299 usb_hcd_link_urb_to_ep(&whc->wusbhc.usb_hcd, urb);
300 if (!qset->in_sw_list)
301 qset_insert_in_sw_list(whc, qset);
302 }
303
304 spin_unlock_irqrestore(&whc->lock, flags);
305
306 if (!err)
307 queue_work(whc->workqueue, &whc->periodic_work);
308
309 return 0;
310}
311
312/**
313 * pzl_urb_dequeue - remove an URB (qset) from the periodic list
314 * @whc: the WHCI host controller
315 * @urb: the URB to dequeue
316 * @status: the current status of the URB
317 *
318 * URBs that do not yet have qTDs can simply be removed from the software
319 * queue, otherwise the qset must be removed so the qTDs can be safely
320 * removed.
321 */
322int pzl_urb_dequeue(struct whc *whc, struct urb *urb, int status)
323{
324 struct whc_urb *wurb = urb->hcpriv;
325 struct whc_qset *qset = wurb->qset;
326 struct whc_std *std, *t;
327 int ret;
328 unsigned long flags;
329
330 spin_lock_irqsave(&whc->lock, flags);
331
332 ret = usb_hcd_check_unlink_urb(&whc->wusbhc.usb_hcd, urb, status);
333 if (ret < 0)
334 goto out;
335
336 list_for_each_entry_safe(std, t, &qset->stds, list_node) {
337 if (std->urb == urb)
338 qset_free_std(whc, std);
339 else
340 std->qtd = NULL; /* so this std is re-added when the qset is */
341 }
342
343 pzl_qset_remove(whc, qset);
344 wurb->status = status;
345 wurb->is_async = false;
346 queue_work(whc->workqueue, &wurb->dequeue_work);
347
348out:
349 spin_unlock_irqrestore(&whc->lock, flags);
350
351 return ret;
352}
353
354/**
355 * pzl_qset_delete - delete a qset from the PZL
356 */
357void pzl_qset_delete(struct whc *whc, struct whc_qset *qset)
358{
359 qset->remove = 1;
360 queue_work(whc->workqueue, &whc->periodic_work);
361 qset_delete(whc, qset);
362}
363
364
365/**
366 * pzl_init - initialize the periodic zone list
367 * @whc: the WHCI host controller
368 */
369int pzl_init(struct whc *whc)
370{
371 int i;
372
373 whc->pz_list = dma_alloc_coherent(&whc->umc->dev, sizeof(u64) * 16,
374 &whc->pz_list_dma, GFP_KERNEL);
375 if (whc->pz_list == NULL)
376 return -ENOMEM;
377
378 /* Set T bit on all elements in PZL. */
379 for (i = 0; i < 16; i++)
380 whc->pz_list[i] = cpu_to_le64(QH_LINK_NTDS(8) | QH_LINK_T);
381
382 le_writeq(whc->pz_list_dma, whc->base + WUSBPERIODICLISTBASE);
383
384 return 0;
385}
386
387/**
388 * pzl_clean_up - free PZL resources
389 * @whc: the WHCI host controller
390 *
391 * The PZL is stopped and empty.
392 */
393void pzl_clean_up(struct whc *whc)
394{
395 if (whc->pz_list)
396 dma_free_coherent(&whc->umc->dev, sizeof(u64) * 16, whc->pz_list,
397 whc->pz_list_dma);
398}
diff --git a/drivers/usb/host/whci/qset.c b/drivers/usb/host/whci/qset.c
new file mode 100644
index 000000000000..0420037d2e18
--- /dev/null
+++ b/drivers/usb/host/whci/qset.c
@@ -0,0 +1,567 @@
1/*
2 * Wireless Host Controller (WHC) qset management.
3 *
4 * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License version
8 * 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18#include <linux/kernel.h>
19#include <linux/dma-mapping.h>
20#include <linux/uwb/umc.h>
21#include <linux/usb.h>
22
23#include "../../wusbcore/wusbhc.h"
24
25#include "whcd.h"
26
27void dump_qset(struct whc_qset *qset, struct device *dev)
28{
29 struct whc_std *std;
30 struct urb *urb = NULL;
31 int i;
32
33 dev_dbg(dev, "qset %08x\n", (u32)qset->qset_dma);
34 dev_dbg(dev, " -> %08x\n", (u32)qset->qh.link);
35 dev_dbg(dev, " info: %08x %08x %08x\n",
36 qset->qh.info1, qset->qh.info2, qset->qh.info3);
37 dev_dbg(dev, " sts: %04x errs: %d\n", qset->qh.status, qset->qh.err_count);
38 dev_dbg(dev, " TD: sts: %08x opts: %08x\n",
39 qset->qh.overlay.qtd.status, qset->qh.overlay.qtd.options);
40
41 for (i = 0; i < WHCI_QSET_TD_MAX; i++) {
42 dev_dbg(dev, " %c%c TD[%d]: sts: %08x opts: %08x ptr: %08x\n",
43 i == qset->td_start ? 'S' : ' ',
44 i == qset->td_end ? 'E' : ' ',
45 i, qset->qtd[i].status, qset->qtd[i].options,
46 (u32)qset->qtd[i].page_list_ptr);
47 }
48 dev_dbg(dev, " ntds: %d\n", qset->ntds);
49 list_for_each_entry(std, &qset->stds, list_node) {
50 if (urb != std->urb) {
51 urb = std->urb;
52 dev_dbg(dev, " urb %p transferred: %d bytes\n", urb,
53 urb->actual_length);
54 }
55 if (std->qtd)
56 dev_dbg(dev, " sTD[%td]: %zu bytes @ %08x\n",
57 std->qtd - &qset->qtd[0],
58 std->len, std->num_pointers ?
59 (u32)(std->pl_virt[0].buf_ptr) : (u32)std->dma_addr);
60 else
61 dev_dbg(dev, " sTD[-]: %zd bytes @ %08x\n",
62 std->len, std->num_pointers ?
63 (u32)(std->pl_virt[0].buf_ptr) : (u32)std->dma_addr);
64 }
65}
66
67struct whc_qset *qset_alloc(struct whc *whc, gfp_t mem_flags)
68{
69 struct whc_qset *qset;
70 dma_addr_t dma;
71
72 qset = dma_pool_alloc(whc->qset_pool, mem_flags, &dma);
73 if (qset == NULL)
74 return NULL;
75 memset(qset, 0, sizeof(struct whc_qset));
76
77 qset->qset_dma = dma;
78 qset->whc = whc;
79
80 INIT_LIST_HEAD(&qset->list_node);
81 INIT_LIST_HEAD(&qset->stds);
82
83 return qset;
84}
85
86/**
87 * qset_fill_qh - fill the static endpoint state in a qset's QHead
88 * @qset: the qset whose QH needs initializing with static endpoint
89 * state
90 * @urb: an urb for a transfer to this endpoint
91 */
92static void qset_fill_qh(struct whc_qset *qset, struct urb *urb)
93{
94 struct usb_device *usb_dev = urb->dev;
95 struct usb_wireless_ep_comp_descriptor *epcd;
96 bool is_out;
97
98 is_out = usb_pipeout(urb->pipe);
99
100 epcd = (struct usb_wireless_ep_comp_descriptor *)qset->ep->extra;
101
102 if (epcd) {
103 qset->max_seq = epcd->bMaxSequence;
104 qset->max_burst = epcd->bMaxBurst;
105 } else {
106 qset->max_seq = 2;
107 qset->max_burst = 1;
108 }
109
110 qset->qh.info1 = cpu_to_le32(
111 QH_INFO1_EP(usb_pipeendpoint(urb->pipe))
112 | (is_out ? QH_INFO1_DIR_OUT : QH_INFO1_DIR_IN)
113 | usb_pipe_to_qh_type(urb->pipe)
114 | QH_INFO1_DEV_INFO_IDX(wusb_port_no_to_idx(usb_dev->portnum))
115 | QH_INFO1_MAX_PKT_LEN(usb_maxpacket(urb->dev, urb->pipe, is_out))
116 );
117 qset->qh.info2 = cpu_to_le32(
118 QH_INFO2_BURST(qset->max_burst)
119 | QH_INFO2_DBP(0)
120 | QH_INFO2_MAX_COUNT(3)
121 | QH_INFO2_MAX_RETRY(3)
122 | QH_INFO2_MAX_SEQ(qset->max_seq - 1)
123 );
124 /* FIXME: where can we obtain these Tx parameters from? Why
125 * doesn't the chip know what Tx power to use? It knows the Rx
126 * strength and can presumably guess the Tx power required
127 * from that? */
128 qset->qh.info3 = cpu_to_le32(
129 QH_INFO3_TX_RATE_53_3
130 | QH_INFO3_TX_PWR(0) /* 0 == max power */
131 );
132}
133
134/**
135 * qset_clear - clear fields in a qset so it may be reinserted into a
136 * schedule
137 */
138void qset_clear(struct whc *whc, struct whc_qset *qset)
139{
140 qset->td_start = qset->td_end = qset->ntds = 0;
141 qset->remove = 0;
142
143 qset->qh.link = cpu_to_le32(QH_LINK_NTDS(8) | QH_LINK_T);
144 qset->qh.status = cpu_to_le16(QH_STATUS_ICUR(qset->td_start));
145 qset->qh.err_count = 0;
146 qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1);
147 qset->qh.scratch[0] = 0;
148 qset->qh.scratch[1] = 0;
149 qset->qh.scratch[2] = 0;
150
151 memset(&qset->qh.overlay, 0, sizeof(qset->qh.overlay));
152
153 init_completion(&qset->remove_complete);
154}
155
156/**
157 * get_qset - get the qset for an async endpoint
158 *
159 * A new qset is created if one does not already exist.
160 */
161struct whc_qset *get_qset(struct whc *whc, struct urb *urb,
162 gfp_t mem_flags)
163{
164 struct whc_qset *qset;
165
166 qset = urb->ep->hcpriv;
167 if (qset == NULL) {
168 qset = qset_alloc(whc, mem_flags);
169 if (qset == NULL)
170 return NULL;
171
172 qset->ep = urb->ep;
173 urb->ep->hcpriv = qset;
174 qset_fill_qh(qset, urb);
175 }
176 return qset;
177}
178
179void qset_remove_complete(struct whc *whc, struct whc_qset *qset)
180{
181 list_del_init(&qset->list_node);
182 complete(&qset->remove_complete);
183}
184
185/**
186 * qset_add_qtds - add qTDs for an URB to a qset
187 *
188 * Returns true if the list (ASL/PZL) must be updated because (for a
189 * WHCI 0.95 controller) an activated qTD was pointed to by iCur.
190 */
191enum whc_update qset_add_qtds(struct whc *whc, struct whc_qset *qset)
192{
193 struct whc_std *std;
194 enum whc_update update = 0;
195
196 list_for_each_entry(std, &qset->stds, list_node) {
197 struct whc_qtd *qtd;
198 uint32_t status;
199
200 if (qset->ntds >= WHCI_QSET_TD_MAX
201 || (qset->pause_after_urb && std->urb != qset->pause_after_urb))
202 break;
203
204 if (std->qtd)
205 continue; /* already has a qTD */
206
207 qtd = std->qtd = &qset->qtd[qset->td_end];
208
209 /* Fill in setup bytes for control transfers. */
210 if (usb_pipecontrol(std->urb->pipe))
211 memcpy(qtd->setup, std->urb->setup_packet, 8);
212
213 status = QTD_STS_ACTIVE | QTD_STS_LEN(std->len);
214
215 if (whc_std_last(std) && usb_pipeout(std->urb->pipe))
216 status |= QTD_STS_LAST_PKT;
217
218 /*
219 * For an IN transfer the iAlt field should be set so
220 * the h/w will automatically advance to the next
221 * transfer. However, if there are 8 or more TDs
222 * remaining in this transfer then iAlt cannot be set
223 * as it could point to somewhere in this transfer.
224 */
225 if (std->ntds_remaining < WHCI_QSET_TD_MAX) {
226 int ialt;
227 ialt = (qset->td_end + std->ntds_remaining) % WHCI_QSET_TD_MAX;
228 status |= QTD_STS_IALT(ialt);
229 } else if (usb_pipein(std->urb->pipe))
230 qset->pause_after_urb = std->urb;
231
232 if (std->num_pointers)
233 qtd->options = cpu_to_le32(QTD_OPT_IOC);
234 else
235 qtd->options = cpu_to_le32(QTD_OPT_IOC | QTD_OPT_SMALL);
236 qtd->page_list_ptr = cpu_to_le64(std->dma_addr);
237
238 qtd->status = cpu_to_le32(status);
239
240 if (QH_STATUS_TO_ICUR(qset->qh.status) == qset->td_end)
241 update = WHC_UPDATE_UPDATED;
242
243 if (++qset->td_end >= WHCI_QSET_TD_MAX)
244 qset->td_end = 0;
245 qset->ntds++;
246 }
247
248 return update;
249}
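
To make the iAlt arithmetic above concrete, here is a worked example (illustration only, assuming WHCI_QSET_TD_MAX is 8, as the QH_LINK_NTDS(8) initialisation elsewhere in this patch suggests):

/*
 * IN transfer, td_end == 5, ntds_remaining == 3:
 *     ialt = (5 + 3) % 8 == 0
 * i.e. iAlt names the qTD slot just past this transfer's last qTD, so
 * the hardware can advance there by itself after a short read.
 *
 * IN transfer with ntds_remaining >= 8: every slot in the ring would
 * still belong to this transfer, so no safe iAlt exists and the qset
 * is paused after this URB instead (pause_after_urb above).
 */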
250
251/**
252 * qset_remove_qtd - remove the first qTD from a qset.
253 *
254 * The qTD might still be active (if it's part of an IN URB that
255 * resulted in a short read) so ensure it's deactivated.
256 */
257static void qset_remove_qtd(struct whc *whc, struct whc_qset *qset)
258{
259 qset->qtd[qset->td_start].status = 0;
260
261 if (++qset->td_start >= WHCI_QSET_TD_MAX)
262 qset->td_start = 0;
263 qset->ntds--;
264}
265
266/**
267 * qset_free_std - remove an sTD and free it.
268 * @whc: the WHCI host controller
269 * @std: the sTD to remove and free.
270 */
271void qset_free_std(struct whc *whc, struct whc_std *std)
272{
273 list_del(&std->list_node);
274 if (std->num_pointers) {
275 dma_unmap_single(whc->wusbhc.dev, std->dma_addr,
276 std->num_pointers * sizeof(struct whc_page_list_entry),
277 DMA_TO_DEVICE);
278 kfree(std->pl_virt);
279 }
280
281 kfree(std);
282}
283
284/**
285 * qset_remove_qtds - remove an URB's qTDs (and sTDs).
286 */
287static void qset_remove_qtds(struct whc *whc, struct whc_qset *qset,
288 struct urb *urb)
289{
290 struct whc_std *std, *t;
291
292 list_for_each_entry_safe(std, t, &qset->stds, list_node) {
293 if (std->urb != urb)
294 break;
295 if (std->qtd != NULL)
296 qset_remove_qtd(whc, qset);
297 qset_free_std(whc, std);
298 }
299}
300
301/**
302 * qset_free_stds - free any remaining sTDs for an URB.
303 */
304static void qset_free_stds(struct whc_qset *qset, struct urb *urb)
305{
306 struct whc_std *std, *t;
307
308 list_for_each_entry_safe(std, t, &qset->stds, list_node) {
309 if (std->urb == urb)
310 qset_free_std(qset->whc, std);
311 }
312}
313
314static int qset_fill_page_list(struct whc *whc, struct whc_std *std, gfp_t mem_flags)
315{
316 dma_addr_t dma_addr = std->dma_addr;
317 dma_addr_t sp, ep;
318 size_t std_len = std->len;
319 size_t pl_len;
320 int p;
321
322 sp = ALIGN(dma_addr, WHCI_PAGE_SIZE);
323 ep = dma_addr + std_len;
324 std->num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE);
325
326 pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
327 std->pl_virt = kmalloc(pl_len, mem_flags);
328 if (std->pl_virt == NULL)
329 return -ENOMEM;
330 std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt, pl_len, DMA_TO_DEVICE);
331
332 for (p = 0; p < std->num_pointers; p++) {
333 std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
334 dma_addr = ALIGN(dma_addr + WHCI_PAGE_SIZE, WHCI_PAGE_SIZE);
335 }
336
337 return 0;
338}
339
340/**
341 * urb_dequeue_work - executes asl/pzl update and gives back the urb to the system.
342 */
343static void urb_dequeue_work(struct work_struct *work)
344{
345 struct whc_urb *wurb = container_of(work, struct whc_urb, dequeue_work);
346 struct whc_qset *qset = wurb->qset;
347 struct whc *whc = qset->whc;
348 unsigned long flags;
349
350 if (wurb->is_async == true)
351 asl_update(whc, WUSBCMD_ASYNC_UPDATED
352 | WUSBCMD_ASYNC_SYNCED_DB
353 | WUSBCMD_ASYNC_QSET_RM);
354 else
355 pzl_update(whc, WUSBCMD_PERIODIC_UPDATED
356 | WUSBCMD_PERIODIC_SYNCED_DB
357 | WUSBCMD_PERIODIC_QSET_RM);
358
359 spin_lock_irqsave(&whc->lock, flags);
360 qset_remove_urb(whc, qset, wurb->urb, wurb->status);
361 spin_unlock_irqrestore(&whc->lock, flags);
362}
363
364/**
365 * qset_add_urb - add an urb to the qset's queue.
366 *
367 * The URB is chopped into sTDs, one for each qTD that will be required.
368 * At least one qTD (and sTD) is required even if the transfer has no
369 * data (e.g., for some control transfers).
370 */
371int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb,
372 gfp_t mem_flags)
373{
374 struct whc_urb *wurb;
375 int remaining = urb->transfer_buffer_length;
376 u64 transfer_dma = urb->transfer_dma;
377 int ntds_remaining;
378
379 ntds_remaining = DIV_ROUND_UP(remaining, QTD_MAX_XFER_SIZE);
380 if (ntds_remaining == 0)
381 ntds_remaining = 1;
382
383 wurb = kzalloc(sizeof(struct whc_urb), mem_flags);
384 if (wurb == NULL)
385 goto err_no_mem;
386 urb->hcpriv = wurb;
387 wurb->qset = qset;
388 wurb->urb = urb;
389 INIT_WORK(&wurb->dequeue_work, urb_dequeue_work);
390
391 while (ntds_remaining) {
392 struct whc_std *std;
393 size_t std_len;
394
395 std = kmalloc(sizeof(struct whc_std), mem_flags);
396 if (std == NULL)
397 goto err_no_mem;
398
399 std_len = remaining;
400 if (std_len > QTD_MAX_XFER_SIZE)
401 std_len = QTD_MAX_XFER_SIZE;
402
403 std->urb = urb;
404 std->dma_addr = transfer_dma;
405 std->len = std_len;
406 std->ntds_remaining = ntds_remaining;
407 std->qtd = NULL;
408
409 INIT_LIST_HEAD(&std->list_node);
410 list_add_tail(&std->list_node, &qset->stds);
411
412 if (std_len > WHCI_PAGE_SIZE) {
413 if (qset_fill_page_list(whc, std, mem_flags) < 0)
414 goto err_no_mem;
415 } else
416 std->num_pointers = 0;
417
418 ntds_remaining--;
419 remaining -= std_len;
420 transfer_dma += std_len;
421 }
422
423 return 0;
424
425err_no_mem:
426 qset_free_stds(qset, urb);
427 return -ENOMEM;
428}
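
[Editorial illustration, not part of this patch] The following standalone user-space sketch mirrors the split performed by qset_add_urb() above, assuming only the QTD_MAX_XFER_SIZE value defined in whci-hc.h; the 2 MiB transfer length is made up.

#include <stdio.h>
#include <stddef.h>

#define QTD_MAX_XFER_SIZE	1048575		/* from whci-hc.h: 2^20 - 1 */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	size_t remaining = 2 * 1024 * 1024;	/* hypothetical 2 MiB URB */
	int ntds = DIV_ROUND_UP(remaining, QTD_MAX_XFER_SIZE);

	if (ntds == 0)
		ntds = 1;	/* zero-length transfers still get one qTD/sTD */

	while (ntds--) {
		size_t std_len = remaining > QTD_MAX_XFER_SIZE ?
					QTD_MAX_XFER_SIZE : remaining;
		printf("sTD of %zu bytes, %d TD(s) still to queue\n",
		       std_len, ntds);
		remaining -= std_len;
	}
	return 0;
}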
429
430/**
431 * qset_remove_urb - remove an URB from the urb queue.
432 *
433 * The URB is returned to the USB subsystem.
434 */
435void qset_remove_urb(struct whc *whc, struct whc_qset *qset,
436 struct urb *urb, int status)
437{
438 struct wusbhc *wusbhc = &whc->wusbhc;
439 struct whc_urb *wurb = urb->hcpriv;
440
441 usb_hcd_unlink_urb_from_ep(&wusbhc->usb_hcd, urb);
442 /* Drop the lock as urb->complete() may enqueue another urb. */
443 spin_unlock(&whc->lock);
444 wusbhc_giveback_urb(wusbhc, urb, status);
445 spin_lock(&whc->lock);
446
447 kfree(wurb);
448}
449
450/**
451 * get_urb_status_from_qtd - get the completed urb status from qTD status
452 * @urb: completed urb
453 * @status: qTD status
454 */
455static int get_urb_status_from_qtd(struct urb *urb, u32 status)
456{
457 if (status & QTD_STS_HALTED) {
458 if (status & QTD_STS_DBE)
459 return usb_pipein(urb->pipe) ? -ENOSR : -ECOMM;
460 else if (status & QTD_STS_BABBLE)
461 return -EOVERFLOW;
462 else if (status & QTD_STS_RCE)
463 return -ETIME;
464 return -EPIPE;
465 }
466 if (usb_pipein(urb->pipe)
467 && (urb->transfer_flags & URB_SHORT_NOT_OK)
468 && urb->actual_length < urb->transfer_buffer_length)
469 return -EREMOTEIO;
470 return 0;
471}
472
473/**
474 * process_inactive_qtd - process an inactive (but not halted) qTD.
475 *
476 * Update the urb with the number of bytes transferred by the qTD.
477 * If the urb is completely transferred or (for an IN transfer) the LPF is
478 * set, then the transfer is complete and the urb should be returned
479 * to the system.
480 */
481void process_inactive_qtd(struct whc *whc, struct whc_qset *qset,
482 struct whc_qtd *qtd)
483{
484 struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node);
485 struct urb *urb = std->urb;
486 uint32_t status;
487 bool complete;
488
489 status = le32_to_cpu(qtd->status);
490
491 urb->actual_length += std->len - QTD_STS_TO_LEN(status);
492
493 if (usb_pipein(urb->pipe) && (status & QTD_STS_LAST_PKT))
494 complete = true;
495 else
496 complete = whc_std_last(std);
497
498 qset_remove_qtd(whc, qset);
499 qset_free_std(whc, std);
500
501 /*
502 * Transfers for this URB are complete? Then return it to the
503 * USB subsystem.
504 */
505 if (complete) {
506 qset_remove_qtds(whc, qset, urb);
507 qset_remove_urb(whc, qset, urb, get_urb_status_from_qtd(urb, status));
508
509 /*
510 * If iAlt isn't valid then the hardware didn't
511 * advance iCur. Adjust the start and end pointers to
512 * match iCur.
513 */
514 if (!(status & QTD_STS_IALT_VALID))
515 qset->td_start = qset->td_end
516 = QH_STATUS_TO_ICUR(le16_to_cpu(qset->qh.status));
517 qset->pause_after_urb = NULL;
518 }
519}
520
521/**
522 * process_halted_qtd - process a qset with a halted qtd
523 *
524 * Remove all the qTDs for the failed URB and return the failed URB to
525 * the USB subsystem. Then remove all other qTDs so the qset can be
526 * removed.
527 *
528 * FIXME: this is the point where rate adaptation can be done. If a
529 * transfer failed because it exceeded the maximum number of retries
530 * then it could be reactivated with a slower rate without having to
531 * remove the qset.
532 */
533void process_halted_qtd(struct whc *whc, struct whc_qset *qset,
534 struct whc_qtd *qtd)
535{
536 struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node);
537 struct urb *urb = std->urb;
538 int urb_status;
539
540 urb_status = get_urb_status_from_qtd(urb, le32_to_cpu(qtd->status));
541
542 qset_remove_qtds(whc, qset, urb);
543 qset_remove_urb(whc, qset, urb, urb_status);
544
545 list_for_each_entry(std, &qset->stds, list_node) {
546 if (qset->ntds == 0)
547 break;
548 qset_remove_qtd(whc, qset);
549 std->qtd = NULL;
550 }
551
552 qset->remove = 1;
553}
554
555void qset_free(struct whc *whc, struct whc_qset *qset)
556{
557 dma_pool_free(whc->qset_pool, qset, qset->qset_dma);
558}
559
560/**
561 * qset_delete - wait for a qset to be unused, then free it.
562 */
563void qset_delete(struct whc *whc, struct whc_qset *qset)
564{
565 wait_for_completion(&qset->remove_complete);
566 qset_free(whc, qset);
567}
diff --git a/drivers/usb/host/whci/whcd.h b/drivers/usb/host/whci/whcd.h
new file mode 100644
index 000000000000..1d2a53bd39fd
--- /dev/null
+++ b/drivers/usb/host/whci/whcd.h
@@ -0,0 +1,197 @@
1/*
2 * Wireless Host Controller (WHC) private header.
3 *
4 * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License version
8 * 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20#ifndef __WHCD_H
21#define __WHCD_H
22
23#include <linux/uwb/whci.h>
24#include <linux/workqueue.h>
25
26#include "whci-hc.h"
27
28/* Generic command timeout. */
29#define WHC_GENCMD_TIMEOUT_MS 100
30
31
32struct whc {
33 struct wusbhc wusbhc;
34 struct umc_dev *umc;
35
36 resource_size_t base_phys;
37 void __iomem *base;
38 int irq;
39
40 u8 n_devices;
41 u8 n_keys;
42 u8 n_mmc_ies;
43
44 u64 *pz_list;
45 struct dn_buf_entry *dn_buf;
46 struct di_buf_entry *di_buf;
47 dma_addr_t pz_list_dma;
48 dma_addr_t dn_buf_dma;
49 dma_addr_t di_buf_dma;
50
51 spinlock_t lock;
52 struct mutex mutex;
53
54 void * gen_cmd_buf;
55 dma_addr_t gen_cmd_buf_dma;
56 wait_queue_head_t cmd_wq;
57
58 struct workqueue_struct *workqueue;
59 struct work_struct dn_work;
60
61 struct dma_pool *qset_pool;
62
63 struct list_head async_list;
64 struct list_head async_removed_list;
65 wait_queue_head_t async_list_wq;
66 struct work_struct async_work;
67
68 struct list_head periodic_list[5];
69 struct list_head periodic_removed_list;
70 wait_queue_head_t periodic_list_wq;
71 struct work_struct periodic_work;
72};
73
74#define wusbhc_to_whc(w) (container_of((w), struct whc, wusbhc))
75
76/**
77 * struct whc_std - a software TD.
78 * @urb: the URB this sTD is for.
79 * @offset: start of the URB's data for this TD.
80 * @len: the length of data in the associated TD.
81 * @ntds_remaining: number of TDs (starting from this one) in this transfer.
82 *
83 * Queued URBs may require more TDs than are available in a qset so we
84 * use a list of these "software TDs" (sTDs) to hold per-TD data.
85 */
86struct whc_std {
87 struct urb *urb;
88 size_t len;
89 int ntds_remaining;
90 struct whc_qtd *qtd;
91
92 struct list_head list_node;
93 int num_pointers;
94 dma_addr_t dma_addr;
95 struct whc_page_list_entry *pl_virt;
96};
97
98/**
99 * struct whc_urb - per URB host controller structure.
100 * @urb: the URB this struct is for.
101 * @qset: the qset associated to the URB.
102 * @dequeue_work: the work to remove the URB when dequeued.
103 * @is_async: whether the URB belongs to the async scheduler or not.
104 * @status: the status to be returned when calling wusbhc_giveback_urb.
105 */
106struct whc_urb {
107 struct urb *urb;
108 struct whc_qset *qset;
109 struct work_struct dequeue_work;
110 bool is_async;
111 int status;
112};
113
114/**
115 * whc_std_last - is this sTD the URB's last?
116 * @std: the sTD to check.
117 */
118static inline bool whc_std_last(struct whc_std *std)
119{
120 return std->ntds_remaining <= 1;
121}
122
123enum whc_update {
124 WHC_UPDATE_ADDED = 0x01,
125 WHC_UPDATE_REMOVED = 0x02,
126 WHC_UPDATE_UPDATED = 0x04,
127};
128
129/* init.c */
130int whc_init(struct whc *whc);
131void whc_clean_up(struct whc *whc);
132
133/* hw.c */
134void whc_write_wusbcmd(struct whc *whc, u32 mask, u32 val);
135int whc_do_gencmd(struct whc *whc, u32 cmd, u32 params, void *addr, size_t len);
136
137/* wusb.c */
138int whc_wusbhc_start(struct wusbhc *wusbhc);
139void whc_wusbhc_stop(struct wusbhc *wusbhc);
140int whc_mmcie_add(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt,
141 u8 handle, struct wuie_hdr *wuie);
142int whc_mmcie_rm(struct wusbhc *wusbhc, u8 handle);
143int whc_bwa_set(struct wusbhc *wusbhc, s8 stream_index, const struct uwb_mas_bm *mas_bm);
144int whc_dev_info_set(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev);
145int whc_set_num_dnts(struct wusbhc *wusbhc, u8 interval, u8 slots);
146int whc_set_ptk(struct wusbhc *wusbhc, u8 port_idx, u32 tkid,
147 const void *ptk, size_t key_size);
148int whc_set_gtk(struct wusbhc *wusbhc, u32 tkid,
149 const void *gtk, size_t key_size);
150int whc_set_cluster_id(struct whc *whc, u8 bcid);
151
152/* int.c */
153irqreturn_t whc_int_handler(struct usb_hcd *hcd);
154void whc_dn_work(struct work_struct *work);
155
156/* asl.c */
157void asl_start(struct whc *whc);
158void asl_stop(struct whc *whc);
159int asl_init(struct whc *whc);
160void asl_clean_up(struct whc *whc);
161int asl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags);
162int asl_urb_dequeue(struct whc *whc, struct urb *urb, int status);
163void asl_qset_delete(struct whc *whc, struct whc_qset *qset);
164void scan_async_work(struct work_struct *work);
165
166/* pzl.c */
167int pzl_init(struct whc *whc);
168void pzl_clean_up(struct whc *whc);
169void pzl_start(struct whc *whc);
170void pzl_stop(struct whc *whc);
171int pzl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags);
172int pzl_urb_dequeue(struct whc *whc, struct urb *urb, int status);
173void pzl_qset_delete(struct whc *whc, struct whc_qset *qset);
174void scan_periodic_work(struct work_struct *work);
175
176/* qset.c */
177struct whc_qset *qset_alloc(struct whc *whc, gfp_t mem_flags);
178void qset_free(struct whc *whc, struct whc_qset *qset);
179struct whc_qset *get_qset(struct whc *whc, struct urb *urb, gfp_t mem_flags);
180void qset_delete(struct whc *whc, struct whc_qset *qset);
181void qset_clear(struct whc *whc, struct whc_qset *qset);
182int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb,
183 gfp_t mem_flags);
184void qset_free_std(struct whc *whc, struct whc_std *std);
185void qset_remove_urb(struct whc *whc, struct whc_qset *qset,
186 struct urb *urb, int status);
187void process_halted_qtd(struct whc *whc, struct whc_qset *qset,
188 struct whc_qtd *qtd);
189void process_inactive_qtd(struct whc *whc, struct whc_qset *qset,
190 struct whc_qtd *qtd);
191enum whc_update qset_add_qtds(struct whc *whc, struct whc_qset *qset);
192void qset_remove_complete(struct whc *whc, struct whc_qset *qset);
193void dump_qset(struct whc_qset *qset, struct device *dev);
194void pzl_update(struct whc *whc, uint32_t wusbcmd);
195void asl_update(struct whc *whc, uint32_t wusbcmd);
196
197#endif /* #ifndef __WHCD_H */
diff --git a/drivers/usb/host/whci/whci-hc.h b/drivers/usb/host/whci/whci-hc.h
new file mode 100644
index 000000000000..bff1eb7a35cf
--- /dev/null
+++ b/drivers/usb/host/whci/whci-hc.h
@@ -0,0 +1,416 @@
1/*
2 * Wireless Host Controller (WHC) data structures.
3 *
4 * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License version
8 * 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20#ifndef _WHCI_WHCI_HC_H
21#define _WHCI_WHCI_HC_H
22
23#include <linux/list.h>
24
25/**
26 * WHCI_PAGE_SIZE - page size used by WHCI
27 *
28 * WHCI assumes that the host system uses pages of 4096 octets.
29 */
30#define WHCI_PAGE_SIZE 4096
31
32
33/**
34 * QTD_MAX_XFER_SIZE - max number of bytes to transfer with a single
35 * qTD.
36 *
37 * This is 2^20 - 1.
38 */
39#define QTD_MAX_XFER_SIZE 1048575
40
41
42/**
43 * struct whc_qtd - Queue Element Transfer Descriptors (qTD)
44 *
45 * This describes the data for a bulk, control or interrupt transfer.
46 *
47 * [WHCI] section 3.2.4
48 */
49struct whc_qtd {
50 __le32 status; /*< remaining transfer len and transfer status */
51 __le32 options;
52 __le64 page_list_ptr; /*< physical pointer to data buffer page list*/
53 __u8 setup[8]; /*< setup data for control transfers */
54} __attribute__((packed));
55
56#define QTD_STS_ACTIVE (1 << 31) /* enable execution of transaction */
57#define QTD_STS_HALTED (1 << 30) /* transfer halted */
58#define QTD_STS_DBE (1 << 29) /* data buffer error */
59#define QTD_STS_BABBLE (1 << 28) /* babble detected */
60#define QTD_STS_RCE (1 << 27) /* retry count exceeded */
61#define QTD_STS_LAST_PKT (1 << 26) /* set Last Packet Flag in WUSB header */
62#define QTD_STS_INACTIVE (1 << 25) /* queue set is marked inactive */
63#define QTD_STS_IALT_VALID (1 << 23) /* iAlt field is valid */
64#define QTD_STS_IALT(i) (QTD_STS_IALT_VALID | ((i) << 20)) /* iAlt field */
65#define QTD_STS_LEN(l) ((l) << 0) /* transfer length */
66#define QTD_STS_TO_LEN(s) ((s) & 0x000fffff)
67
68#define QTD_OPT_IOC (1 << 1) /* interrupt on complete */
69#define QTD_OPT_SMALL (1 << 0) /* page_list_ptr points to buffer directly */
70
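
[Editorial illustration, not part of this patch] The qTD status word keeps the remaining transfer length in its low 20 bits, which is why process_inactive_qtd() in qset.c computes the bytes actually transferred as std->len - QTD_STS_TO_LEN(status). A standalone sketch with a made-up status value:

#include <stdint.h>
#include <stdio.h>

#define QTD_STS_LEN(l)		((l) << 0)
#define QTD_STS_TO_LEN(s)	((s) & 0x000fffff)

int main(void)
{
	uint32_t std_len = 4096;		/* hypothetical sTD length */
	uint32_t status  = QTD_STS_LEN(1024);	/* 1024 bytes left untransferred */

	printf("bytes transferred: %u\n", std_len - QTD_STS_TO_LEN(status));
	return 0;
}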
71/**
72 * struct whc_itd - Isochronous Queue Element Transfer Descriptors (iTD)
73 *
74 * This describes the data and other parameters for an isochronous
75 * transfer.
76 *
77 * [WHCI] section 3.2.5
78 */
79struct whc_itd {
80 __le16 presentation_time; /*< presentation time for OUT transfers */
81 __u8 num_segments; /*< number of data segments in segment list */
82 __u8 status; /*< command execution status */
83 __le32 options; /*< misc transfer options */
84 __le64 page_list_ptr; /*< physical pointer to data buffer page list */
85 __le64 seg_list_ptr; /*< physical pointer to segment list */
86} __attribute__((packed));
87
88#define ITD_STS_ACTIVE (1 << 7) /* enable execution of transaction */
89#define ITD_STS_DBE (1 << 5) /* data buffer error */
90#define ITD_STS_BABBLE (1 << 4) /* babble detected */
91#define ITD_STS_INACTIVE (1 << 1) /* queue set is marked inactive */
92
93#define ITD_OPT_IOC (1 << 1) /* interrupt on complete */
94#define ITD_OPT_SMALL (1 << 0) /* page_list_ptr points to buffer directly */
95
96/**
97 * Page list entry.
98 *
99 * A TD's page list must contain sufficient page list entries for the
100 * total data length in the TD.
101 *
102 * [WHCI] section 3.2.4.3
103 */
104struct whc_page_list_entry {
105 __le64 buf_ptr; /*< physical pointer to buffer */
106} __attribute__((packed));
107
108/**
109 * struct whc_seg_list_entry - Segment list entry.
110 *
111 * Describes a portion of the data buffer described in the containing
112 * qTD's page list.
113 *
114 * seg_ptr = qtd->page_list_ptr[qtd->seg_list_ptr[seg].idx].buf_ptr
115 * + qtd->seg_list_ptr[seg].offset;
116 *
117 * Segments can't cross page boundaries.
118 *
119 * [WHCI] section 3.2.5.5
120 */
121struct whc_seg_list_entry {
122 __le16 len; /*< segment length */
123 __u8 idx; /*< index into page list */
124 __u8 status; /*< segment status */
125 __le16 offset; /*< 12 bit offset into page */
126} __attribute__((packed));
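
[Editorial illustration, not part of this patch] A standalone sketch of the seg_ptr calculation documented above; the page and segment values are made up.

#include <stdint.h>
#include <stdio.h>

struct page_list_entry { uint64_t buf_ptr; };
struct seg_list_entry  { uint16_t len; uint8_t idx; uint8_t status; uint16_t offset; };

int main(void)
{
	/* hypothetical two-page buffer and one 512-byte segment in page 1 */
	struct page_list_entry pages[2] = { { 0x10000 }, { 0x20000 } };
	struct seg_list_entry  seg = { .len = 512, .idx = 1, .offset = 0x100 };

	/* seg_ptr = page_list[seg.idx].buf_ptr + seg.offset */
	uint64_t seg_ptr = pages[seg.idx].buf_ptr + seg.offset;

	printf("segment of %u bytes starts at 0x%llx\n",
	       seg.len, (unsigned long long)seg_ptr);
	return 0;
}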
127
128/**
129 * struct whc_qhead - endpoint and status information for a qset.
130 *
131 * [WHCI] section 3.2.6
132 */
133struct whc_qhead {
134 __le64 link; /*< next qset in list */
135 __le32 info1;
136 __le32 info2;
137 __le32 info3;
138 __le16 status;
139 __le16 err_count; /*< transaction error count */
140 __le32 cur_window;
141 __le32 scratch[3]; /*< h/w scratch area */
142 union {
143 struct whc_qtd qtd;
144 struct whc_itd itd;
145 } overlay;
146} __attribute__((packed));
147
148#define QH_LINK_PTR_MASK (~0x03Full)
149#define QH_LINK_PTR(ptr) ((ptr) & QH_LINK_PTR_MASK)
150#define QH_LINK_IQS (1 << 4) /* isochronous queue set */
151#define QH_LINK_NTDS(n) (((n) - 1) << 1) /* number of TDs in queue set */
152#define QH_LINK_T (1 << 0) /* last queue set in periodic schedule list */
153
154#define QH_INFO1_EP(e) ((e) << 0) /* endpoint number */
155#define QH_INFO1_DIR_IN (1 << 4) /* IN transfer */
156#define QH_INFO1_DIR_OUT (0 << 4) /* OUT transfer */
157#define QH_INFO1_TR_TYPE_CTRL (0x0 << 5) /* control transfer */
158#define QH_INFO1_TR_TYPE_ISOC (0x1 << 5) /* isochronous transfer */
159#define QH_INFO1_TR_TYPE_BULK (0x2 << 5) /* bulk transfer */
160#define QH_INFO1_TR_TYPE_INT (0x3 << 5) /* interrupt */
161#define QH_INFO1_TR_TYPE_LP_INT (0x7 << 5) /* low power interrupt */
162#define QH_INFO1_DEV_INFO_IDX(i) ((i) << 8) /* index into device info buffer */
163#define QH_INFO1_SET_INACTIVE (1 << 15) /* set inactive after transfer */
164#define QH_INFO1_MAX_PKT_LEN(l) ((l) << 16) /* maximum packet length */
165
166#define QH_INFO2_BURST(b) ((b) << 0) /* maximum burst length */
167#define QH_INFO2_DBP(p) ((p) << 5) /* data burst policy (see [WUSB] table 5-7) */
168#define QH_INFO2_MAX_COUNT(c) ((c) << 8) /* max isoc/int pkts per zone */
169#define QH_INFO2_RQS (1 << 15) /* reactivate queue set */
170#define QH_INFO2_MAX_RETRY(r) ((r) << 16) /* maximum transaction retries */
171#define QH_INFO2_MAX_SEQ(s) ((s) << 20) /* maximum sequence number */
172#define QH_INFO3_MAX_DELAY(d) ((d) << 0) /* maximum stream delay in 125 us units (isoc only) */
173#define QH_INFO3_INTERVAL(i) ((i) << 16) /* segment interval in 125 us units (isoc only) */
174
175#define QH_INFO3_TX_RATE_53_3 (0 << 24)
176#define QH_INFO3_TX_RATE_80 (1 << 24)
177#define QH_INFO3_TX_RATE_106_7 (2 << 24)
178#define QH_INFO3_TX_RATE_160 (3 << 24)
179#define QH_INFO3_TX_RATE_200 (4 << 24)
180#define QH_INFO3_TX_RATE_320 (5 << 24)
181#define QH_INFO3_TX_RATE_400 (6 << 24)
182#define QH_INFO3_TX_RATE_480 (7 << 24)
183#define QH_INFO3_TX_PWR(p) ((p) << 29) /* transmit power (see [WUSB] section 5.2.1.2) */
184
185#define QH_STATUS_FLOW_CTRL (1 << 15)
186#define QH_STATUS_ICUR(i) ((i) << 5)
187#define QH_STATUS_TO_ICUR(s) (((s) >> 5) & 0x7)
188
189/**
190 * usb_pipe_to_qh_type - USB core pipe type to QH transfer type
191 *
192 * Returns the QH type field for a USB core pipe type.
193 */
194static inline unsigned usb_pipe_to_qh_type(unsigned pipe)
195{
196 static const unsigned type[] = {
197 [PIPE_ISOCHRONOUS] = QH_INFO1_TR_TYPE_ISOC,
198 [PIPE_INTERRUPT] = QH_INFO1_TR_TYPE_INT,
199 [PIPE_CONTROL] = QH_INFO1_TR_TYPE_CTRL,
200 [PIPE_BULK] = QH_INFO1_TR_TYPE_BULK,
201 };
202 return type[usb_pipetype(pipe)];
203}
204
205/**
206 * Maximum number of TDs in a qset.
207 */
208#define WHCI_QSET_TD_MAX 8
209
210/**
211 * struct whc_qset - WUSB data transfers to a specific endpoint
212 * @qh: the QHead of this qset
213 * @qtd: up to 8 qTDs (for qsets for control, bulk and interrupt
214 * transfers)
215 * @itd: up to 8 iTDs (for qsets for isochronous transfers)
216 * @qset_dma: DMA address for this qset
217 * @whc: WHCI HC this qset is for
218 * @ep: endpoint
219 * @stds: list of sTDs queued to this qset
220 * @ntds: number of qTDs queued (not necessarily the same as nTDs
221 * field in the QH)
222 * @td_start: index of the first qTD in the list
223 * @td_end: index of next free qTD in the list (provided
224 * ntds < WHCI_QSET_TD_MAX)
225 *
226 * Queue Sets (qsets) are added to the asynchronous schedule list
227 * (ASL) or the periodic zone list (PZL).
228 *
229 * qsets may contain up to 8 TDs (either qTDs or iTDs as appropriate).
230 * Each TD may refer to at most 1 MiB of data. If a single transfer
231 * has > 8MiB of data, TDs can be reused as they are completed since
232 * the TD list is used as a circular buffer. Similarly, several
233 * (smaller) transfers may be queued in a qset.
234 *
235 * WHCI controllers may cache portions of the qsets in the ASL and
236 * PZL, requiring the WHCD to inform the WHC that the lists have been
237 * updated (fields changed or qsets inserted or removed). For safe
238 * insertion and removal of qsets from the lists the schedule must be
239 * stopped to avoid races in updating the QH link pointers.
240 *
241 * Since the HC is free to execute qsets in any order, all transfers
242 * to an endpoint should use the same qset to ensure transfers are
243 * executed in the order they're submitted.
244 *
245 * [WHCI] section 3.2.3
246 */
247struct whc_qset {
248 struct whc_qhead qh;
249 union {
250 struct whc_qtd qtd[WHCI_QSET_TD_MAX];
251 struct whc_itd itd[WHCI_QSET_TD_MAX];
252 };
253
254 /* private data for WHCD */
255 dma_addr_t qset_dma;
256 struct whc *whc;
257 struct usb_host_endpoint *ep;
258 struct list_head stds;
259 int ntds;
260 int td_start;
261 int td_end;
262 struct list_head list_node;
263 unsigned in_sw_list:1;
264 unsigned in_hw_list:1;
265 unsigned remove:1;
266 struct urb *pause_after_urb;
267 struct completion remove_complete;
268 int max_burst;
269 int max_seq;
270};
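
[Editorial illustration, not part of this patch] The circular TD list described in the comment above can be sketched as a standalone program; it assumes WHCI_QSET_TD_MAX == 8 and a hypothetical sequence of queue/complete operations, mirroring the index handling in qset_add_qtds() and qset_remove_qtd().

#include <stdio.h>

#define WHCI_QSET_TD_MAX	8

int main(void)
{
	int td_start = 6, td_end = 6, ntds = 0, i;

	/* queue four TDs; td_end wraps from 7 back to 0 */
	for (i = 0; i < 4 && ntds < WHCI_QSET_TD_MAX; i++) {
		printf("queue TD at index %d\n", td_end);
		if (++td_end >= WHCI_QSET_TD_MAX)
			td_end = 0;
		ntds++;
	}

	/* complete two TDs; td_start advances (and wraps) the same way */
	for (i = 0; i < 2; i++) {
		printf("complete TD at index %d\n", td_start);
		if (++td_start >= WHCI_QSET_TD_MAX)
			td_start = 0;
		ntds--;
	}

	printf("%d TD(s) still queued\n", ntds);
	return 0;
}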
271
272static inline void whc_qset_set_link_ptr(u64 *ptr, u64 target)
273{
274 if (target)
275 *ptr = (*ptr & ~(QH_LINK_PTR_MASK | QH_LINK_T)) | QH_LINK_PTR(target);
276 else
277 *ptr = QH_LINK_T;
278}
279
280/**
281 * struct di_buf_entry - Device Information (DI) buffer entry.
282 *
283 * There's one of these per connected device.
284 */
285struct di_buf_entry {
286 __le32 availability_info[8]; /*< MAS availability information, one MAS per bit */
287 __le32 addr_sec_info; /*< addressing and security info */
288 __le32 reserved[7];
289} __attribute__((packed));
290
291#define WHC_DI_SECURE (1 << 31)
292#define WHC_DI_DISABLE (1 << 30)
293#define WHC_DI_KEY_IDX(k) ((k) << 8)
294#define WHC_DI_KEY_IDX_MASK 0x0000ff00
295#define WHC_DI_DEV_ADDR(a) ((a) << 0)
296#define WHC_DI_DEV_ADDR_MASK 0x000000ff
297
298/**
299 * struct dn_buf_entry - Device Notification (DN) buffer entry.
300 *
301 * [WHCI] section 3.2.8
302 */
303struct dn_buf_entry {
304 __u8 msg_size; /*< number of octets of valid DN data */
305 __u8 reserved1;
306 __u8 src_addr; /*< source address */
307 __u8 status; /*< buffer entry status */
308 __le32 tkid; /*< TKID for source device, valid if secure bit is set */
309 __u8 dn_data[56]; /*< up to 56 octets of DN data */
310} __attribute__((packed));
311
312#define WHC_DN_STATUS_VALID (1 << 7) /* buffer entry is valid */
313#define WHC_DN_STATUS_SECURE (1 << 6) /* notification received using secure frame */
314
315#define WHC_N_DN_ENTRIES (4096 / sizeof(struct dn_buf_entry))
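
[Editorial illustration, not part of this patch] As a quick sanity check, the DN buffer entry above is 64 octets, so WHC_N_DN_ENTRIES works out to 64 entries per 4096-octet buffer; a standalone check:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct dn_buf_entry {
	uint8_t  msg_size;
	uint8_t  reserved1;
	uint8_t  src_addr;
	uint8_t  status;
	uint32_t tkid;
	uint8_t  dn_data[56];
} __attribute__((packed));

int main(void)
{
	/* 1 + 1 + 1 + 1 + 4 + 56 = 64 octets per entry */
	assert(sizeof(struct dn_buf_entry) == 64);
	printf("WHC_N_DN_ENTRIES = %zu\n", 4096 / sizeof(struct dn_buf_entry));
	return 0;
}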
316
317/* The Add MMC IE WUSB Generic Command may take up to 256 bytes of
318 data. [WHCI] section 2.4.7. */
319#define WHC_GEN_CMD_DATA_LEN 256
320
321/*
322 * HC registers.
323 *
324 * [WHCI] section 2.4
325 */
326
327#define WHCIVERSION 0x00
328
329#define WHCSPARAMS 0x04
330# define WHCSPARAMS_TO_N_MMC_IES(p) (((p) >> 16) & 0xff)
331# define WHCSPARAMS_TO_N_KEYS(p) (((p) >> 8) & 0xff)
332# define WHCSPARAMS_TO_N_DEVICES(p) (((p) >> 0) & 0x7f)
333
334#define WUSBCMD 0x08
335# define WUSBCMD_BCID(b) ((b) << 16)
336# define WUSBCMD_BCID_MASK (0xff << 16)
337# define WUSBCMD_ASYNC_QSET_RM (1 << 12)
338# define WUSBCMD_PERIODIC_QSET_RM (1 << 11)
339# define WUSBCMD_WUSBSI(s) ((s) << 8)
340# define WUSBCMD_WUSBSI_MASK (0x7 << 8)
341# define WUSBCMD_ASYNC_SYNCED_DB (1 << 7)
342# define WUSBCMD_PERIODIC_SYNCED_DB (1 << 6)
343# define WUSBCMD_ASYNC_UPDATED (1 << 5)
344# define WUSBCMD_PERIODIC_UPDATED (1 << 4)
345# define WUSBCMD_ASYNC_EN (1 << 3)
346# define WUSBCMD_PERIODIC_EN (1 << 2)
347# define WUSBCMD_WHCRESET (1 << 1)
348# define WUSBCMD_RUN (1 << 0)
349
350#define WUSBSTS 0x0c
351# define WUSBSTS_ASYNC_SCHED (1 << 15)
352# define WUSBSTS_PERIODIC_SCHED (1 << 14)
353# define WUSBSTS_DNTS_SCHED (1 << 13)
354# define WUSBSTS_HCHALTED (1 << 12)
355# define WUSBSTS_GEN_CMD_DONE (1 << 9)
356# define WUSBSTS_CHAN_TIME_ROLLOVER (1 << 8)
357# define WUSBSTS_DNTS_OVERFLOW (1 << 7)
358# define WUSBSTS_BPST_ADJUSTMENT_CHANGED (1 << 6)
359# define WUSBSTS_HOST_ERR (1 << 5)
360# define WUSBSTS_ASYNC_SCHED_SYNCED (1 << 4)
361# define WUSBSTS_PERIODIC_SCHED_SYNCED (1 << 3)
362# define WUSBSTS_DNTS_INT (1 << 2)
363# define WUSBSTS_ERR_INT (1 << 1)
364# define WUSBSTS_INT (1 << 0)
365# define WUSBSTS_INT_MASK 0x3ff
366
367#define WUSBINTR 0x10
368# define WUSBINTR_GEN_CMD_DONE (1 << 9)
369# define WUSBINTR_CHAN_TIME_ROLLOVER (1 << 8)
370# define WUSBINTR_DNTS_OVERFLOW (1 << 7)
371# define WUSBINTR_BPST_ADJUSTMENT_CHANGED (1 << 6)
372# define WUSBINTR_HOST_ERR (1 << 5)
373# define WUSBINTR_ASYNC_SCHED_SYNCED (1 << 4)
374# define WUSBINTR_PERIODIC_SCHED_SYNCED (1 << 3)
375# define WUSBINTR_DNTS_INT (1 << 2)
376# define WUSBINTR_ERR_INT (1 << 1)
377# define WUSBINTR_INT (1 << 0)
378# define WUSBINTR_ALL 0x3ff
379
380#define WUSBGENCMDSTS 0x14
381# define WUSBGENCMDSTS_ACTIVE (1 << 31)
382# define WUSBGENCMDSTS_ERROR (1 << 24)
383# define WUSBGENCMDSTS_IOC (1 << 23)
384# define WUSBGENCMDSTS_MMCIE_ADD 0x01
385# define WUSBGENCMDSTS_MMCIE_RM 0x02
386# define WUSBGENCMDSTS_SET_MAS 0x03
387# define WUSBGENCMDSTS_CHAN_STOP 0x04
388# define WUSBGENCMDSTS_RWP_EN 0x05
389
390#define WUSBGENCMDPARAMS 0x18
391#define WUSBGENADDR 0x20
392#define WUSBASYNCLISTADDR 0x28
393#define WUSBDNTSBUFADDR 0x30
394#define WUSBDEVICEINFOADDR 0x38
395
396#define WUSBSETSECKEYCMD 0x40
397# define WUSBSETSECKEYCMD_SET (1 << 31)
398# define WUSBSETSECKEYCMD_ERASE (1 << 30)
399# define WUSBSETSECKEYCMD_GTK (1 << 8)
400# define WUSBSETSECKEYCMD_IDX(i) ((i) << 0)
401
402#define WUSBTKID 0x44
403#define WUSBSECKEY 0x48
404#define WUSBPERIODICLISTBASE 0x58
405#define WUSBMASINDEX 0x60
406
407#define WUSBDNTSCTRL 0x64
408# define WUSBDNTSCTRL_ACTIVE (1 << 31)
409# define WUSBDNTSCTRL_INTERVAL(i) ((i) << 8)
410# define WUSBDNTSCTRL_SLOTS(s) ((s) << 0)
411
412#define WUSBTIME 0x68
413#define WUSBBPST 0x6c
414#define WUSBDIBUPDATED 0x70
415
416#endif /* #ifndef _WHCI_WHCI_HC_H */
diff --git a/drivers/usb/host/whci/wusb.c b/drivers/usb/host/whci/wusb.c
new file mode 100644
index 000000000000..66e4ddcd961d
--- /dev/null
+++ b/drivers/usb/host/whci/wusb.c
@@ -0,0 +1,241 @@
1/*
2 * Wireless Host Controller (WHC) WUSB operations.
3 *
4 * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License version
8 * 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18#include <linux/version.h>
19#include <linux/kernel.h>
20#include <linux/init.h>
21#include <linux/uwb/umc.h>
22#define D_LOCAL 1
23#include <linux/uwb/debug.h>
24
25#include "../../wusbcore/wusbhc.h"
26
27#include "whcd.h"
28
29#if D_LOCAL >= 1
30static void dump_di(struct whc *whc, int idx)
31{
32 struct di_buf_entry *di = &whc->di_buf[idx];
33 struct device *dev = &whc->umc->dev;
34 char buf[128];
35
36 bitmap_scnprintf(buf, sizeof(buf), (unsigned long *)di->availability_info, UWB_NUM_MAS);
37
38 d_printf(1, dev, "DI[%d]\n", idx);
39 d_printf(1, dev, " availability: %s\n", buf);
40 d_printf(1, dev, " %c%c key idx: %d dev addr: %d\n",
41 (di->addr_sec_info & WHC_DI_SECURE) ? 'S' : ' ',
42 (di->addr_sec_info & WHC_DI_DISABLE) ? 'D' : ' ',
43 (di->addr_sec_info & WHC_DI_KEY_IDX_MASK) >> 8,
44 (di->addr_sec_info & WHC_DI_DEV_ADDR_MASK));
45}
46#else
47static inline void dump_di(struct whc *whc, int idx)
48{
49}
50#endif
51
52static int whc_update_di(struct whc *whc, int idx)
53{
54 int offset = idx / 32;
55 u32 bit = 1 << (idx % 32);
56
57 dump_di(whc, idx);
58
59 le_writel(bit, whc->base + WUSBDIBUPDATED + offset);
60
61 return whci_wait_for(&whc->umc->dev,
62 whc->base + WUSBDIBUPDATED + offset, bit, 0,
63 100, "DI update");
64}
65
66/*
67 * WHCI starts and stops MMCs based on there being a valid GTK so
68 * these need only start/stop the asynchronous and periodic schedules.
69 */
70
71int whc_wusbhc_start(struct wusbhc *wusbhc)
72{
73 struct whc *whc = wusbhc_to_whc(wusbhc);
74
75 asl_start(whc);
76 pzl_start(whc);
77
78 return 0;
79}
80
81void whc_wusbhc_stop(struct wusbhc *wusbhc)
82{
83 struct whc *whc = wusbhc_to_whc(wusbhc);
84
85 pzl_stop(whc);
86 asl_stop(whc);
87}
88
89int whc_mmcie_add(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt,
90 u8 handle, struct wuie_hdr *wuie)
91{
92 struct whc *whc = wusbhc_to_whc(wusbhc);
93 u32 params;
94
95 params = (interval << 24)
96 | (repeat_cnt << 16)
97 | (wuie->bLength << 8)
98 | handle;
99
100 return whc_do_gencmd(whc, WUSBGENCMDSTS_MMCIE_ADD, params, wuie, wuie->bLength);
101}
102
103int whc_mmcie_rm(struct wusbhc *wusbhc, u8 handle)
104{
105 struct whc *whc = wusbhc_to_whc(wusbhc);
106 u32 params;
107
108 params = handle;
109
110 return whc_do_gencmd(whc, WUSBGENCMDSTS_MMCIE_RM, params, NULL, 0);
111}
112
113int whc_bwa_set(struct wusbhc *wusbhc, s8 stream_index, const struct uwb_mas_bm *mas_bm)
114{
115 struct whc *whc = wusbhc_to_whc(wusbhc);
116
117 if (stream_index >= 0)
118 whc_write_wusbcmd(whc, WUSBCMD_WUSBSI_MASK, WUSBCMD_WUSBSI(stream_index));
119
120 return whc_do_gencmd(whc, WUSBGENCMDSTS_SET_MAS, 0, (void *)mas_bm, sizeof(*mas_bm));
121}
122
123int whc_dev_info_set(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
124{
125 struct whc *whc = wusbhc_to_whc(wusbhc);
126 int idx = wusb_dev->port_idx;
127 struct di_buf_entry *di = &whc->di_buf[idx];
128 int ret;
129
130 mutex_lock(&whc->mutex);
131
132 uwb_mas_bm_copy_le(di->availability_info, &wusb_dev->availability);
133 di->addr_sec_info &= ~(WHC_DI_DISABLE | WHC_DI_DEV_ADDR_MASK);
134 di->addr_sec_info |= WHC_DI_DEV_ADDR(wusb_dev->addr);
135
136 ret = whc_update_di(whc, idx);
137
138 mutex_unlock(&whc->mutex);
139
140 return ret;
141}
142
143/*
144 * Set the number of Device Notification Time Slots (DNTS) and enable
145 * device notifications.
146 */
147int whc_set_num_dnts(struct wusbhc *wusbhc, u8 interval, u8 slots)
148{
149 struct whc *whc = wusbhc_to_whc(wusbhc);
150 u32 dntsctrl;
151
152 dntsctrl = WUSBDNTSCTRL_ACTIVE
153 | WUSBDNTSCTRL_INTERVAL(interval)
154 | WUSBDNTSCTRL_SLOTS(slots);
155
156 le_writel(dntsctrl, whc->base + WUSBDNTSCTRL);
157
158 return 0;
159}
160
161static int whc_set_key(struct whc *whc, u8 key_index, uint32_t tkid,
162 const void *key, size_t key_size, bool is_gtk)
163{
164 uint32_t setkeycmd;
165 uint32_t seckey[4];
166 int i;
167 int ret;
168
169 memcpy(seckey, key, key_size);
170 setkeycmd = WUSBSETSECKEYCMD_SET | WUSBSETSECKEYCMD_IDX(key_index);
171 if (is_gtk)
172 setkeycmd |= WUSBSETSECKEYCMD_GTK;
173
174 le_writel(tkid, whc->base + WUSBTKID);
175 for (i = 0; i < 4; i++)
176 le_writel(seckey[i], whc->base + WUSBSECKEY + 4*i);
177 le_writel(setkeycmd, whc->base + WUSBSETSECKEYCMD);
178
179 ret = whci_wait_for(&whc->umc->dev, whc->base + WUSBSETSECKEYCMD,
180 WUSBSETSECKEYCMD_SET, 0, 100, "set key");
181
182 return ret;
183}
184
185/**
186 * whc_set_ptk - set the PTK to use for a device.
187 *
188 * The index into the key table for this PTK is the same as the
189 * device's port index.
190 */
191int whc_set_ptk(struct wusbhc *wusbhc, u8 port_idx, u32 tkid,
192 const void *ptk, size_t key_size)
193{
194 struct whc *whc = wusbhc_to_whc(wusbhc);
195 struct di_buf_entry *di = &whc->di_buf[port_idx];
196 int ret;
197
198 mutex_lock(&whc->mutex);
199
200 if (ptk) {
201 ret = whc_set_key(whc, port_idx, tkid, ptk, key_size, false);
202 if (ret)
203 goto out;
204
205 di->addr_sec_info &= ~WHC_DI_KEY_IDX_MASK;
206 di->addr_sec_info |= WHC_DI_SECURE | WHC_DI_KEY_IDX(port_idx);
207 } else
208 di->addr_sec_info &= ~WHC_DI_SECURE;
209
210 ret = whc_update_di(whc, port_idx);
211out:
212 mutex_unlock(&whc->mutex);
213 return ret;
214}
215
216/**
217 * whc_set_gtk - set the GTK for subsequent broadcast packets
218 *
219 * The GTK is stored in the last entry in the key table (the previous
220 * N_DEVICES entries are for the per-device PTKs).
221 */
222int whc_set_gtk(struct wusbhc *wusbhc, u32 tkid,
223 const void *gtk, size_t key_size)
224{
225 struct whc *whc = wusbhc_to_whc(wusbhc);
226 int ret;
227
228 mutex_lock(&whc->mutex);
229
230 ret = whc_set_key(whc, whc->n_devices, tkid, gtk, key_size, true);
231
232 mutex_unlock(&whc->mutex);
233
234 return ret;
235}
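
[Editorial illustration, not part of this patch] The key table layout implied by whc_set_ptk() and whc_set_gtk() above can be written out as a hypothetical helper; whc_key_index() is not in the patch, and n_devices stands for the controller's whc->n_devices value.

#include <stdbool.h>
#include <stdio.h>

/* key table layout: indices 0 .. n_devices-1 hold per-device PTKs (keyed
 * by port index); index n_devices holds the GTK used for broadcasts. */
static unsigned int whc_key_index(unsigned int n_devices,
				  unsigned int port_idx, bool is_gtk)
{
	return is_gtk ? n_devices : port_idx;
}

int main(void)
{
	unsigned int n_devices = 4;	/* hypothetical controller limit */

	printf("PTK for port 2 -> key index %u\n",
	       whc_key_index(n_devices, 2, false));
	printf("GTK            -> key index %u\n",
	       whc_key_index(n_devices, 0, true));
	return 0;
}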
236
237int whc_set_cluster_id(struct whc *whc, u8 bcid)
238{
239 whc_write_wusbcmd(whc, WUSBCMD_BCID_MASK, WUSBCMD_BCID(bcid));
240 return 0;
241}
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
index 69c34a58e205..b4ec716de7da 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.c
+++ b/drivers/usb/misc/sisusbvga/sisusb.c
@@ -3270,6 +3270,7 @@ static struct usb_device_id sisusb_table [] = {
3270 { USB_DEVICE(0x0711, 0x0900) }, 3270 { USB_DEVICE(0x0711, 0x0900) },
3271 { USB_DEVICE(0x0711, 0x0901) }, 3271 { USB_DEVICE(0x0711, 0x0901) },
3272 { USB_DEVICE(0x0711, 0x0902) }, 3272 { USB_DEVICE(0x0711, 0x0902) },
3273 { USB_DEVICE(0x0711, 0x0903) },
3273 { USB_DEVICE(0x0711, 0x0918) }, 3274 { USB_DEVICE(0x0711, 0x0918) },
3274 { USB_DEVICE(0x182d, 0x021c) }, 3275 { USB_DEVICE(0x182d, 0x021c) },
3275 { USB_DEVICE(0x182d, 0x0269) }, 3276 { USB_DEVICE(0x182d, 0x0269) },
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index b358c4e1cf21..444c69c447be 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -1561,8 +1561,7 @@ usbtest_ioctl (struct usb_interface *intf, unsigned int code, void *buf)
1561 if (code != USBTEST_REQUEST) 1561 if (code != USBTEST_REQUEST)
1562 return -EOPNOTSUPP; 1562 return -EOPNOTSUPP;
1563 1563
1564 if (param->iterations <= 0 || param->length < 0 1564 if (param->iterations <= 0)
1565 || param->sglen < 0 || param->vary < 0)
1566 return -EINVAL; 1565 return -EINVAL;
1567 1566
1568 if (mutex_lock_interruptible(&dev->lock)) 1567 if (mutex_lock_interruptible(&dev->lock))
diff --git a/drivers/usb/misc/vstusb.c b/drivers/usb/misc/vstusb.c
index 8648470c81ca..63dff9ba73c5 100644
--- a/drivers/usb/misc/vstusb.c
+++ b/drivers/usb/misc/vstusb.c
@@ -620,7 +620,7 @@ static long vstusb_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
620 __func__); 620 __func__);
621 retval = -EFAULT; 621 retval = -EFAULT;
622 } else { 622 } else {
623 dev_dbg(&dev->dev, "%s: recv %d bytes from pipe %d\n", 623 dev_dbg(&dev->dev, "%s: recv %zd bytes from pipe %d\n",
624 __func__, usb_data.count, usb_data.pipe); 624 __func__, usb_data.count, usb_data.pipe);
625 } 625 }
626 626
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index c9de3f027aab..e06810aef2df 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -687,7 +687,10 @@ static ssize_t mon_bin_read(struct file *file, char __user *buf,
687 } 687 }
688 688
689 if (rp->b_read >= sizeof(struct mon_bin_hdr)) { 689 if (rp->b_read >= sizeof(struct mon_bin_hdr)) {
690 step_len = min(nbytes, (size_t)ep->len_cap); 690 step_len = ep->len_cap;
691 step_len -= rp->b_read - sizeof(struct mon_bin_hdr);
692 if (step_len > nbytes)
693 step_len = nbytes;
691 offset = rp->b_out + PKT_SIZE; 694 offset = rp->b_out + PKT_SIZE;
692 offset += rp->b_read - sizeof(struct mon_bin_hdr); 695 offset += rp->b_read - sizeof(struct mon_bin_hdr);
693 if (offset >= rp->b_size) 696 if (offset >= rp->b_size)
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 4a35745b30be..5280dba9b1fb 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -114,8 +114,8 @@
114 114
115 115
116 116
117unsigned debug; 117unsigned musb_debug;
118module_param(debug, uint, S_IRUGO | S_IWUSR); 118module_param(musb_debug, uint, S_IRUGO | S_IWUSR);
119MODULE_PARM_DESC(debug, "Debug message level. Default = 0"); 119MODULE_PARM_DESC(debug, "Debug message level. Default = 0");
120 120
121#define DRIVER_AUTHOR "Mentor Graphics, Texas Instruments, Nokia" 121#define DRIVER_AUTHOR "Mentor Graphics, Texas Instruments, Nokia"
@@ -2248,7 +2248,7 @@ static int __init musb_init(void)
2248 "host" 2248 "host"
2249#endif 2249#endif
2250 ", debug=%d\n", 2250 ", debug=%d\n",
2251 musb_driver_name, debug); 2251 musb_driver_name, musb_debug);
2252 return platform_driver_probe(&musb_driver, musb_probe); 2252 return platform_driver_probe(&musb_driver, musb_probe);
2253} 2253}
2254 2254
diff --git a/drivers/usb/musb/musb_debug.h b/drivers/usb/musb/musb_debug.h
index 4d2794441b15..9fc1db44c72c 100644
--- a/drivers/usb/musb/musb_debug.h
+++ b/drivers/usb/musb/musb_debug.h
@@ -48,11 +48,11 @@
48 __func__, __LINE__ , ## args); \ 48 __func__, __LINE__ , ## args); \
49 } } while (0) 49 } } while (0)
50 50
51extern unsigned debug; 51extern unsigned musb_debug;
52 52
53static inline int _dbg_level(unsigned l) 53static inline int _dbg_level(unsigned l)
54{ 54{
55 return debug >= l; 55 return musb_debug >= l;
56} 56}
57 57
58#define DBG(level, fmt, args...) xprintk(level, KERN_DEBUG, fmt, ## args) 58#define DBG(level, fmt, args...) xprintk(level, KERN_DEBUG, fmt, ## args)
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 3133990f04ec..cc64462d4c4e 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -378,6 +378,19 @@ musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
378 378
379 switch (qh->type) { 379 switch (qh->type) {
380 380
381 case USB_ENDPOINT_XFER_CONTROL:
382 case USB_ENDPOINT_XFER_BULK:
383 /* fifo policy for these lists, except that NAKing
384 * should rotate a qh to the end (for fairness).
385 */
386 if (qh->mux == 1) {
387 head = qh->ring.prev;
388 list_del(&qh->ring);
389 kfree(qh);
390 qh = first_qh(head);
391 break;
392 }
393
381 case USB_ENDPOINT_XFER_ISOC: 394 case USB_ENDPOINT_XFER_ISOC:
382 case USB_ENDPOINT_XFER_INT: 395 case USB_ENDPOINT_XFER_INT:
383 /* this is where periodic bandwidth should be 396 /* this is where periodic bandwidth should be
@@ -388,17 +401,6 @@ musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
388 kfree(qh); 401 kfree(qh);
389 qh = NULL; 402 qh = NULL;
390 break; 403 break;
391
392 case USB_ENDPOINT_XFER_CONTROL:
393 case USB_ENDPOINT_XFER_BULK:
394 /* fifo policy for these lists, except that NAKing
395 * should rotate a qh to the end (for fairness).
396 */
397 head = qh->ring.prev;
398 list_del(&qh->ring);
399 kfree(qh);
400 qh = first_qh(head);
401 break;
402 } 404 }
403 } 405 }
404 return qh; 406 return qh;
@@ -1507,10 +1509,29 @@ void musb_host_rx(struct musb *musb, u8 epnum)
1507 musb_writew(hw_ep->regs, MUSB_RXCSR, val); 1509 musb_writew(hw_ep->regs, MUSB_RXCSR, val);
1508 1510
1509#ifdef CONFIG_USB_INVENTRA_DMA 1511#ifdef CONFIG_USB_INVENTRA_DMA
1512 if (usb_pipeisoc(pipe)) {
1513 struct usb_iso_packet_descriptor *d;
1514
1515 d = urb->iso_frame_desc + qh->iso_idx;
1516 d->actual_length = xfer_len;
1517
1518 /* even if there was an error, we did the dma
1519 * for iso_frame_desc->length
1520 */
1521 if (d->status != EILSEQ && d->status != -EOVERFLOW)
1522 d->status = 0;
1523
1524 if (++qh->iso_idx >= urb->number_of_packets)
1525 done = true;
1526 else
1527 done = false;
1528
1529 } else {
1510 /* done if urb buffer is full or short packet is recd */ 1530 /* done if urb buffer is full or short packet is recd */
1511 done = (urb->actual_length + xfer_len >= 1531 done = (urb->actual_length + xfer_len >=
1512 urb->transfer_buffer_length 1532 urb->transfer_buffer_length
1513 || dma->actual_len < qh->maxpacket); 1533 || dma->actual_len < qh->maxpacket);
1534 }
1514 1535
1515 /* send IN token for next packet, without AUTOREQ */ 1536 /* send IN token for next packet, without AUTOREQ */
1516 if (!done) { 1537 if (!done) {
@@ -1547,7 +1568,8 @@ void musb_host_rx(struct musb *musb, u8 epnum)
1547 if (dma) { 1568 if (dma) {
1548 struct dma_controller *c; 1569 struct dma_controller *c;
1549 u16 rx_count; 1570 u16 rx_count;
1550 int ret; 1571 int ret, length;
1572 dma_addr_t buf;
1551 1573
1552 rx_count = musb_readw(epio, MUSB_RXCOUNT); 1574 rx_count = musb_readw(epio, MUSB_RXCOUNT);
1553 1575
@@ -1560,6 +1582,35 @@ void musb_host_rx(struct musb *musb, u8 epnum)
1560 1582
1561 c = musb->dma_controller; 1583 c = musb->dma_controller;
1562 1584
1585 if (usb_pipeisoc(pipe)) {
1586 int status = 0;
1587 struct usb_iso_packet_descriptor *d;
1588
1589 d = urb->iso_frame_desc + qh->iso_idx;
1590
1591 if (iso_err) {
1592 status = -EILSEQ;
1593 urb->error_count++;
1594 }
1595 if (rx_count > d->length) {
1596 if (status == 0) {
1597 status = -EOVERFLOW;
1598 urb->error_count++;
1599 }
1600 DBG(2, "** OVERFLOW %d into %d\n",\
1601 rx_count, d->length);
1602
1603 length = d->length;
1604 } else
1605 length = rx_count;
1606 d->status = status;
1607 buf = urb->transfer_dma + d->offset;
1608 } else {
1609 length = rx_count;
1610 buf = urb->transfer_dma +
1611 urb->actual_length;
1612 }
1613
1563 dma->desired_mode = 0; 1614 dma->desired_mode = 0;
1564#ifdef USE_MODE1 1615#ifdef USE_MODE1
1565 /* because of the issue below, mode 1 will 1616 /* because of the issue below, mode 1 will
@@ -1571,6 +1622,12 @@ void musb_host_rx(struct musb *musb, u8 epnum)
1571 urb->actual_length) 1622 urb->actual_length)
1572 > qh->maxpacket) 1623 > qh->maxpacket)
1573 dma->desired_mode = 1; 1624 dma->desired_mode = 1;
1625 if (rx_count < hw_ep->max_packet_sz_rx) {
1626 length = rx_count;
1627 dma->bDesiredMode = 0;
1628 } else {
1629 length = urb->transfer_buffer_length;
1630 }
1574#endif 1631#endif
1575 1632
1576/* Disadvantage of using mode 1: 1633/* Disadvantage of using mode 1:
@@ -1608,12 +1665,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
1608 */ 1665 */
1609 ret = c->channel_program( 1666 ret = c->channel_program(
1610 dma, qh->maxpacket, 1667 dma, qh->maxpacket,
1611 dma->desired_mode, 1668 dma->desired_mode, buf, length);
1612 urb->transfer_dma
1613 + urb->actual_length,
1614 (dma->desired_mode == 0)
1615 ? rx_count
1616 : urb->transfer_buffer_length);
1617 1669
1618 if (!ret) { 1670 if (!ret) {
1619 c->channel_release(dma); 1671 c->channel_release(dma);
@@ -1631,19 +1683,6 @@ void musb_host_rx(struct musb *musb, u8 epnum)
1631 } 1683 }
1632 } 1684 }
1633 1685
1634 if (dma && usb_pipeisoc(pipe)) {
1635 struct usb_iso_packet_descriptor *d;
1636 int iso_stat = status;
1637
1638 d = urb->iso_frame_desc + qh->iso_idx;
1639 d->actual_length += xfer_len;
1640 if (iso_err) {
1641 iso_stat = -EILSEQ;
1642 urb->error_count++;
1643 }
1644 d->status = iso_stat;
1645 }
1646
1647finish: 1686finish:
1648 urb->actual_length += xfer_len; 1687 urb->actual_length += xfer_len;
1649 qh->offset += xfer_len; 1688 qh->offset += xfer_len;
@@ -1671,22 +1710,9 @@ static int musb_schedule(
1671 struct list_head *head = NULL; 1710 struct list_head *head = NULL;
1672 1711
1673 /* use fixed hardware for control and bulk */ 1712 /* use fixed hardware for control and bulk */
1674 switch (qh->type) { 1713 if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
1675 case USB_ENDPOINT_XFER_CONTROL:
1676 head = &musb->control; 1714 head = &musb->control;
1677 hw_ep = musb->control_ep; 1715 hw_ep = musb->control_ep;
1678 break;
1679 case USB_ENDPOINT_XFER_BULK:
1680 hw_ep = musb->bulk_ep;
1681 if (is_in)
1682 head = &musb->in_bulk;
1683 else
1684 head = &musb->out_bulk;
1685 break;
1686 }
1687 if (head) {
1688 idle = list_empty(head);
1689 list_add_tail(&qh->ring, head);
1690 goto success; 1716 goto success;
1691 } 1717 }
1692 1718
@@ -1725,19 +1751,34 @@ static int musb_schedule(
1725 else 1751 else
1726 diff = hw_ep->max_packet_sz_tx - qh->maxpacket; 1752 diff = hw_ep->max_packet_sz_tx - qh->maxpacket;
1727 1753
1728 if (diff > 0 && best_diff > diff) { 1754 if (diff >= 0 && best_diff > diff) {
1729 best_diff = diff; 1755 best_diff = diff;
1730 best_end = epnum; 1756 best_end = epnum;
1731 } 1757 }
1732 } 1758 }
1733 if (best_end < 0) 1759 /* use bulk reserved ep1 if no other ep is free */
1760 if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) {
1761 hw_ep = musb->bulk_ep;
1762 if (is_in)
1763 head = &musb->in_bulk;
1764 else
1765 head = &musb->out_bulk;
1766 goto success;
1767 } else if (best_end < 0) {
1734 return -ENOSPC; 1768 return -ENOSPC;
1769 }
1735 1770
1736 idle = 1; 1771 idle = 1;
1772 qh->mux = 0;
1737 hw_ep = musb->endpoints + best_end; 1773 hw_ep = musb->endpoints + best_end;
1738 musb->periodic[best_end] = qh; 1774 musb->periodic[best_end] = qh;
1739 DBG(4, "qh %p periodic slot %d\n", qh, best_end); 1775 DBG(4, "qh %p periodic slot %d\n", qh, best_end);
1740success: 1776success:
1777 if (head) {
1778 idle = list_empty(head);
1779 list_add_tail(&qh->ring, head);
1780 qh->mux = 1;
1781 }
1741 qh->hw_ep = hw_ep; 1782 qh->hw_ep = hw_ep;
1742 qh->hep->hcpriv = qh; 1783 qh->hep->hcpriv = qh;
1743 if (idle) 1784 if (idle)
@@ -2015,11 +2056,13 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
2015 sched = &musb->control; 2056 sched = &musb->control;
2016 break; 2057 break;
2017 case USB_ENDPOINT_XFER_BULK: 2058 case USB_ENDPOINT_XFER_BULK:
2018 if (usb_pipein(urb->pipe)) 2059 if (qh->mux == 1) {
2019 sched = &musb->in_bulk; 2060 if (usb_pipein(urb->pipe))
2020 else 2061 sched = &musb->in_bulk;
2021 sched = &musb->out_bulk; 2062 else
2022 break; 2063 sched = &musb->out_bulk;
2064 break;
2065 }
2023 default: 2066 default:
2024 /* REVISIT when we get a schedule tree, periodic 2067 /* REVISIT when we get a schedule tree, periodic
2025 * transfers won't always be at the head of a 2068 * transfers won't always be at the head of a
@@ -2067,11 +2110,13 @@ musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
2067 sched = &musb->control; 2110 sched = &musb->control;
2068 break; 2111 break;
2069 case USB_ENDPOINT_XFER_BULK: 2112 case USB_ENDPOINT_XFER_BULK:
2070 if (is_in) 2113 if (qh->mux == 1) {
2071 sched = &musb->in_bulk; 2114 if (is_in)
2072 else 2115 sched = &musb->in_bulk;
2073 sched = &musb->out_bulk; 2116 else
2074 break; 2117 sched = &musb->out_bulk;
2118 break;
2119 }
2075 default: 2120 default:
2076 /* REVISIT when we get a schedule tree, periodic transfers 2121 /* REVISIT when we get a schedule tree, periodic transfers
2077 * won't always be at the head of a singleton queue... 2122 * won't always be at the head of a singleton queue...
diff --git a/drivers/usb/musb/musb_host.h b/drivers/usb/musb/musb_host.h
index 77bcdb9d5b32..0b7fbcd21963 100644
--- a/drivers/usb/musb/musb_host.h
+++ b/drivers/usb/musb/musb_host.h
@@ -53,6 +53,7 @@ struct musb_qh {
53 53
54 struct list_head ring; /* of musb_qh */ 54 struct list_head ring; /* of musb_qh */
55 /* struct musb_qh *next; */ /* for periodic tree */ 55 /* struct musb_qh *next; */ /* for periodic tree */
56 u8 mux; /* qh multiplexed to hw_ep */
56 57
57 unsigned offset; /* in urb->transfer_buffer */ 58 unsigned offset; /* in urb->transfer_buffer */
58 unsigned segsize; /* current xfer fragment */ 59 unsigned segsize; /* current xfer fragment */
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index 9d2dcb121c5e..ce6c162920f7 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -53,7 +53,9 @@ static void musb_do_idle(unsigned long _musb)
53{ 53{
54 struct musb *musb = (void *)_musb; 54 struct musb *musb = (void *)_musb;
55 unsigned long flags; 55 unsigned long flags;
56#ifdef CONFIG_USB_MUSB_HDRC_HCD
56 u8 power; 57 u8 power;
58#endif
57 u8 devctl; 59 u8 devctl;
58 60
59 devctl = musb_readb(musb->mregs, MUSB_DEVCTL); 61 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index b73b036f3d77..ee8fca92a4ac 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -605,7 +605,7 @@ void musb_platform_set_mode(struct musb *musb, u8 musb_mode)
605 605
606 if (musb->board_mode != MUSB_OTG) { 606 if (musb->board_mode != MUSB_OTG) {
607 ERR("Changing mode currently only supported in OTG mode\n"); 607 ERR("Changing mode currently only supported in OTG mode\n");
608 return; 608 return -EINVAL;
609 } 609 }
610 610
611 otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT); 611 otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
index 5b20de130e08..5b95009d2fbb 100644
--- a/drivers/usb/serial/console.c
+++ b/drivers/usb/serial/console.c
@@ -135,6 +135,7 @@ static int usb_console_setup(struct console *co, char *options)
135 err("no more memory"); 135 err("no more memory");
136 goto reset_open_count; 136 goto reset_open_count;
137 } 137 }
138 kref_init(&tty->kref);
138 termios = kzalloc(sizeof(*termios), GFP_KERNEL); 139 termios = kzalloc(sizeof(*termios), GFP_KERNEL);
139 if (!termios) { 140 if (!termios) {
140 retval = -ENOMEM; 141 retval = -ENOMEM;
diff --git a/drivers/usb/serial/cp2101.c b/drivers/usb/serial/cp2101.c
index 8008d0bc80ad..cfaf1f085535 100644
--- a/drivers/usb/serial/cp2101.c
+++ b/drivers/usb/serial/cp2101.c
@@ -56,6 +56,7 @@ static void cp2101_shutdown(struct usb_serial *);
56static int debug; 56static int debug;
57 57
58static struct usb_device_id id_table [] = { 58static struct usb_device_id id_table [] = {
59 { USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */
59 { USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */ 60 { USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
60 { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */ 61 { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
61 { USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */ 62 { USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */
@@ -67,6 +68,7 @@ static struct usb_device_id id_table [] = {
67 { USB_DEVICE(0x10C4, 0x800A) }, /* SPORTident BSM7-D-USB main station */ 68 { USB_DEVICE(0x10C4, 0x800A) }, /* SPORTident BSM7-D-USB main station */
68 { USB_DEVICE(0x10C4, 0x803B) }, /* Pololu USB-serial converter */ 69 { USB_DEVICE(0x10C4, 0x803B) }, /* Pololu USB-serial converter */
69 { USB_DEVICE(0x10C4, 0x8053) }, /* Enfora EDG1228 */ 70 { USB_DEVICE(0x10C4, 0x8053) }, /* Enfora EDG1228 */
71 { USB_DEVICE(0x10C4, 0x8054) }, /* Enfora GSM2228 */
70 { USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */ 72 { USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */
71 { USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */ 73 { USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */
72 { USB_DEVICE(0x10C4, 0x80CA) }, /* Degree Controls Inc */ 74 { USB_DEVICE(0x10C4, 0x80CA) }, /* Degree Controls Inc */
@@ -85,6 +87,7 @@ static struct usb_device_id id_table [] = {
85 { USB_DEVICE(0x10C4, 0x8218) }, /* Lipowsky Industrie Elektronik GmbH, HARP-1 */ 87 { USB_DEVICE(0x10C4, 0x8218) }, /* Lipowsky Industrie Elektronik GmbH, HARP-1 */
86 { USB_DEVICE(0x10c4, 0x8293) }, /* Telegesys ETRX2USB */ 88 { USB_DEVICE(0x10c4, 0x8293) }, /* Telegesys ETRX2USB */
87 { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */ 89 { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
90 { USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */
88 { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ 91 { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
89 { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ 92 { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
90 { USB_DEVICE(0x10C4, 0xF001) }, /* Elan Digital Systems USBscope50 */ 93 { USB_DEVICE(0x10C4, 0xF001) }, /* Elan Digital Systems USBscope50 */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 51d7bdea2869..fb6f2933b01b 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -143,6 +143,7 @@ static struct ftdi_sio_quirk ftdi_HE_TIRA1_quirk = {
143static struct usb_device_id id_table_combined [] = { 143static struct usb_device_id id_table_combined [] = {
144 { USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) }, 144 { USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) },
145 { USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) }, 145 { USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) },
146 { USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) },
146 { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) }, 147 { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) },
147 { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_1_PID) }, 148 { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_1_PID) },
148 { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_2_PID) }, 149 { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_2_PID) },
@@ -166,6 +167,7 @@ static struct usb_device_id id_table_combined [] = {
166 { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_PID) }, 167 { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_PID) },
167 { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) }, 168 { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) },
168 { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) }, 169 { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) },
170 { USB_DEVICE(FTDI_VID, FTDI_SPROG_II) },
169 { USB_DEVICE(FTDI_VID, FTDI_XF_632_PID) }, 171 { USB_DEVICE(FTDI_VID, FTDI_XF_632_PID) },
170 { USB_DEVICE(FTDI_VID, FTDI_XF_634_PID) }, 172 { USB_DEVICE(FTDI_VID, FTDI_XF_634_PID) },
171 { USB_DEVICE(FTDI_VID, FTDI_XF_547_PID) }, 173 { USB_DEVICE(FTDI_VID, FTDI_XF_547_PID) },
@@ -1498,7 +1500,7 @@ static int ftdi_open(struct tty_struct *tty,
1498 priv->interface, buf, 0, WDR_TIMEOUT); 1500 priv->interface, buf, 0, WDR_TIMEOUT);
1499 1501
1500 /* Termios defaults are set by usb_serial_init. We don't change 1502 /* Termios defaults are set by usb_serial_init. We don't change
1501 port->tty->termios - this would loose speed settings, etc. 1503 port->tty->termios - this would lose speed settings, etc.
1502 This is same behaviour as serial.c/rs_open() - Kuba */ 1504 This is same behaviour as serial.c/rs_open() - Kuba */
1503 1505
1504 /* ftdi_set_termios will send usb control messages */ 1506 /* ftdi_set_termios will send usb control messages */
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index 07a3992abad2..373ee09975bb 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -40,6 +40,9 @@
40/* AlphaMicro Components AMC-232USB01 device */ 40/* AlphaMicro Components AMC-232USB01 device */
41#define FTDI_AMC232_PID 0xFF00 /* Product Id */ 41#define FTDI_AMC232_PID 0xFF00 /* Product Id */
42 42
43/* www.candapter.com Ewert Energy Systems CANdapter device */
44#define FTDI_CANDAPTER_PID 0x9F80 /* Product Id */
45
43/* SCS HF Radio Modems PID's (http://www.scs-ptc.com) */ 46/* SCS HF Radio Modems PID's (http://www.scs-ptc.com) */
44/* the VID is the standard ftdi vid (FTDI_VID) */ 47/* the VID is the standard ftdi vid (FTDI_VID) */
45#define FTDI_SCS_DEVICE_0_PID 0xD010 /* SCS PTC-IIusb */ 48#define FTDI_SCS_DEVICE_0_PID 0xD010 /* SCS PTC-IIusb */
@@ -75,6 +78,9 @@
75/* OpenDCC (www.opendcc.de) product id */ 78/* OpenDCC (www.opendcc.de) product id */
76#define FTDI_OPENDCC_PID 0xBFD8 79#define FTDI_OPENDCC_PID 0xBFD8
77 80
81/* Sprog II (Andrew Crosland's SprogII DCC interface) */
82#define FTDI_SPROG_II 0xF0C8
83
78/* www.crystalfontz.com devices - thanx for providing free devices for evaluation ! */ 84/* www.crystalfontz.com devices - thanx for providing free devices for evaluation ! */
79/* they use the ftdi chipset for the USB interface and the vendor id is the same */ 85/* they use the ftdi chipset for the USB interface and the vendor id is the same */
80#define FTDI_XF_632_PID 0xFC08 /* 632: 16x2 Character Display */ 86#define FTDI_XF_632_PID 0xFC08 /* 632: 16x2 Character Display */
diff --git a/drivers/usb/serial/ir-usb.c b/drivers/usb/serial/ir-usb.c
index b679a556b98d..4e2cda93da59 100644
--- a/drivers/usb/serial/ir-usb.c
+++ b/drivers/usb/serial/ir-usb.c
@@ -26,7 +26,7 @@
26 * Introduced common header to be used also in USB Gadget Framework. 26 * Introduced common header to be used also in USB Gadget Framework.
27 * Still needs some other style fixes. 27 * Still needs some other style fixes.
28 * 28 *
29 * 2007_Jun_21 Alan Cox <alan@redhat.com> 29 * 2007_Jun_21 Alan Cox <alan@lxorguk.ukuu.org.uk>
30 * Minimal cleanups for some of the driver problems and tty layer abuse. 30 * Minimal cleanups for some of the driver problems and tty layer abuse.
31 * Still needs fixing to allow multiple dongles. 31 * Still needs fixing to allow multiple dongles.
32 * 32 *
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 3d87eabcd922..809697b3c7fc 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -95,11 +95,20 @@ static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *po
95#define HUAWEI_PRODUCT_E220 0x1003 95#define HUAWEI_PRODUCT_E220 0x1003
96#define HUAWEI_PRODUCT_E220BIS 0x1004 96#define HUAWEI_PRODUCT_E220BIS 0x1004
97#define HUAWEI_PRODUCT_E1401 0x1401 97#define HUAWEI_PRODUCT_E1401 0x1401
98#define HUAWEI_PRODUCT_E1402 0x1402
98#define HUAWEI_PRODUCT_E1403 0x1403 99#define HUAWEI_PRODUCT_E1403 0x1403
100#define HUAWEI_PRODUCT_E1404 0x1404
99#define HUAWEI_PRODUCT_E1405 0x1405 101#define HUAWEI_PRODUCT_E1405 0x1405
100#define HUAWEI_PRODUCT_E1406 0x1406 102#define HUAWEI_PRODUCT_E1406 0x1406
103#define HUAWEI_PRODUCT_E1407 0x1407
101#define HUAWEI_PRODUCT_E1408 0x1408 104#define HUAWEI_PRODUCT_E1408 0x1408
102#define HUAWEI_PRODUCT_E1409 0x1409 105#define HUAWEI_PRODUCT_E1409 0x1409
106#define HUAWEI_PRODUCT_E140A 0x140A
107#define HUAWEI_PRODUCT_E140B 0x140B
108#define HUAWEI_PRODUCT_E140C 0x140C
109#define HUAWEI_PRODUCT_E140D 0x140D
110#define HUAWEI_PRODUCT_E140E 0x140E
111#define HUAWEI_PRODUCT_E140F 0x140F
103#define HUAWEI_PRODUCT_E1410 0x1410 112#define HUAWEI_PRODUCT_E1410 0x1410
104#define HUAWEI_PRODUCT_E1411 0x1411 113#define HUAWEI_PRODUCT_E1411 0x1411
105#define HUAWEI_PRODUCT_E1412 0x1412 114#define HUAWEI_PRODUCT_E1412 0x1412
@@ -110,9 +119,52 @@ static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *po
110#define HUAWEI_PRODUCT_E1417 0x1417 119#define HUAWEI_PRODUCT_E1417 0x1417
111#define HUAWEI_PRODUCT_E1418 0x1418 120#define HUAWEI_PRODUCT_E1418 0x1418
112#define HUAWEI_PRODUCT_E1419 0x1419 121#define HUAWEI_PRODUCT_E1419 0x1419
122#define HUAWEI_PRODUCT_E141A 0x141A
123#define HUAWEI_PRODUCT_E141B 0x141B
124#define HUAWEI_PRODUCT_E141C 0x141C
125#define HUAWEI_PRODUCT_E141D 0x141D
126#define HUAWEI_PRODUCT_E141E 0x141E
127#define HUAWEI_PRODUCT_E141F 0x141F
128#define HUAWEI_PRODUCT_E1420 0x1420
129#define HUAWEI_PRODUCT_E1421 0x1421
130#define HUAWEI_PRODUCT_E1422 0x1422
131#define HUAWEI_PRODUCT_E1423 0x1423
132#define HUAWEI_PRODUCT_E1424 0x1424
133#define HUAWEI_PRODUCT_E1425 0x1425
134#define HUAWEI_PRODUCT_E1426 0x1426
135#define HUAWEI_PRODUCT_E1427 0x1427
136#define HUAWEI_PRODUCT_E1428 0x1428
137#define HUAWEI_PRODUCT_E1429 0x1429
138#define HUAWEI_PRODUCT_E142A 0x142A
139#define HUAWEI_PRODUCT_E142B 0x142B
140#define HUAWEI_PRODUCT_E142C 0x142C
141#define HUAWEI_PRODUCT_E142D 0x142D
142#define HUAWEI_PRODUCT_E142E 0x142E
143#define HUAWEI_PRODUCT_E142F 0x142F
144#define HUAWEI_PRODUCT_E1430 0x1430
145#define HUAWEI_PRODUCT_E1431 0x1431
146#define HUAWEI_PRODUCT_E1432 0x1432
147#define HUAWEI_PRODUCT_E1433 0x1433
148#define HUAWEI_PRODUCT_E1434 0x1434
149#define HUAWEI_PRODUCT_E1435 0x1435
150#define HUAWEI_PRODUCT_E1436 0x1436
151#define HUAWEI_PRODUCT_E1437 0x1437
152#define HUAWEI_PRODUCT_E1438 0x1438
153#define HUAWEI_PRODUCT_E1439 0x1439
154#define HUAWEI_PRODUCT_E143A 0x143A
155#define HUAWEI_PRODUCT_E143B 0x143B
156#define HUAWEI_PRODUCT_E143C 0x143C
157#define HUAWEI_PRODUCT_E143D 0x143D
158#define HUAWEI_PRODUCT_E143E 0x143E
159#define HUAWEI_PRODUCT_E143F 0x143F
113 160
114#define NOVATELWIRELESS_VENDOR_ID 0x1410 161#define NOVATELWIRELESS_VENDOR_ID 0x1410
115 162
163/* YISO PRODUCTS */
164
165#define YISO_VENDOR_ID 0x0EAB
166#define YISO_PRODUCT_U893 0xC893
167
116/* MERLIN EVDO PRODUCTS */ 168/* MERLIN EVDO PRODUCTS */
117#define NOVATELWIRELESS_PRODUCT_V640 0x1100 169#define NOVATELWIRELESS_PRODUCT_V640 0x1100
118#define NOVATELWIRELESS_PRODUCT_V620 0x1110 170#define NOVATELWIRELESS_PRODUCT_V620 0x1110
@@ -172,6 +224,7 @@ static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *po
172#define ONDA_VENDOR_ID 0x19d2 224#define ONDA_VENDOR_ID 0x19d2
173#define ONDA_PRODUCT_MSA501HS 0x0001 225#define ONDA_PRODUCT_MSA501HS 0x0001
174#define ONDA_PRODUCT_ET502HS 0x0002 226#define ONDA_PRODUCT_ET502HS 0x0002
227#define ONDA_PRODUCT_MT503HS 0x0200
175 228
176#define BANDRICH_VENDOR_ID 0x1A8D 229#define BANDRICH_VENDOR_ID 0x1A8D
177#define BANDRICH_PRODUCT_C100_1 0x1002 230#define BANDRICH_PRODUCT_C100_1 0x1002
@@ -207,6 +260,7 @@ static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *po
207/* ZTE PRODUCTS */ 260/* ZTE PRODUCTS */
208#define ZTE_VENDOR_ID 0x19d2 261#define ZTE_VENDOR_ID 0x19d2
209#define ZTE_PRODUCT_MF628 0x0015 262#define ZTE_PRODUCT_MF628 0x0015
263#define ZTE_PRODUCT_MF626 0x0031
210#define ZTE_PRODUCT_CDMA_TECH 0xfffe 264#define ZTE_PRODUCT_CDMA_TECH 0xfffe
211 265
212/* Ericsson products */ 266/* Ericsson products */
@@ -248,11 +302,20 @@ static struct usb_device_id option_ids[] = {
248 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220, 0xff, 0xff, 0xff) }, 302 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220, 0xff, 0xff, 0xff) },
249 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220BIS, 0xff, 0xff, 0xff) }, 303 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220BIS, 0xff, 0xff, 0xff) },
250 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1401, 0xff, 0xff, 0xff) }, 304 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1401, 0xff, 0xff, 0xff) },
305 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1402, 0xff, 0xff, 0xff) },
251 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1403, 0xff, 0xff, 0xff) }, 306 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1403, 0xff, 0xff, 0xff) },
307 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1404, 0xff, 0xff, 0xff) },
252 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1405, 0xff, 0xff, 0xff) }, 308 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1405, 0xff, 0xff, 0xff) },
253 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1406, 0xff, 0xff, 0xff) }, 309 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1406, 0xff, 0xff, 0xff) },
310 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1407, 0xff, 0xff, 0xff) },
254 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1408, 0xff, 0xff, 0xff) }, 311 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1408, 0xff, 0xff, 0xff) },
255 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1409, 0xff, 0xff, 0xff) }, 312 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1409, 0xff, 0xff, 0xff) },
313 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140A, 0xff, 0xff, 0xff) },
314 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140B, 0xff, 0xff, 0xff) },
315 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140C, 0xff, 0xff, 0xff) },
316 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140D, 0xff, 0xff, 0xff) },
317 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140E, 0xff, 0xff, 0xff) },
318 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140F, 0xff, 0xff, 0xff) },
256 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1410, 0xff, 0xff, 0xff) }, 319 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1410, 0xff, 0xff, 0xff) },
257 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1411, 0xff, 0xff, 0xff) }, 320 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1411, 0xff, 0xff, 0xff) },
258 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1412, 0xff, 0xff, 0xff) }, 321 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1412, 0xff, 0xff, 0xff) },
@@ -263,6 +326,44 @@ static struct usb_device_id option_ids[] = {
263 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1417, 0xff, 0xff, 0xff) }, 326 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1417, 0xff, 0xff, 0xff) },
264 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1418, 0xff, 0xff, 0xff) }, 327 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1418, 0xff, 0xff, 0xff) },
265 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1419, 0xff, 0xff, 0xff) }, 328 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1419, 0xff, 0xff, 0xff) },
329 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141A, 0xff, 0xff, 0xff) },
330 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141B, 0xff, 0xff, 0xff) },
331 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141C, 0xff, 0xff, 0xff) },
332 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141D, 0xff, 0xff, 0xff) },
333 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141E, 0xff, 0xff, 0xff) },
334 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141F, 0xff, 0xff, 0xff) },
335 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1420, 0xff, 0xff, 0xff) },
336 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1421, 0xff, 0xff, 0xff) },
337 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1422, 0xff, 0xff, 0xff) },
338 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1423, 0xff, 0xff, 0xff) },
339 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1424, 0xff, 0xff, 0xff) },
340 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1425, 0xff, 0xff, 0xff) },
341 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1426, 0xff, 0xff, 0xff) },
342 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1427, 0xff, 0xff, 0xff) },
343 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1428, 0xff, 0xff, 0xff) },
344 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1429, 0xff, 0xff, 0xff) },
345 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142A, 0xff, 0xff, 0xff) },
346 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142B, 0xff, 0xff, 0xff) },
347 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142C, 0xff, 0xff, 0xff) },
348 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142D, 0xff, 0xff, 0xff) },
349 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142E, 0xff, 0xff, 0xff) },
350 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142F, 0xff, 0xff, 0xff) },
351 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1430, 0xff, 0xff, 0xff) },
352 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1431, 0xff, 0xff, 0xff) },
353 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1432, 0xff, 0xff, 0xff) },
354 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1433, 0xff, 0xff, 0xff) },
355 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1434, 0xff, 0xff, 0xff) },
356 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1435, 0xff, 0xff, 0xff) },
357 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1436, 0xff, 0xff, 0xff) },
358 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1437, 0xff, 0xff, 0xff) },
359 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1438, 0xff, 0xff, 0xff) },
360 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1439, 0xff, 0xff, 0xff) },
361 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143A, 0xff, 0xff, 0xff) },
362 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143B, 0xff, 0xff, 0xff) },
363 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143C, 0xff, 0xff, 0xff) },
364 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143D, 0xff, 0xff, 0xff) },
365 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143E, 0xff, 0xff, 0xff) },
366 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143F, 0xff, 0xff, 0xff) },
266 { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_9508) }, 367 { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_9508) },
267 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, /* Novatel Merlin V640/XV620 */ 368 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, /* Novatel Merlin V640/XV620 */
268 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, /* Novatel Merlin V620/S620 */ 369 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, /* Novatel Merlin V620/S620 */
@@ -313,6 +414,41 @@ static struct usb_device_id option_ids[] = {
313 { USB_DEVICE(AXESSTEL_VENDOR_ID, AXESSTEL_PRODUCT_MV110H) }, 414 { USB_DEVICE(AXESSTEL_VENDOR_ID, AXESSTEL_PRODUCT_MV110H) },
314 { USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_MSA501HS) }, 415 { USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_MSA501HS) },
315 { USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_ET502HS) }, 416 { USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_ET502HS) },
417 { USB_DEVICE(ONDA_VENDOR_ID, 0x0003) },
418 { USB_DEVICE(ONDA_VENDOR_ID, 0x0004) },
419 { USB_DEVICE(ONDA_VENDOR_ID, 0x0005) },
420 { USB_DEVICE(ONDA_VENDOR_ID, 0x0006) },
421 { USB_DEVICE(ONDA_VENDOR_ID, 0x0007) },
422 { USB_DEVICE(ONDA_VENDOR_ID, 0x0008) },
423 { USB_DEVICE(ONDA_VENDOR_ID, 0x0009) },
424 { USB_DEVICE(ONDA_VENDOR_ID, 0x000a) },
425 { USB_DEVICE(ONDA_VENDOR_ID, 0x000b) },
426 { USB_DEVICE(ONDA_VENDOR_ID, 0x000c) },
427 { USB_DEVICE(ONDA_VENDOR_ID, 0x000d) },
428 { USB_DEVICE(ONDA_VENDOR_ID, 0x000e) },
429 { USB_DEVICE(ONDA_VENDOR_ID, 0x000f) },
430 { USB_DEVICE(ONDA_VENDOR_ID, 0x0010) },
431 { USB_DEVICE(ONDA_VENDOR_ID, 0x0011) },
432 { USB_DEVICE(ONDA_VENDOR_ID, 0x0012) },
433 { USB_DEVICE(ONDA_VENDOR_ID, 0x0013) },
434 { USB_DEVICE(ONDA_VENDOR_ID, 0x0014) },
435 { USB_DEVICE(ONDA_VENDOR_ID, 0x0015) },
436 { USB_DEVICE(ONDA_VENDOR_ID, 0x0016) },
437 { USB_DEVICE(ONDA_VENDOR_ID, 0x0017) },
438 { USB_DEVICE(ONDA_VENDOR_ID, 0x0018) },
439 { USB_DEVICE(ONDA_VENDOR_ID, 0x0019) },
440 { USB_DEVICE(ONDA_VENDOR_ID, 0x0020) },
441 { USB_DEVICE(ONDA_VENDOR_ID, 0x0021) },
442 { USB_DEVICE(ONDA_VENDOR_ID, 0x0022) },
443 { USB_DEVICE(ONDA_VENDOR_ID, 0x0023) },
444 { USB_DEVICE(ONDA_VENDOR_ID, 0x0024) },
445 { USB_DEVICE(ONDA_VENDOR_ID, 0x0025) },
446 { USB_DEVICE(ONDA_VENDOR_ID, 0x0026) },
447 { USB_DEVICE(ONDA_VENDOR_ID, 0x0027) },
448 { USB_DEVICE(ONDA_VENDOR_ID, 0x0028) },
449 { USB_DEVICE(ONDA_VENDOR_ID, 0x0029) },
450 { USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_MT503HS) },
451 { USB_DEVICE(YISO_VENDOR_ID, YISO_PRODUCT_U893) },
316 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_1) }, 452 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_1) },
317 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_2) }, 453 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_2) },
318 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1004) }, 454 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1004) },
@@ -336,6 +472,7 @@ static struct usb_device_id option_ids[] = {
336 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ 472 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
337 { USB_DEVICE(MAXON_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */ 473 { USB_DEVICE(MAXON_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */
338 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) }, 474 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
475 { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626) },
339 { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628) }, 476 { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628) },
340 { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH) }, 477 { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH) },
341 { USB_DEVICE(ERICSSON_VENDOR_ID, ERICSSON_PRODUCT_F3507G) }, 478 { USB_DEVICE(ERICSSON_VENDOR_ID, ERICSSON_PRODUCT_F3507G) },
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 491c8857b644..1aed584be5eb 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -91,6 +91,8 @@ static struct usb_device_id id_table [] = {
91 { USB_DEVICE(WS002IN_VENDOR_ID, WS002IN_PRODUCT_ID) }, 91 { USB_DEVICE(WS002IN_VENDOR_ID, WS002IN_PRODUCT_ID) },
92 { USB_DEVICE(COREGA_VENDOR_ID, COREGA_PRODUCT_ID) }, 92 { USB_DEVICE(COREGA_VENDOR_ID, COREGA_PRODUCT_ID) },
93 { USB_DEVICE(YCCABLE_VENDOR_ID, YCCABLE_PRODUCT_ID) }, 93 { USB_DEVICE(YCCABLE_VENDOR_ID, YCCABLE_PRODUCT_ID) },
94 { USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) },
95 { USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) },
94 { } /* Terminating entry */ 96 { } /* Terminating entry */
95}; 97};
96 98
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index a3bd039c78e9..54974f446a8c 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -110,3 +110,11 @@
110/* Y.C. Cable U.S.A., Inc - USB to RS-232 */ 110/* Y.C. Cable U.S.A., Inc - USB to RS-232 */
111#define YCCABLE_VENDOR_ID 0x05ad 111#define YCCABLE_VENDOR_ID 0x05ad
112#define YCCABLE_PRODUCT_ID 0x0fba 112#define YCCABLE_PRODUCT_ID 0x0fba
113
114/* "Superial" USB - Serial */
115#define SUPERIAL_VENDOR_ID 0x5372
116#define SUPERIAL_PRODUCT_ID 0x2303
117
118/* Hewlett-Packard LD220-HP POS Pole Display */
119#define HP_VENDOR_ID 0x03f0
120#define HP_LD220_PRODUCT_ID 0x3524
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 31c42d1cae13..01d0c70d60e9 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -16,56 +16,6 @@
16 * For questions or problems with this driver, contact Texas Instruments 16 * For questions or problems with this driver, contact Texas Instruments
17 * technical support, or Al Borchers <alborchers@steinerpoint.com>, or 17 * technical support, or Al Borchers <alborchers@steinerpoint.com>, or
18 * Peter Berger <pberger@brimson.com>. 18 * Peter Berger <pberger@brimson.com>.
19 *
20 * This driver needs this hotplug script in /etc/hotplug/usb/ti_usb_3410_5052
21 * or in /etc/hotplug.d/usb/ti_usb_3410_5052.hotplug to set the device
22 * configuration.
23 *
24 * #!/bin/bash
25 *
26 * BOOT_CONFIG=1
27 * ACTIVE_CONFIG=2
28 *
29 * if [[ "$ACTION" != "add" ]]
30 * then
31 * exit
32 * fi
33 *
34 * CONFIG_PATH=/sys${DEVPATH%/?*}/bConfigurationValue
35 *
36 * if [[ 0`cat $CONFIG_PATH` -ne $BOOT_CONFIG ]]
37 * then
38 * exit
39 * fi
40 *
41 * PRODUCT=${PRODUCT%/?*} # delete version
42 * VENDOR_ID=`printf "%d" 0x${PRODUCT%/?*}`
43 * PRODUCT_ID=`printf "%d" 0x${PRODUCT#*?/}`
44 *
45 * PARAM_PATH=/sys/module/ti_usb_3410_5052/parameters
46 *
47 * function scan() {
48 * s=$1
49 * shift
50 * for i
51 * do
52 * if [[ $s -eq $i ]]
53 * then
54 * return 0
55 * fi
56 * done
57 * return 1
58 * }
59 *
60 * IFS=$IFS,
61 *
62 * if (scan $VENDOR_ID 1105 `cat $PARAM_PATH/vendor_3410` &&
63 * scan $PRODUCT_ID 13328 `cat $PARAM_PATH/product_3410`) ||
64 * (scan $VENDOR_ID 1105 `cat $PARAM_PATH/vendor_5052` &&
65 * scan $PRODUCT_ID 20562 20818 20570 20575 `cat $PARAM_PATH/product_5052`)
66 * then
67 * echo $ACTIVE_CONFIG > $CONFIG_PATH
68 * fi
69 */ 19 */
70 20
71#include <linux/kernel.h> 21#include <linux/kernel.h>
@@ -457,9 +407,10 @@ static int ti_startup(struct usb_serial *serial)
457 goto free_tdev; 407 goto free_tdev;
458 } 408 }
459 409
460 /* the second configuration must be set (in sysfs by hotplug script) */ 410 /* the second configuration must be set */
461 if (dev->actconfig->desc.bConfigurationValue == TI_BOOT_CONFIG) { 411 if (dev->actconfig->desc.bConfigurationValue == TI_BOOT_CONFIG) {
462 status = -ENODEV; 412 status = usb_driver_set_configuration(dev, TI_ACTIVE_CONFIG);
413 status = status ? status : -ENODEV;
463 goto free_tdev; 414 goto free_tdev;
464 } 415 }
465 416
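The ti_usb_3410_5052 hunk above drops the user-space hotplug script: instead of expecting a script to write bConfigurationValue through sysfs, ti_startup() now asks the USB core to switch the device out of its boot configuration itself. A minimal sketch of that pattern, assuming a generic probe path and illustrative BOOT_CFG/ACTIVE_CFG values (usb_driver_set_configuration() issues the SET_CONFIGURATION asynchronously, which is why the current probe still bails out with -ENODEV and lets the re-enumerated device be probed again):

#include <linux/usb.h>

#define BOOT_CFG	1	/* illustrative values, not constants from any real driver */
#define ACTIVE_CFG	2

/* Sketch only, not the driver's actual code. */
static int example_switch_config(struct usb_device *udev)
{
	int ret;

	if (udev->actconfig->desc.bConfigurationValue != BOOT_CFG)
		return 0;		/* already in a usable configuration */

	/* Schedules SET_CONFIGURATION(ACTIVE_CFG); the device re-enumerates,
	 * so this probe instance gives up unless the request could not even
	 * be scheduled. */
	ret = usb_driver_set_configuration(udev, ACTIVE_CFG);
	return ret ? ret : -ENODEV;
}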
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 8be3f39891c7..794b5ffe4397 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -281,6 +281,7 @@ static void serial_close(struct tty_struct *tty, struct file *filp)
281 if (tty->driver_data) 281 if (tty->driver_data)
282 tty->driver_data = NULL; 282 tty->driver_data = NULL;
283 tty_port_tty_set(&port->port, NULL); 283 tty_port_tty_set(&port->port, NULL);
284 tty_kref_put(tty);
284 } 285 }
285 } 286 }
286 287
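The one-line usb-serial.c change above is a tty reference-count fix: a tty_struct the driver holds a reference on must be released with tty_kref_put() once the driver is done with it, otherwise the structure leaks on every close. As a general illustration of that rule (a hypothetical helper, not the usb-serial code path; tty_port_tty_get() is the usual way to obtain a counted reference):

#include <linux/tty.h>

/* Illustration only: balance every reference taken from the tty layer. */
static void example_hangup_port(struct tty_port *tport)
{
	struct tty_struct *tty = tty_port_tty_get(tport);	/* takes a kref */

	if (tty) {
		tty_hangup(tty);	/* use the tty while the reference is held */
		tty_kref_put(tty);	/* drop the reference from tty_port_tty_get() */
	}
}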
diff --git a/drivers/usb/storage/Kconfig b/drivers/usb/storage/Kconfig
index 3d9249632ae1..c68b738900bd 100644
--- a/drivers/usb/storage/Kconfig
+++ b/drivers/usb/storage/Kconfig
@@ -2,8 +2,8 @@
2# USB Storage driver configuration 2# USB Storage driver configuration
3# 3#
4 4
5comment "NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'" 5comment "NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed;"
6comment "may also be needed; see USB_STORAGE Help for more information" 6comment "see USB_STORAGE Help for more information"
7 depends on USB 7 depends on USB
8 8
9config USB_STORAGE 9config USB_STORAGE
diff --git a/drivers/usb/storage/initializers.c b/drivers/usb/storage/initializers.c
index 4995bb595aef..2dd9bd4bff56 100644
--- a/drivers/usb/storage/initializers.c
+++ b/drivers/usb/storage/initializers.c
@@ -95,11 +95,10 @@ int usb_stor_huawei_e220_init(struct us_data *us)
95{ 95{
96 int result; 96 int result;
97 97
98 us->iobuf[0] = 0x1;
99 result = usb_stor_control_msg(us, us->send_ctrl_pipe, 98 result = usb_stor_control_msg(us, us->send_ctrl_pipe,
100 USB_REQ_SET_FEATURE, 99 USB_REQ_SET_FEATURE,
101 USB_TYPE_STANDARD | USB_RECIP_DEVICE, 100 USB_TYPE_STANDARD | USB_RECIP_DEVICE,
102 0x01, 0x0, us->iobuf, 0x1, 1000); 101 0x01, 0x0, NULL, 0x0, 1000);
103 US_DEBUGP("usb_control_msg performing result is %d\n", result); 102 US_DEBUGP("usb_control_msg performing result is %d\n", result);
104 return (result ? 0 : -1); 103 return (result ? 0 : -1);
105} 104}
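The initializers.c hunk above turns the Huawei E220 mode switch into a zero-length control transfer: a standard SET_FEATURE(1) request addressed to the device with no data stage, rather than sending a one-byte buffer. For reference, the same request written against the generic USB core API instead of usb-storage's usb_stor_control_msg() wrapper (a sketch; udev is an assumed struct usb_device pointer):

#include <linux/usb.h>

/* Sketch: zero-length SET_FEATURE(1) to the device, mirroring the hunk above. */
static int example_huawei_mode_switch(struct usb_device *udev)
{
	return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
			       USB_REQ_SET_FEATURE,
			       USB_TYPE_STANDARD | USB_RECIP_DEVICE,
			       0x01, 0x0, NULL, 0, 1000);
}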
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index cd155475cb6e..bfcc1fe82518 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -167,6 +167,27 @@ UNUSUAL_DEV( 0x0421, 0x005d, 0x0001, 0x0600,
167 US_SC_DEVICE, US_PR_DEVICE, NULL, 167 US_SC_DEVICE, US_PR_DEVICE, NULL,
168 US_FL_FIX_CAPACITY ), 168 US_FL_FIX_CAPACITY ),
169 169
170/* Reported by Ozan Sener <themgzzy@gmail.com> */
171UNUSUAL_DEV( 0x0421, 0x0060, 0x0551, 0x0551,
172 "Nokia",
173 "3500c",
174 US_SC_DEVICE, US_PR_DEVICE, NULL,
175 US_FL_FIX_CAPACITY ),
176
177/* Reported by CSECSY Laszlo <boobaa@frugalware.org> */
178UNUSUAL_DEV( 0x0421, 0x0063, 0x0001, 0x0601,
179 "Nokia",
180 "Nokia 3109c",
181 US_SC_DEVICE, US_PR_DEVICE, NULL,
182 US_FL_FIX_CAPACITY ),
183
184/* Patch for Nokia 5310 capacity */
185UNUSUAL_DEV( 0x0421, 0x006a, 0x0000, 0x0701,
186 "Nokia",
187 "5310",
188 US_SC_DEVICE, US_PR_DEVICE, NULL,
189 US_FL_FIX_CAPACITY ),
190
170/* Reported by Mario Rettig <mariorettig@web.de> */ 191/* Reported by Mario Rettig <mariorettig@web.de> */
171UNUSUAL_DEV( 0x0421, 0x042e, 0x0100, 0x0100, 192UNUSUAL_DEV( 0x0421, 0x042e, 0x0100, 0x0100,
172 "Nokia", 193 "Nokia",
@@ -233,14 +254,14 @@ UNUSUAL_DEV( 0x0421, 0x0495, 0x0370, 0x0370,
233 US_FL_MAX_SECTORS_64 ), 254 US_FL_MAX_SECTORS_64 ),
234 255
235/* Reported by Cedric Godin <cedric@belbone.be> */ 256/* Reported by Cedric Godin <cedric@belbone.be> */
236UNUSUAL_DEV( 0x0421, 0x04b9, 0x0551, 0x0551, 257UNUSUAL_DEV( 0x0421, 0x04b9, 0x0500, 0x0551,
237 "Nokia", 258 "Nokia",
238 "5300", 259 "5300",
239 US_SC_DEVICE, US_PR_DEVICE, NULL, 260 US_SC_DEVICE, US_PR_DEVICE, NULL,
240 US_FL_FIX_CAPACITY ), 261 US_FL_FIX_CAPACITY ),
241 262
242/* Reported by Richard Nauber <RichardNauber@web.de> */ 263/* Reported by Richard Nauber <RichardNauber@web.de> */
243UNUSUAL_DEV( 0x0421, 0x04fa, 0x0601, 0x0601, 264UNUSUAL_DEV( 0x0421, 0x04fa, 0x0550, 0x0660,
244 "Nokia", 265 "Nokia",
245 "6300", 266 "6300",
246 US_SC_DEVICE, US_PR_DEVICE, NULL, 267 US_SC_DEVICE, US_PR_DEVICE, NULL,
@@ -253,6 +274,14 @@ UNUSUAL_DEV( 0x0421, 0x006a, 0x0000, 0x0591,
253 US_SC_DEVICE, US_PR_DEVICE, NULL, 274 US_SC_DEVICE, US_PR_DEVICE, NULL,
254 US_FL_FIX_CAPACITY ), 275 US_FL_FIX_CAPACITY ),
255 276
277/* Submitted by Ricky Wong Yung Fei <evilbladewarrior@gmail.com> */
278/* Nokia 7610 Supernova - Too many sectors reported in usb storage mode */
279UNUSUAL_DEV( 0x0421, 0x00f5, 0x0000, 0x0470,
280 "Nokia",
281 "7610 Supernova",
282 US_SC_DEVICE, US_PR_DEVICE, NULL,
283 US_FL_FIX_CAPACITY ),
284
256/* Reported by Olaf Hering <olh@suse.de> from novell bug #105878 */ 285/* Reported by Olaf Hering <olh@suse.de> from novell bug #105878 */
257UNUSUAL_DEV( 0x0424, 0x0fdc, 0x0210, 0x0210, 286UNUSUAL_DEV( 0x0424, 0x0fdc, 0x0210, 0x0210,
258 "SMSC", 287 "SMSC",
@@ -303,6 +332,18 @@ UNUSUAL_DEV( 0x045a, 0x5210, 0x0101, 0x0101,
303 US_SC_SCSI, US_PR_KARMA, rio_karma_init, 0), 332 US_SC_SCSI, US_PR_KARMA, rio_karma_init, 0),
304#endif 333#endif
305 334
335/* Reported by Tamas Kerecsen <kerecsen@bigfoot.com>
336 * Obviously the PROM has not been customized by the VAR;
337 * the Vendor and Product string descriptors are:
338 * Generic Mass Storage (PROTOTYPE--Remember to change idVendor)
339 * Generic Manufacturer (PROTOTYPE--Remember to change idVendor)
340 */
341UNUSUAL_DEV( 0x045e, 0xffff, 0x0000, 0x0000,
342 "Mitac",
343 "GPS",
344 US_SC_DEVICE, US_PR_DEVICE, NULL,
345 US_FL_MAX_SECTORS_64 ),
346
306/* 347/*
307 * This virtual floppy is found in Sun equipment (x4600, x4200m2, etc.) 348 * This virtual floppy is found in Sun equipment (x4600, x4200m2, etc.)
308 * Reported by Pete Zaitcev <zaitcev@redhat.com> 349 * Reported by Pete Zaitcev <zaitcev@redhat.com>
@@ -333,6 +374,13 @@ UNUSUAL_DEV( 0x0482, 0x0103, 0x0100, 0x0100,
333 "Finecam S5", 374 "Finecam S5",
334 US_SC_DEVICE, US_PR_DEVICE, NULL, US_FL_FIX_INQUIRY), 375 US_SC_DEVICE, US_PR_DEVICE, NULL, US_FL_FIX_INQUIRY),
335 376
377/* Patch submitted by Jens Taprogge <jens.taprogge@taprogge.org> */
378UNUSUAL_DEV( 0x0482, 0x0107, 0x0100, 0x0100,
379 "Kyocera",
380 "CONTAX SL300R T*",
381 US_SC_DEVICE, US_PR_DEVICE, NULL,
382 US_FL_FIX_CAPACITY | US_FL_NOT_LOCKABLE),
383
336/* Reported by Paul Stewart <stewart@wetlogic.net> 384/* Reported by Paul Stewart <stewart@wetlogic.net>
337 * This entry is needed because the device reports Sub=ff */ 385 * This entry is needed because the device reports Sub=ff */
338UNUSUAL_DEV( 0x04a4, 0x0004, 0x0001, 0x0001, 386UNUSUAL_DEV( 0x04a4, 0x0004, 0x0001, 0x0001,
@@ -355,6 +403,13 @@ UNUSUAL_DEV( 0x04b0, 0x0401, 0x0200, 0x0200,
355 US_SC_DEVICE, US_PR_DEVICE, NULL, 403 US_SC_DEVICE, US_PR_DEVICE, NULL,
356 US_FL_FIX_CAPACITY), 404 US_FL_FIX_CAPACITY),
357 405
406/* Reported by Tobias Kunze Briseno <t-linux@fictive.com> */
407UNUSUAL_DEV( 0x04b0, 0x0403, 0x0200, 0x0200,
408 "NIKON",
409 "NIKON DSC D2H",
410 US_SC_DEVICE, US_PR_DEVICE, NULL,
411 US_FL_FIX_CAPACITY),
412
358/* Reported by Milinevsky Dmitry <niam.niam@gmail.com> */ 413/* Reported by Milinevsky Dmitry <niam.niam@gmail.com> */
359UNUSUAL_DEV( 0x04b0, 0x0409, 0x0100, 0x0100, 414UNUSUAL_DEV( 0x04b0, 0x0409, 0x0100, 0x0100,
360 "NIKON", 415 "NIKON",
@@ -411,6 +466,13 @@ UNUSUAL_DEV( 0x04b0, 0x0417, 0x0100, 0x0100,
411 US_SC_DEVICE, US_PR_DEVICE, NULL, 466 US_SC_DEVICE, US_PR_DEVICE, NULL,
412 US_FL_FIX_CAPACITY), 467 US_FL_FIX_CAPACITY),
413 468
469/* Reported by paul ready <lxtwin@homecall.co.uk> */
470UNUSUAL_DEV( 0x04b0, 0x0419, 0x0100, 0x0200,
471 "NIKON",
472 "NIKON DSC D300",
473 US_SC_DEVICE, US_PR_DEVICE, NULL,
474 US_FL_FIX_CAPACITY),
475
414/* Reported by Doug Maxey (dwm@austin.ibm.com) */ 476/* Reported by Doug Maxey (dwm@austin.ibm.com) */
415UNUSUAL_DEV( 0x04b3, 0x4001, 0x0110, 0x0110, 477UNUSUAL_DEV( 0x04b3, 0x4001, 0x0110, 0x0110,
416 "IBM", 478 "IBM",
@@ -1251,6 +1313,13 @@ UNUSUAL_DEV( 0x0839, 0x000a, 0x0001, 0x0001,
1251 US_SC_DEVICE, US_PR_DEVICE, NULL, 1313 US_SC_DEVICE, US_PR_DEVICE, NULL,
1252 US_FL_FIX_INQUIRY), 1314 US_FL_FIX_INQUIRY),
1253 1315
1316/* Reported by Luciano Rocha <luciano@eurotux.com> */
1317UNUSUAL_DEV( 0x0840, 0x0082, 0x0001, 0x0001,
1318 "Argosy",
1319 "Storage",
1320 US_SC_DEVICE, US_PR_DEVICE, NULL,
1321 US_FL_FIX_CAPACITY),
1322
1254/* Entry and supporting patch by Theodore Kilgore <kilgota@auburn.edu>. 1323/* Entry and supporting patch by Theodore Kilgore <kilgota@auburn.edu>.
1255 * Flag will support Bulk devices which use a standards-violating 32-byte 1324 * Flag will support Bulk devices which use a standards-violating 32-byte
1256 * Command Block Wrapper. Here, the "DC2MEGA" cameras (several brands) with 1325 * Command Block Wrapper. Here, the "DC2MEGA" cameras (several brands) with
@@ -1628,97 +1697,332 @@ UNUSUAL_DEV( 0x1210, 0x0003, 0x0100, 0x0100,
1628/* Reported by fangxiaozhi <huananhu@huawei.com> 1697/* Reported by fangxiaozhi <huananhu@huawei.com>
1629 * This brings the HUAWEI data card devices into multi-port mode 1698 * This brings the HUAWEI data card devices into multi-port mode
1630 */ 1699 */
1631UNUSUAL_DEV( 0x12d1, 0x1001, 0x0000, 0x0000, 1700UNUSUAL_DEV( 0x12d1, 0x1001, 0x0000, 0x0000,
1701 "HUAWEI MOBILE",
1702 "Mass Storage",
1703 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1704 0),
1705UNUSUAL_DEV( 0x12d1, 0x1003, 0x0000, 0x0000,
1706 "HUAWEI MOBILE",
1707 "Mass Storage",
1708 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1709 0),
1710UNUSUAL_DEV( 0x12d1, 0x1004, 0x0000, 0x0000,
1711 "HUAWEI MOBILE",
1712 "Mass Storage",
1713 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1714 0),
1715UNUSUAL_DEV( 0x12d1, 0x1401, 0x0000, 0x0000,
1716 "HUAWEI MOBILE",
1717 "Mass Storage",
1718 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1719 0),
1720UNUSUAL_DEV( 0x12d1, 0x1402, 0x0000, 0x0000,
1721 "HUAWEI MOBILE",
1722 "Mass Storage",
1723 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1724 0),
1725UNUSUAL_DEV( 0x12d1, 0x1403, 0x0000, 0x0000,
1726 "HUAWEI MOBILE",
1727 "Mass Storage",
1728 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1729 0),
1730UNUSUAL_DEV( 0x12d1, 0x1404, 0x0000, 0x0000,
1731 "HUAWEI MOBILE",
1732 "Mass Storage",
1733 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1734 0),
1735UNUSUAL_DEV( 0x12d1, 0x1405, 0x0000, 0x0000,
1632 "HUAWEI MOBILE", 1736 "HUAWEI MOBILE",
1633 "Mass Storage", 1737 "Mass Storage",
1634 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1738 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1635 0), 1739 0),
1636UNUSUAL_DEV( 0x12d1, 0x1003, 0x0000, 0x0000, 1740UNUSUAL_DEV( 0x12d1, 0x1406, 0x0000, 0x0000,
1637 "HUAWEI MOBILE", 1741 "HUAWEI MOBILE",
1638 "Mass Storage", 1742 "Mass Storage",
1639 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1743 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1640 0), 1744 0),
1641UNUSUAL_DEV( 0x12d1, 0x1004, 0x0000, 0x0000, 1745UNUSUAL_DEV( 0x12d1, 0x1407, 0x0000, 0x0000,
1642 "HUAWEI MOBILE", 1746 "HUAWEI MOBILE",
1643 "Mass Storage", 1747 "Mass Storage",
1644 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1748 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1645 0), 1749 0),
1646UNUSUAL_DEV( 0x12d1, 0x1401, 0x0000, 0x0000, 1750UNUSUAL_DEV( 0x12d1, 0x1408, 0x0000, 0x0000,
1647 "HUAWEI MOBILE", 1751 "HUAWEI MOBILE",
1648 "Mass Storage", 1752 "Mass Storage",
1649 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1753 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1650 0), 1754 0),
1651UNUSUAL_DEV( 0x12d1, 0x1403, 0x0000, 0x0000, 1755UNUSUAL_DEV( 0x12d1, 0x1409, 0x0000, 0x0000,
1652 "HUAWEI MOBILE", 1756 "HUAWEI MOBILE",
1653 "Mass Storage", 1757 "Mass Storage",
1654 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1758 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1655 0), 1759 0),
1656UNUSUAL_DEV( 0x12d1, 0x1405, 0x0000, 0x0000, 1760UNUSUAL_DEV( 0x12d1, 0x140A, 0x0000, 0x0000,
1657 "HUAWEI MOBILE", 1761 "HUAWEI MOBILE",
1658 "Mass Storage", 1762 "Mass Storage",
1659 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1763 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1660 0), 1764 0),
1661UNUSUAL_DEV( 0x12d1, 0x1406, 0x0000, 0x0000, 1765UNUSUAL_DEV( 0x12d1, 0x140B, 0x0000, 0x0000,
1662 "HUAWEI MOBILE", 1766 "HUAWEI MOBILE",
1663 "Mass Storage", 1767 "Mass Storage",
1664 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1768 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1665 0), 1769 0),
1666UNUSUAL_DEV( 0x12d1, 0x1408, 0x0000, 0x0000, 1770UNUSUAL_DEV( 0x12d1, 0x140C, 0x0000, 0x0000,
1667 "HUAWEI MOBILE", 1771 "HUAWEI MOBILE",
1668 "Mass Storage", 1772 "Mass Storage",
1669 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1773 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1670 0), 1774 0),
1671UNUSUAL_DEV( 0x12d1, 0x1409, 0x0000, 0x0000, 1775UNUSUAL_DEV( 0x12d1, 0x140D, 0x0000, 0x0000,
1672 "HUAWEI MOBILE", 1776 "HUAWEI MOBILE",
1673 "Mass Storage", 1777 "Mass Storage",
1674 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1778 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1675 0), 1779 0),
1676UNUSUAL_DEV( 0x12d1, 0x1410, 0x0000, 0x0000, 1780UNUSUAL_DEV( 0x12d1, 0x140E, 0x0000, 0x0000,
1677 "HUAWEI MOBILE", 1781 "HUAWEI MOBILE",
1678 "Mass Storage", 1782 "Mass Storage",
1679 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1783 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1680 0), 1784 0),
1681UNUSUAL_DEV( 0x12d1, 0x1411, 0x0000, 0x0000, 1785UNUSUAL_DEV( 0x12d1, 0x140F, 0x0000, 0x0000,
1682 "HUAWEI MOBILE", 1786 "HUAWEI MOBILE",
1683 "Mass Storage", 1787 "Mass Storage",
1684 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1788 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1685 0), 1789 0),
1686UNUSUAL_DEV( 0x12d1, 0x1412, 0x0000, 0x0000, 1790UNUSUAL_DEV( 0x12d1, 0x1410, 0x0000, 0x0000,
1687 "HUAWEI MOBILE", 1791 "HUAWEI MOBILE",
1688 "Mass Storage", 1792 "Mass Storage",
1689 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1793 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1690 0), 1794 0),
1691UNUSUAL_DEV( 0x12d1, 0x1413, 0x0000, 0x0000, 1795UNUSUAL_DEV( 0x12d1, 0x1411, 0x0000, 0x0000,
1692 "HUAWEI MOBILE", 1796 "HUAWEI MOBILE",
1693 "Mass Storage", 1797 "Mass Storage",
1694 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1798 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1695 0), 1799 0),
1696UNUSUAL_DEV( 0x12d1, 0x1414, 0x0000, 0x0000, 1800UNUSUAL_DEV( 0x12d1, 0x1412, 0x0000, 0x0000,
1697 "HUAWEI MOBILE", 1801 "HUAWEI MOBILE",
1698 "Mass Storage", 1802 "Mass Storage",
1699 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1803 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1700 0), 1804 0),
1701UNUSUAL_DEV( 0x12d1, 0x1415, 0x0000, 0x0000, 1805UNUSUAL_DEV( 0x12d1, 0x1413, 0x0000, 0x0000,
1702 "HUAWEI MOBILE", 1806 "HUAWEI MOBILE",
1703 "Mass Storage", 1807 "Mass Storage",
1704 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1808 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1705 0), 1809 0),
1706UNUSUAL_DEV( 0x12d1, 0x1416, 0x0000, 0x0000, 1810UNUSUAL_DEV( 0x12d1, 0x1414, 0x0000, 0x0000,
1707 "HUAWEI MOBILE", 1811 "HUAWEI MOBILE",
1708 "Mass Storage", 1812 "Mass Storage",
1709 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1813 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1710 0), 1814 0),
1711UNUSUAL_DEV( 0x12d1, 0x1417, 0x0000, 0x0000, 1815UNUSUAL_DEV( 0x12d1, 0x1415, 0x0000, 0x0000,
1712 "HUAWEI MOBILE", 1816 "HUAWEI MOBILE",
1713 "Mass Storage", 1817 "Mass Storage",
1714 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1818 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1715 0), 1819 0),
1716UNUSUAL_DEV( 0x12d1, 0x1418, 0x0000, 0x0000, 1820UNUSUAL_DEV( 0x12d1, 0x1416, 0x0000, 0x0000,
1717 "HUAWEI MOBILE", 1821 "HUAWEI MOBILE",
1718 "Mass Storage", 1822 "Mass Storage",
1719 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1823 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1720 0), 1824 0),
1721UNUSUAL_DEV( 0x12d1, 0x1419, 0x0000, 0x0000, 1825UNUSUAL_DEV( 0x12d1, 0x1417, 0x0000, 0x0000,
1826 "HUAWEI MOBILE",
1827 "Mass Storage",
1828 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1829 0),
1830UNUSUAL_DEV( 0x12d1, 0x1418, 0x0000, 0x0000,
1831 "HUAWEI MOBILE",
1832 "Mass Storage",
1833 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1834 0),
1835UNUSUAL_DEV( 0x12d1, 0x1419, 0x0000, 0x0000,
1836 "HUAWEI MOBILE",
1837 "Mass Storage",
1838 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1839 0),
1840UNUSUAL_DEV( 0x12d1, 0x141A, 0x0000, 0x0000,
1841 "HUAWEI MOBILE",
1842 "Mass Storage",
1843 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1844 0),
1845UNUSUAL_DEV( 0x12d1, 0x141B, 0x0000, 0x0000,
1846 "HUAWEI MOBILE",
1847 "Mass Storage",
1848 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1849 0),
1850UNUSUAL_DEV( 0x12d1, 0x141C, 0x0000, 0x0000,
1851 "HUAWEI MOBILE",
1852 "Mass Storage",
1853 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1854 0),
1855UNUSUAL_DEV( 0x12d1, 0x141D, 0x0000, 0x0000,
1856 "HUAWEI MOBILE",
1857 "Mass Storage",
1858 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1859 0),
1860UNUSUAL_DEV( 0x12d1, 0x141E, 0x0000, 0x0000,
1861 "HUAWEI MOBILE",
1862 "Mass Storage",
1863 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1864 0),
1865UNUSUAL_DEV( 0x12d1, 0x141F, 0x0000, 0x0000,
1866 "HUAWEI MOBILE",
1867 "Mass Storage",
1868 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1869 0),
1870UNUSUAL_DEV( 0x12d1, 0x1420, 0x0000, 0x0000,
1871 "HUAWEI MOBILE",
1872 "Mass Storage",
1873 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1874 0),
1875UNUSUAL_DEV( 0x12d1, 0x1421, 0x0000, 0x0000,
1876 "HUAWEI MOBILE",
1877 "Mass Storage",
1878 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1879 0),
1880UNUSUAL_DEV( 0x12d1, 0x1422, 0x0000, 0x0000,
1881 "HUAWEI MOBILE",
1882 "Mass Storage",
1883 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1884 0),
1885UNUSUAL_DEV( 0x12d1, 0x1423, 0x0000, 0x0000,
1886 "HUAWEI MOBILE",
1887 "Mass Storage",
1888 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1889 0),
1890UNUSUAL_DEV( 0x12d1, 0x1424, 0x0000, 0x0000,
1891 "HUAWEI MOBILE",
1892 "Mass Storage",
1893 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1894 0),
1895UNUSUAL_DEV( 0x12d1, 0x1425, 0x0000, 0x0000,
1896 "HUAWEI MOBILE",
1897 "Mass Storage",
1898 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1899 0),
1900UNUSUAL_DEV( 0x12d1, 0x1426, 0x0000, 0x0000,
1901 "HUAWEI MOBILE",
1902 "Mass Storage",
1903 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1904 0),
1905UNUSUAL_DEV( 0x12d1, 0x1427, 0x0000, 0x0000,
1906 "HUAWEI MOBILE",
1907 "Mass Storage",
1908 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1909 0),
1910UNUSUAL_DEV( 0x12d1, 0x1428, 0x0000, 0x0000,
1911 "HUAWEI MOBILE",
1912 "Mass Storage",
1913 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1914 0),
1915UNUSUAL_DEV( 0x12d1, 0x1429, 0x0000, 0x0000,
1916 "HUAWEI MOBILE",
1917 "Mass Storage",
1918 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1919 0),
1920UNUSUAL_DEV( 0x12d1, 0x142A, 0x0000, 0x0000,
1921 "HUAWEI MOBILE",
1922 "Mass Storage",
1923 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1924 0),
1925UNUSUAL_DEV( 0x12d1, 0x142B, 0x0000, 0x0000,
1926 "HUAWEI MOBILE",
1927 "Mass Storage",
1928 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1929 0),
1930UNUSUAL_DEV( 0x12d1, 0x142C, 0x0000, 0x0000,
1931 "HUAWEI MOBILE",
1932 "Mass Storage",
1933 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1934 0),
1935UNUSUAL_DEV( 0x12d1, 0x142D, 0x0000, 0x0000,
1936 "HUAWEI MOBILE",
1937 "Mass Storage",
1938 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1939 0),
1940UNUSUAL_DEV( 0x12d1, 0x142E, 0x0000, 0x0000,
1941 "HUAWEI MOBILE",
1942 "Mass Storage",
1943 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1944 0),
1945UNUSUAL_DEV( 0x12d1, 0x142F, 0x0000, 0x0000,
1946 "HUAWEI MOBILE",
1947 "Mass Storage",
1948 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1949 0),
1950UNUSUAL_DEV( 0x12d1, 0x1430, 0x0000, 0x0000,
1951 "HUAWEI MOBILE",
1952 "Mass Storage",
1953 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1954 0),
1955UNUSUAL_DEV( 0x12d1, 0x1431, 0x0000, 0x0000,
1956 "HUAWEI MOBILE",
1957 "Mass Storage",
1958 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1959 0),
1960UNUSUAL_DEV( 0x12d1, 0x1432, 0x0000, 0x0000,
1961 "HUAWEI MOBILE",
1962 "Mass Storage",
1963 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1964 0),
1965UNUSUAL_DEV( 0x12d1, 0x1433, 0x0000, 0x0000,
1966 "HUAWEI MOBILE",
1967 "Mass Storage",
1968 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1969 0),
1970UNUSUAL_DEV( 0x12d1, 0x1434, 0x0000, 0x0000,
1971 "HUAWEI MOBILE",
1972 "Mass Storage",
1973 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1974 0),
1975UNUSUAL_DEV( 0x12d1, 0x1435, 0x0000, 0x0000,
1976 "HUAWEI MOBILE",
1977 "Mass Storage",
1978 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1979 0),
1980UNUSUAL_DEV( 0x12d1, 0x1436, 0x0000, 0x0000,
1981 "HUAWEI MOBILE",
1982 "Mass Storage",
1983 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1984 0),
1985UNUSUAL_DEV( 0x12d1, 0x1437, 0x0000, 0x0000,
1986 "HUAWEI MOBILE",
1987 "Mass Storage",
1988 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1989 0),
1990UNUSUAL_DEV( 0x12d1, 0x1438, 0x0000, 0x0000,
1991 "HUAWEI MOBILE",
1992 "Mass Storage",
1993 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1994 0),
1995UNUSUAL_DEV( 0x12d1, 0x1439, 0x0000, 0x0000,
1996 "HUAWEI MOBILE",
1997 "Mass Storage",
1998 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1999 0),
2000UNUSUAL_DEV( 0x12d1, 0x143A, 0x0000, 0x0000,
2001 "HUAWEI MOBILE",
2002 "Mass Storage",
2003 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
2004 0),
2005UNUSUAL_DEV( 0x12d1, 0x143B, 0x0000, 0x0000,
2006 "HUAWEI MOBILE",
2007 "Mass Storage",
2008 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
2009 0),
2010UNUSUAL_DEV( 0x12d1, 0x143C, 0x0000, 0x0000,
2011 "HUAWEI MOBILE",
2012 "Mass Storage",
2013 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
2014 0),
2015UNUSUAL_DEV( 0x12d1, 0x143D, 0x0000, 0x0000,
2016 "HUAWEI MOBILE",
2017 "Mass Storage",
2018 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
2019 0),
2020UNUSUAL_DEV( 0x12d1, 0x143E, 0x0000, 0x0000,
2021 "HUAWEI MOBILE",
2022 "Mass Storage",
2023 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
2024 0),
2025UNUSUAL_DEV( 0x12d1, 0x143F, 0x0000, 0x0000,
1722 "HUAWEI MOBILE", 2026 "HUAWEI MOBILE",
1723 "Mass Storage", 2027 "Mass Storage",
1724 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 2028 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
@@ -1745,6 +2049,15 @@ UNUSUAL_DEV( 0x14cd, 0x6600, 0x0201, 0x0201,
1745 US_SC_DEVICE, US_PR_DEVICE, NULL, 2049 US_SC_DEVICE, US_PR_DEVICE, NULL,
1746 US_FL_IGNORE_RESIDUE ), 2050 US_FL_IGNORE_RESIDUE ),
1747 2051
2052/* Reported by Alexandre Oliva <oliva@lsd.ic.unicamp.br>
2053 * JMicron responds to USN and several other SCSI ioctls with a
2054 * residue that causes subsequent I/O requests to fail. */
2055UNUSUAL_DEV( 0x152d, 0x2329, 0x0100, 0x0100,
2056 "JMicron",
2057 "USB to ATA/ATAPI Bridge",
2058 US_SC_DEVICE, US_PR_DEVICE, NULL,
2059 US_FL_IGNORE_RESIDUE ),
2060
1748/* Reported by Robert Schedel <r.schedel@yahoo.de> 2061/* Reported by Robert Schedel <r.schedel@yahoo.de>
1749 * Note: this is a 'super top' device like the above 14cd/6600 device */ 2062 * Note: this is a 'super top' device like the above 14cd/6600 device */
1750UNUSUAL_DEV( 0x1652, 0x6600, 0x0201, 0x0201, 2063UNUSUAL_DEV( 0x1652, 0x6600, 0x0201, 0x0201,
@@ -1818,6 +2131,15 @@ UNUSUAL_DEV( 0x2770, 0x915d, 0x0010, 0x0010,
1818 US_SC_DEVICE, US_PR_DEVICE, NULL, 2131 US_SC_DEVICE, US_PR_DEVICE, NULL,
1819 US_FL_FIX_CAPACITY ), 2132 US_FL_FIX_CAPACITY ),
1820 2133
2134/* Reported by Frederic Marchal <frederic.marchal@wowcompany.com>
2135 * Mio Moov 330
2136 */
2137UNUSUAL_DEV( 0x3340, 0xffff, 0x0000, 0x0000,
2138 "Mitac",
2139 "Mio DigiWalker USB Sync",
2140 US_SC_DEVICE,US_PR_DEVICE,NULL,
2141 US_FL_MAX_SECTORS_64 ),
2142
1821/* Reported by Andrey Rahmatullin <wrar@altlinux.org> */ 2143/* Reported by Andrey Rahmatullin <wrar@altlinux.org> */
1822UNUSUAL_DEV( 0x4102, 0x1020, 0x0100, 0x0100, 2144UNUSUAL_DEV( 0x4102, 0x1020, 0x0100, 0x0100,
1823 "iRiver", 2145 "iRiver",
diff --git a/drivers/usb/wusbcore/Kconfig b/drivers/usb/wusbcore/Kconfig
new file mode 100644
index 000000000000..eb09a0a14a80
--- /dev/null
+++ b/drivers/usb/wusbcore/Kconfig
@@ -0,0 +1,41 @@
1#
2# Wireless USB Core configuration
3#
4config USB_WUSB
5 tristate "Enable Wireless USB extensions (EXPERIMENTAL)"
6 depends on EXPERIMENTAL
7 depends on USB
8 select UWB
9 select CRYPTO
10 select CRYPTO_BLKCIPHER
11 select CRYPTO_CBC
12 select CRYPTO_MANAGER
13 select CRYPTO_AES
14 help
15 Enable the host-side support for Wireless USB.
16
17 To compile this support select Y (built in). It is safe to
18 select even if you don't have the hardware.
19
20config USB_WUSB_CBAF
21 tristate "Support WUSB Cable Based Association (CBA)"
22 depends on USB
23 help
24 Some WUSB devices support Cable Based Association. It's used to
25 enable the secure communication between the host and the
26 device.
27
28 Enable this option if your WUSB device must be connected
29 via wired USB before establishing a wireless link.
30
31 It is safe to select even if you don't have compatible
32 hardware.
33
34config USB_WUSB_CBAF_DEBUG
35 bool "Enable CBA debug messages"
36 depends on USB_WUSB_CBAF
37 help
38 Say Y here if you want the CBA to produce a bunch of debug messages
39 to the system log. Select this if you are having a problem with
40 CBA support and want to see more of what is going on.
41
diff --git a/drivers/usb/wusbcore/Makefile b/drivers/usb/wusbcore/Makefile
new file mode 100644
index 000000000000..75f1ade66258
--- /dev/null
+++ b/drivers/usb/wusbcore/Makefile
@@ -0,0 +1,26 @@
1obj-$(CONFIG_USB_WUSB) += wusbcore.o
2obj-$(CONFIG_USB_HWA_HCD) += wusb-wa.o
3obj-$(CONFIG_USB_WUSB_CBAF) += wusb-cbaf.o
4
5
6wusbcore-objs := \
7 crypto.o \
8 devconnect.o \
9 dev-sysfs.o \
10 mmc.o \
11 pal.o \
12 rh.o \
13 reservation.o \
14 security.o \
15 wusbhc.o
16
17wusb-cbaf-objs := cbaf.o
18
19wusb-wa-objs := wa-hc.o \
20 wa-nep.o \
21 wa-rpipe.o \
22 wa-xfer.o
23
24ifeq ($(CONFIG_USB_WUSB_CBAF_DEBUG),y)
25EXTRA_CFLAGS += -DDEBUG
26endif
diff --git a/drivers/usb/wusbcore/cbaf.c b/drivers/usb/wusbcore/cbaf.c
new file mode 100644
index 000000000000..ab4788d1785a
--- /dev/null
+++ b/drivers/usb/wusbcore/cbaf.c
@@ -0,0 +1,673 @@
1/*
2 * Wireless USB - Cable Based Association
3 *
4 *
5 * Copyright (C) 2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License version
11 * 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 * 02110-1301, USA.
22 *
23 *
24 * WUSB devices have to be paired (associated in WUSB lingo) so
25 * that they can connect to the system.
26 *
27 * One way of pairing is using CBA-Cable Based Association. First
28 * time you plug the device with a cable, association is done between
29 * host and device and subsequent times, you can connect wirelessly
30 * without having to associate again. That's the idea.
31 *
32 * This driver does nothing Earth shattering. It just provides an
33 * interface to chat with the wire-connected device so we can get a
34 * CDID (device ID) that might have been previously associated to a
35 * CHID (host ID) and to set up a new <CHID,CDID,CK> triplet
36 * (connection context), with the CK being the secret, or connection
37 * key. This is the pairing data.
38 *
39 * When a device with the CBA capability connects, the probe routine
40 * just creates a bunch of sysfs files that a user space enumeration
41 * manager uses to allow it to connect wirelessly to the system or not.
42 *
43 * The process goes like this:
44 *
45 * 1. Device plugs, cbaf is loaded, notifications happen.
46 *
47 * 2. The connection manager (CM) sees a device with CBAF capability
48 * (the wusb_chid etc. files in /sys/devices/blah/OURDEVICE).
49 *
50 * 3. The CM writes the host name, supported band groups, and the CHID
51 * (host ID) into the wusb_host_name, wusb_host_band_groups and
52 * wusb_chid files. These get sent to the device and the CDID (if
53 * any) for this host is requested.
54 *
55 * 4. The CM can verify that the device's supported band groups
56 * (wusb_device_band_groups) are compatible with the host.
57 *
58 * 5. The CM reads the wusb_cdid file.
59 *
60 * 6. The CM looks up its database
61 *
62 * 6.1 If it has a matching CHID,CDID entry, the device has been
63 * authorized before (paired) and nothing further needs to be
64 * done.
65 *
66 * 6.2 If the CDID is zero (or the CM doesn't find a matching CDID in
67 * its database), the device is assumed to be unknown. The CM
68 * may associate the host with the device by writing a randomly
69 * generated CDID to wusb_cdid and then a random CK to wusb_ck
70 * (this uploads the new CC to the device).
71 *
72 * The CM may choose to prompt the user before associating with a new
73 * device.
74 *
75 * 7. Device is unplugged.
76 *
77 * When the device tries to connect wirelessly, it will present its
78 * CDID to the WUSB host controller. The CM will query the
79 * database. If the CHID/CDID pair is found, it will (with a 4-way
80 * handshake) challenge the device to demonstrate it has the CK secret
81 * key (from our database) without actually exchanging it. Once
82 * satisfied, crypto keys are derived from the CK, the device is
83 * connected and all communication is encrypted.
84 *
85 * References:
86 * [WUSB-AM] Association Models Supplement to the Certified Wireless
87 * Universal Serial Bus Specification, version 1.0.
88 */
89#include <linux/module.h>
90#include <linux/ctype.h>
91#include <linux/version.h>
92#include <linux/usb.h>
93#include <linux/interrupt.h>
94#include <linux/delay.h>
95#include <linux/random.h>
96#include <linux/mutex.h>
97#include <linux/uwb.h>
98#include <linux/usb/wusb.h>
99#include <linux/usb/association.h>
100
101#define CBA_NAME_LEN 0x40 /* [WUSB-AM] table 4-7 */
102
103/* An instance of a Cable-Based-Association-Framework device */
104struct cbaf {
105 struct usb_device *usb_dev;
106 struct usb_interface *usb_iface;
107 void *buffer;
108 size_t buffer_size;
109
110 struct wusb_ckhdid chid;
111 char host_name[CBA_NAME_LEN];
112 u16 host_band_groups;
113
114 struct wusb_ckhdid cdid;
115 char device_name[CBA_NAME_LEN];
116 u16 device_band_groups;
117
118 struct wusb_ckhdid ck;
119};
120
121/*
122 * Verify that a CBAF USB-interface has what we need
123 *
124 * According to [WUSB-AM], CBA devices should provide at least two
125 * interfaces:
126 * - RETRIEVE_HOST_INFO
127 * - ASSOCIATE
128 *
129 * If the device doesn't provide these interfaces, we do not know how
130 * to deal with it.
131 */
132static int cbaf_check(struct cbaf *cbaf)
133{
134 int result;
135 struct device *dev = &cbaf->usb_iface->dev;
136 struct wusb_cbaf_assoc_info *assoc_info;
137 struct wusb_cbaf_assoc_request *assoc_request;
138 size_t assoc_size;
139 void *itr, *top;
140 int ar_rhi = 0, ar_assoc = 0;
141
142 result = usb_control_msg(
143 cbaf->usb_dev, usb_rcvctrlpipe(cbaf->usb_dev, 0),
144 CBAF_REQ_GET_ASSOCIATION_INFORMATION,
145 USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
146 0, cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber,
147 cbaf->buffer, cbaf->buffer_size, 1000 /* FIXME: arbitrary */);
148 if (result < 0) {
149 dev_err(dev, "Cannot get available association types: %d\n",
150 result);
151 return result;
152 }
153
154 assoc_info = cbaf->buffer;
155 if (result < sizeof(*assoc_info)) {
156 dev_err(dev, "Not enough data to decode association info "
157 "header (%zu vs %zu bytes required)\n",
158 (size_t)result, sizeof(*assoc_info));
159 return result;
160 }
161
162 assoc_size = le16_to_cpu(assoc_info->Length);
163 if (result < assoc_size) {
164 dev_err(dev, "Not enough data to decode association info "
165 "(%zu vs %zu bytes required)\n",
166 (size_t)assoc_size, sizeof(*assoc_info));
167 return result;
168 }
169 /*
170 * From now on, we just verify, but won't error out unless we
171 * don't find the AR_TYPE_WUSB_{RETRIEVE_HOST_INFO,ASSOCIATE}
172 * types.
173 */
174 itr = cbaf->buffer + sizeof(*assoc_info);
175 top = cbaf->buffer + assoc_size;
176 dev_dbg(dev, "Found %u association requests (%zu bytes)\n",
177 assoc_info->NumAssociationRequests, assoc_size);
178
179 while (itr < top) {
180 u16 ar_type, ar_subtype;
181 u32 ar_size;
182 const char *ar_name;
183
184 assoc_request = itr;
185
186 if (top - itr < sizeof(*assoc_request)) {
187 dev_err(dev, "Not enough data to decode associaton "
188 "request (%zu vs %zu bytes needed)\n",
189 top - itr, sizeof(*assoc_request));
190 break;
191 }
192
193 ar_type = le16_to_cpu(assoc_request->AssociationTypeId);
194 ar_subtype = le16_to_cpu(assoc_request->AssociationSubTypeId);
195 ar_size = le32_to_cpu(assoc_request->AssociationTypeInfoSize);
196 ar_name = "unknown";
197
198 switch (ar_type) {
199 case AR_TYPE_WUSB:
200 /* Verify we have what is mandated by [WUSB-AM]. */
201 switch (ar_subtype) {
202 case AR_TYPE_WUSB_RETRIEVE_HOST_INFO:
203 ar_name = "RETRIEVE_HOST_INFO";
204 ar_rhi = 1;
205 break;
206 case AR_TYPE_WUSB_ASSOCIATE:
207 /* send assoc data */
208 ar_name = "ASSOCIATE";
209 ar_assoc = 1;
210 break;
211 };
212 break;
213 };
214
215 dev_dbg(dev, "Association request #%02u: 0x%04x/%04x "
216 "(%zu bytes): %s\n",
217 assoc_request->AssociationDataIndex, ar_type,
218 ar_subtype, (size_t)ar_size, ar_name);
219
220 itr += sizeof(*assoc_request);
221 }
222
223 if (!ar_rhi) {
224 dev_err(dev, "Missing RETRIEVE_HOST_INFO association "
225 "request\n");
226 return -EINVAL;
227 }
228 if (!ar_assoc) {
229 dev_err(dev, "Missing ASSOCIATE association request\n");
230 return -EINVAL;
231 }
232
233 return 0;
234}
235
236static const struct wusb_cbaf_host_info cbaf_host_info_defaults = {
237 .AssociationTypeId_hdr = WUSB_AR_AssociationTypeId,
238 .AssociationTypeId = cpu_to_le16(AR_TYPE_WUSB),
239 .AssociationSubTypeId_hdr = WUSB_AR_AssociationSubTypeId,
240 .AssociationSubTypeId = cpu_to_le16(AR_TYPE_WUSB_RETRIEVE_HOST_INFO),
241 .CHID_hdr = WUSB_AR_CHID,
242 .LangID_hdr = WUSB_AR_LangID,
243 .HostFriendlyName_hdr = WUSB_AR_HostFriendlyName,
244};
245
246/* Send WUSB host information (CHID and name) to a CBAF device */
247static int cbaf_send_host_info(struct cbaf *cbaf)
248{
249 struct wusb_cbaf_host_info *hi;
250 size_t name_len;
251 size_t hi_size;
252
253 hi = cbaf->buffer;
254 memset(hi, 0, sizeof(*hi));
255 *hi = cbaf_host_info_defaults;
256 hi->CHID = cbaf->chid;
257 hi->LangID = 0; /* FIXME: I guess... */
258 strlcpy(hi->HostFriendlyName, cbaf->host_name, CBA_NAME_LEN);
259 name_len = strlen(cbaf->host_name);
260 hi->HostFriendlyName_hdr.len = cpu_to_le16(name_len);
261 hi_size = sizeof(*hi) + name_len;
262
263 return usb_control_msg(cbaf->usb_dev, usb_sndctrlpipe(cbaf->usb_dev, 0),
264 CBAF_REQ_SET_ASSOCIATION_RESPONSE,
265 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
266 0x0101,
267 cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber,
268 hi, hi_size, 1000 /* FIXME: arbitrary */);
269}
270
271/*
272 * Get the device's information (CDID) associated with the CHID
273 *
274 * The device will return its information (CDID, name, band groups)
275 * associated with the CHID we set before, or a zero CDID and default
276 * name and band groups if no CHID is set or it is unknown.
277 */
278static int cbaf_cdid_get(struct cbaf *cbaf)
279{
280 int result;
281 struct device *dev = &cbaf->usb_iface->dev;
282 struct wusb_cbaf_device_info *di;
283 size_t needed;
284
285 di = cbaf->buffer;
286 result = usb_control_msg(
287 cbaf->usb_dev, usb_rcvctrlpipe(cbaf->usb_dev, 0),
288 CBAF_REQ_GET_ASSOCIATION_REQUEST,
289 USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
290 0x0200, cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber,
291 di, cbaf->buffer_size, 1000 /* FIXME: arbitrary */);
292 if (result < 0) {
293 dev_err(dev, "Cannot request device information: %d\n", result);
294 return result;
295 }
296
297 needed = result < sizeof(*di) ? sizeof(*di) : le32_to_cpu(di->Length);
298 if (result < needed) {
299 dev_err(dev, "Not enough data in DEVICE_INFO reply (%zu vs "
300 "%zu bytes needed)\n", (size_t)result, needed);
301 return result;
302 }
303
304 strlcpy(cbaf->device_name, di->DeviceFriendlyName, CBA_NAME_LEN);
305 cbaf->cdid = di->CDID;
306 cbaf->device_band_groups = le16_to_cpu(di->BandGroups);
307
308 return 0;
309}
310
311static ssize_t cbaf_wusb_chid_show(struct device *dev,
312 struct device_attribute *attr,
313 char *buf)
314{
315 struct usb_interface *iface = to_usb_interface(dev);
316 struct cbaf *cbaf = usb_get_intfdata(iface);
317 char pr_chid[WUSB_CKHDID_STRSIZE];
318
319 ckhdid_printf(pr_chid, sizeof(pr_chid), &cbaf->chid);
320 return scnprintf(buf, PAGE_SIZE, "%s\n", pr_chid);
321}
322
323static ssize_t cbaf_wusb_chid_store(struct device *dev,
324 struct device_attribute *attr,
325 const char *buf, size_t size)
326{
327 ssize_t result;
328 struct usb_interface *iface = to_usb_interface(dev);
329 struct cbaf *cbaf = usb_get_intfdata(iface);
330
331 result = sscanf(buf,
332 "%02hhx %02hhx %02hhx %02hhx "
333 "%02hhx %02hhx %02hhx %02hhx "
334 "%02hhx %02hhx %02hhx %02hhx "
335 "%02hhx %02hhx %02hhx %02hhx",
336 &cbaf->chid.data[0] , &cbaf->chid.data[1],
337 &cbaf->chid.data[2] , &cbaf->chid.data[3],
338 &cbaf->chid.data[4] , &cbaf->chid.data[5],
339 &cbaf->chid.data[6] , &cbaf->chid.data[7],
340 &cbaf->chid.data[8] , &cbaf->chid.data[9],
341 &cbaf->chid.data[10], &cbaf->chid.data[11],
342 &cbaf->chid.data[12], &cbaf->chid.data[13],
343 &cbaf->chid.data[14], &cbaf->chid.data[15]);
344
345 if (result != 16)
346 return -EINVAL;
347
348 result = cbaf_send_host_info(cbaf);
349 if (result < 0)
350 return result;
351 result = cbaf_cdid_get(cbaf);
352 if (result < 0)
353 return result;
354 return size;
355}
356static DEVICE_ATTR(wusb_chid, 0600, cbaf_wusb_chid_show, cbaf_wusb_chid_store);
357
358static ssize_t cbaf_wusb_host_name_show(struct device *dev,
359 struct device_attribute *attr,
360 char *buf)
361{
362 struct usb_interface *iface = to_usb_interface(dev);
363 struct cbaf *cbaf = usb_get_intfdata(iface);
364
365 return scnprintf(buf, PAGE_SIZE, "%s\n", cbaf->host_name);
366}
367
368static ssize_t cbaf_wusb_host_name_store(struct device *dev,
369 struct device_attribute *attr,
370 const char *buf, size_t size)
371{
372 ssize_t result;
373 struct usb_interface *iface = to_usb_interface(dev);
374 struct cbaf *cbaf = usb_get_intfdata(iface);
375
376 result = sscanf(buf, "%63s", cbaf->host_name);
377 if (result != 1)
378 return -EINVAL;
379
380 return size;
381}
382static DEVICE_ATTR(wusb_host_name, 0600, cbaf_wusb_host_name_show,
383 cbaf_wusb_host_name_store);
384
385static ssize_t cbaf_wusb_host_band_groups_show(struct device *dev,
386 struct device_attribute *attr,
387 char *buf)
388{
389 struct usb_interface *iface = to_usb_interface(dev);
390 struct cbaf *cbaf = usb_get_intfdata(iface);
391
392 return scnprintf(buf, PAGE_SIZE, "0x%04x\n", cbaf->host_band_groups);
393}
394
395static ssize_t cbaf_wusb_host_band_groups_store(struct device *dev,
396 struct device_attribute *attr,
397 const char *buf, size_t size)
398{
399 ssize_t result;
400 struct usb_interface *iface = to_usb_interface(dev);
401 struct cbaf *cbaf = usb_get_intfdata(iface);
402 u16 band_groups = 0;
403
404 result = sscanf(buf, "%04hx", &band_groups);
405 if (result != 1)
406 return -EINVAL;
407
408 cbaf->host_band_groups = band_groups;
409
410 return size;
411}
412
413static DEVICE_ATTR(wusb_host_band_groups, 0600,
414 cbaf_wusb_host_band_groups_show,
415 cbaf_wusb_host_band_groups_store);
416
417static const struct wusb_cbaf_device_info cbaf_device_info_defaults = {
418 .Length_hdr = WUSB_AR_Length,
419 .CDID_hdr = WUSB_AR_CDID,
420 .BandGroups_hdr = WUSB_AR_BandGroups,
421 .LangID_hdr = WUSB_AR_LangID,
422 .DeviceFriendlyName_hdr = WUSB_AR_DeviceFriendlyName,
423};
424
425static ssize_t cbaf_wusb_cdid_show(struct device *dev,
426 struct device_attribute *attr, char *buf)
427{
428 struct usb_interface *iface = to_usb_interface(dev);
429 struct cbaf *cbaf = usb_get_intfdata(iface);
430 char pr_cdid[WUSB_CKHDID_STRSIZE];
431
432 ckhdid_printf(pr_cdid, sizeof(pr_cdid), &cbaf->cdid);
433 return scnprintf(buf, PAGE_SIZE, "%s\n", pr_cdid);
434}
435
436static ssize_t cbaf_wusb_cdid_store(struct device *dev,
437 struct device_attribute *attr,
438 const char *buf, size_t size)
439{
440 ssize_t result;
441 struct usb_interface *iface = to_usb_interface(dev);
442 struct cbaf *cbaf = usb_get_intfdata(iface);
443 struct wusb_ckhdid cdid;
444
445 result = sscanf(buf,
446 "%02hhx %02hhx %02hhx %02hhx "
447 "%02hhx %02hhx %02hhx %02hhx "
448 "%02hhx %02hhx %02hhx %02hhx "
449 "%02hhx %02hhx %02hhx %02hhx",
450 &cdid.data[0] , &cdid.data[1],
451 &cdid.data[2] , &cdid.data[3],
452 &cdid.data[4] , &cdid.data[5],
453 &cdid.data[6] , &cdid.data[7],
454 &cdid.data[8] , &cdid.data[9],
455 &cdid.data[10], &cdid.data[11],
456 &cdid.data[12], &cdid.data[13],
457 &cdid.data[14], &cdid.data[15]);
458 if (result != 16)
459 return -EINVAL;
460
461 cbaf->cdid = cdid;
462
463 return size;
464}
465static DEVICE_ATTR(wusb_cdid, 0600, cbaf_wusb_cdid_show, cbaf_wusb_cdid_store);
466
467static ssize_t cbaf_wusb_device_band_groups_show(struct device *dev,
468 struct device_attribute *attr,
469 char *buf)
470{
471 struct usb_interface *iface = to_usb_interface(dev);
472 struct cbaf *cbaf = usb_get_intfdata(iface);
473
474 return scnprintf(buf, PAGE_SIZE, "0x%04x\n", cbaf->device_band_groups);
475}
476
477static DEVICE_ATTR(wusb_device_band_groups, 0600,
478 cbaf_wusb_device_band_groups_show,
479 NULL);
480
481static ssize_t cbaf_wusb_device_name_show(struct device *dev,
482 struct device_attribute *attr,
483 char *buf)
484{
485 struct usb_interface *iface = to_usb_interface(dev);
486 struct cbaf *cbaf = usb_get_intfdata(iface);
487
488 return scnprintf(buf, PAGE_SIZE, "%s\n", cbaf->device_name);
489}
490static DEVICE_ATTR(wusb_device_name, 0600, cbaf_wusb_device_name_show, NULL);
491
492static const struct wusb_cbaf_cc_data cbaf_cc_data_defaults = {
493 .AssociationTypeId_hdr = WUSB_AR_AssociationTypeId,
494 .AssociationTypeId = cpu_to_le16(AR_TYPE_WUSB),
495 .AssociationSubTypeId_hdr = WUSB_AR_AssociationSubTypeId,
496 .AssociationSubTypeId = cpu_to_le16(AR_TYPE_WUSB_ASSOCIATE),
497 .Length_hdr = WUSB_AR_Length,
498 .Length = cpu_to_le32(sizeof(struct wusb_cbaf_cc_data)),
499 .ConnectionContext_hdr = WUSB_AR_ConnectionContext,
500 .BandGroups_hdr = WUSB_AR_BandGroups,
501};
502
503static const struct wusb_cbaf_cc_data_fail cbaf_cc_data_fail_defaults = {
504 .AssociationTypeId_hdr = WUSB_AR_AssociationTypeId,
505 .AssociationSubTypeId_hdr = WUSB_AR_AssociationSubTypeId,
506 .Length_hdr = WUSB_AR_Length,
507 .AssociationStatus_hdr = WUSB_AR_AssociationStatus,
508};
509
510/*
511 * Send a new CC to the device.
512 */
513static int cbaf_cc_upload(struct cbaf *cbaf)
514{
515 int result;
516 struct device *dev = &cbaf->usb_iface->dev;
517 struct wusb_cbaf_cc_data *ccd;
518 char pr_cdid[WUSB_CKHDID_STRSIZE];
519
520 ccd = cbaf->buffer;
521 *ccd = cbaf_cc_data_defaults;
522 ccd->CHID = cbaf->chid;
523 ccd->CDID = cbaf->cdid;
524 ccd->CK = cbaf->ck;
525 ccd->BandGroups = cpu_to_le16(cbaf->host_band_groups);
526
527 dev_dbg(dev, "Trying to upload CC:\n");
528 ckhdid_printf(pr_cdid, sizeof(pr_cdid), &ccd->CHID);
529 dev_dbg(dev, " CHID %s\n", pr_cdid);
530 ckhdid_printf(pr_cdid, sizeof(pr_cdid), &ccd->CDID);
531 dev_dbg(dev, " CDID %s\n", pr_cdid);
532 dev_dbg(dev, " Bandgroups 0x%04x\n", cbaf->host_band_groups);
533
534 result = usb_control_msg(
535 cbaf->usb_dev, usb_sndctrlpipe(cbaf->usb_dev, 0),
536 CBAF_REQ_SET_ASSOCIATION_RESPONSE,
537 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
538 0x0201, cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber,
539 ccd, sizeof(*ccd), 1000 /* FIXME: arbitrary */);
540
541 return result;
542}
543
544static ssize_t cbaf_wusb_ck_store(struct device *dev,
545 struct device_attribute *attr,
546 const char *buf, size_t size)
547{
548 ssize_t result;
549 struct usb_interface *iface = to_usb_interface(dev);
550 struct cbaf *cbaf = usb_get_intfdata(iface);
551
552 result = sscanf(buf,
553 "%02hhx %02hhx %02hhx %02hhx "
554 "%02hhx %02hhx %02hhx %02hhx "
555 "%02hhx %02hhx %02hhx %02hhx "
556 "%02hhx %02hhx %02hhx %02hhx",
557 &cbaf->ck.data[0] , &cbaf->ck.data[1],
558 &cbaf->ck.data[2] , &cbaf->ck.data[3],
559 &cbaf->ck.data[4] , &cbaf->ck.data[5],
560 &cbaf->ck.data[6] , &cbaf->ck.data[7],
561 &cbaf->ck.data[8] , &cbaf->ck.data[9],
562 &cbaf->ck.data[10], &cbaf->ck.data[11],
563 &cbaf->ck.data[12], &cbaf->ck.data[13],
564 &cbaf->ck.data[14], &cbaf->ck.data[15]);
565 if (result != 16)
566 return -EINVAL;
567
568 result = cbaf_cc_upload(cbaf);
569 if (result < 0)
570 return result;
571
572 return size;
573}
574static DEVICE_ATTR(wusb_ck, 0600, NULL, cbaf_wusb_ck_store);
575
576static struct attribute *cbaf_dev_attrs[] = {
577 &dev_attr_wusb_host_name.attr,
578 &dev_attr_wusb_host_band_groups.attr,
579 &dev_attr_wusb_chid.attr,
580 &dev_attr_wusb_cdid.attr,
581 &dev_attr_wusb_device_name.attr,
582 &dev_attr_wusb_device_band_groups.attr,
583 &dev_attr_wusb_ck.attr,
584 NULL,
585};
586
587static struct attribute_group cbaf_dev_attr_group = {
588 .name = NULL, /* we want them in the same directory */
589 .attrs = cbaf_dev_attrs,
590};
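/*
 * Editorial note (not part of the original file): taken together, the
 * attributes above implement the association flow driven from user
 * space; each CHID/CDID/CK value is written as 16 space-separated hex
 * bytes.  A sketch of the expected sequence (sysfs path made up for
 * illustration):
 *
 *	echo "00 11 ... ff" > .../wusb_chid      # cbaf_send_host_info(),
 *	                                         # then cbaf_cdid_get()
 *	cat .../wusb_cdid .../wusb_device_name   # inspect the device's reply
 *	echo "aa bb ... 99" > .../wusb_ck        # cbaf_cc_upload() sends the CC
 */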
591
592static int cbaf_probe(struct usb_interface *iface,
593 const struct usb_device_id *id)
594{
595 struct cbaf *cbaf;
596 struct device *dev = &iface->dev;
597 int result = -ENOMEM;
598
599 cbaf = kzalloc(sizeof(*cbaf), GFP_KERNEL);
600 if (cbaf == NULL)
601 goto error_kzalloc;
602 cbaf->buffer = kmalloc(512, GFP_KERNEL);
603 if (cbaf->buffer == NULL)
604 goto error_kmalloc_buffer;
605
606 cbaf->buffer_size = 512;
607 cbaf->usb_dev = usb_get_dev(interface_to_usbdev(iface));
608 cbaf->usb_iface = usb_get_intf(iface);
609 result = cbaf_check(cbaf);
610 if (result < 0) {
611 dev_err(dev, "This device is not WUSB-CBAF compliant "
612 "and is not supported yet.\n");
613 goto error_check;
614 }
615
616 result = sysfs_create_group(&dev->kobj, &cbaf_dev_attr_group);
617 if (result < 0) {
618 dev_err(dev, "Can't register sysfs attr group: %d\n", result);
619 goto error_create_group;
620 }
621 usb_set_intfdata(iface, cbaf);
622 return 0;
623
624error_create_group:
625error_check:
626 kfree(cbaf->buffer);
627error_kmalloc_buffer:
628 kfree(cbaf);
629error_kzalloc:
630 return result;
631}
632
633static void cbaf_disconnect(struct usb_interface *iface)
634{
635 struct cbaf *cbaf = usb_get_intfdata(iface);
636 struct device *dev = &iface->dev;
637 sysfs_remove_group(&dev->kobj, &cbaf_dev_attr_group);
638 usb_set_intfdata(iface, NULL);
639 usb_put_intf(iface);
640 kfree(cbaf->buffer);
641 /* paranoia: clean up crypto keys */
642 memset(cbaf, 0, sizeof(*cbaf));
643 kfree(cbaf);
644}
645
646static struct usb_device_id cbaf_id_table[] = {
647 { USB_INTERFACE_INFO(0xef, 0x03, 0x01), },
648 { },
649};
650MODULE_DEVICE_TABLE(usb, cbaf_id_table);
651
652static struct usb_driver cbaf_driver = {
653 .name = "wusb-cbaf",
654 .id_table = cbaf_id_table,
655 .probe = cbaf_probe,
656 .disconnect = cbaf_disconnect,
657};
658
659static int __init cbaf_driver_init(void)
660{
661 return usb_register(&cbaf_driver);
662}
663module_init(cbaf_driver_init);
664
665static void __exit cbaf_driver_exit(void)
666{
667 usb_deregister(&cbaf_driver);
668}
669module_exit(cbaf_driver_exit);
670
671MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
672MODULE_DESCRIPTION("Wireless USB Cable Based Association");
673MODULE_LICENSE("GPL");
diff --git a/drivers/usb/wusbcore/crypto.c b/drivers/usb/wusbcore/crypto.c
new file mode 100644
index 000000000000..c36c4389baae
--- /dev/null
+++ b/drivers/usb/wusbcore/crypto.c
@@ -0,0 +1,538 @@
1/*
2 * Ultra Wide Band
3 * AES-128 CCM Encryption
4 *
5 * Copyright (C) 2007 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * We don't do any encryption here; we use the Linux Kernel's AES-128
24 * crypto modules to construct keys and payload blocks in a way
25 * defined by WUSB1.0[6]. Check the errata, as typos are patched
26 * there.
27 *
28 * Thanks a zillion to John Keys for his help and clarifications over
29 * the designed-by-a-committee text.
30 *
31 * So the idea is that there is this basic Pseudo-Random-Function
32 * defined in WUSB1.0[6.5] which is the core of everything. It works
33 * by tweaking some blocks, AES crypting them and then xoring
34 * something else with them (this seems to be called CBC(AES) -- can
35 * you tell I know jack about crypto?). So we just funnel it into the
36 * Linux Crypto API.
37 *
38 * We leave a crypto test module so we can verify that vectors match,
39 * every now and then.
40 *
41 * Block size: 16 bytes -- AES seems to do things in 'block sizes'. I
42 * am learning a lot...
43 *
44 * Conveniently, some data structures that need to be
45 * funneled through AES are...16 bytes in size!
46 */
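/*
 * Editorial sketch (not part of the original file): the PRF described
 * above is just the 8-byte CCM MIC computed over and over with an
 * incrementing SFN in the nonce, the results concatenated.  In
 * pseudo-C (wusb_ccm_mac() is the helper implemented further down;
 * 'len' is the requested output size in bits, as in wusb_prf() below):
 *
 *	for (i = 0; i < (len + 63) / 64; i++) {
 *		nonce.sfn = i;	// pseudo-code: the real code memcpy()s a le64
 *		wusb_ccm_mac(tfm_cbc, tfm_aes, out + 8 * i, &nonce, a, b, blen);
 *	}
 *
 * so PRF-64 is one MIC block, PRF-128 two and PRF-256 four.
 */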
47
48#include <linux/crypto.h>
49#include <linux/module.h>
50#include <linux/err.h>
51#include <linux/uwb.h>
52#include <linux/usb/wusb.h>
53#include <linux/scatterlist.h>
54#define D_LOCAL 0
55#include <linux/uwb/debug.h>
56
57
58/*
59 * Block of data, as understood by AES-CCM
60 *
61 * The code assumes this structure is nothing but a 16 byte array
62 * (packed in a struct to avoid the usual mess-ups with bare arrays
63 * and to enforce type checking).
64 */
65struct aes_ccm_block {
66 u8 data[16];
67} __attribute__((packed));
68
69/*
70 * Counter-mode Blocks (WUSB1.0[6.4])
71 *
72 * According to CCM (or so it seems), for the purpose of calculating
73 * the MIC, the message is broken in N counter-mode blocks, B0, B1,
74 * ... BN.
75 *
76 * B0 contains flags, the CCM nonce and l(m).
77 *
78 * B1 contains l(a), the MAC header, the encryption offset and padding.
79 *
80 * If EO is nonzero, additional blocks are built from payload bytes
81 * until EO is exhausted (FIXME: padding to 16 bytes, I guess). The
82 * padding is not xmitted.
83 */
84
85/* WUSB1.0[T6.4] */
86struct aes_ccm_b0 {
87 u8 flags; /* 0x59, per CCM spec */
88 struct aes_ccm_nonce ccm_nonce;
89 __be16 lm;
90} __attribute__((packed));
91
92/* WUSB1.0[T6.5] */
93struct aes_ccm_b1 {
94 __be16 la;
95 u8 mac_header[10];
96 __le16 eo;
97 u8 security_reserved; /* This is always zero */
98 u8 padding; /* 0 */
99} __attribute__((packed));
100
101/*
102 * Encryption Blocks (WUSB1.0[6.4.4])
103 *
104 * CCM uses Ax blocks to generate a keystream with which the MIC and
105 * the message's payload are encoded. A0 always encrypts/decrypts the
106 * MIC. Ax (x>0) are used for the successive payload blocks.
107 *
108 * The x is the counter, and is increased for each block.
109 */
110struct aes_ccm_a {
111 u8 flags; /* 0x01, per CCM spec */
112 struct aes_ccm_nonce ccm_nonce;
113 __be16 counter; /* Value of x */
114} __attribute__((packed));
115
116static void bytewise_xor(void *_bo, const void *_bi1, const void *_bi2,
117 size_t size)
118{
119 u8 *bo = _bo;
120 const u8 *bi1 = _bi1, *bi2 = _bi2;
121 size_t itr;
122 for (itr = 0; itr < size; itr++)
123 bo[itr] = bi1[itr] ^ bi2[itr];
124}
125
126/*
127 * CC-MAC function WUSB1.0[6.5]
128 *
129 * Take a data string and produce the encrypted CBC Counter-mode MIC
130 *
131 * Note the names for most function arguments are made to (more or
132 * less) match those used in the pseudo-function definition given in
133 * WUSB1.0[6.5].
134 *
135 * @tfm_cbc: CBC(AES) blkcipher handle (initialized)
136 *
137 * @tfm_aes: AES cipher handle (initialized)
138 *
139 * @mic: buffer for placing the computed MIC (Message Integrity
140 * Code). This is exactly 8 bytes, and we expect the buffer to
141 * be at least eight bytes in length.
142 *
143 * @key: 128 bit symmetric key
144 *
145 * @n: CCM nonce
146 *
147 * @a: ASCII string, 14 bytes long (I guess zero padded if needed;
148 * we use exactly 14 bytes).
149 *
150 * @b: data stream to be processed; cannot be a global or const local
151 * (will confuse the scatterlists)
152 *
153 * @blen: size of b...
154 *
155 * Still not very clear how this is done, but looks like this: we
156 * create block B0 (as WUSB1.0[6.5] says), then we AES-crypt it with
157 * @key. We bytewise xor B0 with B1 (1) and AES-crypt that. Then we
158 * take the payload and divide it in blocks (16 bytes), xor them with
159 * the previous crypto result (16 bytes) and crypt it, repeat the next
160 * block with the output of the previous one, rinse wash (I guess this
161 * is what AES CBC mode means...but I truly have no idea). So we use
162 * the CBC(AES) blkcipher, which does precisely that. The IV (Initial
163 * Vector) is 16 bytes and is set to zero at the start of each run.
164 *
165 * See rfc3610. Linux crypto has a CBC implementation, but the
166 * documentation is scarce, to say the least, and the example code is
167 * so intricate that it is difficult to understand how things work.
168 * Most of this is guesswork -- bite me.
169 *
170 * (1) Created as 6.5 says, again, using as l(a) 'Blen + 14', and
171 * using the 14 bytes of @a to fill up
172 * b1.{mac_header,eo,security_reserved,padding}.
173 *
174 * NOTE: The definition of l(a) in WUSB1.0[6.5] vs the definition of
175 * l(m) is orthogonal; they bear no relationship, so it is not
176 * in conflict with the parameter relationship that
177 * WUSB1.0[6.4.2] defines.
178 *
179 * NOTE: WUSB1.0[A.1]: Host Nonce is missing a nibble? (1e); fixed in
180 * first errata released on 2005/07.
181 *
182 * NOTE: we need to clean IV to zero at each invocation to make sure
183 * we start with a fresh empty Initial Vector, so that the CBC
184 * works ok.
185 *
186 * NOTE: if blen is not aligned to a block size, we pad with zeros;
187 * that's what sg[3] is for. Maybe there is a smarter way to do this.
188 */
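/*
 * Editorial sketch (not part of the original source): the chaining the
 * comment above describes, written out with a plain one-block AES
 * primitive.  aes_encrypt_block() is a made-up placeholder, not a
 * kernel API; wusb_ccm_mac() below gets the same effect from the
 * cbc(aes) blkcipher, whose IV ends up holding the last ciphertext
 * block, i.e. the CBC-MAC.
 *
 *	u8 x[16] = { 0 };			// zeroed IV
 *	for each 16-byte block Bi in B0 | B1 | payload | zero-pad {
 *		bytewise_xor(x, x, Bi, 16);	// chain in the next block
 *		aes_encrypt_block(key, x, x);	// x = AES_K(x ^ Bi)
 *	}
 *	// MIC = first 8 bytes of (AES_K(A0) ^ x); the A0 pass is the
 *	// ax/bytewise_xor() step at the end of wusb_ccm_mac().
 */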
189static int wusb_ccm_mac(struct crypto_blkcipher *tfm_cbc,
190 struct crypto_cipher *tfm_aes, void *mic,
191 const struct aes_ccm_nonce *n,
192 const struct aes_ccm_label *a, const void *b,
193 size_t blen)
194{
195 int result = 0;
196 struct blkcipher_desc desc;
197 struct aes_ccm_b0 b0;
198 struct aes_ccm_b1 b1;
199 struct aes_ccm_a ax;
200 struct scatterlist sg[4], sg_dst;
201 void *iv, *dst_buf;
202 size_t ivsize, dst_size;
203 const u8 bzero[16] = { 0 };
204 size_t zero_padding;
205
206 d_fnstart(3, NULL, "(tfm_cbc %p, tfm_aes %p, mic %p, "
207 "n %p, a %p, b %p, blen %zu)\n",
208 tfm_cbc, tfm_aes, mic, n, a, b, blen);
209 /*
210 * These checks should be optimized out at compile time; they
211 * ensure @a fills b1's mac_header and the fields that follow it
212 */
213 WARN_ON(sizeof(*a) != sizeof(b1) - sizeof(b1.la));
214 WARN_ON(sizeof(b0) != sizeof(struct aes_ccm_block));
215 WARN_ON(sizeof(b1) != sizeof(struct aes_ccm_block));
216 WARN_ON(sizeof(ax) != sizeof(struct aes_ccm_block));
217
218 result = -ENOMEM;
219 /* Pad the payload out to a whole number of AES blocks; the padding
220  * bytes themselves are zero (see bzero and sg[3] below). */
221 zero_padding = blen % sizeof(struct aes_ccm_block);
222 if (zero_padding)
223 zero_padding = sizeof(struct aes_ccm_block) - zero_padding;
224 dst_size = blen + sizeof(b0) + sizeof(b1) + zero_padding;
225 dst_buf = kzalloc(dst_size, GFP_KERNEL);
226 if (dst_buf == NULL) {
227 printk(KERN_ERR "E: can't alloc destination buffer\n");
228 goto error_dst_buf;
229 }
230
231 iv = crypto_blkcipher_crt(tfm_cbc)->iv;
232 ivsize = crypto_blkcipher_ivsize(tfm_cbc);
233 memset(iv, 0, ivsize);
234
235 /* Setup B0 */
236 b0.flags = 0x59; /* Format B0 */
237 b0.ccm_nonce = *n;
238 b0.lm = cpu_to_be16(0); /* WUSB1.0[6.5] sez l(m) is 0 */
239
240 /* Setup B1
241 *
242 * The WUSB spec is anything but clear! WUSB1.0[6.5]
243 * says to initialize B1 from A with 'l(a) = blen + 14';
244 * after clarification, it means to use A's contents
245 * for the MAC header, EO, security reserved and padding fields.
246 */
247 b1.la = cpu_to_be16(blen + 14);
248 memcpy(&b1.mac_header, a, sizeof(*a));
249
250 d_printf(4, NULL, "I: B0 (%zu bytes)\n", sizeof(b0));
251 d_dump(4, NULL, &b0, sizeof(b0));
252 d_printf(4, NULL, "I: B1 (%zu bytes)\n", sizeof(b1));
253 d_dump(4, NULL, &b1, sizeof(b1));
254 d_printf(4, NULL, "I: B (%zu bytes)\n", blen);
255 d_dump(4, NULL, b, blen);
256 d_printf(4, NULL, "I: B 0-padding (%zu bytes)\n", zero_padding);
257 d_printf(4, NULL, "D: IV before crypto (%zu)\n", ivsize);
258 d_dump(4, NULL, iv, ivsize);
259
260 sg_init_table(sg, ARRAY_SIZE(sg));
261 sg_set_buf(&sg[0], &b0, sizeof(b0));
262 sg_set_buf(&sg[1], &b1, sizeof(b1));
263 sg_set_buf(&sg[2], b, blen);
264 /* 0 if well behaved :) */
265 sg_set_buf(&sg[3], bzero, zero_padding);
266 sg_init_one(&sg_dst, dst_buf, dst_size);
267
268 desc.tfm = tfm_cbc;
269 desc.flags = 0;
270 result = crypto_blkcipher_encrypt(&desc, &sg_dst, sg, dst_size);
271 if (result < 0) {
272 printk(KERN_ERR "E: can't compute CBC-MAC tag (MIC): %d\n",
273 result);
274 goto error_cbc_crypt;
275 }
276 d_printf(4, NULL, "D: MIC tag\n");
277 d_dump(4, NULL, iv, ivsize);
278
279 /* Now we crypt the MIC Tag (*iv) with Ax -- values per WUSB1.0[6.5].
280 * The procedure is to AES-crypt the A0 block and XOR the MIC
281 * Tag against it; we only take the first 8 bytes and place them
282 * directly in the caller's MIC buffer.
283 *
284 * POS Crypto API: size is assumed to be AES's block size.
285 * Thanks for documenting it -- tip taken from airo.c
286 */
287 ax.flags = 0x01; /* as per WUSB 1.0 spec */
288 ax.ccm_nonce = *n;
289 ax.counter = 0;
290 crypto_cipher_encrypt_one(tfm_aes, (void *)&ax, (void *)&ax);
291 bytewise_xor(mic, &ax, iv, 8);
292 d_printf(4, NULL, "D: CTR[MIC]\n");
293 d_dump(4, NULL, &ax, 8);
294 d_printf(4, NULL, "D: CCM-MIC tag\n");
295 d_dump(4, NULL, mic, 8);
296 result = 8;
297error_cbc_crypt:
298 kfree(dst_buf);
299error_dst_buf:
300 d_fnend(3, NULL, "(tfm_cbc %p, tfm_aes %p, mic %p, "
301 "n %p, a %p, b %p, blen %zu)\n",
302 tfm_cbc, tfm_aes, mic, n, a, b, blen);
303 return result;
304}
305
306/*
307 * WUSB Pseudo Random Function (WUSB1.0[6.5])
308 *
309 * @b: buffer to the source data; cannot be a global or const local
310 * (will confuse the scatterlists)
311 */
312ssize_t wusb_prf(void *out, size_t out_size,
313 const u8 key[16], const struct aes_ccm_nonce *_n,
314 const struct aes_ccm_label *a,
315 const void *b, size_t blen, size_t len)
316{
317 ssize_t result, bytes = 0, bitr;
318 struct aes_ccm_nonce n = *_n;
319 struct crypto_blkcipher *tfm_cbc;
320 struct crypto_cipher *tfm_aes;
321 u64 sfn = 0;
322 __le64 sfn_le;
323
324 d_fnstart(3, NULL, "(out %p, out_size %zu, key %p, _n %p, "
325 "a %p, b %p, blen %zu, len %zu)\n", out, out_size,
326 key, _n, a, b, blen, len);
327
328 tfm_cbc = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
329 if (IS_ERR(tfm_cbc)) {
330 result = PTR_ERR(tfm_cbc);
331 printk(KERN_ERR "E: can't load CBC(AES): %d\n", (int)result);
332 goto error_alloc_cbc;
333 }
334 result = crypto_blkcipher_setkey(tfm_cbc, key, 16);
335 if (result < 0) {
336 printk(KERN_ERR "E: can't set CBC key: %d\n", (int)result);
337 goto error_setkey_cbc;
338 }
339
340 tfm_aes = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
341 if (IS_ERR(tfm_aes)) {
342 result = PTR_ERR(tfm_aes);
343 printk(KERN_ERR "E: can't load AES: %d\n", (int)result);
344 goto error_alloc_aes;
345 }
346 result = crypto_cipher_setkey(tfm_aes, key, 16);
347 if (result < 0) {
348 printk(KERN_ERR "E: can't set AES key: %d\n", (int)result);
349 goto error_setkey_aes;
350 }
351
352 for (bitr = 0; bitr < (len + 63) / 64; bitr++) {
353 sfn_le = cpu_to_le64(sfn++);
354 memcpy(&n.sfn, &sfn_le, sizeof(n.sfn)); /* n.sfn++... */
355 result = wusb_ccm_mac(tfm_cbc, tfm_aes, out + bytes,
356 &n, a, b, blen);
357 if (result < 0)
358 goto error_ccm_mac;
359 bytes += result;
360 }
361 result = bytes;
362error_ccm_mac:
363error_setkey_aes:
364 crypto_free_cipher(tfm_aes);
365error_alloc_aes:
366error_setkey_cbc:
367 crypto_free_blkcipher(tfm_cbc);
368error_alloc_cbc:
369 d_fnend(3, NULL, "(out %p, out_size %zu, key %p, _n %p, "
370 "a %p, b %p, blen %zu, len %zu) = %d\n", out, out_size,
371 key, _n, a, b, blen, len, (int)bytes);
372 return result;
373}
374
375/* WUSB1.0[A.2] test vectors */
376static const u8 stv_hsmic_key[16] = {
377 0x4b, 0x79, 0xa3, 0xcf, 0xe5, 0x53, 0x23, 0x9d,
378 0xd7, 0xc1, 0x6d, 0x1c, 0x2d, 0xab, 0x6d, 0x3f
379};
380
381static const struct aes_ccm_nonce stv_hsmic_n = {
382 .sfn = { 0 },
383 .tkid = { 0x76, 0x98, 0x01, },
384 .dest_addr = { .data = { 0xbe, 0x00 } },
385 .src_addr = { .data = { 0x76, 0x98 } },
386};
387
388/*
389 * Out-of-band MIC generation verification code
390 */
392static int wusb_oob_mic_verify(void)
393{
394 int result;
395 u8 mic[8];
396 /* WUSB1.0[A.2] test vectors
397 *
398 * Need to keep it on the local stack as GCC 4.1.3-something
399 * messes up and generates noise.
400 */
401 struct usb_handshake stv_hsmic_hs = {
402 .bMessageNumber = 2,
403 .bStatus = 00,
404 .tTKID = { 0x76, 0x98, 0x01 },
405 .bReserved = 00,
406 .CDID = { 0x30, 0x31, 0x32, 0x33, 0x34, 0x35,
407 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b,
408 0x3c, 0x3d, 0x3e, 0x3f },
409 .nonce = { 0x20, 0x21, 0x22, 0x23, 0x24, 0x25,
410 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b,
411 0x2c, 0x2d, 0x2e, 0x2f },
412 .MIC = { 0x75, 0x6a, 0x97, 0x51, 0x0c, 0x8c,
413 0x14, 0x7b } ,
414 };
415 size_t hs_size;
416
417 result = wusb_oob_mic(mic, stv_hsmic_key, &stv_hsmic_n, &stv_hsmic_hs);
418 if (result < 0)
419 printk(KERN_ERR "E: WUSB OOB MIC test: failed: %d\n", result);
420 else if (memcmp(stv_hsmic_hs.MIC, mic, sizeof(mic))) {
421 printk(KERN_ERR "E: OOB MIC test: "
422 "mismatch between MIC result and WUSB1.0[A2]\n");
423 hs_size = sizeof(stv_hsmic_hs) - sizeof(stv_hsmic_hs.MIC);
424 printk(KERN_ERR "E: Handshake2 in: (%zu bytes)\n", hs_size);
425 dump_bytes(NULL, &stv_hsmic_hs, hs_size);
426 printk(KERN_ERR "E: CCM Nonce in: (%zu bytes)\n",
427 sizeof(stv_hsmic_n));
428 dump_bytes(NULL, &stv_hsmic_n, sizeof(stv_hsmic_n));
429 printk(KERN_ERR "E: MIC out:\n");
430 dump_bytes(NULL, mic, sizeof(mic));
431 printk(KERN_ERR "E: MIC out (from WUSB1.0[A.2]):\n");
432 dump_bytes(NULL, stv_hsmic_hs.MIC, sizeof(stv_hsmic_hs.MIC));
433 result = -EINVAL;
434 } else
435 result = 0;
436 return result;
437}
438
439/*
440 * Test vectors for Key derivation
441 *
442 * These come from WUSB1.0[6.5.1], the vectors in WUSB1.0[A.1]
443 * (errata corrected in 2005/07).
444 */
445static const u8 stv_key_a1[16] __attribute__ ((__aligned__(4))) = {
446 0xf0, 0xe1, 0xd2, 0xc3, 0xb4, 0xa5, 0x96, 0x87,
447 0x78, 0x69, 0x5a, 0x4b, 0x3c, 0x2d, 0x1e, 0x0f
448};
449
450static const struct aes_ccm_nonce stv_keydvt_n_a1 = {
451 .sfn = { 0 },
452 .tkid = { 0x76, 0x98, 0x01, },
453 .dest_addr = { .data = { 0xbe, 0x00 } },
454 .src_addr = { .data = { 0x76, 0x98 } },
455};
456
457static const struct wusb_keydvt_out stv_keydvt_out_a1 = {
458 .kck = {
459 0x4b, 0x79, 0xa3, 0xcf, 0xe5, 0x53, 0x23, 0x9d,
460 0xd7, 0xc1, 0x6d, 0x1c, 0x2d, 0xab, 0x6d, 0x3f
461 },
462 .ptk = {
463 0xc8, 0x70, 0x62, 0x82, 0xb6, 0x7c, 0xe9, 0x06,
464 0x7b, 0xc5, 0x25, 0x69, 0xf2, 0x36, 0x61, 0x2d
465 }
466};
467
468/*
469 * Perform a test to make sure we match the vectors defined in
470 * WUSB1.0[A.1] (Errata 2006/12)
471 */
472static int wusb_key_derive_verify(void)
473{
474 int result = 0;
475 struct wusb_keydvt_out keydvt_out;
476 /* These come from WUSB1.0[A.1] + the 2006/12 errata.
477 * NOTE: can't make this const or global -- somehow it seems
478 * the scatterlists for crypto get confused and we get
479 * bad data. There is no doc on this... */
480 struct wusb_keydvt_in stv_keydvt_in_a1 = {
481 .hnonce = {
482 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
483 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f
484 },
485 .dnonce = {
486 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
487 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f
488 }
489 };
490
491 result = wusb_key_derive(&keydvt_out, stv_key_a1, &stv_keydvt_n_a1,
492 &stv_keydvt_in_a1);
493 if (result < 0)
494 printk(KERN_ERR "E: WUSB key derivation test: "
495 "derivation failed: %d\n", result);
496 if (memcmp(&stv_keydvt_out_a1, &keydvt_out, sizeof(keydvt_out))) {
497 printk(KERN_ERR "E: WUSB key derivation test: "
498 "mismatch between key derivation result "
499 "and WUSB1.0[A1] Errata 2006/12\n");
500 printk(KERN_ERR "E: keydvt in: key (%zu bytes)\n",
501 sizeof(stv_key_a1));
502 dump_bytes(NULL, stv_key_a1, sizeof(stv_key_a1));
503 printk(KERN_ERR "E: keydvt in: nonce (%zu bytes)\n",
504 sizeof(stv_keydvt_n_a1));
505 dump_bytes(NULL, &stv_keydvt_n_a1, sizeof(stv_keydvt_n_a1));
506 printk(KERN_ERR "E: keydvt in: hnonce & dnonce (%zu bytes)\n",
507 sizeof(stv_keydvt_in_a1));
508 dump_bytes(NULL, &stv_keydvt_in_a1, sizeof(stv_keydvt_in_a1));
509 printk(KERN_ERR "E: keydvt out: KCK\n");
510 dump_bytes(NULL, &keydvt_out.kck, sizeof(keydvt_out.kck));
511 printk(KERN_ERR "E: keydvt out: PTK\n");
512 dump_bytes(NULL, &keydvt_out.ptk, sizeof(keydvt_out.ptk));
513 result = -EINVAL;
514 } else
515 result = 0;
516 return result;
517}
518
519/*
520 * Initialize crypto system
521 *
522 * FIXME: we do nothing now, other than verifying. Later on we'll
523 * cache the encryption stuff, so that's why we have a separate init.
524 */
525int wusb_crypto_init(void)
526{
527 int result;
528
529 result = wusb_key_derive_verify();
530 if (result < 0)
531 return result;
532 return wusb_oob_mic_verify();
533}
534
535void wusb_crypto_exit(void)
536{
537 /* FIXME: free cached crypto transforms */
538}
diff --git a/drivers/usb/wusbcore/dev-sysfs.c b/drivers/usb/wusbcore/dev-sysfs.c
new file mode 100644
index 000000000000..7897a19652e5
--- /dev/null
+++ b/drivers/usb/wusbcore/dev-sysfs.c
@@ -0,0 +1,143 @@
1/*
2 * WUSB devices
3 * sysfs bindings
4 *
5 * Copyright (C) 2007 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * Get them out of the way...
24 */
25
26#include <linux/jiffies.h>
27#include <linux/ctype.h>
28#include <linux/workqueue.h>
29#include "wusbhc.h"
30
31#undef D_LOCAL
32#define D_LOCAL 4
33#include <linux/uwb/debug.h>
34
35static ssize_t wusb_disconnect_store(struct device *dev,
36 struct device_attribute *attr,
37 const char *buf, size_t size)
38{
39 struct usb_device *usb_dev;
40 struct wusbhc *wusbhc;
41 unsigned command;
42 u8 port_idx;
43
44 if (sscanf(buf, "%u", &command) != 1)
45 return -EINVAL;
46 if (command == 0)
47 return size;
48 usb_dev = to_usb_device(dev);
49 wusbhc = wusbhc_get_by_usb_dev(usb_dev);
50 if (wusbhc == NULL)
51 return -ENODEV;
52
53 mutex_lock(&wusbhc->mutex);
54 port_idx = wusb_port_no_to_idx(usb_dev->portnum);
55 __wusbhc_dev_disable(wusbhc, port_idx);
56 mutex_unlock(&wusbhc->mutex);
57 wusbhc_put(wusbhc);
58 return size;
59}
60static DEVICE_ATTR(wusb_disconnect, 0200, NULL, wusb_disconnect_store);
61
62static ssize_t wusb_cdid_show(struct device *dev,
63 struct device_attribute *attr, char *buf)
64{
65 ssize_t result;
66 struct wusb_dev *wusb_dev;
67
68 wusb_dev = wusb_dev_get_by_usb_dev(to_usb_device(dev));
69 if (wusb_dev == NULL)
70 return -ENODEV;
71 result = ckhdid_printf(buf, PAGE_SIZE, &wusb_dev->cdid);
72 strcat(buf, "\n");
73 wusb_dev_put(wusb_dev);
74 return result + 1;
75}
76static DEVICE_ATTR(wusb_cdid, 0444, wusb_cdid_show, NULL);
77
78static ssize_t wusb_ck_store(struct device *dev,
79 struct device_attribute *attr,
80 const char *buf, size_t size)
81{
82 int result;
83 struct usb_device *usb_dev;
84 struct wusbhc *wusbhc;
85 struct wusb_ckhdid ck;
86
87 result = sscanf(buf,
88 "%02hhx %02hhx %02hhx %02hhx "
89 "%02hhx %02hhx %02hhx %02hhx "
90 "%02hhx %02hhx %02hhx %02hhx "
91 "%02hhx %02hhx %02hhx %02hhx\n",
92 &ck.data[0] , &ck.data[1],
93 &ck.data[2] , &ck.data[3],
94 &ck.data[4] , &ck.data[5],
95 &ck.data[6] , &ck.data[7],
96 &ck.data[8] , &ck.data[9],
97 &ck.data[10], &ck.data[11],
98 &ck.data[12], &ck.data[13],
99 &ck.data[14], &ck.data[15]);
100 if (result != 16)
101 return -EINVAL;
102
103 usb_dev = to_usb_device(dev);
104 wusbhc = wusbhc_get_by_usb_dev(usb_dev);
105 if (wusbhc == NULL)
106 return -ENODEV;
107 result = wusb_dev_4way_handshake(wusbhc, usb_dev->wusb_dev, &ck);
108 memset(&ck, 0, sizeof(ck));
109 wusbhc_put(wusbhc);
110 return result < 0 ? result : size;
111}
112static DEVICE_ATTR(wusb_ck, 0200, NULL, wusb_ck_store);
113
114static struct attribute *wusb_dev_attrs[] = {
115 &dev_attr_wusb_disconnect.attr,
116 &dev_attr_wusb_cdid.attr,
117 &dev_attr_wusb_ck.attr,
118 NULL,
119};
120
121static struct attribute_group wusb_dev_attr_group = {
122 .name = NULL, /* we want them in the same directory */
123 .attrs = wusb_dev_attrs,
124};
125
126int wusb_dev_sysfs_add(struct wusbhc *wusbhc, struct usb_device *usb_dev,
127 struct wusb_dev *wusb_dev)
128{
129 int result = sysfs_create_group(&usb_dev->dev.kobj,
130 &wusb_dev_attr_group);
131 struct device *dev = &usb_dev->dev;
132 if (result < 0)
133 dev_err(dev, "Cannot register WUSB-dev attributes: %d\n",
134 result);
135 return result;
136}
137
138void wusb_dev_sysfs_rm(struct wusb_dev *wusb_dev)
139{
140 struct usb_device *usb_dev = wusb_dev->usb_dev;
141 if (usb_dev)
142 sysfs_remove_group(&usb_dev->dev.kobj, &wusb_dev_attr_group);
143}
diff --git a/drivers/usb/wusbcore/devconnect.c b/drivers/usb/wusbcore/devconnect.c
new file mode 100644
index 000000000000..f45d777bef34
--- /dev/null
+++ b/drivers/usb/wusbcore/devconnect.c
@@ -0,0 +1,1297 @@
1/*
2 * WUSB Wire Adapter: Control/Data Streaming Interface (WUSB[8])
3 * Device Connect handling
4 *
5 * Copyright (C) 2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * FIXME: docs
24 * FIXME: this file needs to be broken up, it's grown too big
25 *
26 *
27 * WUSB1.0[7.1, 7.5.1, ]
28 *
29 * WUSB device connection is kind of messy. Some background:
30 *
31 * When a device wants to connect it scans the UWB radio channels
32 * looking for a WUSB Channel; a WUSB channel is defined by MMCs
33 * (Micro-scheduled Management Commands) [see Design-overview for
34 * more on this].
35 *
36 * So the device scans the radio, finds MMCs and thus a host, and
37 * checks when the next DNTS is. It sends a Device Notification
38 * Connect (DN_Connect); the host picks it up (through nep.c and
39 * notif.c, ending up in wusbhc_devconnect_ack(), which creates a
40 * wusb_dev structure in wusbhc->port[port_number].wusb_dev), assigns
41 * an unauth address to the device (this means from 0x80 to 0xfe) and
42 * sends, in the MMC, a Connect Ack Information Element (ConnAck IE).
43 *
44 * So now the device has a WUSB address. From now on, we use that to
45 * talk to it in the RPipes.
46 *
47 * ASSUMPTIONS:
48 *
49 * - We use as the device address the port number where it is
50 * connected (port 0 doesn't exist). For unauth, it is 128 + that.
51 *
52 * ROADMAP:
53 *
54 * This file contains the logic for doing that--entry points:
55 *
56 * wusbhc_devconnect_ack() Ack a device until _acked() called.
57 * Called by notif.c:wusb_handle_dn_connect()
58 * when a DN_Connect is received.
59 *
60 * wusbhc_devconnect_auth() Called by rh.c:wusbhc_rh_port_reset() when
61 * doing the device connect sequence.
62 *
63 * wusbhc_devconnect_acked() Ack done, release resources.
64 *
65 * wusbhc_handle_dn_alive() Called by notif.c:wusb_handle_dn()
66 * for processing a DN_Alive pong from a device.
67 *
68 * wusbhc_handle_dn_disconnect() Called by notif.c:wusb_handle_dn() to
69 * process a disconnect request from a
70 * device.
71 *
72 * wusbhc_dev_reset() Called by rh.c:wusbhc_rh_port_reset() when
73 * resetting a device.
74 *
75 * __wusbhc_dev_disable() Called by rh.c:wusbhc_rh_clear_port_feat() when
76 * disabling a port.
77 *
78 * wusb_devconnect_create() Called when creating the host by
79 * lc.c:wusbhc_create().
80 *
81 * wusb_devconnect_destroy() Cleanup called removing the host. Called
82 * by lc.c:wusbhc_destroy().
83 *
84 * Each Wireless USB host maintains a list of DN_Connect requests
85 * (actually we maintain a list of pending Connect Acks, the
86 * wusbhc->cack_list).
87 *
88 * LIFE CYCLE OF port->wusb_dev
89 *
90 * Before the @wusbhc structure put()s the reference it owns for
91 * port->wusb_dev [and cleans the wusb_dev pointer], it needs to
92 * lock @wusbhc->mutex.
93 */
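/*
 * Editorial example (not in the original file; derived from
 * wusbhc_cack_add() and wusbhc_devconnect_ack() below, assuming
 * WUSB_DEV_ADDR_UNAUTH is 0x80): a device ack'ed on zero-based port
 * index 3 first gets the unauthenticated address (3 + 2) | 0x80 = 0x85,
 * and once authenticated is expected to end up at plain address
 * 3 + 2 = 5 (address 1 belongs to the root hub).
 */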
94
95#include <linux/jiffies.h>
96#include <linux/ctype.h>
97#include <linux/workqueue.h>
98#include "wusbhc.h"
99
100#undef D_LOCAL
101#define D_LOCAL 1
102#include <linux/uwb/debug.h>
103
104static void wusbhc_devconnect_acked_work(struct work_struct *work);
105
106static void wusb_dev_free(struct wusb_dev *wusb_dev)
107{
108 if (wusb_dev) {
109 kfree(wusb_dev->set_gtk_req);
110 usb_free_urb(wusb_dev->set_gtk_urb);
111 kfree(wusb_dev);
112 }
113}
114
115static struct wusb_dev *wusb_dev_alloc(struct wusbhc *wusbhc)
116{
117 struct wusb_dev *wusb_dev;
118 struct urb *urb;
119 struct usb_ctrlrequest *req;
120
121 wusb_dev = kzalloc(sizeof(*wusb_dev), GFP_KERNEL);
122 if (wusb_dev == NULL)
123 goto err;
124
125 wusb_dev->wusbhc = wusbhc;
126
127 INIT_WORK(&wusb_dev->devconnect_acked_work, wusbhc_devconnect_acked_work);
128
129 urb = usb_alloc_urb(0, GFP_KERNEL);
130 if (urb == NULL)
131 goto err;
132
133 req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL);
134 if (req == NULL)
135 goto err;
136
137 req->bRequestType = USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE;
138 req->bRequest = USB_REQ_SET_DESCRIPTOR;
139 req->wValue = cpu_to_le16(USB_DT_KEY << 8 | wusbhc->gtk_index);
140 req->wIndex = 0;
141 req->wLength = cpu_to_le16(wusbhc->gtk.descr.bLength);
142
143 wusb_dev->set_gtk_urb = urb;
144 wusb_dev->set_gtk_req = req;
145
146 return wusb_dev;
147err:
148 wusb_dev_free(wusb_dev);
149 return NULL;
150}
151
152
153/*
154 * Using the Connect-Ack list, fill out the @wusbhc Connect-Ack WUSB IE
155 * properly so that it can be added to the MMC.
156 *
157 * We just walk the @wusbhc->cack_list and fill in the first four entries
158 * or fewer (per spec, WUSB1.0[7.5, before T7-38]). If the ConnectAck WUSB
159 * IE is not allocated, we alloc it.
160 *
161 * @wusbhc->mutex must be taken
162 */
163static void wusbhc_fill_cack_ie(struct wusbhc *wusbhc)
164{
165 unsigned cnt;
166 struct wusb_dev *dev_itr;
167 struct wuie_connect_ack *cack_ie;
168
169 cack_ie = &wusbhc->cack_ie;
170 cnt = 0;
171 list_for_each_entry(dev_itr, &wusbhc->cack_list, cack_node) {
172 cack_ie->blk[cnt].CDID = dev_itr->cdid;
173 cack_ie->blk[cnt].bDeviceAddress = dev_itr->addr;
174 if (++cnt >= WUIE_ELT_MAX)
175 break;
176 }
177 cack_ie->hdr.bLength = sizeof(cack_ie->hdr)
178 + cnt * sizeof(cack_ie->blk[0]);
179}
180
181/*
182 * Register a new device that wants to connect
183 *
184 * A new device wants to connect, so we add it to the Connect-Ack
185 * list. We give it an address in the unauthorized range (bit 7 set);
186 * user space will have to drive authorization further on.
187 *
188 * @port_idx: index of the (fake) port the device hangs off; the device
189 * address is derived from it (port_idx + 2, with the unauth bit set).
190 *
191 * @wusbhc->mutex must be taken
192 */
193static struct wusb_dev *wusbhc_cack_add(struct wusbhc *wusbhc,
194 struct wusb_dn_connect *dnc,
195 const char *pr_cdid, u8 port_idx)
196{
197 struct device *dev = wusbhc->dev;
198 struct wusb_dev *wusb_dev;
199 int new_connection = wusb_dn_connect_new_connection(dnc);
200 u8 dev_addr;
201 int result;
202
203 /* Is it registered already? */
204 list_for_each_entry(wusb_dev, &wusbhc->cack_list, cack_node)
205 if (!memcmp(&wusb_dev->cdid, &dnc->CDID,
206 sizeof(wusb_dev->cdid)))
207 return wusb_dev;
208 /* We don't have it, create an entry, register it */
209 wusb_dev = wusb_dev_alloc(wusbhc);
210 if (wusb_dev == NULL)
211 return NULL;
212 wusb_dev_init(wusb_dev);
213 wusb_dev->cdid = dnc->CDID;
214 wusb_dev->port_idx = port_idx;
215
216 /*
217 * Devices are always available within the cluster reservation
218 * and since the hardware will take the intersection of the
219 * per-device availability and the cluster reservation, the
220 * per-device availability can simply be set to always
221 * available.
222 */
223 bitmap_fill(wusb_dev->availability.bm, UWB_NUM_MAS);
224
225 /* FIXME: handle reconnects instead of assuming connects are
226 always new. */
227 if (1 && new_connection == 0)
228 new_connection = 1;
229 if (new_connection) {
230 dev_addr = (port_idx + 2) | WUSB_DEV_ADDR_UNAUTH;
231
232 dev_info(dev, "Connecting new WUSB device to address %u, "
233 "port %u\n", dev_addr, port_idx);
234
235 result = wusb_set_dev_addr(wusbhc, wusb_dev, dev_addr);
236 if (result < 0)
237 return NULL;
238 }
239 wusb_dev->entry_ts = jiffies;
240 list_add_tail(&wusb_dev->cack_node, &wusbhc->cack_list);
241 wusbhc->cack_count++;
242 wusbhc_fill_cack_ie(wusbhc);
243 return wusb_dev;
244}
245
246/*
247 * Remove a Connect-Ack context entry from the HCs view
248 *
249 * @wusbhc->mutex must be taken
250 */
251static void wusbhc_cack_rm(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
252{
253 struct device *dev = wusbhc->dev;
254 d_fnstart(3, dev, "(wusbhc %p wusb_dev %p)\n", wusbhc, wusb_dev);
255 list_del_init(&wusb_dev->cack_node);
256 wusbhc->cack_count--;
257 wusbhc_fill_cack_ie(wusbhc);
258 d_fnend(3, dev, "(wusbhc %p wusb_dev %p) = void\n", wusbhc, wusb_dev);
259}
260
261/*
262 * @wusbhc->mutex must be taken */
263static
264void wusbhc_devconnect_acked(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
265{
266 struct device *dev = wusbhc->dev;
267 d_fnstart(3, dev, "(wusbhc %p wusb_dev %p)\n", wusbhc, wusb_dev);
268 wusbhc_cack_rm(wusbhc, wusb_dev);
269 if (wusbhc->cack_count)
270 wusbhc_mmcie_set(wusbhc, 0, 0, &wusbhc->cack_ie.hdr);
271 else
272 wusbhc_mmcie_rm(wusbhc, &wusbhc->cack_ie.hdr);
273 d_fnend(3, dev, "(wusbhc %p wusb_dev %p) = void\n", wusbhc, wusb_dev);
274}
275
276static void wusbhc_devconnect_acked_work(struct work_struct *work)
277{
278 struct wusb_dev *wusb_dev = container_of(work, struct wusb_dev,
279 devconnect_acked_work);
280 struct wusbhc *wusbhc = wusb_dev->wusbhc;
281
282 mutex_lock(&wusbhc->mutex);
283 wusbhc_devconnect_acked(wusbhc, wusb_dev);
284 mutex_unlock(&wusbhc->mutex);
285}
286
287/*
288 * Ack a device for connection
289 *
290 * FIXME: docs
291 *
292 * @pr_cdid: Printable CDID (in hex). Use @dnc->CDID for the real deal.
293 *
294 * So we get the connect ack IE (may have been allocated already),
295 * find an empty connect block, an empty virtual port, create an
296 * address with it (see below), make it an unauth addr [bit 7 set] and
297 * set the MMC.
298 *
299 * Addresses: because WUSB hosts have no downstream hubs, we can do a
300 * 1:1 mapping between 'port number' and device
301 * address. This simplifies many things, as during this
302 * initial connect phase the USB stack has no knowledge of
303 * the device and hasn't assigned an address yet--we know
304 * USB's choose_address() will use the same heuristics we
305 * use here, so we can assume which address will be assigned.
306 *
307 * USB stack always assigns address 1 to the root hub, so
308 * to the port number we add 2 (thus virtual port #0 is
309 * addr #2).
310 *
311 * @wusbhc shall be referenced
312 */
313static
314void wusbhc_devconnect_ack(struct wusbhc *wusbhc, struct wusb_dn_connect *dnc,
315 const char *pr_cdid)
316{
317 int result;
318 struct device *dev = wusbhc->dev;
319 struct wusb_dev *wusb_dev;
320 struct wusb_port *port;
321 unsigned idx, devnum;
322
323 d_fnstart(3, dev, "(%p, %p, %s)\n", wusbhc, dnc, pr_cdid);
324 mutex_lock(&wusbhc->mutex);
325
326 /* Check we are not handling it already */
327 for (idx = 0; idx < wusbhc->ports_max; idx++) {
328 port = wusb_port_by_idx(wusbhc, idx);
329 if (port->wusb_dev
330 && memcmp(&dnc->CDID, &port->wusb_dev->cdid, sizeof(dnc->CDID)) == 0)
331 goto error_unlock;
332 }
333 /* Look up those fake ports we have for a free one */
334 for (idx = 0; idx < wusbhc->ports_max; idx++) {
335 port = wusb_port_by_idx(wusbhc, idx);
336 if ((port->status & USB_PORT_STAT_POWER)
337 && !(port->status & USB_PORT_STAT_CONNECTION))
338 break;
339 }
340 if (idx >= wusbhc->ports_max) {
341 dev_err(dev, "Host controller can't connect more devices "
342 "(%u already connected); device %s rejected\n",
343 wusbhc->ports_max, pr_cdid);
344 /* NOTE: we could send a WUIE_Disconnect here, but we haven't
345 * even acked, so the device will eventually time out the
346 * connection, right? */
347 goto error_unlock;
348 }
349
350 devnum = idx + 2;
351
352 /* Make sure we are using no crypto on that "virtual port" */
353 wusbhc->set_ptk(wusbhc, idx, 0, NULL, 0);
354
355 /* Grab a filled in Connect-Ack context, fill out the
356 * Connect-Ack Wireless USB IE, set the MMC */
357 wusb_dev = wusbhc_cack_add(wusbhc, dnc, pr_cdid, idx);
358 if (wusb_dev == NULL)
359 goto error_unlock;
360 result = wusbhc_mmcie_set(wusbhc, 0, 0, &wusbhc->cack_ie.hdr);
361 if (result < 0)
362 goto error_unlock;
363 /* Give the device at least 2ms (WUSB1.0[7.5.1p3]), let's do
364 * three for good measure */
365 msleep(3);
366 port->wusb_dev = wusb_dev;
367 port->status |= USB_PORT_STAT_CONNECTION;
368 port->change |= USB_PORT_STAT_C_CONNECTION;
369 port->reset_count = 0;
370 /* Now the port status changed to connected; khubd will
371 * pick the change up and try to reset the port to bring it to
372 * the enabled state--so this process returns up to the stack
373 * and it calls back into wusbhc_rh_port_reset(), which will call
374 * devconnect_auth().
375 */
376error_unlock:
377 mutex_unlock(&wusbhc->mutex);
378 d_fnend(3, dev, "(%p, %p, %s) = void\n", wusbhc, dnc, pr_cdid);
379 return;
380
381}
382
383/*
384 * Disconnect a Wireless USB device from its fake port
385 *
386 * Marks the port as disconnected so that khubd can pick up the change
387 * and drops our knowledge about the device.
388 *
389 * Assumes there is a device connected
390 *
391 * @port_index: zero based port number
392 *
393 * NOTE: @wusbhc->mutex is locked
394 *
395 * WARNING: From here it is not very safe to access anything hanging off
396 * wusb_dev
397 */
398static void __wusbhc_dev_disconnect(struct wusbhc *wusbhc,
399 struct wusb_port *port)
400{
401 struct device *dev = wusbhc->dev;
402 struct wusb_dev *wusb_dev = port->wusb_dev;
403
404 d_fnstart(3, dev, "(wusbhc %p, port %p)\n", wusbhc, port);
405 port->status &= ~(USB_PORT_STAT_CONNECTION | USB_PORT_STAT_ENABLE
406 | USB_PORT_STAT_SUSPEND | USB_PORT_STAT_RESET
407 | USB_PORT_STAT_LOW_SPEED | USB_PORT_STAT_HIGH_SPEED);
408 port->change |= USB_PORT_STAT_C_CONNECTION | USB_PORT_STAT_C_ENABLE;
409 if (wusb_dev) {
410 if (!list_empty(&wusb_dev->cack_node))
411 list_del_init(&wusb_dev->cack_node);
412 /* For the one in cack_add() */
413 wusb_dev_put(wusb_dev);
414 }
415 port->wusb_dev = NULL;
416 /* don't reset the reset_count to zero or wusbhc_rh_port_reset will get
417 * confused! We only reset to zero when we connect a new device.
418 */
419
420 /* After a device disconnects, change the GTK (see [WUSB]
421 * section 6.2.11.2). */
422 wusbhc_gtk_rekey(wusbhc);
423
424 d_fnend(3, dev, "(wusbhc %p, port %p) = void\n", wusbhc, port);
425 /* The Wireless USB part has forgotten about the device already; now
426 * khubd's timer will pick up the disconnection and remove the USB
427 * device from the system
428 */
429}
430
431/*
432 * Authenticate a device into the WUSB Cluster
433 *
434 * Called from the Root Hub code (rh.c:wusbhc_rh_port_reset()) when
435 * asking for a reset on a port that is not enabled (ie: first connect
436 * on the port).
437 *
438 * Performs the 4-way handshake to allow the device to communicate with the
439 * WUSB Cluster securely; once done, issue a request to the device for
440 * it to change to address 0.
441 *
442 * This mimics the reset step of wired USB which, after resetting a
443 * device, leaves the port in the enabled state and the device with the
444 * default address (0).
445 *
446 * WUSB1.0[7.1.2]
447 *
448 * @port_idx: port where the change happened--This is the index into
449 * the wusbhc port array, not the USB port number.
450 */
451int wusbhc_devconnect_auth(struct wusbhc *wusbhc, u8 port_idx)
452{
453 struct device *dev = wusbhc->dev;
454 struct wusb_port *port = wusb_port_by_idx(wusbhc, port_idx);
455
456 d_fnstart(3, dev, "(%p, %u)\n", wusbhc, port_idx);
457 port->status &= ~USB_PORT_STAT_RESET;
458 port->status |= USB_PORT_STAT_ENABLE;
459 port->change |= USB_PORT_STAT_C_RESET | USB_PORT_STAT_C_ENABLE;
460 d_fnend(3, dev, "(%p, %u) = 0\n", wusbhc, port_idx);
461 return 0;
462}
463
464/*
465 * Refresh the list of keep alives to emit in the MMC
466 *
467 * Some devices don't respond to keep alives unless they've been
468 * authenticated, so skip unauthenticated devices.
469 *
470 * We only publish the first four devices that are approaching a timeout
471 * condition. Then when we are done processing those, we go for the
472 * next ones. We ignore the ones that have timed out already (they'll
473 * be purged).
474 *
475 * This might cause the first devices in the port array to crowd out
476 * the last ones and let them time out...FIXME: come up with a better algorithm?
477 *
478 * Note we can't do much about MMC's ops errors; we hope next refresh
479 * will kind of handle it.
480 *
481 * NOTE: @wusbhc->mutex is locked
482 */
483static void __wusbhc_keep_alive(struct wusbhc *wusbhc)
484{
485 struct device *dev = wusbhc->dev;
486 unsigned cnt;
487 struct wusb_dev *wusb_dev;
488 struct wusb_port *wusb_port;
489 struct wuie_keep_alive *ie = &wusbhc->keep_alive_ie;
490 unsigned keep_alives, old_keep_alives;
491
492 old_keep_alives = ie->hdr.bLength - sizeof(ie->hdr);
493 keep_alives = 0;
494 for (cnt = 0;
495 keep_alives <= WUIE_ELT_MAX && cnt < wusbhc->ports_max;
496 cnt++) {
497 unsigned tt = msecs_to_jiffies(wusbhc->trust_timeout);
498
499 wusb_port = wusb_port_by_idx(wusbhc, cnt);
500 wusb_dev = wusb_port->wusb_dev;
501
502 if (wusb_dev == NULL)
503 continue;
504 if (wusb_dev->usb_dev == NULL || !wusb_dev->usb_dev->authenticated)
505 continue;
506
507 if (time_after(jiffies, wusb_dev->entry_ts + tt)) {
508 dev_err(dev, "KEEPALIVE: device %u timed out\n",
509 wusb_dev->addr);
510 __wusbhc_dev_disconnect(wusbhc, wusb_port);
511 } else if (time_after(jiffies, wusb_dev->entry_ts + tt/2)) {
512 /* Approaching timeout cut out, need to refresh */
513 ie->bDeviceAddress[keep_alives++] = wusb_dev->addr;
514 }
515 }
516 if (keep_alives & 0x1) /* pad to even number ([WUSB] section 7.5.9) */
517 ie->bDeviceAddress[keep_alives++] = 0x7f;
518 ie->hdr.bLength = sizeof(ie->hdr) +
519 keep_alives*sizeof(ie->bDeviceAddress[0]);
520 if (keep_alives > 0)
521 wusbhc_mmcie_set(wusbhc, 10, 5, &ie->hdr);
522 else if (old_keep_alives != 0)
523 wusbhc_mmcie_rm(wusbhc, &ie->hdr);
524}
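/*
 * Editorial example (not in the original source, numbers illustrative):
 * with the scheme in __wusbhc_keep_alive() above and a trust_timeout of,
 * say, 4000 ms, a device silent for more than 2000 ms gets its address
 * put into the Keep-Alive IE (and is expected to answer with DN_Alive,
 * refreshing entry_ts); if it stays silent past the full 4000 ms it is
 * disconnected via __wusbhc_dev_disconnect().
 */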
525
526/*
527 * Do a run through all devices checking for timeouts
528 */
529static void wusbhc_keep_alive_run(struct work_struct *ws)
530{
531 struct delayed_work *dw =
532 container_of(ws, struct delayed_work, work);
533 struct wusbhc *wusbhc =
534 container_of(dw, struct wusbhc, keep_alive_timer);
535
536 d_fnstart(5, wusbhc->dev, "(wusbhc %p)\n", wusbhc);
537 if (wusbhc->active) {
538 mutex_lock(&wusbhc->mutex);
539 __wusbhc_keep_alive(wusbhc);
540 mutex_unlock(&wusbhc->mutex);
541 queue_delayed_work(wusbd, &wusbhc->keep_alive_timer,
542 (wusbhc->trust_timeout * CONFIG_HZ)/1000/2);
543 }
544 d_fnend(5, wusbhc->dev, "(wusbhc %p) = void\n", wusbhc);
545 return;
546}
547
548/*
549 * Find the wusb_dev from its device address.
550 *
551 * The device can be found directly from the address (see
552 * wusbhc_cack_add() for where the device address is set to port_idx
553 * + 2), except when the address is zero.
554 */
555static struct wusb_dev *wusbhc_find_dev_by_addr(struct wusbhc *wusbhc, u8 addr)
556{
557 int p;
558
559 if (addr == 0xff) /* unconnected */
560 return NULL;
561
562 if (addr > 0) {
563 int port = (addr & ~0x80) - 2;
564 if (port < 0 || port >= wusbhc->ports_max)
565 return NULL;
566 return wusb_port_by_idx(wusbhc, port)->wusb_dev;
567 }
568
569 /* Look for the device with address 0. */
570 for (p = 0; p < wusbhc->ports_max; p++) {
571 struct wusb_dev *wusb_dev = wusb_port_by_idx(wusbhc, p)->wusb_dev;
572 if (wusb_dev && wusb_dev->addr == addr)
573 return wusb_dev;
574 }
575 return NULL;
576}
577
578/*
579 * Handle a DN_Alive notification (WUSB1.0[7.6.1])
580 *
581 * This just updates the device activity timestamp and then refreshes
582 * the keep alive IE.
583 *
584 * @wusbhc shall be referenced and unlocked
585 */
586static void wusbhc_handle_dn_alive(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
587{
588 struct device *dev = wusbhc->dev;
589
590 d_printf(2, dev, "DN ALIVE: device 0x%02x pong\n", wusb_dev->addr);
591
592 mutex_lock(&wusbhc->mutex);
593 wusb_dev->entry_ts = jiffies;
594 __wusbhc_keep_alive(wusbhc);
595 mutex_unlock(&wusbhc->mutex);
596}
597
598/*
599 * Handle a DN_Connect notification (WUSB1.0[7.6.1])
600 *
601 * @wusbhc
602 * @pkt_hdr
603 * @size: Size of the buffer where the notification resides; if the
604 * notification data suggests there should be more data than
605 * available, an error will be signaled and the whole buffer
606 * consumed.
607 *
608 * @wusbhc->mutex shall be held
609 */
610static void wusbhc_handle_dn_connect(struct wusbhc *wusbhc,
611 struct wusb_dn_hdr *dn_hdr,
612 size_t size)
613{
614 struct device *dev = wusbhc->dev;
615 struct wusb_dn_connect *dnc;
616 char pr_cdid[WUSB_CKHDID_STRSIZE];
617 static const char *beacon_behaviour[] = {
618 "reserved",
619 "self-beacon",
620 "directed-beacon",
621 "no-beacon"
622 };
623
624 d_fnstart(3, dev, "(%p, %p, %zu)\n", wusbhc, dn_hdr, size);
625 if (size < sizeof(*dnc)) {
626 dev_err(dev, "DN CONNECT: short notification (%zu < %zu)\n",
627 size, sizeof(*dnc));
628 goto out;
629 }
630
631 dnc = container_of(dn_hdr, struct wusb_dn_connect, hdr);
632 ckhdid_printf(pr_cdid, sizeof(pr_cdid), &dnc->CDID);
633 dev_info(dev, "DN CONNECT: device %s @ %x (%s) wants to %s\n",
634 pr_cdid,
635 wusb_dn_connect_prev_dev_addr(dnc),
636 beacon_behaviour[wusb_dn_connect_beacon_behavior(dnc)],
637 wusb_dn_connect_new_connection(dnc) ? "connect" : "reconnect");
638 /* ACK the connect */
639 wusbhc_devconnect_ack(wusbhc, dnc, pr_cdid);
640out:
641 d_fnend(3, dev, "(%p, %p, %zu) = void\n",
642 wusbhc, dn_hdr, size);
643 return;
644}
645
646/*
647 * Handle a DN_Disconnect notification (WUSB1.0[7.6.1])
648 *
649 * Device is going down -- do the disconnect.
650 *
651 * @wusbhc shall be referenced and unlocked
652 */
653static void wusbhc_handle_dn_disconnect(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
654{
655 struct device *dev = wusbhc->dev;
656
657 dev_info(dev, "DN DISCONNECT: device 0x%02x going down\n", wusb_dev->addr);
658
659 mutex_lock(&wusbhc->mutex);
660 __wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, wusb_dev->port_idx));
661 mutex_unlock(&wusbhc->mutex);
662}
663
664/*
665 * Reset a WUSB device on a HWA
666 *
667 * @wusbhc
668 * @port_idx Index of the port where the device is
669 *
670 * In Wireless USB, a reset is more or less equivalent to a full
671 * disconnect; so we just do a full disconnect and send the device a
672 * Device Reset IE (WUSB1.0[7.5.11]) giving it a few millisecs (6 MMCs).
673 *
674 * @wusbhc should be refcounted and unlocked
675 */
676int wusbhc_dev_reset(struct wusbhc *wusbhc, u8 port_idx)
677{
678 int result;
679 struct device *dev = wusbhc->dev;
680 struct wusb_dev *wusb_dev;
681 struct wuie_reset *ie;
682
683 d_fnstart(3, dev, "(%p, %u)\n", wusbhc, port_idx);
684 mutex_lock(&wusbhc->mutex);
685 result = 0;
686 wusb_dev = wusb_port_by_idx(wusbhc, port_idx)->wusb_dev;
687 if (wusb_dev == NULL) {
688 /* reset no device? ignore */
689 dev_dbg(dev, "RESET: no device at port %u, ignoring\n",
690 port_idx);
691 goto error_unlock;
692 }
693 result = -ENOMEM;
694 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
695 if (ie == NULL)
696 goto error_unlock;
697 ie->hdr.bLength = sizeof(ie->hdr) + sizeof(ie->CDID);
698 ie->hdr.bIEIdentifier = WUIE_ID_RESET_DEVICE;
699 ie->CDID = wusb_dev->cdid;
700 result = wusbhc_mmcie_set(wusbhc, 0xff, 6, &ie->hdr);
701 if (result < 0) {
702		dev_err(dev, "RESET: can't set MMC: %d\n", result);
703 goto error_kfree;
704 }
705 __wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, port_idx));
706
707 /* 120ms, hopefully 6 MMCs (FIXME) */
708 msleep(120);
709 wusbhc_mmcie_rm(wusbhc, &ie->hdr);
710error_kfree:
711 kfree(ie);
712error_unlock:
713 mutex_unlock(&wusbhc->mutex);
714 d_fnend(3, dev, "(%p, %u) = %d\n", wusbhc, port_idx, result);
715 return result;
716}
717
718/*
719 * Handle a Device Notification coming from a host
720 *
721 * The Device Notification comes from a host (HWA, DWA or WHCI)
722 * wrapped in a set of headers. Somebody else has peeled off those
723 * headers for us and we just get one Device Notification.
724 *
725 * Invalid DNs (e.g., too short) are discarded.
726 *
727 * @wusbhc shall be referenced
728 *
729 * FIXMES:
730 * - implement priorities as in WUSB1.0[Table 7-55]?
731 */
732void wusbhc_handle_dn(struct wusbhc *wusbhc, u8 srcaddr,
733 struct wusb_dn_hdr *dn_hdr, size_t size)
734{
735 struct device *dev = wusbhc->dev;
736 struct wusb_dev *wusb_dev;
737
738 d_fnstart(3, dev, "(%p, %p)\n", wusbhc, dn_hdr);
739
740 if (size < sizeof(struct wusb_dn_hdr)) {
741 dev_err(dev, "DN data shorter than DN header (%d < %d)\n",
742 (int)size, (int)sizeof(struct wusb_dn_hdr));
743 goto out;
744 }
745
746 wusb_dev = wusbhc_find_dev_by_addr(wusbhc, srcaddr);
747 if (wusb_dev == NULL && dn_hdr->bType != WUSB_DN_CONNECT) {
748 dev_dbg(dev, "ignoring DN %d from unconnected device %02x\n",
749 dn_hdr->bType, srcaddr);
750 goto out;
751 }
752
753 switch (dn_hdr->bType) {
754 case WUSB_DN_CONNECT:
755 wusbhc_handle_dn_connect(wusbhc, dn_hdr, size);
756 break;
757 case WUSB_DN_ALIVE:
758 wusbhc_handle_dn_alive(wusbhc, wusb_dev);
759 break;
760 case WUSB_DN_DISCONNECT:
761 wusbhc_handle_dn_disconnect(wusbhc, wusb_dev);
762 break;
763 case WUSB_DN_MASAVAILCHANGED:
764 case WUSB_DN_RWAKE:
765 case WUSB_DN_SLEEP:
766 /* FIXME: handle these DNs. */
767 break;
768 case WUSB_DN_EPRDY:
769 /* The hardware handles these. */
770 break;
771 default:
772 dev_warn(dev, "unknown DN %u (%d octets) from %u\n",
773 dn_hdr->bType, (int)size, srcaddr);
774 }
775out:
776 d_fnend(3, dev, "(%p, %p) = void\n", wusbhc, dn_hdr);
777 return;
778}
779EXPORT_SYMBOL_GPL(wusbhc_handle_dn);
780
781/*
782 * Disconnect a WUSB device from the cluster
783 *
784 * @wusbhc
785 * @port Fake port where the device is (wusbhc index, not USB port number).
786 *
787 * In Wireless USB, a disconnect basically means telling the device it
788 * is being disconnected and then forgetting about it.
789 *
790 * We send the device a Device Disconnect IE (WUSB1.0[7.5.11]) for 100
791 * ms and then keep going.
792 *
793 * We don't do much in case of error; we always pretend we disabled
794 * the port and disconnected the device. If physically the request
795 * didn't get there (many things can fail in the way there), the stack
796 * will reject the device's communication attempts.
797 *
798 * @wusbhc should be refcounted and locked
799 */
800void __wusbhc_dev_disable(struct wusbhc *wusbhc, u8 port_idx)
801{
802 int result;
803 struct device *dev = wusbhc->dev;
804 struct wusb_dev *wusb_dev;
805 struct wuie_disconnect *ie;
806
807 d_fnstart(3, dev, "(%p, %u)\n", wusbhc, port_idx);
808 result = 0;
809 wusb_dev = wusb_port_by_idx(wusbhc, port_idx)->wusb_dev;
810 if (wusb_dev == NULL) {
811		/* disconnect no device? ignore */
812 dev_dbg(dev, "DISCONNECT: no device at port %u, ignoring\n",
813 port_idx);
814 goto error;
815 }
816 __wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, port_idx));
817
818 result = -ENOMEM;
819 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
820 if (ie == NULL)
821 goto error;
822 ie->hdr.bLength = sizeof(*ie);
823 ie->hdr.bIEIdentifier = WUIE_ID_DEVICE_DISCONNECT;
824 ie->bDeviceAddress = wusb_dev->addr;
825 result = wusbhc_mmcie_set(wusbhc, 0, 0, &ie->hdr);
826 if (result < 0) {
827 dev_err(dev, "DISCONNECT: can't set MMC: %d\n", result);
828 goto error_kfree;
829 }
830
831	/* 100ms, hopefully 6 MMCs */
832 msleep(100);
833 wusbhc_mmcie_rm(wusbhc, &ie->hdr);
834error_kfree:
835 kfree(ie);
836error:
837 d_fnend(3, dev, "(%p, %u) = %d\n", wusbhc, port_idx, result);
838 return;
839}
840
841static void wusb_cap_descr_printf(const unsigned level, struct device *dev,
842 const struct usb_wireless_cap_descriptor *wcd)
843{
844 d_printf(level, dev,
845 "WUSB Capability Descriptor\n"
846 " bDevCapabilityType 0x%02x\n"
847 " bmAttributes 0x%02x\n"
848 " wPhyRates 0x%04x\n"
849 " bmTFITXPowerInfo 0x%02x\n"
850 " bmFFITXPowerInfo 0x%02x\n"
851 " bmBandGroup 0x%04x\n"
852 " bReserved 0x%02x\n",
853 wcd->bDevCapabilityType,
854 wcd->bmAttributes,
855 le16_to_cpu(wcd->wPHYRates),
856 wcd->bmTFITXPowerInfo,
857 wcd->bmFFITXPowerInfo,
858 wcd->bmBandGroup,
859 wcd->bReserved);
860}
861
862/*
863 * Walk over the BOS descriptor, verify and grok it
864 *
865 * @usb_dev: referenced
866 * @wusb_dev: referenced and unlocked
867 *
868 * The BOS descriptor is defined at WUSB1.0[7.4.1], and it defines a
869 * "flexible" way to wrap all kinds of descriptors inside a standard
870 * descriptor (wonder why they didn't use normal descriptors,
871 * btw). Not like they lack code.
872 *
873 * At the end we go to look for the WUSB Device Capabilities
874 * (WUSB1.0[7.4.1.1]) that is wrapped in a device capability descriptor
875 * that is part of the BOS descriptor set. That tells us what the
876 * device supports (dual role, beacon type, UWB PHY rates).
877 */
878static int wusb_dev_bos_grok(struct usb_device *usb_dev,
879 struct wusb_dev *wusb_dev,
880 struct usb_bos_descriptor *bos, size_t desc_size)
881{
882 ssize_t result;
883 struct device *dev = &usb_dev->dev;
884 void *itr, *top;
885
886 /* Walk over BOS capabilities, verify them */
887 itr = (void *)bos + sizeof(*bos);
888 top = itr + desc_size - sizeof(*bos);
889 while (itr < top) {
890 struct usb_dev_cap_header *cap_hdr = itr;
891 size_t cap_size;
892 u8 cap_type;
893 if (top - itr < sizeof(*cap_hdr)) {
894 dev_err(dev, "Device BUG? premature end of BOS header "
895 "data [offset 0x%02x]: only %zu bytes left\n",
896 (int)(itr - (void *)bos), top - itr);
897 result = -ENOSPC;
898 goto error_bad_cap;
899 }
900 cap_size = cap_hdr->bLength;
901 cap_type = cap_hdr->bDevCapabilityType;
902 d_printf(4, dev, "BOS Capability: 0x%02x (%zu bytes)\n",
903 cap_type, cap_size);
904 if (cap_size == 0)
905 break;
906 if (cap_size > top - itr) {
907 dev_err(dev, "Device BUG? premature end of BOS data "
908 "[offset 0x%02x cap %02x %zu bytes]: "
909 "only %zu bytes left\n",
910 (int)(itr - (void *)bos),
911 cap_type, cap_size, top - itr);
912 result = -EBADF;
913 goto error_bad_cap;
914 }
915 d_dump(3, dev, itr, cap_size);
916 switch (cap_type) {
917 case USB_CAP_TYPE_WIRELESS_USB:
918 if (cap_size != sizeof(*wusb_dev->wusb_cap_descr))
919 dev_err(dev, "Device BUG? WUSB Capability "
920 "descriptor is %zu bytes vs %zu "
921 "needed\n", cap_size,
922 sizeof(*wusb_dev->wusb_cap_descr));
923 else {
924 wusb_dev->wusb_cap_descr = itr;
925 wusb_cap_descr_printf(3, dev, itr);
926 }
927 break;
928 default:
929 dev_err(dev, "BUG? Unknown BOS capability 0x%02x "
930 "(%zu bytes) at offset 0x%02x\n", cap_type,
931 cap_size, (int)(itr - (void *)bos));
932 }
933 itr += cap_size;
934 }
935 result = 0;
936error_bad_cap:
937 return result;
938}
939
940/*
941 * Add information from the BOS descriptors to the device
942 *
943 * @usb_dev: referenced
944 * @wusb_dev: referenced and unlocked
945 *
946 * We allocate a 32 byte buffer for the BOS descriptor, read the first
947 * four bytes, which include the wTotalLength field (WUSB1.0[T7-26]),
948 * and if the whole descriptor fits in those 32 bytes, read it all.
949 * If not, we reallocate to the full size and read again.
950 *
951 * Then we call the groking function, that will fill up
952 * wusb_dev->wusb_cap_descr, which is what we'll need later on.
953 */
954static int wusb_dev_bos_add(struct usb_device *usb_dev,
955 struct wusb_dev *wusb_dev)
956{
957 ssize_t result;
958 struct device *dev = &usb_dev->dev;
959 struct usb_bos_descriptor *bos;
960 size_t alloc_size = 32, desc_size = 4;
961
962 bos = kmalloc(alloc_size, GFP_KERNEL);
963 if (bos == NULL)
964 return -ENOMEM;
965 result = usb_get_descriptor(usb_dev, USB_DT_BOS, 0, bos, desc_size);
966 if (result < 4) {
967 dev_err(dev, "Can't get BOS descriptor or too short: %zd\n",
968 result);
969 goto error_get_descriptor;
970 }
971 desc_size = le16_to_cpu(bos->wTotalLength);
972 if (desc_size >= alloc_size) {
973 kfree(bos);
974 alloc_size = desc_size;
975 bos = kmalloc(alloc_size, GFP_KERNEL);
976 if (bos == NULL)
977 return -ENOMEM;
978 }
979 result = usb_get_descriptor(usb_dev, USB_DT_BOS, 0, bos, desc_size);
980 if (result < 0 || result != desc_size) {
981 dev_err(dev, "Can't get BOS descriptor or too short (need "
982 "%zu bytes): %zd\n", desc_size, result);
983 goto error_get_descriptor;
984 }
985 if (result < sizeof(*bos)
986 || le16_to_cpu(bos->wTotalLength) != desc_size) {
987		dev_err(dev, "BOS descriptor invalid or changed size (need "
988			"%zu bytes): %zd\n", desc_size, result);
989 goto error_get_descriptor;
990 }
991 d_printf(2, dev, "Got BOS descriptor %zd bytes, %u capabilities\n",
992 result, bos->bNumDeviceCaps);
993 d_dump(2, dev, bos, result);
994 result = wusb_dev_bos_grok(usb_dev, wusb_dev, bos, result);
995 if (result < 0)
996 goto error_bad_bos;
997 wusb_dev->bos = bos;
998 return 0;
999
1000error_bad_bos:
1001error_get_descriptor:
1002 kfree(bos);
1003 wusb_dev->wusb_cap_descr = NULL;
1004 return result;
1005}
1006
1007static void wusb_dev_bos_rm(struct wusb_dev *wusb_dev)
1008{
1009 kfree(wusb_dev->bos);
1010 wusb_dev->wusb_cap_descr = NULL;
1011};
1012
1013static struct usb_wireless_cap_descriptor wusb_cap_descr_default = {
1014 .bLength = sizeof(wusb_cap_descr_default),
1015 .bDescriptorType = USB_DT_DEVICE_CAPABILITY,
1016 .bDevCapabilityType = USB_CAP_TYPE_WIRELESS_USB,
1017
1018 .bmAttributes = USB_WIRELESS_BEACON_NONE,
1019 .wPHYRates = cpu_to_le16(USB_WIRELESS_PHY_53),
1020 .bmTFITXPowerInfo = 0,
1021 .bmFFITXPowerInfo = 0,
1022 .bmBandGroup = cpu_to_le16(0x0001), /* WUSB1.0[7.4.1] bottom */
1023 .bReserved = 0
1024};
1025
1026/*
1027 * USB stack's device addition Notifier Callback
1028 *
1029 * Called from drivers/usb/core/hub.c when a new device is added; we
1030 * use this hook to perform certain WUSB specific setup work on the
1031 * new device. As well, it is the first time we can connect the
1032 * wusb_dev and the usb_dev. So we note it down in wusb_dev and take a
1033 * reference that we'll drop.
1034 *
1035 * First we need to determine if the device is a WUSB device (else we
1036 * ignore it). For that we use the speed setting (USB_SPEED_VARIABLE)
1037 * [FIXME: maybe we'd need something more definitive]. If so, we track
1038 * its usb_bus and, from there, the WUSB HC.
1039 *
1040 * Because all WUSB HCs are contained in a 'struct wusbhc', voila, we
1041 * get the wusbhc for the device.
1042 *
1043 * We have a reference on @usb_dev (as we are called at the end of its
1044 * enumeration).
1045 *
1046 * NOTE: @usb_dev locked
1047 */
1048static void wusb_dev_add_ncb(struct usb_device *usb_dev)
1049{
1050 int result = 0;
1051 struct wusb_dev *wusb_dev;
1052 struct wusbhc *wusbhc;
1053 struct device *dev = &usb_dev->dev;
1054 u8 port_idx;
1055
1056 if (usb_dev->wusb == 0 || usb_dev->devnum == 1)
1057 return; /* skip non wusb and wusb RHs */
1058
1059 d_fnstart(3, dev, "(usb_dev %p)\n", usb_dev);
1060
1061 wusbhc = wusbhc_get_by_usb_dev(usb_dev);
1062 if (wusbhc == NULL)
1063 goto error_nodev;
1064 mutex_lock(&wusbhc->mutex);
1065 wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, usb_dev);
1066 port_idx = wusb_port_no_to_idx(usb_dev->portnum);
1067 mutex_unlock(&wusbhc->mutex);
1068 if (wusb_dev == NULL)
1069 goto error_nodev;
1070 wusb_dev->usb_dev = usb_get_dev(usb_dev);
1071 usb_dev->wusb_dev = wusb_dev_get(wusb_dev);
1072 result = wusb_dev_sec_add(wusbhc, usb_dev, wusb_dev);
1073 if (result < 0) {
1074 dev_err(dev, "Cannot enable security: %d\n", result);
1075 goto error_sec_add;
1076 }
1077	/* Now query the device for its BOS and attach it to wusb_dev */
1078 result = wusb_dev_bos_add(usb_dev, wusb_dev);
1079 if (result < 0) {
1080 dev_err(dev, "Cannot get BOS descriptors: %d\n", result);
1081 goto error_bos_add;
1082 }
1083 result = wusb_dev_sysfs_add(wusbhc, usb_dev, wusb_dev);
1084 if (result < 0)
1085 goto error_add_sysfs;
1086out:
1087 wusb_dev_put(wusb_dev);
1088 wusbhc_put(wusbhc);
1089error_nodev:
1090 d_fnend(3, dev, "(usb_dev %p) = void\n", usb_dev);
1091 return;
1092
1093 wusb_dev_sysfs_rm(wusb_dev);
1094error_add_sysfs:
1095 wusb_dev_bos_rm(wusb_dev);
1096error_bos_add:
1097 wusb_dev_sec_rm(wusb_dev);
1098error_sec_add:
1099 mutex_lock(&wusbhc->mutex);
1100 __wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, port_idx));
1101 mutex_unlock(&wusbhc->mutex);
1102 goto out;
1103}
1104
1105/*
1106 * Undo all the steps done at connection by the notifier callback
1107 *
1108 * NOTE: @usb_dev locked
1109 */
1110static void wusb_dev_rm_ncb(struct usb_device *usb_dev)
1111{
1112 struct wusb_dev *wusb_dev = usb_dev->wusb_dev;
1113
1114 if (usb_dev->wusb == 0 || usb_dev->devnum == 1)
1115 return; /* skip non wusb and wusb RHs */
1116
1117 wusb_dev_sysfs_rm(wusb_dev);
1118 wusb_dev_bos_rm(wusb_dev);
1119 wusb_dev_sec_rm(wusb_dev);
1120 wusb_dev->usb_dev = NULL;
1121 usb_dev->wusb_dev = NULL;
1122 wusb_dev_put(wusb_dev);
1123 usb_put_dev(usb_dev);
1124}
1125
1126/*
1127 * Handle notifications from the USB stack (notifier call back)
1128 *
1129 * This is called when the USB stack does a
1130 * usb_{bus,device}_{add,remove}() so we can do WUSB specific
1131 * handling. For the USB_DEVICE_{ADD,REMOVE} cases it is called
1132 * with the usb_dev locked.
1133 */
1134int wusb_usb_ncb(struct notifier_block *nb, unsigned long val,
1135 void *priv)
1136{
1137 int result = NOTIFY_OK;
1138
1139 switch (val) {
1140 case USB_DEVICE_ADD:
1141 wusb_dev_add_ncb(priv);
1142 break;
1143 case USB_DEVICE_REMOVE:
1144 wusb_dev_rm_ncb(priv);
1145 break;
1146 case USB_BUS_ADD:
1147 /* ignore (for now) */
1148 case USB_BUS_REMOVE:
1149 break;
1150 default:
1151 WARN_ON(1);
1152 result = NOTIFY_BAD;
1153	}
1154 return result;
1155}
1156
1157/*
1158 * Return a referenced wusb_dev given a @wusbhc and @usb_dev
1159 */
1160struct wusb_dev *__wusb_dev_get_by_usb_dev(struct wusbhc *wusbhc,
1161 struct usb_device *usb_dev)
1162{
1163 struct wusb_dev *wusb_dev;
1164 u8 port_idx;
1165
1166 port_idx = wusb_port_no_to_idx(usb_dev->portnum);
1167 BUG_ON(port_idx > wusbhc->ports_max);
1168 wusb_dev = wusb_port_by_idx(wusbhc, port_idx)->wusb_dev;
1169	if (wusb_dev != NULL)		/* NULL if the device is gone */
1170 wusb_dev_get(wusb_dev);
1171 return wusb_dev;
1172}
1173EXPORT_SYMBOL_GPL(__wusb_dev_get_by_usb_dev);
1174
1175void wusb_dev_destroy(struct kref *_wusb_dev)
1176{
1177 struct wusb_dev *wusb_dev
1178 = container_of(_wusb_dev, struct wusb_dev, refcnt);
1179 list_del_init(&wusb_dev->cack_node);
1180 wusb_dev_free(wusb_dev);
1181 d_fnend(1, NULL, "%s (wusb_dev %p) = void\n", __func__, wusb_dev);
1182}
1183EXPORT_SYMBOL_GPL(wusb_dev_destroy);
1184
1185/*
1186 * Create all the device connect handling infrastructure
1187 *
1188 * This is basically the device info array, Connect Acknowledgement
1189 * (cack) lists, keep-alive timers (and delayed work thread).
1190 */
1191int wusbhc_devconnect_create(struct wusbhc *wusbhc)
1192{
1193 d_fnstart(3, wusbhc->dev, "(wusbhc %p)\n", wusbhc);
1194
1195 wusbhc->keep_alive_ie.hdr.bIEIdentifier = WUIE_ID_KEEP_ALIVE;
1196 wusbhc->keep_alive_ie.hdr.bLength = sizeof(wusbhc->keep_alive_ie.hdr);
1197 INIT_DELAYED_WORK(&wusbhc->keep_alive_timer, wusbhc_keep_alive_run);
1198
1199 wusbhc->cack_ie.hdr.bIEIdentifier = WUIE_ID_CONNECTACK;
1200 wusbhc->cack_ie.hdr.bLength = sizeof(wusbhc->cack_ie.hdr);
1201 INIT_LIST_HEAD(&wusbhc->cack_list);
1202
1203 d_fnend(3, wusbhc->dev, "(wusbhc %p) = void\n", wusbhc);
1204 return 0;
1205}
1206
1207/*
1208 * Release all resources taken by the devconnect stuff
1209 */
1210void wusbhc_devconnect_destroy(struct wusbhc *wusbhc)
1211{
1212 d_fnstart(3, wusbhc->dev, "(wusbhc %p)\n", wusbhc);
1213 d_fnend(3, wusbhc->dev, "(wusbhc %p) = void\n", wusbhc);
1214}
1215
1216/*
1217 * wusbhc_devconnect_start - start accepting device connections
1218 * @wusbhc: the WUSB HC
1219 *
1220 * Sets the Host Info IE to accept all new connections.
1221 *
1222 * FIXME: This also enables the keep alives but this is not necessary
1223 * until there are connected and authenticated devices.
1224 */
1225int wusbhc_devconnect_start(struct wusbhc *wusbhc,
1226 const struct wusb_ckhdid *chid)
1227{
1228 struct device *dev = wusbhc->dev;
1229 struct wuie_host_info *hi;
1230 int result;
1231
1232 hi = kzalloc(sizeof(*hi), GFP_KERNEL);
1233 if (hi == NULL)
1234 return -ENOMEM;
1235
1236 hi->hdr.bLength = sizeof(*hi);
1237 hi->hdr.bIEIdentifier = WUIE_ID_HOST_INFO;
1238 hi->attributes = cpu_to_le16((wusbhc->rsv->stream << 3) | WUIE_HI_CAP_ALL);
1239 hi->CHID = *chid;
1240 result = wusbhc_mmcie_set(wusbhc, 0, 0, &hi->hdr);
1241 if (result < 0) {
1242 dev_err(dev, "Cannot add Host Info MMCIE: %d\n", result);
1243 goto error_mmcie_set;
1244 }
1245 wusbhc->wuie_host_info = hi;
1246
1247 queue_delayed_work(wusbd, &wusbhc->keep_alive_timer,
1248 (wusbhc->trust_timeout*CONFIG_HZ)/1000/2);
1249
1250 return 0;
1251
1252error_mmcie_set:
1253 kfree(hi);
1254 return result;
1255}
1256
1257/*
1258 * wusbhc_devconnect_stop - stop managing connected devices
1259 * @wusbhc: the WUSB HC
1260 *
1261 * Removes the Host Info IE and stops the keep alives.
1262 *
1263 * FIXME: should this disconnect all devices?
1264 */
1265void wusbhc_devconnect_stop(struct wusbhc *wusbhc)
1266{
1267 cancel_delayed_work_sync(&wusbhc->keep_alive_timer);
1268 WARN_ON(!list_empty(&wusbhc->cack_list));
1269
1270 wusbhc_mmcie_rm(wusbhc, &wusbhc->wuie_host_info->hdr);
1271 kfree(wusbhc->wuie_host_info);
1272 wusbhc->wuie_host_info = NULL;
1273}
1274
1275/*
1276 * wusb_set_dev_addr - set the WUSB device address used by the host
1277 * @wusbhc: the WUSB HC the device is connect to
1278 * @wusb_dev: the WUSB device
1279 * @addr: new device address
1280 */
1281int wusb_set_dev_addr(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev, u8 addr)
1282{
1283 int result;
1284
1285 wusb_dev->addr = addr;
1286 result = wusbhc->dev_info_set(wusbhc, wusb_dev);
1287 if (result < 0)
1288 dev_err(wusbhc->dev, "device %d: failed to set device "
1289 "address\n", wusb_dev->port_idx);
1290 else
1291 dev_info(wusbhc->dev, "device %d: %s addr %u\n",
1292 wusb_dev->port_idx,
1293 (addr & WUSB_DEV_ADDR_UNAUTH) ? "unauth" : "auth",
1294 wusb_dev->addr);
1295
1296 return result;
1297}
diff --git a/drivers/usb/wusbcore/mmc.c b/drivers/usb/wusbcore/mmc.c
new file mode 100644
index 000000000000..cfa77a01cebd
--- /dev/null
+++ b/drivers/usb/wusbcore/mmc.c
@@ -0,0 +1,321 @@
1/*
2 * WUSB Wire Adapter: Control/Data Streaming Interface (WUSB[8])
3 * MMC (Microscheduled Management Command) handling
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * WUIEs and MMC IEs...well, they are almost the same at the end. MMC
24 * IEs are Wireless USB IEs that go into the MMC period...[what is
25 * that? look in Design-overview.txt].
26 *
27 *
28 * This is a simple subsystem to keep track of which IEs are being
29 * sent by the host in the MMC period.
30 *
31 * For each WUIE we ask to send, we keep it in an array, so we can
32 * request its removal later, or replace the content. They are tracked
33 * by pointer, so be sure to use the same pointer if you want to
34 * remove it or update the contents.
35 *
36 * FIXME:
37 * - add timers that autoremove intervalled IEs?
38 */
39#include <linux/usb/wusb.h>
40#include "wusbhc.h"
41
42/* Initialize the MMCIEs handling mechanism */
43int wusbhc_mmcie_create(struct wusbhc *wusbhc)
44{
45 u8 mmcies = wusbhc->mmcies_max;
46 wusbhc->mmcie = kcalloc(mmcies, sizeof(wusbhc->mmcie[0]), GFP_KERNEL);
47 if (wusbhc->mmcie == NULL)
48 return -ENOMEM;
49 mutex_init(&wusbhc->mmcie_mutex);
50 return 0;
51}
52
53/* Release resources used by the MMCIEs handling mechanism */
54void wusbhc_mmcie_destroy(struct wusbhc *wusbhc)
55{
56 kfree(wusbhc->mmcie);
57}
58
59/*
60 * Add or replace an MMC Wireless USB IE.
61 *
62 * @interval: See WUSB1.0[8.5.3.1]
63 * @repeat_cnt: See WUSB1.0[8.5.3.1]
64 * @handle: See WUSB1.0[8.5.3.1]
65 * @wuie: Pointer to the header of the WUSB IE data to add.
66 * MUST BE allocated in a kmalloc buffer (no stack or
67 * vmalloc).
68 * THE CALLER ALWAYS OWNS THE POINTER (we don't free it
69 * on remove, we just forget about it).
70 * @returns: 0 if ok, < 0 errno code on error.
71 *
72 * Goes over the *whole* @wusbhc->mmcie array looking for (a) the
73 * first free spot and (b) if @wuie is already in the array (aka:
74 * transmitted in the MMCs) the spot where it is.
75 *
76 * If present, we "overwrite it" (update).
77 *
78 *
79 * NOTE: Special ordering rules apply -- see WUSB1.0 Table 7-38 below.
80 *       The host uses the handle as the 'sort' index. We always
81 *       allocate the last one for WUIE_ID_HOST_INFO, and the rest
82 *       are first come, first served in inverse order.
83 *
84 * Host software must make sure that it adds the other IEs in
85 * the right order... the host hardware is responsible for
86 * placing the WCTA IEs in the right place with the other IEs
87 * set by host software.
88 *
89 * NOTE: we can access wusbhc->wa_descr without locking because it is
90 * read only.
91 */
92int wusbhc_mmcie_set(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt,
93 struct wuie_hdr *wuie)
94{
95 int result = -ENOBUFS;
96 unsigned handle, itr;
97
98 /* Search a handle, taking into account the ordering */
99 mutex_lock(&wusbhc->mmcie_mutex);
100 switch (wuie->bIEIdentifier) {
101 case WUIE_ID_HOST_INFO:
102 /* Always last */
103 handle = wusbhc->mmcies_max - 1;
104 break;
105 case WUIE_ID_ISOCH_DISCARD:
106 dev_err(wusbhc->dev, "Special ordering case for WUIE ID 0x%x "
107 "unimplemented\n", wuie->bIEIdentifier);
108 result = -ENOSYS;
109 goto error_unlock;
110 default:
111 /* search for it or find the last empty slot */
112 handle = ~0;
113 for (itr = 0; itr < wusbhc->mmcies_max - 1; itr++) {
114 if (wusbhc->mmcie[itr] == wuie) {
115 handle = itr;
116 break;
117 }
118 if (wusbhc->mmcie[itr] == NULL)
119 handle = itr;
120 }
121 if (handle == ~0)
122 goto error_unlock;
123 }
124 result = (wusbhc->mmcie_add)(wusbhc, interval, repeat_cnt, handle,
125 wuie);
126 if (result >= 0)
127 wusbhc->mmcie[handle] = wuie;
128error_unlock:
129 mutex_unlock(&wusbhc->mmcie_mutex);
130 return result;
131}
132EXPORT_SYMBOL_GPL(wusbhc_mmcie_set);
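/*
 * Typical usage (a sketch mirroring wusbhc_dev_reset() in
 * devconnect.c): the caller owns the IE buffer, which must be
 * kmalloc()ed and stay valid until after wusbhc_mmcie_rm():
 *
 *	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
 *	if (ie == NULL)
 *		return -ENOMEM;
 *	ie->hdr.bLength = sizeof(*ie);
 *	ie->hdr.bIEIdentifier = WUIE_ID_...;
 *	result = wusbhc_mmcie_set(wusbhc, 0, 0, &ie->hdr);
 *	...
 *	wusbhc_mmcie_rm(wusbhc, &ie->hdr);
 *	kfree(ie);
 */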
133
134/*
135 * Remove an MMC IE previously added with wusbhc_mmcie_set()
136 *
137 * @wuie Pointer used to add the WUIE
138 */
139void wusbhc_mmcie_rm(struct wusbhc *wusbhc, struct wuie_hdr *wuie)
140{
141 int result;
142 unsigned handle, itr;
143
144 mutex_lock(&wusbhc->mmcie_mutex);
145 for (itr = 0; itr < wusbhc->mmcies_max; itr++) {
146 if (wusbhc->mmcie[itr] == wuie) {
147 handle = itr;
148 goto found;
149 }
150 }
151 mutex_unlock(&wusbhc->mmcie_mutex);
152 return;
153
154found:
155 result = (wusbhc->mmcie_rm)(wusbhc, handle);
156 if (result == 0)
157 wusbhc->mmcie[itr] = NULL;
158 mutex_unlock(&wusbhc->mmcie_mutex);
159}
160EXPORT_SYMBOL_GPL(wusbhc_mmcie_rm);
161
162/*
163 * wusbhc_start - start transmitting MMCs and accepting connections
164 * @wusbhc: the HC to start
165 * @chid: the CHID to use for this host
166 *
167 * Establishes a cluster reservation, enables device connections, and
168 * starts MMCs with appropriate DNTS parameters.
169 */
170int wusbhc_start(struct wusbhc *wusbhc, const struct wusb_ckhdid *chid)
171{
172 int result;
173 struct device *dev = wusbhc->dev;
174
175 WARN_ON(wusbhc->wuie_host_info != NULL);
176
177 result = wusbhc_rsv_establish(wusbhc);
178 if (result < 0) {
179 dev_err(dev, "cannot establish cluster reservation: %d\n",
180 result);
181 goto error_rsv_establish;
182 }
183
184 result = wusbhc_devconnect_start(wusbhc, chid);
185 if (result < 0) {
186 dev_err(dev, "error enabling device connections: %d\n", result);
187 goto error_devconnect_start;
188 }
189
190 result = wusbhc_sec_start(wusbhc);
191 if (result < 0) {
192 dev_err(dev, "error starting security in the HC: %d\n", result);
193 goto error_sec_start;
194 }
195 /* FIXME: the choice of the DNTS parameters is somewhat
196 * arbitrary */
197 result = wusbhc->set_num_dnts(wusbhc, 0, 15);
198 if (result < 0) {
199 dev_err(dev, "Cannot set DNTS parameters: %d\n", result);
200 goto error_set_num_dnts;
201 }
202 result = wusbhc->start(wusbhc);
203 if (result < 0) {
204		dev_err(dev, "error starting wusbhc: %d\n", result);
205 goto error_wusbhc_start;
206 }
207 wusbhc->active = 1;
208 return 0;
209
210error_wusbhc_start:
211 wusbhc_sec_stop(wusbhc);
212error_set_num_dnts:
213error_sec_start:
214 wusbhc_devconnect_stop(wusbhc);
215error_devconnect_start:
216 wusbhc_rsv_terminate(wusbhc);
217error_rsv_establish:
218 return result;
219}
220
221/*
222 * Disconnect all from the WUSB Channel
223 *
224 * Send a Host Disconnect IE in the MMC, wait, don't send it any more
225 */
226static int __wusbhc_host_disconnect_ie(struct wusbhc *wusbhc)
227{
228 int result = -ENOMEM;
229 struct wuie_host_disconnect *host_disconnect_ie;
230 might_sleep();
231 host_disconnect_ie = kmalloc(sizeof(*host_disconnect_ie), GFP_KERNEL);
232 if (host_disconnect_ie == NULL)
233 goto error_alloc;
234 host_disconnect_ie->hdr.bLength = sizeof(*host_disconnect_ie);
235 host_disconnect_ie->hdr.bIEIdentifier = WUIE_ID_HOST_DISCONNECT;
236 result = wusbhc_mmcie_set(wusbhc, 0, 0, &host_disconnect_ie->hdr);
237 if (result < 0)
238 goto error_mmcie_set;
239
240 /* WUSB1.0[8.5.3.1 & 7.5.2] */
241 msleep(100);
242 wusbhc_mmcie_rm(wusbhc, &host_disconnect_ie->hdr);
243error_mmcie_set:
244 kfree(host_disconnect_ie);
245error_alloc:
246 return result;
247}
248
249/*
250 * wusbhc_stop - stop transmitting MMCs
251 * @wusbhc: the HC to stop
252 *
253 * Send a Host Disconnect IE, wait, remove all the MMCs (stop sending MMCs).
254 *
255 * If we can't allocate a Host Disconnect IE, screw it, we don't notify
256 * the devices we are disconnecting...
257 */
258void wusbhc_stop(struct wusbhc *wusbhc)
259{
260 if (wusbhc->active) {
261 wusbhc->active = 0;
262 wusbhc->stop(wusbhc);
263 wusbhc_sec_stop(wusbhc);
264 __wusbhc_host_disconnect_ie(wusbhc);
265 wusbhc_devconnect_stop(wusbhc);
266 wusbhc_rsv_terminate(wusbhc);
267 }
268}
269EXPORT_SYMBOL_GPL(wusbhc_stop);
270
271/*
272 * Change the CHID in a WUSB Channel
273 *
274 * If it is just a new CHID, send a Host Disconnect IE and then change
275 * the CHID IE.
276 */
277static int __wusbhc_chid_change(struct wusbhc *wusbhc,
278 const struct wusb_ckhdid *chid)
279{
280 int result = -ENOSYS;
281 struct device *dev = wusbhc->dev;
282 dev_err(dev, "%s() not implemented yet\n", __func__);
283 return result;
284
285 BUG_ON(wusbhc->wuie_host_info == NULL);
286 __wusbhc_host_disconnect_ie(wusbhc);
287 wusbhc->wuie_host_info->CHID = *chid;
288 result = wusbhc_mmcie_set(wusbhc, 0, 0, &wusbhc->wuie_host_info->hdr);
289 if (result < 0)
290 dev_err(dev, "Can't update Host Info WUSB IE: %d\n", result);
291 return result;
292}
293
294/*
295 * Set/reset/update a new CHID
296 *
297 * Depending on the previous state of the MMCs, start, stop or change
298 * the sent MMC. This effectively switches the host controller on and
299 * off (radio wise).
300 */
301int wusbhc_chid_set(struct wusbhc *wusbhc, const struct wusb_ckhdid *chid)
302{
303 int result = 0;
304
305	if (memcmp(chid, &wusb_ckhdid_zero, sizeof(*chid)) == 0)
306 chid = NULL;
307
308 mutex_lock(&wusbhc->mutex);
309 if (wusbhc->active) {
310 if (chid)
311 result = __wusbhc_chid_change(wusbhc, chid);
312 else
313 wusbhc_stop(wusbhc);
314 } else {
315 if (chid)
316 wusbhc_start(wusbhc, chid);
317 }
318 mutex_unlock(&wusbhc->mutex);
319 return result;
320}
321EXPORT_SYMBOL_GPL(wusbhc_chid_set);
diff --git a/drivers/usb/wusbcore/pal.c b/drivers/usb/wusbcore/pal.c
new file mode 100644
index 000000000000..7cc51e9905cf
--- /dev/null
+++ b/drivers/usb/wusbcore/pal.c
@@ -0,0 +1,42 @@
1/*
2 * Wireless USB Host Controller
3 * UWB Protocol Adaptation Layer (PAL) glue.
4 *
5 * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19#include "wusbhc.h"
20
21/**
22 * wusbhc_pal_register - register the WUSB HC as a UWB PAL
23 * @wusbhc: the WUSB HC
24 */
25int wusbhc_pal_register(struct wusbhc *wusbhc)
26{
27 uwb_pal_init(&wusbhc->pal);
28
29 wusbhc->pal.name = "wusbhc";
30 wusbhc->pal.device = wusbhc->usb_hcd.self.controller;
31
32 return uwb_pal_register(wusbhc->uwb_rc, &wusbhc->pal);
33}
34
35/**
36 * wusbhc_pal_unregister - unregister the WUSB HC as a UWB PAL
37 * @wusbhc: the WUSB HC
38 */
39void wusbhc_pal_unregister(struct wusbhc *wusbhc)
40{
41 uwb_pal_unregister(wusbhc->uwb_rc, &wusbhc->pal);
42}
diff --git a/drivers/usb/wusbcore/reservation.c b/drivers/usb/wusbcore/reservation.c
new file mode 100644
index 000000000000..fc63e77ded2d
--- /dev/null
+++ b/drivers/usb/wusbcore/reservation.c
@@ -0,0 +1,115 @@
1/*
2 * WUSB cluster reservation management
3 *
4 * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License version
8 * 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18#include <linux/kernel.h>
19#include <linux/uwb.h>
20
21#include "wusbhc.h"
22
23/*
24 * WUSB cluster reservations are multicast reservations with the
25 * broadcast cluster ID (BCID) as the target DevAddr.
26 *
27 * FIXME: consider adjusting the reservation depending on what devices
28 * are attached.
29 */
30
31static int wusbhc_bwa_set(struct wusbhc *wusbhc, u8 stream,
32 const struct uwb_mas_bm *mas)
33{
34 if (mas == NULL)
35 mas = &uwb_mas_bm_zero;
36 return wusbhc->bwa_set(wusbhc, stream, mas);
37}
38
39/**
40 * wusbhc_rsv_complete_cb - WUSB HC reservation complete callback
41 * @rsv: the reservation
42 *
43 * Either set or clear the HC's view of the reservation.
44 *
45 * FIXME: when a reservation is denied the HC should be stopped.
46 */
47static void wusbhc_rsv_complete_cb(struct uwb_rsv *rsv)
48{
49 struct wusbhc *wusbhc = rsv->pal_priv;
50 struct device *dev = wusbhc->dev;
51 char buf[72];
52
53 switch (rsv->state) {
54 case UWB_RSV_STATE_O_ESTABLISHED:
55 bitmap_scnprintf(buf, sizeof(buf), rsv->mas.bm, UWB_NUM_MAS);
56 dev_dbg(dev, "established reservation: %s\n", buf);
57 wusbhc_bwa_set(wusbhc, rsv->stream, &rsv->mas);
58 break;
59 case UWB_RSV_STATE_NONE:
60 dev_dbg(dev, "removed reservation\n");
61 wusbhc_bwa_set(wusbhc, 0, NULL);
62 wusbhc->rsv = NULL;
63 break;
64 default:
65 dev_dbg(dev, "unexpected reservation state: %d\n", rsv->state);
66 break;
67 }
68}
69
70
71/**
72 * wusbhc_rsv_establish - establish a reservation for the cluster
73 * @wusbhc: the WUSB HC requesting a bandwidth reservation
74 */
75int wusbhc_rsv_establish(struct wusbhc *wusbhc)
76{
77 struct uwb_rc *rc = wusbhc->uwb_rc;
78 struct uwb_rsv *rsv;
79 struct uwb_dev_addr bcid;
80 int ret;
81
82 rsv = uwb_rsv_create(rc, wusbhc_rsv_complete_cb, wusbhc);
83 if (rsv == NULL)
84 return -ENOMEM;
85
86 bcid.data[0] = wusbhc->cluster_id;
87 bcid.data[1] = 0;
88
89 rsv->owner = &rc->uwb_dev;
90 rsv->target.type = UWB_RSV_TARGET_DEVADDR;
91 rsv->target.devaddr = bcid;
92 rsv->type = UWB_DRP_TYPE_PRIVATE;
93 rsv->max_mas = 256;
94 rsv->min_mas = 16; /* one MAS per zone? */
95 rsv->sparsity = 16; /* at least one MAS in each zone? */
96 rsv->is_multicast = true;
97
98 ret = uwb_rsv_establish(rsv);
99 if (ret == 0)
100 wusbhc->rsv = rsv;
101 else
102 uwb_rsv_destroy(rsv);
103 return ret;
104}
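/*
 * The MAS numbers above assume the ECMA-368 superframe layout: 256
 * MAS split into 16 zones of 16 MAS each, so min_mas = 16 with
 * sparsity = 16 aims for roughly one MAS in every zone, while
 * max_mas = 256 lets the reservation grow to the whole superframe.
 */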
105
106
107/**
108 * wusbhc_rsv_terminate - terminate any cluster reservation
109 * @wusbhc: the WUSB host whose reservation is to be terminated
110 */
111void wusbhc_rsv_terminate(struct wusbhc *wusbhc)
112{
113 if (wusbhc->rsv)
114 uwb_rsv_terminate(wusbhc->rsv);
115}
diff --git a/drivers/usb/wusbcore/rh.c b/drivers/usb/wusbcore/rh.c
new file mode 100644
index 000000000000..267a64325106
--- /dev/null
+++ b/drivers/usb/wusbcore/rh.c
@@ -0,0 +1,477 @@
1/*
2 * Wireless USB Host Controller
3 * Root Hub operations
4 *
5 *
6 * Copyright (C) 2005-2006 Intel Corporation
7 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License version
11 * 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 * 02110-1301, USA.
22 *
23 *
24 * We fake a root hub that has fake ports (as many as simultaneous
25 * devices the Wireless USB Host Controller can deal with). For each
26 * port we keep a state in @wusbhc->port[index] identical to the one
27 * specified in the USB2.0[ch11] spec and some extra device
28 * information that complements the one in 'struct usb_device' (as
29 * this lacks a hcpriv pointer).
30 *
31 * Note this is common to WHCI and HWA host controllers.
32 *
33 * Through here we enable most of the state changes that the USB stack
34 * will use to connect or disconnect devices. We need to do some
35 * forced adaptation of Wireless USB device states vs. wired:
36 *
37 * USB: WUSB:
38 *
39 * Port Powered-off port slot n/a
40 * Powered-on port slot available
41 * Disconnected port slot available
42 * Connected port slot assigned device
43 * device sent DN_Connect
44 * device was authenticated
45 * Enabled device is authenticated, transitioned
46 * from unauth -> auth -> default address
47 * -> enabled
48 * Reset disconnect
49 * Disable disconnect
50 *
51 * This maps the standard USB port states with the WUSB device states
52 * so we can fake ports without having to modify the USB stack.
53 *
54 * FIXME: this process will change in the future
55 *
56 *
57 * ENTRY POINTS
58 *
59 * Our entry points into here are, as in hcd.c, the USB stack root hub
60 * ops defined in the usb_hcd struct:
61 *
62 * wusbhc_rh_status_data() Provide hub and port status data bitmap
63 *
64 * wusbhc_rh_control() Execution of all the major requests
65 * you can do to a hub (Set|Clear
66 * features, get descriptors, status, etc).
67 *
68 * wusbhc_rh_[suspend|resume]()	Suspend/resume the root hub (unimplemented)
69 *
70 * wusbhc_rh_start_port_reset() ??? unimplemented
71 */
72#include "wusbhc.h"
73
74#define D_LOCAL 0
75#include <linux/uwb/debug.h>
76
77/*
78 * Reset a fake port
79 *
80 * This can be called to reset a port from any other state or to reset
81 * it when connecting. In Wireless USB the two are different: a new
82 * connect involves going through authentication, while a plain reset
83 * is a different story.
84 *
85 * The Linux USB stack resets a port twice before it considers it
86 * enabled, so we have to detect and ignore that.
87 *
88 * @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
89 *
90 * Supposedly we are the only thread accessing @wusbhc->port; in any
91 * case, maybe we should move the mutex locking from
92 * wusbhc_devconnect_auth() to here.
93 *
94 * @port_idx refers to the wusbhc's port index, not the USB port number
95 */
96static int wusbhc_rh_port_reset(struct wusbhc *wusbhc, u8 port_idx)
97{
98 int result = 0;
99 struct wusb_port *port = wusb_port_by_idx(wusbhc, port_idx);
100
101 d_fnstart(3, wusbhc->dev, "(wusbhc %p port_idx %u)\n",
102 wusbhc, port_idx);
103 if (port->reset_count == 0) {
104 wusbhc_devconnect_auth(wusbhc, port_idx);
105 port->reset_count++;
106 } else if (port->reset_count == 1)
107 /* see header */
108 d_printf(2, wusbhc->dev, "Ignoring second reset on port_idx "
109 "%u\n", port_idx);
110 else
111 result = wusbhc_dev_reset(wusbhc, port_idx);
112 d_fnend(3, wusbhc->dev, "(wusbhc %p port_idx %u) = %d\n",
113 wusbhc, port_idx, result);
114 return result;
115}
116
117/*
118 * Return the hub change status bitmap
119 *
120 * The bits in the change status bitmap are cleared when a
121 * ClearPortFeature request is issued (USB2.0[11.12.3, 11.12.4]).
122 *
123 * @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
124 *
125 * WARNING!! This gets called from atomic context; we cannot get the
126 * mutex--the only race condition we can find is some bit
127 * changing just after we copy it, which shouldn't be too
128 * big of a problem [and we can't make it a spinlock
129 * because other parts need to take it and sleep].
130 *
131 * @usb_hcd is refcounted, so it won't disappear under us
132 * and before killing a host, the polling of the root hub
133 * would be stopped anyway.
134 */
135int wusbhc_rh_status_data(struct usb_hcd *usb_hcd, char *_buf)
136{
137 struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
138 size_t cnt, size;
139 unsigned long *buf = (unsigned long *) _buf;
140
141 d_fnstart(1, wusbhc->dev, "(wusbhc %p)\n", wusbhc);
142 /* WE DON'T LOCK, see comment */
143 size = wusbhc->ports_max + 1 /* hub bit */;
144 size = (size + 8 - 1) / 8; /* round to bytes */
145 for (cnt = 0; cnt < wusbhc->ports_max; cnt++)
146 if (wusb_port_by_idx(wusbhc, cnt)->change)
147 set_bit(cnt + 1, buf);
148 else
149 clear_bit(cnt + 1, buf);
150 d_fnend(1, wusbhc->dev, "(wusbhc %p) %u, buffer:\n", wusbhc, (int)size);
151 d_dump(1, wusbhc->dev, _buf, size);
152 return size;
153}
154EXPORT_SYMBOL_GPL(wusbhc_rh_status_data);
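/*
 * Example of the bitmap filled in above (illustrative): bit 0 is
 * reserved for hub change events, bit N (1-based) reports a change on
 * fake port index N-1; with ports_max = 4 the buffer is
 * (4 + 1 + 7) / 8 = 1 byte long.
 */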
155
156/*
157 * Return the hub's descriptor
158 *
159 * NOTE: almost cut and paste from ehci-hub.c
160 *
161 * @wusbhc is assumed referenced and @wusbhc->mutex unlocked
162 */
163static int wusbhc_rh_get_hub_descr(struct wusbhc *wusbhc, u16 wValue,
164 u16 wIndex,
165 struct usb_hub_descriptor *descr,
166 u16 wLength)
167{
168 u16 temp = 1 + (wusbhc->ports_max / 8);
169 u8 length = 7 + 2 * temp;
170
171 if (wLength < length)
172 return -ENOSPC;
173 descr->bDescLength = 7 + 2 * temp;
174 descr->bDescriptorType = 0x29; /* HUB type */
175 descr->bNbrPorts = wusbhc->ports_max;
176 descr->wHubCharacteristics = cpu_to_le16(
177 0x00 /* All ports power at once */
178 | 0x00 /* not part of compound device */
179 | 0x10 /* No overcurrent protection */
180 | 0x00 /* 8 FS think time FIXME ?? */
181 | 0x00); /* No port indicators */
182 descr->bPwrOn2PwrGood = 0;
183 descr->bHubContrCurrent = 0;
184 /* two bitmaps: ports removable, and usb 1.0 legacy PortPwrCtrlMask */
185 memset(&descr->bitmap[0], 0, temp);
186 memset(&descr->bitmap[temp], 0xff, temp);
187 return 0;
188}
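/*
 * Descriptor sizing example for the function above (illustrative):
 * with ports_max = 4, temp = 1 + 4/8 = 1, so bDescLength = 7 + 2 = 9
 * bytes -- the 7 byte fixed part plus one byte each for the
 * DeviceRemovable and PortPwrCtrlMask bitmaps.
 */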
189
190/*
191 * Clear a hub feature
192 *
193 * @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
194 *
195 * Nothing to do, so no locking needed ;)
196 */
197static int wusbhc_rh_clear_hub_feat(struct wusbhc *wusbhc, u16 feature)
198{
199 int result;
200 struct device *dev = wusbhc->dev;
201
202 d_fnstart(4, dev, "(%p, feature 0x%04u)\n", wusbhc, feature);
203 switch (feature) {
204 case C_HUB_LOCAL_POWER:
205 /* FIXME: maybe plug bit 0 to the power input status,
206 * if any?
207 * see wusbhc_rh_get_hub_status() */
208 case C_HUB_OVER_CURRENT:
209 result = 0;
210 break;
211 default:
212 result = -EPIPE;
213 }
214 d_fnend(4, dev, "(%p, feature 0x%04u), %d\n", wusbhc, feature, result);
215 return result;
216}
217
218/*
219 * Return hub status (it is always zero...)
220 *
221 * @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
222 *
223 * Nothing to do, so no locking needed ;)
224 */
225static int wusbhc_rh_get_hub_status(struct wusbhc *wusbhc, u32 *buf,
226 u16 wLength)
227{
228 /* FIXME: maybe plug bit 0 to the power input status (if any)? */
229 *buf = 0;
230 return 0;
231}
232
233/*
234 * Set a port feature
235 *
236 * @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
237 */
238static int wusbhc_rh_set_port_feat(struct wusbhc *wusbhc, u16 feature,
239 u8 selector, u8 port_idx)
240{
241 int result = -EINVAL;
242 struct device *dev = wusbhc->dev;
243
244 d_fnstart(4, dev, "(feat 0x%04u, selector 0x%u, port_idx %d)\n",
245 feature, selector, port_idx);
246
247 if (port_idx > wusbhc->ports_max)
248 goto error;
249
250 switch (feature) {
251 /* According to USB2.0[11.24.2.13]p2, these features
252 * are not required to be implemented. */
253 case USB_PORT_FEAT_C_OVER_CURRENT:
254 case USB_PORT_FEAT_C_ENABLE:
255 case USB_PORT_FEAT_C_SUSPEND:
256 case USB_PORT_FEAT_C_CONNECTION:
257 case USB_PORT_FEAT_C_RESET:
258 result = 0;
259 break;
260
261 case USB_PORT_FEAT_POWER:
262 /* No such thing, but we fake it works */
263 mutex_lock(&wusbhc->mutex);
264 wusb_port_by_idx(wusbhc, port_idx)->status |= USB_PORT_STAT_POWER;
265 mutex_unlock(&wusbhc->mutex);
266 result = 0;
267 break;
268 case USB_PORT_FEAT_RESET:
269 result = wusbhc_rh_port_reset(wusbhc, port_idx);
270 break;
271 case USB_PORT_FEAT_ENABLE:
272 case USB_PORT_FEAT_SUSPEND:
273 dev_err(dev, "(port_idx %d) set feat %d/%d UNIMPLEMENTED\n",
274 port_idx, feature, selector);
275 result = -ENOSYS;
276 break;
277 default:
278 dev_err(dev, "(port_idx %d) set feat %d/%d UNKNOWN\n",
279 port_idx, feature, selector);
280 result = -EPIPE;
281 break;
282 }
283error:
284 d_fnend(4, dev, "(feat 0x%04u, selector 0x%u, port_idx %d) = %d\n",
285 feature, selector, port_idx, result);
286 return result;
287}
288
289/*
290 * Clear a port feature...
291 *
292 * @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
293 */
294static int wusbhc_rh_clear_port_feat(struct wusbhc *wusbhc, u16 feature,
295 u8 selector, u8 port_idx)
296{
297 int result = -EINVAL;
298 struct device *dev = wusbhc->dev;
299
300 d_fnstart(4, dev, "(wusbhc %p feat 0x%04x selector %d port_idx %d)\n",
301 wusbhc, feature, selector, port_idx);
302
303 if (port_idx > wusbhc->ports_max)
304 goto error;
305
306 mutex_lock(&wusbhc->mutex);
307 result = 0;
308 switch (feature) {
309 case USB_PORT_FEAT_POWER: /* fake port always on */
310 /* According to USB2.0[11.24.2.7.1.4], no need to implement? */
311 case USB_PORT_FEAT_C_OVER_CURRENT:
312 break;
313 case USB_PORT_FEAT_C_RESET:
314 wusb_port_by_idx(wusbhc, port_idx)->change &= ~USB_PORT_STAT_C_RESET;
315 break;
316 case USB_PORT_FEAT_C_CONNECTION:
317 wusb_port_by_idx(wusbhc, port_idx)->change &= ~USB_PORT_STAT_C_CONNECTION;
318 break;
319 case USB_PORT_FEAT_ENABLE:
320 __wusbhc_dev_disable(wusbhc, port_idx);
321 break;
322 case USB_PORT_FEAT_C_ENABLE:
323 wusb_port_by_idx(wusbhc, port_idx)->change &= ~USB_PORT_STAT_C_ENABLE;
324 break;
325 case USB_PORT_FEAT_SUSPEND:
326 case USB_PORT_FEAT_C_SUSPEND:
327 case 0xffff: /* ??? FIXME */
328 dev_err(dev, "(port_idx %d) Clear feat %d/%d UNIMPLEMENTED\n",
329 port_idx, feature, selector);
330 /* dump_stack(); */
331 result = -ENOSYS;
332 break;
333 default:
334 dev_err(dev, "(port_idx %d) Clear feat %d/%d UNKNOWN\n",
335 port_idx, feature, selector);
336 result = -EPIPE;
337 break;
338 }
339 mutex_unlock(&wusbhc->mutex);
340error:
341 d_fnend(4, dev, "(wusbhc %p feat 0x%04x selector %d port_idx %d) = "
342 "%d\n", wusbhc, feature, selector, port_idx, result);
343 return result;
344}
345
346/*
347 * Return the port's status
348 *
349 * @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
350 */
351static int wusbhc_rh_get_port_status(struct wusbhc *wusbhc, u16 port_idx,
352 u32 *_buf, u16 wLength)
353{
354 int result = -EINVAL;
355 u16 *buf = (u16 *) _buf;
356
357 d_fnstart(1, wusbhc->dev, "(wusbhc %p port_idx %u wLength %u)\n",
358 wusbhc, port_idx, wLength);
359 if (port_idx > wusbhc->ports_max)
360 goto error;
361 mutex_lock(&wusbhc->mutex);
362 buf[0] = cpu_to_le16(wusb_port_by_idx(wusbhc, port_idx)->status);
363 buf[1] = cpu_to_le16(wusb_port_by_idx(wusbhc, port_idx)->change);
364 result = 0;
365 mutex_unlock(&wusbhc->mutex);
366error:
367 d_fnend(1, wusbhc->dev, "(wusbhc %p) = %d, buffer:\n", wusbhc, result);
368 d_dump(1, wusbhc->dev, _buf, wLength);
369 return result;
370}
371
372/*
373 * Entry point for Root Hub operations
374 *
375 * @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
376 */
377int wusbhc_rh_control(struct usb_hcd *usb_hcd, u16 reqntype, u16 wValue,
378 u16 wIndex, char *buf, u16 wLength)
379{
380 int result = -ENOSYS;
381 struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
382
383 switch (reqntype) {
384 case GetHubDescriptor:
385 result = wusbhc_rh_get_hub_descr(
386 wusbhc, wValue, wIndex,
387 (struct usb_hub_descriptor *) buf, wLength);
388 break;
389 case ClearHubFeature:
390 result = wusbhc_rh_clear_hub_feat(wusbhc, wValue);
391 break;
392 case GetHubStatus:
393 result = wusbhc_rh_get_hub_status(wusbhc, (u32 *)buf, wLength);
394 break;
395
396 case SetPortFeature:
397 result = wusbhc_rh_set_port_feat(wusbhc, wValue, wIndex >> 8,
398 (wIndex & 0xff) - 1);
399 break;
400 case ClearPortFeature:
401 result = wusbhc_rh_clear_port_feat(wusbhc, wValue, wIndex >> 8,
402 (wIndex & 0xff) - 1);
403 break;
404 case GetPortStatus:
405 result = wusbhc_rh_get_port_status(wusbhc, wIndex - 1,
406 (u32 *)buf, wLength);
407 break;
408
409 case SetHubFeature:
410 default:
411 dev_err(wusbhc->dev, "%s (%p [%p], %x, %x, %x, %p, %x) "
412 "UNIMPLEMENTED\n", __func__, usb_hcd, wusbhc, reqntype,
413 wValue, wIndex, buf, wLength);
414 /* dump_stack(); */
415 result = -ENOSYS;
416 }
417 return result;
418}
419EXPORT_SYMBOL_GPL(wusbhc_rh_control);
420
421int wusbhc_rh_suspend(struct usb_hcd *usb_hcd)
422{
423 struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
424 dev_err(wusbhc->dev, "%s (%p [%p]) UNIMPLEMENTED\n", __func__,
425 usb_hcd, wusbhc);
426 /* dump_stack(); */
427 return -ENOSYS;
428}
429EXPORT_SYMBOL_GPL(wusbhc_rh_suspend);
430
431int wusbhc_rh_resume(struct usb_hcd *usb_hcd)
432{
433 struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
434 dev_err(wusbhc->dev, "%s (%p [%p]) UNIMPLEMENTED\n", __func__,
435 usb_hcd, wusbhc);
436 /* dump_stack(); */
437 return -ENOSYS;
438}
439EXPORT_SYMBOL_GPL(wusbhc_rh_resume);
440
441int wusbhc_rh_start_port_reset(struct usb_hcd *usb_hcd, unsigned port_idx)
442{
443 struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
444 dev_err(wusbhc->dev, "%s (%p [%p], port_idx %u) UNIMPLEMENTED\n",
445 __func__, usb_hcd, wusbhc, port_idx);
446 WARN_ON(1);
447 return -ENOSYS;
448}
449EXPORT_SYMBOL_GPL(wusbhc_rh_start_port_reset);
450
451static void wusb_port_init(struct wusb_port *port)
452{
453 port->status |= USB_PORT_STAT_HIGH_SPEED;
454}
455
456/*
457 * Alloc fake port specific fields and status.
458 */
459int wusbhc_rh_create(struct wusbhc *wusbhc)
460{
461 int result = -ENOMEM;
462 size_t port_size, itr;
463 port_size = wusbhc->ports_max * sizeof(wusbhc->port[0]);
464 wusbhc->port = kzalloc(port_size, GFP_KERNEL);
465 if (wusbhc->port == NULL)
466 goto error_port_alloc;
467 for (itr = 0; itr < wusbhc->ports_max; itr++)
468 wusb_port_init(&wusbhc->port[itr]);
469 result = 0;
470error_port_alloc:
471 return result;
472}
473
474void wusbhc_rh_destroy(struct wusbhc *wusbhc)
475{
476 kfree(wusbhc->port);
477}
diff --git a/drivers/usb/wusbcore/security.c b/drivers/usb/wusbcore/security.c
new file mode 100644
index 000000000000..a101cad6a8d4
--- /dev/null
+++ b/drivers/usb/wusbcore/security.c
@@ -0,0 +1,642 @@
1/*
2 * Wireless USB Host Controller
3 * Security support: encryption enablement, etc
4 *
5 * Copyright (C) 2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * FIXME: docs
24 */
25#include <linux/types.h>
26#include <linux/usb/ch9.h>
27#include <linux/random.h>
28#include "wusbhc.h"
29
30/*
31 * DEBUG & SECURITY WARNING!!!!
32 *
33 * If you enable this past 1, the debug code will weaken the
34 * cryptographic safety of the system (on purpose, for debugging).
35 *
36 * Weaken means:
37 * we print secret keys and intermediate values all the way,
38 */
39#undef D_LOCAL
40#define D_LOCAL 2
41#include <linux/uwb/debug.h>
42
43static void wusbhc_set_gtk_callback(struct urb *urb);
44static void wusbhc_gtk_rekey_done_work(struct work_struct *work);
45
46int wusbhc_sec_create(struct wusbhc *wusbhc)
47{
48 wusbhc->gtk.descr.bLength = sizeof(wusbhc->gtk.descr) + sizeof(wusbhc->gtk.data);
49 wusbhc->gtk.descr.bDescriptorType = USB_DT_KEY;
50 wusbhc->gtk.descr.bReserved = 0;
51
52 wusbhc->gtk_index = wusb_key_index(0, WUSB_KEY_INDEX_TYPE_GTK,
53 WUSB_KEY_INDEX_ORIGINATOR_HOST);
54
55 INIT_WORK(&wusbhc->gtk_rekey_done_work, wusbhc_gtk_rekey_done_work);
56
57 return 0;
58}
59
60
61/* Called when the HC is destroyed */
62void wusbhc_sec_destroy(struct wusbhc *wusbhc)
63{
64}
65
66
67/**
68 * wusbhc_next_tkid - generate a new, currently unused, TKID
69 * @wusbhc: the WUSB host controller
70 * @wusb_dev: the device whose PTK the TKID is for
71 * (or NULL for a TKID for a GTK)
72 *
73 * The generated TKID consists of two parts: the device's authenticated
74 * address (or 0 for a GTK); and an incrementing number. This ensures
75 * that TKIDs cannot be shared between devices and by the time the
76 * incrementing number wraps around the older TKIDs will no longer be
77 * in use (a maximum of two keys may be active at any one time).
78 */
79static u32 wusbhc_next_tkid(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
80{
81 u32 *tkid;
82 u32 addr;
83
84 if (wusb_dev == NULL) {
85 tkid = &wusbhc->gtk_tkid;
86 addr = 0;
87 } else {
88 tkid = &wusb_port_by_idx(wusbhc, wusb_dev->port_idx)->ptk_tkid;
89 addr = wusb_dev->addr & 0x7f;
90 }
91
92 *tkid = (addr << 8) | ((*tkid + 1) & 0xff);
93
94 return *tkid;
95}
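/*
 * TKID composition example (illustrative): for a device whose
 * authenticated address is 0x05 and whose previous TKID counter byte
 * was 0x3a, the next TKID is (0x05 << 8) | 0x3b = 0x053b; GTK TKIDs
 * use addr = 0 and so stay in the 0x00xx range.
 */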
96
97static void wusbhc_generate_gtk(struct wusbhc *wusbhc)
98{
99 const size_t key_size = sizeof(wusbhc->gtk.data);
100 u32 tkid;
101
102 tkid = wusbhc_next_tkid(wusbhc, NULL);
103
104 wusbhc->gtk.descr.tTKID[0] = (tkid >> 0) & 0xff;
105 wusbhc->gtk.descr.tTKID[1] = (tkid >> 8) & 0xff;
106 wusbhc->gtk.descr.tTKID[2] = (tkid >> 16) & 0xff;
107
108 get_random_bytes(wusbhc->gtk.descr.bKeyData, key_size);
109}
110
111/**
112 * wusbhc_sec_start - start the security management process
113 * @wusbhc: the WUSB host controller
114 *
115 * Generate and set an initial GTK on the host controller.
116 *
117 * Called when the HC is started.
118 */
119int wusbhc_sec_start(struct wusbhc *wusbhc)
120{
121 const size_t key_size = sizeof(wusbhc->gtk.data);
122 int result;
123
124 wusbhc_generate_gtk(wusbhc);
125
126 result = wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid,
127 &wusbhc->gtk.descr.bKeyData, key_size);
128 if (result < 0)
129 dev_err(wusbhc->dev, "cannot set GTK for the host: %d\n",
130 result);
131
132 return result;
133}
134
135/**
136 * wusbhc_sec_stop - stop the security management process
137 * @wusbhc: the WUSB host controller
138 *
139 * Wait for any pending GTK rekeys to stop.
140 */
141void wusbhc_sec_stop(struct wusbhc *wusbhc)
142{
143 cancel_work_sync(&wusbhc->gtk_rekey_done_work);
144}
145
146
147/** @returns encryption type name */
148const char *wusb_et_name(u8 x)
149{
150 switch (x) {
151 case USB_ENC_TYPE_UNSECURE: return "unsecure";
152 case USB_ENC_TYPE_WIRED: return "wired";
153 case USB_ENC_TYPE_CCM_1: return "CCM-1";
154 case USB_ENC_TYPE_RSA_1: return "RSA-1";
155 default: return "unknown";
156 }
157}
158EXPORT_SYMBOL_GPL(wusb_et_name);
159
160/*
161 * Set the device encryption method
162 *
163 * We tell the device which encryption method to use; we do this when
164 * setting up the device's security.
165 */
166static int wusb_dev_set_encryption(struct usb_device *usb_dev, int value)
167{
168 int result;
169 struct device *dev = &usb_dev->dev;
170 struct wusb_dev *wusb_dev = usb_dev->wusb_dev;
171
172 if (value) {
173 value = wusb_dev->ccm1_etd.bEncryptionValue;
174 } else {
175 /* FIXME: should be wusb_dev->etd[UNSECURE].bEncryptionValue */
176 value = 0;
177 }
178 /* Set device's */
179 result = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
180 USB_REQ_SET_ENCRYPTION,
181 USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
182 value, 0, NULL, 0, 1000 /* FIXME: arbitrary */);
183 if (result < 0)
184 dev_err(dev, "Can't set device's WUSB encryption to "
185 "%s (value %d): %d\n",
186 wusb_et_name(wusb_dev->ccm1_etd.bEncryptionType),
187 wusb_dev->ccm1_etd.bEncryptionValue, result);
188 return result;
189}
190
191/*
192 * Set the GTK to be used by a device.
193 *
194 * The device must be authenticated.
195 */
196static int wusb_dev_set_gtk(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
197{
198 struct usb_device *usb_dev = wusb_dev->usb_dev;
199
200 return usb_control_msg(
201 usb_dev, usb_sndctrlpipe(usb_dev, 0),
202 USB_REQ_SET_DESCRIPTOR,
203 USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
204 USB_DT_KEY << 8 | wusbhc->gtk_index, 0,
205 &wusbhc->gtk.descr, wusbhc->gtk.descr.bLength,
206 1000);
207}
208
209
210/* FIXME: prototype for adding security */
211int wusb_dev_sec_add(struct wusbhc *wusbhc,
212 struct usb_device *usb_dev, struct wusb_dev *wusb_dev)
213{
214 int result, bytes, secd_size;
215 struct device *dev = &usb_dev->dev;
216 struct usb_security_descriptor secd;
217 const struct usb_encryption_descriptor *etd, *ccm1_etd = NULL;
218 void *secd_buf;
219 const void *itr, *top;
220 char buf[64];
221
222 d_fnstart(3, dev, "(usb_dev %p, wusb_dev %p)\n", usb_dev, wusb_dev);
223 result = usb_get_descriptor(usb_dev, USB_DT_SECURITY,
224 0, &secd, sizeof(secd));
225	if (result < (int)sizeof(secd)) {
226 dev_err(dev, "Can't read security descriptor or "
227 "not enough data: %d\n", result);
228 goto error_secd;
229 }
230 secd_size = le16_to_cpu(secd.wTotalLength);
231 d_printf(5, dev, "got %d bytes of sec descriptor, total is %d\n",
232 result, secd_size);
233 secd_buf = kmalloc(secd_size, GFP_KERNEL);
234	if (secd_buf == NULL) {
235		dev_err(dev, "Can't allocate space for security descriptors\n");
		result = -ENOMEM;
236		goto error_secd_alloc;
237 }
238 result = usb_get_descriptor(usb_dev, USB_DT_SECURITY,
239 0, secd_buf, secd_size);
240 if (result < secd_size) {
241 dev_err(dev, "Can't read security descriptor or "
242 "not enough data: %d\n", result);
243 goto error_secd_all;
244 }
245 d_printf(5, dev, "got %d bytes of sec descriptors\n", result);
246 bytes = 0;
247 itr = secd_buf + sizeof(secd);
248 top = secd_buf + result;
249 while (itr < top) {
250 etd = itr;
251 if (top - itr < sizeof(*etd)) {
252 dev_err(dev, "BUG: bad device security descriptor; "
253 "not enough data (%zu vs %zu bytes left)\n",
254 top - itr, sizeof(*etd));
255 break;
256 }
257 if (etd->bLength < sizeof(*etd)) {
258 dev_err(dev, "BUG: bad device encryption descriptor; "
259 "descriptor is too short "
260 "(%u vs %zu needed)\n",
261 etd->bLength, sizeof(*etd));
262 break;
263 }
264 itr += etd->bLength;
265 bytes += snprintf(buf + bytes, sizeof(buf) - bytes,
266 "%s (0x%02x/%02x) ",
267 wusb_et_name(etd->bEncryptionType),
268 etd->bEncryptionValue, etd->bAuthKeyIndex);
269 if (etd->bEncryptionType == USB_ENC_TYPE_CCM_1)
270 ccm1_etd = etd;
271 }
272 /* This code only supports CCM1 as of now. */
273 /* FIXME: user has to choose which sec mode to use?
274 * In theory we want CCM */
275 if (ccm1_etd == NULL) {
276 dev_err(dev, "WUSB device doesn't support CCM1 encryption, "
277 "can't use!\n");
278 result = -EINVAL;
279 goto error_no_ccm1;
280 }
281 wusb_dev->ccm1_etd = *ccm1_etd;
282 dev_info(dev, "supported encryption: %s; using %s (0x%02x/%02x)\n",
283 buf, wusb_et_name(ccm1_etd->bEncryptionType),
284 ccm1_etd->bEncryptionValue, ccm1_etd->bAuthKeyIndex);
285 result = 0;
286 kfree(secd_buf);
287out:
288 d_fnend(3, dev, "(usb_dev %p, wusb_dev %p) = %d\n",
289 usb_dev, wusb_dev, result);
290 return result;
291
292
293error_no_ccm1:
294error_secd_all:
295 kfree(secd_buf);
296error_secd_alloc:
297error_secd:
298 goto out;
299}
300
301void wusb_dev_sec_rm(struct wusb_dev *wusb_dev)
302{
303 /* Nothing so far */
304}
305
306static void hs_printk(unsigned level, struct device *dev,
307 struct usb_handshake *hs)
308{
309 d_printf(level, dev,
310 " bMessageNumber: %u\n"
311 " bStatus: %u\n"
312 " tTKID: %02x %02x %02x\n"
313 " CDID: %02x %02x %02x %02x %02x %02x %02x %02x\n"
314 " %02x %02x %02x %02x %02x %02x %02x %02x\n"
315 " nonce: %02x %02x %02x %02x %02x %02x %02x %02x\n"
316 " %02x %02x %02x %02x %02x %02x %02x %02x\n"
317 " MIC: %02x %02x %02x %02x %02x %02x %02x %02x\n",
318 hs->bMessageNumber, hs->bStatus,
319 hs->tTKID[2], hs->tTKID[1], hs->tTKID[0],
320 hs->CDID[0], hs->CDID[1], hs->CDID[2], hs->CDID[3],
321 hs->CDID[4], hs->CDID[5], hs->CDID[6], hs->CDID[7],
322 hs->CDID[8], hs->CDID[9], hs->CDID[10], hs->CDID[11],
323 hs->CDID[12], hs->CDID[13], hs->CDID[14], hs->CDID[15],
324 hs->nonce[0], hs->nonce[1], hs->nonce[2], hs->nonce[3],
325 hs->nonce[4], hs->nonce[5], hs->nonce[6], hs->nonce[7],
326 hs->nonce[8], hs->nonce[9], hs->nonce[10], hs->nonce[11],
327 hs->nonce[12], hs->nonce[13], hs->nonce[14], hs->nonce[15],
328 hs->MIC[0], hs->MIC[1], hs->MIC[2], hs->MIC[3],
329 hs->MIC[4], hs->MIC[5], hs->MIC[6], hs->MIC[7]);
330}
331
332/**
333 * Update the address of an unauthenticated WUSB device
334 *
335 * Once we have successfully authenticated, we take it to addr0 state
336 * and then to a normal address.
337 *
338 * Before this, the device's address (as known by it) was usb_dev->devnum |
339 * 0x80 (the unauthenticated address); here we update it to usb_dev->devnum.
340 */
341static int wusb_dev_update_address(struct wusbhc *wusbhc,
342 struct wusb_dev *wusb_dev)
343{
344 int result = -ENOMEM;
345 struct usb_device *usb_dev = wusb_dev->usb_dev;
346 struct device *dev = &usb_dev->dev;
347 u8 new_address = wusb_dev->addr & 0x7F;
348
349 /* Set address 0 */
350 result = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
351 USB_REQ_SET_ADDRESS, 0,
352 0, 0, NULL, 0, 1000 /* FIXME: arbitrary */);
353 if (result < 0) {
354 dev_err(dev, "auth failed: can't set address 0: %d\n",
355 result);
356 goto error_addr0;
357 }
358 result = wusb_set_dev_addr(wusbhc, wusb_dev, 0);
359 if (result < 0)
360 goto error_addr0;
361 usb_ep0_reinit(usb_dev);
362
363 /* Set new (authenticated) address. */
364 result = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
365 USB_REQ_SET_ADDRESS, 0,
366 new_address, 0, NULL, 0,
367 1000 /* FIXME: arbitrary */);
368 if (result < 0) {
369 dev_err(dev, "auth failed: can't set address %u: %d\n",
370 new_address, result);
371 goto error_addr;
372 }
373 result = wusb_set_dev_addr(wusbhc, wusb_dev, new_address);
374 if (result < 0)
375 goto error_addr;
376 usb_ep0_reinit(usb_dev);
377 usb_dev->authenticated = 1;
378error_addr:
379error_addr0:
380 return result;
381}
382
383/*
384 * Perform the WUSB 4-way handshake with a device: authenticate it,
385 * derive and install the PTK, and hand the device the current GTK.
386 */
387/* FIXME: split and cleanup */
388int wusb_dev_4way_handshake(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev,
389 struct wusb_ckhdid *ck)
390{
391 int result = -ENOMEM;
392 struct usb_device *usb_dev = wusb_dev->usb_dev;
393 struct device *dev = &usb_dev->dev;
394 u32 tkid;
395 __le32 tkid_le;
396 struct usb_handshake *hs;
397 struct aes_ccm_nonce ccm_n;
398 u8 mic[8];
399 struct wusb_keydvt_in keydvt_in;
400 struct wusb_keydvt_out keydvt_out;
401
402 hs = kzalloc(3*sizeof(hs[0]), GFP_KERNEL);
403 if (hs == NULL) {
404 dev_err(dev, "can't allocate handshake data\n");
405 goto error_kzalloc;
406 }
407
408	/* We need to turn on encryption before beginning the 4-way
409	 * handshake (WUSB1.0[.3.2.2]) */
410 result = wusb_dev_set_encryption(usb_dev, 1);
411 if (result < 0)
412 goto error_dev_set_encryption;
413
414 tkid = wusbhc_next_tkid(wusbhc, wusb_dev);
415 tkid_le = cpu_to_le32(tkid);
416
417 hs[0].bMessageNumber = 1;
418 hs[0].bStatus = 0;
419 memcpy(hs[0].tTKID, &tkid_le, sizeof(hs[0].tTKID));
420 hs[0].bReserved = 0;
421 memcpy(hs[0].CDID, &wusb_dev->cdid, sizeof(hs[0].CDID));
422 get_random_bytes(&hs[0].nonce, sizeof(hs[0].nonce));
423 memset(hs[0].MIC, 0, sizeof(hs[0].MIC)); /* Per WUSB1.0[T7-22] */
424
425 d_printf(1, dev, "I: sending hs1:\n");
426 hs_printk(2, dev, &hs[0]);
427
428 result = usb_control_msg(
429 usb_dev, usb_sndctrlpipe(usb_dev, 0),
430 USB_REQ_SET_HANDSHAKE,
431 USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
432 1, 0, &hs[0], sizeof(hs[0]), 1000 /* FIXME: arbitrary */);
433 if (result < 0) {
434 dev_err(dev, "Handshake1: request failed: %d\n", result);
435 goto error_hs1;
436 }
437
438 /* Handshake 2, from the device -- need to verify fields */
439 result = usb_control_msg(
440 usb_dev, usb_rcvctrlpipe(usb_dev, 0),
441 USB_REQ_GET_HANDSHAKE,
442 USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
443 2, 0, &hs[1], sizeof(hs[1]), 1000 /* FIXME: arbitrary */);
444 if (result < 0) {
445 dev_err(dev, "Handshake2: request failed: %d\n", result);
446 goto error_hs2;
447 }
448 d_printf(1, dev, "got HS2:\n");
449 hs_printk(2, dev, &hs[1]);
450
451 result = -EINVAL;
452 if (hs[1].bMessageNumber != 2) {
453 dev_err(dev, "Handshake2 failed: bad message number %u\n",
454 hs[1].bMessageNumber);
455 goto error_hs2;
456 }
457 if (hs[1].bStatus != 0) {
458 dev_err(dev, "Handshake2 failed: bad status %u\n",
459 hs[1].bStatus);
460 goto error_hs2;
461 }
462 if (memcmp(hs[0].tTKID, hs[1].tTKID, sizeof(hs[0].tTKID))) {
463 dev_err(dev, "Handshake2 failed: TKID mismatch "
464 "(#1 0x%02x%02x%02x vs #2 0x%02x%02x%02x)\n",
465 hs[0].tTKID[0], hs[0].tTKID[1], hs[0].tTKID[2],
466 hs[1].tTKID[0], hs[1].tTKID[1], hs[1].tTKID[2]);
467 goto error_hs2;
468 }
469 if (memcmp(hs[0].CDID, hs[1].CDID, sizeof(hs[0].CDID))) {
470 dev_err(dev, "Handshake2 failed: CDID mismatch\n");
471 goto error_hs2;
472 }
473
474 /* Setup the CCM nonce */
475 memset(&ccm_n.sfn, 0, sizeof(ccm_n.sfn)); /* Per WUSB1.0[6.5.2] */
476 memcpy(ccm_n.tkid, &tkid_le, sizeof(ccm_n.tkid));
477 ccm_n.src_addr = wusbhc->uwb_rc->uwb_dev.dev_addr;
478 ccm_n.dest_addr.data[0] = wusb_dev->addr;
479 ccm_n.dest_addr.data[1] = 0;
480
481 /* Derive the KCK and PTK from CK, the CCM, H and D nonces */
482 memcpy(keydvt_in.hnonce, hs[0].nonce, sizeof(keydvt_in.hnonce));
483 memcpy(keydvt_in.dnonce, hs[1].nonce, sizeof(keydvt_in.dnonce));
484 result = wusb_key_derive(&keydvt_out, ck->data, &ccm_n, &keydvt_in);
485 if (result < 0) {
486 dev_err(dev, "Handshake2 failed: cannot derive keys: %d\n",
487 result);
488 goto error_hs2;
489 }
490 d_printf(2, dev, "KCK:\n");
491 d_dump(2, dev, keydvt_out.kck, sizeof(keydvt_out.kck));
492 d_printf(2, dev, "PTK:\n");
493 d_dump(2, dev, keydvt_out.ptk, sizeof(keydvt_out.ptk));
494
495 /* Compute MIC and verify it */
496 result = wusb_oob_mic(mic, keydvt_out.kck, &ccm_n, &hs[1]);
497 if (result < 0) {
498 dev_err(dev, "Handshake2 failed: cannot compute MIC: %d\n",
499 result);
500 goto error_hs2;
501 }
502
503 d_printf(2, dev, "MIC:\n");
504 d_dump(2, dev, mic, sizeof(mic));
505 if (memcmp(hs[1].MIC, mic, sizeof(hs[1].MIC))) {
506 dev_err(dev, "Handshake2 failed: MIC mismatch\n");
507 goto error_hs2;
508 }
509
510 /* Send Handshake3 */
511 hs[2].bMessageNumber = 3;
512 hs[2].bStatus = 0;
513 memcpy(hs[2].tTKID, &tkid_le, sizeof(hs[2].tTKID));
514 hs[2].bReserved = 0;
515 memcpy(hs[2].CDID, &wusb_dev->cdid, sizeof(hs[2].CDID));
516 memcpy(hs[2].nonce, hs[0].nonce, sizeof(hs[2].nonce));
517 result = wusb_oob_mic(hs[2].MIC, keydvt_out.kck, &ccm_n, &hs[2]);
518 if (result < 0) {
519 dev_err(dev, "Handshake3 failed: cannot compute MIC: %d\n",
520 result);
521 goto error_hs2;
522 }
523
524 d_printf(1, dev, "I: sending hs3:\n");
525 hs_printk(2, dev, &hs[2]);
526
527 result = usb_control_msg(
528 usb_dev, usb_sndctrlpipe(usb_dev, 0),
529 USB_REQ_SET_HANDSHAKE,
530 USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
531 3, 0, &hs[2], sizeof(hs[2]), 1000 /* FIXME: arbitrary */);
532 if (result < 0) {
533 dev_err(dev, "Handshake3: request failed: %d\n", result);
534 goto error_hs3;
535 }
536
537 d_printf(1, dev, "I: turning on encryption on host for device\n");
538 d_dump(2, dev, keydvt_out.ptk, sizeof(keydvt_out.ptk));
539 result = wusbhc->set_ptk(wusbhc, wusb_dev->port_idx, tkid,
540 keydvt_out.ptk, sizeof(keydvt_out.ptk));
541 if (result < 0)
542 goto error_wusbhc_set_ptk;
543
544 d_printf(1, dev, "I: setting a GTK\n");
545 result = wusb_dev_set_gtk(wusbhc, wusb_dev);
546 if (result < 0) {
547 dev_err(dev, "Set GTK for device: request failed: %d\n",
548 result);
549 goto error_wusbhc_set_gtk;
550 }
551
552 /* Update the device's address from unauth to auth */
553 if (usb_dev->authenticated == 0) {
554		d_printf(1, dev, "I: updating address from non-auth to auth\n");
555 result = wusb_dev_update_address(wusbhc, wusb_dev);
556 if (result < 0)
557 goto error_dev_update_address;
558 }
559 result = 0;
560	d_printf(1, dev, "I: 4-way handshake done, device authenticated\n");
561
562error_dev_update_address:
563error_wusbhc_set_gtk:
564error_wusbhc_set_ptk:
565error_hs3:
566error_hs2:
567error_hs1:
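	/* Zero all keying material; this runs on both the success and
	 * error paths. */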
568 memset(hs, 0, 3*sizeof(hs[0]));
569 memset(&keydvt_out, 0, sizeof(keydvt_out));
570 memset(&keydvt_in, 0, sizeof(keydvt_in));
571 memset(&ccm_n, 0, sizeof(ccm_n));
572 memset(mic, 0, sizeof(mic));
573 if (result < 0) {
574 /* error path */
575 wusb_dev_set_encryption(usb_dev, 0);
576 }
577error_dev_set_encryption:
578 kfree(hs);
579error_kzalloc:
580 return result;
581}
582
583/*
584 * Once all connected and authenticated devices have received the new
585 * GTK, switch the host to using it.
586 */
587static void wusbhc_gtk_rekey_done_work(struct work_struct *work)
588{
589 struct wusbhc *wusbhc = container_of(work, struct wusbhc, gtk_rekey_done_work);
590 size_t key_size = sizeof(wusbhc->gtk.data);
591
592 mutex_lock(&wusbhc->mutex);
593
594 if (--wusbhc->pending_set_gtks == 0)
595 wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid, &wusbhc->gtk.descr.bKeyData, key_size);
596
597 mutex_unlock(&wusbhc->mutex);
598}
599
600static void wusbhc_set_gtk_callback(struct urb *urb)
601{
602 struct wusbhc *wusbhc = urb->context;
603
604 queue_work(wusbd, &wusbhc->gtk_rekey_done_work);
605}
606
607/**
608 * wusbhc_gtk_rekey - generate and distribute a new GTK
609 * @wusbhc: the WUSB host controller
610 *
611 * Generate a new GTK and distribute it to all connected and
612 * authenticated devices. When all devices have the new GTK, the host
613 * starts using it.
614 *
615 * This must be called after every device disconnect (see [WUSB]
616 * section 6.2.11.2).
617 */
618void wusbhc_gtk_rekey(struct wusbhc *wusbhc)
619{
620 static const size_t key_size = sizeof(wusbhc->gtk.data);
621 int p;
622
623 wusbhc_generate_gtk(wusbhc);
624
625 for (p = 0; p < wusbhc->ports_max; p++) {
626 struct wusb_dev *wusb_dev;
627
628 wusb_dev = wusbhc->port[p].wusb_dev;
629		if (!wusb_dev || !wusb_dev->usb_dev || !wusb_dev->usb_dev->authenticated)
630 continue;
631
632 usb_fill_control_urb(wusb_dev->set_gtk_urb, wusb_dev->usb_dev,
633 usb_sndctrlpipe(wusb_dev->usb_dev, 0),
634 (void *)wusb_dev->set_gtk_req,
635 &wusbhc->gtk.descr, wusbhc->gtk.descr.bLength,
636 wusbhc_set_gtk_callback, wusbhc);
637 if (usb_submit_urb(wusb_dev->set_gtk_urb, GFP_KERNEL) == 0)
638 wusbhc->pending_set_gtks++;
639 }
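	/* If no rekey URBs were queued (no authenticated devices, or all
	 * submissions failed), switch the host to the new GTK now. */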
640 if (wusbhc->pending_set_gtks == 0)
641 wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid, &wusbhc->gtk.descr.bKeyData, key_size);
642}
diff --git a/drivers/usb/wusbcore/wa-hc.c b/drivers/usb/wusbcore/wa-hc.c
new file mode 100644
index 000000000000..9d04722415bb
--- /dev/null
+++ b/drivers/usb/wusbcore/wa-hc.c
@@ -0,0 +1,95 @@
1/*
2 * Wire Adapter Host Controller Driver
3 * Common items to HWA and DWA based HCDs
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * FIXME: docs
24 */
25#include "wusbhc.h"
26#include "wa-hc.h"
27
28/**
29 * Assumes
30 *
31 * wa->usb_dev and wa->usb_iface initialized and refcounted,
32 * wa->wa_descr initialized.
33 */
34int wa_create(struct wahc *wa, struct usb_interface *iface)
35{
36 int result;
37 struct device *dev = &iface->dev;
38
39 result = wa_rpipes_create(wa);
40 if (result < 0)
41 goto error_rpipes_create;
42 /* Fill up Data Transfer EP pointers */
43 wa->dti_epd = &iface->cur_altsetting->endpoint[1].desc;
44 wa->dto_epd = &iface->cur_altsetting->endpoint[2].desc;
45 wa->xfer_result_size = le16_to_cpu(wa->dti_epd->wMaxPacketSize);
46 wa->xfer_result = kmalloc(wa->xfer_result_size, GFP_KERNEL);
47	if (wa->xfer_result == NULL) {
		result = -ENOMEM;
48		goto error_xfer_result_alloc;
	}
49 result = wa_nep_create(wa, iface);
50 if (result < 0) {
51 dev_err(dev, "WA-CDS: can't initialize notif endpoint: %d\n",
52 result);
53 goto error_nep_create;
54 }
55 return 0;
56
57error_nep_create:
58 kfree(wa->xfer_result);
59error_xfer_result_alloc:
60 wa_rpipes_destroy(wa);
61error_rpipes_create:
62 return result;
63}
64EXPORT_SYMBOL_GPL(wa_create);
65
66
67void __wa_destroy(struct wahc *wa)
68{
69 if (wa->dti_urb) {
70 usb_kill_urb(wa->dti_urb);
71 usb_put_urb(wa->dti_urb);
72 usb_kill_urb(wa->buf_in_urb);
73 usb_put_urb(wa->buf_in_urb);
74 }
75 kfree(wa->xfer_result);
76 wa_nep_destroy(wa);
77 wa_rpipes_destroy(wa);
78}
79EXPORT_SYMBOL_GPL(__wa_destroy);
80
81/**
82 * wa_reset_all - reset the WA device
83 * @wa: the WA to be reset
84 *
85 * For HWAs the radio controller and all other PALs are also reset.
86 */
87void wa_reset_all(struct wahc *wa)
88{
89 /* FIXME: assuming HWA. */
90 wusbhc_reset_all(wa->wusb);
91}
92
93MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
94MODULE_DESCRIPTION("Wireless USB Wire Adapter core");
95MODULE_LICENSE("GPL");
diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
new file mode 100644
index 000000000000..586d350cdb4d
--- /dev/null
+++ b/drivers/usb/wusbcore/wa-hc.h
@@ -0,0 +1,417 @@
1/*
2 * HWA Host Controller Driver
3 * Wire Adapter Control/Data Streaming Iface (WUSB1.0[8])
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * This driver implements a USB Host Controller (struct usb_hcd) for a
24 * Wireless USB Host Controller based on the Wireless USB 1.0
25 * Host-Wire-Adapter specification (in layman terms, a USB-dongle that
26 * implements a Wireless USB host).
27 *
28 * Check out the Design-overview.txt file in the source documentation
29 * for other details on the implementation.
30 *
31 * Main blocks:
32 *
33 * driver glue with the driver API, workqueue daemon
34 *
35 * lc RC instance life cycle management (create, destroy...)
36 *
37 * hcd glue with the USB API Host Controller Interface API.
38 *
39 * nep Notification EndPoint management: collect notifications
40 * and queue them with the workqueue daemon.
41 *
42 * Handles notifications coming from the NEP and sends them
43 * off to their respective modules (e.g. connect,
44 * disconnect and reset go to devconnect).
45 *
46 * rpipe Remote Pipe management; rpipe is what we use to write
47 * to an endpoint on a WUSB device that is connected to a
48 * HWA RC.
49 *
50 * xfer Transfer management -- this is all the code that gets a
51 * buffer and pushes it to a device (or vice versa).
52 *
53 * Some day a lot of this code will be shared between this driver and
54 * the drivers for DWA (xfer, rpipe).
55 *
56 * Everything starts at driver.c:hwahc_probe(), when one of these devices
57 * is connected. hwahc_disconnect() stops it.
58 *
59 * During operation, the main activity is devices connecting or
60 * disconnecting. They cause the HWA RC to send notifications into
61 * nep.c:hwahc_nep_cb() that will dispatch them to
62 * notif.c:wa_notif_dispatch(). From there they will fan to cause
63 * device connects, disconnects, etc.
64 *
65 * Note much of the activity is difficult to follow. For example a
66 * device connect goes to devconnect, which will cause the "fake" root
67 * hub port to show a connect and stop there. Then khubd will notice
68 * and call into the rh.c:hwahc_rc_port_reset() code to authenticate
69 * the device (and this might require user intervention) and enable
70 * the port.
71 *
72 * We also have a timer workqueue going from devconnect.c that
73 * schedules in hwahc_devconnect_create().
74 *
75 * The rest of the traffic is in the usual entry points of a USB HCD,
76 * which are hooked up in driver.c:hwahc_rc_driver, and defined in
77 * hcd.c.
78 */
79
80#ifndef __HWAHC_INTERNAL_H__
81#define __HWAHC_INTERNAL_H__
82
83#include <linux/completion.h>
84#include <linux/usb.h>
85#include <linux/mutex.h>
86#include <linux/spinlock.h>
87#include <linux/uwb.h>
88#include <linux/usb/wusb.h>
89#include <linux/usb/wusb-wa.h>
90
91struct wusbhc;
92struct wahc;
93extern void wa_urb_enqueue_run(struct work_struct *ws);
94
95/**
96 * RPipe instance
97 *
98 * @descr's fields are kept in LE, as we need to send it back and
99 * forth.
100 *
101 * @wa is referenced when set
102 *
103 * @segs_available is the number of request segments that still can
104 * be submitted to the controller without overloading
105 * it. It is initialized to descr->wRequests when
106 * aiming.
107 *
108 * An rpipe supports a maximum of descr->wRequests segments in flight at
109 * the same time; seg_lock has to be taken before submitting. If
110 * segs_available > 0, then we can submit; if not, we have to queue them.
111 */
112struct wa_rpipe {
113 struct kref refcnt;
114 struct usb_rpipe_descriptor descr;
115 struct usb_host_endpoint *ep;
116 struct wahc *wa;
117 spinlock_t seg_lock;
118 struct list_head seg_list;
119 atomic_t segs_available;
120 u8 buffer[1]; /* For reads/writes on USB */
121};
122
123
124/**
125 * Instance of a HWA Host Controller
126 *
127 * Except where a more specific lock/mutex applies or atomic, all
128 * fields protected by @mutex.
129 *
130 * @wa_descr Can be accessed without locking because it is in
131 * the same area where the device descriptors were
132 * read, so it is guaranteed to exist unmodified while
133 * the device exists.
134 *
135 * Endianness has been converted to the CPU's.
136 *
137 * @nep_* can be accessed without locking as its processing is
138 * serialized; we submit a NEP URB and it comes to
139 * hwahc_nep_cb(), which won't issue another URB until it is
140 * done processing it.
141 *
142 * @xfer_list:
143 *
144 * List of active transfers to verify existence from a xfer id
145 * gotten from the xfer result message. Can't use urb->list because
146 * it goes by endpoint, and we don't know the endpoint at the time
147 * when we get the xfer result message. We can't really rely on the
148 * pointer (will have to change for 64 bits) as the xfer id is 32 bits.
149 *
150 * @xfer_delayed_list: List of transfers that need to be started
151 * (with a workqueue, because they were
152 * submitted from an atomic context).
153 *
154 * FIXME: this needs to be layered up: a wusbhc layer (for sharing
155 * commonalities with WHCI), a wa layer (for sharing
156 * commonalities with DWA-RC).
157 */
158struct wahc {
159 struct usb_device *usb_dev;
160 struct usb_interface *usb_iface;
161
162 /* HC to deliver notifications */
163 union {
164 struct wusbhc *wusb;
165 struct dwahc *dwa;
166 };
167
168 const struct usb_endpoint_descriptor *dto_epd, *dti_epd;
169 const struct usb_wa_descriptor *wa_descr;
170
171 struct urb *nep_urb; /* Notification EndPoint [lockless] */
172 struct edc nep_edc;
173 void *nep_buffer;
174 size_t nep_buffer_size;
175
176 atomic_t notifs_queued;
177
178 u16 rpipes;
179 unsigned long *rpipe_bm; /* rpipe usage bitmap */
180 spinlock_t rpipe_bm_lock; /* protect rpipe_bm */
181 struct mutex rpipe_mutex; /* assigning resources to endpoints */
182
183 struct urb *dti_urb; /* URB for reading xfer results */
184 struct urb *buf_in_urb; /* URB for reading data in */
185 struct edc dti_edc; /* DTI error density counter */
186 struct wa_xfer_result *xfer_result; /* real size = dti_ep maxpktsize */
187 size_t xfer_result_size;
188
189 s32 status; /* For reading status */
190
191 struct list_head xfer_list;
192 struct list_head xfer_delayed_list;
193 spinlock_t xfer_list_lock;
194 struct work_struct xfer_work;
195 atomic_t xfer_id_count;
196};
197
198
199extern int wa_create(struct wahc *wa, struct usb_interface *iface);
200extern void __wa_destroy(struct wahc *wa);
201void wa_reset_all(struct wahc *wa);
202
203
204/* Miscellaneous constants */
205enum {
206 /** Max number of EPROTO errors we tolerate on the NEP in a
207 * period of time */
208 HWAHC_EPROTO_MAX = 16,
209 /** Period of time for EPROTO errors (in jiffies) */
210 HWAHC_EPROTO_PERIOD = 4 * HZ,
211};
212
213
214/* Notification endpoint handling */
215extern int wa_nep_create(struct wahc *, struct usb_interface *);
216extern void wa_nep_destroy(struct wahc *);
217
218static inline int wa_nep_arm(struct wahc *wa, gfp_t gfp_mask)
219{
220 struct urb *urb = wa->nep_urb;
221 urb->transfer_buffer = wa->nep_buffer;
222 urb->transfer_buffer_length = wa->nep_buffer_size;
223 return usb_submit_urb(urb, gfp_mask);
224}
225
226static inline void wa_nep_disarm(struct wahc *wa)
227{
228 usb_kill_urb(wa->nep_urb);
229}
230
231
232/* RPipes */
233static inline void wa_rpipe_init(struct wahc *wa)
234{
235 spin_lock_init(&wa->rpipe_bm_lock);
236 mutex_init(&wa->rpipe_mutex);
237}
238
239static inline void wa_init(struct wahc *wa)
240{
241 edc_init(&wa->nep_edc);
242 atomic_set(&wa->notifs_queued, 0);
243 wa_rpipe_init(wa);
244 edc_init(&wa->dti_edc);
245 INIT_LIST_HEAD(&wa->xfer_list);
246 INIT_LIST_HEAD(&wa->xfer_delayed_list);
247 spin_lock_init(&wa->xfer_list_lock);
248 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
249 atomic_set(&wa->xfer_id_count, 1);
250}
251
252/**
253 * Destroy a pipe (when refcount drops to zero)
254 *
255 * Assumes it has been moved to the "QUIESCING" state.
256 */
257struct wa_xfer;
258extern void rpipe_destroy(struct kref *_rpipe);
259static inline
260void __rpipe_get(struct wa_rpipe *rpipe)
261{
262 kref_get(&rpipe->refcnt);
263}
264extern int rpipe_get_by_ep(struct wahc *, struct usb_host_endpoint *,
265 struct urb *, gfp_t);
266static inline void rpipe_put(struct wa_rpipe *rpipe)
267{
268 kref_put(&rpipe->refcnt, rpipe_destroy);
269
270}
271extern void rpipe_ep_disable(struct wahc *, struct usb_host_endpoint *);
272extern int wa_rpipes_create(struct wahc *);
273extern void wa_rpipes_destroy(struct wahc *);
274static inline void rpipe_avail_dec(struct wa_rpipe *rpipe)
275{
276 atomic_dec(&rpipe->segs_available);
277}
278
279/**
280 * Returns true if the rpipe is ready to submit more segments.
281 */
282static inline int rpipe_avail_inc(struct wa_rpipe *rpipe)
283{
284 return atomic_inc_return(&rpipe->segs_available) > 0
285 && !list_empty(&rpipe->seg_list);
286}
287
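/*
 * Example only (not used by the driver): a minimal sketch of how a
 * transfer path would honour the segs_available/seg_list contract
 * documented for 'struct wa_rpipe'.  'seg_entry' and the actual
 * submission step are hypothetical placeholders for this sketch.
 */
static inline bool rpipe_claim_or_queue_sketch(struct wa_rpipe *rpipe,
					       struct list_head *seg_entry)
{
	unsigned long flags;
	bool can_submit;

	spin_lock_irqsave(&rpipe->seg_lock, flags);
	can_submit = atomic_read(&rpipe->segs_available) > 0;
	if (can_submit)
		rpipe_avail_dec(rpipe);	/* claim one of the wRequests slots */
	else
		list_add_tail(seg_entry, &rpipe->seg_list); /* park for later */
	spin_unlock_irqrestore(&rpipe->seg_lock, flags);
	return can_submit;	/* caller submits the segment if true */
}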
288
289/* Transferring data */
290extern int wa_urb_enqueue(struct wahc *, struct usb_host_endpoint *,
291 struct urb *, gfp_t);
292extern int wa_urb_dequeue(struct wahc *, struct urb *);
293extern void wa_handle_notif_xfer(struct wahc *, struct wa_notif_hdr *);
294
295
296/* Misc
297 *
298 * FIXME: Refcounting for the actual @hwahc object is not correct; I
299 * mean, this should be refcounting on the HCD underneath, but
300 * it is not. In any case, the semantics for HCD refcounting
301 * are *weird*...on refcount reaching zero it just frees
302 * it...no RC specific function is called...unless I miss
303 * something.
304 *
305 * FIXME: has to go away in favour of a 'struct hcd' based solution
306 */
307static inline struct wahc *wa_get(struct wahc *wa)
308{
309 usb_get_intf(wa->usb_iface);
310 return wa;
311}
312
313static inline void wa_put(struct wahc *wa)
314{
315 usb_put_intf(wa->usb_iface);
316}
317
318
319static inline int __wa_feature(struct wahc *wa, unsigned op, u16 feature)
320{
321 return usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
322 op ? USB_REQ_SET_FEATURE : USB_REQ_CLEAR_FEATURE,
323 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
324 feature,
325 wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
326 NULL, 0, 1000 /* FIXME: arbitrary */);
327}
328
329
330static inline int __wa_set_feature(struct wahc *wa, u16 feature)
331{
332 return __wa_feature(wa, 1, feature);
333}
334
335
336static inline int __wa_clear_feature(struct wahc *wa, u16 feature)
337{
338 return __wa_feature(wa, 0, feature);
339}
340
341
342/**
343 * Return the status of a Wire Adapter
344 *
345 * @wa: Wire Adapter instance
346 * @returns < 0 errno code on error, or status bitmap as described
347 * in WUSB1.0[8.3.1.6].
348 *
349 * NOTE: uses wa->status as the buffer; some arches can't do USB
 *       transfers from stack memory, so it must not be on the stack
350 */
351static inline
352s32 __wa_get_status(struct wahc *wa)
353{
354 s32 result;
355 result = usb_control_msg(
356 wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0),
357 USB_REQ_GET_STATUS,
358 USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
359 0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
360 &wa->status, sizeof(wa->status),
361 1000 /* FIXME: arbitrary */);
362 if (result >= 0)
363 result = wa->status;
364 return result;
365}
366
367
368/**
369 * Waits until the Wire Adapter's status matches @mask/@value
370 *
371 * @wa: Wire Adapter instance.
372 * @returns < 0 errno code on error, otherwise status.
373 *
374 * Loop until the WA's status matches the mask and value (status & mask
375 * == value). Times out if it doesn't happen.
376 *
377 * FIXME: is there an official specification on how long status
378 * changes can take?
379 */
380static inline s32 __wa_wait_status(struct wahc *wa, u32 mask, u32 value)
381{
382 s32 result;
383 unsigned loops = 10;
384 do {
385 msleep(50);
386 result = __wa_get_status(wa);
387 if ((result & mask) == value)
388 break;
389 if (loops-- == 0) {
390 result = -ETIMEDOUT;
391 break;
392 }
393 } while (result >= 0);
394 return result;
395}
396
397
398/** Command @hwahc to stop; errors are logged, the stop is best effort (always returns 0) */
399static inline int __wa_stop(struct wahc *wa)
400{
401 int result;
402 struct device *dev = &wa->usb_iface->dev;
403
404 result = __wa_clear_feature(wa, WA_ENABLE);
405 if (result < 0 && result != -ENODEV) {
406 dev_err(dev, "error commanding HC to stop: %d\n", result);
407 goto out;
408 }
409 result = __wa_wait_status(wa, WA_ENABLE, 0);
410 if (result < 0 && result != -ENODEV)
411 dev_err(dev, "error waiting for HC to stop: %d\n", result);
412out:
413 return 0;
414}
415
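/*
 * Example only, not used anywhere in this driver: a plausible start
 * counterpart to __wa_stop() above, assuming WA_ENABLE behaves
 * symmetrically on the enable path. '__wa_start_sketch' is a
 * hypothetical name for this sketch.
 */
static inline int __wa_start_sketch(struct wahc *wa)
{
	int result;

	result = __wa_set_feature(wa, WA_ENABLE);
	if (result < 0)
		return result;
	result = __wa_wait_status(wa, WA_ENABLE, WA_ENABLE);
	return result < 0 ? result : 0;
}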
416
417#endif /* #ifndef __HWAHC_INTERNAL_H__ */
diff --git a/drivers/usb/wusbcore/wa-nep.c b/drivers/usb/wusbcore/wa-nep.c
new file mode 100644
index 000000000000..3f542990c73f
--- /dev/null
+++ b/drivers/usb/wusbcore/wa-nep.c
@@ -0,0 +1,310 @@
1/*
2 * WUSB Wire Adapter: Control/Data Streaming Interface (WUSB[8])
3 * Notification EndPoint support
4 *
5 * Copyright (C) 2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * This part only takes care of getting notifications from the hw
24 * and dispatching them through the wusbd workqueue into
25 * wa_notif_dispatch(). Handling is done there.
26 *
27 * WA notifications are limited in size; most of them are three or
28 * four bytes long, and the longest is the HWA Device Notification,
29 * which would not exceed 38 bytes (DNs are limited in payload to 32
30 * bytes plus a 3 byte header (WUSB1.0[7.6p2]), plus a 3 byte HWA
31 * header (WUSB1.0[8.5.4.2])).
32 *
33 * It is not clear if more than one Device Notification can be packed
34 * in a HWA Notification, I assume no because of the wording in
35 * WUSB1.0[8.5.4.2]. In any case, the bigger any notification could
36 * get is 256 bytes (as the bLength field is a byte).
37 *
38 * So what we do is we have this buffer and read into it; when a
39 * notification arrives we schedule work to a specific, single thread
40 * workqueue (so notifications are serialized) and copy the
41 * notification data. After scheduling the work, we rearm the read from
42 * the notification endpoint.
43 *
44 * Entry points here are:
45 *
46 * wa_nep_[create|destroy]() To initialize/release this subsystem
47 *
48 * wa_nep_cb() Callback for the notification
49 * endpoint; when data is ready, this
50 * does the dispatching.
51 */
52#include <linux/workqueue.h>
53#include <linux/ctype.h>
54#include <linux/uwb/debug.h>
55#include "wa-hc.h"
56#include "wusbhc.h"
57
58/* Structure for queueing notifications to the workqueue */
59struct wa_notif_work {
60 struct work_struct work;
61 struct wahc *wa;
62 size_t size;
63 u8 data[];
64};
65
66/*
67 * Process incoming notifications from the WA's Notification EndPoint
68 * [this runs from the wusbd workqueue, basically]
69 *
70 * @_nw: Pointer to a descriptor which has the pointer to the
71 * @wa, the size of the buffer and the work queue
72 * structure (so we can free all when done).
73 * @returns 0 if ok, < 0 errno code on error.
74 *
75 * All notifications follow the same format; they need to start with a
76 * 'struct wa_notif_hdr' header, so it is easy to parse through
77 * them. We just break the buffer in individual notifications (the
78 * standard doesn't say if it can be done or is forbidden, so we are
79 * cautious) and dispatch each.
80 *
81 * So the handling layers are:
82 *
83 * WA specific notification (from NEP)
84 * Device Notification Received -> wa_handle_notif_dn()
85 * WUSB Device notification generic handling
86 * BPST Adjustment -> wa_handle_notif_bpst_adj()
87 * ... -> ...
88 *
89 * @wa has to be referenced
90 */
91static void wa_notif_dispatch(struct work_struct *ws)
92{
93 void *itr;
94 u8 missing = 0;
95 struct wa_notif_work *nw = container_of(ws, struct wa_notif_work, work);
96 struct wahc *wa = nw->wa;
97 struct wa_notif_hdr *notif_hdr;
98 size_t size;
99
100 struct device *dev = &wa->usb_iface->dev;
101
102#if 0
103 /* FIXME: need to check for this??? */
104 if (usb_hcd->state == HC_STATE_QUIESCING) /* Going down? */
105 goto out; /* screw it */
106#endif
107 atomic_dec(&wa->notifs_queued); /* Throttling ctl */
108 dev = &wa->usb_iface->dev;
109 size = nw->size;
110 itr = nw->data;
111
112 while (size) {
113 if (size < sizeof(*notif_hdr)) {
114 missing = sizeof(*notif_hdr) - size;
115 goto exhausted_buffer;
116 }
117 notif_hdr = itr;
118 if (size < notif_hdr->bLength)
119 goto exhausted_buffer;
120 itr += notif_hdr->bLength;
121 size -= notif_hdr->bLength;
122 /* Dispatch the notification [don't use itr or size!] */
123 switch (notif_hdr->bNotifyType) {
124 case HWA_NOTIF_DN: {
125 struct hwa_notif_dn *hwa_dn;
126 hwa_dn = container_of(notif_hdr, struct hwa_notif_dn,
127 hdr);
128 wusbhc_handle_dn(wa->wusb, hwa_dn->bSourceDeviceAddr,
129 hwa_dn->dndata,
130 notif_hdr->bLength - sizeof(*hwa_dn));
131 break;
132 }
133 case WA_NOTIF_TRANSFER:
134 wa_handle_notif_xfer(wa, notif_hdr);
135 break;
136 case DWA_NOTIF_RWAKE:
137 case DWA_NOTIF_PORTSTATUS:
138 case HWA_NOTIF_BPST_ADJ:
139 /* FIXME: unimplemented WA NOTIFs */
140 /* fallthru */
141 default:
142 if (printk_ratelimit()) {
143 dev_err(dev, "HWA: unknown notification 0x%x, "
144 "%zu bytes; discarding\n",
145 notif_hdr->bNotifyType,
146 (size_t)notif_hdr->bLength);
147 dump_bytes(dev, notif_hdr, 16);
148 }
149 break;
150 }
151 }
152out:
153 wa_put(wa);
154 kfree(nw);
155 return;
156
157 /* THIS SHOULD NOT HAPPEN
158 *
159 * Buffer exhausted with partial data remaining; just warn and
160 * discard the data, as this should not happen.
161 */
162exhausted_buffer:
163 if (!printk_ratelimit())
164 goto out;
165 dev_warn(dev, "HWA: device sent short notification, "
166 "%d bytes missing; discarding %d bytes.\n",
167 missing, (int)size);
168 dump_bytes(dev, itr, size);
169 goto out;
170}
171
172/*
173 * Deliver incoming WA notifications to the wusbwa workqueue
174 *
175 * @wa: Pointer the Wire Adapter Controller Data Streaming
176 * instance (part of an 'struct usb_hcd').
177 * @size: Size of the received buffer
178 * @returns 0 if ok, < 0 errno code on error.
179 *
180 * The input buffer is @wa->nep_buffer, with @size bytes
181 * (guaranteed to fit in the allocated space,
182 * @wa->nep_buffer_size).
183 */
184static int wa_nep_queue(struct wahc *wa, size_t size)
185{
186 int result = 0;
187 struct device *dev = &wa->usb_iface->dev;
188 struct wa_notif_work *nw;
189
190 /* dev_fnstart(dev, "(wa %p, size %zu)\n", wa, size); */
191 BUG_ON(size > wa->nep_buffer_size);
192 if (size == 0)
193 goto out;
194 if (atomic_read(&wa->notifs_queued) > 200) {
195 if (printk_ratelimit())
196 dev_err(dev, "Too many notifications queued, "
197 "throttling back\n");
198 goto out;
199 }
200 nw = kzalloc(sizeof(*nw) + size, GFP_ATOMIC);
201 if (nw == NULL) {
202 if (printk_ratelimit())
203 dev_err(dev, "No memory to queue notification\n");
204 goto out;
205 }
206 INIT_WORK(&nw->work, wa_notif_dispatch);
207 nw->wa = wa_get(wa);
208 nw->size = size;
209 memcpy(nw->data, wa->nep_buffer, size);
210 atomic_inc(&wa->notifs_queued); /* Throttling ctl */
211 queue_work(wusbd, &nw->work);
212out:
213 /* dev_fnend(dev, "(wa %p, size %zu) = result\n", wa, size, result); */
214 return result;
215}
216
217/*
218 * Callback for the notification event endpoint
219 *
220 * Checks that everything is fine and then passes the data to be
221 * queued to the workqueue.
222 */
223static void wa_nep_cb(struct urb *urb)
224{
225 int result;
226 struct wahc *wa = urb->context;
227 struct device *dev = &wa->usb_iface->dev;
228
229 switch (result = urb->status) {
230 case 0:
231 result = wa_nep_queue(wa, urb->actual_length);
232 if (result < 0)
233 dev_err(dev, "NEP: unable to process notification(s): "
234 "%d\n", result);
235 break;
236 case -ECONNRESET: /* Not an error, but a controlled situation; */
237 case -ENOENT: /* (we killed the URB)...so, no broadcast */
238 case -ESHUTDOWN:
239 dev_dbg(dev, "NEP: going down %d\n", urb->status);
240 goto out;
241 default: /* On general errors, we retry unless it gets ugly */
242 if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
243 EDC_ERROR_TIMEFRAME)) {
244 dev_err(dev, "NEP: URB max acceptable errors "
245 "exceeded, resetting device\n");
246 wa_reset_all(wa);
247 goto out;
248 }
249 dev_err(dev, "NEP: URB error %d\n", urb->status);
250 }
251 result = wa_nep_arm(wa, GFP_ATOMIC);
252 if (result < 0) {
253 dev_err(dev, "NEP: cannot submit URB: %d\n", result);
254 wa_reset_all(wa);
255 }
256out:
257 return;
258}
259
260/*
261 * Initialize @wa's notification and event's endpoint stuff
262 *
263 * This includes allocating the read buffer, the context ID
264 * allocation bitmap, the URB and submitting the URB.
265 */
266int wa_nep_create(struct wahc *wa, struct usb_interface *iface)
267{
268 int result;
269 struct usb_endpoint_descriptor *epd;
270 struct usb_device *usb_dev = interface_to_usbdev(iface);
271 struct device *dev = &iface->dev;
272
273 edc_init(&wa->nep_edc);
274 epd = &iface->cur_altsetting->endpoint[0].desc;
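	/* 1024 bytes comfortably holds several notifications; a single
	 * notification can never exceed 256 bytes (bLength is one byte). */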
275 wa->nep_buffer_size = 1024;
276 wa->nep_buffer = kmalloc(wa->nep_buffer_size, GFP_KERNEL);
277 if (wa->nep_buffer == NULL) {
278 dev_err(dev, "Unable to allocate notification's read buffer\n");
279 goto error_nep_buffer;
280 }
281 wa->nep_urb = usb_alloc_urb(0, GFP_KERNEL);
282 if (wa->nep_urb == NULL) {
283 dev_err(dev, "Unable to allocate notification URB\n");
284 goto error_urb_alloc;
285 }
286 usb_fill_int_urb(wa->nep_urb, usb_dev,
287 usb_rcvintpipe(usb_dev, epd->bEndpointAddress),
288 wa->nep_buffer, wa->nep_buffer_size,
289 wa_nep_cb, wa, epd->bInterval);
290 result = wa_nep_arm(wa, GFP_KERNEL);
291 if (result < 0) {
292 dev_err(dev, "Cannot submit notification URB: %d\n", result);
293 goto error_nep_arm;
294 }
295 return 0;
296
297error_nep_arm:
298 usb_free_urb(wa->nep_urb);
299error_urb_alloc:
300 kfree(wa->nep_buffer);
301error_nep_buffer:
302 return -ENOMEM;
303}
304
305void wa_nep_destroy(struct wahc *wa)
306{
307 wa_nep_disarm(wa);
308 usb_free_urb(wa->nep_urb);
309 kfree(wa->nep_buffer);
310}
diff --git a/drivers/usb/wusbcore/wa-rpipe.c b/drivers/usb/wusbcore/wa-rpipe.c
new file mode 100644
index 000000000000..f18e4aae66e9
--- /dev/null
+++ b/drivers/usb/wusbcore/wa-rpipe.c
@@ -0,0 +1,562 @@
1/*
2 * WUSB Wire Adapter
3 * rpipe management
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * FIXME: docs
24 *
25 * RPIPE
26 *
27 * Targeted at different downstream endpoints
28 *
29 * Descriptor: use to config the remote pipe.
30 *
31 * The number of blocks could be dynamic (wBlocks in descriptor is
32 * 0)--need to schedule them then.
33 *
34 * Each bit in wa->rpipe_bm represents if an rpipe is being used or
35 * not. Rpipes are represented with a 'struct wa_rpipe' that is
36 * attached to the hcpriv member of a 'struct usb_host_endpoint'.
37 *
38 * When you need to xfer data to an endpoint, you get an rpipe for it
39 * with wa_ep_rpipe_get(), which gives you a reference to the rpipe
40 * and keeps a single one (the first one) with the endpoint. When you
41 * are done transferring, you drop that reference. At the end the
42 * rpipe is always allocated and bound to the endpoint. There it might
43 * be recycled when not used.
44 *
45 * Addresses:
46 *
47 * We use a 1:1 mapping mechanism between port address (0 based
48 * index, actually) and the address. The USB stack knows about this.
49 *
50 * USB Stack port number 4 (1 based)
51 * WUSB code port index 3 (0 based)
52 * USB Address 5 (2 based -- 0 is for default, 1 for root hub)
53 *
54 * Now, because we don't use the concept of a default address exactly
55 * like the (wired) USB code does, we need to kind of skip it. So we
56 * never take addresses from the urb->pipe, but from the
57 * urb->dev->devnum, to make sure that we always have the right
58 * destination address.
59 */
60#include <linux/init.h>
61#include <asm/atomic.h>
62#include <linux/bitmap.h>
63#include "wusbhc.h"
64#include "wa-hc.h"
65
66#define D_LOCAL 0
67#include <linux/uwb/debug.h>
68
69
70static int __rpipe_get_descr(struct wahc *wa,
71 struct usb_rpipe_descriptor *descr, u16 index)
72{
73 ssize_t result;
74 struct device *dev = &wa->usb_iface->dev;
75
76 /* Get the RPIPE descriptor -- we cannot use the usb_get_descriptor()
77 * function because the arguments are different.
78 */
79 d_printf(1, dev, "rpipe %u: get descr\n", index);
80 result = usb_control_msg(
81 wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0),
82 USB_REQ_GET_DESCRIPTOR,
83 USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_RPIPE,
84 USB_DT_RPIPE<<8, index, descr, sizeof(*descr),
85 1000 /* FIXME: arbitrary */);
86 if (result < 0) {
87 dev_err(dev, "rpipe %u: get descriptor failed: %d\n",
88 index, (int)result);
89 goto error;
90 }
91 if (result < sizeof(*descr)) {
92 dev_err(dev, "rpipe %u: got short descriptor "
93 "(%zd vs %zd bytes needed)\n",
94 index, result, sizeof(*descr));
95 result = -EINVAL;
96 goto error;
97 }
98 result = 0;
99
100error:
101 return result;
102}
103
104/*
105 *
106 * The descriptor is assumed to be properly initialized (ie: you got
107 * it through __rpipe_get_descr()).
108 */
109static int __rpipe_set_descr(struct wahc *wa,
110 struct usb_rpipe_descriptor *descr, u16 index)
111{
112 ssize_t result;
113 struct device *dev = &wa->usb_iface->dev;
114
115 /* we cannot use the usb_get_descriptor() function because the
116 * arguments are different.
117 */
118 d_printf(1, dev, "rpipe %u: set descr\n", index);
119 result = usb_control_msg(
120 wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
121 USB_REQ_SET_DESCRIPTOR,
122 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE,
123 USB_DT_RPIPE<<8, index, descr, sizeof(*descr),
124 HZ / 10);
125 if (result < 0) {
126 dev_err(dev, "rpipe %u: set descriptor failed: %d\n",
127 index, (int)result);
128 goto error;
129 }
130 if (result < sizeof(*descr)) {
131 dev_err(dev, "rpipe %u: sent short descriptor "
132 "(%zd vs %zd bytes required)\n",
133 index, result, sizeof(*descr));
134 result = -EINVAL;
135 goto error;
136 }
137 result = 0;
138
139error:
140 return result;
141
142}
143
144static void rpipe_init(struct wa_rpipe *rpipe)
145{
146 kref_init(&rpipe->refcnt);
147 spin_lock_init(&rpipe->seg_lock);
148 INIT_LIST_HEAD(&rpipe->seg_list);
149}
150
151static unsigned rpipe_get_idx(struct wahc *wa, unsigned rpipe_idx)
152{
153 unsigned long flags;
154
155 spin_lock_irqsave(&wa->rpipe_bm_lock, flags);
156 rpipe_idx = find_next_zero_bit(wa->rpipe_bm, wa->rpipes, rpipe_idx);
157 if (rpipe_idx < wa->rpipes)
158 set_bit(rpipe_idx, wa->rpipe_bm);
159 spin_unlock_irqrestore(&wa->rpipe_bm_lock, flags);
160
161 return rpipe_idx;
162}
163
164static void rpipe_put_idx(struct wahc *wa, unsigned rpipe_idx)
165{
166 unsigned long flags;
167
168 spin_lock_irqsave(&wa->rpipe_bm_lock, flags);
169 clear_bit(rpipe_idx, wa->rpipe_bm);
170 spin_unlock_irqrestore(&wa->rpipe_bm_lock, flags);
171}
172
173void rpipe_destroy(struct kref *_rpipe)
174{
175 struct wa_rpipe *rpipe = container_of(_rpipe, struct wa_rpipe, refcnt);
176 u8 index = le16_to_cpu(rpipe->descr.wRPipeIndex);
177 d_fnstart(1, NULL, "(rpipe %p %u)\n", rpipe, index);
178 if (rpipe->ep)
179 rpipe->ep->hcpriv = NULL;
180 rpipe_put_idx(rpipe->wa, index);
181 wa_put(rpipe->wa);
182 kfree(rpipe);
183 d_fnend(1, NULL, "(rpipe %p %u)\n", rpipe, index);
184}
185EXPORT_SYMBOL_GPL(rpipe_destroy);
186
187/*
188 * Locate an idle rpipe, create a structure for it and return it
189 *
190 * @wa is referenced and unlocked
191 * @crs enum rpipe_attr, required endpoint characteristics
192 *
193 * The rpipe can be used only sequentially (not in parallel).
194 *
195 * The rpipe is moved into the "ready" state.
196 */
197static int rpipe_get_idle(struct wa_rpipe **prpipe, struct wahc *wa, u8 crs,
198 gfp_t gfp)
199{
200 int result;
201 unsigned rpipe_idx;
202 struct wa_rpipe *rpipe;
203 struct device *dev = &wa->usb_iface->dev;
204
205 d_fnstart(3, dev, "(wa %p crs 0x%02x)\n", wa, crs);
206 rpipe = kzalloc(sizeof(*rpipe), gfp);
207 if (rpipe == NULL)
208 return -ENOMEM;
209 rpipe_init(rpipe);
210
211 /* Look for an idle pipe */
212 for (rpipe_idx = 0; rpipe_idx < wa->rpipes; rpipe_idx++) {
213 rpipe_idx = rpipe_get_idx(wa, rpipe_idx);
214 if (rpipe_idx >= wa->rpipes) /* no more pipes :( */
215 break;
216 result = __rpipe_get_descr(wa, &rpipe->descr, rpipe_idx);
217 if (result < 0)
218 dev_err(dev, "Can't get descriptor for rpipe %u: %d\n",
219 rpipe_idx, result);
220 else if ((rpipe->descr.bmCharacteristics & crs) != 0)
221 goto found;
222 rpipe_put_idx(wa, rpipe_idx);
223 }
224 *prpipe = NULL;
225 kfree(rpipe);
226 d_fnend(3, dev, "(wa %p crs 0x%02x) = -ENXIO\n", wa, crs);
227 return -ENXIO;
228
229found:
230 set_bit(rpipe_idx, wa->rpipe_bm);
231 rpipe->wa = wa_get(wa);
232 *prpipe = rpipe;
233 d_fnstart(3, dev, "(wa %p crs 0x%02x) = 0\n", wa, crs);
234 return 0;
235}
236
237static int __rpipe_reset(struct wahc *wa, unsigned index)
238{
239 int result;
240 struct device *dev = &wa->usb_iface->dev;
241
242 d_printf(1, dev, "rpipe %u: reset\n", index);
243 result = usb_control_msg(
244 wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
245 USB_REQ_RPIPE_RESET,
246 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE,
247 0, index, NULL, 0, 1000 /* FIXME: arbitrary */);
248 if (result < 0)
249 dev_err(dev, "rpipe %u: reset failed: %d\n",
250 index, result);
251 return result;
252}
253
254/*
255 * Fake companion descriptor for ep0
256 *
257 * See WUSB1.0[7.4.4], most of this is zero for bulk/int/ctl
258 */
259static struct usb_wireless_ep_comp_descriptor epc0 = {
260 .bLength = sizeof(epc0),
261 .bDescriptorType = USB_DT_WIRELESS_ENDPOINT_COMP,
262/* .bMaxBurst = 1, */
263 .bMaxSequence = 31,
264};
265
266/*
267 * Look for EP companion descriptor
268 *
269 * Scan the endpoint's extra descriptors for the wireless endpoint
 * companion descriptor
270 */
271static struct usb_wireless_ep_comp_descriptor *rpipe_epc_find(
272 struct device *dev, struct usb_host_endpoint *ep)
273{
274 void *itr;
275 size_t itr_size;
276 struct usb_descriptor_header *hdr;
277 struct usb_wireless_ep_comp_descriptor *epcd;
278
279 d_fnstart(3, dev, "(ep %p)\n", ep);
280 if (ep->desc.bEndpointAddress == 0) {
281 epcd = &epc0;
282 goto out;
283 }
284 itr = ep->extra;
285 itr_size = ep->extralen;
286 epcd = NULL;
287 while (itr_size > 0) {
288 if (itr_size < sizeof(*hdr)) {
289 dev_err(dev, "HW Bug? ep 0x%02x: extra descriptors "
290 "at offset %zu: only %zu bytes left\n",
291 ep->desc.bEndpointAddress,
292 itr - (void *) ep->extra, itr_size);
293 break;
294 }
295 hdr = itr;
296 if (hdr->bDescriptorType == USB_DT_WIRELESS_ENDPOINT_COMP) {
297 epcd = itr;
298 break;
299 }
300 if (hdr->bLength > itr_size) {
301 dev_err(dev, "HW Bug? ep 0x%02x: extra descriptor "
302 "at offset %zu (type 0x%02x) "
303 "length %d but only %zu bytes left\n",
304 ep->desc.bEndpointAddress,
305 itr - (void *) ep->extra, hdr->bDescriptorType,
306 hdr->bLength, itr_size);
307 break;
308 }
309 itr += hdr->bLength;
310		itr_size -= hdr->bLength;
311 }
312out:
313 d_fnend(3, dev, "(ep %p) = %p\n", ep, epcd);
314 return epcd;
315}
316
317/*
318 * Aim an rpipe to its device & endpoint destination
319 *
320 * Make sure we change the address to unauthenticated if the device
321 * is WUSB and it is not authenticated.
322 */
323static int rpipe_aim(struct wa_rpipe *rpipe, struct wahc *wa,
324 struct usb_host_endpoint *ep, struct urb *urb, gfp_t gfp)
325{
326 int result = -ENOMSG; /* better code for lack of companion? */
327 struct device *dev = &wa->usb_iface->dev;
328 struct usb_device *usb_dev = urb->dev;
329 struct usb_wireless_ep_comp_descriptor *epcd;
330 u8 unauth;
331
332 d_fnstart(3, dev, "(rpipe %p wa %p ep %p, urb %p)\n",
333 rpipe, wa, ep, urb);
334 epcd = rpipe_epc_find(dev, ep);
335 if (epcd == NULL) {
336 dev_err(dev, "ep 0x%02x: can't find companion descriptor\n",
337 ep->desc.bEndpointAddress);
338 goto error;
339 }
340 unauth = usb_dev->wusb && !usb_dev->authenticated ? 0x80 : 0;
341 __rpipe_reset(wa, le16_to_cpu(rpipe->descr.wRPipeIndex));
342 atomic_set(&rpipe->segs_available, le16_to_cpu(rpipe->descr.wRequests));
343 /* FIXME: block allocation system; request with queuing and timeout */
344 /* FIXME: compute so seg_size > ep->maxpktsize */
345 rpipe->descr.wBlocks = cpu_to_le16(16); /* given */
346 /* ep0 maxpktsize is 0x200 (WUSB1.0[4.8.1]) */
347 rpipe->descr.wMaxPacketSize = cpu_to_le16(ep->desc.wMaxPacketSize);
348 rpipe->descr.bHSHubAddress = 0; /* reserved: zero */
349 rpipe->descr.bHSHubPort = wusb_port_no_to_idx(urb->dev->portnum);
350 /* FIXME: use maximum speed as supported or recommended by device */
351 rpipe->descr.bSpeed = usb_pipeendpoint(urb->pipe) == 0 ?
352 UWB_PHY_RATE_53 : UWB_PHY_RATE_200;
353 d_printf(2, dev, "addr %u (0x%02x) rpipe #%u ep# %u speed %d\n",
354 urb->dev->devnum, urb->dev->devnum | unauth,
355 le16_to_cpu(rpipe->descr.wRPipeIndex),
356 usb_pipeendpoint(urb->pipe), rpipe->descr.bSpeed);
357 /* see security.c:wusb_update_address() */
358 if (unlikely(urb->dev->devnum == 0x80))
359 rpipe->descr.bDeviceAddress = 0;
360 else
361 rpipe->descr.bDeviceAddress = urb->dev->devnum | unauth;
362 rpipe->descr.bEndpointAddress = ep->desc.bEndpointAddress;
363 /* FIXME: bDataSequence */
364 rpipe->descr.bDataSequence = 0;
365 /* FIXME: dwCurrentWindow */
366 rpipe->descr.dwCurrentWindow = cpu_to_le32(1);
367 /* FIXME: bMaxDataSequence */
368 rpipe->descr.bMaxDataSequence = epcd->bMaxSequence - 1;
369 rpipe->descr.bInterval = ep->desc.bInterval;
370 /* FIXME: bOverTheAirInterval */
371 rpipe->descr.bOverTheAirInterval = 0; /* 0 if not isoc */
372 /* FIXME: xmit power & preamble blah blah */
373 rpipe->descr.bmAttribute = ep->desc.bmAttributes & 0x03;
374 /* rpipe->descr.bmCharacteristics RO */
375 /* FIXME: bmRetryOptions */
376 rpipe->descr.bmRetryOptions = 15;
377 /* FIXME: use for assessing link quality? */
378 rpipe->descr.wNumTransactionErrors = 0;
379 result = __rpipe_set_descr(wa, &rpipe->descr,
380 le16_to_cpu(rpipe->descr.wRPipeIndex));
381 if (result < 0) {
382 dev_err(dev, "Cannot aim rpipe: %d\n", result);
383 goto error;
384 }
385 result = 0;
386error:
387 d_fnend(3, dev, "(rpipe %p wa %p ep %p urb %p) = %d\n",
388 rpipe, wa, ep, urb, result);
389 return result;
390}
391
392/*
393 * Check an aimed rpipe to make sure it points to where we want
394 *
395 * We use bit 19 of the Linux USB pipe bitmap for unauth vs auth
396 * space; when it is like that, we or 0x80 to make an unauth address.
397 */
398static int rpipe_check_aim(const struct wa_rpipe *rpipe, const struct wahc *wa,
399 const struct usb_host_endpoint *ep,
400 const struct urb *urb, gfp_t gfp)
401{
402 int result = 0; /* better code for lack of companion? */
403 struct device *dev = &wa->usb_iface->dev;
404 struct usb_device *usb_dev = urb->dev;
405 u8 unauth = (usb_dev->wusb && !usb_dev->authenticated) ? 0x80 : 0;
406 u8 portnum = wusb_port_no_to_idx(urb->dev->portnum);
407
408 d_fnstart(3, dev, "(rpipe %p wa %p ep %p, urb %p)\n",
409 rpipe, wa, ep, urb);
410#define AIM_CHECK(rdf, val, text) \
411 do { \
412 if (rpipe->descr.rdf != (val)) { \
413 dev_err(dev, \
414 "rpipe aim discrepancy: " #rdf " " text "\n", \
415 rpipe->descr.rdf, (val)); \
416 result = -EINVAL; \
417 WARN_ON(1); \
418 } \
419 } while (0)
420 AIM_CHECK(wMaxPacketSize, cpu_to_le16(ep->desc.wMaxPacketSize),
421 "(%u vs %u)");
422 AIM_CHECK(bHSHubPort, portnum, "(%u vs %u)");
423 AIM_CHECK(bSpeed, usb_pipeendpoint(urb->pipe) == 0 ?
424 UWB_PHY_RATE_53 : UWB_PHY_RATE_200,
425 "(%u vs %u)");
426 AIM_CHECK(bDeviceAddress, urb->dev->devnum | unauth, "(%u vs %u)");
427 AIM_CHECK(bEndpointAddress, ep->desc.bEndpointAddress, "(%u vs %u)");
428 AIM_CHECK(bInterval, ep->desc.bInterval, "(%u vs %u)");
429 AIM_CHECK(bmAttribute, ep->desc.bmAttributes & 0x03, "(%u vs %u)");
430#undef AIM_CHECK
431 return result;
432}
433
434#ifndef CONFIG_BUG
435#define CONFIG_BUG 0
436#endif
437
438/*
439 * Make sure there is an rpipe allocated for an endpoint
440 *
441 * If already allocated, we just refcount it; if not, we get an
442 * idle one, aim it to the right location and take it.
443 *
444 * Attaches to ep->hcpriv and rpipe->ep to ep.
445 */
446int rpipe_get_by_ep(struct wahc *wa, struct usb_host_endpoint *ep,
447 struct urb *urb, gfp_t gfp)
448{
449 int result = 0;
450 struct device *dev = &wa->usb_iface->dev;
451 struct wa_rpipe *rpipe;
452 u8 eptype;
453
454 d_fnstart(3, dev, "(wa %p ep %p urb %p gfp 0x%08x)\n", wa, ep, urb,
455 gfp);
456 mutex_lock(&wa->rpipe_mutex);
457 rpipe = ep->hcpriv;
458 if (rpipe != NULL) {
459 if (CONFIG_BUG == 1) {
460 result = rpipe_check_aim(rpipe, wa, ep, urb, gfp);
461 if (result < 0)
462 goto error;
463 }
464 __rpipe_get(rpipe);
465 d_printf(2, dev, "ep 0x%02x: reusing rpipe %u\n",
466 ep->desc.bEndpointAddress,
467 le16_to_cpu(rpipe->descr.wRPipeIndex));
468 } else {
469 /* hmm, assign idle rpipe, aim it */
470 result = -ENOBUFS;
471 eptype = ep->desc.bmAttributes & 0x03;
472 result = rpipe_get_idle(&rpipe, wa, 1 << eptype, gfp);
473 if (result < 0)
474 goto error;
475 result = rpipe_aim(rpipe, wa, ep, urb, gfp);
476 if (result < 0) {
477 rpipe_put(rpipe);
478 goto error;
479 }
480 ep->hcpriv = rpipe;
481 rpipe->ep = ep;
482 __rpipe_get(rpipe); /* for caching into ep->hcpriv */
483 d_printf(2, dev, "ep 0x%02x: using rpipe %u\n",
484 ep->desc.bEndpointAddress,
485 le16_to_cpu(rpipe->descr.wRPipeIndex));
486 }
487 d_dump(4, dev, &rpipe->descr, sizeof(rpipe->descr));
488error:
489 mutex_unlock(&wa->rpipe_mutex);
490 d_fnend(3, dev, "(wa %p ep %p urb %p gfp 0x%08x)\n", wa, ep, urb, gfp);
491 return result;
492}
493
494/*
495 * Allocate the bitmap for each rpipe.
496 */
497int wa_rpipes_create(struct wahc *wa)
498{
499 wa->rpipes = wa->wa_descr->wNumRPipes;
500 wa->rpipe_bm = kzalloc(BITS_TO_LONGS(wa->rpipes)*sizeof(unsigned long),
501 GFP_KERNEL);
502 if (wa->rpipe_bm == NULL)
503 return -ENOMEM;
504 return 0;
505}
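/*
 * Sizing example (hypothetical wNumRPipes value, for illustration
 * only): a wire adapter reporting wNumRPipes = 100 gets
 * BITS_TO_LONGS(100) longs above, i.e. 2 longs (128 bits) on a 64-bit
 * kernel or 4 longs on a 32-bit one -- one bit per rpipe, spare bits
 * unused.
 */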
506
507void wa_rpipes_destroy(struct wahc *wa)
508{
509 struct device *dev = &wa->usb_iface->dev;
510 d_fnstart(3, dev, "(wa %p)\n", wa);
511 if (!bitmap_empty(wa->rpipe_bm, wa->rpipes)) {
512 char buf[256];
513 WARN_ON(1);
514 bitmap_scnprintf(buf, sizeof(buf), wa->rpipe_bm, wa->rpipes);
515 dev_err(dev, "BUG: pipes not released on exit: %s\n", buf);
516 }
517 kfree(wa->rpipe_bm);
518 d_fnend(3, dev, "(wa %p)\n", wa);
519}
520
521/*
522 * Release resources allocated for an endpoint
523 *
524 * If there is an associated rpipe to this endpoint, Abort any pending
525 * transfers and put it. If the rpipe ends up being destroyed,
526 * __rpipe_destroy() will cleanup ep->hcpriv.
527 *
528 * This is called before calling hcd->stop(), so you don't need to do
529 * anything else in there.
530 */
531void rpipe_ep_disable(struct wahc *wa, struct usb_host_endpoint *ep)
532{
533 struct device *dev = &wa->usb_iface->dev;
534 struct wa_rpipe *rpipe;
535 d_fnstart(2, dev, "(wa %p ep %p)\n", wa, ep);
536 mutex_lock(&wa->rpipe_mutex);
537 rpipe = ep->hcpriv;
538 if (rpipe != NULL) {
539 unsigned rc = atomic_read(&rpipe->refcnt.refcount);
540 int result;
541 u16 index = le16_to_cpu(rpipe->descr.wRPipeIndex);
542
543 if (rc != 1)
544 d_printf(1, dev, "(wa %p ep %p) rpipe %p refcnt %u\n",
545 wa, ep, rpipe, rc);
546
547 d_printf(1, dev, "rpipe %u: abort\n", index);
548 result = usb_control_msg(
549 wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0),
550 USB_REQ_RPIPE_ABORT,
551 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE,
552 0, index, NULL, 0, 1000 /* FIXME: arbitrary */);
553 if (result < 0 && result != -ENODEV /* dev is gone */)
554 d_printf(1, dev, "(wa %p rpipe %u): abort failed: %d\n",
555 wa, index, result);
556 rpipe_put(rpipe);
557 }
558 mutex_unlock(&wa->rpipe_mutex);
559 d_fnend(2, dev, "(wa %p ep %p)\n", wa, ep);
560 return;
561}
562EXPORT_SYMBOL_GPL(rpipe_ep_disable);
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
new file mode 100644
index 000000000000..c038635d1c64
--- /dev/null
+++ b/drivers/usb/wusbcore/wa-xfer.c
@@ -0,0 +1,1709 @@
1/*
2 * WUSB Wire Adapter
3 * Data transfer and URB enqueuing
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * How transfers work: get a buffer, break it up in segments (segment
24 * size is a multiple of the maxpacket size). For each segment issue a
25 * segment request (struct wa_xfer_*), then send the data buffer if
26 * out or nothing if in (all over the DTO endpoint).
27 *
28 * For each submitted segment request, a notification will come over
29 * the NEP endpoint and a transfer result (struct xfer_result) will
30 * arrive in the DTI URB. Read it, get the xfer ID, see if there is
31 * data coming (inbound transfer), schedule a read and handle it.
32 *
33 * Sounds simple, it is a pain to implement.
34 *
35 *
36 * ENTRY POINTS
37 *
38 * FIXME
39 *
40 * LIFE CYCLE / STATE DIAGRAM
41 *
42 * FIXME
43 *
44 * THIS CODE IS DISGUSTING
45 *
46 * Warned you are; it's my second try and still not happy with it.
47 *
48 * NOTES:
49 *
50 * - No iso
51 *
52 * - Supports DMA xfers, control, bulk and maybe interrupt
53 *
54 * - Does not recycle unused rpipes
55 *
56 * An rpipe is assigned to an endpoint the first time it is used,
57 * and then it's there, assigned, until the endpoint is disabled
58 * (destroyed [{h,d}wahc_op_ep_disable()]). The assignment of the
59 * rpipe to the endpoint is done under the wa->rpipe_mutex mutex
60 * [see rpipe_get_by_ep()].
61 *
62 * Two methods it could be done:
63 *
64 * (a) set up a timer every time an rpipe's use count drops to 1
65 * (which means unused) or when a transfer ends. Reset the
66 * timer when a xfer is queued. If the timer expires, release
67 * the rpipe [see rpipe_ep_disable()].
68 *
69 * (b) when looking for free rpipes to attach [rpipe_get_by_ep()],
70 * when none are found, go over the list, check their endpoint
71 * and their activity record (if no last-xfer-done-ts in the
72 * last x seconds) and take it.
73 *
74 * However, because we have a set of limited
75 * resources (max-segments-at-the-same-time per xfer,
76 * xfers-per-rpipe, blocks-per-rpipe, rpipes-per-host), in the end
77 * we are going to have to rebuild all this around a scheduler,
78 * where we keep a list of transactions to do and, based on the
79 * availability of the different required components (blocks,
80 * rpipes, segment slots, etc), we go scheduling them. Painful.
81 */
82#include <linux/init.h>
83#include <linux/spinlock.h>
84#include <linux/hash.h>
85#include "wa-hc.h"
86#include "wusbhc.h"
87
88#undef D_LOCAL
89#define D_LOCAL 0 /* 0 disabled, > 0 different levels... */
90#include <linux/uwb/debug.h>
91
92enum {
93 WA_SEGS_MAX = 255,
94};
95
96enum wa_seg_status {
97 WA_SEG_NOTREADY,
98 WA_SEG_READY,
99 WA_SEG_DELAYED,
100 WA_SEG_SUBMITTED,
101 WA_SEG_PENDING,
102 WA_SEG_DTI_PENDING,
103 WA_SEG_DONE,
104 WA_SEG_ERROR,
105 WA_SEG_ABORTED,
106};
107
108static void wa_xfer_delayed_run(struct wa_rpipe *);
109
110/*
111 * Life cycle governed by 'struct urb' (the refcount of the struct is
112 * that of the 'struct urb' and usb_free_urb() would free the whole
113 * struct).
114 */
115struct wa_seg {
116 struct urb urb;
117 struct urb *dto_urb; /* for data output? */
118 struct list_head list_node; /* for rpipe->req_list */
119 struct wa_xfer *xfer; /* out xfer */
120 u8 index; /* which segment we are */
121 enum wa_seg_status status;
122 ssize_t result; /* bytes xfered or error */
123 struct wa_xfer_hdr xfer_hdr;
124 u8 xfer_extra[]; /* xtra space for xfer_hdr_ctl */
125};
126
127static void wa_seg_init(struct wa_seg *seg)
128{
129 /* usb_init_urb() repeats a lot of work, so we do it here */
130 kref_init(&seg->urb.kref);
131}
132
133/*
134 * Protected by xfer->lock
135 *
136 */
137struct wa_xfer {
138 struct kref refcnt;
139 struct list_head list_node;
140 spinlock_t lock;
141 u32 id;
142
143 struct wahc *wa; /* Wire adapter we are plugged to */
144 struct usb_host_endpoint *ep;
145 struct urb *urb; /* URB we are transfering for */
146 struct wa_seg **seg; /* transfer segments */
147 u8 segs, segs_submitted, segs_done;
148 unsigned is_inbound:1;
149 unsigned is_dma:1;
150 size_t seg_size;
151 int result;
152
153 gfp_t gfp; /* allocation mask */
154
155 struct wusb_dev *wusb_dev; /* for activity timestamps */
156};
157
158static inline void wa_xfer_init(struct wa_xfer *xfer)
159{
160 kref_init(&xfer->refcnt);
161 INIT_LIST_HEAD(&xfer->list_node);
162 spin_lock_init(&xfer->lock);
163}
164
165/*
166 * Destroy a transfer structure
167 *
168 * Note that the xfer->seg[index] thingies follow the URB life cycle,
169 * so we need to put them, not free them.
170 */
171static void wa_xfer_destroy(struct kref *_xfer)
172{
173 struct wa_xfer *xfer = container_of(_xfer, struct wa_xfer, refcnt);
174 if (xfer->seg) {
175 unsigned cnt;
176 for (cnt = 0; cnt < xfer->segs; cnt++) {
177 if (xfer->is_inbound)
178 usb_put_urb(xfer->seg[cnt]->dto_urb);
179 usb_put_urb(&xfer->seg[cnt]->urb);
180 }
181 }
182 kfree(xfer);
183 d_printf(2, NULL, "xfer %p destroyed\n", xfer);
184}
185
186static void wa_xfer_get(struct wa_xfer *xfer)
187{
188 kref_get(&xfer->refcnt);
189}
190
191static void wa_xfer_put(struct wa_xfer *xfer)
192{
193 d_fnstart(3, NULL, "(xfer %p) -- ref count bef put %d\n",
194 xfer, atomic_read(&xfer->refcnt.refcount));
195 kref_put(&xfer->refcnt, wa_xfer_destroy);
196 d_fnend(3, NULL, "(xfer %p) = void\n", xfer);
197}
198
199/*
200 * xfer is referenced
201 *
202 * xfer->lock has to be unlocked
203 *
204 * We take xfer->lock for setting the result; this is a barrier
205 * against drivers/usb/core/hcd.c:unlink1() being called after we call
206 * usb_hcd_giveback_urb() and wa_urb_dequeue() trying to get a
207 * reference to the transfer.
208 */
209static void wa_xfer_giveback(struct wa_xfer *xfer)
210{
211 unsigned long flags;
212 d_fnstart(3, NULL, "(xfer %p)\n", xfer);
213 spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags);
214 list_del_init(&xfer->list_node);
215 spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags);
216 /* FIXME: segmentation broken -- kills DWA */
217 wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result);
218 wa_put(xfer->wa);
219 wa_xfer_put(xfer);
220 d_fnend(3, NULL, "(xfer %p) = void\n", xfer);
221}
222
223/*
224 * xfer is referenced
225 *
226 * xfer->lock has to be unlocked
227 */
228static void wa_xfer_completion(struct wa_xfer *xfer)
229{
230 d_fnstart(3, NULL, "(xfer %p)\n", xfer);
231 if (xfer->wusb_dev)
232 wusb_dev_put(xfer->wusb_dev);
233 rpipe_put(xfer->ep->hcpriv);
234 wa_xfer_giveback(xfer);
235 d_fnend(3, NULL, "(xfer %p) = void\n", xfer);
236 return;
237}
238
239/*
240 * If transfer is done, wrap it up and return true
241 *
242 * xfer->lock has to be locked
243 */
244static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)
245{
246 unsigned result, cnt;
247 struct wa_seg *seg;
248 struct urb *urb = xfer->urb;
249 unsigned found_short = 0;
250
251 d_fnstart(3, NULL, "(xfer %p)\n", xfer);
252 result = xfer->segs_done == xfer->segs_submitted;
253 if (result == 0)
254 goto out;
255 urb->actual_length = 0;
256 for (cnt = 0; cnt < xfer->segs; cnt++) {
257 seg = xfer->seg[cnt];
258 switch (seg->status) {
259 case WA_SEG_DONE:
260 if (found_short && seg->result > 0) {
261 if (printk_ratelimit())
262 printk(KERN_ERR "xfer %p#%u: bad short "
263 "segments (%zu)\n", xfer, cnt,
264 seg->result);
265 urb->status = -EINVAL;
266 goto out;
267 }
268 urb->actual_length += seg->result;
269 if (seg->result < xfer->seg_size
270 && cnt != xfer->segs-1)
271 found_short = 1;
272 d_printf(2, NULL, "xfer %p#%u: DONE short %d "
273 "result %zu urb->actual_length %d\n",
274 xfer, seg->index, found_short, seg->result,
275 urb->actual_length);
276 break;
277 case WA_SEG_ERROR:
278 xfer->result = seg->result;
279 d_printf(2, NULL, "xfer %p#%u: ERROR result %zu\n",
280 xfer, seg->index, seg->result);
281 goto out;
282 case WA_SEG_ABORTED:
283 WARN_ON(urb->status != -ECONNRESET
284 && urb->status != -ENOENT);
285 d_printf(2, NULL, "xfer %p#%u ABORTED: result %d\n",
286 xfer, seg->index, urb->status);
287 xfer->result = urb->status;
288 goto out;
289 default:
290 /* if (printk_ratelimit()) */
291 printk(KERN_ERR "xfer %p#%u: "
292 "is_done bad state %d\n",
293 xfer, cnt, seg->status);
294 xfer->result = -EINVAL;
295 WARN_ON(1);
296 goto out;
297 }
298 }
299 xfer->result = 0;
300out:
301 d_fnend(3, NULL, "(xfer %p) = void\n", xfer);
302 return result;
303}
304
305/*
306 * Initialize a transfer's ID
307 *
308 * We need to use a sequential number; if we use the pointer or the
309 * hash of the pointer, it can repeat over sequential transfers and
310 * then it will confuse the HWA....wonder why in hell they put a 32
311 * bit handle in there then.
312 */
313static void wa_xfer_id_init(struct wa_xfer *xfer)
314{
315 xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
316}
317
318/*
319 * Return the ID associated with a transfer
320 *
321 * (generated by wa_xfer_id_init() above)
322 */
323static u32 wa_xfer_id(struct wa_xfer *xfer)
324{
325 return xfer->id;
326}
327
328/*
329 * Search the wire adapter's transfer list for the xfer with a given ID
330 *
331 * Takes a reference on the xfer when it is found; the caller must drop
332 * it with wa_xfer_put() when done.
333 *
334 * @returns NULL if not found.
335 */
336static struct wa_xfer *wa_xfer_get_by_id(struct wahc *wa, u32 id)
337{
338 unsigned long flags;
339 struct wa_xfer *xfer_itr;
340 spin_lock_irqsave(&wa->xfer_list_lock, flags);
341 list_for_each_entry(xfer_itr, &wa->xfer_list, list_node) {
342 if (id == xfer_itr->id) {
343 wa_xfer_get(xfer_itr);
344 goto out;
345 }
346 }
347 xfer_itr = NULL;
348out:
349 spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
350 return xfer_itr;
351}
352
353struct wa_xfer_abort_buffer {
354 struct urb urb;
355 struct wa_xfer_abort cmd;
356};
357
358static void __wa_xfer_abort_cb(struct urb *urb)
359{
360 struct wa_xfer_abort_buffer *b = urb->context;
361 usb_put_urb(&b->urb);
362}
363
364/*
365 * Aborts an ongoing transaction
366 *
367 * Assumes the transfer is referenced and locked and in a submitted
368 * state (mainly that there is an endpoint/rpipe assigned).
369 *
370 * The callback (see above) does nothing but freeing up the data by
371 * putting the URB. Because the URB is allocated at the head of the
372 * struct, the whole space we allocated is kfreed.
373 *
374 * We'll get an 'aborted transaction' xfer result on DTI, which we'll
375 * politely ignore because at this point the transaction has been
376 * marked as aborted already.
377 */
378static void __wa_xfer_abort(struct wa_xfer *xfer)
379{
380 int result;
381 struct device *dev = &xfer->wa->usb_iface->dev;
382 struct wa_xfer_abort_buffer *b;
383 struct wa_rpipe *rpipe = xfer->ep->hcpriv;
384
385 b = kmalloc(sizeof(*b), GFP_ATOMIC);
386 if (b == NULL)
387 goto error_kmalloc;
388 b->cmd.bLength = sizeof(b->cmd);
389 b->cmd.bRequestType = WA_XFER_ABORT;
390 b->cmd.wRPipe = rpipe->descr.wRPipeIndex;
391 b->cmd.dwTransferID = wa_xfer_id(xfer);
392
393 usb_init_urb(&b->urb);
394 usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev,
395 usb_sndbulkpipe(xfer->wa->usb_dev,
396 xfer->wa->dto_epd->bEndpointAddress),
397 &b->cmd, sizeof(b->cmd), __wa_xfer_abort_cb, b);
398 result = usb_submit_urb(&b->urb, GFP_ATOMIC);
399 if (result < 0)
400 goto error_submit;
401 return; /* callback frees! */
402
403
404error_submit:
405 if (printk_ratelimit())
406 dev_err(dev, "xfer %p: Can't submit abort request: %d\n",
407 xfer, result);
408 kfree(b);
409error_kmalloc:
410 return;
411
412}
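/*
 * Sketch of the trick relied on above (assumption made explicit, not
 * part of the original code): the abort URB sits at offset zero of
 * struct wa_xfer_abort_buffer, so the kfree() done when its refcount
 * drops releases the whole buffer. Something like
 *
 *	BUILD_BUG_ON(offsetof(struct wa_xfer_abort_buffer, urb) != 0);
 *
 * at the top of __wa_xfer_abort() would document and enforce it.
 */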
413
414/*
415 * Compute the transfer type, segment size and segment count for a transfer
416 * @returns < 0 on error, transfer segment request size if ok
417 */
418static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
419 enum wa_xfer_type *pxfer_type)
420{
421 ssize_t result;
422 struct device *dev = &xfer->wa->usb_iface->dev;
423 size_t maxpktsize;
424 struct urb *urb = xfer->urb;
425 struct wa_rpipe *rpipe = xfer->ep->hcpriv;
426
427 d_fnstart(3, dev, "(xfer %p [rpipe %p] urb %p)\n",
428 xfer, rpipe, urb);
429 switch (rpipe->descr.bmAttribute & 0x3) {
430 case USB_ENDPOINT_XFER_CONTROL:
431 *pxfer_type = WA_XFER_TYPE_CTL;
432 result = sizeof(struct wa_xfer_ctl);
433 break;
434 case USB_ENDPOINT_XFER_INT:
435 case USB_ENDPOINT_XFER_BULK:
436 *pxfer_type = WA_XFER_TYPE_BI;
437 result = sizeof(struct wa_xfer_bi);
438 break;
439 case USB_ENDPOINT_XFER_ISOC:
440 dev_err(dev, "FIXME: ISOC not implemented\n");
441 result = -ENOSYS;
442 goto error;
443 default:
444 /* never happens */
445 BUG();
446 result = -EINVAL; /* shut gcc up */
447 };
448 xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0;
449 xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0;
450 xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
451 * 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
452 /* Compute the segment size and make sure it is a multiple of
453 * the maxpktsize (WUSB1.0[8.3.3.1])...not really too much of
454 * a check (FIXME) */
455 maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);
456 if (xfer->seg_size < maxpktsize) {
457 dev_err(dev, "HW BUG? seg_size %zu smaller than maxpktsize "
458 "%zu\n", xfer->seg_size, maxpktsize);
459 result = -EINVAL;
460 goto error;
461 }
462 xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
463 xfer->segs = (urb->transfer_buffer_length + xfer->seg_size - 1)
464 / xfer->seg_size;
465 if (xfer->segs >= WA_SEGS_MAX) {
466 dev_err(dev, "BUG? ops, number of segments %d bigger than %d\n",
467 (int)(urb->transfer_buffer_length / xfer->seg_size),
468 WA_SEGS_MAX);
469 result = -EINVAL;
470 goto error;
471 }
472 if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
473 xfer->segs = 1;
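	/*
	 * Worked example with made-up descriptor values (illustration
	 * only, not from any real device): wBlocks = 16 and
	 * bRPipeBlockSize = 7 give a raw seg_size of 16 << 6 = 1024
	 * bytes; with wMaxPacketSize = 512 the rounding above keeps it
	 * at 1024, and a 3000-byte URB then needs (3000 + 1023) / 1024
	 * = 3 segments.
	 */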
474error:
475 d_fnend(3, dev, "(xfer %p [rpipe %p] urb %p) = %d\n",
476 xfer, rpipe, urb, (int)result);
477 return result;
478}
479
480/** Fill in the common request header and xfer-type specific data. */
481static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
482 struct wa_xfer_hdr *xfer_hdr0,
483 enum wa_xfer_type xfer_type,
484 size_t xfer_hdr_size)
485{
486 struct wa_rpipe *rpipe = xfer->ep->hcpriv;
487
488 xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
489 xfer_hdr0->bLength = xfer_hdr_size;
490 xfer_hdr0->bRequestType = xfer_type;
491 xfer_hdr0->wRPipe = rpipe->descr.wRPipeIndex;
492 xfer_hdr0->dwTransferID = wa_xfer_id(xfer);
493 xfer_hdr0->bTransferSegment = 0;
494 switch (xfer_type) {
495 case WA_XFER_TYPE_CTL: {
496 struct wa_xfer_ctl *xfer_ctl =
497 container_of(xfer_hdr0, struct wa_xfer_ctl, hdr);
498 xfer_ctl->bmAttribute = xfer->is_inbound ? 1 : 0;
499 BUG_ON(xfer->urb->transfer_flags & URB_NO_SETUP_DMA_MAP
500 && xfer->urb->setup_packet == NULL);
501 memcpy(&xfer_ctl->baSetupData, xfer->urb->setup_packet,
502 sizeof(xfer_ctl->baSetupData));
503 break;
504 }
505 case WA_XFER_TYPE_BI:
506 break;
507 case WA_XFER_TYPE_ISO:
508 printk(KERN_ERR "FIXME: ISOC not implemented\n");
509 default:
510 BUG();
511 };
512}
513
514/*
515 * Callback for the OUT data phase of the segment request
516 *
517 * Check wa_seg_cb(); most comments also apply here because this
518 * function does almost the same thing and they work closely
519 * together.
520 *
521 * If the seg request has failed but this DTO phase has succeeded,
522 * wa_seg_cb() has already failed the segment and moved the
523 * status to WA_SEG_ERROR, so this will go through 'case 0' and
524 * effectively do nothing.
525 */
526static void wa_seg_dto_cb(struct urb *urb)
527{
528 struct wa_seg *seg = urb->context;
529 struct wa_xfer *xfer = seg->xfer;
530 struct wahc *wa;
531 struct device *dev;
532 struct wa_rpipe *rpipe;
533 unsigned long flags;
534 unsigned rpipe_ready = 0;
535 u8 done = 0;
536
537 d_fnstart(3, NULL, "(urb %p [%d])\n", urb, urb->status);
538 switch (urb->status) {
539 case 0:
540 spin_lock_irqsave(&xfer->lock, flags);
541 wa = xfer->wa;
542 dev = &wa->usb_iface->dev;
543 d_printf(2, dev, "xfer %p#%u: data out done (%d bytes)\n",
544 xfer, seg->index, urb->actual_length);
545 if (seg->status < WA_SEG_PENDING)
546 seg->status = WA_SEG_PENDING;
547 seg->result = urb->actual_length;
548 spin_unlock_irqrestore(&xfer->lock, flags);
549 break;
550 case -ECONNRESET: /* URB unlinked; no need to do anything */
551 case -ENOENT: /* as it was done by the who unlinked us */
552 break;
553 default: /* Other errors ... */
554 spin_lock_irqsave(&xfer->lock, flags);
555 wa = xfer->wa;
556 dev = &wa->usb_iface->dev;
557 rpipe = xfer->ep->hcpriv;
558 if (printk_ratelimit())
559 dev_err(dev, "xfer %p#%u: data out error %d\n",
560 xfer, seg->index, urb->status);
561 if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
562 EDC_ERROR_TIMEFRAME)){
563 dev_err(dev, "DTO: URB max acceptable errors "
564 "exceeded, resetting device\n");
565 wa_reset_all(wa);
566 }
567 if (seg->status != WA_SEG_ERROR) {
568 seg->status = WA_SEG_ERROR;
569 seg->result = urb->status;
570 xfer->segs_done++;
571 __wa_xfer_abort(xfer);
572 rpipe_ready = rpipe_avail_inc(rpipe);
573 done = __wa_xfer_is_done(xfer);
574 }
575 spin_unlock_irqrestore(&xfer->lock, flags);
576 if (done)
577 wa_xfer_completion(xfer);
578 if (rpipe_ready)
579 wa_xfer_delayed_run(rpipe);
580 }
581 d_fnend(3, NULL, "(urb %p [%d]) = void\n", urb, urb->status);
582}
583
584/*
585 * Callback for the segment request
586 *
587 * If successful, transition state (unless already transitioned or an
588 * outbound transfer); otherwise, take note of the error, mark this
589 * segment done and try completion.
590 *
591 * Note we don't access the transfer until we are sure that it hasn't
592 * been cancelled (ECONNRESET, ENOENT), which could mean that
593 * seg->xfer could be already gone.
594 *
595 * We have to check before setting the status to WA_SEG_PENDING
596 * because sometimes the xfer result callback arrives before this
597 * callback (geeeeeeze), so it might happen that we are already in
598 * another state. As well, we don't set it if the transfer is inbound,
599 * as in that case, wa_seg_dto_cb will do it when the OUT data phase
600 * finishes.
601 */
602static void wa_seg_cb(struct urb *urb)
603{
604 struct wa_seg *seg = urb->context;
605 struct wa_xfer *xfer = seg->xfer;
606 struct wahc *wa;
607 struct device *dev;
608 struct wa_rpipe *rpipe;
609 unsigned long flags;
610 unsigned rpipe_ready;
611 u8 done = 0;
612
613 d_fnstart(3, NULL, "(urb %p [%d])\n", urb, urb->status);
614 switch (urb->status) {
615 case 0:
616 spin_lock_irqsave(&xfer->lock, flags);
617 wa = xfer->wa;
618 dev = &wa->usb_iface->dev;
619 d_printf(2, dev, "xfer %p#%u: request done\n",
620 xfer, seg->index);
621 if (xfer->is_inbound && seg->status < WA_SEG_PENDING)
622 seg->status = WA_SEG_PENDING;
623 spin_unlock_irqrestore(&xfer->lock, flags);
624 break;
625 case -ECONNRESET: /* URB unlinked; no need to do anything */
626 case -ENOENT: /* as it was done by the who unlinked us */
627 break;
628 default: /* Other errors ... */
629 spin_lock_irqsave(&xfer->lock, flags);
630 wa = xfer->wa;
631 dev = &wa->usb_iface->dev;
632 rpipe = xfer->ep->hcpriv;
633 if (printk_ratelimit())
634 dev_err(dev, "xfer %p#%u: request error %d\n",
635 xfer, seg->index, urb->status);
636 if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
637 EDC_ERROR_TIMEFRAME)){
638 dev_err(dev, "DTO: URB max acceptable errors "
639 "exceeded, resetting device\n");
640 wa_reset_all(wa);
641 }
642 usb_unlink_urb(seg->dto_urb);
643 seg->status = WA_SEG_ERROR;
644 seg->result = urb->status;
645 xfer->segs_done++;
646 __wa_xfer_abort(xfer);
647 rpipe_ready = rpipe_avail_inc(rpipe);
648 done = __wa_xfer_is_done(xfer);
649 spin_unlock_irqrestore(&xfer->lock, flags);
650 if (done)
651 wa_xfer_completion(xfer);
652 if (rpipe_ready)
653 wa_xfer_delayed_run(rpipe);
654 }
655 d_fnend(3, NULL, "(urb %p [%d]) = void\n", urb, urb->status);
656}
657
658/*
659 * Allocate the segs array and initialize each of them
660 *
661 * The segments are freed by wa_xfer_destroy() when the xfer use count
662 * drops to zero; however, because each segment is given the same life
663 * cycle as the USB URB it contains, it is actually freed by
664 * usb_put_urb() on the contained USB URB (twisted, eh?).
665 */
666static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
667{
668 int result, cnt;
669 size_t alloc_size = sizeof(*xfer->seg[0])
670 - sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size;
671 struct usb_device *usb_dev = xfer->wa->usb_dev;
672 const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd;
673 struct wa_seg *seg;
674 size_t buf_itr, buf_size, buf_itr_size;
675
676 result = -ENOMEM;
677 xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC);
678 if (xfer->seg == NULL)
679 goto error_segs_kzalloc;
680 buf_itr = 0;
681 buf_size = xfer->urb->transfer_buffer_length;
682 for (cnt = 0; cnt < xfer->segs; cnt++) {
683 seg = xfer->seg[cnt] = kzalloc(alloc_size, GFP_ATOMIC);
684 if (seg == NULL)
685 goto error_seg_kzalloc;
686 wa_seg_init(seg);
687 seg->xfer = xfer;
688 seg->index = cnt;
689 usb_fill_bulk_urb(&seg->urb, usb_dev,
690 usb_sndbulkpipe(usb_dev,
691 dto_epd->bEndpointAddress),
692 &seg->xfer_hdr, xfer_hdr_size,
693 wa_seg_cb, seg);
694 buf_itr_size = buf_size > xfer->seg_size ?
695 xfer->seg_size : buf_size;
696 if (xfer->is_inbound == 0 && buf_size > 0) {
697 seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC);
698 if (seg->dto_urb == NULL)
699 goto error_dto_alloc;
700 usb_fill_bulk_urb(
701 seg->dto_urb, usb_dev,
702 usb_sndbulkpipe(usb_dev,
703 dto_epd->bEndpointAddress),
704 NULL, 0, wa_seg_dto_cb, seg);
705 if (xfer->is_dma) {
706 seg->dto_urb->transfer_dma =
707 xfer->urb->transfer_dma + buf_itr;
708 seg->dto_urb->transfer_flags |=
709 URB_NO_TRANSFER_DMA_MAP;
710 } else
711 seg->dto_urb->transfer_buffer =
712 xfer->urb->transfer_buffer + buf_itr;
713 seg->dto_urb->transfer_buffer_length = buf_itr_size;
714 }
715 seg->status = WA_SEG_READY;
716 buf_itr += buf_itr_size;
717 buf_size -= buf_itr_size;
718 }
719 return 0;
720
721error_dto_alloc:
722 kfree(xfer->seg[cnt]);
723 cnt--;
724error_seg_kzalloc:
725 /* use the fact that cnt is left at where it failed */
726 for (; cnt > 0; cnt--) {
727 if (xfer->is_inbound == 0)
728 kfree(xfer->seg[cnt]->dto_urb);
729 kfree(xfer->seg[cnt]);
730 }
731error_segs_kzalloc:
732 return result;
733}
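/*
 * Note on the allocation size used above: struct wa_seg ends in a
 * basic struct wa_xfer_hdr followed by the flexible array
 * xfer_extra[], so allocating
 * sizeof(*seg) - sizeof(seg->xfer_hdr) + xfer_hdr_size leaves exactly
 * enough trailing room for the type-specific request header (e.g. the
 * baSetupData of a struct wa_xfer_ctl for control transfers).
 */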
734
735/*
736 * Allocates all the stuff needed to submit a transfer
737 *
738 * Breaks the whole data buffer in a list of segments, each one has a
739 * structure allocated to it and linked in xfer->seg[index]
740 *
741 * FIXME: merge setup_segs() and the last part of this function, no
742 * need to do two for loops when we could run everything in a
743 * single one
744 */
745static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb)
746{
747 int result;
748 struct device *dev = &xfer->wa->usb_iface->dev;
749 enum wa_xfer_type xfer_type = 0; /* shut up GCC */
750 size_t xfer_hdr_size, cnt, transfer_size;
751 struct wa_xfer_hdr *xfer_hdr0, *xfer_hdr;
752
753 d_fnstart(3, dev, "(xfer %p [rpipe %p] urb %p)\n",
754 xfer, xfer->ep->hcpriv, urb);
755
756 result = __wa_xfer_setup_sizes(xfer, &xfer_type);
757 if (result < 0)
758 goto error_setup_sizes;
759 xfer_hdr_size = result;
760 result = __wa_xfer_setup_segs(xfer, xfer_hdr_size);
761 if (result < 0) {
762 dev_err(dev, "xfer %p: Failed to allocate %d segments: %d\n",
763 xfer, xfer->segs, result);
764 goto error_setup_segs;
765 }
766 /* Fill the first header */
767 xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
768 wa_xfer_id_init(xfer);
769 __wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size);
770
771 /* Fill remaining headers */
772 xfer_hdr = xfer_hdr0;
773 transfer_size = urb->transfer_buffer_length;
774 xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ?
775 cpu_to_le32(xfer->seg_size) : cpu_to_le32(transfer_size);
776 transfer_size -= xfer->seg_size;
777 for (cnt = 1; cnt < xfer->segs; cnt++) {
778 xfer_hdr = &xfer->seg[cnt]->xfer_hdr;
779 memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
780 xfer_hdr->bTransferSegment = cnt;
781 xfer_hdr->dwTransferLength = transfer_size > xfer->seg_size ?
782 cpu_to_le32(xfer->seg_size)
783 : cpu_to_le32(transfer_size);
784 xfer->seg[cnt]->status = WA_SEG_READY;
785 transfer_size -= xfer->seg_size;
786 }
787 xfer_hdr->bTransferSegment |= 0x80; /* this is the last segment */
788 result = 0;
789error_setup_segs:
790error_setup_sizes:
791 d_fnend(3, dev, "(xfer %p [rpipe %p] urb %p) = %d\n",
792 xfer, xfer->ep->hcpriv, urb, result);
793 return result;
794}
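/*
 * Illustrative numbers (hypothetical, for clarity only): a 2500-byte
 * outbound URB with seg_size = 1024 is set up above as 3 segments with
 * dwTransferLength 1024, 1024 and 452, and bTransferSegment 0x00, 0x01
 * and 0x82 (the last segment carries the 0x80 "last" flag).
 */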
795
796/*
797 * Submit a segment request and, for outbound transfers, its DTO data URB
798 *
799 * rpipe->seg_lock is held!
800 */
801static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
802 struct wa_seg *seg)
803{
804 int result;
805 result = usb_submit_urb(&seg->urb, GFP_ATOMIC);
806 if (result < 0) {
807 printk(KERN_ERR "xfer %p#%u: REQ submit failed: %d\n",
808 xfer, seg->index, result);
809 goto error_seg_submit;
810 }
811 if (seg->dto_urb) {
812 result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
813 if (result < 0) {
814 printk(KERN_ERR "xfer %p#%u: DTO submit failed: %d\n",
815 xfer, seg->index, result);
816 goto error_dto_submit;
817 }
818 }
819 seg->status = WA_SEG_SUBMITTED;
820 rpipe_avail_dec(rpipe);
821 return 0;
822
823error_dto_submit:
824 usb_unlink_urb(&seg->urb);
825error_seg_submit:
826 seg->status = WA_SEG_ERROR;
827 seg->result = result;
828 return result;
829}
830
831/*
832 * Execute more queued request segments until the maximum concurrent allowed
833 *
834 * The ugly unlock/lock sequence on the error path is needed as the
835 * xfer->lock normally nests the seg_lock and not vice versa.
836 *
837 */
838static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
839{
840 int result;
841 struct device *dev = &rpipe->wa->usb_iface->dev;
842 struct wa_seg *seg;
843 struct wa_xfer *xfer;
844 unsigned long flags;
845
846 d_fnstart(1, dev, "(rpipe #%d) %d segments available\n",
847 le16_to_cpu(rpipe->descr.wRPipeIndex),
848 atomic_read(&rpipe->segs_available));
849 spin_lock_irqsave(&rpipe->seg_lock, flags);
850 while (atomic_read(&rpipe->segs_available) > 0
851 && !list_empty(&rpipe->seg_list)) {
852 seg = list_entry(rpipe->seg_list.next, struct wa_seg,
853 list_node);
854 list_del(&seg->list_node);
855 xfer = seg->xfer;
856 result = __wa_seg_submit(rpipe, xfer, seg);
857 d_printf(1, dev, "xfer %p#%u submitted from delayed "
858 "[%d segments available] %d\n",
859 xfer, seg->index,
860 atomic_read(&rpipe->segs_available), result);
861 if (unlikely(result < 0)) {
862 spin_unlock_irqrestore(&rpipe->seg_lock, flags);
863 spin_lock_irqsave(&xfer->lock, flags);
864 __wa_xfer_abort(xfer);
865 xfer->segs_done++;
866 spin_unlock_irqrestore(&xfer->lock, flags);
867 spin_lock_irqsave(&rpipe->seg_lock, flags);
868 }
869 }
870 spin_unlock_irqrestore(&rpipe->seg_lock, flags);
871 d_fnend(1, dev, "(rpipe #%d) = void, %d segments available\n",
872 le16_to_cpu(rpipe->descr.wRPipeIndex),
873 atomic_read(&rpipe->segs_available));
874
875}
876
877/*
878 *
879 * xfer->lock is taken
880 *
881 * On failure submitting we just stop submitting and return error;
882 * wa_urb_enqueue_b() will execute the completion path
883 */
884static int __wa_xfer_submit(struct wa_xfer *xfer)
885{
886 int result;
887 struct wahc *wa = xfer->wa;
888 struct device *dev = &wa->usb_iface->dev;
889 unsigned cnt;
890 struct wa_seg *seg;
891 unsigned long flags;
892 struct wa_rpipe *rpipe = xfer->ep->hcpriv;
893 size_t maxrequests = le16_to_cpu(rpipe->descr.wRequests);
894 u8 available;
895 u8 empty;
896
897 d_fnstart(3, dev, "(xfer %p [rpipe %p])\n",
898 xfer, xfer->ep->hcpriv);
899
900 spin_lock_irqsave(&wa->xfer_list_lock, flags);
901 list_add_tail(&xfer->list_node, &wa->xfer_list);
902 spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
903
904 BUG_ON(atomic_read(&rpipe->segs_available) > maxrequests);
905 result = 0;
906 spin_lock_irqsave(&rpipe->seg_lock, flags);
907 for (cnt = 0; cnt < xfer->segs; cnt++) {
908 available = atomic_read(&rpipe->segs_available);
909 empty = list_empty(&rpipe->seg_list);
910 seg = xfer->seg[cnt];
911 d_printf(2, dev, "xfer %p#%u: available %u empty %u (%s)\n",
912 xfer, cnt, available, empty,
913 available == 0 || !empty ? "delayed" : "submitted");
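		/*
		 * The "!empty" part of the test below keeps segments in
		 * FIFO order: if anything is already waiting on
		 * rpipe->seg_list, new segments queue behind it even
		 * when request slots are free.
		 */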
914 if (available == 0 || !empty) {
915 d_printf(1, dev, "xfer %p#%u: delayed\n", xfer, cnt);
916 seg->status = WA_SEG_DELAYED;
917 list_add_tail(&seg->list_node, &rpipe->seg_list);
918 } else {
919 result = __wa_seg_submit(rpipe, xfer, seg);
920 if (result < 0)
921 goto error_seg_submit;
922 }
923 xfer->segs_submitted++;
924 }
925 spin_unlock_irqrestore(&rpipe->seg_lock, flags);
926 d_fnend(3, dev, "(xfer %p [rpipe %p]) = void\n", xfer,
927 xfer->ep->hcpriv);
928 return result;
929
930error_seg_submit:
931 __wa_xfer_abort(xfer);
932 spin_unlock_irqrestore(&rpipe->seg_lock, flags);
933 d_fnend(3, dev, "(xfer %p [rpipe %p]) = void\n", xfer,
934 xfer->ep->hcpriv);
935 return result;
936}
937
938/*
939 * Second part of a URB/transfer enqueuement
940 *
941 * Assumes this comes from wa_urb_enqueue() [maybe through
942 * wa_urb_enqueue_run()]. At this point:
943 *
944 * xfer->wa filled and refcounted
945 * xfer->ep filled with rpipe refcounted if
946 * delayed == 0
947 * xfer->urb filled and refcounted (this is the case when called
948 * from wa_urb_enqueue() as we come from usb_submit_urb()
949 * and when called by wa_urb_enqueue_run(), as we took an
950 * extra ref dropped by _run() after we return).
951 * xfer->gfp filled
952 *
953 * If we fail at __wa_xfer_submit(), then we just check if we are done
954 * and if so, we run the completion procedure. However, if we are not
955 * yet done, we do nothing and wait for the completion handlers from
956 * the submitted URBs or from the xfer-result path to kick in. If xfer
957 * result never kicks in, the xfer will timeout from the USB code and
958 * dequeue() will be called.
959 */
960static void wa_urb_enqueue_b(struct wa_xfer *xfer)
961{
962 int result;
963 unsigned long flags;
964 struct urb *urb = xfer->urb;
965 struct wahc *wa = xfer->wa;
966 struct wusbhc *wusbhc = wa->wusb;
967 struct device *dev = &wa->usb_iface->dev;
968 struct wusb_dev *wusb_dev;
969 unsigned done;
970
971 d_fnstart(3, dev, "(wa %p urb %p)\n", wa, urb);
972 result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp);
973 if (result < 0)
974 goto error_rpipe_get;
975 result = -ENODEV;
976 /* FIXME: segmentation broken -- kills DWA */
977 mutex_lock(&wusbhc->mutex); /* get a WUSB dev */
978 if (urb->dev == NULL)
979 goto error_dev_gone;
980 wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
981 if (wusb_dev == NULL) {
982 mutex_unlock(&wusbhc->mutex);
983 goto error_dev_gone;
984 }
985 mutex_unlock(&wusbhc->mutex);
986
987 spin_lock_irqsave(&xfer->lock, flags);
988 xfer->wusb_dev = wusb_dev;
989 result = urb->status;
990 if (urb->status != -EINPROGRESS)
991 goto error_dequeued;
992
993 result = __wa_xfer_setup(xfer, urb);
994 if (result < 0)
995 goto error_xfer_setup;
996 result = __wa_xfer_submit(xfer);
997 if (result < 0)
998 goto error_xfer_submit;
999 spin_unlock_irqrestore(&xfer->lock, flags);
1000 d_fnend(3, dev, "(wa %p urb %p) = void\n", wa, urb);
1001 return;
1002
1003 /* This is basically wa_xfer_completion() broken up: wa_xfer_giveback()
1004 * does a wa_xfer_put() that will call wa_xfer_destroy() and clean
1005 * up/undo setup().
1006 */
1007error_xfer_setup:
1008error_dequeued:
1009 spin_unlock_irqrestore(&xfer->lock, flags);
1010 /* FIXME: segmentation broken, kills DWA */
1011 if (wusb_dev)
1012 wusb_dev_put(wusb_dev);
1013error_dev_gone:
1014 rpipe_put(xfer->ep->hcpriv);
1015error_rpipe_get:
1016 xfer->result = result;
1017 wa_xfer_giveback(xfer);
1018 d_fnend(3, dev, "(wa %p urb %p) = (void) %d\n", wa, urb, result);
1019 return;
1020
1021error_xfer_submit:
1022 done = __wa_xfer_is_done(xfer);
1023 xfer->result = result;
1024 spin_unlock_irqrestore(&xfer->lock, flags);
1025 if (done)
1026 wa_xfer_completion(xfer);
1027 d_fnend(3, dev, "(wa %p urb %p) = (void) %d\n", wa, urb, result);
1028 return;
1029}
1030
1031/*
1032 * Execute the delayed transfers in the Wire Adapter @wa
1033 *
1034 * We need to be careful here, as dequeue() could be called in the
1035 * middle. That's why we do the whole thing under the
1036 * wa->xfer_list_lock. If dequeue() jumps in, it first locks urb->lock
1037 * and then checks the list -- so as we would be acquiring in inverse
1038 * order, we just drop the lock once we have the xfer and reacquire it
1039 * later.
1040 */
1041void wa_urb_enqueue_run(struct work_struct *ws)
1042{
1043 struct wahc *wa = container_of(ws, struct wahc, xfer_work);
1044 struct device *dev = &wa->usb_iface->dev;
1045 struct wa_xfer *xfer, *next;
1046 struct urb *urb;
1047
1048 d_fnstart(3, dev, "(wa %p)\n", wa);
1049 spin_lock_irq(&wa->xfer_list_lock);
1050 list_for_each_entry_safe(xfer, next, &wa->xfer_delayed_list,
1051 list_node) {
1052 list_del_init(&xfer->list_node);
1053 spin_unlock_irq(&wa->xfer_list_lock);
1054
1055 urb = xfer->urb;
1056 wa_urb_enqueue_b(xfer);
1057 usb_put_urb(urb); /* taken when queuing */
1058
1059 spin_lock_irq(&wa->xfer_list_lock);
1060 }
1061 spin_unlock_irq(&wa->xfer_list_lock);
1062 d_fnend(3, dev, "(wa %p) = void\n", wa);
1063}
1064EXPORT_SYMBOL_GPL(wa_urb_enqueue_run);
1065
1066/*
1067 * Submit a transfer to the Wire Adapter in a delayed way
1068 *
1069 * The process of enqueuing involves possible sleeps [see
1070 * enqueue_b(), for the rpipe_get() and the mutex_lock()]. If we are
1071 * in an atomic section, we defer the enqueue_b() call; else we call it directly.
1072 *
1073 * @urb: We own a reference to it done by the HCI Linux USB stack that
1074 * will be given up by calling usb_hcd_giveback_urb() or by
1075 * returning error from this function -> ergo we don't have to
1076 * refcount it.
1077 */
1078int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
1079 struct urb *urb, gfp_t gfp)
1080{
1081 int result;
1082 struct device *dev = &wa->usb_iface->dev;
1083 struct wa_xfer *xfer;
1084 unsigned long my_flags;
1085 unsigned cant_sleep = irqs_disabled() | in_atomic();
1086
1087 d_fnstart(3, dev, "(wa %p ep %p urb %p [%d] gfp 0x%x)\n",
1088 wa, ep, urb, urb->transfer_buffer_length, gfp);
1089
1090 if (urb->transfer_buffer == NULL
1091 && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
1092 && urb->transfer_buffer_length != 0) {
1093 dev_err(dev, "BUG? urb %p: NULL xfer buffer & NODMA\n", urb);
1094 dump_stack();
1095 }
1096
1097 result = -ENOMEM;
1098 xfer = kzalloc(sizeof(*xfer), gfp);
1099 if (xfer == NULL)
1100 goto error_kmalloc;
1101
1102 result = -ENOENT;
1103 if (urb->status != -EINPROGRESS) /* cancelled */
1104 goto error_dequeued; /* before starting? */
1105 wa_xfer_init(xfer);
1106 xfer->wa = wa_get(wa);
1107 xfer->urb = urb;
1108 xfer->gfp = gfp;
1109 xfer->ep = ep;
1110 urb->hcpriv = xfer;
1111 d_printf(2, dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n",
1112 xfer, urb, urb->pipe, urb->transfer_buffer_length,
1113 urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma",
1114 urb->pipe & USB_DIR_IN ? "inbound" : "outbound",
1115 cant_sleep ? "deferred" : "inline");
1116 if (cant_sleep) {
1117 usb_get_urb(urb);
1118 spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
1119 list_add_tail(&xfer->list_node, &wa->xfer_delayed_list);
1120 spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
1121 queue_work(wusbd, &wa->xfer_work);
1122 } else {
1123 wa_urb_enqueue_b(xfer);
1124 }
1125 d_fnend(3, dev, "(wa %p ep %p urb %p [%d] gfp 0x%x) = 0\n",
1126 wa, ep, urb, urb->transfer_buffer_length, gfp);
1127 return 0;
1128
1129error_dequeued:
1130 kfree(xfer);
1131error_kmalloc:
1132 d_fnend(3, dev, "(wa %p ep %p urb %p [%d] gfp 0x%x) = %d\n",
1133 wa, ep, urb, urb->transfer_buffer_length, gfp, result);
1134 return result;
1135}
1136EXPORT_SYMBOL_GPL(wa_urb_enqueue);
1137
1138/*
1139 * Dequeue a URB and make sure uwb_hcd_giveback_urb() [completion
1140 * handler] is called.
1141 *
1142 * Until a transfer goes successfully through wa_urb_enqueue() it
1143 * needs to be dequeued with the completion called; when stuck in the
1144 * delayed list or before wa_xfer_setup() is called, we need to do completion.
1145 *
1146 * not setup: If there is no hcpriv yet, that means that enqueue()
1147 * still had no time to set the xfer up. Because
1148 * urb->status should be other than -EINPROGRESS,
1149 * enqueue() will catch that and bail out.
1150 *
1151 * If the transfer has gone through setup, we just need to clean it
1152 * up. If it has gone through submit(), we have to abort it [with an
1153 * asynch request] and then make sure we cancel each segment.
1154 *
1155 */
1156int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
1157{
1158 struct device *dev = &wa->usb_iface->dev;
1159 unsigned long flags, flags2;
1160 struct wa_xfer *xfer;
1161 struct wa_seg *seg;
1162 struct wa_rpipe *rpipe;
1163 unsigned cnt;
1164 unsigned rpipe_ready = 0;
1165
1166 d_fnstart(3, dev, "(wa %p, urb %p)\n", wa, urb);
1167
1168 d_printf(1, dev, "xfer %p urb %p: aborting\n", urb->hcpriv, urb);
1169 xfer = urb->hcpriv;
1170 if (xfer == NULL) {
1171 /* Nothing setup yet; enqueue() will see urb->status !=
1172 * -EINPROGRESS (set by the hcd layer) and bail out with
1173 * an error, no need to do completion here
1174 */
1175 BUG_ON(urb->status == -EINPROGRESS);
1176 goto out;
1177 }
1178 spin_lock_irqsave(&xfer->lock, flags);
1179 rpipe = xfer->ep->hcpriv;
1180 /* Check the delayed list -> if there, release and complete */
1181 spin_lock_irqsave(&wa->xfer_list_lock, flags2);
1182 if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
1183 goto dequeue_delayed;
1184 spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
1185 if (xfer->seg == NULL) /* still hasn't reached */
1186 goto out_unlock; /* setup(), enqueue_b() completes */
1187 /* Ok, the xfer is in flight already, it's been setup and submitted.*/
1188 __wa_xfer_abort(xfer);
1189 for (cnt = 0; cnt < xfer->segs; cnt++) {
1190 seg = xfer->seg[cnt];
1191 switch (seg->status) {
1192 case WA_SEG_NOTREADY:
1193 case WA_SEG_READY:
1194 printk(KERN_ERR "xfer %p#%u: dequeue bad state %u\n",
1195 xfer, cnt, seg->status);
1196 WARN_ON(1);
1197 break;
1198 case WA_SEG_DELAYED:
1199 seg->status = WA_SEG_ABORTED;
1200 spin_lock_irqsave(&rpipe->seg_lock, flags2);
1201 list_del(&seg->list_node);
1202 xfer->segs_done++;
1203 rpipe_ready = rpipe_avail_inc(rpipe);
1204 spin_unlock_irqrestore(&rpipe->seg_lock, flags2);
1205 break;
1206 case WA_SEG_SUBMITTED:
1207 seg->status = WA_SEG_ABORTED;
1208 usb_unlink_urb(&seg->urb);
1209 if (xfer->is_inbound == 0)
1210 usb_unlink_urb(seg->dto_urb);
1211 xfer->segs_done++;
1212 rpipe_ready = rpipe_avail_inc(rpipe);
1213 break;
1214 case WA_SEG_PENDING:
1215 seg->status = WA_SEG_ABORTED;
1216 xfer->segs_done++;
1217 rpipe_ready = rpipe_avail_inc(rpipe);
1218 break;
1219 case WA_SEG_DTI_PENDING:
1220 usb_unlink_urb(wa->dti_urb);
1221 seg->status = WA_SEG_ABORTED;
1222 xfer->segs_done++;
1223 rpipe_ready = rpipe_avail_inc(rpipe);
1224 break;
1225 case WA_SEG_DONE:
1226 case WA_SEG_ERROR:
1227 case WA_SEG_ABORTED:
1228 break;
1229 }
1230 }
1231 xfer->result = urb->status; /* -ENOENT or -ECONNRESET */
1232 __wa_xfer_is_done(xfer);
1233 spin_unlock_irqrestore(&xfer->lock, flags);
1234 wa_xfer_completion(xfer);
1235 if (rpipe_ready)
1236 wa_xfer_delayed_run(rpipe);
1237 d_fnend(3, dev, "(wa %p, urb %p) = 0\n", wa, urb);
1238 return 0;
1239
1240out_unlock:
1241 spin_unlock_irqrestore(&xfer->lock, flags);
1242out:
1243 d_fnend(3, dev, "(wa %p, urb %p) = 0\n", wa, urb);
1244 return 0;
1245
1246dequeue_delayed:
1247 list_del_init(&xfer->list_node);
1248 spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
1249 xfer->result = urb->status;
1250 spin_unlock_irqrestore(&xfer->lock, flags);
1251 wa_xfer_giveback(xfer);
1252 usb_put_urb(urb); /* we got a ref in enqueue() */
1253 d_fnend(3, dev, "(wa %p, urb %p) = 0\n", wa, urb);
1254 return 0;
1255}
1256EXPORT_SYMBOL_GPL(wa_urb_dequeue);
1257
1258/*
1259 * Translation from WA status codes (WUSB1.0 Table 8.15) to errno
1260 * codes
1261 *
1262 * Positive errno values are internal inconsistencies and should be
1263 * flagged louder. Negative are to be passed up to the user in the
1264 * normal way.
1265 *
1266 * @status: USB WA status code -- high two bits are stripped.
1267 */
1268static int wa_xfer_status_to_errno(u8 status)
1269{
1270 int errno;
1271 u8 real_status = status;
1272 static int xlat[] = {
1273 [WA_XFER_STATUS_SUCCESS] = 0,
1274 [WA_XFER_STATUS_HALTED] = -EPIPE,
1275 [WA_XFER_STATUS_DATA_BUFFER_ERROR] = -ENOBUFS,
1276 [WA_XFER_STATUS_BABBLE] = -EOVERFLOW,
1277 [WA_XFER_RESERVED] = EINVAL,
1278 [WA_XFER_STATUS_NOT_FOUND] = 0,
1279 [WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM,
1280 [WA_XFER_STATUS_TRANSACTION_ERROR] = -EILSEQ,
1281 [WA_XFER_STATUS_ABORTED] = -EINTR,
1282 [WA_XFER_STATUS_RPIPE_NOT_READY] = EINVAL,
1283 [WA_XFER_INVALID_FORMAT] = EINVAL,
1284 [WA_XFER_UNEXPECTED_SEGMENT_NUMBER] = EINVAL,
1285 [WA_XFER_STATUS_RPIPE_TYPE_MISMATCH] = EINVAL,
1286 };
1287 status &= 0x3f;
1288
1289 if (status == 0)
1290 return 0;
1291 if (status >= ARRAY_SIZE(xlat)) {
1292 if (printk_ratelimit())
1293 printk(KERN_ERR "%s(): BUG? "
1294 "Unknown WA transfer status 0x%02x\n",
1295 __func__, real_status);
1296 return -EINVAL;
1297 }
1298 errno = xlat[status];
1299 if (unlikely(errno > 0)) {
1300 if (printk_ratelimit())
1301 printk(KERN_ERR "%s(): BUG? "
1302 "Inconsistent WA status: 0x%02x\n",
1303 __func__, real_status);
1304 errno = -errno;
1305 }
1306 return errno;
1307}
1308
1309/*
1310 * Process a xfer result completion message
1311 *
1312 * inbound transfers: need to schedule a DTI read
1313 *
1314 * FIXME: this function needs to be broken up into parts
1315 */
1316static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer)
1317{
1318 int result;
1319 struct device *dev = &wa->usb_iface->dev;
1320 unsigned long flags;
1321 u8 seg_idx;
1322 struct wa_seg *seg;
1323 struct wa_rpipe *rpipe;
1324 struct wa_xfer_result *xfer_result = wa->xfer_result;
1325 u8 done = 0;
1326 u8 usb_status;
1327 unsigned rpipe_ready = 0;
1328
1329 d_fnstart(3, dev, "(wa %p xfer %p)\n", wa, xfer);
1330 spin_lock_irqsave(&xfer->lock, flags);
1331 seg_idx = xfer_result->bTransferSegment & 0x7f;
1332 if (unlikely(seg_idx >= xfer->segs))
1333 goto error_bad_seg;
1334 seg = xfer->seg[seg_idx];
1335 rpipe = xfer->ep->hcpriv;
1336 usb_status = xfer_result->bTransferStatus;
1337 d_printf(2, dev, "xfer %p#%u: bTransferStatus 0x%02x (seg %u)\n",
1338 xfer, seg_idx, usb_status, seg->status);
1339 if (seg->status == WA_SEG_ABORTED
1340 || seg->status == WA_SEG_ERROR) /* already handled */
1341 goto segment_aborted;
1342 if (seg->status == WA_SEG_SUBMITTED) /* ops, got here */
1343 seg->status = WA_SEG_PENDING; /* before wa_seg{_dto}_cb() */
1344 if (seg->status != WA_SEG_PENDING) {
1345 if (printk_ratelimit())
1346 dev_err(dev, "xfer %p#%u: Bad segment state %u\n",
1347 xfer, seg_idx, seg->status);
1348 seg->status = WA_SEG_PENDING; /* workaround/"fix" it */
1349 }
1350 if (usb_status & 0x80) {
1351 seg->result = wa_xfer_status_to_errno(usb_status);
1352 dev_err(dev, "DTI: xfer %p#%u failed (0x%02x)\n",
1353 xfer, seg->index, usb_status);
1354 goto error_complete;
1355 }
1356 /* FIXME: we ignore warnings, tally them for stats */
1357 if (usb_status & 0x40) /* Warning?... */
1358 usb_status = 0; /* ... pass */
1359 if (xfer->is_inbound) { /* IN data phase: read to buffer */
1360 seg->status = WA_SEG_DTI_PENDING;
1361 BUG_ON(wa->buf_in_urb->status == -EINPROGRESS);
1362 if (xfer->is_dma) {
1363 wa->buf_in_urb->transfer_dma =
1364 xfer->urb->transfer_dma
1365 + seg_idx * xfer->seg_size;
1366 wa->buf_in_urb->transfer_flags
1367 |= URB_NO_TRANSFER_DMA_MAP;
1368 } else {
1369 wa->buf_in_urb->transfer_buffer =
1370 xfer->urb->transfer_buffer
1371 + seg_idx * xfer->seg_size;
1372 wa->buf_in_urb->transfer_flags
1373 &= ~URB_NO_TRANSFER_DMA_MAP;
1374 }
1375 wa->buf_in_urb->transfer_buffer_length =
1376 le32_to_cpu(xfer_result->dwTransferLength);
1377 wa->buf_in_urb->context = seg;
1378 result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC);
1379 if (result < 0)
1380 goto error_submit_buf_in;
1381 } else {
1382 /* OUT data phase, complete it -- */
1383 seg->status = WA_SEG_DONE;
1384 seg->result = le32_to_cpu(xfer_result->dwTransferLength);
1385 xfer->segs_done++;
1386 rpipe_ready = rpipe_avail_inc(rpipe);
1387 done = __wa_xfer_is_done(xfer);
1388 }
1389 spin_unlock_irqrestore(&xfer->lock, flags);
1390 if (done)
1391 wa_xfer_completion(xfer);
1392 if (rpipe_ready)
1393 wa_xfer_delayed_run(rpipe);
1394 d_fnend(3, dev, "(wa %p xfer %p) = void\n", wa, xfer);
1395 return;
1396
1397
1398error_submit_buf_in:
1399 if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
1400 dev_err(dev, "DTI: URB max acceptable errors "
1401 "exceeded, resetting device\n");
1402 wa_reset_all(wa);
1403 }
1404 if (printk_ratelimit())
1405 dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",
1406 xfer, seg_idx, result);
1407 seg->result = result;
1408error_complete:
1409 seg->status = WA_SEG_ERROR;
1410 xfer->segs_done++;
1411 rpipe_ready = rpipe_avail_inc(rpipe);
1412 __wa_xfer_abort(xfer);
1413 done = __wa_xfer_is_done(xfer);
1414 spin_unlock_irqrestore(&xfer->lock, flags);
1415 if (done)
1416 wa_xfer_completion(xfer);
1417 if (rpipe_ready)
1418 wa_xfer_delayed_run(rpipe);
1419 d_fnend(3, dev, "(wa %p xfer %p) = void [segment/DTI-submit error]\n",
1420 wa, xfer);
1421 return;
1422
1423
1424error_bad_seg:
1425 spin_unlock_irqrestore(&xfer->lock, flags);
1426 wa_urb_dequeue(wa, xfer->urb);
1427 if (printk_ratelimit())
1428 dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx);
1429 if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
1430 dev_err(dev, "DTI: URB max acceptable errors "
1431 "exceeded, resetting device\n");
1432 wa_reset_all(wa);
1433 }
1434 d_fnend(3, dev, "(wa %p xfer %p) = void [bad seg]\n", wa, xfer);
1435 return;
1436
1437
1438segment_aborted:
1439 /* nothing to do, as the aborter did the completion */
1440 spin_unlock_irqrestore(&xfer->lock, flags);
1441 d_fnend(3, dev, "(wa %p xfer %p) = void [segment aborted]\n",
1442 wa, xfer);
1443 return;
1444
1445}
1446
1447/*
1448 * Callback for the IN data phase
1449 *
1450 * If successful, transition state; otherwise, take note of the
1451 * error, mark this segment done and try completion.
1452 *
1453 * Note we don't access the transfer until we are sure that it hasn't
1454 * been cancelled (ECONNRESET, ENOENT), which could mean that
1455 * seg->xfer could be already gone.
1456 */
1457static void wa_buf_in_cb(struct urb *urb)
1458{
1459 struct wa_seg *seg = urb->context;
1460 struct wa_xfer *xfer = seg->xfer;
1461 struct wahc *wa;
1462 struct device *dev;
1463 struct wa_rpipe *rpipe;
1464 unsigned rpipe_ready;
1465 unsigned long flags;
1466 u8 done = 0;
1467
1468 d_fnstart(3, NULL, "(urb %p [%d])\n", urb, urb->status);
1469 switch (urb->status) {
1470 case 0:
1471 spin_lock_irqsave(&xfer->lock, flags);
1472 wa = xfer->wa;
1473 dev = &wa->usb_iface->dev;
1474 rpipe = xfer->ep->hcpriv;
1475 d_printf(2, dev, "xfer %p#%u: data in done (%zu bytes)\n",
1476 xfer, seg->index, (size_t)urb->actual_length);
1477 seg->status = WA_SEG_DONE;
1478 seg->result = urb->actual_length;
1479 xfer->segs_done++;
1480 rpipe_ready = rpipe_avail_inc(rpipe);
1481 done = __wa_xfer_is_done(xfer);
1482 spin_unlock_irqrestore(&xfer->lock, flags);
1483 if (done)
1484 wa_xfer_completion(xfer);
1485 if (rpipe_ready)
1486 wa_xfer_delayed_run(rpipe);
1487 break;
1488 case -ECONNRESET: /* URB unlinked; no need to do anything */
1489 case -ENOENT: /* as it was done by the who unlinked us */
1490 break;
1491 default: /* Other errors ... */
1492 spin_lock_irqsave(&xfer->lock, flags);
1493 wa = xfer->wa;
1494 dev = &wa->usb_iface->dev;
1495 rpipe = xfer->ep->hcpriv;
1496 if (printk_ratelimit())
1497 dev_err(dev, "xfer %p#%u: data in error %d\n",
1498 xfer, seg->index, urb->status);
1499 if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
1500 EDC_ERROR_TIMEFRAME)){
1501 dev_err(dev, "DTO: URB max acceptable errors "
1502 "exceeded, resetting device\n");
1503 wa_reset_all(wa);
1504 }
1505 seg->status = WA_SEG_ERROR;
1506 seg->result = urb->status;
1507 xfer->segs_done++;
1508 rpipe_ready = rpipe_avail_inc(rpipe);
1509 __wa_xfer_abort(xfer);
1510 done = __wa_xfer_is_done(xfer);
1511 spin_unlock_irqrestore(&xfer->lock, flags);
1512 if (done)
1513 wa_xfer_completion(xfer);
1514 if (rpipe_ready)
1515 wa_xfer_delayed_run(rpipe);
1516 }
1517 d_fnend(3, NULL, "(urb %p [%d]) = void\n", urb, urb->status);
1518}
1519
1520/*
1521 * Handle an incoming transfer result buffer
1522 *
1523 * Given a transfer result buffer, it completes the transfer (possibly
1524 * scheduling a buffer-in read) and then resubmits the DTI URB for a
1525 * new transfer result read.
1526 *
1527 *
1528 * The xfer_result DTI URB state machine
1529 *
1530 * States: OFF | RXR (Read-Xfer-Result) | RBI (Read-Buffer-In)
1531 *
1532 * We start in OFF mode, the first xfer_result notification [through
1533 * wa_handle_notif_xfer()] moves us to RXR by posting the DTI-URB to
1534 * read.
1535 *
1536 * We receive a buffer -- if it is not an xfer_result, we complain and
1537 * repost the DTI-URB. If it is an xfer_result then we do the xfer seg
1538 * request accounting. If it is an IN segment, we move to RBI and post
1539 * a BUF-IN-URB to the right buffer. The BUF-IN-URB callback will
1540 * repost the DTI-URB and move back to RXR state. If there was no IN
1541 * segment, it will repost the DTI-URB directly.
1542 *
1543 * We go back to OFF when we detect a ENOENT or ESHUTDOWN (or too many
1544 * errors) in the URBs.
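 *
 * Informal transition summary (restating the description above):
 *
 *   OFF --first xfer_result notification--> RXR  (DTI URB posted)
 *   RXR --xfer_result for an IN segment---> RBI  (BUF-IN URB posted)
 *   RBI --buffer-in completes-------------> RXR  (DTI URB reposted)
 *   any --ENOENT/ESHUTDOWN/too many errors> OFF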
1545 */
1546static void wa_xfer_result_cb(struct urb *urb)
1547{
1548 int result;
1549 struct wahc *wa = urb->context;
1550 struct device *dev = &wa->usb_iface->dev;
1551 struct wa_xfer_result *xfer_result;
1552 u32 xfer_id;
1553 struct wa_xfer *xfer;
1554 u8 usb_status;
1555
1556 d_fnstart(3, dev, "(%p)\n", wa);
1557 BUG_ON(wa->dti_urb != urb);
1558 switch (wa->dti_urb->status) {
1559 case 0:
1560 /* We have a xfer result buffer; check it */
1561 d_printf(2, dev, "DTI: xfer result %d bytes at %p\n",
1562 urb->actual_length, urb->transfer_buffer);
1563 d_dump(3, dev, urb->transfer_buffer, urb->actual_length);
1564 if (wa->dti_urb->actual_length != sizeof(*xfer_result)) {
1565 dev_err(dev, "DTI Error: xfer result--bad size "
1566 "xfer result (%d bytes vs %zu needed)\n",
1567 urb->actual_length, sizeof(*xfer_result));
1568 break;
1569 }
1570 xfer_result = wa->xfer_result;
1571 if (xfer_result->hdr.bLength != sizeof(*xfer_result)) {
1572 dev_err(dev, "DTI Error: xfer result--"
1573 "bad header length %u\n",
1574 xfer_result->hdr.bLength);
1575 break;
1576 }
1577 if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) {
1578 dev_err(dev, "DTI Error: xfer result--"
1579 "bad header type 0x%02x\n",
1580 xfer_result->hdr.bNotifyType);
1581 break;
1582 }
1583 usb_status = xfer_result->bTransferStatus & 0x3f;
1584 if (usb_status == WA_XFER_STATUS_ABORTED
1585 || usb_status == WA_XFER_STATUS_NOT_FOUND)
1586 /* taken care of already */
1587 break;
1588 xfer_id = xfer_result->dwTransferID;
1589 xfer = wa_xfer_get_by_id(wa, xfer_id);
1590 if (xfer == NULL) {
1591 /* FIXME: transaction might have been cancelled */
1592 dev_err(dev, "DTI Error: xfer result--"
1593 "unknown xfer 0x%08x (status 0x%02x)\n",
1594 xfer_id, usb_status);
1595 break;
1596 }
1597 wa_xfer_result_chew(wa, xfer);
1598 wa_xfer_put(xfer);
1599 break;
1600 case -ENOENT: /* (we killed the URB)...so, no broadcast */
1601 case -ESHUTDOWN: /* going away! */
1602 dev_dbg(dev, "DTI: going down! %d\n", urb->status);
1603 goto out;
1604 default:
1605 /* Unknown error */
1606 if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS,
1607 EDC_ERROR_TIMEFRAME)) {
1608 dev_err(dev, "DTI: URB max acceptable errors "
1609 "exceeded, resetting device\n");
1610 wa_reset_all(wa);
1611 goto out;
1612 }
1613 if (printk_ratelimit())
1614 dev_err(dev, "DTI: URB error %d\n", urb->status);
1615 break;
1616 }
1617 /* Resubmit the DTI URB */
1618 result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
1619 if (result < 0) {
1620 dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
1621 "resetting\n", result);
1622 wa_reset_all(wa);
1623 }
1624out:
1625 d_fnend(3, dev, "(%p) = void\n", wa);
1626 return;
1627}
1628
1629/*
1630 * Transfer complete notification
1631 *
1632 * Called from the notif.c code. We get a notification on EP2 saying
1633 * that some endpoint has some transfer result data available. We are
1634 * about to read it.
1635 *
1636 * To speed things up, we always have a URB reading the DTI endpoint; we
1637 * don't really set it up and start it until the first xfer complete
1638 * notification arrives, which is what we do here.
1639 *
1640 * Follow up in wa_xfer_result_cb(), as that's where the whole state
1641 * machine starts.
1642 *
1643 * So here we just initialize the DTI URB for reading transfer result
1644 * notifications and also the buffer-in URB, for reading buffers. Then
1645 * we just submit the DTI URB.
1646 *
1647 * @wa shall be referenced
1648 */
1649void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
1650{
1651 int result;
1652 struct device *dev = &wa->usb_iface->dev;
1653 struct wa_notif_xfer *notif_xfer;
1654 const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;
1655
1656 d_fnstart(4, dev, "(%p, %p)\n", wa, notif_hdr);
1657 notif_xfer = container_of(notif_hdr, struct wa_notif_xfer, hdr);
1658 BUG_ON(notif_hdr->bNotifyType != WA_NOTIF_TRANSFER);
1659
1660 if ((0x80 | notif_xfer->bEndpoint) != dti_epd->bEndpointAddress) {
1661 /* FIXME: hardcoded limitation, adapt */
1662 dev_err(dev, "BUG: DTI ep is %u, not %u (hack me)\n",
1663 notif_xfer->bEndpoint, dti_epd->bEndpointAddress);
1664 goto error;
1665 }
1666 if (wa->dti_urb != NULL) /* DTI URB already started */
1667 goto out;
1668
1669 wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
1670 if (wa->dti_urb == NULL) {
1671 dev_err(dev, "Can't allocate DTI URB\n");
1672 goto error_dti_urb_alloc;
1673 }
1674 usb_fill_bulk_urb(
1675 wa->dti_urb, wa->usb_dev,
1676 usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
1677 wa->xfer_result, wa->xfer_result_size,
1678 wa_xfer_result_cb, wa);
1679
1680 wa->buf_in_urb = usb_alloc_urb(0, GFP_KERNEL);
1681 if (wa->buf_in_urb == NULL) {
1682 dev_err(dev, "Can't allocate BUF-IN URB\n");
1683 goto error_buf_in_urb_alloc;
1684 }
1685 usb_fill_bulk_urb(
1686 wa->buf_in_urb, wa->usb_dev,
1687 usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
1688 NULL, 0, wa_buf_in_cb, wa);
1689 result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
1690 if (result < 0) {
1691 dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
1692 "resetting\n", result);
1693 goto error_dti_urb_submit;
1694 }
1695out:
1696 d_fnend(4, dev, "(%p, %p) = void\n", wa, notif_hdr);
1697 return;
1698
1699error_dti_urb_submit:
1700 usb_put_urb(wa->buf_in_urb);
1701error_buf_in_urb_alloc:
1702 usb_put_urb(wa->dti_urb);
1703 wa->dti_urb = NULL;
1704error_dti_urb_alloc:
1705error:
1706 wa_reset_all(wa);
1707 d_fnend(4, dev, "(%p, %p) = void\n", wa, notif_hdr);
1708 return;
1709}
diff --git a/drivers/usb/wusbcore/wusbhc.c b/drivers/usb/wusbcore/wusbhc.c
new file mode 100644
index 000000000000..07c63a31c799
--- /dev/null
+++ b/drivers/usb/wusbcore/wusbhc.c
@@ -0,0 +1,418 @@
1/*
2 * Wireless USB Host Controller
3 * sysfs glue, wusbcore module support and life cycle management
4 *
5 *
6 * Copyright (C) 2005-2006 Intel Corporation
7 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License version
11 * 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 * 02110-1301, USA.
22 *
23 *
24 * Creation/destruction of wusbhc is split in two parts: the part that
25 * doesn't require the HCD to be added (wusbhc_{create,destroy}) and
26 * the part that does (phase B, wusbhc_b_{create,destroy}).
27 *
28 * This is so because usb_add_hcd() will start the HC, and thus, all
29 * the HC specific stuff has to be already initialized (like sysfs
30 * thingies).
31 */
32#include <linux/device.h>
33#include <linux/module.h>
34#include "wusbhc.h"
35
36/**
37 * Extract the wusbhc that corresponds to a USB Host Controller class device
38 *
39 * WARNING! Apply only if @dev is that of a
40 * wusbhc.usb_hcd.self->class_dev; otherwise, you lose.
41 */
42static struct wusbhc *usbhc_dev_to_wusbhc(struct device *dev)
43{
44 struct usb_bus *usb_bus = dev_get_drvdata(dev);
45 struct usb_hcd *usb_hcd = bus_to_hcd(usb_bus);
46 return usb_hcd_to_wusbhc(usb_hcd);
47}
48
49/*
50 * Show & store the current WUSB trust timeout
51 *
52 * We don't do locking--it is an 'atomic' value.
53 *
54 * The units that we store/show are always MILLISECONDS. However, the
55 * value of trust_timeout is jiffies.
56 */
57static ssize_t wusb_trust_timeout_show(struct device *dev,
58 struct device_attribute *attr, char *buf)
59{
60 struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
61
62 return scnprintf(buf, PAGE_SIZE, "%u\n", wusbhc->trust_timeout);
63}
64
65static ssize_t wusb_trust_timeout_store(struct device *dev,
66 struct device_attribute *attr,
67 const char *buf, size_t size)
68{
69 struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
70 ssize_t result = -ENOSYS;
71 unsigned trust_timeout;
72
73 result = sscanf(buf, "%u", &trust_timeout);
74 if (result != 1) {
75 result = -EINVAL;
76 goto out;
77 }
78 /* FIXME: maybe we should check for range validity? */
79 wusbhc->trust_timeout = trust_timeout;
80 cancel_delayed_work(&wusbhc->keep_alive_timer);
81 flush_workqueue(wusbd);
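	/* re-arm the keep-alive timer to fire at half the new trust timeout (ms converted to jiffies) */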
82 queue_delayed_work(wusbd, &wusbhc->keep_alive_timer,
83 (trust_timeout * CONFIG_HZ)/1000/2);
84out:
85 return result < 0 ? result : size;
86}
87static DEVICE_ATTR(wusb_trust_timeout, 0644, wusb_trust_timeout_show,
88 wusb_trust_timeout_store);
89
90/*
91 * Show & store the current WUSB CHID
92 */
93static ssize_t wusb_chid_show(struct device *dev,
94 struct device_attribute *attr, char *buf)
95{
96 struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
97 ssize_t result = 0;
98
99 if (wusbhc->wuie_host_info != NULL)
100 result += ckhdid_printf(buf, PAGE_SIZE,
101 &wusbhc->wuie_host_info->CHID);
102 return result;
103}
104
105/*
106 * Store a new CHID
107 *
108 * This will (FIXME) trigger many changes.
109 *
110 * - Send an all zeros CHID and it will stop the controller
111 * - Send a non-zero CHID and it will start it
112 * (if it was already started, it will just change the CHID,
113 * disconnecting all devices first).
114 *
115 * So first we scan the MMC we are sent and then we act on it. We
116 * read it in the same format as we print it, an ASCII string of 16
117 * hex bytes.
118 *
119 * See wusbhc_chid_set() for more info.
120 */
121static ssize_t wusb_chid_store(struct device *dev,
122 struct device_attribute *attr,
123 const char *buf, size_t size)
124{
125 struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
126 struct wusb_ckhdid chid;
127 ssize_t result;
128
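	/* the CHID is read as 16 space-separated hex byte values, the same format in which it is printed */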
129 result = sscanf(buf,
130 "%02hhx %02hhx %02hhx %02hhx "
131 "%02hhx %02hhx %02hhx %02hhx "
132 "%02hhx %02hhx %02hhx %02hhx "
133 "%02hhx %02hhx %02hhx %02hhx\n",
134 &chid.data[0] , &chid.data[1] ,
135 &chid.data[2] , &chid.data[3] ,
136 &chid.data[4] , &chid.data[5] ,
137 &chid.data[6] , &chid.data[7] ,
138 &chid.data[8] , &chid.data[9] ,
139 &chid.data[10], &chid.data[11],
140 &chid.data[12], &chid.data[13],
141 &chid.data[14], &chid.data[15]);
142 if (result != 16) {
143 dev_err(dev, "Unrecognized CHID (need 16 8-bit hex digits): "
144 "%d\n", (int)result);
145 return -EINVAL;
146 }
147 result = wusbhc_chid_set(wusbhc, &chid);
148 return result < 0 ? result : size;
149}
150static DEVICE_ATTR(wusb_chid, 0644, wusb_chid_show, wusb_chid_store);
151
152/* Group all the WUSBHC attributes */
153static struct attribute *wusbhc_attrs[] = {
154 &dev_attr_wusb_trust_timeout.attr,
155 &dev_attr_wusb_chid.attr,
156 NULL,
157};
158
159static struct attribute_group wusbhc_attr_group = {
160 .name = NULL, /* we want them in the same directory */
161 .attrs = wusbhc_attrs,
162};
163
164/*
165 * Create a wusbhc instance
166 *
167 * NOTEs:
168 *
169 * - assumes *wusbhc has been zeroed and wusbhc->usb_hcd has been
170 * initialized but not added.
171 *
172 * - fill out ports_max, mmcies_max and mmcie_{add,rm} before calling.
173 *
174 * - fill out wusbhc->uwb_rc and refcount it before calling
175 * - fill out the wusbhc->sec_modes array
176 */
177int wusbhc_create(struct wusbhc *wusbhc)
178{
179 int result = 0;
180
181 wusbhc->trust_timeout = WUSB_TRUST_TIMEOUT_MS;
182 mutex_init(&wusbhc->mutex);
183 result = wusbhc_mmcie_create(wusbhc);
184 if (result < 0)
185 goto error_mmcie_create;
186 result = wusbhc_devconnect_create(wusbhc);
187 if (result < 0)
188 goto error_devconnect_create;
189 result = wusbhc_rh_create(wusbhc);
190 if (result < 0)
191 goto error_rh_create;
192 result = wusbhc_sec_create(wusbhc);
193 if (result < 0)
194 goto error_sec_create;
195 return 0;
196
197error_sec_create:
198 wusbhc_rh_destroy(wusbhc);
199error_rh_create:
200 wusbhc_devconnect_destroy(wusbhc);
201error_devconnect_create:
202 wusbhc_mmcie_destroy(wusbhc);
203error_mmcie_create:
204 return result;
205}
206EXPORT_SYMBOL_GPL(wusbhc_create);
207
208static inline struct kobject *wusbhc_kobj(struct wusbhc *wusbhc)
209{
210 return &wusbhc->usb_hcd.self.controller->kobj;
211}
212
213/*
214 * Phase B of a wusbhc instance creation
215 *
216 * Creates fields that depend on wusbhc->usb_hcd having been
217 * added. This is where we create the sysfs files in
218 * /sys/class/usb_host/usb_hostX/.
219 *
220 * NOTE: Assumes wusbhc->usb_hcd has been already added by the upper
221 * layer (hwahc or whci)
222 */
223int wusbhc_b_create(struct wusbhc *wusbhc)
224{
225 int result = 0;
226 struct device *dev = wusbhc->usb_hcd.self.controller;
227
228 result = sysfs_create_group(wusbhc_kobj(wusbhc), &wusbhc_attr_group);
229 if (result < 0) {
230 dev_err(dev, "Cannot register WUSBHC attributes: %d\n", result);
231 goto error_create_attr_group;
232 }
233
234 result = wusbhc_pal_register(wusbhc);
235 if (result < 0)
236 goto error_pal_register;
237 return 0;
238
239error_pal_register:
240 sysfs_remove_group(wusbhc_kobj(wusbhc), &wusbhc_attr_group);
241error_create_attr_group:
242 return result;
243}
244EXPORT_SYMBOL_GPL(wusbhc_b_create);
245
246void wusbhc_b_destroy(struct wusbhc *wusbhc)
247{
248 wusbhc_pal_unregister(wusbhc);
249 sysfs_remove_group(wusbhc_kobj(wusbhc), &wusbhc_attr_group);
250}
251EXPORT_SYMBOL_GPL(wusbhc_b_destroy);
252
253void wusbhc_destroy(struct wusbhc *wusbhc)
254{
255 wusbhc_sec_destroy(wusbhc);
256 wusbhc_rh_destroy(wusbhc);
257 wusbhc_devconnect_destroy(wusbhc);
258 wusbhc_mmcie_destroy(wusbhc);
259}
260EXPORT_SYMBOL_GPL(wusbhc_destroy);
261
262struct workqueue_struct *wusbd;
263EXPORT_SYMBOL_GPL(wusbd);
264
265/*
266 * WUSB Cluster ID allocation map
267 *
268 * Each WUSB bus in a channel is identified with a Cluster Id in the
269 * unauth address space (WUSB1.0[4.3]). We take the range 0xe0 to 0xff
270 * (that's space for 31 WUSB controllers, as 0xff can't be taken). We
271 * start taking from 0xff, 0xfe, 0xfd... (hence the 0xff - id mapping).
272 *
273 * For each one we take, we pin it in the bitmap.
274 */
275#define CLUSTER_IDS 32
276static DECLARE_BITMAP(wusb_cluster_id_table, CLUSTER_IDS);
277static DEFINE_SPINLOCK(wusb_cluster_ids_lock);
278
279/*
280 * Get a WUSB Cluster ID
281 *
282 * Need to release with wusb_cluster_id_put() when done w/ it.
283 */
284/* FIXME: coordinate with the choose_address() from the USB stack */
285/* we want to leave the top of the 128 range for cluster addresses and
286 * the bottom for device addresses (as we map them one on one with
287 * ports). */
288u8 wusb_cluster_id_get(void)
289{
290 u8 id;
291 spin_lock(&wusb_cluster_ids_lock);
292 id = find_first_zero_bit(wusb_cluster_id_table, CLUSTER_IDS);
293	if (id >= CLUSTER_IDS) {
294 id = 0;
295 goto out;
296 }
297 set_bit(id, wusb_cluster_id_table);
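	/* bit n in the map corresponds to cluster ID 0xff - n (bit 0, i.e. 0xff, is reserved at module init) */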
298 id = (u8) 0xff - id;
299out:
300 spin_unlock(&wusb_cluster_ids_lock);
301 return id;
302
303}
304EXPORT_SYMBOL_GPL(wusb_cluster_id_get);
305
306/*
307 * Release a WUSB Cluster ID
308 *
309 * Obtained it with wusb_cluster_id_get()
310 */
311void wusb_cluster_id_put(u8 id)
312{
313 id = 0xff - id;
314 BUG_ON(id >= CLUSTER_IDS);
315 spin_lock(&wusb_cluster_ids_lock);
316 WARN_ON(!test_bit(id, wusb_cluster_id_table));
317 clear_bit(id, wusb_cluster_id_table);
318 spin_unlock(&wusb_cluster_ids_lock);
319}
320EXPORT_SYMBOL_GPL(wusb_cluster_id_put);
321
322/**
323 * wusbhc_giveback_urb - return an URB to the USB core
324 * @wusbhc: the host controller the URB is from.
325 * @urb: the URB.
326 * @status: the URB's status.
327 *
328 * Return an URB to the USB core doing some additional WUSB specific
329 * processing.
330 *
331 * - After a successful transfer, update the trust timeout timestamp
332 * for the WUSB device.
333 *
334 * - [WUSB] sections 4.13 and 7.5.1 specify the stop retransmission
335 * condition for the WCONNECTACK_IE is that the host has observed
336 * the associated device responding to a control transfer.
337 */
338void wusbhc_giveback_urb(struct wusbhc *wusbhc, struct urb *urb, int status)
339{
340 struct wusb_dev *wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
341
342 if (status == 0) {
343 wusb_dev->entry_ts = jiffies;
344
345	/* wusbhc_devconnect_acked() can't be called from
346 atomic context so defer it to a work queue. */
347 if (!list_empty(&wusb_dev->cack_node))
348 queue_work(wusbd, &wusb_dev->devconnect_acked_work);
349 }
350
351 usb_hcd_giveback_urb(&wusbhc->usb_hcd, urb, status);
352}
353EXPORT_SYMBOL_GPL(wusbhc_giveback_urb);
354
355/**
356 * wusbhc_reset_all - reset the HC hardware
357 * @wusbhc: the host controller to reset.
358 *
359 * Request a full hardware reset of the chip. This will also reset
360 * the radio controller and any other PALs.
361 */
362void wusbhc_reset_all(struct wusbhc *wusbhc)
363{
364 uwb_rc_reset_all(wusbhc->uwb_rc);
365}
366EXPORT_SYMBOL_GPL(wusbhc_reset_all);
367
368static struct notifier_block wusb_usb_notifier = {
369 .notifier_call = wusb_usb_ncb,
370 .priority = INT_MAX /* Need to be called first of all */
371};
372
373static int __init wusbcore_init(void)
374{
375 int result;
376 result = wusb_crypto_init();
377 if (result < 0)
378 goto error_crypto_init;
379 /* WQ is singlethread because we need to serialize notifications */
380 wusbd = create_singlethread_workqueue("wusbd");
381 if (wusbd == NULL) {
382 result = -ENOMEM;
383 printk(KERN_ERR "WUSB-core: Cannot create wusbd workqueue\n");
384 goto error_wusbd_create;
385 }
386 usb_register_notify(&wusb_usb_notifier);
387 bitmap_zero(wusb_cluster_id_table, CLUSTER_IDS);
388 set_bit(0, wusb_cluster_id_table); /* reserve Cluster ID 0xff */
389 return 0;
390
391error_wusbd_create:
392 wusb_crypto_exit();
393error_crypto_init:
394 return result;
395
396}
397module_init(wusbcore_init);
398
399static void __exit wusbcore_exit(void)
400{
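	/* drop the reserved 0xff cluster ID; any bit still set afterwards indicates a leaked cluster ID */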
401 clear_bit(0, wusb_cluster_id_table);
402 if (!bitmap_empty(wusb_cluster_id_table, CLUSTER_IDS)) {
403 char buf[256];
404 bitmap_scnprintf(buf, sizeof(buf), wusb_cluster_id_table,
405 CLUSTER_IDS);
406 printk(KERN_ERR "BUG: WUSB Cluster IDs not released "
407 "on exit: %s\n", buf);
408 WARN_ON(1);
409 }
410 usb_unregister_notify(&wusb_usb_notifier);
411 destroy_workqueue(wusbd);
412 wusb_crypto_exit();
413}
414module_exit(wusbcore_exit);
415
416MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
417MODULE_DESCRIPTION("Wireless USB core");
418MODULE_LICENSE("GPL");
diff --git a/drivers/usb/wusbcore/wusbhc.h b/drivers/usb/wusbcore/wusbhc.h
new file mode 100644
index 000000000000..d0c132434f1b
--- /dev/null
+++ b/drivers/usb/wusbcore/wusbhc.h
@@ -0,0 +1,495 @@
1/*
2 * Wireless USB Host Controller
3 * Common infrastructure for WHCI and HWA WUSB-HC drivers
4 *
5 *
6 * Copyright (C) 2005-2006 Intel Corporation
7 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License version
11 * 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 * 02110-1301, USA.
22 *
23 *
24 * This driver implements parts common to all Wireless USB Host
25 * Controllers (struct wusbhc, embedding a struct usb_hcd) and is used
26 * by:
27 *
28 * - hwahc: HWA, USB-dongle that implements a Wireless USB host
29 * controller, (Wireless USB 1.0 Host-Wire-Adapter specification).
30 *
31 * - whci: WHCI, a PCI card with a wireless host controller
32 * (Wireless Host Controller Interface 1.0 specification).
33 *
34 * Check out the Design-overview.txt file in the source documentation
35 * for other details on the implementation.
36 *
37 * Main blocks:
38 *
39 * rh Root Hub emulation (part of the HCD glue)
40 *
41 * devconnect Handle all the issues related to device connection,
42 * authentication, disconnection, timeout, resetting,
43 * keepalives, etc.
44 *
45 * mmc MMC IE broadcasting handling
46 *
47 * A host controller driver just initializes its stuff and as part of
48 * that, creates a 'struct wusbhc' instance that handles all the
49 * common WUSB mechanisms. Links in the function ops that are specific
50 * to it and then registers the host controller. Ready to run.
51 */
52
53#ifndef __WUSBHC_H__
54#define __WUSBHC_H__
55
56#include <linux/usb.h>
57#include <linux/list.h>
58#include <linux/mutex.h>
59#include <linux/kref.h>
60#include <linux/workqueue.h>
61/* FIXME: Yes, I know: BAD--it's not my fault the USB HC iface is not
62 * public */
63#include <linux/../../drivers/usb/core/hcd.h>
64#include <linux/uwb.h>
65#include <linux/usb/wusb.h>
66
67
68/**
69 * Wireless USB device
70 *
71 * Describe a WUSB device connected to the cluster. This struct
72 * belongs to the 'struct wusb_port' it is attached to and it is
73 * responsible for putting and clearing the pointer to it.
74 *
75 * Note this "complements" the 'struct usb_device' that the usb_hcd
76 * keeps for each connected USB device. However, it extends some
77 * information that is not available (there is no hcpriv ptr in it!)
78 * *and* most importantly, its life cycle is different. It is created
79 * as soon as we get a DN_Connect (connect request notification) from
80 * the device through the WUSB host controller; the USB stack doesn't
81 * create the device until we authenticate it. FIXME: this will
82 * change.
83 *
84 * @bos: This is allocated when the BOS descriptors are read from
85 * the device and freed upon the wusb_dev struct dying.
86 * @wusb_cap_descr: points into @bos, and has been verified to be size
87 * safe.
88 */
89struct wusb_dev {
90 struct kref refcnt;
91 struct wusbhc *wusbhc;
92 struct list_head cack_node; /* Connect-Ack list */
93 u8 port_idx;
94 u8 addr;
95 u8 beacon_type:4;
96 struct usb_encryption_descriptor ccm1_etd;
97 struct wusb_ckhdid cdid;
98 unsigned long entry_ts;
99 struct usb_bos_descriptor *bos;
100 struct usb_wireless_cap_descriptor *wusb_cap_descr;
101 struct uwb_mas_bm availability;
102 struct work_struct devconnect_acked_work;
103 struct urb *set_gtk_urb;
104 struct usb_ctrlrequest *set_gtk_req;
105 struct usb_device *usb_dev;
106};
107
108#define WUSB_DEV_ADDR_UNAUTH 0x80
109
110static inline void wusb_dev_init(struct wusb_dev *wusb_dev)
111{
112 kref_init(&wusb_dev->refcnt);
113 /* no need to init the cack_node */
114}
115
116extern void wusb_dev_destroy(struct kref *_wusb_dev);
117
118static inline struct wusb_dev *wusb_dev_get(struct wusb_dev *wusb_dev)
119{
120 kref_get(&wusb_dev->refcnt);
121 return wusb_dev;
122}
123
124static inline void wusb_dev_put(struct wusb_dev *wusb_dev)
125{
126 kref_put(&wusb_dev->refcnt, wusb_dev_destroy);
127}
128
129/**
130 * Wireless USB Host Controller root hub "fake" ports
131 * (state and device information)
132 *
133 * Wireless USB is wireless, so there are no ports; but we
134 * fake'em. Each RC can connect a maximum number of devices at the same time
135 * (given in the Wireless Adapter descriptor, bNumPorts or WHCI's
136 * caps), referred to in wusbhc->ports_max.
137 *
138 * See rh.c for more information.
139 *
140 * The @status and @change use the same bits as in USB2.0[11.24.2.7],
141 * so we don't have to do much when getting the port's status.
142 *
143 * WUSB1.0[7.1], USB2.0[11.24.2.7.1,fig 11-10],
144 * include/linux/usb_ch9.h (#define USB_PORT_STAT_*)
145 */
146struct wusb_port {
147 u16 status;
148 u16 change;
149 struct wusb_dev *wusb_dev; /* connected device's info */
150 unsigned reset_count;
151 u32 ptk_tkid;
152};
153
154/**
155 * WUSB Host Controller specifics
156 *
157 * All fields that are common to all Wireless USB controller types
158 * (HWA and WHCI) are grouped here. Host Controller
159 * functions/operations that only deal with general Wireless USB HC
160 * issues use this data type to refer to the host.
161 *
162 * @usb_hcd Instantiation of a USB host controller
163 * (initialized by upper layer [HWA=HC or WHCI].
164 *
165 * @dev Device that implements this; initialized by the
166 * upper layer (HWA-HC, WHCI...); this device should
167 * have a refcount.
168 *
169 * @trust_timeout After this time without hearing any device
170 * activity, we consider the device gone and we have to
171 * re-authenticate.
172 *
173 * Can be accessed w/o locking--however, read to a
174 * local variable then use.
175 *
176 * @chid WUSB Cluster Host ID: this is supposed to be a
177 * unique value that doesn't change across reboots (so
178 * that your devices do not require re-association).
179 *
180 * Read/Write protected by @mutex
181 *
182 * @dev_info This array has ports_max elements. It is used to
183 * give the HC information about the WUSB devices (see
184 * 'struct wusb_dev_info').
185 *
186 * For HWA we need to allocate it on the heap; for WHCI it
187 * needs to be permanently mapped, so we keep it for
188 * both and make it easy. Call wusbhc->dev_info_set()
189 * to update an entry.
190 *
191 * @ports_max Number of simultaneous device connections (fake
192 * ports) this HC will take. Read-only.
193 *
194 * @port Array of port status for each fake root port. Guaranteed to
195 * always be the same length during device existence
196 * [this allows for some unlocked but referenced reading].
197 *
198 * @mmcies_max Max number of Information Elements this HC can send
199 * in its MMC. Read-only.
200 *
201 * @mmcie_add HC specific operation (WHCI or HWA) for adding an
202 * MMCIE.
203 *
204 * @mmcie_rm HC specific operation (WHCI or HWA) for removing an
205 * MMCIE.
206 *
207 * @enc_types Array which describes the encryptions methods
208 * supported by the host as described in WUSB1.0 --
209 * one entry per supported method. As of WUSB1.0 there
210 * are only four methods; we make space for eight just in
211 * case they decide to add some more (and pray they do
212 * it in sequential order). If 'enc_types[enc_method]
213 * != 0', then it is supported by the host. enc_method
214 * is USB_ENC_TYPE*.
215 *
216 * @set_ptk: Set the PTK and enable encryption for a device. Or, if
217 * the supplied key is NULL, disable encryption for that
218 * device.
219 *
220 * @set_gtk: Set the GTK to be used for all future broadcast packets
221 * (i.e., MMCs). With some hardware, setting the GTK may start
222 * MMC transmission.
223 *
224 * NOTE:
225 *
226 * - If wusb_dev->usb_dev is not NULL, then usb_dev is valid
227 * (wusb_dev has a refcount on it). Likewise, if usb_dev->wusb_dev
228 * is not NULL, usb_dev->wusb_dev is valid (usb_dev keeps a
229 * refcount on it).
230 *
231 * Most of the time when you need to use it, it will be non-NULL,
232 * so there is no real need to check for it (wusb_dev will
233 * disappear before usb_dev).
234 *
235 * - The following fields need to be filled out before calling
236 * wusbhc_create(): ports_max, mmcies_max, mmcie_{add,rm}.
237 *
238 * - there is no wusbhc_init() method, we do everything in
239 * wusbhc_create().
240 *
241 * - Creation is done in two phases, wusbhc_create() and
242 * wusbhc_b_create(); the b parts are those that need to be called after
243 * calling usb_hcd_add(&wusbhc->usb_hcd).
244 */
245struct wusbhc {
246 struct usb_hcd usb_hcd; /* HAS TO BE 1st */
247 struct device *dev;
248 struct uwb_rc *uwb_rc;
249 struct uwb_pal pal;
250
251 unsigned trust_timeout; /* in jiffies */
252 struct wuie_host_info *wuie_host_info; /* Includes CHID */
253
254 struct mutex mutex; /* locks everything else */
255 u16 cluster_id; /* Wireless USB Cluster ID */
256 struct wusb_port *port; /* Fake port status handling */
257 struct wusb_dev_info *dev_info; /* for Set Device Info mgmt */
258 u8 ports_max;
259 unsigned active:1; /* currently xmit'ing MMCs */
260 struct wuie_keep_alive keep_alive_ie; /* protected by mutex */
261 struct delayed_work keep_alive_timer;
262 struct list_head cack_list; /* Connect acknowledging */
263 size_t cack_count; /* protected by 'mutex' */
264 struct wuie_connect_ack cack_ie;
265 struct uwb_rsv *rsv; /* cluster bandwidth reservation */
266
267 struct mutex mmcie_mutex; /* MMC WUIE handling */
268 struct wuie_hdr **mmcie; /* WUIE array */
269 u8 mmcies_max;
270 /* FIXME: make wusbhc_ops? */
271 int (*start)(struct wusbhc *wusbhc);
272 void (*stop)(struct wusbhc *wusbhc);
273 int (*mmcie_add)(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt,
274 u8 handle, struct wuie_hdr *wuie);
275 int (*mmcie_rm)(struct wusbhc *wusbhc, u8 handle);
276 int (*dev_info_set)(struct wusbhc *, struct wusb_dev *wusb_dev);
277 int (*bwa_set)(struct wusbhc *wusbhc, s8 stream_index,
278 const struct uwb_mas_bm *);
279 int (*set_ptk)(struct wusbhc *wusbhc, u8 port_idx,
280 u32 tkid, const void *key, size_t key_size);
281 int (*set_gtk)(struct wusbhc *wusbhc,
282 u32 tkid, const void *key, size_t key_size);
283 int (*set_num_dnts)(struct wusbhc *wusbhc, u8 interval, u8 slots);
284
285 struct {
286 struct usb_key_descriptor descr;
287 u8 data[16]; /* GTK key data */
288 } __attribute__((packed)) gtk;
289 u8 gtk_index;
290 u32 gtk_tkid;
291 struct work_struct gtk_rekey_done_work;
292 int pending_set_gtks;
293
294 struct usb_encryption_descriptor *ccm1_etd;
295};
296
297#define usb_hcd_to_wusbhc(u) container_of((u), struct wusbhc, usb_hcd)
298
299
300extern int wusbhc_create(struct wusbhc *);
301extern int wusbhc_b_create(struct wusbhc *);
302extern void wusbhc_b_destroy(struct wusbhc *);
303extern void wusbhc_destroy(struct wusbhc *);
304extern int wusb_dev_sysfs_add(struct wusbhc *, struct usb_device *,
305 struct wusb_dev *);
306extern void wusb_dev_sysfs_rm(struct wusb_dev *);
307extern int wusbhc_sec_create(struct wusbhc *);
308extern int wusbhc_sec_start(struct wusbhc *);
309extern void wusbhc_sec_stop(struct wusbhc *);
310extern void wusbhc_sec_destroy(struct wusbhc *);
311extern void wusbhc_giveback_urb(struct wusbhc *wusbhc, struct urb *urb,
312 int status);
313void wusbhc_reset_all(struct wusbhc *wusbhc);
314
315int wusbhc_pal_register(struct wusbhc *wusbhc);
316void wusbhc_pal_unregister(struct wusbhc *wusbhc);
317
318/*
319 * Return @usb_dev's @usb_hcd (properly referenced) or NULL if gone
320 *
321 * @usb_dev: USB device, UNLOCKED and referenced (or otherwise, safe ptr)
322 *
323 * This is a safe assumption as @usb_dev->bus is referenced all the
324 * time during the @usb_dev life cycle.
325 */
326static inline struct usb_hcd *usb_hcd_get_by_usb_dev(struct usb_device *usb_dev)
327{
328 struct usb_hcd *usb_hcd;
329 usb_hcd = container_of(usb_dev->bus, struct usb_hcd, self);
330 return usb_get_hcd(usb_hcd);
331}
332
333/*
334 * Increment the reference count on a wusbhc.
335 *
336 * @wusbhc's life cycle is identical to that of the underlying usb_hcd.
337 */
338static inline struct wusbhc *wusbhc_get(struct wusbhc *wusbhc)
339{
340 return usb_get_hcd(&wusbhc->usb_hcd) ? wusbhc : NULL;
341}
342
343/*
344 * Return the wusbhc associated to a @usb_dev
345 *
346 * @usb_dev: USB device, UNLOCKED and referenced (or otherwise, safe ptr)
347 *
348 * @returns: wusbhc for @usb_dev; NULL if the @usb_dev is being torn down.
349 * WARNING: referenced at the usb_hcd level, unlocked
350 *
351 * FIXME: move offline
352 */
353static inline struct wusbhc *wusbhc_get_by_usb_dev(struct usb_device *usb_dev)
354{
355 struct wusbhc *wusbhc = NULL;
356 struct usb_hcd *usb_hcd;
357 if (usb_dev->devnum > 1 && !usb_dev->wusb) {
358 /* but root hubs */
359 dev_err(&usb_dev->dev, "devnum %d wusb %d\n", usb_dev->devnum,
360 usb_dev->wusb);
361 BUG_ON(usb_dev->devnum > 1 && !usb_dev->wusb);
362 }
363 usb_hcd = usb_hcd_get_by_usb_dev(usb_dev);
364 if (usb_hcd == NULL)
365 return NULL;
366 BUG_ON(usb_hcd->wireless == 0);
367 return wusbhc = usb_hcd_to_wusbhc(usb_hcd);
368}
369
370
371static inline void wusbhc_put(struct wusbhc *wusbhc)
372{
373 usb_put_hcd(&wusbhc->usb_hcd);
374}
375
376int wusbhc_start(struct wusbhc *wusbhc, const struct wusb_ckhdid *chid);
377void wusbhc_stop(struct wusbhc *wusbhc);
378extern int wusbhc_chid_set(struct wusbhc *, const struct wusb_ckhdid *);
379
380/* Device connect handling */
381extern int wusbhc_devconnect_create(struct wusbhc *);
382extern void wusbhc_devconnect_destroy(struct wusbhc *);
383extern int wusbhc_devconnect_start(struct wusbhc *wusbhc,
384 const struct wusb_ckhdid *chid);
385extern void wusbhc_devconnect_stop(struct wusbhc *wusbhc);
386extern int wusbhc_devconnect_auth(struct wusbhc *, u8);
387extern void wusbhc_handle_dn(struct wusbhc *, u8 srcaddr,
388 struct wusb_dn_hdr *dn_hdr, size_t size);
389extern int wusbhc_dev_reset(struct wusbhc *wusbhc, u8 port);
390extern void __wusbhc_dev_disable(struct wusbhc *wusbhc, u8 port);
391extern int wusb_usb_ncb(struct notifier_block *nb, unsigned long val,
392 void *priv);
393extern int wusb_set_dev_addr(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev,
394 u8 addr);
395
396/* Wireless USB fake Root Hub methods */
397extern int wusbhc_rh_create(struct wusbhc *);
398extern void wusbhc_rh_destroy(struct wusbhc *);
399
400extern int wusbhc_rh_status_data(struct usb_hcd *, char *);
401extern int wusbhc_rh_control(struct usb_hcd *, u16, u16, u16, char *, u16);
402extern int wusbhc_rh_suspend(struct usb_hcd *);
403extern int wusbhc_rh_resume(struct usb_hcd *);
404extern int wusbhc_rh_start_port_reset(struct usb_hcd *, unsigned);
405
406/* MMC handling */
407extern int wusbhc_mmcie_create(struct wusbhc *);
408extern void wusbhc_mmcie_destroy(struct wusbhc *);
409extern int wusbhc_mmcie_set(struct wusbhc *, u8 interval, u8 repeat_cnt,
410 struct wuie_hdr *);
411extern void wusbhc_mmcie_rm(struct wusbhc *, struct wuie_hdr *);
412
413/* Bandwidth reservation */
414int wusbhc_rsv_establish(struct wusbhc *wusbhc);
415void wusbhc_rsv_terminate(struct wusbhc *wusbhc);
416
417/*
418 * I've always said
419 * I wanted a wedding in a church...
420 *
421 * but lately I've been thinking about
422 * the Botanical Gardens.
423 *
424 * We could do it by the tulips.
425 * It'll be beautiful
426 *
427 * --Security!
428 */
429extern int wusb_dev_sec_add(struct wusbhc *, struct usb_device *,
430 struct wusb_dev *);
431extern void wusb_dev_sec_rm(struct wusb_dev *) ;
432extern int wusb_dev_4way_handshake(struct wusbhc *, struct wusb_dev *,
433 struct wusb_ckhdid *ck);
434void wusbhc_gtk_rekey(struct wusbhc *wusbhc);
435
436
437/* WUSB Cluster ID handling */
438extern u8 wusb_cluster_id_get(void);
439extern void wusb_cluster_id_put(u8);
440
441/*
442 * wusb_port_by_idx - return the port associated to a zero-based port index
443 *
444 * NOTE: valid without locking as long as wusbhc is referenced (as the
445 * number of ports doesn't change). The data pointed to has to
446 * be verified though :)
447 */
448static inline struct wusb_port *wusb_port_by_idx(struct wusbhc *wusbhc,
449 u8 port_idx)
450{
451 return &wusbhc->port[port_idx];
452}
453
454/*
455 * wusb_port_no_to_idx - Convert port number (per usb_dev->portnum) to
456 * a port_idx.
457 *
458 * USB stack USB ports are 1 based!!
459 *
460 * NOTE: only valid for WUSB devices!!!
461 */
462static inline u8 wusb_port_no_to_idx(u8 port_no)
463{
464 return port_no - 1;
465}
466
467extern struct wusb_dev *__wusb_dev_get_by_usb_dev(struct wusbhc *,
468 struct usb_device *);
469
470/*
471 * Return a referenced wusb_dev given a @usb_dev
472 *
473 * Returns NULL if the usb_dev is being torn down.
474 *
475 * FIXME: move offline
476 */
477static inline
478struct wusb_dev *wusb_dev_get_by_usb_dev(struct usb_device *usb_dev)
479{
480 struct wusbhc *wusbhc;
481 struct wusb_dev *wusb_dev;
482 wusbhc = wusbhc_get_by_usb_dev(usb_dev);
483 if (wusbhc == NULL)
484 return NULL;
485 mutex_lock(&wusbhc->mutex);
486 wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, usb_dev);
487 mutex_unlock(&wusbhc->mutex);
488 wusbhc_put(wusbhc);
489 return wusb_dev;
490}
491
492/* Misc */
493
494extern struct workqueue_struct *wusbd;
495#endif /* #ifndef __WUSBHC_H__ */
diff --git a/drivers/uwb/Kconfig b/drivers/uwb/Kconfig
new file mode 100644
index 000000000000..ca783127af36
--- /dev/null
+++ b/drivers/uwb/Kconfig
@@ -0,0 +1,90 @@
1#
2# UWB device configuration
3#
4
5menuconfig UWB
6 tristate "Ultra Wideband devices (EXPERIMENTAL)"
7 depends on EXPERIMENTAL
8 depends on PCI
9 default n
10 help
11 UWB is a high-bandwidth, low-power, point-to-point radio
12 technology using a wide spectrum (3.1-10.6GHz). It is
13 optimized for in-room use (480Mbps at 2 meters, 110Mbps at
14 10m). It serves as the transport layer for other protocols,
15 such as Wireless USB (WUSB), IP (WLP) and upcoming
16 Bluetooth and 1394.
17
18 The topology is peer to peer; however, higher level
19 protocols (such as WUSB) might impose a master/slave
20 relationship.
21
22 Say Y here if your computer has UWB radio controllers (USB or PCI
23 based). You will need to enable the radio controllers
24 below. It is ok to select all of them, no harm done.
25
26 For more help check the UWB and WUSB related files in
27 <file:Documentation/usb/>.
28
29 To compile the UWB stack as a module, choose M here.
30
31if UWB
32
33config UWB_HWA
34 tristate "UWB Radio Control driver for WUSB-compliant USB dongles (HWA)"
35 depends on USB
36 help
37 This driver enables the radio controller for HWA USB
38 devices. HWA stands for Host Wire Adapter, and it is a UWB
39 Radio Controller connected to your system via USB. Most of
40 them come with a Wireless USB host controller also.
41
42 To compile this driver select Y (built in) or M (module). It
43 is safe to select any even if you do not have the hardware.
44
45config UWB_WHCI
46 tristate "UWB Radio Control driver for WHCI-compliant cards"
47 depends on PCI
48 help
49 This driver enables the radio controller for WHCI cards.
50
51 WHCI is a specification developed by Intel
52 (http://www.intel.com/technology/comms/wusb/whci.htm) much
53 in the spirit of USB's EHCI, but for UWB and Wireless USB
54 radio/host controllers connected via memory mapping (eg:
55 PCI). Most of these cards come also with a Wireless USB host
56 controller.
57
58 To compile this driver select Y (built in) or M (module). It
59 is safe to select any even if you do not have the hardware.
60
61config UWB_WLP
62 tristate "Support WiMedia Link Protocol (Ethernet/IP over UWB)"
63 depends on UWB && NET
64 help
65 This is a common library for drivers that implement
66 networking over UWB.
67
68config UWB_I1480U
69 tristate "Support for Intel Wireless UWB Link 1480 HWA"
70 depends on UWB_HWA
71 select FW_LOADER
72 help
73 This driver enables support for the i1480 when connected via
74 USB. It consists of a firmware uploader that will enable it
75 to behave as an HWA device.
76
77 To compile this driver select Y (built in) or M (module). It
78 is safe to select any even if you do not have the hardware.
79
80config UWB_I1480U_WLP
81 tristate "Support for Intel Wireless UWB Link 1480 HWA's WLP interface"
82 depends on UWB_I1480U && UWB_WLP && NET
83 help
84 This driver enables WLP support for the i1480 when connected via
85 USB. WLP is the WiMedia Link Protocol, or IP over UWB.
86
87 To compile this driver select Y (built in) or M (module). It
88 is safe to select any even if you don't have the hardware.
89
90endif # UWB
diff --git a/drivers/uwb/Makefile b/drivers/uwb/Makefile
new file mode 100644
index 000000000000..257e6908304c
--- /dev/null
+++ b/drivers/uwb/Makefile
@@ -0,0 +1,29 @@
1obj-$(CONFIG_UWB) += uwb.o
2obj-$(CONFIG_UWB_WLP) += wlp/
3obj-$(CONFIG_UWB_WHCI) += umc.o whci.o whc-rc.o
4obj-$(CONFIG_UWB_HWA) += hwa-rc.o
5obj-$(CONFIG_UWB_I1480U) += i1480/
6
7uwb-objs := \
8 address.o \
9 beacon.o \
10 driver.o \
11 drp.o \
12 drp-avail.o \
13 drp-ie.o \
14 est.o \
15 ie.o \
16 lc-dev.o \
17 lc-rc.o \
18 neh.o \
19 pal.o \
20 reset.o \
21 rsv.o \
22 scan.o \
23 uwb-debug.o \
24 uwbd.o
25
26umc-objs := \
27 umc-bus.o \
28 umc-dev.o \
29 umc-drv.o
diff --git a/drivers/uwb/address.c b/drivers/uwb/address.c
new file mode 100644
index 000000000000..1664ae5f1706
--- /dev/null
+++ b/drivers/uwb/address.c
@@ -0,0 +1,374 @@
1/*
2 * Ultra Wide Band
3 * Address management
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * FIXME: docs
24 */
25
26#include <linux/errno.h>
27#include <linux/module.h>
28#include <linux/device.h>
29#include <linux/random.h>
30#include <linux/etherdevice.h>
31#include <linux/uwb/debug.h>
32#include "uwb-internal.h"
33
34
35/** Device Address Management command */
36struct uwb_rc_cmd_dev_addr_mgmt {
37 struct uwb_rccb rccb;
38 u8 bmOperationType;
39 u8 baAddr[6];
40} __attribute__((packed));
41
42
43/**
44 * Low level command for setting/getting UWB radio's addresses
45 *
46 * @hwarc: HWA Radio Control interface instance
47 * @bmOperationType:
48 * Set/get, MAC/DEV (see WUSB1.0[8.6.2.2])
49 * @baAddr: address buffer--assumed to have enough data to hold
50 * the address type requested.
51 * @reply: Pointer to reply buffer (can be stack allocated)
52 * @returns: 0 if ok, < 0 errno code on error.
53 *
54 * @cmd has to be allocated because USB cannot grok stack or vmalloc
55 * buffers, depending on your combination of host architecture.
56 */
57static
58int uwb_rc_dev_addr_mgmt(struct uwb_rc *rc,
59 u8 bmOperationType, const u8 *baAddr,
60 struct uwb_rc_evt_dev_addr_mgmt *reply)
61{
62 int result;
63 struct uwb_rc_cmd_dev_addr_mgmt *cmd;
64
65 result = -ENOMEM;
66 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
67 if (cmd == NULL)
68 goto error_kzalloc;
69 cmd->rccb.bCommandType = UWB_RC_CET_GENERAL;
70 cmd->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_DEV_ADDR_MGMT);
71 cmd->bmOperationType = bmOperationType;
72 if (baAddr) {
73 size_t size = 0;
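		/* bmOperationType: bit 0 selects set (1) vs get (0); bit 1 selects the 48-bit MAC (6 bytes) vs the 16-bit DevAddr (2 bytes) */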
74 switch (bmOperationType >> 1) {
75 case 0: size = 2; break;
76 case 1: size = 6; break;
77 default: BUG();
78 }
79 memcpy(cmd->baAddr, baAddr, size);
80 }
81 reply->rceb.bEventType = UWB_RC_CET_GENERAL;
82 reply->rceb.wEvent = UWB_RC_CMD_DEV_ADDR_MGMT;
83 result = uwb_rc_cmd(rc, "DEV-ADDR-MGMT",
84 &cmd->rccb, sizeof(*cmd),
85 &reply->rceb, sizeof(*reply));
86 if (result < 0)
87 goto error_cmd;
88 if (result < sizeof(*reply)) {
89 dev_err(&rc->uwb_dev.dev,
90 "DEV-ADDR-MGMT: not enough data replied: "
91 "%d vs %zu bytes needed\n", result, sizeof(*reply));
92 result = -ENOMSG;
93 } else if (reply->bResultCode != UWB_RC_RES_SUCCESS) {
94 dev_err(&rc->uwb_dev.dev,
95 "DEV-ADDR-MGMT: command execution failed: %s (%d)\n",
96 uwb_rc_strerror(reply->bResultCode),
97 reply->bResultCode);
98 result = -EIO;
99 } else
100 result = 0;
101error_cmd:
102 kfree(cmd);
103error_kzalloc:
104 return result;
105}
106
107
108/**
109 * Set the UWB RC MAC or device address.
110 *
111 * @rc: UWB Radio Controller
112 * @_addr: Pointer to address to write [assumed to be either a
113 * 'struct uwb_mac_addr *' or a 'struct uwb_dev_addr *'].
114 * @type: Type of address to set (UWB_ADDR_DEV or UWB_ADDR_MAC).
115 * @returns: 0 if ok, < 0 errno code on error.
116 *
117 * Some anal retentivity here: even if both 'struct
118 * uwb_{dev,mac}_addr' have the actual byte array in the same offset
119 * and I could just pass _addr to uwb_rc_dev_addr_mgmt(), I prefer
120 * to use some syntactic sugar in case someday we decide to change the
121 * format of the structs. The compiler will optimize it out anyway.
122 */
123static int uwb_rc_addr_set(struct uwb_rc *rc,
124 const void *_addr, enum uwb_addr_type type)
125{
126 int result;
127 u8 bmOperationType = 0x1; /* Set address */
128 const struct uwb_dev_addr *dev_addr = _addr;
129 const struct uwb_mac_addr *mac_addr = _addr;
130 struct uwb_rc_evt_dev_addr_mgmt reply;
131 const u8 *baAddr;
132
133 result = -EINVAL;
134 switch (type) {
135 case UWB_ADDR_DEV:
136 baAddr = dev_addr->data;
137 break;
138 case UWB_ADDR_MAC:
139 baAddr = mac_addr->data;
140 bmOperationType |= 0x2;
141 break;
142 default:
143 return result;
144 }
145 return uwb_rc_dev_addr_mgmt(rc, bmOperationType, baAddr, &reply);
146}
147
148
149/**
150 * Get the UWB radio's MAC or device address.
151 *
152 * @rc: UWB Radio Controller
153 * @_addr: Where to write the address data [assumed to be either a
154 * 'struct uwb_mac_addr *' or a 'struct uwb_dev_addr *'].
155 * @type: Type of address to get (UWB_ADDR_DEV or UWB_ADDR_MAC).
156 * @returns: 0 if ok (and *_addr set), < 0 errno code on error.
157 *
158 * See comment in uwb_rc_addr_set() about anal retentivity in the
159 * type handling of the address variables.
160 */
161static int uwb_rc_addr_get(struct uwb_rc *rc,
162 void *_addr, enum uwb_addr_type type)
163{
164 int result;
165 u8 bmOperationType = 0x0; /* Get address */
166 struct uwb_rc_evt_dev_addr_mgmt evt;
167 struct uwb_dev_addr *dev_addr = _addr;
168 struct uwb_mac_addr *mac_addr = _addr;
169 u8 *baAddr;
170
171 result = -EINVAL;
172 switch (type) {
173 case UWB_ADDR_DEV:
174 baAddr = dev_addr->data;
175 break;
176 case UWB_ADDR_MAC:
177 bmOperationType |= 0x2;
178 baAddr = mac_addr->data;
179 break;
180 default:
181 return result;
182 }
183 result = uwb_rc_dev_addr_mgmt(rc, bmOperationType, baAddr, &evt);
184 if (result == 0)
185 switch (type) {
186 case UWB_ADDR_DEV:
187 memcpy(&dev_addr->data, evt.baAddr,
188 sizeof(dev_addr->data));
189 break;
190 case UWB_ADDR_MAC:
191 memcpy(&mac_addr->data, evt.baAddr,
192 sizeof(mac_addr->data));
193 break;
194 default: /* shut gcc up */
195 BUG();
196 }
197 return result;
198}
199
200
201/** Get @rc's MAC address to @addr */
202int uwb_rc_mac_addr_get(struct uwb_rc *rc,
203 struct uwb_mac_addr *addr) {
204 return uwb_rc_addr_get(rc, addr, UWB_ADDR_MAC);
205}
206EXPORT_SYMBOL_GPL(uwb_rc_mac_addr_get);
207
208
209/** Get @rc's device address to @addr */
210int uwb_rc_dev_addr_get(struct uwb_rc *rc,
211 struct uwb_dev_addr *addr) {
212 return uwb_rc_addr_get(rc, addr, UWB_ADDR_DEV);
213}
214EXPORT_SYMBOL_GPL(uwb_rc_dev_addr_get);
215
216
217/** Set @rc's address to @addr */
218int uwb_rc_mac_addr_set(struct uwb_rc *rc,
219 const struct uwb_mac_addr *addr)
220{
221 int result = -EINVAL;
222 mutex_lock(&rc->uwb_dev.mutex);
223 result = uwb_rc_addr_set(rc, addr, UWB_ADDR_MAC);
224 mutex_unlock(&rc->uwb_dev.mutex);
225 return result;
226}
227
228
229/** Set @rc's address to @addr */
230int uwb_rc_dev_addr_set(struct uwb_rc *rc,
231 const struct uwb_dev_addr *addr)
232{
233 int result = -EINVAL;
234 mutex_lock(&rc->uwb_dev.mutex);
235 result = uwb_rc_addr_set(rc, addr, UWB_ADDR_DEV);
236 rc->uwb_dev.dev_addr = *addr;
237 mutex_unlock(&rc->uwb_dev.mutex);
238 return result;
239}
240
241/* Returns !0 if given address is already assigned to device. */
242int __uwb_mac_addr_assigned_check(struct device *dev, void *_addr)
243{
244 struct uwb_dev *uwb_dev = to_uwb_dev(dev);
245 struct uwb_mac_addr *addr = _addr;
246
247 if (!uwb_mac_addr_cmp(addr, &uwb_dev->mac_addr))
248 return !0;
249 return 0;
250}
251
252/* Returns !0 if given address is already assigned to device. */
253int __uwb_dev_addr_assigned_check(struct device *dev, void *_addr)
254{
255 struct uwb_dev *uwb_dev = to_uwb_dev(dev);
256 struct uwb_dev_addr *addr = _addr;
257 if (!uwb_dev_addr_cmp(addr, &uwb_dev->dev_addr))
258 return !0;
259 return 0;
260}
261
262/**
263 * uwb_rc_dev_addr_assign - assign a generated DevAddr to a radio controller
264 * @rc: the (local) radio controller device requiring a new DevAddr
265 *
266 * A new DevAddr is required when:
267 * - first setting up a radio controller
268 * - if the hardware reports a DevAddr conflict
269 *
270 * The DevAddr is randomly generated in the generated DevAddr range
271 * [0x100, 0xfeff]. The number of devices in a beacon group is limited
272 * by mMaxBPLength (96) so this address space will never be exhausted.
273 *
274 * [ECMA-368] 17.1.1, 17.16.
275 */
276int uwb_rc_dev_addr_assign(struct uwb_rc *rc)
277{
278 struct uwb_dev_addr new_addr;
279
280 do {
281 get_random_bytes(new_addr.data, sizeof(new_addr.data));
282 } while (new_addr.data[0] == 0x00 || new_addr.data[0] == 0xff
283 || __uwb_dev_addr_assigned(rc, &new_addr));
284
285 return uwb_rc_dev_addr_set(rc, &new_addr);
286}
287
288/**
289 * uwbd_evt_handle_rc_dev_addr_conflict - handle a DEV_ADDR_CONFLICT event
290 * @evt: the DEV_ADDR_CONFLICT notification from the radio controller
291 *
292 * A new (non-conflicting) DevAddr is assigned to the radio controller.
293 *
294 * [ECMA-368] 17.1.1.1.
295 */
296int uwbd_evt_handle_rc_dev_addr_conflict(struct uwb_event *evt)
297{
298 struct uwb_rc *rc = evt->rc;
299
300 return uwb_rc_dev_addr_assign(rc);
301}
302
303/*
304 * Print the 48-bit EUI MAC address of the radio controller when
305 * reading /sys/class/uwb_rc/XX/mac_address
306 */
307static ssize_t uwb_rc_mac_addr_show(struct device *dev,
308 struct device_attribute *attr, char *buf)
309{
310 struct uwb_dev *uwb_dev = to_uwb_dev(dev);
311 struct uwb_rc *rc = uwb_dev->rc;
312 struct uwb_mac_addr addr;
313 ssize_t result;
314
315 mutex_lock(&rc->uwb_dev.mutex);
316 result = uwb_rc_addr_get(rc, &addr, UWB_ADDR_MAC);
317 mutex_unlock(&rc->uwb_dev.mutex);
318 if (result >= 0) {
319 result = uwb_mac_addr_print(buf, UWB_ADDR_STRSIZE, &addr);
320 buf[result++] = '\n';
321 }
322 return result;
323}
324
325/*
326 * Parse a 48 bit address written to /sys/class/uwb_rc/XX/mac_address
327 * and if correct, set it.
328 */
329static ssize_t uwb_rc_mac_addr_store(struct device *dev,
330 struct device_attribute *attr,
331 const char *buf, size_t size)
332{
333 struct uwb_dev *uwb_dev = to_uwb_dev(dev);
334 struct uwb_rc *rc = uwb_dev->rc;
335 struct uwb_mac_addr addr;
336 ssize_t result;
337
338 result = sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx\n",
339 &addr.data[0], &addr.data[1], &addr.data[2],
340 &addr.data[3], &addr.data[4], &addr.data[5]);
341 if (result != 6) {
342 result = -EINVAL;
343 goto out;
344 }
345 if (is_multicast_ether_addr(addr.data)) {
346 dev_err(&rc->uwb_dev.dev, "refusing to set multicast "
347 "MAC address %s\n", buf);
348 result = -EINVAL;
349 goto out;
350 }
351 result = uwb_rc_mac_addr_set(rc, &addr);
352 if (result == 0)
353 rc->uwb_dev.mac_addr = addr;
354out:
355 return result < 0 ? result : size;
356}
357DEVICE_ATTR(mac_address, S_IRUGO | S_IWUSR, uwb_rc_mac_addr_show, uwb_rc_mac_addr_store);
358
359/** Print @addr to @buf, @return bytes written */
360size_t __uwb_addr_print(char *buf, size_t buf_size, const unsigned char *addr,
361 int type)
362{
363 size_t result;
364 if (type)
365 result = scnprintf(buf, buf_size,
366 "%02x:%02x:%02x:%02x:%02x:%02x",
367 addr[0], addr[1], addr[2],
368 addr[3], addr[4], addr[5]);
369 else
370 result = scnprintf(buf, buf_size, "%02x:%02x",
371 addr[1], addr[0]);
372 return result;
373}
374EXPORT_SYMBOL_GPL(__uwb_addr_print);
diff --git a/drivers/uwb/beacon.c b/drivers/uwb/beacon.c
new file mode 100644
index 000000000000..46b18eec5026
--- /dev/null
+++ b/drivers/uwb/beacon.c
@@ -0,0 +1,642 @@
1/*
2 * Ultra Wide Band
3 * Beacon management
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * FIXME: docs
24 */
25
26#include <linux/kernel.h>
27#include <linux/init.h>
28#include <linux/module.h>
29#include <linux/device.h>
30#include <linux/err.h>
31#include <linux/kdev_t.h>
32#include "uwb-internal.h"
33
34#define D_LOCAL 0
35#include <linux/uwb/debug.h>
36
37/** Start Beaconing command structure */
38struct uwb_rc_cmd_start_beacon {
39 struct uwb_rccb rccb;
40 __le16 wBPSTOffset;
41 u8 bChannelNumber;
42} __attribute__((packed));
43
44
45static int uwb_rc_start_beacon(struct uwb_rc *rc, u16 bpst_offset, u8 channel)
46{
47 int result;
48 struct uwb_rc_cmd_start_beacon *cmd;
49 struct uwb_rc_evt_confirm reply;
50
51 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
52 if (cmd == NULL)
53 return -ENOMEM;
54 cmd->rccb.bCommandType = UWB_RC_CET_GENERAL;
55 cmd->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_START_BEACON);
56 cmd->wBPSTOffset = cpu_to_le16(bpst_offset);
57 cmd->bChannelNumber = channel;
58 reply.rceb.bEventType = UWB_RC_CET_GENERAL;
59 reply.rceb.wEvent = UWB_RC_CMD_START_BEACON;
60 result = uwb_rc_cmd(rc, "START-BEACON", &cmd->rccb, sizeof(*cmd),
61 &reply.rceb, sizeof(reply));
62 if (result < 0)
63 goto error_cmd;
64 if (reply.bResultCode != UWB_RC_RES_SUCCESS) {
65 dev_err(&rc->uwb_dev.dev,
66 "START-BEACON: command execution failed: %s (%d)\n",
67 uwb_rc_strerror(reply.bResultCode), reply.bResultCode);
68 result = -EIO;
69 }
70error_cmd:
71 kfree(cmd);
72 return result;
73}
74
75static int uwb_rc_stop_beacon(struct uwb_rc *rc)
76{
77 int result;
78 struct uwb_rccb *cmd;
79 struct uwb_rc_evt_confirm reply;
80
81 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
82 if (cmd == NULL)
83 return -ENOMEM;
84 cmd->bCommandType = UWB_RC_CET_GENERAL;
85 cmd->wCommand = cpu_to_le16(UWB_RC_CMD_STOP_BEACON);
86 reply.rceb.bEventType = UWB_RC_CET_GENERAL;
87 reply.rceb.wEvent = UWB_RC_CMD_STOP_BEACON;
88 result = uwb_rc_cmd(rc, "STOP-BEACON", cmd, sizeof(*cmd),
89 &reply.rceb, sizeof(reply));
90 if (result < 0)
91 goto error_cmd;
92 if (reply.bResultCode != UWB_RC_RES_SUCCESS) {
93 dev_err(&rc->uwb_dev.dev,
94 "STOP-BEACON: command execution failed: %s (%d)\n",
95 uwb_rc_strerror(reply.bResultCode), reply.bResultCode);
96 result = -EIO;
97 }
98error_cmd:
99 kfree(cmd);
100 return result;
101}
102
103/*
104 * Start/stop beacons
105 *
106 * @rc: UWB Radio Controller to operate on
107 * @channel: UWB channel on which to beacon (WUSB[table
108 * 5-12]). If -1, stop beaconing.
109 * @bpst_offset: Beacon Period Start Time offset; FIXME-do zero
110 *
111 * According to WHCI 0.95 [4.13.6] the driver will only receive the RCEB
112 * of a SET IE command after the device sent the first beacon that includes
113 * the IEs specified in the SET IE command. So, after we start beaconing we
114 * check if there is anything in the IE cache and call the SET IE command
115 * if needed.
116 */
117int uwb_rc_beacon(struct uwb_rc *rc, int channel, unsigned bpst_offset)
118{
119 int result;
120 struct device *dev = &rc->uwb_dev.dev;
121
122 mutex_lock(&rc->uwb_dev.mutex);
123 if (channel < 0)
124 channel = -1;
125 if (channel == -1)
126 result = uwb_rc_stop_beacon(rc);
127 else {
128 /* channel >= 0...dah */
129 result = uwb_rc_start_beacon(rc, bpst_offset, channel);
130 if (result < 0)
131 goto out_up;
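		/* beaconing (re)started: re-apply any IEs cached in rc->ies via SET-IE */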
132 if (le16_to_cpu(rc->ies->wIELength) > 0) {
133 result = uwb_rc_set_ie(rc, rc->ies);
134 if (result < 0) {
135 dev_err(dev, "Cannot set new IE on device: "
136 "%d\n", result);
137 result = uwb_rc_stop_beacon(rc);
138 channel = -1;
139 bpst_offset = 0;
140 } else
141 result = 0;
142 }
143 }
144
145 if (result < 0)
146 goto out_up;
147 rc->beaconing = channel;
148
149 uwb_notify(rc, NULL, uwb_bg_joined(rc) ? UWB_NOTIF_BG_JOIN : UWB_NOTIF_BG_LEAVE);
150
151out_up:
152 mutex_unlock(&rc->uwb_dev.mutex);
153 return result;
154}
155
156/*
157 * Beacon cache
158 *
159 * The purpose of this is to speed up the lookup of beacon information
160 * when a new beacon arrives. The UWB Daemon also uses it to keep
161 * tabs on which devices are in radio distance and which are not. When a
162 * device's beacon stays present for more than a certain amount of
163 * time, it is considered a new, usable device. When a beacon ceases
164 * to be received for a certain amount of time, it is considered that
165 * the device is gone.
166 *
167 * FIXME: use an allocator for the entries
168 * FIXME: use something faster for search than a list
169 */
170
171struct uwb_beca uwb_beca = {
172 .list = LIST_HEAD_INIT(uwb_beca.list),
173 .mutex = __MUTEX_INITIALIZER(uwb_beca.mutex)
174};
175
176
177void uwb_bce_kfree(struct kref *_bce)
178{
179 struct uwb_beca_e *bce = container_of(_bce, struct uwb_beca_e, refcnt);
180
181 kfree(bce->be);
182 kfree(bce);
183}
184
185
186/* Find a beacon by dev addr in the cache */
187static
188struct uwb_beca_e *__uwb_beca_find_bydev(const struct uwb_dev_addr *dev_addr)
189{
190 struct uwb_beca_e *bce, *next;
191 list_for_each_entry_safe(bce, next, &uwb_beca.list, node) {
192 d_printf(6, NULL, "looking for addr %02x:%02x in %02x:%02x\n",
193 dev_addr->data[0], dev_addr->data[1],
194 bce->dev_addr.data[0], bce->dev_addr.data[1]);
195 if (!memcmp(&bce->dev_addr, dev_addr, sizeof(bce->dev_addr)))
196 goto out;
197 }
198 bce = NULL;
199out:
200 return bce;
201}
202
203/* Find a beacon by dev addr in the cache */
204static
205struct uwb_beca_e *__uwb_beca_find_bymac(const struct uwb_mac_addr *mac_addr)
206{
207 struct uwb_beca_e *bce, *next;
208 list_for_each_entry_safe(bce, next, &uwb_beca.list, node) {
209 if (!memcmp(bce->mac_addr, mac_addr->data,
210 sizeof(struct uwb_mac_addr)))
211 goto out;
212 }
213 bce = NULL;
214out:
215 return bce;
216}
217
218/**
219 * uwb_dev_get_by_devaddr - get a UWB device with a specific DevAddr
220 * @rc: the radio controller that saw the device
221 * @devaddr: DevAddr of the UWB device to find
222 *
223 * There may be more than one matching device (in the case of a
224 * DevAddr conflict), but only the first one is returned.
225 */
226struct uwb_dev *uwb_dev_get_by_devaddr(struct uwb_rc *rc,
227 const struct uwb_dev_addr *devaddr)
228{
229 struct uwb_dev *found = NULL;
230 struct uwb_beca_e *bce;
231
232 mutex_lock(&uwb_beca.mutex);
233 bce = __uwb_beca_find_bydev(devaddr);
234 if (bce)
235 found = uwb_dev_try_get(rc, bce->uwb_dev);
236 mutex_unlock(&uwb_beca.mutex);
237
238 return found;
239}
240
241/**
242 * uwb_dev_get_by_macaddr - get a UWB device with a specific EUI-48
243 * @rc: the radio controller that saw the device
244 * @devaddr: EUI-48 of the UWB device to find
245 */
246struct uwb_dev *uwb_dev_get_by_macaddr(struct uwb_rc *rc,
247 const struct uwb_mac_addr *macaddr)
248{
249 struct uwb_dev *found = NULL;
250 struct uwb_beca_e *bce;
251
252 mutex_lock(&uwb_beca.mutex);
253 bce = __uwb_beca_find_bymac(macaddr);
254 if (bce)
255 found = uwb_dev_try_get(rc, bce->uwb_dev);
256 mutex_unlock(&uwb_beca.mutex);
257
258 return found;
259}
260
261/* Initialize a beacon cache entry */
262static void uwb_beca_e_init(struct uwb_beca_e *bce)
263{
264 mutex_init(&bce->mutex);
265 kref_init(&bce->refcnt);
266 stats_init(&bce->lqe_stats);
267 stats_init(&bce->rssi_stats);
268}
269
270/*
271 * Add a beacon to the cache
272 *
273 * @be: Beacon event information
274 * @bf: Beacon frame (part of b, really)
275 * @ts_jiffies: Timestamp (in jiffies) when the beacon was received
276 */
277struct uwb_beca_e *__uwb_beca_add(struct uwb_rc_evt_beacon *be,
278 struct uwb_beacon_frame *bf,
279 unsigned long ts_jiffies)
280{
281 struct uwb_beca_e *bce;
282
283 bce = kzalloc(sizeof(*bce), GFP_KERNEL);
284 if (bce == NULL)
285 return NULL;
286 uwb_beca_e_init(bce);
287 bce->ts_jiffies = ts_jiffies;
288 bce->uwb_dev = NULL;
289 list_add(&bce->node, &uwb_beca.list);
290 return bce;
291}
292
293/*
294 * Wipe out beacon entries that became stale
295 *
296 * Remove associated devices too.
297 */
298void uwb_beca_purge(void)
299{
300 struct uwb_beca_e *bce, *next;
301 unsigned long expires;
302
303 mutex_lock(&uwb_beca.mutex);
304 list_for_each_entry_safe(bce, next, &uwb_beca.list, node) {
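		/* an entry expires beacon_timeout_ms after the last beacon from that device was received */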
305 expires = bce->ts_jiffies + msecs_to_jiffies(beacon_timeout_ms);
306 if (time_after(jiffies, expires)) {
307 uwbd_dev_offair(bce);
308 list_del(&bce->node);
309 uwb_bce_put(bce);
310 }
311 }
312 mutex_unlock(&uwb_beca.mutex);
313}
314
315/* Clean up the whole beacon cache. Called on shutdown */
316void uwb_beca_release(void)
317{
318 struct uwb_beca_e *bce, *next;
319 mutex_lock(&uwb_beca.mutex);
320 list_for_each_entry_safe(bce, next, &uwb_beca.list, node) {
321 list_del(&bce->node);
322 uwb_bce_put(bce);
323 }
324 mutex_unlock(&uwb_beca.mutex);
325}
326
327static void uwb_beacon_print(struct uwb_rc *rc, struct uwb_rc_evt_beacon *be,
328 struct uwb_beacon_frame *bf)
329{
330 char macbuf[UWB_ADDR_STRSIZE];
331 char devbuf[UWB_ADDR_STRSIZE];
332 char dstbuf[UWB_ADDR_STRSIZE];
333
334 uwb_mac_addr_print(macbuf, sizeof(macbuf), &bf->Device_Identifier);
335 uwb_dev_addr_print(devbuf, sizeof(devbuf), &bf->hdr.SrcAddr);
336 uwb_dev_addr_print(dstbuf, sizeof(dstbuf), &bf->hdr.DestAddr);
337 dev_info(&rc->uwb_dev.dev,
338 "BEACON from %s to %s (ch%u offset %u slot %u MAC %s)\n",
339 devbuf, dstbuf, be->bChannelNumber, be->wBPSTOffset,
340 bf->Beacon_Slot_Number, macbuf);
341}
342
343/*
344 * @bce: beacon cache entry, referenced
345 */
346ssize_t uwb_bce_print_IEs(struct uwb_dev *uwb_dev, struct uwb_beca_e *bce,
347 char *buf, size_t size)
348{
349 ssize_t result = 0;
350 struct uwb_rc_evt_beacon *be;
351 struct uwb_beacon_frame *bf;
352 struct uwb_buf_ctx ctx = {
353 .buf = buf,
354 .bytes = 0,
355 .size = size
356 };
357
358 mutex_lock(&bce->mutex);
359 be = bce->be;
360 if (be == NULL)
361 goto out;
362 bf = (void *) be->BeaconInfo;
363 uwb_ie_for_each(uwb_dev, uwb_ie_dump_hex, &ctx,
364 bf->IEData, be->wBeaconInfoLength - sizeof(*bf));
365 result = ctx.bytes;
366out:
367 mutex_unlock(&bce->mutex);
368 return result;
369}
370
371/*
372 * Verify that the beacon event, frame and IEs are ok
373 */
374static int uwb_verify_beacon(struct uwb_rc *rc, struct uwb_event *evt,
375 struct uwb_rc_evt_beacon *be)
376{
377 int result = -EINVAL;
378 struct uwb_beacon_frame *bf;
379 struct device *dev = &rc->uwb_dev.dev;
380
381 /* Is there enough data to decode a beacon frame? */
382 if (evt->notif.size < sizeof(*be) + sizeof(*bf)) {
383 dev_err(dev, "BEACON event: Not enough data to decode "
384 "(%zu vs %zu bytes needed)\n", evt->notif.size,
385 sizeof(*be) + sizeof(*bf));
386 goto error;
387 }
388 /* FIXME: make sure beacon frame IEs are fine and that the whole thing
389 * is consistent */
390 result = 0;
391error:
392 return result;
393}
394
395/*
396 * Handle UWB_RC_EVT_BEACON events
397 *
398 * We check the beacon cache to see how the received beacon fares. If
399 * it is already there we refresh the timestamp; if not, we create a
400 * new entry.
401 *
402 * According to the WHCI and WUSB specs, only one beacon frame is
403 * allowed per notification block, so we don't bother about scanning
404 * for more.
405 */
406int uwbd_evt_handle_rc_beacon(struct uwb_event *evt)
407{
408 int result = -EINVAL;
409 struct uwb_rc *rc;
410 struct uwb_rc_evt_beacon *be;
411 struct uwb_beacon_frame *bf;
412 struct uwb_beca_e *bce;
413 unsigned long last_ts;
414
415 rc = evt->rc;
416 be = container_of(evt->notif.rceb, struct uwb_rc_evt_beacon, rceb);
417 result = uwb_verify_beacon(rc, evt, be);
418 if (result < 0)
419 return result;
420
421 /* FIXME: handle alien beacons. */
422 if (be->bBeaconType == UWB_RC_BEACON_TYPE_OL_ALIEN ||
423 be->bBeaconType == UWB_RC_BEACON_TYPE_NOL_ALIEN) {
424 return -ENOSYS;
425 }
426
427 bf = (struct uwb_beacon_frame *) be->BeaconInfo;
428
429 /*
430 * Drop beacons from devices with a NULL EUI-48 -- they cannot
431 * be uniquely identified.
432 *
433 * It's expected that these will all be WUSB devices and they
434 * have a WUSB specific connection method so ignoring them
435 * here shouldn't be a problem.
436 */
437 if (uwb_mac_addr_bcast(&bf->Device_Identifier))
438 return 0;
439
440 mutex_lock(&uwb_beca.mutex);
441 bce = __uwb_beca_find_bymac(&bf->Device_Identifier);
442 if (bce == NULL) {
443 /* Not in there, a new device is pinging */
444 uwb_beacon_print(evt->rc, be, bf);
445 bce = __uwb_beca_add(be, bf, evt->ts_jiffies);
446 if (bce == NULL) {
447 mutex_unlock(&uwb_beca.mutex);
448 return -ENOMEM;
449 }
450 }
451 mutex_unlock(&uwb_beca.mutex);
452
453 mutex_lock(&bce->mutex);
454 /* purge old beacon data */
455 kfree(bce->be);
456
457 last_ts = bce->ts_jiffies;
458
459 /* Update commonly used fields */
460 bce->ts_jiffies = evt->ts_jiffies;
461 bce->be = be;
462 bce->dev_addr = bf->hdr.SrcAddr;
463 bce->mac_addr = &bf->Device_Identifier;
464 be->wBPSTOffset = le16_to_cpu(be->wBPSTOffset);
465 be->wBeaconInfoLength = le16_to_cpu(be->wBeaconInfoLength);
466 stats_add_sample(&bce->lqe_stats, be->bLQI - 7);
467 stats_add_sample(&bce->rssi_stats, be->bRSSI + 18);
468
469 /*
470 * This might be a beacon from a new device.
471 */
472 if (bce->uwb_dev == NULL)
473 uwbd_dev_onair(evt->rc, bce);
474
475 mutex_unlock(&bce->mutex);
476
477 return 1; /* we keep the event data */
478}
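/*
 * Editorial note: the "return 1" above is deliberate -- the cache
 * entry now holds a pointer to this event's payload (bce->be = be),
 * so the event dispatcher must not free the notification buffer.
 * The previous payload was kfree()d just before being replaced.
 */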
479
480/*
481 * Handle UWB_RC_EVT_BEACON_SIZE events
482 *
483 * XXXXX
484 */
485int uwbd_evt_handle_rc_beacon_size(struct uwb_event *evt)
486{
487 int result = -EINVAL;
488 struct device *dev = &evt->rc->uwb_dev.dev;
489 struct uwb_rc_evt_beacon_size *bs;
490
491 /* Is there enough data to decode the event? */
492 if (evt->notif.size < sizeof(*bs)) {
493 dev_err(dev, "BEACON SIZE notification: Not enough data to "
494 "decode (%zu vs %zu bytes needed)\n",
495 evt->notif.size, sizeof(*bs));
496 goto error;
497 }
498 bs = container_of(evt->notif.rceb, struct uwb_rc_evt_beacon_size, rceb);
499 if (0)
500 dev_info(dev, "Beacon size changed to %u bytes "
501 "(FIXME: action?)\n", le16_to_cpu(bs->wNewBeaconSize));
502 else {
503 /* temporary hack until we do something with this message... */
504 static unsigned count;
505 if (++count % 1000 == 0)
506 dev_info(dev, "Beacon size changed %u times "
507 "(FIXME: action?)\n", count);
508 }
509 result = 0;
510error:
511 return result;
512}
513
514/**
515 * uwbd_evt_handle_rc_bp_slot_change - handle a BP_SLOT_CHANGE event
516 * @evt: the BP_SLOT_CHANGE notification from the radio controller
517 *
518 * If the event indicates that no beacon period slots were available
519 * then the radio controller has transitioned to a non-beaconing state.
520 * Otherwise, simply save the current beacon slot.
521 */
522int uwbd_evt_handle_rc_bp_slot_change(struct uwb_event *evt)
523{
524 struct uwb_rc *rc = evt->rc;
525 struct device *dev = &rc->uwb_dev.dev;
526 struct uwb_rc_evt_bp_slot_change *bpsc;
527
528 if (evt->notif.size < sizeof(*bpsc)) {
529 dev_err(dev, "BP SLOT CHANGE event: Not enough data\n");
530 return -EINVAL;
531 }
532 bpsc = container_of(evt->notif.rceb, struct uwb_rc_evt_bp_slot_change, rceb);
533
534 mutex_lock(&rc->uwb_dev.mutex);
535 if (uwb_rc_evt_bp_slot_change_no_slot(bpsc)) {
536 dev_info(dev, "stopped beaconing: No free slots in BP\n");
537 rc->beaconing = -1;
538 } else
539 rc->uwb_dev.beacon_slot = uwb_rc_evt_bp_slot_change_slot_num(bpsc);
540 mutex_unlock(&rc->uwb_dev.mutex);
541
542 return 0;
543}
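/*
 * Editorial note: rc->beaconing == -1 is this file's "not beaconing"
 * marker; uwb_bg_joined() below reports the RC as a beacon group
 * member for any other value.
 */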
544
545/**
546 * Handle UWB_RC_EVT_BPOIE_CHANGE events
547 *
548 * XXXXX
549 */
550struct uwb_ie_bpo {
551 struct uwb_ie_hdr hdr;
552 u8 bp_length;
553 u8 data[];
554} __attribute__((packed));
555
556int uwbd_evt_handle_rc_bpoie_change(struct uwb_event *evt)
557{
558 int result = -EINVAL;
559 struct device *dev = &evt->rc->uwb_dev.dev;
560 struct uwb_rc_evt_bpoie_change *bpoiec;
561 struct uwb_ie_bpo *bpoie;
562 static unsigned count; /* FIXME: this is a temp hack */
563 size_t iesize;
564
565 /* Is there enough data to decode it? */
566 if (evt->notif.size < sizeof(*bpoiec)) {
567 dev_err(dev, "BPOIEC notification: Not enough data to "
568 "decode (%zu vs %zu bytes needed)\n",
569 evt->notif.size, sizeof(*bpoiec));
570 goto error;
571 }
572 bpoiec = container_of(evt->notif.rceb, struct uwb_rc_evt_bpoie_change, rceb);
573 iesize = le16_to_cpu(bpoiec->wBPOIELength);
574 if (iesize < sizeof(*bpoie)) {
575 dev_err(dev, "BPOIEC notification: Not enough IE data to "
576 "decode (%zu vs %zu bytes needed)\n",
577 iesize, sizeof(*bpoie));
578 goto error;
579 }
580 if (++count % 1000 == 0) /* Lame placeholder */
581 dev_info(dev, "BPOIE: %u changes received\n", count);
582 /*
583 * FIXME: At this point we should go over all the IEs in the
584 * bpoiec->BPOIE array and act on each.
585 */
586 result = 0;
587error:
588 return result;
589}
590
591/**
592 * uwb_bg_joined - is the RC in a beacon group?
593 * @rc: the radio controller
594 *
595 * Returns true if the radio controller is in a beacon group (even if
596 * it's the sole member).
597 */
598int uwb_bg_joined(struct uwb_rc *rc)
599{
600 return rc->beaconing != -1;
601}
602EXPORT_SYMBOL_GPL(uwb_bg_joined);
603
604/*
605 * Print beaconing state.
606 */
607static ssize_t uwb_rc_beacon_show(struct device *dev,
608 struct device_attribute *attr, char *buf)
609{
610 struct uwb_dev *uwb_dev = to_uwb_dev(dev);
611 struct uwb_rc *rc = uwb_dev->rc;
612 ssize_t result;
613
614 mutex_lock(&rc->uwb_dev.mutex);
615 result = sprintf(buf, "%d\n", rc->beaconing);
616 mutex_unlock(&rc->uwb_dev.mutex);
617 return result;
618}
619
620/*
621 * Start beaconing on the specified channel, or stop beaconing.
622 *
623 * The BPST offset of when to start searching for a beacon group to
624 * join may be specified.
625 */
626static ssize_t uwb_rc_beacon_store(struct device *dev,
627 struct device_attribute *attr,
628 const char *buf, size_t size)
629{
630 struct uwb_dev *uwb_dev = to_uwb_dev(dev);
631 struct uwb_rc *rc = uwb_dev->rc;
632 int channel;
633 unsigned bpst_offset = 0;
634 ssize_t result = -EINVAL;
635
636 result = sscanf(buf, "%d %u\n", &channel, &bpst_offset);
637 if (result >= 1)
638 result = uwb_rc_beacon(rc, channel, bpst_offset);
639
640 return result < 0 ? result : size;
641}
642DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, uwb_rc_beacon_show, uwb_rc_beacon_store);
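/*
 * Usage sketch (editorial; the exact device node name is an
 * assumption): writing "<channel> [<BPST offset>]" to the resulting
 * sysfs file requests a beaconing change, e.g.
 *
 *	echo "9 0" > /sys/class/uwb_rc/<rc>/beacon
 *
 * A bare channel number is also accepted, since sscanf() above only
 * requires one successful conversion.
 */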
diff --git a/drivers/uwb/driver.c b/drivers/uwb/driver.c
new file mode 100644
index 000000000000..521cdeb84971
--- /dev/null
+++ b/drivers/uwb/driver.c
@@ -0,0 +1,144 @@
1/*
2 * Ultra Wide Band
3 * Driver initialization, etc
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * FIXME: docs
24 *
25 * Life cycle: FIXME: explain
26 *
27 * UWB radio controller:
28 *
29 * 1. alloc a uwb_rc, zero it
30 * 2. call uwb_rc_init() on it to set it up + ops (won't do any
31 * kind of allocation)
32 * 3. register (now it is owned by the UWB stack--deregister before
33 * freeing/destroying).
34 * 4. It lives on its own now (the UWB stack handles it)--when it
35 * disconnects, call unregister()
36 * 5. free it.
37 *
38 * Make sure you have a reference to the uwb_rc before calling
39 * any of the UWB API functions.
40 *
41 * TODO:
42 *
43 * 1. Locking and life cycle management is crappy still. All entry
44 * points to the UWB HCD API assume you have a reference on the
45 * uwb_rc structure and that it won't go away. They mutex lock it
46 * before doing anything.
47 */
48
49#include <linux/kernel.h>
50#include <linux/init.h>
51#include <linux/module.h>
52#include <linux/device.h>
53#include <linux/err.h>
54#include <linux/kdev_t.h>
55#include <linux/random.h>
56#include <linux/uwb/debug.h>
57#include "uwb-internal.h"
58
59
60/* UWB stack attributes (or 'global' constants) */
61
62
63/**
64 * If a beacon disappears for longer than this, then we consider the
65 * device that was represented by that beacon to be gone.
66 *
67 * ECMA-368[17.2.3, last para] establishes that a device must not
68 * consider a device to be its neighbour if it doesn't receive a beacon
69 * for more than mMaxLostBeacons. mMaxLostBeacons is defined in
70 * ECMA-368[17.16] as 3; because we can get only one beacon per
71 * superframe, that'd be 3 * 65ms = 195 ~ 200 ms. Let's give it time
72 * for jitter and stuff and make it 500 ms.
73 */
74unsigned long beacon_timeout_ms = 500;
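/*
 * Editorial sketch: uwb_beca_purge() applies this value per cache
 * entry as
 *
 *	expires = bce->ts_jiffies + msecs_to_jiffies(beacon_timeout_ms);
 *
 * and drops the entry (taking its device off the air) once
 * time_after(jiffies, expires) is true.
 */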
75
76static
77ssize_t beacon_timeout_ms_show(struct class *class, char *buf)
78{
79 return scnprintf(buf, PAGE_SIZE, "%lu\n", beacon_timeout_ms);
80}
81
82static
83ssize_t beacon_timeout_ms_store(struct class *class,
84 const char *buf, size_t size)
85{
86 unsigned long bt;
87 ssize_t result;
88 result = sscanf(buf, "%lu", &bt);
89 if (result != 1)
90 return -EINVAL;
91 beacon_timeout_ms = bt;
92 return size;
93}
94
95static struct class_attribute uwb_class_attrs[] = {
96 __ATTR(beacon_timeout_ms, S_IWUSR | S_IRUGO,
97 beacon_timeout_ms_show, beacon_timeout_ms_store),
98 __ATTR_NULL,
99};
100
101/** Device model classes */
102struct class uwb_rc_class = {
103 .name = "uwb_rc",
104 .class_attrs = uwb_class_attrs,
105};
106
107
108static int __init uwb_subsys_init(void)
109{
110 int result = 0;
111
112 result = uwb_est_create();
113 if (result < 0) {
114 printk(KERN_ERR "uwb: Can't initialize EST subsystem\n");
115 goto error_est_init;
116 }
117
118 result = class_register(&uwb_rc_class);
119 if (result < 0)
120 goto error_uwb_rc_class_register;
121 uwbd_start();
122 uwb_dbg_init();
123 return 0;
124
125error_uwb_rc_class_register:
126 uwb_est_destroy();
127error_est_init:
128 return result;
129}
130module_init(uwb_subsys_init);
131
132static void __exit uwb_subsys_exit(void)
133{
134 uwb_dbg_exit();
135 uwbd_stop();
136 class_unregister(&uwb_rc_class);
137 uwb_est_destroy();
138 return;
139}
140module_exit(uwb_subsys_exit);
141
142MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
143MODULE_DESCRIPTION("Ultra Wide Band core");
144MODULE_LICENSE("GPL");
diff --git a/drivers/uwb/drp-avail.c b/drivers/uwb/drp-avail.c
new file mode 100644
index 000000000000..3febd8552808
--- /dev/null
+++ b/drivers/uwb/drp-avail.c
@@ -0,0 +1,288 @@
1/*
2 * Ultra Wide Band
3 * DRP availability management
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Reinette Chatre <reinette.chatre@intel.com>
7 * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License version
11 * 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program. If not, see <http://www.gnu.org/licenses/>.
20 *
21 *
22 * Manage DRP Availability (the MAS available for DRP
23 * reservations). Thus:
24 *
25 * - Handle DRP Availability Change notifications
26 *
27 * - Allow the reservation manager to indicate MAS reserved/released
28 * by local (owned by/targeted at the radio controller)
29 * reservations.
30 *
31 * - Based on the two sources above, generate a DRP Availability IE to
32 * be included in the beacon.
33 *
34 * See also the documentation for struct uwb_drp_avail.
35 */
36
37#include <linux/errno.h>
38#include <linux/module.h>
39#include <linux/device.h>
40#include <linux/bitmap.h>
41#include "uwb-internal.h"
42
43/**
44 * uwb_drp_avail_init - initialize an RC's MAS availability
45 *
46 * All MAS are available initially. The RC will inform us which
47 * slots are used for the BP (it may change in size).
48 */
49void uwb_drp_avail_init(struct uwb_rc *rc)
50{
51 bitmap_fill(rc->drp_avail.global, UWB_NUM_MAS);
52 bitmap_fill(rc->drp_avail.local, UWB_NUM_MAS);
53 bitmap_fill(rc->drp_avail.pending, UWB_NUM_MAS);
54}
55
56/*
57 * Determine MAS available for new local reservations.
58 *
59 * avail = global & local & pending
60 */
61static void uwb_drp_available(struct uwb_rc *rc, struct uwb_mas_bm *avail)
62{
63 bitmap_and(avail->bm, rc->drp_avail.global, rc->drp_avail.local, UWB_NUM_MAS);
64 bitmap_and(avail->bm, avail->bm, rc->drp_avail.pending, UWB_NUM_MAS);
65}
66
67/**
68 * uwb_drp_avail_reserve_pending - reserve MAS for a new reservation
69 * @rc: the radio controller
70 * @mas: the MAS to reserve
71 *
72 * Returns 0 on success, or -EBUSY if the MAS requested aren't available.
73 */
74int uwb_drp_avail_reserve_pending(struct uwb_rc *rc, struct uwb_mas_bm *mas)
75{
76 struct uwb_mas_bm avail;
77
78 uwb_drp_available(rc, &avail);
79 if (!bitmap_subset(mas->bm, avail.bm, UWB_NUM_MAS))
80 return -EBUSY;
81
82 bitmap_andnot(rc->drp_avail.pending, rc->drp_avail.pending, mas->bm, UWB_NUM_MAS);
83 return 0;
84}
85
86/**
87 * uwb_drp_avail_reserve - reserve MAS for an established reservation
88 * @rc: the radio controller
89 * @mas: the MAS to reserve
90 */
91void uwb_drp_avail_reserve(struct uwb_rc *rc, struct uwb_mas_bm *mas)
92{
93 bitmap_or(rc->drp_avail.pending, rc->drp_avail.pending, mas->bm, UWB_NUM_MAS);
94 bitmap_andnot(rc->drp_avail.local, rc->drp_avail.local, mas->bm, UWB_NUM_MAS);
95 rc->drp_avail.ie_valid = false;
96}
97
98/**
99 * uwb_drp_avail_release - release MAS from a pending or established reservation
100 * @rc: the radio controller
101 * @mas: the MAS to release
102 */
103void uwb_drp_avail_release(struct uwb_rc *rc, struct uwb_mas_bm *mas)
104{
105 bitmap_or(rc->drp_avail.local, rc->drp_avail.local, mas->bm, UWB_NUM_MAS);
106 bitmap_or(rc->drp_avail.pending, rc->drp_avail.pending, mas->bm, UWB_NUM_MAS);
107 rc->drp_avail.ie_valid = false;
108}
109
110/**
111 * uwb_drp_avail_ie_update - update the DRP Availability IE
112 * @rc: the radio controller
113 *
114 * avail = global & local
115 */
116void uwb_drp_avail_ie_update(struct uwb_rc *rc)
117{
118 struct uwb_mas_bm avail;
119
120 bitmap_and(avail.bm, rc->drp_avail.global, rc->drp_avail.local, UWB_NUM_MAS);
121
122 rc->drp_avail.ie.hdr.element_id = UWB_IE_DRP_AVAILABILITY;
123 rc->drp_avail.ie.hdr.length = UWB_NUM_MAS / 8;
124 uwb_mas_bm_copy_le(rc->drp_avail.ie.bmp, &avail);
125 rc->drp_avail.ie_valid = true;
126}
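/*
 * Editorial note: with UWB_NUM_MAS == 256 this emits the full
 * 32-octet DRP Availability Bitmap of ECMA-368 [16.8.7], one bit per
 * MAS with the least-significant bit mapping to the first MAS.
 */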
127
128/**
129 * Create an unsigned long from a buffer containing a byte stream.
130 *
131 * @array: pointer to buffer
132 * @itr: index of buffer from where we start
133 * @len: number of bytes of the buffer to convert; the buffer's
134 *       remaining size may not be an exact multiple of
135 *       sizeof(unsigned long). @len will be sizeof(unsigned long)
136 *       or smaller (BUG if not). If it is smaller then the
137 *       remaining space of the result is padded with zeroes.
138 */
139static
140unsigned long get_val(u8 *array, size_t itr, size_t len)
141{
142 unsigned long val = 0;
143 size_t top = itr + len;
144
145 BUG_ON(len > sizeof(val));
146
147 while (itr < top) {
148 val <<= 8;
149 val |= array[top - 1];
150 top--;
151 }
152 val <<= 8 * (sizeof(val) - len); /* padding */
153 return val;
154}
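/*
 * Editorial sketch (assumed 64-bit build): a full chunk of eight
 * octets { 0x01, 0x02, ..., 0x08 } with itr == 0 and
 * len == sizeof(unsigned long) is assembled least-significant byte
 * first, giving val == 0x0807060504030201; the trailing padding
 * shift is then a no-op because len == sizeof(val).
 */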
155
156/**
157 * Initialize bitmap from data buffer.
158 *
159 * The bitmap to be converted could come from a IE, for example a
160 * DRP Availability IE.
161 * From ECMA-368 1.0 [16.8.7]: "
162 * octets: 1 1 N * (0 to 32)
163 * Element ID Length (=N) DRP Availability Bitmap
164 *
165 * The DRP Availability Bitmap field is up to 256 bits long, one
166 * bit for each MAS in the superframe, where the least-significant
167 * bit of the field corresponds to the first MAS in the superframe
168 * and successive bits correspond to successive MASs."
169 *
170 * The DRP Availability bitmap is in octets from 0 to 32, so octet
171 * 32 contains bits for MAS 1-8, etc. If the bitmap is smaller than 32
172 * octets, the bits in octets not included at the end of the bitmap are
173 * treated as zero. In this case (when the bitmap is smaller than 32
174 * octets) the MAS represented range from MAS 1 to MAS (size of bitmap)
175 * with the last octet still containing bits for MAS 1-8, etc.
176 *
177 * For example:
178 * F00F0102 03040506 0708090A 0B0C0D0E 0F010203
179 * ^^^^
180 * ||||
181 * ||||
182 * |||\LSB of byte is MAS 9
183 * ||\MSB of byte is MAS 16
184 * |\LSB of first byte is MAS 1
185 * \ MSB of byte is MAS 8
186 *
187 * An example of this encoding can be found in ECMA-368 Annex-D [Table D.11]
188 *
189 * The resulting bitmap will have the following mapping:
190 * bit position 0 == MAS 1
191 * bit position 1 == MAS 2
192 * ...
193 * bit position (UWB_NUM_MAS - 1) == MAS UWB_NUM_MAS
194 *
195 * @bmp_itr: pointer to bitmap (can be declared with DECLARE_BITMAP)
196 * @buffer: pointer to buffer containing bitmap data in big endian
197 * format (MSB first)
198 * @buffer_size:number of bytes with which bitmap should be initialized
199 */
200static
201void buffer_to_bmp(unsigned long *bmp_itr, void *_buffer,
202 size_t buffer_size)
203{
204 u8 *buffer = _buffer;
205 size_t itr, len;
206 unsigned long val;
207
208 itr = 0;
209 while (itr < buffer_size) {
210 len = buffer_size - itr >= sizeof(val) ?
211 sizeof(val) : buffer_size - itr;
212 val = get_val(buffer, itr, len);
213 bmp_itr[itr / sizeof(val)] = val;
214 itr += sizeof(val);
215 }
216}
217
218
219/**
220 * Extract DRP Availability bitmap from the notification.
221 *
222 * The notification that comes in contains a bitmap of (UWB_NUM_MAS / 8) bytes.
223 * We convert that to our internal representation.
224 */
225static
226int uwbd_evt_get_drp_avail(struct uwb_event *evt, unsigned long *bmp)
227{
228 struct device *dev = &evt->rc->uwb_dev.dev;
229 struct uwb_rc_evt_drp_avail *drp_evt;
230 int result = -EINVAL;
231
232 /* Is there enough data to decode the event? */
233 if (evt->notif.size < sizeof(*drp_evt)) {
234 dev_err(dev, "DRP Availability Change: Not enough "
235 "data to decode event [%zu bytes, %zu "
236 "needed]\n", evt->notif.size, sizeof(*drp_evt));
237 goto error;
238 }
239 drp_evt = container_of(evt->notif.rceb, struct uwb_rc_evt_drp_avail, rceb);
240 buffer_to_bmp(bmp, drp_evt->bmp, UWB_NUM_MAS/8);
241 result = 0;
242error:
243 return result;
244}
245
246
247/**
248 * Process an incoming DRP Availability notification.
249 *
250 * @evt: Event information (packs the actual event data, which
251 * radio controller it came to, etc).
252 *
253 * @returns: 0 on success (so uwbd() frees the event buffer), < 0
254 * on error.
255 *
256 * According to ECMA-368 1.0 [16.8.7], bits set to ONE indicate that
257 * the MAS slot is available, bits set to ZERO indicate that the slot
258 * is busy.
259 *
260 * So we clear available slots, we set used slots :)
261 *
262 * The notification only marks non-availability based on the BP and
263 * received DRP IEs that are not for this radio controller. A copy of
264 * this bitmap is needed to generate the real availability (which
265 * includes local and pending reservations).
266 *
267 * The DRP Availability IE that this radio controller emits will need
268 * to be updated.
269 */
270int uwbd_evt_handle_rc_drp_avail(struct uwb_event *evt)
271{
272 int result;
273 struct uwb_rc *rc = evt->rc;
274 DECLARE_BITMAP(bmp, UWB_NUM_MAS);
275
276 result = uwbd_evt_get_drp_avail(evt, bmp);
277 if (result < 0)
278 return result;
279
280 mutex_lock(&rc->rsvs_mutex);
281 bitmap_copy(rc->drp_avail.global, bmp, UWB_NUM_MAS);
282 rc->drp_avail.ie_valid = false;
283 mutex_unlock(&rc->rsvs_mutex);
284
285 uwb_rsv_sched_update(rc);
286
287 return 0;
288}
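/*
 * Editorial note: only drp_avail.global is rewritten here; the local
 * and pending bitmaps (covering this RC's own reservations) are left
 * alone, and the effective availability is later recomputed as
 * global & local & pending by uwb_drp_available().
 */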
diff --git a/drivers/uwb/drp-ie.c b/drivers/uwb/drp-ie.c
new file mode 100644
index 000000000000..882724c5f126
--- /dev/null
+++ b/drivers/uwb/drp-ie.c
@@ -0,0 +1,232 @@
1/*
2 * UWB DRP IE management.
3 *
4 * Copyright (C) 2005-2006 Intel Corporation
5 * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19#include <linux/version.h>
20#include <linux/kernel.h>
21#include <linux/random.h>
22#include <linux/uwb.h>
23
24#include "uwb-internal.h"
25
26/*
27 * Allocate a DRP IE.
28 *
29 * To save having to free/allocate a DRP IE when its MAS changes,
30 * enough memory is allocated for the maximum number of DRP
31 * allocation fields. This gives an overhead per reservation of up to
32 * (UWB_NUM_ZONES - 1) * 4 = 60 octets.
33 */
34static struct uwb_ie_drp *uwb_drp_ie_alloc(void)
35{
36 struct uwb_ie_drp *drp_ie;
37 unsigned tiebreaker;
38
39 drp_ie = kzalloc(sizeof(struct uwb_ie_drp) +
40 UWB_NUM_ZONES * sizeof(struct uwb_drp_alloc),
41 GFP_KERNEL);
42 if (drp_ie) {
43 drp_ie->hdr.element_id = UWB_IE_DRP;
44
45 get_random_bytes(&tiebreaker, sizeof(unsigned));
46 uwb_ie_drp_set_tiebreaker(drp_ie, tiebreaker & 1);
47 }
48 return drp_ie;
49}
50
51
52/*
53 * Fill a DRP IE's allocation fields from a MAS bitmap.
54 */
55static void uwb_drp_ie_from_bm(struct uwb_ie_drp *drp_ie,
56 struct uwb_mas_bm *mas)
57{
58 int z, i, num_fields = 0, next = 0;
59 struct uwb_drp_alloc *zones;
60 __le16 current_bmp;
61 DECLARE_BITMAP(tmp_bmp, UWB_NUM_MAS);
62 DECLARE_BITMAP(tmp_mas_bm, UWB_MAS_PER_ZONE);
63
64 zones = drp_ie->allocs;
65
66 bitmap_copy(tmp_bmp, mas->bm, UWB_NUM_MAS);
67
68 /* Determine unique MAS bitmaps in zones from bitmap. */
69 for (z = 0; z < UWB_NUM_ZONES; z++) {
70 bitmap_copy(tmp_mas_bm, tmp_bmp, UWB_MAS_PER_ZONE);
71 if (bitmap_weight(tmp_mas_bm, UWB_MAS_PER_ZONE) > 0) {
72 bool found = false;
73 current_bmp = (__le16) *tmp_mas_bm;
74 for (i = 0; i < next; i++) {
75 if (current_bmp == zones[i].mas_bm) {
76 zones[i].zone_bm |= 1 << z;
77 found = true;
78 break;
79 }
80 }
81 if (!found) {
82 num_fields++;
83 zones[next].zone_bm = 1 << z;
84 zones[next].mas_bm = current_bmp;
85 next++;
86 }
87 }
88 bitmap_shift_right(tmp_bmp, tmp_bmp, UWB_MAS_PER_ZONE, UWB_NUM_MAS);
89 }
90
91 /* Store in format ready for transmission (le16). */
92 for (i = 0; i < num_fields; i++) {
93 drp_ie->allocs[i].zone_bm = cpu_to_le16(zones[i].zone_bm);
94 drp_ie->allocs[i].mas_bm = cpu_to_le16(zones[i].mas_bm);
95 }
96
97 drp_ie->hdr.length = sizeof(struct uwb_ie_drp) - sizeof(struct uwb_ie_hdr)
98 + num_fields * sizeof(struct uwb_drp_alloc);
99}
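/*
 * Editorial sketch (assumed values): a reservation covering MAS 0-3
 * of every zone repeats the 16-bit pattern 0x000f in all
 * UWB_NUM_ZONES zones, so the loop above collapses it into a single
 * allocation field with zone_bm == 0xffff and mas_bm == 0x000f, and
 * hdr.length accounts for exactly one struct uwb_drp_alloc on top of
 * the fixed DRP IE fields.
 */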
100
101/**
102 * uwb_drp_ie_update - update a reservation's DRP IE
103 * @rsv: the reservation
104 */
105int uwb_drp_ie_update(struct uwb_rsv *rsv)
106{
107 struct device *dev = &rsv->rc->uwb_dev.dev;
108 struct uwb_ie_drp *drp_ie;
109 int reason_code, status;
110
111 switch (rsv->state) {
112 case UWB_RSV_STATE_NONE:
113 kfree(rsv->drp_ie);
114 rsv->drp_ie = NULL;
115 return 0;
116 case UWB_RSV_STATE_O_INITIATED:
117 reason_code = UWB_DRP_REASON_ACCEPTED;
118 status = 0;
119 break;
120 case UWB_RSV_STATE_O_PENDING:
121 reason_code = UWB_DRP_REASON_ACCEPTED;
122 status = 0;
123 break;
124 case UWB_RSV_STATE_O_MODIFIED:
125 reason_code = UWB_DRP_REASON_MODIFIED;
126 status = 1;
127 break;
128 case UWB_RSV_STATE_O_ESTABLISHED:
129 reason_code = UWB_DRP_REASON_ACCEPTED;
130 status = 1;
131 break;
132 case UWB_RSV_STATE_T_ACCEPTED:
133 reason_code = UWB_DRP_REASON_ACCEPTED;
134 status = 1;
135 break;
136 case UWB_RSV_STATE_T_DENIED:
137 reason_code = UWB_DRP_REASON_DENIED;
138 status = 0;
139 break;
140 default:
141 dev_dbg(dev, "rsv with unhandled state (%d)\n", rsv->state);
142 return -EINVAL;
143 }
144
145 if (rsv->drp_ie == NULL) {
146 rsv->drp_ie = uwb_drp_ie_alloc();
147 if (rsv->drp_ie == NULL)
148 return -ENOMEM;
149 }
150 drp_ie = rsv->drp_ie;
151
152 uwb_ie_drp_set_owner(drp_ie, uwb_rsv_is_owner(rsv));
153 uwb_ie_drp_set_status(drp_ie, status);
154 uwb_ie_drp_set_reason_code(drp_ie, reason_code);
155 uwb_ie_drp_set_stream_index(drp_ie, rsv->stream);
156 uwb_ie_drp_set_type(drp_ie, rsv->type);
157
158 if (uwb_rsv_is_owner(rsv)) {
159 switch (rsv->target.type) {
160 case UWB_RSV_TARGET_DEV:
161 drp_ie->dev_addr = rsv->target.dev->dev_addr;
162 break;
163 case UWB_RSV_TARGET_DEVADDR:
164 drp_ie->dev_addr = rsv->target.devaddr;
165 break;
166 }
167 } else
168 drp_ie->dev_addr = rsv->owner->dev_addr;
169
170 uwb_drp_ie_from_bm(drp_ie, &rsv->mas);
171
172 rsv->ie_valid = true;
173 return 0;
174}
175
176/*
177 * Set MAS bits from given MAS bitmap in a single zone of large bitmap.
178 *
179 * We are given a zone id and the MAS bitmap of bits that need to be set in
180 * this zone. Note that this zone may already have bits set and this only
181 * adds settings - we cannot simply assign the MAS bitmap contents to the
182 * zone contents. We iterate over the bits (MAS) in the zone and set the
183 * bits that are set in the given MAS bitmap.
184 */
185static
186void uwb_drp_ie_single_zone_to_bm(struct uwb_mas_bm *bm, u8 zone, u16 mas_bm)
187{
188 int mas;
189 u16 mas_mask;
190
191 for (mas = 0; mas < UWB_MAS_PER_ZONE; mas++) {
192 mas_mask = 1 << mas;
193 if (mas_bm & mas_mask)
194 set_bit(zone * UWB_NUM_ZONES + mas, bm->bm);
195 }
196}
197
198/**
199 * uwb_drp_ie_zones_to_bm - convert DRP allocation fields to a bitmap
200 * @mas: MAS bitmap that will be populated to correspond to the
201 * allocation fields in the DRP IE
202 * @drp_ie: the DRP IE that contains the allocation fields.
203 *
204 * The input format is an array of MAS allocation fields (16 bit Zone
205 * bitmap, 16 bit MAS bitmap) as described in [ECMA-368] section
206 * 16.8.6. The output is a full 256 bit MAS bitmap.
207 *
208 * We go over all the allocation fields, for each allocation field we
209 * know which zones are impacted. We iterate over all the zones
210 * impacted and call a function that will set the correct MAS bits in
211 * each zone.
212 */
213void uwb_drp_ie_to_bm(struct uwb_mas_bm *bm, const struct uwb_ie_drp *drp_ie)
214{
215 int numallocs = (drp_ie->hdr.length - 4) / 4;
216 const struct uwb_drp_alloc *alloc;
217 int cnt;
218 u16 zone_bm, mas_bm;
219 u8 zone;
220 u16 zone_mask;
221
222 for (cnt = 0; cnt < numallocs; cnt++) {
223 alloc = &drp_ie->allocs[cnt];
224 zone_bm = le16_to_cpu(alloc->zone_bm);
225 mas_bm = le16_to_cpu(alloc->mas_bm);
226 for (zone = 0; zone < UWB_NUM_ZONES; zone++) {
227 zone_mask = 1 << zone;
228 if (zone_bm & zone_mask)
229 uwb_drp_ie_single_zone_to_bm(bm, zone, mas_bm);
230 }
231 }
232}
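/*
 * Editorial sketch (assumed values, inverse of the example above):
 * decoding one allocation field with zone_bm == 0xffff and
 * mas_bm == 0x000f visits every zone and sets MAS 0-3 in each,
 * reconstructing the original 256-bit MAS bitmap.
 */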
diff --git a/drivers/uwb/drp.c b/drivers/uwb/drp.c
new file mode 100644
index 000000000000..c0b1e5e2bd08
--- /dev/null
+++ b/drivers/uwb/drp.c
@@ -0,0 +1,461 @@
1/*
2 * Ultra Wide Band
3 * Dynamic Reservation Protocol handling
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License version
11 * 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program. If not, see <http://www.gnu.org/licenses/>.
20 */
21#include <linux/kthread.h>
22#include <linux/freezer.h>
23#include <linux/delay.h>
24#include "uwb-internal.h"
25
26/**
27 * Construct and send the SET DRP IE
28 *
29 * @rc: UWB Host controller
30 * @returns: >= 0 number of bytes still available in the beacon
31 * < 0 errno code on error.
32 *
33 * See WUSB[8.6.2.7]: The host must set all the DRP IEs that it wants the
34 * device to include in its beacon at the same time. We thus have to
35 * traverse all reservations and include the DRP IEs of all PENDING
36 * and NEGOTIATED reservations in a SET DRP command for transmission.
37 *
38 * A DRP Availability IE is appended.
39 *
40 * rc->uwb_dev.mutex is held
41 *
42 * FIXME: We currently ignore the returned value indicating the remaining
43 * space in the beacon. It could be used to deny reservation requests
44 * earlier if it is determined that they would exceed the beacon space.
45 */
46static
47int uwb_rc_gen_send_drp_ie(struct uwb_rc *rc)
48{
49 int result;
50 struct device *dev = &rc->uwb_dev.dev;
51 struct uwb_rc_cmd_set_drp_ie *cmd;
52 struct uwb_rc_evt_set_drp_ie reply;
53 struct uwb_rsv *rsv;
54 int num_bytes = 0;
55 u8 *IEDataptr;
56
57 result = -ENOMEM;
58 /* First traverse all reservations to determine memory needed. */
59 list_for_each_entry(rsv, &rc->reservations, rc_node) {
60 if (rsv->drp_ie != NULL)
61 num_bytes += rsv->drp_ie->hdr.length + 2;
62 }
63 num_bytes += sizeof(rc->drp_avail.ie);
64 cmd = kzalloc(sizeof(*cmd) + num_bytes, GFP_KERNEL);
65 if (cmd == NULL)
66 goto error;
67 cmd->rccb.bCommandType = UWB_RC_CET_GENERAL;
68 cmd->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_SET_DRP_IE);
69 cmd->wIELength = num_bytes;
70 IEDataptr = (u8 *)&cmd->IEData[0];
71
72 /* Next traverse all reservations to place IEs in allocated memory. */
73 list_for_each_entry(rsv, &rc->reservations, rc_node) {
74 if (rsv->drp_ie != NULL) {
75 memcpy(IEDataptr, rsv->drp_ie,
76 rsv->drp_ie->hdr.length + 2);
77 IEDataptr += rsv->drp_ie->hdr.length + 2;
78 }
79 }
80 memcpy(IEDataptr, &rc->drp_avail.ie, sizeof(rc->drp_avail.ie));
81
82 reply.rceb.bEventType = UWB_RC_CET_GENERAL;
83 reply.rceb.wEvent = UWB_RC_CMD_SET_DRP_IE;
84 result = uwb_rc_cmd(rc, "SET-DRP-IE", &cmd->rccb,
85 sizeof(*cmd) + num_bytes, &reply.rceb,
86 sizeof(reply));
87 if (result < 0)
88 goto error_cmd;
89 result = le16_to_cpu(reply.wRemainingSpace);
90 if (reply.bResultCode != UWB_RC_RES_SUCCESS) {
91 dev_err(&rc->uwb_dev.dev, "SET-DRP-IE: command execution "
92 "failed: %s (%d). RemainingSpace in beacon "
93 "= %d\n", uwb_rc_strerror(reply.bResultCode),
94 reply.bResultCode, result);
95 result = -EIO;
96 } else {
97 dev_dbg(dev, "SET-DRP-IE sent. RemainingSpace in beacon "
98 "= %d.\n", result);
99 result = 0;
100 }
101error_cmd:
102 kfree(cmd);
103error:
104 return result;
105
106}
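/*
 * Editorial note: the "hdr.length + 2" above sizes each IE as its
 * payload plus the 2-octet IE header (Element ID and Length), so the
 * SET-DRP-IE command carries the raw IE stream exactly as it will
 * appear in the beacon, with the DRP Availability IE appended last.
 */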
107/**
108 * Send all DRP IEs associated with this host
109 *
110 * @returns: >= 0 number of bytes still available in the beacon
111 * < 0 errno code on error.
112 *
113 * As per the protocol we obtain the host controller device lock to access
114 * bandwidth structures.
115 */
116int uwb_rc_send_all_drp_ie(struct uwb_rc *rc)
117{
118 int result;
119
120 mutex_lock(&rc->uwb_dev.mutex);
121 result = uwb_rc_gen_send_drp_ie(rc);
122 mutex_unlock(&rc->uwb_dev.mutex);
123 return result;
124}
125
126void uwb_drp_handle_timeout(struct uwb_rsv *rsv)
127{
128 struct device *dev = &rsv->rc->uwb_dev.dev;
129
130 dev_dbg(dev, "reservation timeout in state %s (%d)\n",
131 uwb_rsv_state_str(rsv->state), rsv->state);
132
133 switch (rsv->state) {
134 case UWB_RSV_STATE_O_INITIATED:
135 if (rsv->is_multicast) {
136 uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
137 return;
138 }
139 break;
140 case UWB_RSV_STATE_O_ESTABLISHED:
141 if (rsv->is_multicast)
142 return;
143 break;
144 default:
145 break;
146 }
147 uwb_rsv_remove(rsv);
148}
149
150/*
151 * Based on the DRP IE, transition a target reservation to a new
152 * state.
153 */
154static void uwb_drp_process_target(struct uwb_rc *rc, struct uwb_rsv *rsv,
155 struct uwb_ie_drp *drp_ie)
156{
157 struct device *dev = &rc->uwb_dev.dev;
158 int status;
159 enum uwb_drp_reason reason_code;
160
161 status = uwb_ie_drp_status(drp_ie);
162 reason_code = uwb_ie_drp_reason_code(drp_ie);
163
164 if (status) {
165 switch (reason_code) {
166 case UWB_DRP_REASON_ACCEPTED:
167 uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED);
168 break;
169 case UWB_DRP_REASON_MODIFIED:
170 dev_err(dev, "FIXME: unhandled reason code (%d/%d)\n",
171 reason_code, status);
172 break;
173 default:
174 dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
175 reason_code, status);
176 }
177 } else {
178 switch (reason_code) {
179 case UWB_DRP_REASON_ACCEPTED:
180 /* New reservations are handled in uwb_rsv_find(). */
181 break;
182 case UWB_DRP_REASON_DENIED:
183 uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
184 break;
185 case UWB_DRP_REASON_CONFLICT:
186 case UWB_DRP_REASON_MODIFIED:
187 dev_err(dev, "FIXME: unhandled reason code (%d/%d)\n",
188 reason_code, status);
189 break;
190 default:
191 dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
192 reason_code, status);
193 }
194 }
195}
196
197/*
198 * Based on the DRP IE, transition an owner reservation to a new
199 * state.
200 */
201static void uwb_drp_process_owner(struct uwb_rc *rc, struct uwb_rsv *rsv,
202 struct uwb_ie_drp *drp_ie)
203{
204 struct device *dev = &rc->uwb_dev.dev;
205 int status;
206 enum uwb_drp_reason reason_code;
207
208 status = uwb_ie_drp_status(drp_ie);
209 reason_code = uwb_ie_drp_reason_code(drp_ie);
210
211 if (status) {
212 switch (reason_code) {
213 case UWB_DRP_REASON_ACCEPTED:
214 uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
215 break;
216 case UWB_DRP_REASON_MODIFIED:
217 dev_err(dev, "FIXME: unhandled reason code (%d/%d)\n",
218 reason_code, status);
219 break;
220 default:
221 dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
222 reason_code, status);
223 }
224 } else {
225 switch (reason_code) {
226 case UWB_DRP_REASON_PENDING:
227 uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_PENDING);
228 break;
229 case UWB_DRP_REASON_DENIED:
230 uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
231 break;
232 case UWB_DRP_REASON_CONFLICT:
233 case UWB_DRP_REASON_MODIFIED:
234 dev_err(dev, "FIXME: unhandled reason code (%d/%d)\n",
235 reason_code, status);
236 break;
237 default:
238 dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
239 reason_code, status);
240 }
241 }
242}
243
244/*
245 * Process a received DRP IE; it's either for a reservation owned by
246 * the RC or targeted at it (or it's for a WUSB cluster reservation).
247 */
248static void uwb_drp_process(struct uwb_rc *rc, struct uwb_dev *src,
249 struct uwb_ie_drp *drp_ie)
250{
251 struct uwb_rsv *rsv;
252
253 rsv = uwb_rsv_find(rc, src, drp_ie);
254 if (!rsv) {
255 /*
256 * No reservation? It's either for a recently
257 * terminated reservation; or the DRP IE couldn't be
258 * processed (e.g., an invalid IE or out of memory).
259 */
260 return;
261 }
262
263 /*
264 * Do nothing with DRP IEs for reservations that have been
265 * terminated.
266 */
267 if (rsv->state == UWB_RSV_STATE_NONE) {
268 uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
269 return;
270 }
271
272 if (uwb_ie_drp_owner(drp_ie))
273 uwb_drp_process_target(rc, rsv, drp_ie);
274 else
275 uwb_drp_process_owner(rc, rsv, drp_ie);
276}
277
278
279/*
280 * Process all the DRP IEs (both DRP IEs and the DRP Availability IE)
281 * from a device.
282 */
283static
284void uwb_drp_process_all(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt,
285 size_t ielen, struct uwb_dev *src_dev)
286{
287 struct device *dev = &rc->uwb_dev.dev;
288 struct uwb_ie_hdr *ie_hdr;
289 void *ptr;
290
291 ptr = drp_evt->ie_data;
292 for (;;) {
293 ie_hdr = uwb_ie_next(&ptr, &ielen);
294 if (!ie_hdr)
295 break;
296
297 switch (ie_hdr->element_id) {
298 case UWB_IE_DRP_AVAILABILITY:
299 /* FIXME: does something need to be done with this? */
300 break;
301 case UWB_IE_DRP:
302 uwb_drp_process(rc, src_dev, (struct uwb_ie_drp *)ie_hdr);
303 break;
304 default:
305 dev_warn(dev, "unexpected IE in DRP notification\n");
306 break;
307 }
308 }
309
310 if (ielen > 0)
311 dev_warn(dev, "%d octets remaining in DRP notification\n",
312 (int)ielen);
313}
314
315
316/*
317 * Go through all the DRP IEs and find the ones that conflict with our
318 * reservations.
319 *
320 * FIXME: must resolve the conflict according to the rules in
321 * [ECMA-368].
322 */
323static
324void uwb_drp_process_conflict_all(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt,
325 size_t ielen, struct uwb_dev *src_dev)
326{
327 struct device *dev = &rc->uwb_dev.dev;
328 struct uwb_ie_hdr *ie_hdr;
329 struct uwb_ie_drp *drp_ie;
330 void *ptr;
331
332 ptr = drp_evt->ie_data;
333 for (;;) {
334 ie_hdr = uwb_ie_next(&ptr, &ielen);
335 if (!ie_hdr)
336 break;
337
338 drp_ie = container_of(ie_hdr, struct uwb_ie_drp, hdr);
339
340 /* FIXME: check if this DRP IE conflicts. */
341 }
342
343 if (ielen > 0)
344 dev_warn(dev, "%d octets remaining in DRP notification\n",
345 (int)ielen);
346}
347
348
349/*
350 * Terminate all reservations owned by, or targeted at, 'uwb_dev'.
351 */
352static void uwb_drp_terminate_all(struct uwb_rc *rc, struct uwb_dev *uwb_dev)
353{
354 struct uwb_rsv *rsv;
355
356 list_for_each_entry(rsv, &rc->reservations, rc_node) {
357 if (rsv->owner == uwb_dev
358 || (rsv->target.type == UWB_RSV_TARGET_DEV && rsv->target.dev == uwb_dev))
359 uwb_rsv_remove(rsv);
360 }
361}
362
363
364/**
365 * uwbd_evt_handle_rc_drp - handle a DRP_IE event
366 * @evt: the DRP_IE event from the radio controller
367 *
368 * This processes DRP notifications from the radio controller, either
369 * initiating a new reservation or transitioning an existing
370 * reservation into a different state.
371 *
372 * DRP notifications can occur for three different reasons:
373 *
374 * - UWB_DRP_NOTIF_DRP_IE_RECVD: one or more DRP IEs with the RC as
375 * the target or source have been received.
376 *
377 * These DRP IEs could be new or for an existing reservation.
378 *
379 * If the DRP IE for an existing reservation ceases to be
380 * received for at least mMaxLostBeacons, the reservation should be
381 * considered to be terminated. Note that the TERMINATE reason (see
382 * below) may not always be signalled (e.g., the remote device has
383 * two or more reservations established with the RC).
384 *
385 * - UWB_DRP_NOTIF_CONFLICT: DRP IEs from any device in the beacon
386 * group conflict with the RC's reservations.
387 *
388 * - UWB_DRP_NOTIF_TERMINATE: DRP IEs are no longer being received
389 * from a device (i.e., it's terminated all reservations).
390 *
391 * Only the software state of the reservations is changed; the setting
392 * of the radio controller's DRP IEs is done after all the events in
393 * an event buffer are processed. This saves waiting multiple times
394 * for the SET_DRP_IE command to complete.
395 */
396int uwbd_evt_handle_rc_drp(struct uwb_event *evt)
397{
398 struct device *dev = &evt->rc->uwb_dev.dev;
399 struct uwb_rc *rc = evt->rc;
400 struct uwb_rc_evt_drp *drp_evt;
401 size_t ielength, bytes_left;
402 struct uwb_dev_addr src_addr;
403 struct uwb_dev *src_dev;
404 int reason;
405
406 /* Is there enough data to decode the event (and any IEs in
407 its payload)? */
408 if (evt->notif.size < sizeof(*drp_evt)) {
409 dev_err(dev, "DRP event: Not enough data to decode event "
410 "[%zu bytes left, %zu needed]\n",
411 evt->notif.size, sizeof(*drp_evt));
412 return 0;
413 }
414 bytes_left = evt->notif.size - sizeof(*drp_evt);
415 drp_evt = container_of(evt->notif.rceb, struct uwb_rc_evt_drp, rceb);
416 ielength = le16_to_cpu(drp_evt->ie_length);
417 if (bytes_left != ielength) {
418 dev_err(dev, "DRP event: Not enough data in payload [%zu"
419 "bytes left, %zu declared in the event]\n",
420 bytes_left, ielength);
421 return 0;
422 }
423
424 memcpy(src_addr.data, &drp_evt->src_addr, sizeof(src_addr));
425 src_dev = uwb_dev_get_by_devaddr(rc, &src_addr);
426 if (!src_dev) {
427 /*
428 * A DRP notification from an unrecognized device.
429 *
430 * This is probably from a WUSB device that doesn't
431 * have an EUI-48 and therefore doesn't show up in the
432 * UWB device database. It's safe to simply ignore
433 * these.
434 */
435 return 0;
436 }
437
438 mutex_lock(&rc->rsvs_mutex);
439
440 reason = uwb_rc_evt_drp_reason(drp_evt);
441
442 switch (reason) {
443 case UWB_DRP_NOTIF_DRP_IE_RCVD:
444 uwb_drp_process_all(rc, drp_evt, ielength, src_dev);
445 break;
446 case UWB_DRP_NOTIF_CONFLICT:
447 uwb_drp_process_conflict_all(rc, drp_evt, ielength, src_dev);
448 break;
449 case UWB_DRP_NOTIF_TERMINATE:
450 uwb_drp_terminate_all(rc, src_dev);
451 break;
452 default:
453 dev_warn(dev, "ignored DRP event with reason code: %d\n", reason);
454 break;
455 }
456
457 mutex_unlock(&rc->rsvs_mutex);
458
459 uwb_dev_put(src_dev);
460 return 0;
461}
diff --git a/drivers/uwb/est.c b/drivers/uwb/est.c
new file mode 100644
index 000000000000..5fe566b7c845
--- /dev/null
+++ b/drivers/uwb/est.c
@@ -0,0 +1,477 @@
1/*
2 * Ultra Wide Band Radio Control
3 * Event Size Tables management
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * FIXME: docs
24 *
25 * Infrastructure, code and data tables for guessing the size of
26 * events received on the notification endpoints of UWB radio
27 * controllers.
28 *
29 * You define a table of events and for each, its size and how to get
30 * the extra size.
31 *
32 * ENTRY POINTS:
33 *
34 * uwb_est_{init/destroy}(): To initialize/release the EST subsystem.
35 *
36 * uwb_est_[u]register(): To un/register event size tables
37 * uwb_est_grow()
38 *
39 * uwb_est_find_size(): Get the size of an event
40 * uwb_est_get_size()
41 */
42#include <linux/spinlock.h>
43#define D_LOCAL 0
44#include <linux/uwb/debug.h>
45#include "uwb-internal.h"
46
47
48struct uwb_est {
49 u16 type_event_high;
50 u16 vendor, product;
51 u8 entries;
52 const struct uwb_est_entry *entry;
53};
54
55
56static struct uwb_est *uwb_est;
57static u8 uwb_est_size;
58static u8 uwb_est_used;
59static DEFINE_RWLOCK(uwb_est_lock);
60
61/**
62 * WUSB Standard Event Size Table, HWA-RC interface
63 *
64 * Sizes for events and notifications type 0 (general), high nibble 0.
65 */
66static
67struct uwb_est_entry uwb_est_00_00xx[] = {
68 [UWB_RC_EVT_IE_RCV] = {
69 .size = sizeof(struct uwb_rc_evt_ie_rcv),
70 .offset = 1 + offsetof(struct uwb_rc_evt_ie_rcv, wIELength),
71 },
72 [UWB_RC_EVT_BEACON] = {
73 .size = sizeof(struct uwb_rc_evt_beacon),
74 .offset = 1 + offsetof(struct uwb_rc_evt_beacon, wBeaconInfoLength),
75 },
76 [UWB_RC_EVT_BEACON_SIZE] = {
77 .size = sizeof(struct uwb_rc_evt_beacon_size),
78 },
79 [UWB_RC_EVT_BPOIE_CHANGE] = {
80 .size = sizeof(struct uwb_rc_evt_bpoie_change),
81 .offset = 1 + offsetof(struct uwb_rc_evt_bpoie_change,
82 wBPOIELength),
83 },
84 [UWB_RC_EVT_BP_SLOT_CHANGE] = {
85 .size = sizeof(struct uwb_rc_evt_bp_slot_change),
86 },
87 [UWB_RC_EVT_BP_SWITCH_IE_RCV] = {
88 .size = sizeof(struct uwb_rc_evt_bp_switch_ie_rcv),
89 .offset = 1 + offsetof(struct uwb_rc_evt_bp_switch_ie_rcv, wIELength),
90 },
91 [UWB_RC_EVT_DEV_ADDR_CONFLICT] = {
92 .size = sizeof(struct uwb_rc_evt_dev_addr_conflict),
93 },
94 [UWB_RC_EVT_DRP_AVAIL] = {
95 .size = sizeof(struct uwb_rc_evt_drp_avail)
96 },
97 [UWB_RC_EVT_DRP] = {
98 .size = sizeof(struct uwb_rc_evt_drp),
99 .offset = 1 + offsetof(struct uwb_rc_evt_drp, ie_length),
100 },
101 [UWB_RC_EVT_BP_SWITCH_STATUS] = {
102 .size = sizeof(struct uwb_rc_evt_bp_switch_status),
103 },
104 [UWB_RC_EVT_CMD_FRAME_RCV] = {
105 .size = sizeof(struct uwb_rc_evt_cmd_frame_rcv),
106 .offset = 1 + offsetof(struct uwb_rc_evt_cmd_frame_rcv, dataLength),
107 },
108 [UWB_RC_EVT_CHANNEL_CHANGE_IE_RCV] = {
109 .size = sizeof(struct uwb_rc_evt_channel_change_ie_rcv),
110 .offset = 1 + offsetof(struct uwb_rc_evt_channel_change_ie_rcv, wIELength),
111 },
112 [UWB_RC_CMD_CHANNEL_CHANGE] = {
113 .size = sizeof(struct uwb_rc_evt_confirm),
114 },
115 [UWB_RC_CMD_DEV_ADDR_MGMT] = {
116 .size = sizeof(struct uwb_rc_evt_dev_addr_mgmt) },
117 [UWB_RC_CMD_GET_IE] = {
118 .size = sizeof(struct uwb_rc_evt_get_ie),
119 .offset = 1 + offsetof(struct uwb_rc_evt_get_ie, wIELength),
120 },
121 [UWB_RC_CMD_RESET] = {
122 .size = sizeof(struct uwb_rc_evt_confirm),
123 },
124 [UWB_RC_CMD_SCAN] = {
125 .size = sizeof(struct uwb_rc_evt_confirm),
126 },
127 [UWB_RC_CMD_SET_BEACON_FILTER] = {
128 .size = sizeof(struct uwb_rc_evt_confirm),
129 },
130 [UWB_RC_CMD_SET_DRP_IE] = {
131 .size = sizeof(struct uwb_rc_evt_set_drp_ie),
132 },
133 [UWB_RC_CMD_SET_IE] = {
134 .size = sizeof(struct uwb_rc_evt_set_ie),
135 },
136 [UWB_RC_CMD_SET_NOTIFICATION_FILTER] = {
137 .size = sizeof(struct uwb_rc_evt_confirm),
138 },
139 [UWB_RC_CMD_SET_TX_POWER] = {
140 .size = sizeof(struct uwb_rc_evt_confirm),
141 },
142 [UWB_RC_CMD_SLEEP] = {
143 .size = sizeof(struct uwb_rc_evt_confirm),
144 },
145 [UWB_RC_CMD_START_BEACON] = {
146 .size = sizeof(struct uwb_rc_evt_confirm),
147 },
148 [UWB_RC_CMD_STOP_BEACON] = {
149 .size = sizeof(struct uwb_rc_evt_confirm),
150 },
151 [UWB_RC_CMD_BP_MERGE] = {
152 .size = sizeof(struct uwb_rc_evt_confirm),
153 },
154 [UWB_RC_CMD_SEND_COMMAND_FRAME] = {
155 .size = sizeof(struct uwb_rc_evt_confirm),
156 },
157 [UWB_RC_CMD_SET_ASIE_NOTIF] = {
158 .size = sizeof(struct uwb_rc_evt_confirm),
159 },
160};
161
162static
163struct uwb_est_entry uwb_est_01_00xx[] = {
164 [UWB_RC_DAA_ENERGY_DETECTED] = {
165 .size = sizeof(struct uwb_rc_evt_daa_energy_detected),
166 },
167 [UWB_RC_SET_DAA_ENERGY_MASK] = {
168 .size = sizeof(struct uwb_rc_evt_set_daa_energy_mask),
169 },
170 [UWB_RC_SET_NOTIFICATION_FILTER_EX] = {
171 .size = sizeof(struct uwb_rc_evt_set_notification_filter_ex),
172 },
173};
174
175/**
176 * Initialize the EST subsystem
177 *
178 * Register the standard tables also.
179 *
180 * FIXME: tag init
181 */
182int uwb_est_create(void)
183{
184 int result;
185
186 uwb_est_size = 2;
187 uwb_est_used = 0;
188 uwb_est = kzalloc(uwb_est_size * sizeof(uwb_est[0]), GFP_KERNEL);
189 if (uwb_est == NULL)
190 return -ENOMEM;
191
192 result = uwb_est_register(UWB_RC_CET_GENERAL, 0, 0xffff, 0xffff,
193 uwb_est_00_00xx, ARRAY_SIZE(uwb_est_00_00xx));
194 if (result < 0)
195 goto out;
196 result = uwb_est_register(UWB_RC_CET_EX_TYPE_1, 0, 0xffff, 0xffff,
197 uwb_est_01_00xx, ARRAY_SIZE(uwb_est_01_00xx));
198out:
199 return result;
200}
201
202
203/** Clean it up */
204void uwb_est_destroy(void)
205{
206 kfree(uwb_est);
207 uwb_est = NULL;
208 uwb_est_size = uwb_est_used = 0;
209}
210
211
212/**
213 * Double the capacity of the EST table
214 *
215 * @returns 0 if ok, < 0 errno on error.
216 */
217static
218int uwb_est_grow(void)
219{
220 size_t actual_size = uwb_est_size * sizeof(uwb_est[0]);
221 void *new = kmalloc(2 * actual_size, GFP_ATOMIC);
222 if (new == NULL)
223 return -ENOMEM;
224 memcpy(new, uwb_est, actual_size);
225 memset(new + actual_size, 0, actual_size);
226 kfree(uwb_est);
227 uwb_est = new;
228 uwb_est_size *= 2;
229 return 0;
230}
231
232
233/**
234 * Register an event size table
235 *
236 * Makes room for it if the table is full, and then inserts it in the
237 * right position (entries are sorted by type, event_high, vendor and
238 * then product).
239 *
240 * @vendor: vendor code for matching against the device (0x0000 and
241 * 0xffff mean any); use 0x0000 to force all to match without
242 *	checking possible vendor specific ones, 0xffff to match
243 * after checking vendor specific ones.
244 *
245 * @product: product code from that vendor; same matching rules, use
246 * 0x0000 for not allowing vendor specific matches, 0xffff
247 * for allowing.
248 *
249 * This arrangement just makes the tables sort differently. Because the
250 * table is sorted by growing type-event_high-vendor-product, a zero
251 * vendor will match before a 0x456a vendor, which will match
252 * before a 0xffff vendor.
253 *
254 * @returns 0 if ok, < 0 errno on error.
255 */
256/* FIXME: add bus type to vendor/product code */
257int uwb_est_register(u8 type, u8 event_high, u16 vendor, u16 product,
258 const struct uwb_est_entry *entry, size_t entries)
259{
260 unsigned long flags;
261 unsigned itr;
262 u16 type_event_high;
263 int result = 0;
264
265 write_lock_irqsave(&uwb_est_lock, flags);
266 if (uwb_est_used == uwb_est_size) {
267 result = uwb_est_grow();
268 if (result < 0)
269 goto out;
270 }
271 /* Find the right spot to insert it in */
272 type_event_high = type << 8 | event_high;
273 for (itr = 0; itr < uwb_est_used; itr++)
274 if (uwb_est[itr].type_event_high < type
275 && uwb_est[itr].vendor < vendor
276 && uwb_est[itr].product < product)
277 break;
278
279 /* Shift others to make room for the new one? */
280 if (itr < uwb_est_used)
281 memmove(&uwb_est[itr+1], &uwb_est[itr], uwb_est_used - itr);
282 uwb_est[itr].type_event_high = type << 8 | event_high;
283 uwb_est[itr].vendor = vendor;
284 uwb_est[itr].product = product;
285 uwb_est[itr].entry = entry;
286 uwb_est[itr].entries = entries;
287 uwb_est_used++;
288out:
289 write_unlock_irqrestore(&uwb_est_lock, flags);
290 return result;
291}
292EXPORT_SYMBOL_GPL(uwb_est_register);
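/*
 * Editorial sketch (hypothetical IDs and table, not part of the
 * original file): a vendor-specific size table can be registered so
 * it is consulted alongside the generic 0xffff/0xffff tables set up
 * by uwb_est_create():
 *
 *	static const struct uwb_est_entry example_vendor_est[] = {
 *		[0x00] = { .size = sizeof(struct uwb_rc_evt_confirm) },
 *	};
 *
 *	uwb_est_register(UWB_RC_CET_GENERAL, 0, 0x1234, 0x5678,
 *			 example_vendor_est,
 *			 ARRAY_SIZE(example_vendor_est));
 */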
293
294
295/**
296 * Unregister an event size table
297 *
298 * This just removes the specified entry and moves the ones after it
299 * to fill in the gap. This is needed to keep the list sorted; no
300 * reallocation is done to reduce the size of the table.
301 *
302 * We unregister by all the data we used to register instead of by
303 * pointer to the @entry array because we might have used the same
304 * table for a bunch of IDs (for example).
305 *
306 * @returns 0 if ok, < 0 errno on error (-ENOENT if not found).
307 */
308int uwb_est_unregister(u8 type, u8 event_high, u16 vendor, u16 product,
309 const struct uwb_est_entry *entry, size_t entries)
310{
311 unsigned long flags;
312 unsigned itr;
313 struct uwb_est est_cmp = {
314 .type_event_high = type << 8 | event_high,
315 .vendor = vendor,
316 .product = product,
317 .entry = entry,
318 .entries = entries
319 };
320 write_lock_irqsave(&uwb_est_lock, flags);
321 for (itr = 0; itr < uwb_est_used; itr++)
322 if (!memcmp(&uwb_est[itr], &est_cmp, sizeof(est_cmp)))
323 goto found;
324 write_unlock_irqrestore(&uwb_est_lock, flags);
325 return -ENOENT;
326
327found:
328 if (itr < uwb_est_used - 1) /* Not last one? move ones above */
329 memmove(&uwb_est[itr], &uwb_est[itr+1], uwb_est_used - itr - 1);
330 uwb_est_used--;
331 write_unlock_irqrestore(&uwb_est_lock, flags);
332 return 0;
333}
334EXPORT_SYMBOL_GPL(uwb_est_unregister);
335
336
337/**
338 * Get the size of an event from a table
339 *
340 * @rceb: pointer to the buffer with the event
341 * @rceb_size: size of the area pointed to by @rceb in bytes.
342 * @returns: > 0 Size of the event
343 * -ENOSPC An area big enough was not provided to look
344 * ahead into the event's guts and guess the size.
345 * -EINVAL Unknown event code (wEvent).
346 *
347 * This will look at the received RCEB and guess what is the total
348 * size. For variable sized events, it will look further ahead into
349 * their length field to see how much data should be read.
350 *
351 * Note this size is *not* final--the neh (Notification/Event Handle)
352 * might specify an extra size to add.
353 */
354static
355ssize_t uwb_est_get_size(struct uwb_rc *uwb_rc, struct uwb_est *est,
356 u8 event_low, const struct uwb_rceb *rceb,
357 size_t rceb_size)
358{
359 unsigned offset;
360 ssize_t size;
361 struct device *dev = &uwb_rc->uwb_dev.dev;
362 const struct uwb_est_entry *entry;
363
364 size = -ENOENT;
365 if (event_low >= est->entries) { /* in range? */
366 dev_err(dev, "EST %p 0x%04x/%04x/%04x[%u]: event %u out of range\n",
367 est, est->type_event_high, est->vendor, est->product,
368 est->entries, event_low);
369 goto out;
370 }
371 size = -ENOENT;
372 entry = &est->entry[event_low];
373 if (entry->size == 0 && entry->offset == 0) { /* unknown? */
374 dev_err(dev, "EST %p 0x%04x/%04x/%04x[%u]: event %u unknown\n",
375 est, est->type_event_high, est->vendor, est->product,
376 est->entries, event_low);
377 goto out;
378 }
379 offset = entry->offset; /* extra fries with that? */
380 if (offset == 0)
381 size = entry->size;
382 else {
383 /* Ops, got an extra size field at 'offset'--read it */
384 const void *ptr = rceb;
385 size_t type_size = 0;
386 offset--;
387 size = -ENOSPC; /* enough data for more? */
388 switch (entry->type) {
389 case UWB_EST_16: type_size = sizeof(__le16); break;
390 case UWB_EST_8: type_size = sizeof(u8); break;
391 default: BUG();
392 }
393 if (offset + type_size > rceb_size) {
394 dev_err(dev, "EST %p 0x%04x/%04x/%04x[%u]: "
395 "not enough data to read extra size\n",
396 est, est->type_event_high, est->vendor,
397 est->product, est->entries);
398 goto out;
399 }
400 size = entry->size;
401 ptr += offset;
402 switch (entry->type) {
403 case UWB_EST_16: size += le16_to_cpu(*(__le16 *)ptr); break;
404 case UWB_EST_8: size += *(u8 *)ptr; break;
405 default: BUG();
406 }
407 }
408out:
409 return size;
410}
411
412
413/**
414 * Guesses the size of a WA event
415 *
416 * @rceb: pointer to the buffer with the event
417 * @rceb_size: size of the area pointed to by @rceb in bytes.
418 * @returns: > 0 Size of the event
419 * -ENOSPC The buffer provided is too small to look ahead
420 * into the event and work out its size.
421 * -ENOENT No registered table handles this event code (wEvent).
422 *
423 * This looks at the received RCEB and guesses the total size by
424 * checking all the tables registered with uwb_est_register(). For
425 * variable sized events, it looks further ahead into their length
426 * field to see how much data should be read.
427 *
428 * Note this size is *not* final--the neh (Notification/Event Handle)
429 * might specify an extra size to add or replace.
430 */
431ssize_t uwb_est_find_size(struct uwb_rc *rc, const struct uwb_rceb *rceb,
432 size_t rceb_size)
433{
434 /* FIXME: add vendor/product data */
435 ssize_t size;
436 struct device *dev = &rc->uwb_dev.dev;
437 unsigned long flags;
438 unsigned itr;
439 u16 type_event_high, event;
440 u8 *ptr = (u8 *) rceb;
441
442 read_lock_irqsave(&uwb_est_lock, flags);
443 d_printf(2, dev, "Size query for event 0x%02x/%04x/%02x,"
444 " buffer size %ld\n",
445 (unsigned) rceb->bEventType,
446 (unsigned) le16_to_cpu(rceb->wEvent),
447 (unsigned) rceb->bEventContext,
448 (long) rceb_size);
449 size = -ENOSPC;
450 if (rceb_size < sizeof(*rceb))
451 goto out;
452 event = le16_to_cpu(rceb->wEvent);
453 type_event_high = rceb->bEventType << 8 | (event & 0xff00) >> 8;
454 for (itr = 0; itr < uwb_est_used; itr++) {
455 d_printf(3, dev, "Checking EST 0x%04x/%04x/%04x\n",
456 uwb_est[itr].type_event_high, uwb_est[itr].vendor,
457 uwb_est[itr].product);
458 if (uwb_est[itr].type_event_high != type_event_high)
459 continue;
460 size = uwb_est_get_size(rc, &uwb_est[itr],
461 event & 0x00ff, rceb, rceb_size);
462 /* try more tables that might handle the same type */
463 if (size != -ENOENT)
464 goto out;
465 }
466 dev_dbg(dev, "event 0x%02x/%04x/%02x: no handlers available; "
467 "RCEB %02x %02x %02x %02x\n",
468 (unsigned) rceb->bEventType,
469 (unsigned) le16_to_cpu(rceb->wEvent),
470 (unsigned) rceb->bEventContext,
471 ptr[0], ptr[1], ptr[2], ptr[3]);
472 size = -ENOENT;
473out:
474 read_unlock_irqrestore(&uwb_est_lock, flags);
475 return size;
476}
477EXPORT_SYMBOL_GPL(uwb_est_find_size);
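To make the lookup and size computation above concrete, here is a minimal stand-alone sketch (user-space C, made-up byte values, stand-in helpers rather than the kernel's struct uwb_rceb and struct uwb_est_entry): the key packs bEventType with the high byte of wEvent, and a variable-sized event adds a little-endian 16-bit length found at a fixed offset, exactly the look-ahead uwb_est_get_size() performs.

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

/* total size = fixed header size + 16-bit length field 'offset' bytes in */
static long event_size(const uint8_t *buf, size_t buf_len,
		       size_t fixed, size_t offset)
{
	if (offset + 2 > buf_len)
		return -1;	/* cannot look ahead; caller must read more */
	return (long)(fixed + get_le16(buf + offset));
}

int main(void)
{
	/* bEventType, wEvent (LE), bEventContext, then a 2-byte IE length */
	uint8_t ev[] = { 0x00, 0x02, 0x00, 0x1f, 0x05, 0x00 };
	uint16_t wevent = get_le16(&ev[1]);

	/* key = type << 8 | high byte of wEvent, as in uwb_est_find_size() */
	uint16_t key = (uint16_t)(ev[0] << 8 | ((wevent & 0xff00) >> 8));

	printf("lookup key 0x%04x, event size %ld\n",
	       key, event_size(ev, sizeof(ev), 4, 4));
	return 0;
}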
diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c
new file mode 100644
index 000000000000..3d26fa0f8ae1
--- /dev/null
+++ b/drivers/uwb/hwa-rc.c
@@ -0,0 +1,926 @@
1/*
2 * WUSB Host Wire Adapter: Radio Control Interface (WUSB[8.6])
3 * Radio Control command/event transport
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * Initialize the Radio Control interface Driver.
24 *
25 * For each device probed, creates an 'struct hwarc' which contains
26 * just the representation of the UWB Radio Controller, and the logic
27 * for reading notifications and passing them to the UWB Core.
28 *
29 * So we initialize all of those, register the UWB Radio Controller
30 * and set up the notification/event handling to pipe the notifications
31 * to the UWB management daemon.
32 *
33 * Command and event filtering.
34 *
35 * This is the driver for the Radio Control Interface described in WUSB
36 * 1.0. The core UWB module assumes that all drivers are compliant with
37 * the WHCI 0.95 specification. We thus create a filter that parses all
38 * incoming messages from the (WUSB 1.0) device and manipulates them to
39 * conform to the WHCI 0.95 specification. Similarly, outgoing messages
40 * are parsed and manipulated to conform to the WUSB 1.0 compliant messages
41 * that the device expects. Only a few messages are affected:
42 * Affected events:
43 * UWB_RC_EVT_BEACON
44 * UWB_RC_EVT_BP_SLOT_CHANGE
45 * UWB_RC_EVT_DRP_AVAIL
46 * UWB_RC_EVT_DRP
47 * Affected commands:
48 * UWB_RC_CMD_SCAN
49 * UWB_RC_CMD_SET_DRP_IE
50 *
51 *
52 *
53 */
54#include <linux/version.h>
55#include <linux/init.h>
56#include <linux/module.h>
57#include <linux/usb.h>
58#include <linux/usb/wusb.h>
59#include <linux/usb/wusb-wa.h>
60#include <linux/uwb.h>
61#include "uwb-internal.h"
62#define D_LOCAL 1
63#include <linux/uwb/debug.h>
64
65/* The device uses commands and events from the WHCI specification, although
66 * reporting itself as WUSB compliant. */
67#define WUSB_QUIRK_WHCI_CMD_EVT 0x01
68
69/**
70 * Descriptor for an instance of the UWB Radio Control Driver that
71 * attaches to the RCI interface of the Host Wire Adapter.
72 *
73 * Unless there is a lock specific to the 'data members', all access
74 * is protected by uwb_rc->mutex.
75 *
76 * The NEEP (Notification/Event EndPoint) URB (@neep_urb) writes to
77 * @rd_buffer. Note there is no locking because it is perfectly (heh!)
78 * serialized--probe() submits an URB, callback is called, processes
79 * the data (synchronously), submits another URB, and so on. There is
80 * no concurrent access to the buffer.
81 */
82struct hwarc {
83 struct usb_device *usb_dev;
84 struct usb_interface *usb_iface;
85 struct uwb_rc *uwb_rc; /* UWB host controller */
86 struct urb *neep_urb; /* Notification endpoint handling */
87 struct edc neep_edc;
88 void *rd_buffer; /* NEEP read buffer */
89};
90
91
92/* Beacon received notification (WUSB 1.0 [8.6.3.2]) */
93struct uwb_rc_evt_beacon_WUSB_0100 {
94 struct uwb_rceb rceb;
95 u8 bChannelNumber;
96 __le16 wBPSTOffset;
97 u8 bLQI;
98 u8 bRSSI;
99 __le16 wBeaconInfoLength;
100 u8 BeaconInfo[];
101} __attribute__((packed));
102
103/**
104 * Filter WUSB 1.0 BEACON RCV notification to be WHCI 0.95
105 *
106 * @header: the incoming event
107 * @buf_size: size of buffer containing incoming event
108 * @new_size: size of event after filtering completed
109 *
110 * The WHCI 0.95 spec has a "Beacon Type" field. This value is unknown at
111 * the time we receive the beacon from WUSB so we just set it to
112 * UWB_RC_BEACON_TYPE_NEIGHBOR as a default.
113 * The solution below allocates memory upon receipt of every beacon from a
114 * WUSB device. This will degrade performance. What is the right way to
115 * do this?
116 */
117static
118int hwarc_filter_evt_beacon_WUSB_0100(struct uwb_rc *rc,
119 struct uwb_rceb **header,
120 const size_t buf_size,
121 size_t *new_size)
122{
123 struct uwb_rc_evt_beacon_WUSB_0100 *be;
124 struct uwb_rc_evt_beacon *newbe;
125 size_t bytes_left, ielength;
126 struct device *dev = &rc->uwb_dev.dev;
127
128 be = container_of(*header, struct uwb_rc_evt_beacon_WUSB_0100, rceb);
129 bytes_left = buf_size;
130 if (bytes_left < sizeof(*be)) {
131 dev_err(dev, "Beacon Received Notification: Not enough data "
132 "to decode for filtering (%zu vs %zu bytes needed)\n",
133 bytes_left, sizeof(*be));
134 return -EINVAL;
135 }
136 bytes_left -= sizeof(*be);
137 ielength = le16_to_cpu(be->wBeaconInfoLength);
138 if (bytes_left < ielength) {
139 dev_err(dev, "Beacon Received Notification: Not enough data "
140 "to decode IEs (%zu vs %zu bytes needed)\n",
141 bytes_left, ielength);
142 return -EINVAL;
143 }
144 newbe = kzalloc(sizeof(*newbe) + ielength, GFP_ATOMIC);
145 if (newbe == NULL)
146 return -ENOMEM;
147 newbe->rceb = be->rceb;
148 newbe->bChannelNumber = be->bChannelNumber;
149 newbe->bBeaconType = UWB_RC_BEACON_TYPE_NEIGHBOR;
150 newbe->wBPSTOffset = be->wBPSTOffset;
151 newbe->bLQI = be->bLQI;
152 newbe->bRSSI = be->bRSSI;
153 newbe->wBeaconInfoLength = be->wBeaconInfoLength;
154 memcpy(newbe->BeaconInfo, be->BeaconInfo, ielength);
155 *header = &newbe->rceb;
156 *new_size = sizeof(*newbe) + ielength;
157 return 1; /* calling function will free memory */
158}
159
160
161/* DRP Availability change notification (WUSB 1.0 [8.6.3.8]) */
162struct uwb_rc_evt_drp_avail_WUSB_0100 {
163 struct uwb_rceb rceb;
164 __le16 wIELength;
165 u8 IEData[];
166} __attribute__((packed));
167
168/**
169 * Filter WUSB 1.0 DRP AVAILABILITY CHANGE notification to be WHCI 0.95
170 *
171 * @header: the incoming event
172 * @buf_size: size of buffer containing incoming event
173 * @new_size: size of event after filtering completed
174 */
175static
176int hwarc_filter_evt_drp_avail_WUSB_0100(struct uwb_rc *rc,
177 struct uwb_rceb **header,
178 const size_t buf_size,
179 size_t *new_size)
180{
181 struct uwb_rc_evt_drp_avail_WUSB_0100 *da;
182 struct uwb_rc_evt_drp_avail *newda;
183 struct uwb_ie_hdr *ie_hdr;
184 size_t bytes_left, ielength;
185 struct device *dev = &rc->uwb_dev.dev;
186
187
188 da = container_of(*header, struct uwb_rc_evt_drp_avail_WUSB_0100, rceb);
189 bytes_left = buf_size;
190 if (bytes_left < sizeof(*da)) {
191 dev_err(dev, "Not enough data to decode DRP Avail "
192 "Notification for filtering. Expected %zu, "
193 "received %zu.\n", (size_t)sizeof(*da), bytes_left);
194 return -EINVAL;
195 }
196 bytes_left -= sizeof(*da);
197 ielength = le16_to_cpu(da->wIELength);
198 if (bytes_left < ielength) {
199 dev_err(dev, "DRP Avail Notification filter: IE length "
200 "[%zu bytes] does not match actual length "
201 "[%zu bytes].\n", ielength, bytes_left);
202 return -EINVAL;
203 }
204 if (ielength < sizeof(*ie_hdr)) {
205 dev_err(dev, "DRP Avail Notification filter: Not enough "
206 "data to decode IE [%zu bytes, %zu needed]\n",
207 ielength, sizeof(*ie_hdr));
208 return -EINVAL;
209 }
210 ie_hdr = (void *) da->IEData;
211 if (ie_hdr->length > 32) {
212 dev_err(dev, "DRP Availability Change event has unexpected "
213 "length for filtering. Expected < 32 bytes, "
214 "got %zu bytes.\n", (size_t)ie_hdr->length);
215 return -EINVAL;
216 }
217 newda = kzalloc(sizeof(*newda), GFP_ATOMIC);
218 if (newda == NULL)
219 return -ENOMEM;
220 newda->rceb = da->rceb;
221 memcpy(newda->bmp, (u8 *) ie_hdr + sizeof(*ie_hdr), ie_hdr->length);
222 *header = &newda->rceb;
223 *new_size = sizeof(*newda);
224 return 1; /* calling function will free memory */
225}
226
227
228/* DRP notification (WUSB 1.0 [8.6.3.9]) */
229struct uwb_rc_evt_drp_WUSB_0100 {
230 struct uwb_rceb rceb;
231 struct uwb_dev_addr wSrcAddr;
232 u8 bExplicit;
233 __le16 wIELength;
234 u8 IEData[];
235} __attribute__((packed));
236
237/**
238 * Filter WUSB 1.0 DRP Notification to be WHCI 0.95
239 *
240 * @header: the incoming event
241 * @buf_size: size of buffer containing incoming event
242 * @new_size: size of event after filtering completed
243 *
244 * It is hard to manage DRP reservations without having a Reason code.
245 * Unfortunately there is none in the WUSB spec. We just set the default to
246 * DRP IE RECEIVED.
247 * We do not currently use the bBeaconSlotNumber value, so we set this to
248 * zero for now.
249 */
250static
251int hwarc_filter_evt_drp_WUSB_0100(struct uwb_rc *rc,
252 struct uwb_rceb **header,
253 const size_t buf_size,
254 size_t *new_size)
255{
256 struct uwb_rc_evt_drp_WUSB_0100 *drpev;
257 struct uwb_rc_evt_drp *newdrpev;
258 size_t bytes_left, ielength;
259 struct device *dev = &rc->uwb_dev.dev;
260
261 drpev = container_of(*header, struct uwb_rc_evt_drp_WUSB_0100, rceb);
262 bytes_left = buf_size;
263 if (bytes_left < sizeof(*drpev)) {
264 dev_err(dev, "Not enough data to decode DRP Notification "
265 "for filtering. Expected %zu, received %zu.\n",
266 (size_t)sizeof(*drpev), bytes_left);
267 return -EINVAL;
268 }
269 ielength = le16_to_cpu(drpev->wIELength);
270 bytes_left -= sizeof(*drpev);
271 if (bytes_left < ielength) {
272 dev_err(dev, "DRP Notification filter: header length [%zu "
273 "bytes] does not match actual length [%zu "
274 "bytes].\n", ielength, bytes_left);
275 return -EINVAL;
276 }
277 newdrpev = kzalloc(sizeof(*newdrpev) + ielength, GFP_ATOMIC);
278 if (newdrpev == NULL)
279 return -ENOMEM;
280 newdrpev->rceb = drpev->rceb;
281 newdrpev->src_addr = drpev->wSrcAddr;
282 newdrpev->reason = UWB_DRP_NOTIF_DRP_IE_RCVD;
283 newdrpev->beacon_slot_number = 0;
284 newdrpev->ie_length = drpev->wIELength;
285 memcpy(newdrpev->ie_data, drpev->IEData, ielength);
286 *header = &newdrpev->rceb;
287 *new_size = sizeof(*newdrpev) + ielength;
288 return 1; /* calling function will free memory */
289}
290
291
292/* Scan Command (WUSB 1.0 [8.6.2.5]) */
293struct uwb_rc_cmd_scan_WUSB_0100 {
294 struct uwb_rccb rccb;
295 u8 bChannelNumber;
296 u8 bScanState;
297} __attribute__((packed));
298
299/**
300 * Filter WHCI 0.95 SCAN command to be WUSB 1.0 SCAN command
301 *
302 * @header: command sent to device (compliant to WHCI 0.95)
303 * @size: size of command sent to device
304 *
305 * We only reduce the size by two bytes because the WUSB 1.0 scan command
306 * does not have the last field (wStartTime). Also, make sure we don't send
307 * the device an unexpected scan type.
308 */
309static
310int hwarc_filter_cmd_scan_WUSB_0100(struct uwb_rc *rc,
311 struct uwb_rccb **header,
312 size_t *size)
313{
314 struct uwb_rc_cmd_scan *sc;
315
316 sc = container_of(*header, struct uwb_rc_cmd_scan, rccb);
317
318 if (sc->bScanState == UWB_SCAN_ONLY_STARTTIME)
319 sc->bScanState = UWB_SCAN_ONLY;
320 /* Don't send the last two bytes. */
321 *size -= 2;
322 return 0;
323}
324
325
326/* SET DRP IE command (WUSB 1.0 [8.6.2.7]) */
327struct uwb_rc_cmd_set_drp_ie_WUSB_0100 {
328 struct uwb_rccb rccb;
329 u8 bExplicit;
330 __le16 wIELength;
331 struct uwb_ie_drp IEData[];
332} __attribute__((packed));
333
334/**
335 * Filter WHCI 0.95 SET DRP IE command to be WUSB 1.0 SET DRP IE command
336 *
337 * @header: command sent to device (compliant to WHCI 0.95)
338 * @size: size of command sent to device
339 *
340 * WUSB has an extra bExplicit field - we assume always explicit
341 * negotiation so this field is set. The command expected by the device is
342 * thus larger than the one prepared by the driver so we need to
343 * reallocate memory to accommodate this.
344 * We trust the driver to send us the correct data so no checking is done
345 * on incoming data - even though it is variable length.
346 */
347static
348int hwarc_filter_cmd_set_drp_ie_WUSB_0100(struct uwb_rc *rc,
349 struct uwb_rccb **header,
350 size_t *size)
351{
352 struct uwb_rc_cmd_set_drp_ie *orgcmd;
353 struct uwb_rc_cmd_set_drp_ie_WUSB_0100 *cmd;
354 size_t ielength;
355
356 orgcmd = container_of(*header, struct uwb_rc_cmd_set_drp_ie, rccb);
357 ielength = le16_to_cpu(orgcmd->wIELength);
358 cmd = kzalloc(sizeof(*cmd) + ielength, GFP_KERNEL);
359 if (cmd == NULL)
360 return -ENOMEM;
361 cmd->rccb = orgcmd->rccb;
362 cmd->bExplicit = 0;
363 cmd->wIELength = orgcmd->wIELength;
364 memcpy(cmd->IEData, orgcmd->IEData, ielength);
365 *header = &cmd->rccb;
366 *size = sizeof(*cmd) + ielength;
367 return 1; /* calling function will free memory */
368}
369
370
371/**
372 * Filter data from WHCI driver to WUSB device
373 *
374 * @header: WHCI 0.95 compliant command from driver
375 * @size: length of command
376 *
377 * The routine managing commands to the device (uwb_rc_cmd()) will call the
378 * filtering function pointer (if it exists) before it passes any data to
379 * the device. At this time the command has been formatted according to
380 * WHCI 0.95 and is ready to be sent to the device.
381 *
382 * The filter function will be provided with the current command and its
383 * length. The function will manipulate the command if necessary and
384 * potentially reallocate memory for a command that needs more memory than
385 * the given command. If new memory was allocated, the function will return 1
386 * to indicate to the calling function that the memory needs to be freed
387 * when it is no longer needed. The size will contain the new length of the
388 * command.
389 * If memory has not been allocated we rely on the original mechanisms to
390 * free the memory of the command - even when we reduce the value of size.
391 */
392static
393int hwarc_filter_cmd_WUSB_0100(struct uwb_rc *rc, struct uwb_rccb **header,
394 size_t *size)
395{
396 int result;
397 struct uwb_rccb *rccb = *header;
398 int cmd = le16_to_cpu(rccb->wCommand);
399 switch (cmd) {
400 case UWB_RC_CMD_SCAN:
401 result = hwarc_filter_cmd_scan_WUSB_0100(rc, header, size);
402 break;
403 case UWB_RC_CMD_SET_DRP_IE:
404 result = hwarc_filter_cmd_set_drp_ie_WUSB_0100(rc, header, size);
405 break;
406 default:
407 result = -ENOANO;
408 break;
409 }
410 return result;
411}
412
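The 0 / 1 / negative contract described above is what matters to the caller (uwb_rc_cmd(), per the comment). A hypothetical caller, sketched in stand-alone C with a dummy filter, might drive it like this; the filter body, names, and the transport step are made up for illustration:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Hypothetical filter with the same contract as hwarc_filter_cmd():
 * 0 = buffer untouched, 1 = *buf replaced with newly allocated memory
 * that the caller must free, < 0 = error.
 */
static int filter(void **buf, size_t *len)
{
	unsigned char *copy = malloc(*len + 1);

	if (copy == NULL)
		return -1;
	memcpy(copy, *buf, *len);
	copy[*len] = 0;			/* pretend the device wants one extra byte */
	*buf = copy;
	*len += 1;
	return 1;
}

static int send_cmd(void *cmd, size_t len)
{
	void *buf = cmd;
	int owned = filter(&buf, &len);

	if (owned < 0)
		return owned;			/* filtering failed */
	printf("sending %zu bytes\n", len);	/* hand off to the transport */
	if (owned == 1)
		free(buf);			/* filter allocated a replacement */
	return 0;
}

int main(void)
{
	unsigned char cmd[4] = { 1, 2, 3, 4 };

	return send_cmd(cmd, sizeof(cmd));
}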
413
414/**
415 * Filter data from WHCI driver to WUSB device
416 *
417 * @header: WHCI 0.95 compliant command from driver
418 * @size: length of command
419 *
420 * Filter commands based on which protocol the device supports. The WUSB
421 * errata should be the same as WHCI 0.95 so we do not filter that here -
422 * only WUSB 1.0.
423 */
424static
425int hwarc_filter_cmd(struct uwb_rc *rc, struct uwb_rccb **header,
426 size_t *size)
427{
428 int result = -ENOANO;
429 if (rc->version == 0x0100)
430 result = hwarc_filter_cmd_WUSB_0100(rc, header, size);
431 return result;
432}
433
434
435/**
436 * Compute return value as sum of incoming value and value at given offset
437 *
438 * @rceb: event for which we compute the size; it contains a variable
439 * length field.
440 * @core_size: size of the "non variable" part of the event
441 * @offset: place in event where the length of the variable part is stored
442 * @buf_size: total length of buffer in which event arrived - we need to make
443 * sure we read the offset in memory that is still part of the event
444 */
445static
446ssize_t hwarc_get_event_size(struct uwb_rc *rc, const struct uwb_rceb *rceb,
447 size_t core_size, size_t offset,
448 const size_t buf_size)
449{
450 ssize_t size = -ENOSPC;
451 const void *ptr = rceb;
452 size_t type_size = sizeof(__le16);
453 struct device *dev = &rc->uwb_dev.dev;
454
455 if (offset + type_size >= buf_size) {
456 dev_err(dev, "Not enough data to read extra size of event "
457 "0x%02x/%04x/%02x, only got %zu bytes.\n",
458 rceb->bEventType, le16_to_cpu(rceb->wEvent),
459 rceb->bEventContext, buf_size);
460 goto out;
461 }
462 ptr += offset;
463 size = core_size + le16_to_cpu(*(__le16 *)ptr);
464out:
465 return size;
466}
467
468
469/* Beacon slot change notification (WUSB 1.0 [8.6.3.5]) */
470struct uwb_rc_evt_bp_slot_change_WUSB_0100 {
471 struct uwb_rceb rceb;
472 u8 bSlotNumber;
473} __attribute__((packed));
474
475
476/**
477 * Filter data from WUSB device to WHCI driver
478 *
479 * @header: incoming event
480 * @buf_size: size of buffer in which event arrived
481 * @_real_size: actual size of the event in the buffer
482 * @_new_size: size of the event after filtering
483 *
484 * We don't know how the buffer is constructed - there may be more than one
485 * event in it so buffer length does not determine event length. We first
486 * determine the expected size of the incoming event. This value is passed
487 * back only if the actual filtering succeeded (so we know the computed
488 * expected size is correct). This value will be zero if
489 * the event did not need any filtering.
490 *
491 * WHCI interprets the BP Slot Change event's data differently than
492 * WUSB. The event sizes are exactly the same. The data field
493 * indicates the new beacon slot in which a RC is transmitting its
494 * beacon. The maximum value of this is 96 (wMacBPLength ECMA-368
495 * 17.16 (Table 117)). We thus know that the WUSB value will not set
496 * the bit bNoSlot, so we don't really do anything (placeholder).
497 */
498static
499int hwarc_filter_event_WUSB_0100(struct uwb_rc *rc, struct uwb_rceb **header,
500 const size_t buf_size, size_t *_real_size,
501 size_t *_new_size)
502{
503 int result = -ENOANO;
504 struct uwb_rceb *rceb = *header;
505 int event = le16_to_cpu(rceb->wEvent);
506 ssize_t event_size;
507 size_t core_size, offset;
508
509 if (rceb->bEventType != UWB_RC_CET_GENERAL)
510 goto out;
511 switch (event) {
512 case UWB_RC_EVT_BEACON:
513 core_size = sizeof(struct uwb_rc_evt_beacon_WUSB_0100);
514 offset = offsetof(struct uwb_rc_evt_beacon_WUSB_0100,
515 wBeaconInfoLength);
516 event_size = hwarc_get_event_size(rc, rceb, core_size,
517 offset, buf_size);
518 if (event_size < 0)
519 goto out;
520 *_real_size = event_size;
521 result = hwarc_filter_evt_beacon_WUSB_0100(rc, header,
522 buf_size, _new_size);
523 break;
524 case UWB_RC_EVT_BP_SLOT_CHANGE:
525 *_new_size = *_real_size =
526 sizeof(struct uwb_rc_evt_bp_slot_change_WUSB_0100);
527 result = 0;
528 break;
529
530 case UWB_RC_EVT_DRP_AVAIL:
531 core_size = sizeof(struct uwb_rc_evt_drp_avail_WUSB_0100);
532 offset = offsetof(struct uwb_rc_evt_drp_avail_WUSB_0100,
533 wIELength);
534 event_size = hwarc_get_event_size(rc, rceb, core_size,
535 offset, buf_size);
536 if (event_size < 0)
537 goto out;
538 *_real_size = event_size;
539 result = hwarc_filter_evt_drp_avail_WUSB_0100(
540 rc, header, buf_size, _new_size);
541 break;
542
543 case UWB_RC_EVT_DRP:
544 core_size = sizeof(struct uwb_rc_evt_drp_WUSB_0100);
545 offset = offsetof(struct uwb_rc_evt_drp_WUSB_0100, wIELength);
546 event_size = hwarc_get_event_size(rc, rceb, core_size,
547 offset, buf_size);
548 if (event_size < 0)
549 goto out;
550 *_real_size = event_size;
551 result = hwarc_filter_evt_drp_WUSB_0100(rc, header,
552 buf_size, _new_size);
553 break;
554
555 default:
556 break;
557 }
558out:
559 return result;
560}
561
562/**
563 * Filter data from WUSB device to WHCI driver
564 *
565 * @header: incoming event
566 * @buf_size: size of buffer in which event arrived
567 * @_real_size: actual size of the event in the buffer
568 * @_new_size: size of the event after filtering
569 *
570 * Filter events based on which protocol the device supports. The WUSB
571 * errata should be the same as WHCI 0.95 so we do not filter that here -
572 * only WUSB 1.0.
573 *
574 * If we don't handle it, we return -ENOANO (why the weird error code?
575 * well, so if I get it, I can pinpoint in the code that raised
576 * it...after all, not too many places use the higher error codes).
577 */
578static
579int hwarc_filter_event(struct uwb_rc *rc, struct uwb_rceb **header,
580 const size_t buf_size, size_t *_real_size,
581 size_t *_new_size)
582{
583 int result = -ENOANO;
584 if (rc->version == 0x0100)
585 result = hwarc_filter_event_WUSB_0100(
586 rc, header, buf_size, _real_size, _new_size);
587 return result;
588}
589
590
591/**
592 * Execute an UWB RC command on HWA
593 *
594 * @rc: Instance of a Radio Controller that is a HWA
595 * @cmd: Buffer containing the RCCB and payload to execute
596 * @cmd_size: Size of the command buffer.
597 *
598 * NOTE: rc's mutex has to be locked
599 */
600static
601int hwarc_cmd(struct uwb_rc *uwb_rc, const struct uwb_rccb *cmd, size_t cmd_size)
602{
603 struct hwarc *hwarc = uwb_rc->priv;
604 return usb_control_msg(
605 hwarc->usb_dev, usb_sndctrlpipe(hwarc->usb_dev, 0),
606 WA_EXEC_RC_CMD, USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
607 0, hwarc->usb_iface->cur_altsetting->desc.bInterfaceNumber,
608 (void *) cmd, cmd_size, 100 /* FIXME: this is totally arbitrary */);
609}
610
611static
612int hwarc_reset(struct uwb_rc *uwb_rc)
613{
614 struct hwarc *hwarc = uwb_rc->priv;
615 return usb_reset_device(hwarc->usb_dev);
616}
617
618/**
619 * Callback for the notification and event endpoint
620 *
621 * Checks that everything is fine and then passes the read data to
622 * the notification/event handling mechanism (neh).
623 */
624static
625void hwarc_neep_cb(struct urb *urb)
626{
627 struct hwarc *hwarc = urb->context;
628 struct usb_interface *usb_iface = hwarc->usb_iface;
629 struct device *dev = &usb_iface->dev;
630 int result;
631
632 switch (result = urb->status) {
633 case 0:
634 d_printf(3, dev, "NEEP: receive stat %d, %zu bytes\n",
635 urb->status, (size_t)urb->actual_length);
636 uwb_rc_neh_grok(hwarc->uwb_rc, urb->transfer_buffer,
637 urb->actual_length);
638 break;
639 case -ECONNRESET: /* Not an error, but a controlled situation; */
640 case -ENOENT: /* (we killed the URB)...so, no broadcast */
641 d_printf(2, dev, "NEEP: URB reset/noent %d\n", urb->status);
642 goto out;
643 case -ESHUTDOWN: /* going away! */
644 d_printf(2, dev, "NEEP: URB down %d\n", urb->status);
645 goto out;
646 default: /* On general errors, retry unless it gets ugly */
647 if (edc_inc(&hwarc->neep_edc, EDC_MAX_ERRORS,
648 EDC_ERROR_TIMEFRAME))
649 goto error_exceeded;
650 dev_err(dev, "NEEP: URB error %d\n", urb->status);
651 }
652 result = usb_submit_urb(urb, GFP_ATOMIC);
653 d_printf(3, dev, "NEEP: submit %d\n", result);
654 if (result < 0) {
655 dev_err(dev, "NEEP: Can't resubmit URB (%d) resetting device\n",
656 result);
657 goto error;
658 }
659out:
660 return;
661
662error_exceeded:
663 dev_err(dev, "NEEP: URB max acceptable errors "
664 "exceeded, resetting device\n");
665error:
666 uwb_rc_neh_error(hwarc->uwb_rc, result);
667 uwb_rc_reset_all(hwarc->uwb_rc);
668 return;
669}
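The edc_inc() call above implements an error-density cut-off: sporadic URB errors are retried, but once too many land inside EDC_ERROR_TIMEFRAME the driver gives up and resets the device. A simplified user-space sketch of that idea (this is not the kernel's struct edc, just the concept, with invented field names and limits):

#include <stdio.h>
#include <time.h>

/*
 * Simplified error-density counter: returns 1 once more than 'max'
 * errors land inside a 'window' of seconds, 0 otherwise.
 */
struct edc_lite {
	unsigned errors;
	time_t start;
};

static int edc_lite_inc(struct edc_lite *e, unsigned max, unsigned window)
{
	time_t now = time(NULL);

	if (e->errors == 0 || now - e->start > (time_t)window) {
		e->start = now;		/* start a new window */
		e->errors = 0;
	}
	if (++e->errors > max)
		return 1;		/* too many errors, too fast: give up */
	return 0;
}

int main(void)
{
	struct edc_lite e = { 0, 0 };
	int i;

	for (i = 0; i < 5; i++)
		printf("error %d -> give up? %d\n", i,
		       edc_lite_inc(&e, 3, 60));
	return 0;
}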
670
671static void hwarc_init(struct hwarc *hwarc)
672{
673 edc_init(&hwarc->neep_edc);
674}
675
676/**
677 * Initialize the notification/event endpoint stuff
678 *
679 * Note this is effectively a parallel thread; it knows that
680 * hwarc->uwb_rc always exists because the existence of a 'hwarc'
681 * means that there is a reference on the hwarc->uwb_rc (see
682 * _probe()), and thus _neep_cb() can execute safely.
683 */
684static int hwarc_neep_init(struct uwb_rc *rc)
685{
686 struct hwarc *hwarc = rc->priv;
687 struct usb_interface *iface = hwarc->usb_iface;
688 struct usb_device *usb_dev = interface_to_usbdev(iface);
689 struct device *dev = &iface->dev;
690 int result;
691 struct usb_endpoint_descriptor *epd;
692
693 epd = &iface->cur_altsetting->endpoint[0].desc;
694 hwarc->rd_buffer = (void *) __get_free_page(GFP_KERNEL);
695 if (hwarc->rd_buffer == NULL) {
696 dev_err(dev, "Unable to allocate notification's read buffer\n");
697 goto error_rd_buffer;
698 }
699 hwarc->neep_urb = usb_alloc_urb(0, GFP_KERNEL);
700 if (hwarc->neep_urb == NULL) {
701 dev_err(dev, "Unable to allocate notification URB\n");
702 goto error_urb_alloc;
703 }
704 usb_fill_int_urb(hwarc->neep_urb, usb_dev,
705 usb_rcvintpipe(usb_dev, epd->bEndpointAddress),
706 hwarc->rd_buffer, PAGE_SIZE,
707 hwarc_neep_cb, hwarc, epd->bInterval);
708 result = usb_submit_urb(hwarc->neep_urb, GFP_ATOMIC);
709 if (result < 0) {
710 dev_err(dev, "Cannot submit notification URB: %d\n", result);
711 goto error_neep_submit;
712 }
713 return 0;
714
715error_neep_submit:
716 usb_free_urb(hwarc->neep_urb);
717error_urb_alloc:
718 free_page((unsigned long)hwarc->rd_buffer);
719error_rd_buffer:
720 return -ENOMEM;
721}
722
723
724/** Clean up all the notification endpoint resources */
725static void hwarc_neep_release(struct uwb_rc *rc)
726{
727 struct hwarc *hwarc = rc->priv;
728
729 usb_kill_urb(hwarc->neep_urb);
730 usb_free_urb(hwarc->neep_urb);
731 free_page((unsigned long)hwarc->rd_buffer);
732}
733
734/**
735 * Get the version from class-specific descriptor
736 *
737 * NOTE: this descriptor comes with the big bundled configuration
738 * descriptor that includes the interface and endpoint descriptors,
739 * so we just look for it in the cached copy kept by the USB stack.
740 *
741 * NOTE2: We convert LE fields to CPU order.
742 */
743static int hwarc_get_version(struct uwb_rc *rc)
744{
745 int result;
746
747 struct hwarc *hwarc = rc->priv;
748 struct uwb_rc_control_intf_class_desc *descr;
749 struct device *dev = &rc->uwb_dev.dev;
750 struct usb_device *usb_dev = hwarc->usb_dev;
751 char *itr;
752 struct usb_descriptor_header *hdr;
753 size_t itr_size, actconfig_idx;
754 u16 version;
755
756 /* pointer subtraction already yields the active configuration index */
757 actconfig_idx = usb_dev->actconfig - usb_dev->config;
758 itr = usb_dev->rawdescriptors[actconfig_idx];
759 itr_size = le16_to_cpu(usb_dev->actconfig->desc.wTotalLength);
760 while (itr_size >= sizeof(*hdr)) {
761 hdr = (struct usb_descriptor_header *) itr;
762 d_printf(3, dev, "Extra device descriptor: "
763 "type %02x/%u bytes @ %zu (%zu left)\n",
764 hdr->bDescriptorType, hdr->bLength,
765 (itr - usb_dev->rawdescriptors[actconfig_idx]),
766 itr_size);
767 if (hdr->bDescriptorType == USB_DT_CS_RADIO_CONTROL)
768 goto found;
769 itr += hdr->bLength;
770 itr_size -= hdr->bLength;
771 }
772 dev_err(dev, "cannot find Radio Control Interface Class descriptor\n");
773 return -ENODEV;
774
775found:
776 result = -EINVAL;
777 if (hdr->bLength > itr_size) { /* is it available? */
778 dev_err(dev, "incomplete Radio Control Interface Class "
779 "descriptor (%zu bytes left, %u needed)\n",
780 itr_size, hdr->bLength);
781 goto error;
782 }
783 if (hdr->bLength < sizeof(*descr)) {
784 dev_err(dev, "short Radio Control Interface Class "
785 "descriptor\n");
786 goto error;
787 }
788 descr = (struct uwb_rc_control_intf_class_desc *) hdr;
789 /* Make LE fields CPU order */
790 version = __le16_to_cpu(descr->bcdRCIVersion);
791 if (version != 0x0100) {
792 dev_err(dev, "Device reports protocol version 0x%04x. We "
793 "do not support that. \n", version);
794 result = -EINVAL;
795 goto error;
796 }
797 rc->version = version;
798 d_printf(3, dev, "Device supports WUSB protocol version 0x%04x \n",
799 rc->version);
800 result = 0;
801error:
802 return result;
803}
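The walk above relies on every USB descriptor starting with a bLength/bDescriptorType header, so the class-specific Radio Control descriptor can be found by stepping through the raw configuration blob. A self-contained sketch of that walk over a byte buffer (the descriptor contents are made up; 0x23 is an arbitrary stand-in type, not USB_DT_CS_RADIO_CONTROL):

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

struct desc_hdr {			/* every USB descriptor starts like this */
	uint8_t bLength;
	uint8_t bDescriptorType;
} __attribute__((packed));

/* Return the offset of the first descriptor of 'type', or -1 if absent. */
static long find_descriptor(const uint8_t *buf, size_t len, uint8_t type)
{
	size_t off = 0;

	while (off + sizeof(struct desc_hdr) <= len) {
		const struct desc_hdr *h = (const void *)(buf + off);

		if (h->bLength < sizeof(*h) || h->bLength > len - off)
			return -1;	/* malformed or truncated */
		if (h->bDescriptorType == type)
			return (long)off;
		off += h->bLength;	/* step to the next descriptor */
	}
	return -1;
}

int main(void)
{
	/* config (9 bytes), interface (9 bytes), then a class-specific one */
	uint8_t raw[] = {
		9, 0x02, 0, 0, 0, 0, 0, 0, 0,
		9, 0x04, 0, 0, 0, 0, 0, 0, 0,
		4, 0x23, 0x00, 0x01,
	};

	printf("found at offset %ld\n",
	       find_descriptor(raw, sizeof(raw), 0x23));
	return 0;
}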
804
805/*
806 * By creating a 'uwb_rc', we have a reference on it -- that reference
807 * is the one we drop when we disconnect.
808 *
809 * No need to switch altsettings; according to WUSB1.0[8.6.1.1], there
810 * is only one altsetting allowed.
811 */
812static int hwarc_probe(struct usb_interface *iface,
813 const struct usb_device_id *id)
814{
815 int result;
816 struct uwb_rc *uwb_rc;
817 struct hwarc *hwarc;
818 struct device *dev = &iface->dev;
819
820 result = -ENOMEM;
821 uwb_rc = uwb_rc_alloc();
822 if (uwb_rc == NULL) {
823 dev_err(dev, "unable to allocate RC instance\n");
824 goto error_rc_alloc;
825 }
826 hwarc = kzalloc(sizeof(*hwarc), GFP_KERNEL);
827 if (hwarc == NULL) {
828 dev_err(dev, "unable to allocate HWA RC instance\n");
829 goto error_alloc;
830 }
831 hwarc_init(hwarc);
832 hwarc->usb_dev = usb_get_dev(interface_to_usbdev(iface));
833 hwarc->usb_iface = usb_get_intf(iface);
834 hwarc->uwb_rc = uwb_rc;
835
836 uwb_rc->owner = THIS_MODULE;
837 uwb_rc->start = hwarc_neep_init;
838 uwb_rc->stop = hwarc_neep_release;
839 uwb_rc->cmd = hwarc_cmd;
840 uwb_rc->reset = hwarc_reset;
841 if (id->driver_info & WUSB_QUIRK_WHCI_CMD_EVT) {
842 uwb_rc->filter_cmd = NULL;
843 uwb_rc->filter_event = NULL;
844 } else {
845 uwb_rc->filter_cmd = hwarc_filter_cmd;
846 uwb_rc->filter_event = hwarc_filter_event;
847 }
848
849 result = uwb_rc_add(uwb_rc, dev, hwarc);
850 if (result < 0)
851 goto error_rc_add;
852 result = hwarc_get_version(uwb_rc);
853 if (result < 0) {
854 dev_err(dev, "cannot retrieve version of RC \n");
855 goto error_get_version;
856 }
857 usb_set_intfdata(iface, hwarc);
858 return 0;
859
860error_get_version:
861 uwb_rc_rm(uwb_rc);
862error_rc_add:
863 usb_put_intf(iface);
864 usb_put_dev(hwarc->usb_dev);
865error_alloc:
866 uwb_rc_put(uwb_rc);
867error_rc_alloc:
868 return result;
869}
870
871static void hwarc_disconnect(struct usb_interface *iface)
872{
873 struct hwarc *hwarc = usb_get_intfdata(iface);
874 struct uwb_rc *uwb_rc = hwarc->uwb_rc;
875
876 usb_set_intfdata(hwarc->usb_iface, NULL);
877 uwb_rc_rm(uwb_rc);
878 usb_put_intf(hwarc->usb_iface);
879 usb_put_dev(hwarc->usb_dev);
880 d_printf(1, &hwarc->usb_iface->dev, "freed hwarc %p\n", hwarc);
881 kfree(hwarc);
882 uwb_rc_put(uwb_rc); /* when creating the device, refcount = 1 */
883}
884
885/** USB device IDs that we handle */
886static struct usb_device_id hwarc_id_table[] = {
887 /* D-Link DUB-1210 */
888 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3d02, 0xe0, 0x01, 0x02),
889 .driver_info = WUSB_QUIRK_WHCI_CMD_EVT },
890 /* Intel i1480 (using firmware 1.3PA2-20070828) */
891 { USB_DEVICE_AND_INTERFACE_INFO(0x8086, 0x0c3b, 0xe0, 0x01, 0x02),
892 .driver_info = WUSB_QUIRK_WHCI_CMD_EVT },
893 /* Generic match for the Radio Control interface */
894 { USB_INTERFACE_INFO(0xe0, 0x01, 0x02), },
895 { },
896};
897MODULE_DEVICE_TABLE(usb, hwarc_id_table);
898
899static struct usb_driver hwarc_driver = {
900 .name = "hwa-rc",
901 .probe = hwarc_probe,
902 .disconnect = hwarc_disconnect,
903 .id_table = hwarc_id_table,
904};
905
906static int __init hwarc_driver_init(void)
907{
908 int result;
909 result = usb_register(&hwarc_driver);
910 if (result < 0)
911 printk(KERN_ERR "HWA-RC: Cannot register USB driver: %d\n",
912 result);
913 return result;
914
915}
916module_init(hwarc_driver_init);
917
918static void __exit hwarc_driver_exit(void)
919{
920 usb_deregister(&hwarc_driver);
921}
922module_exit(hwarc_driver_exit);
923
924MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
925MODULE_DESCRIPTION("Host Wire Adapter Radio Control Driver");
926MODULE_LICENSE("GPL");
diff --git a/drivers/uwb/i1480/Makefile b/drivers/uwb/i1480/Makefile
new file mode 100644
index 000000000000..212bbc7d4c32
--- /dev/null
+++ b/drivers/uwb/i1480/Makefile
@@ -0,0 +1,2 @@
1obj-$(CONFIG_UWB_I1480U) += dfu/ i1480-est.o
2obj-$(CONFIG_UWB_I1480U_WLP) += i1480u-wlp/
diff --git a/drivers/uwb/i1480/dfu/Makefile b/drivers/uwb/i1480/dfu/Makefile
new file mode 100644
index 000000000000..bd1b9f25424c
--- /dev/null
+++ b/drivers/uwb/i1480/dfu/Makefile
@@ -0,0 +1,9 @@
1obj-$(CONFIG_UWB_I1480U) += i1480-dfu-usb.o
2
3i1480-dfu-usb-objs := \
4 dfu.o \
5 mac.o \
6 phy.o \
7 usb.o
8
9
diff --git a/drivers/uwb/i1480/dfu/dfu.c b/drivers/uwb/i1480/dfu/dfu.c
new file mode 100644
index 000000000000..9097b3b30385
--- /dev/null
+++ b/drivers/uwb/i1480/dfu/dfu.c
@@ -0,0 +1,217 @@
1/*
2 * Intel Wireless UWB Link 1480
3 * Main driver
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * Common code for firmware upload used by the USB and PCI version;
24 * i1480_fw_upload() takes a device descriptor and uses the function
25 * pointers it provides to upload firmware and prepare the PHY.
26 *
27 * As well, provides common functions used by the rest of the code.
28 */
29#include "i1480-dfu.h"
30#include <linux/errno.h>
31#include <linux/delay.h>
32#include <linux/pci.h>
33#include <linux/device.h>
34#include <linux/uwb.h>
35#include <linux/random.h>
36
37#define D_LOCAL 0
38#include <linux/uwb/debug.h>
39
40/**
41 * i1480_rceb_check - Check RCEB for expected field values
42 * @i1480: pointer to device for which RCEB is being checked
43 * @rceb: RCEB being checked
44 * @cmd: which command the RCEB is related to
45 * @context: expected context
46 * @expected_type: expected event type
47 * @expected_event: expected event
48 *
49 * If @cmd is NULL, do not print error messages, but still return an error
50 * code.
51 *
52 * Return 0 if @rceb matches the expected values, -EINVAL otherwise.
53 */
54int i1480_rceb_check(const struct i1480 *i1480, const struct uwb_rceb *rceb,
55 const char *cmd, u8 context, u8 expected_type,
56 unsigned expected_event)
57{
58 int result = 0;
59 struct device *dev = i1480->dev;
60 if (rceb->bEventContext != context) {
61 if (cmd)
62 dev_err(dev, "%s: unexpected context id 0x%02x "
63 "(expected 0x%02x)\n", cmd,
64 rceb->bEventContext, context);
65 result = -EINVAL;
66 }
67 if (rceb->bEventType != expected_type) {
68 if (cmd)
69 dev_err(dev, "%s: unexpected event type 0x%02x "
70 "(expected 0x%02x)\n", cmd,
71 rceb->bEventType, expected_type);
72 result = -EINVAL;
73 }
74 if (le16_to_cpu(rceb->wEvent) != expected_event) {
75 if (cmd)
76 dev_err(dev, "%s: unexpected event 0x%04x "
77 "(expected 0x%04x)\n", cmd,
78 le16_to_cpu(rceb->wEvent), expected_event);
79 result = -EINVAL;
80 }
81 return result;
82}
83EXPORT_SYMBOL_GPL(i1480_rceb_check);
84
85
86/**
87 * Execute a Radio Control Command
88 *
89 * Command data has to be in i1480->cmd_buf.
90 *
91 * @returns size of the reply data filled in i1480->evt_buf or < 0 errno
92 * code on error.
93 */
94ssize_t i1480_cmd(struct i1480 *i1480, const char *cmd_name, size_t cmd_size,
95 size_t reply_size)
96{
97 ssize_t result;
98 struct uwb_rceb *reply = i1480->evt_buf;
99 struct uwb_rccb *cmd = i1480->cmd_buf;
100 u16 expected_event = reply->wEvent;
101 u8 expected_type = reply->bEventType;
102 u8 context;
103
104 d_fnstart(3, i1480->dev, "(%p, %s, %zu)\n", i1480, cmd_name, cmd_size);
105 init_completion(&i1480->evt_complete);
106 i1480->evt_result = -EINPROGRESS;
107 do {
108 get_random_bytes(&context, 1);
109 } while (context == 0x00 || context == 0xff);
110 cmd->bCommandContext = context;
111 result = i1480->cmd(i1480, cmd_name, cmd_size);
112 if (result < 0)
113 goto error;
114 /* wait for the callback to report a event was received */
115 result = wait_for_completion_interruptible_timeout(
116 &i1480->evt_complete, HZ);
117 if (result == 0) {
118 result = -ETIMEDOUT;
119 goto error;
120 }
121 if (result < 0)
122 goto error;
123 result = i1480->evt_result;
124 if (result < 0) {
125 dev_err(i1480->dev, "%s: command reply reception failed: %zd\n",
126 cmd_name, result);
127 goto error;
128 }
129 /*
130 * Firmware versions >= 1.4.12224 for IOGear GUWA100U generate a
131 * spurious notification after firmware is downloaded. So check whether
132 * the received RCEB is such a notification before assuming that the
133 * command has failed.
134 */
135 if (i1480_rceb_check(i1480, i1480->evt_buf, NULL,
136 0, 0xfd, 0x0022) == 0) {
137 /* Now wait for the actual RCEB for this command. */
138 result = i1480->wait_init_done(i1480);
139 if (result < 0)
140 goto error;
141 result = i1480->evt_result;
142 }
143 if (result != reply_size) {
144 dev_err(i1480->dev, "%s returned only %zu bytes, %zu expected\n",
145 cmd_name, result, reply_size);
146 result = -EINVAL;
147 goto error;
148 }
149 /* Verify we got the right event in response */
150 result = i1480_rceb_check(i1480, i1480->evt_buf, cmd_name, context,
151 expected_type, expected_event);
152error:
153 d_fnend(3, i1480->dev, "(%p, %s, %zu) = %zd\n",
154 i1480, cmd_name, cmd_size, result);
155 return result;
156}
157EXPORT_SYMBOL_GPL(i1480_cmd);
158
159
160static
161int i1480_print_state(struct i1480 *i1480)
162{
163 int result;
164 u32 *buf = (u32 *) i1480->cmd_buf;
165
166 result = i1480->read(i1480, 0x80080000, 2 * sizeof(*buf));
167 if (result < 0) {
168 dev_err(i1480->dev, "cannot read U & L states: %d\n", result);
169 goto error;
170 }
171 dev_info(i1480->dev, "state U 0x%08x, L 0x%08x\n", buf[0], buf[1]);
172error:
173 return result;
174}
175
176
177/*
178 * PCI probe, firmware uploader
179 *
180 * _mac_fw_upload() will call rc_setup(), which needs an rc_release().
181 */
182int i1480_fw_upload(struct i1480 *i1480)
183{
184 int result;
185
186 result = i1480_pre_fw_upload(i1480); /* PHY pre fw */
187 if (result < 0 && result != -ENOENT) {
188 i1480_print_state(i1480);
189 goto error;
190 }
191 result = i1480_mac_fw_upload(i1480); /* MAC fw */
192 if (result < 0) {
193 if (result == -ENOENT)
194 dev_err(i1480->dev, "Cannot locate MAC FW file '%s'\n",
195 i1480->mac_fw_name);
196 else
197 i1480_print_state(i1480);
198 goto error;
199 }
200 result = i1480_phy_fw_upload(i1480); /* PHY fw */
201 if (result < 0 && result != -ENOENT) {
202 i1480_print_state(i1480);
203 goto error_rc_release;
204 }
205 /*
206 * FIXME: find some reliable way to check whether firmware is running
207 * properly. Maybe use some standard request that has no side effects?
208 */
209 dev_info(i1480->dev, "firmware uploaded successfully\n");
210error_rc_release:
211 if (i1480->rc_release)
212 i1480->rc_release(i1480);
213 result = 0;
214error:
215 return result;
216}
217EXPORT_SYMBOL_GPL(i1480_fw_upload);
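Note how i1480_fw_upload() treats the PRE and PHY images as optional (-ENOENT from their upload step is tolerated) while the MAC image is mandatory. A compact stand-alone sketch of that optional-stage pattern, with stubbed upload functions in place of the real i1480_*_fw_upload() calls:

#include <stdio.h>
#include <errno.h>

/* Stubs: return 0 on success, -ENOENT when the image file is absent. */
static int upload_pre(void) { return -ENOENT; }	/* pretend there is no PRE image */
static int upload_mac(void) { return 0; }
static int upload_phy(void) { return 0; }

static int fw_upload(void)
{
	int result;

	result = upload_pre();			/* optional stage */
	if (result < 0 && result != -ENOENT)
		return result;
	result = upload_mac();			/* mandatory stage */
	if (result < 0)
		return result;
	result = upload_phy();			/* optional stage */
	if (result < 0 && result != -ENOENT)
		return result;
	return 0;
}

int main(void)
{
	printf("fw_upload() = %d\n", fw_upload());
	return 0;
}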
diff --git a/drivers/uwb/i1480/dfu/i1480-dfu.h b/drivers/uwb/i1480/dfu/i1480-dfu.h
new file mode 100644
index 000000000000..46f45e800f36
--- /dev/null
+++ b/drivers/uwb/i1480/dfu/i1480-dfu.h
@@ -0,0 +1,260 @@
1/*
2 * i1480 Device Firmware Upload
3 *
4 * Copyright (C) 2005-2006 Intel Corporation
5 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 *
21 *
22 * This driver is the firmware uploader for the Intel Wireless UWB
23 * Link 1480 device (both in the USB and PCI incarnations).
24 *
25 * The process is quite simple: we stop the device, write the firmware
26 * to its memory and then restart it. Wait for the device to let us
27 * know it is done booting firmware. Ready.
28 *
29 * We might have to upload before or after a phy firmware (which might
30 * be done in one of two ways, using a normal firmware image or through
31 * the MPI port).
32 *
33 * Because USB and PCI use common methods, we just make ops out of the
34 * common operations (read, write, wait_init_done and cmd) and
35 * implement them in usb.c and pci.c.
36 *
37 * The flow is (some parts omitted):
38 *
39 * i1480_{usb,pci}_probe() On enumerate/discovery
40 * i1480_fw_upload()
41 * i1480_pre_fw_upload()
42 * __mac_fw_upload()
43 * fw_hdrs_load()
44 * mac_fw_hdrs_push()
45 * i1480->write() [i1480_{usb,pci}_write()]
46 * i1480_fw_cmp()
47 * i1480->read() [i1480_{usb,pci}_read()]
48 * i1480_mac_fw_upload()
49 * __mac_fw_upload()
50 * i1480->rc_setup()
51 * i1480->wait_init_done()
52 * i1480_cmd_reset()
53 * i1480->cmd() [i1480_{usb,pci}_cmd()]
54 * ...
55 * i1480_phy_fw_upload()
56 * request_firmware()
57 * i1480_mpi_write()
58 * i1480->cmd() [i1480_{usb,pci}_cmd()]
59 *
60 * Once the probe function enumerates the device and uploads the
61 * firmware, we just exit with -ENODEV, as we don't really want to
62 * attach to the device.
63 */
64#ifndef __i1480_DFU_H__
65#define __i1480_DFU_H__
66
67#include <linux/uwb/spec.h>
68#include <linux/types.h>
69#include <linux/completion.h>
70
71#define i1480_FW_UPLOAD_MODE_MASK (cpu_to_le32(0x00000018))
72
73#if i1480_FW > 0x00000302
74#define i1480_RCEB_EXTENDED
75#endif
76
77struct uwb_rccb;
78struct uwb_rceb;
79
80/*
81 * Common firmware upload handlers
82 *
83 * Normally you embed this struct in another one specific to your hw.
84 *
85 * @write Write to device's memory from buffer.
86 * @read Read from device's memory to i1480->evt_buf.
87 * @rc_setup Set up the device after the basic firmware is uploaded
88 * @wait_init_done
89 * Wait for the device to send a notification saying init
90 * is done.
91 * @cmd FOP for issuing the command to the hardware. The
92 * command data is contained in i1480->cmd_buf and the size
93 * is supplied as an argument. The command replied is put
94 * in i1480->evt_buf and the size in i1480->evt_result (or if
95 * an error, a < 0 errno code).
96 *
97 * @cmd_buf Memory buffer used to send commands to the device.
98 * Allocated by the upper layers i1480_fw_upload().
99 * Size has to be @buf_size.
100 * @evt_buf Memory buffer used to place the async notifications
101 * received by the hw. Allocated by the upper layers
102 * i1480_fw_upload().
103 * Size has to be @buf_size.
104 * @evt_complete
105 * Low level driver uses this to notify code waiting for
106 * an event that the event has arrived and data is in
107 * i1480->evt_buf (and size/result in i1480->evt_result).
108 * @hw_rev
109 * Use this value to activate dfu code to support new revisions
110 * of hardware. i1480_init() sets this to a default value.
111 * It should be updated by the USB and PCI code.
112 */
113struct i1480 {
114 struct device *dev;
115
116 int (*write)(struct i1480 *, u32 addr, const void *, size_t);
117 int (*read)(struct i1480 *, u32 addr, size_t);
118 int (*rc_setup)(struct i1480 *);
119 void (*rc_release)(struct i1480 *);
120 int (*wait_init_done)(struct i1480 *);
121 int (*cmd)(struct i1480 *, const char *cmd_name, size_t cmd_size);
122 const char *pre_fw_name;
123 const char *mac_fw_name;
124 const char *mac_fw_name_deprecate; /* FIXME: Will go away */
125 const char *phy_fw_name;
126 u8 hw_rev;
127
128 size_t buf_size; /* size of both evt_buf and cmd_buf */
129 void *evt_buf, *cmd_buf;
130 ssize_t evt_result;
131 struct completion evt_complete;
132};
133
134static inline
135void i1480_init(struct i1480 *i1480)
136{
137 i1480->hw_rev = 1;
138 init_completion(&i1480->evt_complete);
139}
140
141extern int i1480_fw_upload(struct i1480 *);
142extern int i1480_pre_fw_upload(struct i1480 *);
143extern int i1480_mac_fw_upload(struct i1480 *);
144extern int i1480_phy_fw_upload(struct i1480 *);
145extern ssize_t i1480_cmd(struct i1480 *, const char *, size_t, size_t);
146extern int i1480_rceb_check(const struct i1480 *,
147 const struct uwb_rceb *, const char *, u8,
148 u8, unsigned);
149
150enum {
151 /* Vendor specific command type */
152 i1480_CET_VS1 = 0xfd,
153 /* i1480 commands */
154 i1480_CMD_SET_IP_MAS = 0x000e,
155 i1480_CMD_GET_MAC_PHY_INFO = 0x0003,
156 i1480_CMD_MPI_WRITE = 0x000f,
157 i1480_CMD_MPI_READ = 0x0010,
158 /* i1480 events */
159#if i1480_FW > 0x00000302
160 i1480_EVT_CONFIRM = 0x0002,
161 i1480_EVT_RM_INIT_DONE = 0x0101,
162 i1480_EVT_DEV_ADD = 0x0103,
163 i1480_EVT_DEV_RM = 0x0104,
164 i1480_EVT_DEV_ID_CHANGE = 0x0105,
165 i1480_EVT_GET_MAC_PHY_INFO = i1480_CMD_GET_MAC_PHY_INFO,
166#else
167 i1480_EVT_CONFIRM = 0x0002,
168 i1480_EVT_RM_INIT_DONE = 0x0101,
169 i1480_EVT_DEV_ADD = 0x0103,
170 i1480_EVT_DEV_RM = 0x0104,
171 i1480_EVT_DEV_ID_CHANGE = 0x0105,
172 i1480_EVT_GET_MAC_PHY_INFO = i1480_EVT_CONFIRM,
173#endif
174};
175
176
177struct i1480_evt_confirm {
178 struct uwb_rceb rceb;
179#ifdef i1480_RCEB_EXTENDED
180 __le16 wParamLength;
181#endif
182 u8 bResultCode;
183} __attribute__((packed));
184
185
186struct i1480_rceb {
187 struct uwb_rceb rceb;
188#ifdef i1480_RCEB_EXTENDED
189 __le16 wParamLength;
190#endif
191} __attribute__((packed));
192
193
194/**
195 * Get MAC & PHY Information confirm event structure
196 *
197 * Confirm event returned by the command.
198 */
199struct i1480_evt_confirm_GMPI {
200#if i1480_FW > 0x00000302
201 struct uwb_rceb rceb;
202 __le16 wParamLength;
203 __le16 status;
204 u8 mac_addr[6]; /* EUI-64 bit IEEE address [still 8 bytes?] */
205 u8 dev_addr[2];
206 __le16 mac_fw_rev; /* major = v >> 8; minor = v & 0xff */
207 u8 hw_rev;
208 u8 phy_vendor;
209 u8 phy_rev; /* major = v >> 8; minor = v & 0xff */
210 __le16 mac_caps;
211 u8 phy_caps[3];
212 u8 key_stores;
213 __le16 mcast_addr_stores;
214 u8 sec_mode_supported;
215#else
216 struct uwb_rceb rceb;
217 u8 status;
218 u8 mac_addr[8]; /* EUI-64 bit IEEE address [still 8 bytes?] */
219 u8 dev_addr[2];
220 __le16 mac_fw_rev; /* major = v >> 8; minor = v & 0xff */
221 __le16 phy_fw_rev; /* major = v >> 8; minor = v & 0xff */
222 __le16 mac_caps;
223 u8 phy_caps;
224 u8 key_stores;
225 __le16 mcast_addr_stores;
226 u8 sec_mode_supported;
227#endif
228} __attribute__((packed));
229
230
231struct i1480_cmd_mpi_write {
232 struct uwb_rccb rccb;
233 __le16 size;
234 u8 data[];
235};
236
237
238struct i1480_cmd_mpi_read {
239 struct uwb_rccb rccb;
240 __le16 size;
241 struct {
242 u8 page, offset;
243 } __attribute__((packed)) data[];
244} __attribute__((packed));
245
246
247struct i1480_evt_mpi_read {
248 struct uwb_rceb rceb;
249#ifdef i1480_RCEB_EXTENDED
250 __le16 wParamLength;
251#endif
252 u8 bResultCode;
253 __le16 size;
254 struct {
255 u8 page, offset, value;
256 } __attribute__((packed)) data[];
257} __attribute__((packed));
258
259
260#endif /* #ifndef __i1480_DFU_H__ */
diff --git a/drivers/uwb/i1480/dfu/mac.c b/drivers/uwb/i1480/dfu/mac.c
new file mode 100644
index 000000000000..2e4d8f07c165
--- /dev/null
+++ b/drivers/uwb/i1480/dfu/mac.c
@@ -0,0 +1,527 @@
1/*
2 * Intel Wireless UWB Link 1480
3 * MAC Firmware upload implementation
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * Implementation of the code for parsing the firmware file (extract
24 * the headers and binary code chunks) in the fw_*() functions. The
25 * code to upload pre and mac firmwares is the same, so it uses a
26 * common entry point in __mac_fw_upload(), which uses the i1480
27 * function pointers to push the firmware to the device.
28 */
29#include <linux/delay.h>
30#include <linux/firmware.h>
31#include <linux/uwb.h>
32#include "i1480-dfu.h"
33
34#define D_LOCAL 0
35#include <linux/uwb/debug.h>
36
37/*
38 * Descriptor for a continuous segment of MAC fw data
39 */
40struct fw_hdr {
41 unsigned long address;
42 size_t length;
43 const u32 *bin;
44 struct fw_hdr *next;
45};
46
47
48/* Free a chain of firmware headers */
49static
50void fw_hdrs_free(struct fw_hdr *hdr)
51{
52 struct fw_hdr *next;
53
54 while (hdr) {
55 next = hdr->next;
56 kfree(hdr);
57 hdr = next;
58 }
59}
60
61
62/* Fill a firmware header descriptor from a memory buffer */
63static
64int fw_hdr_load(struct i1480 *i1480, struct fw_hdr *hdr, unsigned hdr_cnt,
65 const char *_data, const u32 *data_itr, const u32 *data_top)
66{
67 size_t hdr_offset = (const char *) data_itr - _data;
68 size_t remaining_size = (void *) data_top - (void *) data_itr;
69 if (data_itr + 2 > data_top) {
70 dev_err(i1480->dev, "fw hdr #%u/%zu: EOF reached in header at "
71 "offset %zu, limit %zu\n",
72 hdr_cnt, hdr_offset,
73 (const char *) data_itr + 2 - _data,
74 (const char *) data_top - _data);
75 return -EINVAL;
76 }
77 hdr->next = NULL;
78 hdr->address = le32_to_cpu(*data_itr++);
79 hdr->length = le32_to_cpu(*data_itr++);
80 hdr->bin = data_itr;
81 if (hdr->length > remaining_size) {
82 dev_err(i1480->dev, "fw hdr #%u/%zu: EOF reached in data; "
83 "chunk too long (%zu bytes), only %zu left\n",
84 hdr_cnt, hdr_offset, hdr->length, remaining_size);
85 return -EINVAL;
86 }
87 return 0;
88}
89
90
91/**
92 * Get a buffer where the firmware is supposed to be and create a
93 * chain of headers linking them together.
94 *
95 * @phdr: where to place the pointer to the first header (headers link
96 * to the next via the @hdr->next ptr); need to free the whole
97 * chain when done.
98 *
99 * @_data: Pointer to the data buffer.
100 *
101 * @_data_size: Size of the data buffer (bytes); data size has to be a
102 * multiple of 4. Function will fail if not.
103 *
104 * Goes over the whole binary blob; reads the first chunk and creates
105 * a fw hdr from it (which points to where the data is in @_data and
106 * the length of the chunk); then goes on to the next chunk until
107 * done. Each header is linked to the next.
108 */
109static
110int fw_hdrs_load(struct i1480 *i1480, struct fw_hdr **phdr,
111 const char *_data, size_t data_size)
112{
113 int result;
114 unsigned hdr_cnt = 0;
115 u32 *data = (u32 *) _data, *data_itr, *data_top;
116 struct fw_hdr *hdr, **prev_hdr = phdr;
117
118 result = -EINVAL;
119 /* Check size is ok and pointer is aligned */
120 if (data_size % sizeof(u32) != 0)
121 goto error;
122 if ((unsigned long) _data % sizeof(u16) != 0)
123 goto error;
124 *phdr = NULL;
125 data_itr = data;
126 data_top = (u32 *) (_data + data_size);
127 while (data_itr < data_top) {
128 result = -ENOMEM;
129 hdr = kmalloc(sizeof(*hdr), GFP_KERNEL);
130 if (hdr == NULL) {
131 dev_err(i1480->dev, "Cannot allocate fw header "
132 "for chunk #%u\n", hdr_cnt);
133 goto error_alloc;
134 }
135 result = fw_hdr_load(i1480, hdr, hdr_cnt,
136 _data, data_itr, data_top);
137 if (result < 0)
138 goto error_load;
139 data_itr += 2 + hdr->length;
140 *prev_hdr = hdr;
141 prev_hdr = &hdr->next;
142 hdr_cnt++;
143 }
144 *prev_hdr = NULL;
145 return 0;
146
147error_load:
148 kfree(hdr);
149error_alloc:
150 fw_hdrs_free(*phdr);
151error:
152 return result;
153}
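The firmware image parsed above is a flat sequence of (address, word count, data words) records, all 32-bit little-endian. A self-contained sketch of walking that framing, independent of the kernel's struct fw_hdr (the sample blob is made up):

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

/* Walk a blob of [address][word count][word count x 32-bit words] records. */
static int walk_chunks(const uint8_t *blob, size_t size)
{
	size_t off = 0, n = 0;

	if (size % 4)
		return -1;			/* must be whole 32-bit words */
	while (off + 8 <= size) {
		uint32_t addr = get_le32(blob + off);
		uint32_t words = get_le32(blob + off + 4);

		if ((size - off - 8) / 4 < words)
			return -1;		/* chunk overruns the blob */
		printf("chunk %zu: %u words @ 0x%08x\n",
		       n++, (unsigned)words, (unsigned)addr);
		off += 8 + (size_t)words * 4;
	}
	return off == size ? 0 : -1;
}

int main(void)
{
	uint8_t blob[] = {
		0x00, 0x00, 0x00, 0x80,	/* address 0x80000000 (LE) */
		0x01, 0x00, 0x00, 0x00,	/* one data word follows */
		0xef, 0xbe, 0xad, 0xde,	/* the data word */
	};

	return walk_chunks(blob, sizeof(blob));
}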
154
155
156/**
157 * Compares a chunk of fw with the copy in the device's memory
158 *
159 * @i1480: Device instance
160 * @hdr: Pointer to the firmware chunk
161 * @returns: 0 if equal, < 0 errno on error. If > 0, it is the offset
162 * where the difference was found (plus one).
163 *
164 * Kind of dirty and simplistic, but does the trick in both the PCI
165 * and USB version. We do a quick[er] memcmp(), and if it fails, we do
166 * a byte-by-byte to find the offset.
167 */
168static
169ssize_t i1480_fw_cmp(struct i1480 *i1480, struct fw_hdr *hdr)
170{
171 ssize_t result = 0;
172 u32 src_itr = 0, cnt;
173 size_t size = hdr->length*sizeof(hdr->bin[0]);
174 size_t chunk_size;
175 u8 *bin = (u8 *) hdr->bin;
176
177 while (size > 0) {
178 chunk_size = size < i1480->buf_size ? size : i1480->buf_size;
179 result = i1480->read(i1480, hdr->address + src_itr, chunk_size);
180 if (result < 0) {
181 dev_err(i1480->dev, "error reading for verification: "
182 "%zd\n", result);
183 goto error;
184 }
185 if (memcmp(i1480->cmd_buf, bin + src_itr, result)) {
186 u8 *buf = i1480->cmd_buf;
187 d_printf(2, i1480->dev,
188 "original data @ %p + %u, %zu bytes\n",
189 bin, src_itr, result);
190 d_dump(4, i1480->dev, bin + src_itr, result);
191 for (cnt = 0; cnt < result; cnt++)
192 if (bin[src_itr + cnt] != buf[cnt]) {
193 dev_err(i1480->dev, "byte failed at "
194 "src_itr %u cnt %u [0x%02x "
195 "vs 0x%02x]\n", src_itr, cnt,
196 bin[src_itr + cnt], buf[cnt]);
197 result = src_itr + cnt + 1;
198 goto cmp_failed;
199 }
200 }
201 src_itr += result;
202 size -= result;
203 }
204 result = 0;
205error:
206cmp_failed:
207 return result;
208}
209
210
211/**
212 * Writes firmware headers to the device.
213 *
214 * @i1480: Device instance
215 * @hdr: Processed firmware
216 * @returns: 0 if ok, < 0 errno on error.
217 */
218static
219int mac_fw_hdrs_push(struct i1480 *i1480, struct fw_hdr *hdr,
220 const char *fw_name, const char *fw_tag)
221{
222 struct device *dev = i1480->dev;
223 ssize_t result = 0;
224 struct fw_hdr *hdr_itr;
225 int verif_retry_count;
226
227 d_fnstart(3, dev, "(%p, %p)\n", i1480, hdr);
228 /* Now, header by header, push them to the hw */
229 for (hdr_itr = hdr; hdr_itr != NULL; hdr_itr = hdr_itr->next) {
230 verif_retry_count = 0;
231retry:
232 dev_dbg(dev, "fw chunk (%zu @ 0x%08lx)\n",
233 hdr_itr->length * sizeof(hdr_itr->bin[0]),
234 hdr_itr->address);
235 result = i1480->write(i1480, hdr_itr->address, hdr_itr->bin,
236 hdr_itr->length*sizeof(hdr_itr->bin[0]));
237 if (result < 0) {
238 dev_err(dev, "%s fw '%s': write failed (%zuB @ 0x%lx):"
239 " %zd\n", fw_tag, fw_name,
240 hdr_itr->length * sizeof(hdr_itr->bin[0]),
241 hdr_itr->address, result);
242 break;
243 }
244 result = i1480_fw_cmp(i1480, hdr_itr);
245 if (result < 0) {
246 dev_err(dev, "%s fw '%s': verification read "
247 "failed (%zuB @ 0x%lx): %zd\n",
248 fw_tag, fw_name,
249 hdr_itr->length * sizeof(hdr_itr->bin[0]),
250 hdr_itr->address, result);
251 break;
252 }
253 if (result > 0) { /* Offset where it failed + 1 */
254 result--;
255 dev_err(dev, "%s fw '%s': WARNING: verification "
256 "failed at 0x%lx: retrying\n",
257 fw_tag, fw_name, hdr_itr->address + result);
258 if (++verif_retry_count < 3)
259 goto retry; /* write this block again! */
260 dev_err(dev, "%s fw '%s': verification failed at 0x%lx: "
261 "tried %d times\n", fw_tag, fw_name,
262 hdr_itr->address + result, verif_retry_count);
263 result = -EINVAL;
264 break;
265 }
266 }
267 d_fnend(3, dev, "(%zd)\n", result);
268 return result;
269}
270
271
272/** Puts the device in firmware upload mode.*/
273static
274int mac_fw_upload_enable(struct i1480 *i1480)
275{
276 int result;
277 u32 reg = 0x800000c0;
278 u32 *buffer = (u32 *)i1480->cmd_buf;
279
280 if (i1480->hw_rev > 1)
281 reg = 0x8000d0d4;
282 result = i1480->read(i1480, reg, sizeof(u32));
283 if (result < 0)
284 goto error_cmd;
285 *buffer &= ~i1480_FW_UPLOAD_MODE_MASK;
286 result = i1480->write(i1480, reg, buffer, sizeof(u32));
287 if (result < 0)
288 goto error_cmd;
289 return 0;
290error_cmd:
291 dev_err(i1480->dev, "can't enable fw upload mode: %d\n", result);
292 return result;
293}
294
295
296/** Gets the device out of firmware upload mode. */
297static
298int mac_fw_upload_disable(struct i1480 *i1480)
299{
300 int result;
301 u32 reg = 0x800000c0;
302 u32 *buffer = (u32 *)i1480->cmd_buf;
303
304 if (i1480->hw_rev > 1)
305 reg = 0x8000d0d4;
306 result = i1480->read(i1480, reg, sizeof(u32));
307 if (result < 0)
308 goto error_cmd;
309 *buffer |= i1480_FW_UPLOAD_MODE_MASK;
310 result = i1480->write(i1480, reg, buffer, sizeof(u32));
311 if (result < 0)
312 goto error_cmd;
313 return 0;
314error_cmd:
315 dev_err(i1480->dev, "can't disable fw upload mode: %d\n", result);
316 return result;
317}
318
319
320
321/**
322 * Generic function for uploading a MAC firmware.
323 *
324 * @i1480: Device instance
325 * @fw_name: Name of firmware file to upload.
326 * @fw_tag: Name of the firmware type (for messages)
327 * [eg: MAC, PRE]
330 * @returns: 0 if ok, < 0 errno on error.
331 */
332static
333int __mac_fw_upload(struct i1480 *i1480, const char *fw_name,
334 const char *fw_tag)
335{
336 int result;
337 const struct firmware *fw;
338 struct fw_hdr *fw_hdrs;
339
340 d_fnstart(3, i1480->dev, "(%p, %s, %s)\n", i1480, fw_name, fw_tag);
341 result = request_firmware(&fw, fw_name, i1480->dev);
342 if (result < 0) /* Up to caller to complain on -ENOENT */
343 goto out;
344 d_printf(3, i1480->dev, "%s fw '%s': uploading\n", fw_tag, fw_name);
345 result = fw_hdrs_load(i1480, &fw_hdrs, fw->data, fw->size);
346 if (result < 0) {
347 dev_err(i1480->dev, "%s fw '%s': failed to parse firmware "
348 "file: %d\n", fw_tag, fw_name, result);
349 goto out_release;
350 }
351 result = mac_fw_upload_enable(i1480);
352 if (result < 0)
353 goto out_hdrs_release;
354 result = mac_fw_hdrs_push(i1480, fw_hdrs, fw_name, fw_tag);
355 mac_fw_upload_disable(i1480);
356out_hdrs_release:
357 if (result >= 0)
358 dev_info(i1480->dev, "%s fw '%s': uploaded\n", fw_tag, fw_name);
359 else
360 dev_err(i1480->dev, "%s fw '%s': failed to upload (%d), "
361 "power cycle device\n", fw_tag, fw_name, result);
362 fw_hdrs_free(fw_hdrs);
363out_release:
364 release_firmware(fw);
365out:
366 d_fnend(3, i1480->dev, "(%p, %s, %s) = %d\n", i1480, fw_name, fw_tag,
367 result);
368 return result;
369}
370
371
372/**
373 * Upload a pre-PHY firmware
374 *
375 */
376int i1480_pre_fw_upload(struct i1480 *i1480)
377{
378 int result;
379 result = __mac_fw_upload(i1480, i1480->pre_fw_name, "PRE");
380 if (result == 0)
381 msleep(400);
382 return result;
383}
384
385
386/**
387 * Reset the MAC and PHY
388 *
389 * @i1480: Device's instance
390 * @returns: 0 if ok, < 0 errno code on error
391 *
392 * We put the command on kmalloc'ed memory as some arches cannot do
393 * USB from the stack. The reply event is copied from a staging buffer,
394 * so it can live on the stack. See WUSB1.0[8.6.2.4] for more details.
395 *
396 * We issue the reset to make sure the UWB controller reinits the PHY;
397 * this way we can know if the PHY init went OK.
398 */
399static
400int i1480_cmd_reset(struct i1480 *i1480)
401{
402 int result;
403 struct uwb_rccb *cmd = (void *) i1480->cmd_buf;
404 struct i1480_evt_reset {
405 struct uwb_rceb rceb;
406 u8 bResultCode;
407 } __attribute__((packed)) *reply = (void *) i1480->evt_buf;
408
409 result = -ENOMEM;
410 cmd->bCommandType = UWB_RC_CET_GENERAL;
411 cmd->wCommand = cpu_to_le16(UWB_RC_CMD_RESET);
412 reply->rceb.bEventType = UWB_RC_CET_GENERAL;
413 reply->rceb.wEvent = UWB_RC_CMD_RESET;
414 result = i1480_cmd(i1480, "RESET", sizeof(*cmd), sizeof(*reply));
415 if (result < 0)
416 goto out;
417 if (reply->bResultCode != UWB_RC_RES_SUCCESS) {
418 dev_err(i1480->dev, "RESET: command execution failed: %u\n",
419 reply->bResultCode);
420 result = -EIO;
421 }
422out:
423 return result;
424
425}
426
427
428/* Wait for the MAC FW to start running */
429static
430int i1480_fw_is_running_q(struct i1480 *i1480)
431{
432 int cnt = 0;
433 int result;
434 u32 *val = (u32 *) i1480->cmd_buf;
435
436 d_fnstart(3, i1480->dev, "(i1480 %p)\n", i1480);
437 for (cnt = 0; cnt < 10; cnt++) {
438 msleep(100);
439 result = i1480->read(i1480, 0x80080000, 4);
440 if (result < 0) {
441			dev_err(i1480->dev, "Can't read 0x80080000: %d\n", result);
442 goto out;
443 }
444 if (*val == 0x55555555UL) /* fw running? cool */
445 goto out;
446 }
447 dev_err(i1480->dev, "Timed out waiting for fw to start\n");
448 result = -ETIMEDOUT;
449out:
450 d_fnend(3, i1480->dev, "(i1480 %p) = %d\n", i1480, result);
451 return result;
452
453}
454
455
456/**
457 * Upload MAC firmware, wait for it to start
458 *
459 * @i1480: Device instance
460 * @fw_name: Name of the file that contains the firmware
461 *
462 * This has to be called after the pre fw has been uploaded (if
463 * there is any).
464 */
465int i1480_mac_fw_upload(struct i1480 *i1480)
466{
467 int result = 0, deprecated_name = 0;
468 struct i1480_rceb *rcebe = (void *) i1480->evt_buf;
469
470 d_fnstart(3, i1480->dev, "(%p)\n", i1480);
471 result = __mac_fw_upload(i1480, i1480->mac_fw_name, "MAC");
472 if (result == -ENOENT) {
473 result = __mac_fw_upload(i1480, i1480->mac_fw_name_deprecate,
474 "MAC");
475 deprecated_name = 1;
476 }
477 if (result < 0)
478 return result;
479 if (deprecated_name == 1)
480 dev_warn(i1480->dev,
481 "WARNING: firmware file name %s is deprecated, "
482 "please rename to %s\n",
483 i1480->mac_fw_name_deprecate, i1480->mac_fw_name);
484 result = i1480_fw_is_running_q(i1480);
485 if (result < 0)
486 goto error_fw_not_running;
487 result = i1480->rc_setup ? i1480->rc_setup(i1480) : 0;
488 if (result < 0) {
489 dev_err(i1480->dev, "Cannot setup after MAC fw upload: %d\n",
490 result);
491 goto error_setup;
492 }
493 result = i1480->wait_init_done(i1480); /* wait init'on */
494 if (result < 0) {
495 dev_err(i1480->dev, "MAC fw '%s': Initialization timed out "
496 "(%d)\n", i1480->mac_fw_name, result);
497 goto error_init_timeout;
498 }
499	/* verify we got the right initialization done event */
500	result = -EIO;
501	if (i1480->evt_result != sizeof(*rcebe)) {
502		dev_err(i1480->dev, "MAC fw '%s': initialization event returns "
503			"wrong size (%zu bytes vs %zu needed)\n",
504			i1480->mac_fw_name, i1480->evt_result, sizeof(*rcebe));
505		dump_bytes(i1480->dev, rcebe, min(i1480->evt_result, (ssize_t)32));
506		goto error_size;
507	}
508 if (i1480_rceb_check(i1480, &rcebe->rceb, NULL, 0, i1480_CET_VS1,
509 i1480_EVT_RM_INIT_DONE) < 0) {
510 dev_err(i1480->dev, "wrong initialization event 0x%02x/%04x/%02x "
511 "received; expected 0x%02x/%04x/00\n",
512 rcebe->rceb.bEventType, le16_to_cpu(rcebe->rceb.wEvent),
513 rcebe->rceb.bEventContext, i1480_CET_VS1,
514 i1480_EVT_RM_INIT_DONE);
515 goto error_init_timeout;
516 }
517 result = i1480_cmd_reset(i1480);
518 if (result < 0)
519 dev_err(i1480->dev, "MAC fw '%s': MBOA reset failed (%d)\n",
520 i1480->mac_fw_name, result);
521error_fw_not_running:
522error_init_timeout:
523error_size:
524error_setup:
525 d_fnend(3, i1480->dev, "(i1480 %p) = %d\n", i1480, result);
526 return result;
527}
diff --git a/drivers/uwb/i1480/dfu/phy.c b/drivers/uwb/i1480/dfu/phy.c
new file mode 100644
index 000000000000..3b1a87de8e63
--- /dev/null
+++ b/drivers/uwb/i1480/dfu/phy.c
@@ -0,0 +1,203 @@
1/*
2 * Intel Wireless UWB Link 1480
3 * PHY parameters upload
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * Code for uploading the PHY parameters to the PHY through the UWB
24 * Radio Control interface.
25 *
26 * We just send the data through the MPI interface using HWA-like
27 * commands and then reset the PHY to make sure it is ok.
28 */
29#include <linux/delay.h>
30#include <linux/device.h>
31#include <linux/firmware.h>
32#include <linux/usb/wusb.h>
33#include "i1480-dfu.h"
34
35
36/**
37 * Write a value array to an address of the MPI interface
38 *
39 * @i1480: Device descriptor
40 * @data: Data array to write
41 * @size: Size of the data array
42 * @returns: 0 if ok, < 0 errno code on error.
43 *
44 * The data array is organized into pairs:
45 *
46 * ADDRESS VALUE
47 *
48 * ADDRESS is BE 16 bit unsigned, VALUE 8 bit unsigned. Size thus has
49 * to be a multiple of three.
50 */
51static
52int i1480_mpi_write(struct i1480 *i1480, const void *data, size_t size)
53{
54 int result;
55 struct i1480_cmd_mpi_write *cmd = i1480->cmd_buf;
56 struct i1480_evt_confirm *reply = i1480->evt_buf;
57
58 BUG_ON(size > 480);
59 result = -ENOMEM;
60 cmd->rccb.bCommandType = i1480_CET_VS1;
61 cmd->rccb.wCommand = cpu_to_le16(i1480_CMD_MPI_WRITE);
62 cmd->size = cpu_to_le16(size);
63 memcpy(cmd->data, data, size);
64 reply->rceb.bEventType = i1480_CET_VS1;
65 reply->rceb.wEvent = i1480_CMD_MPI_WRITE;
66 result = i1480_cmd(i1480, "MPI-WRITE", sizeof(*cmd) + size, sizeof(*reply));
67 if (result < 0)
68 goto out;
69 if (reply->bResultCode != UWB_RC_RES_SUCCESS) {
70 dev_err(i1480->dev, "MPI-WRITE: command execution failed: %d\n",
71 reply->bResultCode);
72 result = -EIO;
73 }
74out:
75 return result;
76}
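/*
 * Illustrative sketch (not part of this driver): how a caller could lay out
 * the ADDRESS/VALUE triads that i1480_mpi_write() expects. Each entry is a
 * big-endian 16 bit MPI address followed by an 8 bit value, so the buffer
 * below is 6 bytes (two triads). The addresses and values are made up for
 * the example.
 */
static const u8 example_mpi_triads[] = {
	0x00, 0x06, 0x12,	/* write 0x12 to MPI address 0x0006 */
	0x01, 0x00, 0x80,	/* write 0x80 to MPI address 0x0100 */
};
/* result = i1480_mpi_write(i1480, example_mpi_triads, sizeof(example_mpi_triads)); */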
77
78
79/**
80 * Read a value array from an address of the MPI interface
81 *
82 * @i1480: Device descriptor
83 * @data: where to place the read array
84 * @srcaddr: Where to read from
85 * @size: Size of the data read array
86 * @returns: 0 if ok, < 0 errno code on error.
87 *
88 * The command data array is organized into pairs ADDR0 ADDR1..., and
89 * the returned data in ADDR0 VALUE0 ADDR1 VALUE1...
90 *
91 * We generate the command array to be a sequential read and then
92 * rearrange the result.
93 *
94 * We use the i1480->cmd_buf for the command, i1480->evt_buf for the reply.
95 *
96 * As the reply has to fit in 512 bytes (i1480->evt_buf), the max number
97 * of values we can read is (512 - sizeof(*reply)) / 3.
98 */
99static
100int i1480_mpi_read(struct i1480 *i1480, u8 *data, u16 srcaddr, size_t size)
101{
102 int result;
103 struct i1480_cmd_mpi_read *cmd = i1480->cmd_buf;
104 struct i1480_evt_mpi_read *reply = i1480->evt_buf;
105 unsigned cnt;
106
107 memset(i1480->cmd_buf, 0x69, 512);
108 memset(i1480->evt_buf, 0x69, 512);
109
110 BUG_ON(size > (i1480->buf_size - sizeof(*reply)) / 3);
111 result = -ENOMEM;
112 cmd->rccb.bCommandType = i1480_CET_VS1;
113 cmd->rccb.wCommand = cpu_to_le16(i1480_CMD_MPI_READ);
114 cmd->size = cpu_to_le16(3*size);
115 for (cnt = 0; cnt < size; cnt++) {
116 cmd->data[cnt].page = (srcaddr + cnt) >> 8;
117 cmd->data[cnt].offset = (srcaddr + cnt) & 0xff;
118 }
119 reply->rceb.bEventType = i1480_CET_VS1;
120 reply->rceb.wEvent = i1480_CMD_MPI_READ;
121 result = i1480_cmd(i1480, "MPI-READ", sizeof(*cmd) + 2*size,
122 sizeof(*reply) + 3*size);
123 if (result < 0)
124 goto out;
125 if (reply->bResultCode != UWB_RC_RES_SUCCESS) {
126 dev_err(i1480->dev, "MPI-READ: command execution failed: %d\n",
127 reply->bResultCode);
128 result = -EIO;
129 }
130 for (cnt = 0; cnt < size; cnt++) {
131 if (reply->data[cnt].page != (srcaddr + cnt) >> 8)
132 dev_err(i1480->dev, "MPI-READ: page inconsistency at "
133 "index %u: expected 0x%02x, got 0x%02x\n", cnt,
134 (srcaddr + cnt) >> 8, reply->data[cnt].page);
135 if (reply->data[cnt].offset != ((srcaddr + cnt) & 0x00ff))
136 dev_err(i1480->dev, "MPI-READ: offset inconsistency at "
137 "index %u: expected 0x%02x, got 0x%02x\n", cnt,
138 (srcaddr + cnt) & 0x00ff,
139 reply->data[cnt].offset);
140 data[cnt] = reply->data[cnt].value;
141 }
142 result = 0;
143out:
144 return result;
145}
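/*
 * Illustrative sketch (not part of this driver): reading two consecutive MPI
 * registers starting at address 0x0100 (the address is made up for the
 * example). The command carries only the addresses (2*size bytes), the reply
 * carries address/value triads (3*size bytes), and the caller's buffer
 * receives just the VALUE bytes; the page/offset echoes are checked inside
 * i1480_mpi_read().
 */
static int example_mpi_read_two(struct i1480 *i1480, u8 vals[2])
{
	return i1480_mpi_read(i1480, vals, 0x0100, 2);
}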
146
147
148/**
149 * Upload a PHY firmware, wait for it to start
150 *
151 * @i1480: Device instance
152 * @fw_name: Name of the file that contains the firmware
153 *
154 * We assume the MAC fw is up and running. This means we can use the
155 * MPI interface to write the PHY firmware. Once done, we issue an
156 * MBOA Reset, which will force the MAC to reset and reinitialize the
157 * PHY. If that works, we are ready to go.
158 *
159 * Max packet size for the MPI write is 512, so the max buffer is 480
160 * (which gives us 160 three-byte triads of MSB, LSB and VAL for the data).
161 */
162int i1480_phy_fw_upload(struct i1480 *i1480)
163{
164 int result;
165 const struct firmware *fw;
166 const char *data_itr, *data_top;
167 const size_t MAX_BLK_SIZE = 480; /* 160 triads */
168 size_t data_size;
169 u8 phy_stat;
170
171 result = request_firmware(&fw, i1480->phy_fw_name, i1480->dev);
172 if (result < 0)
173 goto out;
174 /* Loop writing data in chunks as big as possible until done. */
175 for (data_itr = fw->data, data_top = data_itr + fw->size;
176 data_itr < data_top; data_itr += MAX_BLK_SIZE) {
177 data_size = min(MAX_BLK_SIZE, (size_t) (data_top - data_itr));
178 result = i1480_mpi_write(i1480, data_itr, data_size);
179 if (result < 0)
180 goto error_mpi_write;
181 }
182 /* Read MPI page 0, offset 6; if 0, PHY was initialized correctly. */
183 result = i1480_mpi_read(i1480, &phy_stat, 0x0006, 1);
184 if (result < 0) {
185 dev_err(i1480->dev, "PHY: can't get status: %d\n", result);
186 goto error_mpi_status;
187 }
188 if (phy_stat != 0) {
189 result = -ENODEV;
190 dev_info(i1480->dev, "error, PHY not ready: %u\n", phy_stat);
191 goto error_phy_status;
192 }
193 dev_info(i1480->dev, "PHY fw '%s': uploaded\n", i1480->phy_fw_name);
194error_phy_status:
195error_mpi_status:
196error_mpi_write:
197 release_firmware(fw);
198 if (result < 0)
199 dev_err(i1480->dev, "PHY fw '%s': failed to upload (%d), "
200 "power cycle device\n", i1480->phy_fw_name, result);
201out:
202 return result;
203}
diff --git a/drivers/uwb/i1480/dfu/usb.c b/drivers/uwb/i1480/dfu/usb.c
new file mode 100644
index 000000000000..98eeeff051aa
--- /dev/null
+++ b/drivers/uwb/i1480/dfu/usb.c
@@ -0,0 +1,500 @@
1/*
2 * Intel Wireless UWB Link 1480
3 * USB SKU firmware upload implementation
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * This driver will prepare the i1480 device to behave as a real
24 * Wireless USB HWA adaptor by uploading the firmware.
25 *
26 * When the device is connected or driver is loaded, i1480_usb_probe()
27 * is called--this will allocate and initialize the device structure,
28 * fill in the pointers to the common functions (read, write,
29 * wait_init_done and cmd for HWA command execution) and once that is
30 * done, call the common firmware uploading routine. Then clean up and
31 * return -ENODEV, as we don't attach to the device.
32 *
33 * The rest are the basic ops we implement that the fw upload code
34 * uses to do its job. All the ops in the common code are i1480->NAME,
35 * the functions are i1480_usb_NAME().
36 */
37#include <linux/module.h>
38#include <linux/version.h>
39#include <linux/usb.h>
40#include <linux/interrupt.h>
41#include <linux/delay.h>
42#include <linux/uwb.h>
43#include <linux/usb/wusb.h>
44#include <linux/usb/wusb-wa.h>
45#include "i1480-dfu.h"
46
47#define D_LOCAL 0
48#include <linux/uwb/debug.h>
49
50
51struct i1480_usb {
52 struct i1480 i1480;
53 struct usb_device *usb_dev;
54 struct usb_interface *usb_iface;
55 struct urb *neep_urb; /* URB for reading from EP1 */
56};
57
58
59static
60void i1480_usb_init(struct i1480_usb *i1480_usb)
61{
62 i1480_init(&i1480_usb->i1480);
63}
64
65
66static
67int i1480_usb_create(struct i1480_usb *i1480_usb, struct usb_interface *iface)
68{
69 struct usb_device *usb_dev = interface_to_usbdev(iface);
70 int result = -ENOMEM;
71
72 i1480_usb->usb_dev = usb_get_dev(usb_dev); /* bind the USB device */
73 i1480_usb->usb_iface = usb_get_intf(iface);
74 usb_set_intfdata(iface, i1480_usb); /* Bind the driver to iface0 */
75 i1480_usb->neep_urb = usb_alloc_urb(0, GFP_KERNEL);
76 if (i1480_usb->neep_urb == NULL)
77 goto error;
78 return 0;
79
80error:
81 usb_set_intfdata(iface, NULL);
82 usb_put_intf(iface);
83 usb_put_dev(usb_dev);
84 return result;
85}
86
87
88static
89void i1480_usb_destroy(struct i1480_usb *i1480_usb)
90{
91 usb_kill_urb(i1480_usb->neep_urb);
92 usb_free_urb(i1480_usb->neep_urb);
93 usb_set_intfdata(i1480_usb->usb_iface, NULL);
94 usb_put_intf(i1480_usb->usb_iface);
95 usb_put_dev(i1480_usb->usb_dev);
96}
97
98
99/**
100 * Write a buffer to a memory address in the i1480 device
101 *
102 * @i1480: i1480 instance
103 * @memory_address:
104 * Address where to write the data buffer to.
105 * @buffer: Buffer to the data
106 * @size: Size of the buffer [has to be < 512].
107 * @returns: 0 if ok, < 0 errno code on error.
108 *
109 * Data buffers to USB cannot be on the stack or in vmalloc'ed areas,
110 * so we copy the data to the local i1480 buffer before proceeding. In
111 * any case, there is a maximum size we can send, so we write in chunks.
112 */
113static
114int i1480_usb_write(struct i1480 *i1480, u32 memory_address,
115 const void *buffer, size_t size)
116{
117 int result = 0;
118 struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480);
119 size_t buffer_size, itr = 0;
120
121 d_fnstart(3, i1480->dev, "(%p, 0x%08x, %p, %zu)\n",
122 i1480, memory_address, buffer, size);
123 BUG_ON(size & 0x3); /* Needs to be a multiple of 4 */
124 while (size > 0) {
125 buffer_size = size < i1480->buf_size ? size : i1480->buf_size;
126 memcpy(i1480->cmd_buf, buffer + itr, buffer_size);
127 result = usb_control_msg(
128 i1480_usb->usb_dev, usb_sndctrlpipe(i1480_usb->usb_dev, 0),
129 0xf0, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
130 cpu_to_le16(memory_address & 0xffff),
131 cpu_to_le16((memory_address >> 16) & 0xffff),
132 i1480->cmd_buf, buffer_size, 100 /* FIXME: arbitrary */);
133 if (result < 0)
134 break;
135 d_printf(3, i1480->dev,
136 "wrote @ 0x%08x %u bytes (of %zu bytes requested)\n",
137 memory_address, result, buffer_size);
138 d_dump(4, i1480->dev, i1480->cmd_buf, result);
139 itr += result;
140 memory_address += result;
141 size -= result;
142 }
143 d_fnend(3, i1480->dev, "(%p, 0x%08x, %p, %zu) = %d\n",
144 i1480, memory_address, buffer, size, result);
145 return result;
146}
147
148
149/**
150 * Read a block [max size 512] of the device's memory to @i1480's buffer.
151 *
152 * @i1480: i1480 instance
153 * @memory_address:
154 * Address where to read from.
155 * @size: Size to read. Smaller than or equal to 512.
156 * @returns: >= 0 number of bytes written if ok, < 0 errno code on error.
157 *
158 * NOTE: if the memory address or block is incorrect, you might get a
159 * stall or a read from a different memory area. The caller has to
160 * verify the amount of data read back against what was requested.
161 */
162static
163int i1480_usb_read(struct i1480 *i1480, u32 addr, size_t size)
164{
165 ssize_t result = 0, bytes = 0;
166 size_t itr, read_size = i1480->buf_size;
167 struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480);
168
169 d_fnstart(3, i1480->dev, "(%p, 0x%08x, %zu)\n",
170 i1480, addr, size);
171 BUG_ON(size > i1480->buf_size);
172 BUG_ON(size & 0x3); /* Needs to be a multiple of 4 */
173 BUG_ON(read_size > 512);
174
175 if (addr >= 0x8000d200 && addr < 0x8000d400) /* Yeah, HW quirk */
176 read_size = 4;
177
178 for (itr = 0; itr < size; itr += read_size) {
179 size_t itr_addr = addr + itr;
180 size_t itr_size = min(read_size, size - itr);
181 result = usb_control_msg(
182 i1480_usb->usb_dev, usb_rcvctrlpipe(i1480_usb->usb_dev, 0),
183 0xf0, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
184 cpu_to_le16(itr_addr & 0xffff),
185 cpu_to_le16((itr_addr >> 16) & 0xffff),
186 i1480->cmd_buf + itr, itr_size,
187 100 /* FIXME: arbitrary */);
188 if (result < 0) {
189 dev_err(i1480->dev, "%s: USB read error: %zd\n",
190 __func__, result);
191 goto out;
192 }
193		if (result != itr_size) {
194			dev_err(i1480->dev,
195				"%s: partial read got only %zd bytes vs %zu expected\n",
196				__func__, result, itr_size);
197			result = -EIO;
198 goto out;
199 }
200 bytes += result;
201 }
202 result = bytes;
203out:
204 d_fnend(3, i1480->dev, "(%p, 0x%08x, %zu) = %zd\n",
205 i1480, addr, size, result);
206 if (result > 0)
207 d_dump(4, i1480->dev, i1480->cmd_buf, result);
208 return result;
209}
210
211
212/**
213 * Callback for reads on the notification/event endpoint
214 *
215 * Just enables the completion read handler.
216 */
217static
218void i1480_usb_neep_cb(struct urb *urb)
219{
220 struct i1480 *i1480 = urb->context;
221 struct device *dev = i1480->dev;
222
223 switch (urb->status) {
224 case 0:
225 break;
226 case -ECONNRESET: /* Not an error, but a controlled situation; */
227 case -ENOENT: /* (we killed the URB)...so, no broadcast */
228 dev_dbg(dev, "NEEP: reset/noent %d\n", urb->status);
229 break;
230 case -ESHUTDOWN: /* going away! */
231 dev_dbg(dev, "NEEP: down %d\n", urb->status);
232 break;
233 default:
234 dev_err(dev, "NEEP: unknown status %d\n", urb->status);
235 break;
236 }
237 i1480->evt_result = urb->actual_length;
238 complete(&i1480->evt_complete);
239 return;
240}
241
242
243/**
244 * Wait for the MAC FW to initialize
245 *
246 * MAC FW sends a 0xfd/0101/00 notification to EP1 when done
247 * initializing. Get that notification into i1480->evt_buf; upper layer
248 * will verify it.
249 *
250 * Set i1480->evt_result with the result of getting the event or its
251 * size (if successful).
252 *
253 * Delivers the data directly to i1480->evt_buf
254 */
255static
256int i1480_usb_wait_init_done(struct i1480 *i1480)
257{
258 int result;
259 struct device *dev = i1480->dev;
260 struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480);
261 struct usb_endpoint_descriptor *epd;
262
263 d_fnstart(3, dev, "(%p)\n", i1480);
264 init_completion(&i1480->evt_complete);
265 i1480->evt_result = -EINPROGRESS;
266 epd = &i1480_usb->usb_iface->cur_altsetting->endpoint[0].desc;
267 usb_fill_int_urb(i1480_usb->neep_urb, i1480_usb->usb_dev,
268 usb_rcvintpipe(i1480_usb->usb_dev, epd->bEndpointAddress),
269 i1480->evt_buf, i1480->buf_size,
270 i1480_usb_neep_cb, i1480, epd->bInterval);
271 result = usb_submit_urb(i1480_usb->neep_urb, GFP_KERNEL);
272 if (result < 0) {
273 dev_err(dev, "init done: cannot submit NEEP read: %d\n",
274 result);
275 goto error_submit;
276 }
277 /* Wait for the USB callback to get the data */
278 result = wait_for_completion_interruptible_timeout(
279 &i1480->evt_complete, HZ);
280 if (result <= 0) {
281 result = result == 0 ? -ETIMEDOUT : result;
282 goto error_wait;
283 }
284 usb_kill_urb(i1480_usb->neep_urb);
285 d_fnend(3, dev, "(%p) = 0\n", i1480);
286 return 0;
287
288error_wait:
289 usb_kill_urb(i1480_usb->neep_urb);
290error_submit:
291 i1480->evt_result = result;
292 d_fnend(3, dev, "(%p) = %d\n", i1480, result);
293 return result;
294}
295
296
297/**
298 * Generic function for issuing commands to the i1480
299 *
300 * @i1480: i1480 instance
301 * @cmd_name: Name of the command (for error messages)
302 * @cmd_size: Size of the command; the command itself is taken from
303 * i1480->cmd_buf.
304 * (There is no explicit reply buffer: the reply event is delivered
305 * asynchronously to i1480->evt_buf by the notification endpoint
306 * callback.)
307 * @returns: >= 0 if the command was issued ok,
308 * < 0 errno code on error.
309 *
310 * Arms the NE handle, issues the command to the device and checks the
311 * basics of the reply event.
312 */
313static
314int i1480_usb_cmd(struct i1480 *i1480, const char *cmd_name, size_t cmd_size)
315{
316 int result;
317 struct device *dev = i1480->dev;
318 struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480);
319 struct usb_endpoint_descriptor *epd;
320 struct uwb_rccb *cmd = i1480->cmd_buf;
321 u8 iface_no;
322
323 d_fnstart(3, dev, "(%p, %s, %zu)\n", i1480, cmd_name, cmd_size);
324 /* Post a read on the notification & event endpoint */
325 iface_no = i1480_usb->usb_iface->cur_altsetting->desc.bInterfaceNumber;
326 epd = &i1480_usb->usb_iface->cur_altsetting->endpoint[0].desc;
327 usb_fill_int_urb(
328 i1480_usb->neep_urb, i1480_usb->usb_dev,
329 usb_rcvintpipe(i1480_usb->usb_dev, epd->bEndpointAddress),
330 i1480->evt_buf, i1480->buf_size,
331 i1480_usb_neep_cb, i1480, epd->bInterval);
332 result = usb_submit_urb(i1480_usb->neep_urb, GFP_KERNEL);
333 if (result < 0) {
334 dev_err(dev, "%s: cannot submit NEEP read: %d\n",
335 cmd_name, result);
336 goto error_submit_ep1;
337 }
338 /* Now post the command on EP0 */
339 result = usb_control_msg(
340 i1480_usb->usb_dev, usb_sndctrlpipe(i1480_usb->usb_dev, 0),
341 WA_EXEC_RC_CMD,
342 USB_DIR_OUT | USB_RECIP_INTERFACE | USB_TYPE_CLASS,
343 0, iface_no,
344 cmd, cmd_size,
345 100 /* FIXME: this is totally arbitrary */);
346 if (result < 0) {
347 dev_err(dev, "%s: control request failed: %d\n",
348 cmd_name, result);
349 goto error_submit_ep0;
350 }
351 d_fnend(3, dev, "(%p, %s, %zu) = %d\n",
352 i1480, cmd_name, cmd_size, result);
353 return result;
354
355error_submit_ep0:
356 usb_kill_urb(i1480_usb->neep_urb);
357error_submit_ep1:
358 d_fnend(3, dev, "(%p, %s, %zu) = %d\n",
359 i1480, cmd_name, cmd_size, result);
360 return result;
361}
362
363
364/*
365 * Probe a i1480 device for uploading firmware.
366 *
367 * We attach only to interface #0, which is the radio control interface.
368 */
369static
370int i1480_usb_probe(struct usb_interface *iface, const struct usb_device_id *id)
371{
372 struct i1480_usb *i1480_usb;
373 struct i1480 *i1480;
374 struct device *dev = &iface->dev;
375 int result;
376
377 result = -ENODEV;
378 if (iface->cur_altsetting->desc.bInterfaceNumber != 0) {
379 dev_dbg(dev, "not attaching to iface %d\n",
380 iface->cur_altsetting->desc.bInterfaceNumber);
381 goto error;
382 }
383 if (iface->num_altsetting > 1
384 && interface_to_usbdev(iface)->descriptor.idProduct == 0xbabe) {
385 /* Need altsetting #1 [HW QUIRK] or EP1 won't work */
386 result = usb_set_interface(interface_to_usbdev(iface), 0, 1);
387 if (result < 0)
388 dev_warn(dev,
389 "can't set altsetting 1 on iface 0: %d\n",
390 result);
391 }
392
393 result = -ENOMEM;
394 i1480_usb = kzalloc(sizeof(*i1480_usb), GFP_KERNEL);
395 if (i1480_usb == NULL) {
396 dev_err(dev, "Unable to allocate instance\n");
397 goto error;
398 }
399 i1480_usb_init(i1480_usb);
400
401 i1480 = &i1480_usb->i1480;
402 i1480->buf_size = 512;
403 i1480->cmd_buf = kmalloc(2 * i1480->buf_size, GFP_KERNEL);
404 if (i1480->cmd_buf == NULL) {
405 dev_err(dev, "Cannot allocate transfer buffers\n");
406 result = -ENOMEM;
407 goto error_buf_alloc;
408 }
409 i1480->evt_buf = i1480->cmd_buf + i1480->buf_size;
410
411 result = i1480_usb_create(i1480_usb, iface);
412 if (result < 0) {
413 dev_err(dev, "Cannot create instance: %d\n", result);
414 goto error_create;
415 }
416
417	/* setup the ops and upload the firmware */
418 i1480->pre_fw_name = "i1480-pre-phy-0.0.bin";
419 i1480->mac_fw_name = "i1480-usb-0.0.bin";
420 i1480->mac_fw_name_deprecate = "ptc-0.0.bin";
421 i1480->phy_fw_name = "i1480-phy-0.0.bin";
422 i1480->dev = &iface->dev;
423 i1480->write = i1480_usb_write;
424 i1480->read = i1480_usb_read;
425 i1480->rc_setup = NULL;
426 i1480->wait_init_done = i1480_usb_wait_init_done;
427 i1480->cmd = i1480_usb_cmd;
428
429 result = i1480_fw_upload(&i1480_usb->i1480); /* the real thing */
430 if (result >= 0) {
431 usb_reset_device(i1480_usb->usb_dev);
432 result = -ENODEV; /* we don't want to bind to the iface */
433 }
434 i1480_usb_destroy(i1480_usb);
435error_create:
436 kfree(i1480->cmd_buf);
437error_buf_alloc:
438 kfree(i1480_usb);
439error:
440 return result;
441}
442
443#define i1480_USB_DEV(v, p) \
444{ \
445 .match_flags = USB_DEVICE_ID_MATCH_DEVICE \
446 | USB_DEVICE_ID_MATCH_DEV_INFO \
447 | USB_DEVICE_ID_MATCH_INT_INFO, \
448 .idVendor = (v), \
449 .idProduct = (p), \
450 .bDeviceClass = 0xff, \
451 .bDeviceSubClass = 0xff, \
452 .bDeviceProtocol = 0xff, \
453 .bInterfaceClass = 0xff, \
454 .bInterfaceSubClass = 0xff, \
455 .bInterfaceProtocol = 0xff, \
456}
457
458
459/** USB device ID's that we handle */
460static struct usb_device_id i1480_usb_id_table[] = {
461 i1480_USB_DEV(0x8086, 0xdf3b),
462 i1480_USB_DEV(0x15a9, 0x0005),
463 i1480_USB_DEV(0x07d1, 0x3802),
464 i1480_USB_DEV(0x050d, 0x305a),
465 i1480_USB_DEV(0x3495, 0x3007),
466 {},
467};
468MODULE_DEVICE_TABLE(usb, i1480_usb_id_table);
469
470
471static struct usb_driver i1480_dfu_driver = {
472 .name = "i1480-dfu-usb",
473 .id_table = i1480_usb_id_table,
474 .probe = i1480_usb_probe,
475 .disconnect = NULL,
476};
477
478
479/*
480 * Initialize the i1480 DFU driver.
481 *
482 * We also need to register our function for guessing event sizes.
483 */
484static int __init i1480_dfu_driver_init(void)
485{
486 return usb_register(&i1480_dfu_driver);
487}
488module_init(i1480_dfu_driver_init);
489
490
491static void __exit i1480_dfu_driver_exit(void)
492{
493 usb_deregister(&i1480_dfu_driver);
494}
495module_exit(i1480_dfu_driver_exit);
496
497
498MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
499MODULE_DESCRIPTION("Intel Wireless UWB Link 1480 firmware uploader for USB");
500MODULE_LICENSE("GPL");
diff --git a/drivers/uwb/i1480/i1480-est.c b/drivers/uwb/i1480/i1480-est.c
new file mode 100644
index 000000000000..7bf8c6febae7
--- /dev/null
+++ b/drivers/uwb/i1480/i1480-est.c
@@ -0,0 +1,99 @@
1/*
2 * Intel Wireless UWB Link 1480
3 * Event Size tables for Wired Adaptors
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * FIXME: docs
24 */
25
26#include <linux/init.h>
27#include <linux/module.h>
28#include <linux/usb.h>
29#include <linux/uwb.h>
30#include "dfu/i1480-dfu.h"
31
32
33/** Event size table for wEvents 0x00XX */
34static struct uwb_est_entry i1480_est_fd00[] = {
35 /* Anybody expecting this response has to use
36 * neh->extra_size to specify the real size that will
37 * come back. */
38 [i1480_EVT_CONFIRM] = { .size = sizeof(struct i1480_evt_confirm) },
39 [i1480_CMD_SET_IP_MAS] = { .size = sizeof(struct i1480_evt_confirm) },
40#ifdef i1480_RCEB_EXTENDED
41 [0x09] = {
42 .size = sizeof(struct i1480_rceb),
43 .offset = 1 + offsetof(struct i1480_rceb, wParamLength),
44 },
45#endif
46};
47
48/** Event size table for wEvents 0x01XX */
49static struct uwb_est_entry i1480_est_fd01[] = {
50 [0xff & i1480_EVT_RM_INIT_DONE] = { .size = sizeof(struct i1480_rceb) },
51 [0xff & i1480_EVT_DEV_ADD] = { .size = sizeof(struct i1480_rceb) + 9 },
52 [0xff & i1480_EVT_DEV_RM] = { .size = sizeof(struct i1480_rceb) + 9 },
53 [0xff & i1480_EVT_DEV_ID_CHANGE] = {
54 .size = sizeof(struct i1480_rceb) + 2 },
55};
56
57static int i1480_est_init(void)
58{
59 int result = uwb_est_register(i1480_CET_VS1, 0x00, 0x8086, 0x0c3b,
60 i1480_est_fd00,
61 ARRAY_SIZE(i1480_est_fd00));
62 if (result < 0) {
63 printk(KERN_ERR "Can't register EST table fd00: %d\n", result);
64 return result;
65 }
66 result = uwb_est_register(i1480_CET_VS1, 0x01, 0x8086, 0x0c3b,
67 i1480_est_fd01, ARRAY_SIZE(i1480_est_fd01));
68 if (result < 0) {
69 printk(KERN_ERR "Can't register EST table fd01: %d\n", result);
70 return result;
71 }
72 return 0;
73}
74module_init(i1480_est_init);
75
76static void i1480_est_exit(void)
77{
78 uwb_est_unregister(i1480_CET_VS1, 0x00, 0x8086, 0x0c3b,
79 i1480_est_fd00, ARRAY_SIZE(i1480_est_fd00));
80 uwb_est_unregister(i1480_CET_VS1, 0x01, 0x8086, 0x0c3b,
81 i1480_est_fd01, ARRAY_SIZE(i1480_est_fd01));
82}
83module_exit(i1480_est_exit);
84
85MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
86MODULE_DESCRIPTION("i1480's Vendor Specific Event Size Tables");
87MODULE_LICENSE("GPL");
88
89/**
90 * USB device ID's that we handle
91 *
92 * [so we are loaded when this kind of device is connected]
93 */
94static struct usb_device_id i1480_est_id_table[] = {
95 { USB_DEVICE(0x8086, 0xdf3b), },
96 { USB_DEVICE(0x8086, 0x0c3b), },
97 { },
98};
99MODULE_DEVICE_TABLE(usb, i1480_est_id_table);
diff --git a/drivers/uwb/i1480/i1480-wlp.h b/drivers/uwb/i1480/i1480-wlp.h
new file mode 100644
index 000000000000..18a8b0e4567b
--- /dev/null
+++ b/drivers/uwb/i1480/i1480-wlp.h
@@ -0,0 +1,200 @@
1/*
2 * Intel 1480 Wireless UWB Link
3 * WLP specific definitions
4 *
5 *
6 * Copyright (C) 2005-2006 Intel Corporation
7 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License version
11 * 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 * 02110-1301, USA.
22 *
23 *
24 * FIXME: docs
25 */
26
27#ifndef __i1480_wlp_h__
28#define __i1480_wlp_h__
29
30#include <linux/spinlock.h>
31#include <linux/list.h>
32#include <linux/uwb.h>
33#include <linux/if_ether.h>
34#include <asm/byteorder.h>
35
36/* New simplified header format? */
37#undef WLP_HDR_FMT_2 /* FIXME: rename */
38
39/**
40 * Values of the Delivery ID & Type field when PCA or DRP
41 *
42 * The Delivery ID & Type field in the WLP TX header indicates whether
43 * the frame is PCA or DRP. This is done based on the high level bit of
44 * this field.
45 * We use this constant to test if the traffic is PCA or DRP as follows:
46 * if (wlp_tx_hdr_delivery_id_type(wlp_tx_hdr) & WLP_DRP)
47 * this is DRP traffic
48 * else
49 * this is PCA traffic
50 */
51enum deliver_id_type_bit {
52 WLP_DRP = 8,
53};
54
55/**
56 * WLP TX header
57 *
58 * Indicates UWB/WLP-specific transmission parameters for a network
59 * packet.
60 */
61struct wlp_tx_hdr {
62 /* dword 0 */
63 struct uwb_dev_addr dstaddr;
64 u8 key_index;
65 u8 mac_params;
66 /* dword 1 */
67 u8 phy_params;
68#ifndef WLP_HDR_FMT_2
69 u8 reserved;
70 __le16 oui01; /* FIXME: not so sure if __le16 or u8[2] */
71 /* dword 2 */
72 u8 oui2; /* if all LE, it could be merged */
73 __le16 prid;
74#endif
75} __attribute__((packed));
76
77static inline int wlp_tx_hdr_delivery_id_type(const struct wlp_tx_hdr *hdr)
78{
79 return hdr->mac_params & 0x0f;
80}
81
82static inline int wlp_tx_hdr_ack_policy(const struct wlp_tx_hdr *hdr)
83{
84 return (hdr->mac_params >> 4) & 0x07;
85}
86
87static inline int wlp_tx_hdr_rts_cts(const struct wlp_tx_hdr *hdr)
88{
89 return (hdr->mac_params >> 7) & 0x01;
90}
91
92static inline void wlp_tx_hdr_set_delivery_id_type(struct wlp_tx_hdr *hdr, int id)
93{
94 hdr->mac_params = (hdr->mac_params & ~0x0f) | id;
95}
96
97static inline void wlp_tx_hdr_set_ack_policy(struct wlp_tx_hdr *hdr,
98 enum uwb_ack_pol policy)
99{
100 hdr->mac_params = (hdr->mac_params & ~0x70) | (policy << 4);
101}
102
103static inline void wlp_tx_hdr_set_rts_cts(struct wlp_tx_hdr *hdr, int rts_cts)
104{
105 hdr->mac_params = (hdr->mac_params & ~0x80) | (rts_cts << 7);
106}
107
108static inline enum uwb_phy_rate wlp_tx_hdr_phy_rate(const struct wlp_tx_hdr *hdr)
109{
110 return hdr->phy_params & 0x0f;
111}
112
113static inline int wlp_tx_hdr_tx_power(const struct wlp_tx_hdr *hdr)
114{
115 return (hdr->phy_params >> 4) & 0x0f;
116}
117
118static inline void wlp_tx_hdr_set_phy_rate(struct wlp_tx_hdr *hdr, enum uwb_phy_rate rate)
119{
120 hdr->phy_params = (hdr->phy_params & ~0x0f) | rate;
121}
122
123static inline void wlp_tx_hdr_set_tx_power(struct wlp_tx_hdr *hdr, int pwr)
124{
125 hdr->phy_params = (hdr->phy_params & ~0xf0) | (pwr << 4);
126}
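/*
 * Illustrative sketch (not part of this header): filling a TX header with the
 * accessors above and classifying it per the Delivery ID & Type convention
 * described earlier in this file. The delivery ID value (WLP_DRP | 3) and the
 * choice of ACK policy/PHY rate are made up for the example.
 */
static inline int example_wlp_tx_hdr_is_drp(struct wlp_tx_hdr *hdr)
{
	wlp_tx_hdr_set_delivery_id_type(hdr, WLP_DRP | 3);	/* DRP traffic */
	wlp_tx_hdr_set_ack_policy(hdr, UWB_ACK_INM);
	wlp_tx_hdr_set_rts_cts(hdr, 1);
	wlp_tx_hdr_set_phy_rate(hdr, UWB_PHY_RATE_480);

	/* high bit set in the Delivery ID & Type field means DRP, else PCA */
	return wlp_tx_hdr_delivery_id_type(hdr) & WLP_DRP ? 1 : 0;
}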
127
128
129/**
130 * WLP RX header
131 *
132 * Provides UWB/WLP-specific transmission data for a received
133 * network packet.
134 */
135struct wlp_rx_hdr {
136 /* dword 0 */
137 struct uwb_dev_addr dstaddr;
138 struct uwb_dev_addr srcaddr;
139 /* dword 1 */
140 u8 LQI;
141 s8 RSSI;
142 u8 reserved3;
143#ifndef WLP_HDR_FMT_2
144 u8 oui0;
145 /* dword 2 */
146 __le16 oui12;
147 __le16 prid;
148#endif
149} __attribute__((packed));
150
151
152/** User configurable options for WLP */
153struct wlp_options {
154 struct mutex mutex; /* access to user configurable options*/
155 struct wlp_tx_hdr def_tx_hdr; /* default tx hdr */
156 u8 pca_base_priority;
157 u8 bw_alloc; /*index into bw_allocs[] for PCA/DRP reservations*/
158};
159
160
161static inline
162void wlp_options_init(struct wlp_options *options)
163{
164 mutex_init(&options->mutex);
165 wlp_tx_hdr_set_ack_policy(&options->def_tx_hdr, UWB_ACK_INM);
166 wlp_tx_hdr_set_rts_cts(&options->def_tx_hdr, 1);
167 /* FIXME: default to phy caps */
168 wlp_tx_hdr_set_phy_rate(&options->def_tx_hdr, UWB_PHY_RATE_480);
169#ifndef WLP_HDR_FMT_2
170 options->def_tx_hdr.prid = cpu_to_le16(0x0000);
171#endif
172}
173
174
175/* sysfs helpers */
176
177extern ssize_t uwb_pca_base_priority_store(struct wlp_options *,
178 const char *, size_t);
179extern ssize_t uwb_pca_base_priority_show(const struct wlp_options *, char *);
180extern ssize_t uwb_bw_alloc_store(struct wlp_options *, const char *, size_t);
181extern ssize_t uwb_bw_alloc_show(const struct wlp_options *, char *);
182extern ssize_t uwb_ack_policy_store(struct wlp_options *,
183 const char *, size_t);
184extern ssize_t uwb_ack_policy_show(const struct wlp_options *, char *);
185extern ssize_t uwb_rts_cts_store(struct wlp_options *, const char *, size_t);
186extern ssize_t uwb_rts_cts_show(const struct wlp_options *, char *);
187extern ssize_t uwb_phy_rate_store(struct wlp_options *, const char *, size_t);
188extern ssize_t uwb_phy_rate_show(const struct wlp_options *, char *);
189
190
191/** Simple bandwidth allocation (temporary and too simple) */
192struct wlp_bw_allocs {
193 const char *name;
194 struct {
195 u8 mask, stream;
196 } tx, rx;
197};
198
199
200#endif /* #ifndef __i1480_wlp_h__ */
diff --git a/drivers/uwb/i1480/i1480u-wlp/Makefile b/drivers/uwb/i1480/i1480u-wlp/Makefile
new file mode 100644
index 000000000000..fe6709b8e68b
--- /dev/null
+++ b/drivers/uwb/i1480/i1480u-wlp/Makefile
@@ -0,0 +1,8 @@
1obj-$(CONFIG_UWB_I1480U_WLP) += i1480u-wlp.o
2
3i1480u-wlp-objs := \
4 lc.o \
5 netdev.o \
6 rx.o \
7 sysfs.o \
8 tx.o
diff --git a/drivers/uwb/i1480/i1480u-wlp/i1480u-wlp.h b/drivers/uwb/i1480/i1480u-wlp/i1480u-wlp.h
new file mode 100644
index 000000000000..5f1b2951bb83
--- /dev/null
+++ b/drivers/uwb/i1480/i1480u-wlp/i1480u-wlp.h
@@ -0,0 +1,284 @@
1/*
2 * Intel 1480 Wireless UWB Link USB
3 * Header formats, constants, general internal interfaces
4 *
5 *
6 * Copyright (C) 2005-2006 Intel Corporation
7 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License version
11 * 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 * 02110-1301, USA.
22 *
23 *
24 * This is not a standard interface.
25 *
26 * FIXME: docs
27 *
28 * i1480u-wlp is pretty simple: two endpoints, one for tx, one for
29 * rx. rx is polled. Network packets (ethernet, whatever) are wrapped
30 * in i1480 TX or RX headers (for sending over the air), and these
31 * packets are wrapped in UNTD headers (for sending to the WLP UWB
32 * controller).
33 *
34 * UNTD packets (UNTD hdr + i1480 hdr + network packet) cannot be
35 * bigger than i1480u_MAX_FRG_SIZE. When a packet would exceed that, the
36 * i1480 packet is broken into chunks/packets:
37 *
38 * UNTD-1st.hdr + i1480.hdr + payload
39 * UNTD-next.hdr + payload
40 * ...
41 * UNTD-last.hdr + payload
42 *
43 * so that each packet is smaller than or equal to i1480u_MAX_FRG_SIZE.
44 *
45 * All HW structures and bitmaps are little endian, so we need to play
46 * ugly tricks when defining bitfields. Hoping for the day GCC
47 * implements __attribute__((endian(1234))).
48 *
49 * FIXME: ROADMAP to the whole implementation
50 */
51
52#ifndef __i1480u_wlp_h__
53#define __i1480u_wlp_h__
54
55#include <linux/usb.h>
56#include <linux/netdevice.h>
57#include <linux/uwb.h> /* struct uwb_rc, struct uwb_notifs_handler */
58#include <linux/wlp.h>
59#include "../i1480-wlp.h"
60
61#undef i1480u_FLOW_CONTROL /* Enable flow control code */
62
63/**
64 * Basic flow control
65 */
66enum {
67 i1480u_TX_INFLIGHT_MAX = 1000,
68 i1480u_TX_INFLIGHT_THRESHOLD = 100,
69};
70
71/** Maximum size of a transaction that we can tx/rx */
72enum {
73 /* Maximum packet size computed as follows: max UNTD header (8) +
74 * i1480 RX header (8) + max Ethernet header and payload (4096) +
75 * Padding added by skb_reserve (2) to make the post-Ethernet payload
76 * start on a 16 byte boundary */
77 i1480u_MAX_RX_PKT_SIZE = 4114,
78 i1480u_MAX_FRG_SIZE = 512,
79 i1480u_RX_BUFS = 9,
80};
81
82
83/**
84 * UNTD packet type
85 *
86 * We need to fragment any payload whose UNTD packet is going to be
87 * bigger than i1480u_MAX_FRG_SIZE.
88 */
89enum i1480u_pkt_type {
90 i1480u_PKT_FRAG_1ST = 0x1,
91 i1480u_PKT_FRAG_NXT = 0x0,
92 i1480u_PKT_FRAG_LST = 0x2,
93 i1480u_PKT_FRAG_CMP = 0x3
94};
95enum {
96 i1480u_PKT_NONE = 0x4,
97};
98
99/** USB Network Transfer Descriptor - common */
100struct untd_hdr {
101 u8 type;
102 __le16 len;
103} __attribute__((packed));
104
105static inline enum i1480u_pkt_type untd_hdr_type(const struct untd_hdr *hdr)
106{
107 return hdr->type & 0x03;
108}
109
110static inline int untd_hdr_rx_tx(const struct untd_hdr *hdr)
111{
112 return (hdr->type >> 2) & 0x01;
113}
114
115static inline void untd_hdr_set_type(struct untd_hdr *hdr, enum i1480u_pkt_type type)
116{
117 hdr->type = (hdr->type & ~0x03) | type;
118}
119
120static inline void untd_hdr_set_rx_tx(struct untd_hdr *hdr, int rx_tx)
121{
122 hdr->type = (hdr->type & ~0x04) | (rx_tx << 2);
123}
124
125
126/**
127 * USB Network Transfer Descriptor - Complete Packet
128 *
129 * This is for a packet that is smaller (header + payload) than
130 * i1480u_MAX_FRG_SIZE.
131 *
132 * @hdr.len is the size of the payload; the payload doesn't
133 * count this header nor the padding, but includes the size of i1480
134 * header.
135 */
136struct untd_hdr_cmp {
137 struct untd_hdr hdr;
138 u8 padding;
139} __attribute__((packed));
140
141
142/**
143 * USB Network Transfer Descriptor - First fragment
144 *
145 * @hdr.len is the size of the *whole packet* (excluding UNTD
146 * headers); @fragment_len is the size of the payload (excluding UNTD
147 * headers, but including i1480 headers).
148 */
149struct untd_hdr_1st {
150 struct untd_hdr hdr;
151 __le16 fragment_len;
152 u8 padding[3];
153} __attribute__((packed));
154
155
156/**
157 * USB Network Transfer Descriptor - Next / Last [Rest]
158 *
159 * @hdr.len is the size of the payload, not including headers.
160 */
161struct untd_hdr_rst {
162 struct untd_hdr hdr;
163 u8 padding;
164} __attribute__((packed));
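/*
 * Illustrative sketch (not part of this header): fragment count for a wrapped
 * packet under the rule, described at the top of this file, that every UNTD
 * packet must fit in i1480u_MAX_FRG_SIZE. Only that rule comes from the file;
 * the exact per-fragment payload split assumed below (header size subtracted
 * from the limit) is an assumption for the example.
 */
static inline unsigned example_untd_fragment_count(size_t wlp_pkt_size)
{
	size_t first = i1480u_MAX_FRG_SIZE - sizeof(struct untd_hdr_1st);
	size_t rest = i1480u_MAX_FRG_SIZE - sizeof(struct untd_hdr_rst);

	if (sizeof(struct untd_hdr_cmp) + wlp_pkt_size <= i1480u_MAX_FRG_SIZE)
		return 1;	/* fits in a single "complete" UNTD packet */
	return 1 + DIV_ROUND_UP(wlp_pkt_size - first, rest);
}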
165
166
167/**
168 * Transmission context
169 *
170 * Wraps all the stuff needed to track a pending/active tx
171 * operation.
172 */
173struct i1480u_tx {
174 struct list_head list_node;
175 struct i1480u *i1480u;
176 struct urb *urb;
177
178 struct sk_buff *skb;
179 struct wlp_tx_hdr *wlp_tx_hdr;
180
181 void *buf; /* if NULL, no new buf was used */
182 size_t buf_size;
183};
184
185/**
186 * Basic flow control
187 *
188 * We maintain a basic flow control counter: "count" tracks how many TX
189 * URBs are outstanding. Only "max" TX URBs are allowed to be outstanding
190 * at any time; if that value is reached the queue is stopped, and it is
191 * restarted once the number of outstanding URBs drops back to
192 * "threshold".
193 * We also count how many times the TX queue needed to be restarted due to
194 * "max" being exceeded and "threshold" being reached again. The timestamp
195 * "restart_ts" keeps track of when the counter was last queried (see the
196 * sysfs handling of the wlp_tx_inflight file); a usage sketch follows below.
197 */
198struct i1480u_tx_inflight {
199 atomic_t count;
200 unsigned long max;
201 unsigned long threshold;
202 unsigned long restart_ts;
203 atomic_t restart_count;
204};
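/*
 * Illustrative sketch (not part of this header) of the intended use of struct
 * i1480u_tx_inflight as described above; the helper names are hypothetical,
 * only the count/max/threshold semantics come from the comment.
 */
static inline void example_tx_inflight_inc(struct i1480u_tx_inflight *fc,
					   struct net_device *net_dev)
{
	if (atomic_inc_return(&fc->count) >= fc->max)
		netif_stop_queue(net_dev);	/* too many TX URBs in flight */
}

static inline void example_tx_inflight_dec(struct i1480u_tx_inflight *fc,
					   struct net_device *net_dev)
{
	if (atomic_dec_return(&fc->count) <= fc->threshold
	    && netif_queue_stopped(net_dev)) {
		atomic_inc(&fc->restart_count);	/* account for the restart */
		netif_start_queue(net_dev);
	}
}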
205
206/**
207 * Instance of a i1480u WLP interface
208 *
209 * Keeps references to the USB device that wraps it, as well as its
210 * interface and associated UWB host controller. It also keeps a link
211 * to the netdevice for integration into the networking
212 * stack.
213 * We maintain separate error histories for the tx and rx endpoints
214 * because the implementation does not rely on locking - having one shared
215 * structure between endpoints may cause problems. Adding locking to the
216 * implementation would cost more than keeping a separate structure.
217 */
218struct i1480u {
219 struct usb_device *usb_dev;
220 struct usb_interface *usb_iface;
221 struct net_device *net_dev;
222
223 spinlock_t lock;
224 struct net_device_stats stats;
225
226 /* RX context handling */
227 struct sk_buff *rx_skb;
228 struct uwb_dev_addr rx_srcaddr;
229 size_t rx_untd_pkt_size;
230 struct i1480u_rx_buf {
231 struct i1480u *i1480u; /* back pointer */
232 struct urb *urb;
233 struct sk_buff *data; /* i1480u_MAX_RX_PKT_SIZE each */
234 } rx_buf[i1480u_RX_BUFS]; /* N bufs */
235
236 spinlock_t tx_list_lock; /* TX context */
237 struct list_head tx_list;
238 u8 tx_stream;
239
240 struct stats lqe_stats, rssi_stats; /* radio statistics */
241
242 /* Options we can set from sysfs */
243 struct wlp_options options;
244 struct uwb_notifs_handler uwb_notifs_handler;
245 struct edc tx_errors;
246 struct edc rx_errors;
247 struct wlp wlp;
248#ifdef i1480u_FLOW_CONTROL
249 struct urb *notif_urb;
250 struct edc notif_edc; /* error density counter */
251 u8 notif_buffer[1];
252#endif
253 struct i1480u_tx_inflight tx_inflight;
254};
255
256/* Internal interfaces */
257extern void i1480u_rx_cb(struct urb *urb);
258extern int i1480u_rx_setup(struct i1480u *);
259extern void i1480u_rx_release(struct i1480u *);
260extern void i1480u_tx_release(struct i1480u *);
261extern int i1480u_xmit_frame(struct wlp *, struct sk_buff *,
262 struct uwb_dev_addr *);
263extern void i1480u_stop_queue(struct wlp *);
264extern void i1480u_start_queue(struct wlp *);
265extern int i1480u_sysfs_setup(struct i1480u *);
266extern void i1480u_sysfs_release(struct i1480u *);
267
268/* netdev interface */
269extern int i1480u_open(struct net_device *);
270extern int i1480u_stop(struct net_device *);
271extern int i1480u_hard_start_xmit(struct sk_buff *, struct net_device *);
272extern void i1480u_tx_timeout(struct net_device *);
273extern int i1480u_set_config(struct net_device *, struct ifmap *);
274extern struct net_device_stats *i1480u_get_stats(struct net_device *);
275extern int i1480u_change_mtu(struct net_device *, int);
276extern void i1480u_uwb_notifs_cb(void *, struct uwb_dev *, enum uwb_notifs);
277
278/* bandwidth allocation callback */
279extern void i1480u_bw_alloc_cb(struct uwb_rsv *);
280
281/* Sys FS */
282extern struct attribute_group i1480u_wlp_attr_group;
283
284#endif /* #ifndef __i1480u_wlp_h__ */
diff --git a/drivers/uwb/i1480/i1480u-wlp/lc.c b/drivers/uwb/i1480/i1480u-wlp/lc.c
new file mode 100644
index 000000000000..737d60cd5b73
--- /dev/null
+++ b/drivers/uwb/i1480/i1480u-wlp/lc.c
@@ -0,0 +1,421 @@
1/*
2 * WUSB Wire Adapter: WLP interface
3 * Driver for the Linux Network stack.
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * FIXME: docs
24 *
25 * This implements a very simple network driver for the WLP USB
26 * device that is associated to a UWB (Ultra Wide Band) host.
27 *
28 * This is seen as an interface of a composite device. Once the UWB
29 * host has an association to another WLP capable device, the
30 * networking interface (aka WLP) can start to send packets back and
31 * forth.
32 *
33 * Limitations:
34 *
35 * - Hand cranked; can't ifup the interface until there is an association
36 *
37 * - BW allocation very simplistic [see i1480u_mas_set() and callees].
38 *
39 *
40 * ROADMAP:
41 *
42 * ENTRY POINTS (driver model):
43 *
44 * i1480u_driver_{exit,init}(): initialization of the driver.
45 *
46 * i1480u_probe(): called by the driver code when a device
47 * matching 'i1480u_id_table' is connected.
48 *
49 * This allocs a netdev instance, inits it with
50 * i1480u_add(), then calls register_netdev().
51 * i1480u_init()
52 * i1480u_add()
53 *
54 * i1480u_disconnect(): device has been disconnected/module
55 * is being removed.
56 * i1480u_rm()
57 */
58#include <linux/version.h>
59#include <linux/if_arp.h>
60#include <linux/etherdevice.h>
61#include <linux/uwb/debug.h>
62#include "i1480u-wlp.h"
63
64
65
66static inline
67void i1480u_init(struct i1480u *i1480u)
68{
69 /* nothing so far... doesn't it suck? */
70 spin_lock_init(&i1480u->lock);
71 INIT_LIST_HEAD(&i1480u->tx_list);
72 spin_lock_init(&i1480u->tx_list_lock);
73 wlp_options_init(&i1480u->options);
74 edc_init(&i1480u->tx_errors);
75 edc_init(&i1480u->rx_errors);
76#ifdef i1480u_FLOW_CONTROL
77 edc_init(&i1480u->notif_edc);
78#endif
79 stats_init(&i1480u->lqe_stats);
80 stats_init(&i1480u->rssi_stats);
81 wlp_init(&i1480u->wlp);
82}
83
84/**
85 * Fill WLP device information structure
86 *
87 * The structure will contain a few character arrays, each ending with a
88 * null terminated string. Each string has to fit (excluding terminating
89 * character) into a specified range obtained from the WLP substack.
90 *
91 * It is still not clear exactly how this device information should be
92 * obtained. Until we find out, we use the USB device descriptor as a
93 * backup; some information elements have intuitive mappings, others do not.
94 */
95static
96void i1480u_fill_device_info(struct wlp *wlp, struct wlp_device_info *dev_info)
97{
98 struct i1480u *i1480u = container_of(wlp, struct i1480u, wlp);
99 struct usb_device *usb_dev = i1480u->usb_dev;
100 /* Treat device name and model name the same */
101 if (usb_dev->descriptor.iProduct) {
102 usb_string(usb_dev, usb_dev->descriptor.iProduct,
103 dev_info->name, sizeof(dev_info->name));
104 usb_string(usb_dev, usb_dev->descriptor.iProduct,
105 dev_info->model_name, sizeof(dev_info->model_name));
106 }
107 if (usb_dev->descriptor.iManufacturer)
108 usb_string(usb_dev, usb_dev->descriptor.iManufacturer,
109 dev_info->manufacturer,
110 sizeof(dev_info->manufacturer));
111 scnprintf(dev_info->model_nr, sizeof(dev_info->model_nr), "%04x",
112 __le16_to_cpu(usb_dev->descriptor.bcdDevice));
113 if (usb_dev->descriptor.iSerialNumber)
114 usb_string(usb_dev, usb_dev->descriptor.iSerialNumber,
115 dev_info->serial, sizeof(dev_info->serial));
116 /* FIXME: where should we obtain category? */
117 dev_info->prim_dev_type.category = cpu_to_le16(WLP_DEV_CAT_OTHER);
118 /* FIXME: Complete OUI and OUIsubdiv attributes */
119}
120
121#ifdef i1480u_FLOW_CONTROL
122/**
123 * Callback for the notification endpoint
124 *
125 * This mostly controls the xon/xoff protocol. In case of hard error,
126 * we stop the queue. If not, we always retry.
127 */
128static
129void i1480u_notif_cb(struct urb *urb, struct pt_regs *regs)
130{
131 struct i1480u *i1480u = urb->context;
132 struct usb_interface *usb_iface = i1480u->usb_iface;
133 struct device *dev = &usb_iface->dev;
134 int result;
135
136 switch (urb->status) {
137 case 0: /* Got valid data, do xon/xoff */
138 switch (i1480u->notif_buffer[0]) {
139 case 'N':
140 dev_err(dev, "XOFF STOPPING queue at %lu\n", jiffies);
141 netif_stop_queue(i1480u->net_dev);
142 break;
143 case 'A':
144 dev_err(dev, "XON STARTING queue at %lu\n", jiffies);
145 netif_start_queue(i1480u->net_dev);
146 break;
147 default:
148 dev_err(dev, "NEP: unknown data 0x%02hhx\n",
149 i1480u->notif_buffer[0]);
150 }
151 break;
152 case -ECONNRESET: /* Controlled situation ... */
153 case -ENOENT: /* we killed the URB... */
154 dev_err(dev, "NEP: URB reset/noent %d\n", urb->status);
155 goto error;
156 case -ESHUTDOWN: /* going away! */
157 dev_err(dev, "NEP: URB down %d\n", urb->status);
158 goto error;
159 default: /* Retry unless it gets ugly */
160 if (edc_inc(&i1480u->notif_edc, EDC_MAX_ERRORS,
161 EDC_ERROR_TIMEFRAME)) {
162 dev_err(dev, "NEP: URB max acceptable errors "
163 "exceeded; resetting device\n");
164 goto error_reset;
165 }
166 dev_err(dev, "NEP: URB error %d\n", urb->status);
167 break;
168 }
169 result = usb_submit_urb(urb, GFP_ATOMIC);
170 if (result < 0) {
171 dev_err(dev, "NEP: Can't resubmit URB: %d; resetting device\n",
172 result);
173 goto error_reset;
174 }
175 return;
176
177error_reset:
178	wlp_reset_all(&i1480u->wlp);
179error:
180 netif_stop_queue(i1480u->net_dev);
181 return;
182}
183#endif
184
185static
186int i1480u_add(struct i1480u *i1480u, struct usb_interface *iface)
187{
188 int result = -ENODEV;
189 struct wlp *wlp = &i1480u->wlp;
190 struct usb_device *usb_dev = interface_to_usbdev(iface);
191 struct net_device *net_dev = i1480u->net_dev;
192 struct uwb_rc *rc;
193 struct uwb_dev *uwb_dev;
194#ifdef i1480u_FLOW_CONTROL
195 struct usb_endpoint_descriptor *epd;
196#endif
197
198 i1480u->usb_dev = usb_get_dev(usb_dev);
199 i1480u->usb_iface = iface;
200 rc = uwb_rc_get_by_grandpa(&i1480u->usb_dev->dev);
201 if (rc == NULL) {
202 dev_err(&iface->dev, "Cannot get associated UWB Radio "
203 "Controller\n");
204 goto out;
205 }
206 wlp->xmit_frame = i1480u_xmit_frame;
207 wlp->fill_device_info = i1480u_fill_device_info;
208 wlp->stop_queue = i1480u_stop_queue;
209 wlp->start_queue = i1480u_start_queue;
210 result = wlp_setup(wlp, rc);
211 if (result < 0) {
212 dev_err(&iface->dev, "Cannot setup WLP\n");
213 goto error_wlp_setup;
214 }
215 result = 0;
216 ether_setup(net_dev); /* make it an etherdevice */
217 uwb_dev = &rc->uwb_dev;
218 /* FIXME: hookup address change notifications? */
219
220	memcpy(net_dev->dev_addr, uwb_dev->mac_addr.data,
221	       sizeof(uwb_dev->mac_addr.data));
222
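	/* Reserve headroom for everything that may be pushed in front of
	 * the Ethernet frame on TX: the UNTD complete-packet header and
	 * the WLP TX header (see i1480u_tx_create_1()), plus the WLP data
	 * header (presumably added by the WLP substack) and the Ethernet
	 * header itself. */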
223 net_dev->hard_header_len = sizeof(struct untd_hdr_cmp)
224 + sizeof(struct wlp_tx_hdr)
225 + WLP_DATA_HLEN
226 + ETH_HLEN;
227 net_dev->mtu = 3500;
228 net_dev->tx_queue_len = 20; /* FIXME: maybe use 1000? */
229
230/* net_dev->flags &= ~IFF_BROADCAST; FIXME: BUG in firmware */
231 /* FIXME: multicast disabled */
232 net_dev->flags &= ~IFF_MULTICAST;
233 net_dev->features &= ~NETIF_F_SG;
234 net_dev->features &= ~NETIF_F_FRAGLIST;
235 /* All NETIF_F_*_CSUM disabled */
236 net_dev->features |= NETIF_F_HIGHDMA;
237 net_dev->watchdog_timeo = 5*HZ; /* FIXME: a better default? */
238
239 net_dev->open = i1480u_open;
240 net_dev->stop = i1480u_stop;
241 net_dev->hard_start_xmit = i1480u_hard_start_xmit;
242 net_dev->tx_timeout = i1480u_tx_timeout;
243 net_dev->get_stats = i1480u_get_stats;
244 net_dev->set_config = i1480u_set_config;
245 net_dev->change_mtu = i1480u_change_mtu;
246
247#ifdef i1480u_FLOW_CONTROL
248 /* Notification endpoint setup (submitted when we open the device) */
249 i1480u->notif_urb = usb_alloc_urb(0, GFP_KERNEL);
250 if (i1480u->notif_urb == NULL) {
251 dev_err(&iface->dev, "Unable to allocate notification URB\n");
252 result = -ENOMEM;
253 goto error_urb_alloc;
254 }
255 epd = &iface->cur_altsetting->endpoint[0].desc;
256 usb_fill_int_urb(i1480u->notif_urb, usb_dev,
257 usb_rcvintpipe(usb_dev, epd->bEndpointAddress),
258 i1480u->notif_buffer, sizeof(i1480u->notif_buffer),
259 i1480u_notif_cb, i1480u, epd->bInterval);
260
261#endif
262
263 i1480u->tx_inflight.max = i1480u_TX_INFLIGHT_MAX;
264 i1480u->tx_inflight.threshold = i1480u_TX_INFLIGHT_THRESHOLD;
265 i1480u->tx_inflight.restart_ts = jiffies;
266 usb_set_intfdata(iface, i1480u);
267 return result;
268
269#ifdef i1480u_FLOW_CONTROL
270error_urb_alloc:
271#endif
272 wlp_remove(wlp);
273error_wlp_setup:
274 uwb_rc_put(rc);
275out:
276 usb_put_dev(i1480u->usb_dev);
277 return result;
278}
279
280static void i1480u_rm(struct i1480u *i1480u)
281{
282 struct uwb_rc *rc = i1480u->wlp.rc;
283 usb_set_intfdata(i1480u->usb_iface, NULL);
284#ifdef i1480u_FLOW_CONTROL
285 usb_kill_urb(i1480u->notif_urb);
286 usb_free_urb(i1480u->notif_urb);
287#endif
288 wlp_remove(&i1480u->wlp);
289 uwb_rc_put(rc);
290 usb_put_dev(i1480u->usb_dev);
291}
292
293/** Just setup @net_dev's i1480u private data */
294static void i1480u_netdev_setup(struct net_device *net_dev)
295{
296 struct i1480u *i1480u = netdev_priv(net_dev);
297 /* Initialize @i1480u */
298 memset(i1480u, 0, sizeof(*i1480u));
299 i1480u_init(i1480u);
300}
301
302/**
303 * Probe a i1480u interface and register it
304 *
305 * @iface: USB interface to link to
306 * @id: USB class/subclass/protocol id
307 * @returns: 0 if ok, < 0 errno code on error.
308 *
309 * Does basic housekeeping stuff and then allocs a netdev with space
310 * for the i1480u data. Initializes, registers in i1480u, registers in
311 * netdev, ready to go.
312 */
313static int i1480u_probe(struct usb_interface *iface,
314 const struct usb_device_id *id)
315{
316 int result;
317 struct net_device *net_dev;
318 struct device *dev = &iface->dev;
319 struct i1480u *i1480u;
320
321 /* Allocate instance [calls i1480u_netdev_setup() on it] */
322 result = -ENOMEM;
323 net_dev = alloc_netdev(sizeof(*i1480u), "wlp%d", i1480u_netdev_setup);
324 if (net_dev == NULL) {
325 dev_err(dev, "no memory for network device instance\n");
326 goto error_alloc_netdev;
327 }
328 SET_NETDEV_DEV(net_dev, dev);
329 i1480u = netdev_priv(net_dev);
330 i1480u->net_dev = net_dev;
331 result = i1480u_add(i1480u, iface); /* Now setup all the wlp stuff */
332 if (result < 0) {
333 dev_err(dev, "cannot add i1480u device: %d\n", result);
334 goto error_i1480u_add;
335 }
336 result = register_netdev(net_dev); /* Okey dokey, bring it up */
337 if (result < 0) {
338 dev_err(dev, "cannot register network device: %d\n", result);
339 goto error_register_netdev;
340 }
341	result = i1480u_sysfs_setup(i1480u);
342 if (result < 0)
343 goto error_sysfs_init;
344 return 0;
345
346error_sysfs_init:
347 unregister_netdev(net_dev);
348error_register_netdev:
349 i1480u_rm(i1480u);
350error_i1480u_add:
351 free_netdev(net_dev);
352error_alloc_netdev:
353 return result;
354}
355
356
357/**
358 * Disconnect an i1480u from the system.
359 *
360 * i1480u_stop() has been called before, so all the RX and TX contexts
361 * have been taken down already. Make sure the queue is stopped,
362 * unregister netdev and i1480u, free and kill.
363 */
364static void i1480u_disconnect(struct usb_interface *iface)
365{
366 struct i1480u *i1480u;
367 struct net_device *net_dev;
368
369 i1480u = usb_get_intfdata(iface);
370 net_dev = i1480u->net_dev;
371 netif_stop_queue(net_dev);
372#ifdef i1480u_FLOW_CONTROL
373 usb_kill_urb(i1480u->notif_urb);
374#endif
375 i1480u_sysfs_release(i1480u);
376 unregister_netdev(net_dev);
377 i1480u_rm(i1480u);
378 free_netdev(net_dev);
379}
380
381static struct usb_device_id i1480u_id_table[] = {
382 {
383 .match_flags = USB_DEVICE_ID_MATCH_DEVICE \
384 | USB_DEVICE_ID_MATCH_DEV_INFO \
385 | USB_DEVICE_ID_MATCH_INT_INFO,
386 .idVendor = 0x8086,
387 .idProduct = 0x0c3b,
388 .bDeviceClass = 0xef,
389 .bDeviceSubClass = 0x02,
390 .bDeviceProtocol = 0x02,
391 .bInterfaceClass = 0xff,
392 .bInterfaceSubClass = 0xff,
393 .bInterfaceProtocol = 0xff,
394 },
395 {},
396};
397MODULE_DEVICE_TABLE(usb, i1480u_id_table);
398
399static struct usb_driver i1480u_driver = {
400 .name = KBUILD_MODNAME,
401 .probe = i1480u_probe,
402 .disconnect = i1480u_disconnect,
403 .id_table = i1480u_id_table,
404};
405
406static int __init i1480u_driver_init(void)
407{
408 return usb_register(&i1480u_driver);
409}
410module_init(i1480u_driver_init);
411
412
413static void __exit i1480u_driver_exit(void)
414{
415 usb_deregister(&i1480u_driver);
416}
417module_exit(i1480u_driver_exit);
418
419MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
420MODULE_DESCRIPTION("i1480 Wireless UWB Link WLP networking for USB");
421MODULE_LICENSE("GPL");
diff --git a/drivers/uwb/i1480/i1480u-wlp/netdev.c b/drivers/uwb/i1480/i1480u-wlp/netdev.c
new file mode 100644
index 000000000000..8802ac43d872
--- /dev/null
+++ b/drivers/uwb/i1480/i1480u-wlp/netdev.c
@@ -0,0 +1,368 @@
1/*
2 * WUSB Wire Adapter: WLP interface
3 * Driver for the Linux Network stack.
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * FIXME: docs
24 *
25 * Implementation of the netdevice linkage (except tx and rx related stuff).
26 *
27 * ROADMAP:
28 *
29 * ENTRY POINTS (Net device):
30 *
31 * i1480u_open(): Called when we ifconfig up the interface;
32 * associates to a UWB host controller, reserves
33 * bandwidth (MAS), sets up RX USB URB and starts
34 * the queue.
35 *
36 * i1480u_stop(): Called when we ifconfig down an interface;
37 * reverses _open().
38 *
39 * i1480u_set_config():
40 */
41
42#include <linux/if_arp.h>
43#include <linux/etherdevice.h>
44#include <linux/uwb/debug.h>
45#include "i1480u-wlp.h"
46
47struct i1480u_cmd_set_ip_mas {
48 struct uwb_rccb rccb;
49 struct uwb_dev_addr addr;
50 u8 stream;
51 u8 owner;
52 u8 type; /* enum uwb_drp_type */
53 u8 baMAS[32];
54} __attribute__((packed));
55
56
57static
58int i1480u_set_ip_mas(
59 struct uwb_rc *rc,
60 const struct uwb_dev_addr *dstaddr,
61 u8 stream, u8 owner, u8 type, unsigned long *mas)
62{
63
64 int result;
65 struct i1480u_cmd_set_ip_mas *cmd;
66 struct uwb_rc_evt_confirm reply;
67
68 result = -ENOMEM;
69 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
70 if (cmd == NULL)
71 goto error_kzalloc;
72 cmd->rccb.bCommandType = 0xfd;
73 cmd->rccb.wCommand = cpu_to_le16(0x000e);
74 cmd->addr = *dstaddr;
75 cmd->stream = stream;
76 cmd->owner = owner;
77 cmd->type = type;
78 if (mas == NULL)
79 memset(cmd->baMAS, 0x00, sizeof(cmd->baMAS));
80 else
81 memcpy(cmd->baMAS, mas, sizeof(cmd->baMAS));
82 reply.rceb.bEventType = 0xfd;
83 reply.rceb.wEvent = cpu_to_le16(0x000e);
84 result = uwb_rc_cmd(rc, "SET-IP-MAS", &cmd->rccb, sizeof(*cmd),
85 &reply.rceb, sizeof(reply));
86 if (result < 0)
87 goto error_cmd;
88	if (reply.bResultCode != UWB_RC_RES_SUCCESS) {
89 dev_err(&rc->uwb_dev.dev,
90 "SET-IP-MAS: command execution failed: %d\n",
91 reply.bResultCode);
92 result = -EIO;
93 }
94error_cmd:
95 kfree(cmd);
96error_kzalloc:
97 return result;
98}
99
100/*
101 * Inform a WLP interface of a MAS reservation
102 *
103 * @rc is assumed refcnted.
104 */
105/* FIXME: detect if remote device is WLP capable? */
106static int i1480u_mas_set_dev(struct uwb_dev *uwb_dev, struct uwb_rc *rc,
107 u8 stream, u8 owner, u8 type, unsigned long *mas)
108{
109 int result = 0;
110 struct device *dev = &rc->uwb_dev.dev;
111
112 result = i1480u_set_ip_mas(rc, &uwb_dev->dev_addr, stream, owner,
113 type, mas);
114 if (result < 0) {
115 char rcaddrbuf[UWB_ADDR_STRSIZE], devaddrbuf[UWB_ADDR_STRSIZE];
116 uwb_dev_addr_print(rcaddrbuf, sizeof(rcaddrbuf),
117 &rc->uwb_dev.dev_addr);
118 uwb_dev_addr_print(devaddrbuf, sizeof(devaddrbuf),
119 &uwb_dev->dev_addr);
120 dev_err(dev, "Set IP MAS (%s to %s) failed: %d\n",
121 rcaddrbuf, devaddrbuf, result);
122 }
123 return result;
124}
125
126/**
127 * Called by bandwidth allocator when change occurs in reservation.
128 *
129 * @rsv: The reservation that is being established, modified, or
130 * terminated.
131 *
132 * When a reservation is established, modified, or terminated the upper layer
133 * (WLP here) needs to set/update the currently available Media Access Slots
134 * that can be used for IP traffic.
135 *
136 * Our action taken during failure depends on how the reservation is being
137 * changed:
138 * - if reservation is being established we do nothing if we cannot set the
139 * new MAS to be used
140 * - if reservation is being terminated we revert back to PCA whether the
141 * SET IP MAS command succeeds or not.
142 */
143void i1480u_bw_alloc_cb(struct uwb_rsv *rsv)
144{
145 int result = 0;
146 struct i1480u *i1480u = rsv->pal_priv;
147 struct device *dev = &i1480u->usb_iface->dev;
148 struct uwb_dev *target_dev = rsv->target.dev;
149 struct uwb_rc *rc = i1480u->wlp.rc;
150 u8 stream = rsv->stream;
151 int type = rsv->type;
152 int is_owner = rsv->owner == &rc->uwb_dev;
153 unsigned long *bmp = rsv->mas.bm;
154
155 dev_err(dev, "WLP callback called - sending set ip mas\n");
156 /*user cannot change options while setting configuration*/
157 mutex_lock(&i1480u->options.mutex);
158 switch (rsv->state) {
159 case UWB_RSV_STATE_T_ACCEPTED:
160 case UWB_RSV_STATE_O_ESTABLISHED:
161 result = i1480u_mas_set_dev(target_dev, rc, stream, is_owner,
162 type, bmp);
163 if (result < 0) {
164 dev_err(dev, "MAS reservation failed: %d\n", result);
165 goto out;
166 }
167 if (is_owner) {
168 wlp_tx_hdr_set_delivery_id_type(&i1480u->options.def_tx_hdr,
169 WLP_DRP | stream);
170 wlp_tx_hdr_set_rts_cts(&i1480u->options.def_tx_hdr, 0);
171 }
172 break;
173 case UWB_RSV_STATE_NONE:
174 /* revert back to PCA */
175 result = i1480u_mas_set_dev(target_dev, rc, stream, is_owner,
176 type, bmp);
177 if (result < 0)
178 dev_err(dev, "MAS reservation failed: %d\n", result);
179 /* Revert to PCA even though SET IP MAS failed. */
180 wlp_tx_hdr_set_delivery_id_type(&i1480u->options.def_tx_hdr,
181 i1480u->options.pca_base_priority);
182 wlp_tx_hdr_set_rts_cts(&i1480u->options.def_tx_hdr, 1);
183 break;
184 default:
185 dev_err(dev, "unexpected WLP reservation state: %s (%d).\n",
186 uwb_rsv_state_str(rsv->state), rsv->state);
187 break;
188 }
189out:
190 mutex_unlock(&i1480u->options.mutex);
191 return;
192}
193
194/**
195 *
196 * Called on 'ifconfig up'
197 */
198int i1480u_open(struct net_device *net_dev)
199{
200 int result;
201 struct i1480u *i1480u = netdev_priv(net_dev);
202 struct wlp *wlp = &i1480u->wlp;
203 struct uwb_rc *rc;
204 struct device *dev = &i1480u->usb_iface->dev;
205
206 rc = wlp->rc;
207 result = i1480u_rx_setup(i1480u); /* Alloc RX stuff */
208 if (result < 0)
209 goto error_rx_setup;
210 netif_wake_queue(net_dev);
211#ifdef i1480u_FLOW_CONTROL
212	result = usb_submit_urb(i1480u->notif_urb, GFP_KERNEL);
213 if (result < 0) {
214 dev_err(dev, "Can't submit notification URB: %d\n", result);
215 goto error_notif_urb_submit;
216 }
217#endif
218 i1480u->uwb_notifs_handler.cb = i1480u_uwb_notifs_cb;
219 i1480u->uwb_notifs_handler.data = i1480u;
220 if (uwb_bg_joined(rc))
221 netif_carrier_on(net_dev);
222 else
223 netif_carrier_off(net_dev);
224 uwb_notifs_register(rc, &i1480u->uwb_notifs_handler);
225 /* Interface is up with an address, now we can create WSS */
226 result = wlp_wss_setup(net_dev, &wlp->wss);
227 if (result < 0) {
228		dev_err(dev, "Can't create WSS: %d\n", result);
229 goto error_notif_deregister;
230 }
231 return 0;
232error_notif_deregister:
233 uwb_notifs_deregister(rc, &i1480u->uwb_notifs_handler);
234#ifdef i1480u_FLOW_CONTROL
235error_notif_urb_submit:
236#endif
237 netif_stop_queue(net_dev);
238 i1480u_rx_release(i1480u);
239error_rx_setup:
240 return result;
241}
242
243
244/**
245 * Called on 'ifconfig down'
246 */
247int i1480u_stop(struct net_device *net_dev)
248{
249 struct i1480u *i1480u = netdev_priv(net_dev);
250 struct wlp *wlp = &i1480u->wlp;
251 struct uwb_rc *rc = wlp->rc;
252
253 BUG_ON(wlp->rc == NULL);
254 wlp_wss_remove(&wlp->wss);
255 uwb_notifs_deregister(rc, &i1480u->uwb_notifs_handler);
256 netif_carrier_off(net_dev);
257#ifdef i1480u_FLOW_CONTROL
258 usb_kill_urb(i1480u->notif_urb);
259#endif
260 netif_stop_queue(net_dev);
261 i1480u_rx_release(i1480u);
262 i1480u_tx_release(i1480u);
263 return 0;
264}
265
266
267/** Report statistics */
268struct net_device_stats *i1480u_get_stats(struct net_device *net_dev)
269{
270 struct i1480u *i1480u = netdev_priv(net_dev);
271 return &i1480u->stats;
272}
273
274
275/**
276 *
277 * Change the interface config--we probably don't have to do anything.
278 */
279int i1480u_set_config(struct net_device *net_dev, struct ifmap *map)
280{
281 int result;
282 struct i1480u *i1480u = netdev_priv(net_dev);
283 BUG_ON(i1480u->wlp.rc == NULL);
284 result = 0;
285 return result;
286}
287
288/**
289 * Change the MTU of the interface
290 */
291int i1480u_change_mtu(struct net_device *net_dev, int mtu)
292{
293 static union {
294 struct wlp_tx_hdr tx;
295 struct wlp_rx_hdr rx;
296 } i1480u_all_hdrs;
297
298 if (mtu < ETH_HLEN) /* We encap eth frames */
299 return -ERANGE;
300 if (mtu > 4000 - sizeof(i1480u_all_hdrs))
301 return -ERANGE;
302 net_dev->mtu = mtu;
303 return 0;
304}
305
306
307/**
308 * Callback function to handle events from UWB
309 *
310 * When we see other devices we know the carrier is OK; if we are the
311 * only device in the beacon group we set the carrier state to off.
312 */
313void i1480u_uwb_notifs_cb(void *data, struct uwb_dev *uwb_dev,
314 enum uwb_notifs event)
315{
316 struct i1480u *i1480u = data;
317 struct net_device *net_dev = i1480u->net_dev;
318 struct device *dev = &i1480u->usb_iface->dev;
319 switch (event) {
320 case UWB_NOTIF_BG_JOIN:
321 netif_carrier_on(net_dev);
322 dev_info(dev, "Link is up\n");
323 break;
324 case UWB_NOTIF_BG_LEAVE:
325 netif_carrier_off(net_dev);
326 dev_info(dev, "Link is down\n");
327 break;
328 default:
329 dev_err(dev, "don't know how to handle event %d from uwb\n",
330 event);
331 }
332}
333
334/**
335 * Stop the network queue
336 *
337 * Enable WLP substack to stop network queue. We also set the flow control
338 * threshold at this time to prevent the flow control from restarting the
339 * queue.
340 *
341 * We are losing the current threshold value here ... FIXME?
342 */
343void i1480u_stop_queue(struct wlp *wlp)
344{
345 struct i1480u *i1480u = container_of(wlp, struct i1480u, wlp);
346 struct net_device *net_dev = i1480u->net_dev;
347 i1480u->tx_inflight.threshold = 0;
348 netif_stop_queue(net_dev);
349}
350
351/**
352 * Start the network queue
353 *
354 * Enable WLP substack to start network queue. Also re-enable the flow
355 * control to manage the queue again.
356 *
357 * We re-enable the flow control by storing the default threshold in the
358 * flow control threshold. This means that if the user modified the
359 * threshold before the queue was stopped and restarted, that information
360 * will be lost. FIXME?
361 */
362void i1480u_start_queue(struct wlp *wlp)
363{
364 struct i1480u *i1480u = container_of(wlp, struct i1480u, wlp);
365 struct net_device *net_dev = i1480u->net_dev;
366 i1480u->tx_inflight.threshold = i1480u_TX_INFLIGHT_THRESHOLD;
367 netif_start_queue(net_dev);
368}
diff --git a/drivers/uwb/i1480/i1480u-wlp/rx.c b/drivers/uwb/i1480/i1480u-wlp/rx.c
new file mode 100644
index 000000000000..9fc035354a76
--- /dev/null
+++ b/drivers/uwb/i1480/i1480u-wlp/rx.c
@@ -0,0 +1,486 @@
1/*
2 * WUSB Wire Adapter: WLP interface
3 * Driver for the Linux Network stack.
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * i1480u's RX handling is simple. i1480u will send the received
24 * network packets broken up in fragments; 1 to N fragments make a
25 * packet, we assemble them together and deliver the packet with netif_rx().
26 *
27 * Because each USB transfer is a *single* fragment (except when the
28 * transfer contains a first fragment), each URB that is called back
29 * contains one or two fragments. So we queue N URBs, each with its own
30 * fragment buffer. When a URB is done, we process it (adding to the
31 * current skb from the fragment buffer until complete). Once
32 * processed, we requeue the URB. There is always a bunch of URBs
33 * ready to take data, so the gap between transfers should be minimal.
34 *
35 * An URB's transfer buffer is the data field of a socket buffer. This
36 * reduces copying as data can be passed directly to network layer. If a
37 * complete packet or 1st fragment is received the URB's transfer buffer is
38 * taken away from it and used to send data to the network layer. In this
39 * case a new transfer buffer is allocated to the URB before being requeued.
40 * If a "NEXT" or "LAST" fragment is received, the fragment contents is
41 * appended to the RX packet under construction and the transfer buffer
42 * is reused. To be able to use this buffer to assemble complete packets
43 * we set each buffer's size to that of the MAX ethernet packet that can
44 * be received. There is thus room for improvement in memory usage.
45 *
46 * When the max tx fragment size increases, we should be able to read
47 * data into the skbs directly with very simple code.
48 *
49 * ROADMAP:
50 *
51 * ENTRY POINTS:
52 *
53 * i1480u_rx_setup(): setup RX context [from i1480u_open()]
54 *
55 * i1480u_rx_release(): release RX context [from i1480u_stop()]
56 *
57 * i1480u_rx_cb(): called when the RX USB URB receives a
58 * packet. It removes the header and pushes it up
59 * the Linux netdev stack with netif_rx().
60 *
61 * i1480u_rx_buffer()
62 * i1480u_drop() and i1480u_fix()
63 * i1480u_skb_deliver
64 *
65 */
66
67#include <linux/netdevice.h>
68#include <linux/etherdevice.h>
69#include "i1480u-wlp.h"
70
71#define D_LOCAL 0
72#include <linux/uwb/debug.h>
73
74
75/**
76 * Setup the RX context
77 *
78 * Each URB is provided with a transfer_buffer that is the data field
79 * of a new socket buffer.
80 */
81int i1480u_rx_setup(struct i1480u *i1480u)
82{
83 int result, cnt;
84 struct device *dev = &i1480u->usb_iface->dev;
85 struct net_device *net_dev = i1480u->net_dev;
86 struct usb_endpoint_descriptor *epd;
87 struct sk_buff *skb;
88
89 /* Alloc RX stuff */
90 i1480u->rx_skb = NULL; /* not in process of receiving packet */
91 result = -ENOMEM;
92 epd = &i1480u->usb_iface->cur_altsetting->endpoint[1].desc;
93 for (cnt = 0; cnt < i1480u_RX_BUFS; cnt++) {
94 struct i1480u_rx_buf *rx_buf = &i1480u->rx_buf[cnt];
95 rx_buf->i1480u = i1480u;
96 skb = dev_alloc_skb(i1480u_MAX_RX_PKT_SIZE);
97 if (!skb) {
98 dev_err(dev,
99 "RX: cannot allocate RX buffer %d\n", cnt);
100 result = -ENOMEM;
101 goto error;
102 }
103 skb->dev = net_dev;
104 skb->ip_summed = CHECKSUM_NONE;
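		/* Reserve 2 bytes so that, after the 14-byte Ethernet
		 * header, the IP header ends up 4-byte aligned (the usual
		 * netdev RX alignment trick). */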
105 skb_reserve(skb, 2);
106 rx_buf->data = skb;
107 rx_buf->urb = usb_alloc_urb(0, GFP_KERNEL);
108 if (unlikely(rx_buf->urb == NULL)) {
109 dev_err(dev, "RX: cannot allocate URB %d\n", cnt);
110 result = -ENOMEM;
111 goto error;
112 }
113 usb_fill_bulk_urb(rx_buf->urb, i1480u->usb_dev,
114 usb_rcvbulkpipe(i1480u->usb_dev, epd->bEndpointAddress),
115 rx_buf->data->data, i1480u_MAX_RX_PKT_SIZE - 2,
116 i1480u_rx_cb, rx_buf);
117 result = usb_submit_urb(rx_buf->urb, GFP_NOIO);
118 if (unlikely(result < 0)) {
119 dev_err(dev, "RX: cannot submit URB %d: %d\n",
120 cnt, result);
121 goto error;
122 }
123 }
124 return 0;
125
126error:
127 i1480u_rx_release(i1480u);
128 return result;
129}
130
131
132/** Release resources associated to the rx context */
133void i1480u_rx_release(struct i1480u *i1480u)
134{
135 int cnt;
136 for (cnt = 0; cnt < i1480u_RX_BUFS; cnt++) {
137 if (i1480u->rx_buf[cnt].data)
138 dev_kfree_skb(i1480u->rx_buf[cnt].data);
139 if (i1480u->rx_buf[cnt].urb) {
140 usb_kill_urb(i1480u->rx_buf[cnt].urb);
141 usb_free_urb(i1480u->rx_buf[cnt].urb);
142 }
143 }
144 if (i1480u->rx_skb != NULL)
145 dev_kfree_skb(i1480u->rx_skb);
146}
147
148static
149void i1480u_rx_unlink_urbs(struct i1480u *i1480u)
150{
151 int cnt;
152 for (cnt = 0; cnt < i1480u_RX_BUFS; cnt++) {
153 if (i1480u->rx_buf[cnt].urb)
154 usb_unlink_urb(i1480u->rx_buf[cnt].urb);
155 }
156}
157
158/** Fix an out-of-sequence packet */
159#define i1480u_fix(i1480u, msg...) \
160do { \
161 if (printk_ratelimit()) \
162 dev_err(&i1480u->usb_iface->dev, msg); \
163 dev_kfree_skb_irq(i1480u->rx_skb); \
164 i1480u->rx_skb = NULL; \
165 i1480u->rx_untd_pkt_size = 0; \
166} while (0)
167
168
169/** Drop an out-of-sequence packet */
170#define i1480u_drop(i1480u, msg...) \
171do { \
172 if (printk_ratelimit()) \
173 dev_err(&i1480u->usb_iface->dev, msg); \
174 i1480u->stats.rx_dropped++; \
175} while (0)
176
177
178
179
180/** Finalizes setting up the SKB and delivers it
181 *
182 * We first pass the incoming frame to WLP substack for verification. It
183 * may also be a WLP association frame in which case WLP will take over the
184 * processing. If WLP does not take it over it will still verify it; if the
185 * frame is invalid the skb will be freed by WLP and we will not continue
186 * parsing.
187 */
188static
189void i1480u_skb_deliver(struct i1480u *i1480u)
190{
191 int should_parse;
192 struct net_device *net_dev = i1480u->net_dev;
193 struct device *dev = &i1480u->usb_iface->dev;
194
195 d_printf(6, dev, "RX delivered pre skb(%p), %u bytes\n",
196 i1480u->rx_skb, i1480u->rx_skb->len);
197 d_dump(7, dev, i1480u->rx_skb->data, i1480u->rx_skb->len);
198 should_parse = wlp_receive_frame(dev, &i1480u->wlp, i1480u->rx_skb,
199 &i1480u->rx_srcaddr);
200 if (!should_parse)
201 goto out;
202 i1480u->rx_skb->protocol = eth_type_trans(i1480u->rx_skb, net_dev);
203 d_printf(5, dev, "RX delivered skb(%p), %u bytes\n",
204 i1480u->rx_skb, i1480u->rx_skb->len);
205 d_dump(7, dev, i1480u->rx_skb->data,
206 i1480u->rx_skb->len > 72 ? 72 : i1480u->rx_skb->len);
207 i1480u->stats.rx_packets++;
208 i1480u->stats.rx_bytes += i1480u->rx_untd_pkt_size;
209 net_dev->last_rx = jiffies;
210 /* FIXME: flow control: check netif_rx() retval */
211
212 netif_rx(i1480u->rx_skb); /* deliver */
213out:
214 i1480u->rx_skb = NULL;
215 i1480u->rx_untd_pkt_size = 0;
216}
217
218
219/**
220 * Process a buffer of data received from the USB RX endpoint
221 *
222 * First fragment arrives with next or last fragment. All other fragments
223 * arrive alone.
224 *
225 * /me hates long functions.
226 */
227static
228void i1480u_rx_buffer(struct i1480u_rx_buf *rx_buf)
229{
230 unsigned pkt_completed = 0; /* !0 when we got all pkt fragments */
231 size_t untd_hdr_size, untd_frg_size;
232 size_t i1480u_hdr_size;
233 struct wlp_rx_hdr *i1480u_hdr = NULL;
234
235 struct i1480u *i1480u = rx_buf->i1480u;
236 struct sk_buff *skb = rx_buf->data;
237 int size_left = rx_buf->urb->actual_length;
238 void *ptr = rx_buf->urb->transfer_buffer; /* also rx_buf->data->data */
239 struct untd_hdr *untd_hdr;
240
241 struct net_device *net_dev = i1480u->net_dev;
242 struct device *dev = &i1480u->usb_iface->dev;
243 struct sk_buff *new_skb;
244
245#if 0
246 dev_fnstart(dev,
247 "(i1480u %p ptr %p size_left %zu)\n", i1480u, ptr, size_left);
248 dev_err(dev, "RX packet, %zu bytes\n", size_left);
249 dump_bytes(dev, ptr, size_left);
250#endif
251 i1480u_hdr_size = sizeof(struct wlp_rx_hdr);
252
253 while (size_left > 0) {
254 if (pkt_completed) {
255			i1480u_drop(i1480u, "RX: fragment follows completed "
256 "packet in same buffer. Dropping\n");
257 break;
258 }
259 untd_hdr = ptr;
260 if (size_left < sizeof(*untd_hdr)) { /* Check the UNTD header */
261 i1480u_drop(i1480u, "RX: short UNTD header! Dropping\n");
262 goto out;
263 }
264 if (unlikely(untd_hdr_rx_tx(untd_hdr) == 0)) { /* Paranoia: TX set? */
265 i1480u_drop(i1480u, "RX: TX bit set! Dropping\n");
266 goto out;
267 }
268 switch (untd_hdr_type(untd_hdr)) { /* Check the UNTD header type */
269 case i1480u_PKT_FRAG_1ST: {
270 struct untd_hdr_1st *untd_hdr_1st = (void *) untd_hdr;
271 dev_dbg(dev, "1st fragment\n");
272 untd_hdr_size = sizeof(struct untd_hdr_1st);
273 if (i1480u->rx_skb != NULL)
274 i1480u_fix(i1480u, "RX: 1st fragment out of "
275 "sequence! Fixing\n");
276 if (size_left < untd_hdr_size + i1480u_hdr_size) {
277 i1480u_drop(i1480u, "RX: short 1st fragment! "
278 "Dropping\n");
279 goto out;
280 }
281 i1480u->rx_untd_pkt_size = le16_to_cpu(untd_hdr->len)
282 - i1480u_hdr_size;
283 untd_frg_size = le16_to_cpu(untd_hdr_1st->fragment_len);
284 if (size_left < untd_hdr_size + untd_frg_size) {
285 i1480u_drop(i1480u,
286 "RX: short payload! Dropping\n");
287 goto out;
288 }
289 i1480u->rx_skb = skb;
290 i1480u_hdr = (void *) untd_hdr_1st + untd_hdr_size;
291 i1480u->rx_srcaddr = i1480u_hdr->srcaddr;
292 skb_put(i1480u->rx_skb, untd_hdr_size + untd_frg_size);
293 skb_pull(i1480u->rx_skb, untd_hdr_size + i1480u_hdr_size);
294 stats_add_sample(&i1480u->lqe_stats, (s8) i1480u_hdr->LQI - 7);
295 stats_add_sample(&i1480u->rssi_stats, i1480u_hdr->RSSI + 18);
296 rx_buf->data = NULL; /* need to create new buffer */
297 break;
298 }
299 case i1480u_PKT_FRAG_NXT: {
300 dev_dbg(dev, "nxt fragment\n");
301 untd_hdr_size = sizeof(struct untd_hdr_rst);
302 if (i1480u->rx_skb == NULL) {
303 i1480u_drop(i1480u, "RX: next fragment out of "
304 "sequence! Dropping\n");
305 goto out;
306 }
307 if (size_left < untd_hdr_size) {
308 i1480u_drop(i1480u, "RX: short NXT fragment! "
309 "Dropping\n");
310 goto out;
311 }
312 untd_frg_size = le16_to_cpu(untd_hdr->len);
313 if (size_left < untd_hdr_size + untd_frg_size) {
314 i1480u_drop(i1480u,
315 "RX: short payload! Dropping\n");
316 goto out;
317 }
318 memmove(skb_put(i1480u->rx_skb, untd_frg_size),
319 ptr + untd_hdr_size, untd_frg_size);
320 break;
321 }
322 case i1480u_PKT_FRAG_LST: {
323 dev_dbg(dev, "Lst fragment\n");
324 untd_hdr_size = sizeof(struct untd_hdr_rst);
325 if (i1480u->rx_skb == NULL) {
326 i1480u_drop(i1480u, "RX: last fragment out of "
327 "sequence! Dropping\n");
328 goto out;
329 }
330 if (size_left < untd_hdr_size) {
331 i1480u_drop(i1480u, "RX: short LST fragment! "
332 "Dropping\n");
333 goto out;
334 }
335 untd_frg_size = le16_to_cpu(untd_hdr->len);
336 if (size_left < untd_frg_size + untd_hdr_size) {
337 i1480u_drop(i1480u,
338 "RX: short payload! Dropping\n");
339 goto out;
340 }
341 memmove(skb_put(i1480u->rx_skb, untd_frg_size),
342 ptr + untd_hdr_size, untd_frg_size);
343 pkt_completed = 1;
344 break;
345 }
346 case i1480u_PKT_FRAG_CMP: {
347 dev_dbg(dev, "cmp fragment\n");
348 untd_hdr_size = sizeof(struct untd_hdr_cmp);
349 if (i1480u->rx_skb != NULL)
350 i1480u_fix(i1480u, "RX: fix out-of-sequence CMP"
351 " fragment!\n");
352 if (size_left < untd_hdr_size + i1480u_hdr_size) {
353 i1480u_drop(i1480u, "RX: short CMP fragment! "
354 "Dropping\n");
355 goto out;
356 }
357 i1480u->rx_untd_pkt_size = le16_to_cpu(untd_hdr->len);
358 untd_frg_size = i1480u->rx_untd_pkt_size;
359 if (size_left < i1480u->rx_untd_pkt_size + untd_hdr_size) {
360 i1480u_drop(i1480u,
361 "RX: short payload! Dropping\n");
362 goto out;
363 }
364 i1480u->rx_skb = skb;
365 i1480u_hdr = (void *) untd_hdr + untd_hdr_size;
366 i1480u->rx_srcaddr = i1480u_hdr->srcaddr;
367 stats_add_sample(&i1480u->lqe_stats, (s8) i1480u_hdr->LQI - 7);
368 stats_add_sample(&i1480u->rssi_stats, i1480u_hdr->RSSI + 18);
369 skb_put(i1480u->rx_skb, untd_hdr_size + i1480u->rx_untd_pkt_size);
370 skb_pull(i1480u->rx_skb, untd_hdr_size + i1480u_hdr_size);
371 rx_buf->data = NULL; /* for hand off skb to network stack */
372 pkt_completed = 1;
373 i1480u->rx_untd_pkt_size -= i1480u_hdr_size; /* accurate stat */
374 break;
375 }
376 default:
377 i1480u_drop(i1480u, "RX: unknown packet type %u! "
378 "Dropping\n", untd_hdr_type(untd_hdr));
379 goto out;
380 }
381 size_left -= untd_hdr_size + untd_frg_size;
382 if (size_left > 0)
383 ptr += untd_hdr_size + untd_frg_size;
384 }
385 if (pkt_completed)
386 i1480u_skb_deliver(i1480u);
387out:
388 /* recreate needed RX buffers*/
389 if (rx_buf->data == NULL) {
390 /* buffer is being used to receive packet, create new */
391 new_skb = dev_alloc_skb(i1480u_MAX_RX_PKT_SIZE);
392 if (!new_skb) {
393 if (printk_ratelimit())
394 dev_err(dev,
395 "RX: cannot allocate RX buffer\n");
396 } else {
397 new_skb->dev = net_dev;
398 new_skb->ip_summed = CHECKSUM_NONE;
399 skb_reserve(new_skb, 2);
400 rx_buf->data = new_skb;
401 }
402 }
403 return;
404}
405
406
407/**
408 * Called when an RX URB has finished receiving or has found some kind
409 * of error condition.
410 *
411 * LIMITATIONS:
412 *
413 * - We read USB-transfers, each transfer contains a SINGLE fragment
414 * (can contain a complete packet, or a 1st, next, or last fragment
415 * of a packet).
416 * Looks like a transfer can contain more than one fragment (07/18/06)
417 *
418 * - Each transfer buffer is the size of the maximum packet size (minus
419 * headroom), i1480u_MAX_PKT_SIZE - 2
420 *
421 * - We always read the full USB-transfer, no partials.
422 *
423 * - Each transfer is read directly into a skb. This skb will be used to
424 * send data to the upper layers if it is the first fragment or a complete
425 * packet. In the other cases the data will be copied from the skb to
426 * another skb that is being prepared for the upper layers from a prev
427 * first fragment.
428 *
429 * It is simply too much of a pain. Gosh, there should be a unified
430 * SG infrastructure for *everything* [so that I could declare a SG
431 * buffer, pass it to USB for receiving, append some space to it if
432 * I wish, receive more until I have the whole chunk, adapt
433 * pointers on each fragment to remove hardware headers and then
434 * attach that to an skbuff and netif_rx()].
435 */
436void i1480u_rx_cb(struct urb *urb)
437{
438 int result;
439 int do_parse_buffer = 1;
440 struct i1480u_rx_buf *rx_buf = urb->context;
441 struct i1480u *i1480u = rx_buf->i1480u;
442 struct device *dev = &i1480u->usb_iface->dev;
443 unsigned long flags;
444 u8 rx_buf_idx = rx_buf - i1480u->rx_buf;
445
446 switch (urb->status) {
447 case 0:
448 break;
449 case -ECONNRESET: /* Not an error, but a controlled situation; */
450 case -ENOENT: /* (we killed the URB)...so, no broadcast */
451 case -ESHUTDOWN: /* going away! */
452		dev_err(dev, "RX URB[%u]: going down %d\n",
453 rx_buf_idx, urb->status);
454 goto error;
455 default:
456 dev_err(dev, "RX URB[%u]: unknown status %d\n",
457 rx_buf_idx, urb->status);
458 if (edc_inc(&i1480u->rx_errors, EDC_MAX_ERRORS,
459 EDC_ERROR_TIMEFRAME)) {
460 dev_err(dev, "RX: max acceptable errors exceeded,"
461 " resetting device.\n");
462 i1480u_rx_unlink_urbs(i1480u);
463 wlp_reset_all(&i1480u->wlp);
464 goto error;
465 }
466 do_parse_buffer = 0;
467 break;
468 }
469 spin_lock_irqsave(&i1480u->lock, flags);
470 /* chew the data fragments, extract network packets */
471 if (do_parse_buffer) {
472 i1480u_rx_buffer(rx_buf);
473 if (rx_buf->data) {
474 rx_buf->urb->transfer_buffer = rx_buf->data->data;
475 result = usb_submit_urb(rx_buf->urb, GFP_ATOMIC);
476 if (result < 0) {
477 dev_err(dev, "RX URB[%u]: cannot submit %d\n",
478 rx_buf_idx, result);
479 }
480 }
481 }
482 spin_unlock_irqrestore(&i1480u->lock, flags);
483error:
484 return;
485}
486
diff --git a/drivers/uwb/i1480/i1480u-wlp/sysfs.c b/drivers/uwb/i1480/i1480u-wlp/sysfs.c
new file mode 100644
index 000000000000..a1d8ca6ac935
--- /dev/null
+++ b/drivers/uwb/i1480/i1480u-wlp/sysfs.c
@@ -0,0 +1,408 @@
1/*
2 * WUSB Wire Adapter: WLP interface
3 * Sysfs interfaces
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * FIXME: docs
24 */
25
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/uwb/debug.h>
29#include <linux/device.h>
30#include "i1480u-wlp.h"
31
32
33/**
34 *
35 * @dev: Class device from the net_device; assumed refcnted.
36 *
37 * Yes, I don't lock--we assume it is refcounted and I am getting a
38 * single byte value that is kind of atomic to read.
39 */
40ssize_t uwb_phy_rate_show(const struct wlp_options *options, char *buf)
41{
42 return sprintf(buf, "%u\n",
43 wlp_tx_hdr_phy_rate(&options->def_tx_hdr));
44}
45EXPORT_SYMBOL_GPL(uwb_phy_rate_show);
46
47
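/*
 * The value written is the raw ECMA-368 PHY rate index used in the WLP
 * TX header (presumably 0 = 53.3 Mbps up to 7 = 480 Mbps, with
 * UWB_PHY_RATE_INVALID as the first out-of-range value); for example,
 * writing "7" would select the highest rate.
 */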
48ssize_t uwb_phy_rate_store(struct wlp_options *options,
49 const char *buf, size_t size)
50{
51 ssize_t result;
52 unsigned rate;
53
54 result = sscanf(buf, "%u\n", &rate);
55 if (result != 1) {
56 result = -EINVAL;
57 goto out;
58 }
59 result = -EINVAL;
60 if (rate >= UWB_PHY_RATE_INVALID)
61 goto out;
62 wlp_tx_hdr_set_phy_rate(&options->def_tx_hdr, rate);
63 result = 0;
64out:
65 return result < 0 ? result : size;
66}
67EXPORT_SYMBOL_GPL(uwb_phy_rate_store);
68
69
70ssize_t uwb_rts_cts_show(const struct wlp_options *options, char *buf)
71{
72 return sprintf(buf, "%u\n",
73 wlp_tx_hdr_rts_cts(&options->def_tx_hdr));
74}
75EXPORT_SYMBOL_GPL(uwb_rts_cts_show);
76
77
78ssize_t uwb_rts_cts_store(struct wlp_options *options,
79 const char *buf, size_t size)
80{
81 ssize_t result;
82 unsigned value;
83
84 result = sscanf(buf, "%u\n", &value);
85 if (result != 1) {
86 result = -EINVAL;
87 goto out;
88 }
89 result = -EINVAL;
90 wlp_tx_hdr_set_rts_cts(&options->def_tx_hdr, !!value);
91 result = 0;
92out:
93 return result < 0 ? result : size;
94}
95EXPORT_SYMBOL_GPL(uwb_rts_cts_store);
96
97
98ssize_t uwb_ack_policy_show(const struct wlp_options *options, char *buf)
99{
100 return sprintf(buf, "%u\n",
101 wlp_tx_hdr_ack_policy(&options->def_tx_hdr));
102}
103EXPORT_SYMBOL_GPL(uwb_ack_policy_show);
104
105
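/*
 * The value written is the ECMA-368 ACK policy for outgoing frames
 * (No-ACK, Imm-ACK, B-ACK, B-ACK request); anything above UWB_ACK_B_REQ
 * is rejected. E.g. writing "1" would select Imm-ACK, assuming the
 * standard enumeration order.
 */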
106ssize_t uwb_ack_policy_store(struct wlp_options *options,
107 const char *buf, size_t size)
108{
109 ssize_t result;
110 unsigned value;
111
112 result = sscanf(buf, "%u\n", &value);
113 if (result != 1 || value > UWB_ACK_B_REQ) {
114 result = -EINVAL;
115 goto out;
116 }
117 wlp_tx_hdr_set_ack_policy(&options->def_tx_hdr, value);
118 result = 0;
119out:
120 return result < 0 ? result : size;
121}
122EXPORT_SYMBOL_GPL(uwb_ack_policy_store);
123
124
125/**
126 * Show the PCA base priority.
127 *
128 * We can access without locking, as the value is (for now) orthogonal
129 * to other values.
130 */
131ssize_t uwb_pca_base_priority_show(const struct wlp_options *options,
132 char *buf)
133{
134 return sprintf(buf, "%u\n",
135 options->pca_base_priority);
136}
137EXPORT_SYMBOL_GPL(uwb_pca_base_priority_show);
138
139
140/**
141 * Set the PCA base priority.
142 *
143 * We can access without locking, as the value is (for now) orthogonal
144 * to other values.
145 */
146ssize_t uwb_pca_base_priority_store(struct wlp_options *options,
147 const char *buf, size_t size)
148{
149 ssize_t result = -EINVAL;
150 u8 pca_base_priority;
151
152 result = sscanf(buf, "%hhu\n", &pca_base_priority);
153 if (result != 1) {
154 result = -EINVAL;
155 goto out;
156 }
157 result = -EINVAL;
158 if (pca_base_priority >= 8)
159 goto out;
160 options->pca_base_priority = pca_base_priority;
161 /* Update TX header if we are currently using PCA. */
162	if ((wlp_tx_hdr_delivery_id_type(&options->def_tx_hdr) & WLP_DRP) == 0)
163 wlp_tx_hdr_set_delivery_id_type(&options->def_tx_hdr, options->pca_base_priority);
164 result = 0;
165out:
166 return result < 0 ? result : size;
167}
168EXPORT_SYMBOL_GPL(uwb_pca_base_priority_store);
169
170/**
171 * Show current inflight values
172 *
173 * Will print the current MAX and THRESHOLD values for the basic flow
174 * control. In addition it will report how many times the TX queue needed
175 * to be restarted since the last time this query was made.
176 */
177static ssize_t wlp_tx_inflight_show(struct i1480u_tx_inflight *inflight,
178 char *buf)
179{
180 ssize_t result;
181 unsigned long sec_elapsed = (jiffies - inflight->restart_ts)/HZ;
182 unsigned long restart_count = atomic_read(&inflight->restart_count);
183
184 result = scnprintf(buf, PAGE_SIZE, "%lu %lu %d %lu %lu %lu\n"
185 "#read: threshold max inflight_count restarts "
186 "seconds restarts/sec\n"
187 "#write: threshold max\n",
188 inflight->threshold, inflight->max,
189 atomic_read(&inflight->count),
190 restart_count, sec_elapsed,
191 sec_elapsed == 0 ? 0 : restart_count/sec_elapsed);
192 inflight->restart_ts = jiffies;
193 atomic_set(&inflight->restart_count, 0);
194 return result;
195}
196
197static
198ssize_t wlp_tx_inflight_store(struct i1480u_tx_inflight *inflight,
199 const char *buf, size_t size)
200{
201 unsigned long in_threshold, in_max;
202 ssize_t result;
203 result = sscanf(buf, "%lu %lu", &in_threshold, &in_max);
204 if (result != 2)
205 return -EINVAL;
206 if (in_max <= in_threshold)
207 return -EINVAL;
208 inflight->max = in_max;
209 inflight->threshold = in_threshold;
210 return size;
211}
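
/*
 * Example usage (paths are illustrative; the attribute hangs off the WLP
 * network interface's device directory, e.g. for an interface named wlp0):
 *
 *   $ cat /sys/class/net/wlp0/wlp_tx_inflight
 *   64 256 3 12 60 0
 *   #read: threshold max inflight_count restarts seconds restarts/sec
 *   #write: threshold max
 *   $ echo "64 256" > /sys/class/net/wlp0/wlp_tx_inflight
 */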
212/*
213 * Glue (or function adaptors) for accessing info on sysfs
214 *
215 * [we need this indirection because the PCI driver does almost the
216 * same]
217 *
218 * Linux 2.6.21 changed how 'struct netdevice' does attributes (from
219 * having a 'struct class_dev' to having a 'struct device'). That is
220 * quite a pain.
221 *
222 * So we try to abstract that here. i1480u_SHOW() and i1480u_STORE()
223 * create adaptors for extracting the 'struct i1480u' from a 'struct
224 * dev' and calling a function for doing a sysfs operation (as we have
225 * them factorized already). i1480u_ATTR creates the attribute file
226 * (CLASS_DEVICE_ATTR or DEVICE_ATTR) and i1480u_ATTR_NAME produces a
227 * class_device_attr_NAME or device_attr_NAME (for group registration).
228 */
229#include <linux/version.h>
230
231#define i1480u_SHOW(name, fn, param) \
232static ssize_t i1480u_show_##name(struct device *dev, \
233 struct device_attribute *attr,\
234 char *buf) \
235{ \
236 struct i1480u *i1480u = netdev_priv(to_net_dev(dev)); \
237 return fn(&i1480u->param, buf); \
238}
239
240#define i1480u_STORE(name, fn, param) \
241static ssize_t i1480u_store_##name(struct device *dev, \
242 struct device_attribute *attr,\
243 const char *buf, size_t size)\
244{ \
245 struct i1480u *i1480u = netdev_priv(to_net_dev(dev)); \
246 return fn(&i1480u->param, buf, size); \
247}
248
249#define i1480u_ATTR(name, perm) static DEVICE_ATTR(name, perm, \
250 i1480u_show_##name,\
251 i1480u_store_##name)
252
253#define i1480u_ATTR_SHOW(name) static DEVICE_ATTR(name, \
254 S_IRUGO, \
255 i1480u_show_##name, NULL)
256
257#define i1480u_ATTR_NAME(a) (dev_attr_##a)
258
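/*
 * For illustration, a rough sketch of what the preprocessor generates
 * (not part of the build): i1480u_SHOW(uwb_phy_rate, uwb_phy_rate_show,
 * options) followed by i1480u_ATTR(uwb_phy_rate, S_IRUGO | S_IWUSR)
 * expands to approximately:
 *
 *   static ssize_t i1480u_show_uwb_phy_rate(struct device *dev,
 *                                           struct device_attribute *attr,
 *                                           char *buf)
 *   {
 *           struct i1480u *i1480u = netdev_priv(to_net_dev(dev));
 *           return uwb_phy_rate_show(&i1480u->options, buf);
 *   }
 *
 *   static DEVICE_ATTR(uwb_phy_rate, S_IRUGO | S_IWUSR,
 *                      i1480u_show_uwb_phy_rate, i1480u_store_uwb_phy_rate);
 *
 * i1480u_ATTR_NAME(uwb_phy_rate) then names the resulting
 * dev_attr_uwb_phy_rate attribute for the attribute group below.
 */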
259
260/*
261 * Sysfs adaptors
262 */
263i1480u_SHOW(uwb_phy_rate, uwb_phy_rate_show, options);
264i1480u_STORE(uwb_phy_rate, uwb_phy_rate_store, options);
265i1480u_ATTR(uwb_phy_rate, S_IRUGO | S_IWUSR);
266
267i1480u_SHOW(uwb_rts_cts, uwb_rts_cts_show, options);
268i1480u_STORE(uwb_rts_cts, uwb_rts_cts_store, options);
269i1480u_ATTR(uwb_rts_cts, S_IRUGO | S_IWUSR);
270
271i1480u_SHOW(uwb_ack_policy, uwb_ack_policy_show, options);
272i1480u_STORE(uwb_ack_policy, uwb_ack_policy_store, options);
273i1480u_ATTR(uwb_ack_policy, S_IRUGO | S_IWUSR);
274
275i1480u_SHOW(uwb_pca_base_priority, uwb_pca_base_priority_show, options);
276i1480u_STORE(uwb_pca_base_priority, uwb_pca_base_priority_store, options);
277i1480u_ATTR(uwb_pca_base_priority, S_IRUGO | S_IWUSR);
278
279i1480u_SHOW(wlp_eda, wlp_eda_show, wlp);
280i1480u_STORE(wlp_eda, wlp_eda_store, wlp);
281i1480u_ATTR(wlp_eda, S_IRUGO | S_IWUSR);
282
283i1480u_SHOW(wlp_uuid, wlp_uuid_show, wlp);
284i1480u_STORE(wlp_uuid, wlp_uuid_store, wlp);
285i1480u_ATTR(wlp_uuid, S_IRUGO | S_IWUSR);
286
287i1480u_SHOW(wlp_dev_name, wlp_dev_name_show, wlp);
288i1480u_STORE(wlp_dev_name, wlp_dev_name_store, wlp);
289i1480u_ATTR(wlp_dev_name, S_IRUGO | S_IWUSR);
290
291i1480u_SHOW(wlp_dev_manufacturer, wlp_dev_manufacturer_show, wlp);
292i1480u_STORE(wlp_dev_manufacturer, wlp_dev_manufacturer_store, wlp);
293i1480u_ATTR(wlp_dev_manufacturer, S_IRUGO | S_IWUSR);
294
295i1480u_SHOW(wlp_dev_model_name, wlp_dev_model_name_show, wlp);
296i1480u_STORE(wlp_dev_model_name, wlp_dev_model_name_store, wlp);
297i1480u_ATTR(wlp_dev_model_name, S_IRUGO | S_IWUSR);
298
299i1480u_SHOW(wlp_dev_model_nr, wlp_dev_model_nr_show, wlp);
300i1480u_STORE(wlp_dev_model_nr, wlp_dev_model_nr_store, wlp);
301i1480u_ATTR(wlp_dev_model_nr, S_IRUGO | S_IWUSR);
302
303i1480u_SHOW(wlp_dev_serial, wlp_dev_serial_show, wlp);
304i1480u_STORE(wlp_dev_serial, wlp_dev_serial_store, wlp);
305i1480u_ATTR(wlp_dev_serial, S_IRUGO | S_IWUSR);
306
307i1480u_SHOW(wlp_dev_prim_category, wlp_dev_prim_category_show, wlp);
308i1480u_STORE(wlp_dev_prim_category, wlp_dev_prim_category_store, wlp);
309i1480u_ATTR(wlp_dev_prim_category, S_IRUGO | S_IWUSR);
310
311i1480u_SHOW(wlp_dev_prim_OUI, wlp_dev_prim_OUI_show, wlp);
312i1480u_STORE(wlp_dev_prim_OUI, wlp_dev_prim_OUI_store, wlp);
313i1480u_ATTR(wlp_dev_prim_OUI, S_IRUGO | S_IWUSR);
314
315i1480u_SHOW(wlp_dev_prim_OUI_sub, wlp_dev_prim_OUI_sub_show, wlp);
316i1480u_STORE(wlp_dev_prim_OUI_sub, wlp_dev_prim_OUI_sub_store, wlp);
317i1480u_ATTR(wlp_dev_prim_OUI_sub, S_IRUGO | S_IWUSR);
318
319i1480u_SHOW(wlp_dev_prim_subcat, wlp_dev_prim_subcat_show, wlp);
320i1480u_STORE(wlp_dev_prim_subcat, wlp_dev_prim_subcat_store, wlp);
321i1480u_ATTR(wlp_dev_prim_subcat, S_IRUGO | S_IWUSR);
322
323i1480u_SHOW(wlp_neighborhood, wlp_neighborhood_show, wlp);
324i1480u_ATTR_SHOW(wlp_neighborhood);
325
326i1480u_SHOW(wss_activate, wlp_wss_activate_show, wlp.wss);
327i1480u_STORE(wss_activate, wlp_wss_activate_store, wlp.wss);
328i1480u_ATTR(wss_activate, S_IRUGO | S_IWUSR);
329
330/*
331 * Show the (min, max, avg) Line Quality Estimate (LQE, in dB) as over
332 * the last 256 received WLP frames (ECMA-368 13.3).
333 *
334 * [the -7dB that have to be subtracted from the LQI to make the LQE
335 * are already taken into account].
336 */
337i1480u_SHOW(wlp_lqe, stats_show, lqe_stats);
338i1480u_STORE(wlp_lqe, stats_store, lqe_stats);
339i1480u_ATTR(wlp_lqe, S_IRUGO | S_IWUSR);
340
341/*
342 * Show the Receive Signal Strength Indicator averaged over all the
343 * received WLP frames (ECMA-368 13.3). It is still not clear what
344 * this value is, but it is roughly a percentage of the signal strength
345 * at the antenna.
346 */
347i1480u_SHOW(wlp_rssi, stats_show, rssi_stats);
348i1480u_STORE(wlp_rssi, stats_store, rssi_stats);
349i1480u_ATTR(wlp_rssi, S_IRUGO | S_IWUSR);
350
351/**
352 * We maintain a basic flow control counter: "count" tracks how many TX
353 * URBs are outstanding. Only "max" TX URBs are allowed to be outstanding
354 * at any time; once that value is reached the queue is stopped, and it is
355 * restarted when the count drops back to "threshold" outstanding URBs.
356 *
357 */
358i1480u_SHOW(wlp_tx_inflight, wlp_tx_inflight_show, tx_inflight);
359i1480u_STORE(wlp_tx_inflight, wlp_tx_inflight_store, tx_inflight);
360i1480u_ATTR(wlp_tx_inflight, S_IRUGO | S_IWUSR);
361
362static struct attribute *i1480u_attrs[] = {
363 &i1480u_ATTR_NAME(uwb_phy_rate).attr,
364 &i1480u_ATTR_NAME(uwb_rts_cts).attr,
365 &i1480u_ATTR_NAME(uwb_ack_policy).attr,
366 &i1480u_ATTR_NAME(uwb_pca_base_priority).attr,
367 &i1480u_ATTR_NAME(wlp_lqe).attr,
368 &i1480u_ATTR_NAME(wlp_rssi).attr,
369 &i1480u_ATTR_NAME(wlp_eda).attr,
370 &i1480u_ATTR_NAME(wlp_uuid).attr,
371 &i1480u_ATTR_NAME(wlp_dev_name).attr,
372 &i1480u_ATTR_NAME(wlp_dev_manufacturer).attr,
373 &i1480u_ATTR_NAME(wlp_dev_model_name).attr,
374 &i1480u_ATTR_NAME(wlp_dev_model_nr).attr,
375 &i1480u_ATTR_NAME(wlp_dev_serial).attr,
376 &i1480u_ATTR_NAME(wlp_dev_prim_category).attr,
377 &i1480u_ATTR_NAME(wlp_dev_prim_OUI).attr,
378 &i1480u_ATTR_NAME(wlp_dev_prim_OUI_sub).attr,
379 &i1480u_ATTR_NAME(wlp_dev_prim_subcat).attr,
380 &i1480u_ATTR_NAME(wlp_neighborhood).attr,
381 &i1480u_ATTR_NAME(wss_activate).attr,
382 &i1480u_ATTR_NAME(wlp_tx_inflight).attr,
383 NULL,
384};
385
386static struct attribute_group i1480u_attr_group = {
387 .name = NULL, /* we want them in the same directory */
388 .attrs = i1480u_attrs,
389};
390
391int i1480u_sysfs_setup(struct i1480u *i1480u)
392{
393 int result;
394 struct device *dev = &i1480u->usb_iface->dev;
395 result = sysfs_create_group(&i1480u->net_dev->dev.kobj,
396 &i1480u_attr_group);
397 if (result < 0)
398 dev_err(dev, "cannot initialize sysfs attributes: %d\n",
399 result);
400 return result;
401}
402
403
404void i1480u_sysfs_release(struct i1480u *i1480u)
405{
406 sysfs_remove_group(&i1480u->net_dev->dev.kobj,
407 &i1480u_attr_group);
408}
diff --git a/drivers/uwb/i1480/i1480u-wlp/tx.c b/drivers/uwb/i1480/i1480u-wlp/tx.c
new file mode 100644
index 000000000000..3426bfb68240
--- /dev/null
+++ b/drivers/uwb/i1480/i1480u-wlp/tx.c
@@ -0,0 +1,632 @@
1/*
2 * WUSB Wire Adapter: WLP interface
3 * Deal with TX (massaging data to transmit, handling it)
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * Transmission engine. Get an skb, create from that a WLP transmit
24 * context, add a WLP TX header (which we keep prefilled in the
25 * device's instance), fill out the target-specific fields and
26 * fire it.
27 *
28 * ROADMAP:
29 *
30 * Entry points:
31 *
32 * i1480u_tx_release(): called by i1480u_disconnect() to release
33 * pending tx contexts.
34 *
35 * i1480u_tx_cb(): callback for TX contexts (USB URBs)
36 * i1480u_tx_destroy():
37 *
38 * i1480u_tx_timeout(): called for timeout handling from the
39 * network stack.
40 *
41 * i1480u_hard_start_xmit(): called for transmitting an skb from
42 * the network stack. Will interact with WLP
43 * substack to verify and prepare frame.
44 * i1480u_xmit_frame(): actual transmission on hardware
45 *
46 * i1480u_tx_create() Creates TX context
47 * i1480u_tx_create_1() For packets in 1 fragment
48 * i1480u_tx_create_n() For packets in >1 fragments
49 *
50 * TODO:
51 *
52 * - FIXME: rewrite using usb_sg_*(), add asynch support to
53 * usb_sg_*(). It might not make too much sense as most of
54 * the times the MTU will be smaller than one page...
55 */
56
57#include "i1480u-wlp.h"
58#define D_LOCAL 5
59#include <linux/uwb/debug.h>
60
61enum {
62 /* This is only for Next and Last TX packets */
63 i1480u_MAX_PL_SIZE = i1480u_MAX_FRG_SIZE
64 - sizeof(struct untd_hdr_rst),
65};
66
67/** Free resources allocated to a i1480u tx context. */
68static
69void i1480u_tx_free(struct i1480u_tx *wtx)
70{
71 kfree(wtx->buf);
72 if (wtx->skb)
73 dev_kfree_skb_irq(wtx->skb);
74 usb_free_urb(wtx->urb);
75 kfree(wtx);
76}
77
78static
79void i1480u_tx_destroy(struct i1480u *i1480u, struct i1480u_tx *wtx)
80{
81 unsigned long flags;
82 spin_lock_irqsave(&i1480u->tx_list_lock, flags); /* not active any more */
83 list_del(&wtx->list_node);
84 i1480u_tx_free(wtx);
85 spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
86}
87
88static
89void i1480u_tx_unlink_urbs(struct i1480u *i1480u)
90{
91 unsigned long flags;
92 struct i1480u_tx *wtx, *next;
93
94 spin_lock_irqsave(&i1480u->tx_list_lock, flags);
95 list_for_each_entry_safe(wtx, next, &i1480u->tx_list, list_node) {
96 usb_unlink_urb(wtx->urb);
97 }
98 spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
99}
100
101
102/**
103 * Callback for a completed tx USB URB.
104 *
105 * TODO:
106 *
107 * - FIXME: recover errors more gracefully
108 * - FIXME: handle NAKs (I dont think they come here) for flow ctl
109 */
110static
111void i1480u_tx_cb(struct urb *urb)
112{
113 struct i1480u_tx *wtx = urb->context;
114 struct i1480u *i1480u = wtx->i1480u;
115 struct net_device *net_dev = i1480u->net_dev;
116 struct device *dev = &i1480u->usb_iface->dev;
117 unsigned long flags;
118
119 switch (urb->status) {
120 case 0:
121 spin_lock_irqsave(&i1480u->lock, flags);
122 i1480u->stats.tx_packets++;
123 i1480u->stats.tx_bytes += urb->actual_length;
124 spin_unlock_irqrestore(&i1480u->lock, flags);
125 break;
126 case -ECONNRESET: /* Not an error, but a controlled situation; */
127 case -ENOENT: /* (we killed the URB)...so, no broadcast */
128 dev_dbg(dev, "notif endp: reset/noent %d\n", urb->status);
129 netif_stop_queue(net_dev);
130 break;
131 case -ESHUTDOWN: /* going away! */
132 dev_dbg(dev, "notif endp: down %d\n", urb->status);
133 netif_stop_queue(net_dev);
134 break;
135 default:
136 dev_err(dev, "TX: unknown URB status %d\n", urb->status);
137 if (edc_inc(&i1480u->tx_errors, EDC_MAX_ERRORS,
138 EDC_ERROR_TIMEFRAME)) {
139			dev_err(dev, "TX: max acceptable errors exceeded, "
140				"resetting device\n");
141 netif_stop_queue(net_dev);
142 i1480u_tx_unlink_urbs(i1480u);
143 wlp_reset_all(&i1480u->wlp);
144 }
145 break;
146 }
147 i1480u_tx_destroy(i1480u, wtx);
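	/* Flow control: restart the queue once the number of in-flight TX
	 * URBs drops to the threshold, but only if the threshold is
	 * non-zero (i1480u_stop_queue() zeroes it so that WLP can keep the
	 * queue stopped until i1480u_start_queue() is called). */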
148 if (atomic_dec_return(&i1480u->tx_inflight.count)
149 <= i1480u->tx_inflight.threshold
150 && netif_queue_stopped(net_dev)
151 && i1480u->tx_inflight.threshold != 0) {
152 if (d_test(2) && printk_ratelimit())
153 d_printf(2, dev, "Restart queue. \n");
154 netif_start_queue(net_dev);
155 atomic_inc(&i1480u->tx_inflight.restart_count);
156 }
157 return;
158}
159
160
161/**
162 * Given a buffer that doesn't fit in a single fragment, create an
163 * scatter/gather structure for delivery to the USB pipe.
164 *
165 * Implements functionality of i1480u_tx_create().
166 *
167 * @wtx: tx descriptor
168 * @skb: skb to send
169 * @gfp_mask: gfp allocation mask
170 * @returns: Pointer to @wtx if ok, NULL on error.
171 *
172 * Sorry, TOO LONG a function, but breaking it up is kind of hard
173 *
174 * This will break the buffer in chunks smaller than
175 * i1480u_MAX_FRG_SIZE (including the header) and add proper headers
176 * to each:
177 *
178 * 1st header \
179 * i1480 tx header | fragment 1
180 * fragment data /
181 * nxt header \ fragment 2
182 * fragment data /
183 * ..
184 * ..
185 * last header \ fragment 3
186 * last fragment data /
187 *
188 * This does not fill the i1480 TX header, it is left up to the
189 * caller to do that; you can get it from @wtx->wlp_tx_hdr.
190 *
191 * This function consumes the skb unless there is an error.
192 */
193static
194int i1480u_tx_create_n(struct i1480u_tx *wtx, struct sk_buff *skb,
195 gfp_t gfp_mask)
196{
197 int result;
198 void *pl;
199 size_t pl_size;
200
201 void *pl_itr, *buf_itr;
202 size_t pl_size_left, frgs, pl_size_1st, frg_pl_size = 0;
203 struct untd_hdr_1st *untd_hdr_1st;
204 struct wlp_tx_hdr *wlp_tx_hdr;
205 struct untd_hdr_rst *untd_hdr_rst;
206
207 wtx->skb = NULL;
208 pl = skb->data;
209 pl_itr = pl;
210 pl_size = skb->len;
211 pl_size_left = pl_size; /* payload size */
212 /* First fragment; fits as much as i1480u_MAX_FRG_SIZE minus
213 * the headers */
214 pl_size_1st = i1480u_MAX_FRG_SIZE
215 - sizeof(struct untd_hdr_1st) - sizeof(struct wlp_tx_hdr);
216 BUG_ON(pl_size_1st > pl_size);
217 pl_size_left -= pl_size_1st;
218	/* The rest have a smaller header (no i1480 TX header). We
219 * need to break up the payload in blocks smaller than
220 * i1480u_MAX_PL_SIZE (payload excluding header). */
221 frgs = (pl_size_left + i1480u_MAX_PL_SIZE - 1) / i1480u_MAX_PL_SIZE;
222 /* Allocate space for the new buffer. In this new buffer we'll
223 * place the headers followed by the data fragment, headers,
224 * data fragments, etc..
225 */
226 result = -ENOMEM;
227 wtx->buf_size = sizeof(*untd_hdr_1st)
228 + sizeof(*wlp_tx_hdr)
229 + frgs * sizeof(*untd_hdr_rst)
230 + pl_size;
231 wtx->buf = kmalloc(wtx->buf_size, gfp_mask);
232 if (wtx->buf == NULL)
233 goto error_buf_alloc;
234
235 buf_itr = wtx->buf; /* We got the space, let's fill it up */
236 /* Fill 1st fragment */
237 untd_hdr_1st = buf_itr;
238 buf_itr += sizeof(*untd_hdr_1st);
239 untd_hdr_set_type(&untd_hdr_1st->hdr, i1480u_PKT_FRAG_1ST);
240 untd_hdr_set_rx_tx(&untd_hdr_1st->hdr, 0);
241 untd_hdr_1st->hdr.len = cpu_to_le16(pl_size + sizeof(*wlp_tx_hdr));
242 untd_hdr_1st->fragment_len =
243 cpu_to_le16(pl_size_1st + sizeof(*wlp_tx_hdr));
244 memset(untd_hdr_1st->padding, 0, sizeof(untd_hdr_1st->padding));
245 /* Set up i1480 header info */
246 wlp_tx_hdr = wtx->wlp_tx_hdr = buf_itr;
247 buf_itr += sizeof(*wlp_tx_hdr);
248 /* Copy the first fragment */
249 memcpy(buf_itr, pl_itr, pl_size_1st);
250 pl_itr += pl_size_1st;
251 buf_itr += pl_size_1st;
252
253 /* Now do each remaining fragment */
254 result = -EINVAL;
255 while (pl_size_left > 0) {
256 d_printf(5, NULL, "ITR HDR: pl_size_left %zu buf_itr %zu\n",
257 pl_size_left, buf_itr - wtx->buf);
258 if (buf_itr + sizeof(*untd_hdr_rst) - wtx->buf
259 > wtx->buf_size) {
260 printk(KERN_ERR "BUG: no space for header\n");
261 goto error_bug;
262 }
263 d_printf(5, NULL, "ITR HDR 2: pl_size_left %zu buf_itr %zu\n",
264 pl_size_left, buf_itr - wtx->buf);
265 untd_hdr_rst = buf_itr;
266 buf_itr += sizeof(*untd_hdr_rst);
267 if (pl_size_left > i1480u_MAX_PL_SIZE) {
268 frg_pl_size = i1480u_MAX_PL_SIZE;
269 untd_hdr_set_type(&untd_hdr_rst->hdr, i1480u_PKT_FRAG_NXT);
270 } else {
271 frg_pl_size = pl_size_left;
272 untd_hdr_set_type(&untd_hdr_rst->hdr, i1480u_PKT_FRAG_LST);
273 }
274 d_printf(5, NULL,
275 "ITR PL: pl_size_left %zu buf_itr %zu frg_pl_size %zu\n",
276 pl_size_left, buf_itr - wtx->buf, frg_pl_size);
277 untd_hdr_set_rx_tx(&untd_hdr_rst->hdr, 0);
278 untd_hdr_rst->hdr.len = cpu_to_le16(frg_pl_size);
279 untd_hdr_rst->padding = 0;
280 if (buf_itr + frg_pl_size - wtx->buf
281 > wtx->buf_size) {
282 printk(KERN_ERR "BUG: no space for payload\n");
283 goto error_bug;
284 }
285 memcpy(buf_itr, pl_itr, frg_pl_size);
286 buf_itr += frg_pl_size;
287 pl_itr += frg_pl_size;
288 pl_size_left -= frg_pl_size;
289 d_printf(5, NULL,
290 "ITR PL 2: pl_size_left %zu buf_itr %zu frg_pl_size %zu\n",
291 pl_size_left, buf_itr - wtx->buf, frg_pl_size);
292 }
293 dev_kfree_skb_irq(skb);
294 return 0;
295
296error_bug:
297 printk(KERN_ERR
298 "BUG: skb %u bytes\n"
299 "BUG: frg_pl_size %zd i1480u_MAX_FRG_SIZE %u\n"
300 "BUG: buf_itr %zu buf_size %zu pl_size_left %zu\n",
301 skb->len,
302 frg_pl_size, i1480u_MAX_FRG_SIZE,
303 buf_itr - wtx->buf, wtx->buf_size, pl_size_left);
304
305 kfree(wtx->buf);
306error_buf_alloc:
307 return result;
308}
309
310
311/**
312 * Given a buffer that fits in a single fragment, fill out a @wtx
313 * struct for transmitting it down the USB pipe.
314 *
315 * Uses the fact that we have space reserved in front of the skbuff
316 * for hardware headers :]
317 *
318 * This does not fill the i1480 TX header, it is left up to the
319 * caller to do that; you can get it from @wtx->wlp_tx_hdr.
320 *
321 * @wtx: tx descriptor
322 * @skb: skb to send; its payload must fit in a single fragment
323 *
324 * This function does not consume the @skb.
325 */
326static
327int i1480u_tx_create_1(struct i1480u_tx *wtx, struct sk_buff *skb,
328 gfp_t gfp_mask)
329{
330 struct untd_hdr_cmp *untd_hdr_cmp;
331 struct wlp_tx_hdr *wlp_tx_hdr;
332
333 wtx->buf = NULL;
334 wtx->skb = skb;
335 BUG_ON(skb_headroom(skb) < sizeof(*wlp_tx_hdr));
336 wlp_tx_hdr = (void *) __skb_push(skb, sizeof(*wlp_tx_hdr));
337 wtx->wlp_tx_hdr = wlp_tx_hdr;
338 BUG_ON(skb_headroom(skb) < sizeof(*untd_hdr_cmp));
339 untd_hdr_cmp = (void *) __skb_push(skb, sizeof(*untd_hdr_cmp));
340
341 untd_hdr_set_type(&untd_hdr_cmp->hdr, i1480u_PKT_FRAG_CMP);
342 untd_hdr_set_rx_tx(&untd_hdr_cmp->hdr, 0);
343 untd_hdr_cmp->hdr.len = cpu_to_le16(skb->len - sizeof(*untd_hdr_cmp));
344 untd_hdr_cmp->padding = 0;
345 return 0;
346}
347
348
349/**
350 * Given a skb to transmit, massage it to become palatable for the TX pipe
351 *
352 * This will break the buffer in chunks smaller than
353 * i1480u_MAX_FRG_SIZE and add proper headers to each.
354 *
355 * 1st header \
356 * i1480 tx header | fragment 1
357 * fragment data /
358 * nxt header \ fragment 2
359 * fragment data /
360 * ..
361 * ..
362 * last header \ fragment 3
363 * last fragment data /
364 *
365 * Each fragment will be always smaller or equal to i1480u_MAX_FRG_SIZE.
366 *
367 * If the first fragment is smaller than i1480u_MAX_FRG_SIZE, then the
368 * following is composed:
369 *
370 * complete header \
371 * i1480 tx header | single fragment
372 * packet data /
373 *
374 * We were going to use scatter/gather support, but because the
375 * interface is synchronous and, in the end, there is plenty of
376 * overhead in doing it, it didn't seem worth it for data that is
377 * going to be smaller than one page.
378 */
379static
380struct i1480u_tx *i1480u_tx_create(struct i1480u *i1480u,
381 struct sk_buff *skb, gfp_t gfp_mask)
382{
383 int result;
384 struct usb_endpoint_descriptor *epd;
385 int usb_pipe;
386 unsigned long flags;
387
388 struct i1480u_tx *wtx;
389 const size_t pl_max_size =
390 i1480u_MAX_FRG_SIZE - sizeof(struct untd_hdr_cmp)
391 - sizeof(struct wlp_tx_hdr);
392
393 wtx = kmalloc(sizeof(*wtx), gfp_mask);
394 if (wtx == NULL)
395 goto error_wtx_alloc;
396 wtx->urb = usb_alloc_urb(0, gfp_mask);
397 if (wtx->urb == NULL)
398 goto error_urb_alloc;
399 epd = &i1480u->usb_iface->cur_altsetting->endpoint[2].desc;
400 usb_pipe = usb_sndbulkpipe(i1480u->usb_dev, epd->bEndpointAddress);
401 /* Fits in a single complete packet or need to split? */
402 if (skb->len > pl_max_size) {
403 result = i1480u_tx_create_n(wtx, skb, gfp_mask);
404 if (result < 0)
405 goto error_create;
406 usb_fill_bulk_urb(wtx->urb, i1480u->usb_dev, usb_pipe,
407 wtx->buf, wtx->buf_size, i1480u_tx_cb, wtx);
408 } else {
409 result = i1480u_tx_create_1(wtx, skb, gfp_mask);
410 if (result < 0)
411 goto error_create;
412 usb_fill_bulk_urb(wtx->urb, i1480u->usb_dev, usb_pipe,
413 skb->data, skb->len, i1480u_tx_cb, wtx);
414 }
415 spin_lock_irqsave(&i1480u->tx_list_lock, flags);
416 list_add(&wtx->list_node, &i1480u->tx_list);
417 spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
418 return wtx;
419
420error_create:
421	usb_free_urb(wtx->urb);	/* urbs must be freed with usb_free_urb(), not kfree() */
422error_urb_alloc:
423 kfree(wtx);
424error_wtx_alloc:
425 return NULL;
426}
427
428/**
429 * Actual fragmentation and transmission of frame
430 *
431 * @wlp: WLP substack data structure
432 * @skb: To be transmitted
433 * @dst: Device address of destination
434 * @returns: 0 on success, <0 on failure
435 *
436 * This function can also be called directly (not just from
437 * hard_start_xmit), so we also check here if the interface is up before
438 * sending anything.
439 */
440int i1480u_xmit_frame(struct wlp *wlp, struct sk_buff *skb,
441 struct uwb_dev_addr *dst)
442{
443 int result = -ENXIO;
444 struct i1480u *i1480u = container_of(wlp, struct i1480u, wlp);
445 struct device *dev = &i1480u->usb_iface->dev;
446 struct net_device *net_dev = i1480u->net_dev;
447 struct i1480u_tx *wtx;
448 struct wlp_tx_hdr *wlp_tx_hdr;
449 static unsigned char dev_bcast[2] = { 0xff, 0xff };
450#if 0
451 int lockup = 50;
452#endif
453
454 d_fnstart(6, dev, "(skb %p (%u), net_dev %p)\n", skb, skb->len,
455 net_dev);
456 BUG_ON(i1480u->wlp.rc == NULL);
457 if ((net_dev->flags & IFF_UP) == 0)
458 goto out;
459 result = -EBUSY;
460 if (atomic_read(&i1480u->tx_inflight.count) >= i1480u->tx_inflight.max) {
461 if (d_test(2) && printk_ratelimit())
462 d_printf(2, dev, "Max frames in flight "
463 "stopping queue.\n");
464 netif_stop_queue(net_dev);
465 goto error_max_inflight;
466 }
467 result = -ENOMEM;
468 wtx = i1480u_tx_create(i1480u, skb, GFP_ATOMIC);
469 if (unlikely(wtx == NULL)) {
470 if (printk_ratelimit())
471			dev_err(dev, "TX: no memory for WLP TX URB, "
472 "dropping packet (in flight %d)\n",
473 atomic_read(&i1480u->tx_inflight.count));
474 netif_stop_queue(net_dev);
475 goto error_wtx_alloc;
476 }
477 wtx->i1480u = i1480u;
478 /* Fill out the i1480 header; @i1480u->def_tx_hdr read without
479 * locking. We do so because they are kind of orthogonal to
480 * each other (and thus not changed in an atomic batch).
481 * The ETH header is right after the WLP TX header. */
482 wlp_tx_hdr = wtx->wlp_tx_hdr;
483 *wlp_tx_hdr = i1480u->options.def_tx_hdr;
484 wlp_tx_hdr->dstaddr = *dst;
485 if (!memcmp(&wlp_tx_hdr->dstaddr, dev_bcast, sizeof(dev_bcast))
486 && (wlp_tx_hdr_delivery_id_type(wlp_tx_hdr) & WLP_DRP)) {
487 /*Broadcast message directed to DRP host. Send as best effort
488 * on PCA. */
489 wlp_tx_hdr_set_delivery_id_type(wlp_tx_hdr, i1480u->options.pca_base_priority);
490 }
491
492#if 0
493 dev_info(dev, "TX delivering skb -> USB, %zu bytes\n", skb->len);
494 dump_bytes(dev, skb->data, skb->len > 72 ? 72 : skb->len);
495#endif
496#if 0
497 /* simulates a device lockup after every lockup# packets */
498 if (lockup && ((i1480u->stats.tx_packets + 1) % lockup) == 0) {
499 /* Simulate a dropped transmit interrupt */
500 net_dev->trans_start = jiffies;
501 netif_stop_queue(net_dev);
502 dev_err(dev, "Simulate lockup at %ld\n", jiffies);
503 return result;
504 }
505#endif
506
507 result = usb_submit_urb(wtx->urb, GFP_ATOMIC); /* Go baby */
508 if (result < 0) {
509 dev_err(dev, "TX: cannot submit URB: %d\n", result);
510 /* We leave the freeing of skb to calling function */
511 wtx->skb = NULL;
512 goto error_tx_urb_submit;
513 }
514 atomic_inc(&i1480u->tx_inflight.count);
515 net_dev->trans_start = jiffies;
516 d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len,
517 net_dev, result);
518 return result;
519
520error_tx_urb_submit:
521 i1480u_tx_destroy(i1480u, wtx);
522error_wtx_alloc:
523error_max_inflight:
524out:
525 d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len,
526 net_dev, result);
527 return result;
528}
529
530
531/**
532 * Transmit an skb. Called when an skbuff has to be transmitted.
533 *
534 * The skb is first passed to WLP substack to ensure this is a valid
535 * frame. If valid the device address of destination will be filled and
536 * the WLP header prepended to the skb. If this step fails we fake sending
537 * the frame; if we returned an error the network stack would just keep trying.
538 *
539 * Broadcast frames inside a WSS need to be treated specially as multicast is
540 * not supported. A broadcast frame is sent as unicast to each member of the
541 * WSS - this is done by the WLP substack when it finds a broadcast frame.
542 * So, we test if the WLP substack took over the skb and only transmit it
543 * if it has not (been taken over).
544 *
545 * @net_dev->xmit_lock is held
546 */
547int i1480u_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
548{
549 int result;
550 struct i1480u *i1480u = netdev_priv(net_dev);
551 struct device *dev = &i1480u->usb_iface->dev;
552 struct uwb_dev_addr dst;
553
554 d_fnstart(6, dev, "(skb %p (%u), net_dev %p)\n", skb, skb->len,
555 net_dev);
556 BUG_ON(i1480u->wlp.rc == NULL);
557 if ((net_dev->flags & IFF_UP) == 0)
558 goto error;
559 result = wlp_prepare_tx_frame(dev, &i1480u->wlp, skb, &dst);
560 if (result < 0) {
561 dev_err(dev, "WLP verification of TX frame failed (%d). "
562 "Dropping packet.\n", result);
563 goto error;
564 } else if (result == 1) {
565 d_printf(6, dev, "WLP will transmit frame. \n");
566 /* trans_start time will be set when WLP actually transmits
567 * the frame */
568 goto out;
569 }
570 d_printf(6, dev, "Transmitting frame. \n");
571 result = i1480u_xmit_frame(&i1480u->wlp, skb, &dst);
572 if (result < 0) {
573 dev_err(dev, "Frame TX failed (%d).\n", result);
574 goto error;
575 }
576 d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len,
577 net_dev, result);
578 return NETDEV_TX_OK;
579error:
580 dev_kfree_skb_any(skb);
581 i1480u->stats.tx_dropped++;
582out:
583 d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len,
584 net_dev, result);
585 return NETDEV_TX_OK;
586}
587
588
589/**
590 * Called when a pkt transmission doesn't complete in a reasonable period
591 * Device reset may sleep - do it outside of interrupt context (delayed)
592 */
593void i1480u_tx_timeout(struct net_device *net_dev)
594{
595 struct i1480u *i1480u = netdev_priv(net_dev);
596
597 wlp_reset_all(&i1480u->wlp);
598}
599
600
601void i1480u_tx_release(struct i1480u *i1480u)
602{
603 unsigned long flags;
604 struct i1480u_tx *wtx, *next;
605 int count = 0, empty;
606
607 spin_lock_irqsave(&i1480u->tx_list_lock, flags);
608 list_for_each_entry_safe(wtx, next, &i1480u->tx_list, list_node) {
609 count++;
610 usb_unlink_urb(wtx->urb);
611 }
612 spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
613	count = count*10; /* wait 200ms per unlinked urb (intervals of 20ms) */
614	/*
615	 * We don't like this solution too much (dirty as it is), but
616	 * it is cheaper than putting a refcount on each i1480u_tx and
617	 * waiting for all of them to go away...
618	 *
619	 * Called when no more packets can be added to tx_list
620	 * so we can wait for it to be empty.
621 */
622 while (1) {
623 spin_lock_irqsave(&i1480u->tx_list_lock, flags);
624 empty = list_empty(&i1480u->tx_list);
625 spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
626 if (empty)
627 break;
628 count--;
629 BUG_ON(count == 0);
630 msleep(20);
631 }
632}
diff --git a/drivers/uwb/ie.c b/drivers/uwb/ie.c
new file mode 100644
index 000000000000..cf6f3d152b9d
--- /dev/null
+++ b/drivers/uwb/ie.c
@@ -0,0 +1,541 @@
1/*
2 * Ultra Wide Band
3 * Information Element Handling
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 * Reinette Chatre <reinette.chatre@intel.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License version
11 * 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 * 02110-1301, USA.
22 *
23 *
24 * FIXME: docs
25 */
26
27#include "uwb-internal.h"
28#define D_LOCAL 0
29#include <linux/uwb/debug.h>
30
31/**
32 * uwb_ie_next - get the next IE in a buffer
33 * @ptr: start of the buffer containing the IE data
34 * @len: length of the buffer
35 *
36 * Both @ptr and @len are updated so subsequent calls to uwb_ie_next()
37 * will get the next IE.
38 *
39 * NULL is returned (and @ptr and @len will not be updated) if there
40 * are no more IEs in the buffer or the buffer is too short.
41 */
42struct uwb_ie_hdr *uwb_ie_next(void **ptr, size_t *len)
43{
44 struct uwb_ie_hdr *hdr;
45 size_t ie_len;
46
47 if (*len < sizeof(struct uwb_ie_hdr))
48 return NULL;
49
50 hdr = *ptr;
51 ie_len = sizeof(struct uwb_ie_hdr) + hdr->length;
52
53 if (*len < ie_len)
54 return NULL;
55
56 *ptr += ie_len;
57 *len -= ie_len;
58
59 return hdr;
60}
61EXPORT_SYMBOL_GPL(uwb_ie_next);
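A minimal illustrative sketch (not part of this patch) of how a caller might walk a raw IE buffer with uwb_ie_next(); the buffer origin and the debug print are hypothetical, and only the element_id/length fields already used above are assumed to exist in struct uwb_ie_hdr.

static void example_walk_ies(void *buf, size_t len)
{
	struct uwb_ie_hdr *hdr;

	/* uwb_ie_next() advances buf/len past each IE it returns and
	 * returns NULL when the remaining data cannot hold another IE */
	while ((hdr = uwb_ie_next(&buf, &len)) != NULL)
		pr_debug("IE id %u with %u payload bytes\n",
			 hdr->element_id, hdr->length);
}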
62
63/**
64 * Get the IEs that a radio controller is sending in its beacon
65 *
66 * @uwb_rc: UWB Radio Controller
67 * @returns: Size read from the system
68 *
69 * We don't need to lock the uwb_rc's mutex because we don't modify
70 * anything. Once done with the returned event buffer, the caller is
71 * responsible for freeing it with kfree().
72 */
73ssize_t uwb_rc_get_ie(struct uwb_rc *uwb_rc, struct uwb_rc_evt_get_ie **pget_ie)
74{
75 ssize_t result;
76 struct device *dev = &uwb_rc->uwb_dev.dev;
77 struct uwb_rccb *cmd = NULL;
78 struct uwb_rceb *reply = NULL;
79 struct uwb_rc_evt_get_ie *get_ie;
80
81 d_fnstart(3, dev, "(%p, %p)\n", uwb_rc, pget_ie);
82 result = -ENOMEM;
83 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
84 if (cmd == NULL)
85 goto error_kzalloc;
86 cmd->bCommandType = UWB_RC_CET_GENERAL;
87 cmd->wCommand = cpu_to_le16(UWB_RC_CMD_GET_IE);
88 result = uwb_rc_vcmd(uwb_rc, "GET_IE", cmd, sizeof(*cmd),
89 UWB_RC_CET_GENERAL, UWB_RC_CMD_GET_IE,
90 &reply);
91 if (result < 0)
92 goto error_cmd;
93 get_ie = container_of(reply, struct uwb_rc_evt_get_ie, rceb);
94 if (result < sizeof(*get_ie)) {
95 dev_err(dev, "not enough data returned for decoding GET IE "
96 "(%zu bytes received vs %zu needed)\n",
97 result, sizeof(*get_ie));
98 result = -EINVAL;
99 } else if (result < sizeof(*get_ie) + le16_to_cpu(get_ie->wIELength)) {
100 dev_err(dev, "not enough data returned for decoding GET IE "
101 "payload (%zu bytes received vs %zu needed)\n", result,
102 sizeof(*get_ie) + le16_to_cpu(get_ie->wIELength));
103 result = -EINVAL;
104 } else
105 *pget_ie = get_ie;
106error_cmd:
107 kfree(cmd);
108error_kzalloc:
109 d_fnend(3, dev, "(%p, %p) = %d\n", uwb_rc, pget_ie, (int)result);
110 return result;
111}
112EXPORT_SYMBOL_GPL(uwb_rc_get_ie);
113
114
115/*
116 * Given a pointer to an IE, print it in ASCII/hex followed by a new line
117 *
118 * @ie_hdr: pointer to the IE header. Length is in there, and it is
119 * guaranteed that the ie_hdr->length bytes following it are
120 * safely accesible.
121 * safely accessible.
122 * @_ctx: context data passed from uwb_ie_for_each(), a struct uwb_buf_ctx
123 */
124int uwb_ie_dump_hex(struct uwb_dev *uwb_dev, const struct uwb_ie_hdr *ie_hdr,
125 size_t offset, void *_ctx)
126{
127 struct uwb_buf_ctx *ctx = _ctx;
128 const u8 *pl = (void *)(ie_hdr + 1);
129 u8 pl_itr;
130
131 ctx->bytes += scnprintf(ctx->buf + ctx->bytes, ctx->size - ctx->bytes,
132 "%02x %02x ", (unsigned) ie_hdr->element_id,
133 (unsigned) ie_hdr->length);
134 pl_itr = 0;
135 while (pl_itr < ie_hdr->length && ctx->bytes < ctx->size)
136 ctx->bytes += scnprintf(ctx->buf + ctx->bytes,
137 ctx->size - ctx->bytes,
138 "%02x ", (unsigned) pl[pl_itr++]);
139 if (ctx->bytes < ctx->size)
140 ctx->buf[ctx->bytes++] = '\n';
141 return 0;
142}
143EXPORT_SYMBOL_GPL(uwb_ie_dump_hex);
144
145
146/**
147 * Verify that a pointer in a buffer points to valid IE
148 *
149 * @start: pointer to start of buffer in which IE appears
150 * @itr: pointer to IE inside buffer that will be verified
151 * @top: pointer to end of buffer
152 *
153 * @returns: 0 if IE is valid, <0 otherwise
154 *
155 * Verification involves checking that the buffer can contain a
156 * header and the amount of data reported in the IE header can be found in
157 * the buffer.
158 */
159static
160int uwb_rc_ie_verify(struct uwb_dev *uwb_dev, const void *start,
161 const void *itr, const void *top)
162{
163 struct device *dev = &uwb_dev->dev;
164 const struct uwb_ie_hdr *ie_hdr;
165
166 if (top - itr < sizeof(*ie_hdr)) {
167 dev_err(dev, "Bad IE: no data to decode header "
168 "(%zu bytes left vs %zu needed) at offset %zu\n",
169 top - itr, sizeof(*ie_hdr), itr - start);
170 return -EINVAL;
171 }
172 ie_hdr = itr;
173 itr += sizeof(*ie_hdr);
174 if (top - itr < ie_hdr->length) {
175 dev_err(dev, "Bad IE: not enough data for payload "
176 "(%zu bytes left vs %zu needed) at offset %zu\n",
177 top - itr, (size_t)ie_hdr->length,
178 (void *)ie_hdr - start);
179 return -EINVAL;
180 }
181 return 0;
182}
183
184
185/**
186 * Walk a buffer filled with consecutive IEs
187 *
188 * @uwb_dev: UWB device these IEs belong to (for error messages mainly)
189 *
190 * @fn: function to call with each IE; if it returns 0, we keep
191 * traversing the buffer. If it returns !0, we'll stop and return
192 * that value.
193 *
194 * @data: pointer passed to @fn
195 *
196 * @buf: buffer where the consecutive IEs are located
197 *
198 * @size: size of @buf
199 *
200 * Each IE is checked for basic correctness (there is space left for
201 * the header and the payload). If that test fails, we stop
202 * processing. For every good IE, @fn is called.
203 */
204ssize_t uwb_ie_for_each(struct uwb_dev *uwb_dev, uwb_ie_f fn, void *data,
205 const void *buf, size_t size)
206{
207 ssize_t result = 0;
208 const struct uwb_ie_hdr *ie_hdr;
209 const void *itr = buf, *top = itr + size;
210
211 while (itr < top) {
212 if (uwb_rc_ie_verify(uwb_dev, buf, itr, top) != 0)
213 break;
214 ie_hdr = itr;
215 itr += sizeof(*ie_hdr) + ie_hdr->length;
216 result = fn(uwb_dev, ie_hdr, itr - buf, data);
217 if (result != 0)
218 break;
219 }
220 return result;
221}
222EXPORT_SYMBOL_GPL(uwb_ie_for_each);
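For illustration only (not in this patch): a sketch of a uwb_ie_for_each() callback that counts IEs with a given element id. The context struct and names are invented; the callback signature mirrors __acc_size() and uwb_ie_dump_hex() in this file.

struct example_count_ctx {
	u8 element_id;		/* id we are looking for */
	unsigned int count;	/* how many were seen */
};

static int example_count_ie(struct uwb_dev *uwb_dev,
			    const struct uwb_ie_hdr *ie_hdr,
			    size_t offset, void *_ctx)
{
	struct example_count_ctx *ctx = _ctx;

	if (ie_hdr->element_id == ctx->element_id)
		ctx->count++;
	return 0;		/* 0 keeps the walk going */
}

/* usage: uwb_ie_for_each(uwb_dev, example_count_ie, &ctx, buf, size); */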
223
224
225/**
226 * Replace all IEs currently being transmitted by a device
227 *
228 * @cmd: pointer to the SET-IE command with the IEs to set
229 * @size: size of @buf
230 */
231int uwb_rc_set_ie(struct uwb_rc *rc, struct uwb_rc_cmd_set_ie *cmd)
232{
233 int result;
234 struct device *dev = &rc->uwb_dev.dev;
235 struct uwb_rc_evt_set_ie reply;
236
237 reply.rceb.bEventType = UWB_RC_CET_GENERAL;
238 reply.rceb.wEvent = UWB_RC_CMD_SET_IE;
239 result = uwb_rc_cmd(rc, "SET-IE", &cmd->rccb,
240 sizeof(*cmd) + le16_to_cpu(cmd->wIELength),
241 &reply.rceb, sizeof(reply));
242 if (result < 0)
243 goto error_cmd;
244 else if (result != sizeof(reply)) {
245 dev_err(dev, "SET-IE: not enough data to decode reply "
246 "(%d bytes received vs %zu needed)\n",
247 result, sizeof(reply));
248 result = -EIO;
249 } else if (reply.bResultCode != UWB_RC_RES_SUCCESS) {
250 dev_err(dev, "SET-IE: command execution failed: %s (%d)\n",
251 uwb_rc_strerror(reply.bResultCode), reply.bResultCode);
252 result = -EIO;
253 } else
254 result = 0;
255error_cmd:
256 return result;
257}
258
259/**
260 * Determine by IE id if IE is host settable
261 * WUSB 1.0 [8.6.2.8 Table 8.85]
262 *
263 * EXCEPTION:
264 * All IEs but UWB_IE_WLP appear in Table 8.85 of WUSB 1.0. Setting this IE
265 * is required for the WLP substack to perform association with its WSS so
266 * we hope that the WUSB spec will be changed to reflect this.
267 */
268static
269int uwb_rc_ie_is_host_settable(enum uwb_ie element_id)
270{
271 if (element_id == UWB_PCA_AVAILABILITY ||
272 element_id == UWB_BP_SWITCH_IE ||
273 element_id == UWB_MAC_CAPABILITIES_IE ||
274 element_id == UWB_PHY_CAPABILITIES_IE ||
275 element_id == UWB_APP_SPEC_PROBE_IE ||
276 element_id == UWB_IDENTIFICATION_IE ||
277 element_id == UWB_MASTER_KEY_ID_IE ||
278 element_id == UWB_IE_WLP ||
279 element_id == UWB_APP_SPEC_IE)
280 return 1;
281 return 0;
282}
283
284
285/**
286 * Extract Host Settable IEs from IE
287 *
288 * @ie_data: pointer to buffer containing all IEs
289 * @size: size of buffer
290 *
291 * @returns: length of buffer that only includes host settable IEs
292 *
293 * Given a buffer of IEs we move all Host Settable IEs to front of buffer
294 * by overwriting the IEs that are not Host Settable.
295 * Buffer length is adjusted accordingly.
296 */
297static
298ssize_t uwb_rc_parse_host_settable_ie(struct uwb_dev *uwb_dev,
299 void *ie_data, size_t size)
300{
301 size_t new_len = size;
302 struct uwb_ie_hdr *ie_hdr;
303 size_t ie_length;
304 void *itr = ie_data, *top = itr + size;
305
306 while (itr < top) {
307 if (uwb_rc_ie_verify(uwb_dev, ie_data, itr, top) != 0)
308 break;
309 ie_hdr = itr;
310 ie_length = sizeof(*ie_hdr) + ie_hdr->length;
311 if (uwb_rc_ie_is_host_settable(ie_hdr->element_id)) {
312 itr += ie_length;
313 } else {
314 memmove(itr, itr + ie_length, top - (itr + ie_length));
315 new_len -= ie_length;
316 top -= ie_length;
317 }
318 }
319 return new_len;
320}
321
322
323/* Initialize the IE management subsystem */
324void uwb_rc_ie_init(struct uwb_rc *uwb_rc)
325{
326 mutex_init(&uwb_rc->ies_mutex);
327}
328
329
330/**
331 * Set up cache for host settable IEs currently being transmitted
332 *
333 * First we just call GET-IE to get the current IEs being transmitted
334 * (or we workaround and pretend we did) and (because the format is
335 * the same) reuse that as the IE cache (with the command prefix, as
336 * explained in 'struct uwb_rc').
337 *
338 * @returns: size of cache created
339 */
340ssize_t uwb_rc_ie_setup(struct uwb_rc *uwb_rc)
341{
342 struct device *dev = &uwb_rc->uwb_dev.dev;
343 ssize_t result;
344 size_t capacity;
345 struct uwb_rc_evt_get_ie *ie_info;
346
347 d_fnstart(3, dev, "(%p)\n", uwb_rc);
348 mutex_lock(&uwb_rc->ies_mutex);
349 result = uwb_rc_get_ie(uwb_rc, &ie_info);
350 if (result < 0)
351 goto error_get_ie;
352 capacity = result;
353 d_printf(5, dev, "Got IEs %zu bytes (%zu long at %p)\n", result,
354 (size_t)le16_to_cpu(ie_info->wIELength), ie_info);
355
356 /* Remove IEs that host should not set. */
357 result = uwb_rc_parse_host_settable_ie(&uwb_rc->uwb_dev,
358 ie_info->IEData, le16_to_cpu(ie_info->wIELength));
359 if (result < 0)
360 goto error_parse;
361 d_printf(5, dev, "purged non-settable IEs to %zu bytes\n", result);
362 uwb_rc->ies = (void *) ie_info;
363 uwb_rc->ies->rccb.bCommandType = UWB_RC_CET_GENERAL;
364 uwb_rc->ies->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_SET_IE);
365 uwb_rc->ies_capacity = capacity;
366 d_printf(5, dev, "IE cache at %p %zu bytes, %zu capacity\n",
367 ie_info, result, capacity);
368 result = 0;
369error_parse:
370error_get_ie:
371 mutex_unlock(&uwb_rc->ies_mutex);
372 d_fnend(3, dev, "(%p) = %zu\n", uwb_rc, result);
373 return result;
374}
375
376
377/* Cleanup the whole IE management subsystem */
378void uwb_rc_ie_release(struct uwb_rc *uwb_rc)
379{
380 kfree(uwb_rc->ies);
381 uwb_rc->ies = NULL;
382 uwb_rc->ies_capacity = 0;
383}
384
385
386static
387int __acc_size(struct uwb_dev *uwb_dev, const struct uwb_ie_hdr *ie_hdr,
388 size_t offset, void *_ctx)
389{
390 size_t *acc_size = _ctx;
391 *acc_size += sizeof(*ie_hdr) + ie_hdr->length;
392 d_printf(6, &uwb_dev->dev, "new acc size %zu\n", *acc_size);
393 return 0;
394}
395
396
397/**
398 * Add a new IE to IEs currently being transmitted by device
399 *
400 * @ies: the buffer containing the new IE or IEs to be added to
401 * the device's beacon. The buffer will be verified for
402 * consistency (meaning the headers should be right) and
403 * consistent with the buffer size.
404 * @size: size of @ies (in bytes, total buffer size)
405 * @returns: 0 if ok, <0 errno code on error
406 *
407 * According to WHCI 0.95 [4.13.6] the driver will only receive the RCEB
408 * after the device sent the first beacon that includes the IEs specified
409 * in the SET IE command. We thus cannot send this command if the device is
410 * not beaconing. Instead, a SET IE command will be sent later right after
411 * we start beaconing.
412 *
413 * Setting an IE on the device will overwrite all current IEs in device. So
414 * we take the current IEs being transmitted by the device, append the
415 * new one, and call SET IE with all the IEs needed.
416 *
417 * The local IE cache will only be updated with the new IE if SET IE
418 * completed successfully.
419 */
420int uwb_rc_ie_add(struct uwb_rc *uwb_rc,
421 const struct uwb_ie_hdr *ies, size_t size)
422{
423 int result = 0;
424 struct device *dev = &uwb_rc->uwb_dev.dev;
425 struct uwb_rc_cmd_set_ie *new_ies;
426 size_t ies_size, total_size, acc_size = 0;
427
428 if (uwb_rc->ies == NULL)
429 return -ESHUTDOWN;
430 uwb_ie_for_each(&uwb_rc->uwb_dev, __acc_size, &acc_size, ies, size);
431 if (acc_size != size) {
432 dev_err(dev, "BUG: bad IEs, misconstructed headers "
433 "[%zu bytes reported vs %zu calculated]\n",
434 size, acc_size);
435 WARN_ON(1);
436 return -EINVAL;
437 }
438 mutex_lock(&uwb_rc->ies_mutex);
439 ies_size = le16_to_cpu(uwb_rc->ies->wIELength);
440 total_size = sizeof(*uwb_rc->ies) + ies_size;
441 if (total_size + size > uwb_rc->ies_capacity) {
442 d_printf(4, dev, "Reallocating IE cache from %p capacity %zu "
443 "to capacity %zu\n", uwb_rc->ies, uwb_rc->ies_capacity,
444 total_size + size);
445 new_ies = kzalloc(total_size + size, GFP_KERNEL);
446 if (new_ies == NULL) {
447 dev_err(dev, "No memory for adding new IE\n");
448 result = -ENOMEM;
449 goto error_alloc;
450 }
451 memcpy(new_ies, uwb_rc->ies, total_size);
452 uwb_rc->ies_capacity = total_size + size;
453 kfree(uwb_rc->ies);
454 uwb_rc->ies = new_ies;
455 d_printf(4, dev, "New IE cache at %p capacity %zu\n",
456 uwb_rc->ies, uwb_rc->ies_capacity);
457 }
458 memcpy((void *)uwb_rc->ies + total_size, ies, size);
459 uwb_rc->ies->wIELength = cpu_to_le16(ies_size + size);
460 if (uwb_rc->beaconing != -1) {
461 result = uwb_rc_set_ie(uwb_rc, uwb_rc->ies);
462 if (result < 0) {
463 dev_err(dev, "Cannot set new IE on device: %d\n",
464 result);
465 uwb_rc->ies->wIELength = cpu_to_le16(ies_size);
466 } else
467 result = 0;
468 }
469 d_printf(4, dev, "IEs now occupy %hu bytes of %zu capacity at %p\n",
470 le16_to_cpu(uwb_rc->ies->wIELength), uwb_rc->ies_capacity,
471 uwb_rc->ies);
472error_alloc:
473 mutex_unlock(&uwb_rc->ies_mutex);
474 return result;
475}
476EXPORT_SYMBOL_GPL(uwb_rc_ie_add);
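A hedged usage sketch (not part of this patch): composing a small application-specific IE on the stack and handing it to uwb_rc_ie_add(). The 4-byte payload is invented; only struct uwb_ie_hdr and the UWB_APP_SPEC_IE id already referenced in this file are assumed.

static int example_add_app_ie(struct uwb_rc *rc)
{
	/* an IE header immediately followed by a made-up payload */
	struct {
		struct uwb_ie_hdr hdr;
		u8 payload[4];
	} __attribute__((packed)) ie = {
		.hdr.element_id	= UWB_APP_SPEC_IE,
		.hdr.length	= sizeof(ie.payload),
		.payload	= { 0xde, 0xad, 0xbe, 0xef },
	};

	/* cached locally; pushed to the device with SET-IE once beaconing */
	return uwb_rc_ie_add(rc, &ie.hdr, sizeof(ie));
}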
477
478
479/*
480 * Remove an IE from internal cache
481 *
482 * We are dealing with our internal IE cache so no need to verify that the
483 * IEs are valid (it has been done already).
484 *
485 * Should be called with ies_mutex held
486 *
487 * We do not break out once an IE is found in the cache. It is currently
488 * possible to have more than one IE with the same ID included in the
489 * beacon. We don't reallocate, we just mark the size smaller.
490 */
491static
492int uwb_rc_ie_cache_rm(struct uwb_rc *uwb_rc, enum uwb_ie to_remove)
493{
494 struct uwb_ie_hdr *ie_hdr;
495 size_t new_len = le16_to_cpu(uwb_rc->ies->wIELength);
496 void *itr = uwb_rc->ies->IEData;
497 void *top = itr + new_len;
498
499 while (itr < top) {
500 ie_hdr = itr;
501 if (ie_hdr->element_id != to_remove) {
502 itr += sizeof(*ie_hdr) + ie_hdr->length;
503 } else {
504 int ie_length;
505 ie_length = sizeof(*ie_hdr) + ie_hdr->length;
506 if (top - itr != ie_length)
507				memmove(itr, itr + ie_length, top - (itr + ie_length));
508 top -= ie_length;
509 new_len -= ie_length;
510 }
511 }
512 uwb_rc->ies->wIELength = cpu_to_le16(new_len);
513 return 0;
514}
515
516
517/**
518 * Remove an IE currently being transmitted by device
519 *
520 * @element_id: id of IE to be removed from device's beacon
521 */
522int uwb_rc_ie_rm(struct uwb_rc *uwb_rc, enum uwb_ie element_id)
523{
524 struct device *dev = &uwb_rc->uwb_dev.dev;
525 int result;
526
527 if (uwb_rc->ies == NULL)
528 return -ESHUTDOWN;
529 mutex_lock(&uwb_rc->ies_mutex);
530 result = uwb_rc_ie_cache_rm(uwb_rc, element_id);
531 if (result < 0)
532 dev_err(dev, "Cannot remove IE from cache.\n");
533 if (uwb_rc->beaconing != -1) {
534 result = uwb_rc_set_ie(uwb_rc, uwb_rc->ies);
535 if (result < 0)
536 dev_err(dev, "Cannot set new IE on device.\n");
537 }
538 mutex_unlock(&uwb_rc->ies_mutex);
539 return result;
540}
541EXPORT_SYMBOL_GPL(uwb_rc_ie_rm);
diff --git a/drivers/uwb/lc-dev.c b/drivers/uwb/lc-dev.c
new file mode 100644
index 000000000000..15f856c9689a
--- /dev/null
+++ b/drivers/uwb/lc-dev.c
@@ -0,0 +1,492 @@
1/*
2 * Ultra Wide Band
3 * Life cycle of devices
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * FIXME: docs
24 */
25
26#include <linux/kernel.h>
27#include <linux/device.h>
28#include <linux/err.h>
29#include <linux/kdev_t.h>
30#include <linux/random.h>
31#include "uwb-internal.h"
32
33#define D_LOCAL 1
34#include <linux/uwb/debug.h>
35
36
37/* We initialize addresses to 0xff (invalid, as it is bcast) */
38static inline void uwb_dev_addr_init(struct uwb_dev_addr *addr)
39{
40 memset(&addr->data, 0xff, sizeof(addr->data));
41}
42
43static inline void uwb_mac_addr_init(struct uwb_mac_addr *addr)
44{
45 memset(&addr->data, 0xff, sizeof(addr->data));
46}
47
48/* @returns !0 if a device @addr is a broadcast address */
49static inline int uwb_dev_addr_bcast(const struct uwb_dev_addr *addr)
50{
51 static const struct uwb_dev_addr bcast = { .data = { 0xff, 0xff } };
52 return !uwb_dev_addr_cmp(addr, &bcast);
53}
54
55/*
56 * Add callback @new to be called when an event occurs in @rc.
57 */
58int uwb_notifs_register(struct uwb_rc *rc, struct uwb_notifs_handler *new)
59{
60 if (mutex_lock_interruptible(&rc->notifs_chain.mutex))
61 return -ERESTARTSYS;
62 list_add(&new->list_node, &rc->notifs_chain.list);
63 mutex_unlock(&rc->notifs_chain.mutex);
64 return 0;
65}
66EXPORT_SYMBOL_GPL(uwb_notifs_register);
67
68/*
69 * Remove event handler (callback)
70 */
71int uwb_notifs_deregister(struct uwb_rc *rc, struct uwb_notifs_handler *entry)
72{
73 if (mutex_lock_interruptible(&rc->notifs_chain.mutex))
74 return -ERESTARTSYS;
75 list_del(&entry->list_node);
76 mutex_unlock(&rc->notifs_chain.mutex);
77 return 0;
78}
79EXPORT_SYMBOL_GPL(uwb_notifs_deregister);
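As a non-authoritative sketch (not in this patch) of hooking the notification chain: the handler fields and the callback argument order follow uwb_notify() below; the callback body and names are made up.

static void example_uwb_event_cb(void *data, struct uwb_dev *uwb_dev,
				 enum uwb_notifs event)
{
	if (event == UWB_NOTIF_ONAIR)
		printk(KERN_INFO "example: uwb device came on the air\n");
	else if (event == UWB_NOTIF_OFFAIR)
		printk(KERN_INFO "example: uwb device went off the air\n");
}

static struct uwb_notifs_handler example_notifs = {
	.cb	= example_uwb_event_cb,
	.data	= NULL,
};

/* usage: uwb_notifs_register(rc, &example_notifs);
 *        ...
 *        uwb_notifs_deregister(rc, &example_notifs); */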
80
81/*
82 * Notify all event handlers of a given event on @rc
83 *
84 * We are called with a valid reference to the device, or NULL if the
85 * event is not related to a particular device (e.g., a BG join event).
86 */
87void uwb_notify(struct uwb_rc *rc, struct uwb_dev *uwb_dev, enum uwb_notifs event)
88{
89 struct uwb_notifs_handler *handler;
90 if (mutex_lock_interruptible(&rc->notifs_chain.mutex))
91 return;
92 if (!list_empty(&rc->notifs_chain.list)) {
93 list_for_each_entry(handler, &rc->notifs_chain.list, list_node) {
94 handler->cb(handler->data, uwb_dev, event);
95 }
96 }
97 mutex_unlock(&rc->notifs_chain.mutex);
98}
99
100/*
101 * Release the backing device of a uwb_dev that has been dynamically allocated.
102 */
103static void uwb_dev_sys_release(struct device *dev)
104{
105 struct uwb_dev *uwb_dev = to_uwb_dev(dev);
106
107 d_fnstart(4, NULL, "(dev %p uwb_dev %p)\n", dev, uwb_dev);
108 uwb_bce_put(uwb_dev->bce);
109 d_printf(0, &uwb_dev->dev, "uwb_dev %p freed\n", uwb_dev);
110 memset(uwb_dev, 0x69, sizeof(*uwb_dev));
111 kfree(uwb_dev);
112 d_fnend(4, NULL, "(dev %p uwb_dev %p) = void\n", dev, uwb_dev);
113}
114
115/*
116 * Initialize a UWB device instance
117 *
118 * Alloc, zero and call this function.
119 */
120void uwb_dev_init(struct uwb_dev *uwb_dev)
121{
122 mutex_init(&uwb_dev->mutex);
123 device_initialize(&uwb_dev->dev);
124 uwb_dev->dev.release = uwb_dev_sys_release;
125 uwb_dev_addr_init(&uwb_dev->dev_addr);
126 uwb_mac_addr_init(&uwb_dev->mac_addr);
127 bitmap_fill(uwb_dev->streams, UWB_NUM_GLOBAL_STREAMS);
128}
129
130static ssize_t uwb_dev_EUI_48_show(struct device *dev,
131 struct device_attribute *attr, char *buf)
132{
133 struct uwb_dev *uwb_dev = to_uwb_dev(dev);
134 char addr[UWB_ADDR_STRSIZE];
135
136 uwb_mac_addr_print(addr, sizeof(addr), &uwb_dev->mac_addr);
137 return sprintf(buf, "%s\n", addr);
138}
139static DEVICE_ATTR(EUI_48, S_IRUGO, uwb_dev_EUI_48_show, NULL);
140
141static ssize_t uwb_dev_DevAddr_show(struct device *dev,
142 struct device_attribute *attr, char *buf)
143{
144 struct uwb_dev *uwb_dev = to_uwb_dev(dev);
145 char addr[UWB_ADDR_STRSIZE];
146
147 uwb_dev_addr_print(addr, sizeof(addr), &uwb_dev->dev_addr);
148 return sprintf(buf, "%s\n", addr);
149}
150static DEVICE_ATTR(DevAddr, S_IRUGO, uwb_dev_DevAddr_show, NULL);
151
152/*
153 * Show the BPST of this device.
154 *
155 * Calculated from the receive time of the device's beacon and its
156 * slot number.
157 */
158static ssize_t uwb_dev_BPST_show(struct device *dev,
159 struct device_attribute *attr, char *buf)
160{
161 struct uwb_dev *uwb_dev = to_uwb_dev(dev);
162 struct uwb_beca_e *bce;
163 struct uwb_beacon_frame *bf;
164 u16 bpst;
165
166 bce = uwb_dev->bce;
167 mutex_lock(&bce->mutex);
168 bf = (struct uwb_beacon_frame *)bce->be->BeaconInfo;
169 bpst = bce->be->wBPSTOffset
170 - (u16)(bf->Beacon_Slot_Number * UWB_BEACON_SLOT_LENGTH_US);
171 mutex_unlock(&bce->mutex);
172
173 return sprintf(buf, "%d\n", bpst);
174}
175static DEVICE_ATTR(BPST, S_IRUGO, uwb_dev_BPST_show, NULL);
176
177/*
178 * Show the IEs a device is beaconing
179 *
180 * We need to access the beacon cache, so we just lock it really
181 * quick, print the IEs and unlock.
182 *
183 * We have a reference on the cache entry, so that should be
184 * quite safe.
185 */
186static ssize_t uwb_dev_IEs_show(struct device *dev,
187 struct device_attribute *attr, char *buf)
188{
189 struct uwb_dev *uwb_dev = to_uwb_dev(dev);
190
191 return uwb_bce_print_IEs(uwb_dev, uwb_dev->bce, buf, PAGE_SIZE);
192}
193static DEVICE_ATTR(IEs, S_IRUGO | S_IWUSR, uwb_dev_IEs_show, NULL);
194
195static ssize_t uwb_dev_LQE_show(struct device *dev,
196 struct device_attribute *attr, char *buf)
197{
198 struct uwb_dev *uwb_dev = to_uwb_dev(dev);
199 struct uwb_beca_e *bce = uwb_dev->bce;
200 size_t result;
201
202 mutex_lock(&bce->mutex);
203 result = stats_show(&uwb_dev->bce->lqe_stats, buf);
204 mutex_unlock(&bce->mutex);
205 return result;
206}
207
208static ssize_t uwb_dev_LQE_store(struct device *dev,
209 struct device_attribute *attr,
210 const char *buf, size_t size)
211{
212 struct uwb_dev *uwb_dev = to_uwb_dev(dev);
213 struct uwb_beca_e *bce = uwb_dev->bce;
214 ssize_t result;
215
216 mutex_lock(&bce->mutex);
217 result = stats_store(&uwb_dev->bce->lqe_stats, buf, size);
218 mutex_unlock(&bce->mutex);
219 return result;
220}
221static DEVICE_ATTR(LQE, S_IRUGO | S_IWUSR, uwb_dev_LQE_show, uwb_dev_LQE_store);
222
223static ssize_t uwb_dev_RSSI_show(struct device *dev,
224 struct device_attribute *attr, char *buf)
225{
226 struct uwb_dev *uwb_dev = to_uwb_dev(dev);
227 struct uwb_beca_e *bce = uwb_dev->bce;
228 size_t result;
229
230 mutex_lock(&bce->mutex);
231 result = stats_show(&uwb_dev->bce->rssi_stats, buf);
232 mutex_unlock(&bce->mutex);
233 return result;
234}
235
236static ssize_t uwb_dev_RSSI_store(struct device *dev,
237 struct device_attribute *attr,
238 const char *buf, size_t size)
239{
240 struct uwb_dev *uwb_dev = to_uwb_dev(dev);
241 struct uwb_beca_e *bce = uwb_dev->bce;
242 ssize_t result;
243
244 mutex_lock(&bce->mutex);
245 result = stats_store(&uwb_dev->bce->rssi_stats, buf, size);
246 mutex_unlock(&bce->mutex);
247 return result;
248}
249static DEVICE_ATTR(RSSI, S_IRUGO | S_IWUSR, uwb_dev_RSSI_show, uwb_dev_RSSI_store);
250
251
252static struct attribute *dev_attrs[] = {
253 &dev_attr_EUI_48.attr,
254 &dev_attr_DevAddr.attr,
255 &dev_attr_BPST.attr,
256 &dev_attr_IEs.attr,
257 &dev_attr_LQE.attr,
258 &dev_attr_RSSI.attr,
259 NULL,
260};
261
262static struct attribute_group dev_attr_group = {
263 .attrs = dev_attrs,
264};
265
266static struct attribute_group *groups[] = {
267 &dev_attr_group,
268 NULL,
269};
270
271/**
272 * Device SYSFS registration
273 *
274 *
275 */
276static int __uwb_dev_sys_add(struct uwb_dev *uwb_dev, struct device *parent_dev)
277{
278 int result;
279 struct device *dev;
280
281 d_fnstart(4, NULL, "(uwb_dev %p parent_dev %p)\n", uwb_dev, parent_dev);
282 BUG_ON(parent_dev == NULL);
283
284 dev = &uwb_dev->dev;
285 /* Device sysfs files are only useful for neighbor devices not
286 local radio controllers. */
287 if (&uwb_dev->rc->uwb_dev != uwb_dev)
288 dev->groups = groups;
289 dev->parent = parent_dev;
290 dev_set_drvdata(dev, uwb_dev);
291
292 result = device_add(dev);
293 d_fnend(4, NULL, "(uwb_dev %p parent_dev %p) = %d\n", uwb_dev, parent_dev, result);
294 return result;
295}
296
297
298static void __uwb_dev_sys_rm(struct uwb_dev *uwb_dev)
299{
300 d_fnstart(4, NULL, "(uwb_dev %p)\n", uwb_dev);
301 dev_set_drvdata(&uwb_dev->dev, NULL);
302 device_del(&uwb_dev->dev);
303 d_fnend(4, NULL, "(uwb_dev %p) = void\n", uwb_dev);
304}
305
306
307/**
308 * Register and initialize a new UWB device
309 *
310 * Did you call uwb_dev_init() on it?
311 *
312 * @parent_rc: is the parent radio controller who has the link to the
313 * device. When registering the UWB device that is a UWB
314 * Radio Controller, we point back to it.
315 *
316 * If registering the device that is part of a radio, caller has set
317 * rc->uwb_dev->dev. Otherwise it is to be left NULL--a new one will
318 * be allocated.
319 */
320int uwb_dev_add(struct uwb_dev *uwb_dev, struct device *parent_dev,
321 struct uwb_rc *parent_rc)
322{
323 int result;
324 struct device *dev;
325
326 BUG_ON(uwb_dev == NULL);
327 BUG_ON(parent_dev == NULL);
328 BUG_ON(parent_rc == NULL);
329
330 mutex_lock(&uwb_dev->mutex);
331 dev = &uwb_dev->dev;
332 uwb_dev->rc = parent_rc;
333 result = __uwb_dev_sys_add(uwb_dev, parent_dev);
334 if (result < 0)
335 printk(KERN_ERR "UWB: unable to register dev %s with sysfs: %d\n",
336 dev_name(dev), result);
337 mutex_unlock(&uwb_dev->mutex);
338 return result;
339}
340
341
342void uwb_dev_rm(struct uwb_dev *uwb_dev)
343{
344 mutex_lock(&uwb_dev->mutex);
345 __uwb_dev_sys_rm(uwb_dev);
346 mutex_unlock(&uwb_dev->mutex);
347}
348
349
350static
351int __uwb_dev_try_get(struct device *dev, void *__target_uwb_dev)
352{
353 struct uwb_dev *target_uwb_dev = __target_uwb_dev;
354 struct uwb_dev *uwb_dev = to_uwb_dev(dev);
355 if (uwb_dev == target_uwb_dev) {
356 uwb_dev_get(uwb_dev);
357 return 1;
358 } else
359 return 0;
360}
361
362
363/**
364 * Given a UWB device descriptor, validate and refcount it
365 *
366 * @returns NULL if the device does not exist or is quiescing; the ptr to
367 * it otherwise.
368 */
369struct uwb_dev *uwb_dev_try_get(struct uwb_rc *rc, struct uwb_dev *uwb_dev)
370{
371 if (uwb_dev_for_each(rc, __uwb_dev_try_get, uwb_dev))
372 return uwb_dev;
373 else
374 return NULL;
375}
376EXPORT_SYMBOL_GPL(uwb_dev_try_get);
377
378
379/**
380 * Remove a device from the system [grunt for other functions]
381 */
382int __uwb_dev_offair(struct uwb_dev *uwb_dev, struct uwb_rc *rc)
383{
384 struct device *dev = &uwb_dev->dev;
385 char macbuf[UWB_ADDR_STRSIZE], devbuf[UWB_ADDR_STRSIZE];
386
387 d_fnstart(3, NULL, "(dev %p [uwb_dev %p], uwb_rc %p)\n", dev, uwb_dev, rc);
388 uwb_mac_addr_print(macbuf, sizeof(macbuf), &uwb_dev->mac_addr);
389 uwb_dev_addr_print(devbuf, sizeof(devbuf), &uwb_dev->dev_addr);
390 dev_info(dev, "uwb device (mac %s dev %s) disconnected from %s %s\n",
391 macbuf, devbuf,
392 rc ? rc->uwb_dev.dev.parent->bus->name : "n/a",
393 rc ? dev_name(rc->uwb_dev.dev.parent) : "");
394 uwb_dev_rm(uwb_dev);
395 uwb_dev_put(uwb_dev); /* for the creation in _onair() */
396 d_fnend(3, NULL, "(dev %p [uwb_dev %p], uwb_rc %p) = 0\n", dev, uwb_dev, rc);
397 return 0;
398}
399
400
401/**
402 * A device went off the air, clean up after it!
403 *
404 * This is called by the UWB Daemon (through the beacon purge function
405 * uwb_bcn_cache_purge) when it is detected that a device has been in
406 * radio silence for a while.
407 *
408 * If this device is actually a local radio controller we don't need
409 * to go through the offair process, as it is not registered as that.
410 *
411 * NOTE: uwb_bcn_cache.mutex is held!
412 */
413void uwbd_dev_offair(struct uwb_beca_e *bce)
414{
415 struct uwb_dev *uwb_dev;
416
417 uwb_dev = bce->uwb_dev;
418 if (uwb_dev) {
419 uwb_notify(uwb_dev->rc, uwb_dev, UWB_NOTIF_OFFAIR);
420 __uwb_dev_offair(uwb_dev, uwb_dev->rc);
421 }
422}
423
424
425/**
426 * A device went on the air, start it up!
427 *
428 * This is called by the UWB Daemon when it is detected that a device
429 * has popped up in the radio range of the radio controller.
430 *
431 * It will just create the freaking device, register the beacon and
432 * stuff and voila, done.
433 *
434 *
435 * NOTE: uwb_beca.mutex is held, bce->mutex is held
436 */
437void uwbd_dev_onair(struct uwb_rc *rc, struct uwb_beca_e *bce)
438{
439 int result;
440 struct device *dev = &rc->uwb_dev.dev;
441 struct uwb_dev *uwb_dev;
442 char macbuf[UWB_ADDR_STRSIZE], devbuf[UWB_ADDR_STRSIZE];
443
444 uwb_mac_addr_print(macbuf, sizeof(macbuf), bce->mac_addr);
445 uwb_dev_addr_print(devbuf, sizeof(devbuf), &bce->dev_addr);
446 uwb_dev = kzalloc(sizeof(struct uwb_dev), GFP_KERNEL);
447 if (uwb_dev == NULL) {
448 dev_err(dev, "new device %s: Cannot allocate memory\n",
449 macbuf);
450 return;
451 }
452 uwb_dev_init(uwb_dev); /* This sets refcnt to one, we own it */
453 uwb_dev->mac_addr = *bce->mac_addr;
454 uwb_dev->dev_addr = bce->dev_addr;
455	dev_set_name(&uwb_dev->dev, "%s", macbuf);
456 result = uwb_dev_add(uwb_dev, &rc->uwb_dev.dev, rc);
457 if (result < 0) {
458 dev_err(dev, "new device %s: cannot instantiate device\n",
459 macbuf);
460 goto error_dev_add;
461 }
462 /* plug the beacon cache */
463 bce->uwb_dev = uwb_dev;
464 uwb_dev->bce = bce;
465 uwb_bce_get(bce); /* released in uwb_dev_sys_release() */
466 dev_info(dev, "uwb device (mac %s dev %s) connected to %s %s\n",
467 macbuf, devbuf, rc->uwb_dev.dev.parent->bus->name,
468 dev_name(rc->uwb_dev.dev.parent));
469 uwb_notify(rc, uwb_dev, UWB_NOTIF_ONAIR);
470 return;
471
472error_dev_add:
473 kfree(uwb_dev);
474 return;
475}
476
477/**
478 * Iterate over the list of UWB devices, calling a @function on each
479 *
480 * See docs for bus_for_each()....
481 *
482 * @rc: radio controller for the devices.
483 * @function: function to call.
484 * @priv: data to pass to @function.
485 * @returns: 0 if no invocation of function() returned a value
486 * different from zero; that value otherwise.
487 */
488int uwb_dev_for_each(struct uwb_rc *rc, uwb_dev_for_each_f function, void *priv)
489{
490 return device_for_each_child(&rc->uwb_dev.dev, priv, function);
491}
492EXPORT_SYMBOL_GPL(uwb_dev_for_each);
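A small illustrative sketch (not part of this patch): a uwb_dev_for_each() visitor that counts a radio controller's child devices. It follows the (struct device *, void *) shape that device_for_each_child() hands to the callback, as in __uwb_dev_try_get() above; everything else is invented.

static int example_count_dev(struct device *dev, void *priv)
{
	unsigned int *count = priv;
	struct uwb_dev *uwb_dev = to_uwb_dev(dev);

	(*count)++;
	dev_dbg(&uwb_dev->dev, "counted as device #%u\n", *count);
	return 0;	/* returning non-zero would stop the iteration */
}

/* usage:
 *	unsigned int count = 0;
 *	uwb_dev_for_each(rc, example_count_dev, &count);
 */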
diff --git a/drivers/uwb/lc-rc.c b/drivers/uwb/lc-rc.c
new file mode 100644
index 000000000000..ee5772f00d42
--- /dev/null
+++ b/drivers/uwb/lc-rc.c
@@ -0,0 +1,495 @@
1/*
2 * Ultra Wide Band
3 * Life cycle of radio controllers
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * FIXME: docs
24 *
25 * A UWB radio controller is also a UWB device, so it embeds one...
26 *
27 * List of RCs comes from the 'struct class uwb_rc_class'.
28 */
29
30#include <linux/kernel.h>
31#include <linux/string.h>
32#include <linux/device.h>
33#include <linux/err.h>
34#include <linux/random.h>
35#include <linux/kdev_t.h>
36#include <linux/etherdevice.h>
37#include <linux/usb.h>
38
39#define D_LOCAL 1
40#include <linux/uwb/debug.h>
41#include "uwb-internal.h"
42
43static int uwb_rc_index_match(struct device *dev, void *data)
44{
45 int *index = data;
46 struct uwb_rc *rc = dev_get_drvdata(dev);
47
48 if (rc->index == *index)
49 return 1;
50 return 0;
51}
52
53static struct uwb_rc *uwb_rc_find_by_index(int index)
54{
55 struct device *dev;
56 struct uwb_rc *rc = NULL;
57
58 dev = class_find_device(&uwb_rc_class, NULL, &index, uwb_rc_index_match);
59 if (dev)
60 rc = dev_get_drvdata(dev);
61 return rc;
62}
63
64static int uwb_rc_new_index(void)
65{
66 int index = 0;
67
68 for (;;) {
69 if (!uwb_rc_find_by_index(index))
70 return index;
71 if (++index < 0)
72 index = 0;
73 }
74}
75
76/**
77 * Release the backing device of a uwb_rc that has been dynamically allocated.
78 */
79static void uwb_rc_sys_release(struct device *dev)
80{
81 struct uwb_dev *uwb_dev = container_of(dev, struct uwb_dev, dev);
82 struct uwb_rc *rc = container_of(uwb_dev, struct uwb_rc, uwb_dev);
83
84 uwb_rc_neh_destroy(rc);
85 uwb_rc_ie_release(rc);
86 d_printf(1, dev, "freed uwb_rc %p\n", rc);
87 kfree(rc);
88}
89
90
91void uwb_rc_init(struct uwb_rc *rc)
92{
93 struct uwb_dev *uwb_dev = &rc->uwb_dev;
94
95 uwb_dev_init(uwb_dev);
96 rc->uwb_dev.dev.class = &uwb_rc_class;
97 rc->uwb_dev.dev.release = uwb_rc_sys_release;
98 uwb_rc_neh_create(rc);
99 rc->beaconing = -1;
100 rc->scan_type = UWB_SCAN_DISABLED;
101 INIT_LIST_HEAD(&rc->notifs_chain.list);
102 mutex_init(&rc->notifs_chain.mutex);
103 uwb_drp_avail_init(rc);
104 uwb_rc_ie_init(rc);
105 uwb_rsv_init(rc);
106 uwb_rc_pal_init(rc);
107}
108EXPORT_SYMBOL_GPL(uwb_rc_init);
109
110
111struct uwb_rc *uwb_rc_alloc(void)
112{
113 struct uwb_rc *rc;
114 rc = kzalloc(sizeof(*rc), GFP_KERNEL);
115 if (rc == NULL)
116 return NULL;
117 uwb_rc_init(rc);
118 return rc;
119}
120EXPORT_SYMBOL_GPL(uwb_rc_alloc);
121
122static struct attribute *rc_attrs[] = {
123 &dev_attr_mac_address.attr,
124 &dev_attr_scan.attr,
125 &dev_attr_beacon.attr,
126 NULL,
127};
128
129static struct attribute_group rc_attr_group = {
130 .attrs = rc_attrs,
131};
132
133/*
134 * Registration of sysfs specific stuff
135 */
136static int uwb_rc_sys_add(struct uwb_rc *rc)
137{
138 return sysfs_create_group(&rc->uwb_dev.dev.kobj, &rc_attr_group);
139}
140
141
142static void __uwb_rc_sys_rm(struct uwb_rc *rc)
143{
144 sysfs_remove_group(&rc->uwb_dev.dev.kobj, &rc_attr_group);
145}
146
147/**
148 * uwb_rc_mac_addr_setup - get an RC's EUI-48 address or set it
149 * @rc: the radio controller.
150 *
151 * If the EUI-48 address is 00:00:00:00:00:00 or FF:FF:FF:FF:FF:FF
152 * then a random locally administered EUI-48 is generated and set on
153 * the device. The probability of address collisions is sufficiently
154 * unlikely (1/2^40 = 9.1e-13) that they're not checked for.
155 */
156static
157int uwb_rc_mac_addr_setup(struct uwb_rc *rc)
158{
159 int result;
160 struct device *dev = &rc->uwb_dev.dev;
161 struct uwb_dev *uwb_dev = &rc->uwb_dev;
162 char devname[UWB_ADDR_STRSIZE];
163 struct uwb_mac_addr addr;
164
165 result = uwb_rc_mac_addr_get(rc, &addr);
166 if (result < 0) {
167 dev_err(dev, "cannot retrieve UWB EUI-48 address: %d\n", result);
168 return result;
169 }
170
171 if (uwb_mac_addr_unset(&addr) || uwb_mac_addr_bcast(&addr)) {
172		addr.data[0] = 0x02; /* locally administered and unicast */
173 get_random_bytes(&addr.data[1], sizeof(addr.data)-1);
174
175 result = uwb_rc_mac_addr_set(rc, &addr);
176 if (result < 0) {
177 uwb_mac_addr_print(devname, sizeof(devname), &addr);
178 dev_err(dev, "cannot set EUI-48 address %s: %d\n",
179 devname, result);
180 return result;
181 }
182 }
183 uwb_dev->mac_addr = addr;
184 return 0;
185}
186
187
188
189static int uwb_rc_setup(struct uwb_rc *rc)
190{
191 int result;
192 struct device *dev = &rc->uwb_dev.dev;
193
194 result = uwb_rc_reset(rc);
195 if (result < 0) {
196 dev_err(dev, "cannot reset UWB radio: %d\n", result);
197 goto error;
198 }
199 result = uwb_rc_mac_addr_setup(rc);
200 if (result < 0) {
201 dev_err(dev, "cannot setup UWB MAC address: %d\n", result);
202 goto error;
203 }
204 result = uwb_rc_dev_addr_assign(rc);
205 if (result < 0) {
206 dev_err(dev, "cannot assign UWB DevAddr: %d\n", result);
207 goto error;
208 }
209 result = uwb_rc_ie_setup(rc);
210 if (result < 0) {
211 dev_err(dev, "cannot setup IE subsystem: %d\n", result);
212 goto error_ie_setup;
213 }
214 result = uwb_rsv_setup(rc);
215 if (result < 0) {
216 dev_err(dev, "cannot setup reservation subsystem: %d\n", result);
217 goto error_rsv_setup;
218 }
219 uwb_dbg_add_rc(rc);
220 return 0;
221
222error_rsv_setup:
223 uwb_rc_ie_release(rc);
224error_ie_setup:
225error:
226 return result;
227}
228
229
230/**
231 * Register a new UWB radio controller
232 *
233 * Did you call uwb_rc_init() on your rc?
234 *
235 * We assume that this is being called with a > 0 refcount on
236 * it [through ops->{get|put}_device()]. We'll take our own, though.
237 *
238 * @parent_dev is our real device, the one that provides the actual UWB device
239 */
240int uwb_rc_add(struct uwb_rc *rc, struct device *parent_dev, void *priv)
241{
242 int result;
243 struct device *dev;
244 char macbuf[UWB_ADDR_STRSIZE], devbuf[UWB_ADDR_STRSIZE];
245
246 rc->index = uwb_rc_new_index();
247
248 dev = &rc->uwb_dev.dev;
249 dev_set_name(dev, "uwb%d", rc->index);
250
251 rc->priv = priv;
252
253 result = rc->start(rc);
254 if (result < 0)
255 goto error_rc_start;
256
257 result = uwb_rc_setup(rc);
258 if (result < 0) {
259 dev_err(dev, "cannot setup UWB radio controller: %d\n", result);
260 goto error_rc_setup;
261 }
262
263 result = uwb_dev_add(&rc->uwb_dev, parent_dev, rc);
264 if (result < 0 && result != -EADDRNOTAVAIL)
265 goto error_dev_add;
266
267 result = uwb_rc_sys_add(rc);
268 if (result < 0) {
269 dev_err(parent_dev, "cannot register UWB radio controller "
270 "dev attributes: %d\n", result);
271 goto error_sys_add;
272 }
273
274 uwb_mac_addr_print(macbuf, sizeof(macbuf), &rc->uwb_dev.mac_addr);
275 uwb_dev_addr_print(devbuf, sizeof(devbuf), &rc->uwb_dev.dev_addr);
276 dev_info(dev,
277 "new uwb radio controller (mac %s dev %s) on %s %s\n",
278 macbuf, devbuf, parent_dev->bus->name, dev_name(parent_dev));
279 rc->ready = 1;
280 return 0;
281
282error_sys_add:
283 uwb_dev_rm(&rc->uwb_dev);
284error_dev_add:
285error_rc_setup:
286 rc->stop(rc);
287 uwbd_flush(rc);
288error_rc_start:
289 return result;
290}
291EXPORT_SYMBOL_GPL(uwb_rc_add);
292
293
294static int uwb_dev_offair_helper(struct device *dev, void *priv)
295{
296 struct uwb_dev *uwb_dev = to_uwb_dev(dev);
297
298 return __uwb_dev_offair(uwb_dev, uwb_dev->rc);
299}
300
301/*
302 * Remove a Radio Controller; stop beaconing/scanning, disconnect all children
303 */
304void uwb_rc_rm(struct uwb_rc *rc)
305{
306 rc->ready = 0;
307
308 uwb_dbg_del_rc(rc);
309 uwb_rsv_cleanup(rc);
310 uwb_rc_ie_rm(rc, UWB_IDENTIFICATION_IE);
311 if (rc->beaconing >= 0)
312 uwb_rc_beacon(rc, -1, 0);
313 if (rc->scan_type != UWB_SCAN_DISABLED)
314 uwb_rc_scan(rc, rc->scanning, UWB_SCAN_DISABLED, 0);
315 uwb_rc_reset(rc);
316
317 rc->stop(rc);
318 uwbd_flush(rc);
319
320 uwb_dev_lock(&rc->uwb_dev);
321 rc->priv = NULL;
322 rc->cmd = NULL;
323 uwb_dev_unlock(&rc->uwb_dev);
324 mutex_lock(&uwb_beca.mutex);
325 uwb_dev_for_each(rc, uwb_dev_offair_helper, NULL);
326 __uwb_rc_sys_rm(rc);
327 mutex_unlock(&uwb_beca.mutex);
328 uwb_dev_rm(&rc->uwb_dev);
329}
330EXPORT_SYMBOL_GPL(uwb_rc_rm);
331
332static int find_rc_try_get(struct device *dev, void *data)
333{
334 struct uwb_rc *target_rc = data;
335 struct uwb_rc *rc = dev_get_drvdata(dev);
336
337 if (rc == NULL) {
338 WARN_ON(1);
339 return 0;
340 }
341 if (rc == target_rc) {
342 if (rc->ready == 0)
343 return 0;
344 else
345 return 1;
346 }
347 return 0;
348}
349
350/**
351 * Given a radio controller descriptor, validate and refcount it
352 *
353 * @returns NULL if the rc does not exist or is quiescing; the ptr to
354 * it otherwise.
355 */
356struct uwb_rc *__uwb_rc_try_get(struct uwb_rc *target_rc)
357{
358 struct device *dev;
359 struct uwb_rc *rc = NULL;
360
361 dev = class_find_device(&uwb_rc_class, NULL, target_rc,
362 find_rc_try_get);
363 if (dev) {
364 rc = dev_get_drvdata(dev);
365 __uwb_rc_get(rc);
366 }
367 return rc;
368}
369EXPORT_SYMBOL_GPL(__uwb_rc_try_get);
370
371/*
372 * RC get for external refcount acquirers...
373 *
374 * Increments the refcount of the device and it's backend modules
375 */
376static inline struct uwb_rc *uwb_rc_get(struct uwb_rc *rc)
377{
378 if (rc->ready == 0)
379 return NULL;
380 uwb_dev_get(&rc->uwb_dev);
381 return rc;
382}
383
384static int find_rc_grandpa(struct device *dev, void *data)
385{
386 struct device *grandpa_dev = data;
387 struct uwb_rc *rc = dev_get_drvdata(dev);
388
389 if (rc->uwb_dev.dev.parent->parent == grandpa_dev) {
390 rc = uwb_rc_get(rc);
391 return 1;
392 }
393 return 0;
394}
395
396/**
397 * Locate and refcount a radio controller given a common grand-parent
398 *
399 * @grandpa_dev Pointer to the 'grandparent' device structure.
400 * @returns NULL If the rc does not exist or is quiescing; the ptr to
401 * it otherwise, properly referenced.
402 *
403 * The Radio Control interface (or the UWB Radio Controller) is always
404 * an interface of a device. The parent is the interface, the
405 * grandparent is the device that encapsulates the interface.
406 *
407 * There is no need to lock around as the "grandpa" would be
408 * refcounted by the target, and to remove the references, the
409 * uwb_rc_class->sem would have to be taken--we hold it, ergo we
410 * should be safe.
411 */
412struct uwb_rc *uwb_rc_get_by_grandpa(const struct device *grandpa_dev)
413{
414 struct device *dev;
415 struct uwb_rc *rc = NULL;
416
417 dev = class_find_device(&uwb_rc_class, NULL, (void *)grandpa_dev,
418 find_rc_grandpa);
419 if (dev)
420 rc = dev_get_drvdata(dev);
421 return rc;
422}
423EXPORT_SYMBOL_GPL(uwb_rc_get_by_grandpa);
424
425/**
426 * Find a radio controller by device address
427 *
428 * @returns the pointer to the radio controller, properly referenced
429 */
430static int find_rc_dev(struct device *dev, void *data)
431{
432 struct uwb_dev_addr *addr = data;
433 struct uwb_rc *rc = dev_get_drvdata(dev);
434
435 if (rc == NULL) {
436 WARN_ON(1);
437 return 0;
438 }
439 if (!uwb_dev_addr_cmp(&rc->uwb_dev.dev_addr, addr)) {
440 rc = uwb_rc_get(rc);
441 return 1;
442 }
443 return 0;
444}
445
446struct uwb_rc *uwb_rc_get_by_dev(const struct uwb_dev_addr *addr)
447{
448 struct device *dev;
449 struct uwb_rc *rc = NULL;
450
451 dev = class_find_device(&uwb_rc_class, NULL, (void *)addr,
452 find_rc_dev);
453 if (dev)
454 rc = dev_get_drvdata(dev);
455
456 return rc;
457}
458EXPORT_SYMBOL_GPL(uwb_rc_get_by_dev);
459
460/**
461 * Drop a reference on a radio controller
462 *
463 * This is the version that should be done by entities external to the
464 * UWB Radio Control stack (ie: clients of the API).
465 */
466void uwb_rc_put(struct uwb_rc *rc)
467{
468 __uwb_rc_put(rc);
469}
470EXPORT_SYMBOL_GPL(uwb_rc_put);
471
472/*
473 * Hex-dump the IEs currently being transmitted by the radio
474 * controller into @buf (at most @size bytes); returns bytes written.
475 */
476ssize_t uwb_rc_print_IEs(struct uwb_rc *uwb_rc, char *buf, size_t size)
477{
478 ssize_t result;
479 struct uwb_rc_evt_get_ie *ie_info;
480 struct uwb_buf_ctx ctx;
481
482 result = uwb_rc_get_ie(uwb_rc, &ie_info);
483 if (result < 0)
484 goto error_get_ie;
485 ctx.buf = buf;
486 ctx.size = size;
487 ctx.bytes = 0;
488 uwb_ie_for_each(&uwb_rc->uwb_dev, uwb_ie_dump_hex, &ctx,
489 ie_info->IEData, result - sizeof(*ie_info));
490 result = ctx.bytes;
491 kfree(ie_info);
492error_get_ie:
493 return result;
494}
495
diff --git a/drivers/uwb/neh.c b/drivers/uwb/neh.c
new file mode 100644
index 000000000000..9b4eb64327ac
--- /dev/null
+++ b/drivers/uwb/neh.c
@@ -0,0 +1,616 @@
1/*
2 * WUSB Wire Adapter: Radio Control Interface (WUSB[8])
3 * Notification and Event Handling
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * The RC interface of the Host Wire Adapter (USB dongle) or WHCI PCI
24 * card delivers a stream of notifications and events to the
25 * notification and event endpoint or area. This code takes care of
26 * getting a buffer with that data, breaking it up into separate
27 * notifications and events and then delivering those.
28 *
29 * Events are answers to commands and they carry a context ID that
30 * associates them with the command. Notifications are just that,
31 * notifications: they come out of the blue and have a context ID of
32 * zero. Think of the context ID as a kind of handle. The
33 * uwb_rc_neh_* code deals with managing context IDs.
34 *
35 * This is why you require a handle to operate on a UWB host. When you
36 * open a handle a context ID is assigned to you.
37 *
38 * So, the way it is done is (see the sketch after this comment):
39 *
40 * 1. Add an event handler [uwb_rc_neh_add()] (assigns a ctx id)
41 * 2. Issue command [rc->cmd(rc, ...)]
42 * 3. Arm the timeout timer [uwb_rc_neh_arm()]
43 * 4. Release the reference to the neh [uwb_rc_neh_put()]
44 * 5. Wait for the callback
45 * 6. Command result (RCEB) is passed to the callback
46 *
47 * If (2) fails, you should remove the handle [uwb_rc_neh_rm()]
48 * instead of arming the timer.
49 *
50 * Handles are for use in *serialized* code, single thread.
51 *
52 * When the notification/event comes, the IRQ handler/endpoint
53 * callback passes the data read to uwb_rc_neh_grok() which will break
54 * it up in a discrete series of events, look up who is listening for
55 * them and execute the pertinent callbacks.
56 *
57 * If the reader detects an error while reading the data stream, call
58 * uwb_rc_neh_error().
59 *
60 * CONSTRAINTS/ASSUMPTIONS:
61 *
62 * - Most notifications/events are small (less than 0.5k), copying
63 * around is ok.
64 *
65 * - Notifications/events are ALWAYS smaller than PAGE_SIZE
66 *
67 * - Notifications/events always come in a single piece (ie: a buffer
68 * will always contain entire notifications/events).
69 *
70 * - we cannot know in advance how long each event is (because they
71 * lack a length field in their header--smart move by the standards
72 * body, btw). So we need a facility to get the event size given the
73 * header. This is what the EST code does (notif/Event Size
74 * Tables), check est.c--as well, you can associate the size with
75 * the handle [w/ neh->extra_size()].
76 *
77 * - Most notifications/events are fixed size; only a few are variable
78 * size (NEST takes care of that).
79 *
80 * - Listeners of events expect them, so they usually provide a
81 * buffer, as they know the size. Listeners to notifications don't,
82 * so we allocate their buffers dynamically.
83 */
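[Editor's note] A minimal sketch of the add/command/arm/put sequence described in the header comment above; uwb_rc_cmd_async() in reset.c (added later in this patch) is the real, complete implementation. The helper and callback names here are hypothetical.

static void example_cmd_cb(struct uwb_rc *rc, void *arg,
			   struct uwb_rceb *reply, ssize_t reply_size)
{
	/* step 6: the reply arrives here (reply == NULL and a negative
	 * reply_size on timeout or shutdown) */
}

static int example_issue_cmd(struct uwb_rc *rc, struct uwb_rccb *cmd,
			     size_t cmd_size, u8 type, u16 event)
{
	struct uwb_rc_neh *neh;
	int result;

	neh = uwb_rc_neh_add(rc, cmd, type, event, example_cmd_cb, NULL);
	if (IS_ERR(neh))
		return PTR_ERR(neh);		/* step 1 failed */

	result = rc->cmd(rc, cmd, cmd_size);	/* step 2 */
	if (result < 0)
		uwb_rc_neh_rm(rc, neh);		/* step 2 failed: drop handle */
	else
		uwb_rc_neh_arm(rc, neh);	/* step 3 */
	uwb_rc_neh_put(neh);			/* step 4 */
	return result < 0 ? result : 0;		/* step 5: wait for callback */
}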
84#include <linux/kernel.h>
85#include <linux/timer.h>
86#include <linux/err.h>
87
88#include "uwb-internal.h"
89#define D_LOCAL 0
90#include <linux/uwb/debug.h>
91
92/*
93 * UWB Radio Controller Notification/Event Handle
94 *
95 * Represents an entity waiting for an event coming from the UWB Radio
96 * Controller with a given context id (context) and type (evt_type and
97 * evt). On reception of the notification/event, the callback (cb) is
98 * called with the event.
99 *
100 * If the timer expires before the event is received, the callback is
101 * called with -ETIMEDOUT as the event size.
102 */
103struct uwb_rc_neh {
104 struct kref kref;
105
106 struct uwb_rc *rc;
107 u8 evt_type;
108 __le16 evt;
109 u8 context;
110 uwb_rc_cmd_cb_f cb;
111 void *arg;
112
113 struct timer_list timer;
114 struct list_head list_node;
115};
116
117static void uwb_rc_neh_timer(unsigned long arg);
118
119static void uwb_rc_neh_release(struct kref *kref)
120{
121 struct uwb_rc_neh *neh = container_of(kref, struct uwb_rc_neh, kref);
122
123 kfree(neh);
124}
125
126static void uwb_rc_neh_get(struct uwb_rc_neh *neh)
127{
128 kref_get(&neh->kref);
129}
130
131/**
132 * uwb_rc_neh_put - release reference to a neh
133 * @neh: the neh
134 */
135void uwb_rc_neh_put(struct uwb_rc_neh *neh)
136{
137 kref_put(&neh->kref, uwb_rc_neh_release);
138}
139
140
141/**
142 * Assigns @neh a context id from @rc's pool
143 *
144 * @rc: UWB Radio Controller descriptor; @rc->neh_lock taken
145 * @neh: Notification/Event Handle
146 * @returns 0 if context id was assigned ok; < 0 errno on error (if
147 * all the context IDs are taken).
148 *
149 * (assumes @rc->neh_lock is held).
150 *
151 * NOTE: WUSB spec reserves context ids 0x00 for notifications and
152 * 0xff is invalid, so they must not be used. Initialization
153 * fills up those two in the bitmap so they are not allocated.
154 *
155 * We spread the allocation around to reduce the possibility of two
156 * consecutive opened @neh's getting the same context ID assigned (to
157 * avoid surprises with late events that timed out a long time ago). So
158 * first we search from where @rc->ctx_roll is, if not found, we
159 * search from zero.
160 */
161static
162int __uwb_rc_ctx_get(struct uwb_rc *rc, struct uwb_rc_neh *neh)
163{
164 int result;
165 result = find_next_zero_bit(rc->ctx_bm, UWB_RC_CTX_MAX,
166 rc->ctx_roll++);
167 if (result < UWB_RC_CTX_MAX)
168 goto found;
169 result = find_first_zero_bit(rc->ctx_bm, UWB_RC_CTX_MAX);
170 if (result < UWB_RC_CTX_MAX)
171 goto found;
172 return -ENFILE;
173found:
174 set_bit(result, rc->ctx_bm);
175 neh->context = result;
176 return 0;
177}
178
179
180/** Releases @neh's context ID back to @rc (@rc->neh_lock is locked). */
181static
182void __uwb_rc_ctx_put(struct uwb_rc *rc, struct uwb_rc_neh *neh)
183{
184 struct device *dev = &rc->uwb_dev.dev;
185 if (neh->context == 0)
186 return;
187 if (test_bit(neh->context, rc->ctx_bm) == 0) {
188 dev_err(dev, "context %u not set in bitmap\n",
189 neh->context);
190 WARN_ON(1);
191 }
192 clear_bit(neh->context, rc->ctx_bm);
193 neh->context = 0;
194}
195
196/**
197 * uwb_rc_neh_add - add a neh for a radio controller command
198 * @rc: the radio controller
199 * @cmd: the radio controller command
200 * @expected_type: the type of the expected response event
201 * @expected_event: the expected event ID
202 * @cb: callback for when the event is received
203 * @arg: argument for the callback
204 *
205 * Creates a neh and adds it to the list of those waiting for an
206 * event. A context ID will be assigned to the command.
207 */
208struct uwb_rc_neh *uwb_rc_neh_add(struct uwb_rc *rc, struct uwb_rccb *cmd,
209 u8 expected_type, u16 expected_event,
210 uwb_rc_cmd_cb_f cb, void *arg)
211{
212 int result;
213 unsigned long flags;
214 struct device *dev = &rc->uwb_dev.dev;
215 struct uwb_rc_neh *neh;
216
217 neh = kzalloc(sizeof(*neh), GFP_KERNEL);
218 if (neh == NULL) {
219 result = -ENOMEM;
220 goto error_kzalloc;
221 }
222
223 kref_init(&neh->kref);
224 INIT_LIST_HEAD(&neh->list_node);
225 init_timer(&neh->timer);
226 neh->timer.function = uwb_rc_neh_timer;
227 neh->timer.data = (unsigned long)neh;
228
229 neh->rc = rc;
230 neh->evt_type = expected_type;
231 neh->evt = cpu_to_le16(expected_event);
232 neh->cb = cb;
233 neh->arg = arg;
234
235 spin_lock_irqsave(&rc->neh_lock, flags);
236 result = __uwb_rc_ctx_get(rc, neh);
237 if (result >= 0) {
238 cmd->bCommandContext = neh->context;
239 list_add_tail(&neh->list_node, &rc->neh_list);
240 uwb_rc_neh_get(neh);
241 }
242 spin_unlock_irqrestore(&rc->neh_lock, flags);
243 if (result < 0)
244 goto error_ctx_get;
245
246 return neh;
247
248error_ctx_get:
249 kfree(neh);
250error_kzalloc:
251 dev_err(dev, "cannot open handle to radio controller: %d\n", result);
252 return ERR_PTR(result);
253}
254
255static void __uwb_rc_neh_rm(struct uwb_rc *rc, struct uwb_rc_neh *neh)
256{
257 del_timer(&neh->timer);
258 __uwb_rc_ctx_put(rc, neh);
259 list_del(&neh->list_node);
260}
261
262/**
263 * uwb_rc_neh_rm - remove a neh.
264 * @rc: the radio controller
265 * @neh: the neh to remove
266 *
267 * Remove an active neh immediately instead of waiting for the event
268 * (or a time out).
269 */
270void uwb_rc_neh_rm(struct uwb_rc *rc, struct uwb_rc_neh *neh)
271{
272 unsigned long flags;
273
274 spin_lock_irqsave(&rc->neh_lock, flags);
275 __uwb_rc_neh_rm(rc, neh);
276 spin_unlock_irqrestore(&rc->neh_lock, flags);
277
278 uwb_rc_neh_put(neh);
279}
280
281/**
282 * uwb_rc_neh_arm - arm an event handler timeout timer
283 *
284 * @rc: UWB Radio Controller
285 * @neh: Notification/event handler for @rc
286 *
287 * The timer is only armed if the neh is active.
288 */
289void uwb_rc_neh_arm(struct uwb_rc *rc, struct uwb_rc_neh *neh)
290{
291 unsigned long flags;
292
293 spin_lock_irqsave(&rc->neh_lock, flags);
294 if (neh->context)
295 mod_timer(&neh->timer,
296 jiffies + msecs_to_jiffies(UWB_RC_CMD_TIMEOUT_MS));
297 spin_unlock_irqrestore(&rc->neh_lock, flags);
298}
299
300static void uwb_rc_neh_cb(struct uwb_rc_neh *neh, struct uwb_rceb *rceb, size_t size)
301{
302 (*neh->cb)(neh->rc, neh->arg, rceb, size);
303 uwb_rc_neh_put(neh);
304}
305
306static bool uwb_rc_neh_match(struct uwb_rc_neh *neh, const struct uwb_rceb *rceb)
307{
308 return neh->evt_type == rceb->bEventType
309 && neh->evt == rceb->wEvent
310 && neh->context == rceb->bEventContext;
311}
312
313/**
314 * Find the handle waiting for a RC Radio Control Event
315 *
316 * @rc: UWB Radio Controller
317 * @rceb: Pointer to the RCEB buffer
318 *
319 * Searches @rc's list of pending nehs for one whose expected type,
320 * event code and context ID match @rceb. If found, the neh is
321 * removed from the list (its timeout timer stopped, its context ID
322 * released) and returned; the caller inherits the reference taken
323 * when the neh was added and is expected to run the callback and
324 * then drop it with uwb_rc_neh_put() (uwb_rc_neh_cb() does both).
325 * Returns NULL if no neh is waiting for this event.
326 */
327static
328struct uwb_rc_neh *uwb_rc_neh_lookup(struct uwb_rc *rc,
329 const struct uwb_rceb *rceb)
330{
331 struct uwb_rc_neh *neh = NULL, *h;
332 unsigned long flags;
333
334 spin_lock_irqsave(&rc->neh_lock, flags);
335
336 list_for_each_entry(h, &rc->neh_list, list_node) {
337 if (uwb_rc_neh_match(h, rceb)) {
338 neh = h;
339 break;
340 }
341 }
342
343 if (neh)
344 __uwb_rc_neh_rm(rc, neh);
345
346 spin_unlock_irqrestore(&rc->neh_lock, flags);
347
348 return neh;
349}
350
351
352/**
353 * Process notifications coming from the radio control interface
354 *
355 * @rc: UWB Radio Control Interface descriptor
356 * @rceb: Pointer to a kmalloc'ed copy of the notification data
357 * @size: Size in bytes of the data at @rceb
358 *
359 * This function is called by the event/notif handling subsystem when
360 * notifications arrive (hwarc_probe() arms a notification/event handle
361 * that calls back this function for every received notification; this
362 * function then will rearm itself).
363 *
364 * Notification data buffers are dynamically allocated by the NEH
365 * handling code in neh.c [uwb_rc_neh_lookup()]. What is actually
366 * allocated is space to contain the notification data.
367 *
368 * Buffers are prefixed with a Radio Control Event Block (RCEB) as
369 * defined by the WUSB Wired-Adapter Radio Control interface. We
370 * just use it for the notification code.
371 *
372 * In each case statement we just transcode the endianness of the
373 * different fields. We declare a pointer to a RCI definition of an
374 * event, and then to a UWB definition of the same event (which are
375 * the same, remember), even if we use different pointers.
376 */
377static
378void uwb_rc_notif(struct uwb_rc *rc, struct uwb_rceb *rceb, ssize_t size)
379{
380 struct device *dev = &rc->uwb_dev.dev;
381 struct uwb_event *uwb_evt;
382
383 if (size == -ESHUTDOWN)
384 return;
385 if (size < 0) {
386		dev_err(dev, "ignoring event with error code %zd\n",
387 size);
388 return;
389 }
390
391 uwb_evt = kzalloc(sizeof(*uwb_evt), GFP_ATOMIC);
392 if (unlikely(uwb_evt == NULL)) {
393 dev_err(dev, "no memory to queue event 0x%02x/%04x/%02x\n",
394 rceb->bEventType, le16_to_cpu(rceb->wEvent),
395 rceb->bEventContext);
396 return;
397 }
398 uwb_evt->rc = __uwb_rc_get(rc); /* will be put by uwbd's uwbd_event_handle() */
399 uwb_evt->ts_jiffies = jiffies;
400 uwb_evt->type = UWB_EVT_TYPE_NOTIF;
401 uwb_evt->notif.size = size;
402 uwb_evt->notif.rceb = rceb;
403
404 switch (le16_to_cpu(rceb->wEvent)) {
405 /* Trap some vendor specific events
406 *
407 * FIXME: move this to handling in ptc-est, where we
408 * register a NULL event handler for these two guys
409 * using the Intel IDs.
410 */
411 case 0x0103:
412 dev_info(dev, "FIXME: DEVICE ADD\n");
413 return;
414 case 0x0104:
415 dev_info(dev, "FIXME: DEVICE RM\n");
416 return;
417 default:
418 break;
419 }
420
421 uwbd_event_queue(uwb_evt);
422}
423
424static void uwb_rc_neh_grok_event(struct uwb_rc *rc, struct uwb_rceb *rceb, size_t size)
425{
426 struct device *dev = &rc->uwb_dev.dev;
427 struct uwb_rc_neh *neh;
428 struct uwb_rceb *notif;
429
430 if (rceb->bEventContext == 0) {
431 notif = kmalloc(size, GFP_ATOMIC);
432 if (notif) {
433 memcpy(notif, rceb, size);
434 uwb_rc_notif(rc, notif, size);
435 } else
436 dev_err(dev, "event 0x%02x/%04x/%02x (%zu bytes): no memory\n",
437 rceb->bEventType, le16_to_cpu(rceb->wEvent),
438 rceb->bEventContext, size);
439 } else {
440 neh = uwb_rc_neh_lookup(rc, rceb);
441 if (neh)
442 uwb_rc_neh_cb(neh, rceb, size);
443 else
444 dev_warn(dev, "event 0x%02x/%04x/%02x (%zu bytes): nobody cared\n",
445 rceb->bEventType, le16_to_cpu(rceb->wEvent),
446 rceb->bEventContext, size);
447 }
448}
449
450/**
451 * Given a buffer with one or more UWB RC events/notifications, break
452 * them up and dispatch them.
453 *
454 * @rc: UWB Radio Controller
455 * @buf: Buffer with the stream of notifications/events
456 * @buf_size: Amount of data in the buffer
457 *
458 * Note each notification/event always starts with a 'struct
459 * uwb_rceb', so the minimum size is 4 bytes.
460 *
461 * The device may pass us events formatted differently than expected.
462 * These are first filtered, potentially creating a new event in a new
463 * memory location. If a new event is created by the filter it is also
464 * freed here.
465 *
466 * For each notif/event, tries to guess the size looking at the EST
467 * tables, then looks for a neh that is waiting for that event and if
468 * found, copies the payload to the neh's buffer and calls it back. If
469 * not, the data is ignored.
470 *
471 * Note that if we can't find a size description in the EST tables, we
472 * still might find a size in the 'neh' handle in uwb_rc_neh_lookup().
473 *
474 * Assumptions:
475 *
476 * @rc->neh_lock is NOT taken
477 *
478 * We keep track of various sizes here:
479 * size: contains the size of the buffer that is processed for the
480 * incoming event. This buffer may contain events that are not
481 * formatted as WHCI.
482 * real_size: the actual space taken by this event in the buffer.
483 * We need to keep track of the real size of an event to be able to
484 * advance the buffer correctly.
485 * event_size: the size of the event as expected by the core layer
486 * [OR] the size of the event after filtering. If the filtering
487 * created a new event in a new memory location then this is
488 * effectively the size of a new event buffer
489 */
490void uwb_rc_neh_grok(struct uwb_rc *rc, void *buf, size_t buf_size)
491{
492 struct device *dev = &rc->uwb_dev.dev;
493 void *itr;
494 struct uwb_rceb *rceb;
495 size_t size, real_size, event_size;
496 int needtofree;
497
498 d_fnstart(3, dev, "(rc %p buf %p %zu buf_size)\n", rc, buf, buf_size);
499 d_printf(2, dev, "groking event block: %zu bytes\n", buf_size);
500 itr = buf;
501 size = buf_size;
502 while (size > 0) {
503 if (size < sizeof(*rceb)) {
504 dev_err(dev, "not enough data in event buffer to "
505 "process incoming events (%zu left, minimum is "
506 "%zu)\n", size, sizeof(*rceb));
507 break;
508 }
509
510 rceb = itr;
511 if (rc->filter_event) {
512 needtofree = rc->filter_event(rc, &rceb, size,
513 &real_size, &event_size);
514 if (needtofree < 0 && needtofree != -ENOANO) {
515 dev_err(dev, "BUG: Unable to filter event "
516 "(0x%02x/%04x/%02x) from "
517 "device. \n", rceb->bEventType,
518 le16_to_cpu(rceb->wEvent),
519 rceb->bEventContext);
520 break;
521 }
522 } else
523 needtofree = -ENOANO;
524 /* do real processing if there was no filtering or the
525 * filtering didn't act */
526 if (needtofree == -ENOANO) {
527 ssize_t ret = uwb_est_find_size(rc, rceb, size);
528 if (ret < 0)
529 break;
530 if (ret > size) {
531 dev_err(dev, "BUG: hw sent incomplete event "
532 "0x%02x/%04x/%02x (%zd bytes), only got "
533 "%zu bytes. We don't handle that.\n",
534 rceb->bEventType, le16_to_cpu(rceb->wEvent),
535 rceb->bEventContext, ret, size);
536 break;
537 }
538 real_size = event_size = ret;
539 }
540 uwb_rc_neh_grok_event(rc, rceb, event_size);
541
542 if (needtofree == 1)
543 kfree(rceb);
544
545 itr += real_size;
546 size -= real_size;
547		d_printf(2, dev, "consumed %zu bytes, %zu left\n",
548 event_size, size);
549 }
550 d_fnend(3, dev, "(rc %p buf %p %zu buf_size) = void\n", rc, buf, buf_size);
551}
552EXPORT_SYMBOL_GPL(uwb_rc_neh_grok);
553
554
555/**
556 * The entity that reads from the device notification/event channel has
557 * detected an error.
558 *
559 * @rc: UWB Radio Controller
560 * @error: Errno error code
561 *
562 */
563void uwb_rc_neh_error(struct uwb_rc *rc, int error)
564{
565 struct uwb_rc_neh *neh, *next;
566 unsigned long flags;
567
568 BUG_ON(error >= 0);
569 spin_lock_irqsave(&rc->neh_lock, flags);
570 list_for_each_entry_safe(neh, next, &rc->neh_list, list_node) {
571 __uwb_rc_neh_rm(rc, neh);
572 uwb_rc_neh_cb(neh, NULL, error);
573 }
574 spin_unlock_irqrestore(&rc->neh_lock, flags);
575}
576EXPORT_SYMBOL_GPL(uwb_rc_neh_error);
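[Editor's note] A hypothetical sketch of the reader side described in the header comment: a USB-based radio controller driver feeds each completed notification/event transfer to uwb_rc_neh_grok() and reports stream errors with uwb_rc_neh_error() (the HWA driver does essentially this). Using the URB context to carry the rc pointer is an assumption.

static void example_neep_cb(struct urb *urb)
{
	struct uwb_rc *rc = urb->context;	/* assumed URB context */
	int result;

	switch (urb->status) {
	case 0:
		uwb_rc_neh_grok(rc, urb->transfer_buffer,
				urb->actual_length);
		break;
	case -ECONNRESET:			/* unlinked; do not resubmit */
	case -ESHUTDOWN:
		return;
	default:				/* unrecoverable stream error */
		uwb_rc_neh_error(rc, urb->status);
		return;
	}
	result = usb_submit_urb(urb, GFP_ATOMIC);	/* re-arm the endpoint */
	if (result < 0)
		uwb_rc_neh_error(rc, result);
}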
577
578
579static void uwb_rc_neh_timer(unsigned long arg)
580{
581 struct uwb_rc_neh *neh = (struct uwb_rc_neh *)arg;
582 struct uwb_rc *rc = neh->rc;
583 unsigned long flags;
584
585 spin_lock_irqsave(&rc->neh_lock, flags);
586 __uwb_rc_neh_rm(rc, neh);
587 spin_unlock_irqrestore(&rc->neh_lock, flags);
588
589 uwb_rc_neh_cb(neh, NULL, -ETIMEDOUT);
590}
591
592/** Initializes the @rc's neh subsystem
593 */
594void uwb_rc_neh_create(struct uwb_rc *rc)
595{
596 spin_lock_init(&rc->neh_lock);
597 INIT_LIST_HEAD(&rc->neh_list);
598 set_bit(0, rc->ctx_bm); /* 0 is reserved (see [WUSB] table 8-65) */
599 set_bit(0xff, rc->ctx_bm); /* and 0xff is invalid */
600 rc->ctx_roll = 1;
601}
602
603
604/** Releases the @rc's neh subsystem */
605void uwb_rc_neh_destroy(struct uwb_rc *rc)
606{
607 unsigned long flags;
608 struct uwb_rc_neh *neh, *next;
609
610 spin_lock_irqsave(&rc->neh_lock, flags);
611 list_for_each_entry_safe(neh, next, &rc->neh_list, list_node) {
612 __uwb_rc_neh_rm(rc, neh);
613 uwb_rc_neh_put(neh);
614 }
615 spin_unlock_irqrestore(&rc->neh_lock, flags);
616}
diff --git a/drivers/uwb/pal.c b/drivers/uwb/pal.c
new file mode 100644
index 000000000000..1afb38eacb9a
--- /dev/null
+++ b/drivers/uwb/pal.c
@@ -0,0 +1,91 @@
1/*
2 * UWB PAL support.
3 *
4 * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License version
8 * 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18#include <linux/kernel.h>
19#include <linux/uwb.h>
20
21#include "uwb-internal.h"
22
23/**
24 * uwb_pal_init - initialize a UWB PAL
25 * @pal: the PAL to initialize
26 */
27void uwb_pal_init(struct uwb_pal *pal)
28{
29 INIT_LIST_HEAD(&pal->node);
30}
31EXPORT_SYMBOL_GPL(uwb_pal_init);
32
33/**
34 * uwb_pal_register - register a UWB PAL
35 * @rc: the radio controller the PAL will be using
36 * @pal: the PAL
37 *
38 * The PAL must be initialized with uwb_pal_init().
39 */
40int uwb_pal_register(struct uwb_rc *rc, struct uwb_pal *pal)
41{
42 int ret;
43
44 if (pal->device) {
45 ret = sysfs_create_link(&pal->device->kobj,
46 &rc->uwb_dev.dev.kobj, "uwb_rc");
47 if (ret < 0)
48 return ret;
49 ret = sysfs_create_link(&rc->uwb_dev.dev.kobj,
50 &pal->device->kobj, pal->name);
51 if (ret < 0) {
52 sysfs_remove_link(&pal->device->kobj, "uwb_rc");
53 return ret;
54 }
55 }
56
57 spin_lock(&rc->pal_lock);
58 list_add(&pal->node, &rc->pals);
59 spin_unlock(&rc->pal_lock);
60
61 return 0;
62}
63EXPORT_SYMBOL_GPL(uwb_pal_register);
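[Editor's note] A hypothetical sketch of a PAL registering itself: it embeds a struct uwb_pal, initializes it and registers against the radio controller it wants to use. The pal->name and pal->device fields drive the sysfs cross-links created above; the structure and function names below are assumptions.

struct example_pal {
	struct uwb_pal pal;
	/* ... PAL private state ... */
};

static int example_pal_start(struct example_pal *epal, struct uwb_rc *rc,
			     struct device *dev)
{
	uwb_pal_init(&epal->pal);
	epal->pal.name = "example";
	epal->pal.device = dev;	/* may be NULL: no sysfs links are made then */

	return uwb_pal_register(rc, &epal->pal);
}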
64
65/**
66 * uwb_pal_unregister - unregister a UWB PAL
67 * @rc: the radio controller the PAL was using
68 * @pal: the PAL
69 */
70void uwb_pal_unregister(struct uwb_rc *rc, struct uwb_pal *pal)
71{
72 spin_lock(&rc->pal_lock);
73 list_del(&pal->node);
74 spin_unlock(&rc->pal_lock);
75
76 if (pal->device) {
77 sysfs_remove_link(&rc->uwb_dev.dev.kobj, pal->name);
78 sysfs_remove_link(&pal->device->kobj, "uwb_rc");
79 }
80}
81EXPORT_SYMBOL_GPL(uwb_pal_unregister);
82
83/**
84 * uwb_rc_pal_init - initialize the PAL related parts of a radio controller
85 * @rc: the radio controller
86 */
87void uwb_rc_pal_init(struct uwb_rc *rc)
88{
89 spin_lock_init(&rc->pal_lock);
90 INIT_LIST_HEAD(&rc->pals);
91}
diff --git a/drivers/uwb/reset.c b/drivers/uwb/reset.c
new file mode 100644
index 000000000000..8de856fa7958
--- /dev/null
+++ b/drivers/uwb/reset.c
@@ -0,0 +1,362 @@
1/*
2 * Ultra Wide Band
3 * UWB basic command support and radio reset
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * FIXME:
24 *
25 * - docs
26 *
27 * - Now we are serializing (using the uwb_dev->mutex) the command
28 * execution; it should be parallelized as much as possible some
29 * day.
30 */
31#include <linux/kernel.h>
32#include <linux/err.h>
33
34#include "uwb-internal.h"
35#define D_LOCAL 0
36#include <linux/uwb/debug.h>
37
38/**
39 * Command result codes (WUSB1.0[T8-69])
40 */
41static
42const char *__strerror[] = {
43 "success",
44 "failure",
45 "hardware failure",
46 "no more slots",
47 "beacon is too large",
48 "invalid parameter",
49 "unsupported power level",
50 "time out (wa) or invalid ie data (whci)",
51 "beacon size exceeded",
52 "cancelled",
53 "invalid state",
54 "invalid size",
55	"ack not received",
56 "no more asie notification",
57};
58
59
60/** Return a string matching the given error code */
61const char *uwb_rc_strerror(unsigned code)
62{
63 if (code == 255)
64 return "time out";
65 if (code >= ARRAY_SIZE(__strerror))
66 return "unknown error";
67 return __strerror[code];
68}
69
70int uwb_rc_cmd_async(struct uwb_rc *rc, const char *cmd_name,
71 struct uwb_rccb *cmd, size_t cmd_size,
72 u8 expected_type, u16 expected_event,
73 uwb_rc_cmd_cb_f cb, void *arg)
74{
75 struct device *dev = &rc->uwb_dev.dev;
76 struct uwb_rc_neh *neh;
77 int needtofree = 0;
78 int result;
79
80 uwb_dev_lock(&rc->uwb_dev); /* Protect against rc->priv being removed */
81 if (rc->priv == NULL) {
82 uwb_dev_unlock(&rc->uwb_dev);
83 return -ESHUTDOWN;
84 }
85
86 if (rc->filter_cmd) {
87 needtofree = rc->filter_cmd(rc, &cmd, &cmd_size);
88 if (needtofree < 0 && needtofree != -ENOANO) {
89 dev_err(dev, "%s: filter error: %d\n",
90 cmd_name, needtofree);
91 uwb_dev_unlock(&rc->uwb_dev);
92 return needtofree;
93 }
94 }
95
96 neh = uwb_rc_neh_add(rc, cmd, expected_type, expected_event, cb, arg);
97 if (IS_ERR(neh)) {
98 result = PTR_ERR(neh);
99 goto out;
100 }
101
102 result = rc->cmd(rc, cmd, cmd_size);
103 uwb_dev_unlock(&rc->uwb_dev);
104 if (result < 0)
105 uwb_rc_neh_rm(rc, neh);
106 else
107 uwb_rc_neh_arm(rc, neh);
108 uwb_rc_neh_put(neh);
109out:
110 if (needtofree == 1)
111 kfree(cmd);
112 return result < 0 ? result : 0;
113}
114EXPORT_SYMBOL_GPL(uwb_rc_cmd_async);
115
116struct uwb_rc_cmd_done_params {
117 struct completion completion;
118 struct uwb_rceb *reply;
119 ssize_t reply_size;
120};
121
122static void uwb_rc_cmd_done(struct uwb_rc *rc, void *arg,
123 struct uwb_rceb *reply, ssize_t reply_size)
124{
125 struct uwb_rc_cmd_done_params *p = (struct uwb_rc_cmd_done_params *)arg;
126
127 if (reply_size > 0) {
128 if (p->reply)
129 reply_size = min(p->reply_size, reply_size);
130 else
131 p->reply = kmalloc(reply_size, GFP_ATOMIC);
132
133 if (p->reply)
134 memcpy(p->reply, reply, reply_size);
135 else
136 reply_size = -ENOMEM;
137 }
138 p->reply_size = reply_size;
139 complete(&p->completion);
140}
141
142
143/**
144 * Generic function for issuing commands to the Radio Control Interface
145 *
146 * @rc: UWB Radio Control descriptor
147 * @cmd_name: Name of the command being issued (for error messages)
148 * @cmd: Pointer to rccb structure containing the command;
149 * normally you embed this structure as the first member of
150 * the full command structure.
151 * @cmd_size: Size of the whole command buffer pointed to by @cmd.
152 * @reply: Pointer to where to store the reply
153 * @reply_size: @reply's size
154 * @expected_type: Expected type in the return event
155 * @expected_event: Expected event code in the return event
156 * @preply: Here a pointer to where the event data is received will
157 * be stored. Once done with the data, free with kfree().
158 *
159 * This function is generic; it works for commands that return a fixed
160 * and known size or for commands that return a variable amount of data.
161 *
162 * If a buffer is provided, that is used, although it could be chopped
163 * to the maximum size of the buffer. If the buffer is NULL, then one will
164 * be allocated in *preply with the whole contents of the reply.
165 *
166 * @rc needs to be referenced
167 */
168static
169ssize_t __uwb_rc_cmd(struct uwb_rc *rc, const char *cmd_name,
170 struct uwb_rccb *cmd, size_t cmd_size,
171 struct uwb_rceb *reply, size_t reply_size,
172 u8 expected_type, u16 expected_event,
173 struct uwb_rceb **preply)
174{
175 ssize_t result = 0;
176 struct device *dev = &rc->uwb_dev.dev;
177 struct uwb_rc_cmd_done_params params;
178
179 init_completion(&params.completion);
180 params.reply = reply;
181 params.reply_size = reply_size;
182
183 result = uwb_rc_cmd_async(rc, cmd_name, cmd, cmd_size,
184 expected_type, expected_event,
185 uwb_rc_cmd_done, &params);
186 if (result)
187 return result;
188
189 wait_for_completion(&params.completion);
190
191 if (preply)
192 *preply = params.reply;
193
194 if (params.reply_size < 0)
195 dev_err(dev, "%s: confirmation event 0x%02x/%04x/%02x "
196 "reception failed: %d\n", cmd_name,
197 expected_type, expected_event, cmd->bCommandContext,
198 (int)params.reply_size);
199 return params.reply_size;
200}
201
202
203/**
204 * Generic function for issuing commands to the Radio Control Interface
205 *
206 * @rc: UWB Radio Control descriptor
207 * @cmd_name: Name of the command being issued (for error messages)
208 * @cmd: Pointer to rccb structure containing the command;
209 * normally you embed this structure as the first member of
210 * the full command structure.
211 * @cmd_size: Size of the whole command buffer pointed to by @cmd.
212 * @reply: Pointer to the beginning of the confirmation event
213 * buffer. Normally bigger than an 'struct hwarc_rceb'.
214 * You need to fill out reply->bEventType and reply->wEvent (in
215 * cpu order) as the function will use them to verify the
216 * confirmation event.
217 * @reply_size: Size of the reply buffer
218 *
219 * The function checks that the length returned in the reply is at
220 * least as big as @reply_size; if not, it will be deemed an error and
221 * -EIO returned.
222 *
223 * @rc needs to be referenced
224 */
225ssize_t uwb_rc_cmd(struct uwb_rc *rc, const char *cmd_name,
226 struct uwb_rccb *cmd, size_t cmd_size,
227 struct uwb_rceb *reply, size_t reply_size)
228{
229 struct device *dev = &rc->uwb_dev.dev;
230 ssize_t result;
231
232 result = __uwb_rc_cmd(rc, cmd_name,
233 cmd, cmd_size, reply, reply_size,
234 reply->bEventType, reply->wEvent, NULL);
235
236 if (result > 0 && result < reply_size) {
237 dev_err(dev, "%s: not enough data returned for decoding reply "
238			"(%zd bytes received vs at least %zu needed)\n",
239 cmd_name, result, reply_size);
240 result = -EIO;
241 }
242 return result;
243}
244EXPORT_SYMBOL_GPL(uwb_rc_cmd);
245
246
247/**
248 * Generic function for issuing commands to the Radio Control
249 * Interface that return an unknown amount of data
250 *
251 * @rc: UWB Radio Control descriptor
252 * @cmd_name: Name of the command being issued (for error messages)
253 * @cmd: Pointer to rccb structure containing the command;
254 * normally you embed this structure as the first member of
255 * the full command structure.
256 * @cmd_size: Size of the whole command buffer pointed to by @cmd.
257 * @expected_type: Expected type in the return event
258 * @expected_event: Expected event code in the return event
259 * @preply: Here a pointer to where the event data is received will
260 * be stored. Once done with the data, free with kfree().
261 *
262 * The function checks that the length returned in the reply is at
263 * least as big as a 'struct uwb_rceb *'; if not, it will be deemed an
264 * error and -EIO returned.
265 *
266 * @rc needs to be referenced
267 */
268ssize_t uwb_rc_vcmd(struct uwb_rc *rc, const char *cmd_name,
269 struct uwb_rccb *cmd, size_t cmd_size,
270 u8 expected_type, u16 expected_event,
271 struct uwb_rceb **preply)
272{
273 return __uwb_rc_cmd(rc, cmd_name, cmd, cmd_size, NULL, 0,
274 expected_type, expected_event, preply);
275}
276EXPORT_SYMBOL_GPL(uwb_rc_vcmd);
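[Editor's note] A hypothetical sketch of a variable-size command: fetching the current IEs, roughly what uwb_rc_get_ie() (used by uwb_rc_print_IEs() earlier in this patch) does. It assumes UWB_RC_CMD_GET_IE and struct uwb_rc_evt_get_ie from the UWB spec headers.

static ssize_t example_get_ie(struct uwb_rc *rc,
			      struct uwb_rc_evt_get_ie **ie_info)
{
	struct uwb_rccb *cmd;
	ssize_t result;

	/* commands go on kmalloc'ed memory, see uwb_rc_reset() below */
	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;
	cmd->bCommandType = UWB_RC_CET_GENERAL;
	cmd->wCommand = cpu_to_le16(UWB_RC_CMD_GET_IE);

	result = uwb_rc_vcmd(rc, "GET_IE", cmd, sizeof(*cmd),
			     UWB_RC_CET_GENERAL, UWB_RC_CMD_GET_IE,
			     (struct uwb_rceb **)ie_info);
	kfree(cmd);
	/* on success *ie_info is kmalloc'ed; kfree() it when done */
	return result;
}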
277
278
279/**
280 * Reset a UWB Host Controller (and all radio settings)
281 *
282 * @rc: Host Controller descriptor
283 * @returns: 0 if ok, < 0 errno code on error
284 *
285 * We put the command on kmalloc'ed memory as some arches cannot do
286 * USB from the stack. The reply event is copied from a staging
287 * buffer, so it can be on the stack. See WUSB1.0[8.6.2.4] for more details.
288 */
289int uwb_rc_reset(struct uwb_rc *rc)
290{
291 int result = -ENOMEM;
292 struct uwb_rc_evt_confirm reply;
293 struct uwb_rccb *cmd;
294 size_t cmd_size = sizeof(*cmd);
295
296 mutex_lock(&rc->uwb_dev.mutex);
297 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
298 if (cmd == NULL)
299 goto error_kzalloc;
300 cmd->bCommandType = UWB_RC_CET_GENERAL;
301 cmd->wCommand = cpu_to_le16(UWB_RC_CMD_RESET);
302 reply.rceb.bEventType = UWB_RC_CET_GENERAL;
303 reply.rceb.wEvent = UWB_RC_CMD_RESET;
304 result = uwb_rc_cmd(rc, "RESET", cmd, cmd_size,
305 &reply.rceb, sizeof(reply));
306 if (result < 0)
307 goto error_cmd;
308 if (reply.bResultCode != UWB_RC_RES_SUCCESS) {
309 dev_err(&rc->uwb_dev.dev,
310 "RESET: command execution failed: %s (%d)\n",
311 uwb_rc_strerror(reply.bResultCode), reply.bResultCode);
312 result = -EIO;
313 }
314error_cmd:
315 kfree(cmd);
316error_kzalloc:
317 mutex_unlock(&rc->uwb_dev.mutex);
318 return result;
319}
320
321int uwbd_msg_handle_reset(struct uwb_event *evt)
322{
323 struct uwb_rc *rc = evt->rc;
324 int ret;
325
326 /* Need to prevent the RC hardware module going away while in
327 the rc->reset() call. */
328 if (!try_module_get(rc->owner))
329 return 0;
330
331 dev_info(&rc->uwb_dev.dev, "resetting radio controller\n");
332 ret = rc->reset(rc);
333 if (ret)
334 dev_err(&rc->uwb_dev.dev, "failed to reset hardware: %d\n", ret);
335
336 module_put(rc->owner);
337 return ret;
338}
339
340/**
341 * uwb_rc_reset_all - request a reset of the radio controller and PALs
342 * @rc: the radio controller of the hardware device to be reset.
343 *
344 * The full hardware reset of the radio controller and all the PALs
345 * will be scheduled.
346 */
347void uwb_rc_reset_all(struct uwb_rc *rc)
348{
349 struct uwb_event *evt;
350
351 evt = kzalloc(sizeof(struct uwb_event), GFP_ATOMIC);
352 if (unlikely(evt == NULL))
353 return;
354
355 evt->rc = __uwb_rc_get(rc); /* will be put by uwbd's uwbd_event_handle() */
356 evt->ts_jiffies = jiffies;
357 evt->type = UWB_EVT_TYPE_MSG;
358 evt->message = UWB_EVT_MSG_RESET;
359
360 uwbd_event_queue(evt);
361}
362EXPORT_SYMBOL_GPL(uwb_rc_reset_all);
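[Editor's note] A hypothetical sketch of the intended caller: a transport driver that hits an unrecoverable error queues a full reset; the reset itself runs later from the uwbd thread via uwbd_msg_handle_reset() above.

static void example_on_fatal_error(struct uwb_rc *rc)
{
	dev_err(&rc->uwb_dev.dev, "fatal hardware error, scheduling reset\n");
	uwb_rc_reset_all(rc);	/* safe from atomic context (GFP_ATOMIC) */
}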
diff --git a/drivers/uwb/rsv.c b/drivers/uwb/rsv.c
new file mode 100644
index 000000000000..bae16204576d
--- /dev/null
+++ b/drivers/uwb/rsv.c
@@ -0,0 +1,680 @@
1/*
2 * UWB reservation management.
3 *
4 * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License version
8 * 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18#include <linux/version.h>
19#include <linux/kernel.h>
20#include <linux/uwb.h>
21
22#include "uwb-internal.h"
23
24static void uwb_rsv_timer(unsigned long arg);
25
26static const char *rsv_states[] = {
27 [UWB_RSV_STATE_NONE] = "none",
28 [UWB_RSV_STATE_O_INITIATED] = "initiated",
29 [UWB_RSV_STATE_O_PENDING] = "pending",
30 [UWB_RSV_STATE_O_MODIFIED] = "modified",
31 [UWB_RSV_STATE_O_ESTABLISHED] = "established",
32 [UWB_RSV_STATE_T_ACCEPTED] = "accepted",
33 [UWB_RSV_STATE_T_DENIED] = "denied",
34 [UWB_RSV_STATE_T_PENDING] = "pending",
35};
36
37static const char *rsv_types[] = {
38 [UWB_DRP_TYPE_ALIEN_BP] = "alien-bp",
39 [UWB_DRP_TYPE_HARD] = "hard",
40 [UWB_DRP_TYPE_SOFT] = "soft",
41 [UWB_DRP_TYPE_PRIVATE] = "private",
42 [UWB_DRP_TYPE_PCA] = "pca",
43};
44
45/**
46 * uwb_rsv_state_str - return a string for a reservation state
47 * @state: the reservation state.
48 */
49const char *uwb_rsv_state_str(enum uwb_rsv_state state)
50{
51 if (state < UWB_RSV_STATE_NONE || state >= UWB_RSV_STATE_LAST)
52 return "unknown";
53 return rsv_states[state];
54}
55EXPORT_SYMBOL_GPL(uwb_rsv_state_str);
56
57/**
58 * uwb_rsv_type_str - return a string for a reservation type
59 * @type: the reservation type
60 */
61const char *uwb_rsv_type_str(enum uwb_drp_type type)
62{
63 if (type < UWB_DRP_TYPE_ALIEN_BP || type > UWB_DRP_TYPE_PCA)
64 return "invalid";
65 return rsv_types[type];
66}
67EXPORT_SYMBOL_GPL(uwb_rsv_type_str);
68
69static void uwb_rsv_dump(struct uwb_rsv *rsv)
70{
71 struct device *dev = &rsv->rc->uwb_dev.dev;
72 struct uwb_dev_addr devaddr;
73 char owner[UWB_ADDR_STRSIZE], target[UWB_ADDR_STRSIZE];
74
75 uwb_dev_addr_print(owner, sizeof(owner), &rsv->owner->dev_addr);
76 if (rsv->target.type == UWB_RSV_TARGET_DEV)
77 devaddr = rsv->target.dev->dev_addr;
78 else
79 devaddr = rsv->target.devaddr;
80 uwb_dev_addr_print(target, sizeof(target), &devaddr);
81
82 dev_dbg(dev, "rsv %s -> %s: %s\n", owner, target, uwb_rsv_state_str(rsv->state));
83}
84
85/*
86 * Get a free stream index for a reservation.
87 *
88 * If the target is a DevAddr (e.g., a WUSB cluster reservation) then
89 * the stream is allocated from a pool of per-RC stream indexes,
90 * otherwise a unique stream index for the target is selected.
91 */
92static int uwb_rsv_get_stream(struct uwb_rsv *rsv)
93{
94 struct uwb_rc *rc = rsv->rc;
95 unsigned long *streams_bm;
96 int stream;
97
98 switch (rsv->target.type) {
99 case UWB_RSV_TARGET_DEV:
100 streams_bm = rsv->target.dev->streams;
101 break;
102 case UWB_RSV_TARGET_DEVADDR:
103 streams_bm = rc->uwb_dev.streams;
104 break;
105 default:
106 return -EINVAL;
107 }
108
109 stream = find_first_zero_bit(streams_bm, UWB_NUM_STREAMS);
110 if (stream >= UWB_NUM_STREAMS)
111 return -EBUSY;
112
113 rsv->stream = stream;
114 set_bit(stream, streams_bm);
115
116 return 0;
117}
118
119static void uwb_rsv_put_stream(struct uwb_rsv *rsv)
120{
121 struct uwb_rc *rc = rsv->rc;
122 unsigned long *streams_bm;
123
124 switch (rsv->target.type) {
125 case UWB_RSV_TARGET_DEV:
126 streams_bm = rsv->target.dev->streams;
127 break;
128 case UWB_RSV_TARGET_DEVADDR:
129 streams_bm = rc->uwb_dev.streams;
130 break;
131 default:
132 return;
133 }
134
135 clear_bit(rsv->stream, streams_bm);
136}
137
138/*
139 * Generate a MAS allocation with a single row component.
140 */
141static void uwb_rsv_gen_alloc_row(struct uwb_mas_bm *mas,
142 int first_mas, int mas_per_zone,
143 int zs, int ze)
144{
145 struct uwb_mas_bm col;
146 int z;
147
148 bitmap_zero(mas->bm, UWB_NUM_MAS);
149 bitmap_zero(col.bm, UWB_NUM_MAS);
150 bitmap_fill(col.bm, mas_per_zone);
151 bitmap_shift_left(col.bm, col.bm, first_mas + zs * UWB_MAS_PER_ZONE, UWB_NUM_MAS);
152
153 for (z = zs; z <= ze; z++) {
154 bitmap_or(mas->bm, mas->bm, col.bm, UWB_NUM_MAS);
155 bitmap_shift_left(col.bm, col.bm, UWB_MAS_PER_ZONE, UWB_NUM_MAS);
156 }
157}
158
159/*
160 * Allocate some MAS for this reservation based on current local
161 * availability, the reservation parameters (max_mas, min_mas,
162 * sparsity), and the WiMedia rules for MAS allocations.
163 *
164 * Returns -EBUSY if insufficient free MAS are available.
165 *
166 * FIXME: to simplify this, only safe reservations with a single row
167 * component in zones 1 to 15 are tried (zone 0 is skipped to avoid
168 * problems with the MAS reserved for the BP).
169 *
170 * [ECMA-368] section B.2.
171 */
172static int uwb_rsv_alloc_mas(struct uwb_rsv *rsv)
173{
174 static const int safe_mas_in_row[UWB_NUM_ZONES] = {
175 8, 7, 6, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 2, 1,
176 };
177 int n, r;
178 struct uwb_mas_bm mas;
179 bool found = false;
180
181 /*
182 * Search all valid safe allocations until either: too few MAS
183 * are available; or the smallest allocation with sufficient
184 * MAS is found.
185 *
186 * The top of the zones is preferred, so space for larger
187 * allocations is available in the bottom of the zone (e.g., a
188 * 15 MAS allocation should start in row 14 leaving space for
189 * a 120 MAS allocation at row 0).
190 */
191 for (n = safe_mas_in_row[0]; n >= 1; n--) {
192 int num_mas;
193
194 num_mas = n * (UWB_NUM_ZONES - 1);
195 if (num_mas < rsv->min_mas)
196 break;
197 if (found && num_mas < rsv->max_mas)
198 break;
199
200 for (r = UWB_MAS_PER_ZONE-1; r >= 0; r--) {
201 if (safe_mas_in_row[r] < n)
202 continue;
203 uwb_rsv_gen_alloc_row(&mas, r, n, 1, UWB_NUM_ZONES);
204 if (uwb_drp_avail_reserve_pending(rsv->rc, &mas) == 0) {
205 found = true;
206 break;
207 }
208 }
209 }
210
211 if (!found)
212 return -EBUSY;
213
214 bitmap_copy(rsv->mas.bm, mas.bm, UWB_NUM_MAS);
215 return 0;
216}
217
218static void uwb_rsv_stroke_timer(struct uwb_rsv *rsv)
219{
220 int sframes = UWB_MAX_LOST_BEACONS;
221
222 /*
223 * Multicast reservations can become established within 1
224 * super frame and should not be terminated if no response is
225 * received.
226 */
227 if (rsv->is_multicast) {
228 if (rsv->state == UWB_RSV_STATE_O_INITIATED)
229 sframes = 1;
230 if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED)
231 sframes = 0;
232 }
233
234 rsv->expired = false;
235 if (sframes > 0) {
236 /*
237 * Add an additional 2 superframes to account for the
238 * time to send the SET DRP IE command.
239 */
240 unsigned timeout_us = (sframes + 2) * UWB_SUPERFRAME_LENGTH_US;
241 mod_timer(&rsv->timer, jiffies + usecs_to_jiffies(timeout_us));
242 } else
243 del_timer(&rsv->timer);
244}
245
246/*
247 * Update a reservation's state, and schedule an update of the
248 * transmitted DRP IEs.
249 */
250static void uwb_rsv_state_update(struct uwb_rsv *rsv,
251 enum uwb_rsv_state new_state)
252{
253 rsv->state = new_state;
254 rsv->ie_valid = false;
255
256 uwb_rsv_dump(rsv);
257
258 uwb_rsv_stroke_timer(rsv);
259 uwb_rsv_sched_update(rsv->rc);
260}
261
262static void uwb_rsv_callback(struct uwb_rsv *rsv)
263{
264 if (rsv->callback)
265 rsv->callback(rsv);
266}
267
268void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state)
269{
270 if (rsv->state == new_state) {
271 switch (rsv->state) {
272 case UWB_RSV_STATE_O_ESTABLISHED:
273 case UWB_RSV_STATE_T_ACCEPTED:
274 case UWB_RSV_STATE_NONE:
275 uwb_rsv_stroke_timer(rsv);
276 break;
277 default:
278 /* Expecting a state transition so leave timer
279 as-is. */
280 break;
281 }
282 return;
283 }
284
285 switch (new_state) {
286 case UWB_RSV_STATE_NONE:
287 uwb_drp_avail_release(rsv->rc, &rsv->mas);
288 uwb_rsv_put_stream(rsv);
289 uwb_rsv_state_update(rsv, UWB_RSV_STATE_NONE);
290 uwb_rsv_callback(rsv);
291 break;
292 case UWB_RSV_STATE_O_INITIATED:
293 uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_INITIATED);
294 break;
295 case UWB_RSV_STATE_O_PENDING:
296 uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_PENDING);
297 break;
298 case UWB_RSV_STATE_O_ESTABLISHED:
299 uwb_drp_avail_reserve(rsv->rc, &rsv->mas);
300 uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_ESTABLISHED);
301 uwb_rsv_callback(rsv);
302 break;
303 case UWB_RSV_STATE_T_ACCEPTED:
304 uwb_drp_avail_reserve(rsv->rc, &rsv->mas);
305 uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_ACCEPTED);
306 uwb_rsv_callback(rsv);
307 break;
308 case UWB_RSV_STATE_T_DENIED:
309 uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_DENIED);
310 break;
311 default:
312 dev_err(&rsv->rc->uwb_dev.dev, "unhandled state: %s (%d)\n",
313 uwb_rsv_state_str(new_state), new_state);
314 }
315}
316
317static struct uwb_rsv *uwb_rsv_alloc(struct uwb_rc *rc)
318{
319 struct uwb_rsv *rsv;
320
321 rsv = kzalloc(sizeof(struct uwb_rsv), GFP_KERNEL);
322 if (!rsv)
323 return NULL;
324
325 INIT_LIST_HEAD(&rsv->rc_node);
326 INIT_LIST_HEAD(&rsv->pal_node);
327 init_timer(&rsv->timer);
328 rsv->timer.function = uwb_rsv_timer;
329 rsv->timer.data = (unsigned long)rsv;
330
331 rsv->rc = rc;
332
333 return rsv;
334}
335
336static void uwb_rsv_free(struct uwb_rsv *rsv)
337{
338 uwb_dev_put(rsv->owner);
339 if (rsv->target.type == UWB_RSV_TARGET_DEV)
340 uwb_dev_put(rsv->target.dev);
341 kfree(rsv);
342}
343
344/**
345 * uwb_rsv_create - allocate and initialize a UWB reservation structure
346 * @rc: the radio controller
347 * @cb: callback to use when the reservation completes or terminates
348 * @pal_priv: data private to the PAL to be passed in the callback
349 *
350 * The callback is called when the state of the reservation changes from:
351 *
352 * - pending to accepted
353 * - pending to denied
354 * - accepted to terminated
355 * - pending to terminated
356 */
357struct uwb_rsv *uwb_rsv_create(struct uwb_rc *rc, uwb_rsv_cb_f cb, void *pal_priv)
358{
359 struct uwb_rsv *rsv;
360
361 rsv = uwb_rsv_alloc(rc);
362 if (!rsv)
363 return NULL;
364
365 rsv->callback = cb;
366 rsv->pal_priv = pal_priv;
367
368 return rsv;
369}
370EXPORT_SYMBOL_GPL(uwb_rsv_create);
371
372void uwb_rsv_remove(struct uwb_rsv *rsv)
373{
374 if (rsv->state != UWB_RSV_STATE_NONE)
375 uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
376 del_timer_sync(&rsv->timer);
377 list_del(&rsv->rc_node);
378 uwb_rsv_free(rsv);
379}
380
381/**
382 * uwb_rsv_destroy - free a UWB reservation structure
383 * @rsv: the reservation to free
384 *
385 * The reservation will be terminated if it is pending or established.
386 */
387void uwb_rsv_destroy(struct uwb_rsv *rsv)
388{
389 struct uwb_rc *rc = rsv->rc;
390
391 mutex_lock(&rc->rsvs_mutex);
392 uwb_rsv_remove(rsv);
393 mutex_unlock(&rc->rsvs_mutex);
394}
395EXPORT_SYMBOL_GPL(uwb_rsv_destroy);
396
397/**
398 * uwb_rsv_establish - start a reservation establishment
399 * @rsv: the reservation
400 *
401 * The PAL should fill in @rsv's owner, target, type, max_mas,
402 * min_mas, sparsity and is_multicast fields. If the target is a
403 * uwb_dev it must be referenced.
404 *
405 * The reservation's callback will be called when the reservation is
406 * accepted, denied or times out.
407 */
408int uwb_rsv_establish(struct uwb_rsv *rsv)
409{
410 struct uwb_rc *rc = rsv->rc;
411 int ret;
412
413 mutex_lock(&rc->rsvs_mutex);
414
415 ret = uwb_rsv_get_stream(rsv);
416 if (ret)
417 goto out;
418
419 ret = uwb_rsv_alloc_mas(rsv);
420 if (ret) {
421 uwb_rsv_put_stream(rsv);
422 goto out;
423 }
424
425 list_add_tail(&rsv->rc_node, &rc->reservations);
426 rsv->owner = &rc->uwb_dev;
427 uwb_dev_get(rsv->owner);
428 uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_INITIATED);
429out:
430 mutex_unlock(&rc->rsvs_mutex);
431 return ret;
432}
433EXPORT_SYMBOL_GPL(uwb_rsv_establish);
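[Editor's note] A hypothetical sketch of the originator side, loosely modeled on how a WUSB host PAL might reserve MAS for its cluster: create the reservation, fill in the fields listed above and start the establishment. The parameter values and the broadcast cluster address are assumptions.

static int example_reserve(struct uwb_rc *rc, struct uwb_dev_addr *bcid,
			   uwb_rsv_cb_f cb, void *pal_priv,
			   struct uwb_rsv **out_rsv)
{
	struct uwb_rsv *rsv;
	int ret;

	rsv = uwb_rsv_create(rc, cb, pal_priv);
	if (rsv == NULL)
		return -ENOMEM;

	rsv->owner          = &rc->uwb_dev;
	rsv->target.type    = UWB_RSV_TARGET_DEVADDR;
	rsv->target.devaddr = *bcid;
	rsv->type           = UWB_DRP_TYPE_PRIVATE;
	rsv->max_mas        = 256;	/* hypothetical parameters */
	rsv->min_mas        = 16;
	rsv->sparsity       = 16;
	rsv->is_multicast   = true;

	ret = uwb_rsv_establish(rsv);	/* callback reports accept/deny */
	if (ret < 0) {
		uwb_rsv_destroy(rsv);
		return ret;
	}
	*out_rsv = rsv;
	return 0;
}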
434
435/**
436 * uwb_rsv_modify - modify an already established reservation
437 * @rsv: the reservation to modify
438 * @max_mas: new maximum MAS to reserve
439 * @min_mas: new minimum MAS to reserve
440 * @sparsity: new sparsity to use
441 *
442 * FIXME: implement this once there are PALs that use it.
443 */
444int uwb_rsv_modify(struct uwb_rsv *rsv, int max_mas, int min_mas, int sparsity)
445{
446 return -ENOSYS;
447}
448EXPORT_SYMBOL_GPL(uwb_rsv_modify);
449
450/**
451 * uwb_rsv_terminate - terminate an established reservation
452 * @rsv: the reservation to terminate
453 *
454 * A reservation is terminated by removing the DRP IE from the beacon,
455 * the other end will consider the reservation to be terminated when
456 * it does not see the DRP IE for at least mMaxLostBeacons.
457 *
458 * If applicable, the reference to the target uwb_dev will be released.
459 */
460void uwb_rsv_terminate(struct uwb_rsv *rsv)
461{
462 struct uwb_rc *rc = rsv->rc;
463
464 mutex_lock(&rc->rsvs_mutex);
465
466 uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
467
468 mutex_unlock(&rc->rsvs_mutex);
469}
470EXPORT_SYMBOL_GPL(uwb_rsv_terminate);
471
472/**
473 * uwb_rsv_accept - accept a new reservation from a peer
474 * @rsv: the reservation
475 * @cb: call back for reservation changes
476 * @pal_priv: data to be passed in the above call back
477 *
478 * Reservation requests from peers are denied unless a PAL accepts
479 * them by calling this function.
480 */
481void uwb_rsv_accept(struct uwb_rsv *rsv, uwb_rsv_cb_f cb, void *pal_priv)
482{
483 rsv->callback = cb;
484 rsv->pal_priv = pal_priv;
485 rsv->state = UWB_RSV_STATE_T_ACCEPTED;
486}
487EXPORT_SYMBOL_GPL(uwb_rsv_accept);
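[Editor's note] A hypothetical sketch of the target side: a PAL's new_rsv hook (see pal.c in this patch) inspects an incoming reservation request and accepts the ones it understands; anything it does not accept stays denied.

static void example_target_cb(struct uwb_rsv *rsv)
{
	/* called again on later state changes (e.g. termination) */
}

static void example_pal_new_rsv(struct uwb_rsv *rsv)
{
	if (rsv->type == UWB_DRP_TYPE_HARD)	/* hypothetical policy */
		uwb_rsv_accept(rsv, example_target_cb, NULL);
}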
488
489/*
490 * Does a received DRP IE match this reservation?
491 */
492static bool uwb_rsv_match(struct uwb_rsv *rsv, struct uwb_dev *src,
493 struct uwb_ie_drp *drp_ie)
494{
495 struct uwb_dev_addr *rsv_src;
496 int stream;
497
498 stream = uwb_ie_drp_stream_index(drp_ie);
499
500 if (rsv->stream != stream)
501 return false;
502
503 switch (rsv->target.type) {
504 case UWB_RSV_TARGET_DEVADDR:
505 return rsv->stream == stream;
506 case UWB_RSV_TARGET_DEV:
507 if (uwb_ie_drp_owner(drp_ie))
508 rsv_src = &rsv->owner->dev_addr;
509 else
510 rsv_src = &rsv->target.dev->dev_addr;
511 return uwb_dev_addr_cmp(&src->dev_addr, rsv_src) == 0;
512 }
513 return false;
514}
515
516static struct uwb_rsv *uwb_rsv_new_target(struct uwb_rc *rc,
517 struct uwb_dev *src,
518 struct uwb_ie_drp *drp_ie)
519{
520 struct uwb_rsv *rsv;
521 struct uwb_pal *pal;
522 enum uwb_rsv_state state;
523
524 rsv = uwb_rsv_alloc(rc);
525 if (!rsv)
526 return NULL;
527
528 rsv->rc = rc;
529 rsv->owner = src;
530 uwb_dev_get(rsv->owner);
531 rsv->target.type = UWB_RSV_TARGET_DEV;
532 rsv->target.dev = &rc->uwb_dev;
533 rsv->type = uwb_ie_drp_type(drp_ie);
534 rsv->stream = uwb_ie_drp_stream_index(drp_ie);
535 set_bit(rsv->stream, rsv->owner->streams);
536 uwb_drp_ie_to_bm(&rsv->mas, drp_ie);
537
538 /*
539 * See if any PALs are interested in this reservation. If not,
540 * deny the request.
541 */
542 rsv->state = UWB_RSV_STATE_T_DENIED;
543 spin_lock(&rc->pal_lock);
544 list_for_each_entry(pal, &rc->pals, node) {
545 if (pal->new_rsv)
546 pal->new_rsv(rsv);
547 if (rsv->state == UWB_RSV_STATE_T_ACCEPTED)
548 break;
549 }
550 spin_unlock(&rc->pal_lock);
551
552 list_add_tail(&rsv->rc_node, &rc->reservations);
553 state = rsv->state;
554 rsv->state = UWB_RSV_STATE_NONE;
555 uwb_rsv_set_state(rsv, state);
556
557 return rsv;
558}
559
560/**
561 * uwb_rsv_find - find a reservation for a received DRP IE.
562 * @rc: the radio controller
563 * @src: source of the DRP IE
564 * @drp_ie: the DRP IE
565 *
566 * If the reservation cannot be found and the DRP IE is from a peer
567 * attempting to establish a new reservation, create a new reservation
568 * and add it to the list.
569 */
570struct uwb_rsv *uwb_rsv_find(struct uwb_rc *rc, struct uwb_dev *src,
571 struct uwb_ie_drp *drp_ie)
572{
573 struct uwb_rsv *rsv;
574
575 list_for_each_entry(rsv, &rc->reservations, rc_node) {
576 if (uwb_rsv_match(rsv, src, drp_ie))
577 return rsv;
578 }
579
580 if (uwb_ie_drp_owner(drp_ie))
581 return uwb_rsv_new_target(rc, src, drp_ie);
582
583 return NULL;
584}
585
586/*
587 * Go through all the reservations and check for timeouts and (if
588 * necessary) update their DRP IEs.
589 *
590 * FIXME: look at building the SET_DRP_IE command here rather than
591 * having to rescan the list in uwb_rc_send_all_drp_ie().
592 */
593static bool uwb_rsv_update_all(struct uwb_rc *rc)
594{
595 struct uwb_rsv *rsv, *t;
596 bool ie_updated = false;
597
598 list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) {
599 if (rsv->expired)
600 uwb_drp_handle_timeout(rsv);
601 if (!rsv->ie_valid) {
602 uwb_drp_ie_update(rsv);
603 ie_updated = true;
604 }
605 }
606
607 return ie_updated;
608}
609
610void uwb_rsv_sched_update(struct uwb_rc *rc)
611{
612 queue_work(rc->rsv_workq, &rc->rsv_update_work);
613}
614
615/*
616 * Update DRP IEs and, if necessary, the DRP Availability IE and send
617 * the updated IEs to the radio controller.
618 */
619static void uwb_rsv_update_work(struct work_struct *work)
620{
621 struct uwb_rc *rc = container_of(work, struct uwb_rc, rsv_update_work);
622 bool ie_updated;
623
624 mutex_lock(&rc->rsvs_mutex);
625
626 ie_updated = uwb_rsv_update_all(rc);
627
628 if (!rc->drp_avail.ie_valid) {
629 uwb_drp_avail_ie_update(rc);
630 ie_updated = true;
631 }
632
633 if (ie_updated)
634 uwb_rc_send_all_drp_ie(rc);
635
636 mutex_unlock(&rc->rsvs_mutex);
637}
638
639static void uwb_rsv_timer(unsigned long arg)
640{
641 struct uwb_rsv *rsv = (struct uwb_rsv *)arg;
642
643 rsv->expired = true;
644 uwb_rsv_sched_update(rsv->rc);
645}
646
647void uwb_rsv_init(struct uwb_rc *rc)
648{
649 INIT_LIST_HEAD(&rc->reservations);
650 mutex_init(&rc->rsvs_mutex);
651 INIT_WORK(&rc->rsv_update_work, uwb_rsv_update_work);
652
653 bitmap_complement(rc->uwb_dev.streams, rc->uwb_dev.streams, UWB_NUM_STREAMS);
654}
655
656int uwb_rsv_setup(struct uwb_rc *rc)
657{
658 char name[16];
659
660 snprintf(name, sizeof(name), "%s_rsvd", dev_name(&rc->uwb_dev.dev));
661 rc->rsv_workq = create_singlethread_workqueue(name);
662 if (rc->rsv_workq == NULL)
663 return -ENOMEM;
664
665 return 0;
666}
667
668void uwb_rsv_cleanup(struct uwb_rc *rc)
669{
670 struct uwb_rsv *rsv, *t;
671
672 mutex_lock(&rc->rsvs_mutex);
673 list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) {
674 uwb_rsv_remove(rsv);
675 }
676 mutex_unlock(&rc->rsvs_mutex);
677
678 cancel_work_sync(&rc->rsv_update_work);
679 destroy_workqueue(rc->rsv_workq);
680}
diff --git a/drivers/uwb/scan.c b/drivers/uwb/scan.c
new file mode 100644
index 000000000000..2d270748f32b
--- /dev/null
+++ b/drivers/uwb/scan.c
@@ -0,0 +1,133 @@
1/*
2 * Ultra Wide Band
3 * Scanning management
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 *
24 * FIXME: docs
25 * FIXME: there are issues here on how BEACON and SCAN on USB RCI deal
26 * with each other. Currently seems that START_BEACON while
27 * SCAN_ONLY will cancel the scan, so we need to update the
28 * state here. Clarification request sent by email on
29 * 10/05/2005.
30 * 10/28/2005 No clear answer heard--maybe we'll hack the API
31 * so that when we start beaconing, if the HC is
32 * scanning in a mode not compatible with beaconing
33 * we just fail.
34 */
35
36#include <linux/device.h>
37#include <linux/err.h>
38#include "uwb-internal.h"
39
40
41/**
42 * Start/stop scanning in a radio controller
43 *
44 * @rc: UWB Radio Controller
45 * @channel: Channel to scan; encodings in WUSB1.0[Table 5.12]
46 * @type: Type of scanning to do.
47 * @bpst_offset: value at which to start scanning (if type ==
48 * UWB_SCAN_ONLY_STARTTIME)
49 * @returns: 0 if ok, < 0 errno code on error
50 *
51 * We put the command on kmalloc'ed memory as some arches cannot do
52 * USB from the stack. The reply event is copied from a staging
53 * buffer, so it can be on the stack. See WUSB1.0[8.6.2.4] for more details.
54 */
55int uwb_rc_scan(struct uwb_rc *rc,
56 unsigned channel, enum uwb_scan_type type,
57 unsigned bpst_offset)
58{
59 int result;
60 struct uwb_rc_cmd_scan *cmd;
61 struct uwb_rc_evt_confirm reply;
62
63 result = -ENOMEM;
64 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
65 if (cmd == NULL)
66 goto error_kzalloc;
67 mutex_lock(&rc->uwb_dev.mutex);
68 cmd->rccb.bCommandType = UWB_RC_CET_GENERAL;
69 cmd->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_SCAN);
70 cmd->bChannelNumber = channel;
71 cmd->bScanState = type;
72 cmd->wStartTime = cpu_to_le16(bpst_offset);
73 reply.rceb.bEventType = UWB_RC_CET_GENERAL;
74 reply.rceb.wEvent = UWB_RC_CMD_SCAN;
75 result = uwb_rc_cmd(rc, "SCAN", &cmd->rccb, sizeof(*cmd),
76 &reply.rceb, sizeof(reply));
77 if (result < 0)
78 goto error_cmd;
79 if (reply.bResultCode != UWB_RC_RES_SUCCESS) {
80 dev_err(&rc->uwb_dev.dev,
81 "SCAN: command execution failed: %s (%d)\n",
82 uwb_rc_strerror(reply.bResultCode), reply.bResultCode);
83 result = -EIO;
84 goto error_cmd;
85 }
86 rc->scanning = channel;
87 rc->scan_type = type;
88error_cmd:
89 mutex_unlock(&rc->uwb_dev.mutex);
90 kfree(cmd);
91error_kzalloc:
92 return result;
93}
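[Editor's note] A hypothetical sketch of a caller putting the radio into scan-only mode on one channel; the channel number and scan type are assumptions (see enum uwb_scan_type and WUSB1.0[Table 5.12]). The sysfs store below parses the same three values from user space.

static int example_start_scanning(struct uwb_rc *rc)
{
	/* channel 9, scan only, no start-time offset */
	return uwb_rc_scan(rc, 9, UWB_SCAN_ONLY, 0);
}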
94
95/*
96 * Print scanning state
97 */
98static ssize_t uwb_rc_scan_show(struct device *dev,
99 struct device_attribute *attr, char *buf)
100{
101 struct uwb_dev *uwb_dev = to_uwb_dev(dev);
102 struct uwb_rc *rc = uwb_dev->rc;
103 ssize_t result;
104
105 mutex_lock(&rc->uwb_dev.mutex);
106 result = sprintf(buf, "%d %d\n", rc->scanning, rc->scan_type);
107 mutex_unlock(&rc->uwb_dev.mutex);
108 return result;
109}
110
111/*
112 * Change the scanning state: "<channel> <type> [<bpst_offset>]"
113 */
114static ssize_t uwb_rc_scan_store(struct device *dev,
115 struct device_attribute *attr,
116 const char *buf, size_t size)
117{
118 struct uwb_dev *uwb_dev = to_uwb_dev(dev);
119 struct uwb_rc *rc = uwb_dev->rc;
120 unsigned channel;
121 unsigned type;
122 unsigned bpst_offset = 0;
123 ssize_t result = -EINVAL;
124
125	if (sscanf(buf, "%u %u %u\n", &channel, &type, &bpst_offset) >= 2
126	    && type < UWB_SCAN_TOP)
127		result = uwb_rc_scan(rc, channel, type, bpst_offset);
128
129 return result < 0 ? result : size;
130}
131
132/** Radio Control sysfs interface (declaration) */
133DEVICE_ATTR(scan, S_IRUGO | S_IWUSR, uwb_rc_scan_show, uwb_rc_scan_store);
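/*
 * Illustrative use of the attribute above (the sysfs path is an
 * assumption, not part of this patch): writing
 * "<channel> <type> [<bpst_offset>]" to the radio controller's 'scan'
 * file, e.g.
 *
 *	echo "9 1" > /sys/class/uwb_rc/uwb0/scan
 *
 * asks uwb_rc_scan() for scan type 1 on channel 9; reading the file
 * back prints the current "<channel> <scan_type>" pair.
 */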
diff --git a/drivers/uwb/umc-bus.c b/drivers/uwb/umc-bus.c
new file mode 100644
index 000000000000..2d8d62d9f53e
--- /dev/null
+++ b/drivers/uwb/umc-bus.c
@@ -0,0 +1,218 @@
1/*
2 * Bus for UWB Multi-interface Controller capabilities.
3 *
4 * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
5 *
6 * This file is released under the GNU GPL v2.
7 */
8#include <linux/kernel.h>
9#include <linux/sysfs.h>
10#include <linux/workqueue.h>
11#include <linux/uwb/umc.h>
12#include <linux/pci.h>
13
14static int umc_bus_unbind_helper(struct device *dev, void *data)
15{
16 struct device *parent = data;
17
18 if (dev->parent == parent && dev->driver)
19 device_release_driver(dev);
20 return 0;
21}
22
23/**
24 * umc_controller_reset - reset the whole UMC controller
25 * @umc: the UMC device for the radio controller.
26 *
27 * Drivers will be unbound from all UMC devices belonging to the
28 * controller and then the radio controller will be rebound. The
29 * radio controller is expected to do a full hardware reset when it is
30 * probed.
31 *
32 * If this is called while a probe() or remove() is in progress it
33 * will return -EAGAIN and not perform the reset.
34 */
35int umc_controller_reset(struct umc_dev *umc)
36{
37 struct device *parent = umc->dev.parent;
38 int ret;
39
40 if (down_trylock(&parent->sem))
41 return -EAGAIN;
42 bus_for_each_dev(&umc_bus_type, NULL, parent, umc_bus_unbind_helper);
43 ret = device_attach(&umc->dev);
44 if (ret == 1)
45 ret = 0;
46 up(&parent->sem);
47
48 return ret;
49}
50EXPORT_SYMBOL_GPL(umc_controller_reset);
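/*
 * Note: the radio controller's own capability driver is the expected
 * caller here (for instance whcrc_reset() in whc-rc.c passes its
 * umc_dev when the UWB stack requests a reset); a return of -EAGAIN
 * just means a probe()/remove() was in flight and the caller should
 * retry the reset later.
 */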
51
52/**
53 * umc_match_pci_id - match a UMC driver to a UMC device's parent PCI device.
54 * @umc_drv: umc driver with match_data pointing to a zero-terminated
55 * table of pci_device_id's.
56 * @umc: umc device whose parent is to be matched.
57 */
58int umc_match_pci_id(struct umc_driver *umc_drv, struct umc_dev *umc)
59{
60 const struct pci_device_id *id_table = umc_drv->match_data;
61 struct pci_dev *pci;
62
63 if (umc->dev.parent->bus != &pci_bus_type)
64 return 0;
65
66 pci = to_pci_dev(umc->dev.parent);
67 return pci_match_id(id_table, pci) != NULL;
68}
69EXPORT_SYMBOL_GPL(umc_match_pci_id);
70
71static int umc_bus_rescan_helper(struct device *dev, void *data)
72{
73 int ret = 0;
74
75 if (!dev->driver)
76 ret = device_attach(dev);
77
78 return ret < 0 ? ret : 0;
79}
80
81static void umc_bus_rescan(void)
82{
83 int err;
84
85 /*
86 * We can't use bus_rescan_devices() here as it deadlocks when
87 * it tries to retake the dev->parent semaphore.
88 */
89 err = bus_for_each_dev(&umc_bus_type, NULL, NULL, umc_bus_rescan_helper);
90 if (err < 0)
91 printk(KERN_WARNING "%s: rescan of bus failed: %d\n",
92 KBUILD_MODNAME, err);
93}
94
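/*
 * A driver matches a UMC device when their capability IDs agree; the
 * driver may additionally provide a ->match() hook (such as
 * umc_match_pci_id() above) to narrow the match further.
 */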
95static int umc_bus_match(struct device *dev, struct device_driver *drv)
96{
97 struct umc_dev *umc = to_umc_dev(dev);
98 struct umc_driver *umc_driver = to_umc_driver(drv);
99
100 if (umc->cap_id == umc_driver->cap_id) {
101 if (umc_driver->match)
102 return umc_driver->match(umc_driver, umc);
103 else
104 return 1;
105 }
106 return 0;
107}
108
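/*
 * probe() holds a reference on the device for the lifetime of the
 * binding; on success the whole bus is rescanned so that capabilities
 * waiting on the newly bound one get a chance to bind as well.
 */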
109static int umc_device_probe(struct device *dev)
110{
111 struct umc_dev *umc;
112 struct umc_driver *umc_driver;
113 int err;
114
115 umc_driver = to_umc_driver(dev->driver);
116 umc = to_umc_dev(dev);
117
118 get_device(dev);
119 err = umc_driver->probe(umc);
120 if (err)
121 put_device(dev);
122 else
123 umc_bus_rescan();
124
125 return err;
126}
127
128static int umc_device_remove(struct device *dev)
129{
130 struct umc_dev *umc;
131 struct umc_driver *umc_driver;
132
133 umc_driver = to_umc_driver(dev->driver);
134 umc = to_umc_dev(dev);
135
136 umc_driver->remove(umc);
137 put_device(dev);
138 return 0;
139}
140
141static int umc_device_suspend(struct device *dev, pm_message_t state)
142{
143 struct umc_dev *umc;
144 struct umc_driver *umc_driver;
145 int err = 0;
146
147 umc = to_umc_dev(dev);
148
149 if (dev->driver) {
150 umc_driver = to_umc_driver(dev->driver);
151 if (umc_driver->suspend)
152 err = umc_driver->suspend(umc, state);
153 }
154 return err;
155}
156
157static int umc_device_resume(struct device *dev)
158{
159 struct umc_dev *umc;
160 struct umc_driver *umc_driver;
161 int err = 0;
162
163 umc = to_umc_dev(dev);
164
165 if (dev->driver) {
166 umc_driver = to_umc_driver(dev->driver);
167 if (umc_driver->resume)
168 err = umc_driver->resume(umc);
169 }
170 return err;
171}
172
173static ssize_t capability_id_show(struct device *dev, struct device_attribute *attr, char *buf)
174{
175 struct umc_dev *umc = to_umc_dev(dev);
176
177 return sprintf(buf, "0x%02x\n", umc->cap_id);
178}
179
180static ssize_t version_show(struct device *dev, struct device_attribute *attr, char *buf)
181{
182 struct umc_dev *umc = to_umc_dev(dev);
183
184 return sprintf(buf, "0x%04x\n", umc->version);
185}
186
187static struct device_attribute umc_dev_attrs[] = {
188 __ATTR_RO(capability_id),
189 __ATTR_RO(version),
190 __ATTR_NULL,
191};
192
193struct bus_type umc_bus_type = {
194 .name = "umc",
195 .match = umc_bus_match,
196 .probe = umc_device_probe,
197 .remove = umc_device_remove,
198 .suspend = umc_device_suspend,
199 .resume = umc_device_resume,
200 .dev_attrs = umc_dev_attrs,
201};
202EXPORT_SYMBOL_GPL(umc_bus_type);
203
204static int __init umc_bus_init(void)
205{
206 return bus_register(&umc_bus_type);
207}
208module_init(umc_bus_init);
209
210static void __exit umc_bus_exit(void)
211{
212 bus_unregister(&umc_bus_type);
213}
214module_exit(umc_bus_exit);
215
216MODULE_DESCRIPTION("UWB Multi-interface Controller capability bus");
217MODULE_AUTHOR("Cambridge Silicon Radio Ltd.");
218MODULE_LICENSE("GPL");
diff --git a/drivers/uwb/umc-dev.c b/drivers/uwb/umc-dev.c
new file mode 100644
index 000000000000..aa44e1c1a102
--- /dev/null
+++ b/drivers/uwb/umc-dev.c
@@ -0,0 +1,104 @@
1/*
2 * UWB Multi-interface Controller device management.
3 *
4 * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
5 *
6 * This file is released under the GNU GPL v2.
7 */
8#include <linux/kernel.h>
9#include <linux/uwb/umc.h>
10#define D_LOCAL 0
11#include <linux/uwb/debug.h>
12
13static void umc_device_release(struct device *dev)
14{
15 struct umc_dev *umc = to_umc_dev(dev);
16
17 kfree(umc);
18}
19
20/**
21 * umc_device_create - allocate a child UMC device
22 * @parent: parent of the new UMC device.
23 * @n: index of the new device.
24 *
25 * The new UMC device will have a bus ID of the parent with '-n'
26 * appended.
27 */
28struct umc_dev *umc_device_create(struct device *parent, int n)
29{
30 struct umc_dev *umc;
31
32 umc = kzalloc(sizeof(struct umc_dev), GFP_KERNEL);
33 if (umc) {
34 snprintf(umc->dev.bus_id, sizeof(umc->dev.bus_id), "%s-%d",
35 parent->bus_id, n);
36 umc->dev.parent = parent;
37 umc->dev.bus = &umc_bus_type;
38 umc->dev.release = umc_device_release;
39
40 umc->dev.dma_mask = parent->dma_mask;
41 }
42 return umc;
43}
44EXPORT_SYMBOL_GPL(umc_device_create);
45
46/**
47 * umc_device_register - register a UMC device
48 * @umc: pointer to the UMC device
49 *
50 * The memory resource for the UMC device is acquired and the device
51 * registered with the system.
52 */
53int umc_device_register(struct umc_dev *umc)
54{
55 int err;
56
57 d_fnstart(3, &umc->dev, "(umc_dev %p)\n", umc);
58
59 err = request_resource(umc->resource.parent, &umc->resource);
60 if (err < 0) {
61 dev_err(&umc->dev, "can't allocate resource range "
62 "%016Lx to %016Lx: %d\n",
63 (unsigned long long)umc->resource.start,
64 (unsigned long long)umc->resource.end,
65 err);
66 goto error_request_resource;
67 }
68
69 err = device_register(&umc->dev);
70 if (err < 0)
71 goto error_device_register;
72 d_fnend(3, &umc->dev, "(umc_dev %p) = 0\n", umc);
73 return 0;
74
75error_device_register:
76 release_resource(&umc->resource);
77error_request_resource:
78 d_fnend(3, &umc->dev, "(umc_dev %p) = %d\n", umc, err);
79 return err;
80}
81EXPORT_SYMBOL_GPL(umc_device_register);
82
83/**
84 * umc_device_unregister - unregister a UMC device
85 * @umc: pointer to the UMC device
86 *
 87 * First we unregister the device, make sure the driver can do its
 88 * resource release work and then we try to release any leftover
 89 * resources. We take a ref to the device, to make sure it doesn't
 90 * disappear under our feet.
91 */
92void umc_device_unregister(struct umc_dev *umc)
93{
94 struct device *dev;
95 if (!umc)
96 return;
97 dev = get_device(&umc->dev);
98 d_fnstart(3, dev, "(umc_dev %p)\n", umc);
99 device_unregister(&umc->dev);
100 release_resource(&umc->resource);
101 d_fnend(3, dev, "(umc_dev %p) = void\n", umc);
102 put_device(dev);
103}
104EXPORT_SYMBOL_GPL(umc_device_unregister);
diff --git a/drivers/uwb/umc-drv.c b/drivers/uwb/umc-drv.c
new file mode 100644
index 000000000000..367b5eb85d60
--- /dev/null
+++ b/drivers/uwb/umc-drv.c
@@ -0,0 +1,31 @@
1/*
2 * UWB Multi-interface Controller driver management.
3 *
4 * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
5 *
6 * This file is released under the GNU GPL v2.
7 */
8#include <linux/kernel.h>
9#include <linux/uwb/umc.h>
10
11int __umc_driver_register(struct umc_driver *umc_drv, struct module *module,
12 const char *mod_name)
13{
14 umc_drv->driver.name = umc_drv->name;
15 umc_drv->driver.owner = module;
16 umc_drv->driver.mod_name = mod_name;
17 umc_drv->driver.bus = &umc_bus_type;
18
19 return driver_register(&umc_drv->driver);
20}
21EXPORT_SYMBOL_GPL(__umc_driver_register);
22
23/**
 24 * umc_driver_unregister - unregister a UMC capability driver.
25 * @umc_drv: pointer to the driver.
26 */
27void umc_driver_unregister(struct umc_driver *umc_drv)
28{
29 driver_unregister(&umc_drv->driver);
30}
31EXPORT_SYMBOL_GPL(umc_driver_unregister);
diff --git a/drivers/uwb/uwb-debug.c b/drivers/uwb/uwb-debug.c
new file mode 100644
index 000000000000..6d232c35d07d
--- /dev/null
+++ b/drivers/uwb/uwb-debug.c
@@ -0,0 +1,367 @@
1/*
2 * Ultra Wide Band
3 * Debug support
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * FIXME: doc
24 */
25
26#include <linux/spinlock.h>
27#include <linux/module.h>
28#include <linux/slab.h>
29#include <linux/notifier.h>
30#include <linux/device.h>
31#include <linux/debugfs.h>
32#include <linux/uaccess.h>
33#include <linux/seq_file.h>
34
35#include <linux/uwb/debug-cmd.h>
36#define D_LOCAL 0
37#include <linux/uwb/debug.h>
38
39#include "uwb-internal.h"
40
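/*
 * Dump a buffer in hex, eight octets per line, to the given device's
 * log (or via plain printk() when no device is supplied).
 */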
41void dump_bytes(struct device *dev, const void *_buf, size_t rsize)
42{
43 const char *buf = _buf;
44 char line[32];
45 size_t offset = 0;
46 int cnt, cnt2;
47 for (cnt = 0; cnt < rsize; cnt += 8) {
48 size_t rtop = rsize - cnt < 8 ? rsize - cnt : 8;
49 for (offset = cnt2 = 0; cnt2 < rtop; cnt2++) {
50 offset += scnprintf(line + offset, sizeof(line) - offset,
51 "%02x ", buf[cnt + cnt2] & 0xff);
52 }
53 if (dev)
54 dev_info(dev, "%s\n", line);
55 else
56 printk(KERN_INFO "%s\n", line);
57 }
58}
59EXPORT_SYMBOL_GPL(dump_bytes);
60
61/*
62 * Debug interface
63 *
64 * Per radio controller debugfs files (in uwb/uwbN/):
65 *
66 * command: Flexible command interface (see <linux/uwb/debug-cmd.h>).
67 *
68 * reservations: information on reservations.
69 *
70 * accept: Set to true (Y or 1) to accept reservation requests from
71 * peers.
72 *
73 * drp_avail: DRP availability information.
74 */
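/*
 * Note: the 'command' file takes exactly one binary 'struct
 * uwb_dbg_cmd' per write() (see command_write() below), so it is
 * meant to be driven from a small test program rather than the shell.
 */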
75
76struct uwb_dbg {
77 struct uwb_pal pal;
78
79 u32 accept;
80 struct list_head rsvs;
81
82 struct dentry *root_d;
83 struct dentry *command_f;
84 struct dentry *reservations_f;
85 struct dentry *accept_f;
86 struct dentry *drp_avail_f;
87};
88
89static struct dentry *root_dir;
90
91static void uwb_dbg_rsv_cb(struct uwb_rsv *rsv)
92{
93 struct uwb_rc *rc = rsv->rc;
94 struct device *dev = &rc->uwb_dev.dev;
95 struct uwb_dev_addr devaddr;
96 char owner[UWB_ADDR_STRSIZE], target[UWB_ADDR_STRSIZE];
97
98 uwb_dev_addr_print(owner, sizeof(owner), &rsv->owner->dev_addr);
99 if (rsv->target.type == UWB_RSV_TARGET_DEV)
100 devaddr = rsv->target.dev->dev_addr;
101 else
102 devaddr = rsv->target.devaddr;
103 uwb_dev_addr_print(target, sizeof(target), &devaddr);
104
105 dev_dbg(dev, "debug: rsv %s -> %s: %s\n",
106 owner, target, uwb_rsv_state_str(rsv->state));
107}
108
109static int cmd_rsv_establish(struct uwb_rc *rc,
110 struct uwb_dbg_cmd_rsv_establish *cmd)
111{
112 struct uwb_mac_addr macaddr;
113 struct uwb_rsv *rsv;
114 struct uwb_dev *target;
115 int ret;
116
117 memcpy(&macaddr, cmd->target, sizeof(macaddr));
118 target = uwb_dev_get_by_macaddr(rc, &macaddr);
119 if (target == NULL)
120 return -ENODEV;
121
122 rsv = uwb_rsv_create(rc, uwb_dbg_rsv_cb, NULL);
123 if (rsv == NULL) {
124 uwb_dev_put(target);
125 return -ENOMEM;
126 }
127
128 rsv->owner = &rc->uwb_dev;
129 rsv->target.type = UWB_RSV_TARGET_DEV;
130 rsv->target.dev = target;
131 rsv->type = cmd->type;
132 rsv->max_mas = cmd->max_mas;
133 rsv->min_mas = cmd->min_mas;
134 rsv->sparsity = cmd->sparsity;
135
136 ret = uwb_rsv_establish(rsv);
137 if (ret)
138 uwb_rsv_destroy(rsv);
139 else
140 list_add_tail(&rsv->pal_node, &rc->dbg->rsvs);
141
142 return ret;
143}
144
145static int cmd_rsv_terminate(struct uwb_rc *rc,
146 struct uwb_dbg_cmd_rsv_terminate *cmd)
147{
148 struct uwb_rsv *rsv, *found = NULL;
149 int i = 0;
150
151 list_for_each_entry(rsv, &rc->dbg->rsvs, pal_node) {
152		if (i++ == cmd->index) {
153 found = rsv;
154 break;
155 }
156 }
157 if (!found)
158 return -EINVAL;
159
160 list_del(&found->pal_node);
161 uwb_rsv_terminate(found);
162
163 return 0;
164}
165
166static int command_open(struct inode *inode, struct file *file)
167{
168 file->private_data = inode->i_private;
169
170 return 0;
171}
172
173static ssize_t command_write(struct file *file, const char __user *buf,
174 size_t len, loff_t *off)
175{
176 struct uwb_rc *rc = file->private_data;
177 struct uwb_dbg_cmd cmd;
178 int ret;
179
180 if (len != sizeof(struct uwb_dbg_cmd))
181 return -EINVAL;
182
183 if (copy_from_user(&cmd, buf, len) != 0)
184 return -EFAULT;
185
186 switch (cmd.type) {
187 case UWB_DBG_CMD_RSV_ESTABLISH:
188 ret = cmd_rsv_establish(rc, &cmd.rsv_establish);
189 break;
190 case UWB_DBG_CMD_RSV_TERMINATE:
191 ret = cmd_rsv_terminate(rc, &cmd.rsv_terminate);
192 break;
193 default:
194 return -EINVAL;
195 }
196
197 return ret < 0 ? ret : len;
198}
199
200static struct file_operations command_fops = {
201 .open = command_open,
202 .write = command_write,
203 .read = NULL,
204 .llseek = no_llseek,
205 .owner = THIS_MODULE,
206};
207
208static int reservations_print(struct seq_file *s, void *p)
209{
210 struct uwb_rc *rc = s->private;
211 struct uwb_rsv *rsv;
212
213 mutex_lock(&rc->rsvs_mutex);
214
215 list_for_each_entry(rsv, &rc->reservations, rc_node) {
216 struct uwb_dev_addr devaddr;
217 char owner[UWB_ADDR_STRSIZE], target[UWB_ADDR_STRSIZE];
218 bool is_owner;
219 char buf[72];
220
221 uwb_dev_addr_print(owner, sizeof(owner), &rsv->owner->dev_addr);
222 if (rsv->target.type == UWB_RSV_TARGET_DEV) {
223 devaddr = rsv->target.dev->dev_addr;
224 is_owner = &rc->uwb_dev == rsv->owner;
225 } else {
226 devaddr = rsv->target.devaddr;
227 is_owner = true;
228 }
229 uwb_dev_addr_print(target, sizeof(target), &devaddr);
230
231 seq_printf(s, "%c %s -> %s: %s\n",
232 is_owner ? 'O' : 'T',
233 owner, target, uwb_rsv_state_str(rsv->state));
234 seq_printf(s, " stream: %d type: %s\n",
235 rsv->stream, uwb_rsv_type_str(rsv->type));
236 bitmap_scnprintf(buf, sizeof(buf), rsv->mas.bm, UWB_NUM_MAS);
237 seq_printf(s, " %s\n", buf);
238 }
239
240 mutex_unlock(&rc->rsvs_mutex);
241
242 return 0;
243}
244
245static int reservations_open(struct inode *inode, struct file *file)
246{
247 return single_open(file, reservations_print, inode->i_private);
248}
249
250static struct file_operations reservations_fops = {
251 .open = reservations_open,
252 .read = seq_read,
253 .llseek = seq_lseek,
254 .release = single_release,
255 .owner = THIS_MODULE,
256};
257
258static int drp_avail_print(struct seq_file *s, void *p)
259{
260 struct uwb_rc *rc = s->private;
261 char buf[72];
262
263 bitmap_scnprintf(buf, sizeof(buf), rc->drp_avail.global, UWB_NUM_MAS);
264 seq_printf(s, "global: %s\n", buf);
265 bitmap_scnprintf(buf, sizeof(buf), rc->drp_avail.local, UWB_NUM_MAS);
266 seq_printf(s, "local: %s\n", buf);
267 bitmap_scnprintf(buf, sizeof(buf), rc->drp_avail.pending, UWB_NUM_MAS);
268 seq_printf(s, "pending: %s\n", buf);
269
270 return 0;
271}
272
273static int drp_avail_open(struct inode *inode, struct file *file)
274{
275 return single_open(file, drp_avail_print, inode->i_private);
276}
277
278static struct file_operations drp_avail_fops = {
279 .open = drp_avail_open,
280 .read = seq_read,
281 .llseek = seq_lseek,
282 .release = single_release,
283 .owner = THIS_MODULE,
284};
285
286static void uwb_dbg_new_rsv(struct uwb_rsv *rsv)
287{
288 struct uwb_rc *rc = rsv->rc;
289
290 if (rc->dbg->accept)
291 uwb_rsv_accept(rsv, uwb_dbg_rsv_cb, NULL);
292}
293
294/**
295 * uwb_dbg_add_rc - add a debug interface for a radio controller
296 * @rc: the radio controller
297 */
298void uwb_dbg_add_rc(struct uwb_rc *rc)
299{
300 rc->dbg = kzalloc(sizeof(struct uwb_dbg), GFP_KERNEL);
301 if (rc->dbg == NULL)
302 return;
303
304 INIT_LIST_HEAD(&rc->dbg->rsvs);
305
306 uwb_pal_init(&rc->dbg->pal);
307 rc->dbg->pal.new_rsv = uwb_dbg_new_rsv;
308 uwb_pal_register(rc, &rc->dbg->pal);
309 if (root_dir) {
310 rc->dbg->root_d = debugfs_create_dir(dev_name(&rc->uwb_dev.dev),
311 root_dir);
312 rc->dbg->command_f = debugfs_create_file("command", 0200,
313 rc->dbg->root_d, rc,
314 &command_fops);
315 rc->dbg->reservations_f = debugfs_create_file("reservations", 0444,
316 rc->dbg->root_d, rc,
317 &reservations_fops);
318 rc->dbg->accept_f = debugfs_create_bool("accept", 0644,
319 rc->dbg->root_d,
320 &rc->dbg->accept);
321 rc->dbg->drp_avail_f = debugfs_create_file("drp_avail", 0444,
322 rc->dbg->root_d, rc,
323 &drp_avail_fops);
324 }
325}
326
327/**
328 * uwb_dbg_del_rc - remove a radio controller's debug interface
329 * @rc: the radio controller
330 */
331void uwb_dbg_del_rc(struct uwb_rc *rc)
332{
333 struct uwb_rsv *rsv, *t;
334
335 if (rc->dbg == NULL)
336 return;
337
338 list_for_each_entry_safe(rsv, t, &rc->dbg->rsvs, pal_node) {
339 uwb_rsv_destroy(rsv);
340 }
341
342 uwb_pal_unregister(rc, &rc->dbg->pal);
343
344 if (root_dir) {
345 debugfs_remove(rc->dbg->drp_avail_f);
346 debugfs_remove(rc->dbg->accept_f);
347 debugfs_remove(rc->dbg->reservations_f);
348 debugfs_remove(rc->dbg->command_f);
349 debugfs_remove(rc->dbg->root_d);
350 }
351}
352
353/**
354 * uwb_dbg_init - initialize the debug interface sub-module
355 */
356void uwb_dbg_init(void)
357{
358 root_dir = debugfs_create_dir("uwb", NULL);
359}
360
361/**
362 * uwb_dbg_exit - clean-up the debug interface sub-module
363 */
364void uwb_dbg_exit(void)
365{
366 debugfs_remove(root_dir);
367}
diff --git a/drivers/uwb/uwb-internal.h b/drivers/uwb/uwb-internal.h
new file mode 100644
index 000000000000..2ad307d12961
--- /dev/null
+++ b/drivers/uwb/uwb-internal.h
@@ -0,0 +1,305 @@
1/*
2 * Ultra Wide Band
3 * UWB internal API
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 * This contains most of the internal API for UWB. This is stuff used
 23 * across the stack that is, of course, of no interest to the rest of the kernel.
24 *
25 * Some parts might end up going public (like uwb_rc_*())...
26 */
27
28#ifndef __UWB_INTERNAL_H__
29#define __UWB_INTERNAL_H__
30
31#include <linux/version.h>
32#include <linux/kernel.h>
33#include <linux/device.h>
34#include <linux/uwb.h>
35#include <linux/mutex.h>
36
37struct uwb_beca_e;
38
39/* General device API */
40extern void uwb_dev_init(struct uwb_dev *uwb_dev);
41extern int __uwb_dev_offair(struct uwb_dev *, struct uwb_rc *);
42extern int uwb_dev_add(struct uwb_dev *uwb_dev, struct device *parent_dev,
43 struct uwb_rc *parent_rc);
44extern void uwb_dev_rm(struct uwb_dev *uwb_dev);
45extern void uwbd_dev_onair(struct uwb_rc *, struct uwb_beca_e *);
46extern void uwbd_dev_offair(struct uwb_beca_e *);
47void uwb_notify(struct uwb_rc *rc, struct uwb_dev *uwb_dev, enum uwb_notifs event);
48
49/* General UWB Radio Controller Internal API */
50extern struct uwb_rc *__uwb_rc_try_get(struct uwb_rc *);
51static inline struct uwb_rc *__uwb_rc_get(struct uwb_rc *rc)
52{
53 uwb_dev_get(&rc->uwb_dev);
54 return rc;
55}
56
57static inline void __uwb_rc_put(struct uwb_rc *rc)
58{
59 uwb_dev_put(&rc->uwb_dev);
60}
61
62extern int uwb_rc_reset(struct uwb_rc *rc);
63extern int uwb_rc_beacon(struct uwb_rc *rc,
64 int channel, unsigned bpst_offset);
65extern int uwb_rc_scan(struct uwb_rc *rc,
66 unsigned channel, enum uwb_scan_type type,
67 unsigned bpst_offset);
68extern int uwb_rc_send_all_drp_ie(struct uwb_rc *rc);
69extern ssize_t uwb_rc_print_IEs(struct uwb_rc *rc, char *, size_t);
70extern void uwb_rc_ie_init(struct uwb_rc *);
72extern ssize_t uwb_rc_ie_setup(struct uwb_rc *);
73extern void uwb_rc_ie_release(struct uwb_rc *);
74extern int uwb_rc_ie_add(struct uwb_rc *,
75 const struct uwb_ie_hdr *, size_t);
76extern int uwb_rc_ie_rm(struct uwb_rc *, enum uwb_ie);
77
78extern const char *uwb_rc_strerror(unsigned code);
79
80/*
81 * Time to wait for a response to an RC command.
82 *
 83 * Some commands can take a long time to complete. For example, START_BEACON
84 * may scan for several superframes before joining an existing beacon
85 * group and this can take around 600 ms.
86 */
87#define UWB_RC_CMD_TIMEOUT_MS 1000 /* ms */
88
89/*
90 * Notification/Event Handlers
91 */
92
93struct uwb_rc_neh;
94
95void uwb_rc_neh_create(struct uwb_rc *rc);
96void uwb_rc_neh_destroy(struct uwb_rc *rc);
97
98struct uwb_rc_neh *uwb_rc_neh_add(struct uwb_rc *rc, struct uwb_rccb *cmd,
99 u8 expected_type, u16 expected_event,
100 uwb_rc_cmd_cb_f cb, void *arg);
101void uwb_rc_neh_rm(struct uwb_rc *rc, struct uwb_rc_neh *neh);
102void uwb_rc_neh_arm(struct uwb_rc *rc, struct uwb_rc_neh *neh);
103void uwb_rc_neh_put(struct uwb_rc_neh *neh);
104
105/* Event size tables */
106extern int uwb_est_create(void);
107extern void uwb_est_destroy(void);
108
109
110/*
111 * UWB Events & management daemon
112 */
113
114/**
115 * enum uwb_event_type - types of UWB management daemon events
116 *
117 * The UWB management daemon (uwbd) can receive two types of events:
118 * UWB_EVT_TYPE_NOTIF - notification from the radio controller.
119 * UWB_EVT_TYPE_MSG - a simple message.
120 */
121enum uwb_event_type {
122 UWB_EVT_TYPE_NOTIF,
123 UWB_EVT_TYPE_MSG,
124};
125
126/**
127 * struct uwb_event_notif - an event for a radio controller notification
128 * @size: Size of the buffer (ie: Guaranteed to contain at least
129 * a full 'struct uwb_rceb')
130 * @rceb: Pointer to a kmalloced() event payload
131 */
132struct uwb_event_notif {
133 size_t size;
134 struct uwb_rceb *rceb;
135};
136
137/**
138 * enum uwb_event_message - an event for a message for asynchronous processing
139 *
140 * UWB_EVT_MSG_RESET - reset the radio controller and all PAL hardware.
141 */
142enum uwb_event_message {
143 UWB_EVT_MSG_RESET,
144};
145
146/**
147 * UWB Event
148 * @rc: Radio controller that emitted the event (referenced)
149 * @ts_jiffies: Timestamp, when was it received
150 * @type: This event's type.
151 */
152struct uwb_event {
153 struct list_head list_node;
154 struct uwb_rc *rc;
155 unsigned long ts_jiffies;
156 enum uwb_event_type type;
157 union {
158 struct uwb_event_notif notif;
159 enum uwb_event_message message;
160 };
161};
162
163extern void uwbd_start(void);
164extern void uwbd_stop(void);
165extern struct uwb_event *uwb_event_alloc(size_t, gfp_t gfp_mask);
166extern void uwbd_event_queue(struct uwb_event *);
167void uwbd_flush(struct uwb_rc *rc);
168
169/* UWB event handlers */
170extern int uwbd_evt_handle_rc_beacon(struct uwb_event *);
171extern int uwbd_evt_handle_rc_beacon_size(struct uwb_event *);
172extern int uwbd_evt_handle_rc_bpoie_change(struct uwb_event *);
173extern int uwbd_evt_handle_rc_bp_slot_change(struct uwb_event *);
174extern int uwbd_evt_handle_rc_drp(struct uwb_event *);
175extern int uwbd_evt_handle_rc_drp_avail(struct uwb_event *);
176
177int uwbd_msg_handle_reset(struct uwb_event *evt);
178
179
180/*
181 * Address management
182 */
183int uwb_rc_dev_addr_assign(struct uwb_rc *rc);
184int uwbd_evt_handle_rc_dev_addr_conflict(struct uwb_event *evt);
185
186/*
187 * UWB Beacon Cache
188 *
189 * Each beacon we receive is kept in a cache--when we receive that
190 * beacon consistently, that means there is a new device that we have
191 * to add to the system.
192 */
193
194extern unsigned long beacon_timeout_ms;
195
196/** Beacon cache list */
197struct uwb_beca {
198 struct list_head list;
199 size_t entries;
200 struct mutex mutex;
201};
202
203extern struct uwb_beca uwb_beca;
204
205/**
206 * Beacon cache entry
207 *
208 * @jiffies_refresh: last time a beacon was received that refreshed
209 * this cache entry.
210 * @uwb_dev: device connected to this beacon. This pointer is not
211 *        safe; you need to take a reference to it with uwb_dev_try_get()
212 *
213 * @hits: how many times we have seen this beacon since the last time we
214 *        cleared it
215 */
216struct uwb_beca_e {
217 struct mutex mutex;
218 struct kref refcnt;
219 struct list_head node;
220 struct uwb_mac_addr *mac_addr;
221 struct uwb_dev_addr dev_addr;
222 u8 hits;
223 unsigned long ts_jiffies;
224 struct uwb_dev *uwb_dev;
225 struct uwb_rc_evt_beacon *be;
226 struct stats lqe_stats, rssi_stats; /* radio statistics */
227};
228struct uwb_beacon_frame;
229extern ssize_t uwb_bce_print_IEs(struct uwb_dev *, struct uwb_beca_e *,
230 char *, size_t);
231extern struct uwb_beca_e *__uwb_beca_add(struct uwb_rc_evt_beacon *,
232 struct uwb_beacon_frame *,
233 unsigned long);
234
235extern void uwb_bce_kfree(struct kref *_bce);
236static inline void uwb_bce_get(struct uwb_beca_e *bce)
237{
238 kref_get(&bce->refcnt);
239}
240static inline void uwb_bce_put(struct uwb_beca_e *bce)
241{
242 kref_put(&bce->refcnt, uwb_bce_kfree);
243}
244extern void uwb_beca_purge(void);
245extern void uwb_beca_release(void);
246
247struct uwb_dev *uwb_dev_get_by_devaddr(struct uwb_rc *rc,
248 const struct uwb_dev_addr *devaddr);
249struct uwb_dev *uwb_dev_get_by_macaddr(struct uwb_rc *rc,
250 const struct uwb_mac_addr *macaddr);
251
252/* -- UWB Sysfs representation */
253extern struct class uwb_rc_class;
254extern struct device_attribute dev_attr_mac_address;
255extern struct device_attribute dev_attr_beacon;
256extern struct device_attribute dev_attr_scan;
257
258/* -- DRP Bandwidth allocator: bandwidth allocations, reservations, DRP */
259void uwb_rsv_init(struct uwb_rc *rc);
260int uwb_rsv_setup(struct uwb_rc *rc);
261void uwb_rsv_cleanup(struct uwb_rc *rc);
262
263void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state);
264void uwb_rsv_remove(struct uwb_rsv *rsv);
265struct uwb_rsv *uwb_rsv_find(struct uwb_rc *rc, struct uwb_dev *src,
266 struct uwb_ie_drp *drp_ie);
267void uwb_rsv_sched_update(struct uwb_rc *rc);
268
269void uwb_drp_handle_timeout(struct uwb_rsv *rsv);
270int uwb_drp_ie_update(struct uwb_rsv *rsv);
271void uwb_drp_ie_to_bm(struct uwb_mas_bm *bm, const struct uwb_ie_drp *drp_ie);
272
273void uwb_drp_avail_init(struct uwb_rc *rc);
274int uwb_drp_avail_reserve_pending(struct uwb_rc *rc, struct uwb_mas_bm *mas);
275void uwb_drp_avail_reserve(struct uwb_rc *rc, struct uwb_mas_bm *mas);
276void uwb_drp_avail_release(struct uwb_rc *rc, struct uwb_mas_bm *mas);
277void uwb_drp_avail_ie_update(struct uwb_rc *rc);
278
279/* -- PAL support */
280void uwb_rc_pal_init(struct uwb_rc *rc);
281
282/* -- Misc */
283
284extern ssize_t uwb_mac_frame_hdr_print(char *, size_t,
285 const struct uwb_mac_frame_hdr *);
286
287/* -- Debug interface */
288void uwb_dbg_init(void);
289void uwb_dbg_exit(void);
290void uwb_dbg_add_rc(struct uwb_rc *rc);
291void uwb_dbg_del_rc(struct uwb_rc *rc);
292
293/* Workarounds for version specific stuff */
294
295static inline void uwb_dev_lock(struct uwb_dev *uwb_dev)
296{
297 down(&uwb_dev->dev.sem);
298}
299
300static inline void uwb_dev_unlock(struct uwb_dev *uwb_dev)
301{
302 up(&uwb_dev->dev.sem);
303}
304
305#endif /* #ifndef __UWB_INTERNAL_H__ */
diff --git a/drivers/uwb/uwbd.c b/drivers/uwb/uwbd.c
new file mode 100644
index 000000000000..78908416e42c
--- /dev/null
+++ b/drivers/uwb/uwbd.c
@@ -0,0 +1,410 @@
1/*
2 * Ultra Wide Band
3 * Neighborhood Management Daemon
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
 23 * This daemon takes care of maintaining information that describes the
 24 * UWB neighborhood that the radios in this machine can see. It also
 25 * keeps track of which devices are visible, makes sure each HC sits
26 * on a different channel to avoid interfering, etc.
27 *
28 * Different drivers (radio controller, device, any API in general)
29 * communicate with this daemon through an event queue. Daemon wakes
30 * up, takes a list of events and handles them one by one; handling
31 * function is extracted from a table based on the event's type and
32 * subtype. Events are freed only if the handling function says so.
33 *
 34 * . The lock protecting the event list has to be a spinlock, locked
 35 *   with IRQSAVE because it might be taken from an interrupt
 36 *   context (i.e., when events arrive and the notification drops
37 * down from the ISR).
38 *
39 * . UWB radio controller drivers queue events to the daemon using
40 * uwbd_event_queue(). They just get the event, chew it to make it
41 * look like UWBD likes it and pass it in a buffer allocated with
42 * uwb_event_alloc().
43 *
44 * EVENTS
45 *
 46 * Events have a type, a subtype, a length, some other stuff and the
47 * data blob, which depends on the event. The header is 'struct
48 * uwb_event'; for payloads, see 'struct uwbd_evt_*'.
49 *
50 * EVENT HANDLER TABLES
51 *
52 * To find a handling function for an event, the type is used to index
53 * a subtype-table in the type-table. The subtype-table is indexed
54 * with the subtype to get the function that handles the event. Start
55 * with the main type-table 'uwbd_evt_type_handler'.
56 *
57 * DEVICES
58 *
59 * Devices are created when a bunch of beacons have been received and
 60 * it is established that the device has a stable radio presence. CREATED
 61 * only, not configured. Devices are ONLY configured when an
 62 * Application-Specific IE Probe is received, in which the device
63 * declares which Protocol ID it groks. Then the device is CONFIGURED
64 * (and the driver->probe() stuff of the device model is invoked).
65 *
66 * Devices are considered disconnected when a certain number of
67 * beacons are not received in an amount of time.
68 *
 69 * Handler functions are normally called uwbd_evt_handle_*().
70 */
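/*
 * Illustrative sketch only (not part of this patch; the size argument
 * to uwb_event_alloc() is an assumption) of how a radio controller
 * driver hands a notification to this daemon, using the helpers
 * declared in uwb-internal.h:
 *
 *	struct uwb_event *evt = uwb_event_alloc(sizeof(*evt), GFP_ATOMIC);
 *	if (evt) {
 *		evt->rc = __uwb_rc_get(rc);	-- dropped by uwbd_event_handle()
 *		evt->ts_jiffies = jiffies;
 *		evt->type = UWB_EVT_TYPE_NOTIF;
 *		evt->notif.size = size;		-- size of the kmalloc'ed *rceb
 *		evt->notif.rceb = rceb;		-- freed by the daemon once handled
 *		uwbd_event_queue(evt);
 *	}
 */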
71
72#include <linux/kthread.h>
73#include <linux/module.h>
74#include <linux/freezer.h>
75#include "uwb-internal.h"
76
77#define D_LOCAL 1
78#include <linux/uwb/debug.h>
79
80
81/**
82 * UWBD Event handler function signature
83 *
 84 * Return !0 if the event need not be freed (i.e., the handler
85 * takes/took care of it). 0 means the daemon code will free the
86 * event.
87 *
88 * @evt->rc is already referenced and guaranteed to exist. See
89 * uwb_evt_handle().
90 */
91typedef int (*uwbd_evt_handler_f)(struct uwb_event *);
92
93/**
94 * Properties of a UWBD event
95 *
96 * @handler: the function that will handle this event
97 * @name: text name of event
98 */
99struct uwbd_event {
100 uwbd_evt_handler_f handler;
101 const char *name;
102};
103
104/** Table of handlers for and properties of the UWBD Radio Control Events */
105static
106struct uwbd_event uwbd_events[] = {
107 [UWB_RC_EVT_BEACON] = {
108 .handler = uwbd_evt_handle_rc_beacon,
109 .name = "BEACON_RECEIVED"
110 },
111 [UWB_RC_EVT_BEACON_SIZE] = {
112 .handler = uwbd_evt_handle_rc_beacon_size,
113 .name = "BEACON_SIZE_CHANGE"
114 },
115 [UWB_RC_EVT_BPOIE_CHANGE] = {
116 .handler = uwbd_evt_handle_rc_bpoie_change,
117 .name = "BPOIE_CHANGE"
118 },
119 [UWB_RC_EVT_BP_SLOT_CHANGE] = {
120 .handler = uwbd_evt_handle_rc_bp_slot_change,
121 .name = "BP_SLOT_CHANGE"
122 },
123 [UWB_RC_EVT_DRP_AVAIL] = {
124 .handler = uwbd_evt_handle_rc_drp_avail,
125 .name = "DRP_AVAILABILITY_CHANGE"
126 },
127 [UWB_RC_EVT_DRP] = {
128 .handler = uwbd_evt_handle_rc_drp,
129 .name = "DRP"
130 },
131 [UWB_RC_EVT_DEV_ADDR_CONFLICT] = {
132 .handler = uwbd_evt_handle_rc_dev_addr_conflict,
133 .name = "DEV_ADDR_CONFLICT",
134 },
135};
136
137
138
139struct uwbd_evt_type_handler {
140 const char *name;
141 struct uwbd_event *uwbd_events;
142 size_t size;
143};
144
145#define UWBD_EVT_TYPE_HANDLER(n,a) { \
146 .name = (n), \
147 .uwbd_events = (a), \
148	.size = ARRAY_SIZE(a) \
149}
150
151
152/** Table of handlers for each UWBD Event type. */
153static
154struct uwbd_evt_type_handler uwbd_evt_type_handlers[] = {
155 [UWB_RC_CET_GENERAL] = UWBD_EVT_TYPE_HANDLER("RC", uwbd_events)
156};
157
158static const
159size_t uwbd_evt_type_handlers_len = ARRAY_SIZE(uwbd_evt_type_handlers);
161
162static const struct uwbd_event uwbd_message_handlers[] = {
163 [UWB_EVT_MSG_RESET] = {
164 .handler = uwbd_msg_handle_reset,
165 .name = "reset",
166 },
167};
168
169static DEFINE_MUTEX(uwbd_event_mutex);
170
171/**
172 * Handle an URC event passed to the UWB Daemon
173 *
174 * @evt: the event to handle
175 * @returns: 0 if the event can be kfreed, !0 on the contrary
176 * (somebody else took ownership) [coincidentally, returning
177 * a <0 errno code will free it :)].
178 *
179 * Looks up the two indirection tables (one for the type, one for the
180 * subtype) to decide which function handles it and then calls the
181 * handler.
182 *
183 * The event structure passed to the event handler has the radio
184 * controller in @evt->rc referenced. The reference will be dropped
185 * once the handler returns, so if it needs it for longer (async),
186 * it'll need to take another one.
187 */
188static
189int uwbd_event_handle_urc(struct uwb_event *evt)
190{
191 struct uwbd_evt_type_handler *type_table;
192 uwbd_evt_handler_f handler;
193 u8 type, context;
194 u16 event;
195
196 type = evt->notif.rceb->bEventType;
197 event = le16_to_cpu(evt->notif.rceb->wEvent);
198 context = evt->notif.rceb->bEventContext;
199
200	if (type >= uwbd_evt_type_handlers_len) {
201 printk(KERN_ERR "UWBD: event type %u: unknown (too high)\n", type);
202 return -EINVAL;
203 }
204 type_table = &uwbd_evt_type_handlers[type];
205 if (type_table->uwbd_events == NULL) {
206 printk(KERN_ERR "UWBD: event type %u: unknown\n", type);
207 return -EINVAL;
208 }
209	if (event >= type_table->size) {
210 printk(KERN_ERR "UWBD: event %s[%u]: unknown (too high)\n",
211 type_table->name, event);
212 return -EINVAL;
213 }
214 handler = type_table->uwbd_events[event].handler;
215 if (handler == NULL) {
216 printk(KERN_ERR "UWBD: event %s[%u]: unknown\n", type_table->name, event);
217 return -EINVAL;
218 }
219 return (*handler)(evt);
220}
221
222static void uwbd_event_handle_message(struct uwb_event *evt)
223{
224 struct uwb_rc *rc;
225 int result;
226
227 rc = evt->rc;
228
229 if (evt->message < 0 || evt->message >= ARRAY_SIZE(uwbd_message_handlers)) {
230 dev_err(&rc->uwb_dev.dev, "UWBD: invalid message type %d\n", evt->message);
231 return;
232 }
233
234 /* If this is a reset event we need to drop the
235 * uwbd_event_mutex or it deadlocks when the reset handler
236 * attempts to flush the uwbd events. */
237 if (evt->message == UWB_EVT_MSG_RESET)
238 mutex_unlock(&uwbd_event_mutex);
239
240 result = uwbd_message_handlers[evt->message].handler(evt);
241 if (result < 0)
242 dev_err(&rc->uwb_dev.dev, "UWBD: '%s' message failed: %d\n",
243 uwbd_message_handlers[evt->message].name, result);
244
245 if (evt->message == UWB_EVT_MSG_RESET)
246 mutex_lock(&uwbd_event_mutex);
247}
248
249static void uwbd_event_handle(struct uwb_event *evt)
250{
251 struct uwb_rc *rc;
252 int should_keep;
253
254 rc = evt->rc;
255
256 if (rc->ready) {
257 switch (evt->type) {
258 case UWB_EVT_TYPE_NOTIF:
259 should_keep = uwbd_event_handle_urc(evt);
260 if (should_keep <= 0)
261 kfree(evt->notif.rceb);
262 break;
263 case UWB_EVT_TYPE_MSG:
264 uwbd_event_handle_message(evt);
265 break;
266 default:
267 dev_err(&rc->uwb_dev.dev, "UWBD: invalid event type %d\n", evt->type);
268 break;
269 }
270 }
271
272 __uwb_rc_put(rc); /* for the __uwb_rc_get() in uwb_rc_notif_cb() */
273}
274/* The UWB Daemon */
275
276
277/** Daemon's PID: used to decide if we can queue or not */
278static int uwbd_pid;
279/** Daemon's task struct for managing the kthread */
280static struct task_struct *uwbd_task;
281/** Daemon's waitqueue for waiting for new events */
282static DECLARE_WAIT_QUEUE_HEAD(uwbd_wq);
283/** Daemon's list of events; we queue/dequeue here */
284static struct list_head uwbd_event_list = LIST_HEAD_INIT(uwbd_event_list);
285/** Daemon's list lock to protect concurrent access */
286static DEFINE_SPINLOCK(uwbd_event_list_lock);
287
288
289/**
290 * UWB Daemon
291 *
292 * Listens to all UWB notifications and takes care to track the state
293 * of the UWB neighborhood for the kernel. When we do a run, we
294 * spinlock, move the list to a private copy and release the
295 * lock. Hold it as little as possible. Not a conflict: it is
296 * guaranteed we own the events in the private list.
297 *
298 * FIXME: should change so we don't have a 1HZ timer all the time, but
299 * only if there are devices.
300 */
301static int uwbd(void *unused)
302{
303 unsigned long flags;
304 struct list_head list = LIST_HEAD_INIT(list);
305 struct uwb_event *evt, *nxt;
306 int should_stop = 0;
307 while (1) {
308 wait_event_interruptible_timeout(
309 uwbd_wq,
310 !list_empty(&uwbd_event_list)
311 || (should_stop = kthread_should_stop()),
312 HZ);
313 if (should_stop)
314 break;
315 try_to_freeze();
316
317 mutex_lock(&uwbd_event_mutex);
318 spin_lock_irqsave(&uwbd_event_list_lock, flags);
319 list_splice_init(&uwbd_event_list, &list);
320 spin_unlock_irqrestore(&uwbd_event_list_lock, flags);
321 list_for_each_entry_safe(evt, nxt, &list, list_node) {
322 list_del(&evt->list_node);
323 uwbd_event_handle(evt);
324 kfree(evt);
325 }
326 mutex_unlock(&uwbd_event_mutex);
327
328 uwb_beca_purge(); /* Purge devices that left */
329 }
330 return 0;
331}
332
333
334/** Start the UWB daemon */
335void uwbd_start(void)
336{
337 uwbd_task = kthread_run(uwbd, NULL, "uwbd");
338	if (IS_ERR(uwbd_task))
339 printk(KERN_ERR "UWB: Cannot start management daemon; "
340 "UWB won't work\n");
341 else
342 uwbd_pid = uwbd_task->pid;
343}
344
345/* Stop the UWB daemon and free any unprocessed events */
346void uwbd_stop(void)
347{
348 unsigned long flags;
349 struct uwb_event *evt, *nxt;
350 kthread_stop(uwbd_task);
351 spin_lock_irqsave(&uwbd_event_list_lock, flags);
352 uwbd_pid = 0;
353 list_for_each_entry_safe(evt, nxt, &uwbd_event_list, list_node) {
354 if (evt->type == UWB_EVT_TYPE_NOTIF)
355 kfree(evt->notif.rceb);
356 kfree(evt);
357 }
358 spin_unlock_irqrestore(&uwbd_event_list_lock, flags);
359 uwb_beca_release();
360}
361
362/*
363 * Queue an event for the management daemon
364 *
365 * When some lower layer receives an event, it uses this function to
366 * push it forward to the UWB daemon.
367 *
368 * Once you pass the event, you don't own it any more, but the daemon
369 * does. It will uwb_event_free() it when done, so make sure you
370 * uwb_event_alloc()ed it or bad things will happen.
371 *
372 * If the daemon is not running, we just free the event.
373 */
374void uwbd_event_queue(struct uwb_event *evt)
375{
376 unsigned long flags;
377 spin_lock_irqsave(&uwbd_event_list_lock, flags);
378 if (uwbd_pid != 0) {
379 list_add(&evt->list_node, &uwbd_event_list);
380 wake_up_all(&uwbd_wq);
381 } else {
382 __uwb_rc_put(evt->rc);
383 if (evt->type == UWB_EVT_TYPE_NOTIF)
384 kfree(evt->notif.rceb);
385 kfree(evt);
386 }
387 spin_unlock_irqrestore(&uwbd_event_list_lock, flags);
388 return;
389}
390
391void uwbd_flush(struct uwb_rc *rc)
392{
393 struct uwb_event *evt, *nxt;
394
395 mutex_lock(&uwbd_event_mutex);
396
397 spin_lock_irq(&uwbd_event_list_lock);
398 list_for_each_entry_safe(evt, nxt, &uwbd_event_list, list_node) {
399 if (evt->rc == rc) {
400 __uwb_rc_put(rc);
401 list_del(&evt->list_node);
402 if (evt->type == UWB_EVT_TYPE_NOTIF)
403 kfree(evt->notif.rceb);
404 kfree(evt);
405 }
406 }
407 spin_unlock_irq(&uwbd_event_list_lock);
408
409 mutex_unlock(&uwbd_event_mutex);
410}
diff --git a/drivers/uwb/whc-rc.c b/drivers/uwb/whc-rc.c
new file mode 100644
index 000000000000..1711deadb114
--- /dev/null
+++ b/drivers/uwb/whc-rc.c
@@ -0,0 +1,520 @@
1/*
2 * Wireless Host Controller: Radio Control Interface (WHCI v0.95[2.3])
3 * Radio Control command/event transport to the UWB stack
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * Initialize and hook up the Radio Control interface.
24 *
25 * For each device probed, creates an 'struct whcrc' which contains
26 * just the representation of the UWB Radio Controller, and the logic
27 * for reading notifications and passing them to the UWB Core.
28 *
29 * So we initialize all of those, register the UWB Radio Controller
 30 * and set up the notification/event handler to pipe the notifications
31 * to the UWB management Daemon.
32 *
33 * Once uwb_rc_add() is called, the UWB stack takes control, resets
 34 * the radio and readies the device to take commands from the UWB
35 * API/user-space.
36 *
37 * Note this driver is just a transport driver; the commands are
 38 * formed at the UWB stack and given to this driver, which will deliver
39 * them to the hw and transfer the replies/notifications back to the
40 * UWB stack through the UWB daemon (UWBD).
41 */
42#include <linux/version.h>
43#include <linux/init.h>
44#include <linux/module.h>
45#include <linux/pci.h>
46#include <linux/dma-mapping.h>
47#include <linux/interrupt.h>
48#include <linux/workqueue.h>
49#include <linux/uwb.h>
50#include <linux/uwb/whci.h>
51#include <linux/uwb/umc.h>
52#include "uwb-internal.h"
53
54#define D_LOCAL 0
55#include <linux/uwb/debug.h>
56
57/**
58 * Descriptor for an instance of the UWB Radio Control Driver that
59 * attaches to the URC interface of the WHCI PCI card.
60 *
61 * Unless there is a lock specific to the 'data members', all access
62 * is protected by uwb_rc->mutex.
63 */
64struct whcrc {
65 struct umc_dev *umc_dev;
66 struct uwb_rc *uwb_rc; /* UWB host controller */
67
68 unsigned long area;
69 void __iomem *rc_base;
70 size_t rc_len;
71 spinlock_t irq_lock;
72
73 void *evt_buf, *cmd_buf;
74 dma_addr_t evt_dma_buf, cmd_dma_buf;
75 wait_queue_head_t cmd_wq;
76 struct work_struct event_work;
77};
78
79/**
80 * Execute an UWB RC command on WHCI/RC
81 *
82 * @rc: Instance of a Radio Controller that is a whcrc
83 * @cmd: Buffer containing the RCCB and payload to execute
84 * @cmd_size: Size of the command buffer.
85 *
 86 * We copy the command into whcrc->cmd_buf (as it is nicely
 87 * aligned and physically contiguous) and then press the right keys in
 88 * the controller's URCCMD register to get it to read it. We might
 89 * have to wait on cmd_wq until the controller is ready for a new command.
90 *
91 * NOTE: rc's mutex has to be locked
92 */
93static int whcrc_cmd(struct uwb_rc *uwb_rc,
94 const struct uwb_rccb *cmd, size_t cmd_size)
95{
96 int result = 0;
97 struct whcrc *whcrc = uwb_rc->priv;
98 struct device *dev = &whcrc->umc_dev->dev;
99 u32 urccmd;
100
101 d_fnstart(3, dev, "(%p, %p, %zu)\n", uwb_rc, cmd, cmd_size);
102 might_sleep();
103
104 if (cmd_size >= 4096) {
105 result = -E2BIG;
106 goto error;
107 }
108
109 /*
110 * If the URC is halted, then the hardware has reset itself.
111 * Attempt to recover by restarting the device and then return
112 * an error as it's likely that the current command isn't
113 * valid for a newly started RC.
114 */
115 if (le_readl(whcrc->rc_base + URCSTS) & URCSTS_HALTED) {
116 dev_err(dev, "requesting reset of halted radio controller\n");
117 uwb_rc_reset_all(uwb_rc);
118 result = -EIO;
119 goto error;
120 }
121
122 result = wait_event_timeout(whcrc->cmd_wq,
123 !(le_readl(whcrc->rc_base + URCCMD) & URCCMD_ACTIVE), HZ/2);
124 if (result == 0) {
125 dev_err(dev, "device is not ready to execute commands\n");
126 result = -ETIMEDOUT;
127 goto error;
128 }
129
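	/*
	 * Copy the command into the DMA-coherent buffer, point URCCMDADDR
	 * at it and then mark it ACTIVE (with IWR set) in URCCMD so the
	 * controller fetches and executes it; completion is signalled by
	 * the RCI interrupt, which wakes cmd_wq for the next caller.
	 */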
130 memmove(whcrc->cmd_buf, cmd, cmd_size);
131 le_writeq(whcrc->cmd_dma_buf, whcrc->rc_base + URCCMDADDR);
132
133 spin_lock(&whcrc->irq_lock);
134 urccmd = le_readl(whcrc->rc_base + URCCMD);
135 urccmd &= ~(URCCMD_EARV | URCCMD_SIZE_MASK);
136 le_writel(urccmd | URCCMD_ACTIVE | URCCMD_IWR | cmd_size,
137 whcrc->rc_base + URCCMD);
138 spin_unlock(&whcrc->irq_lock);
139
140error:
141 d_fnend(3, dev, "(%p, %p, %zu) = %d\n",
142 uwb_rc, cmd, cmd_size, result);
143 return result;
144}
145
146static int whcrc_reset(struct uwb_rc *rc)
147{
148 struct whcrc *whcrc = rc->priv;
149
150 return umc_controller_reset(whcrc->umc_dev);
151}
152
153/**
154 * Reset event reception mechanism and tell hw we are ready to get more
155 *
156 * We have read all the events in the event buffer, so we are ready to
157 * reset it to the beginning.
158 *
159 * This is only called during initialization or after an event buffer
160 * has been retired. This means we can be sure that event processing
161 * is disabled and it's safe to update the URCEVTADDR register.
162 *
163 * There's no need to wait for the event processing to start as the
164 * URC will not clear URCCMD_ACTIVE until (internal) event buffer
165 * space is available.
166 */
167static
168void whcrc_enable_events(struct whcrc *whcrc)
169{
170 struct device *dev = &whcrc->umc_dev->dev;
171 u32 urccmd;
172
173 d_fnstart(4, dev, "(whcrc %p)\n", whcrc);
174
175 le_writeq(whcrc->evt_dma_buf, whcrc->rc_base + URCEVTADDR);
176
177 spin_lock(&whcrc->irq_lock);
178 urccmd = le_readl(whcrc->rc_base + URCCMD) & ~URCCMD_ACTIVE;
179 le_writel(urccmd | URCCMD_EARV, whcrc->rc_base + URCCMD);
180 spin_unlock(&whcrc->irq_lock);
181
182 d_fnend(4, dev, "(whcrc %p) = void\n", whcrc);
183}
184
185static void whcrc_event_work(struct work_struct *work)
186{
187 struct whcrc *whcrc = container_of(work, struct whcrc, event_work);
188 struct device *dev = &whcrc->umc_dev->dev;
189 size_t size;
190 u64 urcevtaddr;
191
192 urcevtaddr = le_readq(whcrc->rc_base + URCEVTADDR);
193 size = urcevtaddr & URCEVTADDR_OFFSET_MASK;
194
195 d_printf(3, dev, "received %zu octet event\n", size);
196 d_dump(4, dev, whcrc->evt_buf, size > 32 ? 32 : size);
197
198 uwb_rc_neh_grok(whcrc->uwb_rc, whcrc->evt_buf, size);
199 whcrc_enable_events(whcrc);
200}
201
202/**
203 * Radio controller interrupt handler
204 *
205 * We ack immediately (and expect the hw to do the right thing and
206 * raise another IRQ if things have changed :)
207 */
208static
209irqreturn_t whcrc_irq_cb(int irq, void *_whcrc)
210{
211 struct whcrc *whcrc = _whcrc;
212 struct device *dev = &whcrc->umc_dev->dev;
213 u32 urcsts;
214
215 urcsts = le_readl(whcrc->rc_base + URCSTS);
216 if (!(urcsts & URCSTS_INT_MASK))
217 return IRQ_NONE;
218 le_writel(urcsts & URCSTS_INT_MASK, whcrc->rc_base + URCSTS);
219
220 d_printf(4, dev, "acked 0x%08x, urcsts 0x%08x\n",
221 le_readl(whcrc->rc_base + URCSTS), urcsts);
222
223 if (urcsts & URCSTS_HSE) {
224 dev_err(dev, "host system error -- hardware halted\n");
225 /* FIXME: do something sensible here */
226 goto out;
227 }
228 if (urcsts & URCSTS_ER) {
229 d_printf(3, dev, "ER: event ready\n");
230 schedule_work(&whcrc->event_work);
231 }
232 if (urcsts & URCSTS_RCI) {
233 d_printf(3, dev, "RCI: ready to execute another command\n");
234 wake_up_all(&whcrc->cmd_wq);
235 }
236out:
237 return IRQ_HANDLED;
238}
239
240
241/**
242 * Initialize a UMC RC interface: map regions, get (shared) IRQ
243 */
244static
245int whcrc_setup_rc_umc(struct whcrc *whcrc)
246{
247 int result = 0;
248 struct device *dev = &whcrc->umc_dev->dev;
249 struct umc_dev *umc_dev = whcrc->umc_dev;
250
251 whcrc->area = umc_dev->resource.start;
252 whcrc->rc_len = umc_dev->resource.end - umc_dev->resource.start + 1;
253 result = -EBUSY;
254 if (request_mem_region(whcrc->area, whcrc->rc_len, KBUILD_MODNAME)
255 == NULL) {
256 dev_err(dev, "can't request URC region (%zu bytes @ 0x%lx): %d\n",
257 whcrc->rc_len, whcrc->area, result);
258 goto error_request_region;
259 }
260
261 whcrc->rc_base = ioremap_nocache(whcrc->area, whcrc->rc_len);
262 if (whcrc->rc_base == NULL) {
263 dev_err(dev, "can't ioremap registers (%zu bytes @ 0x%lx): %d\n",
264 whcrc->rc_len, whcrc->area, result);
265 goto error_ioremap_nocache;
266 }
267
268 result = request_irq(umc_dev->irq, whcrc_irq_cb, IRQF_SHARED,
269 KBUILD_MODNAME, whcrc);
270 if (result < 0) {
271 dev_err(dev, "can't allocate IRQ %d: %d\n",
272 umc_dev->irq, result);
273 goto error_request_irq;
274 }
275
276 result = -ENOMEM;
277 whcrc->cmd_buf = dma_alloc_coherent(&umc_dev->dev, PAGE_SIZE,
278 &whcrc->cmd_dma_buf, GFP_KERNEL);
279 if (whcrc->cmd_buf == NULL) {
280 dev_err(dev, "Can't allocate cmd transfer buffer\n");
281 goto error_cmd_buffer;
282 }
283
284 whcrc->evt_buf = dma_alloc_coherent(&umc_dev->dev, PAGE_SIZE,
285 &whcrc->evt_dma_buf, GFP_KERNEL);
286 if (whcrc->evt_buf == NULL) {
287 dev_err(dev, "Can't allocate evt transfer buffer\n");
288 goto error_evt_buffer;
289 }
290 d_printf(3, dev, "UWB RC Interface: %zu bytes at 0x%p, irq %u\n",
291 whcrc->rc_len, whcrc->rc_base, umc_dev->irq);
292 return 0;
293
294error_evt_buffer:
295 dma_free_coherent(&umc_dev->dev, PAGE_SIZE, whcrc->cmd_buf,
296 whcrc->cmd_dma_buf);
297error_cmd_buffer:
298 free_irq(umc_dev->irq, whcrc);
299error_request_irq:
300 iounmap(whcrc->rc_base);
301error_ioremap_nocache:
302 release_mem_region(whcrc->area, whcrc->rc_len);
303error_request_region:
304 return result;
305}
306
307
308/**
309 * Release RC's UMC resources
310 */
311static
312void whcrc_release_rc_umc(struct whcrc *whcrc)
313{
314 struct umc_dev *umc_dev = whcrc->umc_dev;
315
316 dma_free_coherent(&umc_dev->dev, PAGE_SIZE, whcrc->evt_buf,
317 whcrc->evt_dma_buf);
318 dma_free_coherent(&umc_dev->dev, PAGE_SIZE, whcrc->cmd_buf,
319 whcrc->cmd_dma_buf);
320 free_irq(umc_dev->irq, whcrc);
321 iounmap(whcrc->rc_base);
322 release_mem_region(whcrc->area, whcrc->rc_len);
323}
324
325
326/**
327 * whcrc_start_rc - start a WHCI radio controller
328 * @whcrc: the radio controller to start
329 *
330 * Reset the UMC device, start the radio controller, enable events and
331 * finally enable interrupts.
332 */
333static int whcrc_start_rc(struct uwb_rc *rc)
334{
335 struct whcrc *whcrc = rc->priv;
336 int result = 0;
337 struct device *dev = &whcrc->umc_dev->dev;
338 unsigned long start, duration;
339
340 /* Reset the thing */
341 le_writel(URCCMD_RESET, whcrc->rc_base + URCCMD);
342 if (d_test(3))
343 start = jiffies;
344 if (whci_wait_for(dev, whcrc->rc_base + URCCMD, URCCMD_RESET, 0,
345 5000, "device to reset at init") < 0) {
346 result = -EBUSY;
347 goto error;
348 } else if (d_test(3)) {
349 duration = jiffies - start;
350 if (duration > msecs_to_jiffies(40))
351 dev_err(dev, "Device took %ums to "
352 "reset. MAX expected: 40ms\n",
353 jiffies_to_msecs(duration));
354 }
355
356 /* Set the event buffer, start the controller (enable IRQs later) */
357 le_writel(0, whcrc->rc_base + URCINTR);
358 le_writel(URCCMD_RS, whcrc->rc_base + URCCMD);
359 result = -ETIMEDOUT;
360 if (d_test(3))
361 start = jiffies;
362 if (whci_wait_for(dev, whcrc->rc_base + URCSTS, URCSTS_HALTED, 0,
363 5000, "device to start") < 0)
364 goto error;
365 if (d_test(3)) {
366 duration = jiffies - start;
367 if (duration > msecs_to_jiffies(40))
368 dev_err(dev, "Device took %ums to start. "
369 "MAX expected: 40ms\n",
370 jiffies_to_msecs(duration));
371 }
372 whcrc_enable_events(whcrc);
373 result = 0;
374 le_writel(URCINTR_EN_ALL, whcrc->rc_base + URCINTR);
375error:
376 return result;
377}
378
379
380/**
381 * whcrc_stop_rc - stop a WHCI radio controller
382 * @whcrc: the radio controller to stop
383 *
384 * Disable interrupts and cancel any pending event processing work
385 * before clearing the Run/Stop bit.
386 */
387static
388void whcrc_stop_rc(struct uwb_rc *rc)
389{
390 struct whcrc *whcrc = rc->priv;
391 struct umc_dev *umc_dev = whcrc->umc_dev;
392
393 le_writel(0, whcrc->rc_base + URCINTR);
394 cancel_work_sync(&whcrc->event_work);
395
396 le_writel(0, whcrc->rc_base + URCCMD);
397 whci_wait_for(&umc_dev->dev, whcrc->rc_base + URCSTS,
398 URCSTS_HALTED, 0, 40, "URCSTS.HALTED");
399}
400
401static void whcrc_init(struct whcrc *whcrc)
402{
403 spin_lock_init(&whcrc->irq_lock);
404 init_waitqueue_head(&whcrc->cmd_wq);
405 INIT_WORK(&whcrc->event_work, whcrc_event_work);
406}
407
408/**
409 * Initialize the radio controller.
410 *
411 * NOTE: we set up whcrc->uwb_rc before calling uwb_rc_add(); in the
412 * IRQ handler we use that to determine if the hw is ready to
413 * handle events. Looks like a race condition, but it really is
414 * not.
415 */
416static
417int whcrc_probe(struct umc_dev *umc_dev)
418{
419 int result;
420 struct uwb_rc *uwb_rc;
421 struct whcrc *whcrc;
422 struct device *dev = &umc_dev->dev;
423
424 d_fnstart(3, dev, "(umc_dev %p)\n", umc_dev);
425 result = -ENOMEM;
426 uwb_rc = uwb_rc_alloc();
427 if (uwb_rc == NULL) {
428 dev_err(dev, "unable to allocate RC instance\n");
429 goto error_rc_alloc;
430 }
431 whcrc = kzalloc(sizeof(*whcrc), GFP_KERNEL);
432 if (whcrc == NULL) {
433 dev_err(dev, "unable to allocate WHC-RC instance\n");
434 goto error_alloc;
435 }
436 whcrc_init(whcrc);
437 whcrc->umc_dev = umc_dev;
438
439 result = whcrc_setup_rc_umc(whcrc);
440 if (result < 0) {
441 dev_err(dev, "Can't setup RC UMC interface: %d\n", result);
442 goto error_setup_rc_umc;
443 }
444 whcrc->uwb_rc = uwb_rc;
445
446 uwb_rc->owner = THIS_MODULE;
447 uwb_rc->cmd = whcrc_cmd;
448 uwb_rc->reset = whcrc_reset;
449 uwb_rc->start = whcrc_start_rc;
450 uwb_rc->stop = whcrc_stop_rc;
451
452 result = uwb_rc_add(uwb_rc, dev, whcrc);
453 if (result < 0)
454 goto error_rc_add;
455 umc_set_drvdata(umc_dev, whcrc);
456 d_fnend(3, dev, "(umc_dev %p) = 0\n", umc_dev);
457 return 0;
458
459error_rc_add:
460 whcrc_release_rc_umc(whcrc);
461error_setup_rc_umc:
462 kfree(whcrc);
463error_alloc:
464 uwb_rc_put(uwb_rc);
465error_rc_alloc:
466 d_fnend(3, dev, "(umc_dev %p) = %d\n", umc_dev, result);
467 return result;
468}
469
470/**
471 * Clean up the radio control resources
472 *
473 * When we up the command semaphore, anyone blocked waiting to execute
474 * a command is granted entry; they will see that the host is quiescing
475 * and up it again, so the wakeup chains to the next waiter. In practice
476 * this should not happen, as we can only remove the device when there
477 * are no handles open...
478 */
479static void whcrc_remove(struct umc_dev *umc_dev)
480{
481 struct whcrc *whcrc = umc_get_drvdata(umc_dev);
482 struct uwb_rc *uwb_rc = whcrc->uwb_rc;
483
484 umc_set_drvdata(umc_dev, NULL);
485 uwb_rc_rm(uwb_rc);
486 whcrc_release_rc_umc(whcrc);
487 kfree(whcrc);
488 uwb_rc_put(uwb_rc);
489 d_printf(1, &umc_dev->dev, "freed whcrc %p\n", whcrc);
490}
491
492/* PCI device IDs that we handle [so the module gets loaded] */
493static struct pci_device_id whcrc_id_table[] = {
494 { PCI_DEVICE_CLASS(PCI_CLASS_WIRELESS_WHCI, ~0) },
495 { /* empty last entry */ }
496};
497MODULE_DEVICE_TABLE(pci, whcrc_id_table);
498
499static struct umc_driver whcrc_driver = {
500 .name = "whc-rc",
501 .cap_id = UMC_CAP_ID_WHCI_RC,
502 .probe = whcrc_probe,
503 .remove = whcrc_remove,
504};
505
506static int __init whcrc_driver_init(void)
507{
508 return umc_driver_register(&whcrc_driver);
509}
510module_init(whcrc_driver_init);
511
512static void __exit whcrc_driver_exit(void)
513{
514 umc_driver_unregister(&whcrc_driver);
515}
516module_exit(whcrc_driver_exit);
517
518MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
519MODULE_DESCRIPTION("Wireless Host Controller Radio Control Driver");
520MODULE_LICENSE("GPL");
diff --git a/drivers/uwb/whci.c b/drivers/uwb/whci.c
new file mode 100644
index 000000000000..3df2388f908f
--- /dev/null
+++ b/drivers/uwb/whci.c
@@ -0,0 +1,269 @@
1/*
2 * WHCI UWB Multi-interface Controller enumerator.
3 *
4 * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
5 *
6 * This file is released under the GNU GPL v2.
7 */
8#include <linux/delay.h>
9#include <linux/kernel.h>
10#include <linux/pci.h>
11#include <linux/dma-mapping.h>
12#include <linux/uwb/whci.h>
13#include <linux/uwb/umc.h>
14
15struct whci_card {
16 struct pci_dev *pci;
17 void __iomem *uwbbase;
18 u8 n_caps;
19 struct umc_dev *devs[0];
20};
21
22
23/* Fix faulty HW :( */
24static
25u64 whci_capdata_quirks(struct whci_card *card, u64 capdata)
26{
27 u64 capdata_orig = capdata;
28 struct pci_dev *pci_dev = card->pci;
29 if (pci_dev->vendor == PCI_VENDOR_ID_INTEL
30 && (pci_dev->device == 0x0c3b || pci_dev->device == 0004)
31 && pci_dev->class == 0x0d1010) {
32 switch (UWBCAPDATA_TO_CAP_ID(capdata)) {
33 /* WLP capability has 0x100 bytes of aperture */
34 case 0x80:
35 capdata |= 0x40 << 8; break;
36 /* WUSB capability has 0x80 bytes of aperture
37 * and ID is 1 */
38 case 0x02:
39 capdata &= ~0xffff;
40 capdata |= 0x2001;
41 break;
42 }
43 }
44 if (capdata_orig != capdata)
45 dev_warn(&pci_dev->dev,
46 "PCI v%04x d%04x c%06x#%02x: "
47 "corrected capdata from %016Lx to %016Lx\n",
48 pci_dev->vendor, pci_dev->device, pci_dev->class,
49 (unsigned)UWBCAPDATA_TO_CAP_ID(capdata),
50 (unsigned long long)capdata_orig,
51 (unsigned long long)capdata);
52 return capdata;
53}
54
55
56/**
57 * whci_wait_for - wait for a WHCI register to reach a given value
58 *
59 * Polls (for at most @max_ms ms) until '*@reg & @mask == @result'.
60 */
61int whci_wait_for(struct device *dev, u32 __iomem *reg, u32 mask, u32 result,
62 unsigned long max_ms, const char *tag)
63{
64 unsigned t = 0;
65 u32 val;
66 for (;;) {
67 val = le_readl(reg);
68 if ((val & mask) == result)
69 break;
70 msleep(10);
71 if (t >= max_ms) {
72 dev_err(dev, "timed out waiting for %s\n", tag);
73 return -ETIMEDOUT;
74 }
75 t += 10;
76 }
77 return 0;
78}
79EXPORT_SYMBOL_GPL(whci_wait_for);
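/*
 * Illustrative usage (editor's sketch, not part of the original file),
 * mirroring the calls made from whci-rc.c above; the register, mask and
 * the 100 ms timeout are example values only:
 *
 *	err = whci_wait_for(&umc_dev->dev, whcrc->rc_base + URCSTS,
 *			    URCSTS_HALTED, 0, 100, "URCSTS.HALTED");
 *
 * On success err is 0; if the masked register value does not match
 * within the timeout, err is -ETIMEDOUT and a message has been logged.
 */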
80
81
82/*
83 * NOTE: the capinfo and capdata registers are slightly different
84 * (size and cap-id fields). So for cap #0, we need to fill
85 * in. Size comes from the size of the register block
86 * (statically calculated); cap_id comes from nowhere, we use
87 * zero, that is reserved, for the radio controller, because
88 * none was defined at the spec level.
89 */
90static int whci_add_cap(struct whci_card *card, int n)
91{
92 struct umc_dev *umc;
93 u64 capdata;
94 int bar, err;
95
96 umc = umc_device_create(&card->pci->dev, n);
97 if (umc == NULL)
98 return -ENOMEM;
99
100 capdata = le_readq(card->uwbbase + UWBCAPDATA(n));
101
102 bar = UWBCAPDATA_TO_BAR(capdata) << 1;
103
104 capdata = whci_capdata_quirks(card, capdata);
105 /* Capability 0 is the radio controller. Its size is 32
106 * bytes (WHCI0.95[2.3, T2-9]). */
107 umc->version = UWBCAPDATA_TO_VERSION(capdata);
108 umc->cap_id = n == 0 ? 0 : UWBCAPDATA_TO_CAP_ID(capdata);
109 umc->bar = bar;
110 umc->resource.start = pci_resource_start(card->pci, bar)
111 + UWBCAPDATA_TO_OFFSET(capdata);
112 umc->resource.end = umc->resource.start
113 + (n == 0 ? 0x20 : UWBCAPDATA_TO_SIZE(capdata)) - 1;
114 umc->resource.name = umc->dev.bus_id;
115 umc->resource.flags = card->pci->resource[bar].flags;
116 umc->resource.parent = &card->pci->resource[bar];
117 umc->irq = card->pci->irq;
118
119 err = umc_device_register(umc);
120 if (err < 0)
121 goto error;
122 card->devs[n] = umc;
123 return 0;
124
125error:
126 kfree(umc);
127 return err;
128}
129
130static void whci_del_cap(struct whci_card *card, int n)
131{
132 struct umc_dev *umc = card->devs[n];
133
134 if (umc != NULL)
135 umc_device_unregister(umc);
136}
137
138static int whci_n_caps(struct pci_dev *pci)
139{
140 void __iomem *uwbbase;
141 u64 capinfo;
142
143 uwbbase = pci_iomap(pci, 0, 8);
144 if (!uwbbase)
145 return -ENOMEM;
146 capinfo = le_readq(uwbbase + UWBCAPINFO);
147 pci_iounmap(pci, uwbbase);
148
149 return UWBCAPINFO_TO_N_CAPS(capinfo);
150}
151
152static int whci_probe(struct pci_dev *pci, const struct pci_device_id *id)
153{
154 struct whci_card *card;
155 int err, n_caps, n;
156
157 err = pci_enable_device(pci);
158 if (err < 0)
159 goto error;
160 pci_enable_msi(pci);
161 pci_set_master(pci);
162 err = -ENXIO;
163 if (!pci_set_dma_mask(pci, DMA_64BIT_MASK))
164 pci_set_consistent_dma_mask(pci, DMA_64BIT_MASK);
165 else if (!pci_set_dma_mask(pci, DMA_32BIT_MASK))
166 pci_set_consistent_dma_mask(pci, DMA_32BIT_MASK);
167 else
168 goto error_dma;
169
170 err = n_caps = whci_n_caps(pci);
171 if (n_caps < 0)
172 goto error_ncaps;
173
174 err = -ENOMEM;
175 card = kzalloc(sizeof(struct whci_card)
176 + sizeof(struct umc_dev *) * (n_caps + 1),
177 GFP_KERNEL);
178 if (card == NULL)
179 goto error_kzalloc;
180 card->pci = pci;
181 card->n_caps = n_caps;
182
183 err = -EBUSY;
184 if (!request_mem_region(pci_resource_start(pci, 0),
185 UWBCAPDATA_SIZE(card->n_caps),
186 "whci (capability data)"))
187 goto error_request_memregion;
188 err = -ENOMEM;
189 card->uwbbase = pci_iomap(pci, 0, UWBCAPDATA_SIZE(card->n_caps));
190 if (!card->uwbbase)
191 goto error_iomap;
192
193 /* Add each capability. */
194 for (n = 0; n <= card->n_caps; n++) {
195 err = whci_add_cap(card, n);
196 if (err < 0 && n == 0) {
197 dev_err(&pci->dev, "cannot bind UWB radio controller:"
198 " %d\n", err);
199 goto error_bind;
200 }
201 if (err < 0)
202 dev_warn(&pci->dev, "warning: cannot bind capability "
203 "#%u: %d\n", n, err);
204 }
205 pci_set_drvdata(pci, card);
206 return 0;
207
208error_bind:
209 pci_iounmap(pci, card->uwbbase);
210error_iomap:
211 release_mem_region(pci_resource_start(pci, 0), UWBCAPDATA_SIZE(card->n_caps));
212error_request_memregion:
213 kfree(card);
214error_kzalloc:
215error_ncaps:
216error_dma:
217 pci_disable_msi(pci);
218 pci_disable_device(pci);
219error:
220 return err;
221}
222
223static void whci_remove(struct pci_dev *pci)
224{
225 struct whci_card *card = pci_get_drvdata(pci);
226 int n;
227
228 pci_set_drvdata(pci, NULL);
229 /* Unregister each capability in reverse (so the master device
230 * is unregistered last). */
231 for (n = card->n_caps; n >= 0 ; n--)
232 whci_del_cap(card, n);
233 pci_iounmap(pci, card->uwbbase);
234 release_mem_region(pci_resource_start(pci, 0), UWBCAPDATA_SIZE(card->n_caps));
235 kfree(card);
236 pci_disable_msi(pci);
237 pci_disable_device(pci);
238}
239
240static struct pci_device_id whci_id_table[] = {
241 { PCI_DEVICE_CLASS(PCI_CLASS_WIRELESS_WHCI, ~0) },
242 { 0 },
243};
244MODULE_DEVICE_TABLE(pci, whci_id_table);
245
246
247static struct pci_driver whci_driver = {
248 .name = "whci",
249 .id_table = whci_id_table,
250 .probe = whci_probe,
251 .remove = whci_remove,
252};
253
254static int __init whci_init(void)
255{
256 return pci_register_driver(&whci_driver);
257}
258
259static void __exit whci_exit(void)
260{
261 pci_unregister_driver(&whci_driver);
262}
263
264module_init(whci_init);
265module_exit(whci_exit);
266
267MODULE_DESCRIPTION("WHCI UWB Multi-interface Controller enumerator");
268MODULE_AUTHOR("Cambridge Silicon Radio Ltd.");
269MODULE_LICENSE("GPL");
diff --git a/drivers/uwb/wlp/Makefile b/drivers/uwb/wlp/Makefile
new file mode 100644
index 000000000000..c72c11db5b1b
--- /dev/null
+++ b/drivers/uwb/wlp/Makefile
@@ -0,0 +1,10 @@
1obj-$(CONFIG_UWB_WLP) := wlp.o
2
3wlp-objs := \
4 driver.o \
5 eda.o \
6 messages.o \
7 sysfs.o \
8 txrx.o \
9 wlp-lc.o \
10 wss-lc.o
diff --git a/drivers/uwb/wlp/driver.c b/drivers/uwb/wlp/driver.c
new file mode 100644
index 000000000000..cb8d699b6a67
--- /dev/null
+++ b/drivers/uwb/wlp/driver.c
@@ -0,0 +1,43 @@
1/*
2 * WiMedia Logical Link Control Protocol (WLP)
3 *
4 * Copyright (C) 2007 Intel Corporation
5 * Reinette Chatre <reinette.chatre@intel.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 *
21 *
22 * Life cycle of WLP substack
23 *
24 * FIXME: Docs
25 */
26
27#include <linux/module.h>
28
29static int __init wlp_subsys_init(void)
30{
31 return 0;
32}
33module_init(wlp_subsys_init);
34
35static void __exit wlp_subsys_exit(void)
36{
37 return;
38}
39module_exit(wlp_subsys_exit);
40
41MODULE_AUTHOR("Reinette Chatre <reinette.chatre@intel.com>");
42MODULE_DESCRIPTION("WiMedia Logical Link Control Protocol (WLP)");
43MODULE_LICENSE("GPL");
diff --git a/drivers/uwb/wlp/eda.c b/drivers/uwb/wlp/eda.c
new file mode 100644
index 000000000000..cdfe8dfc4340
--- /dev/null
+++ b/drivers/uwb/wlp/eda.c
@@ -0,0 +1,449 @@
1/*
2 * WUSB Wire Adapter: WLP interface
3 * Ethernet to device address cache
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * We need to be able to map ethernet addresses to device addresses
24 * and back because there is no explicit relationship between the eth
25 * addresses used in the ETH frames and the device addresses (no, it
26 * would not have been simpler to force as ETH address the MBOA MAC
27 * address...no, not at all :).
28 *
29 * A device has one MBOA MAC address and one device address. It is possible
30 * for a device to have more than one virtual MAC address (although a
31 * virtual address can be the same as the MBOA MAC address). The device
32 * address is guaranteed to be unique among the devices in the extended
33 * beacon group (see ECMA 17.1.1). We thus use the device address as index
34 * to this cache. We do allow searching based on virtual address as this
35 * is how Ethernet frames will be addressed.
36 *
37 * We need to support virtual EUI-48, although right now the virtual
38 * EUI-48 will always be the same as the MAC SAP address. The EDA cache
39 * entry thus contains a MAC SAP address as well as the virtual address
40 * (used to map the network stack address to a neighbor). When we move
41 * to support more than one virtual MAC on a host then this organization
42 * will have to change. Perhaps a neighbor has a list of WSSs, each with a
43 * tag and virtual EUI-48.
44 *
45 * On data transmission the cache is used to determine if the neighbor is
46 * connected and what WSS it belongs to. With this we know what tag to add
47 * to the WLP frame. Storing
48 * the WSS in the EDA cache may be overkill because we only support one
49 * WSS. Hopefully we will support more than one WSS at some point.
50 * On data reception it is used to determine the WSS based on
51 * the tag and address of the transmitting neighbor.
52 */
53
54#define D_LOCAL 5
55#include <linux/netdevice.h>
56#include <linux/uwb/debug.h>
57#include <linux/etherdevice.h>
58#include <linux/wlp.h>
59#include "wlp-internal.h"
60
61
62/* FIXME: cache is not purged except on device close */
63
64/* FIXME: does not scale, change to dynamic array */
65
66/*
67 * Initialize the EDA cache
68 *
71 * Call when the interface is being brought up
72 *
73 * NOTE: Keep it as a separate function as the implementation will
74 * change and be more complex.
75 */
76void wlp_eda_init(struct wlp_eda *eda)
77{
78 INIT_LIST_HEAD(&eda->cache);
79 spin_lock_init(&eda->lock);
80}
81
82/*
83 * Release the EDA cache
84 *
87 * Called when the interface is brought down
88 */
89void wlp_eda_release(struct wlp_eda *eda)
90{
91 unsigned long flags;
92 struct wlp_eda_node *itr, *next;
93
94 spin_lock_irqsave(&eda->lock, flags);
95 list_for_each_entry_safe(itr, next, &eda->cache, list_node) {
96 list_del(&itr->list_node);
97 kfree(itr);
98 }
99 spin_unlock_irqrestore(&eda->lock, flags);
100}
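/*
 * Editor's sketch (illustrative, not from the original source): the
 * expected sequencing of the EDA entry points in this file over the
 * life of an interface, going by the comments on each function.  "eth",
 * "dev", "virt", "tag" and "state" stand for the caller's values.
 *
 *	wlp_eda_init(&wlp->eda);                    interface brought up
 *	wlp_eda_create_node(&wlp->eda, eth, dev);   neighbor seen "onair"
 *	wlp_eda_update_node(&wlp->eda, dev, wss,
 *			    virt, tag, state);      neighbor joins a WSS
 *	wlp_eda_rm_node(&wlp->eda, dev);            neighbor goes off the air
 *	wlp_eda_release(&wlp->eda);                 interface brought down
 */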
101
102/*
103 * Add an address mapping
104 *
105 * @returns 0 if ok, < 0 errno code on error
106 *
107 * An address mapping is initially created when the neighbor device is seen
108 * for the first time (it is "onair"). At this time the neighbor is not
109 * connected or associated with a WSS so we only populate the Ethernet and
110 * Device address fields.
111 *
112 */
113int wlp_eda_create_node(struct wlp_eda *eda,
114 const unsigned char eth_addr[ETH_ALEN],
115 const struct uwb_dev_addr *dev_addr)
116{
117 int result = 0;
118 struct wlp_eda_node *itr;
119 unsigned long flags;
120
121 BUG_ON(dev_addr == NULL || eth_addr == NULL);
122 spin_lock_irqsave(&eda->lock, flags);
123 list_for_each_entry(itr, &eda->cache, list_node) {
124 if (!memcmp(&itr->dev_addr, dev_addr, sizeof(itr->dev_addr))) {
125 printk(KERN_ERR "EDA cache already contains entry "
126 "for neighbor %02x:%02x\n",
127 dev_addr->data[1], dev_addr->data[0]);
128 result = -EEXIST;
129 goto out_unlock;
130 }
131 }
132 itr = kzalloc(sizeof(*itr), GFP_ATOMIC);
133 if (itr != NULL) {
134 memcpy(itr->eth_addr, eth_addr, sizeof(itr->eth_addr));
135 itr->dev_addr = *dev_addr;
136 list_add(&itr->list_node, &eda->cache);
137 } else
138 result = -ENOMEM;
139out_unlock:
140 spin_unlock_irqrestore(&eda->lock, flags);
141 return result;
142}
143
144/*
145 * Remove entry from EDA cache
146 *
147 * This is done when the device goes off air.
148 */
149void wlp_eda_rm_node(struct wlp_eda *eda, const struct uwb_dev_addr *dev_addr)
150{
151 struct wlp_eda_node *itr, *next;
152 unsigned long flags;
153
154 spin_lock_irqsave(&eda->lock, flags);
155 list_for_each_entry_safe(itr, next, &eda->cache, list_node) {
156 if (!memcmp(&itr->dev_addr, dev_addr, sizeof(itr->dev_addr))) {
157 list_del(&itr->list_node);
158 kfree(itr);
159 break;
160 }
161 }
162 spin_unlock_irqrestore(&eda->lock, flags);
163}
164
165/*
166 * Update an address mapping
167 *
168 * @returns 0 if ok, < 0 errno code on error
169 */
170int wlp_eda_update_node(struct wlp_eda *eda,
171 const struct uwb_dev_addr *dev_addr,
172 struct wlp_wss *wss,
173 const unsigned char virt_addr[ETH_ALEN],
174 const u8 tag, const enum wlp_wss_connect state)
175{
176 int result = -ENOENT;
177 struct wlp_eda_node *itr;
178 unsigned long flags;
179
180 spin_lock_irqsave(&eda->lock, flags);
181 list_for_each_entry(itr, &eda->cache, list_node) {
182 if (!memcmp(&itr->dev_addr, dev_addr, sizeof(itr->dev_addr))) {
183 /* Found it, update it */
184 itr->wss = wss;
185 memcpy(itr->virt_addr, virt_addr,
186 sizeof(itr->virt_addr));
187 itr->tag = tag;
188 itr->state = state;
189 result = 0;
190 goto out_unlock;
191 }
192 }
193 /* Not found */
194out_unlock:
195 spin_unlock_irqrestore(&eda->lock, flags);
196 return result;
197}
198
199/*
200 * Update only state field of an address mapping
201 *
202 * @returns 0 if ok, < 0 errno code on error
203 */
204int wlp_eda_update_node_state(struct wlp_eda *eda,
205 const struct uwb_dev_addr *dev_addr,
206 const enum wlp_wss_connect state)
207{
208 int result = -ENOENT;
209 struct wlp_eda_node *itr;
210 unsigned long flags;
211
212 spin_lock_irqsave(&eda->lock, flags);
213 list_for_each_entry(itr, &eda->cache, list_node) {
214 if (!memcmp(&itr->dev_addr, dev_addr, sizeof(itr->dev_addr))) {
215 /* Found it, update it */
216 itr->state = state;
217 result = 0;
218 goto out_unlock;
219 }
220 }
221 /* Not found */
222out_unlock:
223 spin_unlock_irqrestore(&eda->lock, flags);
224 return result;
225}
226
227/*
228 * Return contents of EDA cache entry
229 *
230 * @dev_addr: index to EDA cache
231 * @eda_entry: pointer to where contents of EDA cache will be copied
232 */
233int wlp_copy_eda_node(struct wlp_eda *eda, struct uwb_dev_addr *dev_addr,
234 struct wlp_eda_node *eda_entry)
235{
236 int result = -ENOENT;
237 struct wlp_eda_node *itr;
238 unsigned long flags;
239
240 spin_lock_irqsave(&eda->lock, flags);
241 list_for_each_entry(itr, &eda->cache, list_node) {
242 if (!memcmp(&itr->dev_addr, dev_addr, sizeof(itr->dev_addr))) {
243 *eda_entry = *itr;
244 result = 0;
245 goto out_unlock;
246 }
247 }
248 /* Not found */
249out_unlock:
250 spin_unlock_irqrestore(&eda->lock, flags);
251 return result;
252}
253
254/*
255 * Execute function for every element in the cache
256 *
257 * @function: function to execute on element of cache (must be atomic)
258 * @priv: private data of function
259 * @returns: result of first function that failed, or last function
260 * executed if no function failed.
261 *
262 * Stop executing when function returns error for any element in cache.
263 *
264 * IMPORTANT: We are using a spinlock here: the function executed on each
265 * element has to be atomic.
266 */
267int wlp_eda_for_each(struct wlp_eda *eda, wlp_eda_for_each_f function,
268 void *priv)
269{
270 int result = 0;
271 struct wlp *wlp = container_of(eda, struct wlp, eda);
272 struct wlp_eda_node *entry;
273 unsigned long flags;
274
275 spin_lock_irqsave(&eda->lock, flags);
276 list_for_each_entry(entry, &eda->cache, list_node) {
277 result = (*function)(wlp, entry, priv);
278 if (result < 0)
279 break;
280 }
281 spin_unlock_irqrestore(&eda->lock, flags);
282 return result;
283}
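/*
 * Editor's sketch (hypothetical, not part of the original source): a
 * callback matching the (*function)(wlp, entry, priv) call made above,
 * counting how many cached neighbors are currently connected.  The
 * helper name and the use of "priv" as an int pointer are made up; the
 * WLP_WSS_CONNECTED constant is assumed from the state strings below.
 *
 *	static int wlp_eda_count_connected(struct wlp *wlp,
 *					   struct wlp_eda_node *entry,
 *					   void *priv)
 *	{
 *		int *count = priv;
 *
 *		if (entry->state == WLP_WSS_CONNECTED)
 *			(*count)++;
 *		return 0;
 *	}
 *
 * Returning a negative value from the callback stops the iteration, as
 * the loop in wlp_eda_for_each() shows.
 */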
284
285/*
286 * Execute function for single element in the cache (return dev addr)
287 *
288 * @virt_addr: index into EDA cache used to determine which element to
289 * execute the function on
290 * @dev_addr: device address of element in cache will be returned using
291 * @dev_addr
292 * @function: function to execute on element of cache (must be atomic)
293 * @priv: private data of function
294 * @returns: result of function
295 *
296 * IMPORTANT: We are using a spinlock here: the function executed on the
297 * element has to be atomic.
298 */
299int wlp_eda_for_virtual(struct wlp_eda *eda,
300 const unsigned char virt_addr[ETH_ALEN],
301 struct uwb_dev_addr *dev_addr,
302 wlp_eda_for_each_f function,
303 void *priv)
304{
305 int result = 0;
306 struct wlp *wlp = container_of(eda, struct wlp, eda);
307 struct device *dev = &wlp->rc->uwb_dev.dev;
308 struct wlp_eda_node *itr;
309 unsigned long flags;
310 int found = 0;
311
312 spin_lock_irqsave(&eda->lock, flags);
313 list_for_each_entry(itr, &eda->cache, list_node) {
314 if (!memcmp(itr->virt_addr, virt_addr,
315 sizeof(itr->virt_addr))) {
316 d_printf(6, dev, "EDA: looking for "
317 "%02x:%02x:%02x:%02x:%02x:%02x hit %02x:%02x "
318 "wss %p tag 0x%02x state %u\n",
319 virt_addr[0], virt_addr[1],
320 virt_addr[2], virt_addr[3],
321 virt_addr[4], virt_addr[5],
322 itr->dev_addr.data[1],
323 itr->dev_addr.data[0], itr->wss,
324 itr->tag, itr->state);
325 result = (*function)(wlp, itr, priv);
326 *dev_addr = itr->dev_addr;
327 found = 1;
328 break;
329 } else
330 d_printf(6, dev, "EDA: looking for "
331 "%02x:%02x:%02x:%02x:%02x:%02x "
332 "against "
333 "%02x:%02x:%02x:%02x:%02x:%02x miss\n",
334 virt_addr[0], virt_addr[1],
335 virt_addr[2], virt_addr[3],
336 virt_addr[4], virt_addr[5],
337 itr->virt_addr[0], itr->virt_addr[1],
338 itr->virt_addr[2], itr->virt_addr[3],
339 itr->virt_addr[4], itr->virt_addr[5]);
340 }
341 if (!found) {
342 if (printk_ratelimit())
343 dev_err(dev, "EDA: Eth addr %02x:%02x:%02x"
344 ":%02x:%02x:%02x not found.\n",
345 virt_addr[0], virt_addr[1],
346 virt_addr[2], virt_addr[3],
347 virt_addr[4], virt_addr[5]);
348 result = -ENODEV;
349 }
350 spin_unlock_irqrestore(&eda->lock, flags);
351 return result;
352}
353
354static const char *__wlp_wss_connect_state[] = { "WLP_WSS_UNCONNECTED",
355 "WLP_WSS_CONNECTED",
356 "WLP_WSS_CONNECT_FAILED",
357};
358
359static const char *wlp_wss_connect_state_str(unsigned id)
360{
361 if (id >= ARRAY_SIZE(__wlp_wss_connect_state))
362 return "unknown WSS connection state";
363 return __wlp_wss_connect_state[id];
364}
365
366/*
367 * View EDA cache from user space
368 *
369 * A debugging feature to give user visibility into the EDA cache. Also
370 * used to display members of WSS to user (called from wlp_wss_members_show())
371 */
372ssize_t wlp_eda_show(struct wlp *wlp, char *buf)
373{
374 ssize_t result = 0;
375 struct wlp_eda_node *entry;
376 unsigned long flags;
377 struct wlp_eda *eda = &wlp->eda;
378 spin_lock_irqsave(&eda->lock, flags);
379 result = scnprintf(buf, PAGE_SIZE, "#eth_addr dev_addr wss_ptr "
380 "tag state virt_addr\n");
381 list_for_each_entry(entry, &eda->cache, list_node) {
382 result += scnprintf(buf + result, PAGE_SIZE - result,
383 "%02x:%02x:%02x:%02x:%02x:%02x %02x:%02x "
384 "%p 0x%02x %s "
385 "%02x:%02x:%02x:%02x:%02x:%02x\n",
386 entry->eth_addr[0], entry->eth_addr[1],
387 entry->eth_addr[2], entry->eth_addr[3],
388 entry->eth_addr[4], entry->eth_addr[5],
389 entry->dev_addr.data[1],
390 entry->dev_addr.data[0], entry->wss,
391 entry->tag,
392 wlp_wss_connect_state_str(entry->state),
393 entry->virt_addr[0], entry->virt_addr[1],
394 entry->virt_addr[2], entry->virt_addr[3],
395 entry->virt_addr[4], entry->virt_addr[5]);
396 if (result >= PAGE_SIZE)
397 break;
398 }
399 spin_unlock_irqrestore(&eda->lock, flags);
400 return result;
401}
402EXPORT_SYMBOL_GPL(wlp_eda_show);
403
404/*
405 * Add new EDA cache entry based on user input in sysfs
406 *
407 * Should only be used for debugging.
408 *
409 * The WSS is assumed to be the only WSS supported. This needs to be
410 * redesigned when we support more than one WSS.
411 */
412ssize_t wlp_eda_store(struct wlp *wlp, const char *buf, size_t size)
413{
414 ssize_t result;
415 struct wlp_eda *eda = &wlp->eda;
416 u8 eth_addr[6];
417 struct uwb_dev_addr dev_addr;
418 u8 tag;
419 unsigned state;
420
421 result = sscanf(buf, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx "
422 "%02hhx:%02hhx %02hhx %u\n",
423 &eth_addr[0], &eth_addr[1],
424 &eth_addr[2], &eth_addr[3],
425 &eth_addr[4], &eth_addr[5],
426 &dev_addr.data[1], &dev_addr.data[0], &tag, &state);
427 switch (result) {
428 case 6: /* no dev addr specified -- remove entry NOT IMPLEMENTED */
429 /*result = wlp_eda_rm(eda, eth_addr, &dev_addr);*/
430 result = -ENOSYS;
431 break;
432 case 10:
433 state = state >= 1 ? 1 : 0;
434 result = wlp_eda_create_node(eda, eth_addr, &dev_addr);
435 if (result < 0 && result != -EEXIST)
436 goto error;
437 /* Set virtual addr to be same as MAC */
438 result = wlp_eda_update_node(eda, &dev_addr, &wlp->wss,
439 eth_addr, tag, state);
440 if (result < 0)
441 goto error;
442 break;
443 default: /* bad format */
444 result = -EINVAL;
445 }
446error:
447 return result < 0 ? result : size;
448}
449EXPORT_SYMBOL_GPL(wlp_eda_store);
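/*
 * Editor's note (illustrative, not from the original source): the store
 * format parsed by the sscanf() above is
 *
 *	<eth addr> <dev addr> <tag> <state>
 *
 * so a debugging write could look like
 *
 *	00:14:a5:3d:0f:12 34:12 01 1
 *
 * where everything but <state> is hexadecimal, the device address is
 * written data[1]:data[0], and <state> is clamped to 0 (unconnected) or
 * 1 (connected).  The address, tag and state values here are made up.
 */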
diff --git a/drivers/uwb/wlp/messages.c b/drivers/uwb/wlp/messages.c
new file mode 100644
index 000000000000..a64cb8241713
--- /dev/null
+++ b/drivers/uwb/wlp/messages.c
@@ -0,0 +1,1946 @@
1/*
2 * WiMedia Logical Link Control Protocol (WLP)
3 * Message construction and parsing
4 *
5 * Copyright (C) 2007 Intel Corporation
6 * Reinette Chatre <reinette.chatre@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * FIXME: docs
24 */
25
26#include <linux/wlp.h>
27#define D_LOCAL 6
28#include <linux/uwb/debug.h>
29#include "wlp-internal.h"
30
31static
32const char *__wlp_assoc_frame[] = {
33 [WLP_ASSOC_D1] = "WLP_ASSOC_D1",
34 [WLP_ASSOC_D2] = "WLP_ASSOC_D2",
35 [WLP_ASSOC_M1] = "WLP_ASSOC_M1",
36 [WLP_ASSOC_M2] = "WLP_ASSOC_M2",
37 [WLP_ASSOC_M3] = "WLP_ASSOC_M3",
38 [WLP_ASSOC_M4] = "WLP_ASSOC_M4",
39 [WLP_ASSOC_M5] = "WLP_ASSOC_M5",
40 [WLP_ASSOC_M6] = "WLP_ASSOC_M6",
41 [WLP_ASSOC_M7] = "WLP_ASSOC_M7",
42 [WLP_ASSOC_M8] = "WLP_ASSOC_M8",
43 [WLP_ASSOC_F0] = "WLP_ASSOC_F0",
44 [WLP_ASSOC_E1] = "WLP_ASSOC_E1",
45 [WLP_ASSOC_E2] = "WLP_ASSOC_E2",
46 [WLP_ASSOC_C1] = "WLP_ASSOC_C1",
47 [WLP_ASSOC_C2] = "WLP_ASSOC_C2",
48 [WLP_ASSOC_C3] = "WLP_ASSOC_C3",
49 [WLP_ASSOC_C4] = "WLP_ASSOC_C4",
50};
51
52static const char *wlp_assoc_frame_str(unsigned id)
53{
54 if (id >= ARRAY_SIZE(__wlp_assoc_frame))
55 return "unknown association frame";
56 return __wlp_assoc_frame[id];
57}
58
59static const char *__wlp_assc_error[] = {
60 "none",
61 "Authenticator Failure",
62 "Rogue activity suspected",
63 "Device busy",
64 "Setup Locked",
65 "Registrar not ready",
66 "Invalid WSS selection",
67 "Message timeout",
68 "Enrollment session timeout",
69 "Device password invalid",
70 "Unsupported version",
71 "Internal error",
72 "Undefined error",
73 "Numeric comparison failure",
74 "Waiting for user input",
75};
76
77static const char *wlp_assc_error_str(unsigned id)
78{
79 if (id >= ARRAY_SIZE(__wlp_assc_error))
80 return "unknown WLP association error";
81 return __wlp_assc_error[id];
82}
83
84static inline void wlp_set_attr_hdr(struct wlp_attr_hdr *hdr, unsigned type,
85 size_t len)
86{
87 hdr->type = cpu_to_le16(type);
88 hdr->length = cpu_to_le16(len);
89}
90
91/*
92 * Populate fields of a constant sized attribute
93 *
94 * @returns: total size of attribute including size of new value
95 *
96 * We have two instances of this function (wlp_pset and wlp_set): one takes
97 * the value as a parameter, the other takes a pointer to the value as
98 * a parameter. They thus only differ in how the value is assigned to the
99 * attribute.
100 *
101 * We use sizeof(*attr) - sizeof(struct wlp_attr_hdr) instead of
102 * sizeof(type) to be able to use this same code for the structures that
103 * contain 8bit enum values and be able to deal with pointer types.
104 */
105#define wlp_set(type, type_code, name) \
106static size_t wlp_set_##name(struct wlp_attr_##name *attr, type value) \
107{ \
108 d_fnstart(6, NULL, "(attribute %p)\n", attr); \
109 wlp_set_attr_hdr(&attr->hdr, type_code, \
110 sizeof(*attr) - sizeof(struct wlp_attr_hdr)); \
111 attr->name = value; \
112 d_dump(6, NULL, attr, sizeof(*attr)); \
113 d_fnend(6, NULL, "(attribute %p)\n", attr); \
114 return sizeof(*attr); \
115}
116
117#define wlp_pset(type, type_code, name) \
118static size_t wlp_set_##name(struct wlp_attr_##name *attr, type value) \
119{ \
120 d_fnstart(6, NULL, "(attribute %p)\n", attr); \
121 wlp_set_attr_hdr(&attr->hdr, type_code, \
122 sizeof(*attr) - sizeof(struct wlp_attr_hdr)); \
123 attr->name = *value; \
124 d_dump(6, NULL, attr, sizeof(*attr)); \
125 d_fnend(6, NULL, "(attribute %p)\n", attr); \
126 return sizeof(*attr); \
127}
128
129/**
130 * Populate fields of a variable attribute
131 *
132 * @returns: total size of attribute including size of new value
133 *
134 * Provided with a pointer to the memory area reserved for the
135 * attribute structure, the field is populated with the value. The
136 * reserved memory has to contain enough space for the value.
137 */
138#define wlp_vset(type, type_code, name) \
139static size_t wlp_set_##name(struct wlp_attr_##name *attr, type value, \
140 size_t len) \
141{ \
142 d_fnstart(6, NULL, "(attribute %p)\n", attr); \
143 wlp_set_attr_hdr(&attr->hdr, type_code, len); \
144 memcpy(attr->name, value, len); \
145 d_dump(6, NULL, attr, sizeof(*attr) + len); \
146 d_fnend(6, NULL, "(attribute %p)\n", attr); \
147 return sizeof(*attr) + len; \
148}
149
150wlp_vset(char *, WLP_ATTR_DEV_NAME, dev_name)
151wlp_vset(char *, WLP_ATTR_MANUF, manufacturer)
152wlp_set(enum wlp_assoc_type, WLP_ATTR_MSG_TYPE, msg_type)
153wlp_vset(char *, WLP_ATTR_MODEL_NAME, model_name)
154wlp_vset(char *, WLP_ATTR_MODEL_NR, model_nr)
155wlp_vset(char *, WLP_ATTR_SERIAL, serial)
156wlp_vset(char *, WLP_ATTR_WSS_NAME, wss_name)
157wlp_pset(struct wlp_uuid *, WLP_ATTR_UUID_E, uuid_e)
158wlp_pset(struct wlp_uuid *, WLP_ATTR_UUID_R, uuid_r)
159wlp_pset(struct wlp_uuid *, WLP_ATTR_WSSID, wssid)
160wlp_pset(struct wlp_dev_type *, WLP_ATTR_PRI_DEV_TYPE, prim_dev_type)
161/*wlp_pset(struct wlp_dev_type *, WLP_ATTR_SEC_DEV_TYPE, sec_dev_type)*/
162wlp_set(u8, WLP_ATTR_WLP_VER, version)
163wlp_set(enum wlp_assc_error, WLP_ATTR_WLP_ASSC_ERR, wlp_assc_err)
164wlp_set(enum wlp_wss_sel_mthd, WLP_ATTR_WSS_SEL_MTHD, wss_sel_mthd)
165wlp_set(u8, WLP_ATTR_ACC_ENRL, accept_enrl)
166wlp_set(u8, WLP_ATTR_WSS_SEC_STAT, wss_sec_status)
167wlp_pset(struct uwb_mac_addr *, WLP_ATTR_WSS_BCAST, wss_bcast)
168wlp_pset(struct wlp_nonce *, WLP_ATTR_ENRL_NONCE, enonce)
169wlp_pset(struct wlp_nonce *, WLP_ATTR_REG_NONCE, rnonce)
170wlp_set(u8, WLP_ATTR_WSS_TAG, wss_tag)
171wlp_pset(struct uwb_mac_addr *, WLP_ATTR_WSS_VIRT, wss_virt)
172
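/*
 * Editor's sketch (not in the original source): roughly what the
 * wlp_set() macro above expands to for the "version" attribute, with
 * the d_fnstart()/d_dump()/d_fnend() debug statements left out.  The
 * struct wlp_attr_version layout is assumed from the macro body rather
 * than quoted from linux/wlp.h.
 *
 *	static size_t wlp_set_version(struct wlp_attr_version *attr, u8 value)
 *	{
 *		wlp_set_attr_hdr(&attr->hdr, WLP_ATTR_WLP_VER,
 *				 sizeof(*attr) - sizeof(struct wlp_attr_hdr));
 *		attr->version = value;
 *		return sizeof(*attr);
 *	}
 *
 * The wlp_pset() variant differs only in assigning "*value", and
 * wlp_vset() in taking an explicit length and using memcpy().
 */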
173/**
174 * Fill in the WSS information attributes
175 *
176 * We currently only support one WSS, and this is assumed in this function
177 * that can populate only one WSS information attribute.
178 */
179static size_t wlp_set_wss_info(struct wlp_attr_wss_info *attr,
180 struct wlp_wss *wss)
181{
182 size_t datalen;
183 void *ptr = attr->wss_info;
184 size_t used = sizeof(*attr);
185 d_fnstart(6, NULL, "(attribute %p)\n", attr);
186 datalen = sizeof(struct wlp_wss_info) + strlen(wss->name);
187 wlp_set_attr_hdr(&attr->hdr, WLP_ATTR_WSS_INFO, datalen);
188 used = wlp_set_wssid(ptr, &wss->wssid);
189 used += wlp_set_wss_name(ptr + used, wss->name, strlen(wss->name));
190 used += wlp_set_accept_enrl(ptr + used, wss->accept_enroll);
191 used += wlp_set_wss_sec_status(ptr + used, wss->secure_status);
192 used += wlp_set_wss_bcast(ptr + used, &wss->bcast);
193 d_dump(6, NULL, attr, sizeof(*attr) + datalen);
194 d_fnend(6, NULL, "(attribute %p, used %d)\n",
195 attr, (int)(sizeof(*attr) + used));
196 return sizeof(*attr) + used;
197}
198
199/**
200 * Verify attribute header
201 *
202 * @hdr: Pointer to attribute header that will be verified.
203 * @type: Expected attribute type.
204 * @len: Expected length of attribute value (excluding header).
205 *
206 * Most attribute values have a known length even though they carry a
207 * length field. This knowledge can be used via this function to verify
208 * that the length field matches the expected value.
209 */
210static int wlp_check_attr_hdr(struct wlp *wlp, struct wlp_attr_hdr *hdr,
211 enum wlp_attr_type type, unsigned len)
212{
213 struct device *dev = &wlp->rc->uwb_dev.dev;
214
215 if (le16_to_cpu(hdr->type) != type) {
216 dev_err(dev, "WLP: unexpected header type. Expected "
217 "%u, got %u.\n", type, le16_to_cpu(hdr->type));
218 return -EINVAL;
219 }
220 if (le16_to_cpu(hdr->length) != len) {
221 dev_err(dev, "WLP: unexpected length in header. Expected "
222 "%u, got %u.\n", len, le16_to_cpu(hdr->length));
223 return -EINVAL;
224 }
225 return 0;
226}
227
228/**
229 * Check if header of WSS information attribute valid
230 *
231 * @returns: length of WSS attributes (value of length attribute field) if
232 * valid WSS information attribute found
233 * -ENODATA if no WSS information attribute found
234 * -EIO if some other error occurred
235 *
236 * The WSS information attribute is optional. The function will be provided
237 * with a pointer to data that could _potentially_ be a WSS information
238 * attribute. If a valid WSS information attribute is found it will return
239 * the length of its value, if no WSS information attribute is found it
240 * will return -ENODATA, and another error will be returned if it is a WSS
241 * information attribute but some parsing failure occurred.
242 */
243static int wlp_check_wss_info_attr_hdr(struct wlp *wlp,
244 struct wlp_attr_hdr *hdr, size_t buflen)
245{
246 struct device *dev = &wlp->rc->uwb_dev.dev;
247 size_t len;
248 int result = 0;
249
250 if (buflen < sizeof(*hdr)) {
251 dev_err(dev, "WLP: Not enough space in buffer to parse"
252 " WSS information attribute header.\n");
253 result = -EIO;
254 goto out;
255 }
256 if (le16_to_cpu(hdr->type) != WLP_ATTR_WSS_INFO) {
257 /* WSS information is optional */
258 result = -ENODATA;
259 goto out;
260 }
261 len = le16_to_cpu(hdr->length);
262 if (buflen < sizeof(*hdr) + len) {
263 dev_err(dev, "WLP: Not enough space in buffer to parse "
264 "variable data. Got %d, expected %d.\n",
265 (int)buflen, (int)(sizeof(*hdr) + len));
266 result = -EIO;
267 goto out;
268 }
269 result = len;
270out:
271 return result;
272}
273
274
275/**
276 * Get value of attribute from fixed size attribute field.
277 *
278 * @attr: Pointer to attribute field.
279 * @value: Pointer to variable in which attribute value will be placed.
280 * @buflen: Size of buffer in which attribute field (including header)
281 * can be found.
282 * @returns: Amount of given buffer consumed by parsing for this attribute.
283 *
284 * The size and type of the value is known by the type of the attribute.
285 */
286#define wlp_get(type, type_code, name) \
287ssize_t wlp_get_##name(struct wlp *wlp, struct wlp_attr_##name *attr, \
288 type *value, ssize_t buflen) \
289{ \
290 struct device *dev = &wlp->rc->uwb_dev.dev; \
291 if (buflen < 0) \
292 return -EINVAL; \
293 if (buflen < sizeof(*attr)) { \
294 dev_err(dev, "WLP: Not enough space in buffer to parse" \
295 " attribute field. Need %d, received %zu\n", \
296 (int)sizeof(*attr), buflen); \
297 return -EIO; \
298 } \
299 if (wlp_check_attr_hdr(wlp, &attr->hdr, type_code, \
300 sizeof(attr->name)) < 0) { \
301 dev_err(dev, "WLP: Header verification failed. \n"); \
302 return -EINVAL; \
303 } \
304 *value = attr->name; \
305 return sizeof(*attr); \
306}
307
308#define wlp_get_sparse(type, type_code, name) \
309 static wlp_get(type, type_code, name)
310
311/**
312 * Get value of attribute from variable sized attribute field.
313 *
314 * @max: The maximum size of this attribute. This value is dictated by
315 * the maximum value from the WLP specification.
316 *
317 * @attr: Pointer to attribute field.
318 * @value: Pointer to variable that will contain the value. The memory
319 * must already have been allocated for this value.
320 * @buflen: Size of buffer in which attribute field (including header)
321 * can be found.
322 * @returns: Amount of given buffer consumed by parsing for this attribute.
323 */
324#define wlp_vget(type_val, type_code, name, max) \
325static ssize_t wlp_get_##name(struct wlp *wlp, \
326 struct wlp_attr_##name *attr, \
327 type_val *value, ssize_t buflen) \
328{ \
329 struct device *dev = &wlp->rc->uwb_dev.dev; \
330 size_t len; \
331 if (buflen < 0) \
332 return -EINVAL; \
333 if (buflen < sizeof(*attr)) { \
334 dev_err(dev, "WLP: Not enough space in buffer to parse" \
335 " header.\n"); \
336 return -EIO; \
337 } \
338 if (le16_to_cpu(attr->hdr.type) != type_code) { \
339 dev_err(dev, "WLP: Unexpected attribute type. Got %u, " \
340 "expected %u.\n", le16_to_cpu(attr->hdr.type), \
341 type_code); \
342 return -EINVAL; \
343 } \
344 len = le16_to_cpu(attr->hdr.length); \
345 if (len > max) { \
346 dev_err(dev, "WLP: Attribute larger than maximum " \
347 "allowed. Received %zu, max is %d.\n", len, \
348 (int)max); \
349 return -EFBIG; \
350 } \
351 if (buflen < sizeof(*attr) + len) { \
352 dev_err(dev, "WLP: Not enough space in buffer to parse "\
353 "variable data.\n"); \
354 return -EIO; \
355 } \
356 memcpy(value, (void *) attr + sizeof(*attr), len); \
357 return sizeof(*attr) + len; \
358}
359
360wlp_get(u8, WLP_ATTR_WLP_VER, version)
361wlp_get_sparse(enum wlp_wss_sel_mthd, WLP_ATTR_WSS_SEL_MTHD, wss_sel_mthd)
362wlp_get_sparse(struct wlp_dev_type, WLP_ATTR_PRI_DEV_TYPE, prim_dev_type)
363wlp_get_sparse(enum wlp_assc_error, WLP_ATTR_WLP_ASSC_ERR, wlp_assc_err)
364wlp_get_sparse(struct wlp_uuid, WLP_ATTR_UUID_E, uuid_e)
365wlp_get_sparse(struct wlp_uuid, WLP_ATTR_UUID_R, uuid_r)
366wlp_get(struct wlp_uuid, WLP_ATTR_WSSID, wssid)
367wlp_get_sparse(u8, WLP_ATTR_ACC_ENRL, accept_enrl)
368wlp_get_sparse(u8, WLP_ATTR_WSS_SEC_STAT, wss_sec_status)
369wlp_get_sparse(struct uwb_mac_addr, WLP_ATTR_WSS_BCAST, wss_bcast)
370wlp_get_sparse(u8, WLP_ATTR_WSS_TAG, wss_tag)
371wlp_get_sparse(struct uwb_mac_addr, WLP_ATTR_WSS_VIRT, wss_virt)
372wlp_get_sparse(struct wlp_nonce, WLP_ATTR_ENRL_NONCE, enonce)
373wlp_get_sparse(struct wlp_nonce, WLP_ATTR_REG_NONCE, rnonce)
374
375/* The buffers for the device info attributes can be found in the
376 * wlp_device_info struct. These buffers contain one byte more than the
377 * max allowed by the spec - this is done to be able to add the
378 * terminating \0 for user display. This terminating byte is not required
379 * in the actual attribute field (because it has a length field) so the
380 * maximum allowed for this value is one less than its size in the
381 * structure.
382 */
383wlp_vget(char, WLP_ATTR_WSS_NAME, wss_name,
384 FIELD_SIZEOF(struct wlp_wss, name) - 1)
385wlp_vget(char, WLP_ATTR_DEV_NAME, dev_name,
386 FIELD_SIZEOF(struct wlp_device_info, name) - 1)
387wlp_vget(char, WLP_ATTR_MANUF, manufacturer,
388 FIELD_SIZEOF(struct wlp_device_info, manufacturer) - 1)
389wlp_vget(char, WLP_ATTR_MODEL_NAME, model_name,
390 FIELD_SIZEOF(struct wlp_device_info, model_name) - 1)
391wlp_vget(char, WLP_ATTR_MODEL_NR, model_nr,
392 FIELD_SIZEOF(struct wlp_device_info, model_nr) - 1)
393wlp_vget(char, WLP_ATTR_SERIAL, serial,
394 FIELD_SIZEOF(struct wlp_device_info, serial) - 1)
395
396/**
397 * Retrieve WSS Name, Accept enroll, Secure status, Broadcast from WSS info
398 *
399 * @attr: pointer to WSS name attribute in WSS information attribute field
400 * @info: structure that will be populated with data from WSS information
401 * field (WSS name, Accept enroll, secure status, broadcast address)
402 * @buflen: size of buffer
403 *
404 * Although the WSSID attribute forms part of the WSS info attribute it is
405 * retrieved separately and stored in a different location.
406 */
407static ssize_t wlp_get_wss_info_attrs(struct wlp *wlp,
408 struct wlp_attr_hdr *attr,
409 struct wlp_wss_tmp_info *info,
410 ssize_t buflen)
411{
412 struct device *dev = &wlp->rc->uwb_dev.dev;
413 void *ptr = attr;
414 size_t used = 0;
415 ssize_t result = -EINVAL;
416
417 d_printf(6, dev, "WLP: WSS info: Retrieving WSS name\n");
418 result = wlp_get_wss_name(wlp, ptr, info->name, buflen);
419 if (result < 0) {
420 dev_err(dev, "WLP: unable to obtain WSS name from "
421 "WSS info in D2 message.\n");
422 goto error_parse;
423 }
424 used += result;
425 d_printf(6, dev, "WLP: WSS info: Retrieving accept enroll\n");
426 result = wlp_get_accept_enrl(wlp, ptr + used, &info->accept_enroll,
427 buflen - used);
428 if (result < 0) {
429 dev_err(dev, "WLP: unable to obtain accepting "
430 "enrollment from WSS info in D2 message.\n");
431 goto error_parse;
432 }
433 if (info->accept_enroll != 0 && info->accept_enroll != 1) {
434 dev_err(dev, "WLP: invalid value for accepting "
435 "enrollment in D2 message.\n");
436 result = -EINVAL;
437 goto error_parse;
438 }
439 used += result;
440 d_printf(6, dev, "WLP: WSS info: Retrieving secure status\n");
441 result = wlp_get_wss_sec_status(wlp, ptr + used, &info->sec_status,
442 buflen - used);
443 if (result < 0) {
444 dev_err(dev, "WLP: unable to obtain secure "
445 "status from WSS info in D2 message.\n");
446 goto error_parse;
447 }
448 if (info->sec_status != 0 && info->sec_status != 1) {
449 dev_err(dev, "WLP: invalid value for secure "
450 "status in D2 message.\n");
451 result = -EINVAL;
452 goto error_parse;
453 }
454 used += result;
455 d_printf(6, dev, "WLP: WSS info: Retrieving broadcast\n");
456 result = wlp_get_wss_bcast(wlp, ptr + used, &info->bcast,
457 buflen - used);
458 if (result < 0) {
459 dev_err(dev, "WLP: unable to obtain broadcast "
460 "address from WSS info in D2 message.\n");
461 goto error_parse;
462 }
463 used += result;
464 result = used;
465error_parse:
466 return result;
467}
468
469/**
470 * Create a new WSSID entry for the neighbor, allocate temporary storage
471 *
472 * Each neighbor can have many WSS active. We maintain a list of WSSIDs
473 * advertised by neighbor. During discovery we also cache information about
474 * these WSS in temporary storage.
475 *
476 * The temporary storage will be removed after it has been used (eg.
477 * displayed to user), the wssid element will be removed from the list when
478 * the neighbor is rediscovered or when it disappears.
479 */
480static struct wlp_wssid_e *wlp_create_wssid_e(struct wlp *wlp,
481 struct wlp_neighbor_e *neighbor)
482{
483 struct device *dev = &wlp->rc->uwb_dev.dev;
484 struct wlp_wssid_e *wssid_e;
485
486 wssid_e = kzalloc(sizeof(*wssid_e), GFP_KERNEL);
487 if (wssid_e == NULL) {
488 dev_err(dev, "WLP: unable to allocate memory "
489 "for WSS information.\n");
490 goto error_alloc;
491 }
492 wssid_e->info = kzalloc(sizeof(struct wlp_wss_tmp_info), GFP_KERNEL);
493 if (wssid_e->info == NULL) {
494 dev_err(dev, "WLP: unable to allocate memory "
495 "for temporary WSS information.\n");
496 kfree(wssid_e);
497 wssid_e = NULL;
498 goto error_alloc;
499 }
500 list_add(&wssid_e->node, &neighbor->wssid);
501error_alloc:
502 return wssid_e;
503}
504
505/**
506 * Parse WSS information attribute
507 *
508 * @attr: pointer to WSS information attribute header
509 * @buflen: size of buffer in which WSS information attribute appears
510 * @wssid: will place wssid from WSS info attribute in this location
511 * @wss_info: will place other information from WSS information attribute
512 * in this location
513 *
514 * memory for @wssid and @wss_info must be allocated when calling this
515 */
516static ssize_t wlp_get_wss_info(struct wlp *wlp, struct wlp_attr_wss_info *attr,
517 size_t buflen, struct wlp_uuid *wssid,
518 struct wlp_wss_tmp_info *wss_info)
519{
520 struct device *dev = &wlp->rc->uwb_dev.dev;
521 ssize_t result;
522 size_t len;
523 size_t used = 0;
524 void *ptr;
525
526 result = wlp_check_wss_info_attr_hdr(wlp, (struct wlp_attr_hdr *)attr,
527 buflen);
528 if (result < 0)
529 goto out;
530 len = result;
531 used = sizeof(*attr);
532 ptr = attr;
533 d_printf(6, dev, "WLP: WSS info: Retrieving WSSID\n");
534 result = wlp_get_wssid(wlp, ptr + used, wssid, buflen - used);
535 if (result < 0) {
536 dev_err(dev, "WLP: unable to obtain WSSID from WSS info.\n");
537 goto out;
538 }
539 used += result;
540 result = wlp_get_wss_info_attrs(wlp, ptr + used, wss_info,
541 buflen - used);
542 if (result < 0) {
543 dev_err(dev, "WLP: unable to obtain WSS information "
544 "from WSS information attributes. \n");
545 goto out;
546 }
547 used += result;
548 if (len + sizeof(*attr) != used) {
549 dev_err(dev, "WLP: Amount of data parsed does not "
550 "match length field. Parsed %zu, length "
551 "field %zu. \n", used, len);
552 result = -EINVAL;
553 goto out;
554 }
555 result = used;
556 d_printf(6, dev, "WLP: Successfully parsed WLP information "
557 "attribute. used %zu bytes\n", used);
558out:
559 return result;
560}
561
562/**
563 * Retrieve WSS info from association frame
564 *
565 * @attr: pointer to WSS information attribute
566 * @neighbor: ptr to neighbor being discovered, NULL if enrollment in
567 * progress
568 * @wss: ptr to WSS being enrolled in, NULL if discovery in progress
569 * @buflen: size of buffer in which WSS information appears
570 *
571 * The WSS information attribute appears in the D2 association message.
572 * This message is used in two ways: to discover all neighbors or to enroll
573 * into a WSS activated by a neighbor. During discovery we only want to
574 * store the WSS info in a cache, to be deleted right after it has been
575 * used (eg. displayed to the user). During enrollment we store the WSS
576 * information for the lifetime of enrollment.
577 *
578 * During discovery we are interested in all WSS information, during
579 * enrollment we are only interested in the WSS being enrolled in. Even so,
580 * when in enrollment we keep parsing the message after finding the WSS of
581 * interest; this simplifies the calling routine in that it can be sure
582 * that all WSS information attributes have been parsed out of the message.
583 *
584 * Association frames are processed with nbmutex held; the list access is safe.
585 */
586static ssize_t wlp_get_all_wss_info(struct wlp *wlp,
587 struct wlp_attr_wss_info *attr,
588 struct wlp_neighbor_e *neighbor,
589 struct wlp_wss *wss, ssize_t buflen)
590{
591 struct device *dev = &wlp->rc->uwb_dev.dev;
592 size_t used = 0;
593 ssize_t result = -EINVAL;
594 struct wlp_attr_wss_info *cur;
595 struct wlp_uuid wssid;
596 struct wlp_wss_tmp_info wss_info;
597 unsigned enroll; /* 0 - discovery to cache, 1 - enrollment */
598 struct wlp_wssid_e *wssid_e;
599 char buf[WLP_WSS_UUID_STRSIZE];
600
601 d_fnstart(6, dev, "wlp %p, attr %p, neighbor %p, wss %p, buflen %d \n",
602 wlp, attr, neighbor, wss, (int)buflen);
603 if (buflen < 0)
604 goto out;
605
606 if (neighbor != NULL && wss == NULL)
607 enroll = 0; /* discovery */
608 else if (wss != NULL && neighbor == NULL)
609 enroll = 1; /* enrollment */
610 else
611 goto out;
612
613 cur = attr;
614 while (buflen - used > 0) {
615 memset(&wss_info, 0, sizeof(wss_info));
616 cur = (void *)cur + used;
617 result = wlp_get_wss_info(wlp, cur, buflen - used, &wssid,
618 &wss_info);
619 if (result == -ENODATA) {
620 result = used;
621 goto out;
622 } else if (result < 0) {
623 dev_err(dev, "WLP: Unable to parse WSS information "
624 "from WSS information attribute. \n");
625 result = -EINVAL;
626 goto error_parse;
627 }
628 if (enroll && !memcmp(&wssid, &wss->wssid, sizeof(wssid))) {
629 if (wss_info.accept_enroll != 1) {
630 dev_err(dev, "WLP: Requested WSS does "
631 "not accept enrollment.\n");
632 result = -EINVAL;
633 goto out;
634 }
635 memcpy(wss->name, wss_info.name, sizeof(wss->name));
636 wss->bcast = wss_info.bcast;
637 wss->secure_status = wss_info.sec_status;
638 wss->accept_enroll = wss_info.accept_enroll;
639 wss->state = WLP_WSS_STATE_PART_ENROLLED;
640 wlp_wss_uuid_print(buf, sizeof(buf), &wssid);
641 d_printf(2, dev, "WLP: Found WSS %s. Enrolling.\n",
642 buf);
643 } else {
644 wssid_e = wlp_create_wssid_e(wlp, neighbor);
645 if (wssid_e == NULL) {
646 dev_err(dev, "WLP: Cannot create new WSSID "
647 "entry for neighbor %02x:%02x.\n",
648 neighbor->uwb_dev->dev_addr.data[1],
649 neighbor->uwb_dev->dev_addr.data[0]);
650 result = -ENOMEM;
651 goto out;
652 }
653 wssid_e->wssid = wssid;
654 *wssid_e->info = wss_info;
655 }
656 used += result;
657 }
658 result = used;
659error_parse:
660 if (result < 0 && !enroll) /* this was a discovery */
661 wlp_remove_neighbor_tmp_info(neighbor);
662out:
663 d_fnend(6, dev, "wlp %p, attr %p, neighbor %p, wss %p, buflen %d, "
664 "result %d \n", wlp, attr, neighbor, wss, (int)buflen,
665 (int)result);
666 return result;
667
668}
669
670/**
671 * Parse WSS information attributes into cache for discovery
672 *
673 * @attr: the first WSS information attribute in message
674 * @neighbor: the neighbor whose cache will be populated
675 * @buflen: size of the input buffer
676 */
677static ssize_t wlp_get_wss_info_to_cache(struct wlp *wlp,
678 struct wlp_attr_wss_info *attr,
679 struct wlp_neighbor_e *neighbor,
680 ssize_t buflen)
681{
682 return wlp_get_all_wss_info(wlp, attr, neighbor, NULL, buflen);
683}
684
685/**
686 * Parse WSS information attributes into WSS struct for enrollment
687 *
688 * @attr: the first WSS information attribute in message
689 * @wss: the WSS that will be enrolled
690 * @buflen: size of the input buffer
691 */
692static ssize_t wlp_get_wss_info_to_enroll(struct wlp *wlp,
693 struct wlp_attr_wss_info *attr,
694 struct wlp_wss *wss, ssize_t buflen)
695{
696 return wlp_get_all_wss_info(wlp, attr, NULL, wss, buflen);
697}
698
699/**
700 * Construct a D1 association frame
701 *
702 * We use the radio control functions to determine the values of the device
703 * properties. These are of variable length and the total space needed is
704 * tallied first before we start constructing the message. The radio
705 * control functions return strings that are terminated with \0. This
706 * character should not be included in the message (there is a length field
707 * accompanying it in the attribute).
708 */
709static int wlp_build_assoc_d1(struct wlp *wlp, struct wlp_wss *wss,
710 struct sk_buff **skb)
711{
712
713 struct device *dev = &wlp->rc->uwb_dev.dev;
714 int result = 0;
715 struct wlp_device_info *info;
716 size_t used = 0;
717 struct wlp_frame_assoc *_d1;
718 struct sk_buff *_skb;
719 void *d1_itr;
720
721 d_fnstart(6, dev, "wlp %p\n", wlp);
722 if (wlp->dev_info == NULL) {
723 result = __wlp_setup_device_info(wlp);
724 if (result < 0) {
725 dev_err(dev, "WLP: Unable to setup device "
726 "information for D1 message.\n");
727 goto error;
728 }
729 }
730 info = wlp->dev_info;
731 d_printf(6, dev, "Local properties:\n"
732 "Device name (%d bytes): %s\n"
733 "Model name (%d bytes): %s\n"
734 "Manufacturer (%d bytes): %s\n"
735 "Model number (%d bytes): %s\n"
736 "Serial number (%d bytes): %s\n"
737 "Primary device type: \n"
738 " Category: %d \n"
739 " OUI: %02x:%02x:%02x \n"
740 " OUI Subdivision: %u \n",
741 (int)strlen(info->name), info->name,
742 (int)strlen(info->model_name), info->model_name,
743 (int)strlen(info->manufacturer), info->manufacturer,
744 (int)strlen(info->model_nr), info->model_nr,
745 (int)strlen(info->serial), info->serial,
746 info->prim_dev_type.category,
747 info->prim_dev_type.OUI[0], info->prim_dev_type.OUI[1],
748 info->prim_dev_type.OUI[2], info->prim_dev_type.OUIsubdiv);
749 _skb = dev_alloc_skb(sizeof(*_d1)
750 + sizeof(struct wlp_attr_uuid_e)
751 + sizeof(struct wlp_attr_wss_sel_mthd)
752 + sizeof(struct wlp_attr_dev_name)
753 + strlen(info->name)
754 + sizeof(struct wlp_attr_manufacturer)
755 + strlen(info->manufacturer)
756 + sizeof(struct wlp_attr_model_name)
757 + strlen(info->model_name)
758 + sizeof(struct wlp_attr_model_nr)
759 + strlen(info->model_nr)
760 + sizeof(struct wlp_attr_serial)
761 + strlen(info->serial)
762 + sizeof(struct wlp_attr_prim_dev_type)
763 + sizeof(struct wlp_attr_wlp_assc_err));
764 if (_skb == NULL) {
765 dev_err(dev, "WLP: Cannot allocate memory for association "
766 "message.\n");
767 result = -ENOMEM;
768 goto error;
769 }
770 _d1 = (void *) _skb->data;
771 d_printf(6, dev, "D1 starts at %p \n", _d1);
772 _d1->hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID);
773 _d1->hdr.type = WLP_FRAME_ASSOCIATION;
774 _d1->type = WLP_ASSOC_D1;
775
776 wlp_set_version(&_d1->version, WLP_VERSION);
777 wlp_set_msg_type(&_d1->msg_type, WLP_ASSOC_D1);
778 d1_itr = _d1->attr;
779 used = wlp_set_uuid_e(d1_itr, &wlp->uuid);
780 used += wlp_set_wss_sel_mthd(d1_itr + used, WLP_WSS_REG_SELECT);
781 used += wlp_set_dev_name(d1_itr + used, info->name,
782 strlen(info->name));
783 used += wlp_set_manufacturer(d1_itr + used, info->manufacturer,
784 strlen(info->manufacturer));
785 used += wlp_set_model_name(d1_itr + used, info->model_name,
786 strlen(info->model_name));
787 used += wlp_set_model_nr(d1_itr + used, info->model_nr,
788 strlen(info->model_nr));
789 used += wlp_set_serial(d1_itr + used, info->serial,
790 strlen(info->serial));
791 used += wlp_set_prim_dev_type(d1_itr + used, &info->prim_dev_type);
792 used += wlp_set_wlp_assc_err(d1_itr + used, WLP_ASSOC_ERROR_NONE);
793 skb_put(_skb, sizeof(*_d1) + used);
794 d_printf(6, dev, "D1 message:\n");
795 d_dump(6, dev, _d1, sizeof(*_d1)
796 + sizeof(struct wlp_attr_uuid_e)
797 + sizeof(struct wlp_attr_wss_sel_mthd)
798 + sizeof(struct wlp_attr_dev_name)
799 + strlen(info->name)
800 + sizeof(struct wlp_attr_manufacturer)
801 + strlen(info->manufacturer)
802 + sizeof(struct wlp_attr_model_name)
803 + strlen(info->model_name)
804 + sizeof(struct wlp_attr_model_nr)
805 + strlen(info->model_nr)
806 + sizeof(struct wlp_attr_serial)
807 + strlen(info->serial)
808 + sizeof(struct wlp_attr_prim_dev_type)
809 + sizeof(struct wlp_attr_wlp_assc_err));
810 *skb = _skb;
811error:
812 d_fnend(6, dev, "wlp %p, result = %d\n", wlp, result);
813 return result;
814}
815
816/**
817 * Construct a D2 association frame
818 *
819 * We use the radio control functions to determine the values of the device
820 * properties. These are of variable length and the total space needed is
821 * tallied first before we start constructing the message. The radio
822 * control functions return strings that are terminated with \0. This
823 * character should not be included in the message (there is a length field
824 * accompanying it in the attribute).
825 */
826static
827int wlp_build_assoc_d2(struct wlp *wlp, struct wlp_wss *wss,
828 struct sk_buff **skb, struct wlp_uuid *uuid_e)
829{
830
831 struct device *dev = &wlp->rc->uwb_dev.dev;
832 int result = 0;
833 struct wlp_device_info *info;
834 size_t used = 0;
835 struct wlp_frame_assoc *_d2;
836 struct sk_buff *_skb;
837 void *d2_itr;
838 size_t mem_needed;
839
840 d_fnstart(6, dev, "wlp %p\n", wlp);
841 if (wlp->dev_info == NULL) {
842 result = __wlp_setup_device_info(wlp);
843 if (result < 0) {
844 dev_err(dev, "WLP: Unable to setup device "
845 "information for D2 message.\n");
846 goto error;
847 }
848 }
849 info = wlp->dev_info;
850 d_printf(6, dev, "Local properties:\n"
851 "Device name (%d bytes): %s\n"
852 "Model name (%d bytes): %s\n"
853 "Manufacturer (%d bytes): %s\n"
854 "Model number (%d bytes): %s\n"
855 "Serial number (%d bytes): %s\n"
856 "Primary device type: \n"
857 " Category: %d \n"
858 " OUI: %02x:%02x:%02x \n"
859 " OUI Subdivision: %u \n",
860 (int)strlen(info->name), info->name,
861 (int)strlen(info->model_name), info->model_name,
862 (int)strlen(info->manufacturer), info->manufacturer,
863 (int)strlen(info->model_nr), info->model_nr,
864 (int)strlen(info->serial), info->serial,
865 info->prim_dev_type.category,
866 info->prim_dev_type.OUI[0], info->prim_dev_type.OUI[1],
867 info->prim_dev_type.OUI[2], info->prim_dev_type.OUIsubdiv);
868 mem_needed = sizeof(*_d2)
869 + sizeof(struct wlp_attr_uuid_e)
870 + sizeof(struct wlp_attr_uuid_r)
871 + sizeof(struct wlp_attr_dev_name)
872 + strlen(info->name)
873 + sizeof(struct wlp_attr_manufacturer)
874 + strlen(info->manufacturer)
875 + sizeof(struct wlp_attr_model_name)
876 + strlen(info->model_name)
877 + sizeof(struct wlp_attr_model_nr)
878 + strlen(info->model_nr)
879 + sizeof(struct wlp_attr_serial)
880 + strlen(info->serial)
881 + sizeof(struct wlp_attr_prim_dev_type)
882 + sizeof(struct wlp_attr_wlp_assc_err);
883 if (wlp->wss.state >= WLP_WSS_STATE_ACTIVE)
884 mem_needed += sizeof(struct wlp_attr_wss_info)
885 + sizeof(struct wlp_wss_info)
886 + strlen(wlp->wss.name);
887 _skb = dev_alloc_skb(mem_needed);
888 if (_skb == NULL) {
889 dev_err(dev, "WLP: Cannot allocate memory for association "
890 "message.\n");
891 result = -ENOMEM;
892 goto error;
893 }
894 _d2 = (void *) _skb->data;
895 d_printf(6, dev, "D2 starts at %p \n", _d2);
896 _d2->hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID);
897 _d2->hdr.type = WLP_FRAME_ASSOCIATION;
898 _d2->type = WLP_ASSOC_D2;
899
900 wlp_set_version(&_d2->version, WLP_VERSION);
901 wlp_set_msg_type(&_d2->msg_type, WLP_ASSOC_D2);
902 d2_itr = _d2->attr;
903 used = wlp_set_uuid_e(d2_itr, uuid_e);
904 used += wlp_set_uuid_r(d2_itr + used, &wlp->uuid);
905 if (wlp->wss.state >= WLP_WSS_STATE_ACTIVE)
906 used += wlp_set_wss_info(d2_itr + used, &wlp->wss);
907 used += wlp_set_dev_name(d2_itr + used, info->name,
908 strlen(info->name));
909 used += wlp_set_manufacturer(d2_itr + used, info->manufacturer,
910 strlen(info->manufacturer));
911 used += wlp_set_model_name(d2_itr + used, info->model_name,
912 strlen(info->model_name));
913 used += wlp_set_model_nr(d2_itr + used, info->model_nr,
914 strlen(info->model_nr));
915 used += wlp_set_serial(d2_itr + used, info->serial,
916 strlen(info->serial));
917 used += wlp_set_prim_dev_type(d2_itr + used, &info->prim_dev_type);
918 used += wlp_set_wlp_assc_err(d2_itr + used, WLP_ASSOC_ERROR_NONE);
919 skb_put(_skb, sizeof(*_d2) + used);
920 d_printf(6, dev, "D2 message:\n");
921 d_dump(6, dev, _d2, mem_needed);
922 *skb = _skb;
923error:
924 d_fnend(6, dev, "wlp %p, result = %d\n", wlp, result);
925 return result;
926}
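/*
 * Illustrative sketch only (not part of this driver): the wlp_set_*()
 * helpers used by the D1/D2 builders above are assumed to write an
 * attribute header (type + length) followed by the value, and to return
 * the number of bytes consumed, roughly like:
 *
 *	size_t wlp_set_dev_name(void *ptr, const char *name, size_t len)
 *	{
 *		struct wlp_attr_dev_name *attr = ptr;
 *
 *		attr->hdr.type = cpu_to_le16(WLP_ATTR_DEV_NAME);
 *		attr->hdr.length = cpu_to_le16(len);
 *		memcpy(attr->value, name, len);	   (no trailing '\0')
 *		return sizeof(*attr) + len;
 *	}
 *
 * The field and constant names used here (hdr, value, WLP_ATTR_DEV_NAME)
 * are assumptions; only the "header plus value, return bytes used" pattern
 * is implied by the sizeof()/strlen() arithmetic above.
 */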
927
928/**
929 * Allocate memory for and populate fields of F0 association frame
930 *
931 * Currently (while focusing on insecure enrollment) we ignore the
932 * nonces that could be placed in the message. Only the error field is
933 * populated, using the value provided by the caller.
934 */
935static
936int wlp_build_assoc_f0(struct wlp *wlp, struct sk_buff **skb,
937 enum wlp_assc_error error)
938{
939 struct device *dev = &wlp->rc->uwb_dev.dev;
940 int result = -ENOMEM;
941 struct {
942 struct wlp_frame_assoc f0_hdr;
943 struct wlp_attr_enonce enonce;
944 struct wlp_attr_rnonce rnonce;
945 struct wlp_attr_wlp_assc_err assc_err;
946 } *f0;
947 struct sk_buff *_skb;
948 struct wlp_nonce tmp;
949
950 d_fnstart(6, dev, "wlp %p\n", wlp);
951 _skb = dev_alloc_skb(sizeof(*f0));
952 if (_skb == NULL) {
953 dev_err(dev, "WLP: Unable to allocate memory for F0 "
954 "association frame. \n");
955 goto error_alloc;
956 }
957 f0 = (void *) _skb->data;
958 d_printf(6, dev, "F0 starts at %p \n", f0);
959 f0->f0_hdr.hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID);
960 f0->f0_hdr.hdr.type = WLP_FRAME_ASSOCIATION;
961 f0->f0_hdr.type = WLP_ASSOC_F0;
962 wlp_set_version(&f0->f0_hdr.version, WLP_VERSION);
963 wlp_set_msg_type(&f0->f0_hdr.msg_type, WLP_ASSOC_F0);
964 memset(&tmp, 0, sizeof(tmp));
965 wlp_set_enonce(&f0->enonce, &tmp);
966 wlp_set_rnonce(&f0->rnonce, &tmp);
967 wlp_set_wlp_assc_err(&f0->assc_err, error);
968 skb_put(_skb, sizeof(*f0));
969 *skb = _skb;
970 result = 0;
971error_alloc:
972 d_fnend(6, dev, "wlp %p, result %d \n", wlp, result);
973 return result;
974}
975
976/**
977 * Parse F0 frame
978 *
979 * We just retrieve the values and print them as an error to the user.
980 * The calling function already knows an error occurred (F0 indicates an
981 * error), so we only parse the content as debug information for higher layers.
982 */
983int wlp_parse_f0(struct wlp *wlp, struct sk_buff *skb)
984{
985 struct device *dev = &wlp->rc->uwb_dev.dev;
986 struct wlp_frame_assoc *f0 = (void *) skb->data;
987 void *ptr = skb->data;
988 size_t len = skb->len;
989 size_t used;
990 ssize_t result;
991 struct wlp_nonce enonce, rnonce;
992 enum wlp_assc_error assc_err;
993 char enonce_buf[WLP_WSS_NONCE_STRSIZE];
994 char rnonce_buf[WLP_WSS_NONCE_STRSIZE];
995
996 used = sizeof(*f0);
997 result = wlp_get_enonce(wlp, ptr + used, &enonce, len - used);
998 if (result < 0) {
999 dev_err(dev, "WLP: unable to obtain Enrollee nonce "
1000 "attribute from F0 message.\n");
1001 goto error_parse;
1002 }
1003 used += result;
1004 result = wlp_get_rnonce(wlp, ptr + used, &rnonce, len - used);
1005 if (result < 0) {
1006 dev_err(dev, "WLP: unable to obtain Registrar nonce "
1007 "attribute from F0 message.\n");
1008 goto error_parse;
1009 }
1010 used += result;
1011 result = wlp_get_wlp_assc_err(wlp, ptr + used, &assc_err, len - used);
1012 if (result < 0) {
1013 dev_err(dev, "WLP: unable to obtain WLP Association error "
1014 "attribute from F0 message.\n");
1015 goto error_parse;
1016 }
1017 wlp_wss_nonce_print(enonce_buf, sizeof(enonce_buf), &enonce);
1018 wlp_wss_nonce_print(rnonce_buf, sizeof(rnonce_buf), &rnonce);
1019 dev_err(dev, "WLP: Received F0 error frame from neighbor. Enrollee "
1020 "nonce: %s, Registrar nonce: %s, WLP Association error: %s.\n",
1021 enonce_buf, rnonce_buf, wlp_assc_error_str(assc_err));
1022 result = 0;
1023error_parse:
1024 return result;
1025}
1026
1027/**
1028 * Retrieve variable device information from association message
1029 *
1030 * The device information parsed is not required in any message. This
1031 * routine will thus not fail if an attribute is not present.
1032 * The attributes are expected in a certain order, even if all are not
1033 * present. The "attribute type" value is used to ensure the attributes
1034 * are parsed in the correct order.
1035 *
1036 * If an error is encountered during parsing, the function will return an
1037 * error code; when this happens, the given device_info structure may be
1038 * partially filled.
1039 */
1040static
1041int wlp_get_variable_info(struct wlp *wlp, void *data,
1042 struct wlp_device_info *dev_info, ssize_t len)
1043{
1044 struct device *dev = &wlp->rc->uwb_dev.dev;
1045 size_t used = 0;
1046 struct wlp_attr_hdr *hdr;
1047 ssize_t result = 0;
1048 unsigned last = 0;
1049
1050 while (len - used > 0) {
1051 if (len - used < sizeof(*hdr)) {
1052 dev_err(dev, "WLP: Partial data in frame, cannot "
1053 "parse. \n");
1054 goto error_parse;
1055 }
1056 hdr = data + used;
1057 switch (le16_to_cpu(hdr->type)) {
1058 case WLP_ATTR_MANUF:
1059 if (last >= WLP_ATTR_MANUF) {
1060 dev_err(dev, "WLP: Incorrect order of "
1061 "attribute values in D1 msg.\n");
1062 goto error_parse;
1063 }
1064 result = wlp_get_manufacturer(wlp, data + used,
1065 dev_info->manufacturer,
1066 len - used);
1067 if (result < 0) {
1068 dev_err(dev, "WLP: Unable to obtain "
1069 "Manufacturer attribute from D1 "
1070 "message.\n");
1071 goto error_parse;
1072 }
1073 last = WLP_ATTR_MANUF;
1074 used += result;
1075 break;
1076 case WLP_ATTR_MODEL_NAME:
1077 if (last >= WLP_ATTR_MODEL_NAME) {
1078 dev_err(dev, "WLP: Incorrect order of "
1079 "attribute values in D1 msg.\n");
1080 goto error_parse;
1081 }
1082 result = wlp_get_model_name(wlp, data + used,
1083 dev_info->model_name,
1084 len - used);
1085 if (result < 0) {
1086 dev_err(dev, "WLP: Unable to obtain Model "
1087 "name attribute from D1 message.\n");
1088 goto error_parse;
1089 }
1090 last = WLP_ATTR_MODEL_NAME;
1091 used += result;
1092 break;
1093 case WLP_ATTR_MODEL_NR:
1094 if (last >= WLP_ATTR_MODEL_NR) {
1095 dev_err(dev, "WLP: Incorrect order of "
1096 "attribute values in D1 msg.\n");
1097 goto error_parse;
1098 }
1099 result = wlp_get_model_nr(wlp, data + used,
1100 dev_info->model_nr,
1101 len - used);
1102 if (result < 0) {
1103 dev_err(dev, "WLP: Unable to obtain Model "
1104 "number attribute from D1 message.\n");
1105 goto error_parse;
1106 }
1107 last = WLP_ATTR_MODEL_NR;
1108 used += result;
1109 break;
1110 case WLP_ATTR_SERIAL:
1111 if (last >= WLP_ATTR_SERIAL) {
1112 dev_err(dev, "WLP: Incorrect order of "
1113 "attribute values in D1 msg.\n");
1114 goto error_parse;
1115 }
1116 result = wlp_get_serial(wlp, data + used,
1117 dev_info->serial, len - used);
1118 if (result < 0) {
1119 dev_err(dev, "WLP: Unable to obtain Serial "
1120 "number attribute from D1 message.\n");
1121 goto error_parse;
1122 }
1123 last = WLP_ATTR_SERIAL;
1124 used += result;
1125 break;
1126 case WLP_ATTR_PRI_DEV_TYPE:
1127 if (last >= WLP_ATTR_PRI_DEV_TYPE) {
1128 dev_err(dev, "WLP: Incorrect order of "
1129 "attribute values in D1 msg.\n");
1130 goto error_parse;
1131 }
1132 result = wlp_get_prim_dev_type(wlp, data + used,
1133 &dev_info->prim_dev_type,
1134 len - used);
1135 if (result < 0) {
1136 dev_err(dev, "WLP: Unable to obtain Primary "
1137 "device type attribute from D1 "
1138 "message.\n");
1139 goto error_parse;
1140 }
1141 dev_info->prim_dev_type.category =
1142 le16_to_cpu(dev_info->prim_dev_type.category);
1143 dev_info->prim_dev_type.subID =
1144 le16_to_cpu(dev_info->prim_dev_type.subID);
1145 last = WLP_ATTR_PRI_DEV_TYPE;
1146 used += result;
1147 break;
1148 default:
1149 /* This is not variable device information. */
1150 goto out;
1152 }
1153 }
1154out:
1155 return used;
1156error_parse:
1157 return -EINVAL;
1158}
1159
1160/**
1161 * Parse incoming D1 frame, populate attribute values
1162 *
1163 * The caller provides pointers to memory already allocated for the
1164 * attributes expected in the D1 frame. These variables will be populated.
1165 */
1166static
1167int wlp_parse_d1_frame(struct wlp *wlp, struct sk_buff *skb,
1168 struct wlp_uuid *uuid_e,
1169 enum wlp_wss_sel_mthd *sel_mthd,
1170 struct wlp_device_info *dev_info,
1171 enum wlp_assc_error *assc_err)
1172{
1173 struct device *dev = &wlp->rc->uwb_dev.dev;
1174 struct wlp_frame_assoc *d1 = (void *) skb->data;
1175 void *ptr = skb->data;
1176 size_t len = skb->len;
1177 size_t used;
1178 ssize_t result;
1179
1180 used = sizeof(*d1);
1181 result = wlp_get_uuid_e(wlp, ptr + used, uuid_e, len - used);
1182 if (result < 0) {
1183 dev_err(dev, "WLP: unable to obtain UUID-E attribute from D1 "
1184 "message.\n");
1185 goto error_parse;
1186 }
1187 used += result;
1188 result = wlp_get_wss_sel_mthd(wlp, ptr + used, sel_mthd, len - used);
1189 if (result < 0) {
1190 dev_err(dev, "WLP: unable to obtain WSS selection method "
1191 "from D1 message.\n");
1192 goto error_parse;
1193 }
1194 used += result;
1195 result = wlp_get_dev_name(wlp, ptr + used, dev_info->name,
1196 len - used);
1197 if (result < 0) {
1198 dev_err(dev, "WLP: unable to obtain Device Name from D1 "
1199 "message.\n");
1200 goto error_parse;
1201 }
1202 used += result;
1203 result = wlp_get_variable_info(wlp, ptr + used, dev_info, len - used);
1204 if (result < 0) {
1205 dev_err(dev, "WLP: unable to obtain Device Information from "
1206 "D1 message.\n");
1207 goto error_parse;
1208 }
1209 used += result;
1210 result = wlp_get_wlp_assc_err(wlp, ptr + used, assc_err, len - used);
1211 if (result < 0) {
1212 dev_err(dev, "WLP: unable to obtain WLP Association Error "
1213 "Information from D1 message.\n");
1214 goto error_parse;
1215 }
1216 result = 0;
1217error_parse:
1218 return result;
1219}
1220/**
1221 * Handle incoming D1 frame
1222 *
1223 * The frame has already been verified to contain an Association header with
1224 * the correct version number. Parse the incoming frame, construct and send
1225 * a D2 frame in response.
1226 *
1227 * It is not clear what to do with most fields in the incoming D1 frame. We
1228 * retrieve and discard the information here for now.
1229 */
1230void wlp_handle_d1_frame(struct work_struct *ws)
1231{
1232 struct wlp_assoc_frame_ctx *frame_ctx = container_of(ws,
1233 struct wlp_assoc_frame_ctx,
1234 ws);
1235 struct wlp *wlp = frame_ctx->wlp;
1236 struct wlp_wss *wss = &wlp->wss;
1237 struct sk_buff *skb = frame_ctx->skb;
1238 struct uwb_dev_addr *src = &frame_ctx->src;
1239 int result;
1240 struct device *dev = &wlp->rc->uwb_dev.dev;
1241 struct wlp_uuid uuid_e;
1242 enum wlp_wss_sel_mthd sel_mthd = 0;
1243 struct wlp_device_info dev_info;
1244 enum wlp_assc_error assc_err;
1245 char uuid[WLP_WSS_UUID_STRSIZE];
1246 struct sk_buff *resp = NULL;
1247
1248 /* Parse D1 frame */
1249 d_fnstart(6, dev, "WLP: handle D1 frame. wlp = %p, skb = %p\n",
1250 wlp, skb);
1251 mutex_lock(&wss->mutex);
1252 mutex_lock(&wlp->mutex); /* to access wlp->uuid */
1253 memset(&dev_info, 0, sizeof(dev_info));
1254 result = wlp_parse_d1_frame(wlp, skb, &uuid_e, &sel_mthd, &dev_info,
1255 &assc_err);
1256 if (result < 0) {
1257 dev_err(dev, "WLP: Unable to parse incoming D1 frame.\n");
1258 kfree_skb(skb);
1259 goto out;
1260 }
1261 wlp_wss_uuid_print(uuid, sizeof(uuid), &uuid_e);
1262 d_printf(6, dev, "From D1 frame:\n"
1263 "UUID-E: %s\n"
1264 "Selection method: %d\n"
1265 "Device name (%d bytes): %s\n"
1266 "Model name (%d bytes): %s\n"
1267 "Manufacturer (%d bytes): %s\n"
1268 "Model number (%d bytes): %s\n"
1269 "Serial number (%d bytes): %s\n"
1270 "Primary device type: \n"
1271 " Category: %d \n"
1272 " OUI: %02x:%02x:%02x \n"
1273 " OUI Subdivision: %u \n",
1274 uuid, sel_mthd,
1275 (int)strlen(dev_info.name), dev_info.name,
1276 (int)strlen(dev_info.model_name), dev_info.model_name,
1277 (int)strlen(dev_info.manufacturer), dev_info.manufacturer,
1278 (int)strlen(dev_info.model_nr), dev_info.model_nr,
1279 (int)strlen(dev_info.serial), dev_info.serial,
1280 dev_info.prim_dev_type.category,
1281 dev_info.prim_dev_type.OUI[0],
1282 dev_info.prim_dev_type.OUI[1],
1283 dev_info.prim_dev_type.OUI[2],
1284 dev_info.prim_dev_type.OUIsubdiv);
1285
1286 kfree_skb(skb);
1287 if (!wlp_uuid_is_set(&wlp->uuid)) {
1288 dev_err(dev, "WLP: UUID is not set. Set via sysfs to "
1289 "proceed. Respong to D1 message with error F0.\n");
1290 result = wlp_build_assoc_f0(wlp, &resp,
1291 WLP_ASSOC_ERROR_NOT_READY);
1292 if (result < 0) {
1293 dev_err(dev, "WLP: Unable to construct F0 message.\n");
1294 goto out;
1295 }
1296 } else {
1297 /* Construct D2 frame */
1298 result = wlp_build_assoc_d2(wlp, wss, &resp, &uuid_e);
1299 if (result < 0) {
1300 dev_err(dev, "WLP: Unable to construct D2 message.\n");
1301 goto out;
1302 }
1303 }
1304 /* Send response (D2/F0) frame */
1305 BUG_ON(wlp->xmit_frame == NULL);
1306 result = wlp->xmit_frame(wlp, resp, src);
1307 if (result < 0) {
1308 dev_err(dev, "WLP: Unable to transmit D2 association "
1309 "message: %d\n", result);
1310 if (result == -ENXIO)
1311 dev_err(dev, "WLP: Is network interface up? \n");
1312 /* We could try again ... */
1313 dev_kfree_skb_any(resp); /* we need to free if tx fails */
1314 }
1315out:
1316 kfree(frame_ctx);
1317 mutex_unlock(&wlp->mutex);
1318 mutex_unlock(&wss->mutex);
1319 d_fnend(6, dev, "WLP: handle D1 frame. wlp = %p\n", wlp);
1320}
1321
1322/**
1323 * Parse incoming D2 frame, create and populate temporary cache
1324 *
1325 * @skb: socket buffer in which D2 frame can be found
1326 * @neighbor: the neighbor that sent the D2 frame
1327 *
1328 * Will allocate memory for temporary storage of information learned during
1329 * discovery.
1330 */
1331int wlp_parse_d2_frame_to_cache(struct wlp *wlp, struct sk_buff *skb,
1332 struct wlp_neighbor_e *neighbor)
1333{
1334 struct device *dev = &wlp->rc->uwb_dev.dev;
1335 struct wlp_frame_assoc *d2 = (void *) skb->data;
1336 void *ptr = skb->data;
1337 size_t len = skb->len;
1338 size_t used;
1339 ssize_t result;
1340 struct wlp_uuid uuid_e;
1341 struct wlp_device_info *nb_info;
1342 enum wlp_assc_error assc_err;
1343
1344 used = sizeof(*d2);
1345 result = wlp_get_uuid_e(wlp, ptr + used, &uuid_e, len - used);
1346 if (result < 0) {
1347 dev_err(dev, "WLP: unable to obtain UUID-E attribute from D2 "
1348 "message.\n");
1349 goto error_parse;
1350 }
1351 if (memcmp(&uuid_e, &wlp->uuid, sizeof(uuid_e))) {
1352 dev_err(dev, "WLP: UUID-E in incoming D2 does not match "
1353 "local UUID sent in D1.\n");
 result = -EINVAL;
1354 goto error_parse;
1355 }
1356 used += result;
1357 result = wlp_get_uuid_r(wlp, ptr + used, &neighbor->uuid, len - used);
1358 if (result < 0) {
1359 dev_err(dev, "WLP: unable to obtain UUID-R attribute from D2 "
1360 "message.\n");
1361 goto error_parse;
1362 }
1363 used += result;
1364 result = wlp_get_wss_info_to_cache(wlp, ptr + used, neighbor,
1365 len - used);
1366 if (result < 0) {
1367 dev_err(dev, "WLP: unable to obtain WSS information "
1368 "from D2 message.\n");
1369 goto error_parse;
1370 }
1371 used += result;
1372 neighbor->info = kzalloc(sizeof(struct wlp_device_info), GFP_KERNEL);
1373 if (neighbor->info == NULL) {
1374 dev_err(dev, "WLP: cannot allocate memory to store device "
1375 "info.\n");
1376 result = -ENOMEM;
1377 goto error_parse;
1378 }
1379 nb_info = neighbor->info;
1380 result = wlp_get_dev_name(wlp, ptr + used, nb_info->name,
1381 len - used);
1382 if (result < 0) {
1383 dev_err(dev, "WLP: unable to obtain Device Name from D2 "
1384 "message.\n");
1385 goto error_parse;
1386 }
1387 used += result;
1388 result = wlp_get_variable_info(wlp, ptr + used, nb_info, len - used);
1389 if (result < 0) {
1390 dev_err(dev, "WLP: unable to obtain Device Information from "
1391 "D2 message.\n");
1392 goto error_parse;
1393 }
1394 used += result;
1395 result = wlp_get_wlp_assc_err(wlp, ptr + used, &assc_err, len - used);
1396 if (result < 0) {
1397 dev_err(dev, "WLP: unable to obtain WLP Association Error "
1398 "Information from D2 message.\n");
1399 goto error_parse;
1400 }
1401 if (assc_err != WLP_ASSOC_ERROR_NONE) {
1402 dev_err(dev, "WLP: neighbor device returned association "
1403 "error %d\n", assc_err);
1404 result = -EINVAL;
1405 goto error_parse;
1406 }
1407 result = 0;
1408error_parse:
1409 if (result < 0)
1410 wlp_remove_neighbor_tmp_info(neighbor);
1411 return result;
1412}
1413
1414/**
1415 * Parse incoming D2 frame, populate attribute values of WSS being enrolled in
1416 *
1417 * @wss: our WSS that will be enrolled
1418 * @skb: socket buffer in which D2 frame can be found
1419 * @neighbor: the neighbor that sent the D2 frame
1420 * @wssid: the wssid of the WSS in which we want to enroll
1421 *
1422 * Forms part of enrollment sequence. We are trying to enroll in WSS with
1423 * @wssid by using @neighbor as registrar. A D1 message was sent to
1424 * @neighbor and now we need to parse the D2 response. The neighbor's
1425 * response is searched for the requested WSS and if found (and it accepts
1426 * enrollment), we store the information.
1427 */
1428int wlp_parse_d2_frame_to_enroll(struct wlp_wss *wss, struct sk_buff *skb,
1429 struct wlp_neighbor_e *neighbor,
1430 struct wlp_uuid *wssid)
1431{
1432 struct wlp *wlp = container_of(wss, struct wlp, wss);
1433 struct device *dev = &wlp->rc->uwb_dev.dev;
1434 void *ptr = skb->data;
1435 size_t len = skb->len;
1436 size_t used;
1437 ssize_t result;
1438 struct wlp_uuid uuid_e;
1439 struct wlp_uuid uuid_r;
1440 struct wlp_device_info nb_info;
1441 enum wlp_assc_error assc_err;
1442 char uuid_bufA[WLP_WSS_UUID_STRSIZE];
1443 char uuid_bufB[WLP_WSS_UUID_STRSIZE];
1444
1445 used = sizeof(struct wlp_frame_assoc);
1446 result = wlp_get_uuid_e(wlp, ptr + used, &uuid_e, len - used);
1447 if (result < 0) {
1448 dev_err(dev, "WLP: unable to obtain UUID-E attribute from D2 "
1449 "message.\n");
1450 goto error_parse;
1451 }
1452 if (memcmp(&uuid_e, &wlp->uuid, sizeof(uuid_e))) {
1453 dev_err(dev, "WLP: UUID-E in incoming D2 does not match "
1454 "local UUID sent in D1.\n");
 result = -EINVAL;
1455 goto error_parse;
1456 }
1457 used += result;
1458 result = wlp_get_uuid_r(wlp, ptr + used, &uuid_r, len - used);
1459 if (result < 0) {
1460 dev_err(dev, "WLP: unable to obtain UUID-R attribute from D2 "
1461 "message.\n");
1462 goto error_parse;
1463 }
1464 if (memcmp(&uuid_r, &neighbor->uuid, sizeof(uuid_r))) {
1465 wlp_wss_uuid_print(uuid_bufA, sizeof(uuid_bufA),
1466 &neighbor->uuid);
1467 wlp_wss_uuid_print(uuid_bufB, sizeof(uuid_bufB), &uuid_r);
1468 dev_err(dev, "WLP: UUID of neighbor does not match UUID "
1469 "learned during discovery. Originally discovered: %s, "
1470 "now from D2 message: %s\n", uuid_bufA, uuid_bufB);
1471 result = -EINVAL;
1472 goto error_parse;
1473 }
1474 used += result;
1475 wss->wssid = *wssid;
1476 result = wlp_get_wss_info_to_enroll(wlp, ptr + used, wss, len - used);
1477 if (result < 0) {
1478 dev_err(dev, "WLP: unable to obtain WSS information "
1479 "from D2 message.\n");
1480 goto error_parse;
1481 }
1482 if (wss->state != WLP_WSS_STATE_PART_ENROLLED) {
1483 dev_err(dev, "WLP: D2 message did not contain information "
1484 "for successful enrollment. \n");
1485 result = -EINVAL;
1486 goto error_parse;
1487 }
1488 used += result;
1489 /* Place device information on stack to continue parsing of message */
1490 result = wlp_get_dev_name(wlp, ptr + used, nb_info.name,
1491 len - used);
1492 if (result < 0) {
1493 dev_err(dev, "WLP: unable to obtain Device Name from D2 "
1494 "message.\n");
1495 goto error_parse;
1496 }
1497 used += result;
1498 result = wlp_get_variable_info(wlp, ptr + used, &nb_info, len - used);
1499 if (result < 0) {
1500 dev_err(dev, "WLP: unable to obtain Device Information from "
1501 "D2 message.\n");
1502 goto error_parse;
1503 }
1504 used += result;
1505 result = wlp_get_wlp_assc_err(wlp, ptr + used, &assc_err, len - used);
1506 if (result < 0) {
1507 dev_err(dev, "WLP: unable to obtain WLP Association Error "
1508 "Information from D2 message.\n");
1509 goto error_parse;
1510 }
1511 if (assc_err != WLP_ASSOC_ERROR_NONE) {
1512 dev_err(dev, "WLP: neighbor device returned association "
1513 "error %d\n", assc_err);
1514 if (wss->state == WLP_WSS_STATE_PART_ENROLLED) {
1515 dev_err(dev, "WLP: Enrolled in WSS (should not "
1516 "happen according to spec). Undoing. \n");
1517 wlp_wss_reset(wss);
1518 }
1519 result = -EINVAL;
1520 goto error_parse;
1521 }
1522 result = 0;
1523error_parse:
1524 return result;
1525}
1526
1527/**
1528 * Parse C3/C4 frame into provided variables
1529 *
1530 * @wssid: will point to copy of wssid retrieved from C3/C4 frame
1531 * @tag: will point to copy of tag retrieved from C3/C4 frame
1532 * @virt_addr: will point to copy of virtual address retrieved from C3/C4
1533 * frame.
1534 *
1535 * The calling function has to allocate memory for these values.
1536 *
1537 * The skb contains a valid C3/C4 frame; the individual fields of this
1538 * frame are returned in the provided variables.
1539 */
1540int wlp_parse_c3c4_frame(struct wlp *wlp, struct sk_buff *skb,
1541 struct wlp_uuid *wssid, u8 *tag,
1542 struct uwb_mac_addr *virt_addr)
1543{
1544 struct device *dev = &wlp->rc->uwb_dev.dev;
1545 int result;
1546 void *ptr = skb->data;
1547 size_t len = skb->len;
1548 size_t used;
1549 char buf[WLP_WSS_UUID_STRSIZE];
1550 struct wlp_frame_assoc *assoc = ptr;
1551
1552 d_fnstart(6, dev, "wlp %p, skb %p \n", wlp, skb);
1553 used = sizeof(*assoc);
1554 result = wlp_get_wssid(wlp, ptr + used, wssid, len - used);
1555 if (result < 0) {
1556 dev_err(dev, "WLP: unable to obtain WSSID attribute from "
1557 "%s message.\n", wlp_assoc_frame_str(assoc->type));
1558 goto error_parse;
1559 }
1560 used += result;
1561 result = wlp_get_wss_tag(wlp, ptr + used, tag, len - used);
1562 if (result < 0) {
1563 dev_err(dev, "WLP: unable to obtain WSS tag attribute from "
1564 "%s message.\n", wlp_assoc_frame_str(assoc->type));
1565 goto error_parse;
1566 }
1567 used += result;
1568 result = wlp_get_wss_virt(wlp, ptr + used, virt_addr, len - used);
1569 if (result < 0) {
1570 dev_err(dev, "WLP: unable to obtain WSS virtual address "
1571 "attribute from %s message.\n",
1572 wlp_assoc_frame_str(assoc->type));
1573 goto error_parse;
1574 }
1575 wlp_wss_uuid_print(buf, sizeof(buf), wssid);
1576 d_printf(6, dev, "WLP: parsed: WSSID %s, tag 0x%02x, virt "
1577 "%02x:%02x:%02x:%02x:%02x:%02x \n", buf, *tag,
1578 virt_addr->data[0], virt_addr->data[1], virt_addr->data[2],
1579 virt_addr->data[3], virt_addr->data[4], virt_addr->data[5]);
1580
1581error_parse:
1582 d_fnend(6, dev, "wlp %p, skb %p, result = %d \n", wlp, skb, result);
1583 return result;
1584}
1585
1586/**
1587 * Allocate memory for and populate fields of C1 or C2 association frame
1588 *
1589 * The C1 and C2 association frames appear identical - except for the type.
1590 */
1591static
1592int wlp_build_assoc_c1c2(struct wlp *wlp, struct wlp_wss *wss,
1593 struct sk_buff **skb, enum wlp_assoc_type type)
1594{
1595 struct device *dev = &wlp->rc->uwb_dev.dev;
1596 int result = -ENOMEM;
1597 struct {
1598 struct wlp_frame_assoc c_hdr;
1599 struct wlp_attr_wssid wssid;
1600 } *c;
1601 struct sk_buff *_skb;
1602
1603 d_fnstart(6, dev, "wlp %p, wss %p \n", wlp, wss);
1604 _skb = dev_alloc_skb(sizeof(*c));
1605 if (_skb == NULL) {
1606 dev_err(dev, "WLP: Unable to allocate memory for C1/C2 "
1607 "association frame. \n");
1608 goto error_alloc;
1609 }
1610 c = (void *) _skb->data;
1611 d_printf(6, dev, "C1/C2 starts at %p \n", c);
1612 c->c_hdr.hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID);
1613 c->c_hdr.hdr.type = WLP_FRAME_ASSOCIATION;
1614 c->c_hdr.type = type;
1615 wlp_set_version(&c->c_hdr.version, WLP_VERSION);
1616 wlp_set_msg_type(&c->c_hdr.msg_type, type);
1617 wlp_set_wssid(&c->wssid, &wss->wssid);
1618 skb_put(_skb, sizeof(*c));
1619 d_printf(6, dev, "C1/C2 message:\n");
1620 d_dump(6, dev, c, sizeof(*c));
1621 *skb = _skb;
1622 result = 0;
1623error_alloc:
1624 d_fnend(6, dev, "wlp %p, wss %p, result %d \n", wlp, wss, result);
1625 return result;
1626}
1627
1628
1629static
1630int wlp_build_assoc_c1(struct wlp *wlp, struct wlp_wss *wss,
1631 struct sk_buff **skb)
1632{
1633 return wlp_build_assoc_c1c2(wlp, wss, skb, WLP_ASSOC_C1);
1634}
1635
1636static
1637int wlp_build_assoc_c2(struct wlp *wlp, struct wlp_wss *wss,
1638 struct sk_buff **skb)
1639{
1640 return wlp_build_assoc_c1c2(wlp, wss, skb, WLP_ASSOC_C2);
1641}
1642
1643
1644/**
1645 * Allocate memory for and populate fields of C3 or C4 association frame
1646 *
1647 * The C3 and C4 association frames appear identical - except for the type.
1648 */
1649static
1650int wlp_build_assoc_c3c4(struct wlp *wlp, struct wlp_wss *wss,
1651 struct sk_buff **skb, enum wlp_assoc_type type)
1652{
1653 struct device *dev = &wlp->rc->uwb_dev.dev;
1654 int result = -ENOMEM;
1655 struct {
1656 struct wlp_frame_assoc c_hdr;
1657 struct wlp_attr_wssid wssid;
1658 struct wlp_attr_wss_tag wss_tag;
1659 struct wlp_attr_wss_virt wss_virt;
1660 } *c;
1661 struct sk_buff *_skb;
1662
1663 d_fnstart(6, dev, "wlp %p, wss %p \n", wlp, wss);
1664 _skb = dev_alloc_skb(sizeof(*c));
1665 if (_skb == NULL) {
1666 dev_err(dev, "WLP: Unable to allocate memory for C3/C4 "
1667 "association frame. \n");
1668 goto error_alloc;
1669 }
1670 c = (void *) _skb->data;
1671 d_printf(6, dev, "C3/C4 starts at %p \n", c);
1672 c->c_hdr.hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID);
1673 c->c_hdr.hdr.type = WLP_FRAME_ASSOCIATION;
1674 c->c_hdr.type = type;
1675 wlp_set_version(&c->c_hdr.version, WLP_VERSION);
1676 wlp_set_msg_type(&c->c_hdr.msg_type, type);
1677 wlp_set_wssid(&c->wssid, &wss->wssid);
1678 wlp_set_wss_tag(&c->wss_tag, wss->tag);
1679 wlp_set_wss_virt(&c->wss_virt, &wss->virtual_addr);
1680 skb_put(_skb, sizeof(*c));
1681 d_printf(6, dev, "C3/C4 message:\n");
1682 d_dump(6, dev, c, sizeof(*c));
1683 *skb = _skb;
1684 result = 0;
1685error_alloc:
1686 d_fnend(6, dev, "wlp %p, wss %p, result %d \n", wlp, wss, result);
1687 return result;
1688}
1689
1690static
1691int wlp_build_assoc_c3(struct wlp *wlp, struct wlp_wss *wss,
1692 struct sk_buff **skb)
1693{
1694 return wlp_build_assoc_c3c4(wlp, wss, skb, WLP_ASSOC_C3);
1695}
1696
1697static
1698int wlp_build_assoc_c4(struct wlp *wlp, struct wlp_wss *wss,
1699 struct sk_buff **skb)
1700{
1701 return wlp_build_assoc_c3c4(wlp, wss, skb, WLP_ASSOC_C4);
1702}
1703
1704
1705#define wlp_send_assoc(type, id) \
1706static int wlp_send_assoc_##type(struct wlp *wlp, struct wlp_wss *wss, \
1707 struct uwb_dev_addr *dev_addr) \
1708{ \
1709 struct device *dev = &wlp->rc->uwb_dev.dev; \
1710 int result; \
1711 struct sk_buff *skb = NULL; \
1712 d_fnstart(6, dev, "wlp %p, wss %p, neighbor: %02x:%02x\n", \
1713 wlp, wss, dev_addr->data[1], dev_addr->data[0]); \
1714 d_printf(6, dev, "WLP: Constructing %s frame. \n", \
1715 wlp_assoc_frame_str(id)); \
1716 /* Build the frame */ \
1717 result = wlp_build_assoc_##type(wlp, wss, &skb); \
1718 if (result < 0) { \
1719 dev_err(dev, "WLP: Unable to construct %s association " \
1720 "frame: %d\n", wlp_assoc_frame_str(id), result);\
1721 goto error_build_assoc; \
1722 } \
1723 /* Send the frame */ \
1724 d_printf(6, dev, "Transmitting %s frame to %02x:%02x \n", \
1725 wlp_assoc_frame_str(id), \
1726 dev_addr->data[1], dev_addr->data[0]); \
1727 BUG_ON(wlp->xmit_frame == NULL); \
1728 result = wlp->xmit_frame(wlp, skb, dev_addr); \
1729 if (result < 0) { \
1730 dev_err(dev, "WLP: Unable to transmit %s association " \
1731 "message: %d\n", wlp_assoc_frame_str(id), \
1732 result); \
1733 if (result == -ENXIO) \
1734 dev_err(dev, "WLP: Is network interface " \
1735 "up? \n"); \
1736 goto error_xmit; \
1737 } \
1738 return 0; \
1739error_xmit: \
1740 /* We could try again ... */ \
1741 dev_kfree_skb_any(skb);/*we need to free if tx fails*/ \
1742error_build_assoc: \
1743 d_fnend(6, dev, "wlp %p, wss %p, neighbor: %02x:%02x\n", \
1744 wlp, wss, dev_addr->data[1], dev_addr->data[0]); \
1745 return result; \
1746}
1747
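/*
 * Each invocation below expands the wlp_send_assoc() template into a
 * sender for one frame type, e.g. wlp_send_assoc_d1(wlp, wss, dev_addr),
 * which builds the frame with the matching wlp_build_assoc_*() helper and
 * transmits it to the given neighbor.
 */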
1748wlp_send_assoc(d1, WLP_ASSOC_D1)
1749wlp_send_assoc(c1, WLP_ASSOC_C1)
1750wlp_send_assoc(c3, WLP_ASSOC_C3)
1751
1752int wlp_send_assoc_frame(struct wlp *wlp, struct wlp_wss *wss,
1753 struct uwb_dev_addr *dev_addr,
1754 enum wlp_assoc_type type)
1755{
1756 int result = 0;
1757 struct device *dev = &wlp->rc->uwb_dev.dev;
1758 switch (type) {
1759 case WLP_ASSOC_D1:
1760 result = wlp_send_assoc_d1(wlp, wss, dev_addr);
1761 break;
1762 case WLP_ASSOC_C1:
1763 result = wlp_send_assoc_c1(wlp, wss, dev_addr);
1764 break;
1765 case WLP_ASSOC_C3:
1766 result = wlp_send_assoc_c3(wlp, wss, dev_addr);
1767 break;
1768 default:
1769 dev_err(dev, "WLP: Received request to send unknown "
1770 "association message.\n");
1771 result = -EINVAL;
1772 break;
1773 }
1774 return result;
1775}
1776
1777/**
1778 * Handle incoming C1 frame
1779 *
1780 * The frame has already been verified to contain an Association header with
1781 * the correct version number. Parse the incoming frame, construct and send
1782 * a C2 frame in response.
1783 */
1784void wlp_handle_c1_frame(struct work_struct *ws)
1785{
1786 struct wlp_assoc_frame_ctx *frame_ctx = container_of(ws,
1787 struct wlp_assoc_frame_ctx,
1788 ws);
1789 struct wlp *wlp = frame_ctx->wlp;
1790 struct wlp_wss *wss = &wlp->wss;
1791 struct device *dev = &wlp->rc->uwb_dev.dev;
1792 struct wlp_frame_assoc *c1 = (void *) frame_ctx->skb->data;
1793 unsigned int len = frame_ctx->skb->len;
1794 struct uwb_dev_addr *src = &frame_ctx->src;
1795 int result;
1796 struct wlp_uuid wssid;
1797 char buf[WLP_WSS_UUID_STRSIZE];
1798 struct sk_buff *resp = NULL;
1799
1800 /* Parse C1 frame */
1801 d_fnstart(6, dev, "WLP: handle C1 frame. wlp = %p, c1 = %p\n",
1802 wlp, c1);
1803 mutex_lock(&wss->mutex);
1804 result = wlp_get_wssid(wlp, (void *)c1 + sizeof(*c1), &wssid,
1805 len - sizeof(*c1));
1806 if (result < 0) {
1807 dev_err(dev, "WLP: unable to obtain WSSID from C1 frame.\n");
1808 goto out;
1809 }
1810 wlp_wss_uuid_print(buf, sizeof(buf), &wssid);
1811 d_printf(6, dev, "Received C1 frame with WSSID %s \n", buf);
1812 if (!memcmp(&wssid, &wss->wssid, sizeof(wssid))
1813 && wss->state == WLP_WSS_STATE_ACTIVE) {
1814 d_printf(6, dev, "WSSID from C1 frame is known locally "
1815 "and is active\n");
1816 /* Construct C2 frame */
1817 result = wlp_build_assoc_c2(wlp, wss, &resp);
1818 if (result < 0) {
1819 dev_err(dev, "WLP: Unable to construct C2 message.\n");
1820 goto out;
1821 }
1822 } else {
1823 d_printf(6, dev, "WSSID from C1 frame is not known locally "
1824 "or is not active\n");
1825 /* Construct F0 frame */
1826 result = wlp_build_assoc_f0(wlp, &resp, WLP_ASSOC_ERROR_INV);
1827 if (result < 0) {
1828 dev_err(dev, "WLP: Unable to construct F0 message.\n");
1829 goto out;
1830 }
1831 }
1832 /* Send response (C2/F0) frame */
1833 d_printf(6, dev, "Transmitting response (C2/F0) frame to %02x:%02x \n",
1834 src->data[1], src->data[0]);
1835 BUG_ON(wlp->xmit_frame == NULL);
1836 result = wlp->xmit_frame(wlp, resp, src);
1837 if (result < 0) {
1838 dev_err(dev, "WLP: Unable to transmit response association "
1839 "message: %d\n", result);
1840 if (result == -ENXIO)
1841 dev_err(dev, "WLP: Is network interface up? \n");
1842 /* We could try again ... */
1843 dev_kfree_skb_any(resp); /* we need to free if tx fails */
1844 }
1845out:
1846 kfree_skb(frame_ctx->skb);
1847 kfree(frame_ctx);
1848 mutex_unlock(&wss->mutex);
1849 d_fnend(6, dev, "WLP: handle C1 frame. wlp = %p\n", wlp);
1850}
1851
1852/**
1853 * Handle incoming C3 frame
1854 *
1855 * The frame has already been verified to contain an Association header with
1856 * the correct version number. Parse the incoming frame, construct and send
1857 * a C4 frame in response. If the C3 frame identifies a WSS that is locally
1858 * active then we connect to this neighbor (add it to our EDA cache).
1859 */
1860void wlp_handle_c3_frame(struct work_struct *ws)
1861{
1862 struct wlp_assoc_frame_ctx *frame_ctx = container_of(ws,
1863 struct wlp_assoc_frame_ctx,
1864 ws);
1865 struct wlp *wlp = frame_ctx->wlp;
1866 struct wlp_wss *wss = &wlp->wss;
1867 struct device *dev = &wlp->rc->uwb_dev.dev;
1868 struct sk_buff *skb = frame_ctx->skb;
1869 struct uwb_dev_addr *src = &frame_ctx->src;
1870 int result;
1871 char buf[WLP_WSS_UUID_STRSIZE];
1872 struct sk_buff *resp = NULL;
1873 struct wlp_uuid wssid;
1874 u8 tag;
1875 struct uwb_mac_addr virt_addr;
1876
1877 /* Parse C3 frame */
1878 d_fnstart(6, dev, "WLP: handle C3 frame. wlp = %p, skb = %p\n",
1879 wlp, skb);
1880 mutex_lock(&wss->mutex);
1881 result = wlp_parse_c3c4_frame(wlp, skb, &wssid, &tag, &virt_addr);
1882 if (result < 0) {
1883 dev_err(dev, "WLP: unable to obtain values from C3 frame.\n");
1884 goto out;
1885 }
1886 wlp_wss_uuid_print(buf, sizeof(buf), &wssid);
1887 d_printf(6, dev, "Received C3 frame with WSSID %s \n", buf);
1888 if (!memcmp(&wssid, &wss->wssid, sizeof(wssid))
1889 && wss->state >= WLP_WSS_STATE_ACTIVE) {
1890 d_printf(6, dev, "WSSID from C3 frame is known locally "
1891 "and is active\n");
1892 result = wlp_eda_update_node(&wlp->eda, src, wss,
1893 (void *) virt_addr.data, tag,
1894 WLP_WSS_CONNECTED);
1895 if (result < 0) {
1896 dev_err(dev, "WLP: Unable to update EDA cache "
1897 "with new connected neighbor information.\n");
1898 result = wlp_build_assoc_f0(wlp, &resp,
1899 WLP_ASSOC_ERROR_INT);
1900 if (result < 0) {
1901 dev_err(dev, "WLP: Unable to construct F0 "
1902 "message.\n");
1903 goto out;
1904 }
1905 } else {
1906 wss->state = WLP_WSS_STATE_CONNECTED;
1907 /* Construct C4 frame */
1908 result = wlp_build_assoc_c4(wlp, wss, &resp);
1909 if (result < 0) {
1910 dev_err(dev, "WLP: Unable to construct C4 "
1911 "message.\n");
1912 goto out;
1913 }
1914 }
1915 } else {
1916 d_printf(6, dev, "WSSID from C3 frame is not known locally "
1917 "or is not active\n");
1918 /* Construct F0 frame */
1919 result = wlp_build_assoc_f0(wlp, &resp, WLP_ASSOC_ERROR_INV);
1920 if (result < 0) {
1921 dev_err(dev, "WLP: Unable to construct F0 message.\n");
1922 goto out;
1923 }
1924 }
1925 /* Send response (C4/F0) frame */
1926 d_printf(6, dev, "Transmitting response (C4/F0) frame to %02x:%02x \n",
1927 src->data[1], src->data[0]);
1928 BUG_ON(wlp->xmit_frame == NULL);
1929 result = wlp->xmit_frame(wlp, resp, src);
1930 if (result < 0) {
1931 dev_err(dev, "WLP: Unable to transmit response association "
1932 "message: %d\n", result);
1933 if (result == -ENXIO)
1934 dev_err(dev, "WLP: Is network interface up? \n");
1935 /* We could try again ... */
1936 dev_kfree_skb_any(resp); /* we need to free if tx fails */
1937 }
1938out:
1939 kfree_skb(frame_ctx->skb);
1940 kfree(frame_ctx);
1941 mutex_unlock(&wss->mutex);
1942 d_fnend(6, dev, "WLP: handle C3 frame. wlp = %p, skb = %p\n",
1943 wlp, skb);
1944}
1945
1946
diff --git a/drivers/uwb/wlp/sysfs.c b/drivers/uwb/wlp/sysfs.c
new file mode 100644
index 000000000000..1bb9b1f97d47
--- /dev/null
+++ b/drivers/uwb/wlp/sysfs.c
@@ -0,0 +1,709 @@
1/*
2 * WiMedia Logical Link Control Protocol (WLP)
3 * sysfs functions
4 *
5 * Copyright (C) 2007 Intel Corporation
6 * Reinette Chatre <reinette.chatre@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * FIXME: Docs
24 *
25 */
26
27#include <linux/wlp.h>
28#include "wlp-internal.h"
29
30static
31size_t wlp_wss_wssid_e_print(char *buf, size_t bufsize,
32 struct wlp_wssid_e *wssid_e)
33{
34 size_t used = 0;
35 used += scnprintf(buf, bufsize, " WSS: ");
36 used += wlp_wss_uuid_print(buf + used, bufsize - used,
37 &wssid_e->wssid);
38
39 if (wssid_e->info != NULL) {
40 used += scnprintf(buf + used, bufsize - used, " ");
41 used += uwb_mac_addr_print(buf + used, bufsize - used,
42 &wssid_e->info->bcast);
43 used += scnprintf(buf + used, bufsize - used, " %u %u %s\n",
44 wssid_e->info->accept_enroll,
45 wssid_e->info->sec_status,
46 wssid_e->info->name);
47 }
48 return used;
49}
50
51/**
52 * Print out information learned from neighbor discovery
53 *
54 * Some fields being printed may not be included in the device discovery
55 * information (it is not mandatory). We are thus careful how the
56 * information is printed to ensure it is clear to the user what field is
57 * being referenced.
 58 * The information being printed is for one-time use - the temporary
 59 * storage is cleared after it is printed.
 60 *
 61 * Ideally sysfs output should be on one line. The information printed here
 62 * contains a few strings, so it would be hard to parse if it were all
 63 * printed on the same line without agreeing on a standard field
 64 * separator.
65 */
66static
67ssize_t wlp_wss_neighborhood_print_remove(struct wlp *wlp, char *buf,
68 size_t bufsize)
69{
70 size_t used = 0;
71 struct wlp_neighbor_e *neighb;
72 struct wlp_wssid_e *wssid_e;
73
74 mutex_lock(&wlp->nbmutex);
75 used = scnprintf(buf, bufsize, "#Neighbor information\n"
76 "#uuid dev_addr\n"
77 "# Device Name:\n# Model Name:\n# Manufacturer:\n"
78 "# Model Nr:\n# Serial:\n"
79 "# Pri Dev type: CategoryID OUI OUISubdiv "
80 "SubcategoryID\n"
81 "# WSS: WSSID WSS_name accept_enroll sec_status "
82 "bcast\n"
83 "# WSS: WSSID WSS_name accept_enroll sec_status "
84 "bcast\n\n");
85 list_for_each_entry(neighb, &wlp->neighbors, node) {
86 if (bufsize - used <= 0)
87 goto out;
88 used += wlp_wss_uuid_print(buf + used, bufsize - used,
89 &neighb->uuid);
90 buf[used++] = ' ';
91 used += uwb_dev_addr_print(buf + used, bufsize - used,
92 &neighb->uwb_dev->dev_addr);
93 if (neighb->info != NULL)
94 used += scnprintf(buf + used, bufsize - used,
95 "\n Device Name: %s\n"
96 " Model Name: %s\n"
97 " Manufacturer:%s \n"
98 " Model Nr: %s\n"
99 " Serial: %s\n"
100 " Pri Dev type: "
101 "%u %02x:%02x:%02x %u %u\n",
102 neighb->info->name,
103 neighb->info->model_name,
104 neighb->info->manufacturer,
105 neighb->info->model_nr,
106 neighb->info->serial,
107 neighb->info->prim_dev_type.category,
108 neighb->info->prim_dev_type.OUI[0],
109 neighb->info->prim_dev_type.OUI[1],
110 neighb->info->prim_dev_type.OUI[2],
111 neighb->info->prim_dev_type.OUIsubdiv,
112 neighb->info->prim_dev_type.subID);
113 list_for_each_entry(wssid_e, &neighb->wssid, node) {
114 used += wlp_wss_wssid_e_print(buf + used,
115 bufsize - used,
116 wssid_e);
117 }
118 buf[used++] = '\n';
119 wlp_remove_neighbor_tmp_info(neighb);
120 }
121
122
123out:
124 mutex_unlock(&wlp->nbmutex);
125 return used;
126}
127
128
129/**
130 * Show properties of all WSS in neighborhood.
131 *
132 * Will trigger a complete discovery of WSS activated by this device and
133 * its neighbors.
134 */
135ssize_t wlp_neighborhood_show(struct wlp *wlp, char *buf)
136{
137 wlp_discover(wlp);
138 return wlp_wss_neighborhood_print_remove(wlp, buf, PAGE_SIZE);
139}
140EXPORT_SYMBOL_GPL(wlp_neighborhood_show);
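/*
 * Note that reading this attribute is not a passive operation: each read
 * triggers a full wlp_discover() pass, and the temporary neighbor
 * information is removed once it has been printed.
 */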
141
142static
143ssize_t __wlp_wss_properties_show(struct wlp_wss *wss, char *buf,
144 size_t bufsize)
145{
146 ssize_t result;
147
148 result = wlp_wss_uuid_print(buf, bufsize, &wss->wssid);
149 result += scnprintf(buf + result, bufsize - result, " ");
150 result += uwb_mac_addr_print(buf + result, bufsize - result,
151 &wss->bcast);
152 result += scnprintf(buf + result, bufsize - result,
153 " 0x%02x %u ", wss->hash, wss->secure_status);
154 result += wlp_wss_key_print(buf + result, bufsize - result,
155 wss->master_key);
156 result += scnprintf(buf + result, bufsize - result, " 0x%02x ",
157 wss->tag);
158 result += uwb_mac_addr_print(buf + result, bufsize - result,
159 &wss->virtual_addr);
160 result += scnprintf(buf + result, bufsize - result, " %s", wss->name);
161 result += scnprintf(buf + result, bufsize - result,
162 "\n\n#WSSID\n#WSS broadcast address\n"
163 "#WSS hash\n#WSS secure status\n"
164 "#WSS master key\n#WSS local tag\n"
165 "#WSS local virtual EUI-48\n#WSS name\n");
166 return result;
167}
168
169/**
170 * Show which WSS is activated.
171 */
172ssize_t wlp_wss_activate_show(struct wlp_wss *wss, char *buf)
173{
174 int result = 0;
175
176 if (mutex_lock_interruptible(&wss->mutex))
177 goto out;
178 if (wss->state >= WLP_WSS_STATE_ACTIVE)
179 result = __wlp_wss_properties_show(wss, buf, PAGE_SIZE);
180 else
181 result = scnprintf(buf, PAGE_SIZE, "No local WSS active.\n");
182 result += scnprintf(buf + result, PAGE_SIZE - result,
183 "\n\n"
184 "# echo WSSID SECURE_STATUS ACCEPT_ENROLLMENT "
185 "NAME #create new WSS\n"
186 "# echo WSSID [DEV ADDR] #enroll in and activate "
187 "existing WSS, can request registrar\n"
188 "#\n"
189 "# WSSID is a 16 byte hex array. Eg. 12 A3 3B ... \n"
190 "# SECURE_STATUS 0 - unsecure, 1 - secure (default)\n"
191 "# ACCEPT_ENROLLMENT 0 - no, 1 - yes (default)\n"
192 "# NAME is the text string identifying the WSS\n"
193 "# DEV ADDR is the device address of neighbor "
194 "that should be registrar. Eg. 32:AB\n");
195
196 mutex_unlock(&wss->mutex);
197out:
198 return result;
199
200}
201EXPORT_SYMBOL_GPL(wlp_wss_activate_show);
202
203/**
204 * Create/activate a new WSS or enroll/activate in neighboring WSS
205 *
 206 * The user can provide the WSSID of a WSS in which to enroll.
 207 * Only the WSSID is necessary if the WSS has been discovered before. If
 208 * the WSS has not been discovered before, or the user wants to use a
 209 * particular neighbor as its registrar, then the user can also provide
 210 * the device address of the neighbor that will be used as registrar.
211 *
212 * A new WSS is created when the user provides a WSSID, secure status, and
213 * WSS name.
214 */
215ssize_t wlp_wss_activate_store(struct wlp_wss *wss,
216 const char *buf, size_t size)
217{
218 ssize_t result = -EINVAL;
219 struct wlp_uuid wssid;
220 struct uwb_dev_addr dev;
221 struct uwb_dev_addr bcast = {.data = {0xff, 0xff} };
222 char name[65];
223 unsigned sec_status, accept;
224 memset(name, 0, sizeof(name));
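	/*
	 * The input is scanned twice: the first sscanf() checks whether the
	 * 16-octet WSSID is followed by a DEV ADDR (18 conversions: enroll
	 * using that neighbor as registrar). Otherwise the second sscanf()
	 * looks for SECURE_STATUS ACCEPT_ENROLLMENT NAME (19 conversions:
	 * create a new WSS); a bare WSSID (16 conversions) means enroll via
	 * broadcast.
	 */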
225 result = sscanf(buf, "%02hhx %02hhx %02hhx %02hhx "
226 "%02hhx %02hhx %02hhx %02hhx "
227 "%02hhx %02hhx %02hhx %02hhx "
228 "%02hhx %02hhx %02hhx %02hhx "
229 "%02hhx:%02hhx",
230 &wssid.data[0] , &wssid.data[1],
231 &wssid.data[2] , &wssid.data[3],
232 &wssid.data[4] , &wssid.data[5],
233 &wssid.data[6] , &wssid.data[7],
234 &wssid.data[8] , &wssid.data[9],
235 &wssid.data[10], &wssid.data[11],
236 &wssid.data[12], &wssid.data[13],
237 &wssid.data[14], &wssid.data[15],
238 &dev.data[1], &dev.data[0]);
239 if (result == 16 || result == 17) {
240 result = sscanf(buf, "%02hhx %02hhx %02hhx %02hhx "
241 "%02hhx %02hhx %02hhx %02hhx "
242 "%02hhx %02hhx %02hhx %02hhx "
243 "%02hhx %02hhx %02hhx %02hhx "
244 "%u %u %64c",
245 &wssid.data[0] , &wssid.data[1],
246 &wssid.data[2] , &wssid.data[3],
247 &wssid.data[4] , &wssid.data[5],
248 &wssid.data[6] , &wssid.data[7],
249 &wssid.data[8] , &wssid.data[9],
250 &wssid.data[10], &wssid.data[11],
251 &wssid.data[12], &wssid.data[13],
252 &wssid.data[14], &wssid.data[15],
253 &sec_status, &accept, name);
254 if (result == 16)
255 result = wlp_wss_enroll_activate(wss, &wssid, &bcast);
256 else if (result == 19) {
257 sec_status = sec_status == 0 ? 0 : 1;
258 accept = accept == 0 ? 0 : 1;
259 /* We read name using %c, so the newline needs to be
260 * removed */
261 if (strlen(name) != sizeof(name) - 1)
262 name[strlen(name) - 1] = '\0';
263 result = wlp_wss_create_activate(wss, &wssid, name,
264 sec_status, accept);
265 } else
266 result = -EINVAL;
267 } else if (result == 18)
268 result = wlp_wss_enroll_activate(wss, &wssid, &dev);
269 else
270 result = -EINVAL;
271 return result < 0 ? result : size;
272}
273EXPORT_SYMBOL_GPL(wlp_wss_activate_store);
274
275/**
276 * Show the UUID of this host
277 */
278ssize_t wlp_uuid_show(struct wlp *wlp, char *buf)
279{
280 ssize_t result = 0;
281
282 mutex_lock(&wlp->mutex);
283 result = wlp_wss_uuid_print(buf, PAGE_SIZE, &wlp->uuid);
284 buf[result++] = '\n';
285 mutex_unlock(&wlp->mutex);
286 return result;
287}
288EXPORT_SYMBOL_GPL(wlp_uuid_show);
289
290/**
291 * Store a new UUID for this host
292 *
 293 * According to the spec this should be encoded as an octet string in the
 294 * order the octets are shown in the string representation in RFC 4122
 295 * (WLP 0.99 [Table 6]).
 296 *
 297 * We do not validate the value provided by the user.
298 */
299ssize_t wlp_uuid_store(struct wlp *wlp, const char *buf, size_t size)
300{
301 ssize_t result;
302 struct wlp_uuid uuid;
303
304 mutex_lock(&wlp->mutex);
305 result = sscanf(buf, "%02hhx %02hhx %02hhx %02hhx "
306 "%02hhx %02hhx %02hhx %02hhx "
307 "%02hhx %02hhx %02hhx %02hhx "
308 "%02hhx %02hhx %02hhx %02hhx ",
309 &uuid.data[0] , &uuid.data[1],
310 &uuid.data[2] , &uuid.data[3],
311 &uuid.data[4] , &uuid.data[5],
312 &uuid.data[6] , &uuid.data[7],
313 &uuid.data[8] , &uuid.data[9],
314 &uuid.data[10], &uuid.data[11],
315 &uuid.data[12], &uuid.data[13],
316 &uuid.data[14], &uuid.data[15]);
317 if (result != 16) {
318 result = -EINVAL;
319 goto error;
320 }
321 wlp->uuid = uuid;
322error:
323 mutex_unlock(&wlp->mutex);
324 return result < 0 ? result : size;
325}
326EXPORT_SYMBOL_GPL(wlp_uuid_store);
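/*
 * Example (illustrative only, assuming the attribute is exposed as a
 * "wlp_uuid" file for the interface): the UUID is written as 16
 * space-separated hex octets, e.g.
 *
 *	echo "00 11 22 33 44 55 66 77 88 99 aa bb cc dd ee ff" > wlp_uuid
 */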
327
328/**
329 * Show contents of members of device information structure
330 */
331#define wlp_dev_info_show(type) \
332ssize_t wlp_dev_##type##_show(struct wlp *wlp, char *buf) \
333{ \
334 ssize_t result = 0; \
335 mutex_lock(&wlp->mutex); \
336 if (wlp->dev_info == NULL) { \
337 result = __wlp_setup_device_info(wlp); \
338 if (result < 0) \
339 goto out; \
340 } \
341 result = scnprintf(buf, PAGE_SIZE, "%s\n", wlp->dev_info->type);\
342out: \
343 mutex_unlock(&wlp->mutex); \
344 return result; \
345} \
346EXPORT_SYMBOL_GPL(wlp_dev_##type##_show);
347
348wlp_dev_info_show(name)
349wlp_dev_info_show(model_name)
350wlp_dev_info_show(model_nr)
351wlp_dev_info_show(manufacturer)
352wlp_dev_info_show(serial)
353
354/**
355 * Store contents of members of device information structure
356 */
357#define wlp_dev_info_store(type, len) \
358ssize_t wlp_dev_##type##_store(struct wlp *wlp, const char *buf, size_t size)\
359{ \
360 ssize_t result; \
361 char format[10]; \
362 mutex_lock(&wlp->mutex); \
363 if (wlp->dev_info == NULL) { \
364 result = __wlp_alloc_device_info(wlp); \
365 if (result < 0) \
366 goto out; \
367 } \
368 memset(wlp->dev_info->type, 0, sizeof(wlp->dev_info->type)); \
369 sprintf(format, "%%%uc", len); \
370 result = sscanf(buf, format, wlp->dev_info->type); \
371out: \
372 mutex_unlock(&wlp->mutex); \
373 return result < 0 ? result : size; \
374} \
375EXPORT_SYMBOL_GPL(wlp_dev_##type##_store);
376
377wlp_dev_info_store(name, 32)
378wlp_dev_info_store(manufacturer, 64)
379wlp_dev_info_store(model_name, 32)
380wlp_dev_info_store(model_nr, 32)
381wlp_dev_info_store(serial, 32)
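/*
 * Each instantiation above generates a store function (for example
 * wlp_dev_name_store()) that builds a "%<len>c" format at run time so
 * that sscanf() copies no more than <len> characters into the
 * corresponding fixed-size dev_info field.
 */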
382
383static
384const char *__wlp_dev_category[] = {
385 [WLP_DEV_CAT_COMPUTER] = "Computer",
386 [WLP_DEV_CAT_INPUT] = "Input device",
387 [WLP_DEV_CAT_PRINT_SCAN_FAX_COPIER] = "Printer, scanner, FAX, or "
388 "Copier",
389 [WLP_DEV_CAT_CAMERA] = "Camera",
390 [WLP_DEV_CAT_STORAGE] = "Storage Network",
391 [WLP_DEV_CAT_INFRASTRUCTURE] = "Infrastructure",
392 [WLP_DEV_CAT_DISPLAY] = "Display",
393 [WLP_DEV_CAT_MULTIM] = "Multimedia device",
394 [WLP_DEV_CAT_GAMING] = "Gaming device",
395 [WLP_DEV_CAT_TELEPHONE] = "Telephone",
396 [WLP_DEV_CAT_OTHER] = "Other",
397};
398
399static
400const char *wlp_dev_category_str(unsigned cat)
401{
402 if ((cat >= WLP_DEV_CAT_COMPUTER && cat <= WLP_DEV_CAT_TELEPHONE)
403 || cat == WLP_DEV_CAT_OTHER)
404 return __wlp_dev_category[cat];
405 return "unknown category";
406}
407
408ssize_t wlp_dev_prim_category_show(struct wlp *wlp, char *buf)
409{
410 ssize_t result = 0;
411 mutex_lock(&wlp->mutex);
412 if (wlp->dev_info == NULL) {
413 result = __wlp_setup_device_info(wlp);
414 if (result < 0)
415 goto out;
416 }
417 result = scnprintf(buf, PAGE_SIZE, "%s\n",
418 wlp_dev_category_str(wlp->dev_info->prim_dev_type.category));
419out:
420 mutex_unlock(&wlp->mutex);
421 return result;
422}
423EXPORT_SYMBOL_GPL(wlp_dev_prim_category_show);
424
425ssize_t wlp_dev_prim_category_store(struct wlp *wlp, const char *buf,
426 size_t size)
427{
428 ssize_t result;
429 u16 cat;
430 mutex_lock(&wlp->mutex);
431 if (wlp->dev_info == NULL) {
432 result = __wlp_alloc_device_info(wlp);
433 if (result < 0)
434 goto out;
435 }
436 result = sscanf(buf, "%hu", &cat);
437 if ((cat >= WLP_DEV_CAT_COMPUTER && cat <= WLP_DEV_CAT_TELEPHONE)
438 || cat == WLP_DEV_CAT_OTHER)
439 wlp->dev_info->prim_dev_type.category = cat;
440 else
441 result = -EINVAL;
442out:
443 mutex_unlock(&wlp->mutex);
444 return result < 0 ? result : size;
445}
446EXPORT_SYMBOL_GPL(wlp_dev_prim_category_store);
447
448ssize_t wlp_dev_prim_OUI_show(struct wlp *wlp, char *buf)
449{
450 ssize_t result = 0;
451 mutex_lock(&wlp->mutex);
452 if (wlp->dev_info == NULL) {
453 result = __wlp_setup_device_info(wlp);
454 if (result < 0)
455 goto out;
456 }
457 result = scnprintf(buf, PAGE_SIZE, "%02x:%02x:%02x\n",
458 wlp->dev_info->prim_dev_type.OUI[0],
459 wlp->dev_info->prim_dev_type.OUI[1],
460 wlp->dev_info->prim_dev_type.OUI[2]);
461out:
462 mutex_unlock(&wlp->mutex);
463 return result;
464}
465EXPORT_SYMBOL_GPL(wlp_dev_prim_OUI_show);
466
467ssize_t wlp_dev_prim_OUI_store(struct wlp *wlp, const char *buf, size_t size)
468{
469 ssize_t result;
470 u8 OUI[3];
471 mutex_lock(&wlp->mutex);
472 if (wlp->dev_info == NULL) {
473 result = __wlp_alloc_device_info(wlp);
474 if (result < 0)
475 goto out;
476 }
477 result = sscanf(buf, "%hhx:%hhx:%hhx",
478 &OUI[0], &OUI[1], &OUI[2]);
479 if (result != 3) {
480 result = -EINVAL;
481 goto out;
482 } else
483 memcpy(wlp->dev_info->prim_dev_type.OUI, OUI, sizeof(OUI));
484out:
485 mutex_unlock(&wlp->mutex);
486 return result < 0 ? result : size;
487}
488EXPORT_SYMBOL_GPL(wlp_dev_prim_OUI_store);
489
490
491ssize_t wlp_dev_prim_OUI_sub_show(struct wlp *wlp, char *buf)
492{
493 ssize_t result = 0;
494 mutex_lock(&wlp->mutex);
495 if (wlp->dev_info == NULL) {
496 result = __wlp_setup_device_info(wlp);
497 if (result < 0)
498 goto out;
499 }
500 result = scnprintf(buf, PAGE_SIZE, "%u\n",
501 wlp->dev_info->prim_dev_type.OUIsubdiv);
502out:
503 mutex_unlock(&wlp->mutex);
504 return result;
505}
506EXPORT_SYMBOL_GPL(wlp_dev_prim_OUI_sub_show);
507
508ssize_t wlp_dev_prim_OUI_sub_store(struct wlp *wlp, const char *buf,
509 size_t size)
510{
511 ssize_t result;
512 unsigned sub;
513 u8 max_sub = ~0;
514 mutex_lock(&wlp->mutex);
515 if (wlp->dev_info == NULL) {
516 result = __wlp_alloc_device_info(wlp);
517 if (result < 0)
518 goto out;
519 }
520 result = sscanf(buf, "%u", &sub);
521 if (sub <= max_sub)
522 wlp->dev_info->prim_dev_type.OUIsubdiv = sub;
523 else
524 result = -EINVAL;
525out:
526 mutex_unlock(&wlp->mutex);
527 return result < 0 ? result : size;
528}
529EXPORT_SYMBOL_GPL(wlp_dev_prim_OUI_sub_store);
530
531ssize_t wlp_dev_prim_subcat_show(struct wlp *wlp, char *buf)
532{
533 ssize_t result = 0;
534 mutex_lock(&wlp->mutex);
535 if (wlp->dev_info == NULL) {
536 result = __wlp_setup_device_info(wlp);
537 if (result < 0)
538 goto out;
539 }
540 result = scnprintf(buf, PAGE_SIZE, "%u\n",
541 wlp->dev_info->prim_dev_type.subID);
542out:
543 mutex_unlock(&wlp->mutex);
544 return result;
545}
546EXPORT_SYMBOL_GPL(wlp_dev_prim_subcat_show);
547
548ssize_t wlp_dev_prim_subcat_store(struct wlp *wlp, const char *buf,
549 size_t size)
550{
551 ssize_t result;
552 unsigned sub;
553 __le16 max_sub = ~0;
554 mutex_lock(&wlp->mutex);
555 if (wlp->dev_info == NULL) {
556 result = __wlp_alloc_device_info(wlp);
557 if (result < 0)
558 goto out;
559 }
560 result = sscanf(buf, "%u", &sub);
561 if (sub <= max_sub)
562 wlp->dev_info->prim_dev_type.subID = sub;
563 else
564 result = -EINVAL;
565out:
566 mutex_unlock(&wlp->mutex);
567 return result < 0 ? result : size;
568}
569EXPORT_SYMBOL_GPL(wlp_dev_prim_subcat_store);
570
571/**
572 * Subsystem implementation for interaction with individual WSS via sysfs
573 *
 574 * This follows the subsystem instructions in Documentation/filesystems/sysfs.txt
575 */
576
577#define kobj_to_wlp_wss(obj) container_of(obj, struct wlp_wss, kobj)
578#define attr_to_wlp_wss_attr(_attr) \
579 container_of(_attr, struct wlp_wss_attribute, attr)
580
581/**
582 * Sysfs subsystem: forward read calls
583 *
584 * Sysfs operation for forwarding read call to the show method of the
585 * attribute owner
586 */
587static
588ssize_t wlp_wss_attr_show(struct kobject *kobj, struct attribute *attr,
589 char *buf)
590{
591 struct wlp_wss_attribute *wss_attr = attr_to_wlp_wss_attr(attr);
592 struct wlp_wss *wss = kobj_to_wlp_wss(kobj);
593 ssize_t ret = -EIO;
594
595 if (wss_attr->show)
596 ret = wss_attr->show(wss, buf);
597 return ret;
598}
599/**
600 * Sysfs subsystem: forward write calls
601 *
602 * Sysfs operation for forwarding write call to the store method of the
603 * attribute owner
604 */
605static
606ssize_t wlp_wss_attr_store(struct kobject *kobj, struct attribute *attr,
607 const char *buf, size_t count)
608{
609 struct wlp_wss_attribute *wss_attr = attr_to_wlp_wss_attr(attr);
610 struct wlp_wss *wss = kobj_to_wlp_wss(kobj);
611 ssize_t ret = -EIO;
612
613 if (wss_attr->store)
614 ret = wss_attr->store(wss, buf, count);
615 return ret;
616}
617
618static
619struct sysfs_ops wss_sysfs_ops = {
620 .show = wlp_wss_attr_show,
621 .store = wlp_wss_attr_store,
622};
623
624struct kobj_type wss_ktype = {
625 .release = wlp_wss_release,
626 .sysfs_ops = &wss_sysfs_ops,
627};
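/*
 * Sketch of intended use (the registration itself lives elsewhere in the
 * driver): the WSS kobject is expected to be set up with this ktype and
 * the attribute group defined below, along the lines of
 *
 *	kobject_init_and_add(&wss->kobj, &wss_ktype, parent, "wss");
 *	sysfs_create_group(&wss->kobj, &wss_attr_group);
 */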
628
629
630/**
631 * Sysfs files for individual WSS
632 */
633
634/**
635 * Print static properties of this WSS
636 *
 637 * The name of a WSS may not be null terminated. Its max size is 64 bytes,
 638 * so we copy it to a larger array just to make sure we print sane data.
639 */
640static ssize_t wlp_wss_properties_show(struct wlp_wss *wss, char *buf)
641{
642 int result = 0;
643
644 if (mutex_lock_interruptible(&wss->mutex))
645 goto out;
646 result = __wlp_wss_properties_show(wss, buf, PAGE_SIZE);
647 mutex_unlock(&wss->mutex);
648out:
649 return result;
650}
651WSS_ATTR(properties, S_IRUGO, wlp_wss_properties_show, NULL);
652
653/**
654 * Print all connected members of this WSS
655 * The EDA cache contains all members of WSS neighborhood.
656 */
657static ssize_t wlp_wss_members_show(struct wlp_wss *wss, char *buf)
658{
659 struct wlp *wlp = container_of(wss, struct wlp, wss);
660 return wlp_eda_show(wlp, buf);
661}
662WSS_ATTR(members, S_IRUGO, wlp_wss_members_show, NULL);
663
664static
665const char *__wlp_strstate[] = {
666 "none",
667 "partially enrolled",
668 "enrolled",
669 "active",
670 "connected",
671};
672
673static const char *wlp_wss_strstate(unsigned state)
674{
675 if (state >= ARRAY_SIZE(__wlp_strstate))
676 return "unknown state";
677 return __wlp_strstate[state];
678}
679
680/*
681 * Print current state of this WSS
682 */
683static ssize_t wlp_wss_state_show(struct wlp_wss *wss, char *buf)
684{
685 int result = 0;
686
687 if (mutex_lock_interruptible(&wss->mutex))
688 goto out;
689 result = scnprintf(buf, PAGE_SIZE, "%s\n",
690 wlp_wss_strstate(wss->state));
691 mutex_unlock(&wss->mutex);
692out:
693 return result;
694}
695WSS_ATTR(state, S_IRUGO, wlp_wss_state_show, NULL);
696
697
698static
699struct attribute *wss_attrs[] = {
700 &wss_attr_properties.attr,
701 &wss_attr_members.attr,
702 &wss_attr_state.attr,
703 NULL,
704};
705
706struct attribute_group wss_attr_group = {
707 .name = NULL, /* we want them in the same directory */
708 .attrs = wss_attrs,
709};
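
The show/store forwarding and the WSS_ATTR() uses above rely on a wlp_wss_attribute
wrapper and a WSS_ATTR() macro that are not visible in this hunk. A minimal sketch of
what they could look like, assuming the usual kobject attribute pattern; the field
layout and the use of the __ATTR() shorthand are assumptions, the real definitions
live elsewhere in this series:

/* Sketch only; struct layout and __ATTR() use are assumptions. */
struct wlp_wss_attribute {
	struct attribute attr;
	ssize_t (*show)(struct wlp_wss *wss, char *buf);
	ssize_t (*store)(struct wlp_wss *wss, const char *buf, size_t count);
};

#define WSS_ATTR(_name, _mode, _show, _store)				\
	static struct wlp_wss_attribute wss_attr_##_name =		\
		__ATTR(_name, _mode, _show, _store)

With this shape, wlp_wss_attr_show()/wlp_wss_attr_store() above simply recover the
wrapper with attr_to_wlp_wss_attr() and forward to its show/store methods.
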
diff --git a/drivers/uwb/wlp/txrx.c b/drivers/uwb/wlp/txrx.c
new file mode 100644
index 000000000000..c701bd1a2887
--- /dev/null
+++ b/drivers/uwb/wlp/txrx.c
@@ -0,0 +1,374 @@
1/*
2 * WiMedia Logical Link Control Protocol (WLP)
3 * Message exchange infrastructure
4 *
5 * Copyright (C) 2007 Intel Corporation
6 * Reinette Chatre <reinette.chatre@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * FIXME: Docs
24 *
25 */
26
27#include <linux/etherdevice.h>
28#include <linux/wlp.h>
29#define D_LOCAL 5
30#include <linux/uwb/debug.h>
31#include "wlp-internal.h"
32
33
34/**
35 * Direct incoming association msg to correct parsing routine
36 *
37 * We only expect D1, E1, C1, C3 messages as new. All other incoming
38 * association messages should form part of an established session that is
39 * handled elsewhere.
40 * The handling of these messages often requires calling sleeping functions
41 * - this cannot be done in interrupt context. We use the kernel's
42 * workqueue to handle these messages.
43 */
44static
45void wlp_direct_assoc_frame(struct wlp *wlp, struct sk_buff *skb,
46 struct uwb_dev_addr *src)
47{
48 struct device *dev = &wlp->rc->uwb_dev.dev;
49 struct wlp_frame_assoc *assoc = (void *) skb->data;
50 struct wlp_assoc_frame_ctx *frame_ctx;
51 d_fnstart(5, dev, "wlp %p, skb %p\n", wlp, skb);
52 frame_ctx = kmalloc(sizeof(*frame_ctx), GFP_ATOMIC);
53 if (frame_ctx == NULL) {
54 dev_err(dev, "WLP: Unable to allocate memory for association "
55 "frame handling.\n");
56 kfree_skb(skb);
57 goto out;
58 }
59 frame_ctx->wlp = wlp;
60 frame_ctx->skb = skb;
61 frame_ctx->src = *src;
62 switch (assoc->type) {
63 case WLP_ASSOC_D1:
64 d_printf(5, dev, "Received a D1 frame.\n");
65 INIT_WORK(&frame_ctx->ws, wlp_handle_d1_frame);
66 schedule_work(&frame_ctx->ws);
67 break;
68 case WLP_ASSOC_E1:
69		d_printf(5, dev, "Received an E1 frame. FIXME?\n");
70 kfree_skb(skb); /* Temporary until we handle it */
71 kfree(frame_ctx); /* Temporary until we handle it */
72 break;
73 case WLP_ASSOC_C1:
74 d_printf(5, dev, "Received a C1 frame.\n");
75 INIT_WORK(&frame_ctx->ws, wlp_handle_c1_frame);
76 schedule_work(&frame_ctx->ws);
77 break;
78 case WLP_ASSOC_C3:
79 d_printf(5, dev, "Received a C3 frame.\n");
80 INIT_WORK(&frame_ctx->ws, wlp_handle_c3_frame);
81 schedule_work(&frame_ctx->ws);
82 break;
83 default:
84 dev_err(dev, "Received unexpected association frame. "
85 "Type = %d \n", assoc->type);
86 kfree_skb(skb);
87 kfree(frame_ctx);
88 break;
89 }
90out:
91 d_fnend(5, dev, "wlp %p\n", wlp);
92}
93
94/**
95 * Process incoming association frame
96 *
97 * Although it would be possible to deal with some incoming association
98 * messages without creating a new session, we keep things simple. We
99 * do not accept new association messages if there is a session in progress
100 * and the messages do not belong to that session.
101 *
102 * If an association message arrives that causes the creation of a session
103 * (WLP_ASSOC_E1) while we are in the process of creating a session then we
104 * rely on the neighbor mutex to protect the data. That is, the new session
105 * will not be started until the previous is completed.
106 */
107static
108void wlp_receive_assoc_frame(struct wlp *wlp, struct sk_buff *skb,
109 struct uwb_dev_addr *src)
110{
111 struct device *dev = &wlp->rc->uwb_dev.dev;
112 struct wlp_frame_assoc *assoc = (void *) skb->data;
113 struct wlp_session *session = wlp->session;
114 u8 version;
115 d_fnstart(5, dev, "wlp %p, skb %p\n", wlp, skb);
116
117 if (wlp_get_version(wlp, &assoc->version, &version,
118 sizeof(assoc->version)) < 0)
119 goto error;
120 if (version != WLP_VERSION) {
121 dev_err(dev, "Unsupported WLP version in association "
122 "message.\n");
123 goto error;
124 }
125 if (session != NULL) {
126 /* Function that created this session is still holding the
127 * &wlp->mutex to protect this session. */
128 if (assoc->type == session->exp_message ||
129 assoc->type == WLP_ASSOC_F0) {
130 if (!memcmp(&session->neighbor_addr, src,
131 sizeof(*src))) {
132 session->data = skb;
133 (session->cb)(wlp);
134 } else {
135 dev_err(dev, "Received expected message from "
136 "unexpected source. Expected message "
137 "%d or F0 from %02x:%02x, but received "
138 "it from %02x:%02x. Dropping.\n",
139 session->exp_message,
140 session->neighbor_addr.data[1],
141 session->neighbor_addr.data[0],
142 src->data[1], src->data[0]);
143 goto error;
144 }
145 } else {
146 dev_err(dev, "Association already in progress. "
147 "Dropping.\n");
148 goto error;
149 }
150 } else {
151 wlp_direct_assoc_frame(wlp, skb, src);
152 }
153 d_fnend(5, dev, "wlp %p\n", wlp);
154 return;
155error:
156 kfree_skb(skb);
157 d_fnend(5, dev, "wlp %p\n", wlp);
158}
159
160/**
161 * Verify incoming frame is from connected neighbor, prep to pass to WLP client
162 *
163 * Verification proceeds according to WLP 0.99 [7.3.1]. The source address
164 * is used to determine which neighbor is sending the frame and the WSS tag
165 * is used to know to which WSS the frame belongs (we only support one WSS
166 * so this test is straightforward).
167 * With the WSS found we need to ensure that we are connected before
168 * allowing the exchange of data frames.
169 */
170static
171int wlp_verify_prep_rx_frame(struct wlp *wlp, struct sk_buff *skb,
172 struct uwb_dev_addr *src)
173{
174 struct device *dev = &wlp->rc->uwb_dev.dev;
175 int result = -EINVAL;
176 struct wlp_eda_node eda_entry;
177 struct wlp_frame_std_abbrv_hdr *hdr = (void *) skb->data;
178
179 d_fnstart(6, dev, "wlp %p, skb %p \n", wlp, skb);
180 /*verify*/
181 result = wlp_copy_eda_node(&wlp->eda, src, &eda_entry);
182 if (result < 0) {
183 if (printk_ratelimit())
184 dev_err(dev, "WLP: Incoming frame is from unknown "
185 "neighbor %02x:%02x.\n", src->data[1],
186 src->data[0]);
187 goto out;
188 }
189 if (hdr->tag != eda_entry.tag) {
190 if (printk_ratelimit())
191 dev_err(dev, "WLP: Tag of incoming frame from "
192 "%02x:%02x does not match expected tag. "
193 "Received 0x%02x, expected 0x%02x. \n",
194 src->data[1], src->data[0], hdr->tag,
195 eda_entry.tag);
196 result = -EINVAL;
197 goto out;
198 }
199 if (eda_entry.state != WLP_WSS_CONNECTED) {
200 if (printk_ratelimit())
201 dev_err(dev, "WLP: Incoming frame from "
202				"%02x:%02x is not from a connected WSS.\n",
203 src->data[1], src->data[0]);
204 result = -EINVAL;
205 goto out;
206 }
207 /*prep*/
208 skb_pull(skb, sizeof(*hdr));
209out:
210 d_fnend(6, dev, "wlp %p, skb %p, result = %d \n", wlp, skb, result);
211 return result;
212}
213
214/**
215 * Receive a WLP frame from device
216 *
217 * @returns: 1 if calling function should free the skb
218 * 0 if it successfully handled skb and freed it
219 * 0 if an error occurred; the skb is freed in this case
220 */
221int wlp_receive_frame(struct device *dev, struct wlp *wlp, struct sk_buff *skb,
222 struct uwb_dev_addr *src)
223{
224 unsigned len = skb->len;
225 void *ptr = skb->data;
226 struct wlp_frame_hdr *hdr;
227 int result = 0;
228
229 d_fnstart(6, dev, "skb (%p), len (%u)\n", skb, len);
230 if (len < sizeof(*hdr)) {
231 dev_err(dev, "Not enough data to parse WLP header.\n");
232 result = -EINVAL;
233 goto out;
234 }
235 hdr = ptr;
236 d_dump(6, dev, hdr, sizeof(*hdr));
237 if (le16_to_cpu(hdr->mux_hdr) != WLP_PROTOCOL_ID) {
238 dev_err(dev, "Not a WLP frame type.\n");
239 result = -EINVAL;
240 goto out;
241 }
242 switch (hdr->type) {
243 case WLP_FRAME_STANDARD:
244 if (len < sizeof(struct wlp_frame_std_abbrv_hdr)) {
245 dev_err(dev, "Not enough data to parse Standard "
246 "WLP header.\n");
247 goto out;
248 }
249 result = wlp_verify_prep_rx_frame(wlp, skb, src);
250 if (result < 0) {
251 if (printk_ratelimit())
252 dev_err(dev, "WLP: Verification of frame "
253 "from neighbor %02x:%02x failed.\n",
254 src->data[1], src->data[0]);
255 goto out;
256 }
257 result = 1;
258 break;
259 case WLP_FRAME_ABBREVIATED:
260 dev_err(dev, "Abbreviated frame received. FIXME?\n");
261 kfree_skb(skb);
262 break;
263 case WLP_FRAME_CONTROL:
264 dev_err(dev, "Control frame received. FIXME?\n");
265 kfree_skb(skb);
266 break;
267 case WLP_FRAME_ASSOCIATION:
268 if (len < sizeof(struct wlp_frame_assoc)) {
269 dev_err(dev, "Not enough data to parse Association "
270 "WLP header.\n");
271 goto out;
272 }
273 d_printf(5, dev, "Association frame received.\n");
274 wlp_receive_assoc_frame(wlp, skb, src);
275 break;
276 default:
277 dev_err(dev, "Invalid frame received.\n");
278 result = -EINVAL;
279 break;
280 }
281out:
282 if (result < 0) {
283 kfree_skb(skb);
284 result = 0;
285 }
286 d_fnend(6, dev, "skb (%p)\n", skb);
287 return result;
288}
289EXPORT_SYMBOL_GPL(wlp_receive_frame);
290
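
A hedged sketch of how a WLP client driver's receive path might use
wlp_receive_frame(); example_wlp_rx, the net_dev argument and the netif_rx()
delivery policy are illustrative assumptions, not something this patch mandates:

/* Sketch only, not from this patch. Assumes <linux/netdevice.h> and
 * <linux/etherdevice.h>, and that a return of 1 hands the already
 * de-encapsulated skb back to the caller for delivery. */
static void example_wlp_rx(struct net_device *net_dev, struct wlp *wlp,
			   struct sk_buff *skb, struct uwb_dev_addr *src)
{
	struct device *dev = &wlp->rc->uwb_dev.dev;

	if (wlp_receive_frame(dev, wlp, skb, src) == 0)
		return;			/* WLP consumed (and freed) the skb */
	skb->protocol = eth_type_trans(skb, net_dev);
	netif_rx(skb);			/* deliver the stripped frame upwards */
}
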
291
292/**
293 * Verify frame from network stack, prepare for further transmission
294 *
295 * @skb: the socket buffer that needs to be prepared for transmission (it
296 * is in need of a WLP header). If this is a broadcast frame we take
297 * over the entire transmission.
298 * If it is a unicast frame the WSS connection should already be established
299 * and transmission will be done by the calling function.
300 * @dst: On return this will contain the device address to which the
301 * frame is destined.
302 * @returns: 0 on success, no tx: WLP header successfully applied to skb buffer,
303 * calling function can proceed with tx
304 * 1 on success with tx : WLP will take over transmission of this
305 * frame
306 * <0 on error
307 *
308 * The network stack (WLP client) is attempting to transmit a frame. We can
309 * only transmit data if a local WSS is at least active (connection will be
310 * done here if this is a broadcast frame and neighbor also has the WSS
311 * active).
312 *
313 * The frame can be either broadcast or unicast. Broadcast in a WSS is
314 * supported via multicast, but we don't support multicast yet (until
315 * devices start to support MAB IEs). If a broadcast frame needs to be
316 * transmitted it is treated as a unicast frame to each neighbor. In this
317 * case the WLP takes over transmission of the skb and returns 1
318 * to the caller to indicate so. Also, in this case, if a neighbor has the
319 * same WSS activated but is not connected then the WSS connection will be
320 * done at this time. The neighbor's virtual address will be learned at
321 * this time.
322 *
323 * The destination address in a unicast frame is the virtual address of the
324 * neighbor. This address only becomes known when a WSS connection is
325 * established. We thus rely on a broadcast frame to trigger the setup of
326 * WSS connections to all neighbors before we are able to send unicast
327 * frames to them. This seems reasonable as IP would usually use ARP first
328 * before any unicast frames are sent.
329 *
330 * If we are already connected to the neighbor (neighbor's virtual address
331 * is known) we just prepare the WLP header and the caller will continue to
332 * send the frame.
333 *
334 * A failure in this function usually indicates something that cannot be
335 * fixed automatically. So, if this function fails (@return < 0) the calling
336 * function should not retry sending the frame as it will very likely keep
337 * failing.
338 *
339 */
340int wlp_prepare_tx_frame(struct device *dev, struct wlp *wlp,
341 struct sk_buff *skb, struct uwb_dev_addr *dst)
342{
343 int result = -EINVAL;
344 struct ethhdr *eth_hdr = (void *) skb->data;
345
346 d_fnstart(6, dev, "wlp (%p), skb (%p) \n", wlp, skb);
347 if (is_broadcast_ether_addr(eth_hdr->h_dest)) {
348 d_printf(6, dev, "WLP: handling broadcast frame. \n");
349 result = wlp_eda_for_each(&wlp->eda, wlp_wss_send_copy, skb);
350 if (result < 0) {
351 if (printk_ratelimit())
352 dev_err(dev, "Unable to handle broadcast "
353 "frame from WLP client.\n");
354 goto out;
355 }
356 dev_kfree_skb_irq(skb);
357 result = 1;
358 /* Frame will be transmitted by WLP. */
359 } else {
360 d_printf(6, dev, "WLP: handling unicast frame. \n");
361 result = wlp_eda_for_virtual(&wlp->eda, eth_hdr->h_dest, dst,
362 wlp_wss_prep_hdr, skb);
363 if (unlikely(result < 0)) {
364 if (printk_ratelimit())
365 dev_err(dev, "Unable to prepare "
366 "skb for transmission. \n");
367 goto out;
368 }
369 }
370out:
371 d_fnend(6, dev, "wlp (%p), skb (%p). result = %d \n", wlp, skb, result);
372 return result;
373}
374EXPORT_SYMBOL_GPL(wlp_prepare_tx_frame);
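
And a matching sketch for the transmit side, following the return-value contract
documented above; example_wlp_tx and the exact wlp->xmit_frame() signature are
assumptions made for illustration (the callback itself is one of the four checked by
wlp_setup() in wlp-lc.c):

/* Sketch only, not from this patch. The xmit_frame() signature is assumed. */
static int example_wlp_tx(struct wlp *wlp, struct sk_buff *skb)
{
	struct device *dev = &wlp->rc->uwb_dev.dev;
	struct uwb_dev_addr dst;
	int result;

	result = wlp_prepare_tx_frame(dev, wlp, skb, &dst);
	if (result < 0) {
		dev_kfree_skb(skb);	/* do not retry; see comment above */
		return result;
	}
	if (result == 1)
		return 0;		/* WLP took over transmission */
	return wlp->xmit_frame(wlp, skb, &dst);	/* header applied; send it */
}
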
diff --git a/drivers/uwb/wlp/wlp-internal.h b/drivers/uwb/wlp/wlp-internal.h
new file mode 100644
index 000000000000..1c94fabfb1a7
--- /dev/null
+++ b/drivers/uwb/wlp/wlp-internal.h
@@ -0,0 +1,228 @@
1/*
2 * WiMedia Logical Link Control Protocol (WLP)
3 * Internal API
4 *
5 * Copyright (C) 2007 Intel Corporation
6 * Reinette Chatre <reinette.chatre@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 */
23
24#ifndef __WLP_INTERNAL_H__
25#define __WLP_INTERNAL_H__
26
27/**
28 * State of WSS connection
29 *
30 * A device needs to connect to a neighbor in an activated WSS before data
31 * can be transmitted. The spec also distinguishes between a new connection
32 * attempt and a connection attempt following previous failed attempts. The
33 * state WLP_WSS_CONNECT_FAILED is used for this scenario. See WLP 0.99
34 * [7.2.6]
35 */
36enum wlp_wss_connect {
37 WLP_WSS_UNCONNECTED = 0,
38 WLP_WSS_CONNECTED,
39 WLP_WSS_CONNECT_FAILED,
40};
41
42extern struct kobj_type wss_ktype;
43extern struct attribute_group wss_attr_group;
44
45extern int uwb_rc_ie_add(struct uwb_rc *, const struct uwb_ie_hdr *, size_t);
46extern int uwb_rc_ie_rm(struct uwb_rc *, enum uwb_ie);
47
48
49/* This should be changed to a dynamic array where entries are sorted
50 * by eth_addr and lookups are done with a binary search.
51 *
52 * Although, thinking twice about it: this technology's maximum reach
53 * is 10 meters... unless you want to pack too many devices around
54 * your radio controller/WLP device, the list will probably not be
55 * too big.
56 *
57 * In any case, there is probably some data structure in the kernel
58 * that we could reuse for this already.
59 *
60 * The structure below is really only good while we support one WSS per
61 * host.
62 */
63struct wlp_eda_node {
64 struct list_head list_node;
65 unsigned char eth_addr[ETH_ALEN];
66 struct uwb_dev_addr dev_addr;
67 struct wlp_wss *wss;
68 unsigned char virt_addr[ETH_ALEN];
69 u8 tag;
70 enum wlp_wss_connect state;
71};
72
73typedef int (*wlp_eda_for_each_f)(struct wlp *, struct wlp_eda_node *, void *);
74
75extern void wlp_eda_init(struct wlp_eda *);
76extern void wlp_eda_release(struct wlp_eda *);
77extern int wlp_eda_create_node(struct wlp_eda *,
78 const unsigned char eth_addr[ETH_ALEN],
79 const struct uwb_dev_addr *);
80extern void wlp_eda_rm_node(struct wlp_eda *, const struct uwb_dev_addr *);
81extern int wlp_eda_update_node(struct wlp_eda *,
82 const struct uwb_dev_addr *,
83 struct wlp_wss *,
84 const unsigned char virt_addr[ETH_ALEN],
85 const u8, const enum wlp_wss_connect);
86extern int wlp_eda_update_node_state(struct wlp_eda *,
87 const struct uwb_dev_addr *,
88 const enum wlp_wss_connect);
89
90extern int wlp_copy_eda_node(struct wlp_eda *, struct uwb_dev_addr *,
91 struct wlp_eda_node *);
92extern int wlp_eda_for_each(struct wlp_eda *, wlp_eda_for_each_f , void *);
93extern int wlp_eda_for_virtual(struct wlp_eda *,
94 const unsigned char eth_addr[ETH_ALEN],
95 struct uwb_dev_addr *,
96 wlp_eda_for_each_f , void *);
97
98
99extern void wlp_remove_neighbor_tmp_info(struct wlp_neighbor_e *);
100
101extern size_t wlp_wss_key_print(char *, size_t, u8 *);
102
103/* Function called when no more references to the WSS exist */
104extern void wlp_wss_release(struct kobject *);
105
106extern void wlp_wss_reset(struct wlp_wss *);
107extern int wlp_wss_create_activate(struct wlp_wss *, struct wlp_uuid *,
108 char *, unsigned, unsigned);
109extern int wlp_wss_enroll_activate(struct wlp_wss *, struct wlp_uuid *,
110 struct uwb_dev_addr *);
111extern ssize_t wlp_discover(struct wlp *);
112
113extern int wlp_enroll_neighbor(struct wlp *, struct wlp_neighbor_e *,
114 struct wlp_wss *, struct wlp_uuid *);
115extern int wlp_wss_is_active(struct wlp *, struct wlp_wss *,
116 struct uwb_dev_addr *);
117
118struct wlp_assoc_conn_ctx {
119 struct work_struct ws;
120 struct wlp *wlp;
121 struct sk_buff *skb;
122 struct wlp_eda_node eda_entry;
123};
124
125
126extern int wlp_wss_connect_prep(struct wlp *, struct wlp_eda_node *, void *);
127extern int wlp_wss_send_copy(struct wlp *, struct wlp_eda_node *, void *);
128
129
130/* Message handling */
131struct wlp_assoc_frame_ctx {
132 struct work_struct ws;
133 struct wlp *wlp;
134 struct sk_buff *skb;
135 struct uwb_dev_addr src;
136};
137
138extern int wlp_wss_prep_hdr(struct wlp *, struct wlp_eda_node *, void *);
139extern void wlp_handle_d1_frame(struct work_struct *);
140extern int wlp_parse_d2_frame_to_cache(struct wlp *, struct sk_buff *,
141 struct wlp_neighbor_e *);
142extern int wlp_parse_d2_frame_to_enroll(struct wlp_wss *, struct sk_buff *,
143 struct wlp_neighbor_e *,
144 struct wlp_uuid *);
145extern void wlp_handle_c1_frame(struct work_struct *);
146extern void wlp_handle_c3_frame(struct work_struct *);
147extern int wlp_parse_c3c4_frame(struct wlp *, struct sk_buff *,
148 struct wlp_uuid *, u8 *,
149 struct uwb_mac_addr *);
150extern int wlp_parse_f0(struct wlp *, struct sk_buff *);
151extern int wlp_send_assoc_frame(struct wlp *, struct wlp_wss *,
152 struct uwb_dev_addr *, enum wlp_assoc_type);
153extern ssize_t wlp_get_version(struct wlp *, struct wlp_attr_version *,
154 u8 *, ssize_t);
155extern ssize_t wlp_get_wssid(struct wlp *, struct wlp_attr_wssid *,
156 struct wlp_uuid *, ssize_t);
157extern int __wlp_alloc_device_info(struct wlp *);
158extern int __wlp_setup_device_info(struct wlp *);
159
160extern struct wlp_wss_attribute wss_attribute_properties;
161extern struct wlp_wss_attribute wss_attribute_members;
162extern struct wlp_wss_attribute wss_attribute_state;
163
164static inline
165size_t wlp_wss_uuid_print(char *buf, size_t bufsize, struct wlp_uuid *uuid)
166{
167 size_t result;
168
169 result = scnprintf(buf, bufsize,
170 "%02x:%02x:%02x:%02x:%02x:%02x:"
171 "%02x:%02x:%02x:%02x:%02x:%02x:"
172 "%02x:%02x:%02x:%02x",
173 uuid->data[0], uuid->data[1],
174 uuid->data[2], uuid->data[3],
175 uuid->data[4], uuid->data[5],
176 uuid->data[6], uuid->data[7],
177 uuid->data[8], uuid->data[9],
178 uuid->data[10], uuid->data[11],
179 uuid->data[12], uuid->data[13],
180 uuid->data[14], uuid->data[15]);
181 return result;
182}
183
184/**
185 * FIXME: How should a nonce be displayed?
186 */
187static inline
188size_t wlp_wss_nonce_print(char *buf, size_t bufsize, struct wlp_nonce *nonce)
189{
190 size_t result;
191
192 result = scnprintf(buf, bufsize,
193 "%02x %02x %02x %02x %02x %02x "
194 "%02x %02x %02x %02x %02x %02x "
195 "%02x %02x %02x %02x",
196 nonce->data[0], nonce->data[1],
197 nonce->data[2], nonce->data[3],
198 nonce->data[4], nonce->data[5],
199 nonce->data[6], nonce->data[7],
200 nonce->data[8], nonce->data[9],
201 nonce->data[10], nonce->data[11],
202 nonce->data[12], nonce->data[13],
203 nonce->data[14], nonce->data[15]);
204 return result;
205}
206
207
208static inline
209void wlp_session_cb(struct wlp *wlp)
210{
211 struct completion *completion = wlp->session->cb_priv;
212 complete(completion);
213}
214
215static inline
216int wlp_uuid_is_set(struct wlp_uuid *uuid)
217{
218 struct wlp_uuid zero_uuid = { .data = { 0x00, 0x00, 0x00, 0x00,
219 0x00, 0x00, 0x00, 0x00,
220 0x00, 0x00, 0x00, 0x00,
221 0x00, 0x00, 0x00, 0x00} };
222
223 if (!memcmp(uuid, &zero_uuid, sizeof(*uuid)))
224 return 0;
225 return 1;
226}
227
228#endif /* __WLP_INTERNAL_H__ */
diff --git a/drivers/uwb/wlp/wlp-lc.c b/drivers/uwb/wlp/wlp-lc.c
new file mode 100644
index 000000000000..0799402e73fb
--- /dev/null
+++ b/drivers/uwb/wlp/wlp-lc.c
@@ -0,0 +1,585 @@
1/*
2 * WiMedia Logical Link Control Protocol (WLP)
3 *
4 * Copyright (C) 2005-2006 Intel Corporation
5 * Reinette Chatre <reinette.chatre@intel.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 *
21 *
22 * FIXME: docs
23 */
24
25#include <linux/wlp.h>
26#define D_LOCAL 6
27#include <linux/uwb/debug.h>
28#include "wlp-internal.h"
29
30
31static
32void wlp_neighbor_init(struct wlp_neighbor_e *neighbor)
33{
34 INIT_LIST_HEAD(&neighbor->wssid);
35}
36
37/**
38 * Create area for device information storage
39 *
40 * wlp->mutex must be held
41 */
42int __wlp_alloc_device_info(struct wlp *wlp)
43{
44 struct device *dev = &wlp->rc->uwb_dev.dev;
45 BUG_ON(wlp->dev_info != NULL);
46 wlp->dev_info = kzalloc(sizeof(struct wlp_device_info), GFP_KERNEL);
47 if (wlp->dev_info == NULL) {
48 dev_err(dev, "WLP: Unable to allocate memory for "
49 "device information.\n");
50 return -ENOMEM;
51 }
52 return 0;
53}
54
55
56/**
57 * Fill in device information using function provided by driver
58 *
59 * wlp->mutex must be held
60 */
61static
62void __wlp_fill_device_info(struct wlp *wlp)
63{
64 struct device *dev = &wlp->rc->uwb_dev.dev;
65
66 BUG_ON(wlp->fill_device_info == NULL);
67 d_printf(6, dev, "Retrieving device information "
68 "from device driver.\n");
69 wlp->fill_device_info(wlp, wlp->dev_info);
70}
71
72/**
73 * Setup device information
74 *
75 * Allocate area for device information and populate it.
76 *
77 * wlp->mutex must be held
78 */
79int __wlp_setup_device_info(struct wlp *wlp)
80{
81 int result;
82 struct device *dev = &wlp->rc->uwb_dev.dev;
83
84 result = __wlp_alloc_device_info(wlp);
85 if (result < 0) {
86 dev_err(dev, "WLP: Unable to allocate area for "
87 "device information.\n");
88 return result;
89 }
90 __wlp_fill_device_info(wlp);
91 return 0;
92}
93
94/**
95 * Remove temporarily stored information about a neighbor
96 *
97 * Information learned during discovery should only be stored when the
98 * device enrolls in the neighbor's WSS. We do need to store this
99 * information temporarily in order to present it to the user.
100 *
101 * We are only interested in keeping neighbor WSS information if that
102 * neighbor is accepting enrollment.
103 *
104 * should be called with wlp->nbmutex held
105 */
106void wlp_remove_neighbor_tmp_info(struct wlp_neighbor_e *neighbor)
107{
108 struct wlp_wssid_e *wssid_e, *next;
109 u8 keep;
110 if (!list_empty(&neighbor->wssid)) {
111 list_for_each_entry_safe(wssid_e, next, &neighbor->wssid,
112 node) {
113 if (wssid_e->info != NULL) {
114 keep = wssid_e->info->accept_enroll;
115 kfree(wssid_e->info);
116 wssid_e->info = NULL;
117 if (!keep) {
118 list_del(&wssid_e->node);
119 kfree(wssid_e);
120 }
121 }
122 }
123 }
124 if (neighbor->info != NULL) {
125 kfree(neighbor->info);
126 neighbor->info = NULL;
127 }
128}
129
130/**
131 * Populate WLP neighborhood cache with neighbor information
132 *
133 * A new neighbor is found. If it is discoverable then we add it to the
134 * neighborhood cache.
135 *
136 */
137static
138int wlp_add_neighbor(struct wlp *wlp, struct uwb_dev *dev)
139{
140 int result = 0;
141 int discoverable;
142 struct wlp_neighbor_e *neighbor;
143
144 d_fnstart(6, &dev->dev, "uwb %p \n", dev);
145 d_printf(6, &dev->dev, "Found neighbor device %02x:%02x \n",
146 dev->dev_addr.data[1], dev->dev_addr.data[0]);
147 /**
148 * FIXME:
149 * Use contents of WLP IE found in beacon cache to determine if
150 * neighbor is discoverable.
151 * The device does not support WLP IE yet so this still needs to be
152 * done. Until then we assume all devices are discoverable.
153 */
154 discoverable = 1; /* will be changed when FIXME disappears */
155 if (discoverable) {
156 /* Add neighbor to cache for discovery */
157 neighbor = kzalloc(sizeof(*neighbor), GFP_KERNEL);
158 if (neighbor == NULL) {
159			dev_err(&dev->dev, "Unable to allocate memory for "
160				"new neighbor.\n");
161 result = -ENOMEM;
162 goto error_no_mem;
163 }
164 wlp_neighbor_init(neighbor);
165 uwb_dev_get(dev);
166 neighbor->uwb_dev = dev;
167 list_add(&neighbor->node, &wlp->neighbors);
168 }
169error_no_mem:
170 d_fnend(6, &dev->dev, "uwb %p, result = %d \n", dev, result);
171 return result;
172}
173
174/**
175 * Remove one neighbor from cache
176 */
177static
178void __wlp_neighbor_release(struct wlp_neighbor_e *neighbor)
179{
180 struct wlp_wssid_e *wssid_e, *next_wssid_e;
181
182 list_for_each_entry_safe(wssid_e, next_wssid_e,
183 &neighbor->wssid, node) {
184 list_del(&wssid_e->node);
185 kfree(wssid_e);
186 }
187 uwb_dev_put(neighbor->uwb_dev);
188 list_del(&neighbor->node);
189 kfree(neighbor);
190}
191
192/**
193 * Clear entire neighborhood cache.
194 */
195static
196void __wlp_neighbors_release(struct wlp *wlp)
197{
198 struct wlp_neighbor_e *neighbor, *next;
199 if (list_empty(&wlp->neighbors))
200 return;
201 list_for_each_entry_safe(neighbor, next, &wlp->neighbors, node) {
202 __wlp_neighbor_release(neighbor);
203 }
204}
205
206static
207void wlp_neighbors_release(struct wlp *wlp)
208{
209 mutex_lock(&wlp->nbmutex);
210 __wlp_neighbors_release(wlp);
211 mutex_unlock(&wlp->nbmutex);
212}
213
214
215
216/**
217 * Send D1 message to neighbor, receive D2 message
218 *
219 * @neighbor: neighbor to which D1 message will be sent
220 * @wss: if not NULL, it is an enrollment request for this WSS
221 * @wssid: if wss not NULL, this is the wssid of the WSS in which we
222 * want to enroll
223 *
224 * A D1/D2 exchange is done for one of two reasons: discovery or
225 * enrollment. If done for discovery the D1 message is sent to the neighbor
226 * and the contents of the D2 response are stored in a temporary cache.
227 * If done for enrollment, @wss and @wssid are also provided. In this
228 * case the D1 message is sent to the neighbor and the D2 response is parsed
229 * to enroll in the WSS identified by @wssid.
230 *
231 * &wss->mutex is held
232 */
233static
234int wlp_d1d2_exchange(struct wlp *wlp, struct wlp_neighbor_e *neighbor,
235 struct wlp_wss *wss, struct wlp_uuid *wssid)
236{
237 int result;
238 struct device *dev = &wlp->rc->uwb_dev.dev;
239 DECLARE_COMPLETION_ONSTACK(completion);
240 struct wlp_session session;
241 struct sk_buff *skb;
242 struct wlp_frame_assoc *resp;
243 struct uwb_dev_addr *dev_addr = &neighbor->uwb_dev->dev_addr;
244
245 mutex_lock(&wlp->mutex);
246 if (!wlp_uuid_is_set(&wlp->uuid)) {
247 dev_err(dev, "WLP: UUID is not set. Set via sysfs to "
248 "proceed.\n");
249 result = -ENXIO;
250 goto out;
251 }
252 /* Send D1 association frame */
253 result = wlp_send_assoc_frame(wlp, wss, dev_addr, WLP_ASSOC_D1);
254 if (result < 0) {
255 dev_err(dev, "Unable to send D1 frame to neighbor "
256 "%02x:%02x (%d)\n", dev_addr->data[1],
257 dev_addr->data[0], result);
258 d_printf(6, dev, "Add placeholders into buffer next to "
259 "neighbor information we have (dev address).\n");
260 goto out;
261 }
262 /* Create session, wait for response */
263 session.exp_message = WLP_ASSOC_D2;
264 session.cb = wlp_session_cb;
265 session.cb_priv = &completion;
266 session.neighbor_addr = *dev_addr;
267 BUG_ON(wlp->session != NULL);
268 wlp->session = &session;
269 /* Wait for D2/F0 frame */
270 result = wait_for_completion_interruptible_timeout(&completion,
271 WLP_PER_MSG_TIMEOUT * HZ);
272 if (result == 0) {
273 result = -ETIMEDOUT;
274 dev_err(dev, "Timeout while sending D1 to neighbor "
275 "%02x:%02x.\n", dev_addr->data[1],
276 dev_addr->data[0]);
277 goto error_session;
278 }
279 if (result < 0) {
280 dev_err(dev, "Unable to discover/enroll neighbor %02x:%02x.\n",
281 dev_addr->data[1], dev_addr->data[0]);
282 goto error_session;
283 }
284 /* Parse message in session->data: it will be either D2 or F0 */
285 skb = session.data;
286 resp = (void *) skb->data;
287 d_printf(6, dev, "Received response to D1 frame. \n");
288 d_dump(6, dev, skb->data, skb->len > 72 ? 72 : skb->len);
289
290 if (resp->type == WLP_ASSOC_F0) {
291 result = wlp_parse_f0(wlp, skb);
292 if (result < 0)
293 dev_err(dev, "WLP: Unable to parse F0 from neighbor "
294 "%02x:%02x.\n", dev_addr->data[1],
295 dev_addr->data[0]);
296 result = -EINVAL;
297 goto error_resp_parse;
298 }
299 if (wss == NULL) {
300 /* Discovery */
301 result = wlp_parse_d2_frame_to_cache(wlp, skb, neighbor);
302 if (result < 0) {
303 dev_err(dev, "WLP: Unable to parse D2 message from "
304 "neighbor %02x:%02x for discovery.\n",
305 dev_addr->data[1], dev_addr->data[0]);
306 goto error_resp_parse;
307 }
308 } else {
309 /* Enrollment */
310 result = wlp_parse_d2_frame_to_enroll(wss, skb, neighbor,
311 wssid);
312 if (result < 0) {
313 dev_err(dev, "WLP: Unable to parse D2 message from "
314 "neighbor %02x:%02x for enrollment.\n",
315 dev_addr->data[1], dev_addr->data[0]);
316 goto error_resp_parse;
317 }
318 }
319error_resp_parse:
320 kfree_skb(skb);
321error_session:
322 wlp->session = NULL;
323out:
324 mutex_unlock(&wlp->mutex);
325 return result;
326}
327
328/**
329 * Enroll into WSS of provided WSSID by using neighbor as registrar
330 *
331 * &wss->mutex is held
332 */
333int wlp_enroll_neighbor(struct wlp *wlp, struct wlp_neighbor_e *neighbor,
334 struct wlp_wss *wss, struct wlp_uuid *wssid)
335{
336 int result = 0;
337 struct device *dev = &wlp->rc->uwb_dev.dev;
338 char buf[WLP_WSS_UUID_STRSIZE];
339 struct uwb_dev_addr *dev_addr = &neighbor->uwb_dev->dev_addr;
340 wlp_wss_uuid_print(buf, sizeof(buf), wssid);
341 d_fnstart(6, dev, "wlp %p, neighbor %p, wss %p, wssid %p (%s)\n",
342 wlp, neighbor, wss, wssid, buf);
343 d_printf(6, dev, "Complete me.\n");
344 result = wlp_d1d2_exchange(wlp, neighbor, wss, wssid);
345 if (result < 0) {
346 dev_err(dev, "WLP: D1/D2 message exchange for enrollment "
347 "failed. result = %d \n", result);
348 goto out;
349 }
350 if (wss->state != WLP_WSS_STATE_PART_ENROLLED) {
351 dev_err(dev, "WLP: Unable to enroll into WSS %s using "
352 "neighbor %02x:%02x. \n", buf,
353 dev_addr->data[1], dev_addr->data[0]);
354 result = -EINVAL;
355 goto out;
356 }
357 if (wss->secure_status == WLP_WSS_SECURE) {
358 dev_err(dev, "FIXME: need to complete secure enrollment.\n");
359 result = -EINVAL;
360 goto error;
361 } else {
362 wss->state = WLP_WSS_STATE_ENROLLED;
363		d_printf(2, dev, "WLP: Successful enrollment into unsecure WSS "
364 "%s using neighbor %02x:%02x. \n", buf,
365 dev_addr->data[1], dev_addr->data[0]);
366 }
367
368 d_fnend(6, dev, "wlp %p, neighbor %p, wss %p, wssid %p (%s)\n",
369 wlp, neighbor, wss, wssid, buf);
370out:
371 return result;
372error:
373 wlp_wss_reset(wss);
374 return result;
375}
376
377/**
378 * Discover WSS information of neighbor's active WSS
379 */
380static
381int wlp_discover_neighbor(struct wlp *wlp,
382 struct wlp_neighbor_e *neighbor)
383{
384 return wlp_d1d2_exchange(wlp, neighbor, NULL, NULL);
385}
386
387
388/**
389 * Each neighbor in the neighborhood cache is discoverable. Discover it.
390 *
391 * Discovery is done through sending of D1 association frame and parsing
392 * the D2 association frame response. Only the wssid from D2 will be included
393 * in the neighbor cache; the rest is just displayed to the user and forgotten.
394 *
395 * The discovery is not done in parallel. This is simple and enables us to
396 * maintain only one association context.
397 *
398 * The discovery of one neighbor does not affect the other, but if the
399 * discovery of a neighbor fails it is removed from the neighborhood cache.
400 */
401static
402int wlp_discover_all_neighbors(struct wlp *wlp)
403{
404 int result = 0;
405 struct device *dev = &wlp->rc->uwb_dev.dev;
406 struct wlp_neighbor_e *neighbor, *next;
407
408 list_for_each_entry_safe(neighbor, next, &wlp->neighbors, node) {
409 result = wlp_discover_neighbor(wlp, neighbor);
410 if (result < 0) {
411 dev_err(dev, "WLP: Unable to discover neighbor "
412 "%02x:%02x, removing from neighborhood. \n",
413 neighbor->uwb_dev->dev_addr.data[1],
414 neighbor->uwb_dev->dev_addr.data[0]);
415 __wlp_neighbor_release(neighbor);
416 }
417 }
418 return result;
419}
420
421static int wlp_add_neighbor_helper(struct device *dev, void *priv)
422{
423 struct wlp *wlp = priv;
424 struct uwb_dev *uwb_dev = to_uwb_dev(dev);
425
426 return wlp_add_neighbor(wlp, uwb_dev);
427}
428
429/**
430 * Discover WLP neighborhood
431 *
432 * Will send D1 association frame to all devices in beacon group that have
433 * discoverable bit set in WLP IE. D2 frames will be received, information
434 * displayed to user in @buf. Partial information (from D2 association
435 * frame) will be cached to assist with future association
436 * requests.
437 *
438 * The discovery of the WLP neighborhood is triggered by the user. This
439 * should occur infrequently and we thus free current cache and re-allocate
440 * memory if needed.
441 *
442 * If one neighbor fails during initial discovery (determining if it is a
443 * neighbor or not), we fail all - note that interaction with the neighbor has
444 * not occurred at this point, so if a failure occurs we know something went wrong
445 * locally. We thus undo everything.
446 */
447ssize_t wlp_discover(struct wlp *wlp)
448{
449 int result = 0;
450 struct device *dev = &wlp->rc->uwb_dev.dev;
451
452 d_fnstart(6, dev, "wlp %p \n", wlp);
453 mutex_lock(&wlp->nbmutex);
454 /* Clear current neighborhood cache. */
455 __wlp_neighbors_release(wlp);
456 /* Determine which devices in neighborhood. Repopulate cache. */
457 result = uwb_dev_for_each(wlp->rc, wlp_add_neighbor_helper, wlp);
458 if (result < 0) {
459 /* May have partial neighbor information, release all. */
460 __wlp_neighbors_release(wlp);
461 goto error_dev_for_each;
462 }
463 /* Discover the properties of devices in neighborhood. */
464 result = wlp_discover_all_neighbors(wlp);
465 /* In case of failure we still print our partial results. */
466 if (result < 0) {
467 dev_err(dev, "Unable to fully discover neighborhood. \n");
468 result = 0;
469 }
470error_dev_for_each:
471 mutex_unlock(&wlp->nbmutex);
472 d_fnend(6, dev, "wlp %p \n", wlp);
473 return result;
474}
475
476/**
477 * Handle events from UWB stack
478 *
479 * We handle events conservatively. If a neighbor goes off the air we
480 * remove it from the neighborhood. If an association process is in
481 * progress this function will block waiting for the nbmutex to become
482 * free. The association process will thus be allowed to complete before the
483 * neighbor is removed.
484 */
485static
486void wlp_uwb_notifs_cb(void *_wlp, struct uwb_dev *uwb_dev,
487 enum uwb_notifs event)
488{
489 struct wlp *wlp = _wlp;
490 struct device *dev = &wlp->rc->uwb_dev.dev;
491 struct wlp_neighbor_e *neighbor, *next;
492 int result;
493 switch (event) {
494 case UWB_NOTIF_ONAIR:
495 d_printf(6, dev, "UWB device %02x:%02x is onair\n",
496 uwb_dev->dev_addr.data[1],
497 uwb_dev->dev_addr.data[0]);
498 result = wlp_eda_create_node(&wlp->eda,
499 uwb_dev->mac_addr.data,
500 &uwb_dev->dev_addr);
501 if (result < 0)
502 dev_err(dev, "WLP: Unable to add new neighbor "
503 "%02x:%02x to EDA cache.\n",
504 uwb_dev->dev_addr.data[1],
505 uwb_dev->dev_addr.data[0]);
506 break;
507 case UWB_NOTIF_OFFAIR:
508 d_printf(6, dev, "UWB device %02x:%02x is offair\n",
509 uwb_dev->dev_addr.data[1],
510 uwb_dev->dev_addr.data[0]);
511 wlp_eda_rm_node(&wlp->eda, &uwb_dev->dev_addr);
512 mutex_lock(&wlp->nbmutex);
513 list_for_each_entry_safe(neighbor, next, &wlp->neighbors,
514 node) {
515 if (neighbor->uwb_dev == uwb_dev) {
516 d_printf(6, dev, "Removing device from "
517 "neighborhood.\n");
518 __wlp_neighbor_release(neighbor);
519 }
520 }
521 mutex_unlock(&wlp->nbmutex);
522 break;
523 default:
524 dev_err(dev, "don't know how to handle event %d from uwb\n",
525 event);
526 }
527}
528
529int wlp_setup(struct wlp *wlp, struct uwb_rc *rc)
530{
531 struct device *dev = &rc->uwb_dev.dev;
532 int result;
533
534 d_fnstart(6, dev, "wlp %p\n", wlp);
535 BUG_ON(wlp->fill_device_info == NULL);
536 BUG_ON(wlp->xmit_frame == NULL);
537 BUG_ON(wlp->stop_queue == NULL);
538 BUG_ON(wlp->start_queue == NULL);
539 wlp->rc = rc;
540 wlp_eda_init(&wlp->eda);/* Set up address cache */
541 wlp->uwb_notifs_handler.cb = wlp_uwb_notifs_cb;
542 wlp->uwb_notifs_handler.data = wlp;
543 uwb_notifs_register(rc, &wlp->uwb_notifs_handler);
544
545 uwb_pal_init(&wlp->pal);
546 result = uwb_pal_register(rc, &wlp->pal);
547 if (result < 0)
548 uwb_notifs_deregister(wlp->rc, &wlp->uwb_notifs_handler);
549
550 d_fnend(6, dev, "wlp %p, result = %d\n", wlp, result);
551 return result;
552}
553EXPORT_SYMBOL_GPL(wlp_setup);
554
555void wlp_remove(struct wlp *wlp)
556{
557 struct device *dev = &wlp->rc->uwb_dev.dev;
558 d_fnstart(6, dev, "wlp %p\n", wlp);
559 wlp_neighbors_release(wlp);
560 uwb_pal_unregister(wlp->rc, &wlp->pal);
561 uwb_notifs_deregister(wlp->rc, &wlp->uwb_notifs_handler);
562 wlp_eda_release(&wlp->eda);
563 mutex_lock(&wlp->mutex);
564 if (wlp->dev_info != NULL)
565 kfree(wlp->dev_info);
566 mutex_unlock(&wlp->mutex);
567 wlp->rc = NULL;
568 /* We have to use NULL here because this function can be called
569 * when the device has disappeared. */
570 d_fnend(6, NULL, "wlp %p\n", wlp);
571}
572EXPORT_SYMBOL_GPL(wlp_remove);
573
574/**
575 * wlp_reset_all - reset the WLP hardware
576 * @wlp: the WLP device to reset.
577 *
578 * This schedules a full hardware reset of the WLP device. The radio
579 * controller and any other PALs will also be reset.
580 */
581void wlp_reset_all(struct wlp *wlp)
582{
583 uwb_rc_reset_all(wlp->rc);
584}
585EXPORT_SYMBOL_GPL(wlp_reset_all);
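
For completeness, a sketch of the setup a WLP client is expected to do before calling
wlp_setup(), mirroring the BUG_ON() checks in that function; struct example_client and
the my_* callbacks are hypothetical names, only the four mandatory callbacks and the
wlp_setup()/wlp_remove() pairing come from the code above:

/* Sketch only, not from this patch; names below are hypothetical. */
static int example_client_start(struct example_client *client,
				struct uwb_rc *rc)
{
	struct wlp *wlp = &client->wlp;

	wlp->fill_device_info = my_fill_device_info;
	wlp->xmit_frame = my_xmit_frame;
	wlp->stop_queue = my_stop_queue;
	wlp->start_queue = my_start_queue;
	return wlp_setup(wlp, rc);	/* undone later with wlp_remove(wlp) */
}
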
diff --git a/drivers/uwb/wlp/wss-lc.c b/drivers/uwb/wlp/wss-lc.c
new file mode 100644
index 000000000000..96b18c9bd6e9
--- /dev/null
+++ b/drivers/uwb/wlp/wss-lc.c
@@ -0,0 +1,1055 @@
1/*
2 * WiMedia Logical Link Control Protocol (WLP)
3 *
4 * Copyright (C) 2007 Intel Corporation
5 * Reinette Chatre <reinette.chatre@intel.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 *
21 *
22 * Implementation of the WLP association protocol.
23 *
24 * FIXME: Docs
25 *
26 * A UWB network interface will configure a WSS through wlp_wss_setup() after
27 * the interface has been assigned a MAC address, typically after
28 * "ifconfig" has been called. When the interface goes down it should call
29 * wlp_wss_remove().
30 *
31 * When the WSS is ready for use the user interacts via sysfs to create,
32 * discover, and activate a WSS.
33 *
34 * wlp_wss_enroll_activate()
35 *
36 * wlp_wss_create_activate()
37 * wlp_wss_set_wssid_hash()
38 * wlp_wss_comp_wssid_hash()
39 * wlp_wss_sel_bcast_addr()
40 * wlp_wss_sysfs_add()
41 *
42 * Called when no more references to WSS exist:
43 * wlp_wss_release()
44 * wlp_wss_reset()
45 */
46
47#include <linux/etherdevice.h> /* for is_valid_ether_addr */
48#include <linux/skbuff.h>
49#include <linux/wlp.h>
50#define D_LOCAL 5
51#include <linux/uwb/debug.h>
52#include "wlp-internal.h"
53
54
55size_t wlp_wss_key_print(char *buf, size_t bufsize, u8 *key)
56{
57 size_t result;
58
59 result = scnprintf(buf, bufsize,
60 "%02x %02x %02x %02x %02x %02x "
61 "%02x %02x %02x %02x %02x %02x "
62 "%02x %02x %02x %02x",
63 key[0], key[1], key[2], key[3],
64 key[4], key[5], key[6], key[7],
65 key[8], key[9], key[10], key[11],
66 key[12], key[13], key[14], key[15]);
67 return result;
68}
69
70/**
71 * Compute WSSID hash
72 * WLP Draft 0.99 [7.2.1]
73 *
74 * The WSSID hash is the result of an octet-wise exclusive-OR
75 * of all octets in the WSSID.
76 */
77static
78u8 wlp_wss_comp_wssid_hash(struct wlp_uuid *wssid)
79{
80 return wssid->data[0] ^ wssid->data[1] ^ wssid->data[2]
81 ^ wssid->data[3] ^ wssid->data[4] ^ wssid->data[5]
82 ^ wssid->data[6] ^ wssid->data[7] ^ wssid->data[8]
83 ^ wssid->data[9] ^ wssid->data[10] ^ wssid->data[11]
84 ^ wssid->data[12] ^ wssid->data[13] ^ wssid->data[14]
85 ^ wssid->data[15];
86}
87
88/**
89 * Select a multicast EUI-48 for the WSS broadcast address.
90 * WLP Draft 0.99 [7.2.1]
91 *
92 * Selected based on the WiMedia Alliance OUI, 00-13-88, within the WLP
93 * range, [01-13-88-00-01-00, 01-13-88-00-01-FF] inclusive.
94 *
95 * This address is currently hardcoded.
96 * FIXME?
97 */
98static
99struct uwb_mac_addr wlp_wss_sel_bcast_addr(struct wlp_wss *wss)
100{
101 struct uwb_mac_addr bcast = {
102 .data = { 0x01, 0x13, 0x88, 0x00, 0x01, 0x00 }
103 };
104 return bcast;
105}
106
107/**
108 * Clear the contents of the WSS structure - all except kobj, mutex, virtual
109 *
110 * We do not want to reinitialize - the internal kobj should not change as
111 * it still points to the parent received during setup. The mutex should
112 * remain also. We thus just reset values individually.
113 * The virtual address assigned to the WSS will remain the same for the
114 * lifetime of the WSS. We only reset the fields that can change during its
115 * lifetime.
116 */
117void wlp_wss_reset(struct wlp_wss *wss)
118{
119 struct wlp *wlp = container_of(wss, struct wlp, wss);
120 struct device *dev = &wlp->rc->uwb_dev.dev;
121 d_fnstart(5, dev, "wss (%p) \n", wss);
122 memset(&wss->wssid, 0, sizeof(wss->wssid));
123 wss->hash = 0;
124 memset(&wss->name[0], 0, sizeof(wss->name));
125 memset(&wss->bcast, 0, sizeof(wss->bcast));
126 wss->secure_status = WLP_WSS_UNSECURE;
127 memset(&wss->master_key[0], 0, sizeof(wss->master_key));
128 wss->tag = 0;
129 wss->state = WLP_WSS_STATE_NONE;
130 d_fnend(5, dev, "wss (%p) \n", wss);
131}
132
133/**
134 * Create sysfs infrastructure for WSS
135 *
136 * The WSS is configured to have the interface as parent (see wlp_wss_setup());
137 * a new sysfs directory whose name includes the wssid is created in the
138 * interface's sysfs directory. The group of files used to interact with the
139 * WSS is also created.
140 */
141static
142int wlp_wss_sysfs_add(struct wlp_wss *wss, char *wssid_str)
143{
144 struct wlp *wlp = container_of(wss, struct wlp, wss);
145 struct device *dev = &wlp->rc->uwb_dev.dev;
146 int result;
147
148 d_fnstart(5, dev, "wss (%p), wssid: %s\n", wss, wssid_str);
149 result = kobject_set_name(&wss->kobj, "wss-%s", wssid_str);
150 if (result < 0)
151 return result;
152 wss->kobj.ktype = &wss_ktype;
153 result = kobject_init_and_add(&wss->kobj,
154 &wss_ktype, wss->kobj.parent, "wlp");
155 if (result < 0) {
156 dev_err(dev, "WLP: Cannot register WSS kobject.\n");
157 goto error_kobject_register;
158 }
159 result = sysfs_create_group(&wss->kobj, &wss_attr_group);
160 if (result < 0) {
161 dev_err(dev, "WLP: Cannot register WSS attributes: %d\n",
162 result);
163 goto error_sysfs_create_group;
164 }
165 d_fnend(5, dev, "Completed. result = %d \n", result);
166 return 0;
167error_sysfs_create_group:
168
169 kobject_put(&wss->kobj); /* will free name if needed */
170 return result;
171error_kobject_register:
172 kfree(wss->kobj.name);
173 wss->kobj.name = NULL;
174 wss->kobj.ktype = NULL;
175 return result;
176}
177
178
179/**
180 * Release WSS
181 *
182 * No more references exist to this WSS. We should undo everything that was
183 * done in wlp_wss_create_activate() except removing the group. The group
184 * is not removed because an object can be unregistered before the group is
185 * created. We also undo any additional operations on the WSS after this
186 * (addition of members).
187 *
188 * If memory was allocated for the kobject's name then it will
189 * be freed by the kobject system during this time.
190 *
191 * The EDA cache is removed and reinitialized when the WSS is removed. We
192 * thus lose knowledge of the members of this WSS at that time and need not do
193 * it here.
194 */
195void wlp_wss_release(struct kobject *kobj)
196{
197 struct wlp_wss *wss = container_of(kobj, struct wlp_wss, kobj);
198
199 wlp_wss_reset(wss);
200}
201
202/**
203 * Enroll into a WSS using provided neighbor as registrar
204 *
205 * First search the neighborhood information to learn which neighbor is
206 * referred to, next proceed with enrollment.
207 *
208 * &wss->mutex is held
209 */
210static
211int wlp_wss_enroll_target(struct wlp_wss *wss, struct wlp_uuid *wssid,
212 struct uwb_dev_addr *dest)
213{
214 struct wlp *wlp = container_of(wss, struct wlp, wss);
215 struct device *dev = &wlp->rc->uwb_dev.dev;
216 struct wlp_neighbor_e *neighbor;
217 char buf[WLP_WSS_UUID_STRSIZE];
218 int result = -ENXIO;
219 struct uwb_dev_addr *dev_addr;
220
221 wlp_wss_uuid_print(buf, sizeof(buf), wssid);
222 d_fnstart(5, dev, "wss %p, wssid %s, registrar %02x:%02x \n",
223 wss, buf, dest->data[1], dest->data[0]);
224 mutex_lock(&wlp->nbmutex);
225 list_for_each_entry(neighbor, &wlp->neighbors, node) {
226 dev_addr = &neighbor->uwb_dev->dev_addr;
227 if (!memcmp(dest, dev_addr, sizeof(*dest))) {
228 d_printf(5, dev, "Neighbor %02x:%02x is valid, "
229 "enrolling. \n",
230 dev_addr->data[1], dev_addr->data[0]);
231 result = wlp_enroll_neighbor(wlp, neighbor, wss,
232 wssid);
233 break;
234 }
235 }
236 if (result == -ENXIO)
237 dev_err(dev, "WLP: Cannot find neighbor %02x:%02x. \n",
238 dest->data[1], dest->data[0]);
239 mutex_unlock(&wlp->nbmutex);
240 d_fnend(5, dev, "wss %p, wssid %s, registrar %02x:%02x, result %d \n",
241 wss, buf, dest->data[1], dest->data[0], result);
242 return result;
243}
244
245/**
246 * Enroll into a WSS previously discovered
247 *
248 * The user provides the WSSID of the WSS; we search for a neighbor that has
249 * this WSS activated and attempt to enroll.
250 *
251 * &wss->mutex is held
252 */
253static
254int wlp_wss_enroll_discovered(struct wlp_wss *wss, struct wlp_uuid *wssid)
255{
256 struct wlp *wlp = container_of(wss, struct wlp, wss);
257 struct device *dev = &wlp->rc->uwb_dev.dev;
258 struct wlp_neighbor_e *neighbor;
259 struct wlp_wssid_e *wssid_e;
260 char buf[WLP_WSS_UUID_STRSIZE];
261 int result = -ENXIO;
262
263 wlp_wss_uuid_print(buf, sizeof(buf), wssid);
264 d_fnstart(5, dev, "wss %p, wssid %s \n", wss, buf);
265 mutex_lock(&wlp->nbmutex);
266 list_for_each_entry(neighbor, &wlp->neighbors, node) {
267 list_for_each_entry(wssid_e, &neighbor->wssid, node) {
268 if (!memcmp(wssid, &wssid_e->wssid, sizeof(*wssid))) {
269 d_printf(5, dev, "Found WSSID %s in neighbor "
270 "%02x:%02x cache. \n", buf,
271 neighbor->uwb_dev->dev_addr.data[1],
272 neighbor->uwb_dev->dev_addr.data[0]);
273 result = wlp_enroll_neighbor(wlp, neighbor,
274 wss, wssid);
275 if (result == 0) /* enrollment success */
276 goto out;
277 break;
278 }
279 }
280 }
281out:
282 if (result == -ENXIO)
283 dev_err(dev, "WLP: Cannot find WSSID %s in cache. \n", buf);
284 mutex_unlock(&wlp->nbmutex);
285 d_fnend(5, dev, "wss %p, wssid %s, result %d \n", wss, buf, result);
286 return result;
287}
288
289/**
290 * Enroll into WSS with provided WSSID, registrar may be provided
291 *
292 * @wss: our WSS that will be enrolled
293 * @wssid: wssid of neighboring WSS that we want to enroll in
294 * @devaddr: registrar can be specified, will be broadcast (ff:ff) if any
295 * neighbor can be used as registrar.
296 *
297 * &wss->mutex is held
298 */
299static
300int wlp_wss_enroll(struct wlp_wss *wss, struct wlp_uuid *wssid,
301 struct uwb_dev_addr *devaddr)
302{
303 int result;
304 struct wlp *wlp = container_of(wss, struct wlp, wss);
305 struct device *dev = &wlp->rc->uwb_dev.dev;
306 char buf[WLP_WSS_UUID_STRSIZE];
307 struct uwb_dev_addr bcast = {.data = {0xff, 0xff} };
308
309 wlp_wss_uuid_print(buf, sizeof(buf), wssid);
310 if (wss->state != WLP_WSS_STATE_NONE) {
311 dev_err(dev, "WLP: Already enrolled in WSS %s.\n", buf);
312 result = -EEXIST;
313 goto error;
314 }
315 if (!memcmp(&bcast, devaddr, sizeof(bcast))) {
316 d_printf(5, dev, "Request to enroll in discovered WSS "
317 "with WSSID %s \n", buf);
318 result = wlp_wss_enroll_discovered(wss, wssid);
319 } else {
320 d_printf(5, dev, "Request to enroll in WSSID %s with "
321 "registrar %02x:%02x\n", buf, devaddr->data[1],
322 devaddr->data[0]);
323 result = wlp_wss_enroll_target(wss, wssid, devaddr);
324 }
325 if (result < 0) {
326 dev_err(dev, "WLP: Unable to enroll into WSS %s, result %d \n",
327 buf, result);
328 goto error;
329 }
330 d_printf(2, dev, "Successfully enrolled into WSS %s \n", buf);
331 result = wlp_wss_sysfs_add(wss, buf);
332 if (result < 0) {
333 dev_err(dev, "WLP: Unable to set up sysfs for WSS kobject.\n");
334 wlp_wss_reset(wss);
335 }
336error:
337 return result;
338
339}
340
341/**
342 * Activate given WSS
343 *
344 * Prior to activation a WSS must be enrolled. To activate a WSS a device
345 * includes the WSS hash in the WLP IE in its beacon in each superframe.
346 * WLP 0.99 [7.2.5].
347 *
348 * The WSS tag is also computed at this time. We only support one activated
349 * WSS so we can use the hash as a tag - there will never be a conflict.
350 *
351 * We currently only support one activated WSS so only one WSS hash is
352 * included in the WLP IE.
353 */
354static
355int wlp_wss_activate(struct wlp_wss *wss)
356{
357 struct wlp *wlp = container_of(wss, struct wlp, wss);
358 struct device *dev = &wlp->rc->uwb_dev.dev;
359 struct uwb_rc *uwb_rc = wlp->rc;
360 int result;
361 struct {
362 struct wlp_ie wlp_ie;
363 u8 hash; /* only include one hash */
364 } ie_data;
365
366 d_fnstart(5, dev, "Activating WSS %p. \n", wss);
367 BUG_ON(wss->state != WLP_WSS_STATE_ENROLLED);
368 wss->hash = wlp_wss_comp_wssid_hash(&wss->wssid);
369 wss->tag = wss->hash;
370 memset(&ie_data, 0, sizeof(ie_data));
371 ie_data.wlp_ie.hdr.element_id = UWB_IE_WLP;
372 ie_data.wlp_ie.hdr.length = sizeof(ie_data) - sizeof(struct uwb_ie_hdr);
373 wlp_ie_set_hash_length(&ie_data.wlp_ie, sizeof(ie_data.hash));
374 ie_data.hash = wss->hash;
375 result = uwb_rc_ie_add(uwb_rc, &ie_data.wlp_ie.hdr,
376 sizeof(ie_data));
377 if (result < 0) {
378 dev_err(dev, "WLP: Unable to add WLP IE to beacon. "
379 "result = %d.\n", result);
380 goto error_wlp_ie;
381 }
382 wss->state = WLP_WSS_STATE_ACTIVE;
383 result = 0;
384error_wlp_ie:
385 d_fnend(5, dev, "Activating WSS %p, result = %d \n", wss, result);
386 return result;
387}
388
389/**
390 * Enroll in and activate WSS identified by provided WSSID
391 *
392 * The neighborhood cache should contain a list of all neighbors and the
393 * WSS they have activated. Based on that cache we search for a neighbor with
394 * which we can perform the association process. The user also has the option
395 * to specify which neighbor to prefer as registrar.
396 * Successful enrollment is followed by activation.
397 * Successful activation will create the sysfs directory containing
398 * specific information regarding this WSS.
399 */
400int wlp_wss_enroll_activate(struct wlp_wss *wss, struct wlp_uuid *wssid,
401 struct uwb_dev_addr *devaddr)
402{
403 struct wlp *wlp = container_of(wss, struct wlp, wss);
404 struct device *dev = &wlp->rc->uwb_dev.dev;
405 int result = 0;
406 char buf[WLP_WSS_UUID_STRSIZE];
407
408 d_fnstart(5, dev, "Enrollment and activation requested. \n");
409 mutex_lock(&wss->mutex);
410 result = wlp_wss_enroll(wss, wssid, devaddr);
411 if (result < 0) {
412 wlp_wss_uuid_print(buf, sizeof(buf), &wss->wssid);
413 dev_err(dev, "WLP: Enrollment into WSS %s failed.\n", buf);
414 goto error_enroll;
415 }
416 result = wlp_wss_activate(wss);
417 if (result < 0) {
418 dev_err(dev, "WLP: Unable to activate WSS. Undoing enrollment "
419 "result = %d \n", result);
420 /* Undo enrollment */
421 wlp_wss_reset(wss);
422 goto error_activate;
423 }
424error_activate:
425error_enroll:
426 mutex_unlock(&wss->mutex);
427 d_fnend(5, dev, "Completed. result = %d \n", result);
428 return result;
429}
430
431/**
432 * Create, enroll, and activate a new WSS
433 *
434 * @wssid: new wssid provided by user
435 * @name: WSS name requested by the user.
436 * @sec_status: security status requested by user
437 *
438 * A user requested the creation of a new WSS. All operations are done
439 * locally. The new WSS will be stored locally, the hash will be included
440 * in the WLP IE, and the sysfs infrastructure for this WSS will be
441 * created.
442 */
443int wlp_wss_create_activate(struct wlp_wss *wss, struct wlp_uuid *wssid,
444 char *name, unsigned sec_status, unsigned accept)
445{
446 struct wlp *wlp = container_of(wss, struct wlp, wss);
447 struct device *dev = &wlp->rc->uwb_dev.dev;
448 int result = 0;
449 char buf[WLP_WSS_UUID_STRSIZE];
450 d_fnstart(5, dev, "Request to create new WSS.\n");
451 result = wlp_wss_uuid_print(buf, sizeof(buf), wssid);
452 d_printf(5, dev, "Request to create WSS: WSSID=%s, name=%s, "
453 "sec_status=%u, accepting enrollment=%u \n",
454 buf, name, sec_status, accept);
455 if (!mutex_trylock(&wss->mutex)) {
456 dev_err(dev, "WLP: WLP association session in progress.\n");
457 return -EBUSY;
458 }
459 if (wss->state != WLP_WSS_STATE_NONE) {
460 dev_err(dev, "WLP: WSS already exists. Not creating new.\n");
461 result = -EEXIST;
462 goto out;
463 }
464 if (wss->kobj.parent == NULL) {
465 dev_err(dev, "WLP: WSS parent not ready. Is network interface "
466 "up?\n");
467 result = -ENXIO;
468 goto out;
469 }
470 if (sec_status == WLP_WSS_SECURE) {
471 dev_err(dev, "WLP: FIXME Creation of secure WSS not "
472 "supported yet.\n");
473 result = -EINVAL;
474 goto out;
475 }
476 wss->wssid = *wssid;
477 memcpy(wss->name, name, sizeof(wss->name));
478 wss->bcast = wlp_wss_sel_bcast_addr(wss);
479 wss->secure_status = sec_status;
480 wss->accept_enroll = accept;
481	/* wss->virtual_addr is initialized in the call to wlp_wss_setup() */
482 /* sysfs infrastructure */
483 result = wlp_wss_sysfs_add(wss, buf);
484 if (result < 0) {
485 dev_err(dev, "Cannot set up sysfs for WSS kobject.\n");
486 wlp_wss_reset(wss);
487 goto out;
488 } else
489 result = 0;
490 wss->state = WLP_WSS_STATE_ENROLLED;
491 result = wlp_wss_activate(wss);
492 if (result < 0) {
493 dev_err(dev, "WLP: Unable to activate WSS. Undoing "
494 "enrollment\n");
495 wlp_wss_reset(wss);
496 goto out;
497 }
498 result = 0;
499out:
500 mutex_unlock(&wss->mutex);
501 d_fnend(5, dev, "Completed. result = %d \n", result);
502 return result;
503}
504
505/**
506 * Determine if neighbor has WSS activated
507 *
508 * @returns: 1 if neighbor has WSS activated, zero otherwise
509 *
510 * This can be done in two ways:
511 * - send a C1 frame, parse C2/F0 response
512 * - examine the WLP IE sent by the neighbor
513 *
514 * The WLP IE is not fully supported in hardware so we use the C1/C2 frame
515 * exchange to determine if a WSS is activated. Using the WLP IE should be
516 * faster and should be used when it becomes possible.
517 */
518int wlp_wss_is_active(struct wlp *wlp, struct wlp_wss *wss,
519 struct uwb_dev_addr *dev_addr)
520{
521 int result = 0;
522 struct device *dev = &wlp->rc->uwb_dev.dev;
523 char buf[WLP_WSS_UUID_STRSIZE];
524 DECLARE_COMPLETION_ONSTACK(completion);
525 struct wlp_session session;
526 struct sk_buff *skb;
527 struct wlp_frame_assoc *resp;
528 struct wlp_uuid wssid;
529
530 wlp_wss_uuid_print(buf, sizeof(buf), &wss->wssid);
531 d_fnstart(5, dev, "wlp %p, wss %p (wssid %s), neighbor %02x:%02x \n",
532 wlp, wss, buf, dev_addr->data[1], dev_addr->data[0]);
533 mutex_lock(&wlp->mutex);
534 /* Send C1 association frame */
535 result = wlp_send_assoc_frame(wlp, wss, dev_addr, WLP_ASSOC_C1);
536 if (result < 0) {
537 dev_err(dev, "Unable to send C1 frame to neighbor "
538 "%02x:%02x (%d)\n", dev_addr->data[1],
539 dev_addr->data[0], result);
540 result = 0;
541 goto out;
542 }
543 /* Create session, wait for response */
544 session.exp_message = WLP_ASSOC_C2;
545 session.cb = wlp_session_cb;
546 session.cb_priv = &completion;
547 session.neighbor_addr = *dev_addr;
548 BUG_ON(wlp->session != NULL);
549 wlp->session = &session;
550 /* Wait for C2/F0 frame */
551 result = wait_for_completion_interruptible_timeout(&completion,
552 WLP_PER_MSG_TIMEOUT * HZ);
553 if (result == 0) {
554 dev_err(dev, "Timeout while sending C1 to neighbor "
555 "%02x:%02x.\n", dev_addr->data[1],
556 dev_addr->data[0]);
557 goto out;
558 }
559 if (result < 0) {
560 dev_err(dev, "Unable to send C1 to neighbor %02x:%02x.\n",
561 dev_addr->data[1], dev_addr->data[0]);
562 result = 0;
563 goto out;
564 }
565 /* Parse message in session->data: it will be either C2 or F0 */
566 skb = session.data;
567 resp = (void *) skb->data;
568 d_printf(5, dev, "Received response to C1 frame. \n");
569 d_dump(5, dev, skb->data, skb->len > 72 ? 72 : skb->len);
570 if (resp->type == WLP_ASSOC_F0) {
571 result = wlp_parse_f0(wlp, skb);
572 if (result < 0)
573 dev_err(dev, "WLP: unable to parse incoming F0 "
574 "frame from neighbor %02x:%02x.\n",
575 dev_addr->data[1], dev_addr->data[0]);
576 result = 0;
577 goto error_resp_parse;
578 }
579 /* WLP version and message type fields have already been parsed */
580 result = wlp_get_wssid(wlp, (void *)resp + sizeof(*resp), &wssid,
581 skb->len - sizeof(*resp));
582 if (result < 0) {
583 dev_err(dev, "WLP: unable to obtain WSSID from C2 frame.\n");
584 result = 0;
585 goto error_resp_parse;
586 }
587 if (!memcmp(&wssid, &wss->wssid, sizeof(wssid))) {
588 d_printf(5, dev, "WSSID in C2 frame matches local "
589 "active WSS.\n");
590 result = 1;
591 } else {
592 dev_err(dev, "WLP: Received a C2 frame without matching "
593 "WSSID.\n");
594 result = 0;
595 }
596error_resp_parse:
597 kfree_skb(skb);
598out:
599 wlp->session = NULL;
600 mutex_unlock(&wlp->mutex);
601 d_fnend(5, dev, "wlp %p, wss %p (wssid %s), neighbor %02x:%02x \n",
602 wlp, wss, buf, dev_addr->data[1], dev_addr->data[0]);
603 return result;
604}
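/*
 * Note on the session pattern used above (and again in
 * wlp_wss_connect_neighbor() below): the caller sends an association
 * request, publishes a struct wlp_session holding the expected reply type
 * (C2 or C4) and a completion as callback context, then waits up to
 * WLP_PER_MSG_TIMEOUT seconds. The reply - either the expected frame or an
 * F0 error frame - is delivered in session.data as an skb that the caller
 * must release with kfree_skb(). Only one session may be outstanding at a
 * time, which the BUG_ON(wlp->session != NULL) checks enforce under
 * wlp->mutex.
 */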
605
606/**
607 * Activate connection with neighbor by updating EDA cache
608 *
609 * @wss: local WSS to which neighbor wants to connect
610 * @dev_addr: neighbor's address
611 * @wssid: neighbor's WSSID - must be same as our WSS's WSSID
612 * @tag: neighbor's WSS tag used to identify frames transmitted by it
613 * @virt_addr: neighbor's virtual EUI-48
614 */
615static
616int wlp_wss_activate_connection(struct wlp *wlp, struct wlp_wss *wss,
617 struct uwb_dev_addr *dev_addr,
618 struct wlp_uuid *wssid, u8 *tag,
619 struct uwb_mac_addr *virt_addr)
620{
621 struct device *dev = &wlp->rc->uwb_dev.dev;
622 int result = 0;
623 char buf[WLP_WSS_UUID_STRSIZE];
624 wlp_wss_uuid_print(buf, sizeof(buf), wssid);
625 d_fnstart(5, dev, "wlp %p, wss %p, wssid %s, tag %u, virtual "
626 "%02x:%02x:%02x:%02x:%02x:%02x \n", wlp, wss, buf, *tag,
627 virt_addr->data[0], virt_addr->data[1], virt_addr->data[2],
628 virt_addr->data[3], virt_addr->data[4], virt_addr->data[5]);
629
630 if (!memcmp(wssid, &wss->wssid, sizeof(*wssid))) {
631 d_printf(5, dev, "WSSID from neighbor frame matches local "
632 "active WSS.\n");
633 /* Update EDA cache */
634 result = wlp_eda_update_node(&wlp->eda, dev_addr, wss,
635 (void *) virt_addr->data, *tag,
636 WLP_WSS_CONNECTED);
637 if (result < 0)
638 dev_err(dev, "WLP: Unable to update EDA cache "
639 "with new connected neighbor information.\n");
640 } else {
641 dev_err(dev, "WLP: Neighbor does not have matching "
642 "WSSID.\n");
643 result = -EINVAL;
644 }
645
646 d_fnend(5, dev, "wlp %p, wss %p, wssid %s, tag %u, virtual "
647 "%02x:%02x:%02x:%02x:%02x:%02x, result = %d \n",
648 wlp, wss, buf, *tag,
649 virt_addr->data[0], virt_addr->data[1], virt_addr->data[2],
650 virt_addr->data[3], virt_addr->data[4], virt_addr->data[5],
651 result);
652
653 return result;
654}
655
656/**
657 * Connect to WSS neighbor
658 *
659 * Use C3/C4 exchange to determine if neighbor has WSS activated and
660 * retrieve the WSS tag and virtual EUI-48 of the neighbor.
661 */
662static
663int wlp_wss_connect_neighbor(struct wlp *wlp, struct wlp_wss *wss,
664 struct uwb_dev_addr *dev_addr)
665{
666 int result;
667 struct device *dev = &wlp->rc->uwb_dev.dev;
668 char buf[WLP_WSS_UUID_STRSIZE];
669 struct wlp_uuid wssid;
670 u8 tag;
671 struct uwb_mac_addr virt_addr;
672 DECLARE_COMPLETION_ONSTACK(completion);
673 struct wlp_session session;
674 struct wlp_frame_assoc *resp;
675 struct sk_buff *skb;
676
677 wlp_wss_uuid_print(buf, sizeof(buf), &wss->wssid);
678 d_fnstart(5, dev, "wlp %p, wss %p (wssid %s), neighbor %02x:%02x \n",
679 wlp, wss, buf, dev_addr->data[1], dev_addr->data[0]);
680 mutex_lock(&wlp->mutex);
681 /* Send C3 association frame */
682 result = wlp_send_assoc_frame(wlp, wss, dev_addr, WLP_ASSOC_C3);
683 if (result < 0) {
684 dev_err(dev, "Unable to send C3 frame to neighbor "
685 "%02x:%02x (%d)\n", dev_addr->data[1],
686 dev_addr->data[0], result);
687 goto out;
688 }
689 /* Create session, wait for response */
690 session.exp_message = WLP_ASSOC_C4;
691 session.cb = wlp_session_cb;
692 session.cb_priv = &completion;
693 session.neighbor_addr = *dev_addr;
694 BUG_ON(wlp->session != NULL);
695 wlp->session = &session;
696 /* Wait for C4/F0 frame */
697 result = wait_for_completion_interruptible_timeout(&completion,
698 WLP_PER_MSG_TIMEOUT * HZ);
699 if (result == 0) {
700 dev_err(dev, "Timeout while sending C3 to neighbor "
701 "%02x:%02x.\n", dev_addr->data[1],
702 dev_addr->data[0]);
703 result = -ETIMEDOUT;
704 goto out;
705 }
706 if (result < 0) {
707 dev_err(dev, "Unable to send C3 to neighbor %02x:%02x.\n",
708 dev_addr->data[1], dev_addr->data[0]);
709 goto out;
710 }
711 /* Parse message in session->data: it will be either C4 or F0 */
712 skb = session.data;
713 resp = (void *) skb->data;
714 d_printf(5, dev, "Received response to C3 frame. \n");
715 d_dump(5, dev, skb->data, skb->len > 72 ? 72 : skb->len);
716 if (resp->type == WLP_ASSOC_F0) {
717 result = wlp_parse_f0(wlp, skb);
718 if (result < 0)
719 dev_err(dev, "WLP: unable to parse incoming F0 "
720 "frame from neighbor %02x:%02x.\n",
721 dev_addr->data[1], dev_addr->data[0]);
722 result = -EINVAL;
723 goto error_resp_parse;
724 }
725 result = wlp_parse_c3c4_frame(wlp, skb, &wssid, &tag, &virt_addr);
726 if (result < 0) {
727 dev_err(dev, "WLP: Unable to parse C4 frame from neighbor.\n");
728 goto error_resp_parse;
729 }
730 result = wlp_wss_activate_connection(wlp, wss, dev_addr, &wssid, &tag,
731 &virt_addr);
732 if (result < 0) {
733 dev_err(dev, "WLP: Unable to activate connection to "
734 "neighbor %02x:%02x.\n", dev_addr->data[1],
735 dev_addr->data[0]);
736 goto error_resp_parse;
737 }
738error_resp_parse:
739 kfree_skb(skb);
740out:
741	/* If the connection attempt failed, record it so we do not retry */
742 if (result < 0)
743 wlp_eda_update_node_state(&wlp->eda, dev_addr,
744 WLP_WSS_CONNECT_FAILED);
745 wlp->session = NULL;
746 mutex_unlock(&wlp->mutex);
747 d_fnend(5, dev, "wlp %p, wss %p (wssid %s), neighbor %02x:%02x \n",
748 wlp, wss, buf, dev_addr->data[1], dev_addr->data[0]);
749 return result;
750}
751
752/**
753 * Connect to neighbor with common WSS, send pending frame
754 *
755 * This function is scheduled when a frame is destined to a neighbor with
756 * which we do not have a connection. A copy of the EDA cache entry is
757 * provided - not the actual cache entry (because it is protected by a
758 * spinlock).
759 *
760 * First determine if the neighbor has the same WSS activated and connect if it
761 * does. The C3/C4 exchange is dual-purpose: it determines whether the neighbor
762 * has the WSS activated and, if so, establishes the connection.
763 *
764 * The frame that triggered the connection setup is sent after connection
765 * setup.
766 *
767 * The network queue was stopped before this work was scheduled - we must restart it when done.
768 *
769 */
770static
771void wlp_wss_connect_send(struct work_struct *ws)
772{
773 struct wlp_assoc_conn_ctx *conn_ctx = container_of(ws,
774 struct wlp_assoc_conn_ctx,
775 ws);
776 struct wlp *wlp = conn_ctx->wlp;
777 struct sk_buff *skb = conn_ctx->skb;
778 struct wlp_eda_node *eda_entry = &conn_ctx->eda_entry;
779 struct uwb_dev_addr *dev_addr = &eda_entry->dev_addr;
780 struct wlp_wss *wss = &wlp->wss;
781 int result;
782 struct device *dev = &wlp->rc->uwb_dev.dev;
783 char buf[WLP_WSS_UUID_STRSIZE];
784
785 mutex_lock(&wss->mutex);
786 wlp_wss_uuid_print(buf, sizeof(buf), &wss->wssid);
787 d_fnstart(5, dev, "wlp %p, wss %p (wssid %s), neighbor %02x:%02x \n",
788 wlp, wss, buf, dev_addr->data[1], dev_addr->data[0]);
789 if (wss->state < WLP_WSS_STATE_ACTIVE) {
790 if (printk_ratelimit())
791 dev_err(dev, "WLP: Attempting to connect with "
792 "WSS that is not active or connected.\n");
793 dev_kfree_skb(skb);
794 goto out;
795 }
796 /* Establish connection - send C3 rcv C4 */
797 result = wlp_wss_connect_neighbor(wlp, wss, dev_addr);
798 if (result < 0) {
799 if (printk_ratelimit())
800 dev_err(dev, "WLP: Unable to establish connection "
801 "with neighbor %02x:%02x.\n",
802 dev_addr->data[1], dev_addr->data[0]);
803 dev_kfree_skb(skb);
804 goto out;
805 }
806 /* EDA entry changed, update the local copy being used */
807 result = wlp_copy_eda_node(&wlp->eda, dev_addr, eda_entry);
808 if (result < 0) {
809 if (printk_ratelimit())
810 dev_err(dev, "WLP: Cannot find EDA entry for "
811 "neighbor %02x:%02x \n",
812 dev_addr->data[1], dev_addr->data[0]);
813 }
814 result = wlp_wss_prep_hdr(wlp, eda_entry, skb);
815 if (result < 0) {
816 if (printk_ratelimit())
817 dev_err(dev, "WLP: Unable to prepare frame header for "
818 "transmission (neighbor %02x:%02x). \n",
819 dev_addr->data[1], dev_addr->data[0]);
820 dev_kfree_skb(skb);
821 goto out;
822 }
823 BUG_ON(wlp->xmit_frame == NULL);
824 result = wlp->xmit_frame(wlp, skb, dev_addr);
825 if (result < 0) {
826 if (printk_ratelimit())
827 dev_err(dev, "WLP: Unable to transmit frame: %d\n",
828 result);
829 if (result == -ENXIO)
830 dev_err(dev, "WLP: Is network interface up? \n");
831 /* We could try again ... */
832 dev_kfree_skb(skb);/*we need to free if tx fails */
833 }
834out:
835 kfree(conn_ctx);
836 BUG_ON(wlp->start_queue == NULL);
837 wlp->start_queue(wlp);
838 mutex_unlock(&wss->mutex);
839 d_fnend(5, dev, "wlp %p, wss %p (wssid %s)\n", wlp, wss, buf);
840}
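/*
 * Ownership notes for the work item above: conn_ctx was allocated in
 * wlp_wss_connect_prep() and is freed here in all cases. The pending skb is
 * either handed to wlp->xmit_frame() or freed on any failure, and the
 * network queue stopped by wlp_wss_connect_prep() is always restarted via
 * wlp->start_queue() before the work item returns.
 */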
841
842/**
843 * Add WLP header to outgoing skb
844 *
845 * @eda_entry: pointer to neighbor's entry in the EDA cache
846 * @_skb: skb containing data destined to the neighbor
847 */
848int wlp_wss_prep_hdr(struct wlp *wlp, struct wlp_eda_node *eda_entry,
849 void *_skb)
850{
851 struct device *dev = &wlp->rc->uwb_dev.dev;
852 int result = 0;
853 unsigned char *eth_addr = eda_entry->eth_addr;
854 struct uwb_dev_addr *dev_addr = &eda_entry->dev_addr;
855 struct sk_buff *skb = _skb;
856 struct wlp_frame_std_abbrv_hdr *std_hdr;
857
858 d_fnstart(6, dev, "wlp %p \n", wlp);
859 if (eda_entry->state == WLP_WSS_CONNECTED) {
860 /* Add WLP header */
861 BUG_ON(skb_headroom(skb) < sizeof(*std_hdr));
862 std_hdr = (void *) __skb_push(skb, sizeof(*std_hdr));
863 std_hdr->hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID);
864 std_hdr->hdr.type = WLP_FRAME_STANDARD;
865 std_hdr->tag = eda_entry->wss->tag;
866 } else {
867 if (printk_ratelimit())
868 dev_err(dev, "WLP: Destination neighbor (Ethernet: "
869 "%02x:%02x:%02x:%02x:%02x:%02x, Dev: "
870 "%02x:%02x) is not connected. \n", eth_addr[0],
871 eth_addr[1], eth_addr[2], eth_addr[3],
872 eth_addr[4], eth_addr[5], dev_addr->data[1],
873 dev_addr->data[0]);
874 result = -EINVAL;
875 }
876 d_fnend(6, dev, "wlp %p \n", wlp);
877 return result;
878}
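/*
 * The standard abbreviated header pushed above carries the WLP mux header
 * (mux_hdr = WLP_PROTOCOL_ID in little-endian, type = WLP_FRAME_STANDARD)
 * and the WSS tag obtained via the EDA entry. Callers must therefore
 * reserve at least sizeof(struct wlp_frame_std_abbrv_hdr) bytes of skb
 * headroom; the BUG_ON() above enforces this.
 */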
879
880
881/**
882 * Prepare skb for neighbor: connect if not already and prep WLP header
883 *
884 * This function is called in interrupt context, but connection setup needs to
885 * sleep. We temporarily stop the net queue while the WLP connection is
886 * established; setup of the WLP connection and restart of the queue are
887 * scheduled on the default work queue.
888 *
889 * run with eda->lock held (spinlock)
890 */
891int wlp_wss_connect_prep(struct wlp *wlp, struct wlp_eda_node *eda_entry,
892 void *_skb)
893{
894 int result = 0;
895 struct device *dev = &wlp->rc->uwb_dev.dev;
896 struct uwb_dev_addr *dev_addr = &eda_entry->dev_addr;
897 unsigned char *eth_addr = eda_entry->eth_addr;
898 struct sk_buff *skb = _skb;
899 struct wlp_assoc_conn_ctx *conn_ctx;
900
901 d_fnstart(5, dev, "wlp %p\n", wlp);
902 d_printf(5, dev, "To neighbor %02x:%02x with eth "
903 "%02x:%02x:%02x:%02x:%02x:%02x\n", dev_addr->data[1],
904 dev_addr->data[0], eth_addr[0], eth_addr[1], eth_addr[2],
905 eth_addr[3], eth_addr[4], eth_addr[5]);
906 if (eda_entry->state == WLP_WSS_UNCONNECTED) {
907 /* We don't want any more packets while we set up connection */
908 BUG_ON(wlp->stop_queue == NULL);
909 wlp->stop_queue(wlp);
910 conn_ctx = kmalloc(sizeof(*conn_ctx), GFP_ATOMIC);
911 if (conn_ctx == NULL) {
912 if (printk_ratelimit())
913 dev_err(dev, "WLP: Unable to allocate memory "
914 "for connection handling.\n");
915 result = -ENOMEM;
916 goto out;
917 }
918 conn_ctx->wlp = wlp;
919 conn_ctx->skb = skb;
920 conn_ctx->eda_entry = *eda_entry;
921 INIT_WORK(&conn_ctx->ws, wlp_wss_connect_send);
922 schedule_work(&conn_ctx->ws);
923 result = 1;
924 } else if (eda_entry->state == WLP_WSS_CONNECT_FAILED) {
925 /* Previous connection attempts failed, don't retry - see
926 * conditions for connection in WLP 0.99 [7.6.2] */
927 if (printk_ratelimit())
928 dev_err(dev, "Could not connect to neighbor "
929 "previously. Not retrying. \n");
930 result = -ENONET;
931 goto out;
932 } else { /* eda_entry->state == WLP_WSS_CONNECTED */
933 d_printf(5, dev, "Neighbor is connected, preparing frame.\n");
934 result = wlp_wss_prep_hdr(wlp, eda_entry, skb);
935 }
936out:
937 d_fnend(5, dev, "wlp %p, result = %d \n", wlp, result);
938 return result;
939}
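/*
 * Return convention for wlp_wss_connect_prep(): 0 means the WLP header was
 * added and the caller may transmit the skb directly; 1 means the skb was
 * handed to the scheduled connection work and will be transmitted (or
 * freed) there; a negative value is an error and the caller still owns the
 * skb.
 */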
940
941/**
942 * Emulate broadcast: copy skb, send copy to neighbor (connect if not already)
943 *
944 * We need to copy skbs in the case where we emulate broadcast through
945 * unicast. We copy instead of clone because we are modifying the data of
946 * the frame after copying ... clones share data so we cannot emulate
947 * broadcast using clones.
948 *
949 * run with eda->lock held (spinlock)
950 */
951int wlp_wss_send_copy(struct wlp *wlp, struct wlp_eda_node *eda_entry,
952 void *_skb)
953{
954 int result = -ENOMEM;
955 struct device *dev = &wlp->rc->uwb_dev.dev;
956 struct sk_buff *skb = _skb;
957 struct sk_buff *copy;
958 struct uwb_dev_addr *dev_addr = &eda_entry->dev_addr;
959
960 d_fnstart(5, dev, "to neighbor %02x:%02x, skb (%p) \n",
961 dev_addr->data[1], dev_addr->data[0], skb);
962 copy = skb_copy(skb, GFP_ATOMIC);
963 if (copy == NULL) {
964 if (printk_ratelimit())
965 dev_err(dev, "WLP: Unable to copy skb for "
966 "transmission.\n");
967 goto out;
968 }
969 result = wlp_wss_connect_prep(wlp, eda_entry, copy);
970 if (result < 0) {
971 if (printk_ratelimit())
972 dev_err(dev, "WLP: Unable to connect/send skb "
973 "to neighbor.\n");
974 dev_kfree_skb_irq(copy);
975 goto out;
976 } else if (result == 1)
977 /* Frame will be transmitted separately */
978 goto out;
979 BUG_ON(wlp->xmit_frame == NULL);
980 result = wlp->xmit_frame(wlp, copy, dev_addr);
981 if (result < 0) {
982 if (printk_ratelimit())
983 dev_err(dev, "WLP: Unable to transmit frame: %d\n",
984 result);
985 if ((result == -ENXIO) && printk_ratelimit())
986 dev_err(dev, "WLP: Is network interface up? \n");
987 /* We could try again ... */
988 dev_kfree_skb_irq(copy);/*we need to free if tx fails */
989 }
990out:
991 d_fnend(5, dev, "to neighbor %02x:%02x \n", dev_addr->data[1],
992 dev_addr->data[0]);
993 return result;
994}
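/*
 * wlp_wss_connect_prep() and wlp_wss_send_copy() run with eda->lock held
 * (a spinlock), so they may not sleep: allocations use GFP_ATOMIC, skbs are
 * released with dev_kfree_skb_irq(), and anything that must sleep (the
 * C3/C4 exchange) is deferred to the work queue.
 */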
995
996
997/**
998 * Setup WSS
999 *
1000 * Should be called by network driver after the interface has been given a
1001 * MAC address.
1002 */
1003int wlp_wss_setup(struct net_device *net_dev, struct wlp_wss *wss)
1004{
1005 struct wlp *wlp = container_of(wss, struct wlp, wss);
1006 struct device *dev = &wlp->rc->uwb_dev.dev;
1007 int result = 0;
1008 d_fnstart(5, dev, "wss (%p) \n", wss);
1009 mutex_lock(&wss->mutex);
1010 wss->kobj.parent = &net_dev->dev.kobj;
1011 if (!is_valid_ether_addr(net_dev->dev_addr)) {
1012		dev_err(dev, "WLP: Invalid MAC address. Cannot use for "
1013			"virtual address.\n");
1014 result = -EINVAL;
1015 goto out;
1016 }
1017 memcpy(wss->virtual_addr.data, net_dev->dev_addr,
1018 sizeof(wss->virtual_addr.data));
1019out:
1020 mutex_unlock(&wss->mutex);
1021 d_fnend(5, dev, "wss (%p) \n", wss);
1022 return result;
1023}
1024EXPORT_SYMBOL_GPL(wlp_wss_setup);
1025
1026/**
1027 * Remove WSS
1028 *
1029 * Called by the client that configured the WSS through wlp_wss_setup(). This
1030 * function is called when the client no longer needs the WSS, e.g. when the
1031 * client shuts down.
1032 *
1033 * We remove the WLP IE from the beacon before initiating local cleanup.
1034 */
1035void wlp_wss_remove(struct wlp_wss *wss)
1036{
1037 struct wlp *wlp = container_of(wss, struct wlp, wss);
1038 struct device *dev = &wlp->rc->uwb_dev.dev;
1039 d_fnstart(5, dev, "wss (%p) \n", wss);
1040 mutex_lock(&wss->mutex);
1041 if (wss->state == WLP_WSS_STATE_ACTIVE)
1042 uwb_rc_ie_rm(wlp->rc, UWB_IE_WLP);
1043 if (wss->state != WLP_WSS_STATE_NONE) {
1044 sysfs_remove_group(&wss->kobj, &wss_attr_group);
1045 kobject_put(&wss->kobj);
1046 }
1047 wss->kobj.parent = NULL;
1048 memset(&wss->virtual_addr, 0, sizeof(wss->virtual_addr));
1049 /* Cleanup EDA cache */
1050 wlp_eda_release(&wlp->eda);
1051 wlp_eda_init(&wlp->eda);
1052 mutex_unlock(&wss->mutex);
1053 d_fnend(5, dev, "wss (%p) \n", wss);
1054}
1055EXPORT_SYMBOL_GPL(wlp_wss_remove);
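/*
 * Minimal lifecycle sketch for the two exported entry points (illustrative
 * only; struct my_priv and the netdev callbacks are hypothetical, the only
 * assumption being that the driver's private data embeds a struct wlp):
 *
 *	static int my_open(struct net_device *net_dev)
 *	{
 *		struct my_priv *priv = netdev_priv(net_dev);
 *
 *		// the interface must already have a valid MAC address
 *		return wlp_wss_setup(net_dev, &priv->wlp.wss);
 *	}
 *
 *	static int my_stop(struct net_device *net_dev)
 *	{
 *		struct my_priv *priv = netdev_priv(net_dev);
 *
 *		wlp_wss_remove(&priv->wlp.wss);
 *		return 0;
 *	}
 */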
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 0f13448c6f79..3f3ce13fef43 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -2083,6 +2083,38 @@ config FB_METRONOME
2083 controller. The pre-release name for this device was 8track 2083 controller. The pre-release name for this device was 8track
2084 and could also have been called by some vendors as PVI-nnnn. 2084 and could also have been called by some vendors as PVI-nnnn.
2085 2085
2086config FB_MB862XX
2087 tristate "Fujitsu MB862xx GDC support"
2088 depends on FB
2089 select FB_CFB_FILLRECT
2090 select FB_CFB_COPYAREA
2091 select FB_CFB_IMAGEBLIT
2092 ---help---
2093 Frame buffer driver for Fujitsu Carmine/Coral-P(A)/Lime controllers.
2094
2095config FB_MB862XX_PCI_GDC
2096 bool "Carmine/Coral-P(A) GDC"
2097 depends on PCI && FB_MB862XX
2098 ---help---
2099 This enables framebuffer support for Fujitsu Carmine/Coral-P(A)
2100 PCI graphics controller devices.
2101
2102config FB_MB862XX_LIME
2103 bool "Lime GDC"
2104 depends on FB_MB862XX
2105 depends on OF && !FB_MB862XX_PCI_GDC
2106 select FB_FOREIGN_ENDIAN
2107 select FB_LITTLE_ENDIAN
2108 ---help---
2109 Framebuffer support for Fujitsu Lime GDC on host CPU bus.
2110
2111config FB_PRE_INIT_FB
2112 bool "Don't reinitialize, use bootloader's GDC/Display configuration"
2113 depends on FB_MB862XX_LIME
2114 ---help---
2115 Select this option if display contents should be inherited as set by
2116 the bootloader.
2117
2086source "drivers/video/omap/Kconfig" 2118source "drivers/video/omap/Kconfig"
2087 2119
2088source "drivers/video/backlight/Kconfig" 2120source "drivers/video/backlight/Kconfig"
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 248bddc8d0b0..e39e33e797da 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -122,6 +122,7 @@ obj-$(CONFIG_FB_SH_MOBILE_LCDC) += sh_mobile_lcdcfb.o
122obj-$(CONFIG_FB_OMAP) += omap/ 122obj-$(CONFIG_FB_OMAP) += omap/
123obj-$(CONFIG_XEN_FBDEV_FRONTEND) += xen-fbfront.o 123obj-$(CONFIG_XEN_FBDEV_FRONTEND) += xen-fbfront.o
124obj-$(CONFIG_FB_CARMINE) += carminefb.o 124obj-$(CONFIG_FB_CARMINE) += carminefb.o
125obj-$(CONFIG_FB_MB862XX) += mb862xx/
125 126
126# Platform or fallback drivers go here 127# Platform or fallback drivers go here
127obj-$(CONFIG_FB_UVESA) += uvesafb.o 128obj-$(CONFIG_FB_UVESA) += uvesafb.o
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index f8d0a57a07cb..9a577a800db5 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -132,7 +132,7 @@ static void init_backlight(struct atmel_lcdfb_info *sinfo)
132 132
133 bl = backlight_device_register("backlight", &sinfo->pdev->dev, 133 bl = backlight_device_register("backlight", &sinfo->pdev->dev,
134 sinfo, &atmel_lcdc_bl_ops); 134 sinfo, &atmel_lcdc_bl_ops);
135 if (IS_ERR(sinfo->backlight)) { 135 if (IS_ERR(bl)) {
136 dev_err(&sinfo->pdev->dev, "error %ld on backlight register\n", 136 dev_err(&sinfo->pdev->dev, "error %ld on backlight register\n",
137 PTR_ERR(bl)); 137 PTR_ERR(bl));
138 return; 138 return;
diff --git a/drivers/video/aty/radeon_accel.c b/drivers/video/aty/radeon_accel.c
index 8718f7349d6b..a469a3d6edcb 100644
--- a/drivers/video/aty/radeon_accel.c
+++ b/drivers/video/aty/radeon_accel.c
@@ -5,61 +5,61 @@
5 * --dte 5 * --dte
6 */ 6 */
7 7
8#define FLUSH_CACHE_WORKAROUND 1 8static void radeon_fixup_offset(struct radeonfb_info *rinfo)
9
10void radeon_fifo_update_and_wait(struct radeonfb_info *rinfo, int entries)
11{ 9{
12 int i; 10 u32 local_base;
11
12 /* *** Ugly workaround *** */
13 /*
14 * On some platforms, the video memory is mapped at 0 in radeon chip space
15 * (like PPCs) by the firmware. X will always move it up so that it's seen
16 * by the chip to be at the same address as the PCI BAR.
17 * That means that when switching back from X, there is a mismatch between
18 * the offsets programmed into the engine. This means that potentially,
19 * accel operations done before radeonfb has a chance to re-init the engine
20 * will have incorrect offsets, and potentially trash system memory !
21 *
22 * The correct fix is for fbcon to never call any accel op before the engine
23 * has properly been re-initialized (by a call to set_var), but this is a
24 * complex fix. This workaround in the meantime, called before every accel
25 * operation, makes sure the offsets are in sync.
26 */
13 27
14 for (i=0; i<2000000; i++) { 28 radeon_fifo_wait (1);
15 rinfo->fifo_free = INREG(RBBM_STATUS) & 0x7f; 29 local_base = INREG(MC_FB_LOCATION) << 16;
16 if (rinfo->fifo_free >= entries) 30 if (local_base == rinfo->fb_local_base)
17 return; 31 return;
18 udelay(10);
19 }
20 printk(KERN_ERR "radeonfb: FIFO Timeout !\n");
21 /* XXX Todo: attempt to reset the engine */
22}
23 32
24static inline void radeon_fifo_wait(struct radeonfb_info *rinfo, int entries) 33 rinfo->fb_local_base = local_base;
25{
26 if (entries <= rinfo->fifo_free)
27 rinfo->fifo_free -= entries;
28 else
29 radeon_fifo_update_and_wait(rinfo, entries);
30}
31 34
32static inline void radeonfb_set_creg(struct radeonfb_info *rinfo, u32 reg, 35 radeon_fifo_wait (3);
33 u32 *cache, u32 new_val) 36 OUTREG(DEFAULT_PITCH_OFFSET, (rinfo->pitch << 0x16) |
34{ 37 (rinfo->fb_local_base >> 10));
35 if (new_val == *cache) 38 OUTREG(DST_PITCH_OFFSET, (rinfo->pitch << 0x16) | (rinfo->fb_local_base >> 10));
36 return; 39 OUTREG(SRC_PITCH_OFFSET, (rinfo->pitch << 0x16) | (rinfo->fb_local_base >> 10));
37 *cache = new_val;
38 radeon_fifo_wait(rinfo, 1);
39 OUTREG(reg, new_val);
40} 40}
41 41
42static void radeonfb_prim_fillrect(struct radeonfb_info *rinfo, 42static void radeonfb_prim_fillrect(struct radeonfb_info *rinfo,
43 const struct fb_fillrect *region) 43 const struct fb_fillrect *region)
44{ 44{
45 radeonfb_set_creg(rinfo, DP_GUI_MASTER_CNTL, &rinfo->dp_gui_mc_cache, 45 radeon_fifo_wait(4);
46 rinfo->dp_gui_mc_base | GMC_BRUSH_SOLID_COLOR | ROP3_P); 46
47 radeonfb_set_creg(rinfo, DP_CNTL, &rinfo->dp_cntl_cache, 47 OUTREG(DP_GUI_MASTER_CNTL,
48 DST_X_LEFT_TO_RIGHT | DST_Y_TOP_TO_BOTTOM); 48 rinfo->dp_gui_master_cntl /* contains, like GMC_DST_32BPP */
49 radeonfb_set_creg(rinfo, DP_BRUSH_FRGD_CLR, &rinfo->dp_brush_fg_cache, 49 | GMC_BRUSH_SOLID_COLOR
50 region->color); 50 | ROP3_P);
51 51 if (radeon_get_dstbpp(rinfo->depth) != DST_8BPP)
52 /* Ensure the dst cache is flushed and the engine idle before 52 OUTREG(DP_BRUSH_FRGD_CLR, rinfo->pseudo_palette[region->color]);
53 * issuing the operation. 53 else
54 * 54 OUTREG(DP_BRUSH_FRGD_CLR, region->color);
55 * This works around engine lockups on some cards 55 OUTREG(DP_WRITE_MSK, 0xffffffff);
56 */ 56 OUTREG(DP_CNTL, (DST_X_LEFT_TO_RIGHT | DST_Y_TOP_TO_BOTTOM));
57#if FLUSH_CACHE_WORKAROUND 57
58 radeon_fifo_wait(rinfo, 2); 58 radeon_fifo_wait(2);
59 OUTREG(DSTCACHE_CTLSTAT, RB2D_DC_FLUSH_ALL); 59 OUTREG(DSTCACHE_CTLSTAT, RB2D_DC_FLUSH_ALL);
60 OUTREG(WAIT_UNTIL, (WAIT_2D_IDLECLEAN | WAIT_DMA_GUI_IDLE)); 60 OUTREG(WAIT_UNTIL, (WAIT_2D_IDLECLEAN | WAIT_DMA_GUI_IDLE));
61#endif 61
62 radeon_fifo_wait(rinfo, 2); 62 radeon_fifo_wait(2);
63 OUTREG(DST_Y_X, (region->dy << 16) | region->dx); 63 OUTREG(DST_Y_X, (region->dy << 16) | region->dx);
64 OUTREG(DST_WIDTH_HEIGHT, (region->width << 16) | region->height); 64 OUTREG(DST_WIDTH_HEIGHT, (region->width << 16) | region->height);
65} 65}
@@ -70,14 +70,15 @@ void radeonfb_fillrect(struct fb_info *info, const struct fb_fillrect *region)
70 struct fb_fillrect modded; 70 struct fb_fillrect modded;
71 int vxres, vyres; 71 int vxres, vyres;
72 72
73 WARN_ON(rinfo->gfx_mode); 73 if (info->state != FBINFO_STATE_RUNNING)
74 if (info->state != FBINFO_STATE_RUNNING || rinfo->gfx_mode)
75 return; 74 return;
76 if (info->flags & FBINFO_HWACCEL_DISABLED) { 75 if (info->flags & FBINFO_HWACCEL_DISABLED) {
77 cfb_fillrect(info, region); 76 cfb_fillrect(info, region);
78 return; 77 return;
79 } 78 }
80 79
80 radeon_fixup_offset(rinfo);
81
81 vxres = info->var.xres_virtual; 82 vxres = info->var.xres_virtual;
82 vyres = info->var.yres_virtual; 83 vyres = info->var.yres_virtual;
83 84
@@ -90,10 +91,6 @@ void radeonfb_fillrect(struct fb_info *info, const struct fb_fillrect *region)
90 if(modded.dx + modded.width > vxres) modded.width = vxres - modded.dx; 91 if(modded.dx + modded.width > vxres) modded.width = vxres - modded.dx;
91 if(modded.dy + modded.height > vyres) modded.height = vyres - modded.dy; 92 if(modded.dy + modded.height > vyres) modded.height = vyres - modded.dy;
92 93
93 if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
94 info->fix.visual == FB_VISUAL_DIRECTCOLOR )
95 modded.color = ((u32 *) (info->pseudo_palette))[region->color];
96
97 radeonfb_prim_fillrect(rinfo, &modded); 94 radeonfb_prim_fillrect(rinfo, &modded);
98} 95}
99 96
@@ -112,22 +109,22 @@ static void radeonfb_prim_copyarea(struct radeonfb_info *rinfo,
112 if ( xdir < 0 ) { sx += w-1; dx += w-1; } 109 if ( xdir < 0 ) { sx += w-1; dx += w-1; }
113 if ( ydir < 0 ) { sy += h-1; dy += h-1; } 110 if ( ydir < 0 ) { sy += h-1; dy += h-1; }
114 111
115 radeonfb_set_creg(rinfo, DP_GUI_MASTER_CNTL, &rinfo->dp_gui_mc_cache, 112 radeon_fifo_wait(3);
116 rinfo->dp_gui_mc_base | 113 OUTREG(DP_GUI_MASTER_CNTL,
117 GMC_BRUSH_NONE | 114 rinfo->dp_gui_master_cntl /* i.e. GMC_DST_32BPP */
118 GMC_SRC_DATATYPE_COLOR | 115 | GMC_BRUSH_NONE
119 ROP3_S | 116 | GMC_SRC_DSTCOLOR
120 DP_SRC_SOURCE_MEMORY); 117 | ROP3_S
121 radeonfb_set_creg(rinfo, DP_CNTL, &rinfo->dp_cntl_cache, 118 | DP_SRC_SOURCE_MEMORY );
122 (xdir>=0 ? DST_X_LEFT_TO_RIGHT : 0) | 119 OUTREG(DP_WRITE_MSK, 0xffffffff);
123 (ydir>=0 ? DST_Y_TOP_TO_BOTTOM : 0)); 120 OUTREG(DP_CNTL, (xdir>=0 ? DST_X_LEFT_TO_RIGHT : 0)
124 121 | (ydir>=0 ? DST_Y_TOP_TO_BOTTOM : 0));
125#if FLUSH_CACHE_WORKAROUND 122
126 radeon_fifo_wait(rinfo, 2); 123 radeon_fifo_wait(2);
127 OUTREG(DSTCACHE_CTLSTAT, RB2D_DC_FLUSH_ALL); 124 OUTREG(DSTCACHE_CTLSTAT, RB2D_DC_FLUSH_ALL);
128 OUTREG(WAIT_UNTIL, (WAIT_2D_IDLECLEAN | WAIT_DMA_GUI_IDLE)); 125 OUTREG(WAIT_UNTIL, (WAIT_2D_IDLECLEAN | WAIT_DMA_GUI_IDLE));
129#endif 126
130 radeon_fifo_wait(rinfo, 3); 127 radeon_fifo_wait(3);
131 OUTREG(SRC_Y_X, (sy << 16) | sx); 128 OUTREG(SRC_Y_X, (sy << 16) | sx);
132 OUTREG(DST_Y_X, (dy << 16) | dx); 129 OUTREG(DST_Y_X, (dy << 16) | dx);
133 OUTREG(DST_HEIGHT_WIDTH, (h << 16) | w); 130 OUTREG(DST_HEIGHT_WIDTH, (h << 16) | w);
@@ -146,14 +143,15 @@ void radeonfb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
146 modded.width = area->width; 143 modded.width = area->width;
147 modded.height = area->height; 144 modded.height = area->height;
148 145
149 WARN_ON(rinfo->gfx_mode); 146 if (info->state != FBINFO_STATE_RUNNING)
150 if (info->state != FBINFO_STATE_RUNNING || rinfo->gfx_mode)
151 return; 147 return;
152 if (info->flags & FBINFO_HWACCEL_DISABLED) { 148 if (info->flags & FBINFO_HWACCEL_DISABLED) {
153 cfb_copyarea(info, area); 149 cfb_copyarea(info, area);
154 return; 150 return;
155 } 151 }
156 152
153 radeon_fixup_offset(rinfo);
154
157 vxres = info->var.xres_virtual; 155 vxres = info->var.xres_virtual;
158 vyres = info->var.yres_virtual; 156 vyres = info->var.yres_virtual;
159 157
@@ -170,112 +168,13 @@ void radeonfb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
170 radeonfb_prim_copyarea(rinfo, &modded); 168 radeonfb_prim_copyarea(rinfo, &modded);
171} 169}
172 170
173static void radeonfb_prim_imageblit(struct radeonfb_info *rinfo,
174 const struct fb_image *image,
175 u32 fg, u32 bg)
176{
177 unsigned int src_bytes, dwords;
178 u32 *bits;
179
180 radeonfb_set_creg(rinfo, DP_GUI_MASTER_CNTL, &rinfo->dp_gui_mc_cache,
181 rinfo->dp_gui_mc_base |
182 GMC_BRUSH_NONE |
183 GMC_SRC_DATATYPE_MONO_FG_BG |
184 ROP3_S |
185 GMC_BYTE_ORDER_MSB_TO_LSB |
186 DP_SRC_SOURCE_HOST_DATA);
187 radeonfb_set_creg(rinfo, DP_CNTL, &rinfo->dp_cntl_cache,
188 DST_X_LEFT_TO_RIGHT | DST_Y_TOP_TO_BOTTOM);
189 radeonfb_set_creg(rinfo, DP_SRC_FRGD_CLR, &rinfo->dp_src_fg_cache, fg);
190 radeonfb_set_creg(rinfo, DP_SRC_BKGD_CLR, &rinfo->dp_src_bg_cache, bg);
191
192 radeon_fifo_wait(rinfo, 1);
193 OUTREG(DST_Y_X, (image->dy << 16) | image->dx);
194
195 /* Ensure the dst cache is flushed and the engine idle before
196 * issuing the operation.
197 *
198 * This works around engine lockups on some cards
199 */
200#if FLUSH_CACHE_WORKAROUND
201 radeon_fifo_wait(rinfo, 2);
202 OUTREG(DSTCACHE_CTLSTAT, RB2D_DC_FLUSH_ALL);
203 OUTREG(WAIT_UNTIL, (WAIT_2D_IDLECLEAN | WAIT_DMA_GUI_IDLE));
204#endif
205
206 /* X here pads width to a multiple of 32 and uses the clipper to
207 * adjust the result. Is that really necessary ? Things seem to
208 * work ok for me without that and the doco doesn't seem to imply
209 * there is such a restriction.
210 */
211 OUTREG(DST_WIDTH_HEIGHT, (image->width << 16) | image->height);
212
213 src_bytes = (((image->width * image->depth) + 7) / 8) * image->height;
214 dwords = (src_bytes + 3) / 4;
215 bits = (u32*)(image->data);
216
217 while(dwords >= 8) {
218 radeon_fifo_wait(rinfo, 8);
219#if BITS_PER_LONG == 64
220 __raw_writeq(*((u64 *)(bits)), rinfo->mmio_base + HOST_DATA0);
221 __raw_writeq(*((u64 *)(bits+2)), rinfo->mmio_base + HOST_DATA2);
222 __raw_writeq(*((u64 *)(bits+4)), rinfo->mmio_base + HOST_DATA4);
223 __raw_writeq(*((u64 *)(bits+6)), rinfo->mmio_base + HOST_DATA6);
224 bits += 8;
225#else
226 __raw_writel(*(bits++), rinfo->mmio_base + HOST_DATA0);
227 __raw_writel(*(bits++), rinfo->mmio_base + HOST_DATA1);
228 __raw_writel(*(bits++), rinfo->mmio_base + HOST_DATA2);
229 __raw_writel(*(bits++), rinfo->mmio_base + HOST_DATA3);
230 __raw_writel(*(bits++), rinfo->mmio_base + HOST_DATA4);
231 __raw_writel(*(bits++), rinfo->mmio_base + HOST_DATA5);
232 __raw_writel(*(bits++), rinfo->mmio_base + HOST_DATA6);
233 __raw_writel(*(bits++), rinfo->mmio_base + HOST_DATA7);
234#endif
235 dwords -= 8;
236 }
237 while(dwords--) {
238 radeon_fifo_wait(rinfo, 1);
239 __raw_writel(*(bits++), rinfo->mmio_base + HOST_DATA0);
240 }
241}
242
243void radeonfb_imageblit(struct fb_info *info, const struct fb_image *image) 171void radeonfb_imageblit(struct fb_info *info, const struct fb_image *image)
244{ 172{
245 struct radeonfb_info *rinfo = info->par; 173 struct radeonfb_info *rinfo = info->par;
246 u32 fg, bg;
247
248 WARN_ON(rinfo->gfx_mode);
249 if (info->state != FBINFO_STATE_RUNNING || rinfo->gfx_mode)
250 return;
251 174
252 if (!image->width || !image->height) 175 if (info->state != FBINFO_STATE_RUNNING)
253 return; 176 return;
254 177 radeon_engine_idle();
255 /* We only do 1 bpp color expansion for now */
256 if (info->flags & FBINFO_HWACCEL_DISABLED || image->depth != 1)
257 goto fallback;
258
259 /* Fallback if running out of the screen. We may do clipping
260 * in the future */
261 if ((image->dx + image->width) > info->var.xres_virtual ||
262 (image->dy + image->height) > info->var.yres_virtual)
263 goto fallback;
264
265 if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
266 info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
267 fg = ((u32*)(info->pseudo_palette))[image->fg_color];
268 bg = ((u32*)(info->pseudo_palette))[image->bg_color];
269 } else {
270 fg = image->fg_color;
271 bg = image->bg_color;
272 }
273
274 radeonfb_prim_imageblit(rinfo, image, fg, bg);
275 return;
276
277 fallback:
278 radeon_engine_idle(rinfo);
279 178
280 cfb_imageblit(info, image); 179 cfb_imageblit(info, image);
281} 180}
@@ -286,8 +185,7 @@ int radeonfb_sync(struct fb_info *info)
286 185
287 if (info->state != FBINFO_STATE_RUNNING) 186 if (info->state != FBINFO_STATE_RUNNING)
288 return 0; 187 return 0;
289 188 radeon_engine_idle();
290 radeon_engine_idle(rinfo);
291 189
292 return 0; 190 return 0;
293} 191}
@@ -363,10 +261,9 @@ void radeonfb_engine_init (struct radeonfb_info *rinfo)
363 /* disable 3D engine */ 261 /* disable 3D engine */
364 OUTREG(RB3D_CNTL, 0); 262 OUTREG(RB3D_CNTL, 0);
365 263
366 rinfo->fifo_free = 0;
367 radeonfb_engine_reset(rinfo); 264 radeonfb_engine_reset(rinfo);
368 265
369 radeon_fifo_wait(rinfo, 1); 266 radeon_fifo_wait (1);
370 if (IS_R300_VARIANT(rinfo)) { 267 if (IS_R300_VARIANT(rinfo)) {
371 OUTREG(RB2D_DSTCACHE_MODE, INREG(RB2D_DSTCACHE_MODE) | 268 OUTREG(RB2D_DSTCACHE_MODE, INREG(RB2D_DSTCACHE_MODE) |
372 RB2D_DC_AUTOFLUSH_ENABLE | 269 RB2D_DC_AUTOFLUSH_ENABLE |
@@ -380,7 +277,7 @@ void radeonfb_engine_init (struct radeonfb_info *rinfo)
380 OUTREG(RB2D_DSTCACHE_MODE, 0); 277 OUTREG(RB2D_DSTCACHE_MODE, 0);
381 } 278 }
382 279
383 radeon_fifo_wait(rinfo, 3); 280 radeon_fifo_wait (3);
384 /* We re-read MC_FB_LOCATION from card as it can have been 281 /* We re-read MC_FB_LOCATION from card as it can have been
385 * modified by XFree drivers (ouch !) 282 * modified by XFree drivers (ouch !)
386 */ 283 */
@@ -391,57 +288,41 @@ void radeonfb_engine_init (struct radeonfb_info *rinfo)
391 OUTREG(DST_PITCH_OFFSET, (rinfo->pitch << 0x16) | (rinfo->fb_local_base >> 10)); 288 OUTREG(DST_PITCH_OFFSET, (rinfo->pitch << 0x16) | (rinfo->fb_local_base >> 10));
392 OUTREG(SRC_PITCH_OFFSET, (rinfo->pitch << 0x16) | (rinfo->fb_local_base >> 10)); 289 OUTREG(SRC_PITCH_OFFSET, (rinfo->pitch << 0x16) | (rinfo->fb_local_base >> 10));
393 290
394 radeon_fifo_wait(rinfo, 1); 291 radeon_fifo_wait (1);
395#ifdef __BIG_ENDIAN 292#if defined(__BIG_ENDIAN)
396 OUTREGP(DP_DATATYPE, HOST_BIG_ENDIAN_EN, ~HOST_BIG_ENDIAN_EN); 293 OUTREGP(DP_DATATYPE, HOST_BIG_ENDIAN_EN, ~HOST_BIG_ENDIAN_EN);
397#else 294#else
398 OUTREGP(DP_DATATYPE, 0, ~HOST_BIG_ENDIAN_EN); 295 OUTREGP(DP_DATATYPE, 0, ~HOST_BIG_ENDIAN_EN);
399#endif 296#endif
400 radeon_fifo_wait(rinfo, 2); 297 radeon_fifo_wait (2);
401 OUTREG(DEFAULT_SC_TOP_LEFT, 0); 298 OUTREG(DEFAULT_SC_TOP_LEFT, 0);
402 OUTREG(DEFAULT_SC_BOTTOM_RIGHT, (DEFAULT_SC_RIGHT_MAX | 299 OUTREG(DEFAULT_SC_BOTTOM_RIGHT, (DEFAULT_SC_RIGHT_MAX |
403 DEFAULT_SC_BOTTOM_MAX)); 300 DEFAULT_SC_BOTTOM_MAX));
404 301
405 /* set default DP_GUI_MASTER_CNTL */
406 temp = radeon_get_dstbpp(rinfo->depth); 302 temp = radeon_get_dstbpp(rinfo->depth);
407 rinfo->dp_gui_mc_base = ((temp << 8) | GMC_CLR_CMP_CNTL_DIS); 303 rinfo->dp_gui_master_cntl = ((temp << 8) | GMC_CLR_CMP_CNTL_DIS);
408 304
409 rinfo->dp_gui_mc_cache = rinfo->dp_gui_mc_base | 305 radeon_fifo_wait (1);
410 GMC_BRUSH_SOLID_COLOR | 306 OUTREG(DP_GUI_MASTER_CNTL, (rinfo->dp_gui_master_cntl |
411 GMC_SRC_DATATYPE_COLOR; 307 GMC_BRUSH_SOLID_COLOR |
412 radeon_fifo_wait(rinfo, 1); 308 GMC_SRC_DATATYPE_COLOR));
413 OUTREG(DP_GUI_MASTER_CNTL, rinfo->dp_gui_mc_cache);
414 309
310 radeon_fifo_wait (7);
415 311
416 /* clear line drawing regs */ 312 /* clear line drawing regs */
417 radeon_fifo_wait(rinfo, 2);
418 OUTREG(DST_LINE_START, 0); 313 OUTREG(DST_LINE_START, 0);
419 OUTREG(DST_LINE_END, 0); 314 OUTREG(DST_LINE_END, 0);
420 315
421 /* set brush and source color regs */ 316 /* set brush color regs */
422 rinfo->dp_brush_fg_cache = 0xffffffff; 317 OUTREG(DP_BRUSH_FRGD_CLR, 0xffffffff);
423 rinfo->dp_brush_bg_cache = 0x00000000; 318 OUTREG(DP_BRUSH_BKGD_CLR, 0x00000000);
424 rinfo->dp_src_fg_cache = 0xffffffff; 319
425 rinfo->dp_src_bg_cache = 0x00000000; 320 /* set source color regs */
426 radeon_fifo_wait(rinfo, 4); 321 OUTREG(DP_SRC_FRGD_CLR, 0xffffffff);
427 OUTREG(DP_BRUSH_FRGD_CLR, rinfo->dp_brush_fg_cache); 322 OUTREG(DP_SRC_BKGD_CLR, 0x00000000);
428 OUTREG(DP_BRUSH_BKGD_CLR, rinfo->dp_brush_bg_cache);
429 OUTREG(DP_SRC_FRGD_CLR, rinfo->dp_src_fg_cache);
430 OUTREG(DP_SRC_BKGD_CLR, rinfo->dp_src_bg_cache);
431
432 /* Default direction */
433 rinfo->dp_cntl_cache = DST_X_LEFT_TO_RIGHT | DST_Y_TOP_TO_BOTTOM;
434 radeon_fifo_wait(rinfo, 1);
435 OUTREG(DP_CNTL, rinfo->dp_cntl_cache);
436 323
437 /* default write mask */ 324 /* default write mask */
438 radeon_fifo_wait(rinfo, 1);
439 OUTREG(DP_WRITE_MSK, 0xffffffff); 325 OUTREG(DP_WRITE_MSK, 0xffffffff);
440 326
441 /* Default to no swapping of host data */ 327 radeon_engine_idle ();
442 radeon_fifo_wait(rinfo, 1);
443 OUTREG(RBBM_GUICNTL, RBBM_GUICNTL_HOST_DATA_SWAP_NONE);
444
445 /* Make sure it's settled */
446 radeon_engine_idle(rinfo);
447} 328}
diff --git a/drivers/video/aty/radeon_backlight.c b/drivers/video/aty/radeon_backlight.c
index f343ba83f0ae..1a056adb61c8 100644
--- a/drivers/video/aty/radeon_backlight.c
+++ b/drivers/video/aty/radeon_backlight.c
@@ -66,7 +66,7 @@ static int radeon_bl_update_status(struct backlight_device *bd)
66 level = bd->props.brightness; 66 level = bd->props.brightness;
67 67
68 del_timer_sync(&rinfo->lvds_timer); 68 del_timer_sync(&rinfo->lvds_timer);
69 radeon_engine_idle(rinfo); 69 radeon_engine_idle();
70 70
71 lvds_gen_cntl = INREG(LVDS_GEN_CNTL); 71 lvds_gen_cntl = INREG(LVDS_GEN_CNTL);
72 if (level > 0) { 72 if (level > 0) {
diff --git a/drivers/video/aty/radeon_base.c b/drivers/video/aty/radeon_base.c
index 9a5821c65ebf..d0f1a7fc2c9d 100644
--- a/drivers/video/aty/radeon_base.c
+++ b/drivers/video/aty/radeon_base.c
@@ -852,6 +852,7 @@ static int radeonfb_pan_display (struct fb_var_screeninfo *var,
852 if (rinfo->asleep) 852 if (rinfo->asleep)
853 return 0; 853 return 0;
854 854
855 radeon_fifo_wait(2);
855 OUTREG(CRTC_OFFSET, ((var->yoffset * var->xres_virtual + var->xoffset) 856 OUTREG(CRTC_OFFSET, ((var->yoffset * var->xres_virtual + var->xoffset)
856 * var->bits_per_pixel / 8) & ~7); 857 * var->bits_per_pixel / 8) & ~7);
857 return 0; 858 return 0;
@@ -881,6 +882,7 @@ static int radeonfb_ioctl (struct fb_info *info, unsigned int cmd,
881 if (rc) 882 if (rc)
882 return rc; 883 return rc;
883 884
885 radeon_fifo_wait(2);
884 if (value & 0x01) { 886 if (value & 0x01) {
885 tmp = INREG(LVDS_GEN_CNTL); 887 tmp = INREG(LVDS_GEN_CNTL);
886 888
@@ -938,7 +940,7 @@ int radeon_screen_blank(struct radeonfb_info *rinfo, int blank, int mode_switch)
938 if (rinfo->lock_blank) 940 if (rinfo->lock_blank)
939 return 0; 941 return 0;
940 942
941 radeon_engine_idle(rinfo); 943 radeon_engine_idle();
942 944
943 val = INREG(CRTC_EXT_CNTL); 945 val = INREG(CRTC_EXT_CNTL);
944 val &= ~(CRTC_DISPLAY_DIS | CRTC_HSYNC_DIS | 946 val &= ~(CRTC_DISPLAY_DIS | CRTC_HSYNC_DIS |
@@ -1046,7 +1048,7 @@ static int radeonfb_blank (int blank, struct fb_info *info)
1046 1048
1047 if (rinfo->asleep) 1049 if (rinfo->asleep)
1048 return 0; 1050 return 0;
1049 1051
1050 return radeon_screen_blank(rinfo, blank, 0); 1052 return radeon_screen_blank(rinfo, blank, 0);
1051} 1053}
1052 1054
@@ -1072,6 +1074,8 @@ static int radeon_setcolreg (unsigned regno, unsigned red, unsigned green,
1072 pindex = regno; 1074 pindex = regno;
1073 1075
1074 if (!rinfo->asleep) { 1076 if (!rinfo->asleep) {
1077 radeon_fifo_wait(9);
1078
1075 if (rinfo->bpp == 16) { 1079 if (rinfo->bpp == 16) {
1076 pindex = regno * 8; 1080 pindex = regno * 8;
1077 1081
@@ -1240,6 +1244,8 @@ static void radeon_write_pll_regs(struct radeonfb_info *rinfo, struct radeon_reg
1240{ 1244{
1241 int i; 1245 int i;
1242 1246
1247 radeon_fifo_wait(20);
1248
1243 /* Workaround from XFree */ 1249 /* Workaround from XFree */
1244 if (rinfo->is_mobility) { 1250 if (rinfo->is_mobility) {
1245 /* A temporal workaround for the occational blanking on certain laptop 1251 /* A temporal workaround for the occational blanking on certain laptop
@@ -1335,7 +1341,7 @@ static void radeon_lvds_timer_func(unsigned long data)
1335{ 1341{
1336 struct radeonfb_info *rinfo = (struct radeonfb_info *)data; 1342 struct radeonfb_info *rinfo = (struct radeonfb_info *)data;
1337 1343
1338 radeon_engine_idle(rinfo); 1344 radeon_engine_idle();
1339 1345
1340 OUTREG(LVDS_GEN_CNTL, rinfo->pending_lvds_gen_cntl); 1346 OUTREG(LVDS_GEN_CNTL, rinfo->pending_lvds_gen_cntl);
1341} 1347}
@@ -1353,11 +1359,10 @@ void radeon_write_mode (struct radeonfb_info *rinfo, struct radeon_regs *mode,
1353 if (nomodeset) 1359 if (nomodeset)
1354 return; 1360 return;
1355 1361
1356 radeon_engine_idle(rinfo);
1357
1358 if (!regs_only) 1362 if (!regs_only)
1359 radeon_screen_blank(rinfo, FB_BLANK_NORMAL, 0); 1363 radeon_screen_blank(rinfo, FB_BLANK_NORMAL, 0);
1360 1364
1365 radeon_fifo_wait(31);
1361 for (i=0; i<10; i++) 1366 for (i=0; i<10; i++)
1362 OUTREG(common_regs[i].reg, common_regs[i].val); 1367 OUTREG(common_regs[i].reg, common_regs[i].val);
1363 1368
@@ -1385,6 +1390,7 @@ void radeon_write_mode (struct radeonfb_info *rinfo, struct radeon_regs *mode,
1385 radeon_write_pll_regs(rinfo, mode); 1390 radeon_write_pll_regs(rinfo, mode);
1386 1391
1387 if ((primary_mon == MT_DFP) || (primary_mon == MT_LCD)) { 1392 if ((primary_mon == MT_DFP) || (primary_mon == MT_LCD)) {
1393 radeon_fifo_wait(10);
1388 OUTREG(FP_CRTC_H_TOTAL_DISP, mode->fp_crtc_h_total_disp); 1394 OUTREG(FP_CRTC_H_TOTAL_DISP, mode->fp_crtc_h_total_disp);
1389 OUTREG(FP_CRTC_V_TOTAL_DISP, mode->fp_crtc_v_total_disp); 1395 OUTREG(FP_CRTC_V_TOTAL_DISP, mode->fp_crtc_v_total_disp);
1390 OUTREG(FP_H_SYNC_STRT_WID, mode->fp_h_sync_strt_wid); 1396 OUTREG(FP_H_SYNC_STRT_WID, mode->fp_h_sync_strt_wid);
@@ -1399,6 +1405,7 @@ void radeon_write_mode (struct radeonfb_info *rinfo, struct radeon_regs *mode,
1399 if (!regs_only) 1405 if (!regs_only)
1400 radeon_screen_blank(rinfo, FB_BLANK_UNBLANK, 0); 1406 radeon_screen_blank(rinfo, FB_BLANK_UNBLANK, 0);
1401 1407
1408 radeon_fifo_wait(2);
1402 OUTPLL(VCLK_ECP_CNTL, mode->vclk_ecp_cntl); 1409 OUTPLL(VCLK_ECP_CNTL, mode->vclk_ecp_cntl);
1403 1410
1404 return; 1411 return;
@@ -1549,7 +1556,7 @@ static int radeonfb_set_par(struct fb_info *info)
1549 /* We always want engine to be idle on a mode switch, even 1556 /* We always want engine to be idle on a mode switch, even
1550 * if we won't actually change the mode 1557 * if we won't actually change the mode
1551 */ 1558 */
1552 radeon_engine_idle(rinfo); 1559 radeon_engine_idle();
1553 1560
1554 hSyncStart = mode->xres + mode->right_margin; 1561 hSyncStart = mode->xres + mode->right_margin;
1555 hSyncEnd = hSyncStart + mode->hsync_len; 1562 hSyncEnd = hSyncStart + mode->hsync_len;
@@ -1844,6 +1851,7 @@ static int radeonfb_set_par(struct fb_info *info)
1844 return 0; 1851 return 0;
1845} 1852}
1846 1853
1854
1847static struct fb_ops radeonfb_ops = { 1855static struct fb_ops radeonfb_ops = {
1848 .owner = THIS_MODULE, 1856 .owner = THIS_MODULE,
1849 .fb_check_var = radeonfb_check_var, 1857 .fb_check_var = radeonfb_check_var,
@@ -1867,7 +1875,6 @@ static int __devinit radeon_set_fbinfo (struct radeonfb_info *rinfo)
1867 info->par = rinfo; 1875 info->par = rinfo;
1868 info->pseudo_palette = rinfo->pseudo_palette; 1876 info->pseudo_palette = rinfo->pseudo_palette;
1869 info->flags = FBINFO_DEFAULT 1877 info->flags = FBINFO_DEFAULT
1870 | FBINFO_HWACCEL_IMAGEBLIT
1871 | FBINFO_HWACCEL_COPYAREA 1878 | FBINFO_HWACCEL_COPYAREA
1872 | FBINFO_HWACCEL_FILLRECT 1879 | FBINFO_HWACCEL_FILLRECT
1873 | FBINFO_HWACCEL_XPAN 1880 | FBINFO_HWACCEL_XPAN
@@ -1999,6 +2006,7 @@ static void radeon_identify_vram(struct radeonfb_info *rinfo)
1999 u32 tom = INREG(NB_TOM); 2006 u32 tom = INREG(NB_TOM);
2000 tmp = ((((tom >> 16) - (tom & 0xffff) + 1) << 6) * 1024); 2007 tmp = ((((tom >> 16) - (tom & 0xffff) + 1) << 6) * 1024);
2001 2008
2009 radeon_fifo_wait(6);
2002 OUTREG(MC_FB_LOCATION, tom); 2010 OUTREG(MC_FB_LOCATION, tom);
2003 OUTREG(DISPLAY_BASE_ADDR, (tom & 0xffff) << 16); 2011 OUTREG(DISPLAY_BASE_ADDR, (tom & 0xffff) << 16);
2004 OUTREG(CRTC2_DISPLAY_BASE_ADDR, (tom & 0xffff) << 16); 2012 OUTREG(CRTC2_DISPLAY_BASE_ADDR, (tom & 0xffff) << 16);
diff --git a/drivers/video/aty/radeon_pm.c b/drivers/video/aty/radeon_pm.c
index 3df5015f1d13..675abdafc2d8 100644
--- a/drivers/video/aty/radeon_pm.c
+++ b/drivers/video/aty/radeon_pm.c
@@ -2653,9 +2653,9 @@ int radeonfb_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
2653 2653
2654 if (!(info->flags & FBINFO_HWACCEL_DISABLED)) { 2654 if (!(info->flags & FBINFO_HWACCEL_DISABLED)) {
2655 /* Make sure engine is reset */ 2655 /* Make sure engine is reset */
2656 radeon_engine_idle(rinfo); 2656 radeon_engine_idle();
2657 radeonfb_engine_reset(rinfo); 2657 radeonfb_engine_reset(rinfo);
2658 radeon_engine_idle(rinfo); 2658 radeon_engine_idle();
2659 } 2659 }
2660 2660
2661 /* Blank display and LCD */ 2661 /* Blank display and LCD */
@@ -2767,7 +2767,7 @@ int radeonfb_pci_resume(struct pci_dev *pdev)
2767 2767
2768 rinfo->asleep = 0; 2768 rinfo->asleep = 0;
2769 } else 2769 } else
2770 radeon_engine_idle(rinfo); 2770 radeon_engine_idle();
2771 2771
2772 /* Restore display & engine */ 2772 /* Restore display & engine */
2773 radeon_write_mode (rinfo, &rinfo->state, 1); 2773 radeon_write_mode (rinfo, &rinfo->state, 1);
diff --git a/drivers/video/aty/radeonfb.h b/drivers/video/aty/radeonfb.h
index ea0b5b47acaf..3ea1b00fdd22 100644
--- a/drivers/video/aty/radeonfb.h
+++ b/drivers/video/aty/radeonfb.h
@@ -336,15 +336,7 @@ struct radeonfb_info {
336 int mon2_type; 336 int mon2_type;
337 u8 *mon2_EDID; 337 u8 *mon2_EDID;
338 338
339 /* accel bits */ 339 u32 dp_gui_master_cntl;
340 u32 dp_gui_mc_base;
341 u32 dp_gui_mc_cache;
342 u32 dp_cntl_cache;
343 u32 dp_brush_fg_cache;
344 u32 dp_brush_bg_cache;
345 u32 dp_src_fg_cache;
346 u32 dp_src_bg_cache;
347 u32 fifo_free;
348 340
349 struct pll_info pll; 341 struct pll_info pll;
350 342
@@ -356,7 +348,6 @@ struct radeonfb_info {
356 int lock_blank; 348 int lock_blank;
357 int dynclk; 349 int dynclk;
358 int no_schedule; 350 int no_schedule;
359 int gfx_mode;
360 enum radeon_pm_mode pm_mode; 351 enum radeon_pm_mode pm_mode;
361 reinit_function_ptr reinit_func; 352 reinit_function_ptr reinit_func;
362 353
@@ -401,14 +392,8 @@ static inline void _radeon_msleep(struct radeonfb_info *rinfo, unsigned long ms)
401#define OUTREG8(addr,val) writeb(val, (rinfo->mmio_base)+addr) 392#define OUTREG8(addr,val) writeb(val, (rinfo->mmio_base)+addr)
402#define INREG16(addr) readw((rinfo->mmio_base)+addr) 393#define INREG16(addr) readw((rinfo->mmio_base)+addr)
403#define OUTREG16(addr,val) writew(val, (rinfo->mmio_base)+addr) 394#define OUTREG16(addr,val) writew(val, (rinfo->mmio_base)+addr)
404
405#ifdef CONFIG_PPC
406#define INREG(addr) ({ eieio(); ld_le32(rinfo->mmio_base+(addr)); })
407#define OUTREG(addr,val) do { eieio(); st_le32(rinfo->mmio_base+(addr),(val)); } while(0)
408#else
409#define INREG(addr) readl((rinfo->mmio_base)+addr) 395#define INREG(addr) readl((rinfo->mmio_base)+addr)
410#define OUTREG(addr,val) writel(val, (rinfo->mmio_base)+addr) 396#define OUTREG(addr,val) writel(val, (rinfo->mmio_base)+addr)
411#endif
412 397
413static inline void _OUTREGP(struct radeonfb_info *rinfo, u32 addr, 398static inline void _OUTREGP(struct radeonfb_info *rinfo, u32 addr,
414 u32 val, u32 mask) 399 u32 val, u32 mask)
@@ -550,7 +535,17 @@ static inline u32 radeon_get_dstbpp(u16 depth)
550 * 2D Engine helper routines 535 * 2D Engine helper routines
551 */ 536 */
552 537
553extern void radeon_fifo_update_and_wait(struct radeonfb_info *rinfo, int entries); 538static inline void _radeon_fifo_wait(struct radeonfb_info *rinfo, int entries)
539{
540 int i;
541
542 for (i=0; i<2000000; i++) {
543 if ((INREG(RBBM_STATUS) & 0x7f) >= entries)
544 return;
545 udelay(1);
546 }
547 printk(KERN_ERR "radeonfb: FIFO Timeout !\n");
548}
554 549
555static inline void radeon_engine_flush (struct radeonfb_info *rinfo) 550static inline void radeon_engine_flush (struct radeonfb_info *rinfo)
556{ 551{
@@ -563,7 +558,7 @@ static inline void radeon_engine_flush (struct radeonfb_info *rinfo)
563 /* Ensure FIFO is empty, ie, make sure the flush commands 558 /* Ensure FIFO is empty, ie, make sure the flush commands
564 * has reached the cache 559 * has reached the cache
565 */ 560 */
566 radeon_fifo_update_and_wait(rinfo, 64); 561 _radeon_fifo_wait (rinfo, 64);
567 562
568 /* Wait for the flush to complete */ 563 /* Wait for the flush to complete */
569 for (i=0; i < 2000000; i++) { 564 for (i=0; i < 2000000; i++) {
@@ -575,12 +570,12 @@ static inline void radeon_engine_flush (struct radeonfb_info *rinfo)
575} 570}
576 571
577 572
578static inline void radeon_engine_idle(struct radeonfb_info *rinfo) 573static inline void _radeon_engine_idle(struct radeonfb_info *rinfo)
579{ 574{
580 int i; 575 int i;
581 576
582 /* ensure FIFO is empty before waiting for idle */ 577 /* ensure FIFO is empty before waiting for idle */
583 radeon_fifo_update_and_wait (rinfo, 64); 578 _radeon_fifo_wait (rinfo, 64);
584 579
585 for (i=0; i<2000000; i++) { 580 for (i=0; i<2000000; i++) {
586 if (((INREG(RBBM_STATUS) & GUI_ACTIVE)) == 0) { 581 if (((INREG(RBBM_STATUS) & GUI_ACTIVE)) == 0) {
@@ -593,6 +588,8 @@ static inline void radeon_engine_idle(struct radeonfb_info *rinfo)
593} 588}
594 589
595 590
591#define radeon_engine_idle() _radeon_engine_idle(rinfo)
592#define radeon_fifo_wait(entries) _radeon_fifo_wait(rinfo,entries)
596#define radeon_msleep(ms) _radeon_msleep(rinfo,ms) 593#define radeon_msleep(ms) _radeon_msleep(rinfo,ms)
597 594
598 595
@@ -622,7 +619,6 @@ extern void radeonfb_imageblit(struct fb_info *p, const struct fb_image *image);
622extern int radeonfb_sync(struct fb_info *info); 619extern int radeonfb_sync(struct fb_info *info);
623extern void radeonfb_engine_init (struct radeonfb_info *rinfo); 620extern void radeonfb_engine_init (struct radeonfb_info *rinfo);
624extern void radeonfb_engine_reset(struct radeonfb_info *rinfo); 621extern void radeonfb_engine_reset(struct radeonfb_info *rinfo);
625extern void radeon_fixup_mem_offset(struct radeonfb_info *rinfo);
626 622
627/* Other functions */ 623/* Other functions */
628extern int radeon_screen_blank(struct radeonfb_info *rinfo, int blank, int mode_switch); 624extern int radeon_screen_blank(struct radeonfb_info *rinfo, int blank, int mode_switch);
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index c72a13562954..4a4dd9adc328 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -75,6 +75,15 @@ config LCD_PLATFORM
75 This driver provides a platform-device registered LCD power 75 This driver provides a platform-device registered LCD power
76 control interface. 76 control interface.
77 77
78config LCD_TOSA
79 tristate "Sharp SL-6000 LCD Driver"
80 depends on LCD_CLASS_DEVICE && SPI
81 depends on MACH_TOSA
82 default n
83 help
84	  If you have a Sharp SL-6000 Zaurus, say Y to enable a driver
85 for its LCD.
86
78# 87#
79# Backlight 88# Backlight
80# 89#
@@ -115,7 +124,7 @@ config BACKLIGHT_ATMEL_PWM
115 called atmel-pwm-bl. 124 called atmel-pwm-bl.
116 125
117config BACKLIGHT_CORGI 126config BACKLIGHT_CORGI
118 tristate "Generic (aka Sharp Corgi) Backlight Driver" 127 tristate "Generic (aka Sharp Corgi) Backlight Driver (DEPRECATED)"
119 depends on BACKLIGHT_CLASS_DEVICE 128 depends on BACKLIGHT_CLASS_DEVICE
120 default n 129 default n
121 help 130 help
@@ -123,6 +132,9 @@ config BACKLIGHT_CORGI
123 known as the Corgi backlight driver. If you have a Sharp Zaurus 132 known as the Corgi backlight driver. If you have a Sharp Zaurus
124 SL-C7xx, SL-Cxx00 or SL-6000x say y. Most users can say n. 133 SL-C7xx, SL-Cxx00 or SL-6000x say y. Most users can say n.
125 134
135	  Note: this driver is marked as deprecated; try enabling SPI and
136	  using the new corgi_lcd driver with integrated backlight control.
137
126config BACKLIGHT_LOCOMO 138config BACKLIGHT_LOCOMO
127 tristate "Sharp LOCOMO LCD/Backlight Driver" 139 tristate "Sharp LOCOMO LCD/Backlight Driver"
128 depends on BACKLIGHT_CLASS_DEVICE && SHARP_LOCOMO 140 depends on BACKLIGHT_CLASS_DEVICE && SHARP_LOCOMO
@@ -171,6 +183,13 @@ config BACKLIGHT_PWM
171 If you have a LCD backlight adjustable by PWM, say Y to enable 183 If you have a LCD backlight adjustable by PWM, say Y to enable
172 this driver. 184 this driver.
173 185
186config BACKLIGHT_DA903X
187 tristate "Backlight Driver for DA9030/DA9034 using WLED"
188 depends on BACKLIGHT_CLASS_DEVICE && PMIC_DA903X
189 help
190	  If you have an LCD backlight connected to the WLED output of a DA9030
191	  or DA9034, say Y here to enable this driver.
192
174config BACKLIGHT_MBP_NVIDIA 193config BACKLIGHT_MBP_NVIDIA
175 tristate "MacBook Pro Nvidia Backlight Driver" 194 tristate "MacBook Pro Nvidia Backlight Driver"
176 depends on BACKLIGHT_CLASS_DEVICE && X86 195 depends on BACKLIGHT_CLASS_DEVICE && X86
@@ -179,3 +198,19 @@ config BACKLIGHT_MBP_NVIDIA
179 If you have an Apple Macbook Pro with Nvidia graphics hardware say Y 198 If you have an Apple Macbook Pro with Nvidia graphics hardware say Y
180 to enable a driver for its backlight 199 to enable a driver for its backlight
181 200
201config BACKLIGHT_TOSA
202 tristate "Sharp SL-6000 Backlight Driver"
203 depends on BACKLIGHT_CLASS_DEVICE && I2C
204 depends on MACH_TOSA && LCD_TOSA
205 default n
206 help
207	  If you have a Sharp SL-6000 Zaurus, say Y to enable a driver
208	  for its backlight.
209
210config BACKLIGHT_SAHARA
211 tristate "Tabletkiosk Sahara Touch-iT Backlight Driver"
212 depends on BACKLIGHT_CLASS_DEVICE && X86
213 default n
214 help
215	  If you have a Tabletkiosk Sahara Touch-iT, say Y to enable the
216 backlight driver.
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index 3ec551eb472c..103427de6703 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_LCD_ILI9320) += ili9320.o
7obj-$(CONFIG_LCD_PLATFORM) += platform_lcd.o 7obj-$(CONFIG_LCD_PLATFORM) += platform_lcd.o
8obj-$(CONFIG_LCD_VGG2432A4) += vgg2432a4.o 8obj-$(CONFIG_LCD_VGG2432A4) += vgg2432a4.o
9obj-$(CONFIG_LCD_TDO24M) += tdo24m.o 9obj-$(CONFIG_LCD_TDO24M) += tdo24m.o
10obj-$(CONFIG_LCD_TOSA) += tosa_lcd.o
10 11
11obj-$(CONFIG_BACKLIGHT_CLASS_DEVICE) += backlight.o 12obj-$(CONFIG_BACKLIGHT_CLASS_DEVICE) += backlight.o
12obj-$(CONFIG_BACKLIGHT_ATMEL_PWM) += atmel-pwm-bl.o 13obj-$(CONFIG_BACKLIGHT_ATMEL_PWM) += atmel-pwm-bl.o
@@ -17,5 +18,8 @@ obj-$(CONFIG_BACKLIGHT_OMAP1) += omap1_bl.o
17obj-$(CONFIG_BACKLIGHT_PROGEAR) += progear_bl.o 18obj-$(CONFIG_BACKLIGHT_PROGEAR) += progear_bl.o
18obj-$(CONFIG_BACKLIGHT_CARILLO_RANCH) += cr_bllcd.o 19obj-$(CONFIG_BACKLIGHT_CARILLO_RANCH) += cr_bllcd.o
19obj-$(CONFIG_BACKLIGHT_PWM) += pwm_bl.o 20obj-$(CONFIG_BACKLIGHT_PWM) += pwm_bl.o
21obj-$(CONFIG_BACKLIGHT_DA903X) += da903x.o
20obj-$(CONFIG_BACKLIGHT_MBP_NVIDIA) += mbp_nvidia_bl.o 22obj-$(CONFIG_BACKLIGHT_MBP_NVIDIA) += mbp_nvidia_bl.o
23obj-$(CONFIG_BACKLIGHT_TOSA) += tosa_bl.o
24obj-$(CONFIG_BACKLIGHT_SAHARA) += kb3886_bl.o
21 25
diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
index 2afd47eefe74..f8a4bb20f41a 100644
--- a/drivers/video/backlight/corgi_lcd.c
+++ b/drivers/video/backlight/corgi_lcd.c
@@ -439,7 +439,7 @@ static int corgi_bl_update_status(struct backlight_device *bd)
439 return corgi_bl_set_intensity(lcd, intensity); 439 return corgi_bl_set_intensity(lcd, intensity);
440} 440}
441 441
442void corgibl_limit_intensity(int limit) 442void corgi_lcd_limit_intensity(int limit)
443{ 443{
444 if (limit) 444 if (limit)
445 corgibl_flags |= CORGIBL_BATTLOW; 445 corgibl_flags |= CORGIBL_BATTLOW;
@@ -448,7 +448,7 @@ void corgibl_limit_intensity(int limit)
448 448
449 backlight_update_status(the_corgi_lcd->bl_dev); 449 backlight_update_status(the_corgi_lcd->bl_dev);
450} 450}
451EXPORT_SYMBOL(corgibl_limit_intensity); 451EXPORT_SYMBOL(corgi_lcd_limit_intensity);
452 452
453static struct backlight_ops corgi_bl_ops = { 453static struct backlight_ops corgi_bl_ops = {
454 .get_brightness = corgi_bl_get_intensity, 454 .get_brightness = corgi_bl_get_intensity,
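An illustrative caller-side sketch, not part of this patch: with corgibl_limit_intensity() renamed above, code that called the old symbol (for example the Zaurus battery/PM code) has to switch to the new export. The extern declaration here stands in for whatever shared header actually carries it.

	extern void corgi_lcd_limit_intensity(int limit);

	static void example_battery_low_handler(void)
	{
		/* clamp the backlight while the battery is low */
		corgi_lcd_limit_intensity(1);
	}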
diff --git a/drivers/video/backlight/da903x.c b/drivers/video/backlight/da903x.c
new file mode 100644
index 000000000000..93bb4340cc64
--- /dev/null
+++ b/drivers/video/backlight/da903x.c
@@ -0,0 +1,203 @@
1/*
2 * Backlight driver for Dialog Semiconductor DA9030/DA9034
3 *
4 * Copyright (C) 2008 Compulab, Ltd.
5 * Mike Rapoport <mike@compulab.co.il>
6 *
7 * Copyright (C) 2006-2008 Marvell International Ltd.
8 * Eric Miao <eric.miao@marvell.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14
15#include <linux/kernel.h>
16#include <linux/init.h>
17#include <linux/platform_device.h>
18#include <linux/fb.h>
19#include <linux/backlight.h>
20#include <linux/mfd/da903x.h>
21
22#define DA9030_WLED_CONTROL 0x25
23#define DA9030_WLED_CP_EN (1 << 6)
24#define DA9030_WLED_TRIM(x) ((x) & 0x7)
25
26#define DA9034_WLED_CONTROL1 0x3C
27#define DA9034_WLED_CONTROL2 0x3D
28
29#define DA9034_WLED_BOOST_EN (1 << 5)
30
31#define DA9030_MAX_BRIGHTNESS 7
32#define DA9034_MAX_BRIGHTNESS 0x7f
33
34struct da903x_backlight_data {
35 struct device *da903x_dev;
36 int id;
37 int current_brightness;
38};
39
40static int da903x_backlight_set(struct backlight_device *bl, int brightness)
41{
42 struct da903x_backlight_data *data = bl_get_data(bl);
43 struct device *dev = data->da903x_dev;
44 uint8_t val;
45 int ret = 0;
46
47 switch (data->id) {
48 case DA9034_ID_WLED:
49 ret = da903x_update(dev, DA9034_WLED_CONTROL1,
50 brightness, 0x7f);
51 if (ret)
52 return ret;
53
54 if (data->current_brightness && brightness == 0)
55 ret = da903x_clr_bits(dev,
56 DA9034_WLED_CONTROL2,
57 DA9034_WLED_BOOST_EN);
58
59 if (data->current_brightness == 0 && brightness)
60 ret = da903x_set_bits(dev,
61 DA9034_WLED_CONTROL2,
62 DA9034_WLED_BOOST_EN);
63 break;
64 case DA9030_ID_WLED:
65 val = DA9030_WLED_TRIM(brightness);
66 val |= brightness ? DA9030_WLED_CP_EN : 0;
67 ret = da903x_write(dev, DA9030_WLED_CONTROL, val);
68 break;
69 }
70
71 if (ret)
72 return ret;
73
74 data->current_brightness = brightness;
75 return 0;
76}
77
78static int da903x_backlight_update_status(struct backlight_device *bl)
79{
80 int brightness = bl->props.brightness;
81
82 if (bl->props.power != FB_BLANK_UNBLANK)
83 brightness = 0;
84
85 if (bl->props.fb_blank != FB_BLANK_UNBLANK)
86 brightness = 0;
87
88 return da903x_backlight_set(bl, brightness);
89}
90
91static int da903x_backlight_get_brightness(struct backlight_device *bl)
92{
93 struct da903x_backlight_data *data = bl_get_data(bl);
94 return data->current_brightness;
95}
96
97static struct backlight_ops da903x_backlight_ops = {
98 .update_status = da903x_backlight_update_status,
99 .get_brightness = da903x_backlight_get_brightness,
100};
101
102static int da903x_backlight_probe(struct platform_device *pdev)
103{
104 struct da903x_backlight_data *data;
105 struct backlight_device *bl;
106 int max_brightness;
107
108 data = kzalloc(sizeof(*data), GFP_KERNEL);
109 if (data == NULL)
110 return -ENOMEM;
111
112 switch (pdev->id) {
113 case DA9030_ID_WLED:
114 max_brightness = DA9030_MAX_BRIGHTNESS;
115 break;
116 case DA9034_ID_WLED:
117 max_brightness = DA9034_MAX_BRIGHTNESS;
118 break;
119 default:
120 dev_err(&pdev->dev, "invalid backlight device ID(%d)\n",
121 pdev->id);
122 kfree(data);
123 return -EINVAL;
124 }
125
126 data->id = pdev->id;
127 data->da903x_dev = pdev->dev.parent;
128 data->current_brightness = 0;
129
130 bl = backlight_device_register(pdev->name, data->da903x_dev,
131 data, &da903x_backlight_ops);
132 if (IS_ERR(bl)) {
133 dev_err(&pdev->dev, "failed to register backlight\n");
134 kfree(data);
135 return PTR_ERR(bl);
136 }
137
138 bl->props.max_brightness = max_brightness;
139 bl->props.brightness = max_brightness;
140
141 platform_set_drvdata(pdev, bl);
142 backlight_update_status(bl);
143 return 0;
144}
145
146static int da903x_backlight_remove(struct platform_device *pdev)
147{
148 struct backlight_device *bl = platform_get_drvdata(pdev);
149 struct da903x_backlight_data *data = bl_get_data(bl);
150
151 backlight_device_unregister(bl);
152 kfree(data);
153 return 0;
154}
155
156#ifdef CONFIG_PM
157static int da903x_backlight_suspend(struct platform_device *pdev,
158 pm_message_t state)
159{
160 struct backlight_device *bl = platform_get_drvdata(pdev);
161 return da903x_backlight_set(bl, 0);
162}
163
164static int da903x_backlight_resume(struct platform_device *pdev)
165{
166 struct backlight_device *bl = platform_get_drvdata(pdev);
167
168 backlight_update_status(bl);
169 return 0;
170}
171#else
172#define da903x_backlight_suspend NULL
173#define da903x_backlight_resume NULL
174#endif
175
176static struct platform_driver da903x_backlight_driver = {
177 .driver = {
178 .name = "da903x-backlight",
179 .owner = THIS_MODULE,
180 },
181 .probe = da903x_backlight_probe,
182 .remove = da903x_backlight_remove,
183 .suspend = da903x_backlight_suspend,
184 .resume = da903x_backlight_resume,
185};
186
187static int __init da903x_backlight_init(void)
188{
189 return platform_driver_register(&da903x_backlight_driver);
190}
191module_init(da903x_backlight_init);
192
193static void __exit da903x_backlight_exit(void)
194{
195 platform_driver_unregister(&da903x_backlight_driver);
196}
197module_exit(da903x_backlight_exit);
198
199MODULE_DESCRIPTION("Backlight Driver for Dialog Semiconductor DA9030/DA9034");
 200MODULE_AUTHOR("Eric Miao <eric.miao@marvell.com>, "
201 "Mike Rapoport <mike@compulab.co.il>");
202MODULE_LICENSE("GPL");
203MODULE_ALIAS("platform:da903x-backlight");
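An illustrative sketch, not part of this patch: da903x_backlight_probe() picks the brightness range from pdev->id, so the driver binds once a platform device named "da903x-backlight" with a matching id exists. In practice that child device is expected to come from the DA903x MFD core; the explicit registration below is for illustration only.

	#include <linux/init.h>
	#include <linux/platform_device.h>
	#include <linux/mfd/da903x.h>

	static struct platform_device example_da9034_wled = {
		.name = "da903x-backlight",
		.id   = DA9034_ID_WLED,	/* probe() maps this to DA9034_MAX_BRIGHTNESS */
	};

	static int __init example_board_init(void)
	{
		return platform_device_register(&example_da9034_wled);
	}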
diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
new file mode 100644
index 000000000000..a38fda1742dd
--- /dev/null
+++ b/drivers/video/backlight/kb3886_bl.c
@@ -0,0 +1,204 @@
1/*
2 * Backlight Driver for the KB3886 Backlight
3 *
4 * Copyright (c) 2007-2008 Claudio Nieder
5 *
6 * Based on corgi_bl.c by Richard Purdie and kb3886 driver by Robert Woerle
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/init.h>
17#include <linux/platform_device.h>
18#include <linux/mutex.h>
19#include <linux/fb.h>
20#include <linux/backlight.h>
21#include <linux/delay.h>
22#include <linux/dmi.h>
23
24#define KB3886_PARENT 0x64
25#define KB3886_IO 0x60
26#define KB3886_ADC_DAC_PWM 0xC4
27#define KB3886_PWM0_WRITE 0x81
28#define KB3886_PWM0_READ 0x41
29
30static DEFINE_MUTEX(bl_mutex);
31
32static void kb3886_bl_set_intensity(int intensity)
33{
34 mutex_lock(&bl_mutex);
35 intensity = intensity&0xff;
36 outb(KB3886_ADC_DAC_PWM, KB3886_PARENT);
37 msleep(10);
38 outb(KB3886_PWM0_WRITE, KB3886_IO);
39 msleep(10);
40 outb(intensity, KB3886_IO);
41 mutex_unlock(&bl_mutex);
42}
43
44struct kb3886bl_machinfo {
45 int max_intensity;
46 int default_intensity;
47 int limit_mask;
48 void (*set_bl_intensity)(int intensity);
49};
50
51static struct kb3886bl_machinfo kb3886_bl_machinfo = {
52 .max_intensity = 0xff,
53 .default_intensity = 0xa0,
54 .limit_mask = 0x7f,
55 .set_bl_intensity = kb3886_bl_set_intensity,
56};
57
58static struct platform_device kb3886bl_device = {
59 .name = "kb3886-bl",
60 .dev = {
61 .platform_data = &kb3886_bl_machinfo,
62 },
63 .id = -1,
64};
65
66static struct platform_device *devices[] __initdata = {
67 &kb3886bl_device,
68};
69
70/*
71 * Back to driver
72 */
73
74static int kb3886bl_intensity;
75static struct backlight_device *kb3886_backlight_device;
76static struct kb3886bl_machinfo *bl_machinfo;
77
78static unsigned long kb3886bl_flags;
79#define KB3886BL_SUSPENDED 0x01
80
81static struct dmi_system_id __initdata kb3886bl_device_table[] = {
82 {
83 .ident = "Sahara Touch-iT",
84 .matches = {
85 DMI_MATCH(DMI_SYS_VENDOR, "SDV"),
86 DMI_MATCH(DMI_PRODUCT_NAME, "iTouch T201"),
87 },
88 },
89 { }
90};
91
92static int kb3886bl_send_intensity(struct backlight_device *bd)
93{
94 int intensity = bd->props.brightness;
95
96 if (bd->props.power != FB_BLANK_UNBLANK)
97 intensity = 0;
98 if (bd->props.fb_blank != FB_BLANK_UNBLANK)
99 intensity = 0;
100 if (kb3886bl_flags & KB3886BL_SUSPENDED)
101 intensity = 0;
102
103 bl_machinfo->set_bl_intensity(intensity);
104
105 kb3886bl_intensity = intensity;
106 return 0;
107}
108
109#ifdef CONFIG_PM
110static int kb3886bl_suspend(struct platform_device *pdev, pm_message_t state)
111{
112 struct backlight_device *bd = platform_get_drvdata(pdev);
113
114 kb3886bl_flags |= KB3886BL_SUSPENDED;
115 backlight_update_status(bd);
116 return 0;
117}
118
119static int kb3886bl_resume(struct platform_device *pdev)
120{
121 struct backlight_device *bd = platform_get_drvdata(pdev);
122
123 kb3886bl_flags &= ~KB3886BL_SUSPENDED;
124 backlight_update_status(bd);
125 return 0;
126}
127#else
128#define kb3886bl_suspend NULL
129#define kb3886bl_resume NULL
130#endif
131
132static int kb3886bl_get_intensity(struct backlight_device *bd)
133{
134 return kb3886bl_intensity;
135}
136
137static struct backlight_ops kb3886bl_ops = {
138 .get_brightness = kb3886bl_get_intensity,
139 .update_status = kb3886bl_send_intensity,
140};
141
142static int kb3886bl_probe(struct platform_device *pdev)
143{
144 struct kb3886bl_machinfo *machinfo = pdev->dev.platform_data;
145
146 bl_machinfo = machinfo;
147 if (!machinfo->limit_mask)
148 machinfo->limit_mask = -1;
149
150 kb3886_backlight_device = backlight_device_register("kb3886-bl",
151 &pdev->dev, NULL, &kb3886bl_ops);
152 if (IS_ERR(kb3886_backlight_device))
153 return PTR_ERR(kb3886_backlight_device);
154
155 platform_set_drvdata(pdev, kb3886_backlight_device);
156
157 kb3886_backlight_device->props.max_brightness = machinfo->max_intensity;
158 kb3886_backlight_device->props.power = FB_BLANK_UNBLANK;
159 kb3886_backlight_device->props.brightness = machinfo->default_intensity;
160 backlight_update_status(kb3886_backlight_device);
161
162 return 0;
163}
164
165static int kb3886bl_remove(struct platform_device *pdev)
166{
167 struct backlight_device *bd = platform_get_drvdata(pdev);
168
169 backlight_device_unregister(bd);
170
171 return 0;
172}
173
174static struct platform_driver kb3886bl_driver = {
175 .probe = kb3886bl_probe,
176 .remove = kb3886bl_remove,
177 .suspend = kb3886bl_suspend,
178 .resume = kb3886bl_resume,
179 .driver = {
180 .name = "kb3886-bl",
181 },
182};
183
184static int __init kb3886_init(void)
185{
186 if (!dmi_check_system(kb3886bl_device_table))
187 return -ENODEV;
188
189 platform_add_devices(devices, ARRAY_SIZE(devices));
190 return platform_driver_register(&kb3886bl_driver);
191}
192
193static void __exit kb3886_exit(void)
194{
195 platform_driver_unregister(&kb3886bl_driver);
196}
197
198module_init(kb3886_init);
199module_exit(kb3886_exit);
200
201MODULE_AUTHOR("Claudio Nieder <private@claudio.ch>");
202MODULE_DESCRIPTION("Tabletkiosk Sahara Touch-iT Backlight Driver");
203MODULE_LICENSE("GPL");
204MODULE_ALIAS("dmi:*:svnSDV:pniTouchT201:*");
diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c
index 8e1731d3b228..680e57b616cd 100644
--- a/drivers/video/backlight/lcd.c
+++ b/drivers/video/backlight/lcd.c
@@ -42,10 +42,13 @@ static int fb_notifier_callback(struct notifier_block *self,
42 42
43 mutex_lock(&ld->ops_lock); 43 mutex_lock(&ld->ops_lock);
44 if (!ld->ops->check_fb || ld->ops->check_fb(ld, evdata->info)) { 44 if (!ld->ops->check_fb || ld->ops->check_fb(ld, evdata->info)) {
45 if (event == FB_EVENT_BLANK) 45 if (event == FB_EVENT_BLANK) {
46 ld->ops->set_power(ld, *(int *)evdata->data); 46 if (ld->ops->set_power)
47 else 47 ld->ops->set_power(ld, *(int *)evdata->data);
48 ld->ops->set_mode(ld, evdata->data); 48 } else {
49 if (ld->ops->set_mode)
50 ld->ops->set_mode(ld, evdata->data);
51 }
49 } 52 }
50 mutex_unlock(&ld->ops_lock); 53 mutex_unlock(&ld->ops_lock);
51 return 0; 54 return 0;
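An illustrative sketch, not part of this patch: with the NULL checks added above, an LCD driver can register ops that implement only set_power and leave set_mode unset (as tosa_lcd.c does later in this series) without oopsing in the framebuffer notifier.

	#include <linux/lcd.h>

	static int example_set_power(struct lcd_device *ld, int power)
	{
		return 0;	/* hypothetical power switch */
	}

	static struct lcd_ops example_lcd_ops = {
		.set_power = example_set_power,
		/* .set_mode intentionally left NULL */
	};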
diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
new file mode 100644
index 000000000000..43edbada12d1
--- /dev/null
+++ b/drivers/video/backlight/tosa_bl.c
@@ -0,0 +1,198 @@
1/*
2 * LCD / Backlight control code for Sharp SL-6000x (tosa)
3 *
4 * Copyright (c) 2005 Dirk Opfer
5 * Copyright (c) 2007,2008 Dmitry Baryshkov
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 */
12
13#include <linux/kernel.h>
14#include <linux/module.h>
15#include <linux/device.h>
16#include <linux/spi/spi.h>
17#include <linux/i2c.h>
18#include <linux/gpio.h>
19#include <linux/fb.h>
20#include <linux/backlight.h>
21
22#include <asm/mach/sharpsl_param.h>
23
24#include <mach/tosa.h>
25
26#define COMADJ_DEFAULT 97
27
28#define DAC_CH1 0
29#define DAC_CH2 1
30
31struct tosa_bl_data {
32 struct i2c_client *i2c;
33 struct backlight_device *bl;
34
35 int comadj;
36};
37
38static void tosa_bl_set_backlight(struct tosa_bl_data *data, int brightness)
39{
40 struct spi_device *spi = data->i2c->dev.platform_data;
41
42 i2c_smbus_write_byte_data(data->i2c, DAC_CH1, data->comadj);
43
44 /* SetBacklightDuty */
45 i2c_smbus_write_byte_data(data->i2c, DAC_CH2, (u8)(brightness & 0xff));
46
47 /* SetBacklightVR */
48 gpio_set_value(TOSA_GPIO_BL_C20MA, brightness & 0x100);
49
50 tosa_bl_enable(spi, brightness);
51}
52
53static int tosa_bl_update_status(struct backlight_device *dev)
54{
55 struct backlight_properties *props = &dev->props;
56 struct tosa_bl_data *data = dev_get_drvdata(&dev->dev);
57 int power = max(props->power, props->fb_blank);
58 int brightness = props->brightness;
59
60 if (power)
61 brightness = 0;
62
63 tosa_bl_set_backlight(data, brightness);
64
65 return 0;
66}
67
68static int tosa_bl_get_brightness(struct backlight_device *dev)
69{
70 struct backlight_properties *props = &dev->props;
71
72 return props->brightness;
73}
74
75static struct backlight_ops bl_ops = {
76 .get_brightness = tosa_bl_get_brightness,
77 .update_status = tosa_bl_update_status,
78};
79
80static int __devinit tosa_bl_probe(struct i2c_client *client,
81 const struct i2c_device_id *id)
82{
83 struct tosa_bl_data *data = kzalloc(sizeof(struct tosa_bl_data), GFP_KERNEL);
84 int ret = 0;
85 if (!data)
86 return -ENOMEM;
87
88 data->comadj = sharpsl_param.comadj == -1 ? COMADJ_DEFAULT : sharpsl_param.comadj;
89
90 ret = gpio_request(TOSA_GPIO_BL_C20MA, "backlight");
91 if (ret) {
92 dev_dbg(&data->bl->dev, "Unable to request gpio!\n");
93 goto err_gpio_bl;
94 }
95 ret = gpio_direction_output(TOSA_GPIO_BL_C20MA, 0);
96 if (ret)
97 goto err_gpio_dir;
98
99 i2c_set_clientdata(client, data);
100 data->i2c = client;
101
102 data->bl = backlight_device_register("tosa-bl", &client->dev,
103 data, &bl_ops);
104 if (IS_ERR(data->bl)) {
105 ret = PTR_ERR(data->bl);
106 goto err_reg;
107 }
108
109 data->bl->props.brightness = 69;
110 data->bl->props.max_brightness = 512 - 1;
111 data->bl->props.power = FB_BLANK_UNBLANK;
112
113 backlight_update_status(data->bl);
114
115 return 0;
116
117err_reg:
118 data->bl = NULL;
119 i2c_set_clientdata(client, NULL);
120err_gpio_dir:
121 gpio_free(TOSA_GPIO_BL_C20MA);
122err_gpio_bl:
123 kfree(data);
124 return ret;
125}
126
127static int __devexit tosa_bl_remove(struct i2c_client *client)
128{
129 struct tosa_bl_data *data = i2c_get_clientdata(client);
130
131 backlight_device_unregister(data->bl);
132 data->bl = NULL;
133 i2c_set_clientdata(client, NULL);
134
135 gpio_free(TOSA_GPIO_BL_C20MA);
136
137 kfree(data);
138
139 return 0;
140}
141
142#ifdef CONFIG_PM
143static int tosa_bl_suspend(struct i2c_client *client, pm_message_t pm)
144{
145 struct tosa_bl_data *data = i2c_get_clientdata(client);
146
147 tosa_bl_set_backlight(data, 0);
148
149 return 0;
150}
151
152static int tosa_bl_resume(struct i2c_client *client)
153{
154 struct tosa_bl_data *data = i2c_get_clientdata(client);
155
156 backlight_update_status(data->bl);
157 return 0;
158}
159#else
160#define tosa_bl_suspend NULL
161#define tosa_bl_resume NULL
162#endif
163
164static const struct i2c_device_id tosa_bl_id[] = {
165 { "tosa-bl", 0 },
166 { },
167};
168
169
170static struct i2c_driver tosa_bl_driver = {
171 .driver = {
172 .name = "tosa-bl",
173 .owner = THIS_MODULE,
174 },
175 .probe = tosa_bl_probe,
176 .remove = __devexit_p(tosa_bl_remove),
177 .suspend = tosa_bl_suspend,
178 .resume = tosa_bl_resume,
179 .id_table = tosa_bl_id,
180};
181
182static int __init tosa_bl_init(void)
183{
184 return i2c_add_driver(&tosa_bl_driver);
185}
186
187static void __exit tosa_bl_exit(void)
188{
189 i2c_del_driver(&tosa_bl_driver);
190}
191
192module_init(tosa_bl_init);
193module_exit(tosa_bl_exit);
194
195MODULE_AUTHOR("Dmitry Baryshkov");
196MODULE_LICENSE("GPL v2");
197MODULE_DESCRIPTION("LCD/Backlight control for Sharp SL-6000 PDA");
198
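An illustrative note, not part of this patch: tosa_bl_set_backlight() above calls tosa_bl_enable(), which is defined and exported by tosa_lcd.c further down in this series; a declaration along these lines is assumed to live in <mach/tosa.h>.

	extern int tosa_bl_enable(struct spi_device *spi, int enable);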
diff --git a/drivers/video/backlight/tosa_lcd.c b/drivers/video/backlight/tosa_lcd.c
new file mode 100644
index 000000000000..57a26649f1a5
--- /dev/null
+++ b/drivers/video/backlight/tosa_lcd.c
@@ -0,0 +1,280 @@
1/*
2 * LCD / Backlight control code for Sharp SL-6000x (tosa)
3 *
4 * Copyright (c) 2005 Dirk Opfer
5 * Copyright (c) 2007,2008 Dmitry Baryshkov
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 */
12
13#include <linux/kernel.h>
14#include <linux/module.h>
15#include <linux/device.h>
16#include <linux/spi/spi.h>
17#include <linux/i2c.h>
18#include <linux/gpio.h>
19#include <linux/delay.h>
20#include <linux/lcd.h>
21#include <linux/fb.h>
22
23#include <asm/mach/sharpsl_param.h>
24
25#include <mach/tosa.h>
26
27#define POWER_IS_ON(pwr) ((pwr) <= FB_BLANK_NORMAL)
28
29#define TG_REG0_VQV 0x0001
30#define TG_REG0_COLOR 0x0002
31#define TG_REG0_UD 0x0004
32#define TG_REG0_LR 0x0008
33
34#define DAC_BASE 0x4e
35
36struct tosa_lcd_data {
37 struct spi_device *spi;
38 struct lcd_device *lcd;
39 struct i2c_client *i2c;
40
41 int lcd_power;
42};
43
44static int tosa_tg_send(struct spi_device *spi, int adrs, uint8_t data)
45{
46 u8 buf[1];
47 struct spi_message msg;
48 struct spi_transfer xfer = {
49 .len = 1,
50 .cs_change = 1,
51 .tx_buf = buf,
52 };
53
54 buf[0] = ((adrs & 0x07) << 5) | (data & 0x1f);
55 spi_message_init(&msg);
56 spi_message_add_tail(&xfer, &msg);
57
58 return spi_sync(spi, &msg);
59}
60
61int tosa_bl_enable(struct spi_device *spi, int enable)
62{
 63	/* bl_enable GP04=1 otherwise GP04=0 */
 64	return tosa_tg_send(spi, TG_GPODR2, enable ? 0x01 : 0x00);
65}
66EXPORT_SYMBOL(tosa_bl_enable);
67
68static void tosa_lcd_tg_init(struct tosa_lcd_data *data)
69{
70 /* TG on */
71 gpio_set_value(TOSA_GPIO_TG_ON, 0);
72
73 mdelay(60);
74
75 /* delayed 0clk TCTL signal for VGA */
76 tosa_tg_send(data->spi, TG_TPOSCTL, 0x00);
77 /* GPOS0=powercontrol, GPOS1=GPIO, GPOS2=TCTL */
78 tosa_tg_send(data->spi, TG_GPOSR, 0x02);
79}
80
81static void tosa_lcd_tg_on(struct tosa_lcd_data *data)
82{
83 struct spi_device *spi = data->spi;
84 const int value = TG_REG0_COLOR | TG_REG0_UD | TG_REG0_LR;
85 tosa_tg_send(spi, TG_PNLCTL, value | TG_REG0_VQV); /* this depends on mode */
86
 87	/* TG LCD panel power up */
 88	tosa_tg_send(spi, TG_PINICTL, 0x4);
 89	mdelay(50);
 90
 91	/* TG LCD GVSS */
 92	tosa_tg_send(spi, TG_PINICTL, 0x0);
93
94 if (!data->i2c) {
 95		/* after the panel is powered up the first time, we can access the i2c bus */
96 /* so probe for the DAC */
97 struct i2c_adapter *adap = i2c_get_adapter(0);
98 struct i2c_board_info info = {
99 .type = "tosa-bl",
100 .addr = DAC_BASE,
101 .platform_data = data->spi,
102 };
103 data->i2c = i2c_new_device(adap, &info);
104 }
105}
106
107static void tosa_lcd_tg_off(struct tosa_lcd_data *data)
108{
109 struct spi_device *spi = data->spi;
110
111 /* TG LCD VHSA off */
 112	tosa_tg_send(spi, TG_PINICTL, 0x4);
113 mdelay(50);
114
115 /* TG LCD signal off */
 116	tosa_tg_send(spi, TG_PINICTL, 0x6);
117 mdelay(50);
118
119 /* TG Off */
120 gpio_set_value(TOSA_GPIO_TG_ON, 1);
121 mdelay(100);
122}
123
124int tosa_lcd_set_power(struct lcd_device *lcd, int power)
125{
126 struct tosa_lcd_data *data = lcd_get_data(lcd);
127
128 if (POWER_IS_ON(power) && !POWER_IS_ON(data->lcd_power))
129 tosa_lcd_tg_on(data);
130
131 if (!POWER_IS_ON(power) && POWER_IS_ON(data->lcd_power))
132 tosa_lcd_tg_off(data);
133
134 data->lcd_power = power;
135 return 0;
136}
137
138static int tosa_lcd_get_power(struct lcd_device *lcd)
139{
140 struct tosa_lcd_data *data = lcd_get_data(lcd);
141
142 return data->lcd_power;
143}
144
145static struct lcd_ops tosa_lcd_ops = {
146 .set_power = tosa_lcd_set_power,
147 .get_power = tosa_lcd_get_power,
148};
149
150static int __devinit tosa_lcd_probe(struct spi_device *spi)
151{
152 int ret;
153 struct tosa_lcd_data *data;
154
155 data = kzalloc(sizeof(struct tosa_lcd_data), GFP_KERNEL);
156 if (!data)
157 return -ENOMEM;
158
159 /*
160 * bits_per_word cannot be configured in platform data
161 */
162 spi->bits_per_word = 8;
163
164 ret = spi_setup(spi);
165 if (ret < 0)
166 goto err_spi;
167
168 data->spi = spi;
169 dev_set_drvdata(&spi->dev, data);
170
171 ret = gpio_request(TOSA_GPIO_TG_ON, "tg #pwr");
172 if (ret < 0)
173 goto err_gpio_tg;
174
175 mdelay(60);
176
177 ret = gpio_direction_output(TOSA_GPIO_TG_ON, 0);
178 if (ret < 0)
179 goto err_gpio_dir;
180
181 mdelay(60);
182 tosa_lcd_tg_init(data);
183
184 tosa_lcd_tg_on(data);
185
186 data->lcd = lcd_device_register("tosa-lcd", &spi->dev, data,
187 &tosa_lcd_ops);
188
189 if (IS_ERR(data->lcd)) {
190 ret = PTR_ERR(data->lcd);
191 data->lcd = NULL;
192 goto err_register;
193 }
194
195 return 0;
196
197err_register:
198 tosa_lcd_tg_off(data);
199err_gpio_dir:
200 gpio_free(TOSA_GPIO_TG_ON);
201err_gpio_tg:
202 dev_set_drvdata(&spi->dev, NULL);
203err_spi:
204 kfree(data);
205 return ret;
206}
207
208static int __devexit tosa_lcd_remove(struct spi_device *spi)
209{
210 struct tosa_lcd_data *data = dev_get_drvdata(&spi->dev);
211
212 lcd_device_unregister(data->lcd);
213
214 if (data->i2c)
215 i2c_unregister_device(data->i2c);
216
217 tosa_lcd_tg_off(data);
218
219 gpio_free(TOSA_GPIO_TG_ON);
220 dev_set_drvdata(&spi->dev, NULL);
221 kfree(data);
222
223 return 0;
224}
225
226#ifdef CONFIG_PM
227static int tosa_lcd_suspend(struct spi_device *spi, pm_message_t state)
228{
229 struct tosa_lcd_data *data = dev_get_drvdata(&spi->dev);
230
231 tosa_lcd_tg_off(data);
232
233 return 0;
234}
235
236static int tosa_lcd_resume(struct spi_device *spi)
237{
238 struct tosa_lcd_data *data = dev_get_drvdata(&spi->dev);
239
240 tosa_lcd_tg_init(data);
241 if (POWER_IS_ON(data->lcd_power))
242 tosa_lcd_tg_on(data);
243 else
244 tosa_lcd_tg_off(data);
245
246 return 0;
247}
248#else
249#define tosa_lcd_suspend NULL
 250#define tosa_lcd_resume NULL
251#endif
252
253static struct spi_driver tosa_lcd_driver = {
254 .driver = {
255 .name = "tosa-lcd",
256 .owner = THIS_MODULE,
257 },
258 .probe = tosa_lcd_probe,
259 .remove = __devexit_p(tosa_lcd_remove),
260 .suspend = tosa_lcd_suspend,
261 .resume = tosa_lcd_resume,
262};
263
264static int __init tosa_lcd_init(void)
265{
266 return spi_register_driver(&tosa_lcd_driver);
267}
268
269static void __exit tosa_lcd_exit(void)
270{
271 spi_unregister_driver(&tosa_lcd_driver);
272}
273
274module_init(tosa_lcd_init);
275module_exit(tosa_lcd_exit);
276
277MODULE_AUTHOR("Dmitry Baryshkov");
278MODULE_LICENSE("GPL v2");
279MODULE_DESCRIPTION("LCD/Backlight control for Sharp SL-6000 PDA");
280
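An illustrative machine-setup sketch, not part of this patch: the tosa-lcd SPI driver above binds to a board-registered spi_board_info whose modalias matches the driver name. The bus number, chip select and clock rate below are assumptions for illustration only.

	#include <linux/kernel.h>
	#include <linux/init.h>
	#include <linux/spi/spi.h>

	static struct spi_board_info example_tosa_spi_devices[] __initdata = {
		{
			.modalias	= "tosa-lcd",
			.max_speed_hz	= 1000000,	/* assumed */
			.bus_num	= 2,		/* assumed */
			.chip_select	= 0,
		},
	};

	static void __init example_board_register_spi(void)
	{
		spi_register_board_info(example_tosa_spi_devices,
					ARRAY_SIZE(example_tosa_spi_devices));
	}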
diff --git a/drivers/video/cirrusfb.c b/drivers/video/cirrusfb.c
index 048b139f0e50..a2aa6ddffbe2 100644
--- a/drivers/video/cirrusfb.c
+++ b/drivers/video/cirrusfb.c
@@ -2049,7 +2049,7 @@ static void cirrusfb_pci_unmap(struct fb_info *info)
2049#endif /* CONFIG_PCI */ 2049#endif /* CONFIG_PCI */
2050 2050
2051#ifdef CONFIG_ZORRO 2051#ifdef CONFIG_ZORRO
2052static void __devexit cirrusfb_zorro_unmap(struct fb_info *info) 2052static void cirrusfb_zorro_unmap(struct fb_info *info)
2053{ 2053{
2054 struct cirrusfb_info *cinfo = info->par; 2054 struct cirrusfb_info *cinfo = info->par;
2055 struct zorro_dev *zdev = to_zorro_dev(info->device); 2055 struct zorro_dev *zdev = to_zorro_dev(info->device);
@@ -2462,8 +2462,7 @@ static int __init cirrusfb_init(void)
2462 2462
2463#ifndef MODULE 2463#ifndef MODULE
2464static int __init cirrusfb_setup(char *options) { 2464static int __init cirrusfb_setup(char *options) {
2465 char *this_opt, s[32]; 2465 char *this_opt;
2466 int i;
2467 2466
2468 DPRINTK("ENTER\n"); 2467 DPRINTK("ENTER\n");
2469 2468
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 64b3d30027b8..0b2adefe9e3d 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -2118,7 +2118,7 @@ static void fbcon_bmove_rec(struct vc_data *vc, struct display *p, int sy, int s
2118 height, width); 2118 height, width);
2119} 2119}
2120 2120
2121static __inline__ void updatescrollmode(struct display *p, 2121static void updatescrollmode(struct display *p,
2122 struct fb_info *info, 2122 struct fb_info *info,
2123 struct vc_data *vc) 2123 struct vc_data *vc)
2124{ 2124{
@@ -2389,16 +2389,13 @@ static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch)
2389 2389
2390 if (!fbcon_is_inactive(vc, info)) { 2390 if (!fbcon_is_inactive(vc, info)) {
2391 if (ops->blank_state != blank) { 2391 if (ops->blank_state != blank) {
2392 int ret = 1;
2393
2394 ops->blank_state = blank; 2392 ops->blank_state = blank;
2395 fbcon_cursor(vc, blank ? CM_ERASE : CM_DRAW); 2393 fbcon_cursor(vc, blank ? CM_ERASE : CM_DRAW);
2396 ops->cursor_flash = (!blank); 2394 ops->cursor_flash = (!blank);
2397 2395
2398 if (info->fbops->fb_blank) 2396 if (!(info->flags & FBINFO_MISC_USEREVENT))
2399 ret = info->fbops->fb_blank(blank, info); 2397 if (fb_blank(info, blank))
2400 if (ret) 2398 fbcon_generic_blank(vc, info, blank);
2401 fbcon_generic_blank(vc, info, blank);
2402 } 2399 }
2403 2400
2404 if (!blank) 2401 if (!blank)
@@ -3534,12 +3531,18 @@ static void fbcon_exit(void)
3534 softback_buf = 0UL; 3531 softback_buf = 0UL;
3535 3532
3536 for (i = 0; i < FB_MAX; i++) { 3533 for (i = 0; i < FB_MAX; i++) {
3534 int pending;
3535
3537 mapped = 0; 3536 mapped = 0;
3538 info = registered_fb[i]; 3537 info = registered_fb[i];
3539 3538
3540 if (info == NULL) 3539 if (info == NULL)
3541 continue; 3540 continue;
3542 3541
3542 pending = cancel_work_sync(&info->queue);
3543 DPRINTK("fbcon: %s pending work\n", (pending ? "canceled" :
3544 "no"));
3545
3543 for (j = first_fb_vc; j <= last_fb_vc; j++) { 3546 for (j = first_fb_vc; j <= last_fb_vc; j++) {
3544 if (con2fb_map[j] == i) 3547 if (con2fb_map[j] == i)
3545 mapped = 1; 3548 mapped = 1;
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index cd5f20da738a..3c65b0d67617 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -230,7 +230,7 @@ static void fb_set_logo_directpalette(struct fb_info *info,
230 greenshift = info->var.green.offset; 230 greenshift = info->var.green.offset;
231 blueshift = info->var.blue.offset; 231 blueshift = info->var.blue.offset;
232 232
233 for (i = 32; i < logo->clutsize; i++) 233 for (i = 32; i < 32 + logo->clutsize; i++)
234 palette[i] = i << redshift | i << greenshift | i << blueshift; 234 palette[i] = i << redshift | i << greenshift | i << blueshift;
235} 235}
236 236
@@ -1002,13 +1002,9 @@ fb_blank(struct fb_info *info, int blank)
1002 return ret; 1002 return ret;
1003} 1003}
1004 1004
1005static long 1005static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
1006fb_ioctl(struct file *file, unsigned int cmd, 1006 unsigned long arg)
1007 unsigned long arg)
1008{ 1007{
1009 struct inode *inode = file->f_path.dentry->d_inode;
1010 int fbidx = iminor(inode);
1011 struct fb_info *info;
1012 struct fb_ops *fb; 1008 struct fb_ops *fb;
1013 struct fb_var_screeninfo var; 1009 struct fb_var_screeninfo var;
1014 struct fb_fix_screeninfo fix; 1010 struct fb_fix_screeninfo fix;
@@ -1018,14 +1014,10 @@ fb_ioctl(struct file *file, unsigned int cmd,
1018 void __user *argp = (void __user *)arg; 1014 void __user *argp = (void __user *)arg;
1019 long ret = 0; 1015 long ret = 0;
1020 1016
1021 info = registered_fb[fbidx];
1022 mutex_lock(&info->lock);
1023 fb = info->fbops; 1017 fb = info->fbops;
1024 1018 if (!fb)
1025 if (!fb) {
1026 mutex_unlock(&info->lock);
1027 return -ENODEV; 1019 return -ENODEV;
1028 } 1020
1029 switch (cmd) { 1021 switch (cmd) {
1030 case FBIOGET_VSCREENINFO: 1022 case FBIOGET_VSCREENINFO:
1031 ret = copy_to_user(argp, &info->var, 1023 ret = copy_to_user(argp, &info->var,
@@ -1126,6 +1118,21 @@ fb_ioctl(struct file *file, unsigned int cmd,
1126 else 1118 else
1127 ret = fb->fb_ioctl(info, cmd, arg); 1119 ret = fb->fb_ioctl(info, cmd, arg);
1128 } 1120 }
1121 return ret;
1122}
1123
1124static long fb_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1125__acquires(&info->lock)
1126__releases(&info->lock)
1127{
1128 struct inode *inode = file->f_path.dentry->d_inode;
1129 int fbidx = iminor(inode);
1130 struct fb_info *info;
1131 long ret;
1132
1133 info = registered_fb[fbidx];
1134 mutex_lock(&info->lock);
1135 ret = do_fb_ioctl(info, cmd, arg);
1129 mutex_unlock(&info->lock); 1136 mutex_unlock(&info->lock);
1130 return ret; 1137 return ret;
1131} 1138}
@@ -1157,8 +1164,8 @@ struct fb_cmap32 {
1157 compat_caddr_t transp; 1164 compat_caddr_t transp;
1158}; 1165};
1159 1166
1160static int fb_getput_cmap(struct inode *inode, struct file *file, 1167static int fb_getput_cmap(struct fb_info *info, unsigned int cmd,
1161 unsigned int cmd, unsigned long arg) 1168 unsigned long arg)
1162{ 1169{
1163 struct fb_cmap_user __user *cmap; 1170 struct fb_cmap_user __user *cmap;
1164 struct fb_cmap32 __user *cmap32; 1171 struct fb_cmap32 __user *cmap32;
@@ -1181,7 +1188,7 @@ static int fb_getput_cmap(struct inode *inode, struct file *file,
1181 put_user(compat_ptr(data), &cmap->transp)) 1188 put_user(compat_ptr(data), &cmap->transp))
1182 return -EFAULT; 1189 return -EFAULT;
1183 1190
1184 err = fb_ioctl(file, cmd, (unsigned long) cmap); 1191 err = do_fb_ioctl(info, cmd, (unsigned long) cmap);
1185 1192
1186 if (!err) { 1193 if (!err) {
1187 if (copy_in_user(&cmap32->start, 1194 if (copy_in_user(&cmap32->start,
@@ -1223,8 +1230,8 @@ static int do_fscreeninfo_to_user(struct fb_fix_screeninfo *fix,
1223 return err; 1230 return err;
1224} 1231}
1225 1232
1226static int fb_get_fscreeninfo(struct inode *inode, struct file *file, 1233static int fb_get_fscreeninfo(struct fb_info *info, unsigned int cmd,
1227 unsigned int cmd, unsigned long arg) 1234 unsigned long arg)
1228{ 1235{
1229 mm_segment_t old_fs; 1236 mm_segment_t old_fs;
1230 struct fb_fix_screeninfo fix; 1237 struct fb_fix_screeninfo fix;
@@ -1235,7 +1242,7 @@ static int fb_get_fscreeninfo(struct inode *inode, struct file *file,
1235 1242
1236 old_fs = get_fs(); 1243 old_fs = get_fs();
1237 set_fs(KERNEL_DS); 1244 set_fs(KERNEL_DS);
1238 err = fb_ioctl(file, cmd, (unsigned long) &fix); 1245 err = do_fb_ioctl(info, cmd, (unsigned long) &fix);
1239 set_fs(old_fs); 1246 set_fs(old_fs);
1240 1247
1241 if (!err) 1248 if (!err)
@@ -1244,8 +1251,10 @@ static int fb_get_fscreeninfo(struct inode *inode, struct file *file,
1244 return err; 1251 return err;
1245} 1252}
1246 1253
1247static long 1254static long fb_compat_ioctl(struct file *file, unsigned int cmd,
1248fb_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 1255 unsigned long arg)
1256__acquires(&info->lock)
1257__releases(&info->lock)
1249{ 1258{
1250 struct inode *inode = file->f_path.dentry->d_inode; 1259 struct inode *inode = file->f_path.dentry->d_inode;
1251 int fbidx = iminor(inode); 1260 int fbidx = iminor(inode);
@@ -1262,16 +1271,16 @@ fb_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1262 case FBIOPUT_CON2FBMAP: 1271 case FBIOPUT_CON2FBMAP:
1263 arg = (unsigned long) compat_ptr(arg); 1272 arg = (unsigned long) compat_ptr(arg);
1264 case FBIOBLANK: 1273 case FBIOBLANK:
1265 ret = fb_ioctl(file, cmd, arg); 1274 ret = do_fb_ioctl(info, cmd, arg);
1266 break; 1275 break;
1267 1276
1268 case FBIOGET_FSCREENINFO: 1277 case FBIOGET_FSCREENINFO:
1269 ret = fb_get_fscreeninfo(inode, file, cmd, arg); 1278 ret = fb_get_fscreeninfo(info, cmd, arg);
1270 break; 1279 break;
1271 1280
1272 case FBIOGETCMAP: 1281 case FBIOGETCMAP:
1273 case FBIOPUTCMAP: 1282 case FBIOPUTCMAP:
1274 ret = fb_getput_cmap(inode, file, cmd, arg); 1283 ret = fb_getput_cmap(info, cmd, arg);
1275 break; 1284 break;
1276 1285
1277 default: 1286 default:
@@ -1286,6 +1295,8 @@ fb_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1286 1295
1287static int 1296static int
1288fb_mmap(struct file *file, struct vm_area_struct * vma) 1297fb_mmap(struct file *file, struct vm_area_struct * vma)
1298__acquires(&info->lock)
1299__releases(&info->lock)
1289{ 1300{
1290 int fbidx = iminor(file->f_path.dentry->d_inode); 1301 int fbidx = iminor(file->f_path.dentry->d_inode);
1291 struct fb_info *info = registered_fb[fbidx]; 1302 struct fb_info *info = registered_fb[fbidx];
@@ -1339,6 +1350,8 @@ fb_mmap(struct file *file, struct vm_area_struct * vma)
1339 1350
1340static int 1351static int
1341fb_open(struct inode *inode, struct file *file) 1352fb_open(struct inode *inode, struct file *file)
1353__acquires(&info->lock)
1354__releases(&info->lock)
1342{ 1355{
1343 int fbidx = iminor(inode); 1356 int fbidx = iminor(inode);
1344 struct fb_info *info; 1357 struct fb_info *info;
@@ -1374,6 +1387,8 @@ out:
1374 1387
1375static int 1388static int
1376fb_release(struct inode *inode, struct file *file) 1389fb_release(struct inode *inode, struct file *file)
1390__acquires(&info->lock)
1391__releases(&info->lock)
1377{ 1392{
1378 struct fb_info * const info = file->private_data; 1393 struct fb_info * const info = file->private_data;
1379 1394
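An illustrative sketch, not part of this patch, of the pattern the fbmem.c changes above introduce: locking stays in the file-level entry points while the unlocked do_fb_ioctl() worker performs the dispatch, so the compat handlers can call the worker directly with info->lock already held.

	static long example_locked_ioctl(struct fb_info *info, unsigned int cmd,
					 unsigned long arg)
	{
		long ret;

		mutex_lock(&info->lock);
		ret = do_fb_ioctl(info, cmd, arg);
		mutex_unlock(&info->lock);
		return ret;
	}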
diff --git a/drivers/video/macfb.c b/drivers/video/macfb.c
index b790ddff76f9..ee380d5f3410 100644
--- a/drivers/video/macfb.c
+++ b/drivers/video/macfb.c
@@ -164,7 +164,6 @@ static struct fb_var_screeninfo macfb_defined = {
164}; 164};
165 165
166static struct fb_fix_screeninfo macfb_fix = { 166static struct fb_fix_screeninfo macfb_fix = {
167 .id = "Macintosh ",
168 .type = FB_TYPE_PACKED_PIXELS, 167 .type = FB_TYPE_PACKED_PIXELS,
169 .accel = FB_ACCEL_NONE, 168 .accel = FB_ACCEL_NONE,
170}; 169};
@@ -760,22 +759,22 @@ static int __init macfb_init(void)
760 759
761 switch(ndev->dr_hw) { 760 switch(ndev->dr_hw) {
762 case NUBUS_DRHW_APPLE_MDC: 761 case NUBUS_DRHW_APPLE_MDC:
763 strcat( macfb_fix.id, "Display Card" ); 762 strcpy(macfb_fix.id, "Mac Disp. Card");
764 macfb_setpalette = mdc_setpalette; 763 macfb_setpalette = mdc_setpalette;
765 macfb_defined.activate = FB_ACTIVATE_NOW; 764 macfb_defined.activate = FB_ACTIVATE_NOW;
766 break; 765 break;
767 case NUBUS_DRHW_APPLE_TFB: 766 case NUBUS_DRHW_APPLE_TFB:
768 strcat( macfb_fix.id, "Toby" ); 767 strcpy(macfb_fix.id, "Toby");
769 macfb_setpalette = toby_setpalette; 768 macfb_setpalette = toby_setpalette;
770 macfb_defined.activate = FB_ACTIVATE_NOW; 769 macfb_defined.activate = FB_ACTIVATE_NOW;
771 break; 770 break;
772 case NUBUS_DRHW_APPLE_JET: 771 case NUBUS_DRHW_APPLE_JET:
773 strcat( macfb_fix.id, "Jet"); 772 strcpy(macfb_fix.id, "Jet");
774 macfb_setpalette = jet_setpalette; 773 macfb_setpalette = jet_setpalette;
775 macfb_defined.activate = FB_ACTIVATE_NOW; 774 macfb_defined.activate = FB_ACTIVATE_NOW;
776 break; 775 break;
777 default: 776 default:
778 strcat( macfb_fix.id, "Generic NuBus" ); 777 strcpy(macfb_fix.id, "Generic NuBus");
779 break; 778 break;
780 } 779 }
781 } 780 }
@@ -786,21 +785,11 @@ static int __init macfb_init(void)
786 if (!video_is_nubus) 785 if (!video_is_nubus)
787 switch( mac_bi_data.id ) 786 switch( mac_bi_data.id )
788 { 787 {
789 /* These don't have onboard video. Eventually, we may
790 be able to write separate framebuffer drivers for
791 them (tobyfb.c, hiresfb.c, etc, etc) */
792 case MAC_MODEL_II:
793 case MAC_MODEL_IIX:
794 case MAC_MODEL_IICX:
795 case MAC_MODEL_IIFX:
796 strcat( macfb_fix.id, "Generic NuBus" );
797 break;
798
799 /* Valkyrie Quadras */ 788 /* Valkyrie Quadras */
800 case MAC_MODEL_Q630: 789 case MAC_MODEL_Q630:
801 /* I'm not sure about this one */ 790 /* I'm not sure about this one */
802 case MAC_MODEL_P588: 791 case MAC_MODEL_P588:
803 strcat( macfb_fix.id, "Valkyrie built-in" ); 792 strcpy(macfb_fix.id, "Valkyrie");
804 macfb_setpalette = valkyrie_setpalette; 793 macfb_setpalette = valkyrie_setpalette;
805 macfb_defined.activate = FB_ACTIVATE_NOW; 794 macfb_defined.activate = FB_ACTIVATE_NOW;
806 valkyrie_cmap_regs = ioremap(DAC_BASE, 0x1000); 795 valkyrie_cmap_regs = ioremap(DAC_BASE, 0x1000);
@@ -823,7 +812,7 @@ static int __init macfb_init(void)
823 case MAC_MODEL_Q700: 812 case MAC_MODEL_Q700:
824 case MAC_MODEL_Q900: 813 case MAC_MODEL_Q900:
825 case MAC_MODEL_Q950: 814 case MAC_MODEL_Q950:
826 strcat( macfb_fix.id, "DAFB built-in" ); 815 strcpy(macfb_fix.id, "DAFB");
827 macfb_setpalette = dafb_setpalette; 816 macfb_setpalette = dafb_setpalette;
828 macfb_defined.activate = FB_ACTIVATE_NOW; 817 macfb_defined.activate = FB_ACTIVATE_NOW;
829 dafb_cmap_regs = ioremap(DAFB_BASE, 0x1000); 818 dafb_cmap_regs = ioremap(DAFB_BASE, 0x1000);
@@ -831,7 +820,7 @@ static int __init macfb_init(void)
831 820
832 /* LC II uses the V8 framebuffer */ 821 /* LC II uses the V8 framebuffer */
833 case MAC_MODEL_LCII: 822 case MAC_MODEL_LCII:
834 strcat( macfb_fix.id, "V8 built-in" ); 823 strcpy(macfb_fix.id, "V8");
835 macfb_setpalette = v8_brazil_setpalette; 824 macfb_setpalette = v8_brazil_setpalette;
836 macfb_defined.activate = FB_ACTIVATE_NOW; 825 macfb_defined.activate = FB_ACTIVATE_NOW;
837 v8_brazil_cmap_regs = ioremap(DAC_BASE, 0x1000); 826 v8_brazil_cmap_regs = ioremap(DAC_BASE, 0x1000);
@@ -843,7 +832,7 @@ static int __init macfb_init(void)
843 case MAC_MODEL_IIVI: 832 case MAC_MODEL_IIVI:
844 case MAC_MODEL_IIVX: 833 case MAC_MODEL_IIVX:
845 case MAC_MODEL_P600: 834 case MAC_MODEL_P600:
846 strcat( macfb_fix.id, "Brazil built-in" ); 835 strcpy(macfb_fix.id, "Brazil");
847 macfb_setpalette = v8_brazil_setpalette; 836 macfb_setpalette = v8_brazil_setpalette;
848 macfb_defined.activate = FB_ACTIVATE_NOW; 837 macfb_defined.activate = FB_ACTIVATE_NOW;
849 v8_brazil_cmap_regs = ioremap(DAC_BASE, 0x1000); 838 v8_brazil_cmap_regs = ioremap(DAC_BASE, 0x1000);
@@ -860,7 +849,7 @@ static int __init macfb_init(void)
860 case MAC_MODEL_P460: 849 case MAC_MODEL_P460:
861 macfb_setpalette = v8_brazil_setpalette; 850 macfb_setpalette = v8_brazil_setpalette;
862 macfb_defined.activate = FB_ACTIVATE_NOW; 851 macfb_defined.activate = FB_ACTIVATE_NOW;
863 strcat( macfb_fix.id, "Sonora built-in" ); 852 strcpy(macfb_fix.id, "Sonora");
864 v8_brazil_cmap_regs = ioremap(DAC_BASE, 0x1000); 853 v8_brazil_cmap_regs = ioremap(DAC_BASE, 0x1000);
865 break; 854 break;
866 855
@@ -871,7 +860,7 @@ static int __init macfb_init(void)
871 case MAC_MODEL_IISI: 860 case MAC_MODEL_IISI:
872 macfb_setpalette = rbv_setpalette; 861 macfb_setpalette = rbv_setpalette;
873 macfb_defined.activate = FB_ACTIVATE_NOW; 862 macfb_defined.activate = FB_ACTIVATE_NOW;
874 strcat( macfb_fix.id, "RBV built-in" ); 863 strcpy(macfb_fix.id, "RBV");
875 rbv_cmap_regs = ioremap(DAC_BASE, 0x1000); 864 rbv_cmap_regs = ioremap(DAC_BASE, 0x1000);
876 break; 865 break;
877 866
@@ -880,7 +869,7 @@ static int __init macfb_init(void)
880 case MAC_MODEL_C660: 869 case MAC_MODEL_C660:
881 macfb_setpalette = civic_setpalette; 870 macfb_setpalette = civic_setpalette;
882 macfb_defined.activate = FB_ACTIVATE_NOW; 871 macfb_defined.activate = FB_ACTIVATE_NOW;
883 strcat( macfb_fix.id, "Civic built-in" ); 872 strcpy(macfb_fix.id, "Civic");
884 civic_cmap_regs = ioremap(CIVIC_BASE, 0x1000); 873 civic_cmap_regs = ioremap(CIVIC_BASE, 0x1000);
885 break; 874 break;
886 875
@@ -901,7 +890,7 @@ static int __init macfb_init(void)
901 v8_brazil_cmap_regs = 890 v8_brazil_cmap_regs =
902 ioremap(DAC_BASE, 0x1000); 891 ioremap(DAC_BASE, 0x1000);
903 } 892 }
904 strcat( macfb_fix.id, "LC built-in" ); 893 strcpy(macfb_fix.id, "LC");
905 break; 894 break;
906 /* We think this may be like the LC II */ 895 /* We think this may be like the LC II */
907 case MAC_MODEL_CCL: 896 case MAC_MODEL_CCL:
@@ -911,18 +900,18 @@ static int __init macfb_init(void)
911 v8_brazil_cmap_regs = 900 v8_brazil_cmap_regs =
912 ioremap(DAC_BASE, 0x1000); 901 ioremap(DAC_BASE, 0x1000);
913 } 902 }
914 strcat( macfb_fix.id, "Color Classic built-in" ); 903 strcpy(macfb_fix.id, "Color Classic");
915 break; 904 break;
916 905
917 /* And we *do* mean "weirdos" */ 906 /* And we *do* mean "weirdos" */
918 case MAC_MODEL_TV: 907 case MAC_MODEL_TV:
919 strcat( macfb_fix.id, "Mac TV built-in" ); 908 strcpy(macfb_fix.id, "Mac TV");
920 break; 909 break;
921 910
922 /* These don't have colour, so no need to worry */ 911 /* These don't have colour, so no need to worry */
923 case MAC_MODEL_SE30: 912 case MAC_MODEL_SE30:
924 case MAC_MODEL_CLII: 913 case MAC_MODEL_CLII:
925 strcat( macfb_fix.id, "Monochrome built-in" ); 914 strcpy(macfb_fix.id, "Monochrome");
926 break; 915 break;
927 916
928 /* Powerbooks are particularly difficult. Many of 917 /* Powerbooks are particularly difficult. Many of
@@ -935,7 +924,7 @@ static int __init macfb_init(void)
935 case MAC_MODEL_PB140: 924 case MAC_MODEL_PB140:
936 case MAC_MODEL_PB145: 925 case MAC_MODEL_PB145:
937 case MAC_MODEL_PB170: 926 case MAC_MODEL_PB170:
938 strcat( macfb_fix.id, "DDC built-in" ); 927 strcpy(macfb_fix.id, "DDC");
939 break; 928 break;
940 929
941 /* Internal is GSC, External (if present) is ViSC */ 930 /* Internal is GSC, External (if present) is ViSC */
@@ -945,13 +934,13 @@ static int __init macfb_init(void)
945 case MAC_MODEL_PB180: 934 case MAC_MODEL_PB180:
946 case MAC_MODEL_PB210: 935 case MAC_MODEL_PB210:
947 case MAC_MODEL_PB230: 936 case MAC_MODEL_PB230:
948 strcat( macfb_fix.id, "GSC built-in" ); 937 strcpy(macfb_fix.id, "GSC");
949 break; 938 break;
950 939
951 /* Internal is TIM, External is ViSC */ 940 /* Internal is TIM, External is ViSC */
952 case MAC_MODEL_PB165C: 941 case MAC_MODEL_PB165C:
953 case MAC_MODEL_PB180C: 942 case MAC_MODEL_PB180C:
954 strcat( macfb_fix.id, "TIM built-in" ); 943 strcpy(macfb_fix.id, "TIM");
955 break; 944 break;
956 945
957 /* Internal is CSC, External is Keystone+Ariel. */ 946 /* Internal is CSC, External is Keystone+Ariel. */
@@ -963,12 +952,12 @@ static int __init macfb_init(void)
963 case MAC_MODEL_PB280C: 952 case MAC_MODEL_PB280C:
964 macfb_setpalette = csc_setpalette; 953 macfb_setpalette = csc_setpalette;
965 macfb_defined.activate = FB_ACTIVATE_NOW; 954 macfb_defined.activate = FB_ACTIVATE_NOW;
966 strcat( macfb_fix.id, "CSC built-in" ); 955 strcpy(macfb_fix.id, "CSC");
967 csc_cmap_regs = ioremap(CSC_BASE, 0x1000); 956 csc_cmap_regs = ioremap(CSC_BASE, 0x1000);
968 break; 957 break;
969 958
970 default: 959 default:
971 strcat( macfb_fix.id, "Unknown/Unsupported built-in" ); 960 strcpy(macfb_fix.id, "Unknown");
972 break; 961 break;
973 } 962 }
974 963
@@ -978,16 +967,23 @@ static int __init macfb_init(void)
978 fb_info.pseudo_palette = pseudo_palette; 967 fb_info.pseudo_palette = pseudo_palette;
979 fb_info.flags = FBINFO_DEFAULT; 968 fb_info.flags = FBINFO_DEFAULT;
980 969
981 fb_alloc_cmap(&fb_info.cmap, video_cmap_len, 0); 970 err = fb_alloc_cmap(&fb_info.cmap, video_cmap_len, 0);
971 if (err)
972 goto fail_unmap;
982 973
983 err = register_framebuffer(&fb_info); 974 err = register_framebuffer(&fb_info);
984 if (!err) 975 if (err)
985 printk("fb%d: %s frame buffer device\n", 976 goto fail_dealloc;
986 fb_info.node, fb_info.fix.id); 977
987 else { 978 printk("fb%d: %s frame buffer device\n",
988 iounmap(fb_info.screen_base); 979 fb_info.node, fb_info.fix.id);
989 iounmap_macfb(); 980 return 0;
990 } 981
982fail_dealloc:
983 fb_dealloc_cmap(&fb_info.cmap);
984fail_unmap:
985 iounmap(fb_info.screen_base);
986 iounmap_macfb();
991 return err; 987 return err;
992} 988}
993 989
diff --git a/drivers/video/mb862xx/Makefile b/drivers/video/mb862xx/Makefile
new file mode 100644
index 000000000000..07664814bb1d
--- /dev/null
+++ b/drivers/video/mb862xx/Makefile
@@ -0,0 +1,5 @@
1#
2# Makefile for the MB862xx framebuffer driver
3#
4
5obj-$(CONFIG_FB_MB862XX) := mb862xxfb.o
diff --git a/drivers/video/mb862xx/mb862xx_reg.h b/drivers/video/mb862xx/mb862xx_reg.h
new file mode 100644
index 000000000000..2ba65e118500
--- /dev/null
+++ b/drivers/video/mb862xx/mb862xx_reg.h
@@ -0,0 +1,138 @@
1/*
2 * Fujitsu MB862xx Graphics Controller Registers/Bits
3 */
4
5#ifndef _MB862XX_REG_H
6#define _MB862XX_REG_H
7
8#ifdef MB862XX_MMIO_BOTTOM
9#define MB862XX_MMIO_BASE 0x03fc0000
10#else
11#define MB862XX_MMIO_BASE 0x01fc0000
12#endif
13#define MB862XX_I2C_BASE 0x0000c000
14#define MB862XX_DISP_BASE 0x00010000
15#define MB862XX_CAP_BASE 0x00018000
16#define MB862XX_DRAW_BASE 0x00030000
17#define MB862XX_GEO_BASE 0x00038000
18#define MB862XX_PIO_BASE 0x00038000
19#define MB862XX_MMIO_SIZE 0x40000
20
21/* Host interface/pio registers */
22#define GC_IST 0x00000020
23#define GC_IMASK 0x00000024
24#define GC_SRST 0x0000002c
25#define GC_CCF 0x00000038
26#define GC_CID 0x000000f0
27#define GC_REVISION 0x00000084
28
29#define GC_CCF_CGE_100 0x00000000
30#define GC_CCF_CGE_133 0x00040000
31#define GC_CCF_CGE_166 0x00080000
32#define GC_CCF_COT_100 0x00000000
33#define GC_CCF_COT_133 0x00010000
34#define GC_CID_CNAME_MSK 0x0000ff00
35#define GC_CID_VERSION_MSK 0x000000ff
36
37/* define enabled interrupts hereby */
38#define GC_INT_EN 0x00000000
39
40/* Memory interface mode register */
41#define GC_MMR 0x0000fffc
42
43/* Display Controller registers */
44#define GC_DCM0 0x00000000
45#define GC_HTP 0x00000004
46#define GC_HDB_HDP 0x00000008
47#define GC_VSW_HSW_HSP 0x0000000c
48#define GC_VTR 0x00000010
49#define GC_VDP_VSP 0x00000014
50#define GC_WY_WX 0x00000018
51#define GC_WH_WW 0x0000001c
52#define GC_L0M 0x00000020
53#define GC_L0OA0 0x00000024
54#define GC_L0DA0 0x00000028
55#define GC_L0DY_L0DX 0x0000002c
56#define GC_DCM1 0x00000100
57#define GC_L0EM 0x00000110
58#define GC_L0WY_L0WX 0x00000114
59#define GC_L0WH_L0WW 0x00000118
60#define GC_DCM2 0x00000104
61#define GC_DCM3 0x00000108
62#define GC_CPM_CUTC 0x000000a0
63#define GC_CUOA0 0x000000a4
64#define GC_CUY0_CUX0 0x000000a8
65#define GC_CUOA1 0x000000ac
66#define GC_CUY1_CUX1 0x000000b0
67#define GC_L0PAL0 0x00000400
68
69#define GC_CPM_CEN0 0x00100000
70#define GC_CPM_CEN1 0x00200000
71
72#define GC_DCM01_ESY 0x00000004
73#define GC_DCM01_SC 0x00003f00
74#define GC_DCM01_RESV 0x00004000
75#define GC_DCM01_CKS 0x00008000
76#define GC_DCM01_L0E 0x00010000
77#define GC_DCM01_DEN 0x80000000
78#define GC_L0M_L0C_8 0x00000000
79#define GC_L0M_L0C_16 0x80000000
80#define GC_L0EM_L0EC_24 0x40000000
81#define GC_L0M_L0W_UNIT 64
82
83#define GC_DISP_REFCLK_400 400
84
85/* Carmine specific */
86#define MB86297_DRAW_BASE 0x00020000
87#define MB86297_DISP0_BASE 0x00100000
88#define MB86297_DISP1_BASE 0x00140000
89#define MB86297_WRBACK_BASE 0x00180000
90#define MB86297_CAP0_BASE 0x00200000
91#define MB86297_CAP1_BASE 0x00280000
92#define MB86297_DRAMCTRL_BASE 0x00300000
93#define MB86297_CTRL_BASE 0x00400000
94#define MB86297_I2C_BASE 0x00500000
95
96#define GC_CTRL_STATUS 0x00000000
97#define GC_CTRL_INT_MASK 0x00000004
98#define GC_CTRL_CLK_ENABLE 0x0000000c
99#define GC_CTRL_SOFT_RST 0x00000010
100
101#define GC_CTRL_CLK_EN_DRAM 0x00000001
102#define GC_CTRL_CLK_EN_2D3D 0x00000002
103#define GC_CTRL_CLK_EN_DISP0 0x00000020
104#define GC_CTRL_CLK_EN_DISP1 0x00000040
105
106#define GC_2D3D_REV 0x000004b4
107#define GC_RE_REVISION 0x24240200
108
109/* define enabled interrupts hereby */
110#define GC_CARMINE_INT_EN 0x00000004
111
112/* DRAM controller */
113#define GC_DCTL_MODE_ADD 0x00000000
114#define GC_DCTL_SETTIME1_EMODE 0x00000004
115#define GC_DCTL_REFRESH_SETTIME2 0x00000008
116#define GC_DCTL_RSV0_STATES 0x0000000C
117#define GC_DCTL_RSV2_RSV1 0x00000010
118#define GC_DCTL_DDRIF2_DDRIF1 0x00000014
119#define GC_DCTL_IOCONT1_IOCONT0 0x00000024
120
121#define GC_DCTL_STATES_MSK 0x0000000f
122#define GC_DCTL_INIT_WAIT_CNT 3000
123#define GC_DCTL_INIT_WAIT_INTERVAL 1
124
125/* DRAM ctrl values for Carmine PCI Eval. board */
126#define GC_EVB_DCTL_MODE_ADD 0x012105c3
127#define GC_EVB_DCTL_MODE_ADD_AFT_RST 0x002105c3
128#define GC_EVB_DCTL_SETTIME1_EMODE 0x47498000
129#define GC_EVB_DCTL_REFRESH_SETTIME2 0x00422a22
130#define GC_EVB_DCTL_RSV0_STATES 0x00200003
131#define GC_EVB_DCTL_RSV0_STATES_AFT_RST 0x00200002
132#define GC_EVB_DCTL_RSV2_RSV1 0x0000000f
133#define GC_EVB_DCTL_DDRIF2_DDRIF1 0x00556646
134#define GC_EVB_DCTL_IOCONT1_IOCONT0 0x05550555
135
136#define GC_DISP_REFCLK_533 533
137
138#endif
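An illustrative sketch, not part of this patch: decoding the chip-id register defined above. 'mmio' stands for an assumed ioremap() of the register window at MB862XX_MMIO_BASE.

	#include <linux/kernel.h>
	#include <linux/io.h>

	static void example_print_gc_id(void __iomem *mmio)
	{
		u32 cid  = readl(mmio + GC_CID);
		u32 name = (cid & GC_CID_CNAME_MSK) >> 8;
		u32 rev  = cid & GC_CID_VERSION_MSK;

		pr_info("mb862xx: controller %#x revision %#x\n", name, rev);
	}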
diff --git a/drivers/video/mb862xx/mb862xxfb.c b/drivers/video/mb862xx/mb862xxfb.c
new file mode 100644
index 000000000000..fb64234a3825
--- /dev/null
+++ b/drivers/video/mb862xx/mb862xxfb.c
@@ -0,0 +1,1061 @@
1/*
 2 * drivers/video/mb862xx/mb862xxfb.c
3 *
4 * Fujitsu Carmine/Coral-P(A)/Lime framebuffer driver
5 *
6 * (C) 2008 Anatolij Gustschin <agust@denx.de>
7 * DENX Software Engineering
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 */
14
15#undef DEBUG
16
17#include <linux/fb.h>
18#include <linux/delay.h>
19#include <linux/init.h>
20#include <linux/interrupt.h>
21#include <linux/pci.h>
22#if defined(CONFIG_PPC_OF)
23#include <linux/of_platform.h>
24#endif
25#include "mb862xxfb.h"
26#include "mb862xx_reg.h"
27
28#define NR_PALETTE 256
29#define MB862XX_MEM_SIZE 0x1000000
30#define CORALP_MEM_SIZE 0x4000000
31#define CARMINE_MEM_SIZE 0x8000000
32#define DRV_NAME "mb862xxfb"
33
34#if defined(CONFIG_LWMON5)
35static struct mb862xx_gc_mode lwmon5_gc_mode = {
36 /* Mode for Sharp LQ104V1DG61 TFT LCD Panel */
37 { "640x480", 60, 640, 480, 40000, 48, 16, 32, 11, 96, 2, 0, 0, 0 },
38 /* 16 bits/pixel, 32MB, 100MHz, SDRAM memory mode value */
39 16, 0x2000000, GC_CCF_COT_100, 0x414fb7f2
40};
41#endif
42
43#if defined(CONFIG_SOCRATES)
44static struct mb862xx_gc_mode socrates_gc_mode = {
45 /* Mode for Prime View PM070WL4 TFT LCD Panel */
46 { "800x480", 45, 800, 480, 40000, 86, 42, 33, 10, 128, 2, 0, 0, 0 },
47 /* 16 bits/pixel, 16MB, 133MHz, SDRAM memory mode value */
48 16, 0x1000000, GC_CCF_COT_133, 0x4157ba63
49};
50#endif
51
52/* Helpers */
53static inline int h_total(struct fb_var_screeninfo *var)
54{
55 return var->xres + var->left_margin +
56 var->right_margin + var->hsync_len;
57}
58
59static inline int v_total(struct fb_var_screeninfo *var)
60{
61 return var->yres + var->upper_margin +
62 var->lower_margin + var->vsync_len;
63}
64
65static inline int hsp(struct fb_var_screeninfo *var)
66{
67 return var->xres + var->right_margin - 1;
68}
69
70static inline int vsp(struct fb_var_screeninfo *var)
71{
72 return var->yres + var->lower_margin - 1;
73}
74
75static inline int d_pitch(struct fb_var_screeninfo *var)
76{
77 return var->xres * var->bits_per_pixel / 8;
78}
79
80static inline unsigned int chan_to_field(unsigned int chan,
81 struct fb_bitfield *bf)
82{
83 chan &= 0xffff;
84 chan >>= 16 - bf->length;
85 return chan << bf->offset;
86}
87
88static int mb862xxfb_setcolreg(unsigned regno,
89 unsigned red, unsigned green, unsigned blue,
90 unsigned transp, struct fb_info *info)
91{
92 struct mb862xxfb_par *par = info->par;
93 unsigned int val;
94
95 switch (info->fix.visual) {
96 case FB_VISUAL_TRUECOLOR:
97 if (regno < 16) {
98 val = chan_to_field(red, &info->var.red);
99 val |= chan_to_field(green, &info->var.green);
100 val |= chan_to_field(blue, &info->var.blue);
101 par->pseudo_palette[regno] = val;
102 }
103 break;
104 case FB_VISUAL_PSEUDOCOLOR:
105 if (regno < 256) {
106 val = (red >> 8) << 16;
107 val |= (green >> 8) << 8;
108 val |= blue >> 8;
109 outreg(disp, GC_L0PAL0 + (regno * 4), val);
110 }
111 break;
112 default:
113 return 1; /* unsupported type */
114 }
115 return 0;
116}
117
118static int mb862xxfb_check_var(struct fb_var_screeninfo *var,
119 struct fb_info *fbi)
120{
121 unsigned long tmp;
122
123 if (fbi->dev)
124 dev_dbg(fbi->dev, "%s\n", __func__);
125
126 /* check if these values fit into the registers */
127 if (var->hsync_len > 255 || var->vsync_len > 255)
128 return -EINVAL;
129
130 if ((var->xres + var->right_margin) >= 4096)
131 return -EINVAL;
132
133 if ((var->yres + var->lower_margin) > 4096)
134 return -EINVAL;
135
136 if (h_total(var) > 4096 || v_total(var) > 4096)
137 return -EINVAL;
138
139 if (var->xres_virtual > 4096 || var->yres_virtual > 4096)
140 return -EINVAL;
141
142 if (var->bits_per_pixel <= 8)
143 var->bits_per_pixel = 8;
144 else if (var->bits_per_pixel <= 16)
145 var->bits_per_pixel = 16;
146 else if (var->bits_per_pixel <= 32)
147 var->bits_per_pixel = 32;
148
149 /*
150 * can cope with 8,16 or 24/32bpp if resulting
151 * pitch is divisible by 64 without remainder
152 */
 153	if (d_pitch(var) % GC_L0M_L0W_UNIT) {
 154		int r;
 155
 156		var->bits_per_pixel = 0;
 157		do {
 158			var->bits_per_pixel += 8;
 159			r = d_pitch(var) % GC_L0M_L0W_UNIT;
 160		} while (r && var->bits_per_pixel <= 32);
 161
 162		if (d_pitch(var) % GC_L0M_L0W_UNIT)
163 return -EINVAL;
164 }
165
166 /* line length is going to be 128 bit aligned */
167 tmp = (var->xres * var->bits_per_pixel) / 8;
168 if ((tmp & 15) != 0)
169 return -EINVAL;
170
171 /* set r/g/b positions and validate bpp */
172 switch (var->bits_per_pixel) {
173 case 8:
174 var->red.length = var->bits_per_pixel;
175 var->green.length = var->bits_per_pixel;
176 var->blue.length = var->bits_per_pixel;
177 var->red.offset = 0;
178 var->green.offset = 0;
179 var->blue.offset = 0;
180 var->transp.length = 0;
181 break;
182 case 16:
183 var->red.length = 5;
184 var->green.length = 5;
185 var->blue.length = 5;
186 var->red.offset = 10;
187 var->green.offset = 5;
188 var->blue.offset = 0;
189 var->transp.length = 0;
190 break;
191 case 24:
192 case 32:
193 var->transp.length = 8;
194 var->red.length = 8;
195 var->green.length = 8;
196 var->blue.length = 8;
197 var->transp.offset = 24;
198 var->red.offset = 16;
199 var->green.offset = 8;
200 var->blue.offset = 0;
201 break;
202 default:
203 return -EINVAL;
204 }
205 return 0;
206}
207
208/*
209 * set display parameters
210 */
211static int mb862xxfb_set_par(struct fb_info *fbi)
212{
213 struct mb862xxfb_par *par = fbi->par;
214 unsigned long reg, sc;
215
216 dev_dbg(par->dev, "%s\n", __func__);
217
218 if (par->pre_init)
219 return 0;
220
221 /* disp off */
222 reg = inreg(disp, GC_DCM1);
223 reg &= ~GC_DCM01_DEN;
224 outreg(disp, GC_DCM1, reg);
225
226 /* set display reference clock div. */
227 sc = par->refclk / (1000000 / fbi->var.pixclock) - 1;
228 reg = inreg(disp, GC_DCM1);
229 reg &= ~(GC_DCM01_CKS | GC_DCM01_RESV | GC_DCM01_SC);
230 reg |= sc << 8;
231 outreg(disp, GC_DCM1, reg);
232 dev_dbg(par->dev, "SC 0x%lx\n", sc);
233
234 /* disp dimension, format */
235 reg = pack(d_pitch(&fbi->var) / GC_L0M_L0W_UNIT,
236 (fbi->var.yres - 1));
237 if (fbi->var.bits_per_pixel == 16)
238 reg |= GC_L0M_L0C_16;
239 outreg(disp, GC_L0M, reg);
240
241 if (fbi->var.bits_per_pixel == 32) {
242 reg = inreg(disp, GC_L0EM);
243 outreg(disp, GC_L0EM, reg | GC_L0EM_L0EC_24);
244 }
245 outreg(disp, GC_WY_WX, 0);
246 reg = pack(fbi->var.yres - 1, fbi->var.xres);
247 outreg(disp, GC_WH_WW, reg);
248 outreg(disp, GC_L0OA0, 0);
249 outreg(disp, GC_L0DA0, 0);
250 outreg(disp, GC_L0DY_L0DX, 0);
251 outreg(disp, GC_L0WY_L0WX, 0);
252 outreg(disp, GC_L0WH_L0WW, reg);
253
254 /* both HW-cursors off */
255 reg = inreg(disp, GC_CPM_CUTC);
256 reg &= ~(GC_CPM_CEN0 | GC_CPM_CEN1);
257 outreg(disp, GC_CPM_CUTC, reg);
258
259 /* timings */
260 reg = pack(fbi->var.xres - 1, fbi->var.xres - 1);
261 outreg(disp, GC_HDB_HDP, reg);
262 reg = pack((fbi->var.yres - 1), vsp(&fbi->var));
263 outreg(disp, GC_VDP_VSP, reg);
264 reg = ((fbi->var.vsync_len - 1) << 24) |
265 pack((fbi->var.hsync_len - 1), hsp(&fbi->var));
266 outreg(disp, GC_VSW_HSW_HSP, reg);
267 outreg(disp, GC_HTP, pack(h_total(&fbi->var) - 1, 0));
268 outreg(disp, GC_VTR, pack(v_total(&fbi->var) - 1, 0));
269
270 /* display on */
271 reg = inreg(disp, GC_DCM1);
272 reg |= GC_DCM01_DEN | GC_DCM01_L0E;
273 reg &= ~GC_DCM01_ESY;
274 outreg(disp, GC_DCM1, reg);
275 return 0;
276}
277
278static int mb862xxfb_pan(struct fb_var_screeninfo *var,
279 struct fb_info *info)
280{
281 struct mb862xxfb_par *par = info->par;
282 unsigned long reg;
283
284 reg = pack(var->yoffset, var->xoffset);
285 outreg(disp, GC_L0WY_L0WX, reg);
286
287 reg = pack(var->yres_virtual, var->xres_virtual);
288 outreg(disp, GC_L0WH_L0WW, reg);
289 return 0;
290}
291
292static int mb862xxfb_blank(int mode, struct fb_info *fbi)
293{
294 struct mb862xxfb_par *par = fbi->par;
295 unsigned long reg;
296
297 dev_dbg(fbi->dev, "blank mode=%d\n", mode);
298
299 switch (mode) {
300 case FB_BLANK_POWERDOWN:
301 reg = inreg(disp, GC_DCM1);
302 reg &= ~GC_DCM01_DEN;
303 outreg(disp, GC_DCM1, reg);
304 break;
305 case FB_BLANK_UNBLANK:
306 reg = inreg(disp, GC_DCM1);
307 reg |= GC_DCM01_DEN;
308 outreg(disp, GC_DCM1, reg);
309 break;
310 case FB_BLANK_NORMAL:
311 case FB_BLANK_VSYNC_SUSPEND:
312 case FB_BLANK_HSYNC_SUSPEND:
313 default:
314 return 1;
315 }
316 return 0;
317}
318
319/* framebuffer ops */
320static struct fb_ops mb862xxfb_ops = {
321 .owner = THIS_MODULE,
322 .fb_check_var = mb862xxfb_check_var,
323 .fb_set_par = mb862xxfb_set_par,
324 .fb_setcolreg = mb862xxfb_setcolreg,
325 .fb_blank = mb862xxfb_blank,
326 .fb_pan_display = mb862xxfb_pan,
327 .fb_fillrect = cfb_fillrect,
328 .fb_copyarea = cfb_copyarea,
329 .fb_imageblit = cfb_imageblit,
330};
331
332/* initialize fb_info data */
333static int mb862xxfb_init_fbinfo(struct fb_info *fbi)
334{
335 struct mb862xxfb_par *par = fbi->par;
336 struct mb862xx_gc_mode *mode = par->gc_mode;
337 unsigned long reg;
338
339 fbi->fbops = &mb862xxfb_ops;
340 fbi->pseudo_palette = par->pseudo_palette;
341 fbi->screen_base = par->fb_base;
342 fbi->screen_size = par->mapped_vram;
343
344 strcpy(fbi->fix.id, DRV_NAME);
345 fbi->fix.smem_start = (unsigned long)par->fb_base_phys;
346 fbi->fix.smem_len = par->mapped_vram;
347 fbi->fix.mmio_start = (unsigned long)par->mmio_base_phys;
348 fbi->fix.mmio_len = par->mmio_len;
349 fbi->fix.accel = FB_ACCEL_NONE;
350 fbi->fix.type = FB_TYPE_PACKED_PIXELS;
351 fbi->fix.type_aux = 0;
352 fbi->fix.xpanstep = 1;
353 fbi->fix.ypanstep = 1;
354 fbi->fix.ywrapstep = 0;
355
356 reg = inreg(disp, GC_DCM1);
357 if (reg & GC_DCM01_DEN && reg & GC_DCM01_L0E) {
358 /* get the disp mode from active display cfg */
359 unsigned long sc = ((reg & GC_DCM01_SC) >> 8) + 1;
360 unsigned long hsp, vsp, ht, vt;
361
362 dev_dbg(par->dev, "using bootloader's disp. mode\n");
363 fbi->var.pixclock = (sc * 1000000) / par->refclk;
364 fbi->var.xres = (inreg(disp, GC_HDB_HDP) & 0x0fff) + 1;
365 reg = inreg(disp, GC_VDP_VSP);
366 fbi->var.yres = ((reg >> 16) & 0x0fff) + 1;
367 vsp = (reg & 0x0fff) + 1;
368 fbi->var.xres_virtual = fbi->var.xres;
369 fbi->var.yres_virtual = fbi->var.yres;
370 reg = inreg(disp, GC_L0EM);
371 if (reg & GC_L0EM_L0EC_24) {
372 fbi->var.bits_per_pixel = 32;
373 } else {
374 reg = inreg(disp, GC_L0M);
375 if (reg & GC_L0M_L0C_16)
376 fbi->var.bits_per_pixel = 16;
377 else
378 fbi->var.bits_per_pixel = 8;
379 }
380 reg = inreg(disp, GC_VSW_HSW_HSP);
381 fbi->var.hsync_len = ((reg & 0xff0000) >> 16) + 1;
382 fbi->var.vsync_len = ((reg & 0x3f000000) >> 24) + 1;
383 hsp = (reg & 0xffff) + 1;
384 ht = ((inreg(disp, GC_HTP) & 0xfff0000) >> 16) + 1;
385 fbi->var.right_margin = hsp - fbi->var.xres;
386 fbi->var.left_margin = ht - hsp - fbi->var.hsync_len;
387 vt = ((inreg(disp, GC_VTR) & 0xfff0000) >> 16) + 1;
388 fbi->var.lower_margin = vsp - fbi->var.yres;
389 fbi->var.upper_margin = vt - vsp - fbi->var.vsync_len;
390 } else if (mode) {
391 dev_dbg(par->dev, "using supplied mode\n");
392 fb_videomode_to_var(&fbi->var, (struct fb_videomode *)mode);
393 fbi->var.bits_per_pixel = mode->def_bpp ? mode->def_bpp : 8;
394 } else {
395 int ret;
396
397 ret = fb_find_mode(&fbi->var, fbi, "640x480-16@60",
398 NULL, 0, NULL, 16);
399 if (ret == 0 || ret == 4) {
400 dev_err(par->dev,
401 "failed to get initial mode\n");
402 return -EINVAL;
403 }
404 }
405
406 fbi->var.xoffset = 0;
407 fbi->var.yoffset = 0;
408 fbi->var.grayscale = 0;
409 fbi->var.nonstd = 0;
410 fbi->var.height = -1;
411 fbi->var.width = -1;
412 fbi->var.accel_flags = 0;
413 fbi->var.vmode = FB_VMODE_NONINTERLACED;
414 fbi->var.activate = FB_ACTIVATE_NOW;
415 fbi->flags = FBINFO_DEFAULT |
416#ifdef __BIG_ENDIAN
417 FBINFO_FOREIGN_ENDIAN |
418#endif
419 FBINFO_HWACCEL_XPAN |
420 FBINFO_HWACCEL_YPAN;
421
422 /* check and possibly fix bpp */
423 if ((fbi->fbops->fb_check_var)(&fbi->var, fbi))
424 dev_err(par->dev, "check_var() failed on initial setup?\n");
425
426 fbi->fix.visual = fbi->var.bits_per_pixel == 8 ?
427 FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR;
428 fbi->fix.line_length = (fbi->var.xres_virtual *
429 fbi->var.bits_per_pixel) / 8;
430 return 0;
431}
432
433/*
434 * show some display controller and cursor registers
435 */
436static ssize_t mb862xxfb_show_dispregs(struct device *dev,
437 struct device_attribute *attr, char *buf)
438{
439 struct fb_info *fbi = dev_get_drvdata(dev);
440 struct mb862xxfb_par *par = fbi->par;
441 char *ptr = buf;
442 unsigned int reg;
443
444 for (reg = GC_DCM0; reg <= GC_L0DY_L0DX; reg += 4)
445 ptr += sprintf(ptr, "%08x = %08x\n",
446 reg, inreg(disp, reg));
447
448 for (reg = GC_CPM_CUTC; reg <= GC_CUY1_CUX1; reg += 4)
449 ptr += sprintf(ptr, "%08x = %08x\n",
450 reg, inreg(disp, reg));
451
452 for (reg = GC_DCM1; reg <= GC_L0WH_L0WW; reg += 4)
453 ptr += sprintf(ptr, "%08x = %08x\n",
454 reg, inreg(disp, reg));
455
456 return ptr - buf;
457}
458
459static DEVICE_ATTR(dispregs, 0444, mb862xxfb_show_dispregs, NULL);
460
461irqreturn_t mb862xx_intr(int irq, void *dev_id)
462{
463 struct mb862xxfb_par *par = (struct mb862xxfb_par *) dev_id;
464 unsigned long reg_ist, mask;
465
466 if (!par)
467 return IRQ_NONE;
468
469 if (par->type == BT_CARMINE) {
470 /* Get Interrupt Status */
471 reg_ist = inreg(ctrl, GC_CTRL_STATUS);
472 mask = inreg(ctrl, GC_CTRL_INT_MASK);
473 if (reg_ist == 0)
474 return IRQ_HANDLED;
475
476 reg_ist &= mask;
477 if (reg_ist == 0)
478 return IRQ_HANDLED;
479
480 /* Clear interrupt status */
481 outreg(ctrl, 0x0, reg_ist);
482 } else {
483 /* Get status */
484 reg_ist = inreg(host, GC_IST);
485 mask = inreg(host, GC_IMASK);
486
487 reg_ist &= mask;
488 if (reg_ist == 0)
489 return IRQ_HANDLED;
490
491 /* Clear status */
492 outreg(host, GC_IST, ~reg_ist);
493 }
494 return IRQ_HANDLED;
495}
496
497#if defined(CONFIG_FB_MB862XX_LIME)
498/*
499 * GDC (Lime, Coral(B/Q), Mint, ...) on host bus
500 */
501static int mb862xx_gdc_init(struct mb862xxfb_par *par)
502{
503 unsigned long ccf, mmr;
504 unsigned long ver, rev;
505
506 if (!par)
507 return -ENODEV;
508
509#if defined(CONFIG_FB_PRE_INIT_FB)
510 par->pre_init = 1;
511#endif
512 par->host = par->mmio_base;
513 par->i2c = par->mmio_base + MB862XX_I2C_BASE;
514 par->disp = par->mmio_base + MB862XX_DISP_BASE;
515 par->cap = par->mmio_base + MB862XX_CAP_BASE;
516 par->draw = par->mmio_base + MB862XX_DRAW_BASE;
517 par->geo = par->mmio_base + MB862XX_GEO_BASE;
518 par->pio = par->mmio_base + MB862XX_PIO_BASE;
519
520 par->refclk = GC_DISP_REFCLK_400;
521
522 ver = inreg(host, GC_CID);
523 rev = inreg(pio, GC_REVISION);
524 if ((ver == 0x303) && (rev & 0xffffff00) == 0x20050100) {
525 dev_info(par->dev, "Fujitsu Lime v1.%d found\n",
526 (int)rev & 0xff);
527 par->type = BT_LIME;
528 ccf = par->gc_mode ? par->gc_mode->ccf : GC_CCF_COT_100;
529 mmr = par->gc_mode ? par->gc_mode->mmr : 0x414fb7f2;
530 } else {
 531		dev_info(par->dev, "unknown GDC, CID/Rev.: 0x%lx/0x%lx\n", ver, rev);
532 return -ENODEV;
533 }
534
535 if (!par->pre_init) {
536 outreg(host, GC_CCF, ccf);
537 udelay(200);
538 outreg(host, GC_MMR, mmr);
539 udelay(10);
540 }
541
542 /* interrupt status */
543 outreg(host, GC_IST, 0);
544 outreg(host, GC_IMASK, GC_INT_EN);
545 return 0;
546}
547
548static int __devinit of_platform_mb862xx_probe(struct of_device *ofdev,
549 const struct of_device_id *id)
550{
551 struct device_node *np = ofdev->node;
552 struct device *dev = &ofdev->dev;
553 struct mb862xxfb_par *par;
554 struct fb_info *info;
555 struct resource res;
556 resource_size_t res_size;
 557	int ret = -ENODEV;
558
559 if (of_address_to_resource(np, 0, &res)) {
560 dev_err(dev, "Invalid address\n");
561 return -ENXIO;
562 }
563
564 info = framebuffer_alloc(sizeof(struct mb862xxfb_par), dev);
565 if (info == NULL) {
566 dev_err(dev, "cannot allocate framebuffer\n");
567 return -ENOMEM;
568 }
569
570 par = info->par;
571 par->info = info;
572 par->dev = dev;
573
574 par->irq = irq_of_parse_and_map(np, 0);
575 if (par->irq == NO_IRQ) {
576 dev_err(dev, "failed to map irq\n");
577 ret = -ENODEV;
578 goto fbrel;
579 }
580
581 res_size = 1 + res.end - res.start;
582 par->res = request_mem_region(res.start, res_size, DRV_NAME);
583 if (par->res == NULL) {
584 dev_err(dev, "Cannot claim framebuffer/mmio\n");
585 ret = -ENXIO;
586 goto irqdisp;
587 }
588
589#if defined(CONFIG_LWMON5)
590 par->gc_mode = &lwmon5_gc_mode;
591#endif
592
593#if defined(CONFIG_SOCRATES)
594 par->gc_mode = &socrates_gc_mode;
595#endif
596
597 par->fb_base_phys = res.start;
598 par->mmio_base_phys = res.start + MB862XX_MMIO_BASE;
599 par->mmio_len = MB862XX_MMIO_SIZE;
600 if (par->gc_mode)
601 par->mapped_vram = par->gc_mode->max_vram;
602 else
603 par->mapped_vram = MB862XX_MEM_SIZE;
604
605 par->fb_base = ioremap(par->fb_base_phys, par->mapped_vram);
606 if (par->fb_base == NULL) {
607 dev_err(dev, "Cannot map framebuffer\n");
608 goto rel_reg;
609 }
610
611 par->mmio_base = ioremap(par->mmio_base_phys, par->mmio_len);
612 if (par->mmio_base == NULL) {
613 dev_err(dev, "Cannot map registers\n");
614 goto fb_unmap;
615 }
616
617 dev_dbg(dev, "fb phys 0x%llx 0x%lx\n",
618 (u64)par->fb_base_phys, (ulong)par->mapped_vram);
619 dev_dbg(dev, "mmio phys 0x%llx 0x%lx, (irq = %d)\n",
620 (u64)par->mmio_base_phys, (ulong)par->mmio_len, par->irq);
621
622 if (mb862xx_gdc_init(par))
623 goto io_unmap;
624
625 if (request_irq(par->irq, mb862xx_intr, IRQF_DISABLED,
626 DRV_NAME, (void *)par)) {
627 dev_err(dev, "Cannot request irq\n");
628 goto io_unmap;
629 }
630
631 mb862xxfb_init_fbinfo(info);
632
633 if (fb_alloc_cmap(&info->cmap, NR_PALETTE, 0) < 0) {
634 dev_err(dev, "Could not allocate cmap for fb_info.\n");
635 goto free_irq;
636 }
637
638 if ((info->fbops->fb_set_par)(info))
 639		dev_err(dev, "set_par() failed on initial setup?\n");
640
641 if (register_framebuffer(info)) {
642 dev_err(dev, "failed to register framebuffer\n");
643 goto rel_cmap;
644 }
645
646 dev_set_drvdata(dev, info);
647
648 if (device_create_file(dev, &dev_attr_dispregs))
649 dev_err(dev, "Can't create sysfs regdump file\n");
650 return 0;
651
652rel_cmap:
653 fb_dealloc_cmap(&info->cmap);
654free_irq:
655 outreg(host, GC_IMASK, 0);
656 free_irq(par->irq, (void *)par);
657io_unmap:
658 iounmap(par->mmio_base);
659fb_unmap:
660 iounmap(par->fb_base);
661rel_reg:
662 release_mem_region(res.start, res_size);
663irqdisp:
664 irq_dispose_mapping(par->irq);
665fbrel:
666 dev_set_drvdata(dev, NULL);
667 framebuffer_release(info);
668 return ret;
669}
670
671static int __devexit of_platform_mb862xx_remove(struct of_device *ofdev)
672{
673 struct fb_info *fbi = dev_get_drvdata(&ofdev->dev);
674 struct mb862xxfb_par *par = fbi->par;
675 resource_size_t res_size = 1 + par->res->end - par->res->start;
676 unsigned long reg;
677
678 dev_dbg(fbi->dev, "%s release\n", fbi->fix.id);
679
680 /* display off */
681 reg = inreg(disp, GC_DCM1);
682 reg &= ~(GC_DCM01_DEN | GC_DCM01_L0E);
683 outreg(disp, GC_DCM1, reg);
684
685 /* disable interrupts */
686 outreg(host, GC_IMASK, 0);
687
688 free_irq(par->irq, (void *)par);
689 irq_dispose_mapping(par->irq);
690
691 device_remove_file(&ofdev->dev, &dev_attr_dispregs);
692
693 unregister_framebuffer(fbi);
694 fb_dealloc_cmap(&fbi->cmap);
695
696 iounmap(par->mmio_base);
697 iounmap(par->fb_base);
698
699 dev_set_drvdata(&ofdev->dev, NULL);
700 release_mem_region(par->res->start, res_size);
701 framebuffer_release(fbi);
702 return 0;
703}
704
705/*
706 * common types
707 */
708static struct of_device_id __devinitdata of_platform_mb862xx_tbl[] = {
709 { .compatible = "fujitsu,MB86276", },
710 { .compatible = "fujitsu,lime", },
711 { .compatible = "fujitsu,MB86277", },
712 { .compatible = "fujitsu,mint", },
713 { .compatible = "fujitsu,MB86293", },
714 { .compatible = "fujitsu,MB86294", },
715 { .compatible = "fujitsu,coral", },
716 { /* end */ }
717};
718
719static struct of_platform_driver of_platform_mb862xxfb_driver = {
720 .owner = THIS_MODULE,
721 .name = DRV_NAME,
722 .match_table = of_platform_mb862xx_tbl,
723 .probe = of_platform_mb862xx_probe,
724 .remove = __devexit_p(of_platform_mb862xx_remove),
725};
726#endif
727
728#if defined(CONFIG_FB_MB862XX_PCI_GDC)
729static int coralp_init(struct mb862xxfb_par *par)
730{
731 int cn, ver;
732
733 par->host = par->mmio_base;
734 par->i2c = par->mmio_base + MB862XX_I2C_BASE;
735 par->disp = par->mmio_base + MB862XX_DISP_BASE;
736 par->cap = par->mmio_base + MB862XX_CAP_BASE;
737 par->draw = par->mmio_base + MB862XX_DRAW_BASE;
738 par->geo = par->mmio_base + MB862XX_GEO_BASE;
739 par->pio = par->mmio_base + MB862XX_PIO_BASE;
740
741 par->refclk = GC_DISP_REFCLK_400;
742
743 ver = inreg(host, GC_CID);
744 cn = (ver & GC_CID_CNAME_MSK) >> 8;
745 ver = ver & GC_CID_VERSION_MSK;
746 if (cn == 3) {
 747		dev_info(par->dev, "Fujitsu Coral-%s GDC Rev.%d found\n",
748 (ver == 6) ? "P" : (ver == 8) ? "PA" : "?",
749 par->pdev->revision);
750 outreg(host, GC_CCF, GC_CCF_CGE_166 | GC_CCF_COT_133);
751 udelay(200);
752 outreg(host, GC_MMR, GC_MMR_CORALP_EVB_VAL);
753 udelay(10);
754 /* Clear interrupt status */
755 outreg(host, GC_IST, 0);
756 } else {
757 return -ENODEV;
758 }
759 return 0;
760}
761
762static int init_dram_ctrl(struct mb862xxfb_par *par)
763{
764 unsigned long i = 0;
765
766 /*
767 * Set io mode first! Spec. says IC may be destroyed
768 * if not set to SSTL2/LVCMOS before init.
769 */
770 outreg(dram_ctrl, GC_DCTL_IOCONT1_IOCONT0, GC_EVB_DCTL_IOCONT1_IOCONT0);
771
772 /* DRAM init */
773 outreg(dram_ctrl, GC_DCTL_MODE_ADD, GC_EVB_DCTL_MODE_ADD);
774 outreg(dram_ctrl, GC_DCTL_SETTIME1_EMODE, GC_EVB_DCTL_SETTIME1_EMODE);
775 outreg(dram_ctrl, GC_DCTL_REFRESH_SETTIME2,
776 GC_EVB_DCTL_REFRESH_SETTIME2);
777 outreg(dram_ctrl, GC_DCTL_RSV2_RSV1, GC_EVB_DCTL_RSV2_RSV1);
778 outreg(dram_ctrl, GC_DCTL_DDRIF2_DDRIF1, GC_EVB_DCTL_DDRIF2_DDRIF1);
779 outreg(dram_ctrl, GC_DCTL_RSV0_STATES, GC_EVB_DCTL_RSV0_STATES);
780
781 /* DLL reset done? */
782 while ((inreg(dram_ctrl, GC_DCTL_RSV0_STATES) & GC_DCTL_STATES_MSK)) {
783 udelay(GC_DCTL_INIT_WAIT_INTERVAL);
784 if (i++ > GC_DCTL_INIT_WAIT_CNT) {
785 dev_err(par->dev, "VRAM init failed.\n");
786 return -EINVAL;
787 }
788 }
789 outreg(dram_ctrl, GC_DCTL_MODE_ADD, GC_EVB_DCTL_MODE_ADD_AFT_RST);
790 outreg(dram_ctrl, GC_DCTL_RSV0_STATES, GC_EVB_DCTL_RSV0_STATES_AFT_RST);
791 return 0;
792}
793
794static int carmine_init(struct mb862xxfb_par *par)
795{
796 unsigned long reg;
797
798 par->ctrl = par->mmio_base + MB86297_CTRL_BASE;
799 par->i2c = par->mmio_base + MB86297_I2C_BASE;
800 par->disp = par->mmio_base + MB86297_DISP0_BASE;
801 par->disp1 = par->mmio_base + MB86297_DISP1_BASE;
802 par->cap = par->mmio_base + MB86297_CAP0_BASE;
803 par->cap1 = par->mmio_base + MB86297_CAP1_BASE;
804 par->draw = par->mmio_base + MB86297_DRAW_BASE;
805 par->dram_ctrl = par->mmio_base + MB86297_DRAMCTRL_BASE;
806 par->wrback = par->mmio_base + MB86297_WRBACK_BASE;
807
808 par->refclk = GC_DISP_REFCLK_533;
809
810 /* warm up */
811 reg = GC_CTRL_CLK_EN_DRAM | GC_CTRL_CLK_EN_2D3D | GC_CTRL_CLK_EN_DISP0;
812 outreg(ctrl, GC_CTRL_CLK_ENABLE, reg);
813
814 /* check for engine module revision */
815 if (inreg(draw, GC_2D3D_REV) == GC_RE_REVISION)
816 dev_info(par->dev, "Fujitsu Carmine GDC Rev.%d found\n",
817 par->pdev->revision);
818 else
819 goto err_init;
820
821 reg &= ~GC_CTRL_CLK_EN_2D3D;
822 outreg(ctrl, GC_CTRL_CLK_ENABLE, reg);
823
824 /* set up vram */
825 if (init_dram_ctrl(par) < 0)
826 goto err_init;
827
828 outreg(ctrl, GC_CTRL_INT_MASK, 0);
829 return 0;
830
831err_init:
832 outreg(ctrl, GC_CTRL_CLK_ENABLE, 0);
833 return -EINVAL;
834}
835
836static inline int mb862xx_pci_gdc_init(struct mb862xxfb_par *par)
837{
838 switch (par->type) {
839 case BT_CORALP:
840 return coralp_init(par);
841 case BT_CARMINE:
842 return carmine_init(par);
843 default:
844 return -ENODEV;
845 }
846}
847
848#define CHIP_ID(id) \
849 { PCI_DEVICE(PCI_VENDOR_ID_FUJITSU_LIMITED, id) }
850
851static struct pci_device_id mb862xx_pci_tbl[] __devinitdata = {
852 /* MB86295/MB86296 */
853 CHIP_ID(PCI_DEVICE_ID_FUJITSU_CORALP),
854 CHIP_ID(PCI_DEVICE_ID_FUJITSU_CORALPA),
855 /* MB86297 */
856 CHIP_ID(PCI_DEVICE_ID_FUJITSU_CARMINE),
857 { 0, }
858};
859
860MODULE_DEVICE_TABLE(pci, mb862xx_pci_tbl);
861
862static int __devinit mb862xx_pci_probe(struct pci_dev *pdev,
863 const struct pci_device_id *ent)
864{
865 struct mb862xxfb_par *par;
866 struct fb_info *info;
867 struct device *dev = &pdev->dev;
868 int ret;
869
870 ret = pci_enable_device(pdev);
871 if (ret < 0) {
872 dev_err(dev, "Cannot enable PCI device\n");
873 goto out;
874 }
875
876 info = framebuffer_alloc(sizeof(struct mb862xxfb_par), dev);
877 if (!info) {
878 dev_err(dev, "framebuffer alloc failed\n");
879 ret = -ENOMEM;
880 goto dis_dev;
881 }
882
883 par = info->par;
884 par->info = info;
885 par->dev = dev;
886 par->pdev = pdev;
887 par->irq = pdev->irq;
888
889 ret = pci_request_regions(pdev, DRV_NAME);
890 if (ret < 0) {
891 dev_err(dev, "Cannot reserve region(s) for PCI device\n");
892 goto rel_fb;
893 }
894
895 switch (pdev->device) {
896 case PCI_DEVICE_ID_FUJITSU_CORALP:
897 case PCI_DEVICE_ID_FUJITSU_CORALPA:
898 par->fb_base_phys = pci_resource_start(par->pdev, 0);
899 par->mapped_vram = CORALP_MEM_SIZE;
900 par->mmio_base_phys = par->fb_base_phys + MB862XX_MMIO_BASE;
901 par->mmio_len = MB862XX_MMIO_SIZE;
902 par->type = BT_CORALP;
903 break;
904 case PCI_DEVICE_ID_FUJITSU_CARMINE:
905 par->fb_base_phys = pci_resource_start(par->pdev, 2);
906 par->mmio_base_phys = pci_resource_start(par->pdev, 3);
907 par->mmio_len = pci_resource_len(par->pdev, 3);
908 par->mapped_vram = CARMINE_MEM_SIZE;
909 par->type = BT_CARMINE;
910 break;
911 default:
912 /* should never occur */
913 goto rel_reg;
914 }
915
916 par->fb_base = ioremap(par->fb_base_phys, par->mapped_vram);
917 if (par->fb_base == NULL) {
918 dev_err(dev, "Cannot map framebuffer\n");
919 goto rel_reg;
920 }
921
922 par->mmio_base = ioremap(par->mmio_base_phys, par->mmio_len);
923 if (par->mmio_base == NULL) {
924 dev_err(dev, "Cannot map registers\n");
925 ret = -EIO;
926 goto fb_unmap;
927 }
928
929 dev_dbg(dev, "fb phys 0x%llx 0x%lx\n",
930 (unsigned long long)par->fb_base_phys, (ulong)par->mapped_vram);
931 dev_dbg(dev, "mmio phys 0x%llx 0x%lx\n",
932 (unsigned long long)par->mmio_base_phys, (ulong)par->mmio_len);
933
934 if (mb862xx_pci_gdc_init(par))
935 goto io_unmap;
936
937 if (request_irq(par->irq, mb862xx_intr, IRQF_DISABLED | IRQF_SHARED,
938 DRV_NAME, (void *)par)) {
939 dev_err(dev, "Cannot request irq\n");
940 goto io_unmap;
941 }
942
943 mb862xxfb_init_fbinfo(info);
944
945 if (fb_alloc_cmap(&info->cmap, NR_PALETTE, 0) < 0) {
946 dev_err(dev, "Could not allocate cmap for fb_info.\n");
947 ret = -ENOMEM;
948 goto free_irq;
949 }
950
951 if ((info->fbops->fb_set_par)(info))
 952		dev_err(dev, "set_par() failed on initial setup?\n");
953
954 ret = register_framebuffer(info);
955 if (ret < 0) {
956 dev_err(dev, "failed to register framebuffer\n");
957 goto rel_cmap;
958 }
959
960 pci_set_drvdata(pdev, info);
961
962 if (device_create_file(dev, &dev_attr_dispregs))
963 dev_err(dev, "Can't create sysfs regdump file\n");
964
965 if (par->type == BT_CARMINE)
966 outreg(ctrl, GC_CTRL_INT_MASK, GC_CARMINE_INT_EN);
967 else
968 outreg(host, GC_IMASK, GC_INT_EN);
969
970 return 0;
971
972rel_cmap:
973 fb_dealloc_cmap(&info->cmap);
974free_irq:
975 free_irq(par->irq, (void *)par);
976io_unmap:
977 iounmap(par->mmio_base);
978fb_unmap:
979 iounmap(par->fb_base);
980rel_reg:
981 pci_release_regions(pdev);
982rel_fb:
983 framebuffer_release(info);
984dis_dev:
985 pci_disable_device(pdev);
986out:
987 return ret;
988}
989
990static void __devexit mb862xx_pci_remove(struct pci_dev *pdev)
991{
992 struct fb_info *fbi = pci_get_drvdata(pdev);
993 struct mb862xxfb_par *par = fbi->par;
994 unsigned long reg;
995
996 dev_dbg(fbi->dev, "%s release\n", fbi->fix.id);
997
998 /* display off */
999 reg = inreg(disp, GC_DCM1);
1000 reg &= ~(GC_DCM01_DEN | GC_DCM01_L0E);
1001 outreg(disp, GC_DCM1, reg);
1002
1003 if (par->type == BT_CARMINE) {
1004 outreg(ctrl, GC_CTRL_INT_MASK, 0);
1005 outreg(ctrl, GC_CTRL_CLK_ENABLE, 0);
1006 } else {
1007 outreg(host, GC_IMASK, 0);
1008 }
1009
1010 device_remove_file(&pdev->dev, &dev_attr_dispregs);
1011
1012 pci_set_drvdata(pdev, NULL);
1013 unregister_framebuffer(fbi);
1014 fb_dealloc_cmap(&fbi->cmap);
1015
1016 free_irq(par->irq, (void *)par);
1017 iounmap(par->mmio_base);
1018 iounmap(par->fb_base);
1019
1020 pci_release_regions(pdev);
1021 framebuffer_release(fbi);
1022 pci_disable_device(pdev);
1023}
1024
1025static struct pci_driver mb862xxfb_pci_driver = {
1026 .name = DRV_NAME,
1027 .id_table = mb862xx_pci_tbl,
1028 .probe = mb862xx_pci_probe,
1029 .remove = __devexit_p(mb862xx_pci_remove),
1030};
1031#endif
1032
1033static int __devinit mb862xxfb_init(void)
1034{
1035 int ret = -ENODEV;
1036
1037#if defined(CONFIG_FB_MB862XX_LIME)
1038 ret = of_register_platform_driver(&of_platform_mb862xxfb_driver);
1039#endif
1040#if defined(CONFIG_FB_MB862XX_PCI_GDC)
1041 ret = pci_register_driver(&mb862xxfb_pci_driver);
1042#endif
1043 return ret;
1044}
1045
1046static void __exit mb862xxfb_exit(void)
1047{
1048#if defined(CONFIG_FB_MB862XX_LIME)
1049 of_unregister_platform_driver(&of_platform_mb862xxfb_driver);
1050#endif
1051#if defined(CONFIG_FB_MB862XX_PCI_GDC)
1052 pci_unregister_driver(&mb862xxfb_pci_driver);
1053#endif
1054}
1055
1056module_init(mb862xxfb_init);
1057module_exit(mb862xxfb_exit);
1058
1059MODULE_DESCRIPTION("Fujitsu MB862xx Framebuffer driver");
1060MODULE_AUTHOR("Anatolij Gustschin <agust@denx.de>");
1061MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/mb862xx/mb862xxfb.h b/drivers/video/mb862xx/mb862xxfb.h
new file mode 100644
index 000000000000..c4c8f4dd2217
--- /dev/null
+++ b/drivers/video/mb862xx/mb862xxfb.h
@@ -0,0 +1,83 @@
1#ifndef __MB862XX_H__
2#define __MB862XX_H__
3
4#define PCI_VENDOR_ID_FUJITSU_LIMITED 0x10cf
5#define PCI_DEVICE_ID_FUJITSU_CORALP 0x2019
6#define PCI_DEVICE_ID_FUJITSU_CORALPA 0x201e
7#define PCI_DEVICE_ID_FUJITSU_CARMINE 0x202b
8
9#define GC_MMR_CORALP_EVB_VAL 0x11d7fa13
10
11enum gdctype {
12 BT_NONE,
13 BT_LIME,
14 BT_MINT,
15 BT_CORAL,
16 BT_CORALP,
17 BT_CARMINE,
18};
19
20struct mb862xx_gc_mode {
21 struct fb_videomode def_mode; /* mode of connected display */
22 unsigned int def_bpp; /* default depth */
23 unsigned long max_vram; /* connected SDRAM size */
24 unsigned long ccf; /* gdc clk */
25 unsigned long mmr; /* memory mode for SDRAM */
26};
27
28/* private data */
29struct mb862xxfb_par {
30 struct fb_info *info; /* fb info head */
31 struct device *dev;
32 struct pci_dev *pdev;
33 struct resource *res; /* framebuffer/mmio resource */
34
35 resource_size_t fb_base_phys; /* fb base, 36-bit PPC440EPx */
36 resource_size_t mmio_base_phys; /* io base addr */
37 void __iomem *fb_base; /* remapped framebuffer */
38 void __iomem *mmio_base; /* remapped registers */
39 size_t mapped_vram; /* length of remapped vram */
40 size_t mmio_len; /* length of register region */
41
42 void __iomem *host; /* relocatable reg. bases */
43 void __iomem *i2c;
44 void __iomem *disp;
45 void __iomem *disp1;
46 void __iomem *cap;
47 void __iomem *cap1;
48 void __iomem *draw;
49 void __iomem *geo;
50 void __iomem *pio;
51 void __iomem *ctrl;
52 void __iomem *dram_ctrl;
53 void __iomem *wrback;
54
55 unsigned int irq;
56 unsigned int type; /* GDC type */
57 unsigned int refclk; /* disp. reference clock */
58 struct mb862xx_gc_mode *gc_mode; /* GDC mode init data */
59 int pre_init; /* don't init display if 1 */
60
61 u32 pseudo_palette[16];
62};
63
64#if defined(CONFIG_FB_MB862XX_LIME) && defined(CONFIG_FB_MB862XX_PCI_GDC)
65#error "Select Lime GDC or CoralP/Carmine support, but not both together"
66#endif
67#if defined(CONFIG_FB_MB862XX_LIME)
68#define gdc_read __raw_readl
69#define gdc_write __raw_writel
70#else
71#define gdc_read readl
72#define gdc_write writel
73#endif
74
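/* register access helpers: both expect a local variable 'par'
 * (struct mb862xxfb_par *) to be in scope */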
75#define inreg(type, off) \
76 gdc_read((par->type + (off)))
77
78#define outreg(type, off, val) \
79 gdc_write((val), (par->type + (off)))
80
81#define pack(a, b) (((a) << 16) | (b))
82
83#endif
diff --git a/drivers/video/omap/Makefile b/drivers/video/omap/Makefile
index 99da8b6d2c36..ed13889c1162 100644
--- a/drivers/video/omap/Makefile
+++ b/drivers/video/omap/Makefile
@@ -23,7 +23,6 @@ objs-y$(CONFIG_MACH_OMAP_PALMZ71) += lcd_palmz71.o
23objs-$(CONFIG_ARCH_OMAP16XX)$(CONFIG_MACH_OMAP_INNOVATOR) += lcd_inn1610.o 23objs-$(CONFIG_ARCH_OMAP16XX)$(CONFIG_MACH_OMAP_INNOVATOR) += lcd_inn1610.o
24objs-$(CONFIG_ARCH_OMAP15XX)$(CONFIG_MACH_OMAP_INNOVATOR) += lcd_inn1510.o 24objs-$(CONFIG_ARCH_OMAP15XX)$(CONFIG_MACH_OMAP_INNOVATOR) += lcd_inn1510.o
25objs-y$(CONFIG_MACH_OMAP_OSK) += lcd_osk.o 25objs-y$(CONFIG_MACH_OMAP_OSK) += lcd_osk.o
26objs-y$(CONFIG_MACH_SX1) += lcd_sx1.o
27 26
28omapfb-objs := $(objs-yy) 27omapfb-objs := $(objs-yy)
29 28
diff --git a/drivers/video/omap/lcd_sx1.c b/drivers/video/omap/lcd_sx1.c
deleted file mode 100644
index e55de201b8ff..000000000000
--- a/drivers/video/omap/lcd_sx1.c
+++ /dev/null
@@ -1,327 +0,0 @@
1/*
2 * LCD panel support for the Siemens SX1 mobile phone
3 *
4 * Current version : Vovan888@gmail.com, great help from FCA00000
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21#include <linux/module.h>
22#include <linux/platform_device.h>
23#include <linux/delay.h>
24#include <linux/io.h>
25
26#include <mach/gpio.h>
27#include <mach/omapfb.h>
28#include <mach/mcbsp.h>
29#include <mach/mux.h>
30
31/*
32 * OMAP310 GPIO registers
33 */
34#define GPIO_DATA_INPUT 0xfffce000
35#define GPIO_DATA_OUTPUT 0xfffce004
36#define GPIO_DIR_CONTROL 0xfffce008
37#define GPIO_INT_CONTROL 0xfffce00c
38#define GPIO_INT_MASK 0xfffce010
39#define GPIO_INT_STATUS 0xfffce014
40#define GPIO_PIN_CONTROL 0xfffce018
41
42
43#define A_LCD_SSC_RD 3
44#define A_LCD_SSC_SD 7
45#define _A_LCD_RESET 9
46#define _A_LCD_SSC_CS 12
47#define _A_LCD_SSC_A0 13
48
49#define DSP_REG 0xE1017024
50
51const unsigned char INIT_1[12] = {
52 0x1C, 0x02, 0x88, 0x00, 0x1E, 0xE0, 0x00, 0xDC, 0x00, 0x02, 0x00
53};
54
55const unsigned char INIT_2[127] = {
56 0x15, 0x00, 0x29, 0x00, 0x3E, 0x00, 0x51, 0x00,
57 0x65, 0x00, 0x7A, 0x00, 0x8D, 0x00, 0xA1, 0x00,
58 0xB6, 0x00, 0xC7, 0x00, 0xD8, 0x00, 0xEB, 0x00,
59 0xFB, 0x00, 0x0B, 0x01, 0x1B, 0x01, 0x27, 0x01,
60 0x34, 0x01, 0x41, 0x01, 0x4C, 0x01, 0x55, 0x01,
61 0x5F, 0x01, 0x68, 0x01, 0x70, 0x01, 0x78, 0x01,
62 0x7E, 0x01, 0x86, 0x01, 0x8C, 0x01, 0x94, 0x01,
63 0x9B, 0x01, 0xA1, 0x01, 0xA4, 0x01, 0xA9, 0x01,
64 0xAD, 0x01, 0xB2, 0x01, 0xB7, 0x01, 0xBC, 0x01,
65 0xC0, 0x01, 0xC4, 0x01, 0xC8, 0x01, 0xCB, 0x01,
66 0xCF, 0x01, 0xD2, 0x01, 0xD5, 0x01, 0xD8, 0x01,
67 0xDB, 0x01, 0xE0, 0x01, 0xE3, 0x01, 0xE6, 0x01,
68 0xE8, 0x01, 0xEB, 0x01, 0xEE, 0x01, 0xF1, 0x01,
69 0xF3, 0x01, 0xF8, 0x01, 0xF9, 0x01, 0xFC, 0x01,
70 0x00, 0x02, 0x03, 0x02, 0x07, 0x02, 0x09, 0x02,
71 0x0E, 0x02, 0x13, 0x02, 0x1C, 0x02, 0x00
72};
73
74const unsigned char INIT_3[15] = {
75 0x14, 0x26, 0x33, 0x3D, 0x45, 0x4D, 0x53, 0x59,
76 0x5E, 0x63, 0x67, 0x6D, 0x71, 0x78, 0xFF
77};
78
79static void epson_sendbyte(int flag, unsigned char byte)
80{
81 int i, shifter = 0x80;
82
83 if (!flag)
84 gpio_set_value(_A_LCD_SSC_A0, 0);
85 mdelay(2);
86 gpio_set_value(A_LCD_SSC_RD, 1);
87
88 gpio_set_value(A_LCD_SSC_SD, flag);
89
90 OMAP_MCBSP_WRITE(OMAP1510_MCBSP3_BASE, PCR0, 0x2200);
91 OMAP_MCBSP_WRITE(OMAP1510_MCBSP3_BASE, PCR0, 0x2202);
92 for (i = 0; i < 8; i++) {
93 OMAP_MCBSP_WRITE(OMAP1510_MCBSP3_BASE, PCR0, 0x2200);
94 gpio_set_value(A_LCD_SSC_SD, shifter & byte);
95 OMAP_MCBSP_WRITE(OMAP1510_MCBSP3_BASE, PCR0, 0x2202);
96 shifter >>= 1;
97 }
98 gpio_set_value(_A_LCD_SSC_A0, 1);
99}
100
101static void init_system(void)
102{
103 omap_mcbsp_request(OMAP_MCBSP3);
104 omap_mcbsp_stop(OMAP_MCBSP3);
105}
106
107static void setup_GPIO(void)
108{
109 /* new wave */
110 gpio_request(A_LCD_SSC_RD, "lcd_ssc_rd");
111 gpio_request(A_LCD_SSC_SD, "lcd_ssc_sd");
112 gpio_request(_A_LCD_RESET, "lcd_reset");
113 gpio_request(_A_LCD_SSC_CS, "lcd_ssc_cs");
114 gpio_request(_A_LCD_SSC_A0, "lcd_ssc_a0");
115
116 /* set GPIOs to output, with initial data */
117 gpio_direction_output(A_LCD_SSC_RD, 1);
118 gpio_direction_output(A_LCD_SSC_SD, 0);
119 gpio_direction_output(_A_LCD_RESET, 0);
120 gpio_direction_output(_A_LCD_SSC_CS, 1);
121 gpio_direction_output(_A_LCD_SSC_A0, 1);
122}
123
124static void display_init(void)
125{
126 int i;
127
128 omap_cfg_reg(MCBSP3_CLKX);
129
130 mdelay(2);
131 setup_GPIO();
132 mdelay(2);
133
134 /* reset LCD */
135 gpio_set_value(A_LCD_SSC_SD, 1);
136 epson_sendbyte(0, 0x25);
137
138 gpio_set_value(_A_LCD_RESET, 0);
139 mdelay(10);
140 gpio_set_value(_A_LCD_RESET, 1);
141
142 gpio_set_value(_A_LCD_SSC_CS, 1);
143 mdelay(2);
144 gpio_set_value(_A_LCD_SSC_CS, 0);
145
146 /* init LCD, phase 1 */
147 epson_sendbyte(0, 0xCA);
148 for (i = 0; i < 10; i++)
149 epson_sendbyte(1, INIT_1[i]);
150 gpio_set_value(_A_LCD_SSC_CS, 1);
151 gpio_set_value(_A_LCD_SSC_CS, 0);
152
153 /* init LCD phase 2 */
154 epson_sendbyte(0, 0xCB);
155 for (i = 0; i < 125; i++)
156 epson_sendbyte(1, INIT_2[i]);
157 gpio_set_value(_A_LCD_SSC_CS, 1);
158 gpio_set_value(_A_LCD_SSC_CS, 0);
159
160 /* init LCD phase 2a */
161 epson_sendbyte(0, 0xCC);
162 for (i = 0; i < 14; i++)
163 epson_sendbyte(1, INIT_3[i]);
164 gpio_set_value(_A_LCD_SSC_CS, 1);
165 gpio_set_value(_A_LCD_SSC_CS, 0);
166
167 /* init LCD phase 3 */
168 epson_sendbyte(0, 0xBC);
169 epson_sendbyte(1, 0x08);
170 gpio_set_value(_A_LCD_SSC_CS, 1);
171 gpio_set_value(_A_LCD_SSC_CS, 0);
172
173 /* init LCD phase 4 */
174 epson_sendbyte(0, 0x07);
175 epson_sendbyte(1, 0x05);
176 gpio_set_value(_A_LCD_SSC_CS, 1);
177 gpio_set_value(_A_LCD_SSC_CS, 0);
178
179 /* init LCD phase 5 */
180 epson_sendbyte(0, 0x94);
181 gpio_set_value(_A_LCD_SSC_CS, 1);
182 gpio_set_value(_A_LCD_SSC_CS, 0);
183
184 /* init LCD phase 6 */
185 epson_sendbyte(0, 0xC6);
186 epson_sendbyte(1, 0x80);
187 gpio_set_value(_A_LCD_SSC_CS, 1);
188 mdelay(100); /* used to be 1000 */
189 gpio_set_value(_A_LCD_SSC_CS, 0);
190
191 /* init LCD phase 7 */
192 epson_sendbyte(0, 0x16);
193 epson_sendbyte(1, 0x02);
194 epson_sendbyte(1, 0x00);
195 epson_sendbyte(1, 0xB1);
196 epson_sendbyte(1, 0x00);
197 gpio_set_value(_A_LCD_SSC_CS, 1);
198 gpio_set_value(_A_LCD_SSC_CS, 0);
199
200 /* init LCD phase 8 */
201 epson_sendbyte(0, 0x76);
202 epson_sendbyte(1, 0x00);
203 epson_sendbyte(1, 0x00);
204 epson_sendbyte(1, 0xDB);
205 epson_sendbyte(1, 0x00);
206 gpio_set_value(_A_LCD_SSC_CS, 1);
207 gpio_set_value(_A_LCD_SSC_CS, 0);
208
209 /* init LCD phase 9 */
210 epson_sendbyte(0, 0xAF);
211 gpio_set_value(_A_LCD_SSC_CS, 1);
212}
213
214static int sx1_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev)
215{
216 return 0;
217}
218
219static void sx1_panel_cleanup(struct lcd_panel *panel)
220{
221}
222
223static void sx1_panel_disable(struct lcd_panel *panel)
224{
225 printk(KERN_INFO "SX1: LCD panel disable\n");
226 sx1_setmmipower(0);
227 gpio_set_value(_A_LCD_SSC_CS, 1);
228
229 epson_sendbyte(0, 0x25);
230 gpio_set_value(_A_LCD_SSC_CS, 0);
231
232 epson_sendbyte(0, 0xAE);
233 gpio_set_value(_A_LCD_SSC_CS, 1);
234 mdelay(100);
235 gpio_set_value(_A_LCD_SSC_CS, 0);
236
237 epson_sendbyte(0, 0x95);
238 gpio_set_value(_A_LCD_SSC_CS, 1);
239}
240
241static int sx1_panel_enable(struct lcd_panel *panel)
242{
243 printk(KERN_INFO "lcd_sx1: LCD panel enable\n");
244 init_system();
245 display_init();
246
247 sx1_setmmipower(1);
248 sx1_setbacklight(0x18);
249 sx1_setkeylight (0x06);
250 return 0;
251}
252
253
254static unsigned long sx1_panel_get_caps(struct lcd_panel *panel)
255{
256 return 0;
257}
258
259struct lcd_panel sx1_panel = {
260 .name = "sx1",
261 .config = OMAP_LCDC_PANEL_TFT | OMAP_LCDC_INV_VSYNC |
262 OMAP_LCDC_INV_HSYNC | OMAP_LCDC_INV_PIX_CLOCK |
263 OMAP_LCDC_INV_OUTPUT_EN,
264
265 .x_res = 176,
266 .y_res = 220,
267 .data_lines = 16,
268 .bpp = 16,
269 .hsw = 5,
270 .hfp = 5,
271 .hbp = 5,
272 .vsw = 2,
273 .vfp = 1,
274 .vbp = 1,
275 .pixel_clock = 1500,
276
277 .init = sx1_panel_init,
278 .cleanup = sx1_panel_cleanup,
279 .enable = sx1_panel_enable,
280 .disable = sx1_panel_disable,
281 .get_caps = sx1_panel_get_caps,
282};
283
284static int sx1_panel_probe(struct platform_device *pdev)
285{
286 omapfb_register_panel(&sx1_panel);
287 return 0;
288}
289
290static int sx1_panel_remove(struct platform_device *pdev)
291{
292 return 0;
293}
294
295static int sx1_panel_suspend(struct platform_device *pdev, pm_message_t mesg)
296{
297 return 0;
298}
299
300static int sx1_panel_resume(struct platform_device *pdev)
301{
302 return 0;
303}
304
305struct platform_driver sx1_panel_driver = {
306 .probe = sx1_panel_probe,
307 .remove = sx1_panel_remove,
308 .suspend = sx1_panel_suspend,
309 .resume = sx1_panel_resume,
310 .driver = {
311 .name = "lcd_sx1",
312 .owner = THIS_MODULE,
313 },
314};
315
316static int sx1_panel_drv_init(void)
317{
318 return platform_driver_register(&sx1_panel_driver);
319}
320
321static void sx1_panel_drv_cleanup(void)
322{
323 platform_driver_unregister(&sx1_panel_driver);
324}
325
326module_init(sx1_panel_drv_init);
327module_exit(sx1_panel_drv_cleanup);
diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c
index 5a5e407dc45f..1a49519dafa4 100644
--- a/drivers/video/omap/omapfb_main.c
+++ b/drivers/video/omap/omapfb_main.c
@@ -392,7 +392,7 @@ static void set_fb_fix(struct fb_info *fbi)
392 int bpp; 392 int bpp;
393 393
394 rg = &plane->fbdev->mem_desc.region[plane->idx]; 394 rg = &plane->fbdev->mem_desc.region[plane->idx];
395 fbi->screen_base = (char __iomem *)rg->vaddr; 395 fbi->screen_base = rg->vaddr;
396 fix->smem_start = rg->paddr; 396 fix->smem_start = rg->paddr;
397 fix->smem_len = rg->size; 397 fix->smem_len = rg->size;
398 398
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c
index 97204497d9f7..cc59c52e1103 100644
--- a/drivers/video/pxafb.c
+++ b/drivers/video/pxafb.c
@@ -804,6 +804,9 @@ static int pxafb_smart_thread(void *arg)
804 804
805static int pxafb_smart_init(struct pxafb_info *fbi) 805static int pxafb_smart_init(struct pxafb_info *fbi)
806{ 806{
 807	if (!(fbi->lccr0 & LCCR0_LCDT))
808 return 0;
809
807 fbi->smart_thread = kthread_run(pxafb_smart_thread, fbi, 810 fbi->smart_thread = kthread_run(pxafb_smart_thread, fbi,
808 "lcd_refresh"); 811 "lcd_refresh");
809 if (IS_ERR(fbi->smart_thread)) { 812 if (IS_ERR(fbi->smart_thread)) {
@@ -1372,7 +1375,7 @@ static void pxafb_decode_mach_info(struct pxafb_info *fbi,
1372 fbi->cmap_inverse = inf->cmap_inverse; 1375 fbi->cmap_inverse = inf->cmap_inverse;
1373 fbi->cmap_static = inf->cmap_static; 1376 fbi->cmap_static = inf->cmap_static;
1374 1377
1375 switch (lcd_conn & 0xf) { 1378 switch (lcd_conn & LCD_TYPE_MASK) {
1376 case LCD_TYPE_MONO_STN: 1379 case LCD_TYPE_MONO_STN:
1377 fbi->lccr0 = LCCR0_CMS; 1380 fbi->lccr0 = LCCR0_CMS;
1378 break; 1381 break;
diff --git a/drivers/video/tmiofb.c b/drivers/video/tmiofb.c
index 2a380011e9ba..7baf2dd12d50 100644
--- a/drivers/video/tmiofb.c
+++ b/drivers/video/tmiofb.c
@@ -222,6 +222,9 @@ static irqreturn_t tmiofb_irq(int irq, void *__info)
222 unsigned int bbisc = tmio_ioread16(par->lcr + LCR_BBISC); 222 unsigned int bbisc = tmio_ioread16(par->lcr + LCR_BBISC);
223 223
224 224
225 tmio_iowrite16(bbisc, par->lcr + LCR_BBISC);
226
227#ifdef CONFIG_FB_TMIO_ACCELL
225 /* 228 /*
226 * We were in polling mode and now we got correct irq. 229 * We were in polling mode and now we got correct irq.
227 * Switch back to IRQ-based sync of command FIFO 230 * Switch back to IRQ-based sync of command FIFO
@@ -231,9 +234,6 @@ static irqreturn_t tmiofb_irq(int irq, void *__info)
231 par->use_polling = false; 234 par->use_polling = false;
232 } 235 }
233 236
234 tmio_iowrite16(bbisc, par->lcr + LCR_BBISC);
235
236#ifdef CONFIG_FB_TMIO_ACCELL
237 if (bbisc & 1) 237 if (bbisc & 1)
238 wake_up(&par->wait_acc); 238 wake_up(&par->wait_acc);
239#endif 239#endif
@@ -938,7 +938,9 @@ static void tmiofb_dump_regs(struct platform_device *dev)
938static int tmiofb_suspend(struct platform_device *dev, pm_message_t state) 938static int tmiofb_suspend(struct platform_device *dev, pm_message_t state)
939{ 939{
940 struct fb_info *info = platform_get_drvdata(dev); 940 struct fb_info *info = platform_get_drvdata(dev);
941#ifdef CONFIG_FB_TMIO_ACCELL
941 struct tmiofb_par *par = info->par; 942 struct tmiofb_par *par = info->par;
943#endif
942 struct mfd_cell *cell = dev->dev.platform_data; 944 struct mfd_cell *cell = dev->dev.platform_data;
943 int retval = 0; 945 int retval = 0;
944 946
@@ -950,12 +952,14 @@ static int tmiofb_suspend(struct platform_device *dev, pm_message_t state)
950 info->fbops->fb_sync(info); 952 info->fbops->fb_sync(info);
951 953
952 954
955#ifdef CONFIG_FB_TMIO_ACCELL
953 /* 956 /*
954 * The fb should be usable even if interrupts are disabled (and they are 957 * The fb should be usable even if interrupts are disabled (and they are
 955	 * during suspend/resume). Switch temporarily to forced polling.	 958
956 */ 959 */
957 printk(KERN_INFO "tmiofb: switching to polling\n"); 960 printk(KERN_INFO "tmiofb: switching to polling\n");
958 par->use_polling = true; 961 par->use_polling = true;
962#endif
959 tmiofb_hw_stop(dev); 963 tmiofb_hw_stop(dev);
960 964
961 if (cell->suspend) 965 if (cell->suspend)
diff --git a/drivers/video/via/global.h b/drivers/video/via/global.h
index 8e5263c5b812..7543d5f7e309 100644
--- a/drivers/video/via/global.h
+++ b/drivers/video/via/global.h
@@ -38,7 +38,6 @@
38#include "iface.h" 38#include "iface.h"
39#include "viafbdev.h" 39#include "viafbdev.h"
40#include "chip.h" 40#include "chip.h"
41#include "debug.h"
42#include "accel.h" 41#include "accel.h"
43#include "share.h" 42#include "share.h"
44#include "dvi.h" 43#include "dvi.h"
@@ -48,12 +47,10 @@
48 47
49#include "lcd.h" 48#include "lcd.h"
50#include "ioctl.h" 49#include "ioctl.h"
51#include "viamode.h"
52#include "via_utility.h" 50#include "via_utility.h"
53#include "vt1636.h" 51#include "vt1636.h"
54#include "tblDPASetting.h" 52#include "tblDPASetting.h"
55#include "tbl1636.h" 53#include "tbl1636.h"
56#include "viafbdev.h"
57 54
58/* External struct*/ 55/* External struct*/
59 56
diff --git a/drivers/video/via/viafbdev.c b/drivers/video/via/viafbdev.c
index 0132eae06f55..73ac754ad801 100644
--- a/drivers/video/via/viafbdev.c
+++ b/drivers/video/via/viafbdev.c
@@ -2036,30 +2036,30 @@ static int viafb_vt1636_proc_write(struct file *file,
2036 return count; 2036 return count;
2037} 2037}
2038 2038
2039static void viafb_init_proc(struct proc_dir_entry *viafb_entry) 2039static void viafb_init_proc(struct proc_dir_entry **viafb_entry)
2040{ 2040{
2041 struct proc_dir_entry *entry; 2041 struct proc_dir_entry *entry;
2042 viafb_entry = proc_mkdir("viafb", NULL); 2042 *viafb_entry = proc_mkdir("viafb", NULL);
2043 if (viafb_entry) { 2043 if (viafb_entry) {
2044 entry = create_proc_entry("dvp0", 0, viafb_entry); 2044 entry = create_proc_entry("dvp0", 0, *viafb_entry);
2045 if (entry) { 2045 if (entry) {
2046 entry->owner = THIS_MODULE; 2046 entry->owner = THIS_MODULE;
2047 entry->read_proc = viafb_dvp0_proc_read; 2047 entry->read_proc = viafb_dvp0_proc_read;
2048 entry->write_proc = viafb_dvp0_proc_write; 2048 entry->write_proc = viafb_dvp0_proc_write;
2049 } 2049 }
2050 entry = create_proc_entry("dvp1", 0, viafb_entry); 2050 entry = create_proc_entry("dvp1", 0, *viafb_entry);
2051 if (entry) { 2051 if (entry) {
2052 entry->owner = THIS_MODULE; 2052 entry->owner = THIS_MODULE;
2053 entry->read_proc = viafb_dvp1_proc_read; 2053 entry->read_proc = viafb_dvp1_proc_read;
2054 entry->write_proc = viafb_dvp1_proc_write; 2054 entry->write_proc = viafb_dvp1_proc_write;
2055 } 2055 }
2056 entry = create_proc_entry("dfph", 0, viafb_entry); 2056 entry = create_proc_entry("dfph", 0, *viafb_entry);
2057 if (entry) { 2057 if (entry) {
2058 entry->owner = THIS_MODULE; 2058 entry->owner = THIS_MODULE;
2059 entry->read_proc = viafb_dfph_proc_read; 2059 entry->read_proc = viafb_dfph_proc_read;
2060 entry->write_proc = viafb_dfph_proc_write; 2060 entry->write_proc = viafb_dfph_proc_write;
2061 } 2061 }
2062 entry = create_proc_entry("dfpl", 0, viafb_entry); 2062 entry = create_proc_entry("dfpl", 0, *viafb_entry);
2063 if (entry) { 2063 if (entry) {
2064 entry->owner = THIS_MODULE; 2064 entry->owner = THIS_MODULE;
2065 entry->read_proc = viafb_dfpl_proc_read; 2065 entry->read_proc = viafb_dfpl_proc_read;
@@ -2068,7 +2068,7 @@ static void viafb_init_proc(struct proc_dir_entry *viafb_entry)
2068 if (VT1636_LVDS == viaparinfo->chip_info->lvds_chip_info. 2068 if (VT1636_LVDS == viaparinfo->chip_info->lvds_chip_info.
2069 lvds_chip_name || VT1636_LVDS == 2069 lvds_chip_name || VT1636_LVDS ==
2070 viaparinfo->chip_info->lvds_chip_info2.lvds_chip_name) { 2070 viaparinfo->chip_info->lvds_chip_info2.lvds_chip_name) {
2071 entry = create_proc_entry("vt1636", 0, viafb_entry); 2071 entry = create_proc_entry("vt1636", 0, *viafb_entry);
2072 if (entry) { 2072 if (entry) {
2073 entry->owner = THIS_MODULE; 2073 entry->owner = THIS_MODULE;
2074 entry->read_proc = viafb_vt1636_proc_read; 2074 entry->read_proc = viafb_vt1636_proc_read;
@@ -2087,6 +2087,7 @@ static void viafb_remove_proc(struct proc_dir_entry *viafb_entry)
2087 remove_proc_entry("dfpl", viafb_entry); 2087 remove_proc_entry("dfpl", viafb_entry);
2088 remove_proc_entry("vt1636", viafb_entry); 2088 remove_proc_entry("vt1636", viafb_entry);
2089 remove_proc_entry("vt1625", viafb_entry); 2089 remove_proc_entry("vt1625", viafb_entry);
2090 remove_proc_entry("viafb", NULL);
2090} 2091}
2091 2092
2092static int __devinit via_pci_probe(void) 2093static int __devinit via_pci_probe(void)
@@ -2348,7 +2349,7 @@ static int __devinit via_pci_probe(void)
2348 viafbinfo->node, viafbinfo->fix.id, default_var.xres, 2349 viafbinfo->node, viafbinfo->fix.id, default_var.xres,
2349 default_var.yres, default_var.bits_per_pixel); 2350 default_var.yres, default_var.bits_per_pixel);
2350 2351
2351 viafb_init_proc(viaparinfo->proc_entry); 2352 viafb_init_proc(&viaparinfo->proc_entry);
2352 viafb_init_dac(IGA2); 2353 viafb_init_dac(IGA2);
2353 return 0; 2354 return 0;
2354} 2355}
diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c
index a463b3dd837b..2493f05e9f61 100644
--- a/drivers/video/xen-fbfront.c
+++ b/drivers/video/xen-fbfront.c
@@ -668,7 +668,7 @@ static struct xenbus_device_id xenfb_ids[] = {
668 { "" } 668 { "" }
669}; 669};
670 670
671static struct xenbus_driver xenfb = { 671static struct xenbus_driver xenfb_driver = {
672 .name = "vfb", 672 .name = "vfb",
673 .owner = THIS_MODULE, 673 .owner = THIS_MODULE,
674 .ids = xenfb_ids, 674 .ids = xenfb_ids,
@@ -687,12 +687,12 @@ static int __init xenfb_init(void)
687 if (xen_initial_domain()) 687 if (xen_initial_domain())
688 return -ENODEV; 688 return -ENODEV;
689 689
690 return xenbus_register_frontend(&xenfb); 690 return xenbus_register_frontend(&xenfb_driver);
691} 691}
692 692
693static void __exit xenfb_cleanup(void) 693static void __exit xenfb_cleanup(void)
694{ 694{
695 xenbus_unregister_driver(&xenfb); 695 xenbus_unregister_driver(&xenfb_driver);
696} 696}
697 697
698module_init(xenfb_init); 698module_init(xenfb_init);
diff --git a/drivers/video/xilinxfb.c b/drivers/video/xilinxfb.c
index 5da3d2423cc0..40a3a2afbfe7 100644
--- a/drivers/video/xilinxfb.c
+++ b/drivers/video/xilinxfb.c
@@ -298,8 +298,9 @@ static int xilinxfb_assign(struct device *dev, unsigned long physaddr,
298 298
299 /* Put a banner in the log (for DEBUG) */ 299 /* Put a banner in the log (for DEBUG) */
300 dev_dbg(dev, "regs: phys=%lx, virt=%p\n", physaddr, drvdata->regs); 300 dev_dbg(dev, "regs: phys=%lx, virt=%p\n", physaddr, drvdata->regs);
301 dev_dbg(dev, "fb: phys=%p, virt=%p, size=%x\n", 301 dev_dbg(dev, "fb: phys=%llx, virt=%p, size=%x\n",
302 (void*)drvdata->fb_phys, drvdata->fb_virt, fbsize); 302 (unsigned long long) drvdata->fb_phys, drvdata->fb_virt,
303 fbsize);
303 304
304 return 0; /* success */ 305 return 0; /* success */
305 306
diff --git a/drivers/w1/masters/Kconfig b/drivers/w1/masters/Kconfig
index c4493091c655..90616822cd20 100644
--- a/drivers/w1/masters/Kconfig
+++ b/drivers/w1/masters/Kconfig
@@ -36,7 +36,7 @@ config W1_MASTER_DS2482
36 36
37config W1_MASTER_DS1WM 37config W1_MASTER_DS1WM
38 tristate "Maxim DS1WM 1-wire busmaster" 38 tristate "Maxim DS1WM 1-wire busmaster"
39 depends on W1 && ARM 39 depends on W1 && ARM && HAVE_CLK
40 help 40 help
41 Say Y here to enable the DS1WM 1-wire driver, such as that 41 Say Y here to enable the DS1WM 1-wire driver, such as that
42 in HP iPAQ devices like h5xxx, h2200, and ASIC3-based like 42 in HP iPAQ devices like h5xxx, h2200, and ASIC3-based like
@@ -52,5 +52,12 @@ config W1_MASTER_GPIO
52 This support is also available as a module. If so, the module 52 This support is also available as a module. If so, the module
53 will be called w1-gpio.ko. 53 will be called w1-gpio.ko.
54 54
55config HDQ_MASTER_OMAP
56 tristate "OMAP HDQ driver"
57 depends on ARCH_OMAP2430 || ARCH_OMAP34XX
58 help
59 Say Y here if you want support for the 1-wire or HDQ Interface
60 on an OMAP processor.
61
55endmenu 62endmenu
56 63
diff --git a/drivers/w1/masters/Makefile b/drivers/w1/masters/Makefile
index 1420b5bbdda8..bc4714a75f3a 100644
--- a/drivers/w1/masters/Makefile
+++ b/drivers/w1/masters/Makefile
@@ -7,3 +7,4 @@ obj-$(CONFIG_W1_MASTER_DS2490) += ds2490.o
7obj-$(CONFIG_W1_MASTER_DS2482) += ds2482.o 7obj-$(CONFIG_W1_MASTER_DS2482) += ds2482.o
8obj-$(CONFIG_W1_MASTER_DS1WM) += ds1wm.o 8obj-$(CONFIG_W1_MASTER_DS1WM) += ds1wm.o
9obj-$(CONFIG_W1_MASTER_GPIO) += w1-gpio.o 9obj-$(CONFIG_W1_MASTER_GPIO) += w1-gpio.o
10obj-$(CONFIG_HDQ_MASTER_OMAP) += omap_hdq.o
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c
new file mode 100644
index 000000000000..c973889110c8
--- /dev/null
+++ b/drivers/w1/masters/omap_hdq.c
@@ -0,0 +1,725 @@
1/*
2 * drivers/w1/masters/omap_hdq.c
3 *
4 * Copyright (C) 2007 Texas Instruments, Inc.
5 *
6 * This file is licensed under the terms of the GNU General Public License
7 * version 2. This program is licensed "as is" without any warranty of any
8 * kind, whether express or implied.
9 *
10 */
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/platform_device.h>
14#include <linux/interrupt.h>
15#include <linux/err.h>
16#include <linux/clk.h>
17#include <linux/io.h>
18
19#include <asm/irq.h>
20#include <mach/hardware.h>
21
22#include "../w1.h"
23#include "../w1_int.h"
24
25#define MOD_NAME "OMAP_HDQ:"
26
27#define OMAP_HDQ_REVISION 0x00
28#define OMAP_HDQ_TX_DATA 0x04
29#define OMAP_HDQ_RX_DATA 0x08
30#define OMAP_HDQ_CTRL_STATUS 0x0c
31#define OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK (1<<6)
32#define OMAP_HDQ_CTRL_STATUS_CLOCKENABLE (1<<5)
33#define OMAP_HDQ_CTRL_STATUS_GO (1<<4)
34#define OMAP_HDQ_CTRL_STATUS_INITIALIZATION (1<<2)
35#define OMAP_HDQ_CTRL_STATUS_DIR (1<<1)
36#define OMAP_HDQ_CTRL_STATUS_MODE (1<<0)
37#define OMAP_HDQ_INT_STATUS 0x10
38#define OMAP_HDQ_INT_STATUS_TXCOMPLETE (1<<2)
39#define OMAP_HDQ_INT_STATUS_RXCOMPLETE (1<<1)
40#define OMAP_HDQ_INT_STATUS_TIMEOUT (1<<0)
41#define OMAP_HDQ_SYSCONFIG 0x14
42#define OMAP_HDQ_SYSCONFIG_SOFTRESET (1<<1)
43#define OMAP_HDQ_SYSCONFIG_AUTOIDLE (1<<0)
44#define OMAP_HDQ_SYSSTATUS 0x18
45#define OMAP_HDQ_SYSSTATUS_RESETDONE (1<<0)
46
47#define OMAP_HDQ_FLAG_CLEAR 0
48#define OMAP_HDQ_FLAG_SET 1
49#define OMAP_HDQ_TIMEOUT (HZ/5)
50
51#define OMAP_HDQ_MAX_USER 4
52
53static DECLARE_WAIT_QUEUE_HEAD(hdq_wait_queue);
54static int w1_id;
55
56struct hdq_data {
57 struct device *dev;
58 void __iomem *hdq_base;
59 /* lock status update */
60 struct mutex hdq_mutex;
61 int hdq_usecount;
62 struct clk *hdq_ick;
63 struct clk *hdq_fck;
64 u8 hdq_irqstatus;
65 /* device lock */
66 spinlock_t hdq_spinlock;
67 /*
68 * Used to control the call to omap_hdq_get and omap_hdq_put.
69 * HDQ Protocol: Write the CMD|REG_address first, followed by
 70	 * the data write or read.
71 */
72 int init_trans;
73};
74
75static int __init omap_hdq_probe(struct platform_device *pdev);
76static int omap_hdq_remove(struct platform_device *pdev);
77
78static struct platform_driver omap_hdq_driver = {
79 .probe = omap_hdq_probe,
80 .remove = omap_hdq_remove,
81 .driver = {
82 .name = "omap_hdq",
83 },
84};
85
86static u8 omap_w1_read_byte(void *_hdq);
87static void omap_w1_write_byte(void *_hdq, u8 byte);
88static u8 omap_w1_reset_bus(void *_hdq);
89static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev,
90 u8 search_type, w1_slave_found_callback slave_found);
91
92
93static struct w1_bus_master omap_w1_master = {
94 .read_byte = omap_w1_read_byte,
95 .write_byte = omap_w1_write_byte,
96 .reset_bus = omap_w1_reset_bus,
97 .search = omap_w1_search_bus,
98};
99
100/* HDQ register I/O routines */
101static inline u8 hdq_reg_in(struct hdq_data *hdq_data, u32 offset)
102{
103 return __raw_readb(hdq_data->hdq_base + offset);
104}
105
106static inline void hdq_reg_out(struct hdq_data *hdq_data, u32 offset, u8 val)
107{
108 __raw_writeb(val, hdq_data->hdq_base + offset);
109}
110
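/* read-modify-write helper: update only the bits selected by mask and
 * return the new register value */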
111static inline u8 hdq_reg_merge(struct hdq_data *hdq_data, u32 offset,
112 u8 val, u8 mask)
113{
114 u8 new_val = (__raw_readb(hdq_data->hdq_base + offset) & ~mask)
115 | (val & mask);
116 __raw_writeb(new_val, hdq_data->hdq_base + offset);
117
118 return new_val;
119}
120
121/*
122 * Wait for one or more bits in flag change.
123 * HDQ_FLAG_SET: wait until any bit in the flag is set.
124 * HDQ_FLAG_CLEAR: wait until all bits in the flag are cleared.
125 * return 0 on success and -ETIMEDOUT in the case of timeout.
126 */
127static int hdq_wait_for_flag(struct hdq_data *hdq_data, u32 offset,
128 u8 flag, u8 flag_set, u8 *status)
129{
130 int ret = 0;
131 unsigned long timeout = jiffies + OMAP_HDQ_TIMEOUT;
132
133 if (flag_set == OMAP_HDQ_FLAG_CLEAR) {
134 /* wait for the flag clear */
135 while (((*status = hdq_reg_in(hdq_data, offset)) & flag)
136 && time_before(jiffies, timeout)) {
137 schedule_timeout_uninterruptible(1);
138 }
139 if (*status & flag)
140 ret = -ETIMEDOUT;
141 } else if (flag_set == OMAP_HDQ_FLAG_SET) {
142 /* wait for the flag set */
143 while (!((*status = hdq_reg_in(hdq_data, offset)) & flag)
144 && time_before(jiffies, timeout)) {
145 schedule_timeout_uninterruptible(1);
146 }
147 if (!(*status & flag))
148 ret = -ETIMEDOUT;
149 } else
150 return -EINVAL;
151
152 return ret;
153}
154
155/* write out a byte and fill *status with HDQ_INT_STATUS */
156static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status)
157{
158 int ret;
159 u8 tmp_status;
160 unsigned long irqflags;
161
162 *status = 0;
163
164 spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
165 /* clear interrupt flags via a dummy read */
166 hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
167 /* ISR loads it with new INT_STATUS */
168 hdq_data->hdq_irqstatus = 0;
169 spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
170
171 hdq_reg_out(hdq_data, OMAP_HDQ_TX_DATA, val);
172
173 /* set the GO bit */
174 hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_GO,
175 OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
176 /* wait for the TXCOMPLETE bit */
177 ret = wait_event_timeout(hdq_wait_queue,
178 hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
179 if (ret == 0) {
180 dev_dbg(hdq_data->dev, "TX wait elapsed\n");
181 goto out;
182 }
183
184 *status = hdq_data->hdq_irqstatus;
185 /* check irqstatus */
186 if (!(*status & OMAP_HDQ_INT_STATUS_TXCOMPLETE)) {
 187		dev_dbg(hdq_data->dev, "timeout waiting for "
188 "TXCOMPLETE/RXCOMPLETE, %x", *status);
189 ret = -ETIMEDOUT;
190 goto out;
191 }
192
193 /* wait for the GO bit return to zero */
194 ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
195 OMAP_HDQ_CTRL_STATUS_GO,
196 OMAP_HDQ_FLAG_CLEAR, &tmp_status);
197 if (ret) {
 198		dev_dbg(hdq_data->dev, "timeout waiting for GO bit "
 199			"to return to zero, %x", tmp_status);
200 }
201
202out:
203 return ret;
204}
205
206/* HDQ Interrupt service routine */
207static irqreturn_t hdq_isr(int irq, void *_hdq)
208{
209 struct hdq_data *hdq_data = _hdq;
210 unsigned long irqflags;
211
212 spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
213 hdq_data->hdq_irqstatus = hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
214 spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
215 dev_dbg(hdq_data->dev, "hdq_isr: %x", hdq_data->hdq_irqstatus);
216
217 if (hdq_data->hdq_irqstatus &
218 (OMAP_HDQ_INT_STATUS_TXCOMPLETE | OMAP_HDQ_INT_STATUS_RXCOMPLETE
219 | OMAP_HDQ_INT_STATUS_TIMEOUT)) {
220 /* wake up sleeping process */
221 wake_up(&hdq_wait_queue);
222 }
223
224 return IRQ_HANDLED;
225}
226
227/* HDQ Mode: always return success */
228static u8 omap_w1_reset_bus(void *_hdq)
229{
230 return 0;
231}
232
233/* W1 search callback function */
234static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev,
235 u8 search_type, w1_slave_found_callback slave_found)
236{
237 u64 module_id, rn_le, cs, id;
238
239 if (w1_id)
240 module_id = w1_id;
241 else
242 module_id = 0x1;
243
244 rn_le = cpu_to_le64(module_id);
245 /*
246 * HDQ might not obey truly the 1-wire spec.
247 * So calculate CRC based on module parameter.
248 */
249 cs = w1_calc_crc8((u8 *)&rn_le, 7);
250 id = (cs << 56) | module_id;
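	/* assemble the 64-bit ROM id: CRC8 in the top byte, module id in the low bytes */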
251
252 slave_found(master_dev, id);
253}
254
255static int _omap_hdq_reset(struct hdq_data *hdq_data)
256{
257 int ret;
258 u8 tmp_status;
259
260 hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG, OMAP_HDQ_SYSCONFIG_SOFTRESET);
261 /*
262 * Select HDQ mode & enable clocks.
263 * It is observed that INT flags can't be cleared via a read and GO/INIT
264 * won't return to zero if interrupt is disabled. So we always enable
265 * interrupt.
266 */
267 hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
268 OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
269 OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
270
271 /* wait for reset to complete */
272 ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_SYSSTATUS,
273 OMAP_HDQ_SYSSTATUS_RESETDONE, OMAP_HDQ_FLAG_SET, &tmp_status);
274 if (ret)
275 dev_dbg(hdq_data->dev, "timeout waiting HDQ reset, %x",
276 tmp_status);
277 else {
278 hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
279 OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
280 OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
281 hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
282 OMAP_HDQ_SYSCONFIG_AUTOIDLE);
283 }
284
285 return ret;
286}
287
288/* Issue break pulse to the device */
289static int omap_hdq_break(struct hdq_data *hdq_data)
290{
291 int ret = 0;
292 u8 tmp_status;
293 unsigned long irqflags;
294
295 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
296 if (ret < 0) {
297 dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
298 ret = -EINTR;
299 goto rtn;
300 }
301
302 spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
303 /* clear interrupt flags via a dummy read */
304 hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
305 /* ISR loads it with new INT_STATUS */
306 hdq_data->hdq_irqstatus = 0;
307 spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
308
309 /* set the INIT and GO bit */
310 hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
311 OMAP_HDQ_CTRL_STATUS_INITIALIZATION | OMAP_HDQ_CTRL_STATUS_GO,
312 OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
313 OMAP_HDQ_CTRL_STATUS_GO);
314
315 /* wait for the TIMEOUT bit */
316 ret = wait_event_timeout(hdq_wait_queue,
317 hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
318 if (ret == 0) {
319 dev_dbg(hdq_data->dev, "break wait elapsed\n");
320 ret = -EINTR;
321 goto out;
322 }
323
324 tmp_status = hdq_data->hdq_irqstatus;
325 /* check irqstatus */
326 if (!(tmp_status & OMAP_HDQ_INT_STATUS_TIMEOUT)) {
327 dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x",
328 tmp_status);
329 ret = -ETIMEDOUT;
330 goto out;
331 }
332 /*
333	 * Wait for both the INIT and GO bits to return to zero.
334	 * Zero wait time is expected in interrupt mode.
335 */
336 ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
337 OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
338 OMAP_HDQ_CTRL_STATUS_GO, OMAP_HDQ_FLAG_CLEAR,
339 &tmp_status);
340 if (ret)
341		dev_dbg(hdq_data->dev, "timeout waiting for INIT and GO bits to "
342			"return to zero, %x", tmp_status);
343
344out:
345 mutex_unlock(&hdq_data->hdq_mutex);
346rtn:
347 return ret;
348}
349
350static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val)
351{
352 int ret = 0;
353 u8 status;
354 unsigned long timeout = jiffies + OMAP_HDQ_TIMEOUT;
355
356 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
357 if (ret < 0) {
358 ret = -EINTR;
359 goto rtn;
360 }
361
362 if (!hdq_data->hdq_usecount) {
363 ret = -EINVAL;
364 goto out;
365 }
366
367 if (!(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
368 hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
369 OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO,
370 OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
371 /*
372 * The RX comes immediately after TX. It
373 * triggers another interrupt before we
374 * sleep. So we have to wait for RXCOMPLETE bit.
375 */
376 while (!(hdq_data->hdq_irqstatus
377 & OMAP_HDQ_INT_STATUS_RXCOMPLETE)
378 && time_before(jiffies, timeout)) {
379 schedule_timeout_uninterruptible(1);
380 }
381 hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, 0,
382 OMAP_HDQ_CTRL_STATUS_DIR);
383 status = hdq_data->hdq_irqstatus;
384 /* check irqstatus */
385 if (!(status & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
386			dev_dbg(hdq_data->dev, "timeout waiting for "
387 "RXCOMPLETE, %x", status);
388 ret = -ETIMEDOUT;
389 goto out;
390 }
391 }
392 /* the data is ready. Read it in! */
393 *val = hdq_reg_in(hdq_data, OMAP_HDQ_RX_DATA);
394out:
395 mutex_unlock(&hdq_data->hdq_mutex);
396rtn:
397	return ret;
398
399}
400
401/* Enable clocks and set the controller to HDQ mode */
402static int omap_hdq_get(struct hdq_data *hdq_data)
403{
404 int ret = 0;
405
406 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
407 if (ret < 0) {
408 ret = -EINTR;
409 goto rtn;
410 }
411
412 if (OMAP_HDQ_MAX_USER == hdq_data->hdq_usecount) {
413 dev_dbg(hdq_data->dev, "attempt to exceed the max use count");
414 ret = -EINVAL;
415 goto out;
416 } else {
417 hdq_data->hdq_usecount++;
418 try_module_get(THIS_MODULE);
419 if (1 == hdq_data->hdq_usecount) {
420 if (clk_enable(hdq_data->hdq_ick)) {
421 dev_dbg(hdq_data->dev, "Can not enable ick\n");
422 ret = -ENODEV;
423 goto clk_err;
424 }
425 if (clk_enable(hdq_data->hdq_fck)) {
426 dev_dbg(hdq_data->dev, "Can not enable fck\n");
427 clk_disable(hdq_data->hdq_ick);
428 ret = -ENODEV;
429 goto clk_err;
430 }
431
432 /* make sure HDQ is out of reset */
433 if (!(hdq_reg_in(hdq_data, OMAP_HDQ_SYSSTATUS) &
434 OMAP_HDQ_SYSSTATUS_RESETDONE)) {
435 ret = _omap_hdq_reset(hdq_data);
436 if (ret)
437 /* back up the count */
438 hdq_data->hdq_usecount--;
439 } else {
440 /* select HDQ mode & enable clocks */
441 hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
442 OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
443 OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
444 hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
445 OMAP_HDQ_SYSCONFIG_AUTOIDLE);
446 hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
447 }
448 }
449 }
450
451clk_err:
452 clk_put(hdq_data->hdq_ick);
453 clk_put(hdq_data->hdq_fck);
454out:
455 mutex_unlock(&hdq_data->hdq_mutex);
456rtn:
457 return ret;
458}
459
460/* Disable clocks to the module */
461static int omap_hdq_put(struct hdq_data *hdq_data)
462{
463 int ret = 0;
464
465 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
466 if (ret < 0)
467 return -EINTR;
468
469 if (0 == hdq_data->hdq_usecount) {
470		dev_dbg(hdq_data->dev, "attempt to decrement use count "
471 "when it is zero");
472 ret = -EINVAL;
473 } else {
474 hdq_data->hdq_usecount--;
475 module_put(THIS_MODULE);
476 if (0 == hdq_data->hdq_usecount) {
477 clk_disable(hdq_data->hdq_ick);
478 clk_disable(hdq_data->hdq_fck);
479 }
480 }
481 mutex_unlock(&hdq_data->hdq_mutex);
482
483 return ret;
484}
485
486/* Read a byte of data from the device */
487static u8 omap_w1_read_byte(void *_hdq)
488{
489 struct hdq_data *hdq_data = _hdq;
490 u8 val = 0;
491 int ret;
492
493 ret = hdq_read_byte(hdq_data, &val);
494 if (ret) {
495 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
496 if (ret < 0) {
497 dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
498 return -EINTR;
499 }
500 hdq_data->init_trans = 0;
501 mutex_unlock(&hdq_data->hdq_mutex);
502 omap_hdq_put(hdq_data);
503 return -1;
504 }
505
506 /* Write followed by a read, release the module */
507 if (hdq_data->init_trans) {
508 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
509 if (ret < 0) {
510 dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
511 return -EINTR;
512 }
513 hdq_data->init_trans = 0;
514 mutex_unlock(&hdq_data->hdq_mutex);
515 omap_hdq_put(hdq_data);
516 }
517
518 return val;
519}
520
521/* Write a byte of data to the device */
522static void omap_w1_write_byte(void *_hdq, u8 byte)
523{
524 struct hdq_data *hdq_data = _hdq;
525 int ret;
526 u8 status;
527
528 /* First write to initialize the transfer */
529 if (hdq_data->init_trans == 0)
530 omap_hdq_get(hdq_data);
531
532 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
533 if (ret < 0) {
534 dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
535 return;
536 }
537 hdq_data->init_trans++;
538 mutex_unlock(&hdq_data->hdq_mutex);
539
540 ret = hdq_write_byte(hdq_data, byte, &status);
541	if (ret < 0) {
542		dev_dbg(hdq_data->dev, "TX failure: Ctrl status %x\n", status);
543 return;
544 }
545
546	/* Second write, data transferred. Release the module */
547 if (hdq_data->init_trans > 1) {
548 omap_hdq_put(hdq_data);
549 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
550 if (ret < 0) {
551 dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
552 return;
553 }
554 hdq_data->init_trans = 0;
555 mutex_unlock(&hdq_data->hdq_mutex);
556 }
557
558 return;
559}
560
561static int __init omap_hdq_probe(struct platform_device *pdev)
562{
563 struct hdq_data *hdq_data;
564 struct resource *res;
565 int ret, irq;
566 u8 rev;
567
568 hdq_data = kmalloc(sizeof(*hdq_data), GFP_KERNEL);
569 if (!hdq_data) {
570 dev_dbg(&pdev->dev, "unable to allocate memory\n");
571 ret = -ENOMEM;
572 goto err_kmalloc;
573 }
574
575 hdq_data->dev = &pdev->dev;
576 platform_set_drvdata(pdev, hdq_data);
577
578 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
579 if (!res) {
580 dev_dbg(&pdev->dev, "unable to get resource\n");
581 ret = -ENXIO;
582 goto err_resource;
583 }
584
585 hdq_data->hdq_base = ioremap(res->start, SZ_4K);
586 if (!hdq_data->hdq_base) {
587 dev_dbg(&pdev->dev, "ioremap failed\n");
588 ret = -EINVAL;
589 goto err_ioremap;
590 }
591
592 /* get interface & functional clock objects */
593 hdq_data->hdq_ick = clk_get(&pdev->dev, "hdq_ick");
594 hdq_data->hdq_fck = clk_get(&pdev->dev, "hdq_fck");
595
596 if (IS_ERR(hdq_data->hdq_ick) || IS_ERR(hdq_data->hdq_fck)) {
597 dev_dbg(&pdev->dev, "Can't get HDQ clock objects\n");
598 if (IS_ERR(hdq_data->hdq_ick)) {
599 ret = PTR_ERR(hdq_data->hdq_ick);
600 goto err_clk;
601 }
602 if (IS_ERR(hdq_data->hdq_fck)) {
603 ret = PTR_ERR(hdq_data->hdq_fck);
604 clk_put(hdq_data->hdq_ick);
605 goto err_clk;
606 }
607 }
608
609 hdq_data->hdq_usecount = 0;
610 mutex_init(&hdq_data->hdq_mutex);
611
612 if (clk_enable(hdq_data->hdq_ick)) {
613 dev_dbg(&pdev->dev, "Can not enable ick\n");
614 ret = -ENODEV;
615 goto err_intfclk;
616 }
617
618 if (clk_enable(hdq_data->hdq_fck)) {
619 dev_dbg(&pdev->dev, "Can not enable fck\n");
620 ret = -ENODEV;
621 goto err_fnclk;
622 }
623
624 rev = hdq_reg_in(hdq_data, OMAP_HDQ_REVISION);
625 dev_info(&pdev->dev, "OMAP HDQ Hardware Rev %c.%c. Driver in %s mode\n",
626 (rev >> 4) + '0', (rev & 0x0f) + '0', "Interrupt");
627
628 spin_lock_init(&hdq_data->hdq_spinlock);
629
630 irq = platform_get_irq(pdev, 0);
631 if (irq < 0) {
632 ret = -ENXIO;
633 goto err_irq;
634 }
635
636 ret = request_irq(irq, hdq_isr, IRQF_DISABLED, "omap_hdq", hdq_data);
637 if (ret < 0) {
638 dev_dbg(&pdev->dev, "could not request irq\n");
639 goto err_irq;
640 }
641
642 omap_hdq_break(hdq_data);
643
644 /* don't clock the HDQ until it is needed */
645 clk_disable(hdq_data->hdq_ick);
646 clk_disable(hdq_data->hdq_fck);
647
648 omap_w1_master.data = hdq_data;
649
650 ret = w1_add_master_device(&omap_w1_master);
651 if (ret) {
652 dev_dbg(&pdev->dev, "Failure in registering w1 master\n");
653 goto err_w1;
654 }
655
656 return 0;
657
658err_w1:
659err_irq:
660 clk_disable(hdq_data->hdq_fck);
661
662err_fnclk:
663 clk_disable(hdq_data->hdq_ick);
664
665err_intfclk:
666 clk_put(hdq_data->hdq_ick);
667 clk_put(hdq_data->hdq_fck);
668
669err_clk:
670 iounmap(hdq_data->hdq_base);
671
672err_ioremap:
673err_resource:
674 platform_set_drvdata(pdev, NULL);
675 kfree(hdq_data);
676
677err_kmalloc:
678 return ret;
679
680}
681
682static int omap_hdq_remove(struct platform_device *pdev)
683{
684 struct hdq_data *hdq_data = platform_get_drvdata(pdev);
685
686 mutex_lock(&hdq_data->hdq_mutex);
687
688 if (hdq_data->hdq_usecount) {
689 dev_dbg(&pdev->dev, "removed when use count is not zero\n");
690 return -EBUSY;
691 }
692
693 mutex_unlock(&hdq_data->hdq_mutex);
694
695 /* remove module dependency */
696 clk_put(hdq_data->hdq_ick);
697 clk_put(hdq_data->hdq_fck);
698 free_irq(INT_24XX_HDQ_IRQ, hdq_data);
699 platform_set_drvdata(pdev, NULL);
700 iounmap(hdq_data->hdq_base);
701 kfree(hdq_data);
702
703 return 0;
704}
705
706static int __init
707omap_hdq_init(void)
708{
709 return platform_driver_register(&omap_hdq_driver);
710}
711module_init(omap_hdq_init);
712
713static void __exit
714omap_hdq_exit(void)
715{
716 platform_driver_unregister(&omap_hdq_driver);
717}
718module_exit(omap_hdq_exit);
719
720module_param(w1_id, int, S_IRUSR);
721MODULE_PARM_DESC(w1_id, "1-wire id for the slave detection");
722
723MODULE_AUTHOR("Texas Instruments");
724MODULE_DESCRIPTION("HDQ driver library");
725MODULE_LICENSE("GPL");
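For reference, a minimal userspace sketch of how the omap_w1_search_bus() callback above fabricates the 64-bit slave id it reports to the w1 core, e.g. after loading the master with "modprobe omap_hdq w1_id=1". This is an illustration only: it assumes w1_calc_crc8() implements the usual Dallas/Maxim CRC-8 (polynomial x^8 + x^5 + x^4 + 1, LSB first) and a little-endian host where cpu_to_le64() is the identity; dallas_crc8() is a stand-in name, not a kernel symbol.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's w1_calc_crc8(): Dallas/Maxim CRC-8, LSB first */
static uint8_t dallas_crc8(const uint8_t *buf, int len)
{
	uint8_t crc = 0;

	while (len--) {
		uint8_t byte = *buf++;
		int i;

		for (i = 0; i < 8; i++) {
			uint8_t mix = (crc ^ byte) & 0x01;

			crc >>= 1;
			if (mix)
				crc ^= 0x8C;	/* reflected polynomial */
			byte >>= 1;
		}
	}
	return crc;
}

int main(void)
{
	uint64_t module_id = 0x1;	/* default used when w1_id is not set */
	uint64_t rn_le = module_id;	/* cpu_to_le64() is a no-op here */
	uint64_t cs = dallas_crc8((const uint8_t *)&rn_le, 7);
	uint64_t id = (cs << 56) | module_id;

	printf("fabricated slave id: %016llx\n", (unsigned long long)id);
	return 0;
}

The low byte of this fabricated id is the w1 family code, so with the default w1_id of 1 it matches the family id 1 registered by the bq27000 slave driver added below, and the slave binds without a real ROM search.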
diff --git a/drivers/w1/slaves/Kconfig b/drivers/w1/slaves/Kconfig
index 3df29a122f84..8d0b1fb1e52e 100644
--- a/drivers/w1/slaves/Kconfig
+++ b/drivers/w1/slaves/Kconfig
@@ -44,4 +44,11 @@ config W1_SLAVE_DS2760
44 44
45 If you are unsure, say N. 45 If you are unsure, say N.
46 46
47config W1_SLAVE_BQ27000
48 tristate "BQ27000 slave support"
49 depends on W1
50 help
51	  Say Y here if you want HDQ/1-wire slave
52	  support for the bq27000 battery monitor chip.
53
47endmenu 54endmenu
diff --git a/drivers/w1/slaves/Makefile b/drivers/w1/slaves/Makefile
index a8eb7524df1d..990f400b6d22 100644
--- a/drivers/w1/slaves/Makefile
+++ b/drivers/w1/slaves/Makefile
@@ -6,4 +6,4 @@ obj-$(CONFIG_W1_SLAVE_THERM) += w1_therm.o
6obj-$(CONFIG_W1_SLAVE_SMEM) += w1_smem.o 6obj-$(CONFIG_W1_SLAVE_SMEM) += w1_smem.o
7obj-$(CONFIG_W1_SLAVE_DS2433) += w1_ds2433.o 7obj-$(CONFIG_W1_SLAVE_DS2433) += w1_ds2433.o
8obj-$(CONFIG_W1_SLAVE_DS2760) += w1_ds2760.o 8obj-$(CONFIG_W1_SLAVE_DS2760) += w1_ds2760.o
9 9obj-$(CONFIG_W1_SLAVE_BQ27000) += w1_bq27000.o
diff --git a/drivers/w1/slaves/w1_bq27000.c b/drivers/w1/slaves/w1_bq27000.c
new file mode 100644
index 000000000000..8f4c91f6c680
--- /dev/null
+++ b/drivers/w1/slaves/w1_bq27000.c
@@ -0,0 +1,123 @@
1/*
2 * drivers/w1/slaves/w1_bq27000.c
3 *
4 * Copyright (C) 2007 Texas Instruments, Inc.
5 *
6 * This file is licensed under the terms of the GNU General Public License
7 * version 2. This program is licensed "as is" without any warranty of any
8 * kind, whether express or implied.
9 *
10 */
11
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/device.h>
15#include <linux/types.h>
16#include <linux/platform_device.h>
17#include <linux/mutex.h>
18
19#include "../w1.h"
20#include "../w1_int.h"
21#include "../w1_family.h"
22
23#define HDQ_CMD_READ (0)
24#define HDQ_CMD_WRITE (1<<7)
25
26static int F_ID;
27
28void w1_bq27000_write(struct device *dev, u8 buf, u8 reg)
29{
30 struct w1_slave *sl = container_of(dev, struct w1_slave, dev);
31
32 if (!dev) {
33 pr_info("Could not obtain slave dev ptr\n");
34 return;
35 }
36
37 w1_write_8(sl->master, HDQ_CMD_WRITE | reg);
38 w1_write_8(sl->master, buf);
39}
40EXPORT_SYMBOL(w1_bq27000_write);
41
42int w1_bq27000_read(struct device *dev, u8 reg)
43{
44 u8 val;
45 struct w1_slave *sl = container_of(dev, struct w1_slave, dev);
46
47 if (!dev)
48 return 0;
49
50 w1_write_8(sl->master, HDQ_CMD_READ | reg);
51 val = w1_read_8(sl->master);
52
53 return val;
54}
55EXPORT_SYMBOL(w1_bq27000_read);
56
57static int w1_bq27000_add_slave(struct w1_slave *sl)
58{
59 int ret;
60 int id = 1;
61 struct platform_device *pdev;
62
63 pdev = platform_device_alloc("bq27000-battery", id);
64 if (!pdev) {
65 ret = -ENOMEM;
66 return ret;
67 }
68 pdev->dev.parent = &sl->dev;
69
70 ret = platform_device_add(pdev);
71 if (ret)
72 goto pdev_add_failed;
73
74 dev_set_drvdata(&sl->dev, pdev);
75
76 goto success;
77
78pdev_add_failed:
79	platform_device_put(pdev);
80success:
81 return ret;
82}
83
84static void w1_bq27000_remove_slave(struct w1_slave *sl)
85{
86 struct platform_device *pdev = dev_get_drvdata(&sl->dev);
87
88 platform_device_unregister(pdev);
89}
90
91static struct w1_family_ops w1_bq27000_fops = {
92 .add_slave = w1_bq27000_add_slave,
93 .remove_slave = w1_bq27000_remove_slave,
94};
95
96static struct w1_family w1_bq27000_family = {
97 .fid = 1,
98 .fops = &w1_bq27000_fops,
99};
100
101static int __init w1_bq27000_init(void)
102{
103 if (F_ID)
104 w1_bq27000_family.fid = F_ID;
105
106 return w1_register_family(&w1_bq27000_family);
107}
108
109static void __exit w1_bq27000_exit(void)
110{
111 w1_unregister_family(&w1_bq27000_family);
112}
113
114
115module_init(w1_bq27000_init);
116module_exit(w1_bq27000_exit);
117
118module_param(F_ID, int, S_IRUSR);
119MODULE_PARM_DESC(F_ID, "1-wire slave FID for BQ device");
120
121MODULE_LICENSE("GPL");
122MODULE_AUTHOR("Texas Instruments Ltd");
123MODULE_DESCRIPTION("HDQ/1-wire slave driver for the bq27000 battery monitor chip");
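To make the intended use of the two exported helpers concrete, here is a hedged sketch (not part of this patch) of how a platform driver bound to the "bq27000-battery" device created in w1_bq27000_add_slave() might read a 16-bit register pair. The 0x08/0x09 voltage register offsets are an assumption taken from the bq27000 datasheet, pdev->dev.parent relies on the parent assignment made above, and since the patch adds no header for these exports a real consumer would also need its own extern declarations.

/* Hypothetical consumer; assumes extern declarations for the helpers above */
static int bq27000_read_word(struct device *w1_dev, u8 reg)
{
	int lo = w1_bq27000_read(w1_dev, reg);
	int hi = w1_bq27000_read(w1_dev, reg + 1);

	return (hi << 8) | lo;	/* registers are little-endian byte pairs */
}

static int bq27000_battery_probe(struct platform_device *pdev)
{
	/* the parent is the w1 slave device set in w1_bq27000_add_slave() */
	int mv = bq27000_read_word(pdev->dev.parent, 0x08);	/* assumed VOLT reg */

	dev_info(&pdev->dev, "battery voltage: %d mV\n", mv);
	return 0;
}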
diff --git a/drivers/w1/w1.h b/drivers/w1/w1.h
index cdaa6fffbfc7..97304bd83ec9 100644
--- a/drivers/w1/w1.h
+++ b/drivers/w1/w1.h
@@ -206,6 +206,7 @@ void w1_slave_detach(struct w1_slave *sl);
206 206
207u8 w1_triplet(struct w1_master *dev, int bdir); 207u8 w1_triplet(struct w1_master *dev, int bdir);
208void w1_write_8(struct w1_master *, u8); 208void w1_write_8(struct w1_master *, u8);
209u8 w1_read_8(struct w1_master *);
209int w1_reset_bus(struct w1_master *); 210int w1_reset_bus(struct w1_master *);
210u8 w1_calc_crc8(u8 *, int); 211u8 w1_calc_crc8(u8 *, int);
211void w1_write_block(struct w1_master *, const u8 *, int); 212void w1_write_block(struct w1_master *, const u8 *, int);
diff --git a/drivers/w1/w1_io.c b/drivers/w1/w1_io.c
index f4f82f1f486e..0d15b0eaf79a 100644
--- a/drivers/w1/w1_io.c
+++ b/drivers/w1/w1_io.c
@@ -217,7 +217,7 @@ u8 w1_triplet(struct w1_master *dev, int bdir)
217 * @param dev the master device 217 * @param dev the master device
218 * @return the byte read 218 * @return the byte read
219 */ 219 */
220static u8 w1_read_8(struct w1_master * dev) 220u8 w1_read_8(struct w1_master *dev)
221{ 221{
222 int i; 222 int i;
223 u8 res = 0; 223 u8 res = 0;
@@ -230,6 +230,7 @@ static u8 w1_read_8(struct w1_master * dev)
230 230
231 return res; 231 return res;
232} 232}
233EXPORT_SYMBOL_GPL(w1_read_8);
233 234
234/** 235/**
235 * Writes a series of bytes. 236 * Writes a series of bytes.
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 1a22fe782a27..4fd3fa5546b1 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -67,11 +67,11 @@ config AT91RM9200_WATCHDOG
67 system when the timeout is reached. 67 system when the timeout is reached.
68 68
69config AT91SAM9X_WATCHDOG 69config AT91SAM9X_WATCHDOG
70 tristate "AT91SAM9X watchdog" 70 tristate "AT91SAM9X / AT91CAP9 watchdog"
71 depends on WATCHDOG && (ARCH_AT91SAM9260 || ARCH_AT91SAM9261) 71 depends on ARCH_AT91 && !ARCH_AT91RM9200
72 help 72 help
73 Watchdog timer embedded into AT91SAM9X chips. This will reboot your 73 Watchdog timer embedded into AT91SAM9X and AT91CAP9 chips. This will
74 system when the timeout is reached. 74 reboot your system when the timeout is reached.
75 75
76config 21285_WATCHDOG 76config 21285_WATCHDOG
77 tristate "DC21285 watchdog" 77 tristate "DC21285 watchdog"
diff --git a/drivers/watchdog/acquirewdt.c b/drivers/watchdog/acquirewdt.c
index 6e46a551395c..3e57aa4d643a 100644
--- a/drivers/watchdog/acquirewdt.c
+++ b/drivers/watchdog/acquirewdt.c
@@ -3,8 +3,8 @@
3 * 3 *
4 * Based on wdt.c. Original copyright messages: 4 * Based on wdt.c. Original copyright messages:
5 * 5 *
6 * (c) Copyright 1996 Alan Cox <alan@redhat.com>, All Rights Reserved. 6 * (c) Copyright 1996 Alan Cox <alan@lxorguk.ukuu.org.uk>,
7 * http://www.redhat.com 7 * All Rights Reserved.
8 * 8 *
9 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 10 * modify it under the terms of the GNU General Public License
@@ -15,7 +15,7 @@
15 * warranty for any of this software. This material is provided 15 * warranty for any of this software. This material is provided
16 * "AS-IS" and at no charge. 16 * "AS-IS" and at no charge.
17 * 17 *
18 * (c) Copyright 1995 Alan Cox <alan@redhat.com> 18 * (c) Copyright 1995 Alan Cox <alan@lxorguk.ukuu.org.uk>
19 * 19 *
20 * 14-Dec-2001 Matt Domsch <Matt_Domsch@dell.com> 20 * 14-Dec-2001 Matt Domsch <Matt_Domsch@dell.com>
21 * Added nowayout module option to override CONFIG_WATCHDOG_NOWAYOUT 21 * Added nowayout module option to override CONFIG_WATCHDOG_NOWAYOUT
diff --git a/drivers/watchdog/advantechwdt.c b/drivers/watchdog/advantechwdt.c
index a5110f93a755..a1d7856ea6e0 100644
--- a/drivers/watchdog/advantechwdt.c
+++ b/drivers/watchdog/advantechwdt.c
@@ -6,8 +6,8 @@
6 * Based on acquirewdt.c which is based on wdt.c. 6 * Based on acquirewdt.c which is based on wdt.c.
7 * Original copyright messages: 7 * Original copyright messages:
8 * 8 *
9 * (c) Copyright 1996 Alan Cox <alan@redhat.com>, All Rights Reserved. 9 * (c) Copyright 1996 Alan Cox <alan@lxorguk.ukuu.org.uk>,
10 * http://www.redhat.com 10 * All Rights Reserved.
11 * 11 *
12 * This program is free software; you can redistribute it and/or 12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License 13 * modify it under the terms of the GNU General Public License
@@ -18,7 +18,7 @@
18 * warranty for any of this software. This material is provided 18 * warranty for any of this software. This material is provided
19 * "AS-IS" and at no charge. 19 * "AS-IS" and at no charge.
20 * 20 *
21 * (c) Copyright 1995 Alan Cox <alan@redhat.com> 21 * (c) Copyright 1995 Alan Cox <alan@lxorguk.ukuu.org.uk>
22 * 22 *
23 * 14-Dec-2001 Matt Domsch <Matt_Domsch@dell.com> 23 * 14-Dec-2001 Matt Domsch <Matt_Domsch@dell.com>
24 * Added nowayout module option to override CONFIG_WATCHDOG_NOWAYOUT 24 * Added nowayout module option to override CONFIG_WATCHDOG_NOWAYOUT
diff --git a/drivers/watchdog/at91sam9_wdt.c b/drivers/watchdog/at91sam9_wdt.c
index b4babfc31586..b1da287f90ec 100644
--- a/drivers/watchdog/at91sam9_wdt.c
+++ b/drivers/watchdog/at91sam9_wdt.c
@@ -30,7 +30,7 @@
30#include <linux/bitops.h> 30#include <linux/bitops.h>
31#include <linux/uaccess.h> 31#include <linux/uaccess.h>
32 32
33#include <asm/arch/at91_wdt.h> 33#include <mach/at91_wdt.h>
34 34
35#define DRV_NAME "AT91SAM9 Watchdog" 35#define DRV_NAME "AT91SAM9 Watchdog"
36 36
diff --git a/drivers/watchdog/bfin_wdt.c b/drivers/watchdog/bfin_wdt.c
index 31b42253054e..067a57cb3f82 100644
--- a/drivers/watchdog/bfin_wdt.c
+++ b/drivers/watchdog/bfin_wdt.c
@@ -5,7 +5,7 @@
5 * Originally based on softdog.c 5 * Originally based on softdog.c
6 * Copyright 2006-2007 Analog Devices Inc. 6 * Copyright 2006-2007 Analog Devices Inc.
7 * Copyright 2006-2007 Michele d'Amico 7 * Copyright 2006-2007 Michele d'Amico
8 * Copyright 1996 Alan Cox <alan@redhat.com> 8 * Copyright 1996 Alan Cox <alan@lxorguk.ukuu.org.uk>
9 * 9 *
10 * Enter bugs at http://blackfin.uclinux.org/ 10 * Enter bugs at http://blackfin.uclinux.org/
11 * 11 *
diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c
index c3b78a76f173..225398fd5049 100644
--- a/drivers/watchdog/booke_wdt.c
+++ b/drivers/watchdog/booke_wdt.c
@@ -42,8 +42,10 @@ u32 booke_wdt_period = WDT_PERIOD_DEFAULT;
42 42
43#ifdef CONFIG_FSL_BOOKE 43#ifdef CONFIG_FSL_BOOKE
44#define WDTP(x) ((((63-x)&0x3)<<30)|(((63-x)&0x3c)<<15)) 44#define WDTP(x) ((((63-x)&0x3)<<30)|(((63-x)&0x3c)<<15))
45#define WDTP_MASK (WDTP(0))
45#else 46#else
46#define WDTP(x) (TCR_WP(x)) 47#define WDTP(x) (TCR_WP(x))
48#define WDTP_MASK (TCR_WP_MASK)
47#endif 49#endif
48 50
49static DEFINE_SPINLOCK(booke_wdt_lock); 51static DEFINE_SPINLOCK(booke_wdt_lock);
@@ -65,6 +67,7 @@ static void __booke_wdt_enable(void *data)
65 /* clear status before enabling watchdog */ 67 /* clear status before enabling watchdog */
66 __booke_wdt_ping(NULL); 68 __booke_wdt_ping(NULL);
67 val = mfspr(SPRN_TCR); 69 val = mfspr(SPRN_TCR);
70 val &= ~WDTP_MASK;
68 val |= (TCR_WIE|TCR_WRC(WRC_CHIP)|WDTP(booke_wdt_period)); 71 val |= (TCR_WIE|TCR_WRC(WRC_CHIP)|WDTP(booke_wdt_period));
69 72
70 mtspr(SPRN_TCR, val); 73 mtspr(SPRN_TCR, val);
@@ -114,7 +117,7 @@ static long booke_wdt_ioctl(struct file *file,
114 case WDIOC_SETTIMEOUT: 117 case WDIOC_SETTIMEOUT:
115 if (get_user(booke_wdt_period, p)) 118 if (get_user(booke_wdt_period, p))
116 return -EFAULT; 119 return -EFAULT;
117 mtspr(SPRN_TCR, (mfspr(SPRN_TCR) & ~WDTP(0)) | 120 mtspr(SPRN_TCR, (mfspr(SPRN_TCR) & ~WDTP_MASK) |
118 WDTP(booke_wdt_period)); 121 WDTP(booke_wdt_period));
119 return 0; 122 return 0;
120 case WDIOC_GETTIMEOUT: 123 case WDIOC_GETTIMEOUT:
diff --git a/drivers/watchdog/eurotechwdt.c b/drivers/watchdog/eurotechwdt.c
index bbd14e34319f..a171fc6ae1cb 100644
--- a/drivers/watchdog/eurotechwdt.c
+++ b/drivers/watchdog/eurotechwdt.c
@@ -8,8 +8,8 @@
8 * Based on wdt.c. 8 * Based on wdt.c.
9 * Original copyright messages: 9 * Original copyright messages:
10 * 10 *
11 * (c) Copyright 1996-1997 Alan Cox <alan@redhat.com>, All Rights Reserved. 11 * (c) Copyright 1996-1997 Alan Cox <alan@lxorguk.ukuu.org.uk>,
12 * http://www.redhat.com 12 * All Rights Reserved.
13 * 13 *
14 * This program is free software; you can redistribute it and/or 14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License 15 * modify it under the terms of the GNU General Public License
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index a3765e0be4a8..763c1ea5dce5 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -40,6 +40,7 @@
40#include <linux/bootmem.h> 40#include <linux/bootmem.h>
41#include <linux/slab.h> 41#include <linux/slab.h>
42#include <asm/desc.h> 42#include <asm/desc.h>
43#include <asm/cacheflush.h>
43 44
44#define PCI_BIOS32_SD_VALUE 0x5F32335F /* "_32_" */ 45#define PCI_BIOS32_SD_VALUE 0x5F32335F /* "_32_" */
45#define CRU_BIOS_SIGNATURE_VALUE 0x55524324 46#define CRU_BIOS_SIGNATURE_VALUE 0x55524324
@@ -394,6 +395,8 @@ static void __devinit dmi_find_cru(const struct dmi_header *dm)
394 smbios_cru64_ptr->double_offset; 395 smbios_cru64_ptr->double_offset;
395 cru_rom_addr = ioremap(cru_physical_address, 396 cru_rom_addr = ioremap(cru_physical_address,
396 smbios_cru64_ptr->double_length); 397 smbios_cru64_ptr->double_length);
398 set_memory_x((unsigned long)cru_rom_addr & PAGE_MASK,
399 smbios_cru64_ptr->double_length >> PAGE_SHIFT);
397 } 400 }
398 } 401 }
399} 402}
@@ -482,7 +485,7 @@ static int hpwdt_pretimeout(struct notifier_block *nb, unsigned long ulReason,
482 "Management Log for details.\n"); 485 "Management Log for details.\n");
483 } 486 }
484 487
485 return NOTIFY_STOP; 488 return NOTIFY_OK;
486} 489}
487 490
488/* 491/*
diff --git a/drivers/watchdog/i6300esb.c b/drivers/watchdog/i6300esb.c
index c13383f7fcb9..74f951c18b90 100644
--- a/drivers/watchdog/i6300esb.c
+++ b/drivers/watchdog/i6300esb.c
@@ -394,8 +394,7 @@ static unsigned char __init esb_getdevice(void)
394 goto err_disable; 394 goto err_disable;
395 } 395 }
396 396
397 BASEADDR = ioremap(pci_resource_start(esb_pci, 0), 397 BASEADDR = pci_ioremap_bar(esb_pci, 0);
398 pci_resource_len(esb_pci, 0));
399 if (BASEADDR == NULL) { 398 if (BASEADDR == NULL) {
400 /* Something's wrong here, BASEADDR has to be set */ 399 /* Something's wrong here, BASEADDR has to be set */
401 printk(KERN_ERR PFX "failed to get BASEADDR\n"); 400 printk(KERN_ERR PFX "failed to get BASEADDR\n");
diff --git a/drivers/watchdog/iTCO_vendor_support.c b/drivers/watchdog/iTCO_vendor_support.c
index ca344a85eb95..2474ebca88f6 100644
--- a/drivers/watchdog/iTCO_vendor_support.c
+++ b/drivers/watchdog/iTCO_vendor_support.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * intel TCO vendor specific watchdog driver support 2 * intel TCO vendor specific watchdog driver support
3 * 3 *
4 * (c) Copyright 2006 Wim Van Sebroeck <wim@iguana.be>. 4 * (c) Copyright 2006-2008 Wim Van Sebroeck <wim@iguana.be>.
5 * 5 *
6 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
@@ -19,8 +19,7 @@
19 19
20/* Module and version information */ 20/* Module and version information */
21#define DRV_NAME "iTCO_vendor_support" 21#define DRV_NAME "iTCO_vendor_support"
22#define DRV_VERSION "1.01" 22#define DRV_VERSION "1.02"
23#define DRV_RELDATE "11-Nov-2006"
24#define PFX DRV_NAME ": " 23#define PFX DRV_NAME ": "
25 24
26/* Includes */ 25/* Includes */
@@ -78,24 +77,6 @@ MODULE_PARM_DESC(vendorsupport, "iTCO vendor specific support mode, default=0 (n
78 * 20.6 seconds. 77 * 20.6 seconds.
79 */ 78 */
80 79
81static void supermicro_old_pre_start(unsigned long acpibase)
82{
83 unsigned long val32;
84
85 val32 = inl(SMI_EN);
86 val32 &= 0xffffdfff; /* Turn off SMI clearing watchdog */
87 outl(val32, SMI_EN); /* Needed to activate watchdog */
88}
89
90static void supermicro_old_pre_stop(unsigned long acpibase)
91{
92 unsigned long val32;
93
94 val32 = inl(SMI_EN);
95 val32 &= 0x00002000; /* Turn on SMI clearing watchdog */
96 outl(val32, SMI_EN); /* Needed to deactivate watchdog */
97}
98
99static void supermicro_old_pre_keepalive(unsigned long acpibase) 80static void supermicro_old_pre_keepalive(unsigned long acpibase)
100{ 81{
101 /* Reload TCO Timer (done in iTCO_wdt_keepalive) + */ 82 /* Reload TCO Timer (done in iTCO_wdt_keepalive) + */
@@ -247,18 +228,14 @@ static void supermicro_new_pre_set_heartbeat(unsigned int heartbeat)
247void iTCO_vendor_pre_start(unsigned long acpibase, 228void iTCO_vendor_pre_start(unsigned long acpibase,
248 unsigned int heartbeat) 229 unsigned int heartbeat)
249{ 230{
250 if (vendorsupport == SUPERMICRO_OLD_BOARD) 231 if (vendorsupport == SUPERMICRO_NEW_BOARD)
251 supermicro_old_pre_start(acpibase);
252 else if (vendorsupport == SUPERMICRO_NEW_BOARD)
253 supermicro_new_pre_start(heartbeat); 232 supermicro_new_pre_start(heartbeat);
254} 233}
255EXPORT_SYMBOL(iTCO_vendor_pre_start); 234EXPORT_SYMBOL(iTCO_vendor_pre_start);
256 235
257void iTCO_vendor_pre_stop(unsigned long acpibase) 236void iTCO_vendor_pre_stop(unsigned long acpibase)
258{ 237{
259 if (vendorsupport == SUPERMICRO_OLD_BOARD) 238 if (vendorsupport == SUPERMICRO_NEW_BOARD)
260 supermicro_old_pre_stop(acpibase);
261 else if (vendorsupport == SUPERMICRO_NEW_BOARD)
262 supermicro_new_pre_stop(); 239 supermicro_new_pre_stop();
263} 240}
264EXPORT_SYMBOL(iTCO_vendor_pre_stop); 241EXPORT_SYMBOL(iTCO_vendor_pre_stop);
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index bfb93bc2ca9f..5b395a4ddfdf 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * intel TCO Watchdog Driver (Used in i82801 and i6300ESB chipsets) 2 * intel TCO Watchdog Driver (Used in i82801 and i6300ESB chipsets)
3 * 3 *
4 * (c) Copyright 2006-2007 Wim Van Sebroeck <wim@iguana.be>. 4 * (c) Copyright 2006-2008 Wim Van Sebroeck <wim@iguana.be>.
5 * 5 *
6 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
@@ -20,34 +20,41 @@
20 * 82801BAM (ICH2-M) : document number 290687-002, 298242-027, 20 * 82801BAM (ICH2-M) : document number 290687-002, 298242-027,
21 * 82801CA (ICH3-S) : document number 290733-003, 290739-013, 21 * 82801CA (ICH3-S) : document number 290733-003, 290739-013,
22 * 82801CAM (ICH3-M) : document number 290716-001, 290718-007, 22 * 82801CAM (ICH3-M) : document number 290716-001, 290718-007,
23 * 82801DB (ICH4) : document number 290744-001, 290745-020, 23 * 82801DB (ICH4) : document number 290744-001, 290745-025,
24 * 82801DBM (ICH4-M) : document number 252337-001, 252663-005, 24 * 82801DBM (ICH4-M) : document number 252337-001, 252663-008,
25 * 82801E (C-ICH) : document number 273599-001, 273645-002, 25 * 82801E (C-ICH) : document number 273599-001, 273645-002,
26 * 82801EB (ICH5) : document number 252516-001, 252517-003, 26 * 82801EB (ICH5) : document number 252516-001, 252517-028,
27 * 82801ER (ICH5R) : document number 252516-001, 252517-003, 27 * 82801ER (ICH5R) : document number 252516-001, 252517-028,
28 * 82801FB (ICH6) : document number 301473-002, 301474-007, 28 * 6300ESB (6300ESB) : document number 300641-004, 300884-013,
29 * 82801FR (ICH6R) : document number 301473-002, 301474-007, 29 * 82801FB (ICH6) : document number 301473-002, 301474-026,
30 * 82801FBM (ICH6-M) : document number 301473-002, 301474-007, 30 * 82801FR (ICH6R) : document number 301473-002, 301474-026,
31 * 82801FW (ICH6W) : document number 301473-001, 301474-007, 31 * 82801FBM (ICH6-M) : document number 301473-002, 301474-026,
32 * 82801FRW (ICH6RW) : document number 301473-001, 301474-007, 32 * 82801FW (ICH6W) : document number 301473-001, 301474-026,
33 * 82801GB (ICH7) : document number 307013-002, 307014-009, 33 * 82801FRW (ICH6RW) : document number 301473-001, 301474-026,
34 * 82801GR (ICH7R) : document number 307013-002, 307014-009, 34 * 631xESB (631xESB) : document number 313082-001, 313075-006,
35 * 82801GDH (ICH7DH) : document number 307013-002, 307014-009, 35 * 632xESB (632xESB) : document number 313082-001, 313075-006,
36 * 82801GBM (ICH7-M) : document number 307013-002, 307014-009, 36 * 82801GB (ICH7) : document number 307013-003, 307014-024,
37 * 82801GHM (ICH7-M DH) : document number 307013-002, 307014-009, 37 * 82801GR (ICH7R) : document number 307013-003, 307014-024,
38 * 82801HB (ICH8) : document number 313056-003, 313057-009, 38 * 82801GDH (ICH7DH) : document number 307013-003, 307014-024,
39 * 82801HR (ICH8R) : document number 313056-003, 313057-009, 39 * 82801GBM (ICH7-M) : document number 307013-003, 307014-024,
40 * 82801HBM (ICH8M) : document number 313056-003, 313057-009, 40 * 82801GHM (ICH7-M DH) : document number 307013-003, 307014-024,
41 * 82801HH (ICH8DH) : document number 313056-003, 313057-009, 41 * 82801GU (ICH7-U) : document number 307013-003, 307014-024,
42 * 82801HO (ICH8DO) : document number 313056-003, 313057-009, 42 * 82801HB (ICH8) : document number 313056-003, 313057-017,
43 * 82801HEM (ICH8M-E) : document number 313056-003, 313057-009, 43 * 82801HR (ICH8R) : document number 313056-003, 313057-017,
44 * 82801IB (ICH9) : document number 316972-001, 316973-006, 44 * 82801HBM (ICH8M) : document number 313056-003, 313057-017,
45 * 82801IR (ICH9R) : document number 316972-001, 316973-006, 45 * 82801HH (ICH8DH) : document number 313056-003, 313057-017,
46 * 82801IH (ICH9DH) : document number 316972-001, 316973-006, 46 * 82801HO (ICH8DO) : document number 313056-003, 313057-017,
47 * 82801IO (ICH9DO) : document number 316972-001, 316973-006, 47 * 82801HEM (ICH8M-E) : document number 313056-003, 313057-017,
48 * 6300ESB (6300ESB) : document number 300641-003, 300884-010, 48 * 82801IB (ICH9) : document number 316972-004, 316973-012,
49 * 631xESB (631xESB) : document number 313082-001, 313075-005, 49 * 82801IR (ICH9R) : document number 316972-004, 316973-012,
50 * 632xESB (632xESB) : document number 313082-001, 313075-005 50 * 82801IH (ICH9DH) : document number 316972-004, 316973-012,
51 * 82801IO (ICH9DO) : document number 316972-004, 316973-012,
52 * 82801IBM (ICH9M) : document number 316972-004, 316973-012,
53 * 82801IEM (ICH9M-E) : document number 316972-004, 316973-012,
54 * 82801JIB (ICH10) : document number 319973-002, 319974-002,
55 * 82801JIR (ICH10R) : document number 319973-002, 319974-002,
56 * 82801JD (ICH10D) : document number 319973-002, 319974-002,
57 * 82801JDO (ICH10DO) : document number 319973-002, 319974-002
51 */ 58 */
52 59
53/* 60/*
@@ -56,8 +63,7 @@
56 63
57/* Module and version information */ 64/* Module and version information */
58#define DRV_NAME "iTCO_wdt" 65#define DRV_NAME "iTCO_wdt"
59#define DRV_VERSION "1.03" 66#define DRV_VERSION "1.04"
60#define DRV_RELDATE "30-Apr-2008"
61#define PFX DRV_NAME ": " 67#define PFX DRV_NAME ": "
62 68
63/* Includes */ 69/* Includes */
@@ -96,19 +102,26 @@ enum iTCO_chipsets {
96 TCO_ICH6, /* ICH6 & ICH6R */ 102 TCO_ICH6, /* ICH6 & ICH6R */
97 TCO_ICH6M, /* ICH6-M */ 103 TCO_ICH6M, /* ICH6-M */
98 TCO_ICH6W, /* ICH6W & ICH6RW */ 104 TCO_ICH6W, /* ICH6W & ICH6RW */
105 TCO_631XESB, /* 631xESB/632xESB */
99 TCO_ICH7, /* ICH7 & ICH7R */ 106 TCO_ICH7, /* ICH7 & ICH7R */
100 TCO_ICH7M, /* ICH7-M */ 107 TCO_ICH7DH, /* ICH7DH */
108 TCO_ICH7M, /* ICH7-M & ICH7-U */
101 TCO_ICH7MDH, /* ICH7-M DH */ 109 TCO_ICH7MDH, /* ICH7-M DH */
102 TCO_ICH8, /* ICH8 & ICH8R */ 110 TCO_ICH8, /* ICH8 & ICH8R */
103 TCO_ICH8ME, /* ICH8M-E */
104 TCO_ICH8DH, /* ICH8DH */ 111 TCO_ICH8DH, /* ICH8DH */
105 TCO_ICH8DO, /* ICH8DO */ 112 TCO_ICH8DO, /* ICH8DO */
106 TCO_ICH8M, /* ICH8M */ 113 TCO_ICH8M, /* ICH8M */
114 TCO_ICH8ME, /* ICH8M-E */
107 TCO_ICH9, /* ICH9 */ 115 TCO_ICH9, /* ICH9 */
108 TCO_ICH9R, /* ICH9R */ 116 TCO_ICH9R, /* ICH9R */
109 TCO_ICH9DH, /* ICH9DH */ 117 TCO_ICH9DH, /* ICH9DH */
110 TCO_ICH9DO, /* ICH9DO */ 118 TCO_ICH9DO, /* ICH9DO */
111 TCO_631XESB, /* 631xESB/632xESB */ 119 TCO_ICH9M, /* ICH9M */
120 TCO_ICH9ME, /* ICH9M-E */
121 TCO_ICH10, /* ICH10 */
122 TCO_ICH10R, /* ICH10R */
123 TCO_ICH10D, /* ICH10D */
124 TCO_ICH10DO, /* ICH10DO */
112}; 125};
113 126
114static struct { 127static struct {
@@ -129,19 +142,26 @@ static struct {
129 {"ICH6 or ICH6R", 2}, 142 {"ICH6 or ICH6R", 2},
130 {"ICH6-M", 2}, 143 {"ICH6-M", 2},
131 {"ICH6W or ICH6RW", 2}, 144 {"ICH6W or ICH6RW", 2},
145 {"631xESB/632xESB", 2},
132 {"ICH7 or ICH7R", 2}, 146 {"ICH7 or ICH7R", 2},
133 {"ICH7-M", 2}, 147 {"ICH7DH", 2},
148 {"ICH7-M or ICH7-U", 2},
134 {"ICH7-M DH", 2}, 149 {"ICH7-M DH", 2},
135 {"ICH8 or ICH8R", 2}, 150 {"ICH8 or ICH8R", 2},
136 {"ICH8M-E", 2},
137 {"ICH8DH", 2}, 151 {"ICH8DH", 2},
138 {"ICH8DO", 2}, 152 {"ICH8DO", 2},
139 {"ICH8M", 2}, 153 {"ICH8M", 2},
154 {"ICH8M-E", 2},
140 {"ICH9", 2}, 155 {"ICH9", 2},
141 {"ICH9R", 2}, 156 {"ICH9R", 2},
142 {"ICH9DH", 2}, 157 {"ICH9DH", 2},
143 {"ICH9DO", 2}, 158 {"ICH9DO", 2},
144 {"631xESB/632xESB", 2}, 159 {"ICH9M", 2},
160 {"ICH9M-E", 2},
161 {"ICH10", 2},
162 {"ICH10R", 2},
163 {"ICH10D", 2},
164 {"ICH10DO", 2},
145 {NULL, 0} 165 {NULL, 0}
146}; 166};
147 167
@@ -175,18 +195,6 @@ static struct pci_device_id iTCO_wdt_pci_tbl[] = {
175 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH6_0, TCO_ICH6)}, 195 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH6_0, TCO_ICH6)},
176 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH6_1, TCO_ICH6M)}, 196 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH6_1, TCO_ICH6M)},
177 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH6_2, TCO_ICH6W)}, 197 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH6_2, TCO_ICH6W)},
178 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH7_0, TCO_ICH7)},
179 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH7_1, TCO_ICH7M)},
180 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH7_31, TCO_ICH7MDH)},
181 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_0, TCO_ICH8)},
182 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_1, TCO_ICH8ME)},
183 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_2, TCO_ICH8DH)},
184 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_3, TCO_ICH8DO)},
185 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_4, TCO_ICH8M)},
186 { ITCO_PCI_DEVICE(0x2918, TCO_ICH9)},
187 { ITCO_PCI_DEVICE(0x2916, TCO_ICH9R)},
188 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH9_2, TCO_ICH9DH)},
189 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH9_4, TCO_ICH9DO)},
190 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ESB2_0, TCO_631XESB)}, 198 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ESB2_0, TCO_631XESB)},
191 { ITCO_PCI_DEVICE(0x2671, TCO_631XESB)}, 199 { ITCO_PCI_DEVICE(0x2671, TCO_631XESB)},
192 { ITCO_PCI_DEVICE(0x2672, TCO_631XESB)}, 200 { ITCO_PCI_DEVICE(0x2672, TCO_631XESB)},
@@ -203,6 +211,25 @@ static struct pci_device_id iTCO_wdt_pci_tbl[] = {
203 { ITCO_PCI_DEVICE(0x267d, TCO_631XESB)}, 211 { ITCO_PCI_DEVICE(0x267d, TCO_631XESB)},
204 { ITCO_PCI_DEVICE(0x267e, TCO_631XESB)}, 212 { ITCO_PCI_DEVICE(0x267e, TCO_631XESB)},
205 { ITCO_PCI_DEVICE(0x267f, TCO_631XESB)}, 213 { ITCO_PCI_DEVICE(0x267f, TCO_631XESB)},
214 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH7_0, TCO_ICH7)},
215 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH7_30, TCO_ICH7DH)},
216 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH7_1, TCO_ICH7M)},
217 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH7_31, TCO_ICH7MDH)},
218 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_0, TCO_ICH8)},
219 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_2, TCO_ICH8DH)},
220 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_3, TCO_ICH8DO)},
221 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_4, TCO_ICH8M)},
222 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_1, TCO_ICH8ME)},
223 { ITCO_PCI_DEVICE(0x2918, TCO_ICH9)},
224 { ITCO_PCI_DEVICE(0x2916, TCO_ICH9R)},
225 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH9_2, TCO_ICH9DH)},
226 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH9_4, TCO_ICH9DO)},
227 { ITCO_PCI_DEVICE(0x2919, TCO_ICH9M)},
228 { ITCO_PCI_DEVICE(0x2917, TCO_ICH9ME)},
229 { ITCO_PCI_DEVICE(0x3a18, TCO_ICH10)},
230 { ITCO_PCI_DEVICE(0x3a16, TCO_ICH10R)},
231 { ITCO_PCI_DEVICE(0x3a1a, TCO_ICH10D)},
232 { ITCO_PCI_DEVICE(0x3a14, TCO_ICH10DO)},
206 { 0, }, /* End of list */ 233 { 0, }, /* End of list */
207}; 234};
208MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl); 235MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl);
@@ -311,6 +338,7 @@ static int iTCO_wdt_unset_NO_REBOOT_bit(void)
311static int iTCO_wdt_start(void) 338static int iTCO_wdt_start(void)
312{ 339{
313 unsigned int val; 340 unsigned int val;
341 unsigned long val32;
314 342
315 spin_lock(&iTCO_wdt_private.io_lock); 343 spin_lock(&iTCO_wdt_private.io_lock);
316 344
@@ -323,6 +351,18 @@ static int iTCO_wdt_start(void)
323 return -EIO; 351 return -EIO;
324 } 352 }
325 353
354 /* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI# */
355 val32 = inl(SMI_EN);
356 val32 &= 0xffffdfff; /* Turn off SMI clearing watchdog */
357 outl(val32, SMI_EN);
358
359 /* Force the timer to its reload value by writing to the TCO_RLD
360 register */
361 if (iTCO_wdt_private.iTCO_version == 2)
362 outw(0x01, TCO_RLD);
363 else if (iTCO_wdt_private.iTCO_version == 1)
364 outb(0x01, TCO_RLD);
365
326 /* Bit 11: TCO Timer Halt -> 0 = The TCO timer is enabled to count */ 366 /* Bit 11: TCO Timer Halt -> 0 = The TCO timer is enabled to count */
327 val = inw(TCO1_CNT); 367 val = inw(TCO1_CNT);
328 val &= 0xf7ff; 368 val &= 0xf7ff;
@@ -338,6 +378,7 @@ static int iTCO_wdt_start(void)
338static int iTCO_wdt_stop(void) 378static int iTCO_wdt_stop(void)
339{ 379{
340 unsigned int val; 380 unsigned int val;
381 unsigned long val32;
341 382
342 spin_lock(&iTCO_wdt_private.io_lock); 383 spin_lock(&iTCO_wdt_private.io_lock);
343 384
@@ -349,6 +390,11 @@ static int iTCO_wdt_stop(void)
349 outw(val, TCO1_CNT); 390 outw(val, TCO1_CNT);
350 val = inw(TCO1_CNT); 391 val = inw(TCO1_CNT);
351 392
393 /* Bit 13: TCO_EN -> 1 = Enables the TCO logic to generate SMI# */
394 val32 = inl(SMI_EN);
395 val32 |= 0x00002000;
396 outl(val32, SMI_EN);
397
352 /* Set the NO_REBOOT bit to prevent later reboots, just for sure */ 398 /* Set the NO_REBOOT bit to prevent later reboots, just for sure */
353 iTCO_wdt_set_NO_REBOOT_bit(); 399 iTCO_wdt_set_NO_REBOOT_bit();
354 400
@@ -459,7 +505,6 @@ static int iTCO_wdt_open(struct inode *inode, struct file *file)
459 /* 505 /*
460 * Reload and activate timer 506 * Reload and activate timer
461 */ 507 */
462 iTCO_wdt_keepalive();
463 iTCO_wdt_start(); 508 iTCO_wdt_start();
464 return nonseekable_open(inode, file); 509 return nonseekable_open(inode, file);
465} 510}
@@ -604,7 +649,6 @@ static int __devinit iTCO_wdt_init(struct pci_dev *pdev,
604 int ret; 649 int ret;
605 u32 base_address; 650 u32 base_address;
606 unsigned long RCBA; 651 unsigned long RCBA;
607 unsigned long val32;
608 652
609 /* 653 /*
610 * Find the ACPI/PM base I/O address which is the base 654 * Find the ACPI/PM base I/O address which is the base
@@ -644,17 +688,13 @@ static int __devinit iTCO_wdt_init(struct pci_dev *pdev,
644 /* Set the NO_REBOOT bit to prevent later reboots, just for sure */ 688 /* Set the NO_REBOOT bit to prevent later reboots, just for sure */
645 iTCO_wdt_set_NO_REBOOT_bit(); 689 iTCO_wdt_set_NO_REBOOT_bit();
646 690
647 /* Set the TCO_EN bit in SMI_EN register */ 691 /* The TCO logic uses the TCO_EN bit in the SMI_EN register */
648 if (!request_region(SMI_EN, 4, "iTCO_wdt")) { 692 if (!request_region(SMI_EN, 4, "iTCO_wdt")) {
649 printk(KERN_ERR PFX 693 printk(KERN_ERR PFX
650 "I/O address 0x%04lx already in use\n", SMI_EN); 694 "I/O address 0x%04lx already in use\n", SMI_EN);
651 ret = -EIO; 695 ret = -EIO;
652 goto out; 696 goto out;
653 } 697 }
654 val32 = inl(SMI_EN);
655 val32 &= 0xffffdfff; /* Turn off SMI clearing watchdog */
656 outl(val32, SMI_EN);
657 release_region(SMI_EN, 4);
658 698
659 /* The TCO I/O registers reside in a 32-byte range pointed to 699 /* The TCO I/O registers reside in a 32-byte range pointed to
660 by the TCOBASE value */ 700 by the TCOBASE value */
@@ -662,7 +702,7 @@ static int __devinit iTCO_wdt_init(struct pci_dev *pdev,
662 printk(KERN_ERR PFX "I/O address 0x%04lx already in use\n", 702 printk(KERN_ERR PFX "I/O address 0x%04lx already in use\n",
663 TCOBASE); 703 TCOBASE);
664 ret = -EIO; 704 ret = -EIO;
665 goto out; 705 goto unreg_smi_en;
666 } 706 }
667 707
668 printk(KERN_INFO PFX 708 printk(KERN_INFO PFX
@@ -672,8 +712,9 @@ static int __devinit iTCO_wdt_init(struct pci_dev *pdev,
672 TCOBASE); 712 TCOBASE);
673 713
674 /* Clear out the (probably old) status */ 714 /* Clear out the (probably old) status */
675 outb(0, TCO1_STS); 715 outb(8, TCO1_STS); /* Clear the Time Out Status bit */
676 outb(3, TCO2_STS); 716 outb(2, TCO2_STS); /* Clear SECOND_TO_STS bit */
717 outb(4, TCO2_STS); /* Clear BOOT_STS bit */
677 718
678 /* Make sure the watchdog is not running */ 719 /* Make sure the watchdog is not running */
679 iTCO_wdt_stop(); 720 iTCO_wdt_stop();
@@ -701,6 +742,8 @@ static int __devinit iTCO_wdt_init(struct pci_dev *pdev,
701 742
702unreg_region: 743unreg_region:
703 release_region(TCOBASE, 0x20); 744 release_region(TCOBASE, 0x20);
745unreg_smi_en:
746 release_region(SMI_EN, 4);
704out: 747out:
705 if (iTCO_wdt_private.iTCO_version == 2) 748 if (iTCO_wdt_private.iTCO_version == 2)
706 iounmap(iTCO_wdt_private.gcs); 749 iounmap(iTCO_wdt_private.gcs);
@@ -718,6 +761,7 @@ static void __devexit iTCO_wdt_cleanup(void)
718 /* Deregister */ 761 /* Deregister */
719 misc_deregister(&iTCO_wdt_miscdev); 762 misc_deregister(&iTCO_wdt_miscdev);
720 release_region(TCOBASE, 0x20); 763 release_region(TCOBASE, 0x20);
764 release_region(SMI_EN, 4);
721 if (iTCO_wdt_private.iTCO_version == 2) 765 if (iTCO_wdt_private.iTCO_version == 2)
722 iounmap(iTCO_wdt_private.gcs); 766 iounmap(iTCO_wdt_private.gcs);
723 pci_dev_put(iTCO_wdt_private.pdev); 767 pci_dev_put(iTCO_wdt_private.pdev);
@@ -782,8 +826,8 @@ static int __init iTCO_wdt_init_module(void)
782{ 826{
783 int err; 827 int err;
784 828
785 printk(KERN_INFO PFX "Intel TCO WatchDog Timer Driver v%s (%s)\n", 829 printk(KERN_INFO PFX "Intel TCO WatchDog Timer Driver v%s\n",
786 DRV_VERSION, DRV_RELDATE); 830 DRV_VERSION);
787 831
788 err = platform_driver_register(&iTCO_wdt_driver); 832 err = platform_driver_register(&iTCO_wdt_driver);
789 if (err) 833 if (err)
diff --git a/drivers/watchdog/ib700wdt.c b/drivers/watchdog/ib700wdt.c
index 8782ec1f5aa0..317ef2b16cff 100644
--- a/drivers/watchdog/ib700wdt.c
+++ b/drivers/watchdog/ib700wdt.c
@@ -11,8 +11,8 @@
11 * Based on acquirewdt.c which is based on wdt.c. 11 * Based on acquirewdt.c which is based on wdt.c.
12 * Original copyright messages: 12 * Original copyright messages:
13 * 13 *
14 * (c) Copyright 1996 Alan Cox <alan@redhat.com>, All Rights Reserved. 14 * (c) Copyright 1996 Alan Cox <alan@lxorguk.ukuu.org.uk>,
15 * http://www.redhat.com 15 * All Rights Reserved.
16 * 16 *
17 * This program is free software; you can redistribute it and/or 17 * This program is free software; you can redistribute it and/or
18 * modify it under the terms of the GNU General Public License 18 * modify it under the terms of the GNU General Public License
@@ -23,7 +23,7 @@
23 * warranty for any of this software. This material is provided 23 * warranty for any of this software. This material is provided
24 * "AS-IS" and at no charge. 24 * "AS-IS" and at no charge.
25 * 25 *
26 * (c) Copyright 1995 Alan Cox <alan@redhat.com> 26 * (c) Copyright 1995 Alan Cox <alan@lxorguk.ukuu.org.uk>
27 * 27 *
28 * 14-Dec-2001 Matt Domsch <Matt_Domsch@dell.com> 28 * 14-Dec-2001 Matt Domsch <Matt_Domsch@dell.com>
29 * Added nowayout module option to override CONFIG_WATCHDOG_NOWAYOUT 29 * Added nowayout module option to override CONFIG_WATCHDOG_NOWAYOUT
diff --git a/drivers/watchdog/indydog.c b/drivers/watchdog/indydog.c
index 73c9e7992feb..0f761db9a27c 100644
--- a/drivers/watchdog/indydog.c
+++ b/drivers/watchdog/indydog.c
@@ -9,7 +9,7 @@
9 * as published by the Free Software Foundation; either version 9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version. 10 * 2 of the License, or (at your option) any later version.
11 * 11 *
12 * based on softdog.c by Alan Cox <alan@redhat.com> 12 * based on softdog.c by Alan Cox <alan@lxorguk.ukuu.org.uk>
13 */ 13 */
14 14
15#include <linux/module.h> 15#include <linux/module.h>
diff --git a/drivers/watchdog/mpcore_wdt.c b/drivers/watchdog/mpcore_wdt.c
index 2a9bfa81f9d6..1130ad697ce2 100644
--- a/drivers/watchdog/mpcore_wdt.c
+++ b/drivers/watchdog/mpcore_wdt.c
@@ -4,8 +4,8 @@
4 * (c) Copyright 2004 ARM Limited 4 * (c) Copyright 2004 ARM Limited
5 * 5 *
6 * Based on the SoftDog driver: 6 * Based on the SoftDog driver:
7 * (c) Copyright 1996 Alan Cox <alan@redhat.com>, All Rights Reserved. 7 * (c) Copyright 1996 Alan Cox <alan@lxorguk.ukuu.org.uk>,
8 * http://www.redhat.com 8 * All Rights Reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or 10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 11 * modify it under the terms of the GNU General Public License
diff --git a/drivers/watchdog/mtx-1_wdt.c b/drivers/watchdog/mtx-1_wdt.c
index b4b7b0a4c119..3acce623f209 100644
--- a/drivers/watchdog/mtx-1_wdt.c
+++ b/drivers/watchdog/mtx-1_wdt.c
@@ -98,6 +98,8 @@ static void mtx1_wdt_reset(void)
98 98
99static void mtx1_wdt_start(void) 99static void mtx1_wdt_start(void)
100{ 100{
101 unsigned long flags;
102
101 spin_lock_irqsave(&mtx1_wdt_device.lock, flags); 103 spin_lock_irqsave(&mtx1_wdt_device.lock, flags);
102 if (!mtx1_wdt_device.queue) { 104 if (!mtx1_wdt_device.queue) {
103 mtx1_wdt_device.queue = 1; 105 mtx1_wdt_device.queue = 1;
@@ -110,6 +112,8 @@ static void mtx1_wdt_start(void)
110 112
111static int mtx1_wdt_stop(void) 113static int mtx1_wdt_stop(void)
112{ 114{
115 unsigned long flags;
116
113 spin_lock_irqsave(&mtx1_wdt_device.lock, flags); 117 spin_lock_irqsave(&mtx1_wdt_device.lock, flags);
114 if (mtx1_wdt_device.queue) { 118 if (mtx1_wdt_device.queue) {
115 mtx1_wdt_device.queue = 0; 119 mtx1_wdt_device.queue = 0;
diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c
index 7bcbb7f4745f..2f2ce7429f5b 100644
--- a/drivers/watchdog/omap_wdt.c
+++ b/drivers/watchdog/omap_wdt.c
@@ -16,7 +16,7 @@
16 * 20030527: George G. Davis <gdavis@mvista.com> 16 * 20030527: George G. Davis <gdavis@mvista.com>
17 * Initially based on linux-2.4.19-rmk7-pxa1/drivers/char/sa1100_wdt.c 17 * Initially based on linux-2.4.19-rmk7-pxa1/drivers/char/sa1100_wdt.c
18 * (c) Copyright 2000 Oleg Drokin <green@crimea.edu> 18 * (c) Copyright 2000 Oleg Drokin <green@crimea.edu>
19 * Based on SoftDog driver by Alan Cox <alan@redhat.com> 19 * Based on SoftDog driver by Alan Cox <alan@lxorguk.ukuu.org.uk>
20 * 20 *
21 * Copyright (c) 2004 Texas Instruments. 21 * Copyright (c) 2004 Texas Instruments.
22 * 1. Modified to support OMAP1610 32-KHz watchdog timer 22 * 1. Modified to support OMAP1610 32-KHz watchdog timer
diff --git a/drivers/watchdog/pcwd_pci.c b/drivers/watchdog/pcwd_pci.c
index 90eb1d4271d7..5d76422c402c 100644
--- a/drivers/watchdog/pcwd_pci.c
+++ b/drivers/watchdog/pcwd_pci.c
@@ -6,7 +6,7 @@
6 * Based on source code of the following authors: 6 * Based on source code of the following authors:
7 * Ken Hollis <kenji@bitgate.com>, 7 * Ken Hollis <kenji@bitgate.com>,
8 * Lindsay Harris <lindsay@bluegum.com>, 8 * Lindsay Harris <lindsay@bluegum.com>,
9 * Alan Cox <alan@redhat.com>, 9 * Alan Cox <alan@lxorguk.ukuu.org.uk>,
10 * Matt Domsch <Matt_Domsch@dell.com>, 10 * Matt Domsch <Matt_Domsch@dell.com>,
11 * Rob Radez <rob@osinvestor.com> 11 * Rob Radez <rob@osinvestor.com>
12 * 12 *
diff --git a/drivers/watchdog/pcwd_usb.c b/drivers/watchdog/pcwd_usb.c
index c1685c942de6..afb089695da8 100644
--- a/drivers/watchdog/pcwd_usb.c
+++ b/drivers/watchdog/pcwd_usb.c
@@ -5,7 +5,7 @@
5 * 5 *
6 * Based on source code of the following authors: 6 * Based on source code of the following authors:
7 * Ken Hollis <kenji@bitgate.com>, 7 * Ken Hollis <kenji@bitgate.com>,
8 * Alan Cox <alan@redhat.com>, 8 * Alan Cox <alan@lxorguk.ukuu.org.uk>,
9 * Matt Domsch <Matt_Domsch@dell.com>, 9 * Matt Domsch <Matt_Domsch@dell.com>,
10 * Rob Radez <rob@osinvestor.com>, 10 * Rob Radez <rob@osinvestor.com>,
11 * Greg Kroah-Hartman <greg@kroah.com> 11 * Greg Kroah-Hartman <greg@kroah.com>
diff --git a/drivers/watchdog/rc32434_wdt.c b/drivers/watchdog/rc32434_wdt.c
index c9c73b69c5e5..57027f4653ce 100644
--- a/drivers/watchdog/rc32434_wdt.c
+++ b/drivers/watchdog/rc32434_wdt.c
@@ -7,7 +7,8 @@
7 * based on 7 * based on
8 * SoftDog 0.05: A Software Watchdog Device 8 * SoftDog 0.05: A Software Watchdog Device
9 * 9 *
10 * (c) Copyright 1996 Alan Cox <alan@redhat.com>, All Rights Reserved. 10 * (c) Copyright 1996 Alan Cox <alan@lxorguk.ukuu.org.uk>,
11 * All Rights Reserved.
11 * 12 *
12 * This program is free software; you can redistribute it and/or 13 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License 14 * modify it under the terms of the GNU General Public License
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index 86d42801de45..f7f6ce82a5e2 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -6,7 +6,7 @@
6 * S3C2410 Watchdog Timer Support 6 * S3C2410 Watchdog Timer Support
7 * 7 *
8 * Based on, softdog.c by Alan Cox, 8 * Based on, softdog.c by Alan Cox,
9 * (c) Copyright 1996 Alan Cox <alan@redhat.com> 9 * (c) Copyright 1996 Alan Cox <alan@lxorguk.ukuu.org.uk>
10 * 10 *
11 * This program is free software; you can redistribute it and/or modify 11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by 12 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/watchdog/sa1100_wdt.c b/drivers/watchdog/sa1100_wdt.c
index 31a48437dc3d..ed01e4c2beff 100644
--- a/drivers/watchdog/sa1100_wdt.c
+++ b/drivers/watchdog/sa1100_wdt.c
@@ -2,7 +2,7 @@
2 * Watchdog driver for the SA11x0/PXA2xx 2 * Watchdog driver for the SA11x0/PXA2xx
3 * 3 *
4 * (c) Copyright 2000 Oleg Drokin <green@crimea.edu> 4 * (c) Copyright 2000 Oleg Drokin <green@crimea.edu>
5 * Based on SoftDog driver by Alan Cox <alan@redhat.com> 5 * Based on SoftDog driver by Alan Cox <alan@lxorguk.ukuu.org.uk>
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
diff --git a/drivers/watchdog/sb_wdog.c b/drivers/watchdog/sb_wdog.c
index 27e526a07c9a..38f5831c9291 100644
--- a/drivers/watchdog/sb_wdog.c
+++ b/drivers/watchdog/sb_wdog.c
@@ -35,8 +35,8 @@
35 * Based on various other watchdog drivers, which are probably all 35 * Based on various other watchdog drivers, which are probably all
36 * loosely based on something Alan Cox wrote years ago. 36 * loosely based on something Alan Cox wrote years ago.
37 * 37 *
38 * (c) Copyright 1996 Alan Cox <alan@redhat.com>, All Rights Reserved. 38 * (c) Copyright 1996 Alan Cox <alan@lxorguk.ukuu.org.uk>,
39 * http://www.redhat.com 39 * All Rights Reserved.
40 * 40 *
41 * This program is free software; you can redistribute it and/or 41 * This program is free software; you can redistribute it and/or
42 * modify it under the terms of the GNU General Public License 42 * modify it under the terms of the GNU General Public License
diff --git a/drivers/watchdog/sbc8360.c b/drivers/watchdog/sbc8360.c
index fd83dd052d8c..ae74f6bcfa23 100644
--- a/drivers/watchdog/sbc8360.c
+++ b/drivers/watchdog/sbc8360.c
@@ -16,8 +16,8 @@
16 * Based on acquirewdt.c which is based on wdt.c. 16 * Based on acquirewdt.c which is based on wdt.c.
17 * Original copyright messages: 17 * Original copyright messages:
18 * 18 *
19 * (c) Copyright 1996 Alan Cox <alan@redhat.com>, All Rights Reserved. 19 * (c) Copyright 1996 Alan Cox <alan@lxorguk.ukuu.org.uk>,
20 * http://www.redhat.com 20 * All Rights Reserved.
21 * 21 *
22 * This program is free software; you can redistribute it and/or 22 * This program is free software; you can redistribute it and/or
23 * modify it under the terms of the GNU General Public License 23 * modify it under the terms of the GNU General Public License
@@ -28,7 +28,7 @@
28 * warranty for any of this software. This material is provided 28 * warranty for any of this software. This material is provided
29 * "AS-IS" and at no charge. 29 * "AS-IS" and at no charge.
30 * 30 *
31 * (c) Copyright 1995 Alan Cox <alan@redhat.com> 31 * (c) Copyright 1995 Alan Cox <alan@lxorguk.ukuu.org.uk>
32 * 32 *
33 * 14-Dec-2001 Matt Domsch <Matt_Domsch@dell.com> 33 * 14-Dec-2001 Matt Domsch <Matt_Domsch@dell.com>
34 * Added nowayout module option to override CONFIG_WATCHDOG_NOWAYOUT 34 * Added nowayout module option to override CONFIG_WATCHDOG_NOWAYOUT
diff --git a/drivers/watchdog/sbc_epx_c3.c b/drivers/watchdog/sbc_epx_c3.c
index e5e470ca7759..06553debc7bc 100644
--- a/drivers/watchdog/sbc_epx_c3.c
+++ b/drivers/watchdog/sbc_epx_c3.c
@@ -10,7 +10,7 @@
10 * as published by the Free Software Foundation; either version 10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version. 11 * 2 of the License, or (at your option) any later version.
12 * 12 *
13 * based on softdog.c by Alan Cox <alan@redhat.com> 13 * based on softdog.c by Alan Cox <alan@lxorguk.ukuu.org.uk>
14 */ 14 */
15 15
16#include <linux/module.h> 16#include <linux/module.h>
diff --git a/drivers/watchdog/smsc37b787_wdt.c b/drivers/watchdog/smsc37b787_wdt.c
index 988ff1d5b4be..2e56cad77d19 100644
--- a/drivers/watchdog/smsc37b787_wdt.c
+++ b/drivers/watchdog/smsc37b787_wdt.c
@@ -1,7 +1,7 @@
 /*
  * SMsC 37B787 Watchdog Timer driver for Linux 2.6.x.x
  *
- * Based on acquirewdt.c by Alan Cox <alan@redhat.com>
+ * Based on acquirewdt.c by Alan Cox <alan@lxorguk.ukuu.org.uk>
  * and some other existing drivers
  *
  * This program is free software; you can redistribute it and/or
diff --git a/drivers/watchdog/softdog.c b/drivers/watchdog/softdog.c
index c650464c5c63..7204f9662114 100644
--- a/drivers/watchdog/softdog.c
+++ b/drivers/watchdog/softdog.c
@@ -1,8 +1,7 @@
 /*
  * SoftDog 0.07: A Software Watchdog Device
  *
- * (c) Copyright 1996 Alan Cox <alan@redhat.com>, All Rights Reserved.
- * http://www.redhat.com
+ * (c) Copyright 1996 Alan Cox <alan@lxorguk.ukuu.org.uk>, All Rights Reserved.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
diff --git a/drivers/watchdog/w83627hf_wdt.c b/drivers/watchdog/w83627hf_wdt.c
index 69396adaa5c3..916890abffdd 100644
--- a/drivers/watchdog/w83627hf_wdt.c
+++ b/drivers/watchdog/w83627hf_wdt.c
@@ -11,8 +11,8 @@
  *
  * (c) Copyright 2000-2001 Marek Michalkiewicz <marekm@linux.org.pl>
  *
- * (c) Copyright 1996 Alan Cox <alan@redhat.com>, All Rights Reserved.
- * http://www.redhat.com
+ * (c) Copyright 1996 Alan Cox <alan@lxorguk.ukuu.org.uk>,
+ * All Rights Reserved.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -23,7 +23,7 @@
  * warranty for any of this software. This material is provided
  * "AS-IS" and at no charge.
  *
- * (c) Copyright 1995 Alan Cox <alan@redhat.com>
+ * (c) Copyright 1995 Alan Cox <alan@lxorguk.ukuu.org.uk>
  */

 #include <linux/module.h>
diff --git a/drivers/watchdog/w83697hf_wdt.c b/drivers/watchdog/w83697hf_wdt.c
index 445d30a01ed3..3c7aa412b1f3 100644
--- a/drivers/watchdog/w83697hf_wdt.c
+++ b/drivers/watchdog/w83697hf_wdt.c
@@ -12,8 +12,8 @@
  *
  * (c) Copyright 2000-2001 Marek Michalkiewicz <marekm@linux.org.pl>
  *
- * (c) Copyright 1996 Alan Cox <alan@redhat.com>, All Rights Reserved.
- * http://www.redhat.com
+ * (c) Copyright 1996 Alan Cox <alan@lxorguk.ukuu.org.uk>,
+ * All Rights Reserved.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
diff --git a/drivers/watchdog/w83697ug_wdt.c b/drivers/watchdog/w83697ug_wdt.c
index c73b5e2919c6..ada8ad82d993 100644
--- a/drivers/watchdog/w83697ug_wdt.c
+++ b/drivers/watchdog/w83697ug_wdt.c
@@ -102,7 +102,7 @@ static void w83697ug_select_wd_register(void)

 	} else {
 		printk(KERN_ERR PFX "No W83697UG/UF could be found\n");
-		return -EIO;
+		return;
 	}

 	outb_p(0x07, WDT_EFER);	/* point to logical device number reg */
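The hunk above fixes a C type error rather than changing behaviour: w83697ug_select_wd_register() is declared void, so the old "return -EIO;" was invalid and its error code was lost anyway; the fix simply returns. Purely as a hypothetical sketch (not the change applied here), the failure could instead be propagated by giving the function an int return type; chip_present() below is a stand-in for the inline chip-ID check, and WDT_EFER/PFX are the driver's own defines:

#include <linux/io.h>		/* outb_p() */
#include <linux/kernel.h>	/* printk() */

/* Hypothetical sketch only -- not the patch above. */
static int w83697ug_select_wd_register(void)
{
	if (!chip_present()) {
		printk(KERN_ERR PFX "No W83697UG/UF could be found\n");
		return -EIO;	/* caller can now abort the probe */
	}

	outb_p(0x07, WDT_EFER);	/* point to logical device number reg */
	/* ... remaining register setup as in the driver ... */
	return 0;
}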
diff --git a/drivers/watchdog/wafer5823wdt.c b/drivers/watchdog/wafer5823wdt.c
index 68377ae171ff..42e940c23891 100644
--- a/drivers/watchdog/wafer5823wdt.c
+++ b/drivers/watchdog/wafer5823wdt.c
@@ -10,8 +10,8 @@
  * Based on advantechwdt.c which is based on wdt.c.
  * Original copyright messages:
  *
- * (c) Copyright 1996-1997 Alan Cox <alan@redhat.com>, All Rights Reserved.
- * http://www.redhat.com
+ * (c) Copyright 1996-1997 Alan Cox <alan@lxorguk.ukuu.org.uk>,
+ * All Rights Reserved.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
diff --git a/drivers/watchdog/wdt.c b/drivers/watchdog/wdt.c
index deeebb2b13ea..eddb9187e7b6 100644
--- a/drivers/watchdog/wdt.c
+++ b/drivers/watchdog/wdt.c
@@ -1,8 +1,8 @@
 /*
  * Industrial Computer Source WDT500/501 driver
  *
- * (c) Copyright 1996-1997 Alan Cox <alan@redhat.com>, All Rights Reserved.
- * http://www.redhat.com
+ * (c) Copyright 1996-1997 Alan Cox <alan@lxorguk.ukuu.org.uk>,
+ * All Rights Reserved.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
diff --git a/drivers/watchdog/wdt285.c b/drivers/watchdog/wdt285.c
index 191ea6302107..f55135662d78 100644
--- a/drivers/watchdog/wdt285.c
+++ b/drivers/watchdog/wdt285.c
@@ -6,7 +6,8 @@
  *
  * SoftDog 0.05: A Software Watchdog Device
  *
- * (c) Copyright 1996 Alan Cox <alan@redhat.com>, All Rights Reserved.
+ * (c) Copyright 1996 Alan Cox <alan@lxorguk.ukuu.org.uk>,
+ * All Rights Reserved.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
diff --git a/drivers/watchdog/wdt_pci.c b/drivers/watchdog/wdt_pci.c
index ed02bdb38c09..c45839a4a34d 100644
--- a/drivers/watchdog/wdt_pci.c
+++ b/drivers/watchdog/wdt_pci.c
@@ -1,8 +1,8 @@
 /*
  * Industrial Computer Source PCI-WDT500/501 driver
  *
- * (c) Copyright 1996-1997 Alan Cox <alan@redhat.com>, All Rights Reserved.
- * http://www.redhat.com
+ * (c) Copyright 1996-1997 Alan Cox <alan@lxorguk.ukuu.org.uk>,
+ * All Rights Reserved.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 8c83abc73400..8dc7109d61b7 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -41,17 +41,18 @@
 #include <linux/pagemap.h>
 #include <linux/highmem.h>
 #include <linux/mutex.h>
-#include <linux/highmem.h>
 #include <linux/list.h>
 #include <linux/sysdev.h>

-#include <asm/xen/hypervisor.h>
 #include <asm/page.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
 #include <asm/uaccess.h>
 #include <asm/tlb.h>

+#include <asm/xen/hypervisor.h>
+#include <asm/xen/hypercall.h>
+#include <xen/interface/xen.h>
 #include <xen/interface/memory.h>
 #include <xen/xenbus.h>
 #include <xen/features.h>
@@ -123,14 +124,7 @@ static struct timer_list balloon_timer;
 static void scrub_page(struct page *page)
 {
 #ifdef CONFIG_XEN_SCRUB_PAGES
-	if (PageHighMem(page)) {
-		void *v = kmap(page);
-		clear_page(v);
-		kunmap(v);
-	} else {
-		void *v = page_address(page);
-		clear_page(v);
-	}
+	clear_highpage(page);
 #endif
 }

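The scrub_page() hunk above drops the open-coded highmem special case in favour of clear_highpage() from <linux/highmem.h>, which performs the map/clear/unmap sequence itself and works for both lowmem and highmem pages. Roughly, and glossing over the extra kmap-slot argument that kernels of this vintage pass to kmap_atomic(), the helper behaves like this sketch:

#include <linux/highmem.h>
#include <linux/mm.h>

/* Approximation of clear_highpage(); the real helper lives in
 * <linux/highmem.h> and its exact kmap_atomic() signature varies by
 * kernel version. */
static inline void clear_highpage_sketch(struct page *page)
{
	void *kaddr = kmap_atomic(page);	/* valid for low and high memory */

	clear_page(kaddr);
	kunmap_atomic(kaddr);
}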
diff --git a/drivers/xen/cpu_hotplug.c b/drivers/xen/cpu_hotplug.c
index 565280ec1c6a..974f56d1ebe1 100644
--- a/drivers/xen/cpu_hotplug.c
+++ b/drivers/xen/cpu_hotplug.c
@@ -2,7 +2,7 @@

 #include <xen/xenbus.h>

-#include <asm-x86/xen/hypervisor.h>
+#include <asm/xen/hypervisor.h>
 #include <asm/cpu.h>

 static void enable_hotplug_cpu(int cpu)
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 9ce1ab6c268d..1e3b934a4cf7 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -774,7 +774,7 @@ void xen_poll_irq(int irq)

 	poll.nr_ports = 1;
 	poll.timeout = 0;
-	poll.ports = &evtchn;
+	set_xen_guest_handle(poll.ports, &evtchn);

 	if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
 		BUG();
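The xen_poll_irq() hunk above switches to set_xen_guest_handle() when filling in the sched_poll argument. Guest-handle fields in the Xen interface structures are wrapped types so that pointer marshalling stays correct across 32/64-bit and compat guests, and the macro is the portable way to assign them; a bare pointer store only happens to work on some configurations. A minimal usage sketch, assuming the usual interface headers:

#include <xen/interface/xen.h>
#include <xen/interface/sched.h>
#include <xen/interface/event_channel.h>
#include <asm/xen/hypercall.h>

/* Sketch: block the calling vCPU until an event arrives on @evtchn,
 * mirroring the pattern used by xen_poll_irq() above. */
static void poll_one_evtchn(evtchn_port_t evtchn)
{
	struct sched_poll poll;

	poll.nr_ports = 1;
	poll.timeout = 0;				/* wait indefinitely */
	set_xen_guest_handle(poll.ports, &evtchn);	/* portable handle assignment */

	if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
		BUG();
}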
diff --git a/drivers/xen/features.c b/drivers/xen/features.c
index 0707714e40d6..99eda169c779 100644
--- a/drivers/xen/features.c
+++ b/drivers/xen/features.c
@@ -8,7 +8,11 @@
 #include <linux/types.h>
 #include <linux/cache.h>
 #include <linux/module.h>
-#include <asm/xen/hypervisor.h>
+
+#include <asm/xen/hypercall.h>
+
+#include <xen/interface/xen.h>
+#include <xen/interface/version.h>
 #include <xen/features.h>

 u8 xen_features[XENFEAT_NR_SUBMAPS * 32] __read_mostly;
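The features.c hunk above includes <asm/xen/hypercall.h> and the interface headers directly instead of relying on <asm/xen/hypervisor.h> to drag them in: the feature bitmap is filled by the XENVER_get_features hypercall, so the file genuinely depends on both. As a rough sketch of that dependency (an approximation of the in-tree xen_setup_features(); details may differ):

#include <linux/types.h>
#include <asm/xen/hypercall.h>
#include <xen/interface/xen.h>
#include <xen/interface/version.h>
#include <xen/features.h>

/* Approximate way the per-feature byte array is populated: one
 * XENVER_get_features hypercall per 32-bit submap. */
static void setup_features_sketch(u8 *features)
{
	struct xen_feature_info fi;
	int i, j;

	for (i = 0; i < XENFEAT_NR_SUBMAPS; i++) {
		fi.submap_idx = i;
		if (HYPERVISOR_xen_version(XENVER_get_features, &fi) < 0)
			break;
		for (j = 0; j < 32; j++)
			features[i * 32 + j] = !!(fi.submap & (1 << j));
	}
}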
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 06592b9da83c..7d8f531fb8e8 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -40,6 +40,7 @@
 #include <xen/interface/xen.h>
 #include <xen/page.h>
 #include <xen/grant_table.h>
+#include <asm/xen/hypercall.h>

 #include <asm/pgtable.h>
 #include <asm/sync_bitops.h>
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index d0e87cbe157c..9b91617b9582 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -39,8 +39,6 @@ static int xen_suspend(void *data)

 	BUG_ON(!irqs_disabled());

-	load_cr3(swapper_pg_dir);
-
 	err = device_power_down(PMSG_SUSPEND);
 	if (err) {
 		printk(KERN_ERR "xen_suspend: device_power_down failed: %d\n",
diff --git a/drivers/xen/xencomm.c b/drivers/xen/xencomm.c
index 797cb4e31f07..a240b2c20b99 100644
--- a/drivers/xen/xencomm.c
+++ b/drivers/xen/xencomm.c
@@ -23,13 +23,7 @@
 #include <asm/page.h>
 #include <xen/xencomm.h>
 #include <xen/interface/xen.h>
-#ifdef __ia64__
-#include <asm/xen/xencomm.h> /* for is_kern_addr() */
-#endif
-
-#ifdef HAVE_XEN_PLATFORM_COMPAT_H
-#include <xen/platform-compat.h>
-#endif
+#include <asm/xen/xencomm.h> /* for xencomm_is_phys_contiguous() */

 static int xencomm_init(struct xencomm_desc *desc,
 			void *buffer, unsigned long bytes)
@@ -157,20 +151,11 @@ static int xencomm_create(void *buffer, unsigned long bytes,
 	return 0;
 }

-/* check if memory address is within VMALLOC region */
-static int is_phys_contiguous(unsigned long addr)
-{
-	if (!is_kernel_addr(addr))
-		return 0;
-
-	return (addr < VMALLOC_START) || (addr >= VMALLOC_END);
-}
-
 static struct xencomm_handle *xencomm_create_inline(void *ptr)
 {
 	unsigned long paddr;

-	BUG_ON(!is_phys_contiguous((unsigned long)ptr));
+	BUG_ON(!xencomm_is_phys_contiguous((unsigned long)ptr));

 	paddr = (unsigned long)xencomm_pa(ptr);
 	BUG_ON(paddr & XENCOMM_INLINE_FLAG);
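The xencomm.c hunks replace the file-local is_phys_contiguous() (removed in the hunk just above) with xencomm_is_phys_contiguous() from <asm/xen/xencomm.h>, so each architecture can define what counts as physically contiguous kernel memory. Presumably the arch helper keeps the same basic test as the removed code; a sketch under that assumption, with the include only illustrative and is_kernel_addr() coming from the arch headers as in the removed helper:

#include <asm/pgtable.h>	/* VMALLOC_START, VMALLOC_END */

/* Sketch of what an arch-provided xencomm_is_phys_contiguous() might look
 * like, modelled on the file-local helper removed above; the real
 * definition is architecture specific. */
static int xencomm_is_phys_contiguous_sketch(unsigned long addr)
{
	/* Directly-mapped kernel addresses are physically contiguous;
	 * vmalloc space is virtually contiguous but physically scattered. */
	if (!is_kernel_addr(addr))
		return 0;

	return (addr < VMALLOC_START) || (addr >= VMALLOC_END);
}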
@@ -202,7 +187,7 @@ struct xencomm_handle *xencomm_map(void *ptr, unsigned long bytes)
 	int rc;
 	struct xencomm_desc *desc;

-	if (is_phys_contiguous((unsigned long)ptr))
+	if (xencomm_is_phys_contiguous((unsigned long)ptr))
 		return xencomm_create_inline(ptr);

 	rc = xencomm_create(ptr, bytes, &desc, GFP_KERNEL);
@@ -219,7 +204,7 @@ struct xencomm_handle *__xencomm_map_no_alloc(void *ptr, unsigned long bytes,
 	int rc;
 	struct xencomm_desc *desc = NULL;

-	if (is_phys_contiguous((unsigned long)ptr))
+	if (xencomm_is_phys_contiguous((unsigned long)ptr))
 		return xencomm_create_inline(ptr);

 	rc = xencomm_create_mini(ptr, bytes, xc_desc,