Diffstat (limited to 'drivers')
-rw-r--r-- drivers/Makefile | 2
-rw-r--r-- drivers/acpi/acpi_pad.c | 2
-rw-r--r-- drivers/acpi/bus.c | 2
-rw-r--r-- drivers/acpi/hest.c | 4
-rw-r--r-- drivers/acpi/pci_irq.c | 8
-rw-r--r-- drivers/acpi/power_meter.c | 2
-rw-r--r-- drivers/acpi/processor_idle.c | 2
-rw-r--r-- drivers/acpi/sbshc.c | 2
-rw-r--r-- drivers/acpi/sleep.c | 56
-rw-r--r-- drivers/ata/libata-eh.c | 5
-rw-r--r-- drivers/ata/pata_pcmcia.c | 51
-rw-r--r-- drivers/base/iommu.c | 43
-rw-r--r-- drivers/base/platform.c | 30
-rw-r--r-- drivers/base/power/runtime.c | 10
-rw-r--r-- drivers/base/power/sysfs.c | 65
-rw-r--r-- drivers/block/amiflop.c | 47
-rw-r--r-- drivers/block/drbd/drbd_main.c | 1
-rw-r--r-- drivers/block/drbd/drbd_receiver.c | 3
-rw-r--r-- drivers/block/drbd/drbd_worker.c | 2
-rw-r--r-- drivers/block/hd.c | 4
-rw-r--r-- drivers/block/pktcdvd.c | 17
-rw-r--r-- drivers/bluetooth/bluecard_cs.c | 11
-rw-r--r-- drivers/bluetooth/bt3c_cs.c | 11
-rw-r--r-- drivers/bluetooth/btuart_cs.c | 11
-rw-r--r-- drivers/bluetooth/dtl1_cs.c | 11
-rw-r--r-- drivers/char/i8k.c | 21
-rw-r--r-- drivers/char/isicom.c | 9
-rw-r--r-- drivers/char/istallion.c | 2
-rw-r--r-- drivers/char/mxser.c | 3
-rw-r--r-- drivers/char/pcmcia/cm4000_cs.c | 9
-rw-r--r-- drivers/char/pcmcia/cm4040_cs.c | 5
-rw-r--r-- drivers/char/pcmcia/ipwireless/main.c | 19
-rw-r--r-- drivers/char/pcmcia/ipwireless/main.h | 1
-rw-r--r-- drivers/char/pcmcia/ipwireless/tty.c | 19
-rw-r--r-- drivers/char/pcmcia/ipwireless/tty.h | 3
-rw-r--r-- drivers/char/pcmcia/synclink_cs.c | 22
-rw-r--r-- drivers/char/riscom8.c | 1
-rw-r--r-- drivers/char/serial167.c | 2
-rw-r--r-- drivers/char/stallion.c | 7
-rw-r--r-- drivers/char/sysrq.c | 2
-rw-r--r-- drivers/char/tpm/Kconfig | 6
-rw-r--r-- drivers/char/tpm/tpm.c | 47
-rw-r--r-- drivers/char/tpm/tpm_tis.c | 40
-rw-r--r-- drivers/char/tty_io.c | 1
-rw-r--r-- drivers/clocksource/cs5535-clockevt.c | 8
-rw-r--r-- drivers/clocksource/sh_cmt.c | 45
-rw-r--r-- drivers/clocksource/sh_mtu2.c | 37
-rw-r--r-- drivers/clocksource/sh_tmu.c | 41
-rw-r--r-- drivers/cpufreq/cpufreq.c | 59
-rw-r--r-- drivers/cpufreq/cpufreq_conservative.c | 56
-rw-r--r-- drivers/cpufreq/cpufreq_ondemand.c | 115
-rw-r--r-- drivers/cpuidle/governors/ladder.c | 2
-rw-r--r-- drivers/cpuidle/governors/menu.c | 11
-rw-r--r-- drivers/dma/shdma.c | 52
-rw-r--r-- drivers/dma/shdma.h | 4
-rw-r--r-- drivers/dma/txx9dmac.c | 2
-rw-r--r-- drivers/edac/edac_mce_amd.c | 5
-rw-r--r-- drivers/firewire/core-iso.c | 14
-rw-r--r-- drivers/firewire/ohci.c | 23
-rw-r--r-- drivers/gpio/gpiolib.c | 3
-rw-r--r-- drivers/gpio/it8761e_gpio.c | 18
-rw-r--r-- drivers/gpio/pca953x.c | 14
-rw-r--r-- drivers/gpio/pl061.c | 14
-rw-r--r-- drivers/gpu/drm/drm_irq.c | 1
-rw-r--r-- drivers/gpu/drm/drm_memory.c | 2
-rw-r--r-- drivers/gpu/drm/drm_sysfs.c | 21
-rw-r--r-- drivers/gpu/drm/i915/i915_dma.c | 14
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.c | 5
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 10
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 151
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_tiling.c | 22
-rw-r--r-- drivers/gpu/drm/i915/i915_irq.c | 16
-rw-r--r-- drivers/gpu/drm/i915/i915_opregion.c | 54
-rw-r--r-- drivers/gpu/drm/i915/i915_reg.h | 13
-rw-r--r-- drivers/gpu/drm/i915/intel_display.c | 11
-rw-r--r-- drivers/gpu/drm/radeon/atombios.h | 2
-rw-r--r-- drivers/gpu/drm/radeon/r100.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/r100_track.h | 2
-rw-r--r-- drivers/gpu/drm/radeon/r300.c | 7
-rw-r--r-- drivers/gpu/drm/radeon/r300_cmdbuf.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/r420.c | 6
-rw-r--r-- drivers/gpu/drm/radeon/radeon_agp.c | 10
-rw-r--r-- drivers/gpu/drm/radeon/radeon_atombios.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/radeon_connectors.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_cp.c | 9
-rw-r--r-- drivers/gpu/drm/radeon/radeon_display.c | 8
-rw-r--r-- drivers/gpu/drm/radeon/radeon_drv.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_encoders.c | 63
-rw-r--r-- drivers/gpu/drm/radeon/radeon_kms.c | 6
-rw-r--r-- drivers/gpu/drm/radeon/radeon_legacy_encoders.c | 12
-rw-r--r-- drivers/gpu/drm/radeon/radeon_mode.h | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_state.c | 19
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo.c | 30
-rw-r--r-- drivers/gpu/drm/ttm/ttm_lock.c | 5
-rw-r--r-- drivers/gpu/drm/via/via_video.c | 2
-rw-r--r-- drivers/gpu/vga/vga_switcheroo.c | 6
-rw-r--r-- drivers/hid/hid-cherry.c | 1
-rw-r--r-- drivers/hid/hid-core.c | 10
-rw-r--r-- drivers/hid/hid-ids.h | 1
-rw-r--r-- drivers/hid/hid-ntrig.c | 72
-rw-r--r-- drivers/hid/hid-sony.c | 2
-rw-r--r-- drivers/hid/hid-wacom.c | 1
-rw-r--r-- drivers/hid/usbhid/hid-core.c | 13
-rw-r--r-- drivers/hwmon/applesmc.c | 61
-rw-r--r-- drivers/hwmon/asc7621.c | 63
-rw-r--r-- drivers/hwmon/asus_atk0110.c | 4
-rw-r--r-- drivers/hwmon/hp_accel.c | 6
-rw-r--r-- drivers/i2c/busses/Kconfig | 2
-rw-r--r-- drivers/i2c/busses/i2c-octeon.c | 4
-rw-r--r-- drivers/i2c/i2c-core.c | 229
-rw-r--r-- drivers/ide/ide-cs.c | 24
-rw-r--r-- drivers/infiniband/Kconfig | 1
-rw-r--r-- drivers/infiniband/Makefile | 1
-rw-r--r-- drivers/infiniband/core/cma.c | 74
-rw-r--r-- drivers/infiniband/core/mad.c | 4
-rw-r--r-- drivers/infiniband/core/ucm.c | 3
-rw-r--r-- drivers/infiniband/core/ucma.c | 4
-rw-r--r-- drivers/infiniband/core/user_mad.c | 12
-rw-r--r-- drivers/infiniband/core/uverbs_main.c | 11
-rw-r--r-- drivers/infiniband/hw/amso1100/c2.h | 2
-rw-r--r-- drivers/infiniband/hw/amso1100/c2_alloc.c | 4
-rw-r--r-- drivers/infiniband/hw/amso1100/c2_cq.c | 4
-rw-r--r-- drivers/infiniband/hw/amso1100/c2_mq.h | 2
-rw-r--r-- drivers/infiniband/hw/amso1100/c2_provider.h | 2
-rw-r--r-- drivers/infiniband/hw/amso1100/c2_rnic.c | 12
-rw-r--r-- drivers/infiniband/hw/cxgb3/cxio_hal.c | 12
-rw-r--r-- drivers/infiniband/hw/cxgb3/cxio_hal.h | 2
-rw-r--r-- drivers/infiniband/hw/cxgb3/cxio_wr.h | 4
-rw-r--r-- drivers/infiniband/hw/cxgb3/iwch.c | 2
-rw-r--r-- drivers/infiniband/hw/cxgb3/iwch_cm.c | 133
-rw-r--r-- drivers/infiniband/hw/cxgb4/Kconfig | 18
-rw-r--r-- drivers/infiniband/hw/cxgb4/Makefile | 5
-rw-r--r-- drivers/infiniband/hw/cxgb4/cm.c | 2374
-rw-r--r-- drivers/infiniband/hw/cxgb4/cq.c | 882
-rw-r--r-- drivers/infiniband/hw/cxgb4/device.c | 520
-rw-r--r-- drivers/infiniband/hw/cxgb4/ev.c | 193
-rw-r--r-- drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 745
-rw-r--r-- drivers/infiniband/hw/cxgb4/mem.c | 811
-rw-r--r-- drivers/infiniband/hw/cxgb4/provider.c | 518
-rw-r--r-- drivers/infiniband/hw/cxgb4/qp.c | 1577
-rw-r--r-- drivers/infiniband/hw/cxgb4/resource.c | 417
-rw-r--r-- drivers/infiniband/hw/cxgb4/t4.h | 550
-rw-r--r-- drivers/infiniband/hw/cxgb4/t4fw_ri_api.h | 829
-rw-r--r-- drivers/infiniband/hw/cxgb4/user.h | 66
-rw-r--r-- drivers/infiniband/hw/mlx4/cq.c | 8
-rw-r--r-- drivers/infiniband/hw/mlx4/main.c | 1
-rw-r--r-- drivers/infiniband/hw/mlx4/qp.c | 50
-rw-r--r-- drivers/infiniband/hw/mthca/mthca_allocator.c | 8
-rw-r--r-- drivers/infiniband/hw/mthca/mthca_eq.c | 6
-rw-r--r-- drivers/infiniband/hw/mthca/mthca_provider.h | 2
-rw-r--r-- drivers/infiniband/hw/nes/nes_hw.c | 12
-rw-r--r-- drivers/infiniband/hw/nes/nes_nic.c | 16
-rw-r--r-- drivers/infiniband/hw/nes/nes_utils.c | 10
-rw-r--r-- drivers/infiniband/hw/nes/nes_verbs.c | 2
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_ethtool.c | 20
-rw-r--r-- drivers/infiniband/ulp/iser/iscsi_iser.c | 9
-rw-r--r-- drivers/infiniband/ulp/iser/iscsi_iser.h | 4
-rw-r--r-- drivers/infiniband/ulp/iser/iser_verbs.c | 115
-rw-r--r-- drivers/input/gameport/gameport.c | 4
-rw-r--r-- drivers/input/joydev.c | 18
-rw-r--r-- drivers/input/joystick/analog.c | 4
-rw-r--r-- drivers/input/joystick/iforce/iforce-main.c | 6
-rw-r--r-- drivers/input/joystick/iforce/iforce-usb.c | 1
-rw-r--r-- drivers/input/keyboard/Kconfig | 48
-rw-r--r-- drivers/input/keyboard/Makefile | 3
-rw-r--r-- drivers/input/keyboard/corgikbd.c | 414
-rw-r--r-- drivers/input/keyboard/spitzkbd.c | 496
-rw-r--r-- drivers/input/keyboard/tosakbd.c | 431
-rw-r--r-- drivers/input/misc/ati_remote.c | 14
-rw-r--r-- drivers/input/misc/pcspkr.c | 6
-rw-r--r-- drivers/input/mouse/alps.c | 1
-rw-r--r-- drivers/input/mouse/elantech.c | 104
-rw-r--r-- drivers/input/mouse/elantech.h | 5
-rw-r--r-- drivers/input/mouse/psmouse-base.c | 32
-rw-r--r-- drivers/input/mouse/psmouse.h | 1
-rw-r--r-- drivers/input/mouse/synaptics.c | 35
-rw-r--r-- drivers/input/mouse/synaptics.h | 4
-rw-r--r-- drivers/input/touchscreen/Kconfig | 20
-rw-r--r-- drivers/input/touchscreen/Makefile | 1
-rw-r--r-- drivers/input/touchscreen/ad7877.c | 15
-rw-r--r-- drivers/input/touchscreen/corgi_ts.c | 385
-rw-r--r-- drivers/input/touchscreen/eeti_ts.c | 56
-rw-r--r-- drivers/input/touchscreen/s3c2410_ts.c | 40
-rw-r--r-- drivers/isdn/divert/divert_procfs.c | 18
-rw-r--r-- drivers/isdn/hardware/avm/avm_cs.c | 76
-rw-r--r-- drivers/isdn/hisax/avma1_cs.c | 63
-rw-r--r-- drivers/isdn/hisax/elsa_cs.c | 40
-rw-r--r-- drivers/isdn/hisax/sedlbauer_cs.c | 60
-rw-r--r-- drivers/isdn/hisax/teles_cs.c | 50
-rw-r--r-- drivers/md/md.c | 10
-rw-r--r-- drivers/md/raid5.c | 37
-rw-r--r-- drivers/media/common/saa7146_fops.c | 11
-rw-r--r-- drivers/media/common/saa7146_video.c | 8
-rw-r--r-- drivers/media/dvb/frontends/stv090x.c | 4
-rw-r--r-- drivers/media/dvb/ttpci/budget.c | 3
-rw-r--r-- drivers/media/video/Kconfig | 4
-rw-r--r-- drivers/media/video/Makefile | 2
-rw-r--r-- drivers/media/video/davinci/vpfe_capture.c | 38
-rw-r--r-- drivers/media/video/gspca/sn9c20x.c | 2
-rw-r--r-- drivers/media/video/gspca/spca508.c | 1
-rw-r--r-- drivers/media/video/gspca/spca561.c | 1
-rw-r--r-- drivers/media/video/gspca/stv06xx/stv06xx.c | 2
-rw-r--r-- drivers/media/video/hexium_gemini.c | 3
-rw-r--r-- drivers/media/video/hexium_orion.c | 4
-rw-r--r-- drivers/media/video/mx1_camera.c | 8
-rw-r--r-- drivers/media/video/mxb.c | 17
-rw-r--r-- drivers/media/video/omap24xxcam.c | 2
-rw-r--r-- drivers/media/video/pxa_camera.c | 11
-rw-r--r-- drivers/media/video/sh_mobile_ceu_camera.c | 2
-rw-r--r-- drivers/mfd/wm831x-core.c | 3
-rw-r--r-- drivers/mfd/wm8350-core.c | 4
-rw-r--r-- drivers/misc/Kconfig | 16
-rw-r--r-- drivers/misc/Makefile | 1
-rw-r--r-- drivers/misc/vmware_balloon.c | 832
-rw-r--r-- drivers/mmc/host/at91_mci.c | 2
-rw-r--r-- drivers/mmc/host/atmel-mci.c | 18
-rw-r--r-- drivers/mmc/host/mmci.c | 19
-rw-r--r-- drivers/mmc/host/mmci.h | 6
-rw-r--r-- drivers/mmc/host/pxamci.c | 2
-rw-r--r-- drivers/mtd/Makefile | 2
-rw-r--r-- drivers/mtd/internal.h | 17
-rw-r--r-- drivers/mtd/maps/pcmciamtd.c | 3
-rw-r--r-- drivers/mtd/mtdbdi.c | 43
-rw-r--r-- drivers/mtd/mtdcore.c | 79
-rw-r--r-- drivers/mtd/mtdsuper.c | 2
-rw-r--r-- drivers/mtd/nand/orion_nand.c | 8
-rw-r--r-- drivers/net/8139too.c | 2
-rw-r--r-- drivers/net/Makefile | 1
-rw-r--r-- drivers/net/a2065.c | 1
-rw-r--r-- drivers/net/ariadne.c | 1
-rw-r--r-- drivers/net/arm/ep93xx_eth.c | 10
-rw-r--r-- drivers/net/bnx2.c | 48
-rw-r--r-- drivers/net/can/usb/ems_usb.c | 8
-rw-r--r-- drivers/net/cxgb3/ael1002.c | 2
-rw-r--r-- drivers/net/cxgb3/cxgb3_main.c | 2
-rw-r--r-- drivers/net/e100.c | 10
-rw-r--r-- drivers/net/e1000e/82571.c | 20
-rw-r--r-- drivers/net/e1000e/e1000.h | 5
-rw-r--r-- drivers/net/e1000e/netdev.c | 95
-rw-r--r-- drivers/net/fec.c | 2
-rw-r--r-- drivers/net/fsl_pq_mdio.c | 20
-rw-r--r-- drivers/net/gianfar.c | 14
-rw-r--r-- drivers/net/hydra.c | 1
-rw-r--r-- drivers/net/igbvf/netdev.c | 6
-rw-r--r-- drivers/net/ixgbe/ixgbe_82599.c | 62
-rw-r--r-- drivers/net/ixgbe/ixgbe_main.c | 22
-rw-r--r-- drivers/net/ixgbe/ixgbe_type.h | 2
-rw-r--r-- drivers/net/ks8851.c | 12
-rw-r--r-- drivers/net/pcmcia/3c574_cs.c | 22
-rw-r--r-- drivers/net/pcmcia/3c589_cs.c | 16
-rw-r--r-- drivers/net/pcmcia/axnet_cs.c | 21
-rw-r--r-- drivers/net/pcmcia/com20020_cs.c | 29
-rw-r--r-- drivers/net/pcmcia/fmvj18x_cs.c | 18
-rw-r--r-- drivers/net/pcmcia/ibmtr_cs.c | 16
-rw-r--r-- drivers/net/pcmcia/nmclan_cs.c | 14
-rw-r--r-- drivers/net/pcmcia/pcnet_cs.c | 16
-rw-r--r-- drivers/net/pcmcia/smc91c92_cs.c | 48
-rw-r--r-- drivers/net/pcmcia/xirc2ps_cs.c | 36
-rw-r--r-- drivers/net/phy/Kconfig | 5
-rw-r--r-- drivers/net/phy/Makefile | 1
-rw-r--r-- drivers/net/phy/mdio-octeon.c | 10
-rw-r--r-- drivers/net/phy/micrel.c | 105
-rw-r--r-- drivers/net/ppp_generic.c | 34
-rw-r--r-- drivers/net/r8169.c | 54
-rw-r--r-- drivers/net/sb1250-mac.c | 67
-rw-r--r-- drivers/net/sfc/efx.c | 4
-rw-r--r-- drivers/net/sfc/falcon.c | 4
-rw-r--r-- drivers/net/sfc/falcon_boards.c | 13
-rw-r--r-- drivers/net/sfc/nic.h | 2
-rw-r--r-- drivers/net/sfc/siena.c | 13
-rw-r--r-- drivers/net/tg3.c | 1
-rw-r--r-- drivers/net/usb/Kconfig | 21
-rw-r--r-- drivers/net/usb/Makefile | 2
-rw-r--r-- drivers/net/usb/cdc_ether.c | 1
-rw-r--r-- drivers/net/usb/dm9601.c | 2
-rw-r--r-- drivers/net/usb/ipheth.c | 569
-rw-r--r-- drivers/net/usb/kaweth.c | 1
-rw-r--r-- drivers/net/usb/sierra_net.c | 1004
-rw-r--r-- drivers/net/veth.c | 1
-rw-r--r-- drivers/net/wireless/airo_cs.c | 72
-rw-r--r-- drivers/net/wireless/ath/ar9170/usb.c | 11
-rw-r--r-- drivers/net/wireless/ath/ar9170/usb.h | 1
-rw-r--r-- drivers/net/wireless/atmel_cs.c | 70
-rw-r--r-- drivers/net/wireless/b43/pcmcia.c | 5
-rw-r--r-- drivers/net/wireless/hostap/hostap_cs.c | 38
-rw-r--r-- drivers/net/wireless/ipw2x00/ipw2100.c | 11
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-6000.c | 4
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-agn.c | 1
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-commands.h | 4
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-core.c | 1
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-core.h | 2
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-dev.h | 1
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-eeprom.h | 4
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-scan.c | 54
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl3945-base.c | 3
-rw-r--r-- drivers/net/wireless/libertas/if_cs.c | 21
-rw-r--r-- drivers/net/wireless/orinoco/orinoco_cs.c | 27
-rw-r--r-- drivers/net/wireless/orinoco/spectrum_cs.c | 27
-rw-r--r-- drivers/net/wireless/p54/p54pci.c | 2
-rw-r--r-- drivers/net/wireless/ray_cs.c | 15
-rw-r--r-- drivers/net/wireless/ray_cs.h | 1
-rw-r--r-- drivers/net/wireless/wl3501.h | 1
-rw-r--r-- drivers/net/wireless/wl3501_cs.c | 23
-rw-r--r-- drivers/net/zorro8390.c | 1
-rw-r--r-- drivers/of/of_mdio.c | 2
-rw-r--r-- drivers/oprofile/cpu_buffer.c | 75
-rw-r--r-- drivers/oprofile/oprof.c | 12
-rw-r--r-- drivers/oprofile/oprof.h | 3
-rw-r--r-- drivers/oprofile/timer_int.c | 78
-rw-r--r-- drivers/parport/parport_cs.c | 13
-rw-r--r-- drivers/pci/hotplug/rpadlpar_core.c | 3
-rw-r--r-- drivers/pci/hotplug/rpaphp_core.c | 3
-rw-r--r-- drivers/pci/intel-iommu.c | 22
-rw-r--r-- drivers/pci/pci.c | 6
-rw-r--r-- drivers/pci/pcie/aer/aerdrv.c | 10
-rw-r--r-- drivers/pci/probe.c | 23
-rw-r--r-- drivers/pci/setup-bus.c | 114
-rw-r--r-- drivers/pcmcia/Kconfig | 25
-rw-r--r-- drivers/pcmcia/Makefile | 10
-rw-r--r-- drivers/pcmcia/bfin_cf_pcmcia.c | 2
-rw-r--r-- drivers/pcmcia/cardbus.c | 1
-rw-r--r-- drivers/pcmcia/cistpl.c | 121
-rw-r--r-- drivers/pcmcia/cs.c | 17
-rw-r--r-- drivers/pcmcia/cs_internal.h | 22
-rw-r--r-- drivers/pcmcia/db1xxx_ss.c | 16
-rw-r--r-- drivers/pcmcia/ds.c | 89
-rw-r--r-- drivers/pcmcia/omap_cf.c | 2
-rw-r--r-- drivers/pcmcia/pcmcia_cis.c | 356
-rw-r--r-- drivers/pcmcia/pcmcia_ioctl.c | 30
-rw-r--r-- drivers/pcmcia/pcmcia_resource.c | 634
-rw-r--r-- drivers/pcmcia/pxa2xx_vpac270.c | 229
-rw-r--r-- drivers/pcmcia/rsrc_iodyn.c | 172
-rw-r--r-- drivers/pcmcia/rsrc_mgr.c | 112
-rw-r--r-- drivers/pcmcia/rsrc_nonstatic.c | 164
-rw-r--r-- drivers/pcmcia/yenta_socket.c | 7
-rw-r--r-- drivers/platform/x86/Kconfig | 1
-rw-r--r-- drivers/platform/x86/asus-laptop.c | 8
-rw-r--r-- drivers/platform/x86/dell-wmi.c | 3
-rw-r--r-- drivers/platform/x86/eeepc-laptop.c | 3
-rw-r--r-- drivers/platform/x86/eeepc-wmi.c | 333
-rw-r--r-- drivers/pnp/pnpacpi/rsparser.c | 26
-rw-r--r-- drivers/pnp/resource.c | 4
-rw-r--r-- drivers/regulator/max8925-regulator.c | 2
-rw-r--r-- drivers/rtc/Kconfig | 10
-rw-r--r-- drivers/rtc/Makefile | 1
-rw-r--r-- drivers/rtc/rtc-davinci.c | 673
-rw-r--r-- drivers/s390/block/dasd.c | 39
-rw-r--r-- drivers/s390/block/dasd_3990_erp.c | 20
-rw-r--r-- drivers/s390/block/dasd_alias.c | 125
-rw-r--r-- drivers/s390/block/dasd_devmap.c | 174
-rw-r--r-- drivers/s390/block/dasd_eckd.c | 116
-rw-r--r-- drivers/s390/block/dasd_eckd.h | 2
-rw-r--r-- drivers/s390/block/dasd_int.h | 49
-rw-r--r-- drivers/s390/char/Kconfig | 3
-rw-r--r-- drivers/s390/char/fs3270.c | 1
-rw-r--r-- drivers/s390/char/keyboard.c | 21
-rw-r--r-- drivers/s390/char/vmcp.c | 38
-rw-r--r-- drivers/s390/char/zcore.c | 4
-rw-r--r-- drivers/s390/cio/chsc_sch.c | 1
-rw-r--r-- drivers/s390/cio/cio.c | 3
-rw-r--r-- drivers/s390/cio/css.c | 8
-rw-r--r-- drivers/s390/cio/qdio.h | 15
-rw-r--r-- drivers/s390/cio/qdio_main.c | 67
-rw-r--r-- drivers/s390/cio/qdio_setup.c | 8
-rw-r--r-- drivers/s390/cio/qdio_thinint.c | 4
-rw-r--r-- drivers/s390/crypto/zcrypt_api.c | 2
-rw-r--r-- drivers/s390/net/qeth_core_main.c | 17
-rw-r--r-- drivers/s390/scsi/zfcp_cfdc.c | 1
-rw-r--r-- drivers/sbus/char/flash.c | 6
-rw-r--r-- drivers/scsi/advansys.c | 50
-rw-r--r-- drivers/scsi/libiscsi.c | 2
-rw-r--r-- drivers/scsi/libsas/sas_ata.c | 4
-rw-r--r-- drivers/scsi/libsas/sas_scsi_host.c | 4
-rw-r--r-- drivers/scsi/pcmcia/aha152x_stub.c | 9
-rw-r--r-- drivers/scsi/pcmcia/fdomain_stub.c | 9
-rw-r--r-- drivers/scsi/pcmcia/nsp_cs.c | 23
-rw-r--r-- drivers/scsi/pcmcia/nsp_cs.h | 1
-rw-r--r-- drivers/scsi/pcmcia/qlogic_stub.c | 13
-rw-r--r-- drivers/scsi/pcmcia/sym53c500_cs.c | 9
-rw-r--r-- drivers/scsi/qla4xxx/ql4_mbx.c | 2
-rw-r--r-- drivers/scsi/scsi_debug.c | 3
-rw-r--r-- drivers/scsi/scsi_error.c | 15
-rw-r--r-- drivers/scsi/sd.c | 1
-rw-r--r-- drivers/scsi/zorro7xx.c | 1
-rw-r--r-- drivers/serial/8250_pnp.c | 2
-rw-r--r-- drivers/serial/Kconfig | 23
-rw-r--r-- drivers/serial/Makefile | 1
-rw-r--r-- drivers/serial/atmel_serial.c | 207
-rw-r--r-- drivers/serial/imx.c | 10
-rw-r--r-- drivers/serial/mpc52xx_uart.c | 35
-rw-r--r-- drivers/serial/pmac_zilog.c | 4
-rw-r--r-- drivers/serial/serial_cs.c | 36
-rw-r--r-- drivers/serial/serial_ks8695.c | 1
-rw-r--r-- drivers/serial/sh-sci.c | 189
-rw-r--r-- drivers/sh/Kconfig | 24
-rw-r--r-- drivers/sh/Makefile | 2
-rw-r--r-- drivers/sh/clk-cpg.c | 298
-rw-r--r-- drivers/sh/clk.c | 545
-rw-r--r-- drivers/sh/intc.c | 333
-rw-r--r-- drivers/spi/omap2_mcspi.c | 5
-rw-r--r-- drivers/spi/pxa2xx_spi.c | 25
-rw-r--r-- drivers/spi/spi.c | 8
-rw-r--r-- drivers/ssb/main.c | 2
-rw-r--r-- drivers/staging/comedi/drivers/cb_das16_cs.c | 45
-rw-r--r-- drivers/staging/comedi/drivers/das08_cs.c | 35
-rw-r--r-- drivers/staging/comedi/drivers/ni_daq_700.c | 42
-rw-r--r-- drivers/staging/comedi/drivers/ni_daq_dio24.c | 42
-rw-r--r-- drivers/staging/comedi/drivers/ni_labpc_cs.c | 42
-rw-r--r-- drivers/staging/comedi/drivers/ni_mio_cs.c | 19
-rw-r--r-- drivers/staging/comedi/drivers/quatech_daqp_cs.c | 45
-rw-r--r-- drivers/staging/dt3155/dt3155_drv.c | 4
-rw-r--r-- drivers/staging/hv/Hv.c | 2
-rw-r--r-- drivers/staging/hv/RndisFilter.c | 1
-rw-r--r-- drivers/staging/hv/netvsc_drv.c | 3
-rw-r--r-- drivers/staging/iio/accel/lis3l02dq_core.c | 2
-rw-r--r-- drivers/staging/iio/accel/lis3l02dq_ring.c | 3
-rw-r--r-- drivers/staging/iio/adc/max1363_core.c | 2
-rw-r--r-- drivers/staging/iio/industrialio-core.c | 3
-rw-r--r-- drivers/staging/iio/light/tsl2563.c | 2
-rw-r--r-- drivers/staging/iio/ring_sw.c | 2
-rw-r--r-- drivers/staging/netwave/netwave_cs.c | 9
-rw-r--r-- drivers/staging/octeon/cvmx-helper-board.c | 8
-rw-r--r-- drivers/staging/rt2860/usb_main_dev.c | 1
-rw-r--r-- drivers/staging/rtl8192su/r8192U_core.c | 3
-rw-r--r-- drivers/staging/usbip/usbip_event.c | 3
-rw-r--r-- drivers/staging/vme/bridges/vme_tsi148.c | 3
-rw-r--r-- drivers/staging/wavelan/wavelan_cs.c | 15
-rw-r--r-- drivers/staging/wlags49_h2/wl_cs.c | 9
-rw-r--r-- drivers/telephony/ixj_pcmcia.c | 3
-rw-r--r-- drivers/usb/class/cdc-acm.c | 3
-rw-r--r-- drivers/usb/core/Kconfig | 2
-rw-r--r-- drivers/usb/core/driver.c | 49
-rw-r--r-- drivers/usb/core/generic.c | 2
-rw-r--r-- drivers/usb/core/inode.c | 5
-rw-r--r-- drivers/usb/core/usb.c | 20
-rw-r--r-- drivers/usb/gadget/at91_udc.c | 7
-rw-r--r-- drivers/usb/gadget/s3c-hsotg.c | 7
-rw-r--r-- drivers/usb/host/ehci-hcd.c | 1
-rw-r--r-- drivers/usb/host/ehci-hub.c | 2
-rw-r--r-- drivers/usb/host/ehci-mem.c | 2
-rw-r--r-- drivers/usb/host/ehci-omap.c | 6
-rw-r--r-- drivers/usb/host/ehci-sched.c | 40
-rw-r--r-- drivers/usb/host/ehci.h | 5
-rw-r--r-- drivers/usb/host/ohci-at91.c | 2
-rw-r--r-- drivers/usb/host/ohci-da8xx.c | 2
-rw-r--r-- drivers/usb/host/ohci-hub.c | 2
-rw-r--r-- drivers/usb/host/oxu210hp-hcd.c | 6
-rw-r--r-- drivers/usb/host/sl811-hcd.c | 6
-rw-r--r-- drivers/usb/host/sl811_cs.c | 28
-rw-r--r-- drivers/usb/host/xhci-mem.c | 65
-rw-r--r-- drivers/usb/host/xhci.h | 4
-rw-r--r-- drivers/usb/misc/usbsevseg.c | 15
-rw-r--r-- drivers/usb/musb/Kconfig | 2
-rw-r--r-- drivers/usb/musb/Makefile | 2
-rw-r--r-- drivers/usb/musb/blackfin.c | 8
-rw-r--r-- drivers/usb/musb/davinci.c | 2
-rw-r--r-- drivers/usb/musb/musb_core.c | 77
-rw-r--r-- drivers/usb/musb/musb_core.h | 2
-rw-r--r-- drivers/usb/musb/musb_host.c | 1
-rw-r--r-- drivers/usb/musb/omap2430.c | 3
-rw-r--r-- drivers/usb/musb/tusb6010.c | 13
-rw-r--r-- drivers/usb/musb/tusb6010_omap.c | 22
-rw-r--r-- drivers/usb/serial/option.c | 10
-rw-r--r-- drivers/usb/serial/pl2303.c | 2
-rw-r--r-- drivers/usb/serial/pl2303.h | 5
-rw-r--r-- drivers/usb/serial/qcaux.c | 10
-rw-r--r-- drivers/usb/serial/sierra.c | 1
-rw-r--r-- drivers/usb/serial/ti_usb_3410_5052.c | 26
-rw-r--r-- drivers/usb/serial/ti_usb_3410_5052.h | 3
-rw-r--r-- drivers/usb/wusbcore/devconnect.c | 2
-rw-r--r-- drivers/vhost/vhost.c | 7
-rw-r--r-- drivers/video/amifb.c | 49
-rw-r--r-- drivers/video/bfin-t350mcqb-fb.c | 15
-rw-r--r-- drivers/video/cirrusfb.c | 1
-rw-r--r-- drivers/video/efifb.c | 3
-rw-r--r-- drivers/video/fm2fb.c | 1
-rw-r--r-- drivers/video/sh_mobile_lcdcfb.c | 27
-rw-r--r-- drivers/w1/masters/omap_hdq.c | 1
-rw-r--r-- drivers/w1/slaves/w1_therm.c | 5
-rw-r--r-- drivers/watchdog/Kconfig | 2
-rw-r--r-- drivers/watchdog/booke_wdt.c | 2
-rw-r--r-- drivers/watchdog/ep93xx_wdt.c | 2
-rw-r--r-- drivers/watchdog/mpcore_wdt.c | 21
-rw-r--r-- drivers/watchdog/sb_wdog.c | 4
-rw-r--r-- drivers/watchdog/sbc_fitpc2_wdt.c | 14
-rw-r--r-- drivers/xen/manage.c | 14
-rw-r--r-- drivers/zorro/proc.c | 6
-rw-r--r-- drivers/zorro/zorro-driver.c | 24
-rw-r--r-- drivers/zorro/zorro-sysfs.c | 11
-rw-r--r-- drivers/zorro/zorro.c | 243
490 files changed, 19698 insertions(+), 6420 deletions(-)
diff --git a/drivers/Makefile b/drivers/Makefile
index 34f1e1064dbc..f42a03029b7c 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_SFI) += sfi/
17obj-$(CONFIG_PNP) += pnp/ 17obj-$(CONFIG_PNP) += pnp/
18obj-$(CONFIG_ARM_AMBA) += amba/ 18obj-$(CONFIG_ARM_AMBA) += amba/
19 19
20obj-$(CONFIG_VIRTIO) += virtio/
20obj-$(CONFIG_XEN) += xen/ 21obj-$(CONFIG_XEN) += xen/
21 22
22# regulators early, since some subsystems rely on them to initialize 23# regulators early, since some subsystems rely on them to initialize
@@ -108,7 +109,6 @@ obj-$(CONFIG_PPC_PS3) += ps3/
108obj-$(CONFIG_OF) += of/ 109obj-$(CONFIG_OF) += of/
109obj-$(CONFIG_SSB) += ssb/ 110obj-$(CONFIG_SSB) += ssb/
110obj-$(CONFIG_VHOST_NET) += vhost/ 111obj-$(CONFIG_VHOST_NET) += vhost/
111obj-$(CONFIG_VIRTIO) += virtio/
112obj-$(CONFIG_VLYNQ) += vlynq/ 112obj-$(CONFIG_VLYNQ) += vlynq/
113obj-$(CONFIG_STAGING) += staging/ 113obj-$(CONFIG_STAGING) += staging/
114obj-y += platform/ 114obj-y += platform/
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index 19dacfd43163..62122134693b 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -31,7 +31,7 @@
31#include <acpi/acpi_bus.h> 31#include <acpi/acpi_bus.h>
32#include <acpi/acpi_drivers.h> 32#include <acpi/acpi_drivers.h>
33 33
34#define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator" 34#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
35#define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator" 35#define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
36#define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80 36#define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
37static DEFINE_MUTEX(isolated_cpus_lock); 37static DEFINE_MUTEX(isolated_cpus_lock);
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 37132dc2da03..743576bf1bd7 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -527,7 +527,7 @@ int acpi_bus_generate_proc_event4(const char *device_class, const char *bus_id,
527 if (!event_is_open) 527 if (!event_is_open)
528 return 0; 528 return 0;
529 529
530 event = kmalloc(sizeof(struct acpi_bus_event), GFP_ATOMIC); 530 event = kzalloc(sizeof(struct acpi_bus_event), GFP_ATOMIC);
531 if (!event) 531 if (!event)
532 return -ENOMEM; 532 return -ENOMEM;
533 533
diff --git a/drivers/acpi/hest.c b/drivers/acpi/hest.c
index 4bb18c980ac6..1c527a192872 100644
--- a/drivers/acpi/hest.c
+++ b/drivers/acpi/hest.c
@@ -123,6 +123,10 @@ int acpi_hest_firmware_first_pci(struct pci_dev *pci)
123{ 123{
124 acpi_status status = AE_NOT_FOUND; 124 acpi_status status = AE_NOT_FOUND;
125 struct acpi_table_header *hest = NULL; 125 struct acpi_table_header *hest = NULL;
126
127 if (acpi_disabled)
128 return 0;
129
126 status = acpi_get_table(ACPI_SIG_HEST, 1, &hest); 130 status = acpi_get_table(ACPI_SIG_HEST, 1, &hest);
127 131
128 if (ACPI_SUCCESS(status)) { 132 if (ACPI_SUCCESS(status)) {
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index b0a71ecee682..e4804fb05e23 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -401,11 +401,13 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
401 * driver reported one, then use it. Exit in any case. 401 * driver reported one, then use it. Exit in any case.
402 */ 402 */
403 if (gsi < 0) { 403 if (gsi < 0) {
404 u32 dev_gsi;
404 dev_warn(&dev->dev, "PCI INT %c: no GSI", pin_name(pin)); 405 dev_warn(&dev->dev, "PCI INT %c: no GSI", pin_name(pin));
405 /* Interrupt Line values above 0xF are forbidden */ 406 /* Interrupt Line values above 0xF are forbidden */
406 if (dev->irq > 0 && (dev->irq <= 0xF)) { 407 if (dev->irq > 0 && (dev->irq <= 0xF) &&
407 printk(" - using IRQ %d\n", dev->irq); 408 (acpi_isa_irq_to_gsi(dev->irq, &dev_gsi) == 0)) {
408 acpi_register_gsi(&dev->dev, dev->irq, 409 printk(" - using ISA IRQ %d\n", dev->irq);
410 acpi_register_gsi(&dev->dev, dev_gsi,
409 ACPI_LEVEL_SENSITIVE, 411 ACPI_LEVEL_SENSITIVE,
410 ACPI_ACTIVE_LOW); 412 ACPI_ACTIVE_LOW);
411 return 0; 413 return 0;
diff --git a/drivers/acpi/power_meter.c b/drivers/acpi/power_meter.c
index e8c32a49f14e..66f67293341e 100644
--- a/drivers/acpi/power_meter.c
+++ b/drivers/acpi/power_meter.c
@@ -35,7 +35,7 @@
35#define ACPI_POWER_METER_NAME "power_meter" 35#define ACPI_POWER_METER_NAME "power_meter"
36ACPI_MODULE_NAME(ACPI_POWER_METER_NAME); 36ACPI_MODULE_NAME(ACPI_POWER_METER_NAME);
37#define ACPI_POWER_METER_DEVICE_NAME "Power Meter" 37#define ACPI_POWER_METER_DEVICE_NAME "Power Meter"
38#define ACPI_POWER_METER_CLASS "power_meter_resource" 38#define ACPI_POWER_METER_CLASS "pwr_meter_resource"
39 39
40#define NUM_SENSORS 17 40#define NUM_SENSORS 17
41 41
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 5939e7f7d8e9..c3817e1f32c7 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -698,7 +698,7 @@ static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
698 "max_cstate: C%d\n" 698 "max_cstate: C%d\n"
699 "maximum allowed latency: %d usec\n", 699 "maximum allowed latency: %d usec\n",
700 pr->power.state ? pr->power.state - pr->power.states : 0, 700 pr->power.state ? pr->power.state - pr->power.states : 0,
701 max_cstate, pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)); 701 max_cstate, pm_qos_request(PM_QOS_CPU_DMA_LATENCY));
702 702
703 seq_puts(seq, "states:\n"); 703 seq_puts(seq, "states:\n");
704 704
diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
index 36704b887ccf..f8be23b6c129 100644
--- a/drivers/acpi/sbshc.c
+++ b/drivers/acpi/sbshc.c
@@ -18,7 +18,7 @@
18 18
19#define PREFIX "ACPI: " 19#define PREFIX "ACPI: "
20 20
21#define ACPI_SMB_HC_CLASS "smbus_host_controller" 21#define ACPI_SMB_HC_CLASS "smbus_host_ctl"
22#define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC" 22#define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
23 23
24struct acpi_smb_hc { 24struct acpi_smb_hc {
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index f74834a544fd..baa76bbf244a 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -450,6 +450,38 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
450 }, 450 },
451 }, 451 },
452 { 452 {
453 .callback = init_set_sci_en_on_resume,
454 .ident = "Lenovo ThinkPad T410",
455 .matches = {
456 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
457 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T410"),
458 },
459 },
460 {
461 .callback = init_set_sci_en_on_resume,
462 .ident = "Lenovo ThinkPad T510",
463 .matches = {
464 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
465 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T510"),
466 },
467 },
468 {
469 .callback = init_set_sci_en_on_resume,
470 .ident = "Lenovo ThinkPad W510",
471 .matches = {
472 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
473 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W510"),
474 },
475 },
476 {
477 .callback = init_set_sci_en_on_resume,
478 .ident = "Lenovo ThinkPad X201[s]",
479 .matches = {
480 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
481 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201"),
482 },
483 },
484 {
453 .callback = init_old_suspend_ordering, 485 .callback = init_old_suspend_ordering,
454 .ident = "Panasonic CF51-2L", 486 .ident = "Panasonic CF51-2L",
455 .matches = { 487 .matches = {
@@ -458,6 +490,30 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
458 DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"), 490 DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"),
459 }, 491 },
460 }, 492 },
493 {
494 .callback = init_set_sci_en_on_resume,
495 .ident = "Dell Studio 1558",
496 .matches = {
497 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
498 DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1558"),
499 },
500 },
501 {
502 .callback = init_set_sci_en_on_resume,
503 .ident = "Dell Studio 1557",
504 .matches = {
505 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
506 DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1557"),
507 },
508 },
509 {
510 .callback = init_set_sci_en_on_resume,
511 .ident = "Dell Studio 1555",
512 .matches = {
513 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
514 DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1555"),
515 },
516 },
461 {}, 517 {},
462}; 518};
463#endif /* CONFIG_SUSPEND */ 519#endif /* CONFIG_SUSPEND */
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 9f6cfac0f2cc..228740f356c9 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -879,6 +879,8 @@ static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
879void ata_qc_schedule_eh(struct ata_queued_cmd *qc) 879void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
880{ 880{
881 struct ata_port *ap = qc->ap; 881 struct ata_port *ap = qc->ap;
882 struct request_queue *q = qc->scsicmd->device->request_queue;
883 unsigned long flags;
882 884
883 WARN_ON(!ap->ops->error_handler); 885 WARN_ON(!ap->ops->error_handler);
884 886
@@ -890,7 +892,9 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
890 * Note that ATA_QCFLAG_FAILED is unconditionally set after 892 * Note that ATA_QCFLAG_FAILED is unconditionally set after
891 * this function completes. 893 * this function completes.
892 */ 894 */
895 spin_lock_irqsave(q->queue_lock, flags);
893 blk_abort_request(qc->scsicmd->request); 896 blk_abort_request(qc->scsicmd->request);
897 spin_unlock_irqrestore(q->queue_lock, flags);
894} 898}
895 899
896/** 900/**
@@ -1624,6 +1628,7 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
1624 } 1628 }
1625 1629
1626 /* okay, this error is ours */ 1630 /* okay, this error is ours */
1631 memset(&tf, 0, sizeof(tf));
1627 rc = ata_eh_read_log_10h(dev, &tag, &tf); 1632 rc = ata_eh_read_log_10h(dev, &tag, &tf);
1628 if (rc) { 1633 if (rc) {
1629 ata_link_printk(link, KERN_ERR, "failed to read log page 10h " 1634 ata_link_printk(link, KERN_ERR, "failed to read log page 10h "
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
index 3c3172d3c34e..aa39bda6441a 100644
--- a/drivers/ata/pata_pcmcia.c
+++ b/drivers/ata/pata_pcmcia.c
@@ -45,16 +45,6 @@
45#define DRV_NAME "pata_pcmcia" 45#define DRV_NAME "pata_pcmcia"
46#define DRV_VERSION "0.3.5" 46#define DRV_VERSION "0.3.5"
47 47
48/*
49 * Private data structure to glue stuff together
50 */
51
52struct ata_pcmcia_info {
53 struct pcmcia_device *pdev;
54 int ndev;
55 dev_node_t node;
56};
57
58/** 48/**
59 * pcmcia_set_mode - PCMCIA specific mode setup 49 * pcmcia_set_mode - PCMCIA specific mode setup
60 * @link: link 50 * @link: link
@@ -248,7 +238,6 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
248{ 238{
249 struct ata_host *host; 239 struct ata_host *host;
250 struct ata_port *ap; 240 struct ata_port *ap;
251 struct ata_pcmcia_info *info;
252 struct pcmcia_config_check *stk = NULL; 241 struct pcmcia_config_check *stk = NULL;
253 int is_kme = 0, ret = -ENOMEM, p; 242 int is_kme = 0, ret = -ENOMEM, p;
254 unsigned long io_base, ctl_base; 243 unsigned long io_base, ctl_base;
@@ -256,19 +245,10 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
256 int n_ports = 1; 245 int n_ports = 1;
257 struct ata_port_operations *ops = &pcmcia_port_ops; 246 struct ata_port_operations *ops = &pcmcia_port_ops;
258 247
259 info = kzalloc(sizeof(*info), GFP_KERNEL);
260 if (info == NULL)
261 return -ENOMEM;
262
263 /* Glue stuff together. FIXME: We may be able to get rid of info with care */
264 info->pdev = pdev;
265 pdev->priv = info;
266
267 /* Set up attributes in order to probe card and get resources */ 248 /* Set up attributes in order to probe card and get resources */
268 pdev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; 249 pdev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
269 pdev->io.Attributes2 = IO_DATA_PATH_WIDTH_8; 250 pdev->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
270 pdev->io.IOAddrLines = 3; 251 pdev->io.IOAddrLines = 3;
271 pdev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
272 pdev->conf.Attributes = CONF_ENABLE_IRQ; 252 pdev->conf.Attributes = CONF_ENABLE_IRQ;
273 pdev->conf.IntType = INT_MEMORY_AND_IO; 253 pdev->conf.IntType = INT_MEMORY_AND_IO;
274 254
@@ -293,8 +273,7 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
293 } 273 }
294 io_base = pdev->io.BasePort1; 274 io_base = pdev->io.BasePort1;
295 ctl_base = stk->ctl_base; 275 ctl_base = stk->ctl_base;
296 ret = pcmcia_request_irq(pdev, &pdev->irq); 276 if (!pdev->irq)
297 if (ret)
298 goto failed; 277 goto failed;
299 278
300 ret = pcmcia_request_configuration(pdev, &pdev->conf); 279 ret = pcmcia_request_configuration(pdev, &pdev->conf);
@@ -344,21 +323,19 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
344 } 323 }
345 324
346 /* activate */ 325 /* activate */
347 ret = ata_host_activate(host, pdev->irq.AssignedIRQ, ata_sff_interrupt, 326 ret = ata_host_activate(host, pdev->irq, ata_sff_interrupt,
348 IRQF_SHARED, &pcmcia_sht); 327 IRQF_SHARED, &pcmcia_sht);
349 if (ret) 328 if (ret)
350 goto failed; 329 goto failed;
351 330
352 info->ndev = 1; 331 pdev->priv = host;
353 kfree(stk); 332 kfree(stk);
354 return 0; 333 return 0;
355 334
356failed: 335failed:
357 kfree(stk); 336 kfree(stk);
358 info->ndev = 0;
359 pcmcia_disable_device(pdev); 337 pcmcia_disable_device(pdev);
360out1: 338out1:
361 kfree(info);
362 return ret; 339 return ret;
363} 340}
364 341
@@ -372,20 +349,12 @@ out1:
372 349
373static void pcmcia_remove_one(struct pcmcia_device *pdev) 350static void pcmcia_remove_one(struct pcmcia_device *pdev)
374{ 351{
375 struct ata_pcmcia_info *info = pdev->priv; 352 struct ata_host *host = pdev->priv;
376 struct device *dev = &pdev->dev; 353
377 354 if (host)
378 if (info != NULL) { 355 ata_host_detach(host);
379 /* If we have attached the device to the ATA layer, detach it */ 356
380 if (info->ndev) {
381 struct ata_host *host = dev_get_drvdata(dev);
382 ata_host_detach(host);
383 }
384 info->ndev = 0;
385 pdev->priv = NULL;
386 }
387 pcmcia_disable_device(pdev); 357 pcmcia_disable_device(pdev);
388 kfree(info);
389} 358}
390 359
391static struct pcmcia_device_id pcmcia_devices[] = { 360static struct pcmcia_device_id pcmcia_devices[] = {
@@ -424,6 +393,8 @@ static struct pcmcia_device_id pcmcia_devices[] = {
424 PCMCIA_DEVICE_PROD_ID12("Hyperstone", "Model1", 0x3d5b9ef5, 0xca6ab420), 393 PCMCIA_DEVICE_PROD_ID12("Hyperstone", "Model1", 0x3d5b9ef5, 0xca6ab420),
425 PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178), 394 PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178),
426 PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753), 395 PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753),
396 PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 1GB", 0x2e6d1829, 0x55d5bffb),
397 PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 4GB", 0x2e6d1829, 0x531e7d10),
427 PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF8GB", 0x2e6d1829, 0xacbe682e), 398 PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF8GB", 0x2e6d1829, 0xacbe682e),
428 PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2 ", 0x547e66dc, 0x8671043b), 399 PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2 ", 0x547e66dc, 0x8671043b),
429 PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149), 400 PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149),
@@ -444,6 +415,8 @@ static struct pcmcia_device_id pcmcia_devices[] = {
444 PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF80", 0x709b1bf1, 0x2a54d4b1), 415 PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF80", 0x709b1bf1, 0x2a54d4b1),
445 PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS2GCF120", 0x709b1bf1, 0x969aa4f2), 416 PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS2GCF120", 0x709b1bf1, 0x969aa4f2),
446 PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF120", 0x709b1bf1, 0xf54a91c8), 417 PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF120", 0x709b1bf1, 0xf54a91c8),
418 PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF133", 0x709b1bf1, 0x7558f133),
419 PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS8GCF133", 0x709b1bf1, 0xb2f89b47),
447 PCMCIA_DEVICE_PROD_ID12("WIT", "IDE16", 0x244e5994, 0x3e232852), 420 PCMCIA_DEVICE_PROD_ID12("WIT", "IDE16", 0x244e5994, 0x3e232852),
448 PCMCIA_DEVICE_PROD_ID12("WEIDA", "TWTTI", 0xcc7cf69c, 0x212bb918), 421 PCMCIA_DEVICE_PROD_ID12("WEIDA", "TWTTI", 0xcc7cf69c, 0x212bb918),
449 PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209), 422 PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209),
diff --git a/drivers/base/iommu.c b/drivers/base/iommu.c
index 8ad4ffea6920..6e6b6a11b3ce 100644
--- a/drivers/base/iommu.c
+++ b/drivers/base/iommu.c
@@ -80,20 +80,6 @@ void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
80} 80}
81EXPORT_SYMBOL_GPL(iommu_detach_device); 81EXPORT_SYMBOL_GPL(iommu_detach_device);
82 82
83int iommu_map_range(struct iommu_domain *domain, unsigned long iova,
84 phys_addr_t paddr, size_t size, int prot)
85{
86 return iommu_ops->map(domain, iova, paddr, size, prot);
87}
88EXPORT_SYMBOL_GPL(iommu_map_range);
89
90void iommu_unmap_range(struct iommu_domain *domain, unsigned long iova,
91 size_t size)
92{
93 iommu_ops->unmap(domain, iova, size);
94}
95EXPORT_SYMBOL_GPL(iommu_unmap_range);
96
97phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, 83phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
98 unsigned long iova) 84 unsigned long iova)
99{ 85{
@@ -107,3 +93,32 @@ int iommu_domain_has_cap(struct iommu_domain *domain,
107 return iommu_ops->domain_has_cap(domain, cap); 93 return iommu_ops->domain_has_cap(domain, cap);
108} 94}
109EXPORT_SYMBOL_GPL(iommu_domain_has_cap); 95EXPORT_SYMBOL_GPL(iommu_domain_has_cap);
96
97int iommu_map(struct iommu_domain *domain, unsigned long iova,
98 phys_addr_t paddr, int gfp_order, int prot)
99{
100 unsigned long invalid_mask;
101 size_t size;
102
103 size = 0x1000UL << gfp_order;
104 invalid_mask = size - 1;
105
106 BUG_ON((iova | paddr) & invalid_mask);
107
108 return iommu_ops->map(domain, iova, paddr, gfp_order, prot);
109}
110EXPORT_SYMBOL_GPL(iommu_map);
111
112int iommu_unmap(struct iommu_domain *domain, unsigned long iova, int gfp_order)
113{
114 unsigned long invalid_mask;
115 size_t size;
116
117 size = 0x1000UL << gfp_order;
118 invalid_mask = size - 1;
119
120 BUG_ON(iova & invalid_mask);
121
122 return iommu_ops->unmap(domain, iova, gfp_order);
123}
124EXPORT_SYMBOL_GPL(iommu_unmap);
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 4b4b565c835f..ada6397c23a5 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -187,7 +187,7 @@ EXPORT_SYMBOL_GPL(platform_device_alloc);
187 * released. 187 * released.
188 */ 188 */
189int platform_device_add_resources(struct platform_device *pdev, 189int platform_device_add_resources(struct platform_device *pdev,
190 struct resource *res, unsigned int num) 190 const struct resource *res, unsigned int num)
191{ 191{
192 struct resource *r; 192 struct resource *r;
193 193
@@ -367,7 +367,7 @@ EXPORT_SYMBOL_GPL(platform_device_unregister);
367 */ 367 */
368struct platform_device *platform_device_register_simple(const char *name, 368struct platform_device *platform_device_register_simple(const char *name,
369 int id, 369 int id,
370 struct resource *res, 370 const struct resource *res,
371 unsigned int num) 371 unsigned int num)
372{ 372{
373 struct platform_device *pdev; 373 struct platform_device *pdev;
@@ -967,17 +967,17 @@ static int platform_pm_restore_noirq(struct device *dev)
967 967
968int __weak platform_pm_runtime_suspend(struct device *dev) 968int __weak platform_pm_runtime_suspend(struct device *dev)
969{ 969{
970 return -ENOSYS; 970 return pm_generic_runtime_suspend(dev);
971}; 971};
972 972
973int __weak platform_pm_runtime_resume(struct device *dev) 973int __weak platform_pm_runtime_resume(struct device *dev)
974{ 974{
975 return -ENOSYS; 975 return pm_generic_runtime_resume(dev);
976}; 976};
977 977
978int __weak platform_pm_runtime_idle(struct device *dev) 978int __weak platform_pm_runtime_idle(struct device *dev)
979{ 979{
980 return -ENOSYS; 980 return pm_generic_runtime_idle(dev);
981}; 981};
982 982
983#else /* !CONFIG_PM_RUNTIME */ 983#else /* !CONFIG_PM_RUNTIME */
@@ -1254,6 +1254,26 @@ static int __init early_platform_driver_probe_id(char *class_str,
1254 } 1254 }
1255 1255
1256 if (match) { 1256 if (match) {
1257 /*
1258 * Set up a sensible init_name to enable
1259 * dev_name() and others to be used before the
1260 * rest of the driver core is initialized.
1261 */
1262 if (!match->dev.init_name && slab_is_available()) {
1263 if (match->id != -1)
1264 match->dev.init_name =
1265 kasprintf(GFP_KERNEL, "%s.%d",
1266 match->name,
1267 match->id);
1268 else
1269 match->dev.init_name =
1270 kasprintf(GFP_KERNEL, "%s",
1271 match->name);
1272
1273 if (!match->dev.init_name)
1274 return -ENOMEM;
1275 }
1276
1257 if (epdrv->pdrv->probe(match)) 1277 if (epdrv->pdrv->probe(match))
1258 pr_warning("%s: unable to probe %s early.\n", 1278 pr_warning("%s: unable to probe %s early.\n",
1259 class_str, match->name); 1279 class_str, match->name);
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 626dd147b75f..b0ec0e9f27e9 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -229,14 +229,16 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq)
229 229
230 if (retval) { 230 if (retval) {
231 dev->power.runtime_status = RPM_ACTIVE; 231 dev->power.runtime_status = RPM_ACTIVE;
232 pm_runtime_cancel_pending(dev);
233
234 if (retval == -EAGAIN || retval == -EBUSY) { 232 if (retval == -EAGAIN || retval == -EBUSY) {
235 notify = true; 233 if (dev->power.timer_expires == 0)
234 notify = true;
236 dev->power.runtime_error = 0; 235 dev->power.runtime_error = 0;
236 } else {
237 pm_runtime_cancel_pending(dev);
237 } 238 }
238 } else { 239 } else {
239 dev->power.runtime_status = RPM_SUSPENDED; 240 dev->power.runtime_status = RPM_SUSPENDED;
241 pm_runtime_deactivate_timer(dev);
240 242
241 if (dev->parent) { 243 if (dev->parent) {
242 parent = dev->parent; 244 parent = dev->parent;
@@ -659,8 +661,6 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay)
659 661
660 if (dev->power.runtime_status == RPM_SUSPENDED) 662 if (dev->power.runtime_status == RPM_SUSPENDED)
661 retval = 1; 663 retval = 1;
662 else if (dev->power.runtime_status == RPM_SUSPENDING)
663 retval = -EINPROGRESS;
664 else if (atomic_read(&dev->power.usage_count) > 0 664 else if (atomic_read(&dev->power.usage_count) > 0
665 || dev->power.disable_depth > 0) 665 || dev->power.disable_depth > 0)
666 retval = -EAGAIN; 666 retval = -EAGAIN;
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 86fd9373447e..a4c33bc51257 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -5,6 +5,7 @@
5#include <linux/device.h> 5#include <linux/device.h>
6#include <linux/string.h> 6#include <linux/string.h>
7#include <linux/pm_runtime.h> 7#include <linux/pm_runtime.h>
8#include <asm/atomic.h>
8#include "power.h" 9#include "power.h"
9 10
10/* 11/*
@@ -143,7 +144,59 @@ wake_store(struct device * dev, struct device_attribute *attr,
143 144
144static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store); 145static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store);
145 146
146#ifdef CONFIG_PM_SLEEP_ADVANCED_DEBUG 147#ifdef CONFIG_PM_ADVANCED_DEBUG
148#ifdef CONFIG_PM_RUNTIME
149
150static ssize_t rtpm_usagecount_show(struct device *dev,
151 struct device_attribute *attr, char *buf)
152{
153 return sprintf(buf, "%d\n", atomic_read(&dev->power.usage_count));
154}
155
156static ssize_t rtpm_children_show(struct device *dev,
157 struct device_attribute *attr, char *buf)
158{
159 return sprintf(buf, "%d\n", dev->power.ignore_children ?
160 0 : atomic_read(&dev->power.child_count));
161}
162
163static ssize_t rtpm_enabled_show(struct device *dev,
164 struct device_attribute *attr, char *buf)
165{
166 if ((dev->power.disable_depth) && (dev->power.runtime_auto == false))
167 return sprintf(buf, "disabled & forbidden\n");
168 else if (dev->power.disable_depth)
169 return sprintf(buf, "disabled\n");
170 else if (dev->power.runtime_auto == false)
171 return sprintf(buf, "forbidden\n");
172 return sprintf(buf, "enabled\n");
173}
174
175static ssize_t rtpm_status_show(struct device *dev,
176 struct device_attribute *attr, char *buf)
177{
178 if (dev->power.runtime_error)
179 return sprintf(buf, "error\n");
180 switch (dev->power.runtime_status) {
181 case RPM_SUSPENDED:
182 return sprintf(buf, "suspended\n");
183 case RPM_SUSPENDING:
184 return sprintf(buf, "suspending\n");
185 case RPM_RESUMING:
186 return sprintf(buf, "resuming\n");
187 case RPM_ACTIVE:
188 return sprintf(buf, "active\n");
189 }
190 return -EIO;
191}
192
193static DEVICE_ATTR(runtime_usage, 0444, rtpm_usagecount_show, NULL);
194static DEVICE_ATTR(runtime_active_kids, 0444, rtpm_children_show, NULL);
195static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
196static DEVICE_ATTR(runtime_enabled, 0444, rtpm_enabled_show, NULL);
197
198#endif
199
147static ssize_t async_show(struct device *dev, struct device_attribute *attr, 200static ssize_t async_show(struct device *dev, struct device_attribute *attr,
148 char *buf) 201 char *buf)
149{ 202{
@@ -170,15 +223,21 @@ static ssize_t async_store(struct device *dev, struct device_attribute *attr,
170} 223}
171 224
172static DEVICE_ATTR(async, 0644, async_show, async_store); 225static DEVICE_ATTR(async, 0644, async_show, async_store);
173#endif /* CONFIG_PM_SLEEP_ADVANCED_DEBUG */ 226#endif /* CONFIG_PM_ADVANCED_DEBUG */
174 227
175static struct attribute * power_attrs[] = { 228static struct attribute * power_attrs[] = {
176#ifdef CONFIG_PM_RUNTIME 229#ifdef CONFIG_PM_RUNTIME
177 &dev_attr_control.attr, 230 &dev_attr_control.attr,
178#endif 231#endif
179 &dev_attr_wakeup.attr, 232 &dev_attr_wakeup.attr,
180#ifdef CONFIG_PM_SLEEP_ADVANCED_DEBUG 233#ifdef CONFIG_PM_ADVANCED_DEBUG
181 &dev_attr_async.attr, 234 &dev_attr_async.attr,
235#ifdef CONFIG_PM_RUNTIME
236 &dev_attr_runtime_usage.attr,
237 &dev_attr_runtime_active_kids.attr,
238 &dev_attr_runtime_status.attr,
239 &dev_attr_runtime_enabled.attr,
240#endif
182#endif 241#endif
183 NULL, 242 NULL,
184}; 243};
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 0182a22c423a..832798aa14f6 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -66,6 +66,7 @@
66#include <linux/blkdev.h> 66#include <linux/blkdev.h>
67#include <linux/elevator.h> 67#include <linux/elevator.h>
68#include <linux/interrupt.h> 68#include <linux/interrupt.h>
69#include <linux/platform_device.h>
69 70
70#include <asm/setup.h> 71#include <asm/setup.h>
71#include <asm/uaccess.h> 72#include <asm/uaccess.h>
@@ -1696,34 +1697,18 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data)
1696 return get_disk(unit[drive].gendisk); 1697 return get_disk(unit[drive].gendisk);
1697} 1698}
1698 1699
1699static int __init amiga_floppy_init(void) 1700static int __init amiga_floppy_probe(struct platform_device *pdev)
1700{ 1701{
1701 int i, ret; 1702 int i, ret;
1702 1703
1703 if (!MACH_IS_AMIGA)
1704 return -ENODEV;
1705
1706 if (!AMIGAHW_PRESENT(AMI_FLOPPY))
1707 return -ENODEV;
1708
1709 if (register_blkdev(FLOPPY_MAJOR,"fd")) 1704 if (register_blkdev(FLOPPY_MAJOR,"fd"))
1710 return -EBUSY; 1705 return -EBUSY;
1711 1706
1712 /*
1713 * We request DSKPTR, DSKLEN and DSKDATA only, because the other
1714 * floppy registers are too spreaded over the custom register space
1715 */
1716 ret = -EBUSY;
1717 if (!request_mem_region(CUSTOM_PHYSADDR+0x20, 8, "amiflop [Paula]")) {
1718 printk("fd: cannot get floppy registers\n");
1719 goto out_blkdev;
1720 }
1721
1722 ret = -ENOMEM; 1707 ret = -ENOMEM;
1723 if ((raw_buf = (char *)amiga_chip_alloc (RAW_BUF_SIZE, "Floppy")) == 1708 if ((raw_buf = (char *)amiga_chip_alloc (RAW_BUF_SIZE, "Floppy")) ==
1724 NULL) { 1709 NULL) {
1725 printk("fd: cannot get chip mem buffer\n"); 1710 printk("fd: cannot get chip mem buffer\n");
1726 goto out_memregion; 1711 goto out_blkdev;
1727 } 1712 }
1728 1713
1729 ret = -EBUSY; 1714 ret = -EBUSY;
@@ -1792,18 +1777,13 @@ out_irq2:
1792 free_irq(IRQ_AMIGA_DSKBLK, NULL); 1777 free_irq(IRQ_AMIGA_DSKBLK, NULL);
1793out_irq: 1778out_irq:
1794 amiga_chip_free(raw_buf); 1779 amiga_chip_free(raw_buf);
1795out_memregion:
1796 release_mem_region(CUSTOM_PHYSADDR+0x20, 8);
1797out_blkdev: 1780out_blkdev:
1798 unregister_blkdev(FLOPPY_MAJOR,"fd"); 1781 unregister_blkdev(FLOPPY_MAJOR,"fd");
1799 return ret; 1782 return ret;
1800} 1783}
1801 1784
1802module_init(amiga_floppy_init);
1803#ifdef MODULE
1804
1805#if 0 /* not safe to unload */ 1785#if 0 /* not safe to unload */
1806void cleanup_module(void) 1786static int __exit amiga_floppy_remove(struct platform_device *pdev)
1807{ 1787{
1808 int i; 1788 int i;
1809 1789
@@ -1820,12 +1800,25 @@ void cleanup_module(void)
1820 custom.dmacon = DMAF_DISK; /* disable DMA */ 1800 custom.dmacon = DMAF_DISK; /* disable DMA */
1821 amiga_chip_free(raw_buf); 1801 amiga_chip_free(raw_buf);
1822 blk_cleanup_queue(floppy_queue); 1802 blk_cleanup_queue(floppy_queue);
1823 release_mem_region(CUSTOM_PHYSADDR+0x20, 8);
1824 unregister_blkdev(FLOPPY_MAJOR, "fd"); 1803 unregister_blkdev(FLOPPY_MAJOR, "fd");
1825} 1804}
1826#endif 1805#endif
1827 1806
1828#else 1807static struct platform_driver amiga_floppy_driver = {
1808 .driver = {
1809 .name = "amiga-floppy",
1810 .owner = THIS_MODULE,
1811 },
1812};
1813
1814static int __init amiga_floppy_init(void)
1815{
1816 return platform_driver_probe(&amiga_floppy_driver, amiga_floppy_probe);
1817}
1818
1819module_init(amiga_floppy_init);
1820
1821#ifndef MODULE
1829static int __init amiga_floppy_setup (char *str) 1822static int __init amiga_floppy_setup (char *str)
1830{ 1823{
1831 int n; 1824 int n;
@@ -1840,3 +1833,5 @@ static int __init amiga_floppy_setup (char *str)
1840 1833
1841__setup("floppy=", amiga_floppy_setup); 1834__setup("floppy=", amiga_floppy_setup);
1842#endif 1835#endif
1836
1837MODULE_ALIAS("platform:amiga-floppy");
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 67e0fc542249..93d1f9b469d4 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1695,6 +1695,7 @@ int drbd_send_protocol(struct drbd_conf *mdev)
1695 cf |= CF_DRY_RUN; 1695 cf |= CF_DRY_RUN;
1696 else { 1696 else {
1697 dev_err(DEV, "--dry-run is not supported by peer"); 1697 dev_err(DEV, "--dry-run is not supported by peer");
1698 kfree(p);
1698 return 0; 1699 return 0;
1699 } 1700 }
1700 } 1701 }
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index ed9f1de24a71..3f096e7959b4 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -899,7 +899,8 @@ retry:
899 899
900 drbd_thread_start(&mdev->asender); 900 drbd_thread_start(&mdev->asender);
901 901
902 drbd_send_protocol(mdev); 902 if (!drbd_send_protocol(mdev))
903 return -1;
903 drbd_send_sync_param(mdev, &mdev->sync_conf); 904 drbd_send_sync_param(mdev, &mdev->sync_conf);
904 drbd_send_sizes(mdev, 0); 905 drbd_send_sizes(mdev, 0);
905 drbd_send_uuids(mdev); 906 drbd_send_uuids(mdev);
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 44bf6d11197e..d48a1dfd7b24 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -235,7 +235,7 @@ void drbd_endio_pri(struct bio *bio, int error)
235 if (unlikely(error)) { 235 if (unlikely(error)) {
236 what = (bio_data_dir(bio) == WRITE) 236 what = (bio_data_dir(bio) == WRITE)
237 ? write_completed_with_error 237 ? write_completed_with_error
238 : (bio_rw(bio) == READA) 238 : (bio_rw(bio) == READ)
239 ? read_completed_with_error 239 ? read_completed_with_error
240 : read_ahead_completed_with_error; 240 : read_ahead_completed_with_error;
241 } else 241 } else
diff --git a/drivers/block/hd.c b/drivers/block/hd.c
index 034e6dfc878c..81c78b3ce2df 100644
--- a/drivers/block/hd.c
+++ b/drivers/block/hd.c
@@ -164,12 +164,12 @@ unsigned long read_timer(void)
164 unsigned long t, flags; 164 unsigned long t, flags;
165 int i; 165 int i;
166 166
167 spin_lock_irqsave(&i8253_lock, flags); 167 raw_spin_lock_irqsave(&i8253_lock, flags);
168 t = jiffies * 11932; 168 t = jiffies * 11932;
169 outb_p(0, 0x43); 169 outb_p(0, 0x43);
170 i = inb_p(0x40); 170 i = inb_p(0x40);
171 i |= inb(0x40) << 8; 171 i |= inb(0x40) << 8;
172 spin_unlock_irqrestore(&i8253_lock, flags); 172 raw_spin_unlock_irqrestore(&i8253_lock, flags);
173 return(t - i); 173 return(t - i);
174} 174}
175#endif 175#endif
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index ddf19425245d..8a549db2aa78 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -48,6 +48,7 @@
48#include <linux/module.h> 48#include <linux/module.h>
49#include <linux/types.h> 49#include <linux/types.h>
50#include <linux/kernel.h> 50#include <linux/kernel.h>
51#include <linux/compat.h>
51#include <linux/kthread.h> 52#include <linux/kthread.h>
52#include <linux/errno.h> 53#include <linux/errno.h>
53#include <linux/spinlock.h> 54#include <linux/spinlock.h>
@@ -2984,7 +2985,7 @@ static void pkt_get_status(struct pkt_ctrl_command *ctrl_cmd)
2984 mutex_unlock(&ctl_mutex); 2985 mutex_unlock(&ctl_mutex);
2985} 2986}
2986 2987
2987static int pkt_ctl_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) 2988static long pkt_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2988{ 2989{
2989 void __user *argp = (void __user *)arg; 2990 void __user *argp = (void __user *)arg;
2990 struct pkt_ctrl_command ctrl_cmd; 2991 struct pkt_ctrl_command ctrl_cmd;
@@ -3021,10 +3022,20 @@ static int pkt_ctl_ioctl(struct inode *inode, struct file *file, unsigned int cm
3021 return ret; 3022 return ret;
3022} 3023}
3023 3024
3025#ifdef CONFIG_COMPAT
3026static long pkt_ctl_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3027{
3028 return pkt_ctl_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
3029}
3030#endif
3024 3031
3025static const struct file_operations pkt_ctl_fops = { 3032static const struct file_operations pkt_ctl_fops = {
3026 .ioctl = pkt_ctl_ioctl, 3033 .open = nonseekable_open,
3027 .owner = THIS_MODULE, 3034 .unlocked_ioctl = pkt_ctl_ioctl,
3035#ifdef CONFIG_COMPAT
3036 .compat_ioctl = pkt_ctl_compat_ioctl,
3037#endif
3038 .owner = THIS_MODULE,
3028}; 3039};
3029 3040
3030static struct miscdevice pkt_misc = { 3041static struct miscdevice pkt_misc = {
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index d9bf87ca9e83..6f907ebed2d5 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -65,7 +65,6 @@ MODULE_LICENSE("GPL");
65 65
66typedef struct bluecard_info_t { 66typedef struct bluecard_info_t {
67 struct pcmcia_device *p_dev; 67 struct pcmcia_device *p_dev;
68 dev_node_t node;
69 68
70 struct hci_dev *hdev; 69 struct hci_dev *hdev;
71 70
@@ -869,9 +868,6 @@ static int bluecard_probe(struct pcmcia_device *link)
869 868
870 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 869 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
871 link->io.NumPorts1 = 8; 870 link->io.NumPorts1 = 8;
872 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
873
874 link->irq.Handler = bluecard_interrupt;
875 871
876 link->conf.Attributes = CONF_ENABLE_IRQ; 872 link->conf.Attributes = CONF_ENABLE_IRQ;
877 link->conf.IntType = INT_MEMORY_AND_IO; 873 link->conf.IntType = INT_MEMORY_AND_IO;
@@ -908,9 +904,9 @@ static int bluecard_config(struct pcmcia_device *link)
908 if (i != 0) 904 if (i != 0)
909 goto failed; 905 goto failed;
910 906
911 i = pcmcia_request_irq(link, &link->irq); 907 i = pcmcia_request_irq(link, bluecard_interrupt);
912 if (i != 0) 908 if (i != 0)
913 link->irq.AssignedIRQ = 0; 909 goto failed;
914 910
915 i = pcmcia_request_configuration(link, &link->conf); 911 i = pcmcia_request_configuration(link, &link->conf);
916 if (i != 0) 912 if (i != 0)
@@ -919,9 +915,6 @@ static int bluecard_config(struct pcmcia_device *link)
919 if (bluecard_open(info) != 0) 915 if (bluecard_open(info) != 0)
920 goto failed; 916 goto failed;
921 917
922 strcpy(info->node.dev_name, info->hdev->name);
923 link->dev_node = &info->node;
924
925 return 0; 918 return 0;
926 919
927failed: 920failed:
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index 027cb8bf650f..21e05fdc9121 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -72,7 +72,6 @@ MODULE_FIRMWARE("BT3CPCC.bin");
72 72
73typedef struct bt3c_info_t { 73typedef struct bt3c_info_t {
74 struct pcmcia_device *p_dev; 74 struct pcmcia_device *p_dev;
75 dev_node_t node;
76 75
77 struct hci_dev *hdev; 76 struct hci_dev *hdev;
78 77
@@ -661,9 +660,6 @@ static int bt3c_probe(struct pcmcia_device *link)
661 660
662 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 661 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
663 link->io.NumPorts1 = 8; 662 link->io.NumPorts1 = 8;
664 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
665
666 link->irq.Handler = bt3c_interrupt;
667 663
668 link->conf.Attributes = CONF_ENABLE_IRQ; 664 link->conf.Attributes = CONF_ENABLE_IRQ;
669 link->conf.IntType = INT_MEMORY_AND_IO; 665 link->conf.IntType = INT_MEMORY_AND_IO;
@@ -743,9 +739,9 @@ static int bt3c_config(struct pcmcia_device *link)
743 goto failed; 739 goto failed;
744 740
745found_port: 741found_port:
746 i = pcmcia_request_irq(link, &link->irq); 742 i = pcmcia_request_irq(link, &bt3c_interrupt);
747 if (i != 0) 743 if (i != 0)
748 link->irq.AssignedIRQ = 0; 744 goto failed;
749 745
750 i = pcmcia_request_configuration(link, &link->conf); 746 i = pcmcia_request_configuration(link, &link->conf);
751 if (i != 0) 747 if (i != 0)
@@ -754,9 +750,6 @@ found_port:
754 if (bt3c_open(info) != 0) 750 if (bt3c_open(info) != 0)
755 goto failed; 751 goto failed;
756 752
757 strcpy(info->node.dev_name, info->hdev->name);
758 link->dev_node = &info->node;
759
760 return 0; 753 return 0;
761 754
762failed: 755failed:
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index 60c0953d7d00..4ed7288f99db 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -67,7 +67,6 @@ MODULE_LICENSE("GPL");
67 67
68typedef struct btuart_info_t { 68typedef struct btuart_info_t {
69 struct pcmcia_device *p_dev; 69 struct pcmcia_device *p_dev;
70 dev_node_t node;
71 70
72 struct hci_dev *hdev; 71 struct hci_dev *hdev;
73 72
@@ -590,9 +589,6 @@ static int btuart_probe(struct pcmcia_device *link)
590 589
591 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 590 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
592 link->io.NumPorts1 = 8; 591 link->io.NumPorts1 = 8;
593 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
594
595 link->irq.Handler = btuart_interrupt;
596 592
597 link->conf.Attributes = CONF_ENABLE_IRQ; 593 link->conf.Attributes = CONF_ENABLE_IRQ;
598 link->conf.IntType = INT_MEMORY_AND_IO; 594 link->conf.IntType = INT_MEMORY_AND_IO;
@@ -672,9 +668,9 @@ static int btuart_config(struct pcmcia_device *link)
672 goto failed; 668 goto failed;
673 669
674found_port: 670found_port:
675 i = pcmcia_request_irq(link, &link->irq); 671 i = pcmcia_request_irq(link, btuart_interrupt);
676 if (i != 0) 672 if (i != 0)
677 link->irq.AssignedIRQ = 0; 673 goto failed;
678 674
679 i = pcmcia_request_configuration(link, &link->conf); 675 i = pcmcia_request_configuration(link, &link->conf);
680 if (i != 0) 676 if (i != 0)
@@ -683,9 +679,6 @@ found_port:
683 if (btuart_open(info) != 0) 679 if (btuart_open(info) != 0)
684 goto failed; 680 goto failed;
685 681
686 strcpy(info->node.dev_name, info->hdev->name);
687 link->dev_node = &info->node;
688
689 return 0; 682 return 0;
690 683
691failed: 684failed:
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index 17788317c51a..ef044d55cb25 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -67,7 +67,6 @@ MODULE_LICENSE("GPL");
67 67
68typedef struct dtl1_info_t { 68typedef struct dtl1_info_t {
69 struct pcmcia_device *p_dev; 69 struct pcmcia_device *p_dev;
70 dev_node_t node;
71 70
72 struct hci_dev *hdev; 71 struct hci_dev *hdev;
73 72
@@ -575,9 +574,6 @@ static int dtl1_probe(struct pcmcia_device *link)
575 574
576 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 575 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
577 link->io.NumPorts1 = 8; 576 link->io.NumPorts1 = 8;
578 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
579
580 link->irq.Handler = dtl1_interrupt;
581 577
582 link->conf.Attributes = CONF_ENABLE_IRQ; 578 link->conf.Attributes = CONF_ENABLE_IRQ;
583 link->conf.IntType = INT_MEMORY_AND_IO; 579 link->conf.IntType = INT_MEMORY_AND_IO;
@@ -621,9 +617,9 @@ static int dtl1_config(struct pcmcia_device *link)
621 if (pcmcia_loop_config(link, dtl1_confcheck, NULL) < 0) 617 if (pcmcia_loop_config(link, dtl1_confcheck, NULL) < 0)
622 goto failed; 618 goto failed;
623 619
624 i = pcmcia_request_irq(link, &link->irq); 620 i = pcmcia_request_irq(link, dtl1_interrupt);
625 if (i != 0) 621 if (i != 0)
626 link->irq.AssignedIRQ = 0; 622 goto failed;
627 623
628 i = pcmcia_request_configuration(link, &link->conf); 624 i = pcmcia_request_configuration(link, &link->conf);
629 if (i != 0) 625 if (i != 0)
@@ -632,9 +628,6 @@ static int dtl1_config(struct pcmcia_device *link)
632 if (dtl1_open(info) != 0) 628 if (dtl1_open(info) != 0)
633 goto failed; 629 goto failed;
634 630
635 strcpy(info->node.dev_name, info->hdev->name);
636 link->dev_node = &info->node;
637
638 return 0; 631 return 0;
639 632
640failed: 633failed:
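Note: the four Bluetooth PCMCIA hunks above (bluecard_cs, bt3c_cs, btuart_cs, dtl1_cs) all make the same conversion: the driver stops filling in link->irq.Attributes/Handler and a dev_node_t, pcmcia_request_irq() now takes the interrupt handler directly, a failed IRQ request becomes a hard error (goto failed) instead of clearing AssignedIRQ, and the assigned number is later read as plain link->irq (see the ipwireless and synclink_cs hunks below). A sketch of the new-style config path, with foo_* as illustrative names:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>

static irqreturn_t foo_interrupt(int irq, void *dev_inst)
{
	/* ... service the card ... */
	return IRQ_HANDLED;
}

static int foo_config(struct pcmcia_device *link)
{
	int ret;

	/* new API: hand over the handler, no pre-filled irq_req_t */
	ret = pcmcia_request_irq(link, foo_interrupt);
	if (ret)
		goto failed;			/* no usable IRQ is fatal now */

	ret = pcmcia_request_configuration(link, &link->conf);
	if (ret)
		goto failed;

	dev_info(&link->dev, "using irq %u\n", link->irq);
	return 0;

failed:
	pcmcia_disable_device(link);
	return -ENODEV;
}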
diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
index fc8cf7ac7f2b..4cd8b227c11f 100644
--- a/drivers/char/i8k.c
+++ b/drivers/char/i8k.c
@@ -23,6 +23,7 @@
23#include <linux/seq_file.h> 23#include <linux/seq_file.h>
24#include <linux/dmi.h> 24#include <linux/dmi.h>
25#include <linux/capability.h> 25#include <linux/capability.h>
26#include <linux/smp_lock.h>
26#include <asm/uaccess.h> 27#include <asm/uaccess.h>
27#include <asm/io.h> 28#include <asm/io.h>
28 29
@@ -82,8 +83,7 @@ module_param(fan_mult, int, 0);
82MODULE_PARM_DESC(fan_mult, "Factor to multiply fan speed with"); 83MODULE_PARM_DESC(fan_mult, "Factor to multiply fan speed with");
83 84
84static int i8k_open_fs(struct inode *inode, struct file *file); 85static int i8k_open_fs(struct inode *inode, struct file *file);
85static int i8k_ioctl(struct inode *, struct file *, unsigned int, 86static long i8k_ioctl(struct file *, unsigned int, unsigned long);
86 unsigned long);
87 87
88static const struct file_operations i8k_fops = { 88static const struct file_operations i8k_fops = {
89 .owner = THIS_MODULE, 89 .owner = THIS_MODULE,
@@ -91,7 +91,7 @@ static const struct file_operations i8k_fops = {
91 .read = seq_read, 91 .read = seq_read,
92 .llseek = seq_lseek, 92 .llseek = seq_lseek,
93 .release = single_release, 93 .release = single_release,
94 .ioctl = i8k_ioctl, 94 .unlocked_ioctl = i8k_ioctl,
95}; 95};
96 96
97struct smm_regs { 97struct smm_regs {
@@ -307,8 +307,8 @@ static int i8k_get_dell_signature(int req_fn)
307 return regs.eax == 1145651527 && regs.edx == 1145392204 ? 0 : -1; 307 return regs.eax == 1145651527 && regs.edx == 1145392204 ? 0 : -1;
308} 308}
309 309
310static int i8k_ioctl(struct inode *ip, struct file *fp, unsigned int cmd, 310static int
311 unsigned long arg) 311i8k_ioctl_unlocked(struct file *fp, unsigned int cmd, unsigned long arg)
312{ 312{
313 int val = 0; 313 int val = 0;
314 int speed; 314 int speed;
@@ -395,6 +395,17 @@ static int i8k_ioctl(struct inode *ip, struct file *fp, unsigned int cmd,
395 return 0; 395 return 0;
396} 396}
397 397
398static long i8k_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
399{
400 long ret;
401
402 lock_kernel();
403 ret = i8k_ioctl_unlocked(fp, cmd, arg);
404 unlock_kernel();
405
406 return ret;
407}
408
398/* 409/*
399 * Print the information for /proc/i8k. 410 * Print the information for /proc/i8k.
400 */ 411 */
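Note: the i8k change is the usual BKL pushdown step: the VFS no longer takes the big kernel lock around .ioctl, so the driver registers an .unlocked_ioctl entry point and, as an interim measure, takes lock_kernel()/unlock_kernel() around its old handler itself. The shape of that wrapper, with bar_* as illustrative names (<linux/smp_lock.h> provides the BKL helpers in this kernel generation):

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/smp_lock.h>

static int bar_ioctl_unlocked(struct file *fp, unsigned int cmd, unsigned long arg)
{
	/* original handler body, unchanged, still written as if the BKL were held */
	return 0;
}

static long bar_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
{
	long ret;

	lock_kernel();			/* preserve the old locking semantics for now */
	ret = bar_ioctl_unlocked(fp, cmd, arg);
	unlock_kernel();

	return ret;
}

static const struct file_operations bar_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= bar_ioctl,
};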
diff --git a/drivers/char/isicom.c b/drivers/char/isicom.c
index 0fa2e4a0835d..c1ab303455cf 100644
--- a/drivers/char/isicom.c
+++ b/drivers/char/isicom.c
@@ -879,8 +879,8 @@ static int isicom_open(struct tty_struct *tty, struct file *filp)
879 if (tport == NULL) 879 if (tport == NULL)
880 return -ENODEV; 880 return -ENODEV;
881 port = container_of(tport, struct isi_port, port); 881 port = container_of(tport, struct isi_port, port);
882 card = &isi_card[BOARD(tty->index)];
883 882
883 tty->driver_data = port;
884 return tty_port_open(tport, tty, filp); 884 return tty_port_open(tport, tty, filp);
885} 885}
886 886
@@ -936,7 +936,12 @@ static void isicom_shutdown(struct tty_port *port)
936static void isicom_close(struct tty_struct *tty, struct file *filp) 936static void isicom_close(struct tty_struct *tty, struct file *filp)
937{ 937{
938 struct isi_port *ip = tty->driver_data; 938 struct isi_port *ip = tty->driver_data;
939 struct tty_port *port = &ip->port; 939 struct tty_port *port;
940
941 if (ip == NULL)
942 return;
943
944 port = &ip->port;
940 if (isicom_paranoia_check(ip, tty->name, "isicom_close")) 945 if (isicom_paranoia_check(ip, tty->name, "isicom_close"))
941 return; 946 return;
942 tty_port_close(port, tty, filp); 947 tty_port_close(port, tty, filp);
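Note: this isicom fix, and the matching istallion, mxser, riscom8 and stallion hunks below, address the same problem: with the tty_port helpers the driver must store tty->driver_data itself before calling tty_port_open(), and the close path has to tolerate a NULL driver_data left behind by a failed open. A self-contained sketch of that open/close shape (the baz_* names and the fixed four-port table are illustrative):

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/tty.h>

struct baz_port {
	struct tty_port port;
	/* ... per-port hardware state ... */
};

static struct baz_port baz_ports[4];

static int baz_open(struct tty_struct *tty, struct file *filp)
{
	struct baz_port *p;

	if (tty->index < 0 || tty->index >= ARRAY_SIZE(baz_ports))
		return -ENODEV;
	p = &baz_ports[tty->index];

	tty->driver_data = p;			/* must be set before tty_port_open() */
	return tty_port_open(&p->port, tty, filp);
}

static void baz_close(struct tty_struct *tty, struct file *filp)
{
	struct baz_port *p = tty->driver_data;

	if (p == NULL)				/* open failed before driver_data was set */
		return;
	tty_port_close(&p->port, tty, filp);
}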
diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
index 4cd6c527ee41..4e395c956a09 100644
--- a/drivers/char/istallion.c
+++ b/drivers/char/istallion.c
@@ -827,6 +827,8 @@ static int stli_open(struct tty_struct *tty, struct file *filp)
827 return -ENODEV; 827 return -ENODEV;
828 if (portp->devnr < 1) 828 if (portp->devnr < 1)
829 return -ENODEV; 829 return -ENODEV;
830
831 tty->driver_data = portp;
830 return tty_port_open(&portp->port, tty, filp); 832 return tty_port_open(&portp->port, tty, filp);
831} 833}
832 834
diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c
index 47023053ee85..d2692d443f7b 100644
--- a/drivers/char/mxser.c
+++ b/drivers/char/mxser.c
@@ -1011,6 +1011,7 @@ static int mxser_open(struct tty_struct *tty, struct file *filp)
1011 if (!info->ioaddr) 1011 if (!info->ioaddr)
1012 return -ENODEV; 1012 return -ENODEV;
1013 1013
1014 tty->driver_data = info;
1014 return tty_port_open(&info->port, tty, filp); 1015 return tty_port_open(&info->port, tty, filp);
1015} 1016}
1016 1017
@@ -1074,7 +1075,7 @@ static void mxser_close(struct tty_struct *tty, struct file *filp)
1074 struct mxser_port *info = tty->driver_data; 1075 struct mxser_port *info = tty->driver_data;
1075 struct tty_port *port = &info->port; 1076 struct tty_port *port = &info->port;
1076 1077
1077 if (tty->index == MXSER_PORTS) 1078 if (tty->index == MXSER_PORTS || info == NULL)
1078 return; 1079 return;
1079 if (tty_port_close_start(port, tty, filp) == 0) 1080 if (tty_port_close_start(port, tty, filp) == 0)
1080 return; 1081 return;
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index 90b199f97bec..e7956acf2ad6 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -106,7 +106,6 @@ static int major; /* major number we get from the kernel */
106 106
107struct cm4000_dev { 107struct cm4000_dev {
108 struct pcmcia_device *p_dev; 108 struct pcmcia_device *p_dev;
109 dev_node_t node; /* OS node (major,minor) */
110 109
111 unsigned char atr[MAX_ATR]; 110 unsigned char atr[MAX_ATR];
112 unsigned char rbuf[512]; 111 unsigned char rbuf[512];
@@ -884,8 +883,7 @@ static void monitor_card(unsigned long p)
884 /* slow down warning, but prompt immediately after insertion */ 883 /* slow down warning, but prompt immediately after insertion */
885 if (dev->cwarn == 0 || dev->cwarn == 10) { 884 if (dev->cwarn == 0 || dev->cwarn == 10) {
886 set_bit(IS_BAD_CARD, &dev->flags); 885 set_bit(IS_BAD_CARD, &dev->flags);
887 printk(KERN_WARNING MODULE_NAME ": device %s: ", 886 dev_warn(&dev->p_dev->dev, MODULE_NAME ": ");
888 dev->node.dev_name);
889 if (test_bit(IS_BAD_CSUM, &dev->flags)) { 887 if (test_bit(IS_BAD_CSUM, &dev->flags)) {
890 DEBUGP(4, dev, "ATR checksum (0x%.2x, should " 888 DEBUGP(4, dev, "ATR checksum (0x%.2x, should "
891 "be zero) failed\n", dev->atr_csum); 889 "be zero) failed\n", dev->atr_csum);
@@ -1781,11 +1779,6 @@ static int cm4000_config(struct pcmcia_device * link, int devno)
1781 goto cs_release; 1779 goto cs_release;
1782 1780
1783 dev = link->priv; 1781 dev = link->priv;
1784 sprintf(dev->node.dev_name, DEVICE_NAME "%d", devno);
1785 dev->node.major = major;
1786 dev->node.minor = devno;
1787 dev->node.next = NULL;
1788 link->dev_node = &dev->node;
1789 1782
1790 return 0; 1783 return 0;
1791 1784
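Note: with dev_node_t gone the driver no longer keeps its own printable device name, so the cm4000 hunk swaps printk(KERN_WARNING ... dev->node.dev_name) for dev_warn(), which lets the driver core prefix the message with the device's name. A minimal sketch of that logging style (the helper and its csum argument are illustrative):

#include <linux/device.h>
#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>

/* 'link' is the struct pcmcia_device handed to the driver's probe() */
static void report_bad_card(struct pcmcia_device *link, unsigned char csum)
{
	/* printed with the driver and device name as prefix */
	dev_warn(&link->dev, "ATR checksum 0x%.2x, should be zero\n", csum);
}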
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c
index a6a70e476bea..c0775c844e08 100644
--- a/drivers/char/pcmcia/cm4040_cs.c
+++ b/drivers/char/pcmcia/cm4040_cs.c
@@ -72,7 +72,6 @@ static struct class *cmx_class;
72 72
73struct reader_dev { 73struct reader_dev {
74 struct pcmcia_device *p_dev; 74 struct pcmcia_device *p_dev;
75 dev_node_t node;
76 wait_queue_head_t devq; 75 wait_queue_head_t devq;
77 wait_queue_head_t poll_wait; 76 wait_queue_head_t poll_wait;
78 wait_queue_head_t read_wait; 77 wait_queue_head_t read_wait;
@@ -568,10 +567,6 @@ static int reader_config(struct pcmcia_device *link, int devno)
568 } 567 }
569 568
570 dev = link->priv; 569 dev = link->priv;
571 sprintf(dev->node.dev_name, DEVICE_NAME "%d", devno);
572 dev->node.major = major;
573 dev->node.minor = devno;
574 dev->node.next = &dev->node;
575 570
576 DEBUGP(2, dev, "device " DEVICE_NAME "%d at 0x%.4x-0x%.4x\n", devno, 571 DEBUGP(2, dev, "device " DEVICE_NAME "%d at 0x%.4x-0x%.4x\n", devno,
577 link->io.BasePort1, link->io.BasePort1+link->io.NumPorts1); 572 link->io.BasePort1, link->io.BasePort1+link->io.NumPorts1);
diff --git a/drivers/char/pcmcia/ipwireless/main.c b/drivers/char/pcmcia/ipwireless/main.c
index dff24dae1485..63c32e3f23ba 100644
--- a/drivers/char/pcmcia/ipwireless/main.c
+++ b/drivers/char/pcmcia/ipwireless/main.c
@@ -195,9 +195,6 @@ static int config_ipwireless(struct ipw_dev *ipw)
195 link->conf.Attributes = CONF_ENABLE_IRQ; 195 link->conf.Attributes = CONF_ENABLE_IRQ;
196 link->conf.IntType = INT_MEMORY_AND_IO; 196 link->conf.IntType = INT_MEMORY_AND_IO;
197 197
198 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
199 link->irq.Handler = ipwireless_interrupt;
200
201 INIT_WORK(&ipw->work_reboot, signalled_reboot_work); 198 INIT_WORK(&ipw->work_reboot, signalled_reboot_work);
202 199
203 ipwireless_init_hardware_v1(ipw->hardware, link->io.BasePort1, 200 ipwireless_init_hardware_v1(ipw->hardware, link->io.BasePort1,
@@ -205,8 +202,7 @@ static int config_ipwireless(struct ipw_dev *ipw)
205 ipw->is_v2_card, signalled_reboot_callback, 202 ipw->is_v2_card, signalled_reboot_callback,
206 ipw); 203 ipw);
207 204
208 ret = pcmcia_request_irq(link, &link->irq); 205 ret = pcmcia_request_irq(link, ipwireless_interrupt);
209
210 if (ret != 0) 206 if (ret != 0)
211 goto exit; 207 goto exit;
212 208
@@ -217,7 +213,7 @@ static int config_ipwireless(struct ipw_dev *ipw)
217 (unsigned int) link->io.BasePort1, 213 (unsigned int) link->io.BasePort1,
218 (unsigned int) (link->io.BasePort1 + 214 (unsigned int) (link->io.BasePort1 +
219 link->io.NumPorts1 - 1), 215 link->io.NumPorts1 - 1),
220 (unsigned int) link->irq.AssignedIRQ); 216 (unsigned int) link->irq);
221 if (ipw->attr_memory && ipw->common_memory) 217 if (ipw->attr_memory && ipw->common_memory)
222 printk(KERN_INFO IPWIRELESS_PCCARD_NAME 218 printk(KERN_INFO IPWIRELESS_PCCARD_NAME
223 ": attr memory 0x%08lx-0x%08lx, common memory 0x%08lx-0x%08lx\n", 219 ": attr memory 0x%08lx-0x%08lx, common memory 0x%08lx-0x%08lx\n",
@@ -232,8 +228,7 @@ static int config_ipwireless(struct ipw_dev *ipw)
232 if (!ipw->network) 228 if (!ipw->network)
233 goto exit; 229 goto exit;
234 230
235 ipw->tty = ipwireless_tty_create(ipw->hardware, ipw->network, 231 ipw->tty = ipwireless_tty_create(ipw->hardware, ipw->network);
236 ipw->nodes);
237 if (!ipw->tty) 232 if (!ipw->tty)
238 goto exit; 233 goto exit;
239 234
@@ -248,8 +243,6 @@ static int config_ipwireless(struct ipw_dev *ipw)
248 if (ret != 0) 243 if (ret != 0)
249 goto exit; 244 goto exit;
250 245
251 link->dev_node = &ipw->nodes[0];
252
253 return 0; 246 return 0;
254 247
255exit: 248exit:
@@ -271,8 +264,6 @@ exit:
271 264
272static void release_ipwireless(struct ipw_dev *ipw) 265static void release_ipwireless(struct ipw_dev *ipw)
273{ 266{
274 pcmcia_disable_device(ipw->link);
275
276 if (ipw->common_memory) { 267 if (ipw->common_memory) {
277 release_mem_region(ipw->request_common_memory.Base, 268 release_mem_region(ipw->request_common_memory.Base,
278 ipw->request_common_memory.Size); 269 ipw->request_common_memory.Size);
@@ -288,7 +279,6 @@ static void release_ipwireless(struct ipw_dev *ipw)
288 if (ipw->attr_memory) 279 if (ipw->attr_memory)
289 pcmcia_release_window(ipw->link, ipw->handle_attr_memory); 280 pcmcia_release_window(ipw->link, ipw->handle_attr_memory);
290 281
291 /* Break the link with Card Services */
292 pcmcia_disable_device(ipw->link); 282 pcmcia_disable_device(ipw->link);
293} 283}
294 284
@@ -313,9 +303,6 @@ static int ipwireless_attach(struct pcmcia_device *link)
313 ipw->link = link; 303 ipw->link = link;
314 link->priv = ipw; 304 link->priv = ipw;
315 305
316 /* Link this device into our device list. */
317 link->dev_node = &ipw->nodes[0];
318
319 ipw->hardware = ipwireless_hardware_create(); 306 ipw->hardware = ipwireless_hardware_create();
320 if (!ipw->hardware) { 307 if (!ipw->hardware) {
321 kfree(ipw); 308 kfree(ipw);
diff --git a/drivers/char/pcmcia/ipwireless/main.h b/drivers/char/pcmcia/ipwireless/main.h
index 0e0363af9ab2..96d0ef31b172 100644
--- a/drivers/char/pcmcia/ipwireless/main.h
+++ b/drivers/char/pcmcia/ipwireless/main.h
@@ -54,7 +54,6 @@ struct ipw_dev {
54 void __iomem *common_memory; 54 void __iomem *common_memory;
55 win_req_t request_common_memory; 55 win_req_t request_common_memory;
56 56
57 dev_node_t nodes[2];
58 /* Reference to attribute memory, containing CIS data */ 57 /* Reference to attribute memory, containing CIS data */
59 void *attribute_memory; 58 void *attribute_memory;
60 59
diff --git a/drivers/char/pcmcia/ipwireless/tty.c b/drivers/char/pcmcia/ipwireless/tty.c
index 2bb7874a6899..1a2c2c3b068f 100644
--- a/drivers/char/pcmcia/ipwireless/tty.c
+++ b/drivers/char/pcmcia/ipwireless/tty.c
@@ -487,7 +487,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty, struct file *file,
487 return tty_mode_ioctl(linux_tty, file, cmd , arg); 487 return tty_mode_ioctl(linux_tty, file, cmd , arg);
488} 488}
489 489
490static int add_tty(dev_node_t *nodesp, int j, 490static int add_tty(int j,
491 struct ipw_hardware *hardware, 491 struct ipw_hardware *hardware,
492 struct ipw_network *network, int channel_idx, 492 struct ipw_network *network, int channel_idx,
493 int secondary_channel_idx, int tty_type) 493 int secondary_channel_idx, int tty_type)
@@ -510,19 +510,13 @@ static int add_tty(dev_node_t *nodesp, int j,
510 ipwireless_associate_network_tty(network, 510 ipwireless_associate_network_tty(network,
511 secondary_channel_idx, 511 secondary_channel_idx,
512 ttys[j]); 512 ttys[j]);
513 if (nodesp != NULL) {
514 sprintf(nodesp->dev_name, "ttyIPWp%d", j);
515 nodesp->major = ipw_tty_driver->major;
516 nodesp->minor = j + ipw_tty_driver->minor_start;
517 }
518 if (get_tty(j + ipw_tty_driver->minor_start) == ttys[j]) 513 if (get_tty(j + ipw_tty_driver->minor_start) == ttys[j])
519 report_registering(ttys[j]); 514 report_registering(ttys[j]);
520 return 0; 515 return 0;
521} 516}
522 517
523struct ipw_tty *ipwireless_tty_create(struct ipw_hardware *hardware, 518struct ipw_tty *ipwireless_tty_create(struct ipw_hardware *hardware,
524 struct ipw_network *network, 519 struct ipw_network *network)
525 dev_node_t *nodes)
526{ 520{
527 int i, j; 521 int i, j;
528 522
@@ -539,26 +533,23 @@ struct ipw_tty *ipwireless_tty_create(struct ipw_hardware *hardware,
539 if (allfree) { 533 if (allfree) {
540 j = i; 534 j = i;
541 535
542 if (add_tty(&nodes[0], j, hardware, network, 536 if (add_tty(j, hardware, network,
543 IPW_CHANNEL_DIALLER, IPW_CHANNEL_RAS, 537 IPW_CHANNEL_DIALLER, IPW_CHANNEL_RAS,
544 TTYTYPE_MODEM)) 538 TTYTYPE_MODEM))
545 return NULL; 539 return NULL;
546 540
547 j += IPWIRELESS_PCMCIA_MINOR_RANGE; 541 j += IPWIRELESS_PCMCIA_MINOR_RANGE;
548 if (add_tty(&nodes[1], j, hardware, network, 542 if (add_tty(j, hardware, network,
549 IPW_CHANNEL_DIALLER, -1, 543 IPW_CHANNEL_DIALLER, -1,
550 TTYTYPE_MONITOR)) 544 TTYTYPE_MONITOR))
551 return NULL; 545 return NULL;
552 546
553 j += IPWIRELESS_PCMCIA_MINOR_RANGE; 547 j += IPWIRELESS_PCMCIA_MINOR_RANGE;
554 if (add_tty(NULL, j, hardware, network, 548 if (add_tty(j, hardware, network,
555 IPW_CHANNEL_RAS, -1, 549 IPW_CHANNEL_RAS, -1,
556 TTYTYPE_RAS_RAW)) 550 TTYTYPE_RAS_RAW))
557 return NULL; 551 return NULL;
558 552
559 nodes[0].next = &nodes[1];
560 nodes[1].next = NULL;
561
562 return ttys[i]; 553 return ttys[i];
563 } 554 }
564 } 555 }
diff --git a/drivers/char/pcmcia/ipwireless/tty.h b/drivers/char/pcmcia/ipwireless/tty.h
index b0deb9168b6b..4da6c201f727 100644
--- a/drivers/char/pcmcia/ipwireless/tty.h
+++ b/drivers/char/pcmcia/ipwireless/tty.h
@@ -34,8 +34,7 @@ int ipwireless_tty_init(void);
34void ipwireless_tty_release(void); 34void ipwireless_tty_release(void);
35 35
36struct ipw_tty *ipwireless_tty_create(struct ipw_hardware *hw, 36struct ipw_tty *ipwireless_tty_create(struct ipw_hardware *hw,
37 struct ipw_network *net, 37 struct ipw_network *net);
38 dev_node_t *nodes);
39void ipwireless_tty_free(struct ipw_tty *tty); 38void ipwireless_tty_free(struct ipw_tty *tty);
40void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data, 39void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
41 unsigned int length); 40 unsigned int length);
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index c31a0d913d37..308903ec8bf8 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -220,7 +220,6 @@ typedef struct _mgslpc_info {
220 220
221 /* PCMCIA support */ 221 /* PCMCIA support */
222 struct pcmcia_device *p_dev; 222 struct pcmcia_device *p_dev;
223 dev_node_t node;
224 int stop; 223 int stop;
225 224
226 /* SPPP/Cisco HDLC device parts */ 225 /* SPPP/Cisco HDLC device parts */
@@ -552,10 +551,6 @@ static int mgslpc_probe(struct pcmcia_device *link)
552 551
553 /* Initialize the struct pcmcia_device structure */ 552 /* Initialize the struct pcmcia_device structure */
554 553
555 /* Interrupt setup */
556 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
557 link->irq.Handler = NULL;
558
559 link->conf.Attributes = 0; 554 link->conf.Attributes = 0;
560 link->conf.IntType = INT_MEMORY_AND_IO; 555 link->conf.IntType = INT_MEMORY_AND_IO;
561 556
@@ -608,9 +603,7 @@ static int mgslpc_config(struct pcmcia_device *link)
608 link->conf.ConfigIndex = 8; 603 link->conf.ConfigIndex = 8;
609 link->conf.Present = PRESENT_OPTION; 604 link->conf.Present = PRESENT_OPTION;
610 605
611 link->irq.Handler = mgslpc_isr; 606 ret = pcmcia_request_irq(link, mgslpc_isr);
612
613 ret = pcmcia_request_irq(link, &link->irq);
614 if (ret) 607 if (ret)
615 goto failed; 608 goto failed;
616 ret = pcmcia_request_configuration(link, &link->conf); 609 ret = pcmcia_request_configuration(link, &link->conf);
@@ -618,17 +611,12 @@ static int mgslpc_config(struct pcmcia_device *link)
618 goto failed; 611 goto failed;
619 612
620 info->io_base = link->io.BasePort1; 613 info->io_base = link->io.BasePort1;
621 info->irq_level = link->irq.AssignedIRQ; 614 info->irq_level = link->irq;
622
623 /* add to linked list of devices */
624 sprintf(info->node.dev_name, "mgslpc0");
625 info->node.major = info->node.minor = 0;
626 link->dev_node = &info->node;
627 615
628 printk(KERN_INFO "%s: index 0x%02x:", 616 dev_info(&link->dev, "index 0x%02x:",
629 info->node.dev_name, link->conf.ConfigIndex); 617 link->conf.ConfigIndex);
630 if (link->conf.Attributes & CONF_ENABLE_IRQ) 618 if (link->conf.Attributes & CONF_ENABLE_IRQ)
631 printk(", irq %d", link->irq.AssignedIRQ); 619 printk(", irq %d", link->irq);
632 if (link->io.NumPorts1) 620 if (link->io.NumPorts1)
633 printk(", io 0x%04x-0x%04x", link->io.BasePort1, 621 printk(", io 0x%04x-0x%04x", link->io.BasePort1,
634 link->io.BasePort1+link->io.NumPorts1-1); 622 link->io.BasePort1+link->io.NumPorts1-1);
diff --git a/drivers/char/riscom8.c b/drivers/char/riscom8.c
index 0a8d1e56c993..b02332a5412f 100644
--- a/drivers/char/riscom8.c
+++ b/drivers/char/riscom8.c
@@ -909,6 +909,7 @@ static int rc_open(struct tty_struct *tty, struct file *filp)
909 if (error) 909 if (error)
910 return error; 910 return error;
911 911
912 tty->driver_data = port;
912 return tty_port_open(&port->port, tty, filp); 913 return tty_port_open(&port->port, tty, filp);
913} 914}
914 915
diff --git a/drivers/char/serial167.c b/drivers/char/serial167.c
index 8dfd24721a82..78a62ebe75c7 100644
--- a/drivers/char/serial167.c
+++ b/drivers/char/serial167.c
@@ -627,7 +627,6 @@ static irqreturn_t cd2401_rx_interrupt(int irq, void *dev_id)
627 char data; 627 char data;
628 int char_count; 628 int char_count;
629 int save_cnt; 629 int save_cnt;
630 int len;
631 630
632 /* determine the channel and change to that context */ 631 /* determine the channel and change to that context */
633 channel = (u_short) (base_addr[CyLICR] >> 2); 632 channel = (u_short) (base_addr[CyLICR] >> 2);
@@ -1528,7 +1527,6 @@ static int
1528cy_ioctl(struct tty_struct *tty, struct file *file, 1527cy_ioctl(struct tty_struct *tty, struct file *file,
1529 unsigned int cmd, unsigned long arg) 1528 unsigned int cmd, unsigned long arg)
1530{ 1529{
1531 unsigned long val;
1532 struct cyclades_port *info = tty->driver_data; 1530 struct cyclades_port *info = tty->driver_data;
1533 int ret_val = 0; 1531 int ret_val = 0;
1534 void __user *argp = (void __user *)arg; 1532 void __user *argp = (void __user *)arg;
diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c
index 0e511d61f544..6049fd731924 100644
--- a/drivers/char/stallion.c
+++ b/drivers/char/stallion.c
@@ -724,7 +724,6 @@ static int stl_open(struct tty_struct *tty, struct file *filp)
724{ 724{
725 struct stlport *portp; 725 struct stlport *portp;
726 struct stlbrd *brdp; 726 struct stlbrd *brdp;
727 struct tty_port *port;
728 unsigned int minordev, brdnr, panelnr; 727 unsigned int minordev, brdnr, panelnr;
729 int portnr; 728 int portnr;
730 729
@@ -754,7 +753,8 @@ static int stl_open(struct tty_struct *tty, struct file *filp)
754 portp = brdp->panels[panelnr]->ports[portnr]; 753 portp = brdp->panels[panelnr]->ports[portnr];
755 if (portp == NULL) 754 if (portp == NULL)
756 return -ENODEV; 755 return -ENODEV;
757 port = &portp->port; 756
757 tty->driver_data = portp;
758 return tty_port_open(&portp->port, tty, filp); 758 return tty_port_open(&portp->port, tty, filp);
759 759
760} 760}
@@ -841,7 +841,8 @@ static void stl_close(struct tty_struct *tty, struct file *filp)
841 pr_debug("stl_close(tty=%p,filp=%p)\n", tty, filp); 841 pr_debug("stl_close(tty=%p,filp=%p)\n", tty, filp);
842 842
843 portp = tty->driver_data; 843 portp = tty->driver_data;
844 BUG_ON(portp == NULL); 844 if(portp == NULL)
845 return;
845 tty_port_close(&portp->port, tty, filp); 846 tty_port_close(&portp->port, tty, filp);
846} 847}
847 848
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index 59de2525d303..d4e8b213a462 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -289,7 +289,7 @@ static struct sysrq_key_op sysrq_showstate_blocked_op = {
289 289
290static void sysrq_ftrace_dump(int key, struct tty_struct *tty) 290static void sysrq_ftrace_dump(int key, struct tty_struct *tty)
291{ 291{
292 ftrace_dump(); 292 ftrace_dump(DUMP_ALL);
293} 293}
294static struct sysrq_key_op sysrq_ftrace_dump_op = { 294static struct sysrq_key_op sysrq_ftrace_dump_op = {
295 .handler = sysrq_ftrace_dump, 295 .handler = sysrq_ftrace_dump,
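Note: ftrace_dump() gained a mode argument in this series, so callers now state which per-CPU trace buffers to dump; sysrq passes DUMP_ALL. To the best of my reading the enum sits next to the declaration in <linux/kernel.h>, with DUMP_ORIG restricting the dump to the CPU that triggered it; a trivial call site looks like:

#include <linux/kernel.h>

static void example_dump_trace(void)
{
	ftrace_dump(DUMP_ALL);		/* dump the trace buffers of every CPU */
}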
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index f5fc64f89c5c..4dc338f3d1aa 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -17,14 +17,16 @@ menuconfig TCG_TPM
17 obtained at: <http://sourceforge.net/projects/trousers>. To 17 obtained at: <http://sourceforge.net/projects/trousers>. To
18 compile this driver as a module, choose M here; the module 18 compile this driver as a module, choose M here; the module
19 will be called tpm. If unsure, say N. 19 will be called tpm. If unsure, say N.
20 Note: For more TPM drivers enable CONFIG_PNP, CONFIG_ACPI 20 Notes:
21 1) For more TPM drivers enable CONFIG_PNP, CONFIG_ACPI
21 and CONFIG_PNPACPI. 22 and CONFIG_PNPACPI.
23 2) Without ACPI enabled, the BIOS event log won't be accessible,
24 which is required to validate the PCR 0-7 values.
22 25
23if TCG_TPM 26if TCG_TPM
24 27
25config TCG_TIS 28config TCG_TIS
26 tristate "TPM Interface Specification 1.2 Interface" 29 tristate "TPM Interface Specification 1.2 Interface"
27 depends on PNP
28 ---help--- 30 ---help---
29 If you have a TPM security chip that is compliant with the 31 If you have a TPM security chip that is compliant with the
30 TCG TIS 1.2 TPM specification say Yes and it will be accessible 32 TCG TIS 1.2 TPM specification say Yes and it will be accessible
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index 068c816e6942..05ad4a17a28f 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -1068,6 +1068,27 @@ void tpm_remove_hardware(struct device *dev)
1068} 1068}
1069EXPORT_SYMBOL_GPL(tpm_remove_hardware); 1069EXPORT_SYMBOL_GPL(tpm_remove_hardware);
1070 1070
1071#define TPM_ORD_SAVESTATE cpu_to_be32(152)
1072#define SAVESTATE_RESULT_SIZE 10
1073
1074static struct tpm_input_header savestate_header = {
1075 .tag = TPM_TAG_RQU_COMMAND,
1076 .length = cpu_to_be32(10),
1077 .ordinal = TPM_ORD_SAVESTATE
1078};
1079
1080/* Bug workaround - some TPM's don't flush the most
1081 * recently changed pcr on suspend, so force the flush
1082 * with an extend to the selected _unused_ non-volatile pcr.
1083 */
1084static int tpm_suspend_pcr;
1085static int __init tpm_suspend_setup(char *str)
1086{
1087 get_option(&str, &tpm_suspend_pcr);
1088 return 1;
1089}
1090__setup("tpm_suspend_pcr=", tpm_suspend_setup);
1091
1071/* 1092/*
1072 * We are about to suspend. Save the TPM state 1093 * We are about to suspend. Save the TPM state
1073 * so that it can be restored. 1094 * so that it can be restored.
@@ -1075,17 +1096,29 @@ EXPORT_SYMBOL_GPL(tpm_remove_hardware);
1075int tpm_pm_suspend(struct device *dev, pm_message_t pm_state) 1096int tpm_pm_suspend(struct device *dev, pm_message_t pm_state)
1076{ 1097{
1077 struct tpm_chip *chip = dev_get_drvdata(dev); 1098 struct tpm_chip *chip = dev_get_drvdata(dev);
1078 u8 savestate[] = { 1099 struct tpm_cmd_t cmd;
1079 0, 193, /* TPM_TAG_RQU_COMMAND */ 1100 int rc;
1080 0, 0, 0, 10, /* blob length (in bytes) */ 1101
1081 0, 0, 0, 152 /* TPM_ORD_SaveState */ 1102 u8 dummy_hash[TPM_DIGEST_SIZE] = { 0 };
1082 };
1083 1103
1084 if (chip == NULL) 1104 if (chip == NULL)
1085 return -ENODEV; 1105 return -ENODEV;
1086 1106
1087 tpm_transmit(chip, savestate, sizeof(savestate)); 1107 /* for buggy tpm, flush pcrs with extend to selected dummy */
1088 return 0; 1108 if (tpm_suspend_pcr) {
1109 cmd.header.in = pcrextend_header;
1110 cmd.params.pcrextend_in.pcr_idx = cpu_to_be32(tpm_suspend_pcr);
1111 memcpy(cmd.params.pcrextend_in.hash, dummy_hash,
1112 TPM_DIGEST_SIZE);
1113 rc = transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE,
1114 "extending dummy pcr before suspend");
1115 }
1116
1117 /* now do the actual savestate */
1118 cmd.header.in = savestate_header;
1119 rc = transmit_cmd(chip, &cmd, SAVESTATE_RESULT_SIZE,
1120 "sending savestate before suspend");
1121 return rc;
1089} 1122}
1090EXPORT_SYMBOL_GPL(tpm_pm_suspend); 1123EXPORT_SYMBOL_GPL(tpm_pm_suspend);
1091 1124
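Note: the tpm.c hunk replaces the hand-built SaveState byte blob with a tpm_input_header and, as a workaround for TPMs that do not flush the most recently extended PCR, adds a tpm_suspend_pcr= boot parameter parsed with the classic __setup()/get_option() pair. A minimal sketch of that kind of integer boot parameter (qux_* names are illustrative; a __setup handler returns 1 to mark the option as consumed):

#include <linux/init.h>
#include <linux/kernel.h>

static int qux_pcr;				/* 0 means the workaround stays off */

static int __init qux_pcr_setup(char *str)
{
	get_option(&str, &qux_pcr);		/* parse the integer after "qux_pcr=" */
	return 1;				/* option handled */
}
__setup("qux_pcr=", qux_pcr_setup);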
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 94345994f8a6..24314a9cffe8 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -598,7 +598,7 @@ out_err:
598 tpm_remove_hardware(chip->dev); 598 tpm_remove_hardware(chip->dev);
599 return rc; 599 return rc;
600} 600}
601 601#ifdef CONFIG_PNP
602static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev, 602static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
603 const struct pnp_device_id *pnp_id) 603 const struct pnp_device_id *pnp_id)
604{ 604{
@@ -663,7 +663,7 @@ static struct pnp_driver tis_pnp_driver = {
663module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id, 663module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id,
664 sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444); 664 sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444);
665MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe"); 665MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");
666 666#endif
667static int tpm_tis_suspend(struct platform_device *dev, pm_message_t msg) 667static int tpm_tis_suspend(struct platform_device *dev, pm_message_t msg)
668{ 668{
669 return tpm_pm_suspend(&dev->dev, msg); 669 return tpm_pm_suspend(&dev->dev, msg);
@@ -690,21 +690,21 @@ MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry");
690static int __init init_tis(void) 690static int __init init_tis(void)
691{ 691{
692 int rc; 692 int rc;
693#ifdef CONFIG_PNP
694 if (!force)
695 return pnp_register_driver(&tis_pnp_driver);
696#endif
693 697
694 if (force) { 698 rc = platform_driver_register(&tis_drv);
695 rc = platform_driver_register(&tis_drv); 699 if (rc < 0)
696 if (rc < 0)
697 return rc;
698 if (IS_ERR(pdev=platform_device_register_simple("tpm_tis", -1, NULL, 0)))
699 return PTR_ERR(pdev);
700 if((rc=tpm_tis_init(&pdev->dev, TIS_MEM_BASE, TIS_MEM_LEN, 0)) != 0) {
701 platform_device_unregister(pdev);
702 platform_driver_unregister(&tis_drv);
703 }
704 return rc; 700 return rc;
701 if (IS_ERR(pdev=platform_device_register_simple("tpm_tis", -1, NULL, 0)))
702 return PTR_ERR(pdev);
703 if((rc=tpm_tis_init(&pdev->dev, TIS_MEM_BASE, TIS_MEM_LEN, 0)) != 0) {
704 platform_device_unregister(pdev);
705 platform_driver_unregister(&tis_drv);
705 } 706 }
706 707 return rc;
707 return pnp_register_driver(&tis_pnp_driver);
708} 708}
709 709
710static void __exit cleanup_tis(void) 710static void __exit cleanup_tis(void)
@@ -728,12 +728,14 @@ static void __exit cleanup_tis(void)
728 list_del(&i->list); 728 list_del(&i->list);
729 } 729 }
730 spin_unlock(&tis_lock); 730 spin_unlock(&tis_lock);
731 731#ifdef CONFIG_PNP
732 if (force) { 732 if (!force) {
733 platform_device_unregister(pdev);
734 platform_driver_unregister(&tis_drv);
735 } else
736 pnp_unregister_driver(&tis_pnp_driver); 733 pnp_unregister_driver(&tis_pnp_driver);
734 return;
735 }
736#endif
737 platform_device_unregister(pdev);
738 platform_driver_unregister(&tis_drv);
737} 739}
738 740
739module_init(init_tis); 741module_init(init_tis);
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 6da962c9b21c..d71f0fc34b46 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -1875,6 +1875,7 @@ got_driver:
1875 */ 1875 */
1876 if (filp->f_op == &hung_up_tty_fops) 1876 if (filp->f_op == &hung_up_tty_fops)
1877 filp->f_op = &tty_fops; 1877 filp->f_op = &tty_fops;
1878 unlock_kernel();
1878 goto retry_open; 1879 goto retry_open;
1879 } 1880 }
1880 unlock_kernel(); 1881 unlock_kernel();
diff --git a/drivers/clocksource/cs5535-clockevt.c b/drivers/clocksource/cs5535-clockevt.c
index b314a999aabe..d7be69f13154 100644
--- a/drivers/clocksource/cs5535-clockevt.c
+++ b/drivers/clocksource/cs5535-clockevt.c
@@ -154,14 +154,14 @@ static int __init cs5535_mfgpt_init(void)
154 if (cs5535_mfgpt_setup_irq(timer, MFGPT_CMP2, &timer_irq)) { 154 if (cs5535_mfgpt_setup_irq(timer, MFGPT_CMP2, &timer_irq)) {
155 printk(KERN_ERR DRV_NAME ": Could not set up IRQ %d\n", 155 printk(KERN_ERR DRV_NAME ": Could not set up IRQ %d\n",
156 timer_irq); 156 timer_irq);
157 return -EIO; 157 goto err_timer;
158 } 158 }
159 159
160 /* And register it with the kernel */ 160 /* And register it with the kernel */
161 ret = setup_irq(timer_irq, &mfgptirq); 161 ret = setup_irq(timer_irq, &mfgptirq);
162 if (ret) { 162 if (ret) {
163 printk(KERN_ERR DRV_NAME ": Unable to set up the interrupt.\n"); 163 printk(KERN_ERR DRV_NAME ": Unable to set up the interrupt.\n");
164 goto err; 164 goto err_irq;
165 } 165 }
166 166
167 /* Set the clock scale and enable the event mode for CMP2 */ 167 /* Set the clock scale and enable the event mode for CMP2 */
@@ -184,8 +184,10 @@ static int __init cs5535_mfgpt_init(void)
184 184
185 return 0; 185 return 0;
186 186
187err: 187err_irq:
188 cs5535_mfgpt_release_irq(cs5535_event_clock, MFGPT_CMP2, &timer_irq); 188 cs5535_mfgpt_release_irq(cs5535_event_clock, MFGPT_CMP2, &timer_irq);
189err_timer:
190 cs5535_mfgpt_free_timer(cs5535_event_clock);
189 printk(KERN_ERR DRV_NAME ": Unable to set up the MFGPT clock source\n"); 191 printk(KERN_ERR DRV_NAME ": Unable to set up the MFGPT clock source\n");
190 return -EIO; 192 return -EIO;
191} 193}
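Note: the cs5535 fix is a goto-unwind correction: the early failure path returned -EIO directly and leaked the MFGPT timer, so the labels are renamed and ordered so that each exit point releases exactly what has been acquired so far, in reverse order. The general shape, with hypothetical acquire/release helpers standing in for the timer allocation and the two IRQ setup calls of the real driver:

#include <linux/errno.h>

/* hypothetical helpers used only to illustrate the unwind order */
extern int acquire_timer(void);
extern int acquire_irq(void);
extern int register_irq(void);
extern void release_irq(void);
extern void release_timer(void);

static int example_init(void)
{
	int ret;

	ret = acquire_timer();
	if (ret)
		return ret;		/* nothing to undo yet */

	ret = acquire_irq();
	if (ret)
		goto err_timer;		/* undo only the timer */

	ret = register_irq();
	if (ret)
		goto err_irq;		/* undo the irq, then the timer */

	return 0;

err_irq:
	release_irq();
err_timer:
	release_timer();
	return ret;
}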
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 744f748cc84b..f6677cb19789 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -150,13 +150,12 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
150 150
151static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate) 151static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
152{ 152{
153 struct sh_timer_config *cfg = p->pdev->dev.platform_data;
154 int ret; 153 int ret;
155 154
156 /* enable clock */ 155 /* enable clock */
157 ret = clk_enable(p->clk); 156 ret = clk_enable(p->clk);
158 if (ret) { 157 if (ret) {
159 pr_err("sh_cmt: cannot enable clock \"%s\"\n", cfg->clk); 158 dev_err(&p->pdev->dev, "cannot enable clock\n");
160 return ret; 159 return ret;
161 } 160 }
162 161
@@ -279,7 +278,7 @@ static void sh_cmt_clock_event_program_verify(struct sh_cmt_priv *p,
279 delay = 1; 278 delay = 1;
280 279
281 if (!delay) 280 if (!delay)
282 pr_warning("sh_cmt: too long delay\n"); 281 dev_warn(&p->pdev->dev, "too long delay\n");
283 282
284 } while (delay); 283 } while (delay);
285} 284}
@@ -289,7 +288,7 @@ static void sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta)
289 unsigned long flags; 288 unsigned long flags;
290 289
291 if (delta > p->max_match_value) 290 if (delta > p->max_match_value)
292 pr_warning("sh_cmt: delta out of range\n"); 291 dev_warn(&p->pdev->dev, "delta out of range\n");
293 292
294 spin_lock_irqsave(&p->lock, flags); 293 spin_lock_irqsave(&p->lock, flags);
295 p->next_match_value = delta; 294 p->next_match_value = delta;
@@ -451,7 +450,7 @@ static int sh_cmt_register_clocksource(struct sh_cmt_priv *p,
451 cs->resume = sh_cmt_clocksource_resume; 450 cs->resume = sh_cmt_clocksource_resume;
452 cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8); 451 cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8);
453 cs->flags = CLOCK_SOURCE_IS_CONTINUOUS; 452 cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
454 pr_info("sh_cmt: %s used as clock source\n", cs->name); 453 dev_info(&p->pdev->dev, "used as clock source\n");
455 clocksource_register(cs); 454 clocksource_register(cs);
456 return 0; 455 return 0;
457} 456}
@@ -497,13 +496,11 @@ static void sh_cmt_clock_event_mode(enum clock_event_mode mode,
497 496
498 switch (mode) { 497 switch (mode) {
499 case CLOCK_EVT_MODE_PERIODIC: 498 case CLOCK_EVT_MODE_PERIODIC:
500 pr_info("sh_cmt: %s used for periodic clock events\n", 499 dev_info(&p->pdev->dev, "used for periodic clock events\n");
501 ced->name);
502 sh_cmt_clock_event_start(p, 1); 500 sh_cmt_clock_event_start(p, 1);
503 break; 501 break;
504 case CLOCK_EVT_MODE_ONESHOT: 502 case CLOCK_EVT_MODE_ONESHOT:
505 pr_info("sh_cmt: %s used for oneshot clock events\n", 503 dev_info(&p->pdev->dev, "used for oneshot clock events\n");
506 ced->name);
507 sh_cmt_clock_event_start(p, 0); 504 sh_cmt_clock_event_start(p, 0);
508 break; 505 break;
509 case CLOCK_EVT_MODE_SHUTDOWN: 506 case CLOCK_EVT_MODE_SHUTDOWN:
@@ -544,7 +541,7 @@ static void sh_cmt_register_clockevent(struct sh_cmt_priv *p,
544 ced->set_next_event = sh_cmt_clock_event_next; 541 ced->set_next_event = sh_cmt_clock_event_next;
545 ced->set_mode = sh_cmt_clock_event_mode; 542 ced->set_mode = sh_cmt_clock_event_mode;
546 543
547 pr_info("sh_cmt: %s used for clock events\n", ced->name); 544 dev_info(&p->pdev->dev, "used for clock events\n");
548 clockevents_register_device(ced); 545 clockevents_register_device(ced);
549} 546}
550 547
@@ -601,22 +598,27 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
601 /* map memory, let mapbase point to our channel */ 598 /* map memory, let mapbase point to our channel */
602 p->mapbase = ioremap_nocache(res->start, resource_size(res)); 599 p->mapbase = ioremap_nocache(res->start, resource_size(res));
603 if (p->mapbase == NULL) { 600 if (p->mapbase == NULL) {
604 pr_err("sh_cmt: failed to remap I/O memory\n"); 601 dev_err(&p->pdev->dev, "failed to remap I/O memory\n");
605 goto err0; 602 goto err0;
606 } 603 }
607 604
608 /* request irq using setup_irq() (too early for request_irq()) */ 605 /* request irq using setup_irq() (too early for request_irq()) */
609 p->irqaction.name = cfg->name; 606 p->irqaction.name = dev_name(&p->pdev->dev);
610 p->irqaction.handler = sh_cmt_interrupt; 607 p->irqaction.handler = sh_cmt_interrupt;
611 p->irqaction.dev_id = p; 608 p->irqaction.dev_id = p;
612 p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL; 609 p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \
610 IRQF_IRQPOLL | IRQF_NOBALANCING;
613 611
614 /* get hold of clock */ 612 /* get hold of clock */
615 p->clk = clk_get(&p->pdev->dev, cfg->clk); 613 p->clk = clk_get(&p->pdev->dev, "cmt_fck");
616 if (IS_ERR(p->clk)) { 614 if (IS_ERR(p->clk)) {
617 pr_err("sh_cmt: cannot get clock \"%s\"\n", cfg->clk); 615 dev_warn(&p->pdev->dev, "using deprecated clock lookup\n");
618 ret = PTR_ERR(p->clk); 616 p->clk = clk_get(&p->pdev->dev, cfg->clk);
619 goto err1; 617 if (IS_ERR(p->clk)) {
618 dev_err(&p->pdev->dev, "cannot get clock\n");
619 ret = PTR_ERR(p->clk);
620 goto err1;
621 }
620 } 622 }
621 623
622 if (resource_size(res) == 6) { 624 if (resource_size(res) == 6) {
@@ -629,17 +631,17 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
629 p->clear_bits = ~0xc000; 631 p->clear_bits = ~0xc000;
630 } 632 }
631 633
632 ret = sh_cmt_register(p, cfg->name, 634 ret = sh_cmt_register(p, (char *)dev_name(&p->pdev->dev),
633 cfg->clockevent_rating, 635 cfg->clockevent_rating,
634 cfg->clocksource_rating); 636 cfg->clocksource_rating);
635 if (ret) { 637 if (ret) {
636 pr_err("sh_cmt: registration failed\n"); 638 dev_err(&p->pdev->dev, "registration failed\n");
637 goto err1; 639 goto err1;
638 } 640 }
639 641
640 ret = setup_irq(irq, &p->irqaction); 642 ret = setup_irq(irq, &p->irqaction);
641 if (ret) { 643 if (ret) {
642 pr_err("sh_cmt: failed to request irq %d\n", irq); 644 dev_err(&p->pdev->dev, "failed to request irq %d\n", irq);
643 goto err1; 645 goto err1;
644 } 646 }
645 647
@@ -654,11 +656,10 @@ err0:
654static int __devinit sh_cmt_probe(struct platform_device *pdev) 656static int __devinit sh_cmt_probe(struct platform_device *pdev)
655{ 657{
656 struct sh_cmt_priv *p = platform_get_drvdata(pdev); 658 struct sh_cmt_priv *p = platform_get_drvdata(pdev);
657 struct sh_timer_config *cfg = pdev->dev.platform_data;
658 int ret; 659 int ret;
659 660
660 if (p) { 661 if (p) {
661 pr_info("sh_cmt: %s kept as earlytimer\n", cfg->name); 662 dev_info(&pdev->dev, "kept as earlytimer\n");
662 return 0; 663 return 0;
663 } 664 }
664 665
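Note: besides moving every message to the dev_*() helpers, the sh_cmt hunk (and the sh_mtu2/sh_tmu hunks that follow) changes clock lookup: the driver first asks clkdev for its function clock under the new conventional name ("cmt_fck" here) and only falls back to the clock name from platform data, with a deprecation warning, when that fails. A sketch of that fallback in isolation (the helper is illustrative; "cmt_fck" and the legacy-name fallback come from the patch):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* look up the timer's function clock; 'legacy_name' is the old platform-data name */
static struct clk *get_timer_clock(struct device *dev, const char *legacy_name)
{
	struct clk *clk;

	clk = clk_get(dev, "cmt_fck");		/* preferred, clkdev-based lookup */
	if (!IS_ERR(clk))
		return clk;

	dev_warn(dev, "using deprecated clock lookup\n");
	return clk_get(dev, legacy_name);	/* may still be an ERR_PTR on failure */
}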
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index 5fb78bfd73bb..ef7a5be8a09f 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -119,13 +119,12 @@ static void sh_mtu2_start_stop_ch(struct sh_mtu2_priv *p, int start)
119 119
120static int sh_mtu2_enable(struct sh_mtu2_priv *p) 120static int sh_mtu2_enable(struct sh_mtu2_priv *p)
121{ 121{
122 struct sh_timer_config *cfg = p->pdev->dev.platform_data;
123 int ret; 122 int ret;
124 123
125 /* enable clock */ 124 /* enable clock */
126 ret = clk_enable(p->clk); 125 ret = clk_enable(p->clk);
127 if (ret) { 126 if (ret) {
128 pr_err("sh_mtu2: cannot enable clock \"%s\"\n", cfg->clk); 127 dev_err(&p->pdev->dev, "cannot enable clock\n");
129 return ret; 128 return ret;
130 } 129 }
131 130
@@ -194,8 +193,7 @@ static void sh_mtu2_clock_event_mode(enum clock_event_mode mode,
194 193
195 switch (mode) { 194 switch (mode) {
196 case CLOCK_EVT_MODE_PERIODIC: 195 case CLOCK_EVT_MODE_PERIODIC:
197 pr_info("sh_mtu2: %s used for periodic clock events\n", 196 dev_info(&p->pdev->dev, "used for periodic clock events\n");
198 ced->name);
199 sh_mtu2_enable(p); 197 sh_mtu2_enable(p);
200 break; 198 break;
201 case CLOCK_EVT_MODE_UNUSED: 199 case CLOCK_EVT_MODE_UNUSED:
@@ -222,13 +220,13 @@ static void sh_mtu2_register_clockevent(struct sh_mtu2_priv *p,
222 ced->cpumask = cpumask_of(0); 220 ced->cpumask = cpumask_of(0);
223 ced->set_mode = sh_mtu2_clock_event_mode; 221 ced->set_mode = sh_mtu2_clock_event_mode;
224 222
225 pr_info("sh_mtu2: %s used for clock events\n", ced->name); 223 dev_info(&p->pdev->dev, "used for clock events\n");
226 clockevents_register_device(ced); 224 clockevents_register_device(ced);
227 225
228 ret = setup_irq(p->irqaction.irq, &p->irqaction); 226 ret = setup_irq(p->irqaction.irq, &p->irqaction);
229 if (ret) { 227 if (ret) {
230 pr_err("sh_mtu2: failed to request irq %d\n", 228 dev_err(&p->pdev->dev, "failed to request irq %d\n",
231 p->irqaction.irq); 229 p->irqaction.irq);
232 return; 230 return;
233 } 231 }
234} 232}
@@ -274,26 +272,32 @@ static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev)
274 /* map memory, let mapbase point to our channel */ 272 /* map memory, let mapbase point to our channel */
275 p->mapbase = ioremap_nocache(res->start, resource_size(res)); 273 p->mapbase = ioremap_nocache(res->start, resource_size(res));
276 if (p->mapbase == NULL) { 274 if (p->mapbase == NULL) {
277 pr_err("sh_mtu2: failed to remap I/O memory\n"); 275 dev_err(&p->pdev->dev, "failed to remap I/O memory\n");
278 goto err0; 276 goto err0;
279 } 277 }
280 278
281 /* setup data for setup_irq() (too early for request_irq()) */ 279 /* setup data for setup_irq() (too early for request_irq()) */
282 p->irqaction.name = cfg->name; 280 p->irqaction.name = dev_name(&p->pdev->dev);
283 p->irqaction.handler = sh_mtu2_interrupt; 281 p->irqaction.handler = sh_mtu2_interrupt;
284 p->irqaction.dev_id = p; 282 p->irqaction.dev_id = p;
285 p->irqaction.irq = irq; 283 p->irqaction.irq = irq;
286 p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL; 284 p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \
285 IRQF_IRQPOLL | IRQF_NOBALANCING;
287 286
288 /* get hold of clock */ 287 /* get hold of clock */
289 p->clk = clk_get(&p->pdev->dev, cfg->clk); 288 p->clk = clk_get(&p->pdev->dev, "mtu2_fck");
290 if (IS_ERR(p->clk)) { 289 if (IS_ERR(p->clk)) {
291 pr_err("sh_mtu2: cannot get clock \"%s\"\n", cfg->clk); 290 dev_warn(&p->pdev->dev, "using deprecated clock lookup\n");
292 ret = PTR_ERR(p->clk); 291 p->clk = clk_get(&p->pdev->dev, cfg->clk);
293 goto err1; 292 if (IS_ERR(p->clk)) {
293 dev_err(&p->pdev->dev, "cannot get clock\n");
294 ret = PTR_ERR(p->clk);
295 goto err1;
296 }
294 } 297 }
295 298
296 return sh_mtu2_register(p, cfg->name, cfg->clockevent_rating); 299 return sh_mtu2_register(p, (char *)dev_name(&p->pdev->dev),
300 cfg->clockevent_rating);
297 err1: 301 err1:
298 iounmap(p->mapbase); 302 iounmap(p->mapbase);
299 err0: 303 err0:
@@ -303,11 +307,10 @@ static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev)
303static int __devinit sh_mtu2_probe(struct platform_device *pdev) 307static int __devinit sh_mtu2_probe(struct platform_device *pdev)
304{ 308{
305 struct sh_mtu2_priv *p = platform_get_drvdata(pdev); 309 struct sh_mtu2_priv *p = platform_get_drvdata(pdev);
306 struct sh_timer_config *cfg = pdev->dev.platform_data;
307 int ret; 310 int ret;
308 311
309 if (p) { 312 if (p) {
310 pr_info("sh_mtu2: %s kept as earlytimer\n", cfg->name); 313 dev_info(&pdev->dev, "kept as earlytimer\n");
311 return 0; 314 return 0;
312 } 315 }
313 316
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index fc9ff1e5b770..8e44e14ec4c2 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -107,13 +107,12 @@ static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start)
107 107
108static int sh_tmu_enable(struct sh_tmu_priv *p) 108static int sh_tmu_enable(struct sh_tmu_priv *p)
109{ 109{
110 struct sh_timer_config *cfg = p->pdev->dev.platform_data;
111 int ret; 110 int ret;
112 111
113 /* enable clock */ 112 /* enable clock */
114 ret = clk_enable(p->clk); 113 ret = clk_enable(p->clk);
115 if (ret) { 114 if (ret) {
116 pr_err("sh_tmu: cannot enable clock \"%s\"\n", cfg->clk); 115 dev_err(&p->pdev->dev, "cannot enable clock\n");
117 return ret; 116 return ret;
118 } 117 }
119 118
@@ -229,7 +228,7 @@ static int sh_tmu_register_clocksource(struct sh_tmu_priv *p,
229 cs->disable = sh_tmu_clocksource_disable; 228 cs->disable = sh_tmu_clocksource_disable;
230 cs->mask = CLOCKSOURCE_MASK(32); 229 cs->mask = CLOCKSOURCE_MASK(32);
231 cs->flags = CLOCK_SOURCE_IS_CONTINUOUS; 230 cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
232 pr_info("sh_tmu: %s used as clock source\n", cs->name); 231 dev_info(&p->pdev->dev, "used as clock source\n");
233 clocksource_register(cs); 232 clocksource_register(cs);
234 return 0; 233 return 0;
235} 234}
@@ -277,13 +276,11 @@ static void sh_tmu_clock_event_mode(enum clock_event_mode mode,
277 276
278 switch (mode) { 277 switch (mode) {
279 case CLOCK_EVT_MODE_PERIODIC: 278 case CLOCK_EVT_MODE_PERIODIC:
280 pr_info("sh_tmu: %s used for periodic clock events\n", 279 dev_info(&p->pdev->dev, "used for periodic clock events\n");
281 ced->name);
282 sh_tmu_clock_event_start(p, 1); 280 sh_tmu_clock_event_start(p, 1);
283 break; 281 break;
284 case CLOCK_EVT_MODE_ONESHOT: 282 case CLOCK_EVT_MODE_ONESHOT:
285 pr_info("sh_tmu: %s used for oneshot clock events\n", 283 dev_info(&p->pdev->dev, "used for oneshot clock events\n");
286 ced->name);
287 sh_tmu_clock_event_start(p, 0); 284 sh_tmu_clock_event_start(p, 0);
288 break; 285 break;
289 case CLOCK_EVT_MODE_UNUSED: 286 case CLOCK_EVT_MODE_UNUSED:
@@ -324,13 +321,13 @@ static void sh_tmu_register_clockevent(struct sh_tmu_priv *p,
324 ced->set_next_event = sh_tmu_clock_event_next; 321 ced->set_next_event = sh_tmu_clock_event_next;
325 ced->set_mode = sh_tmu_clock_event_mode; 322 ced->set_mode = sh_tmu_clock_event_mode;
326 323
327 pr_info("sh_tmu: %s used for clock events\n", ced->name); 324 dev_info(&p->pdev->dev, "used for clock events\n");
328 clockevents_register_device(ced); 325 clockevents_register_device(ced);
329 326
330 ret = setup_irq(p->irqaction.irq, &p->irqaction); 327 ret = setup_irq(p->irqaction.irq, &p->irqaction);
331 if (ret) { 328 if (ret) {
332 pr_err("sh_tmu: failed to request irq %d\n", 329 dev_err(&p->pdev->dev, "failed to request irq %d\n",
333 p->irqaction.irq); 330 p->irqaction.irq);
334 return; 331 return;
335 } 332 }
336} 333}
@@ -379,26 +376,31 @@ static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
379 /* map memory, let mapbase point to our channel */ 376 /* map memory, let mapbase point to our channel */
380 p->mapbase = ioremap_nocache(res->start, resource_size(res)); 377 p->mapbase = ioremap_nocache(res->start, resource_size(res));
381 if (p->mapbase == NULL) { 378 if (p->mapbase == NULL) {
382 pr_err("sh_tmu: failed to remap I/O memory\n"); 379 dev_err(&p->pdev->dev, "failed to remap I/O memory\n");
383 goto err0; 380 goto err0;
384 } 381 }
385 382
386 /* setup data for setup_irq() (too early for request_irq()) */ 383 /* setup data for setup_irq() (too early for request_irq()) */
387 p->irqaction.name = cfg->name; 384 p->irqaction.name = dev_name(&p->pdev->dev);
388 p->irqaction.handler = sh_tmu_interrupt; 385 p->irqaction.handler = sh_tmu_interrupt;
389 p->irqaction.dev_id = p; 386 p->irqaction.dev_id = p;
390 p->irqaction.irq = irq; 387 p->irqaction.irq = irq;
391 p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL; 388 p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \
389 IRQF_IRQPOLL | IRQF_NOBALANCING;
392 390
393 /* get hold of clock */ 391 /* get hold of clock */
394 p->clk = clk_get(&p->pdev->dev, cfg->clk); 392 p->clk = clk_get(&p->pdev->dev, "tmu_fck");
395 if (IS_ERR(p->clk)) { 393 if (IS_ERR(p->clk)) {
396 pr_err("sh_tmu: cannot get clock \"%s\"\n", cfg->clk); 394 dev_warn(&p->pdev->dev, "using deprecated clock lookup\n");
397 ret = PTR_ERR(p->clk); 395 p->clk = clk_get(&p->pdev->dev, cfg->clk);
398 goto err1; 396 if (IS_ERR(p->clk)) {
397 dev_err(&p->pdev->dev, "cannot get clock\n");
398 ret = PTR_ERR(p->clk);
399 goto err1;
400 }
399 } 401 }
400 402
401 return sh_tmu_register(p, cfg->name, 403 return sh_tmu_register(p, (char *)dev_name(&p->pdev->dev),
402 cfg->clockevent_rating, 404 cfg->clockevent_rating,
403 cfg->clocksource_rating); 405 cfg->clocksource_rating);
404 err1: 406 err1:
@@ -410,11 +412,10 @@ static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
410static int __devinit sh_tmu_probe(struct platform_device *pdev) 412static int __devinit sh_tmu_probe(struct platform_device *pdev)
411{ 413{
412 struct sh_tmu_priv *p = platform_get_drvdata(pdev); 414 struct sh_tmu_priv *p = platform_get_drvdata(pdev);
413 struct sh_timer_config *cfg = pdev->dev.platform_data;
414 int ret; 415 int ret;
415 416
416 if (p) { 417 if (p) {
417 pr_info("sh_tmu: %s kept as earlytimer\n", cfg->name); 418 dev_info(&pdev->dev, "kept as earlytimer\n");
418 return 0; 419 return 0;
419 } 420 }
420 421
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 2d5d575e889d..063b2184caf5 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -662,32 +662,20 @@ static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
662 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq); 662 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
663} 663}
664 664
665#define define_one_ro(_name) \ 665cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
666static struct freq_attr _name = \ 666cpufreq_freq_attr_ro(cpuinfo_min_freq);
667__ATTR(_name, 0444, show_##_name, NULL) 667cpufreq_freq_attr_ro(cpuinfo_max_freq);
668 668cpufreq_freq_attr_ro(cpuinfo_transition_latency);
669#define define_one_ro0400(_name) \ 669cpufreq_freq_attr_ro(scaling_available_governors);
670static struct freq_attr _name = \ 670cpufreq_freq_attr_ro(scaling_driver);
671__ATTR(_name, 0400, show_##_name, NULL) 671cpufreq_freq_attr_ro(scaling_cur_freq);
672 672cpufreq_freq_attr_ro(bios_limit);
673#define define_one_rw(_name) \ 673cpufreq_freq_attr_ro(related_cpus);
674static struct freq_attr _name = \ 674cpufreq_freq_attr_ro(affected_cpus);
675__ATTR(_name, 0644, show_##_name, store_##_name) 675cpufreq_freq_attr_rw(scaling_min_freq);
676 676cpufreq_freq_attr_rw(scaling_max_freq);
677define_one_ro0400(cpuinfo_cur_freq); 677cpufreq_freq_attr_rw(scaling_governor);
678define_one_ro(cpuinfo_min_freq); 678cpufreq_freq_attr_rw(scaling_setspeed);
679define_one_ro(cpuinfo_max_freq);
680define_one_ro(cpuinfo_transition_latency);
681define_one_ro(scaling_available_governors);
682define_one_ro(scaling_driver);
683define_one_ro(scaling_cur_freq);
684define_one_ro(bios_limit);
685define_one_ro(related_cpus);
686define_one_ro(affected_cpus);
687define_one_rw(scaling_min_freq);
688define_one_rw(scaling_max_freq);
689define_one_rw(scaling_governor);
690define_one_rw(scaling_setspeed);
691 679
692static struct attribute *default_attrs[] = { 680static struct attribute *default_attrs[] = {
693 &cpuinfo_min_freq.attr, 681 &cpuinfo_min_freq.attr,
@@ -1113,6 +1101,8 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
1113 unsigned int cpu = sys_dev->id; 1101 unsigned int cpu = sys_dev->id;
1114 unsigned long flags; 1102 unsigned long flags;
1115 struct cpufreq_policy *data; 1103 struct cpufreq_policy *data;
1104 struct kobject *kobj;
1105 struct completion *cmp;
1116#ifdef CONFIG_SMP 1106#ifdef CONFIG_SMP
1117 struct sys_device *cpu_sys_dev; 1107 struct sys_device *cpu_sys_dev;
1118 unsigned int j; 1108 unsigned int j;
@@ -1141,10 +1131,11 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
1141 dprintk("removing link\n"); 1131 dprintk("removing link\n");
1142 cpumask_clear_cpu(cpu, data->cpus); 1132 cpumask_clear_cpu(cpu, data->cpus);
1143 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 1133 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1144 sysfs_remove_link(&sys_dev->kobj, "cpufreq"); 1134 kobj = &sys_dev->kobj;
1145 cpufreq_cpu_put(data); 1135 cpufreq_cpu_put(data);
1146 cpufreq_debug_enable_ratelimit(); 1136 cpufreq_debug_enable_ratelimit();
1147 unlock_policy_rwsem_write(cpu); 1137 unlock_policy_rwsem_write(cpu);
1138 sysfs_remove_link(kobj, "cpufreq");
1148 return 0; 1139 return 0;
1149 } 1140 }
1150#endif 1141#endif
@@ -1181,7 +1172,10 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
1181 data->governor->name, CPUFREQ_NAME_LEN); 1172 data->governor->name, CPUFREQ_NAME_LEN);
1182#endif 1173#endif
1183 cpu_sys_dev = get_cpu_sysdev(j); 1174 cpu_sys_dev = get_cpu_sysdev(j);
1184 sysfs_remove_link(&cpu_sys_dev->kobj, "cpufreq"); 1175 kobj = &cpu_sys_dev->kobj;
1176 unlock_policy_rwsem_write(cpu);
1177 sysfs_remove_link(kobj, "cpufreq");
1178 lock_policy_rwsem_write(cpu);
1185 cpufreq_cpu_put(data); 1179 cpufreq_cpu_put(data);
1186 } 1180 }
1187 } 1181 }
@@ -1192,19 +1186,22 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
1192 if (cpufreq_driver->target) 1186 if (cpufreq_driver->target)
1193 __cpufreq_governor(data, CPUFREQ_GOV_STOP); 1187 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
1194 1188
1195 kobject_put(&data->kobj); 1189 kobj = &data->kobj;
1190 cmp = &data->kobj_unregister;
1191 unlock_policy_rwsem_write(cpu);
1192 kobject_put(kobj);
1196 1193
1197 /* we need to make sure that the underlying kobj is actually 1194 /* we need to make sure that the underlying kobj is actually
1198 * not referenced anymore by anybody before we proceed with 1195 * not referenced anymore by anybody before we proceed with
1199 * unloading. 1196 * unloading.
1200 */ 1197 */
1201 dprintk("waiting for dropping of refcount\n"); 1198 dprintk("waiting for dropping of refcount\n");
1202 wait_for_completion(&data->kobj_unregister); 1199 wait_for_completion(cmp);
1203 dprintk("wait complete\n"); 1200 dprintk("wait complete\n");
1204 1201
1202 lock_policy_rwsem_write(cpu);
1205 if (cpufreq_driver->exit) 1203 if (cpufreq_driver->exit)
1206 cpufreq_driver->exit(data); 1204 cpufreq_driver->exit(data);
1207
1208 unlock_policy_rwsem_write(cpu); 1205 unlock_policy_rwsem_write(cpu);
1209 1206
1210 free_cpumask_var(data->related_cpus); 1207 free_cpumask_var(data->related_cpus);
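The remaining cpufreq.c hunks all follow one pattern: cache the kobject (and, for the final teardown, the completion), drop the per-CPU policy rwsem, and only then call sysfs_remove_link() or kobject_put(). The sysfs read/write paths for these attributes take the same rwsem, so removing the objects while still holding it can deadlock against a concurrent access. A condensed sketch of the ordering, using the locking helpers seen above (not a drop-in replacement for the full function):

/* Sketch: drop the policy rwsem before tearing down the policy kobject,
 * so blocked sysfs handlers can finish and release their references. */
static void example_policy_teardown(unsigned int cpu, struct cpufreq_policy *data)
{
        struct kobject *kobj = &data->kobj;
        struct completion *cmp = &data->kobj_unregister;

        unlock_policy_rwsem_write(cpu); /* let pending show()/store() complete */
        kobject_put(kobj);              /* drop our reference to the kobject */
        wait_for_completion(cmp);       /* wait until it is really released */
        lock_policy_rwsem_write(cpu);   /* retake the lock for ->exit() */
}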
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 599a40b25cb0..526bfbf69611 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -178,12 +178,8 @@ static ssize_t show_sampling_rate_min(struct kobject *kobj,
178 return sprintf(buf, "%u\n", min_sampling_rate); 178 return sprintf(buf, "%u\n", min_sampling_rate);
179} 179}
180 180
181#define define_one_ro(_name) \ 181define_one_global_ro(sampling_rate_max);
182static struct global_attr _name = \ 182define_one_global_ro(sampling_rate_min);
183__ATTR(_name, 0444, show_##_name, NULL)
184
185define_one_ro(sampling_rate_max);
186define_one_ro(sampling_rate_min);
187 183
188/* cpufreq_conservative Governor Tunables */ 184/* cpufreq_conservative Governor Tunables */
189#define show_one(file_name, object) \ 185#define show_one(file_name, object) \
@@ -221,12 +217,8 @@ show_one_old(freq_step);
221show_one_old(sampling_rate_min); 217show_one_old(sampling_rate_min);
222show_one_old(sampling_rate_max); 218show_one_old(sampling_rate_max);
223 219
224#define define_one_ro_old(object, _name) \ 220cpufreq_freq_attr_ro_old(sampling_rate_min);
225static struct freq_attr object = \ 221cpufreq_freq_attr_ro_old(sampling_rate_max);
226__ATTR(_name, 0444, show_##_name##_old, NULL)
227
228define_one_ro_old(sampling_rate_min_old, sampling_rate_min);
229define_one_ro_old(sampling_rate_max_old, sampling_rate_max);
230 222
231/*** delete after deprecation time ***/ 223/*** delete after deprecation time ***/
232 224
@@ -364,16 +356,12 @@ static ssize_t store_freq_step(struct kobject *a, struct attribute *b,
364 return count; 356 return count;
365} 357}
366 358
367#define define_one_rw(_name) \ 359define_one_global_rw(sampling_rate);
368static struct global_attr _name = \ 360define_one_global_rw(sampling_down_factor);
369__ATTR(_name, 0644, show_##_name, store_##_name) 361define_one_global_rw(up_threshold);
370 362define_one_global_rw(down_threshold);
371define_one_rw(sampling_rate); 363define_one_global_rw(ignore_nice_load);
372define_one_rw(sampling_down_factor); 364define_one_global_rw(freq_step);
373define_one_rw(up_threshold);
374define_one_rw(down_threshold);
375define_one_rw(ignore_nice_load);
376define_one_rw(freq_step);
377 365
378static struct attribute *dbs_attributes[] = { 366static struct attribute *dbs_attributes[] = {
379 &sampling_rate_max.attr, 367 &sampling_rate_max.attr,
@@ -409,16 +397,12 @@ write_one_old(down_threshold);
409write_one_old(ignore_nice_load); 397write_one_old(ignore_nice_load);
410write_one_old(freq_step); 398write_one_old(freq_step);
411 399
412#define define_one_rw_old(object, _name) \ 400cpufreq_freq_attr_rw_old(sampling_rate);
413static struct freq_attr object = \ 401cpufreq_freq_attr_rw_old(sampling_down_factor);
414__ATTR(_name, 0644, show_##_name##_old, store_##_name##_old) 402cpufreq_freq_attr_rw_old(up_threshold);
415 403cpufreq_freq_attr_rw_old(down_threshold);
416define_one_rw_old(sampling_rate_old, sampling_rate); 404cpufreq_freq_attr_rw_old(ignore_nice_load);
417define_one_rw_old(sampling_down_factor_old, sampling_down_factor); 405cpufreq_freq_attr_rw_old(freq_step);
418define_one_rw_old(up_threshold_old, up_threshold);
419define_one_rw_old(down_threshold_old, down_threshold);
420define_one_rw_old(ignore_nice_load_old, ignore_nice_load);
421define_one_rw_old(freq_step_old, freq_step);
422 406
423static struct attribute *dbs_attributes_old[] = { 407static struct attribute *dbs_attributes_old[] = {
424 &sampling_rate_max_old.attr, 408 &sampling_rate_max_old.attr,
@@ -444,6 +428,7 @@ static struct attribute_group dbs_attr_group_old = {
444static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) 428static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
445{ 429{
446 unsigned int load = 0; 430 unsigned int load = 0;
431 unsigned int max_load = 0;
447 unsigned int freq_target; 432 unsigned int freq_target;
448 433
449 struct cpufreq_policy *policy; 434 struct cpufreq_policy *policy;
@@ -501,6 +486,9 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
501 continue; 486 continue;
502 487
503 load = 100 * (wall_time - idle_time) / wall_time; 488 load = 100 * (wall_time - idle_time) / wall_time;
489
490 if (load > max_load)
491 max_load = load;
504 } 492 }
505 493
506 /* 494 /*
@@ -511,7 +499,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
511 return; 499 return;
512 500
513 /* Check for frequency increase */ 501 /* Check for frequency increase */
514 if (load > dbs_tuners_ins.up_threshold) { 502 if (max_load > dbs_tuners_ins.up_threshold) {
515 this_dbs_info->down_skip = 0; 503 this_dbs_info->down_skip = 0;
516 504
517 /* if we are already at full speed then break out early */ 505 /* if we are already at full speed then break out early */
@@ -538,7 +526,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
538 * can support the current CPU usage without triggering the up 526 * can support the current CPU usage without triggering the up
539 * policy. To be safe, we focus 10 points under the threshold. 527 * policy. To be safe, we focus 10 points under the threshold.
540 */ 528 */
541 if (load < (dbs_tuners_ins.down_threshold - 10)) { 529 if (max_load < (dbs_tuners_ins.down_threshold - 10)) {
542 freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100; 530 freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100;
543 531
544 this_dbs_info->requested_freq -= freq_target; 532 this_dbs_info->requested_freq -= freq_target;
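As in cpufreq.c, the governor now uses shared attribute helpers instead of per-file macros. define_one_global_ro()/define_one_global_rw() presumably expand to the same thing the removed macros did, only for struct global_attr; the cpufreq_freq_attr_*_old() variants presumably do likewise while appending "_old" to the attribute and handler names, matching the deprecated per-CPU entries they replace. A sketch of the assumed global helpers:

/* Assumed definitions; equivalent to the removed define_one_ro()/define_one_rw(). */
#define define_one_global_ro(_name)             \
static struct global_attr _name =               \
__ATTR(_name, 0444, show_##_name, NULL)

#define define_one_global_rw(_name)             \
static struct global_attr _name =               \
__ATTR(_name, 0644, show_##_name, store_##_name)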
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index bd444dc93cf2..e1314212d8d4 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -73,6 +73,7 @@ enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
73 73
74struct cpu_dbs_info_s { 74struct cpu_dbs_info_s {
75 cputime64_t prev_cpu_idle; 75 cputime64_t prev_cpu_idle;
76 cputime64_t prev_cpu_iowait;
76 cputime64_t prev_cpu_wall; 77 cputime64_t prev_cpu_wall;
77 cputime64_t prev_cpu_nice; 78 cputime64_t prev_cpu_nice;
78 struct cpufreq_policy *cur_policy; 79 struct cpufreq_policy *cur_policy;
@@ -108,6 +109,7 @@ static struct dbs_tuners {
108 unsigned int down_differential; 109 unsigned int down_differential;
109 unsigned int ignore_nice; 110 unsigned int ignore_nice;
110 unsigned int powersave_bias; 111 unsigned int powersave_bias;
112 unsigned int io_is_busy;
111} dbs_tuners_ins = { 113} dbs_tuners_ins = {
112 .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, 114 .up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
113 .down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL, 115 .down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
@@ -148,6 +150,16 @@ static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
148 return idle_time; 150 return idle_time;
149} 151}
150 152
153static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wall)
154{
155 u64 iowait_time = get_cpu_iowait_time_us(cpu, wall);
156
157 if (iowait_time == -1ULL)
158 return 0;
159
160 return iowait_time;
161}
162
151/* 163/*
152 * Find right freq to be set now with powersave_bias on. 164 * Find right freq to be set now with powersave_bias on.
153 * Returns the freq_hi to be used right now and will set freq_hi_jiffies, 165 * Returns the freq_hi to be used right now and will set freq_hi_jiffies,
@@ -234,12 +246,8 @@ static ssize_t show_sampling_rate_min(struct kobject *kobj,
234 return sprintf(buf, "%u\n", min_sampling_rate); 246 return sprintf(buf, "%u\n", min_sampling_rate);
235} 247}
236 248
237#define define_one_ro(_name) \ 249define_one_global_ro(sampling_rate_max);
238static struct global_attr _name = \ 250define_one_global_ro(sampling_rate_min);
239__ATTR(_name, 0444, show_##_name, NULL)
240
241define_one_ro(sampling_rate_max);
242define_one_ro(sampling_rate_min);
243 251
244/* cpufreq_ondemand Governor Tunables */ 252/* cpufreq_ondemand Governor Tunables */
245#define show_one(file_name, object) \ 253#define show_one(file_name, object) \
@@ -249,6 +257,7 @@ static ssize_t show_##file_name \
249 return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ 257 return sprintf(buf, "%u\n", dbs_tuners_ins.object); \
250} 258}
251show_one(sampling_rate, sampling_rate); 259show_one(sampling_rate, sampling_rate);
260show_one(io_is_busy, io_is_busy);
252show_one(up_threshold, up_threshold); 261show_one(up_threshold, up_threshold);
253show_one(ignore_nice_load, ignore_nice); 262show_one(ignore_nice_load, ignore_nice);
254show_one(powersave_bias, powersave_bias); 263show_one(powersave_bias, powersave_bias);
@@ -274,12 +283,8 @@ show_one_old(powersave_bias);
274show_one_old(sampling_rate_min); 283show_one_old(sampling_rate_min);
275show_one_old(sampling_rate_max); 284show_one_old(sampling_rate_max);
276 285
277#define define_one_ro_old(object, _name) \ 286cpufreq_freq_attr_ro_old(sampling_rate_min);
278static struct freq_attr object = \ 287cpufreq_freq_attr_ro_old(sampling_rate_max);
279__ATTR(_name, 0444, show_##_name##_old, NULL)
280
281define_one_ro_old(sampling_rate_min_old, sampling_rate_min);
282define_one_ro_old(sampling_rate_max_old, sampling_rate_max);
283 288
284/*** delete after deprecation time ***/ 289/*** delete after deprecation time ***/
285 290
@@ -299,6 +304,23 @@ static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
299 return count; 304 return count;
300} 305}
301 306
307static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b,
308 const char *buf, size_t count)
309{
310 unsigned int input;
311 int ret;
312
313 ret = sscanf(buf, "%u", &input);
314 if (ret != 1)
315 return -EINVAL;
316
317 mutex_lock(&dbs_mutex);
318 dbs_tuners_ins.io_is_busy = !!input;
319 mutex_unlock(&dbs_mutex);
320
321 return count;
322}
323
302static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, 324static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
303 const char *buf, size_t count) 325 const char *buf, size_t count)
304{ 326{
@@ -376,14 +398,11 @@ static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b,
376 return count; 398 return count;
377} 399}
378 400
379#define define_one_rw(_name) \ 401define_one_global_rw(sampling_rate);
380static struct global_attr _name = \ 402define_one_global_rw(io_is_busy);
381__ATTR(_name, 0644, show_##_name, store_##_name) 403define_one_global_rw(up_threshold);
382 404define_one_global_rw(ignore_nice_load);
383define_one_rw(sampling_rate); 405define_one_global_rw(powersave_bias);
384define_one_rw(up_threshold);
385define_one_rw(ignore_nice_load);
386define_one_rw(powersave_bias);
387 406
388static struct attribute *dbs_attributes[] = { 407static struct attribute *dbs_attributes[] = {
389 &sampling_rate_max.attr, 408 &sampling_rate_max.attr,
@@ -392,6 +411,7 @@ static struct attribute *dbs_attributes[] = {
392 &up_threshold.attr, 411 &up_threshold.attr,
393 &ignore_nice_load.attr, 412 &ignore_nice_load.attr,
394 &powersave_bias.attr, 413 &powersave_bias.attr,
414 &io_is_busy.attr,
395 NULL 415 NULL
396}; 416};
397 417
@@ -415,14 +435,10 @@ write_one_old(up_threshold);
415write_one_old(ignore_nice_load); 435write_one_old(ignore_nice_load);
416write_one_old(powersave_bias); 436write_one_old(powersave_bias);
417 437
418#define define_one_rw_old(object, _name) \ 438cpufreq_freq_attr_rw_old(sampling_rate);
419static struct freq_attr object = \ 439cpufreq_freq_attr_rw_old(up_threshold);
420__ATTR(_name, 0644, show_##_name##_old, store_##_name##_old) 440cpufreq_freq_attr_rw_old(ignore_nice_load);
421 441cpufreq_freq_attr_rw_old(powersave_bias);
422define_one_rw_old(sampling_rate_old, sampling_rate);
423define_one_rw_old(up_threshold_old, up_threshold);
424define_one_rw_old(ignore_nice_load_old, ignore_nice_load);
425define_one_rw_old(powersave_bias_old, powersave_bias);
426 442
427static struct attribute *dbs_attributes_old[] = { 443static struct attribute *dbs_attributes_old[] = {
428 &sampling_rate_max_old.attr, 444 &sampling_rate_max_old.attr,
@@ -470,14 +486,15 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
470 486
471 for_each_cpu(j, policy->cpus) { 487 for_each_cpu(j, policy->cpus) {
472 struct cpu_dbs_info_s *j_dbs_info; 488 struct cpu_dbs_info_s *j_dbs_info;
473 cputime64_t cur_wall_time, cur_idle_time; 489 cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time;
474 unsigned int idle_time, wall_time; 490 unsigned int idle_time, wall_time, iowait_time;
475 unsigned int load, load_freq; 491 unsigned int load, load_freq;
476 int freq_avg; 492 int freq_avg;
477 493
478 j_dbs_info = &per_cpu(od_cpu_dbs_info, j); 494 j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
479 495
480 cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); 496 cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
497 cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time);
481 498
482 wall_time = (unsigned int) cputime64_sub(cur_wall_time, 499 wall_time = (unsigned int) cputime64_sub(cur_wall_time,
483 j_dbs_info->prev_cpu_wall); 500 j_dbs_info->prev_cpu_wall);
@@ -487,6 +504,10 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
487 j_dbs_info->prev_cpu_idle); 504 j_dbs_info->prev_cpu_idle);
488 j_dbs_info->prev_cpu_idle = cur_idle_time; 505 j_dbs_info->prev_cpu_idle = cur_idle_time;
489 506
507 iowait_time = (unsigned int) cputime64_sub(cur_iowait_time,
508 j_dbs_info->prev_cpu_iowait);
509 j_dbs_info->prev_cpu_iowait = cur_iowait_time;
510
490 if (dbs_tuners_ins.ignore_nice) { 511 if (dbs_tuners_ins.ignore_nice) {
491 cputime64_t cur_nice; 512 cputime64_t cur_nice;
492 unsigned long cur_nice_jiffies; 513 unsigned long cur_nice_jiffies;
@@ -504,6 +525,16 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
504 idle_time += jiffies_to_usecs(cur_nice_jiffies); 525 idle_time += jiffies_to_usecs(cur_nice_jiffies);
505 } 526 }
506 527
528 /*
529 * For the purpose of ondemand, waiting for disk IO is an
530 * indication that you're performance critical, and not that
531 * the system is actually idle. So subtract the iowait time
532 * from the cpu idle time.
533 */
534
535 if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time)
536 idle_time -= iowait_time;
537
507 if (unlikely(!wall_time || wall_time < idle_time)) 538 if (unlikely(!wall_time || wall_time < idle_time))
508 continue; 539 continue;
509 540
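With io_is_busy enabled, the time a CPU spent waiting on I/O is folded back into its busy time before the load percentage is computed, so an I/O-bound workload no longer looks idle to the governor. A small worked sketch of the calculation above, with made-up sample deltas:

/* Sketch of the load calculation with io_is_busy (all values in microseconds
 * since the previous sample; the numbers are made up for illustration). */
static unsigned int example_load(unsigned int io_is_busy)
{
        unsigned int wall_time = 100000;        /* elapsed wall clock */
        unsigned int idle_time = 60000;         /* includes the iowait below */
        unsigned int iowait_time = 40000;

        if (io_is_busy && idle_time >= iowait_time)
                idle_time -= iowait_time;       /* count iowait as busy */

        return 100 * (wall_time - idle_time) / wall_time;
        /* io_is_busy = 1 gives 80, io_is_busy = 0 gives 40 */
}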
@@ -617,6 +648,29 @@ static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
617 cancel_delayed_work_sync(&dbs_info->work); 648 cancel_delayed_work_sync(&dbs_info->work);
618} 649}
619 650
651/*
652 * Not all CPUs want IO time to be accounted as busy; this depends on how
653 * efficient idling at a higher frequency/voltage is.
654 * Pavel Machek says this is not so for various generations of AMD and old
655 * Intel systems.
656 * Mike Chan (android.com) claims this is also not true for ARM.
657 * Because of this, whitelist specific known (series) of CPUs by default, and
658 * leave all others up to the user.
659 */
660static int should_io_be_busy(void)
661{
662#if defined(CONFIG_X86)
663 /*
664 * For Intel, Core 2 (model 15) and later have an efficient idle.
665 */
666 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
667 boot_cpu_data.x86 == 6 &&
668 boot_cpu_data.x86_model >= 15)
669 return 1;
670#endif
671 return 0;
672}
673
620static int cpufreq_governor_dbs(struct cpufreq_policy *policy, 674static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
621 unsigned int event) 675 unsigned int event)
622{ 676{
@@ -679,6 +733,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
679 dbs_tuners_ins.sampling_rate = 733 dbs_tuners_ins.sampling_rate =
680 max(min_sampling_rate, 734 max(min_sampling_rate,
681 latency * LATENCY_MULTIPLIER); 735 latency * LATENCY_MULTIPLIER);
736 dbs_tuners_ins.io_is_busy = should_io_be_busy();
682 } 737 }
683 mutex_unlock(&dbs_mutex); 738 mutex_unlock(&dbs_mutex);
684 739
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
index 1c1ceb4f218f..12c98900dcf8 100644
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -67,7 +67,7 @@ static int ladder_select_state(struct cpuidle_device *dev)
67 struct ladder_device *ldev = &__get_cpu_var(ladder_devices); 67 struct ladder_device *ldev = &__get_cpu_var(ladder_devices);
68 struct ladder_device_state *last_state; 68 struct ladder_device_state *last_state;
69 int last_residency, last_idx = ldev->last_state_idx; 69 int last_residency, last_idx = ldev->last_state_idx;
70 int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY); 70 int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
71 71
72 /* Special case when user has set very strict latency requirement */ 72 /* Special case when user has set very strict latency requirement */
73 if (unlikely(latency_req == 0)) { 73 if (unlikely(latency_req == 0)) {
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 1aea7157d8ff..b81ad9c731ae 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -100,7 +100,6 @@ struct menu_device {
100 int needs_update; 100 int needs_update;
101 101
102 unsigned int expected_us; 102 unsigned int expected_us;
103 unsigned int measured_us;
104 u64 predicted_us; 103 u64 predicted_us;
105 unsigned int exit_us; 104 unsigned int exit_us;
106 unsigned int bucket; 105 unsigned int bucket;
@@ -183,18 +182,18 @@ static u64 div_round64(u64 dividend, u32 divisor)
183static int menu_select(struct cpuidle_device *dev) 182static int menu_select(struct cpuidle_device *dev)
184{ 183{
185 struct menu_device *data = &__get_cpu_var(menu_devices); 184 struct menu_device *data = &__get_cpu_var(menu_devices);
186 int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY); 185 int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
187 int i; 186 int i;
188 int multiplier; 187 int multiplier;
189 188
190 data->last_state_idx = 0;
191 data->exit_us = 0;
192
193 if (data->needs_update) { 189 if (data->needs_update) {
194 menu_update(dev); 190 menu_update(dev);
195 data->needs_update = 0; 191 data->needs_update = 0;
196 } 192 }
197 193
194 data->last_state_idx = 0;
195 data->exit_us = 0;
196
198 /* Special case when user has set very strict latency requirement */ 197 /* Special case when user has set very strict latency requirement */
199 if (unlikely(latency_req == 0)) 198 if (unlikely(latency_req == 0))
200 return 0; 199 return 0;
@@ -294,7 +293,7 @@ static void menu_update(struct cpuidle_device *dev)
294 new_factor = data->correction_factor[data->bucket] 293 new_factor = data->correction_factor[data->bucket]
295 * (DECAY - 1) / DECAY; 294 * (DECAY - 1) / DECAY;
296 295
297 if (data->expected_us > 0 && data->measured_us < MAX_INTERESTING) 296 if (data->expected_us > 0 && measured_us < MAX_INTERESTING)
298 new_factor += RESOLUTION * measured_us / data->expected_us; 297 new_factor += RESOLUTION * measured_us / data->expected_us;
299 else 298 else
300 /* 299 /*
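Both cpuidle governors are only adjusted here for the PM QoS API rename: the accessor that returns the currently requested CPU/DMA latency bound is now pm_qos_request() instead of pm_qos_requirement(). A minimal sketch of how a governor consumes that value (the header name is the one used at the time; the helper itself is illustrative):

#include <linux/pm_qos_params.h>

/* Sketch: a C-state is usable only if its exit latency fits within the
 * current PM QoS CPU/DMA latency constraint. */
static int state_allowed(int exit_latency_us)
{
        int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);

        return exit_latency_us <= latency_req;
}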
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 7cc31b3f40d8..323afef77802 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -26,8 +26,7 @@
26#include <linux/dma-mapping.h> 26#include <linux/dma-mapping.h>
27#include <linux/platform_device.h> 27#include <linux/platform_device.h>
28#include <linux/pm_runtime.h> 28#include <linux/pm_runtime.h>
29 29#include <linux/sh_dma.h>
30#include <asm/dmaengine.h>
31 30
32#include "shdma.h" 31#include "shdma.h"
33 32
@@ -45,7 +44,7 @@ enum sh_dmae_desc_status {
45#define LOG2_DEFAULT_XFER_SIZE 2 44#define LOG2_DEFAULT_XFER_SIZE 2
46 45
47/* A bitmask with bits enough for enum sh_dmae_slave_chan_id */ 46/* A bitmask with bits enough for enum sh_dmae_slave_chan_id */
48static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SHDMA_SLAVE_NUMBER)]; 47static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)];
49 48
50static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all); 49static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);
51 50
@@ -190,7 +189,7 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
190 struct sh_dmae_device *shdev = container_of(sh_chan->common.device, 189 struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
191 struct sh_dmae_device, common); 190 struct sh_dmae_device, common);
192 struct sh_dmae_pdata *pdata = shdev->pdata; 191 struct sh_dmae_pdata *pdata = shdev->pdata;
193 struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id]; 192 const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
194 u16 __iomem *addr = shdev->dmars + chan_pdata->dmars / sizeof(u16); 193 u16 __iomem *addr = shdev->dmars + chan_pdata->dmars / sizeof(u16);
195 int shift = chan_pdata->dmars_bit; 194 int shift = chan_pdata->dmars_bit;
196 195
@@ -266,8 +265,8 @@ static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
266 return NULL; 265 return NULL;
267} 266}
268 267
269static struct sh_dmae_slave_config *sh_dmae_find_slave( 268static const struct sh_dmae_slave_config *sh_dmae_find_slave(
270 struct sh_dmae_chan *sh_chan, enum sh_dmae_slave_chan_id slave_id) 269 struct sh_dmae_chan *sh_chan, struct sh_dmae_slave *param)
271{ 270{
272 struct dma_device *dma_dev = sh_chan->common.device; 271 struct dma_device *dma_dev = sh_chan->common.device;
273 struct sh_dmae_device *shdev = container_of(dma_dev, 272 struct sh_dmae_device *shdev = container_of(dma_dev,
@@ -275,11 +274,11 @@ static struct sh_dmae_slave_config *sh_dmae_find_slave(
275 struct sh_dmae_pdata *pdata = shdev->pdata; 274 struct sh_dmae_pdata *pdata = shdev->pdata;
276 int i; 275 int i;
277 276
278 if ((unsigned)slave_id >= SHDMA_SLAVE_NUMBER) 277 if (param->slave_id >= SH_DMA_SLAVE_NUMBER)
279 return NULL; 278 return NULL;
280 279
281 for (i = 0; i < pdata->slave_num; i++) 280 for (i = 0; i < pdata->slave_num; i++)
282 if (pdata->slave[i].slave_id == slave_id) 281 if (pdata->slave[i].slave_id == param->slave_id)
283 return pdata->slave + i; 282 return pdata->slave + i;
284 283
285 return NULL; 284 return NULL;
@@ -290,6 +289,7 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
290 struct sh_dmae_chan *sh_chan = to_sh_chan(chan); 289 struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
291 struct sh_desc *desc; 290 struct sh_desc *desc;
292 struct sh_dmae_slave *param = chan->private; 291 struct sh_dmae_slave *param = chan->private;
292 int ret;
293 293
294 pm_runtime_get_sync(sh_chan->dev); 294 pm_runtime_get_sync(sh_chan->dev);
295 295
@@ -298,14 +298,18 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
298 * never runs concurrently with itself or free_chan_resources. 298 * never runs concurrently with itself or free_chan_resources.
299 */ 299 */
300 if (param) { 300 if (param) {
301 struct sh_dmae_slave_config *cfg; 301 const struct sh_dmae_slave_config *cfg;
302 302
303 cfg = sh_dmae_find_slave(sh_chan, param->slave_id); 303 cfg = sh_dmae_find_slave(sh_chan, param);
304 if (!cfg) 304 if (!cfg) {
305 return -EINVAL; 305 ret = -EINVAL;
306 goto efindslave;
307 }
306 308
307 if (test_and_set_bit(param->slave_id, sh_dmae_slave_used)) 309 if (test_and_set_bit(param->slave_id, sh_dmae_slave_used)) {
308 return -EBUSY; 310 ret = -EBUSY;
311 goto etestused;
312 }
309 313
310 param->config = cfg; 314 param->config = cfg;
311 315
@@ -334,10 +338,20 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
334 } 338 }
335 spin_unlock_bh(&sh_chan->desc_lock); 339 spin_unlock_bh(&sh_chan->desc_lock);
336 340
337 if (!sh_chan->descs_allocated) 341 if (!sh_chan->descs_allocated) {
338 pm_runtime_put(sh_chan->dev); 342 ret = -ENOMEM;
343 goto edescalloc;
344 }
339 345
340 return sh_chan->descs_allocated; 346 return sh_chan->descs_allocated;
347
348edescalloc:
349 if (param)
350 clear_bit(param->slave_id, sh_dmae_slave_used);
351etestused:
352efindslave:
353 pm_runtime_put(sh_chan->dev);
354 return ret;
341} 355}
342 356
343/* 357/*
@@ -559,12 +573,14 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
559{ 573{
560 struct sh_dmae_slave *param; 574 struct sh_dmae_slave *param;
561 struct sh_dmae_chan *sh_chan; 575 struct sh_dmae_chan *sh_chan;
576 dma_addr_t slave_addr;
562 577
563 if (!chan) 578 if (!chan)
564 return NULL; 579 return NULL;
565 580
566 sh_chan = to_sh_chan(chan); 581 sh_chan = to_sh_chan(chan);
567 param = chan->private; 582 param = chan->private;
583 slave_addr = param->config->addr;
568 584
569 /* Someone calling slave DMA on a public channel? */ 585 /* Someone calling slave DMA on a public channel? */
570 if (!param || !sg_len) { 586 if (!param || !sg_len) {
@@ -577,7 +593,7 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
577 * if (param != NULL), this is a successfully requested slave channel, 593 * if (param != NULL), this is a successfully requested slave channel,
578 * therefore param->config != NULL too. 594 * therefore param->config != NULL too.
579 */ 595 */
580 return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &param->config->addr, 596 return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &slave_addr,
581 direction, flags); 597 direction, flags);
582} 598}
583 599
@@ -858,7 +874,7 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
858 int irq, unsigned long flags) 874 int irq, unsigned long flags)
859{ 875{
860 int err; 876 int err;
861 struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id]; 877 const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
862 struct platform_device *pdev = to_platform_device(shdev->common.dev); 878 struct platform_device *pdev = to_platform_device(shdev->common.dev);
863 struct sh_dmae_chan *new_sh_chan; 879 struct sh_dmae_chan *new_sh_chan;
864 880
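For context on the sh_dmae_find_slave() change: a peripheral driver selects one of the platform-provided slave configurations by handing the dmaengine core a struct sh_dmae_slave with the desired slave_id; the filter callback stores it in chan->private, which is what sh_dmae_alloc_chan_resources() inspects above. A rough sketch of that client side (the filter and the slave_id value are illustrative placeholders, not taken from this patch):

#include <linux/dmaengine.h>
#include <linux/sh_dma.h>

/* Sketch: request a slave channel and attach the sh_dmae_slave descriptor. */
static bool shdma_filter_sketch(struct dma_chan *chan, void *arg)
{
        chan->private = arg;    /* seen later as 'param' in alloc_chan_resources */
        return true;
}

static struct dma_chan *request_slave_channel_sketch(void)
{
        /* a real ID comes from the SoC's slave ID definitions */
        static struct sh_dmae_slave param = { .slave_id = 0 };
        dma_cap_mask_t mask;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        return dma_request_channel(mask, shdma_filter_sketch, &param);
}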
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h
index 153609a1e96c..4021275a0a43 100644
--- a/drivers/dma/shdma.h
+++ b/drivers/dma/shdma.h
@@ -17,8 +17,8 @@
17#include <linux/interrupt.h> 17#include <linux/interrupt.h>
18#include <linux/list.h> 18#include <linux/list.h>
19 19
20#include <asm/dmaengine.h> 20#define SH_DMAC_MAX_CHANNELS 6
21 21#define SH_DMA_SLAVE_NUMBER 256
22#define SH_DMA_TCR_MAX 0x00FFFFFF /* 16MB */ 22#define SH_DMA_TCR_MAX 0x00FFFFFF /* 16MB */
23 23
24struct device; 24struct device;
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index 3ebc61067e54..75fcf1ac8bb7 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -1359,3 +1359,5 @@ module_exit(txx9dmac_exit);
1359MODULE_LICENSE("GPL"); 1359MODULE_LICENSE("GPL");
1360MODULE_DESCRIPTION("TXx9 DMA Controller driver"); 1360MODULE_DESCRIPTION("TXx9 DMA Controller driver");
1361MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>"); 1361MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>");
1362MODULE_ALIAS("platform:txx9dmac");
1363MODULE_ALIAS("platform:txx9dmac-chan");
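The two MODULE_ALIAS() lines let the driver be autoloaded: the platform bus emits MODALIAS=platform:<name> uevents for its devices, so the aliases have to match the device names this driver binds to. A minimal sketch of that relationship (probe/remove omitted; the driver name is inferred from the alias, not shown in this hunk):

#include <linux/platform_device.h>

/* Sketch: "platform:txx9dmac" in the device uevent resolves to this module
 * through the MODULE_ALIAS() entries added above. */
static struct platform_driver txx9dmac_driver_sketch = {
        .driver = {
                .name = "txx9dmac",
        },
};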
diff --git a/drivers/edac/edac_mce_amd.c b/drivers/edac/edac_mce_amd.c
index f5b6d9fe4def..97e64bcdbc06 100644
--- a/drivers/edac/edac_mce_amd.c
+++ b/drivers/edac/edac_mce_amd.c
@@ -294,7 +294,6 @@ wrong_ls_mce:
294void amd_decode_nb_mce(int node_id, struct err_regs *regs, int handle_errors) 294void amd_decode_nb_mce(int node_id, struct err_regs *regs, int handle_errors)
295{ 295{
296 u32 ec = ERROR_CODE(regs->nbsl); 296 u32 ec = ERROR_CODE(regs->nbsl);
297 u32 xec = EXT_ERROR_CODE(regs->nbsl);
298 297
299 if (!handle_errors) 298 if (!handle_errors)
300 return; 299 return;
@@ -324,7 +323,7 @@ void amd_decode_nb_mce(int node_id, struct err_regs *regs, int handle_errors)
324 pr_cont("\n"); 323 pr_cont("\n");
325 } 324 }
326 325
327 pr_emerg("%s.\n", EXT_ERR_MSG(xec)); 326 pr_emerg("%s.\n", EXT_ERR_MSG(regs->nbsl));
328 327
329 if (BUS_ERROR(ec) && nb_bus_decoder) 328 if (BUS_ERROR(ec) && nb_bus_decoder)
330 nb_bus_decoder(node_id, regs); 329 nb_bus_decoder(node_id, regs);
@@ -374,7 +373,7 @@ static int amd_decode_mce(struct notifier_block *nb, unsigned long val,
374 ((m->status & MCI_STATUS_PCC) ? "yes" : "no")); 373 ((m->status & MCI_STATUS_PCC) ? "yes" : "no"));
375 374
376 /* do the two bits[14:13] together */ 375 /* do the two bits[14:13] together */
377 ecc = m->status & (3ULL << 45); 376 ecc = (m->status >> 45) & 0x3;
378 if (ecc) 377 if (ecc)
379 pr_cont(", %sECC Error", ((ecc == 2) ? "C" : "U")); 378 pr_cont(", %sECC Error", ((ecc == 2) ? "C" : "U"));
380 379
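The old expression kept the two ECC bits in place in the 64-bit status word, so ecc could never equal 2 and every ECC error was reported as uncorrected. Shifting the bits down first yields 1 or 2 as the comparison expects (bits 45 and 46 of MCi_STATUS, the bits[14:13] of the high word mentioned in the comment). A tiny illustration:

/* Sketch: assume only bit 46 is set, the case that should print a
 * corrected ("C") ECC error. */
static void ecc_bits_example(void)
{
        unsigned long long status = 1ULL << 46;

        unsigned long long old_ecc = status & (3ULL << 45);     /* 0x400000000000 */
        unsigned int new_ecc = (status >> 45) & 0x3;            /* 2 */

        /* old_ecc == 2 is never true, so the old code always printed "U";
         * new_ecc == 2 correctly selects the corrected-error message. */
        (void)old_ecc;
        (void)new_ecc;
}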
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index 3784a47865b7..8f5aebfb29df 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -190,7 +190,7 @@ static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
190 for (try = 0; try < 5; try++) { 190 for (try = 0; try < 5; try++) {
191 new = allocate ? old - bandwidth : old + bandwidth; 191 new = allocate ? old - bandwidth : old + bandwidth;
192 if (new < 0 || new > BANDWIDTH_AVAILABLE_INITIAL) 192 if (new < 0 || new > BANDWIDTH_AVAILABLE_INITIAL)
193 break; 193 return -EBUSY;
194 194
195 data[0] = cpu_to_be32(old); 195 data[0] = cpu_to_be32(old);
196 data[1] = cpu_to_be32(new); 196 data[1] = cpu_to_be32(new);
@@ -218,7 +218,7 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation,
218 u32 channels_mask, u64 offset, bool allocate, __be32 data[2]) 218 u32 channels_mask, u64 offset, bool allocate, __be32 data[2])
219{ 219{
220 __be32 c, all, old; 220 __be32 c, all, old;
221 int i, retry = 5; 221 int i, ret = -EIO, retry = 5;
222 222
223 old = all = allocate ? cpu_to_be32(~0) : 0; 223 old = all = allocate ? cpu_to_be32(~0) : 0;
224 224
@@ -226,6 +226,8 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation,
226 if (!(channels_mask & 1 << i)) 226 if (!(channels_mask & 1 << i))
227 continue; 227 continue;
228 228
229 ret = -EBUSY;
230
229 c = cpu_to_be32(1 << (31 - i)); 231 c = cpu_to_be32(1 << (31 - i));
230 if ((old & c) != (all & c)) 232 if ((old & c) != (all & c))
231 continue; 233 continue;
@@ -251,12 +253,16 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation,
251 253
252 /* 1394-1995 IRM, fall through to retry. */ 254 /* 1394-1995 IRM, fall through to retry. */
253 default: 255 default:
254 if (retry--) 256 if (retry) {
257 retry--;
255 i--; 258 i--;
259 } else {
260 ret = -EIO;
261 }
256 } 262 }
257 } 263 }
258 264
259 return -EIO; 265 return ret;
260} 266}
261 267
262static void deallocate_channel(struct fw_card *card, int irm_id, 268static void deallocate_channel(struct fw_card *card, int irm_id,
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index c9388fbb3bcc..a3b083a7403a 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -1158,7 +1158,7 @@ static void handle_local_lock(struct fw_ohci *ohci,
1158 struct fw_packet *packet, u32 csr) 1158 struct fw_packet *packet, u32 csr)
1159{ 1159{
1160 struct fw_packet response; 1160 struct fw_packet response;
1161 int tcode, length, ext_tcode, sel; 1161 int tcode, length, ext_tcode, sel, try;
1162 __be32 *payload, lock_old; 1162 __be32 *payload, lock_old;
1163 u32 lock_arg, lock_data; 1163 u32 lock_arg, lock_data;
1164 1164
@@ -1185,21 +1185,26 @@ static void handle_local_lock(struct fw_ohci *ohci,
1185 reg_write(ohci, OHCI1394_CSRCompareData, lock_arg); 1185 reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
1186 reg_write(ohci, OHCI1394_CSRControl, sel); 1186 reg_write(ohci, OHCI1394_CSRControl, sel);
1187 1187
1188 if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000) 1188 for (try = 0; try < 20; try++)
1189 lock_old = cpu_to_be32(reg_read(ohci, OHCI1394_CSRData)); 1189 if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000) {
1190 else 1190 lock_old = cpu_to_be32(reg_read(ohci,
1191 fw_notify("swap not done yet\n"); 1191 OHCI1394_CSRData));
1192 fw_fill_response(&response, packet->header,
1193 RCODE_COMPLETE,
1194 &lock_old, sizeof(lock_old));
1195 goto out;
1196 }
1197
1198 fw_error("swap not done (CSR lock timeout)\n");
1199 fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0);
1192 1200
1193 fw_fill_response(&response, packet->header,
1194 RCODE_COMPLETE, &lock_old, sizeof(lock_old));
1195 out: 1201 out:
1196 fw_core_handle_response(&ohci->card, &response); 1202 fw_core_handle_response(&ohci->card, &response);
1197} 1203}
1198 1204
1199static void handle_local_request(struct context *ctx, struct fw_packet *packet) 1205static void handle_local_request(struct context *ctx, struct fw_packet *packet)
1200{ 1206{
1201 u64 offset; 1207 u64 offset, csr;
1202 u32 csr;
1203 1208
1204 if (ctx == &ctx->ohci->at_request_ctx) { 1209 if (ctx == &ctx->ohci->at_request_ctx) {
1205 packet->ack = ACK_PENDING; 1210 packet->ack = ACK_PENDING;
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 76be229c814d..eb0c3fe44b29 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -416,7 +416,8 @@ static int gpio_setup_irq(struct gpio_desc *desc, struct device *dev,
416 return 0; 416 return 0;
417 417
418free_sd: 418free_sd:
419 sysfs_put(pdesc->value_sd); 419 if (pdesc)
420 sysfs_put(pdesc->value_sd);
420free_id: 421free_id:
421 idr_remove(&pdesc_idr, id); 422 idr_remove(&pdesc_idr, id);
422 desc->flags &= GPIO_FLAGS_MASK; 423 desc->flags &= GPIO_FLAGS_MASK;
diff --git a/drivers/gpio/it8761e_gpio.c b/drivers/gpio/it8761e_gpio.c
index 753219cf993a..41a9388f2fde 100644
--- a/drivers/gpio/it8761e_gpio.c
+++ b/drivers/gpio/it8761e_gpio.c
@@ -80,8 +80,8 @@ static int it8761e_gpio_get(struct gpio_chip *gc, unsigned gpio_num)
80 u16 reg; 80 u16 reg;
81 u8 bit; 81 u8 bit;
82 82
83 bit = gpio_num % 7; 83 bit = gpio_num % 8;
84 reg = (gpio_num >= 7) ? gpio_ba + 1 : gpio_ba; 84 reg = (gpio_num >= 8) ? gpio_ba + 1 : gpio_ba;
85 85
86 return !!(inb(reg) & (1 << bit)); 86 return !!(inb(reg) & (1 << bit));
87} 87}
@@ -91,8 +91,8 @@ static int it8761e_gpio_direction_in(struct gpio_chip *gc, unsigned gpio_num)
91 u8 curr_dirs; 91 u8 curr_dirs;
92 u8 io_reg, bit; 92 u8 io_reg, bit;
93 93
94 bit = gpio_num % 7; 94 bit = gpio_num % 8;
95 io_reg = (gpio_num >= 7) ? GPIO2X_IO : GPIO1X_IO; 95 io_reg = (gpio_num >= 8) ? GPIO2X_IO : GPIO1X_IO;
96 96
97 spin_lock(&sio_lock); 97 spin_lock(&sio_lock);
98 98
@@ -116,8 +116,8 @@ static void it8761e_gpio_set(struct gpio_chip *gc,
116 u8 curr_vals, bit; 116 u8 curr_vals, bit;
117 u16 reg; 117 u16 reg;
118 118
119 bit = gpio_num % 7; 119 bit = gpio_num % 8;
120 reg = (gpio_num >= 7) ? gpio_ba + 1 : gpio_ba; 120 reg = (gpio_num >= 8) ? gpio_ba + 1 : gpio_ba;
121 121
122 spin_lock(&sio_lock); 122 spin_lock(&sio_lock);
123 123
@@ -135,8 +135,8 @@ static int it8761e_gpio_direction_out(struct gpio_chip *gc,
135{ 135{
136 u8 curr_dirs, io_reg, bit; 136 u8 curr_dirs, io_reg, bit;
137 137
138 bit = gpio_num % 7; 138 bit = gpio_num % 8;
139 io_reg = (gpio_num >= 7) ? GPIO2X_IO : GPIO1X_IO; 139 io_reg = (gpio_num >= 8) ? GPIO2X_IO : GPIO1X_IO;
140 140
141 it8761e_gpio_set(gc, gpio_num, val); 141 it8761e_gpio_set(gc, gpio_num, val);
142 142
@@ -200,7 +200,7 @@ static int __init it8761e_gpio_init(void)
200 return -EBUSY; 200 return -EBUSY;
201 201
202 it8761e_gpio_chip.base = -1; 202 it8761e_gpio_chip.base = -1;
203 it8761e_gpio_chip.ngpio = 14; 203 it8761e_gpio_chip.ngpio = 16;
204 204
205 err = gpiochip_add(&it8761e_gpio_chip); 205 err = gpiochip_add(&it8761e_gpio_chip);
206 if (err < 0) 206 if (err < 0)
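The IT8761E exposes its GPIOs through two 8-bit register banks, so the bit index has to be taken modulo 8 and the second bank selected from GPIO 8 upwards; the old modulo-7 arithmetic put GPIO 7 in the wrong bank and, together with ngpio = 14, left the top lines unreachable. A small worked sketch of the corrected mapping:

/* Sketch: bank/bit selection for two 8-bit GPIO data registers. */
static void select_bank_and_bit(unsigned int gpio_num, unsigned int gpio_ba)
{
        unsigned int bit = gpio_num % 8;
        unsigned int reg = (gpio_num >= 8) ? gpio_ba + 1 : gpio_ba;

        /* gpio 7  -> reg gpio_ba,     bit 7 (old math: gpio_ba + 1, bit 0)
         * gpio 9  -> reg gpio_ba + 1, bit 1
         * gpio 15 -> reg gpio_ba + 1, bit 7 (was beyond ngpio = 14)      */
        (void)bit;
        (void)reg;
}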
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c
index 7d521e1d17e1..b827c976dc62 100644
--- a/drivers/gpio/pca953x.c
+++ b/drivers/gpio/pca953x.c
@@ -252,6 +252,18 @@ static void pca953x_irq_bus_lock(unsigned int irq)
252static void pca953x_irq_bus_sync_unlock(unsigned int irq) 252static void pca953x_irq_bus_sync_unlock(unsigned int irq)
253{ 253{
254 struct pca953x_chip *chip = get_irq_chip_data(irq); 254 struct pca953x_chip *chip = get_irq_chip_data(irq);
255 uint16_t new_irqs;
256 uint16_t level;
257
258 /* Look for any newly setup interrupt */
259 new_irqs = chip->irq_trig_fall | chip->irq_trig_raise;
260 new_irqs &= ~chip->reg_direction;
261
262 while (new_irqs) {
263 level = __ffs(new_irqs);
264 pca953x_gpio_direction_input(&chip->gpio_chip, level);
265 new_irqs &= ~(1 << level);
266 }
255 267
256 mutex_unlock(&chip->irq_lock); 268 mutex_unlock(&chip->irq_lock);
257} 269}
@@ -278,7 +290,7 @@ static int pca953x_irq_set_type(unsigned int irq, unsigned int type)
278 else 290 else
279 chip->irq_trig_raise &= ~mask; 291 chip->irq_trig_raise &= ~mask;
280 292
281 return pca953x_gpio_direction_input(&chip->gpio_chip, level); 293 return 0;
282} 294}
283 295
284static struct irq_chip pca953x_irq_chip = { 296static struct irq_chip pca953x_irq_chip = {
diff --git a/drivers/gpio/pl061.c b/drivers/gpio/pl061.c
index 5ad8f778ced4..105701a1f05b 100644
--- a/drivers/gpio/pl061.c
+++ b/drivers/gpio/pl061.c
@@ -91,6 +91,12 @@ static int pl061_direction_output(struct gpio_chip *gc, unsigned offset,
91 gpiodir = readb(chip->base + GPIODIR); 91 gpiodir = readb(chip->base + GPIODIR);
92 gpiodir |= 1 << offset; 92 gpiodir |= 1 << offset;
93 writeb(gpiodir, chip->base + GPIODIR); 93 writeb(gpiodir, chip->base + GPIODIR);
94
95 /*
96 * The gpio value is set again, because the pl061 doesn't allow setting the
97 * value of a gpio pin before configuring it in OUT mode.
98 */
99 writeb(!!value << offset, chip->base + (1 << (offset + 2)));
94 spin_unlock_irqrestore(&chip->lock, flags); 100 spin_unlock_irqrestore(&chip->lock, flags);
95 101
96 return 0; 102 return 0;
@@ -183,7 +189,7 @@ static int pl061_irq_type(unsigned irq, unsigned trigger)
183 gpioibe &= ~(1 << offset); 189 gpioibe &= ~(1 << offset);
184 if (trigger & IRQ_TYPE_EDGE_RISING) 190 if (trigger & IRQ_TYPE_EDGE_RISING)
185 gpioiev |= 1 << offset; 191 gpioiev |= 1 << offset;
186 else 192 else if (trigger & IRQ_TYPE_EDGE_FALLING)
187 gpioiev &= ~(1 << offset); 193 gpioiev &= ~(1 << offset);
188 } 194 }
189 writeb(gpioibe, chip->base + GPIOIBE); 195 writeb(gpioibe, chip->base + GPIOIBE);
@@ -204,7 +210,7 @@ static struct irq_chip pl061_irqchip = {
204 210
205static void pl061_irq_handler(unsigned irq, struct irq_desc *desc) 211static void pl061_irq_handler(unsigned irq, struct irq_desc *desc)
206{ 212{
207 struct list_head *chip_list = get_irq_chip_data(irq); 213 struct list_head *chip_list = get_irq_data(irq);
208 struct list_head *ptr; 214 struct list_head *ptr;
209 struct pl061_gpio *chip; 215 struct pl061_gpio *chip;
210 216
@@ -297,9 +303,9 @@ static int __init pl061_probe(struct amba_device *dev, struct amba_id *id)
297 goto iounmap; 303 goto iounmap;
298 } 304 }
299 INIT_LIST_HEAD(chip_list); 305 INIT_LIST_HEAD(chip_list);
300 set_irq_chip_data(irq, chip_list); 306 set_irq_data(irq, chip_list);
301 } else 307 } else
302 chip_list = get_irq_chip_data(irq); 308 chip_list = get_irq_data(irq);
303 list_add(&chip->list, chip_list); 309 list_add(&chip->list, chip_list);
304 310
305 for (i = 0; i < PL061_GPIO_NR; i++) { 311 for (i = 0; i < PL061_GPIO_NR; i++) {
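The extra write in pl061_direction_output() relies on the PL061's masked data register: address bits [9:2] of the GPIODATA window select which data bits a write may touch, so writing the value at base + (1 << (offset + 2)) updates exactly one line. A small sketch of that addressing (register behaviour per the PL061 programming model; the helper is illustrative):

#include <linux/io.h>

/* Sketch: masked PL061 data write; only the addressed GPIO line changes. */
static void pl061_set_line_sketch(void __iomem *base, unsigned int offset, int value)
{
        /* offset 3, value 1: writes 0x08 to base + 0x20, updating only bit 3 */
        writeb(!!value << offset, base + (1 << (offset + 2)));
}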
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 3bd872761567..a263b7070fc6 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -476,6 +476,7 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
476 unsigned long irqflags; 476 unsigned long irqflags;
477 477
478 spin_lock_irqsave(&dev->vbl_lock, irqflags); 478 spin_lock_irqsave(&dev->vbl_lock, irqflags);
479 dev->driver->disable_vblank(dev, crtc);
479 DRM_WAKEUP(&dev->vbl_queue[crtc]); 480 DRM_WAKEUP(&dev->vbl_queue[crtc]);
480 dev->vblank_enabled[crtc] = 0; 481 dev->vblank_enabled[crtc] = 0;
481 dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc); 482 dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index e4865f99989c..7732268eced2 100644
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -77,7 +77,7 @@ static void *agp_remap(unsigned long offset, unsigned long size,
77 && (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >= 77 && (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >=
78 (offset + size)) 78 (offset + size))
79 break; 79 break;
80 if (!agpmem) 80 if (&agpmem->head == &dev->agp->memory)
81 return NULL; 81 return NULL;
82 82
83 /* 83 /*
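The drm_memory.c fix is about how list_for_each_entry() terminates: when the loop runs to completion the cursor is never NULL; it is left pointing at the bogus entry computed from the list head, so the old !agpmem test could never detect a failed lookup. Comparing the embedded list_head against the head itself is the reliable "not found" check, roughly:

#include <linux/list.h>

/* Sketch: detecting "no match" after list_for_each_entry(). */
struct item {
        struct list_head head;
        int key;
};

static struct item *find_item(struct list_head *list, int key)
{
        struct item *it;

        list_for_each_entry(it, list, head)
                if (it->key == key)
                        break;

        if (&it->head == list)  /* walked off the end: nothing matched */
                return NULL;

        return it;
}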
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 1a1825b29f5f..25bbd30ed7af 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -354,7 +354,10 @@ static struct bin_attribute edid_attr = {
354int drm_sysfs_connector_add(struct drm_connector *connector) 354int drm_sysfs_connector_add(struct drm_connector *connector)
355{ 355{
356 struct drm_device *dev = connector->dev; 356 struct drm_device *dev = connector->dev;
357 int ret = 0, i, j; 357 int attr_cnt = 0;
358 int opt_cnt = 0;
359 int i;
360 int ret = 0;
358 361
359 /* We shouldn't get called more than once for the same connector */ 362 /* We shouldn't get called more than once for the same connector */
360 BUG_ON(device_is_registered(&connector->kdev)); 363 BUG_ON(device_is_registered(&connector->kdev));
@@ -377,8 +380,8 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
377 380
378 /* Standard attributes */ 381 /* Standard attributes */
379 382
380 for (i = 0; i < ARRAY_SIZE(connector_attrs); i++) { 383 for (attr_cnt = 0; attr_cnt < ARRAY_SIZE(connector_attrs); attr_cnt++) {
381 ret = device_create_file(&connector->kdev, &connector_attrs[i]); 384 ret = device_create_file(&connector->kdev, &connector_attrs[attr_cnt]);
382 if (ret) 385 if (ret)
383 goto err_out_files; 386 goto err_out_files;
384 } 387 }
@@ -394,8 +397,8 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
394 case DRM_MODE_CONNECTOR_SVIDEO: 397 case DRM_MODE_CONNECTOR_SVIDEO:
395 case DRM_MODE_CONNECTOR_Component: 398 case DRM_MODE_CONNECTOR_Component:
396 case DRM_MODE_CONNECTOR_TV: 399 case DRM_MODE_CONNECTOR_TV:
397 for (i = 0; i < ARRAY_SIZE(connector_attrs_opt1); i++) { 400 for (opt_cnt = 0; opt_cnt < ARRAY_SIZE(connector_attrs_opt1); opt_cnt++) {
398 ret = device_create_file(&connector->kdev, &connector_attrs_opt1[i]); 401 ret = device_create_file(&connector->kdev, &connector_attrs_opt1[opt_cnt]);
399 if (ret) 402 if (ret)
400 goto err_out_files; 403 goto err_out_files;
401 } 404 }
@@ -414,10 +417,10 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
414 return 0; 417 return 0;
415 418
416err_out_files: 419err_out_files:
417 if (i > 0) 420 for (i = 0; i < opt_cnt; i++)
418 for (j = 0; j < i; j++) 421 device_remove_file(&connector->kdev, &connector_attrs_opt1[i]);
419 device_remove_file(&connector->kdev, 422 for (i = 0; i < attr_cnt; i++)
420 &connector_attrs[i]); 423 device_remove_file(&connector->kdev, &connector_attrs[i]);
421 device_unregister(&connector->kdev); 424 device_unregister(&connector->kdev);
422 425
423out: 426out:
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 2dc93939507d..c3cfafcbfe7d 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1357,6 +1357,8 @@ static void i915_setup_compression(struct drm_device *dev, int size)
1357 1357
1358 dev_priv->cfb_size = size; 1358 dev_priv->cfb_size = size;
1359 1359
1360 dev_priv->compressed_fb = compressed_fb;
1361
1360 if (IS_GM45(dev)) { 1362 if (IS_GM45(dev)) {
1361 g4x_disable_fbc(dev); 1363 g4x_disable_fbc(dev);
1362 I915_WRITE(DPFC_CB_BASE, compressed_fb->start); 1364 I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
@@ -1364,12 +1366,22 @@ static void i915_setup_compression(struct drm_device *dev, int size)
1364 i8xx_disable_fbc(dev); 1366 i8xx_disable_fbc(dev);
1365 I915_WRITE(FBC_CFB_BASE, cfb_base); 1367 I915_WRITE(FBC_CFB_BASE, cfb_base);
1366 I915_WRITE(FBC_LL_BASE, ll_base); 1368 I915_WRITE(FBC_LL_BASE, ll_base);
1369 dev_priv->compressed_llb = compressed_llb;
1367 } 1370 }
1368 1371
1369 DRM_DEBUG("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base, 1372 DRM_DEBUG("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base,
1370 ll_base, size >> 20); 1373 ll_base, size >> 20);
1371} 1374}
1372 1375
1376static void i915_cleanup_compression(struct drm_device *dev)
1377{
1378 struct drm_i915_private *dev_priv = dev->dev_private;
1379
1380 drm_mm_put_block(dev_priv->compressed_fb);
1381 if (!IS_GM45(dev))
1382 drm_mm_put_block(dev_priv->compressed_llb);
1383}
1384
1373/* true = enable decode, false = disable decoder */ 1385/* true = enable decode, false = disable decoder */
1374static unsigned int i915_vga_set_decode(void *cookie, bool state) 1386static unsigned int i915_vga_set_decode(void *cookie, bool state)
1375{ 1387{
@@ -1787,6 +1799,8 @@ int i915_driver_unload(struct drm_device *dev)
1787 mutex_lock(&dev->struct_mutex); 1799 mutex_lock(&dev->struct_mutex);
1788 i915_gem_cleanup_ringbuffer(dev); 1800 i915_gem_cleanup_ringbuffer(dev);
1789 mutex_unlock(&dev->struct_mutex); 1801 mutex_unlock(&dev->struct_mutex);
1802 if (I915_HAS_FBC(dev) && i915_powersave)
1803 i915_cleanup_compression(dev);
1790 drm_mm_takedown(&dev_priv->vram); 1804 drm_mm_takedown(&dev_priv->vram);
1791 i915_gem_lastclose(dev); 1805 i915_gem_lastclose(dev);
1792 1806
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 0af3dcc85ce9..cc03537bb883 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -69,7 +69,8 @@ const static struct intel_device_info intel_845g_info = {
69}; 69};
70 70
71const static struct intel_device_info intel_i85x_info = { 71const static struct intel_device_info intel_i85x_info = {
72 .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1, 72 .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1,
73 .cursor_needs_physical = 1,
73}; 74};
74 75
75const static struct intel_device_info intel_i865g_info = { 76const static struct intel_device_info intel_i865g_info = {
@@ -151,7 +152,7 @@ const static struct pci_device_id pciidlist[] = {
151 INTEL_VGA_DEVICE(0x3577, &intel_i830_info), 152 INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
152 INTEL_VGA_DEVICE(0x2562, &intel_845g_info), 153 INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
153 INTEL_VGA_DEVICE(0x3582, &intel_i85x_info), 154 INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),
154 INTEL_VGA_DEVICE(0x35e8, &intel_i85x_info), 155 INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
155 INTEL_VGA_DEVICE(0x2572, &intel_i865g_info), 156 INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),
156 INTEL_VGA_DEVICE(0x2582, &intel_i915g_info), 157 INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),
157 INTEL_VGA_DEVICE(0x258a, &intel_i915g_info), 158 INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 6960849522f8..6e4790065d9e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -195,6 +195,7 @@ struct intel_overlay;
195struct intel_device_info { 195struct intel_device_info {
196 u8 is_mobile : 1; 196 u8 is_mobile : 1;
197 u8 is_i8xx : 1; 197 u8 is_i8xx : 1;
198 u8 is_i85x : 1;
198 u8 is_i915g : 1; 199 u8 is_i915g : 1;
199 u8 is_i9xx : 1; 200 u8 is_i9xx : 1;
200 u8 is_i945gm : 1; 201 u8 is_i945gm : 1;
@@ -235,11 +236,14 @@ typedef struct drm_i915_private {
235 236
236 drm_dma_handle_t *status_page_dmah; 237 drm_dma_handle_t *status_page_dmah;
237 void *hw_status_page; 238 void *hw_status_page;
239 void *seqno_page;
238 dma_addr_t dma_status_page; 240 dma_addr_t dma_status_page;
239 uint32_t counter; 241 uint32_t counter;
240 unsigned int status_gfx_addr; 242 unsigned int status_gfx_addr;
243 unsigned int seqno_gfx_addr;
241 drm_local_map_t hws_map; 244 drm_local_map_t hws_map;
242 struct drm_gem_object *hws_obj; 245 struct drm_gem_object *hws_obj;
246 struct drm_gem_object *seqno_obj;
243 struct drm_gem_object *pwrctx; 247 struct drm_gem_object *pwrctx;
244 248
245 struct resource mch_res; 249 struct resource mch_res;
@@ -630,6 +634,9 @@ typedef struct drm_i915_private {
630 u8 max_delay; 634 u8 max_delay;
631 635
632 enum no_fbc_reason no_fbc_reason; 636 enum no_fbc_reason no_fbc_reason;
637
638 struct drm_mm_node *compressed_fb;
639 struct drm_mm_node *compressed_llb;
633} drm_i915_private_t; 640} drm_i915_private_t;
634 641
635/** driver private structure attached to each drm_gem_object */ 642/** driver private structure attached to each drm_gem_object */
@@ -1070,7 +1077,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
1070 1077
1071#define IS_I830(dev) ((dev)->pci_device == 0x3577) 1078#define IS_I830(dev) ((dev)->pci_device == 0x3577)
1072#define IS_845G(dev) ((dev)->pci_device == 0x2562) 1079#define IS_845G(dev) ((dev)->pci_device == 0x2562)
1073#define IS_I85X(dev) ((dev)->pci_device == 0x3582) 1080#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
1074#define IS_I865G(dev) ((dev)->pci_device == 0x2572) 1081#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
1075#define IS_GEN2(dev) (INTEL_INFO(dev)->is_i8xx) 1082#define IS_GEN2(dev) (INTEL_INFO(dev)->is_i8xx)
1076#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) 1083#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
@@ -1135,6 +1142,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
1135 1142
1136#define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) || \ 1143#define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) || \
1137 IS_GEN6(dev)) 1144 IS_GEN6(dev))
1145#define HAS_PIPE_CONTROL(dev) (IS_IRONLAKE(dev) || IS_GEN6(dev))
1138 1146
1139#define PRIMARY_RINGBUFFER_SIZE (128*1024) 1147#define PRIMARY_RINGBUFFER_SIZE (128*1024)
1140 1148
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 80871c62a571..ef3d91dda71a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1588,6 +1588,13 @@ i915_gem_process_flushing_list(struct drm_device *dev,
1588 } 1588 }
1589} 1589}
1590 1590
1591#define PIPE_CONTROL_FLUSH(addr) \
1592 OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \
1593 PIPE_CONTROL_DEPTH_STALL); \
1594 OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT); \
1595 OUT_RING(0); \
1596 OUT_RING(0); \
1597
1591/** 1598/**
1592 * Creates a new sequence number, emitting a write of it to the status page 1599 * Creates a new sequence number, emitting a write of it to the status page
1593 * plus an interrupt, which will trigger i915_user_interrupt_handler. 1600 * plus an interrupt, which will trigger i915_user_interrupt_handler.
@@ -1622,13 +1629,47 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
1622 if (dev_priv->mm.next_gem_seqno == 0) 1629 if (dev_priv->mm.next_gem_seqno == 0)
1623 dev_priv->mm.next_gem_seqno++; 1630 dev_priv->mm.next_gem_seqno++;
1624 1631
1625 BEGIN_LP_RING(4); 1632 if (HAS_PIPE_CONTROL(dev)) {
1626 OUT_RING(MI_STORE_DWORD_INDEX); 1633 u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;
1627 OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
1628 OUT_RING(seqno);
1629 1634
1630 OUT_RING(MI_USER_INTERRUPT); 1635 /*
1631 ADVANCE_LP_RING(); 1636 * Workaround qword write incoherence by flushing the
1637 * PIPE_NOTIFY buffers out to memory before requesting
1638 * an interrupt.
1639 */
1640 BEGIN_LP_RING(32);
1641 OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
1642 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
1643 OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
1644 OUT_RING(seqno);
1645 OUT_RING(0);
1646 PIPE_CONTROL_FLUSH(scratch_addr);
1647 scratch_addr += 128; /* write to separate cachelines */
1648 PIPE_CONTROL_FLUSH(scratch_addr);
1649 scratch_addr += 128;
1650 PIPE_CONTROL_FLUSH(scratch_addr);
1651 scratch_addr += 128;
1652 PIPE_CONTROL_FLUSH(scratch_addr);
1653 scratch_addr += 128;
1654 PIPE_CONTROL_FLUSH(scratch_addr);
1655 scratch_addr += 128;
1656 PIPE_CONTROL_FLUSH(scratch_addr);
1657 OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
1658 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
1659 PIPE_CONTROL_NOTIFY);
1660 OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
1661 OUT_RING(seqno);
1662 OUT_RING(0);
1663 ADVANCE_LP_RING();
1664 } else {
1665 BEGIN_LP_RING(4);
1666 OUT_RING(MI_STORE_DWORD_INDEX);
1667 OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
1668 OUT_RING(seqno);
1669
1670 OUT_RING(MI_USER_INTERRUPT);
1671 ADVANCE_LP_RING();
1672 }
1632 1673
1633 DRM_DEBUG_DRIVER("%d\n", seqno); 1674 DRM_DEBUG_DRIVER("%d\n", seqno);
1634 1675
@@ -1752,7 +1793,10 @@ i915_get_gem_seqno(struct drm_device *dev)
1752{ 1793{
1753 drm_i915_private_t *dev_priv = dev->dev_private; 1794 drm_i915_private_t *dev_priv = dev->dev_private;
1754 1795
1755 return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX); 1796 if (HAS_PIPE_CONTROL(dev))
1797 return ((volatile u32 *)(dev_priv->seqno_page))[0];
1798 else
1799 return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
1756} 1800}
1757 1801
1758/** 1802/**
@@ -2362,6 +2406,12 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
2362 pitch_val = obj_priv->stride / tile_width; 2406 pitch_val = obj_priv->stride / tile_width;
2363 pitch_val = ffs(pitch_val) - 1; 2407 pitch_val = ffs(pitch_val) - 1;
2364 2408
2409 if (obj_priv->tiling_mode == I915_TILING_Y &&
2410 HAS_128_BYTE_Y_TILING(dev))
2411 WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
2412 else
2413 WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL);
2414
2365 val = obj_priv->gtt_offset; 2415 val = obj_priv->gtt_offset;
2366 if (obj_priv->tiling_mode == I915_TILING_Y) 2416 if (obj_priv->tiling_mode == I915_TILING_Y)
2367 val |= 1 << I830_FENCE_TILING_Y_SHIFT; 2417 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
@@ -4546,6 +4596,49 @@ i915_gem_idle(struct drm_device *dev)
4546 return 0; 4596 return 0;
4547} 4597}
4548 4598
4599/*
4600 * 965+ support PIPE_CONTROL commands, which provide finer grained control
4601 * over cache flushing.
4602 */
4603static int
4604i915_gem_init_pipe_control(struct drm_device *dev)
4605{
4606 drm_i915_private_t *dev_priv = dev->dev_private;
4607 struct drm_gem_object *obj;
4608 struct drm_i915_gem_object *obj_priv;
4609 int ret;
4610
4611 obj = drm_gem_object_alloc(dev, 4096);
4612 if (obj == NULL) {
4613 DRM_ERROR("Failed to allocate seqno page\n");
4614 ret = -ENOMEM;
4615 goto err;
4616 }
4617 obj_priv = to_intel_bo(obj);
4618 obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
4619
4620 ret = i915_gem_object_pin(obj, 4096);
4621 if (ret)
4622 goto err_unref;
4623
4624 dev_priv->seqno_gfx_addr = obj_priv->gtt_offset;
4625 dev_priv->seqno_page = kmap(obj_priv->pages[0]);
4626 if (dev_priv->seqno_page == NULL)
4627 goto err_unpin;
4628
4629 dev_priv->seqno_obj = obj;
4630 memset(dev_priv->seqno_page, 0, PAGE_SIZE);
4631
4632 return 0;
4633
4634err_unpin:
4635 i915_gem_object_unpin(obj);
4636err_unref:
4637 drm_gem_object_unreference(obj);
4638err:
4639 return ret;
4640}
4641
4549static int 4642static int
4550i915_gem_init_hws(struct drm_device *dev) 4643i915_gem_init_hws(struct drm_device *dev)
4551{ 4644{
@@ -4563,7 +4656,8 @@ i915_gem_init_hws(struct drm_device *dev)
4563 obj = drm_gem_object_alloc(dev, 4096); 4656 obj = drm_gem_object_alloc(dev, 4096);
4564 if (obj == NULL) { 4657 if (obj == NULL) {
4565 DRM_ERROR("Failed to allocate status page\n"); 4658 DRM_ERROR("Failed to allocate status page\n");
4566 return -ENOMEM; 4659 ret = -ENOMEM;
4660 goto err;
4567 } 4661 }
4568 obj_priv = to_intel_bo(obj); 4662 obj_priv = to_intel_bo(obj);
4569 obj_priv->agp_type = AGP_USER_CACHED_MEMORY; 4663 obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
@@ -4571,7 +4665,7 @@ i915_gem_init_hws(struct drm_device *dev)
4571 ret = i915_gem_object_pin(obj, 4096); 4665 ret = i915_gem_object_pin(obj, 4096);
4572 if (ret != 0) { 4666 if (ret != 0) {
4573 drm_gem_object_unreference(obj); 4667 drm_gem_object_unreference(obj);
4574 return ret; 4668 goto err_unref;
4575 } 4669 }
4576 4670
4577 dev_priv->status_gfx_addr = obj_priv->gtt_offset; 4671 dev_priv->status_gfx_addr = obj_priv->gtt_offset;
@@ -4580,10 +4674,16 @@ i915_gem_init_hws(struct drm_device *dev)
4580 if (dev_priv->hw_status_page == NULL) { 4674 if (dev_priv->hw_status_page == NULL) {
4581 DRM_ERROR("Failed to map status page.\n"); 4675 DRM_ERROR("Failed to map status page.\n");
4582 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); 4676 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
4583 i915_gem_object_unpin(obj); 4677 ret = -EINVAL;
4584 drm_gem_object_unreference(obj); 4678 goto err_unpin;
4585 return -EINVAL;
4586 } 4679 }
4680
4681 if (HAS_PIPE_CONTROL(dev)) {
4682 ret = i915_gem_init_pipe_control(dev);
4683 if (ret)
4684 goto err_unpin;
4685 }
4686
4587 dev_priv->hws_obj = obj; 4687 dev_priv->hws_obj = obj;
4588 memset(dev_priv->hw_status_page, 0, PAGE_SIZE); 4688 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
4589 if (IS_GEN6(dev)) { 4689 if (IS_GEN6(dev)) {
@@ -4596,6 +4696,30 @@ i915_gem_init_hws(struct drm_device *dev)
4596 DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr); 4696 DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
4597 4697
4598 return 0; 4698 return 0;
4699
4700err_unpin:
4701 i915_gem_object_unpin(obj);
4702err_unref:
4703 drm_gem_object_unreference(obj);
4704err:
4705 return 0;
4706}
4707
4708static void
4709i915_gem_cleanup_pipe_control(struct drm_device *dev)
4710{
4711 drm_i915_private_t *dev_priv = dev->dev_private;
4712 struct drm_gem_object *obj;
4713 struct drm_i915_gem_object *obj_priv;
4714
4715 obj = dev_priv->seqno_obj;
4716 obj_priv = to_intel_bo(obj);
4717 kunmap(obj_priv->pages[0]);
4718 i915_gem_object_unpin(obj);
4719 drm_gem_object_unreference(obj);
4720 dev_priv->seqno_obj = NULL;
4721
4722 dev_priv->seqno_page = NULL;
4599} 4723}
4600 4724
4601static void 4725static void
@@ -4619,6 +4743,9 @@ i915_gem_cleanup_hws(struct drm_device *dev)
4619 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); 4743 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
4620 dev_priv->hw_status_page = NULL; 4744 dev_priv->hw_status_page = NULL;
4621 4745
4746 if (HAS_PIPE_CONTROL(dev))
4747 i915_gem_cleanup_pipe_control(dev);
4748
4622 /* Write high address into HWS_PGA when disabling. */ 4749 /* Write high address into HWS_PGA when disabling. */
4623 I915_WRITE(HWS_PGA, 0x1ffff000); 4750 I915_WRITE(HWS_PGA, 0x1ffff000);
4624} 4751}
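
The error paths added to i915_gem_init_pipe_control() and i915_gem_init_hws() above use the kernel's usual unwind idiom: acquire resources in order, and on failure jump to a goto label that releases only what has already been acquired, in reverse order. As a rough, self-contained illustration of that pattern (outside the kernel, with malloc/free standing in for object allocation and pinning, and setup() being an invented name):

#include <stdio.h>
#include <stdlib.h>

/* Minimal sketch of the goto-based unwind pattern used above: each
 * acquired resource has a matching label that releases it, and a failure
 * jumps to the label for the last step that actually succeeded. */
static int setup(void)
{
        void *a, *b;
        int ret;

        a = malloc(4096);               /* roughly: allocate the object */
        if (a == NULL) {
                ret = -1;
                goto err;
        }

        b = malloc(4096);               /* roughly: pin / map the object */
        if (b == NULL) {
                ret = -1;
                goto err_free_a;
        }

        printf("setup complete\n");
        free(b);                        /* normal teardown also runs in reverse */
        free(a);
        return 0;

err_free_a:
        free(a);                        /* undo only what was acquired */
err:
        return ret;
}

int main(void)
{
        return setup() ? EXIT_FAILURE : EXIT_SUCCESS;
}

The cleanup side, i915_gem_cleanup_pipe_control() above, releases the same resources (kunmap, unpin, unreference) in the opposite order from initialization.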
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 449157f71610..4bdccefcf2cf 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -202,21 +202,17 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
202 * reg, so dont bother to check the size */ 202 * reg, so dont bother to check the size */
203 if (stride / 128 > I965_FENCE_MAX_PITCH_VAL) 203 if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
204 return false; 204 return false;
205 } else if (IS_I9XX(dev)) { 205 } else if (IS_GEN3(dev) || IS_GEN2(dev)) {
206 uint32_t pitch_val = ffs(stride / tile_width) - 1; 206 if (stride > 8192)
207
208 /* XXX: For Y tiling, FENCE_MAX_PITCH_VAL is actually 6 (8KB)
209 * instead of 4 (2KB) on 945s.
210 */
211 if (pitch_val > I915_FENCE_MAX_PITCH_VAL ||
212 size > (I830_FENCE_MAX_SIZE_VAL << 20))
213 return false; 207 return false;
214 } else {
215 uint32_t pitch_val = ffs(stride / tile_width) - 1;
216 208
217 if (pitch_val > I830_FENCE_MAX_PITCH_VAL || 209 if (IS_GEN3(dev)) {
218 size > (I830_FENCE_MAX_SIZE_VAL << 19)) 210 if (size > I830_FENCE_MAX_SIZE_VAL << 20)
219 return false; 211 return false;
212 } else {
213 if (size > I830_FENCE_MAX_SIZE_VAL << 19)
214 return false;
215 }
220 } 216 }
221 217
222 /* 965+ just needs multiples of tile width */ 218 /* 965+ just needs multiples of tile width */
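
The rewritten i915_tiling_ok() check above drops the ffs()-encoded pitch comparison for gen2/gen3 and validates the raw stride and object size directly. A self-contained sketch of those limits, using the I830_FENCE_MAX_SIZE_VAL value (1<<8) from the i915_reg.h hunk below; tiling_limits_ok() is an invented helper name, not a driver function:

#include <stdbool.h>
#include <stdio.h>

#define I830_FENCE_MAX_SIZE_VAL (1 << 8)    /* from the i915_reg.h hunk below */

/* Sketch of the gen2/gen3 limits applied in i915_tiling_ok() above:
 * both generations cap the stride at 8192 bytes; gen3 fences cover up to
 * MAX_SIZE_VAL << 20 bytes and gen2 up to MAX_SIZE_VAL << 19. */
static bool tiling_limits_ok(unsigned int stride, unsigned long size, bool gen3)
{
        if (stride > 8192)
                return false;

        if (gen3)
                return size <= ((unsigned long)I830_FENCE_MAX_SIZE_VAL << 20);
        else
                return size <= ((unsigned long)I830_FENCE_MAX_SIZE_VAL << 19);
}

int main(void)
{
        printf("%d\n", tiling_limits_ok(4096, 1UL << 20, true));    /* 1: fits */
        printf("%d\n", tiling_limits_ok(16384, 1UL << 20, true));   /* 0: stride too large */
        return 0;
}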
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 6421481d6222..df6a9cd82c4d 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -349,7 +349,7 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
349 READ_BREADCRUMB(dev_priv); 349 READ_BREADCRUMB(dev_priv);
350 } 350 }
351 351
352 if (gt_iir & GT_USER_INTERRUPT) { 352 if (gt_iir & GT_PIPE_NOTIFY) {
353 u32 seqno = i915_get_gem_seqno(dev); 353 u32 seqno = i915_get_gem_seqno(dev);
354 dev_priv->mm.irq_gem_seqno = seqno; 354 dev_priv->mm.irq_gem_seqno = seqno;
355 trace_i915_gem_request_complete(dev, seqno); 355 trace_i915_gem_request_complete(dev, seqno);
@@ -456,11 +456,15 @@ i915_error_object_create(struct drm_device *dev,
456 456
457 for (page = 0; page < page_count; page++) { 457 for (page = 0; page < page_count; page++) {
458 void *s, *d = kmalloc(PAGE_SIZE, GFP_ATOMIC); 458 void *s, *d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
459 unsigned long flags;
460
459 if (d == NULL) 461 if (d == NULL)
460 goto unwind; 462 goto unwind;
461 s = kmap_atomic(src_priv->pages[page], KM_USER0); 463 local_irq_save(flags);
464 s = kmap_atomic(src_priv->pages[page], KM_IRQ0);
462 memcpy(d, s, PAGE_SIZE); 465 memcpy(d, s, PAGE_SIZE);
463 kunmap_atomic(s, KM_USER0); 466 kunmap_atomic(s, KM_IRQ0);
467 local_irq_restore(flags);
464 dst->pages[page] = d; 468 dst->pages[page] = d;
465 } 469 }
466 dst->page_count = page_count; 470 dst->page_count = page_count;
@@ -1005,7 +1009,7 @@ void i915_user_irq_get(struct drm_device *dev)
1005 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 1009 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
1006 if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) { 1010 if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
1007 if (HAS_PCH_SPLIT(dev)) 1011 if (HAS_PCH_SPLIT(dev))
1008 ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT); 1012 ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
1009 else 1013 else
1010 i915_enable_irq(dev_priv, I915_USER_INTERRUPT); 1014 i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
1011 } 1015 }
@@ -1021,7 +1025,7 @@ void i915_user_irq_put(struct drm_device *dev)
1021 BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0); 1025 BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
1022 if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) { 1026 if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
1023 if (HAS_PCH_SPLIT(dev)) 1027 if (HAS_PCH_SPLIT(dev))
1024 ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT); 1028 ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
1025 else 1029 else
1026 i915_disable_irq(dev_priv, I915_USER_INTERRUPT); 1030 i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
1027 } 1031 }
@@ -1305,7 +1309,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
1305 /* enable kind of interrupts always enabled */ 1309 /* enable kind of interrupts always enabled */
1306 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 1310 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
1307 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; 1311 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
1308 u32 render_mask = GT_USER_INTERRUPT; 1312 u32 render_mask = GT_PIPE_NOTIFY;
1309 u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | 1313 u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
1310 SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; 1314 SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
1311 1315
diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c
index 7cc8410239cb..8fcc75c1aa28 100644
--- a/drivers/gpu/drm/i915/i915_opregion.c
+++ b/drivers/gpu/drm/i915/i915_opregion.c
@@ -382,8 +382,57 @@ static void intel_didl_outputs(struct drm_device *dev)
382 struct drm_i915_private *dev_priv = dev->dev_private; 382 struct drm_i915_private *dev_priv = dev->dev_private;
383 struct intel_opregion *opregion = &dev_priv->opregion; 383 struct intel_opregion *opregion = &dev_priv->opregion;
384 struct drm_connector *connector; 384 struct drm_connector *connector;
385 acpi_handle handle;
386 struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
387 unsigned long long device_id;
388 acpi_status status;
385 int i = 0; 389 int i = 0;
386 390
391 handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
392 if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev)))
393 return;
394
395 if (acpi_is_video_device(acpi_dev))
396 acpi_video_bus = acpi_dev;
397 else {
398 list_for_each_entry(acpi_cdev, &acpi_dev->children, node) {
399 if (acpi_is_video_device(acpi_cdev)) {
400 acpi_video_bus = acpi_cdev;
401 break;
402 }
403 }
404 }
405
406 if (!acpi_video_bus) {
407 printk(KERN_WARNING "No ACPI video bus found\n");
408 return;
409 }
410
411 list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) {
412 if (i >= 8) {
413 dev_printk (KERN_ERR, &dev->pdev->dev,
414 "More than 8 outputs detected\n");
415 return;
416 }
417 status =
418 acpi_evaluate_integer(acpi_cdev->handle, "_ADR",
419 NULL, &device_id);
420 if (ACPI_SUCCESS(status)) {
421 if (!device_id)
422 goto blind_set;
423 opregion->acpi->didl[i] = (u32)(device_id & 0x0f0f);
424 i++;
425 }
426 }
427
428end:
429 /* If fewer than 8 outputs, the list must be null terminated */
430 if (i < 8)
431 opregion->acpi->didl[i] = 0;
432 return;
433
434blind_set:
435 i = 0;
387 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 436 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
388 int output_type = ACPI_OTHER_OUTPUT; 437 int output_type = ACPI_OTHER_OUTPUT;
389 if (i >= 8) { 438 if (i >= 8) {
@@ -416,10 +465,7 @@ static void intel_didl_outputs(struct drm_device *dev)
416 opregion->acpi->didl[i] |= (1<<31) | output_type | i; 465 opregion->acpi->didl[i] |= (1<<31) | output_type | i;
417 i++; 466 i++;
418 } 467 }
419 468 goto end;
420 /* If fewer than 8 outputs, the list must be null terminated */
421 if (i < 8)
422 opregion->acpi->didl[i] = 0;
423} 469}
424 470
425int intel_opregion_init(struct drm_device *dev, int resume) 471int intel_opregion_init(struct drm_device *dev, int resume)
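
The reworked intel_didl_outputs() above fills the opregion DIDL table from the ACPI video bus children (or, via the blind_set path, from the connector list), writing at most eight entries and NULL-terminating the list when fewer are present. A small stand-alone sketch of just that bounded fill-and-terminate step; fill_didl() and its arguments are placeholders, and the ACPI enumeration and blind_set fallback are not modelled:

#include <stdio.h>
#include <stdint.h>

#define DIDL_SLOTS 8

/* Sketch of the bounded fill used above: copy at most DIDL_SLOTS ids,
 * masked as in the hunk, then NULL-terminate if fewer than eight were
 * written. */
static void fill_didl(uint32_t didl[DIDL_SLOTS], const uint64_t *ids, int nids)
{
        int i = 0;

        while (i < nids && i < DIDL_SLOTS) {
                didl[i] = (uint32_t)(ids[i] & 0x0f0f);
                i++;
        }

        if (i < DIDL_SLOTS)
                didl[i] = 0;            /* list must be NULL-terminated */
}

int main(void)
{
        uint64_t ids[] = { 0x0100, 0x0200, 0x0301 };
        uint32_t didl[DIDL_SLOTS] = { 0 };

        fill_didl(didl, ids, 3);
        for (int i = 0; i < DIDL_SLOTS && didl[i]; i++)
                printf("didl[%d] = 0x%04x\n", i, didl[i]);
        return 0;
}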
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index cbbf59f56dfa..4cbc5210fd30 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -230,6 +230,16 @@
230#define ASYNC_FLIP (1<<22) 230#define ASYNC_FLIP (1<<22)
231#define DISPLAY_PLANE_A (0<<20) 231#define DISPLAY_PLANE_A (0<<20)
232#define DISPLAY_PLANE_B (1<<20) 232#define DISPLAY_PLANE_B (1<<20)
233#define GFX_OP_PIPE_CONTROL ((0x3<<29)|(0x3<<27)|(0x2<<24)|2)
234#define PIPE_CONTROL_QW_WRITE (1<<14)
235#define PIPE_CONTROL_DEPTH_STALL (1<<13)
236#define PIPE_CONTROL_WC_FLUSH (1<<12)
237#define PIPE_CONTROL_IS_FLUSH (1<<11) /* MBZ on Ironlake */
238#define PIPE_CONTROL_TC_FLUSH (1<<10) /* GM45+ only */
239#define PIPE_CONTROL_ISP_DIS (1<<9)
240#define PIPE_CONTROL_NOTIFY (1<<8)
241#define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */
242#define PIPE_CONTROL_STALL_EN (1<<1) /* in addr word, Ironlake+ only */
233 243
234/* 244/*
235 * Fence registers 245 * Fence registers
@@ -241,7 +251,7 @@
241#define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8) 251#define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8)
242#define I830_FENCE_PITCH_SHIFT 4 252#define I830_FENCE_PITCH_SHIFT 4
243#define I830_FENCE_REG_VALID (1<<0) 253#define I830_FENCE_REG_VALID (1<<0)
244#define I915_FENCE_MAX_PITCH_VAL 0x10 254#define I915_FENCE_MAX_PITCH_VAL 4
245#define I830_FENCE_MAX_PITCH_VAL 6 255#define I830_FENCE_MAX_PITCH_VAL 6
246#define I830_FENCE_MAX_SIZE_VAL (1<<8) 256#define I830_FENCE_MAX_SIZE_VAL (1<<8)
247 257
@@ -2285,6 +2295,7 @@
2285#define DEIER 0x4400c 2295#define DEIER 0x4400c
2286 2296
2287/* GT interrupt */ 2297/* GT interrupt */
2298#define GT_PIPE_NOTIFY (1 << 4)
2288#define GT_SYNC_STATUS (1 << 2) 2299#define GT_SYNC_STATUS (1 << 2)
2289#define GT_USER_INTERRUPT (1 << 0) 2300#define GT_USER_INTERRUPT (1 << 0)
2290 2301
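
The GFX_OP_PIPE_CONTROL definition and its flag bits above are plain bit fields, so the first dword of a PIPE_CONTROL command is simply the opcode ORed with the requested flags. A compilable sketch using the constants exactly as defined in the hunk; build_pipe_control_dw0() is an invented helper for illustration, not a driver function:

#include <stdio.h>
#include <stdint.h>

/* Constants copied from the i915_reg.h hunk above. */
#define GFX_OP_PIPE_CONTROL     ((0x3<<29)|(0x3<<27)|(0x2<<24)|2)
#define PIPE_CONTROL_QW_WRITE   (1<<14)
#define PIPE_CONTROL_WC_FLUSH   (1<<12)
#define PIPE_CONTROL_TC_FLUSH   (1<<10) /* GM45+ only */
#define PIPE_CONTROL_NOTIFY     (1<<8)

/* Sketch: compose the first command dword from the opcode and the
 * requested flush/write/notify flags. */
static uint32_t build_pipe_control_dw0(uint32_t flags)
{
        return (uint32_t)GFX_OP_PIPE_CONTROL | flags;
}

int main(void)
{
        uint32_t dw0 = build_pipe_control_dw0(PIPE_CONTROL_QW_WRITE |
                                              PIPE_CONTROL_WC_FLUSH |
                                              PIPE_CONTROL_NOTIFY);

        printf("PIPE_CONTROL dw0 = 0x%08x\n", dw0);
        return 0;
}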
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 9c920396d702..f27e3703a716 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -4853,17 +4853,18 @@ static void intel_init_display(struct drm_device *dev)
4853 dev_priv->display.update_wm = g4x_update_wm; 4853 dev_priv->display.update_wm = g4x_update_wm;
4854 else if (IS_I965G(dev)) 4854 else if (IS_I965G(dev))
4855 dev_priv->display.update_wm = i965_update_wm; 4855 dev_priv->display.update_wm = i965_update_wm;
4856 else if (IS_I9XX(dev) || IS_MOBILE(dev)) { 4856 else if (IS_I9XX(dev)) {
4857 dev_priv->display.update_wm = i9xx_update_wm; 4857 dev_priv->display.update_wm = i9xx_update_wm;
4858 dev_priv->display.get_fifo_size = i9xx_get_fifo_size; 4858 dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
4859 } else if (IS_I85X(dev)) {
4860 dev_priv->display.update_wm = i9xx_update_wm;
4861 dev_priv->display.get_fifo_size = i85x_get_fifo_size;
4859 } else { 4862 } else {
4860 if (IS_I85X(dev)) 4863 dev_priv->display.update_wm = i830_update_wm;
4861 dev_priv->display.get_fifo_size = i85x_get_fifo_size; 4864 if (IS_845G(dev))
4862 else if (IS_845G(dev))
4863 dev_priv->display.get_fifo_size = i845_get_fifo_size; 4865 dev_priv->display.get_fifo_size = i845_get_fifo_size;
4864 else 4866 else
4865 dev_priv->display.get_fifo_size = i830_get_fifo_size; 4867 dev_priv->display.get_fifo_size = i830_get_fifo_size;
4866 dev_priv->display.update_wm = i830_update_wm;
4867 } 4868 }
4868} 4869}
4869 4870
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index e91a815861f4..5319d9e2f7ba 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -2912,7 +2912,7 @@ typedef struct _ATOM_ANALOG_TV_INFO_V1_2
2912 UCHAR ucTV_BootUpDefaultStandard; 2912 UCHAR ucTV_BootUpDefaultStandard;
2913 UCHAR ucExt_TV_ASIC_ID; 2913 UCHAR ucExt_TV_ASIC_ID;
2914 UCHAR ucExt_TV_ASIC_SlaveAddr; 2914 UCHAR ucExt_TV_ASIC_SlaveAddr;
2915 ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_TV_TIMING]; 2915 ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_TV_TIMING_V1_2];
2916}ATOM_ANALOG_TV_INFO_V1_2; 2916}ATOM_ANALOG_TV_INFO_V1_2;
2917 2917
2918typedef struct _ATOM_DPCD_INFO 2918typedef struct _ATOM_DPCD_INFO
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index d7388fdb6d0b..cf60c0b3ef15 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -2975,7 +2975,7 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
2975 2975
2976 for (i = 0; i < track->num_cb; i++) { 2976 for (i = 0; i < track->num_cb; i++) {
2977 if (track->cb[i].robj == NULL) { 2977 if (track->cb[i].robj == NULL) {
2978 if (!(track->fastfill || track->color_channel_mask || 2978 if (!(track->zb_cb_clear || track->color_channel_mask ||
2979 track->blend_read_enable)) { 2979 track->blend_read_enable)) {
2980 continue; 2980 continue;
2981 } 2981 }
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h
index fadfe68de9cc..f47cdca1c004 100644
--- a/drivers/gpu/drm/radeon/r100_track.h
+++ b/drivers/gpu/drm/radeon/r100_track.h
@@ -75,7 +75,7 @@ struct r100_cs_track {
75 struct r100_cs_track_texture textures[R300_TRACK_MAX_TEXTURE]; 75 struct r100_cs_track_texture textures[R300_TRACK_MAX_TEXTURE];
76 bool z_enabled; 76 bool z_enabled;
77 bool separate_cube; 77 bool separate_cube;
78 bool fastfill; 78 bool zb_cb_clear;
79 bool blend_read_enable; 79 bool blend_read_enable;
80}; 80};
81 81
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index bd75f99bd65e..a5ff8076b423 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -324,13 +324,12 @@ void r300_gpu_init(struct radeon_device *rdev)
324 uint32_t gb_tile_config, tmp; 324 uint32_t gb_tile_config, tmp;
325 325
326 r100_hdp_reset(rdev); 326 r100_hdp_reset(rdev);
327 /* FIXME: rv380 one pipes ? */
328 if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) || 327 if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) ||
329 (rdev->family == CHIP_R350)) { 328 (rdev->family == CHIP_R350 && rdev->pdev->device != 0x4148)) {
330 /* r300,r350 */ 329 /* r300,r350 */
331 rdev->num_gb_pipes = 2; 330 rdev->num_gb_pipes = 2;
332 } else { 331 } else {
333 /* rv350,rv370,rv380,r300 AD */ 332 /* rv350,rv370,rv380,r300 AD, r350 AH */
334 rdev->num_gb_pipes = 1; 333 rdev->num_gb_pipes = 1;
335 } 334 }
336 rdev->num_z_pipes = 1; 335 rdev->num_z_pipes = 1;
@@ -1045,7 +1044,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
1045 break; 1044 break;
1046 case 0x4d1c: 1045 case 0x4d1c:
1047 /* ZB_BW_CNTL */ 1046 /* ZB_BW_CNTL */
1048 track->fastfill = !!(idx_value & (1 << 2)); 1047 track->zb_cb_clear = !!(idx_value & (1 << 5));
1049 break; 1048 break;
1050 case 0x4e04: 1049 case 0x4e04:
1051 /* RB3D_BLENDCNTL */ 1050 /* RB3D_BLENDCNTL */
diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c
index ea46d558e8f3..c5c2742e4140 100644
--- a/drivers/gpu/drm/radeon/r300_cmdbuf.c
+++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c
@@ -921,7 +921,7 @@ static int r300_scratch(drm_radeon_private_t *dev_priv,
921 921
922 ptr_addr = drm_buffer_read_object(cmdbuf->buffer, 922 ptr_addr = drm_buffer_read_object(cmdbuf->buffer,
923 sizeof(stack_ptr_addr), &stack_ptr_addr); 923 sizeof(stack_ptr_addr), &stack_ptr_addr);
924 ref_age_base = (u32 *)(unsigned long)*ptr_addr; 924 ref_age_base = (u32 *)(unsigned long)get_unaligned(ptr_addr);
925 925
926 for (i=0; i < header.scratch.n_bufs; i++) { 926 for (i=0; i < header.scratch.n_bufs; i++) {
927 buf_idx = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0); 927 buf_idx = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 3dc968c9f5a4..c2bda4ad62e7 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -59,6 +59,12 @@ void r420_pipes_init(struct radeon_device *rdev)
59 /* get max number of pipes */ 59 /* get max number of pipes */
60 gb_pipe_select = RREG32(0x402C); 60 gb_pipe_select = RREG32(0x402C);
61 num_pipes = ((gb_pipe_select >> 12) & 3) + 1; 61 num_pipes = ((gb_pipe_select >> 12) & 3) + 1;
62
63 /* SE chips have 1 pipe */
64 if ((rdev->pdev->device == 0x5e4c) ||
65 (rdev->pdev->device == 0x5e4f))
66 num_pipes = 1;
67
62 rdev->num_gb_pipes = num_pipes; 68 rdev->num_gb_pipes = num_pipes;
63 tmp = 0; 69 tmp = 0;
64 switch (num_pipes) { 70 switch (num_pipes) {
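
The r420_pipes_init() hunk above derives the pipe count from bits 13:12 of GB_PIPE_SELECT (count minus one) and then forces it to one for the two SE device ids. A stand-alone sketch of that decode; decode_gb_pipes() is a made-up name and the register value is passed in rather than read from hardware:

#include <stdio.h>
#include <stdint.h>

/* Sketch of the pipe-count decode above: bits 13:12 of GB_PIPE_SELECT
 * hold (number of pipes - 1), but the SE variants only have one pipe. */
static int decode_gb_pipes(uint32_t gb_pipe_select, uint16_t pci_device)
{
        int num_pipes = ((gb_pipe_select >> 12) & 3) + 1;

        if (pci_device == 0x5e4c || pci_device == 0x5e4f)
                num_pipes = 1;          /* SE chips have 1 pipe */

        return num_pipes;
}

int main(void)
{
        printf("%d\n", decode_gb_pipes(0x3000, 0x5e4b));    /* 4 pipes */
        printf("%d\n", decode_gb_pipes(0x3000, 0x5e4c));    /* SE: forced to 1 */
        return 0;
}

The same override is applied to the legacy CP path in radeon_cp.c further down.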
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index c4457791dff1..28e473f1f56f 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -134,12 +134,10 @@ int radeon_agp_init(struct radeon_device *rdev)
134 int ret; 134 int ret;
135 135
136 /* Acquire AGP. */ 136 /* Acquire AGP. */
137 if (!rdev->ddev->agp->acquired) { 137 ret = drm_agp_acquire(rdev->ddev);
138 ret = drm_agp_acquire(rdev->ddev); 138 if (ret) {
139 if (ret) { 139 DRM_ERROR("Unable to acquire AGP: %d\n", ret);
140 DRM_ERROR("Unable to acquire AGP: %d\n", ret); 140 return ret;
141 return ret;
142 }
143 } 141 }
144 142
145 ret = drm_agp_info(rdev->ddev, &info); 143 ret = drm_agp_info(rdev->ddev, &info);
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 5673665ff216..9916d825401c 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -1264,7 +1264,7 @@ bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
1264 switch (crev) { 1264 switch (crev) {
1265 case 1: 1265 case 1:
1266 tv_info = (ATOM_ANALOG_TV_INFO *)(mode_info->atom_context->bios + data_offset); 1266 tv_info = (ATOM_ANALOG_TV_INFO *)(mode_info->atom_context->bios + data_offset);
1267 if (index > MAX_SUPPORTED_TV_TIMING) 1267 if (index >= MAX_SUPPORTED_TV_TIMING)
1268 return false; 1268 return false;
1269 1269
1270 mode->crtc_htotal = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_Total); 1270 mode->crtc_htotal = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_Total);
@@ -1302,7 +1302,7 @@ bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
1302 break; 1302 break;
1303 case 2: 1303 case 2:
1304 tv_info_v1_2 = (ATOM_ANALOG_TV_INFO_V1_2 *)(mode_info->atom_context->bios + data_offset); 1304 tv_info_v1_2 = (ATOM_ANALOG_TV_INFO_V1_2 *)(mode_info->atom_context->bios + data_offset);
1305 if (index > MAX_SUPPORTED_TV_TIMING_V1_2) 1305 if (index >= MAX_SUPPORTED_TV_TIMING_V1_2)
1306 return false; 1306 return false;
1307 1307
1308 dtd_timings = &tv_info_v1_2->aModeTimings[index]; 1308 dtd_timings = &tv_info_v1_2->aModeTimings[index];
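
Both index checks above move from '>' to '>=' because aModeTimings[] holds MAX_SUPPORTED_TV_TIMING (or the V1_2 variant) entries, so the last valid index is one less than the count. A tiny sketch of the idiom with a placeholder constant:

#include <stdbool.h>
#include <stdio.h>

#define NUM_TIMINGS 2   /* stands in for MAX_SUPPORTED_TV_TIMING */

/* Valid indices for an array of NUM_TIMINGS elements are 0..NUM_TIMINGS-1,
 * so out-of-range requests must be rejected with ">=", not ">". */
static bool timing_index_ok(int index)
{
        return index >= 0 && index < NUM_TIMINGS;
}

int main(void)
{
        printf("%d\n", timing_index_ok(NUM_TIMINGS - 1));   /* 1: last valid slot */
        printf("%d\n", timing_index_ok(NUM_TIMINGS));       /* 0: one past the end */
        return 0;
}

The via_decoder_futex() change further down fixes the same kind of off-by-one against VIA_NR_XVMC_LOCKS.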
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 1331351c5178..4559a53d5e57 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1316,6 +1316,8 @@ radeon_add_legacy_connector(struct drm_device *dev,
1316 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI"); 1316 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
1317 if (!radeon_connector->ddc_bus) 1317 if (!radeon_connector->ddc_bus)
1318 goto failed; 1318 goto failed;
1319 }
1320 if (connector_type == DRM_MODE_CONNECTOR_DVII) {
1319 radeon_connector->dac_load_detect = true; 1321 radeon_connector->dac_load_detect = true;
1320 drm_connector_attach_property(&radeon_connector->base, 1322 drm_connector_attach_property(&radeon_connector->base,
1321 rdev->mode_info.load_detect_property, 1323 rdev->mode_info.load_detect_property,
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 419630dd2075..2f042a3c0e62 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -435,14 +435,19 @@ static void radeon_init_pipes(struct drm_device *dev)
435 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R420) { 435 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R420) {
436 gb_pipe_sel = RADEON_READ(R400_GB_PIPE_SELECT); 436 gb_pipe_sel = RADEON_READ(R400_GB_PIPE_SELECT);
437 dev_priv->num_gb_pipes = ((gb_pipe_sel >> 12) & 0x3) + 1; 437 dev_priv->num_gb_pipes = ((gb_pipe_sel >> 12) & 0x3) + 1;
438 /* SE cards have 1 pipe */
439 if ((dev->pdev->device == 0x5e4c) ||
440 (dev->pdev->device == 0x5e4f))
441 dev_priv->num_gb_pipes = 1;
438 } else { 442 } else {
439 /* R3xx */ 443 /* R3xx */
440 if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300 && 444 if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300 &&
441 dev->pdev->device != 0x4144) || 445 dev->pdev->device != 0x4144) ||
442 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350)) { 446 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350 &&
447 dev->pdev->device != 0x4148)) {
443 dev_priv->num_gb_pipes = 2; 448 dev_priv->num_gb_pipes = 2;
444 } else { 449 } else {
445 /* RV3xx/R300 AD */ 450 /* RV3xx/R300 AD/R350 AH */
446 dev_priv->num_gb_pipes = 1; 451 dev_priv->num_gb_pipes = 1;
447 } 452 }
448 } 453 }
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index b8d672828246..bb1c122cad21 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -86,12 +86,12 @@ static void evergreen_crtc_load_lut(struct drm_crtc *crtc)
86 WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff); 86 WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
87 WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff); 87 WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);
88 88
89 WREG32(EVERGREEN_DC_LUT_RW_MODE, radeon_crtc->crtc_id); 89 WREG32(EVERGREEN_DC_LUT_RW_MODE + radeon_crtc->crtc_offset, 0);
90 WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK, 0x00000007); 90 WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + radeon_crtc->crtc_offset, 0x00000007);
91 91
92 WREG32(EVERGREEN_DC_LUT_RW_INDEX, 0); 92 WREG32(EVERGREEN_DC_LUT_RW_INDEX + radeon_crtc->crtc_offset, 0);
93 for (i = 0; i < 256; i++) { 93 for (i = 0; i < 256; i++) {
94 WREG32(EVERGREEN_DC_LUT_30_COLOR, 94 WREG32(EVERGREEN_DC_LUT_30_COLOR + radeon_crtc->crtc_offset,
95 (radeon_crtc->lut_r[i] << 20) | 95 (radeon_crtc->lut_r[i] << 20) |
96 (radeon_crtc->lut_g[i] << 10) | 96 (radeon_crtc->lut_g[i] << 10) |
97 (radeon_crtc->lut_b[i] << 0)); 97 (radeon_crtc->lut_b[i] << 0));
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 4b05563d99e1..b3749d47be7b 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -216,6 +216,7 @@ static struct drm_driver driver_old = {
216 .mmap = drm_mmap, 216 .mmap = drm_mmap,
217 .poll = drm_poll, 217 .poll = drm_poll,
218 .fasync = drm_fasync, 218 .fasync = drm_fasync,
219 .read = drm_read,
219#ifdef CONFIG_COMPAT 220#ifdef CONFIG_COMPAT
220 .compat_ioctl = radeon_compat_ioctl, 221 .compat_ioctl = radeon_compat_ioctl,
221#endif 222#endif
@@ -304,6 +305,7 @@ static struct drm_driver kms_driver = {
304 .mmap = radeon_mmap, 305 .mmap = radeon_mmap,
305 .poll = drm_poll, 306 .poll = drm_poll,
306 .fasync = drm_fasync, 307 .fasync = drm_fasync,
308 .read = drm_read,
307#ifdef CONFIG_COMPAT 309#ifdef CONFIG_COMPAT
308 .compat_ioctl = radeon_kms_compat_ioctl, 310 .compat_ioctl = radeon_kms_compat_ioctl,
309#endif 311#endif
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 30293bec0801..c5ddaf58563a 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -254,6 +254,53 @@ radeon_get_atom_connector_priv_from_encoder(struct drm_encoder *encoder)
254 return dig_connector; 254 return dig_connector;
255} 255}
256 256
257void radeon_panel_mode_fixup(struct drm_encoder *encoder,
258 struct drm_display_mode *adjusted_mode)
259{
260 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
261 struct drm_device *dev = encoder->dev;
262 struct radeon_device *rdev = dev->dev_private;
263 struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
264 unsigned hblank = native_mode->htotal - native_mode->hdisplay;
265 unsigned vblank = native_mode->vtotal - native_mode->vdisplay;
266 unsigned hover = native_mode->hsync_start - native_mode->hdisplay;
267 unsigned vover = native_mode->vsync_start - native_mode->vdisplay;
268 unsigned hsync_width = native_mode->hsync_end - native_mode->hsync_start;
269 unsigned vsync_width = native_mode->vsync_end - native_mode->vsync_start;
270
271 adjusted_mode->clock = native_mode->clock;
272 adjusted_mode->flags = native_mode->flags;
273
274 if (ASIC_IS_AVIVO(rdev)) {
275 adjusted_mode->hdisplay = native_mode->hdisplay;
276 adjusted_mode->vdisplay = native_mode->vdisplay;
277 }
278
279 adjusted_mode->htotal = native_mode->hdisplay + hblank;
280 adjusted_mode->hsync_start = native_mode->hdisplay + hover;
281 adjusted_mode->hsync_end = adjusted_mode->hsync_start + hsync_width;
282
283 adjusted_mode->vtotal = native_mode->vdisplay + vblank;
284 adjusted_mode->vsync_start = native_mode->vdisplay + vover;
285 adjusted_mode->vsync_end = adjusted_mode->vsync_start + vsync_width;
286
287 drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
288
289 if (ASIC_IS_AVIVO(rdev)) {
290 adjusted_mode->crtc_hdisplay = native_mode->hdisplay;
291 adjusted_mode->crtc_vdisplay = native_mode->vdisplay;
292 }
293
294 adjusted_mode->crtc_htotal = adjusted_mode->crtc_hdisplay + hblank;
295 adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hdisplay + hover;
296 adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_start + hsync_width;
297
298 adjusted_mode->crtc_vtotal = adjusted_mode->crtc_vdisplay + vblank;
299 adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + vover;
300 adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + vsync_width;
301
302}
303
257static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, 304static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
258 struct drm_display_mode *mode, 305 struct drm_display_mode *mode,
259 struct drm_display_mode *adjusted_mode) 306 struct drm_display_mode *adjusted_mode)
@@ -275,18 +322,8 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
275 adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2; 322 adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
276 323
277 /* get the native mode for LVDS */ 324 /* get the native mode for LVDS */
278 if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) { 325 if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT))
279 struct drm_display_mode *native_mode = &radeon_encoder->native_mode; 326 radeon_panel_mode_fixup(encoder, adjusted_mode);
280 int mode_id = adjusted_mode->base.id;
281 *adjusted_mode = *native_mode;
282 if (!ASIC_IS_AVIVO(rdev)) {
283 adjusted_mode->hdisplay = mode->hdisplay;
284 adjusted_mode->vdisplay = mode->vdisplay;
285 adjusted_mode->crtc_hdisplay = mode->hdisplay;
286 adjusted_mode->crtc_vdisplay = mode->vdisplay;
287 }
288 adjusted_mode->base.id = mode_id;
289 }
290 327
291 /* get the native mode for TV */ 328 /* get the native mode for TV */
292 if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) { 329 if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
@@ -1326,7 +1363,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
1326 1363
1327 radeon_encoder->pixel_clock = adjusted_mode->clock; 1364 radeon_encoder->pixel_clock = adjusted_mode->clock;
1328 1365
1329 if (ASIC_IS_AVIVO(rdev)) { 1366 if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE4(rdev)) {
1330 if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT)) 1367 if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT))
1331 atombios_yuv_setup(encoder, true); 1368 atombios_yuv_setup(encoder, true);
1332 else 1369 else
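
radeon_panel_mode_fixup() above rebuilds the adjusted mode around the panel's native timings: it first extracts the blanking interval, front porch and sync width from the native mode, then re-applies them to the mode and crtc_ timing fields. A stand-alone sketch of the horizontal half of that arithmetic; struct hmode and panel_fixup_h() are placeholders, and the AVIVO-only override of the active width is left out:

#include <stdio.h>

/* Cut-down stand-in for struct drm_display_mode (horizontal fields only). */
struct hmode {
        int hdisplay, hsync_start, hsync_end, htotal;
        int crtc_hdisplay, crtc_hsync_start, crtc_hsync_end, crtc_htotal;
};

static void panel_fixup_h(struct hmode *adj, const struct hmode *native)
{
        /* Blanking, front porch and sync width of the panel's native mode. */
        int hblank = native->htotal - native->hdisplay;
        int hover = native->hsync_start - native->hdisplay;
        int hsync_width = native->hsync_end - native->hsync_start;

        /* Mode timings are rebuilt on top of the native active width, as in
         * the hunk above... */
        adj->htotal = native->hdisplay + hblank;
        adj->hsync_start = native->hdisplay + hover;
        adj->hsync_end = adj->hsync_start + hsync_width;

        /* ...while the crtc_ timings reuse whatever active width the
         * adjusted mode ended up with. */
        adj->crtc_htotal = adj->crtc_hdisplay + hblank;
        adj->crtc_hsync_start = adj->crtc_hdisplay + hover;
        adj->crtc_hsync_end = adj->crtc_hsync_start + hsync_width;
}

int main(void)
{
        struct hmode native = { .hdisplay = 1280, .hsync_start = 1328,
                                .hsync_end = 1360, .htotal = 1440 };
        struct hmode adj = { .hdisplay = 1280, .crtc_hdisplay = 1024 };

        panel_fixup_h(&adj, &native);
        printf("htotal=%d crtc_htotal=%d\n", adj.htotal, adj.crtc_htotal);
        return 0;
}

Factoring this into one helper is what lets the ATOM and legacy mode_fixup callbacks below share it.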
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index d3657dcfdd26..c633319f98ed 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -165,7 +165,7 @@ u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc)
165{ 165{
166 struct radeon_device *rdev = dev->dev_private; 166 struct radeon_device *rdev = dev->dev_private;
167 167
168 if (crtc < 0 || crtc > 1) { 168 if (crtc < 0 || crtc >= rdev->num_crtc) {
169 DRM_ERROR("Invalid crtc %d\n", crtc); 169 DRM_ERROR("Invalid crtc %d\n", crtc);
170 return -EINVAL; 170 return -EINVAL;
171 } 171 }
@@ -177,7 +177,7 @@ int radeon_enable_vblank_kms(struct drm_device *dev, int crtc)
177{ 177{
178 struct radeon_device *rdev = dev->dev_private; 178 struct radeon_device *rdev = dev->dev_private;
179 179
180 if (crtc < 0 || crtc > 1) { 180 if (crtc < 0 || crtc >= rdev->num_crtc) {
181 DRM_ERROR("Invalid crtc %d\n", crtc); 181 DRM_ERROR("Invalid crtc %d\n", crtc);
182 return -EINVAL; 182 return -EINVAL;
183 } 183 }
@@ -191,7 +191,7 @@ void radeon_disable_vblank_kms(struct drm_device *dev, int crtc)
191{ 191{
192 struct radeon_device *rdev = dev->dev_private; 192 struct radeon_device *rdev = dev->dev_private;
193 193
194 if (crtc < 0 || crtc > 1) { 194 if (crtc < 0 || crtc >= rdev->num_crtc) {
195 DRM_ERROR("Invalid crtc %d\n", crtc); 195 DRM_ERROR("Invalid crtc %d\n", crtc);
196 return; 196 return;
197 } 197 }
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 2441cca7d775..0274abe17ad9 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -228,16 +228,8 @@ static bool radeon_legacy_mode_fixup(struct drm_encoder *encoder,
228 drm_mode_set_crtcinfo(adjusted_mode, 0); 228 drm_mode_set_crtcinfo(adjusted_mode, 0);
229 229
230 /* get the native mode for LVDS */ 230 /* get the native mode for LVDS */
231 if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) { 231 if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT))
232 struct drm_display_mode *native_mode = &radeon_encoder->native_mode; 232 radeon_panel_mode_fixup(encoder, adjusted_mode);
233 int mode_id = adjusted_mode->base.id;
234 *adjusted_mode = *native_mode;
235 adjusted_mode->hdisplay = mode->hdisplay;
236 adjusted_mode->vdisplay = mode->vdisplay;
237 adjusted_mode->crtc_hdisplay = mode->hdisplay;
238 adjusted_mode->crtc_vdisplay = mode->vdisplay;
239 adjusted_mode->base.id = mode_id;
240 }
241 233
242 return true; 234 return true;
243} 235}
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 0b8e32776b10..5413fcd63086 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -558,6 +558,8 @@ extern int radeon_static_clocks_init(struct drm_device *dev);
558bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, 558bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
559 struct drm_display_mode *mode, 559 struct drm_display_mode *mode,
560 struct drm_display_mode *adjusted_mode); 560 struct drm_display_mode *adjusted_mode);
561void radeon_panel_mode_fixup(struct drm_encoder *encoder,
562 struct drm_display_mode *adjusted_mode);
561void atom_rv515_force_tv_scaler(struct radeon_device *rdev, struct radeon_crtc *radeon_crtc); 563void atom_rv515_force_tv_scaler(struct radeon_device *rdev, struct radeon_crtc *radeon_crtc);
562 564
563/* legacy tv */ 565/* legacy tv */
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index 40ab6d9c3736..cc5316dcf580 100644
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -424,7 +424,7 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
424 if ((*cmd & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) && 424 if ((*cmd & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
425 (*cmd & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) { 425 (*cmd & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
426 u32 *cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3); 426 u32 *cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3);
427 offset = *cmd << 10; 427 offset = *cmd3 << 10;
428 if (radeon_check_and_fixup_offset 428 if (radeon_check_and_fixup_offset
429 (dev_priv, file_priv, &offset)) { 429 (dev_priv, file_priv, &offset)) {
430 DRM_ERROR("Invalid second packet offset\n"); 430 DRM_ERROR("Invalid second packet offset\n");
@@ -2895,9 +2895,12 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data,
2895 return rv; 2895 return rv;
2896 rv = drm_buffer_copy_from_user(cmdbuf->buffer, buffer, 2896 rv = drm_buffer_copy_from_user(cmdbuf->buffer, buffer,
2897 cmdbuf->bufsz); 2897 cmdbuf->bufsz);
2898 if (rv) 2898 if (rv) {
2899 drm_buffer_free(cmdbuf->buffer);
2899 return rv; 2900 return rv;
2900 } 2901 }
2902 } else
2903 goto done;
2901 2904
2902 orig_nbox = cmdbuf->nbox; 2905 orig_nbox = cmdbuf->nbox;
2903 2906
@@ -2905,8 +2908,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data,
2905 int temp; 2908 int temp;
2906 temp = r300_do_cp_cmdbuf(dev, file_priv, cmdbuf); 2909 temp = r300_do_cp_cmdbuf(dev, file_priv, cmdbuf);
2907 2910
2908 if (cmdbuf->bufsz != 0) 2911 drm_buffer_free(cmdbuf->buffer);
2909 drm_buffer_free(cmdbuf->buffer);
2910 2912
2911 return temp; 2913 return temp;
2912 } 2914 }
@@ -3012,16 +3014,15 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data,
3012 } 3014 }
3013 } 3015 }
3014 3016
3015 if (cmdbuf->bufsz != 0) 3017 drm_buffer_free(cmdbuf->buffer);
3016 drm_buffer_free(cmdbuf->buffer);
3017 3018
3019 done:
3018 DRM_DEBUG("DONE\n"); 3020 DRM_DEBUG("DONE\n");
3019 COMMIT_RING(); 3021 COMMIT_RING();
3020 return 0; 3022 return 0;
3021 3023
3022 err: 3024 err:
3023 if (cmdbuf->bufsz != 0) 3025 drm_buffer_free(cmdbuf->buffer);
3024 drm_buffer_free(cmdbuf->buffer);
3025 return -EINVAL; 3026 return -EINVAL;
3026} 3027}
3027 3028
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index dd47b2a9a791..0e3754a3a303 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1716,40 +1716,12 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
1716} 1716}
1717EXPORT_SYMBOL(ttm_bo_wait); 1717EXPORT_SYMBOL(ttm_bo_wait);
1718 1718
1719void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo)
1720{
1721 atomic_set(&bo->reserved, 0);
1722 wake_up_all(&bo->event_queue);
1723}
1724
1725int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
1726 bool no_wait)
1727{
1728 int ret;
1729
1730 while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
1731 if (no_wait)
1732 return -EBUSY;
1733 else if (interruptible) {
1734 ret = wait_event_interruptible
1735 (bo->event_queue, atomic_read(&bo->reserved) == 0);
1736 if (unlikely(ret != 0))
1737 return ret;
1738 } else {
1739 wait_event(bo->event_queue,
1740 atomic_read(&bo->reserved) == 0);
1741 }
1742 }
1743 return 0;
1744}
1745
1746int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait) 1719int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
1747{ 1720{
1748 int ret = 0; 1721 int ret = 0;
1749 1722
1750 /* 1723 /*
1751 * Using ttm_bo_reserve instead of ttm_bo_block_reservation 1724 * Using ttm_bo_reserve makes sure the lru lists are updated.
1752 * makes sure the lru lists are updated.
1753 */ 1725 */
1754 1726
1755 ret = ttm_bo_reserve(bo, true, no_wait, false, 0); 1727 ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c
index 3d172ef04ee1..de41e55a944a 100644
--- a/drivers/gpu/drm/ttm/ttm_lock.c
+++ b/drivers/gpu/drm/ttm/ttm_lock.c
@@ -204,7 +204,6 @@ static int __ttm_vt_unlock(struct ttm_lock *lock)
204 lock->flags &= ~TTM_VT_LOCK; 204 lock->flags &= ~TTM_VT_LOCK;
205 wake_up_all(&lock->queue); 205 wake_up_all(&lock->queue);
206 spin_unlock(&lock->lock); 206 spin_unlock(&lock->lock);
207 printk(KERN_INFO TTM_PFX "vt unlock.\n");
208 207
209 return ret; 208 return ret;
210} 209}
@@ -265,10 +264,8 @@ int ttm_vt_lock(struct ttm_lock *lock,
265 ttm_lock_type, &ttm_vt_lock_remove, NULL); 264 ttm_lock_type, &ttm_vt_lock_remove, NULL);
266 if (ret) 265 if (ret)
267 (void)__ttm_vt_unlock(lock); 266 (void)__ttm_vt_unlock(lock);
268 else { 267 else
269 lock->vt_holder = tfile; 268 lock->vt_holder = tfile;
270 printk(KERN_INFO TTM_PFX "vt lock.\n");
271 }
272 269
273 return ret; 270 return ret;
274} 271}
diff --git a/drivers/gpu/drm/via/via_video.c b/drivers/gpu/drm/via/via_video.c
index 6ec04ac12459..6efac8117c93 100644
--- a/drivers/gpu/drm/via/via_video.c
+++ b/drivers/gpu/drm/via/via_video.c
@@ -75,7 +75,7 @@ int via_decoder_futex(struct drm_device *dev, void *data, struct drm_file *file_
75 75
76 DRM_DEBUG("\n"); 76 DRM_DEBUG("\n");
77 77
78 if (fx->lock > VIA_NR_XVMC_LOCKS) 78 if (fx->lock >= VIA_NR_XVMC_LOCKS)
79 return -EFAULT; 79 return -EFAULT;
80 80
81 lock = (volatile int *)XVMCLOCKPTR(sAPriv, fx->lock); 81 lock = (volatile int *)XVMCLOCKPTR(sAPriv, fx->lock);
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index d6d1149d525d..c8768f38511e 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -276,8 +276,10 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
276 276
277 mutex_lock(&vgasr_mutex); 277 mutex_lock(&vgasr_mutex);
278 278
279 if (!vgasr_priv.active) 279 if (!vgasr_priv.active) {
280 return -EINVAL; 280 cnt = -EINVAL;
281 goto out;
282 }
281 283
282 /* pwr off the device not in use */ 284 /* pwr off the device not in use */
283 if (strncmp(usercmd, "OFF", 3) == 0) { 285 if (strncmp(usercmd, "OFF", 3) == 0) {
diff --git a/drivers/hid/hid-cherry.c b/drivers/hid/hid-cherry.c
index 7e597d7f770f..24663a8717b1 100644
--- a/drivers/hid/hid-cherry.c
+++ b/drivers/hid/hid-cherry.c
@@ -59,6 +59,7 @@ static int ch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
59 59
60static const struct hid_device_id ch_devices[] = { 60static const struct hid_device_id ch_devices[] = {
61 { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) }, 61 { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) },
62 { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) },
62 { } 63 { }
63}; 64};
64MODULE_DEVICE_TABLE(hid, ch_devices); 65MODULE_DEVICE_TABLE(hid, ch_devices);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 2e2aa759d230..143e788b729b 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1043,13 +1043,8 @@ void hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
1043 1043
1044 if ((hid->claimed & HID_CLAIMED_HIDDEV) && hid->hiddev_report_event) 1044 if ((hid->claimed & HID_CLAIMED_HIDDEV) && hid->hiddev_report_event)
1045 hid->hiddev_report_event(hid, report); 1045 hid->hiddev_report_event(hid, report);
1046 if (hid->claimed & HID_CLAIMED_HIDRAW) { 1046 if (hid->claimed & HID_CLAIMED_HIDRAW)
1047 /* numbered reports need to be passed with the report num */ 1047 hidraw_report_event(hid, data, size);
1048 if (report_enum->numbered)
1049 hidraw_report_event(hid, data - 1, size + 1);
1050 else
1051 hidraw_report_event(hid, data, size);
1052 }
1053 1048
1054 for (a = 0; a < report->maxfield; a++) 1049 for (a = 0; a < report->maxfield; a++)
1055 hid_input_field(hid, report->field[a], cdata, interrupt); 1050 hid_input_field(hid, report->field[a], cdata, interrupt);
@@ -1296,6 +1291,7 @@ static const struct hid_device_id hid_blacklist[] = {
1296 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) }, 1291 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
1297 { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) }, 1292 { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
1298 { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) }, 1293 { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) },
1294 { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) },
1299 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) }, 1295 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
1300 { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) }, 1296 { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
1301 { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2) }, 1297 { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 797e06470356..09d27649a0f7 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -131,6 +131,7 @@
131 131
132#define USB_VENDOR_ID_CHERRY 0x046a 132#define USB_VENDOR_ID_CHERRY 0x046a
133#define USB_DEVICE_ID_CHERRY_CYMOTION 0x0023 133#define USB_DEVICE_ID_CHERRY_CYMOTION 0x0023
134#define USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR 0x0027
134 135
135#define USB_VENDOR_ID_CHIC 0x05fe 136#define USB_VENDOR_ID_CHIC 0x05fe
136#define USB_DEVICE_ID_CHIC_GAMEPAD 0x0014 137#define USB_DEVICE_ID_CHIC_GAMEPAD 0x0014
diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c
index 9b24fc510712..4777bbfa1cc2 100644
--- a/drivers/hid/hid-ntrig.c
+++ b/drivers/hid/hid-ntrig.c
@@ -1,8 +1,8 @@
1/* 1/*
2 * HID driver for N-Trig touchscreens 2 * HID driver for N-Trig touchscreens
3 * 3 *
4 * Copyright (c) 2008 Rafi Rubin 4 * Copyright (c) 2008-2010 Rafi Rubin
5 * Copyright (c) 2009 Stephane Chatty 5 * Copyright (c) 2009-2010 Stephane Chatty
6 * 6 *
7 */ 7 */
8 8
@@ -15,6 +15,8 @@
15 15
16#include <linux/device.h> 16#include <linux/device.h>
17#include <linux/hid.h> 17#include <linux/hid.h>
18#include <linux/usb.h>
19#include "usbhid/usbhid.h"
18#include <linux/module.h> 20#include <linux/module.h>
19#include <linux/slab.h> 21#include <linux/slab.h>
20 22
@@ -22,17 +24,16 @@
22 24
23#define NTRIG_DUPLICATE_USAGES 0x001 25#define NTRIG_DUPLICATE_USAGES 0x001
24 26
25#define nt_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \
26 EV_KEY, (c))
27
28struct ntrig_data { 27struct ntrig_data {
29 /* Incoming raw values for a single contact */ 28 /* Incoming raw values for a single contact */
30 __u16 x, y, w, h; 29 __u16 x, y, w, h;
31 __u16 id; 30 __u16 id;
32 __u8 confidence; 31
32 bool tipswitch;
33 bool confidence;
34 bool first_contact_touch;
33 35
34 bool reading_mt; 36 bool reading_mt;
35 __u8 first_contact_confidence;
36 37
37 __u8 mt_footer[4]; 38 __u8 mt_footer[4];
38 __u8 mt_foot_count; 39 __u8 mt_foot_count;
@@ -139,9 +140,10 @@ static int ntrig_event (struct hid_device *hid, struct hid_field *field,
139 case 0xff000001: 140 case 0xff000001:
140 /* Tag indicating the start of a multitouch group */ 141 /* Tag indicating the start of a multitouch group */
141 nd->reading_mt = 1; 142 nd->reading_mt = 1;
142 nd->first_contact_confidence = 0; 143 nd->first_contact_touch = 0;
143 break; 144 break;
144 case HID_DG_TIPSWITCH: 145 case HID_DG_TIPSWITCH:
146 nd->tipswitch = value;
145 /* Prevent emission of touch until validated */ 147 /* Prevent emission of touch until validated */
146 return 1; 148 return 1;
147 case HID_DG_CONFIDENCE: 149 case HID_DG_CONFIDENCE:
@@ -169,8 +171,14 @@ static int ntrig_event (struct hid_device *hid, struct hid_field *field,
169 * to emit a normal (X, Y) position 171 * to emit a normal (X, Y) position
170 */ 172 */
171 if (!nd->reading_mt) { 173 if (!nd->reading_mt) {
174 /*
175 * TipSwitch indicates the presence of a
176 * finger in single touch mode.
177 */
178 input_report_key(input, BTN_TOUCH,
179 nd->tipswitch);
172 input_report_key(input, BTN_TOOL_DOUBLETAP, 180 input_report_key(input, BTN_TOOL_DOUBLETAP,
173 (nd->confidence != 0)); 181 nd->tipswitch);
174 input_event(input, EV_ABS, ABS_X, nd->x); 182 input_event(input, EV_ABS, ABS_X, nd->x);
175 input_event(input, EV_ABS, ABS_Y, nd->y); 183 input_event(input, EV_ABS, ABS_Y, nd->y);
176 } 184 }
@@ -209,7 +217,13 @@ static int ntrig_event (struct hid_device *hid, struct hid_field *field,
209 217
210 /* emit a normal (X, Y) for the first point only */ 218 /* emit a normal (X, Y) for the first point only */
211 if (nd->id == 0) { 219 if (nd->id == 0) {
212 nd->first_contact_confidence = nd->confidence; 220 /*
221 * TipSwitch is superfluous in multitouch
222 * mode. The footer events tell us
223 * if there is a finger on the screen or
224 * not.
225 */
226 nd->first_contact_touch = nd->confidence;
213 input_event(input, EV_ABS, ABS_X, nd->x); 227 input_event(input, EV_ABS, ABS_X, nd->x);
214 input_event(input, EV_ABS, ABS_Y, nd->y); 228 input_event(input, EV_ABS, ABS_Y, nd->y);
215 } 229 }
@@ -239,30 +253,11 @@ static int ntrig_event (struct hid_device *hid, struct hid_field *field,
239 253
240 nd->reading_mt = 0; 254 nd->reading_mt = 0;
241 255
242 if (nd->first_contact_confidence) { 256 if (nd->first_contact_touch) {
243 switch (value) { 257 input_report_key(input, BTN_TOOL_DOUBLETAP, 1);
244 case 0: /* for single touch devices */
245 case 1:
246 input_report_key(input,
247 BTN_TOOL_DOUBLETAP, 1);
248 break;
249 case 2:
250 input_report_key(input,
251 BTN_TOOL_TRIPLETAP, 1);
252 break;
253 case 3:
254 default:
255 input_report_key(input,
256 BTN_TOOL_QUADTAP, 1);
257 }
258 input_report_key(input, BTN_TOUCH, 1); 258 input_report_key(input, BTN_TOUCH, 1);
259 } else { 259 } else {
260 input_report_key(input, 260 input_report_key(input, BTN_TOOL_DOUBLETAP, 0);
261 BTN_TOOL_DOUBLETAP, 0);
262 input_report_key(input,
263 BTN_TOOL_TRIPLETAP, 0);
264 input_report_key(input,
265 BTN_TOOL_QUADTAP, 0);
266 input_report_key(input, BTN_TOUCH, 0); 261 input_report_key(input, BTN_TOUCH, 0);
267 } 262 }
268 break; 263 break;
@@ -286,6 +281,7 @@ static int ntrig_probe(struct hid_device *hdev, const struct hid_device_id *id)
286 struct ntrig_data *nd; 281 struct ntrig_data *nd;
287 struct hid_input *hidinput; 282 struct hid_input *hidinput;
288 struct input_dev *input; 283 struct input_dev *input;
284 struct hid_report *report;
289 285
290 if (id->driver_data) 286 if (id->driver_data)
291 hdev->quirks |= HID_QUIRK_MULTI_INPUT; 287 hdev->quirks |= HID_QUIRK_MULTI_INPUT;
@@ -327,13 +323,7 @@ static int ntrig_probe(struct hid_device *hdev, const struct hid_device_id *id)
327 __clear_bit(BTN_TOOL_PEN, input->keybit); 323 __clear_bit(BTN_TOOL_PEN, input->keybit);
328 __clear_bit(BTN_TOOL_FINGER, input->keybit); 324 __clear_bit(BTN_TOOL_FINGER, input->keybit);
329 __clear_bit(BTN_0, input->keybit); 325 __clear_bit(BTN_0, input->keybit);
330 /*
331 * A little something special to enable
332 * two and three finger taps.
333 */
334 __set_bit(BTN_TOOL_DOUBLETAP, input->keybit); 326 __set_bit(BTN_TOOL_DOUBLETAP, input->keybit);
335 __set_bit(BTN_TOOL_TRIPLETAP, input->keybit);
336 __set_bit(BTN_TOOL_QUADTAP, input->keybit);
337 /* 327 /*
338 * The physical touchscreen (single touch) 328 * The physical touchscreen (single touch)
339 * input has a value for physical, whereas 329 * input has a value for physical, whereas
@@ -349,6 +339,12 @@ static int ntrig_probe(struct hid_device *hdev, const struct hid_device_id *id)
349 } 339 }
350 } 340 }
351 341
342 /* This is needed for devices with more recent firmware versions */
343 report = hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[0x0a];
344 if (report)
345 usbhid_submit_report(hdev, report, USB_DIR_OUT);
346
347
352 return 0; 348 return 0;
353err_free: 349err_free:
354 kfree(nd); 350 kfree(nd);
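
The N-Trig rework above simplifies touch reporting: in single-touch mode BTN_TOUCH and BTN_TOOL_DOUBLETAP now follow the TipSwitch bit directly, while at the end of a multitouch group the confidence captured for the first contact (first_contact_touch) decides whether a touch is reported, and the TRIPLETAP/QUADTAP tools are dropped. A small sketch of that decision logic, with printf() standing in for input_report_key() and placeholder struct/function names:

#include <stdbool.h>
#include <stdio.h>

/* Placeholder for the subset of ntrig_data state used here. */
struct ntrig_state {
        bool tipswitch;                 /* single-touch TipSwitch bit */
        bool first_contact_touch;       /* confidence of contact 0 in a group */
};

/* Stand-in for input_report_key(). */
static void report_key(const char *key, int value)
{
        printf("%s = %d\n", key, value);
}

/* Single-touch path: the touch state follows TipSwitch directly. */
static void report_single_touch(const struct ntrig_state *nd)
{
        report_key("BTN_TOUCH", nd->tipswitch);
        report_key("BTN_TOOL_DOUBLETAP", nd->tipswitch);
}

/* End of a multitouch group: the first contact's confidence decides. */
static void report_mt_group_end(const struct ntrig_state *nd)
{
        report_key("BTN_TOOL_DOUBLETAP", nd->first_contact_touch);
        report_key("BTN_TOUCH", nd->first_contact_touch);
}

int main(void)
{
        struct ntrig_state nd = { .tipswitch = true,
                                  .first_contact_touch = false };

        report_single_touch(&nd);
        report_mt_group_end(&nd);
        return 0;
}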
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 7502a4b2fa86..402d5574b574 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -76,7 +76,7 @@ static int sony_set_operational_usb(struct hid_device *hdev)
76 76
77static int sony_set_operational_bt(struct hid_device *hdev) 77static int sony_set_operational_bt(struct hid_device *hdev)
78{ 78{
79 unsigned char buf[] = { 0x53, 0xf4, 0x42, 0x03, 0x00, 0x00 }; 79 unsigned char buf[] = { 0xf4, 0x42, 0x03, 0x00, 0x00 };
80 return hdev->hid_output_raw_report(hdev, buf, sizeof(buf), HID_FEATURE_REPORT); 80 return hdev->hid_output_raw_report(hdev, buf, sizeof(buf), HID_FEATURE_REPORT);
81} 81}
82 82
diff --git a/drivers/hid/hid-wacom.c b/drivers/hid/hid-wacom.c
index f7700cf49721..f947d8337e21 100644
--- a/drivers/hid/hid-wacom.c
+++ b/drivers/hid/hid-wacom.c
@@ -277,7 +277,6 @@ static int __init wacom_init(void)
277 ret = hid_register_driver(&wacom_driver); 277 ret = hid_register_driver(&wacom_driver);
278 if (ret) 278 if (ret)
279 printk(KERN_ERR "can't register wacom driver\n"); 279 printk(KERN_ERR "can't register wacom driver\n");
280 printk(KERN_ERR "wacom driver registered\n");
281 return ret; 280 return ret;
282} 281}
283 282
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 56d06cd8075b..7b85b696fdab 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -999,13 +999,6 @@ static int usbhid_start(struct hid_device *hid)
999 } 999 }
1000 } 1000 }
1001 1001
1002 init_waitqueue_head(&usbhid->wait);
1003 INIT_WORK(&usbhid->reset_work, hid_reset);
1004 INIT_WORK(&usbhid->restart_work, __usbhid_restart_queues);
1005 setup_timer(&usbhid->io_retry, hid_retry_timeout, (unsigned long) hid);
1006
1007 spin_lock_init(&usbhid->lock);
1008
1009 usbhid->urbctrl = usb_alloc_urb(0, GFP_KERNEL); 1002 usbhid->urbctrl = usb_alloc_urb(0, GFP_KERNEL);
1010 if (!usbhid->urbctrl) { 1003 if (!usbhid->urbctrl) {
1011 ret = -ENOMEM; 1004 ret = -ENOMEM;
@@ -1179,6 +1172,12 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id *
1179 usbhid->intf = intf; 1172 usbhid->intf = intf;
1180 usbhid->ifnum = interface->desc.bInterfaceNumber; 1173 usbhid->ifnum = interface->desc.bInterfaceNumber;
1181 1174
1175 init_waitqueue_head(&usbhid->wait);
1176 INIT_WORK(&usbhid->reset_work, hid_reset);
1177 INIT_WORK(&usbhid->restart_work, __usbhid_restart_queues);
1178 setup_timer(&usbhid->io_retry, hid_retry_timeout, (unsigned long) hid);
1179 spin_lock_init(&usbhid->lock);
1180
1182 ret = hid_add_device(hid); 1181 ret = hid_add_device(hid);
1183 if (ret) { 1182 if (ret) {
1184 if (ret != -ENODEV) 1183 if (ret != -ENODEV)
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index 0f28d91f29d8..f085c18d2905 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -195,6 +195,9 @@ static unsigned int applesmc_accelerometer;
195/* Indicates whether this computer has light sensors and keyboard backlight. */ 195/* Indicates whether this computer has light sensors and keyboard backlight. */
196static unsigned int applesmc_light; 196static unsigned int applesmc_light;
197 197
198/* The number of fans handled by the driver */
199static unsigned int fans_handled;
200
198/* Indicates which temperature sensors set to use. */ 201/* Indicates which temperature sensors set to use. */
199static unsigned int applesmc_temperature_set; 202static unsigned int applesmc_temperature_set;
200 203
@@ -1492,39 +1495,24 @@ static int __init applesmc_init(void)
1492 1495
1493 /* create fan files */ 1496 /* create fan files */
1494 count = applesmc_get_fan_count(); 1497 count = applesmc_get_fan_count();
1495 if (count < 0) { 1498 if (count < 0)
1496 printk(KERN_ERR "applesmc: Cannot get the number of fans.\n"); 1499 printk(KERN_ERR "applesmc: Cannot get the number of fans.\n");
1497 } else { 1500 else
1498 printk(KERN_INFO "applesmc: %d fans found.\n", count); 1501 printk(KERN_INFO "applesmc: %d fans found.\n", count);
1499 1502
1500 switch (count) { 1503 if (count > 4) {
1501 default: 1504 count = 4;
1502 printk(KERN_WARNING "applesmc: More than 4 fans found," 1505 printk(KERN_WARNING "applesmc: More than 4 fans found,"
1503 " but at most 4 fans are supported" 1506 " but at most 4 fans are supported"
1504 " by the driver.\n"); 1507 " by the driver.\n");
1505 case 4: 1508 }
1506 ret = sysfs_create_group(&pdev->dev.kobj, 1509
1507 &fan_attribute_groups[3]); 1510 while (fans_handled < count) {
1508 if (ret) 1511 ret = sysfs_create_group(&pdev->dev.kobj,
1509 goto out_key_enumeration; 1512 &fan_attribute_groups[fans_handled]);
1510 case 3: 1513 if (ret)
1511 ret = sysfs_create_group(&pdev->dev.kobj, 1514 goto out_fans;
1512 &fan_attribute_groups[2]); 1515 fans_handled++;
1513 if (ret)
1514 goto out_key_enumeration;
1515 case 2:
1516 ret = sysfs_create_group(&pdev->dev.kobj,
1517 &fan_attribute_groups[1]);
1518 if (ret)
1519 goto out_key_enumeration;
1520 case 1:
1521 ret = sysfs_create_group(&pdev->dev.kobj,
1522 &fan_attribute_groups[0]);
1523 if (ret)
1524 goto out_fan_1;
1525 case 0:
1526 ;
1527 }
1528 } 1516 }
1529 1517
1530 for (i = 0; 1518 for (i = 0;
@@ -1593,10 +1581,10 @@ out_accelerometer:
1593 applesmc_release_accelerometer(); 1581 applesmc_release_accelerometer();
1594out_temperature: 1582out_temperature:
1595 sysfs_remove_group(&pdev->dev.kobj, &temperature_attributes_group); 1583 sysfs_remove_group(&pdev->dev.kobj, &temperature_attributes_group);
1596 sysfs_remove_group(&pdev->dev.kobj, &fan_attribute_groups[0]); 1584out_fans:
1597out_fan_1: 1585 while (fans_handled)
1598 sysfs_remove_group(&pdev->dev.kobj, &fan_attribute_groups[1]); 1586 sysfs_remove_group(&pdev->dev.kobj,
1599out_key_enumeration: 1587 &fan_attribute_groups[--fans_handled]);
1600 sysfs_remove_group(&pdev->dev.kobj, &key_enumeration_group); 1588 sysfs_remove_group(&pdev->dev.kobj, &key_enumeration_group);
1601out_name: 1589out_name:
1602 sysfs_remove_file(&pdev->dev.kobj, &dev_attr_name.attr); 1590 sysfs_remove_file(&pdev->dev.kobj, &dev_attr_name.attr);
@@ -1622,8 +1610,9 @@ static void __exit applesmc_exit(void)
1622 if (applesmc_accelerometer) 1610 if (applesmc_accelerometer)
1623 applesmc_release_accelerometer(); 1611 applesmc_release_accelerometer();
1624 sysfs_remove_group(&pdev->dev.kobj, &temperature_attributes_group); 1612 sysfs_remove_group(&pdev->dev.kobj, &temperature_attributes_group);
1625 sysfs_remove_group(&pdev->dev.kobj, &fan_attribute_groups[0]); 1613 while (fans_handled)
1626 sysfs_remove_group(&pdev->dev.kobj, &fan_attribute_groups[1]); 1614 sysfs_remove_group(&pdev->dev.kobj,
1615 &fan_attribute_groups[--fans_handled]);
1627 sysfs_remove_group(&pdev->dev.kobj, &key_enumeration_group); 1616 sysfs_remove_group(&pdev->dev.kobj, &key_enumeration_group);
1628 sysfs_remove_file(&pdev->dev.kobj, &dev_attr_name.attr); 1617 sysfs_remove_file(&pdev->dev.kobj, &dev_attr_name.attr);
1629 platform_device_unregister(pdev); 1618 platform_device_unregister(pdev);
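
The applesmc change above replaces the fall-through switch with a loop that creates one sysfs attribute group per detected fan (capped at four) and, on failure or at module exit, removes exactly the groups it managed to create by counting fans_handled back down. A compact user-space sketch of that create-with-rollback pattern; create_group() and remove_group() are stand-ins, not kernel calls:

#include <stdio.h>

#define MAX_FANS 4

/* Stand-ins for sysfs_create_group()/sysfs_remove_group(). */
static int create_group(int i)
{
        printf("create fan group %d\n", i);
        return i == 2 ? -1 : 0;         /* pretend the third group fails */
}

static void remove_group(int i)
{
        printf("remove fan group %d\n", i);
}

int main(void)
{
        int fans_handled = 0;
        int count = 6;                  /* pretend six fans were detected */
        int ret = 0;

        if (count > MAX_FANS)
                count = MAX_FANS;       /* at most 4 fans are supported */

        /* Create one group per fan, remembering how many succeeded. */
        while (fans_handled < count) {
                ret = create_group(fans_handled);
                if (ret)
                        break;
                fans_handled++;
        }

        /* On error (or at teardown), remove only what was created. */
        if (ret)
                while (fans_handled)
                        remove_group(--fans_handled);

        return ret ? 1 : 0;
}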
diff --git a/drivers/hwmon/asc7621.c b/drivers/hwmon/asc7621.c
index 7f948105d8ad..0f388adc6187 100644
--- a/drivers/hwmon/asc7621.c
+++ b/drivers/hwmon/asc7621.c
@@ -268,8 +268,11 @@ static ssize_t store_fan16(struct device *dev,
268 if (strict_strtol(buf, 10, &reqval)) 268 if (strict_strtol(buf, 10, &reqval))
269 return -EINVAL; 269 return -EINVAL;
270 270
271 /* If a minimum RPM of zero is requested, then we set the register to
272 0xffff. This value allows the fan to be stopped completely without
273 generating an alarm. */
271 reqval = 274 reqval =
272 (SENSORS_LIMIT((reqval) <= 0 ? 0 : 5400000 / (reqval), 0, 65534)); 275 (reqval <= 0 ? 0xffff : SENSORS_LIMIT(5400000 / reqval, 0, 0xfffe));
273 276
274 mutex_lock(&data->update_lock); 277 mutex_lock(&data->update_lock);
275 data->reg[param->msb[0]] = (reqval >> 8) & 0xff; 278 data->reg[param->msb[0]] = (reqval >> 8) & 0xff;
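The register value written above is 5400000 / RPM, clamped to 0..0xfffe, with 0xffff reserved so that a requested minimum of 0 lets the fan stop without raising an alarm (per the new comment). A standalone illustration of that encoding (plain userspace C, arithmetic only; the function name is made up):

#include <stdio.h>

/* asc7621-style fan minimum: register = 5400000 / RPM, clamped to 0..0xfffe;
 * 0xffff disables the limit so the fan can stop without an alarm. */
static unsigned int rpm_to_reg(long rpm)
{
        unsigned long reg;

        if (rpm <= 0)
                return 0xffff;
        reg = 5400000UL / (unsigned long)rpm;
        return reg > 0xfffe ? 0xfffe : (unsigned int)reg;
}

int main(void)
{
        printf("1000 RPM -> %u\n", rpm_to_reg(1000));   /* 5400 */
        printf("  50 RPM -> 0x%x\n", rpm_to_reg(50));   /* clamped to 0xfffe */
        printf("   0 RPM -> 0x%x\n", rpm_to_reg(0));    /* 0xffff, fan may stop */
        return 0;
}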
@@ -285,8 +288,9 @@ static ssize_t store_fan16(struct device *dev,
285 * Voltages are scaled in the device so that the nominal voltage 288 * Voltages are scaled in the device so that the nominal voltage
286 * is 3/4ths of the 0-255 range (i.e. 192). 289 * is 3/4ths of the 0-255 range (i.e. 192).
287 * If all voltages are 'normal' then all voltage registers will 290 * If all voltages are 'normal' then all voltage registers will
288 * read 0xC0. This doesn't help us if we don't have a point of refernce. 291 * read 0xC0.
289 * The data sheet however provides us with the full scale value for each 292 *
293 * The data sheet provides us with the 3/4 scale value for each voltage
290 * which is stored in in_scaling. The sda->index parameter value provides 294 * which is stored in in_scaling. The sda->index parameter value provides
291 * the index into in_scaling. 295 * the index into in_scaling.
292 * 296 *
@@ -295,7 +299,7 @@ static ssize_t store_fan16(struct device *dev,
295 */ 299 */
296 300
297static int asc7621_in_scaling[] = { 301static int asc7621_in_scaling[] = {
298 3320, 3000, 4380, 6640, 16000 302 2500, 2250, 3300, 5000, 12000
299}; 303};
300 304
301static ssize_t show_in10(struct device *dev, struct device_attribute *attr, 305static ssize_t show_in10(struct device *dev, struct device_attribute *attr,
@@ -306,19 +310,12 @@ static ssize_t show_in10(struct device *dev, struct device_attribute *attr,
306 u8 nr = sda->index; 310 u8 nr = sda->index;
307 311
308 mutex_lock(&data->update_lock); 312 mutex_lock(&data->update_lock);
309 regval = (data->reg[param->msb[0]] * asc7621_in_scaling[nr]) / 256; 313 regval = (data->reg[param->msb[0]] << 8) | (data->reg[param->lsb[0]]);
310
311 /* The LSB value is a 2-bit scaling of the MSB's LSbit value.
312 * I.E. If the maximim voltage for this input is 6640 millivolts then
313 * a MSB register value of 0 = 0mv and 255 = 6640mv.
314 * A 1 step change therefore represents 25.9mv (6640 / 256).
315 * The extra 2-bits therefore represent increments of 6.48mv.
316 */
317 regval += ((asc7621_in_scaling[nr] / 256) / 4) *
318 (data->reg[param->lsb[0]] >> 6);
319
320 mutex_unlock(&data->update_lock); 314 mutex_unlock(&data->update_lock);
321 315
316 /* The LSB value is a 2-bit scaling of the MSB's LSbit value. */
317 regval = (regval >> 6) * asc7621_in_scaling[nr] / (0xc0 << 2);
318
322 return sprintf(buf, "%u\n", regval); 319 return sprintf(buf, "%u\n", regval);
323} 320}
324 321
@@ -331,7 +328,7 @@ static ssize_t show_in8(struct device *dev, struct device_attribute *attr,
331 328
332 return sprintf(buf, "%u\n", 329 return sprintf(buf, "%u\n",
333 ((data->reg[param->msb[0]] * 330 ((data->reg[param->msb[0]] *
334 asc7621_in_scaling[nr]) / 256)); 331 asc7621_in_scaling[nr]) / 0xc0));
335} 332}
336 333
337static ssize_t store_in8(struct device *dev, struct device_attribute *attr, 334static ssize_t store_in8(struct device *dev, struct device_attribute *attr,
@@ -344,9 +341,11 @@ static ssize_t store_in8(struct device *dev, struct device_attribute *attr,
344 if (strict_strtol(buf, 10, &reqval)) 341 if (strict_strtol(buf, 10, &reqval))
345 return -EINVAL; 342 return -EINVAL;
346 343
347 reqval = SENSORS_LIMIT(reqval, 0, asc7621_in_scaling[nr]); 344 reqval = SENSORS_LIMIT(reqval, 0, 0xffff);
345
346 reqval = reqval * 0xc0 / asc7621_in_scaling[nr];
348 347
349 reqval = (reqval * 255 + 128) / asc7621_in_scaling[nr]; 348 reqval = SENSORS_LIMIT(reqval, 0, 0xff);
350 349
351 mutex_lock(&data->update_lock); 350 mutex_lock(&data->update_lock);
352 data->reg[param->msb[0]] = reqval; 351 data->reg[param->msb[0]] = reqval;
@@ -846,11 +845,11 @@ static struct asc7621_param asc7621_params[] = {
846 PWRITE(in3_max, 3, PRI_LOW, 0x4b, 0, 0, 0, in8), 845 PWRITE(in3_max, 3, PRI_LOW, 0x4b, 0, 0, 0, in8),
847 PWRITE(in4_max, 4, PRI_LOW, 0x4d, 0, 0, 0, in8), 846 PWRITE(in4_max, 4, PRI_LOW, 0x4d, 0, 0, 0, in8),
848 847
849 PREAD(in0_alarm, 0, PRI_LOW, 0x41, 0, 0x01, 0, bitmask), 848 PREAD(in0_alarm, 0, PRI_HIGH, 0x41, 0, 0x01, 0, bitmask),
850 PREAD(in1_alarm, 1, PRI_LOW, 0x41, 0, 0x01, 1, bitmask), 849 PREAD(in1_alarm, 1, PRI_HIGH, 0x41, 0, 0x01, 1, bitmask),
851 PREAD(in2_alarm, 2, PRI_LOW, 0x41, 0, 0x01, 2, bitmask), 850 PREAD(in2_alarm, 2, PRI_HIGH, 0x41, 0, 0x01, 2, bitmask),
852 PREAD(in3_alarm, 3, PRI_LOW, 0x41, 0, 0x01, 3, bitmask), 851 PREAD(in3_alarm, 3, PRI_HIGH, 0x41, 0, 0x01, 3, bitmask),
853 PREAD(in4_alarm, 4, PRI_LOW, 0x42, 0, 0x01, 0, bitmask), 852 PREAD(in4_alarm, 4, PRI_HIGH, 0x42, 0, 0x01, 0, bitmask),
854 853
855 PREAD(fan1_input, 0, PRI_HIGH, 0x29, 0x28, 0, 0, fan16), 854 PREAD(fan1_input, 0, PRI_HIGH, 0x29, 0x28, 0, 0, fan16),
856 PREAD(fan2_input, 1, PRI_HIGH, 0x2b, 0x2a, 0, 0, fan16), 855 PREAD(fan2_input, 1, PRI_HIGH, 0x2b, 0x2a, 0, 0, fan16),
@@ -862,10 +861,10 @@ static struct asc7621_param asc7621_params[] = {
862 PWRITE(fan3_min, 2, PRI_LOW, 0x59, 0x58, 0, 0, fan16), 861 PWRITE(fan3_min, 2, PRI_LOW, 0x59, 0x58, 0, 0, fan16),
863 PWRITE(fan4_min, 3, PRI_LOW, 0x5b, 0x5a, 0, 0, fan16), 862 PWRITE(fan4_min, 3, PRI_LOW, 0x5b, 0x5a, 0, 0, fan16),
864 863
865 PREAD(fan1_alarm, 0, PRI_LOW, 0x42, 0, 0x01, 0, bitmask), 864 PREAD(fan1_alarm, 0, PRI_HIGH, 0x42, 0, 0x01, 2, bitmask),
866 PREAD(fan2_alarm, 1, PRI_LOW, 0x42, 0, 0x01, 1, bitmask), 865 PREAD(fan2_alarm, 1, PRI_HIGH, 0x42, 0, 0x01, 3, bitmask),
867 PREAD(fan3_alarm, 2, PRI_LOW, 0x42, 0, 0x01, 2, bitmask), 866 PREAD(fan3_alarm, 2, PRI_HIGH, 0x42, 0, 0x01, 4, bitmask),
868 PREAD(fan4_alarm, 3, PRI_LOW, 0x42, 0, 0x01, 3, bitmask), 867 PREAD(fan4_alarm, 3, PRI_HIGH, 0x42, 0, 0x01, 5, bitmask),
869 868
870 PREAD(temp1_input, 0, PRI_HIGH, 0x25, 0x10, 0, 0, temp10), 869 PREAD(temp1_input, 0, PRI_HIGH, 0x25, 0x10, 0, 0, temp10),
871 PREAD(temp2_input, 1, PRI_HIGH, 0x26, 0x15, 0, 0, temp10), 870 PREAD(temp2_input, 1, PRI_HIGH, 0x26, 0x15, 0, 0, temp10),
@@ -886,10 +885,10 @@ static struct asc7621_param asc7621_params[] = {
886 PWRITE(temp3_max, 2, PRI_LOW, 0x53, 0, 0, 0, temp8), 885 PWRITE(temp3_max, 2, PRI_LOW, 0x53, 0, 0, 0, temp8),
887 PWRITE(temp4_max, 3, PRI_LOW, 0x35, 0, 0, 0, temp8), 886 PWRITE(temp4_max, 3, PRI_LOW, 0x35, 0, 0, 0, temp8),
888 887
889 PREAD(temp1_alarm, 0, PRI_LOW, 0x41, 0, 0x01, 4, bitmask), 888 PREAD(temp1_alarm, 0, PRI_HIGH, 0x41, 0, 0x01, 4, bitmask),
890 PREAD(temp2_alarm, 1, PRI_LOW, 0x41, 0, 0x01, 5, bitmask), 889 PREAD(temp2_alarm, 1, PRI_HIGH, 0x41, 0, 0x01, 5, bitmask),
891 PREAD(temp3_alarm, 2, PRI_LOW, 0x41, 0, 0x01, 6, bitmask), 890 PREAD(temp3_alarm, 2, PRI_HIGH, 0x41, 0, 0x01, 6, bitmask),
892 PREAD(temp4_alarm, 3, PRI_LOW, 0x43, 0, 0x01, 0, bitmask), 891 PREAD(temp4_alarm, 3, PRI_HIGH, 0x43, 0, 0x01, 0, bitmask),
893 892
894 PWRITE(temp1_source, 0, PRI_LOW, 0x02, 0, 0x07, 4, bitmask), 893 PWRITE(temp1_source, 0, PRI_LOW, 0x02, 0, 0x07, 4, bitmask),
895 PWRITE(temp2_source, 1, PRI_LOW, 0x02, 0, 0x07, 0, bitmask), 894 PWRITE(temp2_source, 1, PRI_LOW, 0x02, 0, 0x07, 0, bitmask),
@@ -898,7 +897,7 @@ static struct asc7621_param asc7621_params[] = {
898 897
899 PWRITE(temp1_smoothing_enable, 0, PRI_LOW, 0x62, 0, 0x01, 3, bitmask), 898 PWRITE(temp1_smoothing_enable, 0, PRI_LOW, 0x62, 0, 0x01, 3, bitmask),
900 PWRITE(temp2_smoothing_enable, 1, PRI_LOW, 0x63, 0, 0x01, 7, bitmask), 899 PWRITE(temp2_smoothing_enable, 1, PRI_LOW, 0x63, 0, 0x01, 7, bitmask),
901 PWRITE(temp3_smoothing_enable, 2, PRI_LOW, 0x64, 0, 0x01, 3, bitmask), 900 PWRITE(temp3_smoothing_enable, 2, PRI_LOW, 0x63, 0, 0x01, 3, bitmask),
902 PWRITE(temp4_smoothing_enable, 3, PRI_LOW, 0x3c, 0, 0x01, 3, bitmask), 901 PWRITE(temp4_smoothing_enable, 3, PRI_LOW, 0x3c, 0, 0x01, 3, bitmask),
903 902
904 PWRITE(temp1_smoothing_time, 0, PRI_LOW, 0x62, 0, 0x07, 0, temp_st), 903 PWRITE(temp1_smoothing_time, 0, PRI_LOW, 0x62, 0, 0x07, 0, temp_st),
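With asc7621_in_scaling now holding nominal (3/4 full-scale) millivolt values, the 10-bit reading converts to millivolts as reading * nominal / 768, because 0xC0 << 2 = 768 is the code that corresponds to the nominal rail voltage. A standalone worked example of that scaling (the table is copied from the hunk above, everything else is illustrative):

#include <stdio.h>

/* Nominal (3/4 full-scale) millivolts per input, as in asc7621_in_scaling. */
static const int in_scaling[] = { 2500, 2250, 3300, 5000, 12000 };

/* msb:lsb is the raw register pair; only the top 10 bits carry the reading. */
static int reg_to_mv(unsigned char msb, unsigned char lsb, int nr)
{
        int raw10 = ((msb << 8) | lsb) >> 6;            /* 0..1023 */

        return raw10 * in_scaling[nr] / (0xc0 << 2);    /* 768 == nominal */
}

int main(void)
{
        /* 0xC0:0x00 is the "all normal" reading: 768/768 of nominal. */
        printf("in2 at 0xC0:0x00 -> %d mV\n", reg_to_mv(0xc0, 0x00, 2)); /* 3300 */
        printf("in2 at 0xE0:0x00 -> %d mV\n", reg_to_mv(0xe0, 0x00, 2)); /* 3850 */
        return 0;
}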
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index 75f3fa55663d..16c420240724 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -1169,15 +1169,19 @@ static int atk_create_files(struct atk_data *data)
1169 int err; 1169 int err;
1170 1170
1171 list_for_each_entry(s, &data->sensor_list, list) { 1171 list_for_each_entry(s, &data->sensor_list, list) {
1172 sysfs_attr_init(&s->input_attr.attr);
1172 err = device_create_file(data->hwmon_dev, &s->input_attr); 1173 err = device_create_file(data->hwmon_dev, &s->input_attr);
1173 if (err) 1174 if (err)
1174 return err; 1175 return err;
1176 sysfs_attr_init(&s->label_attr.attr);
1175 err = device_create_file(data->hwmon_dev, &s->label_attr); 1177 err = device_create_file(data->hwmon_dev, &s->label_attr);
1176 if (err) 1178 if (err)
1177 return err; 1179 return err;
1180 sysfs_attr_init(&s->limit1_attr.attr);
1178 err = device_create_file(data->hwmon_dev, &s->limit1_attr); 1181 err = device_create_file(data->hwmon_dev, &s->limit1_attr);
1179 if (err) 1182 if (err)
1180 return err; 1183 return err;
1184 sysfs_attr_init(&s->limit2_attr.attr);
1181 err = device_create_file(data->hwmon_dev, &s->limit2_attr); 1185 err = device_create_file(data->hwmon_dev, &s->limit2_attr);
1182 if (err) 1186 if (err)
1183 return err; 1187 return err;
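sysfs_attr_init() gives a dynamically allocated attribute the lockdep class key it otherwise lacks; without it, lockdep-enabled kernels can warn when the file is registered. A minimal sketch of the pattern for a heap-embedded device attribute (the foo_* names are placeholders, not atk0110 symbols):

#include <linux/device.h>
#include <linux/sysfs.h>

struct foo_sensor {
        struct device_attribute input_attr;
        char name[16];
};

static int foo_add_sensor_file(struct device *dev, struct foo_sensor *s,
                               ssize_t (*show)(struct device *,
                                               struct device_attribute *, char *))
{
        /* The attribute lives in allocated memory, so initialize its lockdep
         * key explicitly before device_create_file() registers it. */
        sysfs_attr_init(&s->input_attr.attr);
        s->input_attr.attr.name = s->name;
        s->input_attr.attr.mode = 0444;
        s->input_attr.show = show;

        return device_create_file(dev, &s->input_attr);
}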
diff --git a/drivers/hwmon/hp_accel.c b/drivers/hwmon/hp_accel.c
index be475e844c2a..7580f55e67e3 100644
--- a/drivers/hwmon/hp_accel.c
+++ b/drivers/hwmon/hp_accel.c
@@ -217,6 +217,10 @@ static struct dmi_system_id lis3lv02d_dmi_ids[] = {
217 AXIS_DMI_MATCH("DV7", "HP Pavilion dv7", x_inverted), 217 AXIS_DMI_MATCH("DV7", "HP Pavilion dv7", x_inverted),
218 AXIS_DMI_MATCH("HP8710", "HP Compaq 8710", y_inverted), 218 AXIS_DMI_MATCH("HP8710", "HP Compaq 8710", y_inverted),
219 AXIS_DMI_MATCH("HDX18", "HP HDX 18", x_inverted), 219 AXIS_DMI_MATCH("HDX18", "HP HDX 18", x_inverted),
220 AXIS_DMI_MATCH("HPB432x", "HP ProBook 432", xy_rotated_left),
221 AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left),
222 AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted),
223 AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap),
220 { NULL, } 224 { NULL, }
221/* Laptop models without axis info (yet): 225/* Laptop models without axis info (yet):
222 * "NC6910" "HP Compaq 6910" 226 * "NC6910" "HP Compaq 6910"
@@ -324,8 +328,8 @@ static int lis3lv02d_remove(struct acpi_device *device, int type)
324 lis3lv02d_joystick_disable(); 328 lis3lv02d_joystick_disable();
325 lis3lv02d_poweroff(&lis3_dev); 329 lis3lv02d_poweroff(&lis3_dev);
326 330
327 flush_work(&hpled_led.work);
328 led_classdev_unregister(&hpled_led.led_classdev); 331 led_classdev_unregister(&hpled_led.led_classdev);
332 flush_work(&hpled_led.work);
329 333
330 return lis3lv02d_remove_fs(&lis3_dev); 334 return lis3lv02d_remove_fs(&lis3_dev);
331} 335}
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 9c6170cd9aac..87ab0568bb0e 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -564,7 +564,7 @@ config I2C_STU300
564 564
565config I2C_VERSATILE 565config I2C_VERSATILE
566 tristate "ARM Versatile/Realview I2C bus support" 566 tristate "ARM Versatile/Realview I2C bus support"
567 depends on ARCH_VERSATILE || ARCH_REALVIEW 567 depends on ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS
568 select I2C_ALGOBIT 568 select I2C_ALGOBIT
569 help 569 help
570 Say yes if you want to support the I2C serial bus on ARMs Versatile 570 Say yes if you want to support the I2C serial bus on ARMs Versatile
diff --git a/drivers/i2c/busses/i2c-octeon.c b/drivers/i2c/busses/i2c-octeon.c
index a2481f40ea1c..0e9f85d0a835 100644
--- a/drivers/i2c/busses/i2c-octeon.c
+++ b/drivers/i2c/busses/i2c-octeon.c
@@ -447,7 +447,7 @@ static struct i2c_adapter octeon_i2c_ops = {
447/** 447/**
448 * octeon_i2c_setclock - Calculate and set clock divisors. 448 * octeon_i2c_setclock - Calculate and set clock divisors.
449 */ 449 */
450static int __init octeon_i2c_setclock(struct octeon_i2c *i2c) 450static int __devinit octeon_i2c_setclock(struct octeon_i2c *i2c)
451{ 451{
452 int tclk, thp_base, inc, thp_idx, mdiv_idx, ndiv_idx, foscl, diff; 452 int tclk, thp_base, inc, thp_idx, mdiv_idx, ndiv_idx, foscl, diff;
453 int thp = 0x18, mdiv = 2, ndiv = 0, delta_hz = 1000000; 453 int thp = 0x18, mdiv = 2, ndiv = 0, delta_hz = 1000000;
@@ -490,7 +490,7 @@ static int __init octeon_i2c_setclock(struct octeon_i2c *i2c)
490 return 0; 490 return 0;
491} 491}
492 492
493static int __init octeon_i2c_initlowlevel(struct octeon_i2c *i2c) 493static int __devinit octeon_i2c_initlowlevel(struct octeon_i2c *i2c)
494{ 494{
495 u8 status; 495 u8 status;
496 int tries; 496 int tries;
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 3202a86f420e..7c469a62c3c1 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -40,12 +40,11 @@
40#include "i2c-core.h" 40#include "i2c-core.h"
41 41
42 42
43/* core_lock protects i2c_adapter_idr, userspace_devices, and guarantees 43/* core_lock protects i2c_adapter_idr, and guarantees
44 that device detection, deletion of detected devices, and attach_adapter 44 that device detection, deletion of detected devices, and attach_adapter
45 and detach_adapter calls are serialized */ 45 and detach_adapter calls are serialized */
46static DEFINE_MUTEX(core_lock); 46static DEFINE_MUTEX(core_lock);
47static DEFINE_IDR(i2c_adapter_idr); 47static DEFINE_IDR(i2c_adapter_idr);
48static LIST_HEAD(userspace_devices);
49 48
50static struct device_type i2c_client_type; 49static struct device_type i2c_client_type;
51static int i2c_check_addr(struct i2c_adapter *adapter, int addr); 50static int i2c_check_addr(struct i2c_adapter *adapter, int addr);
@@ -117,8 +116,10 @@ static int i2c_device_probe(struct device *dev)
117 dev_dbg(dev, "probe\n"); 116 dev_dbg(dev, "probe\n");
118 117
119 status = driver->probe(client, i2c_match_id(driver->id_table, client)); 118 status = driver->probe(client, i2c_match_id(driver->id_table, client));
120 if (status) 119 if (status) {
121 client->driver = NULL; 120 client->driver = NULL;
121 i2c_set_clientdata(client, NULL);
122 }
122 return status; 123 return status;
123} 124}
124 125
@@ -139,8 +140,10 @@ static int i2c_device_remove(struct device *dev)
139 dev->driver = NULL; 140 dev->driver = NULL;
140 status = 0; 141 status = 0;
141 } 142 }
142 if (status == 0) 143 if (status == 0) {
143 client->driver = NULL; 144 client->driver = NULL;
145 i2c_set_clientdata(client, NULL);
146 }
144 return status; 147 return status;
145} 148}
146 149
@@ -156,106 +159,130 @@ static void i2c_device_shutdown(struct device *dev)
156 driver->shutdown(client); 159 driver->shutdown(client);
157} 160}
158 161
159#ifdef CONFIG_SUSPEND 162#ifdef CONFIG_PM_SLEEP
160static int i2c_device_pm_suspend(struct device *dev) 163static int i2c_legacy_suspend(struct device *dev, pm_message_t mesg)
161{ 164{
162 const struct dev_pm_ops *pm; 165 struct i2c_client *client = i2c_verify_client(dev);
166 struct i2c_driver *driver;
163 167
164 if (!dev->driver) 168 if (!client || !dev->driver)
165 return 0; 169 return 0;
166 pm = dev->driver->pm; 170 driver = to_i2c_driver(dev->driver);
167 if (!pm || !pm->suspend) 171 if (!driver->suspend)
168 return 0; 172 return 0;
169 return pm->suspend(dev); 173 return driver->suspend(client, mesg);
170} 174}
171 175
172static int i2c_device_pm_resume(struct device *dev) 176static int i2c_legacy_resume(struct device *dev)
173{ 177{
174 const struct dev_pm_ops *pm; 178 struct i2c_client *client = i2c_verify_client(dev);
179 struct i2c_driver *driver;
175 180
176 if (!dev->driver) 181 if (!client || !dev->driver)
177 return 0; 182 return 0;
178 pm = dev->driver->pm; 183 driver = to_i2c_driver(dev->driver);
179 if (!pm || !pm->resume) 184 if (!driver->resume)
180 return 0; 185 return 0;
181 return pm->resume(dev); 186 return driver->resume(client);
182} 187}
183#else
184#define i2c_device_pm_suspend NULL
185#define i2c_device_pm_resume NULL
186#endif
187 188
188#ifdef CONFIG_PM_RUNTIME 189static int i2c_device_pm_suspend(struct device *dev)
189static int i2c_device_runtime_suspend(struct device *dev)
190{ 190{
191 const struct dev_pm_ops *pm; 191 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
192 192
193 if (!dev->driver) 193 if (pm_runtime_suspended(dev))
194 return 0; 194 return 0;
195 pm = dev->driver->pm;
196 if (!pm || !pm->runtime_suspend)
197 return 0;
198 return pm->runtime_suspend(dev);
199}
200 195
201static int i2c_device_runtime_resume(struct device *dev) 196 if (pm)
202{ 197 return pm->suspend ? pm->suspend(dev) : 0;
203 const struct dev_pm_ops *pm;
204 198
205 if (!dev->driver) 199 return i2c_legacy_suspend(dev, PMSG_SUSPEND);
206 return 0;
207 pm = dev->driver->pm;
208 if (!pm || !pm->runtime_resume)
209 return 0;
210 return pm->runtime_resume(dev);
211} 200}
212 201
213static int i2c_device_runtime_idle(struct device *dev) 202static int i2c_device_pm_resume(struct device *dev)
214{ 203{
215 const struct dev_pm_ops *pm = NULL; 204 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
216 int ret; 205 int ret;
217 206
218 if (dev->driver) 207 if (pm)
219 pm = dev->driver->pm; 208 ret = pm->resume ? pm->resume(dev) : 0;
220 if (pm && pm->runtime_idle) { 209 else
221 ret = pm->runtime_idle(dev); 210 ret = i2c_legacy_resume(dev);
222 if (ret) 211
223 return ret; 212 if (!ret) {
213 pm_runtime_disable(dev);
214 pm_runtime_set_active(dev);
215 pm_runtime_enable(dev);
224 } 216 }
225 217
226 return pm_runtime_suspend(dev); 218 return ret;
227} 219}
228#else
229#define i2c_device_runtime_suspend NULL
230#define i2c_device_runtime_resume NULL
231#define i2c_device_runtime_idle NULL
232#endif
233 220
234static int i2c_device_suspend(struct device *dev, pm_message_t mesg) 221static int i2c_device_pm_freeze(struct device *dev)
235{ 222{
236 struct i2c_client *client = i2c_verify_client(dev); 223 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
237 struct i2c_driver *driver;
238 224
239 if (!client || !dev->driver) 225 if (pm_runtime_suspended(dev))
240 return 0; 226 return 0;
241 driver = to_i2c_driver(dev->driver); 227
242 if (!driver->suspend) 228 if (pm)
243 return 0; 229 return pm->freeze ? pm->freeze(dev) : 0;
244 return driver->suspend(client, mesg); 230
231 return i2c_legacy_suspend(dev, PMSG_FREEZE);
245} 232}
246 233
247static int i2c_device_resume(struct device *dev) 234static int i2c_device_pm_thaw(struct device *dev)
248{ 235{
249 struct i2c_client *client = i2c_verify_client(dev); 236 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
250 struct i2c_driver *driver;
251 237
252 if (!client || !dev->driver) 238 if (pm_runtime_suspended(dev))
253 return 0; 239 return 0;
254 driver = to_i2c_driver(dev->driver); 240
255 if (!driver->resume) 241 if (pm)
242 return pm->thaw ? pm->thaw(dev) : 0;
243
244 return i2c_legacy_resume(dev);
245}
246
247static int i2c_device_pm_poweroff(struct device *dev)
248{
249 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
250
251 if (pm_runtime_suspended(dev))
256 return 0; 252 return 0;
257 return driver->resume(client); 253
254 if (pm)
255 return pm->poweroff ? pm->poweroff(dev) : 0;
256
257 return i2c_legacy_suspend(dev, PMSG_HIBERNATE);
258}
259
260static int i2c_device_pm_restore(struct device *dev)
261{
262 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
263 int ret;
264
265 if (pm)
266 ret = pm->restore ? pm->restore(dev) : 0;
267 else
268 ret = i2c_legacy_resume(dev);
269
270 if (!ret) {
271 pm_runtime_disable(dev);
272 pm_runtime_set_active(dev);
273 pm_runtime_enable(dev);
274 }
275
276 return ret;
258} 277}
278#else /* !CONFIG_PM_SLEEP */
279#define i2c_device_pm_suspend NULL
280#define i2c_device_pm_resume NULL
281#define i2c_device_pm_freeze NULL
282#define i2c_device_pm_thaw NULL
283#define i2c_device_pm_poweroff NULL
284#define i2c_device_pm_restore NULL
285#endif /* !CONFIG_PM_SLEEP */
259 286
260static void i2c_client_dev_release(struct device *dev) 287static void i2c_client_dev_release(struct device *dev)
261{ 288{
@@ -298,9 +325,15 @@ static const struct attribute_group *i2c_dev_attr_groups[] = {
298static const struct dev_pm_ops i2c_device_pm_ops = { 325static const struct dev_pm_ops i2c_device_pm_ops = {
299 .suspend = i2c_device_pm_suspend, 326 .suspend = i2c_device_pm_suspend,
300 .resume = i2c_device_pm_resume, 327 .resume = i2c_device_pm_resume,
301 .runtime_suspend = i2c_device_runtime_suspend, 328 .freeze = i2c_device_pm_freeze,
302 .runtime_resume = i2c_device_runtime_resume, 329 .thaw = i2c_device_pm_thaw,
303 .runtime_idle = i2c_device_runtime_idle, 330 .poweroff = i2c_device_pm_poweroff,
331 .restore = i2c_device_pm_restore,
332 SET_RUNTIME_PM_OPS(
333 pm_generic_runtime_suspend,
334 pm_generic_runtime_resume,
335 pm_generic_runtime_idle
336 )
304}; 337};
305 338
306struct bus_type i2c_bus_type = { 339struct bus_type i2c_bus_type = {
@@ -309,8 +342,6 @@ struct bus_type i2c_bus_type = {
309 .probe = i2c_device_probe, 342 .probe = i2c_device_probe,
310 .remove = i2c_device_remove, 343 .remove = i2c_device_remove,
311 .shutdown = i2c_device_shutdown, 344 .shutdown = i2c_device_shutdown,
312 .suspend = i2c_device_suspend,
313 .resume = i2c_device_resume,
314 .pm = &i2c_device_pm_ops, 345 .pm = &i2c_device_pm_ops,
315}; 346};
316EXPORT_SYMBOL_GPL(i2c_bus_type); 347EXPORT_SYMBOL_GPL(i2c_bus_type);
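With the bus routed through dev_pm_ops above (and falling back to the legacy i2c suspend/resume callbacks only when a driver supplies no pm table), a client driver opts into the new path by pointing .driver.pm at its own dev_pm_ops. A hedged sketch of such a client; foo and its callbacks are placeholders, not an existing driver:

#include <linux/i2c.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev)
{
        /* quiesce the chip; dev is the i2c client's embedded struct device */
        return 0;
}

static int foo_resume(struct device *dev)
{
        /* restore register state */
        return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
        .suspend = foo_suspend,
        .resume  = foo_resume,
        /* freeze/thaw/poweroff/restore left NULL: the bus callbacks above
         * treat a missing handler as "nothing to do". */
};

static const struct i2c_device_id foo_ids[] = {
        { "foo", 0 },
        { }
};

static struct i2c_driver foo_driver = {
        .driver = {
                .name = "foo",
                .pm   = &foo_pm_ops,    /* picked up by i2c_device_pm_* */
        },
        .id_table = foo_ids,
        /* .probe and .remove omitted from the sketch */
};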
@@ -538,9 +569,9 @@ i2c_sysfs_new_device(struct device *dev, struct device_attribute *attr,
538 return -EEXIST; 569 return -EEXIST;
539 570
540 /* Keep track of the added device */ 571 /* Keep track of the added device */
541 mutex_lock(&core_lock); 572 i2c_lock_adapter(adap);
542 list_add_tail(&client->detected, &userspace_devices); 573 list_add_tail(&client->detected, &adap->userspace_clients);
543 mutex_unlock(&core_lock); 574 i2c_unlock_adapter(adap);
544 dev_info(dev, "%s: Instantiated device %s at 0x%02hx\n", "new_device", 575 dev_info(dev, "%s: Instantiated device %s at 0x%02hx\n", "new_device",
545 info.type, info.addr); 576 info.type, info.addr);
546 577
@@ -579,9 +610,10 @@ i2c_sysfs_delete_device(struct device *dev, struct device_attribute *attr,
579 610
580 /* Make sure the device was added through sysfs */ 611 /* Make sure the device was added through sysfs */
581 res = -ENOENT; 612 res = -ENOENT;
582 mutex_lock(&core_lock); 613 i2c_lock_adapter(adap);
583 list_for_each_entry_safe(client, next, &userspace_devices, detected) { 614 list_for_each_entry_safe(client, next, &adap->userspace_clients,
584 if (client->addr == addr && client->adapter == adap) { 615 detected) {
616 if (client->addr == addr) {
585 dev_info(dev, "%s: Deleting device %s at 0x%02hx\n", 617 dev_info(dev, "%s: Deleting device %s at 0x%02hx\n",
586 "delete_device", client->name, client->addr); 618 "delete_device", client->name, client->addr);
587 619
@@ -591,7 +623,7 @@ i2c_sysfs_delete_device(struct device *dev, struct device_attribute *attr,
591 break; 623 break;
592 } 624 }
593 } 625 }
594 mutex_unlock(&core_lock); 626 i2c_unlock_adapter(adap);
595 627
596 if (res < 0) 628 if (res < 0)
597 dev_err(dev, "%s: Can't find device in list\n", 629 dev_err(dev, "%s: Can't find device in list\n",
@@ -673,6 +705,7 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
673 } 705 }
674 706
675 rt_mutex_init(&adap->bus_lock); 707 rt_mutex_init(&adap->bus_lock);
708 INIT_LIST_HEAD(&adap->userspace_clients);
676 709
677 /* Set default timeout to 1 second if not already set */ 710 /* Set default timeout to 1 second if not already set */
678 if (adap->timeout == 0) 711 if (adap->timeout == 0)
@@ -875,14 +908,15 @@ int i2c_del_adapter(struct i2c_adapter *adap)
875 return res; 908 return res;
876 909
877 /* Remove devices instantiated from sysfs */ 910 /* Remove devices instantiated from sysfs */
878 list_for_each_entry_safe(client, next, &userspace_devices, detected) { 911 i2c_lock_adapter(adap);
879 if (client->adapter == adap) { 912 list_for_each_entry_safe(client, next, &adap->userspace_clients,
880 dev_dbg(&adap->dev, "Removing %s at 0x%x\n", 913 detected) {
881 client->name, client->addr); 914 dev_dbg(&adap->dev, "Removing %s at 0x%x\n", client->name,
882 list_del(&client->detected); 915 client->addr);
883 i2c_unregister_device(client); 916 list_del(&client->detected);
884 } 917 i2c_unregister_device(client);
885 } 918 }
919 i2c_unlock_adapter(adap);
886 920
887 /* Detach any active clients. This can't fail, thus we do not 921 /* Detach any active clients. This can't fail, thus we do not
 888 check the returned value. */ 922 check the returned value. */
@@ -1260,12 +1294,23 @@ static int i2c_detect_address(struct i2c_client *temp_client,
1260 return 0; 1294 return 0;
1261 1295
1262 /* Make sure there is something at this address */ 1296 /* Make sure there is something at this address */
1263 if (i2c_smbus_xfer(adapter, addr, 0, 0, 0, I2C_SMBUS_QUICK, NULL) < 0) 1297 if (addr == 0x73 && (adapter->class & I2C_CLASS_HWMON)) {
1264 return 0; 1298 /* Special probe for FSC hwmon chips */
1299 union i2c_smbus_data dummy;
1265 1300
1266 /* Prevent 24RF08 corruption */ 1301 if (i2c_smbus_xfer(adapter, addr, 0, I2C_SMBUS_READ, 0,
1267 if ((addr & ~0x0f) == 0x50) 1302 I2C_SMBUS_BYTE_DATA, &dummy) < 0)
1268 i2c_smbus_xfer(adapter, addr, 0, 0, 0, I2C_SMBUS_QUICK, NULL); 1303 return 0;
1304 } else {
1305 if (i2c_smbus_xfer(adapter, addr, 0, I2C_SMBUS_WRITE, 0,
1306 I2C_SMBUS_QUICK, NULL) < 0)
1307 return 0;
1308
1309 /* Prevent 24RF08 corruption */
1310 if ((addr & ~0x0f) == 0x50)
1311 i2c_smbus_xfer(adapter, addr, 0, I2C_SMBUS_WRITE, 0,
1312 I2C_SMBUS_QUICK, NULL);
1313 }
1269 1314
1270 /* Finally call the custom detection function */ 1315 /* Finally call the custom detection function */
1271 memset(&info, 0, sizeof(struct i2c_board_info)); 1316 memset(&info, 0, sizeof(struct i2c_board_info));
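The detection change above keeps two probe styles: most addresses get a harmless SMBus quick write, while address 0x73 on hwmon-class adapters is probed with a byte-data read for the FSC hwmon chips that live there, and the quick-write probe is skipped for them. The same choice, reduced to a small helper (a sketch, not a drop-in for i2c_detect_address()):

#include <linux/i2c.h>

static int foo_addr_responds(struct i2c_adapter *adap, unsigned short addr)
{
        union i2c_smbus_data dummy;

        if (addr == 0x73 && (adap->class & I2C_CLASS_HWMON))
                /* FSC hwmon chips: read a byte instead of a quick write */
                return i2c_smbus_xfer(adap, addr, 0, I2C_SMBUS_READ, 0,
                                      I2C_SMBUS_BYTE_DATA, &dummy) >= 0;

        /* Default probe: a quick write, which most chips tolerate */
        return i2c_smbus_xfer(adap, addr, 0, I2C_SMBUS_WRITE, 0,
                              I2C_SMBUS_QUICK, NULL) >= 0;
}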
diff --git a/drivers/ide/ide-cs.c b/drivers/ide/ide-cs.c
index ab87e4f7cec9..0b7815d2581c 100644
--- a/drivers/ide/ide-cs.c
+++ b/drivers/ide/ide-cs.c
@@ -65,8 +65,7 @@ MODULE_LICENSE("Dual MPL/GPL");
65typedef struct ide_info_t { 65typedef struct ide_info_t {
66 struct pcmcia_device *p_dev; 66 struct pcmcia_device *p_dev;
67 struct ide_host *host; 67 struct ide_host *host;
68 int ndev; 68 int ndev;
69 dev_node_t node;
70} ide_info_t; 69} ide_info_t;
71 70
72static void ide_release(struct pcmcia_device *); 71static void ide_release(struct pcmcia_device *);
@@ -102,7 +101,6 @@ static int ide_probe(struct pcmcia_device *link)
102 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; 101 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
103 link->io.Attributes2 = IO_DATA_PATH_WIDTH_8; 102 link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
104 link->io.IOAddrLines = 3; 103 link->io.IOAddrLines = 3;
105 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
106 link->conf.Attributes = CONF_ENABLE_IRQ; 104 link->conf.Attributes = CONF_ENABLE_IRQ;
107 link->conf.IntType = INT_MEMORY_AND_IO; 105 link->conf.IntType = INT_MEMORY_AND_IO;
108 106
@@ -285,8 +283,7 @@ static int ide_config(struct pcmcia_device *link)
285 io_base = link->io.BasePort1; 283 io_base = link->io.BasePort1;
286 ctl_base = stk->ctl_base; 284 ctl_base = stk->ctl_base;
287 285
288 ret = pcmcia_request_irq(link, &link->irq); 286 if (!link->irq)
289 if (ret)
290 goto failed; 287 goto failed;
291 ret = pcmcia_request_configuration(link, &link->conf); 288 ret = pcmcia_request_configuration(link, &link->conf);
292 if (ret) 289 if (ret)
@@ -299,24 +296,21 @@ static int ide_config(struct pcmcia_device *link)
299 if (is_kme) 296 if (is_kme)
300 outb(0x81, ctl_base+1); 297 outb(0x81, ctl_base+1);
301 298
302 host = idecs_register(io_base, ctl_base, link->irq.AssignedIRQ, link); 299 host = idecs_register(io_base, ctl_base, link->irq, link);
303 if (host == NULL && link->io.NumPorts1 == 0x20) { 300 if (host == NULL && link->io.NumPorts1 == 0x20) {
304 outb(0x02, ctl_base + 0x10); 301 outb(0x02, ctl_base + 0x10);
305 host = idecs_register(io_base + 0x10, ctl_base + 0x10, 302 host = idecs_register(io_base + 0x10, ctl_base + 0x10,
306 link->irq.AssignedIRQ, link); 303 link->irq, link);
307 } 304 }
308 305
309 if (host == NULL) 306 if (host == NULL)
310 goto failed; 307 goto failed;
311 308
312 info->ndev = 1; 309 info->ndev = 1;
313 sprintf(info->node.dev_name, "hd%c", 'a' + host->ports[0]->index * 2);
314 info->node.major = host->ports[0]->major;
315 info->node.minor = 0;
316 info->host = host; 310 info->host = host;
317 link->dev_node = &info->node; 311 dev_info(&link->dev, "ide-cs: hd%c: Vpp = %d.%d\n",
318 printk(KERN_INFO "ide-cs: %s: Vpp = %d.%d\n", 312 'a' + host->ports[0]->index * 2,
319 info->node.dev_name, link->conf.Vpp / 10, link->conf.Vpp % 10); 313 link->conf.Vpp / 10, link->conf.Vpp % 10);
320 314
321 kfree(stk); 315 kfree(stk);
322 return 0; 316 return 0;
@@ -409,6 +403,8 @@ static struct pcmcia_device_id ide_ids[] = {
409 PCMCIA_DEVICE_PROD_ID12("Hyperstone", "Model1", 0x3d5b9ef5, 0xca6ab420), 403 PCMCIA_DEVICE_PROD_ID12("Hyperstone", "Model1", 0x3d5b9ef5, 0xca6ab420),
410 PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178), 404 PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178),
411 PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753), 405 PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753),
406 PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 1GB", 0x2e6d1829, 0x55d5bffb),
407 PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 4GB", 0x2e6d1829, 0x531e7d10),
412 PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF8GB", 0x2e6d1829, 0xacbe682e), 408 PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF8GB", 0x2e6d1829, 0xacbe682e),
413 PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2 ", 0x547e66dc, 0x8671043b), 409 PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2 ", 0x547e66dc, 0x8671043b),
414 PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149), 410 PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149),
@@ -429,6 +425,8 @@ static struct pcmcia_device_id ide_ids[] = {
429 PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF80", 0x709b1bf1, 0x2a54d4b1), 425 PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF80", 0x709b1bf1, 0x2a54d4b1),
430 PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS2GCF120", 0x709b1bf1, 0x969aa4f2), 426 PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS2GCF120", 0x709b1bf1, 0x969aa4f2),
431 PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF120", 0x709b1bf1, 0xf54a91c8), 427 PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF120", 0x709b1bf1, 0xf54a91c8),
428 PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF133", 0x709b1bf1, 0x7558f133),
429 PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS8GCF133", 0x709b1bf1, 0xb2f89b47),
432 PCMCIA_DEVICE_PROD_ID12("WIT", "IDE16", 0x244e5994, 0x3e232852), 430 PCMCIA_DEVICE_PROD_ID12("WIT", "IDE16", 0x244e5994, 0x3e232852),
433 PCMCIA_DEVICE_PROD_ID12("WEIDA", "TWTTI", 0xcc7cf69c, 0x212bb918), 431 PCMCIA_DEVICE_PROD_ID12("WEIDA", "TWTTI", 0xcc7cf69c, 0x212bb918),
434 PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209), 432 PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209),
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 975adce5f40c..330d2a423362 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -46,6 +46,7 @@ source "drivers/infiniband/hw/ipath/Kconfig"
46source "drivers/infiniband/hw/ehca/Kconfig" 46source "drivers/infiniband/hw/ehca/Kconfig"
47source "drivers/infiniband/hw/amso1100/Kconfig" 47source "drivers/infiniband/hw/amso1100/Kconfig"
48source "drivers/infiniband/hw/cxgb3/Kconfig" 48source "drivers/infiniband/hw/cxgb3/Kconfig"
49source "drivers/infiniband/hw/cxgb4/Kconfig"
49source "drivers/infiniband/hw/mlx4/Kconfig" 50source "drivers/infiniband/hw/mlx4/Kconfig"
50source "drivers/infiniband/hw/nes/Kconfig" 51source "drivers/infiniband/hw/nes/Kconfig"
51 52
diff --git a/drivers/infiniband/Makefile b/drivers/infiniband/Makefile
index ed35e4496241..0c4e589d746e 100644
--- a/drivers/infiniband/Makefile
+++ b/drivers/infiniband/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_INFINIBAND_IPATH) += hw/ipath/
4obj-$(CONFIG_INFINIBAND_EHCA) += hw/ehca/ 4obj-$(CONFIG_INFINIBAND_EHCA) += hw/ehca/
5obj-$(CONFIG_INFINIBAND_AMSO1100) += hw/amso1100/ 5obj-$(CONFIG_INFINIBAND_AMSO1100) += hw/amso1100/
6obj-$(CONFIG_INFINIBAND_CXGB3) += hw/cxgb3/ 6obj-$(CONFIG_INFINIBAND_CXGB3) += hw/cxgb3/
7obj-$(CONFIG_INFINIBAND_CXGB4) += hw/cxgb4/
7obj-$(CONFIG_MLX4_INFINIBAND) += hw/mlx4/ 8obj-$(CONFIG_MLX4_INFINIBAND) += hw/mlx4/
8obj-$(CONFIG_INFINIBAND_NES) += hw/nes/ 9obj-$(CONFIG_INFINIBAND_NES) += hw/nes/
9obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/ 10obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 6d777069d86d..b930b8110a63 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -79,7 +79,6 @@ static DEFINE_IDR(sdp_ps);
79static DEFINE_IDR(tcp_ps); 79static DEFINE_IDR(tcp_ps);
80static DEFINE_IDR(udp_ps); 80static DEFINE_IDR(udp_ps);
81static DEFINE_IDR(ipoib_ps); 81static DEFINE_IDR(ipoib_ps);
82static int next_port;
83 82
84struct cma_device { 83struct cma_device {
85 struct list_head list; 84 struct list_head list;
@@ -1677,13 +1676,13 @@ int rdma_set_ib_paths(struct rdma_cm_id *id,
1677 if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_RESOLVED)) 1676 if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_RESOLVED))
1678 return -EINVAL; 1677 return -EINVAL;
1679 1678
1680 id->route.path_rec = kmalloc(sizeof *path_rec * num_paths, GFP_KERNEL); 1679 id->route.path_rec = kmemdup(path_rec, sizeof *path_rec * num_paths,
1680 GFP_KERNEL);
1681 if (!id->route.path_rec) { 1681 if (!id->route.path_rec) {
1682 ret = -ENOMEM; 1682 ret = -ENOMEM;
1683 goto err; 1683 goto err;
1684 } 1684 }
1685 1685
1686 memcpy(id->route.path_rec, path_rec, sizeof *path_rec * num_paths);
1687 id->route.num_paths = num_paths; 1686 id->route.num_paths = num_paths;
1688 return 0; 1687 return 0;
1689err: 1688err:
@@ -1970,47 +1969,33 @@ err1:
1970 1969
1971static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv) 1970static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
1972{ 1971{
1973 struct rdma_bind_list *bind_list; 1972 static unsigned int last_used_port;
1974 int port, ret, low, high; 1973 int low, high, remaining;
1975 1974 unsigned int rover;
1976 bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
1977 if (!bind_list)
1978 return -ENOMEM;
1979
1980retry:
1981 /* FIXME: add proper port randomization per like inet_csk_get_port */
1982 do {
1983 ret = idr_get_new_above(ps, bind_list, next_port, &port);
1984 } while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));
1985
1986 if (ret)
1987 goto err1;
1988 1975
1989 inet_get_local_port_range(&low, &high); 1976 inet_get_local_port_range(&low, &high);
1990 if (port > high) { 1977 remaining = (high - low) + 1;
1991 if (next_port != low) { 1978 rover = net_random() % remaining + low;
1992 idr_remove(ps, port); 1979retry:
1993 next_port = low; 1980 if (last_used_port != rover &&
1994 goto retry; 1981 !idr_find(ps, (unsigned short) rover)) {
1995 } 1982 int ret = cma_alloc_port(ps, id_priv, rover);
1996 ret = -EADDRNOTAVAIL; 1983 /*
1997 goto err2; 1984 * Remember previously used port number in order to avoid
1985 * re-using same port immediately after it is closed.
1986 */
1987 if (!ret)
1988 last_used_port = rover;
1989 if (ret != -EADDRNOTAVAIL)
1990 return ret;
1998 } 1991 }
1999 1992 if (--remaining) {
2000 if (port == high) 1993 rover++;
2001 next_port = low; 1994 if ((rover < low) || (rover > high))
2002 else 1995 rover = low;
2003 next_port = port + 1; 1996 goto retry;
2004 1997 }
2005 bind_list->ps = ps; 1998 return -EADDRNOTAVAIL;
2006 bind_list->port = (unsigned short) port;
2007 cma_bind_port(bind_list, id_priv);
2008 return 0;
2009err2:
2010 idr_remove(ps, port);
2011err1:
2012 kfree(bind_list);
2013 return ret;
2014} 1999}
2015 2000
2016static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv) 2001static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
@@ -2995,12 +2980,7 @@ static void cma_remove_one(struct ib_device *device)
2995 2980
2996static int __init cma_init(void) 2981static int __init cma_init(void)
2997{ 2982{
2998 int ret, low, high, remaining; 2983 int ret;
2999
3000 get_random_bytes(&next_port, sizeof next_port);
3001 inet_get_local_port_range(&low, &high);
3002 remaining = (high - low) + 1;
3003 next_port = ((unsigned int) next_port % remaining) + low;
3004 2984
3005 cma_wq = create_singlethread_workqueue("rdma_cm"); 2985 cma_wq = create_singlethread_workqueue("rdma_cm");
3006 if (!cma_wq) 2986 if (!cma_wq)
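The new allocator above starts from a random port in the local range and walks forward until it finds a slot that is free in the idr and different from the last port handed out. The same search, reduced to a standalone sketch with a stand-in is_port_in_use() predicate (userspace C, illustrative only):

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Stand-in for the idr lookup: pretend a single port is taken. */
static int is_port_in_use(unsigned int port)
{
        return port == 50000;
}

static long pick_port(unsigned int low, unsigned int high,
                      unsigned int last_used_port)
{
        unsigned int remaining = high - low + 1;
        unsigned int rover = (unsigned int)rand() % remaining + low;

        while (remaining--) {
                if (rover != last_used_port && !is_port_in_use(rover))
                        return rover;   /* caller records this as last used */
                if (++rover > high)
                        rover = low;    /* wrap around the range */
        }
        return -1;                      /* -EADDRNOTAVAIL in the kernel */
}

int main(void)
{
        srand((unsigned int)time(NULL));
        printf("picked %ld\n", pick_port(49152, 65535, 0));
        return 0;
}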
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 1df1194aeba4..6dc7b77d5d29 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -291,13 +291,11 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
291 } 291 }
292 292
293 if (mad_reg_req) { 293 if (mad_reg_req) {
294 reg_req = kmalloc(sizeof *reg_req, GFP_KERNEL); 294 reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
295 if (!reg_req) { 295 if (!reg_req) {
296 ret = ERR_PTR(-ENOMEM); 296 ret = ERR_PTR(-ENOMEM);
297 goto error3; 297 goto error3;
298 } 298 }
299 /* Make a copy of the MAD registration request */
300 memcpy(reg_req, mad_reg_req, sizeof *reg_req);
301 } 299 }
302 300
303 /* Now, fill in the various structures */ 301 /* Now, fill in the various structures */
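kmemdup() is the allocate-and-copy helper used in the hunk above (and in the cma.c path_rec hunk earlier): one call replaces the kmalloc()+memcpy() pair and still returns NULL on allocation failure. A minimal before/after sketch with an illustrative struct:

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

struct foo_req {
        u32 fields[8];
};

static struct foo_req *foo_copy_req(const struct foo_req *src)
{
        /* Before: p = kmalloc(sizeof(*p), GFP_KERNEL);
         *         if (p) memcpy(p, src, sizeof(*p));
         * After, in one step: */
        return kmemdup(src, sizeof(*src), GFP_KERNEL);
}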
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 512b1c43460c..46474842cfe9 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -1181,7 +1181,7 @@ static int ib_ucm_open(struct inode *inode, struct file *filp)
1181 file->filp = filp; 1181 file->filp = filp;
1182 file->device = container_of(inode->i_cdev, struct ib_ucm_device, cdev); 1182 file->device = container_of(inode->i_cdev, struct ib_ucm_device, cdev);
1183 1183
1184 return 0; 1184 return nonseekable_open(inode, filp);
1185} 1185}
1186 1186
1187static int ib_ucm_close(struct inode *inode, struct file *filp) 1187static int ib_ucm_close(struct inode *inode, struct file *filp)
@@ -1229,6 +1229,7 @@ static const struct file_operations ucm_fops = {
1229 .release = ib_ucm_close, 1229 .release = ib_ucm_close,
1230 .write = ib_ucm_write, 1230 .write = ib_ucm_write,
1231 .poll = ib_ucm_poll, 1231 .poll = ib_ucm_poll,
1232 .llseek = no_llseek,
1232}; 1233};
1233 1234
1234static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr, 1235static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
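The llseek changes in the ucm/ucma/umad/uverbs files all pair the same two pieces: open() ends with nonseekable_open(), which clears the seek/pread/pwrite modes on the file, and .llseek is wired to no_llseek so seeking fails instead of silently moving a file position. A hedged sketch of that pairing on a made-up character device:

#include <linux/fs.h>
#include <linux/module.h>

static int foo_open(struct inode *inode, struct file *filp)
{
        /* per-open state setup would go here */

        return nonseekable_open(inode, filp);   /* mark the file non-seekable */
}

static ssize_t foo_write(struct file *filp, const char __user *buf,
                         size_t len, loff_t *pos)
{
        return len;             /* sketch only: claim the data was consumed */
}

static const struct file_operations foo_fops = {
        .owner  = THIS_MODULE,
        .open   = foo_open,
        .write  = foo_write,
        .llseek = no_llseek,    /* seeking returns -ESPIPE */
};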
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 46185084121e..ac7edc24165c 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -1220,7 +1220,8 @@ static int ucma_open(struct inode *inode, struct file *filp)
1220 1220
1221 filp->private_data = file; 1221 filp->private_data = file;
1222 file->filp = filp; 1222 file->filp = filp;
1223 return 0; 1223
1224 return nonseekable_open(inode, filp);
1224} 1225}
1225 1226
1226static int ucma_close(struct inode *inode, struct file *filp) 1227static int ucma_close(struct inode *inode, struct file *filp)
@@ -1250,6 +1251,7 @@ static const struct file_operations ucma_fops = {
1250 .release = ucma_close, 1251 .release = ucma_close,
1251 .write = ucma_write, 1252 .write = ucma_write,
1252 .poll = ucma_poll, 1253 .poll = ucma_poll,
1254 .llseek = no_llseek,
1253}; 1255};
1254 1256
1255static struct miscdevice ucma_misc = { 1257static struct miscdevice ucma_misc = {
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index e7db054fb1c8..6babb72b39fc 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -781,7 +781,7 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
781{ 781{
782 struct ib_umad_port *port; 782 struct ib_umad_port *port;
783 struct ib_umad_file *file; 783 struct ib_umad_file *file;
784 int ret = 0; 784 int ret;
785 785
786 port = container_of(inode->i_cdev, struct ib_umad_port, cdev); 786 port = container_of(inode->i_cdev, struct ib_umad_port, cdev);
787 if (port) 787 if (port)
@@ -814,6 +814,8 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
814 814
815 list_add_tail(&file->port_list, &port->file_list); 815 list_add_tail(&file->port_list, &port->file_list);
816 816
817 ret = nonseekable_open(inode, filp);
818
817out: 819out:
818 mutex_unlock(&port->file_mutex); 820 mutex_unlock(&port->file_mutex);
819 return ret; 821 return ret;
@@ -866,7 +868,8 @@ static const struct file_operations umad_fops = {
866 .compat_ioctl = ib_umad_compat_ioctl, 868 .compat_ioctl = ib_umad_compat_ioctl,
867#endif 869#endif
868 .open = ib_umad_open, 870 .open = ib_umad_open,
869 .release = ib_umad_close 871 .release = ib_umad_close,
872 .llseek = no_llseek,
870}; 873};
871 874
872static int ib_umad_sm_open(struct inode *inode, struct file *filp) 875static int ib_umad_sm_open(struct inode *inode, struct file *filp)
@@ -903,7 +906,7 @@ static int ib_umad_sm_open(struct inode *inode, struct file *filp)
903 906
904 filp->private_data = port; 907 filp->private_data = port;
905 908
906 return 0; 909 return nonseekable_open(inode, filp);
907 910
908fail: 911fail:
909 kref_put(&port->umad_dev->ref, ib_umad_release_dev); 912 kref_put(&port->umad_dev->ref, ib_umad_release_dev);
@@ -933,7 +936,8 @@ static int ib_umad_sm_close(struct inode *inode, struct file *filp)
933static const struct file_operations umad_sm_fops = { 936static const struct file_operations umad_sm_fops = {
934 .owner = THIS_MODULE, 937 .owner = THIS_MODULE,
935 .open = ib_umad_sm_open, 938 .open = ib_umad_sm_open,
936 .release = ib_umad_sm_close 939 .release = ib_umad_sm_close,
940 .llseek = no_llseek,
937}; 941};
938 942
939static struct ib_client umad_client = { 943static struct ib_client umad_client = {
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index fb3526254426..ec83e9fe387b 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -369,7 +369,8 @@ static const struct file_operations uverbs_event_fops = {
369 .read = ib_uverbs_event_read, 369 .read = ib_uverbs_event_read,
370 .poll = ib_uverbs_event_poll, 370 .poll = ib_uverbs_event_poll,
371 .release = ib_uverbs_event_close, 371 .release = ib_uverbs_event_close,
372 .fasync = ib_uverbs_event_fasync 372 .fasync = ib_uverbs_event_fasync,
373 .llseek = no_llseek,
373}; 374};
374 375
375void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context) 376void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
@@ -623,7 +624,7 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
623 624
624 filp->private_data = file; 625 filp->private_data = file;
625 626
626 return 0; 627 return nonseekable_open(inode, filp);
627 628
628err_module: 629err_module:
629 module_put(dev->ib_dev->owner); 630 module_put(dev->ib_dev->owner);
@@ -651,7 +652,8 @@ static const struct file_operations uverbs_fops = {
651 .owner = THIS_MODULE, 652 .owner = THIS_MODULE,
652 .write = ib_uverbs_write, 653 .write = ib_uverbs_write,
653 .open = ib_uverbs_open, 654 .open = ib_uverbs_open,
654 .release = ib_uverbs_close 655 .release = ib_uverbs_close,
656 .llseek = no_llseek,
655}; 657};
656 658
657static const struct file_operations uverbs_mmap_fops = { 659static const struct file_operations uverbs_mmap_fops = {
@@ -659,7 +661,8 @@ static const struct file_operations uverbs_mmap_fops = {
659 .write = ib_uverbs_write, 661 .write = ib_uverbs_write,
660 .mmap = ib_uverbs_mmap, 662 .mmap = ib_uverbs_mmap,
661 .open = ib_uverbs_open, 663 .open = ib_uverbs_open,
662 .release = ib_uverbs_close 664 .release = ib_uverbs_close,
665 .llseek = no_llseek,
663}; 666};
664 667
665static struct ib_client uverbs_client = { 668static struct ib_client uverbs_client = {
diff --git a/drivers/infiniband/hw/amso1100/c2.h b/drivers/infiniband/hw/amso1100/c2.h
index f7ff66f98361..6ae698e68775 100644
--- a/drivers/infiniband/hw/amso1100/c2.h
+++ b/drivers/infiniband/hw/amso1100/c2.h
@@ -250,7 +250,7 @@ struct c2_array {
250struct sp_chunk { 250struct sp_chunk {
251 struct sp_chunk *next; 251 struct sp_chunk *next;
252 dma_addr_t dma_addr; 252 dma_addr_t dma_addr;
253 DECLARE_PCI_UNMAP_ADDR(mapping); 253 DEFINE_DMA_UNMAP_ADDR(mapping);
254 u16 head; 254 u16 head;
255 u16 shared_ptr[0]; 255 u16 shared_ptr[0];
256}; 256};
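DEFINE_DMA_UNMAP_ADDR()/dma_unmap_addr_set()/dma_unmap_addr() are the bus-agnostic replacements for the old DECLARE_PCI_UNMAP_ADDR family used throughout the amso1100 and cxgb3 hunks; the stored handle only occupies space on configurations that actually need it at unmap time. The hunks apply it to coherent buffers; the sketch below shows the more common streaming-mapping shape (names illustrative):

#include <linux/dma-mapping.h>
#include <linux/errno.h>

struct foo_buf {
        void *cpu_addr;
        size_t len;
        DEFINE_DMA_UNMAP_ADDR(mapping); /* may compile away to nothing */
};

static int foo_map(struct device *dev, struct foo_buf *b)
{
        dma_addr_t dma = dma_map_single(dev, b->cpu_addr, b->len, DMA_TO_DEVICE);

        if (dma_mapping_error(dev, dma))
                return -ENOMEM;

        /* Stash the handle for the unmap path. */
        dma_unmap_addr_set(b, mapping, dma);
        return 0;
}

static void foo_unmap(struct device *dev, struct foo_buf *b)
{
        dma_unmap_single(dev, dma_unmap_addr(b, mapping), b->len, DMA_TO_DEVICE);
}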
diff --git a/drivers/infiniband/hw/amso1100/c2_alloc.c b/drivers/infiniband/hw/amso1100/c2_alloc.c
index d4f5f5d42e90..78d247ec6961 100644
--- a/drivers/infiniband/hw/amso1100/c2_alloc.c
+++ b/drivers/infiniband/hw/amso1100/c2_alloc.c
@@ -49,7 +49,7 @@ static int c2_alloc_mqsp_chunk(struct c2_dev *c2dev, gfp_t gfp_mask,
49 return -ENOMEM; 49 return -ENOMEM;
50 50
51 new_head->dma_addr = dma_addr; 51 new_head->dma_addr = dma_addr;
52 pci_unmap_addr_set(new_head, mapping, new_head->dma_addr); 52 dma_unmap_addr_set(new_head, mapping, new_head->dma_addr);
53 53
54 new_head->next = NULL; 54 new_head->next = NULL;
55 new_head->head = 0; 55 new_head->head = 0;
@@ -81,7 +81,7 @@ void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root)
81 while (root) { 81 while (root) {
82 next = root->next; 82 next = root->next;
83 dma_free_coherent(&c2dev->pcidev->dev, PAGE_SIZE, root, 83 dma_free_coherent(&c2dev->pcidev->dev, PAGE_SIZE, root,
84 pci_unmap_addr(root, mapping)); 84 dma_unmap_addr(root, mapping));
85 root = next; 85 root = next;
86 } 86 }
87} 87}
diff --git a/drivers/infiniband/hw/amso1100/c2_cq.c b/drivers/infiniband/hw/amso1100/c2_cq.c
index f7b0fc23f413..49e0e8533f74 100644
--- a/drivers/infiniband/hw/amso1100/c2_cq.c
+++ b/drivers/infiniband/hw/amso1100/c2_cq.c
@@ -257,7 +257,7 @@ int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
257static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq) 257static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq)
258{ 258{
259 dma_free_coherent(&c2dev->pcidev->dev, mq->q_size * mq->msg_size, 259 dma_free_coherent(&c2dev->pcidev->dev, mq->q_size * mq->msg_size,
260 mq->msg_pool.host, pci_unmap_addr(mq, mapping)); 260 mq->msg_pool.host, dma_unmap_addr(mq, mapping));
261} 261}
262 262
263static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size, 263static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size,
@@ -278,7 +278,7 @@ static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size,
278 NULL, /* peer (currently unknown) */ 278 NULL, /* peer (currently unknown) */
279 C2_MQ_HOST_TARGET); 279 C2_MQ_HOST_TARGET);
280 280
281 pci_unmap_addr_set(mq, mapping, mq->host_dma); 281 dma_unmap_addr_set(mq, mapping, mq->host_dma);
282 282
283 return 0; 283 return 0;
284} 284}
diff --git a/drivers/infiniband/hw/amso1100/c2_mq.h b/drivers/infiniband/hw/amso1100/c2_mq.h
index acede007b94a..fc1b9a7cec4b 100644
--- a/drivers/infiniband/hw/amso1100/c2_mq.h
+++ b/drivers/infiniband/hw/amso1100/c2_mq.h
@@ -71,7 +71,7 @@ struct c2_mq {
71 u8 __iomem *adapter; 71 u8 __iomem *adapter;
72 } msg_pool; 72 } msg_pool;
73 dma_addr_t host_dma; 73 dma_addr_t host_dma;
74 DECLARE_PCI_UNMAP_ADDR(mapping); 74 DEFINE_DMA_UNMAP_ADDR(mapping);
75 u16 hint_count; 75 u16 hint_count;
76 u16 priv; 76 u16 priv;
77 struct c2_mq_shared __iomem *peer; 77 struct c2_mq_shared __iomem *peer;
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.h b/drivers/infiniband/hw/amso1100/c2_provider.h
index 1076df2ee96a..bf189987711f 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.h
+++ b/drivers/infiniband/hw/amso1100/c2_provider.h
@@ -50,7 +50,7 @@
50 50
51struct c2_buf_list { 51struct c2_buf_list {
52 void *buf; 52 void *buf;
53 DECLARE_PCI_UNMAP_ADDR(mapping) 53 DEFINE_DMA_UNMAP_ADDR(mapping);
54}; 54};
55 55
56 56
diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c
index 78c4bcc6ef60..85cfae4cad71 100644
--- a/drivers/infiniband/hw/amso1100/c2_rnic.c
+++ b/drivers/infiniband/hw/amso1100/c2_rnic.c
@@ -524,7 +524,7 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
524 err = -ENOMEM; 524 err = -ENOMEM;
525 goto bail1; 525 goto bail1;
526 } 526 }
527 pci_unmap_addr_set(&c2dev->rep_vq, mapping, c2dev->rep_vq.host_dma); 527 dma_unmap_addr_set(&c2dev->rep_vq, mapping, c2dev->rep_vq.host_dma);
528 pr_debug("%s rep_vq va %p dma %llx\n", __func__, q1_pages, 528 pr_debug("%s rep_vq va %p dma %llx\n", __func__, q1_pages,
529 (unsigned long long) c2dev->rep_vq.host_dma); 529 (unsigned long long) c2dev->rep_vq.host_dma);
530 c2_mq_rep_init(&c2dev->rep_vq, 530 c2_mq_rep_init(&c2dev->rep_vq,
@@ -545,7 +545,7 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
545 err = -ENOMEM; 545 err = -ENOMEM;
546 goto bail2; 546 goto bail2;
547 } 547 }
548 pci_unmap_addr_set(&c2dev->aeq, mapping, c2dev->aeq.host_dma); 548 dma_unmap_addr_set(&c2dev->aeq, mapping, c2dev->aeq.host_dma);
549 pr_debug("%s aeq va %p dma %llx\n", __func__, q2_pages, 549 pr_debug("%s aeq va %p dma %llx\n", __func__, q2_pages,
550 (unsigned long long) c2dev->aeq.host_dma); 550 (unsigned long long) c2dev->aeq.host_dma);
551 c2_mq_rep_init(&c2dev->aeq, 551 c2_mq_rep_init(&c2dev->aeq,
@@ -596,11 +596,11 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
596 bail3: 596 bail3:
597 dma_free_coherent(&c2dev->pcidev->dev, 597 dma_free_coherent(&c2dev->pcidev->dev,
598 c2dev->aeq.q_size * c2dev->aeq.msg_size, 598 c2dev->aeq.q_size * c2dev->aeq.msg_size,
599 q2_pages, pci_unmap_addr(&c2dev->aeq, mapping)); 599 q2_pages, dma_unmap_addr(&c2dev->aeq, mapping));
600 bail2: 600 bail2:
601 dma_free_coherent(&c2dev->pcidev->dev, 601 dma_free_coherent(&c2dev->pcidev->dev,
602 c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size, 602 c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
603 q1_pages, pci_unmap_addr(&c2dev->rep_vq, mapping)); 603 q1_pages, dma_unmap_addr(&c2dev->rep_vq, mapping));
604 bail1: 604 bail1:
605 c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool); 605 c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);
606 bail0: 606 bail0:
@@ -637,13 +637,13 @@ void __devexit c2_rnic_term(struct c2_dev *c2dev)
637 dma_free_coherent(&c2dev->pcidev->dev, 637 dma_free_coherent(&c2dev->pcidev->dev,
638 c2dev->aeq.q_size * c2dev->aeq.msg_size, 638 c2dev->aeq.q_size * c2dev->aeq.msg_size,
639 c2dev->aeq.msg_pool.host, 639 c2dev->aeq.msg_pool.host,
640 pci_unmap_addr(&c2dev->aeq, mapping)); 640 dma_unmap_addr(&c2dev->aeq, mapping));
641 641
642 /* Free the verbs reply queue */ 642 /* Free the verbs reply queue */
643 dma_free_coherent(&c2dev->pcidev->dev, 643 dma_free_coherent(&c2dev->pcidev->dev,
644 c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size, 644 c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
645 c2dev->rep_vq.msg_pool.host, 645 c2dev->rep_vq.msg_pool.host,
646 pci_unmap_addr(&c2dev->rep_vq, mapping)); 646 dma_unmap_addr(&c2dev->rep_vq, mapping));
647 647
648 /* Free the MQ shared pointer pool */ 648 /* Free the MQ shared pointer pool */
649 c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool); 649 c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index 35f286f1ad1e..005b7b52bc1e 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -174,7 +174,7 @@ int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq, int kernel)
174 kfree(cq->sw_queue); 174 kfree(cq->sw_queue);
175 return -ENOMEM; 175 return -ENOMEM;
176 } 176 }
177 pci_unmap_addr_set(cq, mapping, cq->dma_addr); 177 dma_unmap_addr_set(cq, mapping, cq->dma_addr);
178 memset(cq->queue, 0, size); 178 memset(cq->queue, 0, size);
179 setup.id = cq->cqid; 179 setup.id = cq->cqid;
180 setup.base_addr = (u64) (cq->dma_addr); 180 setup.base_addr = (u64) (cq->dma_addr);
@@ -297,7 +297,7 @@ int cxio_create_qp(struct cxio_rdev *rdev_p, u32 kernel_domain,
297 goto err4; 297 goto err4;
298 298
299 memset(wq->queue, 0, depth * sizeof(union t3_wr)); 299 memset(wq->queue, 0, depth * sizeof(union t3_wr));
300 pci_unmap_addr_set(wq, mapping, wq->dma_addr); 300 dma_unmap_addr_set(wq, mapping, wq->dma_addr);
301 wq->doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr; 301 wq->doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr;
302 if (!kernel_domain) 302 if (!kernel_domain)
303 wq->udb = (u64)rdev_p->rnic_info.udbell_physbase + 303 wq->udb = (u64)rdev_p->rnic_info.udbell_physbase +
@@ -325,7 +325,7 @@ int cxio_destroy_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
325 dma_free_coherent(&(rdev_p->rnic_info.pdev->dev), 325 dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
326 (1UL << (cq->size_log2)) 326 (1UL << (cq->size_log2))
327 * sizeof(struct t3_cqe), cq->queue, 327 * sizeof(struct t3_cqe), cq->queue,
328 pci_unmap_addr(cq, mapping)); 328 dma_unmap_addr(cq, mapping));
329 cxio_hal_put_cqid(rdev_p->rscp, cq->cqid); 329 cxio_hal_put_cqid(rdev_p->rscp, cq->cqid);
330 return err; 330 return err;
331} 331}
@@ -336,7 +336,7 @@ int cxio_destroy_qp(struct cxio_rdev *rdev_p, struct t3_wq *wq,
336 dma_free_coherent(&(rdev_p->rnic_info.pdev->dev), 336 dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
337 (1UL << (wq->size_log2)) 337 (1UL << (wq->size_log2))
338 * sizeof(union t3_wr), wq->queue, 338 * sizeof(union t3_wr), wq->queue,
339 pci_unmap_addr(wq, mapping)); 339 dma_unmap_addr(wq, mapping));
340 kfree(wq->sq); 340 kfree(wq->sq);
341 cxio_hal_rqtpool_free(rdev_p, wq->rq_addr, (1UL << wq->rq_size_log2)); 341 cxio_hal_rqtpool_free(rdev_p, wq->rq_addr, (1UL << wq->rq_size_log2));
342 kfree(wq->rq); 342 kfree(wq->rq);
@@ -537,7 +537,7 @@ static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p)
537 err = -ENOMEM; 537 err = -ENOMEM;
538 goto err; 538 goto err;
539 } 539 }
540 pci_unmap_addr_set(&rdev_p->ctrl_qp, mapping, 540 dma_unmap_addr_set(&rdev_p->ctrl_qp, mapping,
541 rdev_p->ctrl_qp.dma_addr); 541 rdev_p->ctrl_qp.dma_addr);
542 rdev_p->ctrl_qp.doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr; 542 rdev_p->ctrl_qp.doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr;
543 memset(rdev_p->ctrl_qp.workq, 0, 543 memset(rdev_p->ctrl_qp.workq, 0,
@@ -583,7 +583,7 @@ static int cxio_hal_destroy_ctrl_qp(struct cxio_rdev *rdev_p)
583 dma_free_coherent(&(rdev_p->rnic_info.pdev->dev), 583 dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
584 (1UL << T3_CTRL_QP_SIZE_LOG2) 584 (1UL << T3_CTRL_QP_SIZE_LOG2)
585 * sizeof(union t3_wr), rdev_p->ctrl_qp.workq, 585 * sizeof(union t3_wr), rdev_p->ctrl_qp.workq,
586 pci_unmap_addr(&rdev_p->ctrl_qp, mapping)); 586 dma_unmap_addr(&rdev_p->ctrl_qp, mapping));
587 return cxio_hal_clear_qp_ctx(rdev_p, T3_CTRL_QP_ID); 587 return cxio_hal_clear_qp_ctx(rdev_p, T3_CTRL_QP_ID);
588} 588}
589 589
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.h b/drivers/infiniband/hw/cxgb3/cxio_hal.h
index 073373c2c560..8f0caf7d4482 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.h
@@ -71,7 +71,7 @@ struct cxio_hal_ctrl_qp {
71 wait_queue_head_t waitq;/* wait for RspQ/CQE msg */ 71 wait_queue_head_t waitq;/* wait for RspQ/CQE msg */
72 union t3_wr *workq; /* the work request queue */ 72 union t3_wr *workq; /* the work request queue */
73 dma_addr_t dma_addr; /* pci bus address of the workq */ 73 dma_addr_t dma_addr; /* pci bus address of the workq */
74 DECLARE_PCI_UNMAP_ADDR(mapping) 74 DEFINE_DMA_UNMAP_ADDR(mapping);
75 void __iomem *doorbell; 75 void __iomem *doorbell;
76}; 76};
77 77
diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h
index 15073b2da1c5..e5ddb63e7d23 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_wr.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_wr.h
@@ -691,7 +691,7 @@ struct t3_swrq {
691struct t3_wq { 691struct t3_wq {
 692 union t3_wr *queue; /* DMA accessible memory */ 692 union t3_wr *queue; /* DMA accessible memory */
693 dma_addr_t dma_addr; /* DMA address for HW */ 693 dma_addr_t dma_addr; /* DMA address for HW */
694 DECLARE_PCI_UNMAP_ADDR(mapping) /* unmap kruft */ 694 DEFINE_DMA_UNMAP_ADDR(mapping); /* unmap kruft */
695 u32 error; /* 1 once we go to ERROR */ 695 u32 error; /* 1 once we go to ERROR */
696 u32 qpid; 696 u32 qpid;
697 u32 wptr; /* idx to next available WR slot */ 697 u32 wptr; /* idx to next available WR slot */
@@ -718,7 +718,7 @@ struct t3_cq {
 	u32 wptr;
 	u32 size_log2;
 	dma_addr_t dma_addr;
-	DECLARE_PCI_UNMAP_ADDR(mapping)
+	DEFINE_DMA_UNMAP_ADDR(mapping);
 	struct t3_cqe *queue;
 	struct t3_cqe *sw_queue;
 	u32 sw_rptr;
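The cxio_hal/cxio_wr changes above follow the tree-wide move from the PCI-specific pci_unmap_addr*() helpers to the bus-agnostic dma_unmap_addr*() API from <linux/dma-mapping.h>. As a minimal sketch of that pattern (the structure and function names below are invented for illustration, not taken from the driver), a coherent buffer that must later be freed with its bus address is handled like this:

#include <linux/dma-mapping.h>

/* hypothetical example structure, mirroring the shape of t3_wq/t3_cq */
struct my_ring {
	void *queue;			/* CPU address of the coherent buffer */
	dma_addr_t dma_addr;		/* bus address handed to the hardware */
	DEFINE_DMA_UNMAP_ADDR(mapping);	/* storage for the address needed at free time */
};

static int my_ring_alloc(struct device *dev, struct my_ring *r, size_t size)
{
	r->queue = dma_alloc_coherent(dev, size, &r->dma_addr, GFP_KERNEL);
	if (!r->queue)
		return -ENOMEM;
	/* remember the bus address so the free path can hand it back */
	dma_unmap_addr_set(r, mapping, r->dma_addr);
	return 0;
}

static void my_ring_free(struct device *dev, struct my_ring *r, size_t size)
{
	dma_free_coherent(dev, size, r->queue, dma_unmap_addr(r, mapping));
}

The generic macros behave like their DECLARE_PCI_UNMAP_ADDR counterparts (the field still compiles away on configurations that do not need the stored address) but no longer assume the device sits behind a PCI bus, which is why the conversion in these hunks is purely mechanical.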
diff --git a/drivers/infiniband/hw/cxgb3/iwch.c b/drivers/infiniband/hw/cxgb3/iwch.c
index 63f975f3e30f..8e77dc543dd1 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.c
+++ b/drivers/infiniband/hw/cxgb3/iwch.c
@@ -47,8 +47,6 @@ MODULE_DESCRIPTION("Chelsio T3 RDMA Driver");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_VERSION(DRV_VERSION);
 
-cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS];
-
 static void open_rnic_dev(struct t3cdev *);
 static void close_rnic_dev(struct t3cdev *);
 static void iwch_event_handler(struct t3cdev *, u32, u32);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 4fef03296276..ebfb117ba68b 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -102,12 +102,9 @@ static unsigned int cong_flavor = 1;
 module_param(cong_flavor, uint, 0644);
 MODULE_PARM_DESC(cong_flavor, "TCP Congestion control flavor (default=1)");
 
-static void process_work(struct work_struct *work);
 static struct workqueue_struct *workq;
-static DECLARE_WORK(skb_work, process_work);
 
 static struct sk_buff_head rxq;
-static cxgb3_cpl_handler_func work_handlers[NUM_CPL_CMDS];
 
 static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
 static void ep_timeout(unsigned long arg);
@@ -151,7 +148,7 @@ int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_entry *l2
 		return -EIO;
 	}
 	error = l2t_send(tdev, skb, l2e);
-	if (error)
+	if (error < 0)
 		kfree_skb(skb);
 	return error;
 }
@@ -167,7 +164,7 @@ int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
 		return -EIO;
 	}
 	error = cxgb3_ofld_send(tdev, skb);
-	if (error)
+	if (error < 0)
 		kfree_skb(skb);
 	return error;
 }
@@ -302,27 +299,6 @@ static void release_ep_resources(struct iwch_ep *ep)
 	put_ep(&ep->com);
 }
 
-static void process_work(struct work_struct *work)
-{
-	struct sk_buff *skb = NULL;
-	void *ep;
-	struct t3cdev *tdev;
-	int ret;
-
-	while ((skb = skb_dequeue(&rxq))) {
-		ep = *((void **) (skb->cb));
-		tdev = *((struct t3cdev **) (skb->cb + sizeof(void *)));
-		ret = work_handlers[G_OPCODE(ntohl((__force __be32)skb->csum))](tdev, skb, ep);
-		if (ret & CPL_RET_BUF_DONE)
-			kfree_skb(skb);
-
-		/*
-		 * ep was referenced in sched(), and is freed here.
-		 */
-		put_ep((struct iwch_ep_common *)ep);
-	}
-}
-
 static int status2errno(int status)
 {
 	switch (status) {
@@ -2157,7 +2133,49 @@ int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
 
 /*
  * All the CM events are handled on a work queue to have a safe context.
+ * These are the real handlers that are called from the work queue.
  */
+static const cxgb3_cpl_handler_func work_handlers[NUM_CPL_CMDS] = {
+	[CPL_ACT_ESTABLISH] = act_establish,
+	[CPL_ACT_OPEN_RPL] = act_open_rpl,
+	[CPL_RX_DATA] = rx_data,
+	[CPL_TX_DMA_ACK] = tx_ack,
+	[CPL_ABORT_RPL_RSS] = abort_rpl,
+	[CPL_ABORT_RPL] = abort_rpl,
+	[CPL_PASS_OPEN_RPL] = pass_open_rpl,
+	[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
+	[CPL_PASS_ACCEPT_REQ] = pass_accept_req,
+	[CPL_PASS_ESTABLISH] = pass_establish,
+	[CPL_PEER_CLOSE] = peer_close,
+	[CPL_ABORT_REQ_RSS] = peer_abort,
+	[CPL_CLOSE_CON_RPL] = close_con_rpl,
+	[CPL_RDMA_TERMINATE] = terminate,
+	[CPL_RDMA_EC_STATUS] = ec_status,
+};
+
+static void process_work(struct work_struct *work)
+{
+	struct sk_buff *skb = NULL;
+	void *ep;
+	struct t3cdev *tdev;
+	int ret;
+
+	while ((skb = skb_dequeue(&rxq))) {
+		ep = *((void **) (skb->cb));
+		tdev = *((struct t3cdev **) (skb->cb + sizeof(void *)));
+		ret = work_handlers[G_OPCODE(ntohl((__force __be32)skb->csum))](tdev, skb, ep);
+		if (ret & CPL_RET_BUF_DONE)
+			kfree_skb(skb);
+
+		/*
+		 * ep was referenced in sched(), and is freed here.
+		 */
+		put_ep((struct iwch_ep_common *)ep);
+	}
+}
+
+static DECLARE_WORK(skb_work, process_work);
+
 static int sched(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 {
 	struct iwch_ep_common *epc = ctx;
@@ -2189,6 +2207,29 @@ static int set_tcb_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	return CPL_RET_BUF_DONE;
 }
 
+/*
+ * All upcalls from the T3 Core go to sched() to schedule the
+ * processing on a work queue.
+ */
+cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS] = {
+	[CPL_ACT_ESTABLISH] = sched,
+	[CPL_ACT_OPEN_RPL] = sched,
+	[CPL_RX_DATA] = sched,
+	[CPL_TX_DMA_ACK] = sched,
+	[CPL_ABORT_RPL_RSS] = sched,
+	[CPL_ABORT_RPL] = sched,
+	[CPL_PASS_OPEN_RPL] = sched,
+	[CPL_CLOSE_LISTSRV_RPL] = sched,
+	[CPL_PASS_ACCEPT_REQ] = sched,
+	[CPL_PASS_ESTABLISH] = sched,
+	[CPL_PEER_CLOSE] = sched,
+	[CPL_CLOSE_CON_RPL] = sched,
+	[CPL_ABORT_REQ_RSS] = sched,
+	[CPL_RDMA_TERMINATE] = sched,
+	[CPL_RDMA_EC_STATUS] = sched,
+	[CPL_SET_TCB_RPL] = set_tcb_rpl,
+};
+
 int __init iwch_cm_init(void)
 {
 	skb_queue_head_init(&rxq);
@@ -2197,46 +2238,6 @@ int __init iwch_cm_init(void)
 	if (!workq)
 		return -ENOMEM;
 
-	/*
-	 * All upcalls from the T3 Core go to sched() to
-	 * schedule the processing on a work queue.
-	 */
-	t3c_handlers[CPL_ACT_ESTABLISH] = sched;
-	t3c_handlers[CPL_ACT_OPEN_RPL] = sched;
-	t3c_handlers[CPL_RX_DATA] = sched;
-	t3c_handlers[CPL_TX_DMA_ACK] = sched;
-	t3c_handlers[CPL_ABORT_RPL_RSS] = sched;
-	t3c_handlers[CPL_ABORT_RPL] = sched;
-	t3c_handlers[CPL_PASS_OPEN_RPL] = sched;
-	t3c_handlers[CPL_CLOSE_LISTSRV_RPL] = sched;
-	t3c_handlers[CPL_PASS_ACCEPT_REQ] = sched;
-	t3c_handlers[CPL_PASS_ESTABLISH] = sched;
-	t3c_handlers[CPL_PEER_CLOSE] = sched;
-	t3c_handlers[CPL_CLOSE_CON_RPL] = sched;
-	t3c_handlers[CPL_ABORT_REQ_RSS] = sched;
-	t3c_handlers[CPL_RDMA_TERMINATE] = sched;
-	t3c_handlers[CPL_RDMA_EC_STATUS] = sched;
-	t3c_handlers[CPL_SET_TCB_RPL] = set_tcb_rpl;
-
-	/*
-	 * These are the real handlers that are called from a
-	 * work queue.
-	 */
-	work_handlers[CPL_ACT_ESTABLISH] = act_establish;
-	work_handlers[CPL_ACT_OPEN_RPL] = act_open_rpl;
-	work_handlers[CPL_RX_DATA] = rx_data;
-	work_handlers[CPL_TX_DMA_ACK] = tx_ack;
-	work_handlers[CPL_ABORT_RPL_RSS] = abort_rpl;
-	work_handlers[CPL_ABORT_RPL] = abort_rpl;
-	work_handlers[CPL_PASS_OPEN_RPL] = pass_open_rpl;
-	work_handlers[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl;
-	work_handlers[CPL_PASS_ACCEPT_REQ] = pass_accept_req;
-	work_handlers[CPL_PASS_ESTABLISH] = pass_establish;
-	work_handlers[CPL_PEER_CLOSE] = peer_close;
-	work_handlers[CPL_ABORT_REQ_RSS] = peer_abort;
-	work_handlers[CPL_CLOSE_CON_RPL] = close_con_rpl;
-	work_handlers[CPL_RDMA_TERMINATE] = terminate;
-	work_handlers[CPL_RDMA_EC_STATUS] = ec_status;
 	return 0;
 }
 
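The iwch_cm.c hunks above replace handler tables that iwch_cm_init() used to fill in one opcode at a time with tables built at compile time via designated array initializers, and group work_handlers, process_work() and the skb_work item together so the forward declarations can go away. A stripped-down, stand-alone illustration of the idiom (the opcode names and handler signature here are invented for the example, not the driver's real CPL definitions):

#include <stdio.h>

enum opcode { OP_OPEN, OP_CLOSE, OP_DATA, NUM_OPCODES };

typedef int (*handler_fn)(void *ctx);

static int do_open(void *ctx)  { (void)ctx; puts("open");  return 0; }
static int do_close(void *ctx) { (void)ctx; puts("close"); return 0; }

/* Slots not named below are implicitly NULL, just as CPL opcodes
 * without a handler are simply left unset in the driver tables. */
static const handler_fn handlers[NUM_OPCODES] = {
	[OP_OPEN]  = do_open,
	[OP_CLOSE] = do_close,
};

static int dispatch(enum opcode op, void *ctx)
{
	return handlers[op] ? handlers[op](ctx) : -1;
}

int main(void)
{
	return dispatch(OP_OPEN, NULL);
}

Initializing the tables this way lets work_handlers become static const data and removes the run-time setup loop from iwch_cm_init(); t3c_handlers keeps external linkage because it is still referenced outside this file (its old definition in iwch.c is dropped in the earlier hunk).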
diff --git a/drivers/infiniband/hw/cxgb4/Kconfig b/drivers/infiniband/hw/cxgb4/Kconfig
new file mode 100644
index 000000000000..ccb85eaaad75
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/Kconfig
@@ -0,0 +1,18 @@
1config INFINIBAND_CXGB4
2 tristate "Chelsio T4 RDMA Driver"
3 depends on CHELSIO_T4 && INET
4 select GENERIC_ALLOCATOR
5 ---help---
6 This is an iWARP/RDMA driver for the Chelsio T4 1GbE and
7 10GbE adapters.
8
9 For general information about Chelsio and our products, visit
10 our website at <http://www.chelsio.com>.
11
12 For customer support, please visit our customer support page at
13 <http://www.chelsio.com/support.htm>.
14
15 Please send feedback to <linux-bugs@chelsio.com>.
16
17 To compile this driver as a module, choose M here: the module
18 will be called iw_cxgb4.
diff --git a/drivers/infiniband/hw/cxgb4/Makefile b/drivers/infiniband/hw/cxgb4/Makefile
new file mode 100644
index 000000000000..e31a499f0172
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/Makefile
@@ -0,0 +1,5 @@
1EXTRA_CFLAGS += -Idrivers/net/cxgb4
2
3obj-$(CONFIG_INFINIBAND_CXGB4) += iw_cxgb4.o
4
5iw_cxgb4-y := device.o cm.o provider.o mem.o cq.o qp.o resource.o ev.o
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
new file mode 100644
index 000000000000..30ce0a8eca09
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -0,0 +1,2374 @@
1/*
2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/module.h>
33#include <linux/list.h>
34#include <linux/workqueue.h>
35#include <linux/skbuff.h>
36#include <linux/timer.h>
37#include <linux/notifier.h>
38#include <linux/inetdevice.h>
39#include <linux/ip.h>
40#include <linux/tcp.h>
41
42#include <net/neighbour.h>
43#include <net/netevent.h>
44#include <net/route.h>
45
46#include "iw_cxgb4.h"
47
48static char *states[] = {
49 "idle",
50 "listen",
51 "connecting",
52 "mpa_wait_req",
53 "mpa_req_sent",
54 "mpa_req_rcvd",
55 "mpa_rep_sent",
56 "fpdu_mode",
57 "aborting",
58 "closing",
59 "moribund",
60 "dead",
61 NULL,
62};
63
64int c4iw_max_read_depth = 8;
65module_param(c4iw_max_read_depth, int, 0644);
66MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)");
67
68static int enable_tcp_timestamps;
69module_param(enable_tcp_timestamps, int, 0644);
70MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");
71
72static int enable_tcp_sack;
73module_param(enable_tcp_sack, int, 0644);
74MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)");
75
76static int enable_tcp_window_scaling = 1;
77module_param(enable_tcp_window_scaling, int, 0644);
78MODULE_PARM_DESC(enable_tcp_window_scaling,
79 "Enable tcp window scaling (default=1)");
80
81int c4iw_debug;
82module_param(c4iw_debug, int, 0644);
83MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)");
84
85static int peer2peer;
86module_param(peer2peer, int, 0644);
87MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)");
88
89static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
90module_param(p2p_type, int, 0644);
91MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: "
92 "1=RDMA_READ 0=RDMA_WRITE (default 1)");
93
94static int ep_timeout_secs = 60;
95module_param(ep_timeout_secs, int, 0644);
96MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
97 "in seconds (default=60)");
98
99static int mpa_rev = 1;
100module_param(mpa_rev, int, 0644);
101MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
102 "1 is spec compliant. (default=1)");
103
104static int markers_enabled;
105module_param(markers_enabled, int, 0644);
106MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");
107
108static int crc_enabled = 1;
109module_param(crc_enabled, int, 0644);
110MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");
111
112static int rcv_win = 256 * 1024;
113module_param(rcv_win, int, 0644);
114MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");
115
116static int snd_win = 32 * 1024;
117module_param(snd_win, int, 0644);
118MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)");
119
120static struct workqueue_struct *workq;
121
122static struct sk_buff_head rxq;
123
124static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
125static void ep_timeout(unsigned long arg);
126static void connect_reply_upcall(struct c4iw_ep *ep, int status);
127
128static LIST_HEAD(timeout_list);
129static spinlock_t timeout_lock;
130
131static void start_ep_timer(struct c4iw_ep *ep)
132{
133 PDBG("%s ep %p\n", __func__, ep);
134 if (timer_pending(&ep->timer)) {
135 PDBG("%s stopped / restarted timer ep %p\n", __func__, ep);
136 del_timer_sync(&ep->timer);
137 } else
138 c4iw_get_ep(&ep->com);
139 ep->timer.expires = jiffies + ep_timeout_secs * HZ;
140 ep->timer.data = (unsigned long)ep;
141 ep->timer.function = ep_timeout;
142 add_timer(&ep->timer);
143}
144
145static void stop_ep_timer(struct c4iw_ep *ep)
146{
147 PDBG("%s ep %p\n", __func__, ep);
148 if (!timer_pending(&ep->timer)) {
 149	printk(KERN_ERR "%s timer stopped when it's not running! "
150 "ep %p state %u\n", __func__, ep, ep->com.state);
151 WARN_ON(1);
152 return;
153 }
154 del_timer_sync(&ep->timer);
155 c4iw_put_ep(&ep->com);
156}
157
158static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
159 struct l2t_entry *l2e)
160{
161 int error = 0;
162
163 if (c4iw_fatal_error(rdev)) {
164 kfree_skb(skb);
165 PDBG("%s - device in error state - dropping\n", __func__);
166 return -EIO;
167 }
168 error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
169 if (error < 0)
170 kfree_skb(skb);
171 return error;
172}
173
174int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
175{
176 int error = 0;
177
178 if (c4iw_fatal_error(rdev)) {
179 kfree_skb(skb);
180 PDBG("%s - device in error state - dropping\n", __func__);
181 return -EIO;
182 }
183 error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
184 if (error < 0)
185 kfree_skb(skb);
186 return error;
187}
188
189static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
190{
191 struct cpl_tid_release *req;
192
193 skb = get_skb(skb, sizeof *req, GFP_KERNEL);
194 if (!skb)
195 return;
196 req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
197 INIT_TP_WR(req, hwtid);
198 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
199 set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
200 c4iw_ofld_send(rdev, skb);
201 return;
202}
203
204static void set_emss(struct c4iw_ep *ep, u16 opt)
205{
206 ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] - 40;
207 ep->mss = ep->emss;
208 if (GET_TCPOPT_TSTAMP(opt))
209 ep->emss -= 12;
210 if (ep->emss < 128)
211 ep->emss = 128;
212 PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt),
213 ep->mss, ep->emss);
214}
215
216static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
217{
218 unsigned long flags;
219 enum c4iw_ep_state state;
220
221 spin_lock_irqsave(&epc->lock, flags);
222 state = epc->state;
223 spin_unlock_irqrestore(&epc->lock, flags);
224 return state;
225}
226
227static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
228{
229 epc->state = new;
230}
231
232static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
233{
234 unsigned long flags;
235
236 spin_lock_irqsave(&epc->lock, flags);
237 PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
238 __state_set(epc, new);
239 spin_unlock_irqrestore(&epc->lock, flags);
240 return;
241}
242
243static void *alloc_ep(int size, gfp_t gfp)
244{
245 struct c4iw_ep_common *epc;
246
247 epc = kzalloc(size, gfp);
248 if (epc) {
249 kref_init(&epc->kref);
250 spin_lock_init(&epc->lock);
251 init_waitqueue_head(&epc->waitq);
252 }
253 PDBG("%s alloc ep %p\n", __func__, epc);
254 return epc;
255}
256
257void _c4iw_free_ep(struct kref *kref)
258{
259 struct c4iw_ep *ep;
260
261 ep = container_of(kref, struct c4iw_ep, com.kref);
262 PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
263 if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
264 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
265 dst_release(ep->dst);
266 cxgb4_l2t_release(ep->l2t);
267 }
268 kfree(ep);
269}
270
271static void release_ep_resources(struct c4iw_ep *ep)
272{
273 set_bit(RELEASE_RESOURCES, &ep->com.flags);
274 c4iw_put_ep(&ep->com);
275}
276
277static int status2errno(int status)
278{
279 switch (status) {
280 case CPL_ERR_NONE:
281 return 0;
282 case CPL_ERR_CONN_RESET:
283 return -ECONNRESET;
284 case CPL_ERR_ARP_MISS:
285 return -EHOSTUNREACH;
286 case CPL_ERR_CONN_TIMEDOUT:
287 return -ETIMEDOUT;
288 case CPL_ERR_TCAM_FULL:
289 return -ENOMEM;
290 case CPL_ERR_CONN_EXIST:
291 return -EADDRINUSE;
292 default:
293 return -EIO;
294 }
295}
296
297/*
298 * Try and reuse skbs already allocated...
299 */
300static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
301{
302 if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
303 skb_trim(skb, 0);
304 skb_get(skb);
305 skb_reset_transport_header(skb);
306 } else {
307 skb = alloc_skb(len, gfp);
308 }
309 return skb;
310}
311
312static struct rtable *find_route(struct c4iw_dev *dev, __be32 local_ip,
313 __be32 peer_ip, __be16 local_port,
314 __be16 peer_port, u8 tos)
315{
316 struct rtable *rt;
317 struct flowi fl = {
318 .oif = 0,
319 .nl_u = {
320 .ip4_u = {
321 .daddr = peer_ip,
322 .saddr = local_ip,
323 .tos = tos}
324 },
325 .proto = IPPROTO_TCP,
326 .uli_u = {
327 .ports = {
328 .sport = local_port,
329 .dport = peer_port}
330 }
331 };
332
333 if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0))
334 return NULL;
335 return rt;
336}
337
338static void arp_failure_discard(void *handle, struct sk_buff *skb)
339{
340 PDBG("%s c4iw_dev %p\n", __func__, handle);
341 kfree_skb(skb);
342}
343
344/*
345 * Handle an ARP failure for an active open.
346 */
347static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
348{
 349	printk(KERN_ERR MOD "ARP failure during connect\n");
350 kfree_skb(skb);
351}
352
353/*
354 * Handle an ARP failure for a CPL_ABORT_REQ. Change it into a no RST variant
355 * and send it along.
356 */
357static void abort_arp_failure(void *handle, struct sk_buff *skb)
358{
359 struct c4iw_rdev *rdev = handle;
360 struct cpl_abort_req *req = cplhdr(skb);
361
362 PDBG("%s rdev %p\n", __func__, rdev);
363 req->cmd = CPL_ABORT_NO_RST;
364 c4iw_ofld_send(rdev, skb);
365}
366
367static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
368{
369 unsigned int flowclen = 80;
370 struct fw_flowc_wr *flowc;
371 int i;
372
373 skb = get_skb(skb, flowclen, GFP_KERNEL);
374 flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);
375
376 flowc->op_to_nparams = cpu_to_be32(FW_WR_OP(FW_FLOWC_WR) |
377 FW_FLOWC_WR_NPARAMS(8));
378 flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(flowclen,
379 16)) | FW_WR_FLOWID(ep->hwtid));
380
381 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
382 flowc->mnemval[0].val = cpu_to_be32(0);
383 flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
384 flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
385 flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
386 flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan);
387 flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
388 flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid);
389 flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
390 flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq);
391 flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
392 flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq);
393 flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
394 flowc->mnemval[6].val = cpu_to_be32(snd_win);
395 flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
396 flowc->mnemval[7].val = cpu_to_be32(ep->emss);
397 /* Pad WR to 16 byte boundary */
398 flowc->mnemval[8].mnemonic = 0;
399 flowc->mnemval[8].val = 0;
400 for (i = 0; i < 9; i++) {
401 flowc->mnemval[i].r4[0] = 0;
402 flowc->mnemval[i].r4[1] = 0;
403 flowc->mnemval[i].r4[2] = 0;
404 }
405
406 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
407 c4iw_ofld_send(&ep->com.dev->rdev, skb);
408}
409
410static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp)
411{
412 struct cpl_close_con_req *req;
413 struct sk_buff *skb;
414 int wrlen = roundup(sizeof *req, 16);
415
416 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
417 skb = get_skb(NULL, wrlen, gfp);
418 if (!skb) {
419 printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
420 return -ENOMEM;
421 }
422 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
423 t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
424 req = (struct cpl_close_con_req *) skb_put(skb, wrlen);
425 memset(req, 0, wrlen);
426 INIT_TP_WR(req, ep->hwtid);
427 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
428 ep->hwtid));
429 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
430}
431
432static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
433{
434 struct cpl_abort_req *req;
435 int wrlen = roundup(sizeof *req, 16);
436
437 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
438 skb = get_skb(skb, wrlen, gfp);
439 if (!skb) {
440 printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
441 __func__);
442 return -ENOMEM;
443 }
444 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
445 t4_set_arp_err_handler(skb, &ep->com.dev->rdev, abort_arp_failure);
446 req = (struct cpl_abort_req *) skb_put(skb, wrlen);
447 memset(req, 0, wrlen);
448 INIT_TP_WR(req, ep->hwtid);
449 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
450 req->cmd = CPL_ABORT_SEND_RST;
451 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
452}
453
454static int send_connect(struct c4iw_ep *ep)
455{
456 struct cpl_act_open_req *req;
457 struct sk_buff *skb;
458 u64 opt0;
459 u32 opt2;
460 unsigned int mtu_idx;
461 int wscale;
462 int wrlen = roundup(sizeof *req, 16);
463
464 PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid);
465
466 skb = get_skb(NULL, wrlen, GFP_KERNEL);
467 if (!skb) {
468 printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
469 __func__);
470 return -ENOMEM;
471 }
472 set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->txq_idx);
473
474 cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
475 wscale = compute_wscale(rcv_win);
476 opt0 = KEEP_ALIVE(1) |
477 WND_SCALE(wscale) |
478 MSS_IDX(mtu_idx) |
479 L2T_IDX(ep->l2t->idx) |
480 TX_CHAN(ep->tx_chan) |
481 SMAC_SEL(ep->smac_idx) |
482 DSCP(ep->tos) |
483 RCV_BUFSIZ(rcv_win>>10);
484 opt2 = RX_CHANNEL(0) |
485 RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
486 if (enable_tcp_timestamps)
487 opt2 |= TSTAMPS_EN(1);
488 if (enable_tcp_sack)
489 opt2 |= SACK_EN(1);
490 if (wscale && enable_tcp_window_scaling)
491 opt2 |= WND_SCALE_EN(1);
492 t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);
493
494 req = (struct cpl_act_open_req *) skb_put(skb, wrlen);
495 INIT_TP_WR(req, 0);
496 OPCODE_TID(req) = cpu_to_be32(
497 MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ((ep->rss_qid<<14)|ep->atid)));
498 req->local_port = ep->com.local_addr.sin_port;
499 req->peer_port = ep->com.remote_addr.sin_port;
500 req->local_ip = ep->com.local_addr.sin_addr.s_addr;
501 req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
502 req->opt0 = cpu_to_be64(opt0);
503 req->params = 0;
504 req->opt2 = cpu_to_be32(opt2);
505 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
506}
507
508static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb)
509{
510 int mpalen, wrlen;
511 struct fw_ofld_tx_data_wr *req;
512 struct mpa_message *mpa;
513
514 PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);
515
516 BUG_ON(skb_cloned(skb));
517
518 mpalen = sizeof(*mpa) + ep->plen;
519 wrlen = roundup(mpalen + sizeof *req, 16);
520 skb = get_skb(skb, wrlen, GFP_KERNEL);
521 if (!skb) {
522 connect_reply_upcall(ep, -ENOMEM);
523 return;
524 }
525 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
526
527 req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
528 memset(req, 0, wrlen);
529 req->op_to_immdlen = cpu_to_be32(
530 FW_WR_OP(FW_OFLD_TX_DATA_WR) |
531 FW_WR_COMPL(1) |
532 FW_WR_IMMDLEN(mpalen));
533 req->flowid_len16 = cpu_to_be32(
534 FW_WR_FLOWID(ep->hwtid) |
535 FW_WR_LEN16(wrlen >> 4));
536 req->plen = cpu_to_be32(mpalen);
537 req->tunnel_to_proxy = cpu_to_be32(
538 FW_OFLD_TX_DATA_WR_FLUSH(1) |
539 FW_OFLD_TX_DATA_WR_SHOVE(1));
540
541 mpa = (struct mpa_message *)(req + 1);
542 memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
543 mpa->flags = (crc_enabled ? MPA_CRC : 0) |
544 (markers_enabled ? MPA_MARKERS : 0);
545 mpa->private_data_size = htons(ep->plen);
546 mpa->revision = mpa_rev;
547
548 if (ep->plen)
549 memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen);
550
551 /*
552 * Reference the mpa skb. This ensures the data area
553 * will remain in memory until the hw acks the tx.
554 * Function fw4_ack() will deref it.
555 */
556 skb_get(skb);
557 t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
558 BUG_ON(ep->mpa_skb);
559 ep->mpa_skb = skb;
560 c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
561 start_ep_timer(ep);
562 state_set(&ep->com, MPA_REQ_SENT);
563 ep->mpa_attr.initiator = 1;
564 return;
565}
566
567static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
568{
569 int mpalen, wrlen;
570 struct fw_ofld_tx_data_wr *req;
571 struct mpa_message *mpa;
572 struct sk_buff *skb;
573
574 PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);
575
576 mpalen = sizeof(*mpa) + plen;
577 wrlen = roundup(mpalen + sizeof *req, 16);
578
579 skb = get_skb(NULL, wrlen, GFP_KERNEL);
580 if (!skb) {
581 printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
582 return -ENOMEM;
583 }
584 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
585
586 req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
587 memset(req, 0, wrlen);
588 req->op_to_immdlen = cpu_to_be32(
589 FW_WR_OP(FW_OFLD_TX_DATA_WR) |
590 FW_WR_COMPL(1) |
591 FW_WR_IMMDLEN(mpalen));
592 req->flowid_len16 = cpu_to_be32(
593 FW_WR_FLOWID(ep->hwtid) |
594 FW_WR_LEN16(wrlen >> 4));
595 req->plen = cpu_to_be32(mpalen);
596 req->tunnel_to_proxy = cpu_to_be32(
597 FW_OFLD_TX_DATA_WR_FLUSH(1) |
598 FW_OFLD_TX_DATA_WR_SHOVE(1));
599
600 mpa = (struct mpa_message *)(req + 1);
601 memset(mpa, 0, sizeof(*mpa));
602 memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
603 mpa->flags = MPA_REJECT;
604 mpa->revision = mpa_rev;
605 mpa->private_data_size = htons(plen);
606 if (plen)
607 memcpy(mpa->private_data, pdata, plen);
608
609 /*
610 * Reference the mpa skb again. This ensures the data area
611 * will remain in memory until the hw acks the tx.
612 * Function fw4_ack() will deref it.
613 */
614 skb_get(skb);
615 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
616 t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
617 BUG_ON(ep->mpa_skb);
618 ep->mpa_skb = skb;
619 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
620}
621
622static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
623{
624 int mpalen, wrlen;
625 struct fw_ofld_tx_data_wr *req;
626 struct mpa_message *mpa;
627 struct sk_buff *skb;
628
629 PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);
630
631 mpalen = sizeof(*mpa) + plen;
632 wrlen = roundup(mpalen + sizeof *req, 16);
633
634 skb = get_skb(NULL, wrlen, GFP_KERNEL);
635 if (!skb) {
636 printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
637 return -ENOMEM;
638 }
639 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
640
641 req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen);
642 memset(req, 0, wrlen);
643 req->op_to_immdlen = cpu_to_be32(
644 FW_WR_OP(FW_OFLD_TX_DATA_WR) |
645 FW_WR_COMPL(1) |
646 FW_WR_IMMDLEN(mpalen));
647 req->flowid_len16 = cpu_to_be32(
648 FW_WR_FLOWID(ep->hwtid) |
649 FW_WR_LEN16(wrlen >> 4));
650 req->plen = cpu_to_be32(mpalen);
651 req->tunnel_to_proxy = cpu_to_be32(
652 FW_OFLD_TX_DATA_WR_FLUSH(1) |
653 FW_OFLD_TX_DATA_WR_SHOVE(1));
654
655 mpa = (struct mpa_message *)(req + 1);
656 memset(mpa, 0, sizeof(*mpa));
657 memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
658 mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
659 (markers_enabled ? MPA_MARKERS : 0);
660 mpa->revision = mpa_rev;
661 mpa->private_data_size = htons(plen);
662 if (plen)
663 memcpy(mpa->private_data, pdata, plen);
664
665 /*
666 * Reference the mpa skb. This ensures the data area
667 * will remain in memory until the hw acks the tx.
668 * Function fw4_ack() will deref it.
669 */
670 skb_get(skb);
671 t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
672 ep->mpa_skb = skb;
673 state_set(&ep->com, MPA_REP_SENT);
674 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
675}
676
677static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
678{
679 struct c4iw_ep *ep;
680 struct cpl_act_establish *req = cplhdr(skb);
681 unsigned int tid = GET_TID(req);
682 unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
683 struct tid_info *t = dev->rdev.lldi.tids;
684
685 ep = lookup_atid(t, atid);
686
687 PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid,
688 be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));
689
690 dst_confirm(ep->dst);
691
692 /* setup the hwtid for this connection */
693 ep->hwtid = tid;
694 cxgb4_insert_tid(t, ep, tid);
695
696 ep->snd_seq = be32_to_cpu(req->snd_isn);
697 ep->rcv_seq = be32_to_cpu(req->rcv_isn);
698
699 set_emss(ep, ntohs(req->tcp_opt));
700
701 /* dealloc the atid */
702 cxgb4_free_atid(t, atid);
703
704 /* start MPA negotiation */
705 send_flowc(ep, NULL);
706 send_mpa_req(ep, skb);
707
708 return 0;
709}
710
711static void close_complete_upcall(struct c4iw_ep *ep)
712{
713 struct iw_cm_event event;
714
715 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
716 memset(&event, 0, sizeof(event));
717 event.event = IW_CM_EVENT_CLOSE;
718 if (ep->com.cm_id) {
719 PDBG("close complete delivered ep %p cm_id %p tid %u\n",
720 ep, ep->com.cm_id, ep->hwtid);
721 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
722 ep->com.cm_id->rem_ref(ep->com.cm_id);
723 ep->com.cm_id = NULL;
724 ep->com.qp = NULL;
725 }
726}
727
728static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
729{
730 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
731 close_complete_upcall(ep);
732 state_set(&ep->com, ABORTING);
733 return send_abort(ep, skb, gfp);
734}
735
736static void peer_close_upcall(struct c4iw_ep *ep)
737{
738 struct iw_cm_event event;
739
740 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
741 memset(&event, 0, sizeof(event));
742 event.event = IW_CM_EVENT_DISCONNECT;
743 if (ep->com.cm_id) {
744 PDBG("peer close delivered ep %p cm_id %p tid %u\n",
745 ep, ep->com.cm_id, ep->hwtid);
746 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
747 }
748}
749
750static void peer_abort_upcall(struct c4iw_ep *ep)
751{
752 struct iw_cm_event event;
753
754 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
755 memset(&event, 0, sizeof(event));
756 event.event = IW_CM_EVENT_CLOSE;
757 event.status = -ECONNRESET;
758 if (ep->com.cm_id) {
759 PDBG("abort delivered ep %p cm_id %p tid %u\n", ep,
760 ep->com.cm_id, ep->hwtid);
761 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
762 ep->com.cm_id->rem_ref(ep->com.cm_id);
763 ep->com.cm_id = NULL;
764 ep->com.qp = NULL;
765 }
766}
767
768static void connect_reply_upcall(struct c4iw_ep *ep, int status)
769{
770 struct iw_cm_event event;
771
772 PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status);
773 memset(&event, 0, sizeof(event));
774 event.event = IW_CM_EVENT_CONNECT_REPLY;
775 event.status = status;
776 event.local_addr = ep->com.local_addr;
777 event.remote_addr = ep->com.remote_addr;
778
779 if ((status == 0) || (status == -ECONNREFUSED)) {
780 event.private_data_len = ep->plen;
781 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
782 }
783 if (ep->com.cm_id) {
784 PDBG("%s ep %p tid %u status %d\n", __func__, ep,
785 ep->hwtid, status);
786 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
787 }
788 if (status < 0) {
789 ep->com.cm_id->rem_ref(ep->com.cm_id);
790 ep->com.cm_id = NULL;
791 ep->com.qp = NULL;
792 }
793}
794
795static void connect_request_upcall(struct c4iw_ep *ep)
796{
797 struct iw_cm_event event;
798
799 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
800 memset(&event, 0, sizeof(event));
801 event.event = IW_CM_EVENT_CONNECT_REQUEST;
802 event.local_addr = ep->com.local_addr;
803 event.remote_addr = ep->com.remote_addr;
804 event.private_data_len = ep->plen;
805 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
806 event.provider_data = ep;
807 if (state_read(&ep->parent_ep->com) != DEAD) {
808 c4iw_get_ep(&ep->com);
809 ep->parent_ep->com.cm_id->event_handler(
810 ep->parent_ep->com.cm_id,
811 &event);
812 }
813 c4iw_put_ep(&ep->parent_ep->com);
814 ep->parent_ep = NULL;
815}
816
817static void established_upcall(struct c4iw_ep *ep)
818{
819 struct iw_cm_event event;
820
821 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
822 memset(&event, 0, sizeof(event));
823 event.event = IW_CM_EVENT_ESTABLISHED;
824 if (ep->com.cm_id) {
825 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
826 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
827 }
828}
829
830static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
831{
832 struct cpl_rx_data_ack *req;
833 struct sk_buff *skb;
834 int wrlen = roundup(sizeof *req, 16);
835
836 PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
837 skb = get_skb(NULL, wrlen, GFP_KERNEL);
838 if (!skb) {
839 printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
840 return 0;
841 }
842
843 req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
844 memset(req, 0, wrlen);
845 INIT_TP_WR(req, ep->hwtid);
846 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
847 ep->hwtid));
848 req->credit_dack = cpu_to_be32(credits);
849 set_wr_txq(skb, CPL_PRIORITY_ACK, ep->txq_idx);
850 c4iw_ofld_send(&ep->com.dev->rdev, skb);
851 return credits;
852}
853
854static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
855{
856 struct mpa_message *mpa;
857 u16 plen;
858 struct c4iw_qp_attributes attrs;
859 enum c4iw_qp_attr_mask mask;
860 int err;
861
862 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
863
864 /*
865 * Stop mpa timer. If it expired, then the state has
866 * changed and we bail since ep_timeout already aborted
867 * the connection.
868 */
869 stop_ep_timer(ep);
870 if (state_read(&ep->com) != MPA_REQ_SENT)
871 return;
872
873 /*
874 * If we get more than the supported amount of private data
875 * then we must fail this connection.
876 */
877 if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
878 err = -EINVAL;
879 goto err;
880 }
881
882 /*
883 * copy the new data into our accumulation buffer.
884 */
885 skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
886 skb->len);
887 ep->mpa_pkt_len += skb->len;
888
889 /*
890 * if we don't even have the mpa message, then bail.
891 */
892 if (ep->mpa_pkt_len < sizeof(*mpa))
893 return;
894 mpa = (struct mpa_message *) ep->mpa_pkt;
895
896 /* Validate MPA header. */
897 if (mpa->revision != mpa_rev) {
898 err = -EPROTO;
899 goto err;
900 }
901 if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
902 err = -EPROTO;
903 goto err;
904 }
905
906 plen = ntohs(mpa->private_data_size);
907
908 /*
909 * Fail if there's too much private data.
910 */
911 if (plen > MPA_MAX_PRIVATE_DATA) {
912 err = -EPROTO;
913 goto err;
914 }
915
916 /*
917 * If plen does not account for pkt size
918 */
919 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
920 err = -EPROTO;
921 goto err;
922 }
923
924 ep->plen = (u8) plen;
925
926 /*
927 * If we don't have all the pdata yet, then bail.
 928	 * We'll continue processing when more data arrives.
929 */
930 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
931 return;
932
933 if (mpa->flags & MPA_REJECT) {
934 err = -ECONNREFUSED;
935 goto err;
936 }
937
938 /*
939 * If we get here we have accumulated the entire mpa
940 * start reply message including private data. And
941 * the MPA header is valid.
942 */
943 state_set(&ep->com, FPDU_MODE);
944 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
945 ep->mpa_attr.recv_marker_enabled = markers_enabled;
946 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
947 ep->mpa_attr.version = mpa_rev;
948 ep->mpa_attr.p2p_type = peer2peer ? p2p_type :
949 FW_RI_INIT_P2PTYPE_DISABLED;
950 PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
951 "xmit_marker_enabled=%d, version=%d\n", __func__,
952 ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
953 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);
954
955 attrs.mpa_attr = ep->mpa_attr;
956 attrs.max_ird = ep->ird;
957 attrs.max_ord = ep->ord;
958 attrs.llp_stream_handle = ep;
959 attrs.next_state = C4IW_QP_STATE_RTS;
960
961 mask = C4IW_QP_ATTR_NEXT_STATE |
962 C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
963 C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;
964
965 /* bind QP and TID with INIT_WR */
966 err = c4iw_modify_qp(ep->com.qp->rhp,
967 ep->com.qp, mask, &attrs, 1);
968 if (err)
969 goto err;
970 goto out;
971err:
972 abort_connection(ep, skb, GFP_KERNEL);
973out:
974 connect_reply_upcall(ep, err);
975 return;
976}
977
978static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
979{
980 struct mpa_message *mpa;
981 u16 plen;
982
983 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
984
985 if (state_read(&ep->com) != MPA_REQ_WAIT)
986 return;
987
988 /*
989 * If we get more than the supported amount of private data
990 * then we must fail this connection.
991 */
992 if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
993 stop_ep_timer(ep);
994 abort_connection(ep, skb, GFP_KERNEL);
995 return;
996 }
997
998 PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
999
1000 /*
1001 * Copy the new data into our accumulation buffer.
1002 */
1003 skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
1004 skb->len);
1005 ep->mpa_pkt_len += skb->len;
1006
1007 /*
1008 * If we don't even have the mpa message, then bail.
1010	 * We'll continue processing when more data arrives.
1010 */
1011 if (ep->mpa_pkt_len < sizeof(*mpa))
1012 return;
1013
1014 PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
1015 stop_ep_timer(ep);
1016 mpa = (struct mpa_message *) ep->mpa_pkt;
1017
1018 /*
1019 * Validate MPA Header.
1020 */
1021 if (mpa->revision != mpa_rev) {
1022 abort_connection(ep, skb, GFP_KERNEL);
1023 return;
1024 }
1025
1026 if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
1027 abort_connection(ep, skb, GFP_KERNEL);
1028 return;
1029 }
1030
1031 plen = ntohs(mpa->private_data_size);
1032
1033 /*
1034 * Fail if there's too much private data.
1035 */
1036 if (plen > MPA_MAX_PRIVATE_DATA) {
1037 abort_connection(ep, skb, GFP_KERNEL);
1038 return;
1039 }
1040
1041 /*
1042 * If plen does not account for pkt size
1043 */
1044 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
1045 abort_connection(ep, skb, GFP_KERNEL);
1046 return;
1047 }
1048 ep->plen = (u8) plen;
1049
1050 /*
1051 * If we don't have all the pdata yet, then bail.
1052 */
1053 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
1054 return;
1055
1056 /*
1057 * If we get here we have accumulated the entire mpa
1058	 * start request message including private data.
1059 */
1060 ep->mpa_attr.initiator = 0;
1061 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
1062 ep->mpa_attr.recv_marker_enabled = markers_enabled;
1063 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
1064 ep->mpa_attr.version = mpa_rev;
1065 ep->mpa_attr.p2p_type = peer2peer ? p2p_type :
1066 FW_RI_INIT_P2PTYPE_DISABLED;
1067 PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
1068 "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__,
1069 ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
1070 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
1071 ep->mpa_attr.p2p_type);
1072
1073 state_set(&ep->com, MPA_REQ_RCVD);
1074
1075 /* drive upcall */
1076 connect_request_upcall(ep);
1077 return;
1078}
1079
1080static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
1081{
1082 struct c4iw_ep *ep;
1083 struct cpl_rx_data *hdr = cplhdr(skb);
1084 unsigned int dlen = ntohs(hdr->len);
1085 unsigned int tid = GET_TID(hdr);
1086 struct tid_info *t = dev->rdev.lldi.tids;
1087
1088 ep = lookup_tid(t, tid);
1089 PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
1090 skb_pull(skb, sizeof(*hdr));
1091 skb_trim(skb, dlen);
1092
1093 ep->rcv_seq += dlen;
1094 BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen));
1095
1096 /* update RX credits */
1097 update_rx_credits(ep, dlen);
1098
1099 switch (state_read(&ep->com)) {
1100 case MPA_REQ_SENT:
1101 process_mpa_reply(ep, skb);
1102 break;
1103 case MPA_REQ_WAIT:
1104 process_mpa_request(ep, skb);
1105 break;
1106 case MPA_REP_SENT:
1107 break;
1108 default:
1109 printk(KERN_ERR MOD "%s Unexpected streaming data."
1110 " ep %p state %d tid %u\n",
1111 __func__, ep, state_read(&ep->com), ep->hwtid);
1112
1113 /*
1114 * The ep will timeout and inform the ULP of the failure.
1115 * See ep_timeout().
1116 */
1117 break;
1118 }
1119 return 0;
1120}
1121
1122static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1123{
1124 struct c4iw_ep *ep;
1125 struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
1126 unsigned long flags;
1127 int release = 0;
1128 unsigned int tid = GET_TID(rpl);
1129 struct tid_info *t = dev->rdev.lldi.tids;
1130
1131 ep = lookup_tid(t, tid);
1132 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1133 BUG_ON(!ep);
1134 spin_lock_irqsave(&ep->com.lock, flags);
1135 switch (ep->com.state) {
1136 case ABORTING:
1137 __state_set(&ep->com, DEAD);
1138 release = 1;
1139 break;
1140 default:
1141 printk(KERN_ERR "%s ep %p state %d\n",
1142 __func__, ep, ep->com.state);
1143 break;
1144 }
1145 spin_unlock_irqrestore(&ep->com.lock, flags);
1146
1147 if (release)
1148 release_ep_resources(ep);
1149 return 0;
1150}
1151
1152/*
1153 * Return whether a failed active open has allocated a TID
1154 */
1155static inline int act_open_has_tid(int status)
1156{
1157 return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
1158 status != CPL_ERR_ARP_MISS;
1159}
1160
1161static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1162{
1163 struct c4iw_ep *ep;
1164 struct cpl_act_open_rpl *rpl = cplhdr(skb);
1165 unsigned int atid = GET_TID_TID(GET_AOPEN_ATID(
1166 ntohl(rpl->atid_status)));
1167 struct tid_info *t = dev->rdev.lldi.tids;
1168 int status = GET_AOPEN_STATUS(ntohl(rpl->atid_status));
1169
1170 ep = lookup_atid(t, atid);
1171
1172 PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
1173 status, status2errno(status));
1174
1175 if (status == CPL_ERR_RTX_NEG_ADVICE) {
1176 printk(KERN_WARNING MOD "Connection problems for atid %u\n",
1177 atid);
1178 return 0;
1179 }
1180
1181 connect_reply_upcall(ep, status2errno(status));
1182 state_set(&ep->com, DEAD);
1183
1184 if (status && act_open_has_tid(status))
1185 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl));
1186
1187 cxgb4_free_atid(t, atid);
1188 dst_release(ep->dst);
1189 cxgb4_l2t_release(ep->l2t);
1190 c4iw_put_ep(&ep->com);
1191
1192 return 0;
1193}
1194
1195static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1196{
1197 struct cpl_pass_open_rpl *rpl = cplhdr(skb);
1198 struct tid_info *t = dev->rdev.lldi.tids;
1199 unsigned int stid = GET_TID(rpl);
1200 struct c4iw_listen_ep *ep = lookup_stid(t, stid);
1201
1202 if (!ep) {
1203 printk(KERN_ERR MOD "stid %d lookup failure!\n", stid);
1204 return 0;
1205 }
1206 PDBG("%s ep %p status %d error %d\n", __func__, ep,
1207 rpl->status, status2errno(rpl->status));
1208 ep->com.rpl_err = status2errno(rpl->status);
1209 ep->com.rpl_done = 1;
1210 wake_up(&ep->com.waitq);
1211
1212 return 0;
1213}
1214
1215static int listen_stop(struct c4iw_listen_ep *ep)
1216{
1217 struct sk_buff *skb;
1218 struct cpl_close_listsvr_req *req;
1219
1220 PDBG("%s ep %p\n", __func__, ep);
1221 skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
1222 if (!skb) {
1223 printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
1224 return -ENOMEM;
1225 }
1226 req = (struct cpl_close_listsvr_req *) skb_put(skb, sizeof(*req));
1227 INIT_TP_WR(req, 0);
1228 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ,
1229 ep->stid));
1230 req->reply_ctrl = cpu_to_be16(
1231 QUEUENO(ep->com.dev->rdev.lldi.rxq_ids[0]));
1232 set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
1233 return c4iw_ofld_send(&ep->com.dev->rdev, skb);
1234}
1235
1236static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1237{
1238 struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
1239 struct tid_info *t = dev->rdev.lldi.tids;
1240 unsigned int stid = GET_TID(rpl);
1241 struct c4iw_listen_ep *ep = lookup_stid(t, stid);
1242
1243 PDBG("%s ep %p\n", __func__, ep);
1244 ep->com.rpl_err = status2errno(rpl->status);
1245 ep->com.rpl_done = 1;
1246 wake_up(&ep->com.waitq);
1247 return 0;
1248}
1249
1250static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb,
1251 struct cpl_pass_accept_req *req)
1252{
1253 struct cpl_pass_accept_rpl *rpl;
1254 unsigned int mtu_idx;
1255 u64 opt0;
1256 u32 opt2;
1257 int wscale;
1258
1259 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1260 BUG_ON(skb_cloned(skb));
1261 skb_trim(skb, sizeof(*rpl));
1262 skb_get(skb);
1263 cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
1264 wscale = compute_wscale(rcv_win);
1265 opt0 = KEEP_ALIVE(1) |
1266 WND_SCALE(wscale) |
1267 MSS_IDX(mtu_idx) |
1268 L2T_IDX(ep->l2t->idx) |
1269 TX_CHAN(ep->tx_chan) |
1270 SMAC_SEL(ep->smac_idx) |
1271 DSCP(ep->tos) |
1272 RCV_BUFSIZ(rcv_win>>10);
1273 opt2 = RX_CHANNEL(0) |
1274 RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
1275
1276 if (enable_tcp_timestamps && req->tcpopt.tstamp)
1277 opt2 |= TSTAMPS_EN(1);
1278 if (enable_tcp_sack && req->tcpopt.sack)
1279 opt2 |= SACK_EN(1);
1280 if (wscale && enable_tcp_window_scaling)
1281 opt2 |= WND_SCALE_EN(1);
1282
1283 rpl = cplhdr(skb);
1284 INIT_TP_WR(rpl, ep->hwtid);
1285 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
1286 ep->hwtid));
1287 rpl->opt0 = cpu_to_be64(opt0);
1288 rpl->opt2 = cpu_to_be32(opt2);
1289 set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->txq_idx);
1290 c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
1291
1292 return;
1293}
1294
1295static void reject_cr(struct c4iw_dev *dev, u32 hwtid, __be32 peer_ip,
1296 struct sk_buff *skb)
1297{
1298 PDBG("%s c4iw_dev %p tid %u peer_ip %x\n", __func__, dev, hwtid,
1299 peer_ip);
1300 BUG_ON(skb_cloned(skb));
1301 skb_trim(skb, sizeof(struct cpl_tid_release));
1302 skb_get(skb);
1303 release_tid(&dev->rdev, hwtid, skb);
1304 return;
1305}
1306
1307static void get_4tuple(struct cpl_pass_accept_req *req,
1308 __be32 *local_ip, __be32 *peer_ip,
1309 __be16 *local_port, __be16 *peer_port)
1310{
1311 int eth_len = G_ETH_HDR_LEN(be32_to_cpu(req->hdr_len));
1312 int ip_len = G_IP_HDR_LEN(be32_to_cpu(req->hdr_len));
1313 struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
1314 struct tcphdr *tcp = (struct tcphdr *)
1315 ((u8 *)(req + 1) + eth_len + ip_len);
1316
1317 PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__,
1318 ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source),
1319 ntohs(tcp->dest));
1320
1321 *peer_ip = ip->saddr;
1322 *local_ip = ip->daddr;
1323 *peer_port = tcp->source;
1324 *local_port = tcp->dest;
1325
1326 return;
1327}
1328
1329static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
1330{
1331 struct c4iw_ep *child_ep, *parent_ep;
1332 struct cpl_pass_accept_req *req = cplhdr(skb);
1333 unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid));
1334 struct tid_info *t = dev->rdev.lldi.tids;
1335 unsigned int hwtid = GET_TID(req);
1336 struct dst_entry *dst;
1337 struct l2t_entry *l2t;
1338 struct rtable *rt;
1339 __be32 local_ip, peer_ip;
1340 __be16 local_port, peer_port;
1341 struct net_device *pdev;
1342 u32 tx_chan, smac_idx;
1343 u16 rss_qid;
1344 u32 mtu;
1345 int step;
1346 int txq_idx;
1347
1348 parent_ep = lookup_stid(t, stid);
1349 PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);
1350
1351 get_4tuple(req, &local_ip, &peer_ip, &local_port, &peer_port);
1352
1353 if (state_read(&parent_ep->com) != LISTEN) {
1354 printk(KERN_ERR "%s - listening ep not in LISTEN\n",
1355 __func__);
1356 goto reject;
1357 }
1358
1359 /* Find output route */
1360 rt = find_route(dev, local_ip, peer_ip, local_port, peer_port,
1361 GET_POPEN_TOS(ntohl(req->tos_stid)));
1362 if (!rt) {
1363 printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
1364 __func__);
1365 goto reject;
1366 }
1367 dst = &rt->u.dst;
1368 if (dst->neighbour->dev->flags & IFF_LOOPBACK) {
1369 pdev = ip_dev_find(&init_net, peer_ip);
1370 BUG_ON(!pdev);
1371 l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, dst->neighbour,
1372 pdev, 0);
1373 mtu = pdev->mtu;
1374 tx_chan = cxgb4_port_chan(pdev);
1375 smac_idx = tx_chan << 1;
1376 step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
1377 txq_idx = cxgb4_port_idx(pdev) * step;
1378 step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
1379 rss_qid = dev->rdev.lldi.rxq_ids[cxgb4_port_idx(pdev) * step];
1380 dev_put(pdev);
1381 } else {
1382 l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, dst->neighbour,
1383 dst->neighbour->dev, 0);
1384 mtu = dst_mtu(dst);
1385 tx_chan = cxgb4_port_chan(dst->neighbour->dev);
1386 smac_idx = tx_chan << 1;
1387 step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
1388 txq_idx = cxgb4_port_idx(dst->neighbour->dev) * step;
1389 step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
1390 rss_qid = dev->rdev.lldi.rxq_ids[
1391 cxgb4_port_idx(dst->neighbour->dev) * step];
1392 }
1393 if (!l2t) {
1394 printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
1395 __func__);
1396 dst_release(dst);
1397 goto reject;
1398 }
1399
1400 child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
1401 if (!child_ep) {
1402 printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
1403 __func__);
1404 cxgb4_l2t_release(l2t);
1405 dst_release(dst);
1406 goto reject;
1407 }
1408 state_set(&child_ep->com, CONNECTING);
1409 child_ep->com.dev = dev;
1410 child_ep->com.cm_id = NULL;
1411 child_ep->com.local_addr.sin_family = PF_INET;
1412 child_ep->com.local_addr.sin_port = local_port;
1413 child_ep->com.local_addr.sin_addr.s_addr = local_ip;
1414 child_ep->com.remote_addr.sin_family = PF_INET;
1415 child_ep->com.remote_addr.sin_port = peer_port;
1416 child_ep->com.remote_addr.sin_addr.s_addr = peer_ip;
1417 c4iw_get_ep(&parent_ep->com);
1418 child_ep->parent_ep = parent_ep;
1419 child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid));
1420 child_ep->l2t = l2t;
1421 child_ep->dst = dst;
1422 child_ep->hwtid = hwtid;
1423 child_ep->tx_chan = tx_chan;
1424 child_ep->smac_idx = smac_idx;
1425 child_ep->rss_qid = rss_qid;
1426 child_ep->mtu = mtu;
1427 child_ep->txq_idx = txq_idx;
1428
1429 PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
1430 tx_chan, smac_idx, rss_qid);
1431
1432 init_timer(&child_ep->timer);
1433 cxgb4_insert_tid(t, child_ep, hwtid);
1434 accept_cr(child_ep, peer_ip, skb, req);
1435 goto out;
1436reject:
1437 reject_cr(dev, hwtid, peer_ip, skb);
1438out:
1439 return 0;
1440}
1441
1442static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
1443{
1444 struct c4iw_ep *ep;
1445 struct cpl_pass_establish *req = cplhdr(skb);
1446 struct tid_info *t = dev->rdev.lldi.tids;
1447 unsigned int tid = GET_TID(req);
1448
1449 ep = lookup_tid(t, tid);
1450 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1451 ep->snd_seq = be32_to_cpu(req->snd_isn);
1452 ep->rcv_seq = be32_to_cpu(req->rcv_isn);
1453
1454 set_emss(ep, ntohs(req->tcp_opt));
1455
1456 dst_confirm(ep->dst);
1457 state_set(&ep->com, MPA_REQ_WAIT);
1458 start_ep_timer(ep);
1459 send_flowc(ep, skb);
1460
1461 return 0;
1462}
1463
1464static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
1465{
1466 struct cpl_peer_close *hdr = cplhdr(skb);
1467 struct c4iw_ep *ep;
1468 struct c4iw_qp_attributes attrs;
1469 unsigned long flags;
1470 int disconnect = 1;
1471 int release = 0;
1472 int closing = 0;
1473 struct tid_info *t = dev->rdev.lldi.tids;
1474 unsigned int tid = GET_TID(hdr);
1475 int start_timer = 0;
1476 int stop_timer = 0;
1477
1478 ep = lookup_tid(t, tid);
1479 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1480 dst_confirm(ep->dst);
1481
1482 spin_lock_irqsave(&ep->com.lock, flags);
1483 switch (ep->com.state) {
1484 case MPA_REQ_WAIT:
1485 __state_set(&ep->com, CLOSING);
1486 break;
1487 case MPA_REQ_SENT:
1488 __state_set(&ep->com, CLOSING);
1489 connect_reply_upcall(ep, -ECONNRESET);
1490 break;
1491 case MPA_REQ_RCVD:
1492
1493 /*
1494 * We're gonna mark this puppy DEAD, but keep
1495 * the reference on it until the ULP accepts or
1496 * rejects the CR. Also wake up anyone waiting
1497 * in rdma connection migration (see c4iw_accept_cr()).
1498 */
1499 __state_set(&ep->com, CLOSING);
1500 ep->com.rpl_done = 1;
1501 ep->com.rpl_err = -ECONNRESET;
1502 PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
1503 wake_up(&ep->com.waitq);
1504 break;
1505 case MPA_REP_SENT:
1506 __state_set(&ep->com, CLOSING);
1507 ep->com.rpl_done = 1;
1508 ep->com.rpl_err = -ECONNRESET;
1509 PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
1510 wake_up(&ep->com.waitq);
1511 break;
1512 case FPDU_MODE:
1513 start_timer = 1;
1514 __state_set(&ep->com, CLOSING);
1515 closing = 1;
1516 peer_close_upcall(ep);
1517 break;
1518 case ABORTING:
1519 disconnect = 0;
1520 break;
1521 case CLOSING:
1522 __state_set(&ep->com, MORIBUND);
1523 disconnect = 0;
1524 break;
1525 case MORIBUND:
1526 stop_timer = 1;
1527 if (ep->com.cm_id && ep->com.qp) {
1528 attrs.next_state = C4IW_QP_STATE_IDLE;
1529 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
1530 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
1531 }
1532 close_complete_upcall(ep);
1533 __state_set(&ep->com, DEAD);
1534 release = 1;
1535 disconnect = 0;
1536 break;
1537 case DEAD:
1538 disconnect = 0;
1539 break;
1540 default:
1541 BUG_ON(1);
1542 }
1543 spin_unlock_irqrestore(&ep->com.lock, flags);
1544 if (closing) {
1545 attrs.next_state = C4IW_QP_STATE_CLOSING;
1546 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
1547 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
1548 }
1549 if (start_timer)
1550 start_ep_timer(ep);
1551 if (stop_timer)
1552 stop_ep_timer(ep);
1553 if (disconnect)
1554 c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
1555 if (release)
1556 release_ep_resources(ep);
1557 return 0;
1558}
1559
1560/*
1561 * Returns whether an ABORT_REQ_RSS message is a negative advice.
1562 */
1563static int is_neg_adv_abort(unsigned int status)
1564{
1565 return status == CPL_ERR_RTX_NEG_ADVICE ||
1566 status == CPL_ERR_PERSIST_NEG_ADVICE;
1567}
1568
1569static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
1570{
1571 struct cpl_abort_req_rss *req = cplhdr(skb);
1572 struct c4iw_ep *ep;
1573 struct cpl_abort_rpl *rpl;
1574 struct sk_buff *rpl_skb;
1575 struct c4iw_qp_attributes attrs;
1576 int ret;
1577 int release = 0;
1578 unsigned long flags;
1579 struct tid_info *t = dev->rdev.lldi.tids;
1580 unsigned int tid = GET_TID(req);
1581 int stop_timer = 0;
1582
1583 ep = lookup_tid(t, tid);
1584 if (is_neg_adv_abort(req->status)) {
1585 PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
1586 ep->hwtid);
1587 return 0;
1588 }
1589 spin_lock_irqsave(&ep->com.lock, flags);
1590 PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
1591 ep->com.state);
1592 switch (ep->com.state) {
1593 case CONNECTING:
1594 break;
1595 case MPA_REQ_WAIT:
1596 stop_timer = 1;
1597 break;
1598 case MPA_REQ_SENT:
1599 stop_timer = 1;
1600 connect_reply_upcall(ep, -ECONNRESET);
1601 break;
1602 case MPA_REP_SENT:
1603 ep->com.rpl_done = 1;
1604 ep->com.rpl_err = -ECONNRESET;
1605 PDBG("waking up ep %p\n", ep);
1606 wake_up(&ep->com.waitq);
1607 break;
1608 case MPA_REQ_RCVD:
1609
1610 /*
1611 * We're gonna mark this puppy DEAD, but keep
1612 * the reference on it until the ULP accepts or
1613 * rejects the CR. Also wake up anyone waiting
1614 * in rdma connection migration (see c4iw_accept_cr()).
1615 */
1616 ep->com.rpl_done = 1;
1617 ep->com.rpl_err = -ECONNRESET;
1618 PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
1619 wake_up(&ep->com.waitq);
1620 break;
1621 case MORIBUND:
1622 case CLOSING:
1623 stop_timer = 1;
1624 /*FALLTHROUGH*/
1625 case FPDU_MODE:
1626 if (ep->com.cm_id && ep->com.qp) {
1627 attrs.next_state = C4IW_QP_STATE_ERROR;
1628 ret = c4iw_modify_qp(ep->com.qp->rhp,
1629 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
1630 &attrs, 1);
1631 if (ret)
1632 printk(KERN_ERR MOD
1633 "%s - qp <- error failed!\n",
1634 __func__);
1635 }
1636 peer_abort_upcall(ep);
1637 break;
1638 case ABORTING:
1639 break;
1640 case DEAD:
1641 PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
1642 spin_unlock_irqrestore(&ep->com.lock, flags);
1643 return 0;
1644 default:
1645 BUG_ON(1);
1646 break;
1647 }
1648 dst_confirm(ep->dst);
1649 if (ep->com.state != ABORTING) {
1650 __state_set(&ep->com, DEAD);
1651 release = 1;
1652 }
1653 spin_unlock_irqrestore(&ep->com.lock, flags);
1654
1655 rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
1656 if (!rpl_skb) {
1657 printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
1658 __func__);
1659 release = 1;
1660 goto out;
1661 }
1662 set_wr_txq(rpl_skb, CPL_PRIORITY_DATA, ep->txq_idx);
1663 rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
1664 INIT_TP_WR(rpl, ep->hwtid);
1665 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
1666 rpl->cmd = CPL_ABORT_NO_RST;
1667 c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
1668out:
1669 if (stop_timer)
1670 stop_ep_timer(ep);
1671 if (release)
1672 release_ep_resources(ep);
1673 return 0;
1674}
1675
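/*
 * Handle CPL_CLOSE_CON_RPL: the hardware has completed our half-close.
 * In MORIBUND the QP (if any) is moved to IDLE, the close is completed
 * to the ULP, and the endpoint is released.
 */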
1676static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1677{
1678 struct c4iw_ep *ep;
1679 struct c4iw_qp_attributes attrs;
1680 struct cpl_close_con_rpl *rpl = cplhdr(skb);
1681 unsigned long flags;
1682 int release = 0;
1683 struct tid_info *t = dev->rdev.lldi.tids;
1684 unsigned int tid = GET_TID(rpl);
1685 int stop_timer = 0;
1686
1687 ep = lookup_tid(t, tid);
1688
 1689 BUG_ON(!ep);
 1690 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1691
1692 /* The cm_id may be null if we failed to connect */
1693 spin_lock_irqsave(&ep->com.lock, flags);
1694 switch (ep->com.state) {
1695 case CLOSING:
1696 __state_set(&ep->com, MORIBUND);
1697 break;
1698 case MORIBUND:
1699 stop_timer = 1;
1700 if ((ep->com.cm_id) && (ep->com.qp)) {
1701 attrs.next_state = C4IW_QP_STATE_IDLE;
1702 c4iw_modify_qp(ep->com.qp->rhp,
1703 ep->com.qp,
1704 C4IW_QP_ATTR_NEXT_STATE,
1705 &attrs, 1);
1706 }
1707 close_complete_upcall(ep);
1708 __state_set(&ep->com, DEAD);
1709 release = 1;
1710 break;
1711 case ABORTING:
1712 case DEAD:
1713 break;
1714 default:
1715 BUG_ON(1);
1716 break;
1717 }
1718 spin_unlock_irqrestore(&ep->com.lock, flags);
1719 if (stop_timer)
1720 stop_ep_timer(ep);
1721 if (release)
1722 release_ep_resources(ep);
1723 return 0;
1724}
1725
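/*
 * Handle CPL_RDMA_TERMINATE: copy the peer's TERMINATE message into the
 * QP's terminate buffer and record its length for later processing.
 */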
1726static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
1727{
1728 struct c4iw_ep *ep;
1729 struct cpl_rdma_terminate *term = cplhdr(skb);
1730 struct tid_info *t = dev->rdev.lldi.tids;
1731 unsigned int tid = GET_TID(term);
1732
1733 ep = lookup_tid(t, tid);
1734
1735 if (state_read(&ep->com) != FPDU_MODE)
1736 return 0;
1737
1738 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1739 skb_pull(skb, sizeof *term);
1740 PDBG("%s saving %d bytes of term msg\n", __func__, skb->len);
1741 skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer,
1742 skb->len);
1743 ep->com.qp->attr.terminate_msg_len = skb->len;
1744 ep->com.qp->attr.is_terminate_local = 0;
1745 return 0;
1746}
1747
1748/*
1749 * Upcall from the adapter indicating data has been transmitted.
1750 * For us it's just the single MPA request or reply. We can now free
1751 * the skb holding the mpa message.
1752 */
1753static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
1754{
1755 struct c4iw_ep *ep;
1756 struct cpl_fw4_ack *hdr = cplhdr(skb);
1757 u8 credits = hdr->credits;
1758 unsigned int tid = GET_TID(hdr);
1759 struct tid_info *t = dev->rdev.lldi.tids;
1760
1761
1762 ep = lookup_tid(t, tid);
1763 PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
1764 if (credits == 0) {
 1765 PDBG("%s 0 credit ack ep %p tid %u state %u\n",
 1766 __func__, ep, ep->hwtid, state_read(&ep->com));
1767 return 0;
1768 }
1769
1770 dst_confirm(ep->dst);
1771 if (ep->mpa_skb) {
1772 PDBG("%s last streaming msg ack ep %p tid %u state %u "
1773 "initiator %u freeing skb\n", __func__, ep, ep->hwtid,
1774 state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0);
1775 kfree_skb(ep->mpa_skb);
1776 ep->mpa_skb = NULL;
1777 }
1778 return 0;
1779}
1780
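/*
 * iw_cm reject handler: for mpa_rev 0 the connection is simply aborted;
 * otherwise an MPA reject is sent and a normal disconnect is started.
 */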
1781int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
1782{
1783 int err;
1784 struct c4iw_ep *ep = to_ep(cm_id);
1785 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1786
1787 if (state_read(&ep->com) == DEAD) {
1788 c4iw_put_ep(&ep->com);
1789 return -ECONNRESET;
1790 }
1791 BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
1792 if (mpa_rev == 0)
1793 abort_connection(ep, NULL, GFP_KERNEL);
1794 else {
1795 err = send_mpa_reject(ep, pdata, pdata_len);
1796 err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
1797 }
1798 c4iw_put_ep(&ep->com);
1799 return 0;
1800}
1801
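/*
 * iw_cm accept handler: validate IRD/ORD, bind the QP to the endpoint,
 * move the QP to RTS via c4iw_modify_qp(), send the MPA reply, and
 * transition the endpoint to FPDU_MODE.
 */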
1802int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1803{
1804 int err;
1805 struct c4iw_qp_attributes attrs;
1806 enum c4iw_qp_attr_mask mask;
1807 struct c4iw_ep *ep = to_ep(cm_id);
1808 struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
1809 struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);
1810
1811 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1812 if (state_read(&ep->com) == DEAD) {
1813 err = -ECONNRESET;
1814 goto err;
1815 }
1816
1817 BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
1818 BUG_ON(!qp);
1819
1820 if ((conn_param->ord > c4iw_max_read_depth) ||
1821 (conn_param->ird > c4iw_max_read_depth)) {
1822 abort_connection(ep, NULL, GFP_KERNEL);
1823 err = -EINVAL;
1824 goto err;
1825 }
1826
1827 cm_id->add_ref(cm_id);
1828 ep->com.cm_id = cm_id;
1829 ep->com.qp = qp;
1830
1831 ep->ird = conn_param->ird;
1832 ep->ord = conn_param->ord;
1833
1834 if (peer2peer && ep->ird == 0)
1835 ep->ird = 1;
1836
1837 PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
1838
1839 /* bind QP to EP and move to RTS */
1840 attrs.mpa_attr = ep->mpa_attr;
1841 attrs.max_ird = ep->ird;
1842 attrs.max_ord = ep->ord;
1843 attrs.llp_stream_handle = ep;
1844 attrs.next_state = C4IW_QP_STATE_RTS;
1845
1846 /* bind QP and TID with INIT_WR */
1847 mask = C4IW_QP_ATTR_NEXT_STATE |
1848 C4IW_QP_ATTR_LLP_STREAM_HANDLE |
1849 C4IW_QP_ATTR_MPA_ATTR |
1850 C4IW_QP_ATTR_MAX_IRD |
1851 C4IW_QP_ATTR_MAX_ORD;
1852
1853 err = c4iw_modify_qp(ep->com.qp->rhp,
1854 ep->com.qp, mask, &attrs, 1);
1855 if (err)
1856 goto err1;
1857 err = send_mpa_reply(ep, conn_param->private_data,
1858 conn_param->private_data_len);
1859 if (err)
1860 goto err1;
1861
1862 state_set(&ep->com, FPDU_MODE);
1863 established_upcall(ep);
1864 c4iw_put_ep(&ep->com);
1865 return 0;
1866err1:
1867 ep->com.cm_id = NULL;
1868 ep->com.qp = NULL;
1869 cm_id->rem_ref(cm_id);
1870err:
1871 c4iw_put_ep(&ep->com);
1872 return err;
1873}
1874
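/*
 * iw_cm active connect: allocate an endpoint and an active TID, resolve
 * the route and L2T entry, then send the connect request to the card.
 */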
1875int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1876{
1877 int err = 0;
1878 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
1879 struct c4iw_ep *ep;
1880 struct rtable *rt;
1881 struct net_device *pdev;
1882 int step;
1883
1884 if ((conn_param->ord > c4iw_max_read_depth) ||
1885 (conn_param->ird > c4iw_max_read_depth)) {
1886 err = -EINVAL;
1887 goto out;
1888 }
1889 ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
1890 if (!ep) {
1891 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
1892 err = -ENOMEM;
1893 goto out;
1894 }
1895 init_timer(&ep->timer);
1896 ep->plen = conn_param->private_data_len;
1897 if (ep->plen)
1898 memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
1899 conn_param->private_data, ep->plen);
1900 ep->ird = conn_param->ird;
1901 ep->ord = conn_param->ord;
1902
1903 if (peer2peer && ep->ord == 0)
1904 ep->ord = 1;
1905
1906 cm_id->add_ref(cm_id);
1907 ep->com.dev = dev;
1908 ep->com.cm_id = cm_id;
1909 ep->com.qp = get_qhp(dev, conn_param->qpn);
1910 BUG_ON(!ep->com.qp);
1911 PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
1912 ep->com.qp, cm_id);
1913
1914 /*
1915 * Allocate an active TID to initiate a TCP connection.
1916 */
1917 ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep);
1918 if (ep->atid == -1) {
1919 printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
1920 err = -ENOMEM;
1921 goto fail2;
1922 }
1923
1924 PDBG("%s saddr 0x%x sport 0x%x raddr 0x%x rport 0x%x\n", __func__,
1925 ntohl(cm_id->local_addr.sin_addr.s_addr),
1926 ntohs(cm_id->local_addr.sin_port),
1927 ntohl(cm_id->remote_addr.sin_addr.s_addr),
1928 ntohs(cm_id->remote_addr.sin_port));
1929
1930 /* find a route */
1931 rt = find_route(dev,
1932 cm_id->local_addr.sin_addr.s_addr,
1933 cm_id->remote_addr.sin_addr.s_addr,
1934 cm_id->local_addr.sin_port,
1935 cm_id->remote_addr.sin_port, 0);
1936 if (!rt) {
1937 printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
1938 err = -EHOSTUNREACH;
1939 goto fail3;
1940 }
1941 ep->dst = &rt->u.dst;
1942
1943 /* get a l2t entry */
1944 if (ep->dst->neighbour->dev->flags & IFF_LOOPBACK) {
1945 PDBG("%s LOOPBACK\n", __func__);
1946 pdev = ip_dev_find(&init_net,
1947 cm_id->remote_addr.sin_addr.s_addr);
1948 ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
1949 ep->dst->neighbour,
1950 pdev, 0);
1951 ep->mtu = pdev->mtu;
1952 ep->tx_chan = cxgb4_port_chan(pdev);
1953 ep->smac_idx = ep->tx_chan << 1;
1954 step = ep->com.dev->rdev.lldi.ntxq /
1955 ep->com.dev->rdev.lldi.nchan;
1956 ep->txq_idx = cxgb4_port_idx(pdev) * step;
1957 step = ep->com.dev->rdev.lldi.nrxq /
1958 ep->com.dev->rdev.lldi.nchan;
1959 ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
1960 cxgb4_port_idx(pdev) * step];
1961 dev_put(pdev);
1962 } else {
1963 ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
1964 ep->dst->neighbour,
1965 ep->dst->neighbour->dev, 0);
1966 ep->mtu = dst_mtu(ep->dst);
1967 ep->tx_chan = cxgb4_port_chan(ep->dst->neighbour->dev);
1968 ep->smac_idx = ep->tx_chan << 1;
1969 step = ep->com.dev->rdev.lldi.ntxq /
1970 ep->com.dev->rdev.lldi.nchan;
1971 ep->txq_idx = cxgb4_port_idx(ep->dst->neighbour->dev) * step;
1972 step = ep->com.dev->rdev.lldi.nrxq /
1973 ep->com.dev->rdev.lldi.nchan;
1974 ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
1975 cxgb4_port_idx(ep->dst->neighbour->dev) * step];
1976 }
1977 if (!ep->l2t) {
1978 printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
1979 err = -ENOMEM;
1980 goto fail4;
1981 }
1982
1983 PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
1984 __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
1985 ep->l2t->idx);
1986
1987 state_set(&ep->com, CONNECTING);
1988 ep->tos = 0;
1989 ep->com.local_addr = cm_id->local_addr;
1990 ep->com.remote_addr = cm_id->remote_addr;
1991
1992 /* send connect request to rnic */
1993 err = send_connect(ep);
1994 if (!err)
1995 goto out;
1996
1997 cxgb4_l2t_release(ep->l2t);
1998fail4:
1999 dst_release(ep->dst);
2000fail3:
2001 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
2002fail2:
2003 cm_id->rem_ref(cm_id);
2004 c4iw_put_ep(&ep->com);
2005out:
2006 return err;
2007}
2008
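/*
 * iw_cm listen: allocate a listening endpoint and a server TID, create
 * the hardware server, and wait for the CPL_PASS_OPEN_RPL result.
 */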
2009int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
2010{
2011 int err = 0;
2012 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
2013 struct c4iw_listen_ep *ep;
2014
2015
2016 might_sleep();
2017
2018 ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
2019 if (!ep) {
2020 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
2021 err = -ENOMEM;
2022 goto fail1;
2023 }
2024 PDBG("%s ep %p\n", __func__, ep);
2025 cm_id->add_ref(cm_id);
2026 ep->com.cm_id = cm_id;
2027 ep->com.dev = dev;
2028 ep->backlog = backlog;
2029 ep->com.local_addr = cm_id->local_addr;
2030
2031 /*
2032 * Allocate a server TID.
2033 */
2034 ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep);
2035 if (ep->stid == -1) {
2036 printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
2037 err = -ENOMEM;
2038 goto fail2;
2039 }
2040
2041 state_set(&ep->com, LISTEN);
2042 err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0], ep->stid,
2043 ep->com.local_addr.sin_addr.s_addr,
2044 ep->com.local_addr.sin_port,
2045 ep->com.dev->rdev.lldi.rxq_ids[0]);
2046 if (err)
2047 goto fail3;
2048
2049 /* wait for pass_open_rpl */
2050 wait_event(ep->com.waitq, ep->com.rpl_done);
2051 err = ep->com.rpl_err;
2052 if (!err) {
2053 cm_id->provider_data = ep;
2054 goto out;
2055 }
2056fail3:
2057 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
2058fail2:
2059 cm_id->rem_ref(cm_id);
2060 c4iw_put_ep(&ep->com);
2061fail1:
2062out:
2063 return err;
2064}
2065
2066int c4iw_destroy_listen(struct iw_cm_id *cm_id)
2067{
2068 int err;
2069 struct c4iw_listen_ep *ep = to_listen_ep(cm_id);
2070
2071 PDBG("%s ep %p\n", __func__, ep);
2072
2073 might_sleep();
2074 state_set(&ep->com, DEAD);
2075 ep->com.rpl_done = 0;
2076 ep->com.rpl_err = 0;
2077 err = listen_stop(ep);
2078 if (err)
2079 goto done;
2080 wait_event(ep->com.waitq, ep->com.rpl_done);
2081 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
2082done:
2083 err = ep->com.rpl_err;
2084 cm_id->rem_ref(cm_id);
2085 c4iw_put_ep(&ep->com);
2086 return err;
2087}
2088
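/*
 * Begin tearing down a connection: depending on the current state and
 * the abrupt flag, either abort the connection or send a half-close,
 * adjusting the ep timer accordingly.
 */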
2089int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
2090{
2091 int ret = 0;
2092 unsigned long flags;
2093 int close = 0;
2094 int fatal = 0;
2095 struct c4iw_rdev *rdev;
2096 int start_timer = 0;
2097 int stop_timer = 0;
2098
2099 spin_lock_irqsave(&ep->com.lock, flags);
2100
2101 PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
2102 states[ep->com.state], abrupt);
2103
2104 rdev = &ep->com.dev->rdev;
2105 if (c4iw_fatal_error(rdev)) {
2106 fatal = 1;
2107 close_complete_upcall(ep);
2108 ep->com.state = DEAD;
2109 }
2110 switch (ep->com.state) {
2111 case MPA_REQ_WAIT:
2112 case MPA_REQ_SENT:
2113 case MPA_REQ_RCVD:
2114 case MPA_REP_SENT:
2115 case FPDU_MODE:
2116 close = 1;
2117 if (abrupt)
2118 ep->com.state = ABORTING;
2119 else {
2120 ep->com.state = CLOSING;
2121 start_timer = 1;
2122 }
2123 set_bit(CLOSE_SENT, &ep->com.flags);
2124 break;
2125 case CLOSING:
2126 if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
2127 close = 1;
2128 if (abrupt) {
2129 stop_timer = 1;
2130 ep->com.state = ABORTING;
2131 } else
2132 ep->com.state = MORIBUND;
2133 }
2134 break;
2135 case MORIBUND:
2136 case ABORTING:
2137 case DEAD:
2138 PDBG("%s ignoring disconnect ep %p state %u\n",
2139 __func__, ep, ep->com.state);
2140 break;
2141 default:
2142 BUG();
2143 break;
2144 }
2145
2146 spin_unlock_irqrestore(&ep->com.lock, flags);
2147 if (start_timer)
2148 start_ep_timer(ep);
2149 if (stop_timer)
2150 stop_ep_timer(ep);
2151 if (close) {
2152 if (abrupt)
2153 ret = abort_connection(ep, NULL, gfp);
2154 else
2155 ret = send_halfclose(ep, gfp);
2156 if (ret)
2157 fatal = 1;
2158 }
2159 if (fatal)
2160 release_ep_resources(ep);
2161 return ret;
2162}
2163
2164/*
2165 * These are the real handlers that are called from a
2166 * work queue.
2167 */
2168static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = {
2169 [CPL_ACT_ESTABLISH] = act_establish,
2170 [CPL_ACT_OPEN_RPL] = act_open_rpl,
2171 [CPL_RX_DATA] = rx_data,
2172 [CPL_ABORT_RPL_RSS] = abort_rpl,
2173 [CPL_ABORT_RPL] = abort_rpl,
2174 [CPL_PASS_OPEN_RPL] = pass_open_rpl,
2175 [CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
2176 [CPL_PASS_ACCEPT_REQ] = pass_accept_req,
2177 [CPL_PASS_ESTABLISH] = pass_establish,
2178 [CPL_PEER_CLOSE] = peer_close,
2179 [CPL_ABORT_REQ_RSS] = peer_abort,
2180 [CPL_CLOSE_CON_RPL] = close_con_rpl,
2181 [CPL_RDMA_TERMINATE] = terminate,
2182 [CPL_FW4_ACK] = fw4_ack
2183};
2184
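/*
 * An endpoint timer has fired: abort the connection unless the endpoint
 * is in a state where the timeout is unexpected.
 */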
2185static void process_timeout(struct c4iw_ep *ep)
2186{
2187 struct c4iw_qp_attributes attrs;
2188 int abort = 1;
2189
2190 spin_lock_irq(&ep->com.lock);
2191 PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
2192 ep->com.state);
2193 switch (ep->com.state) {
2194 case MPA_REQ_SENT:
2195 __state_set(&ep->com, ABORTING);
2196 connect_reply_upcall(ep, -ETIMEDOUT);
2197 break;
2198 case MPA_REQ_WAIT:
2199 __state_set(&ep->com, ABORTING);
2200 break;
2201 case CLOSING:
2202 case MORIBUND:
2203 if (ep->com.cm_id && ep->com.qp) {
2204 attrs.next_state = C4IW_QP_STATE_ERROR;
2205 c4iw_modify_qp(ep->com.qp->rhp,
2206 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
2207 &attrs, 1);
2208 }
2209 __state_set(&ep->com, ABORTING);
2210 break;
2211 default:
2212 printk(KERN_ERR "%s unexpected state ep %p tid %u state %u\n",
2213 __func__, ep, ep->hwtid, ep->com.state);
2214 WARN_ON(1);
2215 abort = 0;
2216 }
2217 spin_unlock_irq(&ep->com.lock);
2218 if (abort)
2219 abort_connection(ep, NULL, GFP_KERNEL);
2220 c4iw_put_ep(&ep->com);
2221}
2222
2223static void process_timedout_eps(void)
2224{
2225 struct c4iw_ep *ep;
2226
2227 spin_lock_irq(&timeout_lock);
2228 while (!list_empty(&timeout_list)) {
2229 struct list_head *tmp;
2230
2231 tmp = timeout_list.next;
2232 list_del(tmp);
2233 spin_unlock_irq(&timeout_lock);
2234 ep = list_entry(tmp, struct c4iw_ep, entry);
2235 process_timeout(ep);
2236 spin_lock_irq(&timeout_lock);
2237 }
2238 spin_unlock_irq(&timeout_lock);
2239}
2240
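/*
 * Work queue handler: drain the receive queue, dispatch each CPL message
 * to its handler, then process any endpoints whose timers have expired.
 */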
2241static void process_work(struct work_struct *work)
2242{
2243 struct sk_buff *skb = NULL;
2244 struct c4iw_dev *dev;
 2245 struct cpl_act_establish *rpl;
2246 unsigned int opcode;
2247 int ret;
2248
2249 while ((skb = skb_dequeue(&rxq))) {
2250 rpl = cplhdr(skb);
2251 dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
2252 opcode = rpl->ot.opcode;
2253
2254 BUG_ON(!work_handlers[opcode]);
2255 ret = work_handlers[opcode](dev, skb);
2256 if (!ret)
2257 kfree_skb(skb);
2258 }
2259 process_timedout_eps();
2260}
2261
2262static DECLARE_WORK(skb_work, process_work);
2263
2264static void ep_timeout(unsigned long arg)
2265{
2266 struct c4iw_ep *ep = (struct c4iw_ep *)arg;
2267
2268 spin_lock(&timeout_lock);
2269 list_add_tail(&ep->entry, &timeout_list);
2270 spin_unlock(&timeout_lock);
2271 queue_work(workq, &skb_work);
2272}
2273
2274/*
2275 * All CM events are handled on a work queue so they run in a safe, sleepable context.
2276 */
2277static int sched(struct c4iw_dev *dev, struct sk_buff *skb)
2278{
2279
2280 /*
2281 * Save dev in the skb->cb area.
2282 */
2283 *((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev;
2284
2285 /*
2286 * Queue the skb and schedule the worker thread.
2287 */
2288 skb_queue_tail(&rxq, skb);
2289 queue_work(workq, &skb_work);
2290 return 0;
2291}
2292
2293static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
2294{
2295 struct cpl_set_tcb_rpl *rpl = cplhdr(skb);
2296
2297 if (rpl->status != CPL_ERR_NONE) {
2298 printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
2299 "for tid %u\n", rpl->status, GET_TID(rpl));
2300 }
2301 return 0;
2302}
2303
2304static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
2305{
2306 struct cpl_fw6_msg *rpl = cplhdr(skb);
2307 struct c4iw_wr_wait *wr_waitp;
2308 int ret;
2309
2310 PDBG("%s type %u\n", __func__, rpl->type);
2311
2312 switch (rpl->type) {
2313 case 1:
2314 ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
2315 wr_waitp = (__force struct c4iw_wr_wait *)rpl->data[1];
2316 PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
2317 if (wr_waitp) {
2318 wr_waitp->ret = ret;
2319 wr_waitp->done = 1;
2320 wake_up(&wr_waitp->wait);
2321 }
2322 break;
2323 case 2:
2324 c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
2325 break;
2326 default:
2327 printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__,
2328 rpl->type);
2329 break;
2330 }
2331 return 0;
2332}
2333
2334/*
2335 * Most upcalls from the T4 Core go to sched() to
2336 * schedule the processing on a work queue.
2337 */
2338c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
2339 [CPL_ACT_ESTABLISH] = sched,
2340 [CPL_ACT_OPEN_RPL] = sched,
2341 [CPL_RX_DATA] = sched,
2342 [CPL_ABORT_RPL_RSS] = sched,
2343 [CPL_ABORT_RPL] = sched,
2344 [CPL_PASS_OPEN_RPL] = sched,
2345 [CPL_CLOSE_LISTSRV_RPL] = sched,
2346 [CPL_PASS_ACCEPT_REQ] = sched,
2347 [CPL_PASS_ESTABLISH] = sched,
2348 [CPL_PEER_CLOSE] = sched,
2349 [CPL_CLOSE_CON_RPL] = sched,
2350 [CPL_ABORT_REQ_RSS] = sched,
2351 [CPL_RDMA_TERMINATE] = sched,
2352 [CPL_FW4_ACK] = sched,
2353 [CPL_SET_TCB_RPL] = set_tcb_rpl,
2354 [CPL_FW6_MSG] = fw6_msg
2355};
2356
2357int __init c4iw_cm_init(void)
2358{
2359 spin_lock_init(&timeout_lock);
2360 skb_queue_head_init(&rxq);
2361
2362 workq = create_singlethread_workqueue("iw_cxgb4");
2363 if (!workq)
2364 return -ENOMEM;
2365
2366 return 0;
2367}
2368
2369void __exit c4iw_cm_term(void)
2370{
2371 WARN_ON(!list_empty(&timeout_list));
2372 flush_workqueue(workq);
2373 destroy_workqueue(workq);
2374}
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
new file mode 100644
index 000000000000..fb1aafcc294f
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -0,0 +1,882 @@
1/*
2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include "iw_cxgb4.h"
34
35static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
36 struct c4iw_dev_ucontext *uctx)
37{
38 struct fw_ri_res_wr *res_wr;
39 struct fw_ri_res *res;
40 int wr_len;
41 struct c4iw_wr_wait wr_wait;
42 struct sk_buff *skb;
43 int ret;
44
45 wr_len = sizeof *res_wr + sizeof *res;
46 skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
47 if (!skb)
48 return -ENOMEM;
49 set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
50
51 res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
52 memset(res_wr, 0, wr_len);
53 res_wr->op_nres = cpu_to_be32(
54 FW_WR_OP(FW_RI_RES_WR) |
55 V_FW_RI_RES_WR_NRES(1) |
56 FW_WR_COMPL(1));
57 res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
58 res_wr->cookie = (u64)&wr_wait;
59 res = res_wr->res;
60 res->u.cq.restype = FW_RI_RES_TYPE_CQ;
61 res->u.cq.op = FW_RI_RES_OP_RESET;
62 res->u.cq.iqid = cpu_to_be32(cq->cqid);
63
64 c4iw_init_wr_wait(&wr_wait);
65 ret = c4iw_ofld_send(rdev, skb);
66 if (!ret) {
67 wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
68 if (!wr_wait.done) {
69 printk(KERN_ERR MOD "Device %s not responding!\n",
70 pci_name(rdev->lldi.pdev));
71 rdev->flags = T4_FATAL_ERROR;
72 ret = -EIO;
73 } else
74 ret = wr_wait.ret;
75 }
76
77 kfree(cq->sw_queue);
78 dma_free_coherent(&(rdev->lldi.pdev->dev),
79 cq->memsize, cq->queue,
80 pci_unmap_addr(cq, mapping));
81 c4iw_put_cqid(rdev, cq->cqid, uctx);
82 return ret;
83}
84
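/*
 * Allocate a CQID and DMA memory for the CQ, then issue an FW_RI_RES_WR
 * to create the hardware ingress queue and wait for the firmware reply.
 */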
85static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
86 struct c4iw_dev_ucontext *uctx)
87{
88 struct fw_ri_res_wr *res_wr;
89 struct fw_ri_res *res;
90 int wr_len;
91 int user = (uctx != &rdev->uctx);
92 struct c4iw_wr_wait wr_wait;
93 int ret;
94 struct sk_buff *skb;
95
96 cq->cqid = c4iw_get_cqid(rdev, uctx);
97 if (!cq->cqid) {
98 ret = -ENOMEM;
99 goto err1;
100 }
101
102 if (!user) {
103 cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);
104 if (!cq->sw_queue) {
105 ret = -ENOMEM;
106 goto err2;
107 }
108 }
109 cq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, cq->memsize,
110 &cq->dma_addr, GFP_KERNEL);
111 if (!cq->queue) {
112 ret = -ENOMEM;
113 goto err3;
114 }
115 pci_unmap_addr_set(cq, mapping, cq->dma_addr);
116 memset(cq->queue, 0, cq->memsize);
117
118 /* build fw_ri_res_wr */
119 wr_len = sizeof *res_wr + sizeof *res;
120
121 skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
122 if (!skb) {
123 ret = -ENOMEM;
124 goto err4;
125 }
126 set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
127
128 res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
129 memset(res_wr, 0, wr_len);
130 res_wr->op_nres = cpu_to_be32(
131 FW_WR_OP(FW_RI_RES_WR) |
132 V_FW_RI_RES_WR_NRES(1) |
133 FW_WR_COMPL(1));
134 res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
135 res_wr->cookie = (u64)&wr_wait;
136 res = res_wr->res;
137 res->u.cq.restype = FW_RI_RES_TYPE_CQ;
138 res->u.cq.op = FW_RI_RES_OP_WRITE;
139 res->u.cq.iqid = cpu_to_be32(cq->cqid);
140 res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
141 V_FW_RI_RES_WR_IQANUS(0) |
142 V_FW_RI_RES_WR_IQANUD(1) |
143 F_FW_RI_RES_WR_IQANDST |
144 V_FW_RI_RES_WR_IQANDSTINDEX(*rdev->lldi.rxq_ids));
145 res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
146 F_FW_RI_RES_WR_IQDROPRSS |
147 V_FW_RI_RES_WR_IQPCIECH(2) |
148 V_FW_RI_RES_WR_IQINTCNTTHRESH(0) |
149 F_FW_RI_RES_WR_IQO |
150 V_FW_RI_RES_WR_IQESIZE(1));
151 res->u.cq.iqsize = cpu_to_be16(cq->size);
152 res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);
153
154 c4iw_init_wr_wait(&wr_wait);
155
156 ret = c4iw_ofld_send(rdev, skb);
157 if (ret)
158 goto err4;
159 PDBG("%s wait_event wr_wait %p\n", __func__, &wr_wait);
160 wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
161 if (!wr_wait.done) {
162 printk(KERN_ERR MOD "Device %s not responding!\n",
163 pci_name(rdev->lldi.pdev));
164 rdev->flags = T4_FATAL_ERROR;
165 ret = -EIO;
166 } else
167 ret = wr_wait.ret;
168 if (ret)
169 goto err4;
170
171 cq->gen = 1;
172 cq->gts = rdev->lldi.gts_reg;
173 cq->rdev = rdev;
174 if (user) {
175 cq->ugts = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
176 (cq->cqid << rdev->cqshift);
177 cq->ugts &= PAGE_MASK;
178 }
179 return 0;
180err4:
181 dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,
182 pci_unmap_addr(cq, mapping));
183err3:
184 kfree(cq->sw_queue);
185err2:
186 c4iw_put_cqid(rdev, cq->cqid, uctx);
187err1:
188 return ret;
189}
190
191static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
192{
193 struct t4_cqe cqe;
194
195 PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
196 wq, cq, cq->sw_cidx, cq->sw_pidx);
197 memset(&cqe, 0, sizeof(cqe));
198 cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
199 V_CQE_OPCODE(FW_RI_SEND) |
200 V_CQE_TYPE(0) |
201 V_CQE_SWCQE(1) |
202 V_CQE_QPID(wq->rq.qid));
203 cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
204 cq->sw_queue[cq->sw_pidx] = cqe;
205 t4_swcq_produce(cq);
206}
207
208int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
209{
210 int flushed = 0;
211 int in_use = wq->rq.in_use - count;
212
213 BUG_ON(in_use < 0);
214 PDBG("%s wq %p cq %p rq.in_use %u skip count %u\n", __func__,
215 wq, cq, wq->rq.in_use, count);
216 while (in_use--) {
217 insert_recv_cqe(wq, cq);
218 flushed++;
219 }
220 return flushed;
221}
222
223static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
224 struct t4_swsqe *swcqe)
225{
226 struct t4_cqe cqe;
227
228 PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
229 wq, cq, cq->sw_cidx, cq->sw_pidx);
230 memset(&cqe, 0, sizeof(cqe));
231 cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
232 V_CQE_OPCODE(swcqe->opcode) |
233 V_CQE_TYPE(1) |
234 V_CQE_SWCQE(1) |
235 V_CQE_QPID(wq->sq.qid));
236 CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;
237 cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
238 cq->sw_queue[cq->sw_pidx] = cqe;
239 t4_swcq_produce(cq);
240}
241
242int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count)
243{
244 int flushed = 0;
245 struct t4_swsqe *swsqe = &wq->sq.sw_sq[wq->sq.cidx + count];
246 int in_use = wq->sq.in_use - count;
247
248 BUG_ON(in_use < 0);
249 while (in_use--) {
250 swsqe->signaled = 0;
251 insert_sq_cqe(wq, cq, swsqe);
252 swsqe++;
253 if (swsqe == (wq->sq.sw_sq + wq->sq.size))
254 swsqe = wq->sq.sw_sq;
255 flushed++;
256 }
257 return flushed;
258}
259
260/*
261 * Move all CQEs from the HWCQ into the SWCQ.
262 */
263void c4iw_flush_hw_cq(struct t4_cq *cq)
264{
265 struct t4_cqe *cqe = NULL, *swcqe;
266 int ret;
267
268 PDBG("%s cq %p cqid 0x%x\n", __func__, cq, cq->cqid);
269 ret = t4_next_hw_cqe(cq, &cqe);
270 while (!ret) {
271 PDBG("%s flushing hwcq cidx 0x%x swcq pidx 0x%x\n",
272 __func__, cq->cidx, cq->sw_pidx);
273 swcqe = &cq->sw_queue[cq->sw_pidx];
274 *swcqe = *cqe;
275 swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
276 t4_swcq_produce(cq);
277 t4_hwcq_consume(cq);
278 ret = t4_next_hw_cqe(cq, &cqe);
279 }
280}
281
282static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
283{
284 if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
285 return 0;
286
287 if ((CQE_OPCODE(cqe) == FW_RI_RDMA_WRITE) && RQ_TYPE(cqe))
288 return 0;
289
290 if ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) && SQ_TYPE(cqe))
291 return 0;
292
293 if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && t4_rq_empty(wq))
294 return 0;
295 return 1;
296}
297
298void c4iw_count_scqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
299{
300 struct t4_cqe *cqe;
301 u32 ptr;
302
303 *count = 0;
304 ptr = cq->sw_cidx;
305 while (ptr != cq->sw_pidx) {
306 cqe = &cq->sw_queue[ptr];
307 if ((SQ_TYPE(cqe) || ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) &&
308 wq->sq.oldest_read)) &&
309 (CQE_QPID(cqe) == wq->sq.qid))
310 (*count)++;
311 if (++ptr == cq->size)
312 ptr = 0;
313 }
314 PDBG("%s cq %p count %d\n", __func__, cq, *count);
315}
316
317void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
318{
319 struct t4_cqe *cqe;
320 u32 ptr;
321
322 *count = 0;
323 PDBG("%s count zero %d\n", __func__, *count);
324 ptr = cq->sw_cidx;
325 while (ptr != cq->sw_pidx) {
326 cqe = &cq->sw_queue[ptr];
327 if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
328 (CQE_QPID(cqe) == wq->rq.qid) && cqe_completes_wr(cqe, wq))
329 (*count)++;
330 if (++ptr == cq->size)
331 ptr = 0;
332 }
333 PDBG("%s cq %p count %d\n", __func__, cq, *count);
334}
335
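/*
 * Walk the software SQ from cidx and move the next completed, in-order
 * signaled CQE (at most one per call) into the software CQ, reaping any
 * unsignaled WRs in front of it.
 */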
336static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
337{
338 struct t4_swsqe *swsqe;
339 u16 ptr = wq->sq.cidx;
340 int count = wq->sq.in_use;
341 int unsignaled = 0;
342
343 swsqe = &wq->sq.sw_sq[ptr];
344 while (count--)
345 if (!swsqe->signaled) {
346 if (++ptr == wq->sq.size)
347 ptr = 0;
348 swsqe = &wq->sq.sw_sq[ptr];
349 unsignaled++;
350 } else if (swsqe->complete) {
351
352 /*
353 * Insert this completed cqe into the swcq.
354 */
355 PDBG("%s moving cqe into swcq sq idx %u cq idx %u\n",
356 __func__, ptr, cq->sw_pidx);
357 swsqe->cqe.header |= htonl(V_CQE_SWCQE(1));
358 cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
359 t4_swcq_produce(cq);
360 swsqe->signaled = 0;
361 wq->sq.in_use -= unsignaled;
362 break;
363 } else
364 break;
365}
366
367static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
368 struct t4_cqe *read_cqe)
369{
370 read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
371 read_cqe->len = cpu_to_be32(wq->sq.oldest_read->read_len);
372 read_cqe->header = htonl(V_CQE_QPID(CQE_QPID(hw_cqe)) |
373 V_CQE_SWCQE(SW_CQE(hw_cqe)) |
374 V_CQE_OPCODE(FW_RI_READ_REQ) |
375 V_CQE_TYPE(1));
376}
377
378/*
379 * Advance wq->sq.oldest_read to the next read WR in the SWSQ, or set it to NULL if none remains.
380 */
381static void advance_oldest_read(struct t4_wq *wq)
382{
383
384 u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1;
385
386 if (rptr == wq->sq.size)
387 rptr = 0;
388 while (rptr != wq->sq.pidx) {
389 wq->sq.oldest_read = &wq->sq.sw_sq[rptr];
390
391 if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ)
392 return;
393 if (++rptr == wq->sq.size)
394 rptr = 0;
395 }
396 wq->sq.oldest_read = NULL;
397}
398
399/*
400 * poll_cq
401 *
402 * Caller must:
403 * check the validity of the first CQE,
404 * supply the wq associated with the qpid.
405 *
406 * credit: cq credit to return to sge.
407 * cqe_flushed: 1 iff the CQE is flushed.
408 * cqe: copy of the polled CQE.
409 *
410 * return value:
411 * 0 CQE returned ok.
412 * -EAGAIN CQE skipped, try again.
413 * -EOVERFLOW CQ overflow detected.
414 */
415static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
416 u8 *cqe_flushed, u64 *cookie, u32 *credit)
417{
418 int ret = 0;
419 struct t4_cqe *hw_cqe, read_cqe;
420
421 *cqe_flushed = 0;
422 *credit = 0;
423 ret = t4_next_cqe(cq, &hw_cqe);
424 if (ret)
425 return ret;
426
427 PDBG("%s CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x"
428 " opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
429 __func__, CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
430 CQE_GENBIT(hw_cqe), CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe),
431 CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
432 CQE_WRID_LOW(hw_cqe));
433
434 /*
435 * Skip CQEs not affiliated with a QP.
436 */
437 if (wq == NULL) {
438 ret = -EAGAIN;
439 goto skip_cqe;
440 }
441
442 /*
443 * Gotta tweak READ completions:
444 * 1) the cqe doesn't contain the sq_wptr from the wr.
445 * 2) opcode not reflected from the wr.
446 * 3) read_len not reflected from the wr.
447 * 4) cq_type is RQ_TYPE not SQ_TYPE.
448 */
449 if (RQ_TYPE(hw_cqe) && (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP)) {
450
451 /*
452 * If this is an unsolicited read response, then the read
453 * was generated by the kernel driver as part of peer-2-peer
454 * connection setup. So ignore the completion.
455 */
456 if (!wq->sq.oldest_read) {
457 if (CQE_STATUS(hw_cqe))
458 t4_set_wq_in_error(wq);
459 ret = -EAGAIN;
460 goto skip_cqe;
461 }
462
463 /*
464 * Don't write to the HWCQ, so create a new read req CQE
465 * in local memory.
466 */
467 create_read_req_cqe(wq, hw_cqe, &read_cqe);
468 hw_cqe = &read_cqe;
469 advance_oldest_read(wq);
470 }
471
472 if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) {
473 *cqe_flushed = t4_wq_in_error(wq);
474 t4_set_wq_in_error(wq);
475 goto proc_cqe;
476 }
477
478 /*
479 * RECV completion.
480 */
481 if (RQ_TYPE(hw_cqe)) {
482
483 /*
484 * HW only validates 4 bits of MSN. So we must validate that
486 * the MSN in the SEND is the next expected MSN. If it's not,
486 * then we complete this with T4_ERR_MSN and mark the wq in
487 * error.
488 */
489
490 if (t4_rq_empty(wq)) {
491 t4_set_wq_in_error(wq);
492 ret = -EAGAIN;
493 goto skip_cqe;
494 }
495 if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
496 t4_set_wq_in_error(wq);
497 hw_cqe->header |= htonl(V_CQE_STATUS(T4_ERR_MSN));
498 goto proc_cqe;
499 }
500 goto proc_cqe;
501 }
502
503 /*
504 * If we get here it's a send completion.
505 *
506 * Handle out of order completion. These get stuffed
507 * in the SW SQ. Then the SW SQ is walked to move any
508 * now in-order completions into the SW CQ. This handles
509 * 2 cases:
510 * 1) reaping unsignaled WRs when the first subsequent
511 * signaled WR is completed.
512 * 2) out of order read completions.
513 */
514 if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
515 struct t4_swsqe *swsqe;
516
517 PDBG("%s out of order completion going in sw_sq at idx %u\n",
518 __func__, CQE_WRID_SQ_IDX(hw_cqe));
519 swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
520 swsqe->cqe = *hw_cqe;
521 swsqe->complete = 1;
522 ret = -EAGAIN;
523 goto flush_wq;
524 }
525
526proc_cqe:
527 *cqe = *hw_cqe;
528
529 /*
530 * Reap the associated WR(s) that are freed up with this
531 * completion.
532 */
533 if (SQ_TYPE(hw_cqe)) {
534 wq->sq.cidx = CQE_WRID_SQ_IDX(hw_cqe);
535 PDBG("%s completing sq idx %u\n", __func__, wq->sq.cidx);
536 *cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
537 t4_sq_consume(wq);
538 } else {
539 PDBG("%s completing rq idx %u\n", __func__, wq->rq.cidx);
540 *cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
541 BUG_ON(t4_rq_empty(wq));
542 t4_rq_consume(wq);
543 }
544
545flush_wq:
546 /*
547 * Flush any completed cqes that are now in-order.
548 */
549 flush_completed_wrs(wq, cq);
550
551skip_cqe:
552 if (SW_CQE(hw_cqe)) {
553 PDBG("%s cq %p cqid 0x%x skip sw cqe cidx %u\n",
554 __func__, cq, cq->cqid, cq->sw_cidx);
555 t4_swcq_consume(cq);
556 } else {
557 PDBG("%s cq %p cqid 0x%x skip hw cqe cidx %u\n",
558 __func__, cq, cq->cqid, cq->cidx);
559 t4_hwcq_consume(cq);
560 }
561 return ret;
562}
563
564/*
565 * Get one cq entry from c4iw and map it to openib.
566 *
567 * Returns:
568 * 0 cqe returned
569 * -ENODATA EMPTY;
570 * -EAGAIN caller must try again
571 * any other -errno fatal error
572 */
573static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
574{
575 struct c4iw_qp *qhp = NULL;
576 struct t4_cqe cqe = {0, 0}, *rd_cqe;
577 struct t4_wq *wq;
578 u32 credit = 0;
579 u8 cqe_flushed;
580 u64 cookie = 0;
581 int ret;
582
583 ret = t4_next_cqe(&chp->cq, &rd_cqe);
584
585 if (ret)
586 return ret;
587
588 qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
589 if (!qhp)
590 wq = NULL;
591 else {
592 spin_lock(&qhp->lock);
593 wq = &(qhp->wq);
594 }
595 ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit);
596 if (ret)
597 goto out;
598
599 wc->wr_id = cookie;
600 wc->qp = &qhp->ibqp;
601 wc->vendor_err = CQE_STATUS(&cqe);
602 wc->wc_flags = 0;
603
604 PDBG("%s qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x "
605 "lo 0x%x cookie 0x%llx\n", __func__, CQE_QPID(&cqe),
606 CQE_TYPE(&cqe), CQE_OPCODE(&cqe), CQE_STATUS(&cqe), CQE_LEN(&cqe),
607 CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe), (unsigned long long)cookie);
608
609 if (CQE_TYPE(&cqe) == 0) {
610 if (!CQE_STATUS(&cqe))
611 wc->byte_len = CQE_LEN(&cqe);
612 else
613 wc->byte_len = 0;
614 wc->opcode = IB_WC_RECV;
615 if (CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_INV ||
616 CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) {
617 wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
618 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
619 }
620 } else {
621 switch (CQE_OPCODE(&cqe)) {
622 case FW_RI_RDMA_WRITE:
623 wc->opcode = IB_WC_RDMA_WRITE;
624 break;
625 case FW_RI_READ_REQ:
626 wc->opcode = IB_WC_RDMA_READ;
627 wc->byte_len = CQE_LEN(&cqe);
628 break;
629 case FW_RI_SEND_WITH_INV:
630 case FW_RI_SEND_WITH_SE_INV:
631 wc->opcode = IB_WC_SEND;
632 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
633 break;
634 case FW_RI_SEND:
635 case FW_RI_SEND_WITH_SE:
636 wc->opcode = IB_WC_SEND;
637 break;
638 case FW_RI_BIND_MW:
639 wc->opcode = IB_WC_BIND_MW;
640 break;
641
642 case FW_RI_LOCAL_INV:
643 wc->opcode = IB_WC_LOCAL_INV;
644 break;
645 case FW_RI_FAST_REGISTER:
646 wc->opcode = IB_WC_FAST_REG_MR;
647 break;
648 default:
649 printk(KERN_ERR MOD "Unexpected opcode %d "
650 "in the CQE received for QPID=0x%0x\n",
651 CQE_OPCODE(&cqe), CQE_QPID(&cqe));
652 ret = -EINVAL;
653 goto out;
654 }
655 }
656
657 if (cqe_flushed)
658 wc->status = IB_WC_WR_FLUSH_ERR;
659 else {
660
661 switch (CQE_STATUS(&cqe)) {
662 case T4_ERR_SUCCESS:
663 wc->status = IB_WC_SUCCESS;
664 break;
665 case T4_ERR_STAG:
666 wc->status = IB_WC_LOC_ACCESS_ERR;
667 break;
668 case T4_ERR_PDID:
669 wc->status = IB_WC_LOC_PROT_ERR;
670 break;
671 case T4_ERR_QPID:
672 case T4_ERR_ACCESS:
673 wc->status = IB_WC_LOC_ACCESS_ERR;
674 break;
675 case T4_ERR_WRAP:
676 wc->status = IB_WC_GENERAL_ERR;
677 break;
678 case T4_ERR_BOUND:
679 wc->status = IB_WC_LOC_LEN_ERR;
680 break;
681 case T4_ERR_INVALIDATE_SHARED_MR:
682 case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
683 wc->status = IB_WC_MW_BIND_ERR;
684 break;
685 case T4_ERR_CRC:
686 case T4_ERR_MARKER:
687 case T4_ERR_PDU_LEN_ERR:
688 case T4_ERR_OUT_OF_RQE:
689 case T4_ERR_DDP_VERSION:
690 case T4_ERR_RDMA_VERSION:
691 case T4_ERR_DDP_QUEUE_NUM:
692 case T4_ERR_MSN:
693 case T4_ERR_TBIT:
694 case T4_ERR_MO:
695 case T4_ERR_MSN_RANGE:
696 case T4_ERR_IRD_OVERFLOW:
697 case T4_ERR_OPCODE:
698 wc->status = IB_WC_FATAL_ERR;
699 break;
700 case T4_ERR_SWFLUSH:
701 wc->status = IB_WC_WR_FLUSH_ERR;
702 break;
703 default:
704 printk(KERN_ERR MOD
705 "Unexpected cqe_status 0x%x for QPID=0x%0x\n",
706 CQE_STATUS(&cqe), CQE_QPID(&cqe));
707 ret = -EINVAL;
708 }
709 }
710out:
711 if (wq)
712 spin_unlock(&qhp->lock);
713 return ret;
714}
715
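/*
 * ib_poll_cq entry point: poll up to num_entries completions, retrying
 * CQEs that return -EAGAIN and stopping when the CQ is empty (-ENODATA).
 */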
716int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
717{
718 struct c4iw_cq *chp;
719 unsigned long flags;
720 int npolled;
721 int err = 0;
722
723 chp = to_c4iw_cq(ibcq);
724
725 spin_lock_irqsave(&chp->lock, flags);
726 for (npolled = 0; npolled < num_entries; ++npolled) {
727 do {
728 err = c4iw_poll_cq_one(chp, wc + npolled);
729 } while (err == -EAGAIN);
730 if (err)
731 break;
732 }
733 spin_unlock_irqrestore(&chp->lock, flags);
734 return !err || err == -ENODATA ? npolled : err;
735}
736
737int c4iw_destroy_cq(struct ib_cq *ib_cq)
738{
739 struct c4iw_cq *chp;
740 struct c4iw_ucontext *ucontext;
741
742 PDBG("%s ib_cq %p\n", __func__, ib_cq);
743 chp = to_c4iw_cq(ib_cq);
744
745 remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
746 atomic_dec(&chp->refcnt);
747 wait_event(chp->wait, !atomic_read(&chp->refcnt));
748
749 ucontext = ib_cq->uobject ? to_c4iw_ucontext(ib_cq->uobject->context)
750 : NULL;
751 destroy_cq(&chp->rhp->rdev, &chp->cq,
752 ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx);
753 kfree(chp);
754 return 0;
755}
756
757struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
758 int vector, struct ib_ucontext *ib_context,
759 struct ib_udata *udata)
760{
761 struct c4iw_dev *rhp;
762 struct c4iw_cq *chp;
763 struct c4iw_create_cq_resp uresp;
764 struct c4iw_ucontext *ucontext = NULL;
765 int ret;
766 size_t memsize;
767 struct c4iw_mm_entry *mm, *mm2;
768
769 PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
770
771 rhp = to_c4iw_dev(ibdev);
772
773 chp = kzalloc(sizeof(*chp), GFP_KERNEL);
774 if (!chp)
775 return ERR_PTR(-ENOMEM);
776
777 if (ib_context)
778 ucontext = to_c4iw_ucontext(ib_context);
779
780 /* account for the status page. */
781 entries++;
782
783 /*
784 * entries must be multiple of 16 for HW.
785 */
786 entries = roundup(entries, 16);
787 memsize = entries * sizeof *chp->cq.queue;
788
789 /*
790 * memsize must be a multiple of the page size if it's a user cq.
791 */
792 if (ucontext)
793 memsize = roundup(memsize, PAGE_SIZE);
794 chp->cq.size = entries;
795 chp->cq.memsize = memsize;
796
797 ret = create_cq(&rhp->rdev, &chp->cq,
798 ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
799 if (ret)
800 goto err1;
801
802 chp->rhp = rhp;
803 chp->cq.size--; /* status page */
804 chp->ibcq.cqe = chp->cq.size;
805 spin_lock_init(&chp->lock);
806 atomic_set(&chp->refcnt, 1);
807 init_waitqueue_head(&chp->wait);
808 ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
809 if (ret)
810 goto err2;
811
 812 if (ucontext) {
 ret = -ENOMEM;	/* error if either kmalloc below fails */
 813 mm = kmalloc(sizeof *mm, GFP_KERNEL);
814 if (!mm)
815 goto err3;
816 mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
817 if (!mm2)
818 goto err4;
819
820 uresp.qid_mask = rhp->rdev.cqmask;
821 uresp.cqid = chp->cq.cqid;
822 uresp.size = chp->cq.size;
823 uresp.memsize = chp->cq.memsize;
824 spin_lock(&ucontext->mmap_lock);
825 uresp.key = ucontext->key;
826 ucontext->key += PAGE_SIZE;
827 uresp.gts_key = ucontext->key;
828 ucontext->key += PAGE_SIZE;
829 spin_unlock(&ucontext->mmap_lock);
830 ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
831 if (ret)
832 goto err5;
833
834 mm->key = uresp.key;
835 mm->addr = virt_to_phys(chp->cq.queue);
836 mm->len = chp->cq.memsize;
837 insert_mmap(ucontext, mm);
838
839 mm2->key = uresp.gts_key;
840 mm2->addr = chp->cq.ugts;
841 mm2->len = PAGE_SIZE;
842 insert_mmap(ucontext, mm2);
843 }
844 PDBG("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
845 __func__, chp->cq.cqid, chp, chp->cq.size,
846 chp->cq.memsize,
847 (unsigned long long) chp->cq.dma_addr);
848 return &chp->ibcq;
849err5:
850 kfree(mm2);
851err4:
852 kfree(mm);
853err3:
854 remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
855err2:
856 destroy_cq(&chp->rhp->rdev, &chp->cq,
857 ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
858err1:
859 kfree(chp);
860 return ERR_PTR(ret);
861}
862
863int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
864{
865 return -ENOSYS;
866}
867
868int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
869{
870 struct c4iw_cq *chp;
871 int ret;
872 unsigned long flag;
873
874 chp = to_c4iw_cq(ibcq);
875 spin_lock_irqsave(&chp->lock, flag);
876 ret = t4_arm_cq(&chp->cq,
877 (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
878 spin_unlock_irqrestore(&chp->lock, flag);
879 if (ret && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
880 ret = 0;
881 return ret;
882}
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
new file mode 100644
index 000000000000..be23b5eab13b
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -0,0 +1,520 @@
1/*
2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/module.h>
33#include <linux/moduleparam.h>
34#include <linux/debugfs.h>
35
36#include <rdma/ib_verbs.h>
37
38#include "iw_cxgb4.h"
39
40#define DRV_VERSION "0.1"
41
42MODULE_AUTHOR("Steve Wise");
43MODULE_DESCRIPTION("Chelsio T4 RDMA Driver");
44MODULE_LICENSE("Dual BSD/GPL");
45MODULE_VERSION(DRV_VERSION);
46
47static LIST_HEAD(dev_list);
48static DEFINE_MUTEX(dev_mutex);
49
50static struct dentry *c4iw_debugfs_root;
51
52struct debugfs_qp_data {
53 struct c4iw_dev *devp;
54 char *buf;
55 int bufsize;
56 int pos;
57};
58
59static int count_qps(int id, void *p, void *data)
60{
61 struct c4iw_qp *qp = p;
62 int *countp = data;
63
64 if (id != qp->wq.sq.qid)
65 return 0;
66
67 *countp = *countp + 1;
68 return 0;
69}
70
71static int dump_qps(int id, void *p, void *data)
72{
73 struct c4iw_qp *qp = p;
74 struct debugfs_qp_data *qpd = data;
75 int space;
76 int cc;
77
78 if (id != qp->wq.sq.qid)
79 return 0;
80
81 space = qpd->bufsize - qpd->pos - 1;
82 if (space == 0)
83 return 1;
84
85 if (qp->ep)
86 cc = snprintf(qpd->buf + qpd->pos, space, "qp id %u state %u "
87 "ep tid %u state %u %pI4:%u->%pI4:%u\n",
88 qp->wq.sq.qid, (int)qp->attr.state,
89 qp->ep->hwtid, (int)qp->ep->com.state,
90 &qp->ep->com.local_addr.sin_addr.s_addr,
91 ntohs(qp->ep->com.local_addr.sin_port),
92 &qp->ep->com.remote_addr.sin_addr.s_addr,
93 ntohs(qp->ep->com.remote_addr.sin_port));
94 else
95 cc = snprintf(qpd->buf + qpd->pos, space, "qp id %u state %u\n",
96 qp->wq.sq.qid, (int)qp->attr.state);
97 if (cc < space)
98 qpd->pos += cc;
99 return 0;
100}
101
102static int qp_release(struct inode *inode, struct file *file)
103{
104 struct debugfs_qp_data *qpd = file->private_data;
105 if (!qpd) {
106 printk(KERN_INFO "%s null qpd?\n", __func__);
107 return 0;
108 }
109 kfree(qpd->buf);
110 kfree(qpd);
111 return 0;
112}
113
114static int qp_open(struct inode *inode, struct file *file)
115{
116 struct debugfs_qp_data *qpd;
117 int ret = 0;
118 int count = 1;
119
120 qpd = kmalloc(sizeof *qpd, GFP_KERNEL);
121 if (!qpd) {
122 ret = -ENOMEM;
123 goto out;
124 }
125 qpd->devp = inode->i_private;
126 qpd->pos = 0;
127
128 spin_lock_irq(&qpd->devp->lock);
129 idr_for_each(&qpd->devp->qpidr, count_qps, &count);
130 spin_unlock_irq(&qpd->devp->lock);
131
132 qpd->bufsize = count * 128;
133 qpd->buf = kmalloc(qpd->bufsize, GFP_KERNEL);
134 if (!qpd->buf) {
135 ret = -ENOMEM;
136 goto err1;
137 }
138
139 spin_lock_irq(&qpd->devp->lock);
140 idr_for_each(&qpd->devp->qpidr, dump_qps, qpd);
141 spin_unlock_irq(&qpd->devp->lock);
142
143 qpd->buf[qpd->pos++] = 0;
144 file->private_data = qpd;
145 goto out;
146err1:
147 kfree(qpd);
148out:
149 return ret;
150}
151
152static ssize_t qp_read(struct file *file, char __user *buf, size_t count,
153 loff_t *ppos)
154{
155 struct debugfs_qp_data *qpd = file->private_data;
156 loff_t pos = *ppos;
157 loff_t avail = qpd->pos;
158
159 if (pos < 0)
160 return -EINVAL;
161 if (pos >= avail)
162 return 0;
163 if (count > avail - pos)
164 count = avail - pos;
165
166 while (count) {
167 size_t len = 0;
168
169 len = min((int)count, (int)qpd->pos - (int)pos);
170 if (copy_to_user(buf, qpd->buf + pos, len))
171 return -EFAULT;
172 if (len == 0)
173 return -EINVAL;
174
175 buf += len;
176 pos += len;
177 count -= len;
178 }
179 count = pos - *ppos;
180 *ppos = pos;
181 return count;
182}
183
184static const struct file_operations qp_debugfs_fops = {
185 .owner = THIS_MODULE,
186 .open = qp_open,
187 .release = qp_release,
188 .read = qp_read,
189};
190
191static int setup_debugfs(struct c4iw_dev *devp)
192{
193 struct dentry *de;
194
195 if (!devp->debugfs_root)
196 return -1;
197
198 de = debugfs_create_file("qps", S_IWUSR, devp->debugfs_root,
199 (void *)devp, &qp_debugfs_fops);
200 if (de && de->d_inode)
201 de->d_inode->i_size = 4096;
202 return 0;
203}
204
205void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
206 struct c4iw_dev_ucontext *uctx)
207{
208 struct list_head *pos, *nxt;
209 struct c4iw_qid_list *entry;
210
211 mutex_lock(&uctx->lock);
212 list_for_each_safe(pos, nxt, &uctx->qpids) {
213 entry = list_entry(pos, struct c4iw_qid_list, entry);
214 list_del_init(&entry->entry);
215 if (!(entry->qid & rdev->qpmask))
216 c4iw_put_resource(&rdev->resource.qid_fifo, entry->qid,
217 &rdev->resource.qid_fifo_lock);
218 kfree(entry);
219 }
220
221 list_for_each_safe(pos, nxt, &uctx->qpids) {
222 entry = list_entry(pos, struct c4iw_qid_list, entry);
223 list_del_init(&entry->entry);
224 kfree(entry);
225 }
226 mutex_unlock(&uctx->lock);
227}
228
229void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
230 struct c4iw_dev_ucontext *uctx)
231{
232 INIT_LIST_HEAD(&uctx->qpids);
233 INIT_LIST_HEAD(&uctx->cqids);
234 mutex_init(&uctx->lock);
235}
236
237/* Caller takes care of locking if needed */
238static int c4iw_rdev_open(struct c4iw_rdev *rdev)
239{
240 int err;
241
242 c4iw_init_dev_ucontext(rdev, &rdev->uctx);
243
244 /*
245 * qpshift is the number of bits to shift the qpid left in order
246 * to get the correct address of the doorbell for that qp.
247 */
248 rdev->qpshift = PAGE_SHIFT - ilog2(rdev->lldi.udb_density);
249 rdev->qpmask = rdev->lldi.udb_density - 1;
250 rdev->cqshift = PAGE_SHIFT - ilog2(rdev->lldi.ucq_density);
251 rdev->cqmask = rdev->lldi.ucq_density - 1;
252 PDBG("%s dev %s stag start 0x%0x size 0x%0x num stags %d "
253 "pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x\n",
254 __func__, pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
255 rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
256 rdev->lldi.vr->pbl.start,
257 rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start,
258 rdev->lldi.vr->rq.size);
259 PDBG("udb len 0x%x udb base %p db_reg %p gts_reg %p qpshift %lu "
260 "qpmask 0x%x cqshift %lu cqmask 0x%x\n",
261 (unsigned)pci_resource_len(rdev->lldi.pdev, 2),
262 (void *)pci_resource_start(rdev->lldi.pdev, 2),
263 rdev->lldi.db_reg,
264 rdev->lldi.gts_reg,
265 rdev->qpshift, rdev->qpmask,
266 rdev->cqshift, rdev->cqmask);
267
268 if (c4iw_num_stags(rdev) == 0) {
269 err = -EINVAL;
270 goto err1;
271 }
272
273 err = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD);
274 if (err) {
275 printk(KERN_ERR MOD "error %d initializing resources\n", err);
276 goto err1;
277 }
278 err = c4iw_pblpool_create(rdev);
279 if (err) {
280 printk(KERN_ERR MOD "error %d initializing pbl pool\n", err);
281 goto err2;
282 }
283 err = c4iw_rqtpool_create(rdev);
284 if (err) {
285 printk(KERN_ERR MOD "error %d initializing rqt pool\n", err);
286 goto err3;
287 }
288 return 0;
289err3:
290 c4iw_pblpool_destroy(rdev);
291err2:
292 c4iw_destroy_resource(&rdev->resource);
293err1:
294 return err;
295}
296
297static void c4iw_rdev_close(struct c4iw_rdev *rdev)
298{
299 c4iw_pblpool_destroy(rdev);
300 c4iw_rqtpool_destroy(rdev);
301 c4iw_destroy_resource(&rdev->resource);
302}
303
304static void c4iw_remove(struct c4iw_dev *dev)
305{
306 PDBG("%s c4iw_dev %p\n", __func__, dev);
307 cancel_delayed_work_sync(&dev->db_drop_task);
308 list_del(&dev->entry);
309 c4iw_unregister_device(dev);
310 c4iw_rdev_close(&dev->rdev);
311 idr_destroy(&dev->cqidr);
312 idr_destroy(&dev->qpidr);
313 idr_destroy(&dev->mmidr);
314 ib_dealloc_device(&dev->ibdev);
315}
316
317static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
318{
319 struct c4iw_dev *devp;
320 int ret;
321
322 devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
323 if (!devp) {
324 printk(KERN_ERR MOD "Cannot allocate ib device\n");
325 return NULL;
326 }
327 devp->rdev.lldi = *infop;
328
329 mutex_lock(&dev_mutex);
330
331 ret = c4iw_rdev_open(&devp->rdev);
332 if (ret) {
333 mutex_unlock(&dev_mutex);
334 printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret);
335 ib_dealloc_device(&devp->ibdev);
336 return NULL;
337 }
338
339 idr_init(&devp->cqidr);
340 idr_init(&devp->qpidr);
341 idr_init(&devp->mmidr);
342 spin_lock_init(&devp->lock);
343 list_add_tail(&devp->entry, &dev_list);
344 mutex_unlock(&dev_mutex);
345
346 if (c4iw_register_device(devp)) {
347 printk(KERN_ERR MOD "Unable to register device\n");
348 mutex_lock(&dev_mutex);
349 c4iw_remove(devp);
350 mutex_unlock(&dev_mutex);
351 }
352 if (c4iw_debugfs_root) {
353 devp->debugfs_root = debugfs_create_dir(
354 pci_name(devp->rdev.lldi.pdev),
355 c4iw_debugfs_root);
356 setup_debugfs(devp);
357 }
358 return devp;
359}
360
361static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
362{
363 struct c4iw_dev *dev;
364 static int vers_printed;
365 int i;
366
367 if (!vers_printed++)
368 printk(KERN_INFO MOD "Chelsio T4 RDMA Driver - version %s\n",
369 DRV_VERSION);
370
371 dev = c4iw_alloc(infop);
372 if (!dev)
373 goto out;
374
375 PDBG("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n",
376 __func__, pci_name(dev->rdev.lldi.pdev),
377 dev->rdev.lldi.nchan, dev->rdev.lldi.nrxq,
378 dev->rdev.lldi.ntxq, dev->rdev.lldi.nports);
379
380 for (i = 0; i < dev->rdev.lldi.nrxq; i++)
381 PDBG("rxqid[%u] %u\n", i, dev->rdev.lldi.rxq_ids[i]);
382
383 printk(KERN_INFO MOD "Initialized device %s\n",
384 pci_name(dev->rdev.lldi.pdev));
385out:
386 return dev;
387}
388
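/*
 * Build an sk_buff from a packet gather list: small packets are copied
 * entirely into the linear area; larger ones copy only pull_len bytes
 * and attach the remaining page fragments.
 */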
389static struct sk_buff *t4_pktgl_to_skb(const struct pkt_gl *gl,
390 unsigned int skb_len,
391 unsigned int pull_len)
392{
393 struct sk_buff *skb;
394 struct skb_shared_info *ssi;
395
396 if (gl->tot_len <= 512) {
397 skb = alloc_skb(gl->tot_len, GFP_ATOMIC);
398 if (unlikely(!skb))
399 goto out;
400 __skb_put(skb, gl->tot_len);
401 skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
402 } else {
403 skb = alloc_skb(skb_len, GFP_ATOMIC);
404 if (unlikely(!skb))
405 goto out;
406 __skb_put(skb, pull_len);
407 skb_copy_to_linear_data(skb, gl->va, pull_len);
408
409 ssi = skb_shinfo(skb);
410 ssi->frags[0].page = gl->frags[0].page;
411 ssi->frags[0].page_offset = gl->frags[0].page_offset + pull_len;
412 ssi->frags[0].size = gl->frags[0].size - pull_len;
413 if (gl->nfrags > 1)
414 memcpy(&ssi->frags[1], &gl->frags[1],
415 (gl->nfrags - 1) * sizeof(skb_frag_t));
416 ssi->nr_frags = gl->nfrags;
417
418 skb->len = gl->tot_len;
419 skb->data_len = skb->len - pull_len;
420 skb->truesize += skb->data_len;
421
422 /* Get a reference for the last page, we don't own it */
423 get_page(gl->frags[gl->nfrags - 1].page);
424 }
425out:
426 return skb;
427}
428
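[Editor's note] t4_pktgl_to_skb() copies small gathers (up to 512 bytes) straight into the skb's linear area; for larger gathers it copies only pull_len bytes linear and attaches the remainder as page fragments. The standalone sketch below mirrors only the resulting length bookkeeping, with made-up numbers and no kernel types:

#include <stdio.h>

int main(void)
{
	/* Mirrors the large-packet branch: 1500 bytes total, 128 pulled linear. */
	unsigned int tot_len = 1500, pull_len = 128;
	unsigned int frag0_offset = 0, frag0_size = 1500;  /* first gather frag */

	unsigned int linear   = pull_len;            /* copied into the skb head */
	unsigned int data_len = tot_len - pull_len;  /* left in page fragments */

	frag0_offset += pull_len;                    /* frag 0 skips the pulled bytes */
	frag0_size   -= pull_len;

	printf("linear %u, paged %u, frag0 off %u size %u, skb->len %u\n",
	       linear, data_len, frag0_offset, frag0_size, tot_len);
	return 0;
}
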
429static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
430 const struct pkt_gl *gl)
431{
432 struct c4iw_dev *dev = handle;
433 struct sk_buff *skb;
434 const struct cpl_act_establish *rpl;
435 unsigned int opcode;
436
437 if (gl == NULL) {
438 /* omit RSS and rsp_ctrl at end of descriptor */
439 unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;
440
441 skb = alloc_skb(256, GFP_ATOMIC);
442 if (!skb)
443 goto nomem;
444 __skb_put(skb, len);
445 skb_copy_to_linear_data(skb, &rsp[1], len);
446 } else if (gl == CXGB4_MSG_AN) {
447 const struct rsp_ctrl *rc = (void *)rsp;
448
449 u32 qid = be32_to_cpu(rc->pldbuflen_qid);
450 c4iw_ev_handler(dev, qid);
451 return 0;
452 } else {
453 skb = t4_pktgl_to_skb(gl, 128, 128);
454 if (unlikely(!skb))
455 goto nomem;
456 }
457
458 rpl = cplhdr(skb);
459 opcode = rpl->ot.opcode;
460
461 if (c4iw_handlers[opcode])
462 c4iw_handlers[opcode](dev, skb);
463 else
464 printk(KERN_INFO "%s no handler opcode 0x%x...\n", __func__,
465 opcode);
466
467 return 0;
468nomem:
469 return -1;
470}
471
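[Editor's note] The rx handler dispatches on the CPL opcode through the c4iw_handlers[] table declared in iw_cxgb4.h; the table is presumably populated elsewhere in the driver (e.g. by the connection-manager code), which is not part of this hunk. As a hedged illustration only, a handler matching the c4iw_handler_func typedef and its registration might look like this (the handler name is hypothetical):

/* Illustrative only -- not part of this file.  A handler must match
 * c4iw_handler_func from iw_cxgb4.h and consume (or free) the skb. */
static int example_act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_act_establish *req = cplhdr(skb);

	PDBG("%s opcode 0x%x\n", __func__, req->ot.opcode);
	kfree_skb(skb);
	return 0;
}

/* Registration, presumably done once at init time:
 *	c4iw_handlers[CPL_ACT_ESTABLISH] = example_act_establish;
 */
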
472static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
473{
474 PDBG("%s new_state %u\n", __func__, new_state);
475 return 0;
476}
477
478static struct cxgb4_uld_info c4iw_uld_info = {
479 .name = DRV_NAME,
480 .add = c4iw_uld_add,
481 .rx_handler = c4iw_uld_rx_handler,
482 .state_change = c4iw_uld_state_change,
483};
484
485static int __init c4iw_init_module(void)
486{
487 int err;
488
489 err = c4iw_cm_init();
490 if (err)
491 return err;
492
493 c4iw_debugfs_root = debugfs_create_dir(DRV_NAME, NULL);
494 if (!c4iw_debugfs_root)
495 printk(KERN_WARNING MOD
496 "could not create debugfs entry, continuing\n");
497
498 cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);
499
500 return 0;
501}
502
503static void __exit c4iw_exit_module(void)
504{
505 struct c4iw_dev *dev, *tmp;
506
507 cxgb4_unregister_uld(CXGB4_ULD_RDMA);
508
509 mutex_lock(&dev_mutex);
510 list_for_each_entry_safe(dev, tmp, &dev_list, entry) {
511 c4iw_remove(dev);
512 }
513 mutex_unlock(&dev_mutex);
514
515 c4iw_cm_term();
516 debugfs_remove_recursive(c4iw_debugfs_root);
517}
518
519module_init(c4iw_init_module);
520module_exit(c4iw_exit_module);
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
new file mode 100644
index 000000000000..491e76a0327f
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -0,0 +1,193 @@
1/*
2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/slab.h>
33#include <linux/mman.h>
34#include <net/sock.h>
35
36#include "iw_cxgb4.h"
37
38static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
39 struct c4iw_qp *qhp,
40 struct t4_cqe *err_cqe,
41 enum ib_event_type ib_event)
42{
43 struct ib_event event;
44 struct c4iw_qp_attributes attrs;
45
46 if ((qhp->attr.state == C4IW_QP_STATE_ERROR) ||
47 (qhp->attr.state == C4IW_QP_STATE_TERMINATE)) {
48 PDBG("%s AE received after RTS - "
49 "qp state %d qpid 0x%x status 0x%x\n", __func__,
50 qhp->attr.state, qhp->wq.sq.qid, CQE_STATUS(err_cqe));
51 return;
52 }
53
54 printk(KERN_ERR MOD "AE qpid 0x%x opcode %d status 0x%x "
55 "type %d wrid.hi 0x%x wrid.lo 0x%x\n",
56 CQE_QPID(err_cqe), CQE_OPCODE(err_cqe),
57 CQE_STATUS(err_cqe), CQE_TYPE(err_cqe),
58 CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe));
59
60 if (qhp->attr.state == C4IW_QP_STATE_RTS) {
61 attrs.next_state = C4IW_QP_STATE_TERMINATE;
62 c4iw_modify_qp(qhp->rhp, qhp, C4IW_QP_ATTR_NEXT_STATE,
63 &attrs, 1);
64 }
65
66 event.event = ib_event;
67 event.device = chp->ibcq.device;
68 if (ib_event == IB_EVENT_CQ_ERR)
69 event.element.cq = &chp->ibcq;
70 else
71 event.element.qp = &qhp->ibqp;
72 if (qhp->ibqp.event_handler)
73 (*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);
74
75 (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
76}
77
78void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
79{
80 struct c4iw_cq *chp;
81 struct c4iw_qp *qhp;
82 u32 cqid;
83
84 spin_lock(&dev->lock);
85 qhp = get_qhp(dev, CQE_QPID(err_cqe));
86 if (!qhp) {
87 printk(KERN_ERR MOD "BAD AE qpid 0x%x opcode %d "
88 "status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
89 CQE_QPID(err_cqe),
90 CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
91 CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
92 CQE_WRID_LOW(err_cqe));
93 spin_unlock(&dev->lock);
94 goto out;
95 }
96
97 if (SQ_TYPE(err_cqe))
98 cqid = qhp->attr.scq;
99 else
100 cqid = qhp->attr.rcq;
101 chp = get_chp(dev, cqid);
102 if (!chp) {
103 printk(KERN_ERR MOD "BAD AE cqid 0x%x qpid 0x%x opcode %d "
104 "status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
105 cqid, CQE_QPID(err_cqe),
106 CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
107 CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
108 CQE_WRID_LOW(err_cqe));
109 spin_unlock(&dev->lock);
110 goto out;
111 }
112
113 c4iw_qp_add_ref(&qhp->ibqp);
114 atomic_inc(&chp->refcnt);
115 spin_unlock(&dev->lock);
116
117 /* Bad incoming write */
118 if (RQ_TYPE(err_cqe) &&
119 (CQE_OPCODE(err_cqe) == FW_RI_RDMA_WRITE)) {
120 post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_REQ_ERR);
121 goto done;
122 }
123
124 switch (CQE_STATUS(err_cqe)) {
125
126 /* Completion Events */
127 case T4_ERR_SUCCESS:
128 printk(KERN_ERR MOD "AE with status 0!\n");
129 break;
130
131 case T4_ERR_STAG:
132 case T4_ERR_PDID:
133 case T4_ERR_QPID:
134 case T4_ERR_ACCESS:
135 case T4_ERR_WRAP:
136 case T4_ERR_BOUND:
137 case T4_ERR_INVALIDATE_SHARED_MR:
138 case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
139 post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_ACCESS_ERR);
140 break;
141
142 /* Device Fatal Errors */
143 case T4_ERR_ECC:
144 case T4_ERR_ECC_PSTAG:
145 case T4_ERR_INTERNAL_ERR:
146 post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_DEVICE_FATAL);
147 break;
148
149 /* QP Fatal Errors */
150 case T4_ERR_OUT_OF_RQE:
151 case T4_ERR_PBL_ADDR_BOUND:
152 case T4_ERR_CRC:
153 case T4_ERR_MARKER:
154 case T4_ERR_PDU_LEN_ERR:
155 case T4_ERR_DDP_VERSION:
156 case T4_ERR_RDMA_VERSION:
157 case T4_ERR_OPCODE:
158 case T4_ERR_DDP_QUEUE_NUM:
159 case T4_ERR_MSN:
160 case T4_ERR_TBIT:
161 case T4_ERR_MO:
162 case T4_ERR_MSN_GAP:
163 case T4_ERR_MSN_RANGE:
164 case T4_ERR_RQE_ADDR_BOUND:
165 case T4_ERR_IRD_OVERFLOW:
166 post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_FATAL);
167 break;
168
169 default:
170 printk(KERN_ERR MOD "Unknown T4 status 0x%x QPID 0x%x\n",
171 CQE_STATUS(err_cqe), qhp->wq.sq.qid);
172 post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_FATAL);
173 break;
174 }
175done:
176 if (atomic_dec_and_test(&chp->refcnt))
177 wake_up(&chp->wait);
178 c4iw_qp_rem_ref(&qhp->ibqp);
179out:
180 return;
181}
182
183int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
184{
185 struct c4iw_cq *chp;
186
187 chp = get_chp(dev, qid);
188 if (chp)
189 (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
190 else
191 PDBG("%s unknown cqid 0x%x\n", __func__, qid);
192 return 0;
193}
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
new file mode 100644
index 000000000000..a6269981e815
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -0,0 +1,745 @@
1/*
2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 * - Redistributions in binary form must reproduce the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer in the documentation and/or other materials
20 * provided with the distribution.
21 *
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
23 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
24 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
25 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
26 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
27 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
28 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
29 * SOFTWARE.
30 */
31#ifndef __IW_CXGB4_H__
32#define __IW_CXGB4_H__
33
34#include <linux/mutex.h>
35#include <linux/list.h>
36#include <linux/spinlock.h>
37#include <linux/idr.h>
38#include <linux/workqueue.h>
39#include <linux/netdevice.h>
40#include <linux/sched.h>
41#include <linux/pci.h>
42#include <linux/dma-mapping.h>
43#include <linux/inet.h>
44#include <linux/wait.h>
45#include <linux/kref.h>
46#include <linux/timer.h>
47#include <linux/io.h>
48#include <linux/kfifo.h>
49
50#include <asm/byteorder.h>
51
52#include <net/net_namespace.h>
53
54#include <rdma/ib_verbs.h>
55#include <rdma/iw_cm.h>
56
57#include "cxgb4.h"
58#include "cxgb4_uld.h"
59#include "l2t.h"
60#include "user.h"
61
62#define DRV_NAME "iw_cxgb4"
63#define MOD DRV_NAME ":"
64
65extern int c4iw_debug;
66#define PDBG(fmt, args...) \
67do { \
68 if (c4iw_debug) \
69 printk(MOD fmt, ## args); \
70} while (0)
71
72#include "t4.h"
73
74#define PBL_OFF(rdev_p, a) ((a) - (rdev_p)->lldi.vr->pbl.start)
75#define RQT_OFF(rdev_p, a) ((a) - (rdev_p)->lldi.vr->rq.start)
76
77static inline void *cplhdr(struct sk_buff *skb)
78{
79 return skb->data;
80}
81
82#define C4IW_WR_TO (10*HZ)
83
84struct c4iw_wr_wait {
85 wait_queue_head_t wait;
86 int done;
87 int ret;
88};
89
90static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp)
91{
92 wr_waitp->ret = 0;
93 wr_waitp->done = 0;
94 init_waitqueue_head(&wr_waitp->wait);
95}
96
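[Editor's note] c4iw_wr_wait pairs a work-request post with a synchronous wait: the poster initializes the structure, arranges for a pointer to it to come back in the firmware reply, then sleeps on wait with a C4IW_WR_TO timeout; the completion path is expected to fill ret, set done and wake the queue. A minimal kernel-style sketch of the posting side, mirroring write_adapter_mem() in mem.c (the completion side lives outside this header and is assumed here):

/* Sketch of the posting side only; WR construction elided. */
static int post_and_wait_example(void)
{
	struct c4iw_wr_wait wr_wait;
	int ret;

	c4iw_init_wr_wait(&wr_wait);
	/* ... build and send a WR that carries &wr_wait back in its reply ... */
	wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
	if (!wr_wait.done)
		ret = -EIO;		/* device did not respond in time */
	else
		ret = wr_wait.ret;	/* filled in by the completion path */
	return ret;
}
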
97struct c4iw_resource {
98 struct kfifo tpt_fifo;
99 spinlock_t tpt_fifo_lock;
100 struct kfifo qid_fifo;
101 spinlock_t qid_fifo_lock;
102 struct kfifo pdid_fifo;
103 spinlock_t pdid_fifo_lock;
104};
105
106struct c4iw_qid_list {
107 struct list_head entry;
108 u32 qid;
109};
110
111struct c4iw_dev_ucontext {
112 struct list_head qpids;
113 struct list_head cqids;
114 struct mutex lock;
115};
116
117enum c4iw_rdev_flags {
118 T4_FATAL_ERROR = (1<<0),
119};
120
121struct c4iw_rdev {
122 struct c4iw_resource resource;
123 unsigned long qpshift;
124 u32 qpmask;
125 unsigned long cqshift;
126 u32 cqmask;
127 struct c4iw_dev_ucontext uctx;
128 struct gen_pool *pbl_pool;
129 struct gen_pool *rqt_pool;
130 u32 flags;
131 struct cxgb4_lld_info lldi;
132};
133
134static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
135{
136 return rdev->flags & T4_FATAL_ERROR;
137}
138
139static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
140{
141 return min((int)T4_MAX_NUM_STAG, (int)(rdev->lldi.vr->stag.size >> 5));
142}
143
144struct c4iw_dev {
145 struct ib_device ibdev;
146 struct c4iw_rdev rdev;
147 u32 device_cap_flags;
148 struct idr cqidr;
149 struct idr qpidr;
150 struct idr mmidr;
151 spinlock_t lock;
152 struct list_head entry;
153 struct delayed_work db_drop_task;
154 struct dentry *debugfs_root;
155};
156
157static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
158{
159 return container_of(ibdev, struct c4iw_dev, ibdev);
160}
161
162static inline struct c4iw_dev *rdev_to_c4iw_dev(struct c4iw_rdev *rdev)
163{
164 return container_of(rdev, struct c4iw_dev, rdev);
165}
166
167static inline struct c4iw_cq *get_chp(struct c4iw_dev *rhp, u32 cqid)
168{
169 return idr_find(&rhp->cqidr, cqid);
170}
171
172static inline struct c4iw_qp *get_qhp(struct c4iw_dev *rhp, u32 qpid)
173{
174 return idr_find(&rhp->qpidr, qpid);
175}
176
177static inline struct c4iw_mr *get_mhp(struct c4iw_dev *rhp, u32 mmid)
178{
179 return idr_find(&rhp->mmidr, mmid);
180}
181
182static inline int insert_handle(struct c4iw_dev *rhp, struct idr *idr,
183 void *handle, u32 id)
184{
185 int ret;
186 int newid;
187
188 do {
189 if (!idr_pre_get(idr, GFP_KERNEL))
190 return -ENOMEM;
191 spin_lock_irq(&rhp->lock);
192 ret = idr_get_new_above(idr, handle, id, &newid);
193 BUG_ON(newid != id);
194 spin_unlock_irq(&rhp->lock);
195 } while (ret == -EAGAIN);
196
197 return ret;
198}
199
200static inline void remove_handle(struct c4iw_dev *rhp, struct idr *idr, u32 id)
201{
202 spin_lock_irq(&rhp->lock);
203 idr_remove(idr, id);
204 spin_unlock_irq(&rhp->lock);
205}
206
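[Editor's note] insert_handle() loops on idr_pre_get()/idr_get_new_above() until the preallocation survives the race, and the BUG_ON insists the idr hands back exactly the requested id, so cqidr/qpidr/mmidr behave as exact-id lookup tables. A hedged kernel-style sketch of the round trip for a CQ id (the real call sites are in the CQ code, not this header):

/* Illustrative round trip; not taken verbatim from the driver. */
static int track_cq_example(struct c4iw_dev *rhp, struct c4iw_cq *chp, u32 cqid)
{
	int ret;

	ret = insert_handle(rhp, &rhp->cqidr, chp, cqid);	/* map cqid -> chp */
	if (ret)
		return ret;

	WARN_ON(get_chp(rhp, cqid) != chp);			/* exact-id lookup */

	remove_handle(rhp, &rhp->cqidr, cqid);			/* and tear down */
	return 0;
}
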
207struct c4iw_pd {
208 struct ib_pd ibpd;
209 u32 pdid;
210 struct c4iw_dev *rhp;
211};
212
213static inline struct c4iw_pd *to_c4iw_pd(struct ib_pd *ibpd)
214{
215 return container_of(ibpd, struct c4iw_pd, ibpd);
216}
217
218struct tpt_attributes {
219 u64 len;
220 u64 va_fbo;
221 enum fw_ri_mem_perms perms;
222 u32 stag;
223 u32 pdid;
224 u32 qpid;
225 u32 pbl_addr;
226 u32 pbl_size;
227 u32 state:1;
228 u32 type:2;
229 u32 rsvd:1;
230 u32 remote_invaliate_disable:1;
231 u32 zbva:1;
232 u32 mw_bind_enable:1;
233 u32 page_size:5;
234};
235
236struct c4iw_mr {
237 struct ib_mr ibmr;
238 struct ib_umem *umem;
239 struct c4iw_dev *rhp;
240 u64 kva;
241 struct tpt_attributes attr;
242};
243
244static inline struct c4iw_mr *to_c4iw_mr(struct ib_mr *ibmr)
245{
246 return container_of(ibmr, struct c4iw_mr, ibmr);
247}
248
249struct c4iw_mw {
250 struct ib_mw ibmw;
251 struct c4iw_dev *rhp;
252 u64 kva;
253 struct tpt_attributes attr;
254};
255
256static inline struct c4iw_mw *to_c4iw_mw(struct ib_mw *ibmw)
257{
258 return container_of(ibmw, struct c4iw_mw, ibmw);
259}
260
261struct c4iw_fr_page_list {
262 struct ib_fast_reg_page_list ibpl;
263 DECLARE_PCI_UNMAP_ADDR(mapping);
264 dma_addr_t dma_addr;
265 struct c4iw_dev *dev;
266 int size;
267};
268
269static inline struct c4iw_fr_page_list *to_c4iw_fr_page_list(
270 struct ib_fast_reg_page_list *ibpl)
271{
272 return container_of(ibpl, struct c4iw_fr_page_list, ibpl);
273}
274
275struct c4iw_cq {
276 struct ib_cq ibcq;
277 struct c4iw_dev *rhp;
278 struct t4_cq cq;
279 spinlock_t lock;
280 atomic_t refcnt;
281 wait_queue_head_t wait;
282};
283
284static inline struct c4iw_cq *to_c4iw_cq(struct ib_cq *ibcq)
285{
286 return container_of(ibcq, struct c4iw_cq, ibcq);
287}
288
289struct c4iw_mpa_attributes {
290 u8 initiator;
291 u8 recv_marker_enabled;
292 u8 xmit_marker_enabled;
293 u8 crc_enabled;
294 u8 version;
295 u8 p2p_type;
296};
297
298struct c4iw_qp_attributes {
299 u32 scq;
300 u32 rcq;
301 u32 sq_num_entries;
302 u32 rq_num_entries;
303 u32 sq_max_sges;
304 u32 sq_max_sges_rdma_write;
305 u32 rq_max_sges;
306 u32 state;
307 u8 enable_rdma_read;
308 u8 enable_rdma_write;
309 u8 enable_bind;
310 u8 enable_mmid0_fastreg;
311 u32 max_ord;
312 u32 max_ird;
313 u32 pd;
314 u32 next_state;
315 char terminate_buffer[52];
316 u32 terminate_msg_len;
317 u8 is_terminate_local;
318 struct c4iw_mpa_attributes mpa_attr;
319 struct c4iw_ep *llp_stream_handle;
320};
321
322struct c4iw_qp {
323 struct ib_qp ibqp;
324 struct c4iw_dev *rhp;
325 struct c4iw_ep *ep;
326 struct c4iw_qp_attributes attr;
327 struct t4_wq wq;
328 spinlock_t lock;
329 atomic_t refcnt;
330 wait_queue_head_t wait;
331 struct timer_list timer;
332};
333
334static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
335{
336 return container_of(ibqp, struct c4iw_qp, ibqp);
337}
338
339struct c4iw_ucontext {
340 struct ib_ucontext ibucontext;
341 struct c4iw_dev_ucontext uctx;
342 u32 key;
343 spinlock_t mmap_lock;
344 struct list_head mmaps;
345};
346
347static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
348{
349 return container_of(c, struct c4iw_ucontext, ibucontext);
350}
351
352struct c4iw_mm_entry {
353 struct list_head entry;
354 u64 addr;
355 u32 key;
356 unsigned len;
357};
358
359static inline struct c4iw_mm_entry *remove_mmap(struct c4iw_ucontext *ucontext,
360 u32 key, unsigned len)
361{
362 struct list_head *pos, *nxt;
363 struct c4iw_mm_entry *mm;
364
365 spin_lock(&ucontext->mmap_lock);
366 list_for_each_safe(pos, nxt, &ucontext->mmaps) {
367
368 mm = list_entry(pos, struct c4iw_mm_entry, entry);
369 if (mm->key == key && mm->len == len) {
370 list_del_init(&mm->entry);
371 spin_unlock(&ucontext->mmap_lock);
372 PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
373 key, (unsigned long long) mm->addr, mm->len);
374 return mm;
375 }
376 }
377 spin_unlock(&ucontext->mmap_lock);
378 return NULL;
379}
380
381static inline void insert_mmap(struct c4iw_ucontext *ucontext,
382 struct c4iw_mm_entry *mm)
383{
384 spin_lock(&ucontext->mmap_lock);
385 PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
386 mm->key, (unsigned long long) mm->addr, mm->len);
387 list_add_tail(&mm->entry, &ucontext->mmaps);
388 spin_unlock(&ucontext->mmap_lock);
389}
390
391enum c4iw_qp_attr_mask {
392 C4IW_QP_ATTR_NEXT_STATE = 1 << 0,
393 C4IW_QP_ATTR_ENABLE_RDMA_READ = 1 << 7,
394 C4IW_QP_ATTR_ENABLE_RDMA_WRITE = 1 << 8,
395 C4IW_QP_ATTR_ENABLE_RDMA_BIND = 1 << 9,
396 C4IW_QP_ATTR_MAX_ORD = 1 << 11,
397 C4IW_QP_ATTR_MAX_IRD = 1 << 12,
398 C4IW_QP_ATTR_LLP_STREAM_HANDLE = 1 << 22,
399 C4IW_QP_ATTR_STREAM_MSG_BUFFER = 1 << 23,
400 C4IW_QP_ATTR_MPA_ATTR = 1 << 24,
401 C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE = 1 << 25,
402 C4IW_QP_ATTR_VALID_MODIFY = (C4IW_QP_ATTR_ENABLE_RDMA_READ |
403 C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
404 C4IW_QP_ATTR_MAX_ORD |
405 C4IW_QP_ATTR_MAX_IRD |
406 C4IW_QP_ATTR_LLP_STREAM_HANDLE |
407 C4IW_QP_ATTR_STREAM_MSG_BUFFER |
408 C4IW_QP_ATTR_MPA_ATTR |
409 C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE)
410};
411
412int c4iw_modify_qp(struct c4iw_dev *rhp,
413 struct c4iw_qp *qhp,
414 enum c4iw_qp_attr_mask mask,
415 struct c4iw_qp_attributes *attrs,
416 int internal);
417
418enum c4iw_qp_state {
419 C4IW_QP_STATE_IDLE,
420 C4IW_QP_STATE_RTS,
421 C4IW_QP_STATE_ERROR,
422 C4IW_QP_STATE_TERMINATE,
423 C4IW_QP_STATE_CLOSING,
424 C4IW_QP_STATE_TOT
425};
426
427static inline int c4iw_convert_state(enum ib_qp_state ib_state)
428{
429 switch (ib_state) {
430 case IB_QPS_RESET:
431 case IB_QPS_INIT:
432 return C4IW_QP_STATE_IDLE;
433 case IB_QPS_RTS:
434 return C4IW_QP_STATE_RTS;
435 case IB_QPS_SQD:
436 return C4IW_QP_STATE_CLOSING;
437 case IB_QPS_SQE:
438 return C4IW_QP_STATE_TERMINATE;
439 case IB_QPS_ERR:
440 return C4IW_QP_STATE_ERROR;
441 default:
442 return -1;
443 }
444}
445
446static inline u32 c4iw_ib_to_tpt_access(int a)
447{
448 return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
449 (a & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0) |
450 (a & IB_ACCESS_LOCAL_WRITE ? FW_RI_MEM_ACCESS_LOCAL_WRITE : 0) |
451 FW_RI_MEM_ACCESS_LOCAL_READ;
452}
453
454static inline u32 c4iw_ib_to_tpt_bind_access(int acc)
455{
456 return (acc & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
457 (acc & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0);
458}
459
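[Editor's note] c4iw_ib_to_tpt_access() translates the IB verbs access bits into the firmware's TPT permission bits one flag at a time and always adds local read. The standalone demo below reproduces only the translation pattern, using placeholder bit values (the real IB_ACCESS_* and FW_RI_MEM_ACCESS_* values live in the verbs and firmware headers and are not assumed here):

#include <stdio.h>

/* Placeholder flag values for illustration only. */
#define XIB_ACCESS_LOCAL_WRITE   (1 << 0)
#define XIB_ACCESS_REMOTE_WRITE  (1 << 1)
#define XIB_ACCESS_REMOTE_READ   (1 << 2)

#define XFW_ACCESS_LOCAL_READ    (1 << 0)
#define XFW_ACCESS_LOCAL_WRITE   (1 << 1)
#define XFW_ACCESS_REM_READ      (1 << 2)
#define XFW_ACCESS_REM_WRITE     (1 << 3)

/* Same shape as c4iw_ib_to_tpt_access(): one ternary per flag,
 * plus unconditional local read. */
static unsigned int ib_to_tpt(int a)
{
	return (a & XIB_ACCESS_REMOTE_WRITE ? XFW_ACCESS_REM_WRITE : 0) |
	       (a & XIB_ACCESS_REMOTE_READ  ? XFW_ACCESS_REM_READ  : 0) |
	       (a & XIB_ACCESS_LOCAL_WRITE  ? XFW_ACCESS_LOCAL_WRITE : 0) |
	       XFW_ACCESS_LOCAL_READ;
}

int main(void)
{
	printf("0x%x\n", ib_to_tpt(XIB_ACCESS_LOCAL_WRITE | XIB_ACCESS_REMOTE_READ));
	return 0;
}
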
460enum c4iw_mmid_state {
461 C4IW_STAG_STATE_VALID,
462 C4IW_STAG_STATE_INVALID
463};
464
465#define C4IW_NODE_DESC "cxgb4 Chelsio Communications"
466
467#define MPA_KEY_REQ "MPA ID Req Frame"
468#define MPA_KEY_REP "MPA ID Rep Frame"
469
470#define MPA_MAX_PRIVATE_DATA 256
471#define MPA_REJECT 0x20
472#define MPA_CRC 0x40
473#define MPA_MARKERS 0x80
474#define MPA_FLAGS_MASK 0xE0
475
476#define c4iw_put_ep(ep) { \
477 PDBG("put_ep (via %s:%u) ep %p refcnt %d\n", __func__, __LINE__, \
478 ep, atomic_read(&((ep)->kref.refcount))); \
479 WARN_ON(atomic_read(&((ep)->kref.refcount)) < 1); \
480 kref_put(&((ep)->kref), _c4iw_free_ep); \
481}
482
483#define c4iw_get_ep(ep) { \
484 PDBG("get_ep (via %s:%u) ep %p, refcnt %d\n", __func__, __LINE__, \
485 ep, atomic_read(&((ep)->kref.refcount))); \
486 kref_get(&((ep)->kref)); \
487}
488void _c4iw_free_ep(struct kref *kref);
489
490struct mpa_message {
491 u8 key[16];
492 u8 flags;
493 u8 revision;
494 __be16 private_data_size;
495 u8 private_data[0];
496};
497
498struct terminate_message {
499 u8 layer_etype;
500 u8 ecode;
501 __be16 hdrct_rsvd;
502 u8 len_hdrs[0];
503};
504
505#define TERM_MAX_LENGTH (sizeof(struct terminate_message) + 2 + 18 + 28)
506
507enum c4iw_layers_types {
508 LAYER_RDMAP = 0x00,
509 LAYER_DDP = 0x10,
510 LAYER_MPA = 0x20,
511 RDMAP_LOCAL_CATA = 0x00,
512 RDMAP_REMOTE_PROT = 0x01,
513 RDMAP_REMOTE_OP = 0x02,
514 DDP_LOCAL_CATA = 0x00,
515 DDP_TAGGED_ERR = 0x01,
516 DDP_UNTAGGED_ERR = 0x02,
517 DDP_LLP = 0x03
518};
519
520enum c4iw_rdma_ecodes {
521 RDMAP_INV_STAG = 0x00,
522 RDMAP_BASE_BOUNDS = 0x01,
523 RDMAP_ACC_VIOL = 0x02,
524 RDMAP_STAG_NOT_ASSOC = 0x03,
525 RDMAP_TO_WRAP = 0x04,
526 RDMAP_INV_VERS = 0x05,
527 RDMAP_INV_OPCODE = 0x06,
528 RDMAP_STREAM_CATA = 0x07,
529 RDMAP_GLOBAL_CATA = 0x08,
530 RDMAP_CANT_INV_STAG = 0x09,
531 RDMAP_UNSPECIFIED = 0xff
532};
533
534enum c4iw_ddp_ecodes {
535 DDPT_INV_STAG = 0x00,
536 DDPT_BASE_BOUNDS = 0x01,
537 DDPT_STAG_NOT_ASSOC = 0x02,
538 DDPT_TO_WRAP = 0x03,
539 DDPT_INV_VERS = 0x04,
540 DDPU_INV_QN = 0x01,
541 DDPU_INV_MSN_NOBUF = 0x02,
542 DDPU_INV_MSN_RANGE = 0x03,
543 DDPU_INV_MO = 0x04,
544 DDPU_MSG_TOOBIG = 0x05,
545 DDPU_INV_VERS = 0x06
546};
547
548enum c4iw_mpa_ecodes {
549 MPA_CRC_ERR = 0x02,
550 MPA_MARKER_ERR = 0x03
551};
552
553enum c4iw_ep_state {
554 IDLE = 0,
555 LISTEN,
556 CONNECTING,
557 MPA_REQ_WAIT,
558 MPA_REQ_SENT,
559 MPA_REQ_RCVD,
560 MPA_REP_SENT,
561 FPDU_MODE,
562 ABORTING,
563 CLOSING,
564 MORIBUND,
565 DEAD,
566};
567
568enum c4iw_ep_flags {
569 PEER_ABORT_IN_PROGRESS = 0,
570 ABORT_REQ_IN_PROGRESS = 1,
571 RELEASE_RESOURCES = 2,
572 CLOSE_SENT = 3,
573};
574
575struct c4iw_ep_common {
576 struct iw_cm_id *cm_id;
577 struct c4iw_qp *qp;
578 struct c4iw_dev *dev;
579 enum c4iw_ep_state state;
580 struct kref kref;
581 spinlock_t lock;
582 struct sockaddr_in local_addr;
583 struct sockaddr_in remote_addr;
584 wait_queue_head_t waitq;
585 int rpl_done;
586 int rpl_err;
587 unsigned long flags;
588};
589
590struct c4iw_listen_ep {
591 struct c4iw_ep_common com;
592 unsigned int stid;
593 int backlog;
594};
595
596struct c4iw_ep {
597 struct c4iw_ep_common com;
598 struct c4iw_ep *parent_ep;
599 struct timer_list timer;
600 struct list_head entry;
601 unsigned int atid;
602 u32 hwtid;
603 u32 snd_seq;
604 u32 rcv_seq;
605 struct l2t_entry *l2t;
606 struct dst_entry *dst;
607 struct sk_buff *mpa_skb;
608 struct c4iw_mpa_attributes mpa_attr;
609 u8 mpa_pkt[sizeof(struct mpa_message) + MPA_MAX_PRIVATE_DATA];
610 unsigned int mpa_pkt_len;
611 u32 ird;
612 u32 ord;
613 u32 smac_idx;
614 u32 tx_chan;
615 u32 mtu;
616 u16 mss;
617 u16 emss;
618 u16 plen;
619 u16 rss_qid;
620 u16 txq_idx;
621 u8 tos;
622};
623
624static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id)
625{
626 return cm_id->provider_data;
627}
628
629static inline struct c4iw_listen_ep *to_listen_ep(struct iw_cm_id *cm_id)
630{
631 return cm_id->provider_data;
632}
633
634static inline int compute_wscale(int win)
635{
636 int wscale = 0;
637
638 while (wscale < 14 && (65535<<wscale) < win)
639 wscale++;
640 return wscale;
641}
642
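[Editor's note] compute_wscale() picks the smallest TCP window-scale shift (capped at 14) that lets a 16-bit window cover the requested size. A worked example as a standalone program, assuming a 256 KB window:

#include <stdio.h>

static int compute_wscale(int win)
{
	int wscale = 0;

	while (wscale < 14 && (65535 << wscale) < win)
		wscale++;
	return wscale;
}

int main(void)
{
	/* 65535 << 2 = 262140 still falls short of 262144, so wscale is 3. */
	printf("wscale for 256 KB window: %d\n", compute_wscale(256 * 1024));
	return 0;
}
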
643typedef int (*c4iw_handler_func)(struct c4iw_dev *dev, struct sk_buff *skb);
644
645int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
646 struct l2t_entry *l2t);
647void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qpid,
648 struct c4iw_dev_ucontext *uctx);
649u32 c4iw_get_resource(struct kfifo *fifo, spinlock_t *lock);
650void c4iw_put_resource(struct kfifo *fifo, u32 entry, spinlock_t *lock);
651int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid);
652int c4iw_init_ctrl_qp(struct c4iw_rdev *rdev);
653int c4iw_pblpool_create(struct c4iw_rdev *rdev);
654int c4iw_rqtpool_create(struct c4iw_rdev *rdev);
655void c4iw_pblpool_destroy(struct c4iw_rdev *rdev);
656void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev);
657void c4iw_destroy_resource(struct c4iw_resource *rscp);
658int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev);
659int c4iw_register_device(struct c4iw_dev *dev);
660void c4iw_unregister_device(struct c4iw_dev *dev);
661int __init c4iw_cm_init(void);
662void __exit c4iw_cm_term(void);
663void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
664 struct c4iw_dev_ucontext *uctx);
665void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
666 struct c4iw_dev_ucontext *uctx);
667int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
668int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
669 struct ib_send_wr **bad_wr);
670int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
671 struct ib_recv_wr **bad_wr);
672int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
673 struct ib_mw_bind *mw_bind);
674int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
675int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog);
676int c4iw_destroy_listen(struct iw_cm_id *cm_id);
677int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
678int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
679void c4iw_qp_add_ref(struct ib_qp *qp);
680void c4iw_qp_rem_ref(struct ib_qp *qp);
681void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *page_list);
682struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(
683 struct ib_device *device,
684 int page_list_len);
685struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth);
686int c4iw_dealloc_mw(struct ib_mw *mw);
687struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd);
688struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
689 u64 length, u64 virt, int acc,
690 struct ib_udata *udata);
691struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc);
692struct ib_mr *c4iw_register_phys_mem(struct ib_pd *pd,
693 struct ib_phys_buf *buffer_list,
694 int num_phys_buf,
695 int acc,
696 u64 *iova_start);
697int c4iw_reregister_phys_mem(struct ib_mr *mr,
698 int mr_rereg_mask,
699 struct ib_pd *pd,
700 struct ib_phys_buf *buffer_list,
701 int num_phys_buf,
702 int acc, u64 *iova_start);
703int c4iw_dereg_mr(struct ib_mr *ib_mr);
704int c4iw_destroy_cq(struct ib_cq *ib_cq);
705struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
706 int vector,
707 struct ib_ucontext *ib_context,
708 struct ib_udata *udata);
709int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata);
710int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
711int c4iw_destroy_qp(struct ib_qp *ib_qp);
712struct ib_qp *c4iw_create_qp(struct ib_pd *pd,
713 struct ib_qp_init_attr *attrs,
714 struct ib_udata *udata);
715int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
716 int attr_mask, struct ib_udata *udata);
717struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn);
718u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size);
719void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
720u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size);
721void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
722int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb);
723void c4iw_flush_hw_cq(struct t4_cq *cq);
724void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
725void c4iw_count_scqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
726int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp);
727int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);
728int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count);
729int c4iw_ev_handler(struct c4iw_dev *rnicp, u32 qid);
730u16 c4iw_rqes_posted(struct c4iw_qp *qhp);
731int c4iw_post_zb_read(struct c4iw_qp *qhp);
732int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe);
733u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
734void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
735 struct c4iw_dev_ucontext *uctx);
736u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
737void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
738 struct c4iw_dev_ucontext *uctx);
739void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe);
740
741extern struct cxgb4_client t4c_client;
742extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];
743extern int c4iw_max_read_depth;
744
745#endif
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
new file mode 100644
index 000000000000..e54ff6d25691
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -0,0 +1,811 @@
1/*
2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <rdma/ib_umem.h>
34#include <asm/atomic.h>
35
36#include "iw_cxgb4.h"
37
38#define T4_ULPTX_MIN_IO 32
39#define C4IW_MAX_INLINE_SIZE 96
40
41static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
42 void *data)
43{
44 struct sk_buff *skb;
45 struct ulp_mem_io *req;
46 struct ulptx_idata *sc;
47 u8 wr_len, *to_dp, *from_dp;
48 int copy_len, num_wqe, i, ret = 0;
49 struct c4iw_wr_wait wr_wait;
50
51 addr &= 0x7FFFFFF;
52 PDBG("%s addr 0x%x len %u\n", __func__, addr, len);
53 num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE);
54 c4iw_init_wr_wait(&wr_wait);
55 for (i = 0; i < num_wqe; i++) {
56
57 copy_len = len > C4IW_MAX_INLINE_SIZE ? C4IW_MAX_INLINE_SIZE :
58 len;
59 wr_len = roundup(sizeof *req + sizeof *sc +
60 roundup(copy_len, T4_ULPTX_MIN_IO), 16);
61
62 skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
63 if (!skb)
64 return -ENOMEM;
65 set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
66
67 req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
68 memset(req, 0, wr_len);
69 INIT_ULPTX_WR(req, wr_len, 0, 0);
70
71 if (i == (num_wqe-1)) {
72 req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR) |
73 FW_WR_COMPL(1));
74 req->wr.wr_lo = (__force __be64)&wr_wait;
75 } else
76 req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR));
77 req->wr.wr_mid = cpu_to_be32(
78 FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));
79
80 req->cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE) | (1<<23));
81 req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN(
82 DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO)));
83 req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr),
84 16));
85 req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR(addr + i * 3));
86
87 sc = (struct ulptx_idata *)(req + 1);
88 sc->cmd_more = cpu_to_be32(ULPTX_CMD(ULP_TX_SC_IMM));
89 sc->len = cpu_to_be32(roundup(copy_len, T4_ULPTX_MIN_IO));
90
91 to_dp = (u8 *)(sc + 1);
92 from_dp = (u8 *)data + i * C4IW_MAX_INLINE_SIZE;
93 if (data)
94 memcpy(to_dp, from_dp, copy_len);
95 else
96 memset(to_dp, 0, copy_len);
97 if (copy_len % T4_ULPTX_MIN_IO)
98 memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO -
99 (copy_len % T4_ULPTX_MIN_IO));
100 ret = c4iw_ofld_send(rdev, skb);
101 if (ret)
102 return ret;
103 len -= C4IW_MAX_INLINE_SIZE;
104 }
105
106 wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
107 if (!wr_wait.done) {
108 printk(KERN_ERR MOD "Device %s not responding!\n",
109 pci_name(rdev->lldi.pdev));
110 rdev->flags = T4_FATAL_ERROR;
111 ret = -EIO;
112 } else
113 ret = wr_wait.ret;
114 return ret;
115}
116
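[Editor's note] write_adapter_mem() slices the payload into C4IW_MAX_INLINE_SIZE (96-byte) inline chunks, one ULP_TX memory-write WR per chunk, pads each chunk to the 32-byte T4_ULPTX_MIN_IO granularity, and requests a completion only on the final WR so the single wait_event_timeout() above fires once the last write is acknowledged. A standalone check of that chunking arithmetic for a hypothetical 200-byte write:

#include <stdio.h>

#define T4_ULPTX_MIN_IO      32
#define C4IW_MAX_INLINE_SIZE 96

#define DIV_ROUND_UP(n, d)   (((n) + (d) - 1) / (d))
#define ROUNDUP(x, y)        (DIV_ROUND_UP(x, y) * (y))

int main(void)
{
	unsigned int len = 200;		/* hypothetical TPT/PBL payload */
	unsigned int num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE);
	unsigned int i, remaining = len;

	printf("%u bytes -> %u work requests\n", len, num_wqe);
	for (i = 0; i < num_wqe; i++) {
		unsigned int copy_len = remaining > C4IW_MAX_INLINE_SIZE ?
					C4IW_MAX_INLINE_SIZE : remaining;

		printf("  WR %u: copy %u, padded to %u%s\n", i, copy_len,
		       ROUNDUP(copy_len, T4_ULPTX_MIN_IO),
		       i == num_wqe - 1 ? " (completion requested)" : "");
		remaining -= copy_len;
	}
	return 0;
}
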
117/*
118 * Build and write a TPT entry.
119 * IN: stag key, pdid, perm, bind_enabled, zbva, to, len, page_size,
120 * pbl_size and pbl_addr
121 * OUT: stag index
122 */
123static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
124 u32 *stag, u8 stag_state, u32 pdid,
125 enum fw_ri_stag_type type, enum fw_ri_mem_perms perm,
126 int bind_enabled, u32 zbva, u64 to,
127 u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr)
128{
129 int err;
130 struct fw_ri_tpte tpt;
131 u32 stag_idx;
132 static atomic_t key;
133
134 if (c4iw_fatal_error(rdev))
135 return -EIO;
136
137 stag_state = stag_state > 0;
138 stag_idx = (*stag) >> 8;
139
140 if ((!reset_tpt_entry) && (*stag == T4_STAG_UNSET)) {
141 stag_idx = c4iw_get_resource(&rdev->resource.tpt_fifo,
142 &rdev->resource.tpt_fifo_lock);
143 if (!stag_idx)
144 return -ENOMEM;
145 *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
146 }
147 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
148 __func__, stag_state, type, pdid, stag_idx);
149
150 /* write TPT entry */
151 if (reset_tpt_entry)
152 memset(&tpt, 0, sizeof(tpt));
153 else {
154 tpt.valid_to_pdid = cpu_to_be32(F_FW_RI_TPTE_VALID |
155 V_FW_RI_TPTE_STAGKEY((*stag & M_FW_RI_TPTE_STAGKEY)) |
156 V_FW_RI_TPTE_STAGSTATE(stag_state) |
157 V_FW_RI_TPTE_STAGTYPE(type) | V_FW_RI_TPTE_PDID(pdid));
158 tpt.locread_to_qpid = cpu_to_be32(V_FW_RI_TPTE_PERM(perm) |
159 (bind_enabled ? F_FW_RI_TPTE_MWBINDEN : 0) |
160 V_FW_RI_TPTE_ADDRTYPE((zbva ? FW_RI_ZERO_BASED_TO :
161 FW_RI_VA_BASED_TO))|
162 V_FW_RI_TPTE_PS(page_size));
163 tpt.nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
164 V_FW_RI_TPTE_PBLADDR(PBL_OFF(rdev, pbl_addr)>>3));
165 tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
166 tpt.va_hi = cpu_to_be32((u32)(to >> 32));
167 tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
168 tpt.dca_mwbcnt_pstag = cpu_to_be32(0);
169 tpt.len_hi = cpu_to_be32((u32)(len >> 32));
170 }
171 err = write_adapter_mem(rdev, stag_idx +
172 (rdev->lldi.vr->stag.start >> 5),
173 sizeof(tpt), &tpt);
174
175 if (reset_tpt_entry)
176 c4iw_put_resource(&rdev->resource.tpt_fifo, stag_idx,
177 &rdev->resource.tpt_fifo_lock);
178 return err;
179}
180
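[Editor's note] A stag is composed of an 8-bit rolling key in the low byte and the TPT index in the remaining bits, so the memory-map id (mmid) used for the idr lookups is simply stag >> 8. A small standalone check of that round trip, with hypothetical values:

#include <stdio.h>

int main(void)
{
	unsigned int stag_idx = 0x1234;		/* hypothetical TPT index */
	unsigned int key = 0x1ab;		/* rolling counter, low 8 bits used */

	unsigned int stag = (stag_idx << 8) | (key & 0xff);	/* as in write_tpt_entry() */
	unsigned int mmid = stag >> 8;				/* as in finish_mem_reg() */

	printf("stag 0x%x -> mmid 0x%x (key byte 0x%02x)\n",
	       stag, mmid, stag & 0xff);
	return 0;
}
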
181static int write_pbl(struct c4iw_rdev *rdev, __be64 *pbl,
182 u32 pbl_addr, u32 pbl_size)
183{
184 int err;
185
186 PDBG("%s *pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
187 __func__, pbl_addr, rdev->lldi.vr->pbl.start,
188 pbl_size);
189
190 err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl);
191 return err;
192}
193
194static int dereg_mem(struct c4iw_rdev *rdev, u32 stag, u32 pbl_size,
195 u32 pbl_addr)
196{
197 return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0,
198 pbl_size, pbl_addr);
199}
200
201static int allocate_window(struct c4iw_rdev *rdev, u32 * stag, u32 pdid)
202{
203 *stag = T4_STAG_UNSET;
204 return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_MW, 0, 0, 0,
205 0UL, 0, 0, 0, 0);
206}
207
208static int deallocate_window(struct c4iw_rdev *rdev, u32 stag)
209{
210 return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0, 0,
211 0);
212}
213
214static int allocate_stag(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
215 u32 pbl_size, u32 pbl_addr)
216{
217 *stag = T4_STAG_UNSET;
218 return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_NSMR, 0, 0, 0,
219 0UL, 0, 0, pbl_size, pbl_addr);
220}
221
222static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
223{
224 u32 mmid;
225
226 mhp->attr.state = 1;
227 mhp->attr.stag = stag;
228 mmid = stag >> 8;
229 mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
230 PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
231 return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
232}
233
234static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
235 struct c4iw_mr *mhp, int shift)
236{
237 u32 stag = T4_STAG_UNSET;
238 int ret;
239
240 ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
241 FW_RI_STAG_NSMR, mhp->attr.perms,
242 mhp->attr.mw_bind_enable, mhp->attr.zbva,
243 mhp->attr.va_fbo, mhp->attr.len, shift - 12,
244 mhp->attr.pbl_size, mhp->attr.pbl_addr);
245 if (ret)
246 return ret;
247
248 ret = finish_mem_reg(mhp, stag);
249 if (ret)
250 dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
251 mhp->attr.pbl_addr);
252 return ret;
253}
254
255static int reregister_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
256 struct c4iw_mr *mhp, int shift, int npages)
257{
258 u32 stag;
259 int ret;
260
261 if (npages > mhp->attr.pbl_size)
262 return -ENOMEM;
263
264 stag = mhp->attr.stag;
265 ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
266 FW_RI_STAG_NSMR, mhp->attr.perms,
267 mhp->attr.mw_bind_enable, mhp->attr.zbva,
268 mhp->attr.va_fbo, mhp->attr.len, shift - 12,
269 mhp->attr.pbl_size, mhp->attr.pbl_addr);
270 if (ret)
271 return ret;
272
273 ret = finish_mem_reg(mhp, stag);
274 if (ret)
275 dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
276 mhp->attr.pbl_addr);
277
278 return ret;
279}
280
281static int alloc_pbl(struct c4iw_mr *mhp, int npages)
282{
283 mhp->attr.pbl_addr = c4iw_pblpool_alloc(&mhp->rhp->rdev,
284 npages << 3);
285
286 if (!mhp->attr.pbl_addr)
287 return -ENOMEM;
288
289 mhp->attr.pbl_size = npages;
290
291 return 0;
292}
293
294static int build_phys_page_list(struct ib_phys_buf *buffer_list,
295 int num_phys_buf, u64 *iova_start,
296 u64 *total_size, int *npages,
297 int *shift, __be64 **page_list)
298{
299 u64 mask;
300 int i, j, n;
301
302 mask = 0;
303 *total_size = 0;
304 for (i = 0; i < num_phys_buf; ++i) {
305 if (i != 0 && buffer_list[i].addr & ~PAGE_MASK)
306 return -EINVAL;
307 if (i != 0 && i != num_phys_buf - 1 &&
308 (buffer_list[i].size & ~PAGE_MASK))
309 return -EINVAL;
310 *total_size += buffer_list[i].size;
311 if (i > 0)
312 mask |= buffer_list[i].addr;
313 else
314 mask |= buffer_list[i].addr & PAGE_MASK;
315 if (i != num_phys_buf - 1)
316 mask |= buffer_list[i].addr + buffer_list[i].size;
317 else
318 mask |= (buffer_list[i].addr + buffer_list[i].size +
319 PAGE_SIZE - 1) & PAGE_MASK;
320 }
321
322 if (*total_size > 0xFFFFFFFFULL)
323 return -ENOMEM;
324
325 /* Find largest page shift we can use to cover buffers */
326 for (*shift = PAGE_SHIFT; *shift < 27; ++(*shift))
327 if ((1ULL << *shift) & mask)
328 break;
329
330 buffer_list[0].size += buffer_list[0].addr & ((1ULL << *shift) - 1);
331 buffer_list[0].addr &= ~0ull << *shift;
332
333 *npages = 0;
334 for (i = 0; i < num_phys_buf; ++i)
335 *npages += (buffer_list[i].size +
336 (1ULL << *shift) - 1) >> *shift;
337
338 if (!*npages)
339 return -EINVAL;
340
341 *page_list = kmalloc(sizeof(u64) * *npages, GFP_KERNEL);
342 if (!*page_list)
343 return -ENOMEM;
344
345 n = 0;
346 for (i = 0; i < num_phys_buf; ++i)
347 for (j = 0;
348 j < (buffer_list[i].size + (1ULL << *shift) - 1) >> *shift;
349 ++j)
350 (*page_list)[n++] = cpu_to_be64(buffer_list[i].addr +
351 ((u64) j << *shift));
352
353 PDBG("%s va 0x%llx mask 0x%llx shift %d len %lld pbl_size %d\n",
354 __func__, (unsigned long long)*iova_start,
355 (unsigned long long)mask, *shift, (unsigned long long)*total_size,
356 *npages);
357
358 return 0;
359
360}
361
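[Editor's note] The shift selection in build_phys_page_list() ORs every buffer boundary into a mask and then takes the lowest set bit at or above PAGE_SHIFT, i.e. the largest page size to which all of the intermediate boundaries are aligned. A standalone walk-through with two hypothetical buffers (PAGE_SHIFT assumed to be 12):

#include <stdio.h>

#define PAGE_SHIFT 12			/* assumed 4 KB pages */
#define PAGE_MASK  (~((1UL << PAGE_SHIFT) - 1))

int main(void)
{
	/* Two hypothetical buffers: 2 MB at 1 MB, then 1 MB at 3 MB. */
	unsigned long addr[2] = { 0x100000, 0x300000 };
	unsigned long size[2] = { 0x200000, 0x100000 };
	unsigned long mask = 0;
	int i, shift;

	mask |= addr[0] & PAGE_MASK;		/* first buffer: page-aligned start */
	mask |= addr[0] + size[0];		/* every intermediate boundary */
	mask |= addr[1];
	mask |= (addr[1] + size[1] + (1UL << PAGE_SHIFT) - 1) & PAGE_MASK; /* rounded end */

	for (shift = PAGE_SHIFT; shift < 27; ++shift)
		if ((1UL << shift) & mask)
			break;

	for (i = 0; i < 2; i++)
		printf("buf%d: addr 0x%lx size 0x%lx\n", i, addr[i], size[i]);
	printf("mask 0x%lx -> shift %d (%lu KB pages)\n",
	       mask, shift, (1UL << shift) >> 10);
	return 0;
}
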
362int c4iw_reregister_phys_mem(struct ib_mr *mr, int mr_rereg_mask,
363 struct ib_pd *pd, struct ib_phys_buf *buffer_list,
364 int num_phys_buf, int acc, u64 *iova_start)
365{
366
367 struct c4iw_mr mh, *mhp;
368 struct c4iw_pd *php;
369 struct c4iw_dev *rhp;
370 __be64 *page_list = NULL;
371 int shift = 0;
372 u64 total_size;
373 int npages;
374 int ret;
375
376 PDBG("%s ib_mr %p ib_pd %p\n", __func__, mr, pd);
377
378 /* There can be no memory windows */
379 if (atomic_read(&mr->usecnt))
380 return -EINVAL;
381
382 mhp = to_c4iw_mr(mr);
383 rhp = mhp->rhp;
384 php = to_c4iw_pd(mr->pd);
385
386 /* make sure we are on the same adapter */
387 if (rhp != php->rhp)
388 return -EINVAL;
389
390 memcpy(&mh, mhp, sizeof *mhp);
391
392 if (mr_rereg_mask & IB_MR_REREG_PD)
393 php = to_c4iw_pd(pd);
394 if (mr_rereg_mask & IB_MR_REREG_ACCESS) {
395 mh.attr.perms = c4iw_ib_to_tpt_access(acc);
396 mh.attr.mw_bind_enable = (acc & IB_ACCESS_MW_BIND) ==
397 IB_ACCESS_MW_BIND;
398 }
399 if (mr_rereg_mask & IB_MR_REREG_TRANS) {
400 ret = build_phys_page_list(buffer_list, num_phys_buf,
401 iova_start,
402 &total_size, &npages,
403 &shift, &page_list);
404 if (ret)
405 return ret;
406 }
407
408 ret = reregister_mem(rhp, php, &mh, shift, npages);
409 kfree(page_list);
410 if (ret)
411 return ret;
412 if (mr_rereg_mask & IB_MR_REREG_PD)
413 mhp->attr.pdid = php->pdid;
414 if (mr_rereg_mask & IB_MR_REREG_ACCESS)
415 mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
416 if (mr_rereg_mask & IB_MR_REREG_TRANS) {
417 mhp->attr.zbva = 0;
418 mhp->attr.va_fbo = *iova_start;
419 mhp->attr.page_size = shift - 12;
420 mhp->attr.len = (u32) total_size;
421 mhp->attr.pbl_size = npages;
422 }
423
424 return 0;
425}
426
427struct ib_mr *c4iw_register_phys_mem(struct ib_pd *pd,
428 struct ib_phys_buf *buffer_list,
429 int num_phys_buf, int acc, u64 *iova_start)
430{
431 __be64 *page_list;
432 int shift;
433 u64 total_size;
434 int npages;
435 struct c4iw_dev *rhp;
436 struct c4iw_pd *php;
437 struct c4iw_mr *mhp;
438 int ret;
439
440 PDBG("%s ib_pd %p\n", __func__, pd);
441 php = to_c4iw_pd(pd);
442 rhp = php->rhp;
443
444 mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
445 if (!mhp)
446 return ERR_PTR(-ENOMEM);
447
448 mhp->rhp = rhp;
449
450 /* First check that we have enough alignment */
451 if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) {
452 ret = -EINVAL;
453 goto err;
454 }
455
456 if (num_phys_buf > 1 &&
457 ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) {
458 ret = -EINVAL;
459 goto err;
460 }
461
462 ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start,
463 &total_size, &npages, &shift,
464 &page_list);
465 if (ret)
466 goto err;
467
468 ret = alloc_pbl(mhp, npages);
469 if (ret) {
470 kfree(page_list);
471 goto err_pbl;
472 }
473
474 ret = write_pbl(&mhp->rhp->rdev, page_list, mhp->attr.pbl_addr,
475 npages);
476 kfree(page_list);
477 if (ret)
478 goto err_pbl;
479
480 mhp->attr.pdid = php->pdid;
481 mhp->attr.zbva = 0;
482
483 mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
484 mhp->attr.va_fbo = *iova_start;
485 mhp->attr.page_size = shift - 12;
486
487 mhp->attr.len = (u32) total_size;
488 mhp->attr.pbl_size = npages;
489 ret = register_mem(rhp, php, mhp, shift);
490 if (ret)
491 goto err_pbl;
492
493 return &mhp->ibmr;
494
495err_pbl:
496 c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
497 mhp->attr.pbl_size << 3);
498
499err:
500 kfree(mhp);
501 return ERR_PTR(ret);
502
503}
504
505struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
506{
507 struct c4iw_dev *rhp;
508 struct c4iw_pd *php;
509 struct c4iw_mr *mhp;
510 int ret;
511 u32 stag = T4_STAG_UNSET;
512
513 PDBG("%s ib_pd %p\n", __func__, pd);
514 php = to_c4iw_pd(pd);
515 rhp = php->rhp;
516
517 mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
518 if (!mhp)
519 return ERR_PTR(-ENOMEM);
520
521 mhp->rhp = rhp;
522 mhp->attr.pdid = php->pdid;
523 mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
524 mhp->attr.mw_bind_enable = (acc&IB_ACCESS_MW_BIND) == IB_ACCESS_MW_BIND;
525 mhp->attr.zbva = 0;
526 mhp->attr.va_fbo = 0;
527 mhp->attr.page_size = 0;
528 mhp->attr.len = ~0UL;
529 mhp->attr.pbl_size = 0;
530
531 ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid,
532 FW_RI_STAG_NSMR, mhp->attr.perms,
533 mhp->attr.mw_bind_enable, 0, 0, ~0UL, 0, 0, 0);
534 if (ret)
535 goto err1;
536
537 ret = finish_mem_reg(mhp, stag);
538 if (ret)
539 goto err2;
540 return &mhp->ibmr;
541err2:
542 dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
543 mhp->attr.pbl_addr);
544err1:
545 kfree(mhp);
546 return ERR_PTR(ret);
547}
548
549struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
550 u64 virt, int acc, struct ib_udata *udata)
551{
552 __be64 *pages;
553 int shift, n, len;
554 int i, j, k;
555 int err = 0;
556 struct ib_umem_chunk *chunk;
557 struct c4iw_dev *rhp;
558 struct c4iw_pd *php;
559 struct c4iw_mr *mhp;
560
561 PDBG("%s ib_pd %p\n", __func__, pd);
562
563 if (length == ~0ULL)
564 return ERR_PTR(-EINVAL);
565
566 if ((length + start) < start)
567 return ERR_PTR(-EINVAL);
568
569 php = to_c4iw_pd(pd);
570 rhp = php->rhp;
571 mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
572 if (!mhp)
573 return ERR_PTR(-ENOMEM);
574
575 mhp->rhp = rhp;
576
577 mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
578 if (IS_ERR(mhp->umem)) {
579 err = PTR_ERR(mhp->umem);
580 kfree(mhp);
581 return ERR_PTR(err);
582 }
583
584 shift = ffs(mhp->umem->page_size) - 1;
585
586 n = 0;
587 list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
588 n += chunk->nents;
589
590 err = alloc_pbl(mhp, n);
591 if (err)
592 goto err;
593
594 pages = (__be64 *) __get_free_page(GFP_KERNEL);
595 if (!pages) {
596 err = -ENOMEM;
597 goto err_pbl;
598 }
599
600 i = n = 0;
601
602 list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
603 for (j = 0; j < chunk->nmap; ++j) {
604 len = sg_dma_len(&chunk->page_list[j]) >> shift;
605 for (k = 0; k < len; ++k) {
606 pages[i++] = cpu_to_be64(sg_dma_address(
607 &chunk->page_list[j]) +
608 mhp->umem->page_size * k);
609 if (i == PAGE_SIZE / sizeof *pages) {
610 err = write_pbl(&mhp->rhp->rdev,
611 pages,
612 mhp->attr.pbl_addr + (n << 3), i);
613 if (err)
614 goto pbl_done;
615 n += i;
616 i = 0;
617 }
618 }
619 }
620
621 if (i)
622 err = write_pbl(&mhp->rhp->rdev, pages,
623 mhp->attr.pbl_addr + (n << 3), i);
624
625pbl_done:
626 free_page((unsigned long) pages);
627 if (err)
628 goto err_pbl;
629
630 mhp->attr.pdid = php->pdid;
631 mhp->attr.zbva = 0;
632 mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
633 mhp->attr.va_fbo = virt;
634 mhp->attr.page_size = shift - 12;
635 mhp->attr.len = (u32) length;
636
637 err = register_mem(rhp, php, mhp, shift);
638 if (err)
639 goto err_pbl;
640
641 return &mhp->ibmr;
642
643err_pbl:
644 c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
645 mhp->attr.pbl_size << 3);
646
647err:
648 ib_umem_release(mhp->umem);
649 kfree(mhp);
650 return ERR_PTR(err);
651}
652
653struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd)
654{
655 struct c4iw_dev *rhp;
656 struct c4iw_pd *php;
657 struct c4iw_mw *mhp;
658 u32 mmid;
659 u32 stag = 0;
660 int ret;
661
662 php = to_c4iw_pd(pd);
663 rhp = php->rhp;
664 mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
665 if (!mhp)
666 return ERR_PTR(-ENOMEM);
667 ret = allocate_window(&rhp->rdev, &stag, php->pdid);
668 if (ret) {
669 kfree(mhp);
670 return ERR_PTR(ret);
671 }
672 mhp->rhp = rhp;
673 mhp->attr.pdid = php->pdid;
674 mhp->attr.type = FW_RI_STAG_MW;
675 mhp->attr.stag = stag;
676 mmid = (stag) >> 8;
677 mhp->ibmw.rkey = stag;
678 if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
679 deallocate_window(&rhp->rdev, mhp->attr.stag);
680 kfree(mhp);
681 return ERR_PTR(-ENOMEM);
682 }
683 PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
684 return &(mhp->ibmw);
685}
686
687int c4iw_dealloc_mw(struct ib_mw *mw)
688{
689 struct c4iw_dev *rhp;
690 struct c4iw_mw *mhp;
691 u32 mmid;
692
693 mhp = to_c4iw_mw(mw);
694 rhp = mhp->rhp;
695 mmid = (mw->rkey) >> 8;
696 deallocate_window(&rhp->rdev, mhp->attr.stag);
697 remove_handle(rhp, &rhp->mmidr, mmid);
698 kfree(mhp);
699 PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
700 return 0;
701}
702
703struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
704{
705 struct c4iw_dev *rhp;
706 struct c4iw_pd *php;
707 struct c4iw_mr *mhp;
708 u32 mmid;
709 u32 stag = 0;
710 int ret = 0;
711
712 php = to_c4iw_pd(pd);
713 rhp = php->rhp;
714 mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
715 if (!mhp)
716 goto err;
717
718 mhp->rhp = rhp;
719 ret = alloc_pbl(mhp, pbl_depth);
720 if (ret)
721 goto err1;
722 mhp->attr.pbl_size = pbl_depth;
723 ret = allocate_stag(&rhp->rdev, &stag, php->pdid,
724 mhp->attr.pbl_size, mhp->attr.pbl_addr);
725 if (ret)
726 goto err2;
727 mhp->attr.pdid = php->pdid;
728 mhp->attr.type = FW_RI_STAG_NSMR;
729 mhp->attr.stag = stag;
730 mhp->attr.state = 1;
731 mmid = (stag) >> 8;
732 mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
733 if (insert_handle(rhp, &rhp->mmidr, mhp, mmid))
734 goto err3;
735
736 PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
737 return &(mhp->ibmr);
738err3:
739 dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
740 mhp->attr.pbl_addr);
741err2:
742 c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
743 mhp->attr.pbl_size << 3);
744err1:
745 kfree(mhp);
746err:
747 return ERR_PTR(ret);
748}
749
750struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(struct ib_device *device,
751 int page_list_len)
752{
753 struct c4iw_fr_page_list *c4pl;
754 struct c4iw_dev *dev = to_c4iw_dev(device);
755 dma_addr_t dma_addr;
756 int size = sizeof *c4pl + page_list_len * sizeof(u64);
757
758 if (page_list_len > T4_MAX_FR_DEPTH)
759 return ERR_PTR(-EINVAL);
760
761 c4pl = dma_alloc_coherent(&dev->rdev.lldi.pdev->dev, size,
762 &dma_addr, GFP_KERNEL);
763 if (!c4pl)
764 return ERR_PTR(-ENOMEM);
765
766 pci_unmap_addr_set(c4pl, mapping, dma_addr);
767 c4pl->dma_addr = dma_addr;
768 c4pl->dev = dev;
769 c4pl->size = size;
770 c4pl->ibpl.page_list = (u64 *)(c4pl + 1);
771 c4pl->ibpl.max_page_list_len = page_list_len;
772
773 return &c4pl->ibpl;
774}
775
776void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *ibpl)
777{
778 struct c4iw_fr_page_list *c4pl = to_c4iw_fr_page_list(ibpl);
779
780 dma_free_coherent(&c4pl->dev->rdev.lldi.pdev->dev, c4pl->size,
781 c4pl, pci_unmap_addr(c4pl, mapping));
782}
783
784int c4iw_dereg_mr(struct ib_mr *ib_mr)
785{
786 struct c4iw_dev *rhp;
787 struct c4iw_mr *mhp;
788 u32 mmid;
789
790 PDBG("%s ib_mr %p\n", __func__, ib_mr);
791 /* There can be no memory windows */
792 if (atomic_read(&ib_mr->usecnt))
793 return -EINVAL;
794
795 mhp = to_c4iw_mr(ib_mr);
796 rhp = mhp->rhp;
797 mmid = mhp->attr.stag >> 8;
798 dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
799 mhp->attr.pbl_addr);
800 if (mhp->attr.pbl_size)
801 c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
802 mhp->attr.pbl_size << 3);
803 remove_handle(rhp, &rhp->mmidr, mmid);
804 if (mhp->kva)
805 kfree((void *) (unsigned long) mhp->kva);
806 if (mhp->umem)
807 ib_umem_release(mhp->umem);
808 PDBG("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
809 kfree(mhp);
810 return 0;
811}
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
new file mode 100644
index 000000000000..dfc49020bb9c
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -0,0 +1,518 @@
1/*
2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/module.h>
33#include <linux/moduleparam.h>
34#include <linux/device.h>
35#include <linux/netdevice.h>
36#include <linux/etherdevice.h>
37#include <linux/delay.h>
38#include <linux/errno.h>
39#include <linux/list.h>
40#include <linux/spinlock.h>
41#include <linux/ethtool.h>
42#include <linux/rtnetlink.h>
43#include <linux/inetdevice.h>
44#include <linux/io.h>
45
46#include <asm/irq.h>
47#include <asm/byteorder.h>
48
49#include <rdma/iw_cm.h>
50#include <rdma/ib_verbs.h>
51#include <rdma/ib_smi.h>
52#include <rdma/ib_umem.h>
53#include <rdma/ib_user_verbs.h>
54
55#include "iw_cxgb4.h"
56
57static int fastreg_support;
58module_param(fastreg_support, int, 0644);
59MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default=0)");
60
61static int c4iw_modify_port(struct ib_device *ibdev,
62 u8 port, int port_modify_mask,
63 struct ib_port_modify *props)
64{
65 return -ENOSYS;
66}
67
68static struct ib_ah *c4iw_ah_create(struct ib_pd *pd,
69 struct ib_ah_attr *ah_attr)
70{
71 return ERR_PTR(-ENOSYS);
72}
73
74static int c4iw_ah_destroy(struct ib_ah *ah)
75{
76 return -ENOSYS;
77}
78
79static int c4iw_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
80{
81 return -ENOSYS;
82}
83
84static int c4iw_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
85{
86 return -ENOSYS;
87}
88
89static int c4iw_process_mad(struct ib_device *ibdev, int mad_flags,
90 u8 port_num, struct ib_wc *in_wc,
91 struct ib_grh *in_grh, struct ib_mad *in_mad,
92 struct ib_mad *out_mad)
93{
94 return -ENOSYS;
95}
96
97static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
98{
99 struct c4iw_dev *rhp = to_c4iw_dev(context->device);
100 struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
101 struct c4iw_mm_entry *mm, *tmp;
102
103 PDBG("%s context %p\n", __func__, context);
104 list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
105 kfree(mm);
106 c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
107 kfree(ucontext);
108 return 0;
109}
110
111static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
112 struct ib_udata *udata)
113{
114 struct c4iw_ucontext *context;
115 struct c4iw_dev *rhp = to_c4iw_dev(ibdev);
116
117 PDBG("%s ibdev %p\n", __func__, ibdev);
118 context = kzalloc(sizeof(*context), GFP_KERNEL);
119 if (!context)
120 return ERR_PTR(-ENOMEM);
121 c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
122 INIT_LIST_HEAD(&context->mmaps);
123 spin_lock_init(&context->mmap_lock);
124 return &context->ibucontext;
125}
126
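/*
 * Sketch of the mmap scheme implemented below: the vm_pgoff supplied by
 * user space encodes a per-context "key" that was previously handed out
 * in the create_qp/create_cq responses (sq_key, rq_key, ...).  remove_mmap()
 * translates the key back into the address registered with insert_mmap().
 * Addresses that fall inside PCI BAR2 are the T4 doorbell/GTS pages and are
 * mapped uncached and write-only; anything else is DMA-coherent WQ/CQ
 * queue memory.
 */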
127static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
128{
129 int len = vma->vm_end - vma->vm_start;
130 u32 key = vma->vm_pgoff << PAGE_SHIFT;
131 struct c4iw_rdev *rdev;
132 int ret = 0;
133 struct c4iw_mm_entry *mm;
134 struct c4iw_ucontext *ucontext;
135 u64 addr;
136
137 PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,
138 key, len);
139
140 if (vma->vm_start & (PAGE_SIZE-1))
141 return -EINVAL;
142
143 rdev = &(to_c4iw_dev(context->device)->rdev);
144 ucontext = to_c4iw_ucontext(context);
145
146 mm = remove_mmap(ucontext, key, len);
147 if (!mm)
148 return -EINVAL;
149 addr = mm->addr;
150 kfree(mm);
151
152 if ((addr >= pci_resource_start(rdev->lldi.pdev, 2)) &&
153 (addr < (pci_resource_start(rdev->lldi.pdev, 2) +
154 pci_resource_len(rdev->lldi.pdev, 2)))) {
155
156 /*
157 * Map T4 DB register.
158 */
159 if (vma->vm_flags & VM_READ)
160 return -EPERM;
161
162 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
163 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
164 vma->vm_flags &= ~VM_MAYREAD;
165 ret = io_remap_pfn_range(vma, vma->vm_start,
166 addr >> PAGE_SHIFT,
167 len, vma->vm_page_prot);
168 } else {
169
170 /*
 171		 * Map WQ or CQ contiguous DMA memory...
172 */
173 ret = remap_pfn_range(vma, vma->vm_start,
174 addr >> PAGE_SHIFT,
175 len, vma->vm_page_prot);
176 }
177
178 return ret;
179}
180
181static int c4iw_deallocate_pd(struct ib_pd *pd)
182{
183 struct c4iw_dev *rhp;
184 struct c4iw_pd *php;
185
186 php = to_c4iw_pd(pd);
187 rhp = php->rhp;
188 PDBG("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
189 c4iw_put_resource(&rhp->rdev.resource.pdid_fifo, php->pdid,
190 &rhp->rdev.resource.pdid_fifo_lock);
191 kfree(php);
192 return 0;
193}
194
195static struct ib_pd *c4iw_allocate_pd(struct ib_device *ibdev,
196 struct ib_ucontext *context,
197 struct ib_udata *udata)
198{
199 struct c4iw_pd *php;
200 u32 pdid;
201 struct c4iw_dev *rhp;
202
203 PDBG("%s ibdev %p\n", __func__, ibdev);
 204	rhp = to_c4iw_dev(ibdev);
205 pdid = c4iw_get_resource(&rhp->rdev.resource.pdid_fifo,
206 &rhp->rdev.resource.pdid_fifo_lock);
207 if (!pdid)
208 return ERR_PTR(-EINVAL);
209 php = kzalloc(sizeof(*php), GFP_KERNEL);
210 if (!php) {
211 c4iw_put_resource(&rhp->rdev.resource.pdid_fifo, pdid,
212 &rhp->rdev.resource.pdid_fifo_lock);
213 return ERR_PTR(-ENOMEM);
214 }
215 php->pdid = pdid;
216 php->rhp = rhp;
217 if (context) {
218 if (ib_copy_to_udata(udata, &php->pdid, sizeof(u32))) {
219 c4iw_deallocate_pd(&php->ibpd);
220 return ERR_PTR(-EFAULT);
221 }
222 }
223 PDBG("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
224 return &php->ibpd;
225}
226
227static int c4iw_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
228 u16 *pkey)
229{
230 PDBG("%s ibdev %p\n", __func__, ibdev);
231 *pkey = 0;
232 return 0;
233}
234
235static int c4iw_query_gid(struct ib_device *ibdev, u8 port, int index,
236 union ib_gid *gid)
237{
238 struct c4iw_dev *dev;
239
240 PDBG("%s ibdev %p, port %d, index %d, gid %p\n",
241 __func__, ibdev, port, index, gid);
242 dev = to_c4iw_dev(ibdev);
243 BUG_ON(port == 0);
244 memset(&(gid->raw[0]), 0, sizeof(gid->raw));
245 memcpy(&(gid->raw[0]), dev->rdev.lldi.ports[port-1]->dev_addr, 6);
246 return 0;
247}
248
249static int c4iw_query_device(struct ib_device *ibdev,
250 struct ib_device_attr *props)
251{
252
253 struct c4iw_dev *dev;
254 PDBG("%s ibdev %p\n", __func__, ibdev);
255
256 dev = to_c4iw_dev(ibdev);
257 memset(props, 0, sizeof *props);
258 memcpy(&props->sys_image_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
259 props->hw_ver = dev->rdev.lldi.adapter_type;
260 props->fw_ver = dev->rdev.lldi.fw_vers;
261 props->device_cap_flags = dev->device_cap_flags;
262 props->page_size_cap = T4_PAGESIZE_MASK;
263 props->vendor_id = (u32)dev->rdev.lldi.pdev->vendor;
264 props->vendor_part_id = (u32)dev->rdev.lldi.pdev->device;
265 props->max_mr_size = T4_MAX_MR_SIZE;
266 props->max_qp = T4_MAX_NUM_QP;
267 props->max_qp_wr = T4_MAX_QP_DEPTH;
268 props->max_sge = T4_MAX_RECV_SGE;
269 props->max_sge_rd = 1;
270 props->max_qp_rd_atom = c4iw_max_read_depth;
271 props->max_qp_init_rd_atom = c4iw_max_read_depth;
272 props->max_cq = T4_MAX_NUM_CQ;
273 props->max_cqe = T4_MAX_CQ_DEPTH;
274 props->max_mr = c4iw_num_stags(&dev->rdev);
275 props->max_pd = T4_MAX_NUM_PD;
276 props->local_ca_ack_delay = 0;
277 props->max_fast_reg_page_list_len = T4_MAX_FR_DEPTH;
278
279 return 0;
280}
281
282static int c4iw_query_port(struct ib_device *ibdev, u8 port,
283 struct ib_port_attr *props)
284{
285 struct c4iw_dev *dev;
286 struct net_device *netdev;
287 struct in_device *inetdev;
288
289 PDBG("%s ibdev %p\n", __func__, ibdev);
290
291 dev = to_c4iw_dev(ibdev);
292 netdev = dev->rdev.lldi.ports[port-1];
293
294 memset(props, 0, sizeof(struct ib_port_attr));
295 props->max_mtu = IB_MTU_4096;
296 if (netdev->mtu >= 4096)
297 props->active_mtu = IB_MTU_4096;
298 else if (netdev->mtu >= 2048)
299 props->active_mtu = IB_MTU_2048;
300 else if (netdev->mtu >= 1024)
301 props->active_mtu = IB_MTU_1024;
302 else if (netdev->mtu >= 512)
303 props->active_mtu = IB_MTU_512;
304 else
305 props->active_mtu = IB_MTU_256;
306
307 if (!netif_carrier_ok(netdev))
308 props->state = IB_PORT_DOWN;
309 else {
310 inetdev = in_dev_get(netdev);
311 if (inetdev) {
312 if (inetdev->ifa_list)
313 props->state = IB_PORT_ACTIVE;
314 else
315 props->state = IB_PORT_INIT;
316 in_dev_put(inetdev);
317 } else
318 props->state = IB_PORT_INIT;
319 }
320
321 props->port_cap_flags =
322 IB_PORT_CM_SUP |
323 IB_PORT_SNMP_TUNNEL_SUP |
324 IB_PORT_REINIT_SUP |
325 IB_PORT_DEVICE_MGMT_SUP |
326 IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
327 props->gid_tbl_len = 1;
328 props->pkey_tbl_len = 1;
329 props->active_width = 2;
330 props->active_speed = 2;
331 props->max_msg_sz = -1;
332
333 return 0;
334}
335
336static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
337 char *buf)
338{
339 struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
340 ibdev.dev);
341 PDBG("%s dev 0x%p\n", __func__, dev);
342 return sprintf(buf, "%d\n", c4iw_dev->rdev.lldi.adapter_type);
343}
344
345static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr,
346 char *buf)
347{
348 struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
349 ibdev.dev);
350 PDBG("%s dev 0x%p\n", __func__, dev);
351
352 return sprintf(buf, "%u.%u.%u.%u\n",
353 FW_HDR_FW_VER_MAJOR_GET(c4iw_dev->rdev.lldi.fw_vers),
354 FW_HDR_FW_VER_MINOR_GET(c4iw_dev->rdev.lldi.fw_vers),
355 FW_HDR_FW_VER_MICRO_GET(c4iw_dev->rdev.lldi.fw_vers),
356 FW_HDR_FW_VER_BUILD_GET(c4iw_dev->rdev.lldi.fw_vers));
357}
358
359static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
360 char *buf)
361{
362 struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
363 ibdev.dev);
364 struct ethtool_drvinfo info;
365 struct net_device *lldev = c4iw_dev->rdev.lldi.ports[0];
366
367 PDBG("%s dev 0x%p\n", __func__, dev);
368 lldev->ethtool_ops->get_drvinfo(lldev, &info);
369 return sprintf(buf, "%s\n", info.driver);
370}
371
372static ssize_t show_board(struct device *dev, struct device_attribute *attr,
373 char *buf)
374{
375 struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
376 ibdev.dev);
377 PDBG("%s dev 0x%p\n", __func__, dev);
378 return sprintf(buf, "%x.%x\n", c4iw_dev->rdev.lldi.pdev->vendor,
379 c4iw_dev->rdev.lldi.pdev->device);
380}
381
382static int c4iw_get_mib(struct ib_device *ibdev,
383 union rdma_protocol_stats *stats)
384{
385 return -ENOSYS;
386}
387
388static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
389static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
390static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
391static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
392
393static struct device_attribute *c4iw_class_attributes[] = {
394 &dev_attr_hw_rev,
395 &dev_attr_fw_ver,
396 &dev_attr_hca_type,
397 &dev_attr_board_id,
398};
399
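/*
 * Register the device with the RDMA core: fill in the ib_device attributes
 * and verb entry points, allocate and populate the iWARP CM ops, call
 * ib_register_device(), and finally create the sysfs attribute files.
 * Failures unwind in reverse order through the bail2/bail1 labels.
 */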
400int c4iw_register_device(struct c4iw_dev *dev)
401{
402 int ret;
403 int i;
404
405 PDBG("%s c4iw_dev %p\n", __func__, dev);
406 BUG_ON(!dev->rdev.lldi.ports[0]);
407 strlcpy(dev->ibdev.name, "cxgb4_%d", IB_DEVICE_NAME_MAX);
408 memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
409 memcpy(&dev->ibdev.node_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
410 dev->ibdev.owner = THIS_MODULE;
411 dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW;
412 if (fastreg_support)
413 dev->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
414 dev->ibdev.local_dma_lkey = 0;
415 dev->ibdev.uverbs_cmd_mask =
416 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
417 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
418 (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
419 (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
420 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
421 (1ull << IB_USER_VERBS_CMD_REG_MR) |
422 (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
423 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
424 (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
425 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
426 (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
427 (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
428 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
429 (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
430 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
431 (1ull << IB_USER_VERBS_CMD_POST_SEND) |
432 (1ull << IB_USER_VERBS_CMD_POST_RECV);
433 dev->ibdev.node_type = RDMA_NODE_RNIC;
434 memcpy(dev->ibdev.node_desc, C4IW_NODE_DESC, sizeof(C4IW_NODE_DESC));
435 dev->ibdev.phys_port_cnt = dev->rdev.lldi.nports;
436 dev->ibdev.num_comp_vectors = 1;
437 dev->ibdev.dma_device = &(dev->rdev.lldi.pdev->dev);
438 dev->ibdev.query_device = c4iw_query_device;
439 dev->ibdev.query_port = c4iw_query_port;
440 dev->ibdev.modify_port = c4iw_modify_port;
441 dev->ibdev.query_pkey = c4iw_query_pkey;
442 dev->ibdev.query_gid = c4iw_query_gid;
443 dev->ibdev.alloc_ucontext = c4iw_alloc_ucontext;
444 dev->ibdev.dealloc_ucontext = c4iw_dealloc_ucontext;
445 dev->ibdev.mmap = c4iw_mmap;
446 dev->ibdev.alloc_pd = c4iw_allocate_pd;
447 dev->ibdev.dealloc_pd = c4iw_deallocate_pd;
448 dev->ibdev.create_ah = c4iw_ah_create;
449 dev->ibdev.destroy_ah = c4iw_ah_destroy;
450 dev->ibdev.create_qp = c4iw_create_qp;
451 dev->ibdev.modify_qp = c4iw_ib_modify_qp;
452 dev->ibdev.destroy_qp = c4iw_destroy_qp;
453 dev->ibdev.create_cq = c4iw_create_cq;
454 dev->ibdev.destroy_cq = c4iw_destroy_cq;
455 dev->ibdev.resize_cq = c4iw_resize_cq;
456 dev->ibdev.poll_cq = c4iw_poll_cq;
457 dev->ibdev.get_dma_mr = c4iw_get_dma_mr;
458 dev->ibdev.reg_phys_mr = c4iw_register_phys_mem;
459 dev->ibdev.rereg_phys_mr = c4iw_reregister_phys_mem;
460 dev->ibdev.reg_user_mr = c4iw_reg_user_mr;
461 dev->ibdev.dereg_mr = c4iw_dereg_mr;
462 dev->ibdev.alloc_mw = c4iw_alloc_mw;
463 dev->ibdev.bind_mw = c4iw_bind_mw;
464 dev->ibdev.dealloc_mw = c4iw_dealloc_mw;
465 dev->ibdev.alloc_fast_reg_mr = c4iw_alloc_fast_reg_mr;
466 dev->ibdev.alloc_fast_reg_page_list = c4iw_alloc_fastreg_pbl;
467 dev->ibdev.free_fast_reg_page_list = c4iw_free_fastreg_pbl;
468 dev->ibdev.attach_mcast = c4iw_multicast_attach;
469 dev->ibdev.detach_mcast = c4iw_multicast_detach;
470 dev->ibdev.process_mad = c4iw_process_mad;
471 dev->ibdev.req_notify_cq = c4iw_arm_cq;
472 dev->ibdev.post_send = c4iw_post_send;
473 dev->ibdev.post_recv = c4iw_post_receive;
474 dev->ibdev.get_protocol_stats = c4iw_get_mib;
475
476 dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
477 if (!dev->ibdev.iwcm)
478 return -ENOMEM;
479
480 dev->ibdev.iwcm->connect = c4iw_connect;
481 dev->ibdev.iwcm->accept = c4iw_accept_cr;
482 dev->ibdev.iwcm->reject = c4iw_reject_cr;
483 dev->ibdev.iwcm->create_listen = c4iw_create_listen;
484 dev->ibdev.iwcm->destroy_listen = c4iw_destroy_listen;
485 dev->ibdev.iwcm->add_ref = c4iw_qp_add_ref;
486 dev->ibdev.iwcm->rem_ref = c4iw_qp_rem_ref;
487 dev->ibdev.iwcm->get_qp = c4iw_get_qp;
488
489 ret = ib_register_device(&dev->ibdev);
490 if (ret)
491 goto bail1;
492
493 for (i = 0; i < ARRAY_SIZE(c4iw_class_attributes); ++i) {
494 ret = device_create_file(&dev->ibdev.dev,
495 c4iw_class_attributes[i]);
496 if (ret)
497 goto bail2;
498 }
499 return 0;
500bail2:
501 ib_unregister_device(&dev->ibdev);
502bail1:
503 kfree(dev->ibdev.iwcm);
504 return ret;
505}
506
507void c4iw_unregister_device(struct c4iw_dev *dev)
508{
509 int i;
510
511 PDBG("%s c4iw_dev %p\n", __func__, dev);
512 for (i = 0; i < ARRAY_SIZE(c4iw_class_attributes); ++i)
513 device_remove_file(&dev->ibdev.dev,
514 c4iw_class_attributes[i]);
515 ib_unregister_device(&dev->ibdev);
516 kfree(dev->ibdev.iwcm);
517 return;
518}
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
new file mode 100644
index 000000000000..83a01dc0c4c1
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -0,0 +1,1577 @@
1/*
2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include "iw_cxgb4.h"
33
34static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
35 struct c4iw_dev_ucontext *uctx)
36{
37 /*
38 * uP clears EQ contexts when the connection exits rdma mode,
39 * so no need to post a RESET WR for these EQs.
40 */
41 dma_free_coherent(&(rdev->lldi.pdev->dev),
42 wq->rq.memsize, wq->rq.queue,
43 pci_unmap_addr(&wq->rq, mapping));
44 dma_free_coherent(&(rdev->lldi.pdev->dev),
45 wq->sq.memsize, wq->sq.queue,
46 pci_unmap_addr(&wq->sq, mapping));
47 c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
48 kfree(wq->rq.sw_rq);
49 kfree(wq->sq.sw_sq);
50 c4iw_put_qpid(rdev, wq->rq.qid, uctx);
51 c4iw_put_qpid(rdev, wq->sq.qid, uctx);
52 return 0;
53}
54
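/*
 * Allocate the HW resources for a new QP: SQ and RQ qids, kernel shadow
 * rings (sw_sq/sw_rq) for kernel QPs only, an RQT allocation and
 * DMA-coherent queue memory.  A single FW_RI_RES_WR carrying two resource
 * entries (one for the SQ, one for the RQ) is then posted and we wait for
 * the firmware completion before the queues are considered usable.  The
 * err1..err7 labels undo the allocations in reverse order.
 */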
55static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
56 struct t4_cq *rcq, struct t4_cq *scq,
57 struct c4iw_dev_ucontext *uctx)
58{
59 int user = (uctx != &rdev->uctx);
60 struct fw_ri_res_wr *res_wr;
61 struct fw_ri_res *res;
62 int wr_len;
63 struct c4iw_wr_wait wr_wait;
64 struct sk_buff *skb;
65 int ret;
66 int eqsize;
67
68 wq->sq.qid = c4iw_get_qpid(rdev, uctx);
69 if (!wq->sq.qid)
70 return -ENOMEM;
71
72 wq->rq.qid = c4iw_get_qpid(rdev, uctx);
73 if (!wq->rq.qid)
74 goto err1;
75
76 if (!user) {
77 wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
78 GFP_KERNEL);
79 if (!wq->sq.sw_sq)
80 goto err2;
81
82 wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
83 GFP_KERNEL);
84 if (!wq->rq.sw_rq)
85 goto err3;
86 }
87
88 /*
89 * RQT must be a power of 2.
90 */
91 wq->rq.rqt_size = roundup_pow_of_two(wq->rq.size);
92 wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
93 if (!wq->rq.rqt_hwaddr)
94 goto err4;
95
96 wq->sq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
97 wq->sq.memsize, &(wq->sq.dma_addr),
98 GFP_KERNEL);
99 if (!wq->sq.queue)
100 goto err5;
101 memset(wq->sq.queue, 0, wq->sq.memsize);
102 pci_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
103
104 wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
105 wq->rq.memsize, &(wq->rq.dma_addr),
106 GFP_KERNEL);
107 if (!wq->rq.queue)
108 goto err6;
109 PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
110 __func__, wq->sq.queue,
111 (unsigned long long)virt_to_phys(wq->sq.queue),
112 wq->rq.queue,
113 (unsigned long long)virt_to_phys(wq->rq.queue));
114 memset(wq->rq.queue, 0, wq->rq.memsize);
115 pci_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);
116
117 wq->db = rdev->lldi.db_reg;
118 wq->gts = rdev->lldi.gts_reg;
119 if (user) {
120 wq->sq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
121 (wq->sq.qid << rdev->qpshift);
122 wq->sq.udb &= PAGE_MASK;
123 wq->rq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
124 (wq->rq.qid << rdev->qpshift);
125 wq->rq.udb &= PAGE_MASK;
126 }
127 wq->rdev = rdev;
128 wq->rq.msn = 1;
129
130 /* build fw_ri_res_wr */
131 wr_len = sizeof *res_wr + 2 * sizeof *res;
132
133 skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
134 if (!skb) {
135 ret = -ENOMEM;
136 goto err7;
137 }
138 set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
139
140 res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
141 memset(res_wr, 0, wr_len);
142 res_wr->op_nres = cpu_to_be32(
143 FW_WR_OP(FW_RI_RES_WR) |
144 V_FW_RI_RES_WR_NRES(2) |
145 FW_WR_COMPL(1));
146 res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
147 res_wr->cookie = (u64)&wr_wait;
148 res = res_wr->res;
149 res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
150 res->u.sqrq.op = FW_RI_RES_OP_WRITE;
151
152 /*
153 * eqsize is the number of 64B entries plus the status page size.
154 */
155 eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;
156
157 res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
158 V_FW_RI_RES_WR_HOSTFCMODE(0) | /* no host cidx updates */
159 V_FW_RI_RES_WR_CPRIO(0) | /* don't keep in chip cache */
160 V_FW_RI_RES_WR_PCIECHN(0) | /* set by uP at ri_init time */
161 V_FW_RI_RES_WR_IQID(scq->cqid));
162 res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
163 V_FW_RI_RES_WR_DCAEN(0) |
164 V_FW_RI_RES_WR_DCACPU(0) |
165 V_FW_RI_RES_WR_FBMIN(3) |
166 V_FW_RI_RES_WR_FBMAX(3) |
167 V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
168 V_FW_RI_RES_WR_CIDXFTHRESH(0) |
169 V_FW_RI_RES_WR_EQSIZE(eqsize));
170 res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
171 res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
172 res++;
173 res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
174 res->u.sqrq.op = FW_RI_RES_OP_WRITE;
175
176 /*
177 * eqsize is the number of 64B entries plus the status page size.
178 */
179 eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;
180 res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
181 V_FW_RI_RES_WR_HOSTFCMODE(0) | /* no host cidx updates */
182 V_FW_RI_RES_WR_CPRIO(0) | /* don't keep in chip cache */
183 V_FW_RI_RES_WR_PCIECHN(0) | /* set by uP at ri_init time */
184 V_FW_RI_RES_WR_IQID(rcq->cqid));
185 res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
186 V_FW_RI_RES_WR_DCAEN(0) |
187 V_FW_RI_RES_WR_DCACPU(0) |
188 V_FW_RI_RES_WR_FBMIN(3) |
189 V_FW_RI_RES_WR_FBMAX(3) |
190 V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
191 V_FW_RI_RES_WR_CIDXFTHRESH(0) |
192 V_FW_RI_RES_WR_EQSIZE(eqsize));
193 res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
194 res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);
195
196 c4iw_init_wr_wait(&wr_wait);
197
198 ret = c4iw_ofld_send(rdev, skb);
199 if (ret)
200 goto err7;
201 wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
202 if (!wr_wait.done) {
203 printk(KERN_ERR MOD "Device %s not responding!\n",
204 pci_name(rdev->lldi.pdev));
205 rdev->flags = T4_FATAL_ERROR;
206 ret = -EIO;
207 } else
208 ret = wr_wait.ret;
209 if (ret)
210 goto err7;
211
212 PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%llx rqudb 0x%llx\n",
213 __func__, wq->sq.qid, wq->rq.qid, wq->db,
214 (unsigned long long)wq->sq.udb, (unsigned long long)wq->rq.udb);
215
216 return 0;
217err7:
218 dma_free_coherent(&(rdev->lldi.pdev->dev),
219 wq->rq.memsize, wq->rq.queue,
220 pci_unmap_addr(&wq->rq, mapping));
221err6:
222 dma_free_coherent(&(rdev->lldi.pdev->dev),
223 wq->sq.memsize, wq->sq.queue,
224 pci_unmap_addr(&wq->sq, mapping));
225err5:
226 c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
227err4:
228 kfree(wq->rq.sw_rq);
229err3:
230 kfree(wq->sq.sw_sq);
231err2:
232 c4iw_put_qpid(rdev, wq->rq.qid, uctx);
233err1:
234 c4iw_put_qpid(rdev, wq->sq.qid, uctx);
235 return -ENOMEM;
236}
237
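/*
 * WQE builders: each build_* routine fills in the opcode-specific part of
 * the work request and returns its size in 16-byte units via *len16.
 * Payloads are either copied inline into the WQE as FW_RI_DATA_IMMD (when
 * IB_SEND_INLINE is set and the total length fits) or described by an
 * FW_RI_DATA_ISGL list of SGEs referencing registered memory.
 */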
238static int build_rdma_send(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
239{
240 int i;
241 u32 plen;
242 int size;
243 u8 *datap;
244
245 if (wr->num_sge > T4_MAX_SEND_SGE)
246 return -EINVAL;
247 switch (wr->opcode) {
248 case IB_WR_SEND:
249 if (wr->send_flags & IB_SEND_SOLICITED)
250 wqe->send.sendop_pkd = cpu_to_be32(
251 V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE));
252 else
253 wqe->send.sendop_pkd = cpu_to_be32(
254 V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND));
255 wqe->send.stag_inv = 0;
256 break;
257 case IB_WR_SEND_WITH_INV:
258 if (wr->send_flags & IB_SEND_SOLICITED)
259 wqe->send.sendop_pkd = cpu_to_be32(
260 V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE_INV));
261 else
262 wqe->send.sendop_pkd = cpu_to_be32(
263 V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_INV));
264 wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
265 break;
266
267 default:
268 return -EINVAL;
269 }
270 plen = 0;
271 if (wr->num_sge) {
272 if (wr->send_flags & IB_SEND_INLINE) {
273 datap = (u8 *)wqe->send.u.immd_src[0].data;
274 for (i = 0; i < wr->num_sge; i++) {
275 if ((plen + wr->sg_list[i].length) >
276 T4_MAX_SEND_INLINE) {
277 return -EMSGSIZE;
278 }
279 plen += wr->sg_list[i].length;
280 memcpy(datap,
281 (void *)(unsigned long)wr->sg_list[i].addr,
282 wr->sg_list[i].length);
283 datap += wr->sg_list[i].length;
284 }
285 wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
286 wqe->send.u.immd_src[0].r1 = 0;
287 wqe->send.u.immd_src[0].r2 = 0;
288 wqe->send.u.immd_src[0].immdlen = cpu_to_be32(plen);
289 size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
290 plen;
291 } else {
292 for (i = 0; i < wr->num_sge; i++) {
293 if ((plen + wr->sg_list[i].length) < plen)
294 return -EMSGSIZE;
295 plen += wr->sg_list[i].length;
296 wqe->send.u.isgl_src[0].sge[i].stag =
297 cpu_to_be32(wr->sg_list[i].lkey);
298 wqe->send.u.isgl_src[0].sge[i].len =
299 cpu_to_be32(wr->sg_list[i].length);
300 wqe->send.u.isgl_src[0].sge[i].to =
301 cpu_to_be64(wr->sg_list[i].addr);
302 }
303 wqe->send.u.isgl_src[0].op = FW_RI_DATA_ISGL;
304 wqe->send.u.isgl_src[0].r1 = 0;
305 wqe->send.u.isgl_src[0].nsge = cpu_to_be16(wr->num_sge);
306 wqe->send.u.isgl_src[0].r2 = 0;
307 size = sizeof wqe->send + sizeof(struct fw_ri_isgl) +
308 wr->num_sge * sizeof(struct fw_ri_sge);
309 }
310 } else {
311 wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
312 wqe->send.u.immd_src[0].r1 = 0;
313 wqe->send.u.immd_src[0].r2 = 0;
314 wqe->send.u.immd_src[0].immdlen = 0;
315 size = sizeof wqe->send + sizeof(struct fw_ri_immd);
316 }
317 *len16 = DIV_ROUND_UP(size, 16);
318 wqe->send.plen = cpu_to_be32(plen);
319 return 0;
320}
321
322static int build_rdma_write(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
323{
324 int i;
325 u32 plen;
326 int size;
327 u8 *datap;
328
329 if (wr->num_sge > T4_MAX_WRITE_SGE)
330 return -EINVAL;
331 wqe->write.r2 = 0;
332 wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey);
333 wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr);
334 plen = 0;
335 if (wr->num_sge) {
336 if (wr->send_flags & IB_SEND_INLINE) {
337 datap = (u8 *)wqe->write.u.immd_src[0].data;
338 for (i = 0; i < wr->num_sge; i++) {
339 if ((plen + wr->sg_list[i].length) >
340 T4_MAX_WRITE_INLINE) {
341 return -EMSGSIZE;
342 }
343 plen += wr->sg_list[i].length;
344 memcpy(datap,
345 (void *)(unsigned long)wr->sg_list[i].addr,
346 wr->sg_list[i].length);
347 datap += wr->sg_list[i].length;
348 }
349 wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
350 wqe->write.u.immd_src[0].r1 = 0;
351 wqe->write.u.immd_src[0].r2 = 0;
352 wqe->write.u.immd_src[0].immdlen = cpu_to_be32(plen);
353 size = sizeof wqe->write + sizeof(struct fw_ri_immd) +
354 plen;
355 } else {
356 for (i = 0; i < wr->num_sge; i++) {
357 if ((plen + wr->sg_list[i].length) < plen)
358 return -EMSGSIZE;
359 plen += wr->sg_list[i].length;
360 wqe->write.u.isgl_src[0].sge[i].stag =
361 cpu_to_be32(wr->sg_list[i].lkey);
362 wqe->write.u.isgl_src[0].sge[i].len =
363 cpu_to_be32(wr->sg_list[i].length);
364 wqe->write.u.isgl_src[0].sge[i].to =
365 cpu_to_be64(wr->sg_list[i].addr);
366 }
367 wqe->write.u.isgl_src[0].op = FW_RI_DATA_ISGL;
368 wqe->write.u.isgl_src[0].r1 = 0;
369 wqe->write.u.isgl_src[0].nsge =
370 cpu_to_be16(wr->num_sge);
371 wqe->write.u.isgl_src[0].r2 = 0;
372 size = sizeof wqe->write + sizeof(struct fw_ri_isgl) +
373 wr->num_sge * sizeof(struct fw_ri_sge);
374 }
375 } else {
376 wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
377 wqe->write.u.immd_src[0].r1 = 0;
378 wqe->write.u.immd_src[0].r2 = 0;
379 wqe->write.u.immd_src[0].immdlen = 0;
380 size = sizeof wqe->write + sizeof(struct fw_ri_immd);
381 }
382 *len16 = DIV_ROUND_UP(size, 16);
383 wqe->write.plen = cpu_to_be32(plen);
384 return 0;
385}
386
387static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
388{
389 if (wr->num_sge > 1)
390 return -EINVAL;
391 if (wr->num_sge) {
392 wqe->read.stag_src = cpu_to_be32(wr->wr.rdma.rkey);
393 wqe->read.to_src_hi = cpu_to_be32((u32)(wr->wr.rdma.remote_addr
394 >> 32));
395 wqe->read.to_src_lo = cpu_to_be32((u32)wr->wr.rdma.remote_addr);
396 wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey);
397 wqe->read.plen = cpu_to_be32(wr->sg_list[0].length);
398 wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr
399 >> 32));
400 wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr));
401 } else {
402 wqe->read.stag_src = cpu_to_be32(2);
403 wqe->read.to_src_hi = 0;
404 wqe->read.to_src_lo = 0;
405 wqe->read.stag_sink = cpu_to_be32(2);
406 wqe->read.plen = 0;
407 wqe->read.to_sink_hi = 0;
408 wqe->read.to_sink_lo = 0;
409 }
410 wqe->read.r2 = 0;
411 wqe->read.r5 = 0;
412 *len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
413 return 0;
414}
415
416static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
417 struct ib_recv_wr *wr, u8 *len16)
418{
419 int i;
420 int plen = 0;
421
422 for (i = 0; i < wr->num_sge; i++) {
423 if ((plen + wr->sg_list[i].length) < plen)
424 return -EMSGSIZE;
425 plen += wr->sg_list[i].length;
426 wqe->recv.isgl.sge[i].stag =
427 cpu_to_be32(wr->sg_list[i].lkey);
428 wqe->recv.isgl.sge[i].len =
429 cpu_to_be32(wr->sg_list[i].length);
430 wqe->recv.isgl.sge[i].to =
431 cpu_to_be64(wr->sg_list[i].addr);
432 }
433 for (; i < T4_MAX_RECV_SGE; i++) {
434 wqe->recv.isgl.sge[i].stag = 0;
435 wqe->recv.isgl.sge[i].len = 0;
436 wqe->recv.isgl.sge[i].to = 0;
437 }
438 wqe->recv.isgl.op = FW_RI_DATA_ISGL;
439 wqe->recv.isgl.r1 = 0;
440 wqe->recv.isgl.nsge = cpu_to_be16(wr->num_sge);
441 wqe->recv.isgl.r2 = 0;
442 *len16 = DIV_ROUND_UP(sizeof wqe->recv +
443 wr->num_sge * sizeof(struct fw_ri_sge), 16);
444 return 0;
445}
446
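/*
 * Fast-register WRs carry the page list either as immediate data inside
 * the WQE (when the rounded PBL fits within T4_MAX_FR_IMMD bytes) or as a
 * DSGL pointing at the DMA-mapped c4iw_fr_page_list (see
 * c4iw_alloc_fastreg_pbl()).
 */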
447static int build_fastreg(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
448{
449
450 struct fw_ri_immd *imdp;
451 __be64 *p;
452 int i;
453 int pbllen = roundup(wr->wr.fast_reg.page_list_len * sizeof(u64), 32);
454
455 if (wr->wr.fast_reg.page_list_len > T4_MAX_FR_DEPTH)
456 return -EINVAL;
457
458 wqe->fr.qpbinde_to_dcacpu = 0;
459 wqe->fr.pgsz_shift = wr->wr.fast_reg.page_shift - 12;
460 wqe->fr.addr_type = FW_RI_VA_BASED_TO;
461 wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->wr.fast_reg.access_flags);
462 wqe->fr.len_hi = 0;
463 wqe->fr.len_lo = cpu_to_be32(wr->wr.fast_reg.length);
464 wqe->fr.stag = cpu_to_be32(wr->wr.fast_reg.rkey);
465 wqe->fr.va_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
466 wqe->fr.va_lo_fbo = cpu_to_be32(wr->wr.fast_reg.iova_start &
467 0xffffffff);
468 if (pbllen > T4_MAX_FR_IMMD) {
469 struct c4iw_fr_page_list *c4pl =
470 to_c4iw_fr_page_list(wr->wr.fast_reg.page_list);
471 struct fw_ri_dsgl *sglp;
472
473 sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
474 sglp->op = FW_RI_DATA_DSGL;
475 sglp->r1 = 0;
476 sglp->nsge = cpu_to_be16(1);
477 sglp->addr0 = cpu_to_be64(c4pl->dma_addr);
478 sglp->len0 = cpu_to_be32(pbllen);
479
480 *len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *sglp, 16);
481 } else {
482 imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
483 imdp->op = FW_RI_DATA_IMMD;
484 imdp->r1 = 0;
485 imdp->r2 = 0;
486 imdp->immdlen = cpu_to_be32(pbllen);
487 p = (__be64 *)(imdp + 1);
488 for (i = 0; i < wr->wr.fast_reg.page_list_len; i++, p++)
489 *p = cpu_to_be64(
490 (u64)wr->wr.fast_reg.page_list->page_list[i]);
491 *len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *imdp + pbllen,
492 16);
493 }
494 return 0;
495}
496
497static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr,
498 u8 *len16)
499{
500 wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
501 wqe->inv.r2 = 0;
502 *len16 = DIV_ROUND_UP(sizeof wqe->inv, 16);
503 return 0;
504}
505
506void c4iw_qp_add_ref(struct ib_qp *qp)
507{
508 PDBG("%s ib_qp %p\n", __func__, qp);
509 atomic_inc(&(to_c4iw_qp(qp)->refcnt));
510}
511
512void c4iw_qp_rem_ref(struct ib_qp *qp)
513{
514 PDBG("%s ib_qp %p\n", __func__, qp);
515 if (atomic_dec_and_test(&(to_c4iw_qp(qp)->refcnt)))
516 wake_up(&(to_c4iw_qp(qp)->wait));
517}
518
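/*
 * Post send work requests.  With the QP lock held we check for WQ error
 * state and SQ space, build one WQE per WR at the current pidx, record the
 * request in the software SQ (swsqe) for later completion processing, and
 * finally ring the SQ doorbell (if enabled) once with the number of WQEs
 * produced.
 */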
519int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
520 struct ib_send_wr **bad_wr)
521{
522 int err = 0;
523 u8 len16 = 0;
524 enum fw_wr_opcodes fw_opcode = 0;
525 enum fw_ri_wr_flags fw_flags;
526 struct c4iw_qp *qhp;
527 union t4_wr *wqe;
528 u32 num_wrs;
529 struct t4_swsqe *swsqe;
530 unsigned long flag;
531 u16 idx = 0;
532
533 qhp = to_c4iw_qp(ibqp);
534 spin_lock_irqsave(&qhp->lock, flag);
535 if (t4_wq_in_error(&qhp->wq)) {
536 spin_unlock_irqrestore(&qhp->lock, flag);
537 return -EINVAL;
538 }
539 num_wrs = t4_sq_avail(&qhp->wq);
540 if (num_wrs == 0) {
541 spin_unlock_irqrestore(&qhp->lock, flag);
542 return -ENOMEM;
543 }
544 while (wr) {
545 if (num_wrs == 0) {
546 err = -ENOMEM;
547 *bad_wr = wr;
548 break;
549 }
550 wqe = &qhp->wq.sq.queue[qhp->wq.sq.pidx];
551 fw_flags = 0;
552 if (wr->send_flags & IB_SEND_SOLICITED)
553 fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
554 if (wr->send_flags & IB_SEND_SIGNALED)
555 fw_flags |= FW_RI_COMPLETION_FLAG;
556 swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
557 switch (wr->opcode) {
558 case IB_WR_SEND_WITH_INV:
559 case IB_WR_SEND:
560 if (wr->send_flags & IB_SEND_FENCE)
561 fw_flags |= FW_RI_READ_FENCE_FLAG;
562 fw_opcode = FW_RI_SEND_WR;
563 if (wr->opcode == IB_WR_SEND)
564 swsqe->opcode = FW_RI_SEND;
565 else
566 swsqe->opcode = FW_RI_SEND_WITH_INV;
567 err = build_rdma_send(wqe, wr, &len16);
568 break;
569 case IB_WR_RDMA_WRITE:
570 fw_opcode = FW_RI_RDMA_WRITE_WR;
571 swsqe->opcode = FW_RI_RDMA_WRITE;
572 err = build_rdma_write(wqe, wr, &len16);
573 break;
574 case IB_WR_RDMA_READ:
575 fw_opcode = FW_RI_RDMA_READ_WR;
576 swsqe->opcode = FW_RI_READ_REQ;
577 fw_flags = 0;
578 err = build_rdma_read(wqe, wr, &len16);
579 if (err)
580 break;
581 swsqe->read_len = wr->sg_list[0].length;
582 if (!qhp->wq.sq.oldest_read)
583 qhp->wq.sq.oldest_read = swsqe;
584 break;
585 case IB_WR_FAST_REG_MR:
586 fw_opcode = FW_RI_FR_NSMR_WR;
587 swsqe->opcode = FW_RI_FAST_REGISTER;
588 err = build_fastreg(wqe, wr, &len16);
589 break;
590 case IB_WR_LOCAL_INV:
591 fw_opcode = FW_RI_INV_LSTAG_WR;
592 swsqe->opcode = FW_RI_LOCAL_INV;
593 err = build_inv_stag(wqe, wr, &len16);
594 break;
595 default:
596 PDBG("%s post of type=%d TBD!\n", __func__,
597 wr->opcode);
598 err = -EINVAL;
599 }
600 if (err) {
601 *bad_wr = wr;
602 break;
603 }
604 swsqe->idx = qhp->wq.sq.pidx;
605 swsqe->complete = 0;
606 swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED);
607 swsqe->wr_id = wr->wr_id;
608
609 init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);
610
611 PDBG("%s cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
612 __func__, (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
613 swsqe->opcode, swsqe->read_len);
614 wr = wr->next;
615 num_wrs--;
616 t4_sq_produce(&qhp->wq);
617 idx++;
618 }
619 if (t4_wq_db_enabled(&qhp->wq))
620 t4_ring_sq_db(&qhp->wq, idx);
621 spin_unlock_irqrestore(&qhp->lock, flag);
622 return err;
623}
624
625int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
626 struct ib_recv_wr **bad_wr)
627{
628 int err = 0;
629 struct c4iw_qp *qhp;
630 union t4_recv_wr *wqe;
631 u32 num_wrs;
632 u8 len16 = 0;
633 unsigned long flag;
634 u16 idx = 0;
635
636 qhp = to_c4iw_qp(ibqp);
637 spin_lock_irqsave(&qhp->lock, flag);
638 if (t4_wq_in_error(&qhp->wq)) {
639 spin_unlock_irqrestore(&qhp->lock, flag);
640 return -EINVAL;
641 }
642 num_wrs = t4_rq_avail(&qhp->wq);
643 if (num_wrs == 0) {
644 spin_unlock_irqrestore(&qhp->lock, flag);
645 return -ENOMEM;
646 }
647 while (wr) {
648 if (wr->num_sge > T4_MAX_RECV_SGE) {
649 err = -EINVAL;
650 *bad_wr = wr;
651 break;
652 }
653 wqe = &qhp->wq.rq.queue[qhp->wq.rq.pidx];
654 if (num_wrs)
655 err = build_rdma_recv(qhp, wqe, wr, &len16);
656 else
657 err = -ENOMEM;
658 if (err) {
659 *bad_wr = wr;
660 break;
661 }
662
663 qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;
664
665 wqe->recv.opcode = FW_RI_RECV_WR;
666 wqe->recv.r1 = 0;
667 wqe->recv.wrid = qhp->wq.rq.pidx;
668 wqe->recv.r2[0] = 0;
669 wqe->recv.r2[1] = 0;
670 wqe->recv.r2[2] = 0;
671 wqe->recv.len16 = len16;
672 if (len16 < 5)
673 wqe->flits[8] = 0;
674
675 PDBG("%s cookie 0x%llx pidx %u\n", __func__,
676 (unsigned long long) wr->wr_id, qhp->wq.rq.pidx);
677 t4_rq_produce(&qhp->wq);
678 wr = wr->next;
679 num_wrs--;
680 idx++;
681 }
682 if (t4_wq_db_enabled(&qhp->wq))
683 t4_ring_rq_db(&qhp->wq, idx);
684 spin_unlock_irqrestore(&qhp->lock, flag);
685 return err;
686}
687
688int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw, struct ib_mw_bind *mw_bind)
689{
690 return -ENOSYS;
691}
692
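/*
 * Translate the status of an error CQE into the RDMAP/DDP/MPA layer,
 * error-type and error-code fields of a TERMINATE message.  A NULL err_cqe,
 * or an unrecognized status, is reported as a local catastrophic error.
 */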
693static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
694 u8 *ecode)
695{
696 int status;
697 int tagged;
698 int opcode;
699 int rqtype;
700 int send_inv;
701
702 if (!err_cqe) {
703 *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
704 *ecode = 0;
705 return;
706 }
707
708 status = CQE_STATUS(err_cqe);
709 opcode = CQE_OPCODE(err_cqe);
710 rqtype = RQ_TYPE(err_cqe);
711 send_inv = (opcode == FW_RI_SEND_WITH_INV) ||
712 (opcode == FW_RI_SEND_WITH_SE_INV);
713 tagged = (opcode == FW_RI_RDMA_WRITE) ||
714 (rqtype && (opcode == FW_RI_READ_RESP));
715
716 switch (status) {
717 case T4_ERR_STAG:
718 if (send_inv) {
719 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
720 *ecode = RDMAP_CANT_INV_STAG;
721 } else {
722 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
723 *ecode = RDMAP_INV_STAG;
724 }
725 break;
726 case T4_ERR_PDID:
727 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
728 if ((opcode == FW_RI_SEND_WITH_INV) ||
729 (opcode == FW_RI_SEND_WITH_SE_INV))
730 *ecode = RDMAP_CANT_INV_STAG;
731 else
732 *ecode = RDMAP_STAG_NOT_ASSOC;
733 break;
734 case T4_ERR_QPID:
735 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
736 *ecode = RDMAP_STAG_NOT_ASSOC;
737 break;
738 case T4_ERR_ACCESS:
739 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
740 *ecode = RDMAP_ACC_VIOL;
741 break;
742 case T4_ERR_WRAP:
743 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
744 *ecode = RDMAP_TO_WRAP;
745 break;
746 case T4_ERR_BOUND:
747 if (tagged) {
748 *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
749 *ecode = DDPT_BASE_BOUNDS;
750 } else {
751 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
752 *ecode = RDMAP_BASE_BOUNDS;
753 }
754 break;
755 case T4_ERR_INVALIDATE_SHARED_MR:
756 case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
757 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
758 *ecode = RDMAP_CANT_INV_STAG;
759 break;
760 case T4_ERR_ECC:
761 case T4_ERR_ECC_PSTAG:
762 case T4_ERR_INTERNAL_ERR:
763 *layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
764 *ecode = 0;
765 break;
766 case T4_ERR_OUT_OF_RQE:
767 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
768 *ecode = DDPU_INV_MSN_NOBUF;
769 break;
770 case T4_ERR_PBL_ADDR_BOUND:
771 *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
772 *ecode = DDPT_BASE_BOUNDS;
773 break;
774 case T4_ERR_CRC:
775 *layer_type = LAYER_MPA|DDP_LLP;
776 *ecode = MPA_CRC_ERR;
777 break;
778 case T4_ERR_MARKER:
779 *layer_type = LAYER_MPA|DDP_LLP;
780 *ecode = MPA_MARKER_ERR;
781 break;
782 case T4_ERR_PDU_LEN_ERR:
783 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
784 *ecode = DDPU_MSG_TOOBIG;
785 break;
786 case T4_ERR_DDP_VERSION:
787 if (tagged) {
788 *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
789 *ecode = DDPT_INV_VERS;
790 } else {
791 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
792 *ecode = DDPU_INV_VERS;
793 }
794 break;
795 case T4_ERR_RDMA_VERSION:
796 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
797 *ecode = RDMAP_INV_VERS;
798 break;
799 case T4_ERR_OPCODE:
800 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
801 *ecode = RDMAP_INV_OPCODE;
802 break;
803 case T4_ERR_DDP_QUEUE_NUM:
804 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
805 *ecode = DDPU_INV_QN;
806 break;
807 case T4_ERR_MSN:
808 case T4_ERR_MSN_GAP:
809 case T4_ERR_MSN_RANGE:
810 case T4_ERR_IRD_OVERFLOW:
811 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
812 *ecode = DDPU_INV_MSN_RANGE;
813 break;
814 case T4_ERR_TBIT:
815 *layer_type = LAYER_DDP|DDP_LOCAL_CATA;
816 *ecode = 0;
817 break;
818 case T4_ERR_MO:
819 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
820 *ecode = DDPU_INV_MO;
821 break;
822 default:
823 *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
824 *ecode = 0;
825 break;
826 }
827}
828
829int c4iw_post_zb_read(struct c4iw_qp *qhp)
830{
831 union t4_wr *wqe;
832 struct sk_buff *skb;
833 u8 len16;
834
835 PDBG("%s enter\n", __func__);
836 skb = alloc_skb(40, GFP_KERNEL);
837 if (!skb) {
838 printk(KERN_ERR "%s cannot send zb_read!!\n", __func__);
839 return -ENOMEM;
840 }
841 set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
842
843 wqe = (union t4_wr *)skb_put(skb, sizeof wqe->read);
844 memset(wqe, 0, sizeof wqe->read);
845 wqe->read.r2 = cpu_to_be64(0);
846 wqe->read.stag_sink = cpu_to_be32(1);
847 wqe->read.to_sink_hi = cpu_to_be32(0);
848 wqe->read.to_sink_lo = cpu_to_be32(1);
849 wqe->read.stag_src = cpu_to_be32(1);
850 wqe->read.plen = cpu_to_be32(0);
851 wqe->read.to_src_hi = cpu_to_be32(0);
852 wqe->read.to_src_lo = cpu_to_be32(1);
853 len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
854 init_wr_hdr(wqe, 0, FW_RI_RDMA_READ_WR, FW_RI_COMPLETION_FLAG, len16);
855
856 return c4iw_ofld_send(&qhp->rhp->rdev, skb);
857}
858
859static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
860 gfp_t gfp)
861{
862 struct fw_ri_wr *wqe;
863 struct sk_buff *skb;
864 struct terminate_message *term;
865
866 PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
867 qhp->ep->hwtid);
868
869 skb = alloc_skb(sizeof *wqe, gfp);
870 if (!skb)
871 return;
872 set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
873
874 wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
875 memset(wqe, 0, sizeof *wqe);
876 wqe->op_compl = cpu_to_be32(FW_WR_OP(FW_RI_INIT_WR));
877 wqe->flowid_len16 = cpu_to_be32(
878 FW_WR_FLOWID(qhp->ep->hwtid) |
879 FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
880
881 wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
882 wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
883 term = (struct terminate_message *)wqe->u.terminate.termmsg;
884 build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
885 c4iw_ofld_send(&qhp->rhp->rdev, skb);
886}
887
888/*
889 * Assumes qhp lock is held.
890 */
891static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
892 struct c4iw_cq *schp, unsigned long *flag)
893{
894 int count;
895 int flushed;
896
897 PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
898 /* take a ref on the qhp since we must release the lock */
899 atomic_inc(&qhp->refcnt);
900 spin_unlock_irqrestore(&qhp->lock, *flag);
901
 902	/* locking hierarchy: cq lock first, then qp lock. */
903 spin_lock_irqsave(&rchp->lock, *flag);
904 spin_lock(&qhp->lock);
905 c4iw_flush_hw_cq(&rchp->cq);
906 c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
907 flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
908 spin_unlock(&qhp->lock);
909 spin_unlock_irqrestore(&rchp->lock, *flag);
910 if (flushed)
911 (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
912
 913	/* locking hierarchy: cq lock first, then qp lock. */
914 spin_lock_irqsave(&schp->lock, *flag);
915 spin_lock(&qhp->lock);
916 c4iw_flush_hw_cq(&schp->cq);
917 c4iw_count_scqes(&schp->cq, &qhp->wq, &count);
918 flushed = c4iw_flush_sq(&qhp->wq, &schp->cq, count);
919 spin_unlock(&qhp->lock);
920 spin_unlock_irqrestore(&schp->lock, *flag);
921 if (flushed)
922 (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
923
924 /* deref */
925 if (atomic_dec_and_test(&qhp->refcnt))
926 wake_up(&qhp->wait);
927
928 spin_lock_irqsave(&qhp->lock, *flag);
929}
930
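/*
 * For user QPs we only mark the WQ and both CQs in error and let the user
 * library do the flush from user space; kernel QPs are flushed here via
 * __flush_qp(), with completions delivered through the CQ handlers.
 */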
931static void flush_qp(struct c4iw_qp *qhp, unsigned long *flag)
932{
933 struct c4iw_cq *rchp, *schp;
934
935 rchp = get_chp(qhp->rhp, qhp->attr.rcq);
936 schp = get_chp(qhp->rhp, qhp->attr.scq);
937
938 if (qhp->ibqp.uobject) {
939 t4_set_wq_in_error(&qhp->wq);
940 t4_set_cq_in_error(&rchp->cq);
941 if (schp != rchp)
942 t4_set_cq_in_error(&schp->cq);
943 return;
944 }
945 __flush_qp(qhp, rchp, schp, flag);
946}
947
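/*
 * Post an FW_RI_INIT_WR of type FINI to take the connection out of RDMA
 * mode, then wait (with timeout) for the firmware completion delivered
 * through the wr_wait cookie.
 */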
948static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
949{
950 struct fw_ri_wr *wqe;
951 int ret;
952 struct c4iw_wr_wait wr_wait;
953 struct sk_buff *skb;
954
955 PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
956 qhp->ep->hwtid);
957
958 skb = alloc_skb(sizeof *wqe, GFP_KERNEL | __GFP_NOFAIL);
959 if (!skb)
960 return -ENOMEM;
961 set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
962
963 wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
964 memset(wqe, 0, sizeof *wqe);
965 wqe->op_compl = cpu_to_be32(
966 FW_WR_OP(FW_RI_INIT_WR) |
967 FW_WR_COMPL(1));
968 wqe->flowid_len16 = cpu_to_be32(
969 FW_WR_FLOWID(qhp->ep->hwtid) |
970 FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
971 wqe->cookie = (u64)&wr_wait;
972
973 wqe->u.fini.type = FW_RI_TYPE_FINI;
974 c4iw_init_wr_wait(&wr_wait);
975 ret = c4iw_ofld_send(&rhp->rdev, skb);
976 if (ret)
977 goto out;
978
979 wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
980 if (!wr_wait.done) {
981 printk(KERN_ERR MOD "Device %s not responding!\n",
982 pci_name(rhp->rdev.lldi.pdev));
983 rhp->rdev.flags = T4_FATAL_ERROR;
984 ret = -EIO;
985 } else {
986 ret = wr_wait.ret;
987 if (ret)
988 printk(KERN_WARNING MOD
 989			       "%s: Abnormal close qpid %d ret %d\n",
990 pci_name(rhp->rdev.lldi.pdev), qhp->wq.sq.qid,
991 ret);
992 }
993out:
994 PDBG("%s ret %d\n", __func__, ret);
995 return ret;
996}
997
998static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
999{
1000 memset(&init->u, 0, sizeof init->u);
1001 switch (p2p_type) {
1002 case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
1003 init->u.write.opcode = FW_RI_RDMA_WRITE_WR;
1004 init->u.write.stag_sink = cpu_to_be32(1);
1005 init->u.write.to_sink = cpu_to_be64(1);
1006 init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD;
1007 init->u.write.len16 = DIV_ROUND_UP(sizeof init->u.write +
1008 sizeof(struct fw_ri_immd),
1009 16);
1010 break;
1011 case FW_RI_INIT_P2PTYPE_READ_REQ:
1012 init->u.write.opcode = FW_RI_RDMA_READ_WR;
1013 init->u.read.stag_src = cpu_to_be32(1);
1014 init->u.read.to_src_lo = cpu_to_be32(1);
1015 init->u.read.stag_sink = cpu_to_be32(1);
1016 init->u.read.to_sink_lo = cpu_to_be32(1);
1017 init->u.read.len16 = DIV_ROUND_UP(sizeof init->u.read, 16);
1018 break;
1019 }
1020}
1021
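/*
 * Post an FW_RI_INIT_WR of type INIT to move the connection into RDMA
 * mode: the MPA attributes, QP capabilities, queue/CQ ids, ORD/IRD limits
 * and initial send/receive sequence numbers all come from the negotiated
 * endpoint state.  For the connection initiator an RTR message is appended
 * via build_rtr_msg().
 */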
1022static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
1023{
1024 struct fw_ri_wr *wqe;
1025 int ret;
1026 struct c4iw_wr_wait wr_wait;
1027 struct sk_buff *skb;
1028
1029 PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
1030 qhp->ep->hwtid);
1031
1032 skb = alloc_skb(sizeof *wqe, GFP_KERNEL | __GFP_NOFAIL);
1033 if (!skb)
1034 return -ENOMEM;
1035 set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
1036
1037 wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
1038 memset(wqe, 0, sizeof *wqe);
1039 wqe->op_compl = cpu_to_be32(
1040 FW_WR_OP(FW_RI_INIT_WR) |
1041 FW_WR_COMPL(1));
1042 wqe->flowid_len16 = cpu_to_be32(
1043 FW_WR_FLOWID(qhp->ep->hwtid) |
1044 FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
1045
1046 wqe->cookie = (u64)&wr_wait;
1047
1048 wqe->u.init.type = FW_RI_TYPE_INIT;
1049 wqe->u.init.mpareqbit_p2ptype =
1050 V_FW_RI_WR_MPAREQBIT(qhp->attr.mpa_attr.initiator) |
1051 V_FW_RI_WR_P2PTYPE(qhp->attr.mpa_attr.p2p_type);
1052 wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE;
1053 if (qhp->attr.mpa_attr.recv_marker_enabled)
1054 wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE;
1055 if (qhp->attr.mpa_attr.xmit_marker_enabled)
1056 wqe->u.init.mpa_attrs |= FW_RI_MPA_TX_MARKER_ENABLE;
1057 if (qhp->attr.mpa_attr.crc_enabled)
1058 wqe->u.init.mpa_attrs |= FW_RI_MPA_CRC_ENABLE;
1059
1060 wqe->u.init.qp_caps = FW_RI_QP_RDMA_READ_ENABLE |
1061 FW_RI_QP_RDMA_WRITE_ENABLE |
1062 FW_RI_QP_BIND_ENABLE;
1063 if (!qhp->ibqp.uobject)
1064 wqe->u.init.qp_caps |= FW_RI_QP_FAST_REGISTER_ENABLE |
1065 FW_RI_QP_STAG0_ENABLE;
1066 wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq));
1067 wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd);
1068 wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid);
1069 wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid);
1070 wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
1071 wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq);
1072 wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq);
1073 wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord);
1074 wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird);
1075 wqe->u.init.iss = cpu_to_be32(qhp->ep->snd_seq);
1076 wqe->u.init.irs = cpu_to_be32(qhp->ep->rcv_seq);
1077 wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
1078 wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
1079 rhp->rdev.lldi.vr->rq.start);
1080 if (qhp->attr.mpa_attr.initiator)
1081 build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);
1082
1083 c4iw_init_wr_wait(&wr_wait);
1084 ret = c4iw_ofld_send(&rhp->rdev, skb);
1085 if (ret)
1086 goto out;
1087
1088 wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
1089 if (!wr_wait.done) {
1090 printk(KERN_ERR MOD "Device %s not responding!\n",
1091 pci_name(rhp->rdev.lldi.pdev));
1092 rhp->rdev.flags = T4_FATAL_ERROR;
1093 ret = -EIO;
1094 } else
1095 ret = wr_wait.ret;
1096out:
1097 PDBG("%s ret %d\n", __func__, ret);
1098 return ret;
1099}
1100
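/*
 * Core QP state machine.  IDLE->RTS posts rdma_init(), RTS->CLOSING posts
 * rdma_fini(), and transitions into ERROR flush the QP and disassociate
 * the endpoint.  The disconnect/terminate/free actions recorded while the
 * lock is held are issued only after it has been dropped.
 */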
1101int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
1102 enum c4iw_qp_attr_mask mask,
1103 struct c4iw_qp_attributes *attrs,
1104 int internal)
1105{
1106 int ret = 0;
1107 struct c4iw_qp_attributes newattr = qhp->attr;
1108 unsigned long flag;
1109 int disconnect = 0;
1110 int terminate = 0;
1111 int abort = 0;
1112 int free = 0;
1113 struct c4iw_ep *ep = NULL;
1114
1115 PDBG("%s qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n", __func__,
1116 qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
1117 (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);
1118
1119 spin_lock_irqsave(&qhp->lock, flag);
1120
1121 /* Process attr changes if in IDLE */
1122 if (mask & C4IW_QP_ATTR_VALID_MODIFY) {
1123 if (qhp->attr.state != C4IW_QP_STATE_IDLE) {
1124 ret = -EIO;
1125 goto out;
1126 }
1127 if (mask & C4IW_QP_ATTR_ENABLE_RDMA_READ)
1128 newattr.enable_rdma_read = attrs->enable_rdma_read;
1129 if (mask & C4IW_QP_ATTR_ENABLE_RDMA_WRITE)
1130 newattr.enable_rdma_write = attrs->enable_rdma_write;
1131 if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND)
1132 newattr.enable_bind = attrs->enable_bind;
1133 if (mask & C4IW_QP_ATTR_MAX_ORD) {
1134 if (attrs->max_ord > c4iw_max_read_depth) {
1135 ret = -EINVAL;
1136 goto out;
1137 }
1138 newattr.max_ord = attrs->max_ord;
1139 }
1140 if (mask & C4IW_QP_ATTR_MAX_IRD) {
1141 if (attrs->max_ird > c4iw_max_read_depth) {
1142 ret = -EINVAL;
1143 goto out;
1144 }
1145 newattr.max_ird = attrs->max_ird;
1146 }
1147 qhp->attr = newattr;
1148 }
1149
1150 if (!(mask & C4IW_QP_ATTR_NEXT_STATE))
1151 goto out;
1152 if (qhp->attr.state == attrs->next_state)
1153 goto out;
1154
1155 switch (qhp->attr.state) {
1156 case C4IW_QP_STATE_IDLE:
1157 switch (attrs->next_state) {
1158 case C4IW_QP_STATE_RTS:
1159 if (!(mask & C4IW_QP_ATTR_LLP_STREAM_HANDLE)) {
1160 ret = -EINVAL;
1161 goto out;
1162 }
1163 if (!(mask & C4IW_QP_ATTR_MPA_ATTR)) {
1164 ret = -EINVAL;
1165 goto out;
1166 }
1167 qhp->attr.mpa_attr = attrs->mpa_attr;
1168 qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
1169 qhp->ep = qhp->attr.llp_stream_handle;
1170 qhp->attr.state = C4IW_QP_STATE_RTS;
1171
1172 /*
1173 * Ref the endpoint here and deref when we
1174 * disassociate the endpoint from the QP. This
1175 * happens in CLOSING->IDLE transition or *->ERROR
1176 * transition.
1177 */
1178 c4iw_get_ep(&qhp->ep->com);
1179 spin_unlock_irqrestore(&qhp->lock, flag);
1180 ret = rdma_init(rhp, qhp);
1181 spin_lock_irqsave(&qhp->lock, flag);
1182 if (ret)
1183 goto err;
1184 break;
1185 case C4IW_QP_STATE_ERROR:
1186 qhp->attr.state = C4IW_QP_STATE_ERROR;
1187 flush_qp(qhp, &flag);
1188 break;
1189 default:
1190 ret = -EINVAL;
1191 goto out;
1192 }
1193 break;
1194 case C4IW_QP_STATE_RTS:
1195 switch (attrs->next_state) {
1196 case C4IW_QP_STATE_CLOSING:
1197 BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
1198 qhp->attr.state = C4IW_QP_STATE_CLOSING;
1199 if (!internal) {
1200 abort = 0;
1201 disconnect = 1;
1202 ep = qhp->ep;
1203 c4iw_get_ep(&ep->com);
1204 }
1205 spin_unlock_irqrestore(&qhp->lock, flag);
1206 ret = rdma_fini(rhp, qhp);
1207 spin_lock_irqsave(&qhp->lock, flag);
1208 if (ret) {
1209 ep = qhp->ep;
1210 c4iw_get_ep(&ep->com);
1211 disconnect = abort = 1;
1212 goto err;
1213 }
1214 break;
1215 case C4IW_QP_STATE_TERMINATE:
1216 qhp->attr.state = C4IW_QP_STATE_TERMINATE;
1217 if (qhp->ibqp.uobject)
1218 t4_set_wq_in_error(&qhp->wq);
1219 ep = qhp->ep;
1220 c4iw_get_ep(&ep->com);
1221 terminate = 1;
1222 disconnect = 1;
1223 break;
1224 case C4IW_QP_STATE_ERROR:
1225 qhp->attr.state = C4IW_QP_STATE_ERROR;
1226 if (!internal) {
1227 abort = 1;
1228 disconnect = 1;
1229 ep = qhp->ep;
1230 c4iw_get_ep(&ep->com);
1231 }
1232 goto err;
1233 break;
1234 default:
1235 ret = -EINVAL;
1236 goto out;
1237 }
1238 break;
1239 case C4IW_QP_STATE_CLOSING:
1240 if (!internal) {
1241 ret = -EINVAL;
1242 goto out;
1243 }
1244 switch (attrs->next_state) {
1245 case C4IW_QP_STATE_IDLE:
1246 flush_qp(qhp, &flag);
1247 qhp->attr.state = C4IW_QP_STATE_IDLE;
1248 qhp->attr.llp_stream_handle = NULL;
1249 c4iw_put_ep(&qhp->ep->com);
1250 qhp->ep = NULL;
1251 wake_up(&qhp->wait);
1252 break;
1253 case C4IW_QP_STATE_ERROR:
1254 goto err;
1255 default:
1256 ret = -EINVAL;
1257 goto err;
1258 }
1259 break;
1260 case C4IW_QP_STATE_ERROR:
1261 if (attrs->next_state != C4IW_QP_STATE_IDLE) {
1262 ret = -EINVAL;
1263 goto out;
1264 }
1265 if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) {
1266 ret = -EINVAL;
1267 goto out;
1268 }
1269 qhp->attr.state = C4IW_QP_STATE_IDLE;
1270 break;
1271 case C4IW_QP_STATE_TERMINATE:
1272 if (!internal) {
1273 ret = -EINVAL;
1274 goto out;
1275 }
1276 goto err;
1277 break;
1278 default:
1279 printk(KERN_ERR "%s in a bad state %d\n",
1280 __func__, qhp->attr.state);
1281 ret = -EINVAL;
1282 goto err;
1283 break;
1284 }
1285 goto out;
1286err:
1287 PDBG("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
1288 qhp->wq.sq.qid);
1289
1290 /* disassociate the LLP connection */
1291 qhp->attr.llp_stream_handle = NULL;
1292 ep = qhp->ep;
1293 qhp->ep = NULL;
1294 qhp->attr.state = C4IW_QP_STATE_ERROR;
1295 free = 1;
1296 wake_up(&qhp->wait);
1297 BUG_ON(!ep);
1298 flush_qp(qhp, &flag);
1299out:
1300 spin_unlock_irqrestore(&qhp->lock, flag);
1301
1302 if (terminate)
1303 post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL);
1304
1305 /*
1306 * If disconnect is 1, then we need to initiate a disconnect
1307 * on the EP. This can be a normal close (RTS->CLOSING) or
1308 * an abnormal close (RTS/CLOSING->ERROR).
1309 */
1310 if (disconnect) {
1311 c4iw_ep_disconnect(ep, abort, internal ? GFP_ATOMIC :
1312 GFP_KERNEL);
1313 c4iw_put_ep(&ep->com);
1314 }
1315
1316 /*
1317 * If free is 1, then we've disassociated the EP from the QP
1318 * and we need to dereference the EP.
1319 */
1320 if (free)
1321 c4iw_put_ep(&ep->com);
1322
1323 PDBG("%s exit state %d\n", __func__, qhp->attr.state);
1324 return ret;
1325}
1326
1327int c4iw_destroy_qp(struct ib_qp *ib_qp)
1328{
1329 struct c4iw_dev *rhp;
1330 struct c4iw_qp *qhp;
1331 struct c4iw_qp_attributes attrs;
1332 struct c4iw_ucontext *ucontext;
1333
1334 qhp = to_c4iw_qp(ib_qp);
1335 rhp = qhp->rhp;
1336
1337 attrs.next_state = C4IW_QP_STATE_ERROR;
1338 c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
1339 wait_event(qhp->wait, !qhp->ep);
1340
1341 remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
1342 remove_handle(rhp, &rhp->qpidr, qhp->wq.rq.qid);
1343 atomic_dec(&qhp->refcnt);
1344 wait_event(qhp->wait, !atomic_read(&qhp->refcnt));
1345
1346 ucontext = ib_qp->uobject ?
1347 to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
1348 destroy_qp(&rhp->rdev, &qhp->wq,
1349 ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
1350
1351 PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
1352 kfree(qhp);
1353 return 0;
1354}
1355
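/*
 * Create a kernel or user RC QP.  SQ/RQ sizes are rounded up, the HW
 * resources are allocated via create_qp(), and the QP is inserted into the
 * qpidr under both its SQ and RQ qids.  For user QPs the response also
 * hands back four mmap keys (queue memory and doorbell/GTS pages for the
 * SQ and RQ) that are later looked up by c4iw_mmap().
 */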
1356struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
1357 struct ib_udata *udata)
1358{
1359 struct c4iw_dev *rhp;
1360 struct c4iw_qp *qhp;
1361 struct c4iw_pd *php;
1362 struct c4iw_cq *schp;
1363 struct c4iw_cq *rchp;
1364 struct c4iw_create_qp_resp uresp;
1365 int sqsize, rqsize;
1366 struct c4iw_ucontext *ucontext;
1367 int ret;
1368 struct c4iw_mm_entry *mm1, *mm2, *mm3, *mm4;
1369
1370 PDBG("%s ib_pd %p\n", __func__, pd);
1371
1372 if (attrs->qp_type != IB_QPT_RC)
1373 return ERR_PTR(-EINVAL);
1374
1375 php = to_c4iw_pd(pd);
1376 rhp = php->rhp;
1377 schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid);
1378 rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid);
1379 if (!schp || !rchp)
1380 return ERR_PTR(-EINVAL);
1381
1382 if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE)
1383 return ERR_PTR(-EINVAL);
1384
1385 rqsize = roundup(attrs->cap.max_recv_wr + 1, 16);
1386 if (rqsize > T4_MAX_RQ_SIZE)
1387 return ERR_PTR(-E2BIG);
1388
1389 sqsize = roundup(attrs->cap.max_send_wr + 1, 16);
1390 if (sqsize > T4_MAX_SQ_SIZE)
1391 return ERR_PTR(-E2BIG);
1392
1393 ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;
1394
1395
1396 qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
1397 if (!qhp)
1398 return ERR_PTR(-ENOMEM);
1399 qhp->wq.sq.size = sqsize;
1400 qhp->wq.sq.memsize = (sqsize + 1) * sizeof *qhp->wq.sq.queue;
1401 qhp->wq.rq.size = rqsize;
1402 qhp->wq.rq.memsize = (rqsize + 1) * sizeof *qhp->wq.rq.queue;
1403
1404 if (ucontext) {
1405 qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
1406 qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE);
1407 }
1408
1409 PDBG("%s sqsize %u sqmemsize %zu rqsize %u rqmemsize %zu\n",
1410 __func__, sqsize, qhp->wq.sq.memsize, rqsize, qhp->wq.rq.memsize);
1411
1412 ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
1413 ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
1414 if (ret)
1415 goto err1;
1416
1417 attrs->cap.max_recv_wr = rqsize - 1;
1418 attrs->cap.max_send_wr = sqsize - 1;
1419 attrs->cap.max_inline_data = T4_MAX_SEND_INLINE;
1420
1421 qhp->rhp = rhp;
1422 qhp->attr.pd = php->pdid;
1423 qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid;
1424 qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid;
1425 qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
1426 qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
1427 qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
1428 qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
1429 qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
1430 qhp->attr.state = C4IW_QP_STATE_IDLE;
1431 qhp->attr.next_state = C4IW_QP_STATE_IDLE;
1432 qhp->attr.enable_rdma_read = 1;
1433 qhp->attr.enable_rdma_write = 1;
1434 qhp->attr.enable_bind = 1;
1435 qhp->attr.max_ord = 1;
1436 qhp->attr.max_ird = 1;
1437 spin_lock_init(&qhp->lock);
1438 init_waitqueue_head(&qhp->wait);
1439 atomic_set(&qhp->refcnt, 1);
1440
1441 ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
1442 if (ret)
1443 goto err2;
1444
1445 ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.rq.qid);
1446 if (ret)
1447 goto err3;
1448
1449 if (udata) {
1450 mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
1451 if (!mm1) {
1452 ret = -ENOMEM;
1453 goto err4;
1454 }
1455 mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
1456 if (!mm2) {
1457 ret = -ENOMEM;
1458 goto err5;
1459 }
1460 mm3 = kmalloc(sizeof *mm3, GFP_KERNEL);
1461 if (!mm3) {
1462 ret = -ENOMEM;
1463 goto err6;
1464 }
1465 mm4 = kmalloc(sizeof *mm4, GFP_KERNEL);
1466 if (!mm4) {
1467 ret = -ENOMEM;
1468 goto err7;
1469 }
1470
1471 uresp.qid_mask = rhp->rdev.qpmask;
1472 uresp.sqid = qhp->wq.sq.qid;
1473 uresp.sq_size = qhp->wq.sq.size;
1474 uresp.sq_memsize = qhp->wq.sq.memsize;
1475 uresp.rqid = qhp->wq.rq.qid;
1476 uresp.rq_size = qhp->wq.rq.size;
1477 uresp.rq_memsize = qhp->wq.rq.memsize;
1478 spin_lock(&ucontext->mmap_lock);
1479 uresp.sq_key = ucontext->key;
1480 ucontext->key += PAGE_SIZE;
1481 uresp.rq_key = ucontext->key;
1482 ucontext->key += PAGE_SIZE;
1483 uresp.sq_db_gts_key = ucontext->key;
1484 ucontext->key += PAGE_SIZE;
1485 uresp.rq_db_gts_key = ucontext->key;
1486 ucontext->key += PAGE_SIZE;
1487 spin_unlock(&ucontext->mmap_lock);
1488 ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
1489 if (ret)
1490 goto err8;
1491 mm1->key = uresp.sq_key;
1492 mm1->addr = virt_to_phys(qhp->wq.sq.queue);
1493 mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize);
1494 insert_mmap(ucontext, mm1);
1495 mm2->key = uresp.rq_key;
1496 mm2->addr = virt_to_phys(qhp->wq.rq.queue);
1497 mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize);
1498 insert_mmap(ucontext, mm2);
1499 mm3->key = uresp.sq_db_gts_key;
1500 mm3->addr = qhp->wq.sq.udb;
1501 mm3->len = PAGE_SIZE;
1502 insert_mmap(ucontext, mm3);
1503 mm4->key = uresp.rq_db_gts_key;
1504 mm4->addr = qhp->wq.rq.udb;
1505 mm4->len = PAGE_SIZE;
1506 insert_mmap(ucontext, mm4);
1507 }
1508 qhp->ibqp.qp_num = qhp->wq.sq.qid;
1509 init_timer(&(qhp->timer));
1510 PDBG("%s qhp %p sq_num_entries %d, rq_num_entries %d qpid 0x%0x\n",
1511 __func__, qhp, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
1512 qhp->wq.sq.qid);
1513 return &qhp->ibqp;
1514err8:
1515 kfree(mm4);
1516err7:
1517 kfree(mm3);
1518err6:
1519 kfree(mm2);
1520err5:
1521 kfree(mm1);
1522err4:
1523 remove_handle(rhp, &rhp->qpidr, qhp->wq.rq.qid);
1524err3:
1525 remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
1526err2:
1527 destroy_qp(&rhp->rdev, &qhp->wq,
1528 ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
1529err1:
1530 kfree(qhp);
1531 return ERR_PTR(ret);
1532}
1533
1534int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1535 int attr_mask, struct ib_udata *udata)
1536{
1537 struct c4iw_dev *rhp;
1538 struct c4iw_qp *qhp;
1539 enum c4iw_qp_attr_mask mask = 0;
1540 struct c4iw_qp_attributes attrs;
1541
1542 PDBG("%s ib_qp %p\n", __func__, ibqp);
1543
1544 /* iwarp does not support the RTR state */
1545 if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
1546 attr_mask &= ~IB_QP_STATE;
1547
1548 /* Make sure we still have something left to do */
1549 if (!attr_mask)
1550 return 0;
1551
1552 memset(&attrs, 0, sizeof attrs);
1553 qhp = to_c4iw_qp(ibqp);
1554 rhp = qhp->rhp;
1555
1556 attrs.next_state = c4iw_convert_state(attr->qp_state);
1557 attrs.enable_rdma_read = (attr->qp_access_flags &
1558 IB_ACCESS_REMOTE_READ) ? 1 : 0;
1559 attrs.enable_rdma_write = (attr->qp_access_flags &
1560 IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
1561 attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;
1562
1563
1564 mask |= (attr_mask & IB_QP_STATE) ? C4IW_QP_ATTR_NEXT_STATE : 0;
1565 mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
1566 (C4IW_QP_ATTR_ENABLE_RDMA_READ |
1567 C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
1568 C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0;
1569
1570 return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
1571}
1572
1573struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
1574{
1575 PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
1576 return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
1577}
diff --git a/drivers/infiniband/hw/cxgb4/resource.c b/drivers/infiniband/hw/cxgb4/resource.c
new file mode 100644
index 000000000000..fb195d1d9015
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/resource.c
@@ -0,0 +1,417 @@
1/*
2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32/* Crude resource management */
33#include <linux/kernel.h>
34#include <linux/random.h>
35#include <linux/slab.h>
36#include <linux/kfifo.h>
37#include <linux/spinlock.h>
38#include <linux/errno.h>
39#include <linux/genalloc.h>
40#include "iw_cxgb4.h"
41
42#define RANDOM_SIZE 16
43
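/*
 * Resource ids are handed out from kfifos that are filled up front.  The
 * helper below loads a kfifo with the ids in [skip_low, nr - skip_high),
 * optionally in a lightly shuffled order (16 ids at a time, driven by
 * random32()) so allocations spread across the id space.  Callers pass
 * skip_low >= 1, keeping id 0 free to act as the "no resource available"
 * sentinel returned by c4iw_get_resource().
 */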
44static int __c4iw_init_resource_fifo(struct kfifo *fifo,
45 spinlock_t *fifo_lock,
46 u32 nr, u32 skip_low,
47 u32 skip_high,
48 int random)
49{
50 u32 i, j, entry = 0, idx;
51 u32 random_bytes;
52 u32 rarray[16];
53 spin_lock_init(fifo_lock);
54
55 if (kfifo_alloc(fifo, nr * sizeof(u32), GFP_KERNEL))
56 return -ENOMEM;
57
58 for (i = 0; i < skip_low + skip_high; i++)
59 kfifo_in(fifo, (unsigned char *) &entry, sizeof(u32));
60 if (random) {
61 j = 0;
62 random_bytes = random32();
63 for (i = 0; i < RANDOM_SIZE; i++)
64 rarray[i] = i + skip_low;
65 for (i = skip_low + RANDOM_SIZE; i < nr - skip_high; i++) {
66 if (j >= RANDOM_SIZE) {
67 j = 0;
68 random_bytes = random32();
69 }
70 idx = (random_bytes >> (j * 2)) & 0xF;
71 kfifo_in(fifo,
72 (unsigned char *) &rarray[idx],
73 sizeof(u32));
74 rarray[idx] = i;
75 j++;
76 }
77 for (i = 0; i < RANDOM_SIZE; i++)
78 kfifo_in(fifo,
79 (unsigned char *) &rarray[i],
80 sizeof(u32));
81 } else
82 for (i = skip_low; i < nr - skip_high; i++)
83 kfifo_in(fifo, (unsigned char *) &i, sizeof(u32));
84
85 for (i = 0; i < skip_low + skip_high; i++)
86 if (kfifo_out_locked(fifo, (unsigned char *) &entry,
87 sizeof(u32), fifo_lock))
88 break;
89 return 0;
90}
91
92static int c4iw_init_resource_fifo(struct kfifo *fifo, spinlock_t * fifo_lock,
93 u32 nr, u32 skip_low, u32 skip_high)
94{
95 return __c4iw_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
96 skip_high, 0);
97}
98
99static int c4iw_init_resource_fifo_random(struct kfifo *fifo,
100 spinlock_t *fifo_lock,
101 u32 nr, u32 skip_low, u32 skip_high)
102{
103 return __c4iw_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
104 skip_high, 1);
105}
106
107static int c4iw_init_qid_fifo(struct c4iw_rdev *rdev)
108{
109 u32 i;
110
111 spin_lock_init(&rdev->resource.qid_fifo_lock);
112
113 if (kfifo_alloc(&rdev->resource.qid_fifo, T4_MAX_QIDS * sizeof(u32),
114 GFP_KERNEL))
115 return -ENOMEM;
116
117 for (i = T4_QID_BASE; i < T4_QID_BASE + T4_MAX_QIDS; i++)
118 if (!(i & rdev->qpmask))
119 kfifo_in(&rdev->resource.qid_fifo,
120 (unsigned char *) &i, sizeof(u32));
121 return 0;
122}
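/*
 * Only queue ids aligned to the qpmask boundary enter the qid fifo.  The
 * ids between an aligned qid and the next share its doorbell/GTS page, so
 * they are parceled out per user context by c4iw_get_cqid() and
 * c4iw_get_qpid() below rather than through the fifo.
 */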
123
124/* nr_* must be power of 2 */
125int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid)
126{
127 int err = 0;
128 err = c4iw_init_resource_fifo_random(&rdev->resource.tpt_fifo,
129 &rdev->resource.tpt_fifo_lock,
130 nr_tpt, 1, 0);
131 if (err)
132 goto tpt_err;
133 err = c4iw_init_qid_fifo(rdev);
134 if (err)
135 goto qid_err;
136 err = c4iw_init_resource_fifo(&rdev->resource.pdid_fifo,
137 &rdev->resource.pdid_fifo_lock,
138 nr_pdid, 1, 0);
139 if (err)
140 goto pdid_err;
141 return 0;
142pdid_err:
143 kfifo_free(&rdev->resource.qid_fifo);
144qid_err:
145 kfifo_free(&rdev->resource.tpt_fifo);
146tpt_err:
147 return -ENOMEM;
148}
149
150/*
151 * returns 0 if no resource available
152 */
153u32 c4iw_get_resource(struct kfifo *fifo, spinlock_t *lock)
154{
155 u32 entry;
156 if (kfifo_out_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock))
157 return entry;
158 else
159 return 0;
160}
161
162void c4iw_put_resource(struct kfifo *fifo, u32 entry, spinlock_t *lock)
163{
164 PDBG("%s entry 0x%x\n", __func__, entry);
165 kfifo_in_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock);
166}
167
168u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
169{
170 struct c4iw_qid_list *entry;
171 u32 qid;
172 int i;
173
174 mutex_lock(&uctx->lock);
175 if (!list_empty(&uctx->cqids)) {
176 entry = list_entry(uctx->cqids.next, struct c4iw_qid_list,
177 entry);
178 list_del(&entry->entry);
179 qid = entry->qid;
180 kfree(entry);
181 } else {
182 qid = c4iw_get_resource(&rdev->resource.qid_fifo,
183 &rdev->resource.qid_fifo_lock);
184 if (!qid)
185 goto out;
186 for (i = qid+1; i & rdev->qpmask; i++) {
187 entry = kmalloc(sizeof *entry, GFP_KERNEL);
188 if (!entry)
189 goto out;
190 entry->qid = i;
191 list_add_tail(&entry->entry, &uctx->cqids);
192 }
193
194 /*
195 * now put the same ids on the qp list since they all
196 * map to the same db/gts page.
197 */
198 entry = kmalloc(sizeof *entry, GFP_KERNEL);
199 if (!entry)
200 goto out;
201 entry->qid = qid;
202 list_add_tail(&entry->entry, &uctx->qpids);
203 for (i = qid+1; i & rdev->qpmask; i++) {
204 entry = kmalloc(sizeof *entry, GFP_KERNEL);
205 if (!entry)
206 goto out;
207 entry->qid = i;
208 list_add_tail(&entry->entry, &uctx->qpids);
209 }
210 }
211out:
212 mutex_unlock(&uctx->lock);
213 PDBG("%s qid 0x%x\n", __func__, qid);
214 return qid;
215}
216
217void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
218 struct c4iw_dev_ucontext *uctx)
219{
220 struct c4iw_qid_list *entry;
221
222 entry = kmalloc(sizeof *entry, GFP_KERNEL);
223 if (!entry)
224 return;
225 PDBG("%s qid 0x%x\n", __func__, qid);
226 entry->qid = qid;
227 mutex_lock(&uctx->lock);
228 list_add_tail(&entry->entry, &uctx->cqids);
229 mutex_unlock(&uctx->lock);
230}
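/*
 * Note that c4iw_put_cqid() does not push the id back into the global fifo;
 * it only parks it on the per-context cqid list for reuse, so the whole qid
 * group stays bound to that context.  The group is presumably returned to
 * the fifo only when the device ucontext itself is torn down, outside this
 * file.
 */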
231
232u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
233{
234 struct c4iw_qid_list *entry;
235 u32 qid;
236 int i;
237
238 mutex_lock(&uctx->lock);
239 if (!list_empty(&uctx->qpids)) {
240 entry = list_entry(uctx->qpids.next, struct c4iw_qid_list,
241 entry);
242 list_del(&entry->entry);
243 qid = entry->qid;
244 kfree(entry);
245 } else {
246 qid = c4iw_get_resource(&rdev->resource.qid_fifo,
247 &rdev->resource.qid_fifo_lock);
248 if (!qid)
249 goto out;
250 for (i = qid+1; i & rdev->qpmask; i++) {
251 entry = kmalloc(sizeof *entry, GFP_KERNEL);
252 if (!entry)
253 goto out;
254 entry->qid = i;
255 list_add_tail(&entry->entry, &uctx->qpids);
256 }
257
258 /*
259 * now put the same ids on the cq list since they all
260 * map to the same db/gts page.
261 */
262 entry = kmalloc(sizeof *entry, GFP_KERNEL);
263 if (!entry)
264 goto out;
265 entry->qid = qid;
266 list_add_tail(&entry->entry, &uctx->cqids);
267 for (i = qid; i & rdev->qpmask; i++) {
268 entry = kmalloc(sizeof *entry, GFP_KERNEL);
269 if (!entry)
270 goto out;
271 entry->qid = i;
272 list_add_tail(&entry->entry, &uctx->cqids);
273 }
274 }
275out:
276 mutex_unlock(&uctx->lock);
277 PDBG("%s qid 0x%x\n", __func__, qid);
278 return qid;
279}
280
281void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
282 struct c4iw_dev_ucontext *uctx)
283{
284 struct c4iw_qid_list *entry;
285
286 entry = kmalloc(sizeof *entry, GFP_KERNEL);
287 if (!entry)
288 return;
289 PDBG("%s qid 0x%x\n", __func__, qid);
290 entry->qid = qid;
291 mutex_lock(&uctx->lock);
292 list_add_tail(&entry->entry, &uctx->qpids);
293 mutex_unlock(&uctx->lock);
294}
295
296void c4iw_destroy_resource(struct c4iw_resource *rscp)
297{
298 kfifo_free(&rscp->tpt_fifo);
299 kfifo_free(&rscp->qid_fifo);
300 kfifo_free(&rscp->pdid_fifo);
301}
302
303/*
304 * PBL Memory Manager. Uses Linux generic allocator.
305 */
306
307#define MIN_PBL_SHIFT 8 /* 256B == min PBL size (32 entries) */
308
309u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
310{
311 unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size);
312 PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
313 return (u32)addr;
314}
315
316void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
317{
318 PDBG("%s addr 0x%x size %d\n", __func__, addr, size);
319 gen_pool_free(rdev->pbl_pool, (unsigned long)addr, size);
320}
321
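/*
 * The adapter's PBL memory region is fed to the gen_pool in chunks.  If
 * gen_pool_add() cannot take a chunk (it allocates bookkeeping per chunk),
 * the chunk size is halved and retried; once the chunk size drops below a
 * floor the remaining memory is abandoned with a warning, but the pool is
 * still usable with whatever was added.
 */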
322int c4iw_pblpool_create(struct c4iw_rdev *rdev)
323{
324 unsigned pbl_start, pbl_chunk, pbl_top;
325
326 rdev->pbl_pool = gen_pool_create(MIN_PBL_SHIFT, -1);
327 if (!rdev->pbl_pool)
328 return -ENOMEM;
329
330 pbl_start = rdev->lldi.vr->pbl.start;
331 pbl_chunk = rdev->lldi.vr->pbl.size;
332 pbl_top = pbl_start + pbl_chunk;
333
334 while (pbl_start < pbl_top) {
335 pbl_chunk = min(pbl_top - pbl_start + 1, pbl_chunk);
336 if (gen_pool_add(rdev->pbl_pool, pbl_start, pbl_chunk, -1)) {
337 PDBG("%s failed to add PBL chunk (%x/%x)\n",
338 __func__, pbl_start, pbl_chunk);
339 if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) {
340 printk(KERN_WARNING MOD
341 "Failed to add all PBL chunks (%x/%x)\n",
342 pbl_start,
343 pbl_top - pbl_start);
344 return 0;
345 }
346 pbl_chunk >>= 1;
347 } else {
348 PDBG("%s added PBL chunk (%x/%x)\n",
349 __func__, pbl_start, pbl_chunk);
350 pbl_start += pbl_chunk;
351 }
352 }
353
354 return 0;
355}
356
357void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
358{
359 gen_pool_destroy(rdev->pbl_pool);
360}
361
362/*
363 * RQT Memory Manager. Uses Linux generic allocator.
364 */
365
366#define MIN_RQT_SHIFT 10 /* 1KB == min RQT size (16 entries) */
367
368u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
369{
370 unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6);
371 PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6);
372 return (u32)addr;
373}
374
375void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
376{
377 PDBG("%s addr 0x%x size %d\n", __func__, addr, size << 6);
378 gen_pool_free(rdev->rqt_pool, (unsigned long)addr, size << 6);
379}
380
381int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
382{
383 unsigned rqt_start, rqt_chunk, rqt_top;
384
385 rdev->rqt_pool = gen_pool_create(MIN_RQT_SHIFT, -1);
386 if (!rdev->rqt_pool)
387 return -ENOMEM;
388
389 rqt_start = rdev->lldi.vr->rq.start;
390 rqt_chunk = rdev->lldi.vr->rq.size;
391 rqt_top = rqt_start + rqt_chunk;
392
393 while (rqt_start < rqt_top) {
394 rqt_chunk = min(rqt_top - rqt_start + 1, rqt_chunk);
395 if (gen_pool_add(rdev->rqt_pool, rqt_start, rqt_chunk, -1)) {
396 PDBG("%s failed to add RQT chunk (%x/%x)\n",
397 __func__, rqt_start, rqt_chunk);
398 if (rqt_chunk <= 1024 << MIN_RQT_SHIFT) {
399 printk(KERN_WARNING MOD
400 "Failed to add all RQT chunks (%x/%x)\n",
401 rqt_start, rqt_top - rqt_start);
402 return 0;
403 }
404 rqt_chunk >>= 1;
405 } else {
406 PDBG("%s added RQT chunk (%x/%x)\n",
407 __func__, rqt_start, rqt_chunk);
408 rqt_start += rqt_chunk;
409 }
410 }
411 return 0;
412}
413
414void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
415{
416 gen_pool_destroy(rdev->rqt_pool);
417}
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
new file mode 100644
index 000000000000..d0e8af352408
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -0,0 +1,550 @@
1/*
2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 * - Redistributions in binary form must reproduce the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer in the documentation and/or other materials
20 * provided with the distribution.
21 *
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
23 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
24 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
25 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
26 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
27 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
28 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
29 * SOFTWARE.
30 */
31#ifndef __T4_H__
32#define __T4_H__
33
34#include "t4_hw.h"
35#include "t4_regs.h"
36#include "t4_msg.h"
37#include "t4fw_ri_api.h"
38
39#define T4_QID_BASE 1024
40#define T4_MAX_QIDS 256
41#define T4_MAX_NUM_QP (1<<16)
42#define T4_MAX_NUM_CQ (1<<15)
43#define T4_MAX_NUM_PD (1<<15)
44#define T4_MAX_PBL_SIZE 256
45#define T4_MAX_RQ_SIZE 1024
46#define T4_MAX_SQ_SIZE 1024
47#define T4_MAX_QP_DEPTH (T4_MAX_RQ_SIZE-1)
48#define T4_MAX_CQ_DEPTH 8192
49#define T4_MAX_NUM_STAG (1<<15)
50#define T4_MAX_MR_SIZE (~0ULL - 1)
51#define T4_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */
52#define T4_STAG_UNSET 0xffffffff
53#define T4_FW_MAJ 0
54#define T4_EQ_STATUS_ENTRIES (L1_CACHE_BYTES > 64 ? 2 : 1)
55
56struct t4_status_page {
57 __be32 rsvd1; /* flit 0 - hw owns */
58 __be16 rsvd2;
59 __be16 qid;
60 __be16 cidx;
61 __be16 pidx;
62 u8 qp_err; /* flit 1 - sw owns */
63 u8 db_off;
64};
65
66#define T4_EQ_SIZE 64
67
68#define T4_SQ_NUM_SLOTS 4
69#define T4_SQ_NUM_BYTES (T4_EQ_SIZE * T4_SQ_NUM_SLOTS)
70#define T4_MAX_SEND_SGE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \
71 sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
72#define T4_MAX_SEND_INLINE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \
73 sizeof(struct fw_ri_immd)))
74#define T4_MAX_WRITE_INLINE ((T4_SQ_NUM_BYTES - \
75 sizeof(struct fw_ri_rdma_write_wr) - \
76 sizeof(struct fw_ri_immd)))
77#define T4_MAX_WRITE_SGE ((T4_SQ_NUM_BYTES - \
78 sizeof(struct fw_ri_rdma_write_wr) - \
79 sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
80#define T4_MAX_FR_IMMD ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_fr_nsmr_wr) - \
81 sizeof(struct fw_ri_immd)))
82#define T4_MAX_FR_DEPTH 255
83
84#define T4_RQ_NUM_SLOTS 2
85#define T4_RQ_NUM_BYTES (T4_EQ_SIZE * T4_RQ_NUM_SLOTS)
86#define T4_MAX_RECV_SGE ((T4_RQ_NUM_BYTES - sizeof(struct fw_ri_recv_wr) - \
87 sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
88
89union t4_wr {
90 struct fw_ri_res_wr res;
91 struct fw_ri_wr ri;
92 struct fw_ri_rdma_write_wr write;
93 struct fw_ri_send_wr send;
94 struct fw_ri_rdma_read_wr read;
95 struct fw_ri_bind_mw_wr bind;
96 struct fw_ri_fr_nsmr_wr fr;
97 struct fw_ri_inv_lstag_wr inv;
98 struct t4_status_page status;
99 __be64 flits[T4_EQ_SIZE / sizeof(__be64) * T4_SQ_NUM_SLOTS];
100};
101
102union t4_recv_wr {
103 struct fw_ri_recv_wr recv;
104 struct t4_status_page status;
105 __be64 flits[T4_EQ_SIZE / sizeof(__be64) * T4_RQ_NUM_SLOTS];
106};
107
108static inline void init_wr_hdr(union t4_wr *wqe, u16 wrid,
109 enum fw_wr_opcodes opcode, u8 flags, u8 len16)
110{
111 int slots_used;
112
113 wqe->send.opcode = (u8)opcode;
114 wqe->send.flags = flags;
115 wqe->send.wrid = wrid;
116 wqe->send.r1[0] = 0;
117 wqe->send.r1[1] = 0;
118 wqe->send.r1[2] = 0;
119 wqe->send.len16 = len16;
120
121 slots_used = DIV_ROUND_UP(len16*16, T4_EQ_SIZE);
122 while (slots_used < T4_SQ_NUM_SLOTS) {
123 wqe->flits[slots_used * T4_EQ_SIZE / sizeof(__be64)] = 0;
124 slots_used++;
125 }
126}
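/*
 * Note that init_wr_hdr() also clears the first flit of every unused
 * 64-byte slot of the (up to) four-slot SQ entry, presumably so stale
 * contents of the trailing slots are never parsed as part of the new WR.
 */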
127
128/* CQE/AE status codes */
129#define T4_ERR_SUCCESS 0x0
130#define T4_ERR_STAG 0x1 /* STAG invalid: either the */
 131							/* STAG is off limit, is zero, */
132 /* or STAG_key mismatch */
133#define T4_ERR_PDID 0x2 /* PDID mismatch */
134#define T4_ERR_QPID 0x3 /* QPID mismatch */
135#define T4_ERR_ACCESS 0x4 /* Invalid access right */
136#define T4_ERR_WRAP 0x5 /* Wrap error */
 137#define T4_ERR_BOUND 0x6 /* base and bounds violation */
138#define T4_ERR_INVALIDATE_SHARED_MR 0x7 /* attempt to invalidate a */
139 /* shared memory region */
140#define T4_ERR_INVALIDATE_MR_WITH_MW_BOUND 0x8 /* attempt to invalidate a */
 141							/* MR with bound MWs */
142#define T4_ERR_ECC 0x9 /* ECC error detected */
143#define T4_ERR_ECC_PSTAG 0xA /* ECC error detected when */
144 /* reading PSTAG for a MW */
145 /* Invalidate */
146#define T4_ERR_PBL_ADDR_BOUND 0xB /* pbl addr out of bounds: */
147 /* software error */
148#define T4_ERR_SWFLUSH 0xC /* SW FLUSHED */
149#define T4_ERR_CRC 0x10 /* CRC error */
150#define T4_ERR_MARKER 0x11 /* Marker error */
151#define T4_ERR_PDU_LEN_ERR 0x12 /* invalid PDU length */
152#define T4_ERR_OUT_OF_RQE 0x13 /* out of RQE */
153#define T4_ERR_DDP_VERSION 0x14 /* wrong DDP version */
154#define T4_ERR_RDMA_VERSION 0x15 /* wrong RDMA version */
155#define T4_ERR_OPCODE 0x16 /* invalid rdma opcode */
156#define T4_ERR_DDP_QUEUE_NUM 0x17 /* invalid ddp queue number */
157#define T4_ERR_MSN 0x18 /* MSN error */
158#define T4_ERR_TBIT 0x19 /* tag bit not set correctly */
159#define T4_ERR_MO 0x1A /* MO not 0 for TERMINATE */
160 /* or READ_REQ */
161#define T4_ERR_MSN_GAP 0x1B
162#define T4_ERR_MSN_RANGE 0x1C
163#define T4_ERR_IRD_OVERFLOW 0x1D
164#define T4_ERR_RQE_ADDR_BOUND 0x1E /* RQE addr out of bounds: */
165 /* software error */
166#define T4_ERR_INTERNAL_ERR 0x1F /* internal error (opcode */
167 /* mismatch) */
168/*
169 * CQE defs
170 */
171struct t4_cqe {
172 __be32 header;
173 __be32 len;
174 union {
175 struct {
176 __be32 stag;
177 __be32 msn;
178 } rcqe;
179 struct {
180 u32 nada1;
181 u16 nada2;
182 u16 cidx;
183 } scqe;
184 struct {
185 __be32 wrid_hi;
186 __be32 wrid_low;
187 } gen;
188 } u;
189 __be64 reserved;
190 __be64 bits_type_ts;
191};
192
193/* macros for flit 0 of the cqe */
194
195#define S_CQE_QPID 12
196#define M_CQE_QPID 0xFFFFF
197#define G_CQE_QPID(x) ((((x) >> S_CQE_QPID)) & M_CQE_QPID)
198#define V_CQE_QPID(x) ((x)<<S_CQE_QPID)
199
200#define S_CQE_SWCQE 11
201#define M_CQE_SWCQE 0x1
202#define G_CQE_SWCQE(x) ((((x) >> S_CQE_SWCQE)) & M_CQE_SWCQE)
203#define V_CQE_SWCQE(x) ((x)<<S_CQE_SWCQE)
204
205#define S_CQE_STATUS 5
206#define M_CQE_STATUS 0x1F
207#define G_CQE_STATUS(x) ((((x) >> S_CQE_STATUS)) & M_CQE_STATUS)
208#define V_CQE_STATUS(x) ((x)<<S_CQE_STATUS)
209
210#define S_CQE_TYPE 4
211#define M_CQE_TYPE 0x1
212#define G_CQE_TYPE(x) ((((x) >> S_CQE_TYPE)) & M_CQE_TYPE)
213#define V_CQE_TYPE(x) ((x)<<S_CQE_TYPE)
214
215#define S_CQE_OPCODE 0
216#define M_CQE_OPCODE 0xF
217#define G_CQE_OPCODE(x) ((((x) >> S_CQE_OPCODE)) & M_CQE_OPCODE)
218#define V_CQE_OPCODE(x) ((x)<<S_CQE_OPCODE)
219
220#define SW_CQE(x) (G_CQE_SWCQE(be32_to_cpu((x)->header)))
221#define CQE_QPID(x) (G_CQE_QPID(be32_to_cpu((x)->header)))
222#define CQE_TYPE(x) (G_CQE_TYPE(be32_to_cpu((x)->header)))
223#define SQ_TYPE(x) (CQE_TYPE((x)))
224#define RQ_TYPE(x) (!CQE_TYPE((x)))
225#define CQE_STATUS(x) (G_CQE_STATUS(be32_to_cpu((x)->header)))
226#define CQE_OPCODE(x) (G_CQE_OPCODE(be32_to_cpu((x)->header)))
227
 228#define CQE_SEND_OPCODE(x) ( \
229 (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND) || \
230 (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE) || \
231 (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_INV) || \
232 (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE_INV))
233
234#define CQE_LEN(x) (be32_to_cpu((x)->len))
235
236/* used for RQ completion processing */
237#define CQE_WRID_STAG(x) (be32_to_cpu((x)->u.rcqe.stag))
238#define CQE_WRID_MSN(x) (be32_to_cpu((x)->u.rcqe.msn))
239
240/* used for SQ completion processing */
241#define CQE_WRID_SQ_IDX(x) ((x)->u.scqe.cidx)
242
243/* generic accessor macros */
244#define CQE_WRID_HI(x) ((x)->u.gen.wrid_hi)
245#define CQE_WRID_LOW(x) ((x)->u.gen.wrid_low)
246
247/* macros for flit 3 of the cqe */
248#define S_CQE_GENBIT 63
249#define M_CQE_GENBIT 0x1
250#define G_CQE_GENBIT(x) (((x) >> S_CQE_GENBIT) & M_CQE_GENBIT)
251#define V_CQE_GENBIT(x) ((x)<<S_CQE_GENBIT)
252
253#define S_CQE_OVFBIT 62
254#define M_CQE_OVFBIT 0x1
255#define G_CQE_OVFBIT(x) ((((x) >> S_CQE_OVFBIT)) & M_CQE_OVFBIT)
256
257#define S_CQE_IQTYPE 60
258#define M_CQE_IQTYPE 0x3
259#define G_CQE_IQTYPE(x) ((((x) >> S_CQE_IQTYPE)) & M_CQE_IQTYPE)
260
261#define M_CQE_TS 0x0fffffffffffffffULL
262#define G_CQE_TS(x) ((x) & M_CQE_TS)
263
264#define CQE_OVFBIT(x) ((unsigned)G_CQE_OVFBIT(be64_to_cpu((x)->bits_type_ts)))
265#define CQE_GENBIT(x) ((unsigned)G_CQE_GENBIT(be64_to_cpu((x)->bits_type_ts)))
266#define CQE_TS(x) (G_CQE_TS(be64_to_cpu((x)->bits_type_ts)))
267
268struct t4_swsqe {
269 u64 wr_id;
270 struct t4_cqe cqe;
271 int read_len;
272 int opcode;
273 int complete;
274 int signaled;
275 u16 idx;
276};
277
278struct t4_sq {
279 union t4_wr *queue;
280 dma_addr_t dma_addr;
281 DECLARE_PCI_UNMAP_ADDR(mapping);
282 struct t4_swsqe *sw_sq;
283 struct t4_swsqe *oldest_read;
284 u64 udb;
285 size_t memsize;
286 u32 qid;
287 u16 in_use;
288 u16 size;
289 u16 cidx;
290 u16 pidx;
291};
292
293struct t4_swrqe {
294 u64 wr_id;
295};
296
297struct t4_rq {
298 union t4_recv_wr *queue;
299 dma_addr_t dma_addr;
300 DECLARE_PCI_UNMAP_ADDR(mapping);
301 struct t4_swrqe *sw_rq;
302 u64 udb;
303 size_t memsize;
304 u32 qid;
305 u32 msn;
306 u32 rqt_hwaddr;
307 u16 rqt_size;
308 u16 in_use;
309 u16 size;
310 u16 cidx;
311 u16 pidx;
312};
313
314struct t4_wq {
315 struct t4_sq sq;
316 struct t4_rq rq;
317 void __iomem *db;
318 void __iomem *gts;
319 struct c4iw_rdev *rdev;
320};
321
322static inline int t4_rqes_posted(struct t4_wq *wq)
323{
324 return wq->rq.in_use;
325}
326
327static inline int t4_rq_empty(struct t4_wq *wq)
328{
329 return wq->rq.in_use == 0;
330}
331
332static inline int t4_rq_full(struct t4_wq *wq)
333{
334 return wq->rq.in_use == (wq->rq.size - 1);
335}
336
337static inline u32 t4_rq_avail(struct t4_wq *wq)
338{
339 return wq->rq.size - 1 - wq->rq.in_use;
340}
341
342static inline void t4_rq_produce(struct t4_wq *wq)
343{
344 wq->rq.in_use++;
345 if (++wq->rq.pidx == wq->rq.size)
346 wq->rq.pidx = 0;
347}
348
349static inline void t4_rq_consume(struct t4_wq *wq)
350{
351 wq->rq.in_use--;
352 wq->rq.msn++;
353 if (++wq->rq.cidx == wq->rq.size)
354 wq->rq.cidx = 0;
355}
356
357static inline int t4_sq_empty(struct t4_wq *wq)
358{
359 return wq->sq.in_use == 0;
360}
361
362static inline int t4_sq_full(struct t4_wq *wq)
363{
364 return wq->sq.in_use == (wq->sq.size - 1);
365}
366
367static inline u32 t4_sq_avail(struct t4_wq *wq)
368{
369 return wq->sq.size - 1 - wq->sq.in_use;
370}
371
372static inline void t4_sq_produce(struct t4_wq *wq)
373{
374 wq->sq.in_use++;
375 if (++wq->sq.pidx == wq->sq.size)
376 wq->sq.pidx = 0;
377}
378
379static inline void t4_sq_consume(struct t4_wq *wq)
380{
381 wq->sq.in_use--;
382 if (++wq->sq.cidx == wq->sq.size)
383 wq->sq.cidx = 0;
384}
385
386static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc)
387{
388 inc *= T4_SQ_NUM_SLOTS;
389 wmb();
390 writel(QID(wq->sq.qid) | PIDX(inc), wq->db);
391}
392
393static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc)
394{
395 inc *= T4_RQ_NUM_SLOTS;
396 wmb();
397 writel(QID(wq->rq.qid) | PIDX(inc), wq->db);
398}
399
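/*
 * Each queue is allocated one slot beyond its nominal 'size' (see the
 * memsize computation in c4iw_create_qp()); that trailing slot holds the
 * t4_status_page, which is why the helpers below index queue[size] to
 * reach the qp_err and db_off flags.
 */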
400static inline int t4_wq_in_error(struct t4_wq *wq)
401{
402 return wq->sq.queue[wq->sq.size].status.qp_err;
403}
404
405static inline void t4_set_wq_in_error(struct t4_wq *wq)
406{
407 wq->sq.queue[wq->sq.size].status.qp_err = 1;
408 wq->rq.queue[wq->rq.size].status.qp_err = 1;
409}
410
411static inline void t4_disable_wq_db(struct t4_wq *wq)
412{
413 wq->sq.queue[wq->sq.size].status.db_off = 1;
414 wq->rq.queue[wq->rq.size].status.db_off = 1;
415}
416
417static inline void t4_enable_wq_db(struct t4_wq *wq)
418{
419 wq->sq.queue[wq->sq.size].status.db_off = 0;
420 wq->rq.queue[wq->rq.size].status.db_off = 0;
421}
422
423static inline int t4_wq_db_enabled(struct t4_wq *wq)
424{
425 return !wq->sq.queue[wq->sq.size].status.db_off;
426}
427
428struct t4_cq {
429 struct t4_cqe *queue;
430 dma_addr_t dma_addr;
431 DECLARE_PCI_UNMAP_ADDR(mapping);
432 struct t4_cqe *sw_queue;
433 void __iomem *gts;
434 struct c4iw_rdev *rdev;
435 u64 ugts;
436 size_t memsize;
437 u64 timestamp;
438 u32 cqid;
439 u16 size; /* including status page */
440 u16 cidx;
441 u16 sw_pidx;
442 u16 sw_cidx;
443 u16 sw_in_use;
444 u16 cidx_inc;
445 u8 gen;
446 u8 error;
447};
448
449static inline int t4_arm_cq(struct t4_cq *cq, int se)
450{
451 u32 val;
452 u16 inc;
453
454 do {
455 /*
 456		 * inc must not exceed either the maximum CIDX update value
 457		 * or the size of the CQ.
458 */
459 inc = cq->cidx_inc <= CIDXINC_MASK ? cq->cidx_inc :
460 CIDXINC_MASK;
461 inc = inc <= (cq->size - 1) ? inc : (cq->size - 1);
462 if (inc == cq->cidx_inc)
463 val = SEINTARM(se) | CIDXINC(inc) | TIMERREG(6) |
464 INGRESSQID(cq->cqid);
465 else
466 val = SEINTARM(0) | CIDXINC(inc) | TIMERREG(7) |
467 INGRESSQID(cq->cqid);
468 cq->cidx_inc -= inc;
469 writel(val, cq->gts);
470 } while (cq->cidx_inc);
471 return 0;
472}
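/*
 * t4_arm_cq() returns the accumulated consumer-index credit to hardware in
 * writes bounded by both CIDXINC_MASK and the CQ size; only the final write
 * arms the interrupt (SEINTARM(se)/TIMERREG(6)), the intermediate writes
 * merely hand back credit.
 */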
473
474static inline void t4_swcq_produce(struct t4_cq *cq)
475{
476 cq->sw_in_use++;
477 if (++cq->sw_pidx == cq->size)
478 cq->sw_pidx = 0;
479}
480
481static inline void t4_swcq_consume(struct t4_cq *cq)
482{
483 cq->sw_in_use--;
484 if (++cq->sw_cidx == cq->size)
485 cq->sw_cidx = 0;
486}
487
488static inline void t4_hwcq_consume(struct t4_cq *cq)
489{
490 cq->cidx_inc++;
491 if (++cq->cidx == cq->size) {
492 cq->cidx = 0;
493 cq->gen ^= 1;
494 }
495}
496
497static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe)
498{
499 return (CQE_GENBIT(cqe) == cq->gen);
500}
501
502static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
503{
504 int ret = 0;
505 u64 bits_type_ts = be64_to_cpu(cq->queue[cq->cidx].bits_type_ts);
506
507 if (G_CQE_GENBIT(bits_type_ts) == cq->gen) {
508 *cqe = &cq->queue[cq->cidx];
509 cq->timestamp = G_CQE_TS(bits_type_ts);
510 } else if (G_CQE_TS(bits_type_ts) > cq->timestamp)
511 ret = -EOVERFLOW;
512 else
513 ret = -ENODATA;
514 if (ret == -EOVERFLOW) {
515 printk(KERN_ERR MOD "cq overflow cqid %u\n", cq->cqid);
516 cq->error = 1;
517 }
518 return ret;
519}
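/*
 * CQE validity is tracked with a generation bit that software flips each
 * time its consumer index wraps (see t4_hwcq_consume()).  A CQE whose
 * generation does not match but whose timestamp is newer than the last one
 * consumed means hardware has lapped software, i.e. the CQ overflowed; an
 * older timestamp simply means no new entry yet.
 */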
520
521static inline struct t4_cqe *t4_next_sw_cqe(struct t4_cq *cq)
522{
523 if (cq->sw_in_use)
524 return &cq->sw_queue[cq->sw_cidx];
525 return NULL;
526}
527
528static inline int t4_next_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
529{
530 int ret = 0;
531
532 if (cq->error)
533 ret = -ENODATA;
534 else if (cq->sw_in_use)
535 *cqe = &cq->sw_queue[cq->sw_cidx];
536 else
537 ret = t4_next_hw_cqe(cq, cqe);
538 return ret;
539}
540
541static inline int t4_cq_in_error(struct t4_cq *cq)
542{
543 return ((struct t4_status_page *)&cq->queue[cq->size])->qp_err;
544}
545
546static inline void t4_set_cq_in_error(struct t4_cq *cq)
547{
548 ((struct t4_status_page *)&cq->queue[cq->size])->qp_err = 1;
549}
550#endif
diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
new file mode 100644
index 000000000000..fc706bd07fae
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
@@ -0,0 +1,829 @@
1/*
2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 * - Redistributions in binary form must reproduce the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer in the documentation and/or other materials
20 * provided with the distribution.
21 *
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
23 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
24 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
25 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
26 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
27 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
28 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
29 * SOFTWARE.
30 */
31#ifndef _T4FW_RI_API_H_
32#define _T4FW_RI_API_H_
33
34#include "t4fw_api.h"
35
36enum fw_ri_wr_opcode {
37 FW_RI_RDMA_WRITE = 0x0, /* IETF RDMAP v1.0 ... */
38 FW_RI_READ_REQ = 0x1,
39 FW_RI_READ_RESP = 0x2,
40 FW_RI_SEND = 0x3,
41 FW_RI_SEND_WITH_INV = 0x4,
42 FW_RI_SEND_WITH_SE = 0x5,
43 FW_RI_SEND_WITH_SE_INV = 0x6,
44 FW_RI_TERMINATE = 0x7,
45 FW_RI_RDMA_INIT = 0x8, /* CHELSIO RI specific ... */
46 FW_RI_BIND_MW = 0x9,
47 FW_RI_FAST_REGISTER = 0xa,
48 FW_RI_LOCAL_INV = 0xb,
49 FW_RI_QP_MODIFY = 0xc,
50 FW_RI_BYPASS = 0xd,
51 FW_RI_RECEIVE = 0xe,
52
53 FW_RI_SGE_EC_CR_RETURN = 0xf
54};
55
56enum fw_ri_wr_flags {
57 FW_RI_COMPLETION_FLAG = 0x01,
58 FW_RI_NOTIFICATION_FLAG = 0x02,
59 FW_RI_SOLICITED_EVENT_FLAG = 0x04,
60 FW_RI_READ_FENCE_FLAG = 0x08,
61 FW_RI_LOCAL_FENCE_FLAG = 0x10,
62 FW_RI_RDMA_READ_INVALIDATE = 0x20
63};
64
65enum fw_ri_mpa_attrs {
66 FW_RI_MPA_RX_MARKER_ENABLE = 0x01,
67 FW_RI_MPA_TX_MARKER_ENABLE = 0x02,
68 FW_RI_MPA_CRC_ENABLE = 0x04,
69 FW_RI_MPA_IETF_ENABLE = 0x08
70};
71
72enum fw_ri_qp_caps {
73 FW_RI_QP_RDMA_READ_ENABLE = 0x01,
74 FW_RI_QP_RDMA_WRITE_ENABLE = 0x02,
75 FW_RI_QP_BIND_ENABLE = 0x04,
76 FW_RI_QP_FAST_REGISTER_ENABLE = 0x08,
77 FW_RI_QP_STAG0_ENABLE = 0x10
78};
79
80enum fw_ri_addr_type {
81 FW_RI_ZERO_BASED_TO = 0x00,
82 FW_RI_VA_BASED_TO = 0x01
83};
84
85enum fw_ri_mem_perms {
86 FW_RI_MEM_ACCESS_REM_WRITE = 0x01,
87 FW_RI_MEM_ACCESS_REM_READ = 0x02,
88 FW_RI_MEM_ACCESS_REM = 0x03,
89 FW_RI_MEM_ACCESS_LOCAL_WRITE = 0x04,
90 FW_RI_MEM_ACCESS_LOCAL_READ = 0x08,
91 FW_RI_MEM_ACCESS_LOCAL = 0x0C
92};
93
94enum fw_ri_stag_type {
95 FW_RI_STAG_NSMR = 0x00,
96 FW_RI_STAG_SMR = 0x01,
97 FW_RI_STAG_MW = 0x02,
98 FW_RI_STAG_MW_RELAXED = 0x03
99};
100
101enum fw_ri_data_op {
102 FW_RI_DATA_IMMD = 0x81,
103 FW_RI_DATA_DSGL = 0x82,
104 FW_RI_DATA_ISGL = 0x83
105};
106
107enum fw_ri_sgl_depth {
108 FW_RI_SGL_DEPTH_MAX_SQ = 16,
109 FW_RI_SGL_DEPTH_MAX_RQ = 4
110};
111
112struct fw_ri_dsge_pair {
113 __be32 len[2];
114 __be64 addr[2];
115};
116
117struct fw_ri_dsgl {
118 __u8 op;
119 __u8 r1;
120 __be16 nsge;
121 __be32 len0;
122 __be64 addr0;
123#ifndef C99_NOT_SUPPORTED
124 struct fw_ri_dsge_pair sge[0];
125#endif
126};
127
128struct fw_ri_sge {
129 __be32 stag;
130 __be32 len;
131 __be64 to;
132};
133
134struct fw_ri_isgl {
135 __u8 op;
136 __u8 r1;
137 __be16 nsge;
138 __be32 r2;
139#ifndef C99_NOT_SUPPORTED
140 struct fw_ri_sge sge[0];
141#endif
142};
143
144struct fw_ri_immd {
145 __u8 op;
146 __u8 r1;
147 __be16 r2;
148 __be32 immdlen;
149#ifndef C99_NOT_SUPPORTED
150 __u8 data[0];
151#endif
152};
153
154struct fw_ri_tpte {
155 __be32 valid_to_pdid;
156 __be32 locread_to_qpid;
157 __be32 nosnoop_pbladdr;
158 __be32 len_lo;
159 __be32 va_hi;
160 __be32 va_lo_fbo;
161 __be32 dca_mwbcnt_pstag;
162 __be32 len_hi;
163};
164
165#define S_FW_RI_TPTE_VALID 31
166#define M_FW_RI_TPTE_VALID 0x1
167#define V_FW_RI_TPTE_VALID(x) ((x) << S_FW_RI_TPTE_VALID)
168#define G_FW_RI_TPTE_VALID(x) \
169 (((x) >> S_FW_RI_TPTE_VALID) & M_FW_RI_TPTE_VALID)
170#define F_FW_RI_TPTE_VALID V_FW_RI_TPTE_VALID(1U)
171
172#define S_FW_RI_TPTE_STAGKEY 23
173#define M_FW_RI_TPTE_STAGKEY 0xff
174#define V_FW_RI_TPTE_STAGKEY(x) ((x) << S_FW_RI_TPTE_STAGKEY)
175#define G_FW_RI_TPTE_STAGKEY(x) \
176 (((x) >> S_FW_RI_TPTE_STAGKEY) & M_FW_RI_TPTE_STAGKEY)
177
178#define S_FW_RI_TPTE_STAGSTATE 22
179#define M_FW_RI_TPTE_STAGSTATE 0x1
180#define V_FW_RI_TPTE_STAGSTATE(x) ((x) << S_FW_RI_TPTE_STAGSTATE)
181#define G_FW_RI_TPTE_STAGSTATE(x) \
182 (((x) >> S_FW_RI_TPTE_STAGSTATE) & M_FW_RI_TPTE_STAGSTATE)
183#define F_FW_RI_TPTE_STAGSTATE V_FW_RI_TPTE_STAGSTATE(1U)
184
185#define S_FW_RI_TPTE_STAGTYPE 20
186#define M_FW_RI_TPTE_STAGTYPE 0x3
187#define V_FW_RI_TPTE_STAGTYPE(x) ((x) << S_FW_RI_TPTE_STAGTYPE)
188#define G_FW_RI_TPTE_STAGTYPE(x) \
189 (((x) >> S_FW_RI_TPTE_STAGTYPE) & M_FW_RI_TPTE_STAGTYPE)
190
191#define S_FW_RI_TPTE_PDID 0
192#define M_FW_RI_TPTE_PDID 0xfffff
193#define V_FW_RI_TPTE_PDID(x) ((x) << S_FW_RI_TPTE_PDID)
194#define G_FW_RI_TPTE_PDID(x) \
195 (((x) >> S_FW_RI_TPTE_PDID) & M_FW_RI_TPTE_PDID)
196
197#define S_FW_RI_TPTE_PERM 28
198#define M_FW_RI_TPTE_PERM 0xf
199#define V_FW_RI_TPTE_PERM(x) ((x) << S_FW_RI_TPTE_PERM)
200#define G_FW_RI_TPTE_PERM(x) \
201 (((x) >> S_FW_RI_TPTE_PERM) & M_FW_RI_TPTE_PERM)
202
203#define S_FW_RI_TPTE_REMINVDIS 27
204#define M_FW_RI_TPTE_REMINVDIS 0x1
205#define V_FW_RI_TPTE_REMINVDIS(x) ((x) << S_FW_RI_TPTE_REMINVDIS)
206#define G_FW_RI_TPTE_REMINVDIS(x) \
207 (((x) >> S_FW_RI_TPTE_REMINVDIS) & M_FW_RI_TPTE_REMINVDIS)
208#define F_FW_RI_TPTE_REMINVDIS V_FW_RI_TPTE_REMINVDIS(1U)
209
210#define S_FW_RI_TPTE_ADDRTYPE 26
211#define M_FW_RI_TPTE_ADDRTYPE 1
212#define V_FW_RI_TPTE_ADDRTYPE(x) ((x) << S_FW_RI_TPTE_ADDRTYPE)
213#define G_FW_RI_TPTE_ADDRTYPE(x) \
214 (((x) >> S_FW_RI_TPTE_ADDRTYPE) & M_FW_RI_TPTE_ADDRTYPE)
215#define F_FW_RI_TPTE_ADDRTYPE V_FW_RI_TPTE_ADDRTYPE(1U)
216
217#define S_FW_RI_TPTE_MWBINDEN 25
218#define M_FW_RI_TPTE_MWBINDEN 0x1
219#define V_FW_RI_TPTE_MWBINDEN(x) ((x) << S_FW_RI_TPTE_MWBINDEN)
220#define G_FW_RI_TPTE_MWBINDEN(x) \
221 (((x) >> S_FW_RI_TPTE_MWBINDEN) & M_FW_RI_TPTE_MWBINDEN)
222#define F_FW_RI_TPTE_MWBINDEN V_FW_RI_TPTE_MWBINDEN(1U)
223
224#define S_FW_RI_TPTE_PS 20
225#define M_FW_RI_TPTE_PS 0x1f
226#define V_FW_RI_TPTE_PS(x) ((x) << S_FW_RI_TPTE_PS)
227#define G_FW_RI_TPTE_PS(x) \
228 (((x) >> S_FW_RI_TPTE_PS) & M_FW_RI_TPTE_PS)
229
230#define S_FW_RI_TPTE_QPID 0
231#define M_FW_RI_TPTE_QPID 0xfffff
232#define V_FW_RI_TPTE_QPID(x) ((x) << S_FW_RI_TPTE_QPID)
233#define G_FW_RI_TPTE_QPID(x) \
234 (((x) >> S_FW_RI_TPTE_QPID) & M_FW_RI_TPTE_QPID)
235
236#define S_FW_RI_TPTE_NOSNOOP 30
237#define M_FW_RI_TPTE_NOSNOOP 0x1
238#define V_FW_RI_TPTE_NOSNOOP(x) ((x) << S_FW_RI_TPTE_NOSNOOP)
239#define G_FW_RI_TPTE_NOSNOOP(x) \
240 (((x) >> S_FW_RI_TPTE_NOSNOOP) & M_FW_RI_TPTE_NOSNOOP)
241#define F_FW_RI_TPTE_NOSNOOP V_FW_RI_TPTE_NOSNOOP(1U)
242
243#define S_FW_RI_TPTE_PBLADDR 0
244#define M_FW_RI_TPTE_PBLADDR 0x1fffffff
245#define V_FW_RI_TPTE_PBLADDR(x) ((x) << S_FW_RI_TPTE_PBLADDR)
246#define G_FW_RI_TPTE_PBLADDR(x) \
247 (((x) >> S_FW_RI_TPTE_PBLADDR) & M_FW_RI_TPTE_PBLADDR)
248
249#define S_FW_RI_TPTE_DCA 24
250#define M_FW_RI_TPTE_DCA 0x1f
251#define V_FW_RI_TPTE_DCA(x) ((x) << S_FW_RI_TPTE_DCA)
252#define G_FW_RI_TPTE_DCA(x) \
253 (((x) >> S_FW_RI_TPTE_DCA) & M_FW_RI_TPTE_DCA)
254
255#define S_FW_RI_TPTE_MWBCNT_PSTAG 0
256#define M_FW_RI_TPTE_MWBCNT_PSTAG 0xffffff
 257#define V_FW_RI_TPTE_MWBCNT_PSTAG(x) \
258 ((x) << S_FW_RI_TPTE_MWBCNT_PSTAG)
259#define G_FW_RI_TPTE_MWBCNT_PSTAG(x) \
260 (((x) >> S_FW_RI_TPTE_MWBCNT_PSTAG) & M_FW_RI_TPTE_MWBCNT_PSTAG)
261
262enum fw_ri_res_type {
263 FW_RI_RES_TYPE_SQ,
264 FW_RI_RES_TYPE_RQ,
265 FW_RI_RES_TYPE_CQ,
266};
267
268enum fw_ri_res_op {
269 FW_RI_RES_OP_WRITE,
270 FW_RI_RES_OP_RESET,
271};
272
273struct fw_ri_res {
274 union fw_ri_restype {
275 struct fw_ri_res_sqrq {
276 __u8 restype;
277 __u8 op;
278 __be16 r3;
279 __be32 eqid;
280 __be32 r4[2];
281 __be32 fetchszm_to_iqid;
282 __be32 dcaen_to_eqsize;
283 __be64 eqaddr;
284 } sqrq;
285 struct fw_ri_res_cq {
286 __u8 restype;
287 __u8 op;
288 __be16 r3;
289 __be32 iqid;
290 __be32 r4[2];
291 __be32 iqandst_to_iqandstindex;
292 __be16 iqdroprss_to_iqesize;
293 __be16 iqsize;
294 __be64 iqaddr;
295 __be32 iqns_iqro;
296 __be32 r6_lo;
297 __be64 r7;
298 } cq;
299 } u;
300};
301
302struct fw_ri_res_wr {
303 __be32 op_nres;
304 __be32 len16_pkd;
305 __u64 cookie;
306#ifndef C99_NOT_SUPPORTED
307 struct fw_ri_res res[0];
308#endif
309};
310
311#define S_FW_RI_RES_WR_NRES 0
312#define M_FW_RI_RES_WR_NRES 0xff
313#define V_FW_RI_RES_WR_NRES(x) ((x) << S_FW_RI_RES_WR_NRES)
314#define G_FW_RI_RES_WR_NRES(x) \
315 (((x) >> S_FW_RI_RES_WR_NRES) & M_FW_RI_RES_WR_NRES)
316
317#define S_FW_RI_RES_WR_FETCHSZM 26
318#define M_FW_RI_RES_WR_FETCHSZM 0x1
319#define V_FW_RI_RES_WR_FETCHSZM(x) ((x) << S_FW_RI_RES_WR_FETCHSZM)
320#define G_FW_RI_RES_WR_FETCHSZM(x) \
321 (((x) >> S_FW_RI_RES_WR_FETCHSZM) & M_FW_RI_RES_WR_FETCHSZM)
322#define F_FW_RI_RES_WR_FETCHSZM V_FW_RI_RES_WR_FETCHSZM(1U)
323
324#define S_FW_RI_RES_WR_STATUSPGNS 25
325#define M_FW_RI_RES_WR_STATUSPGNS 0x1
326#define V_FW_RI_RES_WR_STATUSPGNS(x) ((x) << S_FW_RI_RES_WR_STATUSPGNS)
327#define G_FW_RI_RES_WR_STATUSPGNS(x) \
328 (((x) >> S_FW_RI_RES_WR_STATUSPGNS) & M_FW_RI_RES_WR_STATUSPGNS)
329#define F_FW_RI_RES_WR_STATUSPGNS V_FW_RI_RES_WR_STATUSPGNS(1U)
330
331#define S_FW_RI_RES_WR_STATUSPGRO 24
332#define M_FW_RI_RES_WR_STATUSPGRO 0x1
333#define V_FW_RI_RES_WR_STATUSPGRO(x) ((x) << S_FW_RI_RES_WR_STATUSPGRO)
334#define G_FW_RI_RES_WR_STATUSPGRO(x) \
335 (((x) >> S_FW_RI_RES_WR_STATUSPGRO) & M_FW_RI_RES_WR_STATUSPGRO)
336#define F_FW_RI_RES_WR_STATUSPGRO V_FW_RI_RES_WR_STATUSPGRO(1U)
337
338#define S_FW_RI_RES_WR_FETCHNS 23
339#define M_FW_RI_RES_WR_FETCHNS 0x1
340#define V_FW_RI_RES_WR_FETCHNS(x) ((x) << S_FW_RI_RES_WR_FETCHNS)
341#define G_FW_RI_RES_WR_FETCHNS(x) \
342 (((x) >> S_FW_RI_RES_WR_FETCHNS) & M_FW_RI_RES_WR_FETCHNS)
343#define F_FW_RI_RES_WR_FETCHNS V_FW_RI_RES_WR_FETCHNS(1U)
344
345#define S_FW_RI_RES_WR_FETCHRO 22
346#define M_FW_RI_RES_WR_FETCHRO 0x1
347#define V_FW_RI_RES_WR_FETCHRO(x) ((x) << S_FW_RI_RES_WR_FETCHRO)
348#define G_FW_RI_RES_WR_FETCHRO(x) \
349 (((x) >> S_FW_RI_RES_WR_FETCHRO) & M_FW_RI_RES_WR_FETCHRO)
350#define F_FW_RI_RES_WR_FETCHRO V_FW_RI_RES_WR_FETCHRO(1U)
351
352#define S_FW_RI_RES_WR_HOSTFCMODE 20
353#define M_FW_RI_RES_WR_HOSTFCMODE 0x3
354#define V_FW_RI_RES_WR_HOSTFCMODE(x) ((x) << S_FW_RI_RES_WR_HOSTFCMODE)
355#define G_FW_RI_RES_WR_HOSTFCMODE(x) \
356 (((x) >> S_FW_RI_RES_WR_HOSTFCMODE) & M_FW_RI_RES_WR_HOSTFCMODE)
357
358#define S_FW_RI_RES_WR_CPRIO 19
359#define M_FW_RI_RES_WR_CPRIO 0x1
360#define V_FW_RI_RES_WR_CPRIO(x) ((x) << S_FW_RI_RES_WR_CPRIO)
361#define G_FW_RI_RES_WR_CPRIO(x) \
362 (((x) >> S_FW_RI_RES_WR_CPRIO) & M_FW_RI_RES_WR_CPRIO)
363#define F_FW_RI_RES_WR_CPRIO V_FW_RI_RES_WR_CPRIO(1U)
364
365#define S_FW_RI_RES_WR_ONCHIP 18
366#define M_FW_RI_RES_WR_ONCHIP 0x1
367#define V_FW_RI_RES_WR_ONCHIP(x) ((x) << S_FW_RI_RES_WR_ONCHIP)
368#define G_FW_RI_RES_WR_ONCHIP(x) \
369 (((x) >> S_FW_RI_RES_WR_ONCHIP) & M_FW_RI_RES_WR_ONCHIP)
370#define F_FW_RI_RES_WR_ONCHIP V_FW_RI_RES_WR_ONCHIP(1U)
371
372#define S_FW_RI_RES_WR_PCIECHN 16
373#define M_FW_RI_RES_WR_PCIECHN 0x3
374#define V_FW_RI_RES_WR_PCIECHN(x) ((x) << S_FW_RI_RES_WR_PCIECHN)
375#define G_FW_RI_RES_WR_PCIECHN(x) \
376 (((x) >> S_FW_RI_RES_WR_PCIECHN) & M_FW_RI_RES_WR_PCIECHN)
377
378#define S_FW_RI_RES_WR_IQID 0
379#define M_FW_RI_RES_WR_IQID 0xffff
380#define V_FW_RI_RES_WR_IQID(x) ((x) << S_FW_RI_RES_WR_IQID)
381#define G_FW_RI_RES_WR_IQID(x) \
382 (((x) >> S_FW_RI_RES_WR_IQID) & M_FW_RI_RES_WR_IQID)
383
384#define S_FW_RI_RES_WR_DCAEN 31
385#define M_FW_RI_RES_WR_DCAEN 0x1
386#define V_FW_RI_RES_WR_DCAEN(x) ((x) << S_FW_RI_RES_WR_DCAEN)
387#define G_FW_RI_RES_WR_DCAEN(x) \
388 (((x) >> S_FW_RI_RES_WR_DCAEN) & M_FW_RI_RES_WR_DCAEN)
389#define F_FW_RI_RES_WR_DCAEN V_FW_RI_RES_WR_DCAEN(1U)
390
391#define S_FW_RI_RES_WR_DCACPU 26
392#define M_FW_RI_RES_WR_DCACPU 0x1f
393#define V_FW_RI_RES_WR_DCACPU(x) ((x) << S_FW_RI_RES_WR_DCACPU)
394#define G_FW_RI_RES_WR_DCACPU(x) \
395 (((x) >> S_FW_RI_RES_WR_DCACPU) & M_FW_RI_RES_WR_DCACPU)
396
397#define S_FW_RI_RES_WR_FBMIN 23
398#define M_FW_RI_RES_WR_FBMIN 0x7
399#define V_FW_RI_RES_WR_FBMIN(x) ((x) << S_FW_RI_RES_WR_FBMIN)
400#define G_FW_RI_RES_WR_FBMIN(x) \
401 (((x) >> S_FW_RI_RES_WR_FBMIN) & M_FW_RI_RES_WR_FBMIN)
402
403#define S_FW_RI_RES_WR_FBMAX 20
404#define M_FW_RI_RES_WR_FBMAX 0x7
405#define V_FW_RI_RES_WR_FBMAX(x) ((x) << S_FW_RI_RES_WR_FBMAX)
406#define G_FW_RI_RES_WR_FBMAX(x) \
407 (((x) >> S_FW_RI_RES_WR_FBMAX) & M_FW_RI_RES_WR_FBMAX)
408
409#define S_FW_RI_RES_WR_CIDXFTHRESHO 19
410#define M_FW_RI_RES_WR_CIDXFTHRESHO 0x1
411#define V_FW_RI_RES_WR_CIDXFTHRESHO(x) ((x) << S_FW_RI_RES_WR_CIDXFTHRESHO)
412#define G_FW_RI_RES_WR_CIDXFTHRESHO(x) \
413 (((x) >> S_FW_RI_RES_WR_CIDXFTHRESHO) & M_FW_RI_RES_WR_CIDXFTHRESHO)
414#define F_FW_RI_RES_WR_CIDXFTHRESHO V_FW_RI_RES_WR_CIDXFTHRESHO(1U)
415
416#define S_FW_RI_RES_WR_CIDXFTHRESH 16
417#define M_FW_RI_RES_WR_CIDXFTHRESH 0x7
418#define V_FW_RI_RES_WR_CIDXFTHRESH(x) ((x) << S_FW_RI_RES_WR_CIDXFTHRESH)
419#define G_FW_RI_RES_WR_CIDXFTHRESH(x) \
420 (((x) >> S_FW_RI_RES_WR_CIDXFTHRESH) & M_FW_RI_RES_WR_CIDXFTHRESH)
421
422#define S_FW_RI_RES_WR_EQSIZE 0
423#define M_FW_RI_RES_WR_EQSIZE 0xffff
424#define V_FW_RI_RES_WR_EQSIZE(x) ((x) << S_FW_RI_RES_WR_EQSIZE)
425#define G_FW_RI_RES_WR_EQSIZE(x) \
426 (((x) >> S_FW_RI_RES_WR_EQSIZE) & M_FW_RI_RES_WR_EQSIZE)
427
428#define S_FW_RI_RES_WR_IQANDST 15
429#define M_FW_RI_RES_WR_IQANDST 0x1
430#define V_FW_RI_RES_WR_IQANDST(x) ((x) << S_FW_RI_RES_WR_IQANDST)
431#define G_FW_RI_RES_WR_IQANDST(x) \
432 (((x) >> S_FW_RI_RES_WR_IQANDST) & M_FW_RI_RES_WR_IQANDST)
433#define F_FW_RI_RES_WR_IQANDST V_FW_RI_RES_WR_IQANDST(1U)
434
435#define S_FW_RI_RES_WR_IQANUS 14
436#define M_FW_RI_RES_WR_IQANUS 0x1
437#define V_FW_RI_RES_WR_IQANUS(x) ((x) << S_FW_RI_RES_WR_IQANUS)
438#define G_FW_RI_RES_WR_IQANUS(x) \
439 (((x) >> S_FW_RI_RES_WR_IQANUS) & M_FW_RI_RES_WR_IQANUS)
440#define F_FW_RI_RES_WR_IQANUS V_FW_RI_RES_WR_IQANUS(1U)
441
442#define S_FW_RI_RES_WR_IQANUD 12
443#define M_FW_RI_RES_WR_IQANUD 0x3
444#define V_FW_RI_RES_WR_IQANUD(x) ((x) << S_FW_RI_RES_WR_IQANUD)
445#define G_FW_RI_RES_WR_IQANUD(x) \
446 (((x) >> S_FW_RI_RES_WR_IQANUD) & M_FW_RI_RES_WR_IQANUD)
447
448#define S_FW_RI_RES_WR_IQANDSTINDEX 0
449#define M_FW_RI_RES_WR_IQANDSTINDEX 0xfff
450#define V_FW_RI_RES_WR_IQANDSTINDEX(x) ((x) << S_FW_RI_RES_WR_IQANDSTINDEX)
451#define G_FW_RI_RES_WR_IQANDSTINDEX(x) \
452 (((x) >> S_FW_RI_RES_WR_IQANDSTINDEX) & M_FW_RI_RES_WR_IQANDSTINDEX)
453
454#define S_FW_RI_RES_WR_IQDROPRSS 15
455#define M_FW_RI_RES_WR_IQDROPRSS 0x1
456#define V_FW_RI_RES_WR_IQDROPRSS(x) ((x) << S_FW_RI_RES_WR_IQDROPRSS)
457#define G_FW_RI_RES_WR_IQDROPRSS(x) \
458 (((x) >> S_FW_RI_RES_WR_IQDROPRSS) & M_FW_RI_RES_WR_IQDROPRSS)
459#define F_FW_RI_RES_WR_IQDROPRSS V_FW_RI_RES_WR_IQDROPRSS(1U)
460
461#define S_FW_RI_RES_WR_IQGTSMODE 14
462#define M_FW_RI_RES_WR_IQGTSMODE 0x1
463#define V_FW_RI_RES_WR_IQGTSMODE(x) ((x) << S_FW_RI_RES_WR_IQGTSMODE)
464#define G_FW_RI_RES_WR_IQGTSMODE(x) \
465 (((x) >> S_FW_RI_RES_WR_IQGTSMODE) & M_FW_RI_RES_WR_IQGTSMODE)
466#define F_FW_RI_RES_WR_IQGTSMODE V_FW_RI_RES_WR_IQGTSMODE(1U)
467
468#define S_FW_RI_RES_WR_IQPCIECH 12
469#define M_FW_RI_RES_WR_IQPCIECH 0x3
470#define V_FW_RI_RES_WR_IQPCIECH(x) ((x) << S_FW_RI_RES_WR_IQPCIECH)
471#define G_FW_RI_RES_WR_IQPCIECH(x) \
472 (((x) >> S_FW_RI_RES_WR_IQPCIECH) & M_FW_RI_RES_WR_IQPCIECH)
473
474#define S_FW_RI_RES_WR_IQDCAEN 11
475#define M_FW_RI_RES_WR_IQDCAEN 0x1
476#define V_FW_RI_RES_WR_IQDCAEN(x) ((x) << S_FW_RI_RES_WR_IQDCAEN)
477#define G_FW_RI_RES_WR_IQDCAEN(x) \
478 (((x) >> S_FW_RI_RES_WR_IQDCAEN) & M_FW_RI_RES_WR_IQDCAEN)
479#define F_FW_RI_RES_WR_IQDCAEN V_FW_RI_RES_WR_IQDCAEN(1U)
480
481#define S_FW_RI_RES_WR_IQDCACPU 6
482#define M_FW_RI_RES_WR_IQDCACPU 0x1f
483#define V_FW_RI_RES_WR_IQDCACPU(x) ((x) << S_FW_RI_RES_WR_IQDCACPU)
484#define G_FW_RI_RES_WR_IQDCACPU(x) \
485 (((x) >> S_FW_RI_RES_WR_IQDCACPU) & M_FW_RI_RES_WR_IQDCACPU)
486
487#define S_FW_RI_RES_WR_IQINTCNTTHRESH 4
488#define M_FW_RI_RES_WR_IQINTCNTTHRESH 0x3
489#define V_FW_RI_RES_WR_IQINTCNTTHRESH(x) \
490 ((x) << S_FW_RI_RES_WR_IQINTCNTTHRESH)
491#define G_FW_RI_RES_WR_IQINTCNTTHRESH(x) \
492 (((x) >> S_FW_RI_RES_WR_IQINTCNTTHRESH) & M_FW_RI_RES_WR_IQINTCNTTHRESH)
493
494#define S_FW_RI_RES_WR_IQO 3
495#define M_FW_RI_RES_WR_IQO 0x1
496#define V_FW_RI_RES_WR_IQO(x) ((x) << S_FW_RI_RES_WR_IQO)
497#define G_FW_RI_RES_WR_IQO(x) \
498 (((x) >> S_FW_RI_RES_WR_IQO) & M_FW_RI_RES_WR_IQO)
499#define F_FW_RI_RES_WR_IQO V_FW_RI_RES_WR_IQO(1U)
500
501#define S_FW_RI_RES_WR_IQCPRIO 2
502#define M_FW_RI_RES_WR_IQCPRIO 0x1
503#define V_FW_RI_RES_WR_IQCPRIO(x) ((x) << S_FW_RI_RES_WR_IQCPRIO)
504#define G_FW_RI_RES_WR_IQCPRIO(x) \
505 (((x) >> S_FW_RI_RES_WR_IQCPRIO) & M_FW_RI_RES_WR_IQCPRIO)
506#define F_FW_RI_RES_WR_IQCPRIO V_FW_RI_RES_WR_IQCPRIO(1U)
507
508#define S_FW_RI_RES_WR_IQESIZE 0
509#define M_FW_RI_RES_WR_IQESIZE 0x3
510#define V_FW_RI_RES_WR_IQESIZE(x) ((x) << S_FW_RI_RES_WR_IQESIZE)
511#define G_FW_RI_RES_WR_IQESIZE(x) \
512 (((x) >> S_FW_RI_RES_WR_IQESIZE) & M_FW_RI_RES_WR_IQESIZE)
513
514#define S_FW_RI_RES_WR_IQNS 31
515#define M_FW_RI_RES_WR_IQNS 0x1
516#define V_FW_RI_RES_WR_IQNS(x) ((x) << S_FW_RI_RES_WR_IQNS)
517#define G_FW_RI_RES_WR_IQNS(x) \
518 (((x) >> S_FW_RI_RES_WR_IQNS) & M_FW_RI_RES_WR_IQNS)
519#define F_FW_RI_RES_WR_IQNS V_FW_RI_RES_WR_IQNS(1U)
520
521#define S_FW_RI_RES_WR_IQRO 30
522#define M_FW_RI_RES_WR_IQRO 0x1
523#define V_FW_RI_RES_WR_IQRO(x) ((x) << S_FW_RI_RES_WR_IQRO)
524#define G_FW_RI_RES_WR_IQRO(x) \
525 (((x) >> S_FW_RI_RES_WR_IQRO) & M_FW_RI_RES_WR_IQRO)
526#define F_FW_RI_RES_WR_IQRO V_FW_RI_RES_WR_IQRO(1U)
527
528struct fw_ri_rdma_write_wr {
529 __u8 opcode;
530 __u8 flags;
531 __u16 wrid;
532 __u8 r1[3];
533 __u8 len16;
534 __be64 r2;
535 __be32 plen;
536 __be32 stag_sink;
537 __be64 to_sink;
538#ifndef C99_NOT_SUPPORTED
539 union {
540 struct fw_ri_immd immd_src[0];
541 struct fw_ri_isgl isgl_src[0];
542 } u;
543#endif
544};
545
546struct fw_ri_send_wr {
547 __u8 opcode;
548 __u8 flags;
549 __u16 wrid;
550 __u8 r1[3];
551 __u8 len16;
552 __be32 sendop_pkd;
553 __be32 stag_inv;
554 __be32 plen;
555 __be32 r3;
556 __be64 r4;
557#ifndef C99_NOT_SUPPORTED
558 union {
559 struct fw_ri_immd immd_src[0];
560 struct fw_ri_isgl isgl_src[0];
561 } u;
562#endif
563};
564
565#define S_FW_RI_SEND_WR_SENDOP 0
566#define M_FW_RI_SEND_WR_SENDOP 0xf
567#define V_FW_RI_SEND_WR_SENDOP(x) ((x) << S_FW_RI_SEND_WR_SENDOP)
568#define G_FW_RI_SEND_WR_SENDOP(x) \
569 (((x) >> S_FW_RI_SEND_WR_SENDOP) & M_FW_RI_SEND_WR_SENDOP)
570
571struct fw_ri_rdma_read_wr {
572 __u8 opcode;
573 __u8 flags;
574 __u16 wrid;
575 __u8 r1[3];
576 __u8 len16;
577 __be64 r2;
578 __be32 stag_sink;
579 __be32 to_sink_hi;
580 __be32 to_sink_lo;
581 __be32 plen;
582 __be32 stag_src;
583 __be32 to_src_hi;
584 __be32 to_src_lo;
585 __be32 r5;
586};
587
588struct fw_ri_recv_wr {
589 __u8 opcode;
590 __u8 r1;
591 __u16 wrid;
592 __u8 r2[3];
593 __u8 len16;
594 struct fw_ri_isgl isgl;
595};
596
597struct fw_ri_bind_mw_wr {
598 __u8 opcode;
599 __u8 flags;
600 __u16 wrid;
601 __u8 r1[3];
602 __u8 len16;
603 __u8 qpbinde_to_dcacpu;
604 __u8 pgsz_shift;
605 __u8 addr_type;
606 __u8 mem_perms;
607 __be32 stag_mr;
608 __be32 stag_mw;
609 __be32 r3;
610 __be64 len_mw;
611 __be64 va_fbo;
612 __be64 r4;
613};
614
615#define S_FW_RI_BIND_MW_WR_QPBINDE 6
616#define M_FW_RI_BIND_MW_WR_QPBINDE 0x1
617#define V_FW_RI_BIND_MW_WR_QPBINDE(x) ((x) << S_FW_RI_BIND_MW_WR_QPBINDE)
618#define G_FW_RI_BIND_MW_WR_QPBINDE(x) \
619 (((x) >> S_FW_RI_BIND_MW_WR_QPBINDE) & M_FW_RI_BIND_MW_WR_QPBINDE)
620#define F_FW_RI_BIND_MW_WR_QPBINDE V_FW_RI_BIND_MW_WR_QPBINDE(1U)
621
622#define S_FW_RI_BIND_MW_WR_NS 5
623#define M_FW_RI_BIND_MW_WR_NS 0x1
624#define V_FW_RI_BIND_MW_WR_NS(x) ((x) << S_FW_RI_BIND_MW_WR_NS)
625#define G_FW_RI_BIND_MW_WR_NS(x) \
626 (((x) >> S_FW_RI_BIND_MW_WR_NS) & M_FW_RI_BIND_MW_WR_NS)
627#define F_FW_RI_BIND_MW_WR_NS V_FW_RI_BIND_MW_WR_NS(1U)
628
629#define S_FW_RI_BIND_MW_WR_DCACPU 0
630#define M_FW_RI_BIND_MW_WR_DCACPU 0x1f
631#define V_FW_RI_BIND_MW_WR_DCACPU(x) ((x) << S_FW_RI_BIND_MW_WR_DCACPU)
632#define G_FW_RI_BIND_MW_WR_DCACPU(x) \
633 (((x) >> S_FW_RI_BIND_MW_WR_DCACPU) & M_FW_RI_BIND_MW_WR_DCACPU)
634
635struct fw_ri_fr_nsmr_wr {
636 __u8 opcode;
637 __u8 flags;
638 __u16 wrid;
639 __u8 r1[3];
640 __u8 len16;
641 __u8 qpbinde_to_dcacpu;
642 __u8 pgsz_shift;
643 __u8 addr_type;
644 __u8 mem_perms;
645 __be32 stag;
646 __be32 len_hi;
647 __be32 len_lo;
648 __be32 va_hi;
649 __be32 va_lo_fbo;
650};
651
652#define S_FW_RI_FR_NSMR_WR_QPBINDE 6
653#define M_FW_RI_FR_NSMR_WR_QPBINDE 0x1
654#define V_FW_RI_FR_NSMR_WR_QPBINDE(x) ((x) << S_FW_RI_FR_NSMR_WR_QPBINDE)
655#define G_FW_RI_FR_NSMR_WR_QPBINDE(x) \
656 (((x) >> S_FW_RI_FR_NSMR_WR_QPBINDE) & M_FW_RI_FR_NSMR_WR_QPBINDE)
657#define F_FW_RI_FR_NSMR_WR_QPBINDE V_FW_RI_FR_NSMR_WR_QPBINDE(1U)
658
659#define S_FW_RI_FR_NSMR_WR_NS 5
660#define M_FW_RI_FR_NSMR_WR_NS 0x1
661#define V_FW_RI_FR_NSMR_WR_NS(x) ((x) << S_FW_RI_FR_NSMR_WR_NS)
662#define G_FW_RI_FR_NSMR_WR_NS(x) \
663 (((x) >> S_FW_RI_FR_NSMR_WR_NS) & M_FW_RI_FR_NSMR_WR_NS)
664#define F_FW_RI_FR_NSMR_WR_NS V_FW_RI_FR_NSMR_WR_NS(1U)
665
666#define S_FW_RI_FR_NSMR_WR_DCACPU 0
667#define M_FW_RI_FR_NSMR_WR_DCACPU 0x1f
668#define V_FW_RI_FR_NSMR_WR_DCACPU(x) ((x) << S_FW_RI_FR_NSMR_WR_DCACPU)
669#define G_FW_RI_FR_NSMR_WR_DCACPU(x) \
670 (((x) >> S_FW_RI_FR_NSMR_WR_DCACPU) & M_FW_RI_FR_NSMR_WR_DCACPU)
671
672struct fw_ri_inv_lstag_wr {
673 __u8 opcode;
674 __u8 flags;
675 __u16 wrid;
676 __u8 r1[3];
677 __u8 len16;
678 __be32 r2;
679 __be32 stag_inv;
680};
681
682enum fw_ri_type {
683 FW_RI_TYPE_INIT,
684 FW_RI_TYPE_FINI,
685 FW_RI_TYPE_TERMINATE
686};
687
688enum fw_ri_init_p2ptype {
689 FW_RI_INIT_P2PTYPE_RDMA_WRITE = FW_RI_RDMA_WRITE,
690 FW_RI_INIT_P2PTYPE_READ_REQ = FW_RI_READ_REQ,
691 FW_RI_INIT_P2PTYPE_SEND = FW_RI_SEND,
692 FW_RI_INIT_P2PTYPE_SEND_WITH_INV = FW_RI_SEND_WITH_INV,
693 FW_RI_INIT_P2PTYPE_SEND_WITH_SE = FW_RI_SEND_WITH_SE,
694 FW_RI_INIT_P2PTYPE_SEND_WITH_SE_INV = FW_RI_SEND_WITH_SE_INV,
695 FW_RI_INIT_P2PTYPE_DISABLED = 0xf,
696};
697
698struct fw_ri_wr {
699 __be32 op_compl;
700 __be32 flowid_len16;
701 __u64 cookie;
702 union fw_ri {
703 struct fw_ri_init {
704 __u8 type;
705 __u8 mpareqbit_p2ptype;
706 __u8 r4[2];
707 __u8 mpa_attrs;
708 __u8 qp_caps;
709 __be16 nrqe;
710 __be32 pdid;
711 __be32 qpid;
712 __be32 sq_eqid;
713 __be32 rq_eqid;
714 __be32 scqid;
715 __be32 rcqid;
716 __be32 ord_max;
717 __be32 ird_max;
718 __be32 iss;
719 __be32 irs;
720 __be32 hwrqsize;
721 __be32 hwrqaddr;
722 __be64 r5;
723 union fw_ri_init_p2p {
724 struct fw_ri_rdma_write_wr write;
725 struct fw_ri_rdma_read_wr read;
726 struct fw_ri_send_wr send;
727 } u;
728 } init;
729 struct fw_ri_fini {
730 __u8 type;
731 __u8 r3[7];
732 __be64 r4;
733 } fini;
734 struct fw_ri_terminate {
735 __u8 type;
736 __u8 r3[3];
737 __be32 immdlen;
738 __u8 termmsg[40];
739 } terminate;
740 } u;
741};
742
743#define S_FW_RI_WR_MPAREQBIT 7
744#define M_FW_RI_WR_MPAREQBIT 0x1
745#define V_FW_RI_WR_MPAREQBIT(x) ((x) << S_FW_RI_WR_MPAREQBIT)
746#define G_FW_RI_WR_MPAREQBIT(x) \
747 (((x) >> S_FW_RI_WR_MPAREQBIT) & M_FW_RI_WR_MPAREQBIT)
748#define F_FW_RI_WR_MPAREQBIT V_FW_RI_WR_MPAREQBIT(1U)
749
750#define S_FW_RI_WR_P2PTYPE 0
751#define M_FW_RI_WR_P2PTYPE 0xf
752#define V_FW_RI_WR_P2PTYPE(x) ((x) << S_FW_RI_WR_P2PTYPE)
753#define G_FW_RI_WR_P2PTYPE(x) \
754 (((x) >> S_FW_RI_WR_P2PTYPE) & M_FW_RI_WR_P2PTYPE)
755
756struct tcp_options {
757 __be16 mss;
758 __u8 wsf;
759#if defined(__LITTLE_ENDIAN_BITFIELD)
760 __u8:4;
761 __u8 unknown:1;
762 __u8:1;
763 __u8 sack:1;
764 __u8 tstamp:1;
765#else
766 __u8 tstamp:1;
767 __u8 sack:1;
768 __u8:1;
769 __u8 unknown:1;
770 __u8:4;
771#endif
772};
773
774struct cpl_pass_accept_req {
775 union opcode_tid ot;
776 __be16 rsvd;
777 __be16 len;
778 __be32 hdr_len;
779 __be16 vlan;
780 __be16 l2info;
781 __be32 tos_stid;
782 struct tcp_options tcpopt;
783};
784
785/* cpl_pass_accept_req.hdr_len fields */
786#define S_SYN_RX_CHAN 0
787#define M_SYN_RX_CHAN 0xF
788#define V_SYN_RX_CHAN(x) ((x) << S_SYN_RX_CHAN)
789#define G_SYN_RX_CHAN(x) (((x) >> S_SYN_RX_CHAN) & M_SYN_RX_CHAN)
790
791#define S_TCP_HDR_LEN 10
792#define M_TCP_HDR_LEN 0x3F
793#define V_TCP_HDR_LEN(x) ((x) << S_TCP_HDR_LEN)
794#define G_TCP_HDR_LEN(x) (((x) >> S_TCP_HDR_LEN) & M_TCP_HDR_LEN)
795
796#define S_IP_HDR_LEN 16
797#define M_IP_HDR_LEN 0x3FF
798#define V_IP_HDR_LEN(x) ((x) << S_IP_HDR_LEN)
799#define G_IP_HDR_LEN(x) (((x) >> S_IP_HDR_LEN) & M_IP_HDR_LEN)
800
801#define S_ETH_HDR_LEN 26
802#define M_ETH_HDR_LEN 0x1F
803#define V_ETH_HDR_LEN(x) ((x) << S_ETH_HDR_LEN)
804#define G_ETH_HDR_LEN(x) (((x) >> S_ETH_HDR_LEN) & M_ETH_HDR_LEN)
805
806/* cpl_pass_accept_req.l2info fields */
807#define S_SYN_MAC_IDX 0
808#define M_SYN_MAC_IDX 0x1FF
809#define V_SYN_MAC_IDX(x) ((x) << S_SYN_MAC_IDX)
810#define G_SYN_MAC_IDX(x) (((x) >> S_SYN_MAC_IDX) & M_SYN_MAC_IDX)
811
812#define S_SYN_XACT_MATCH 9
813#define V_SYN_XACT_MATCH(x) ((x) << S_SYN_XACT_MATCH)
814#define F_SYN_XACT_MATCH V_SYN_XACT_MATCH(1U)
815
816#define S_SYN_INTF 12
817#define M_SYN_INTF 0xF
818#define V_SYN_INTF(x) ((x) << S_SYN_INTF)
819#define G_SYN_INTF(x) (((x) >> S_SYN_INTF) & M_SYN_INTF)
820
821struct ulptx_idata {
822 __be32 cmd_more;
823 __be32 len;
824};
825
826#define S_ULPTX_NSGE 0
827#define M_ULPTX_NSGE 0xFFFF
828#define V_ULPTX_NSGE(x) ((x) << S_ULPTX_NSGE)
829#endif /* _T4FW_RI_API_H_ */
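
The register definitions above follow the Chelsio S_/M_/V_/G_ convention: one shift, one mask, one pack macro and one extract macro per hardware field. A minimal standalone sketch of how two of the fields defined above combine into the 32-bit hdr_len word of cpl_pass_accept_req; the macro definitions are copied from the header, the sample values are illustrative only, and wire byte order (the field is __be32 on the wire) is ignored for clarity.

#include <stdint.h>
#include <stdio.h>

/* Shift/mask/pack/extract macros as defined in the header above. */
#define S_TCP_HDR_LEN    10
#define M_TCP_HDR_LEN    0x3F
#define V_TCP_HDR_LEN(x) ((x) << S_TCP_HDR_LEN)
#define G_TCP_HDR_LEN(x) (((x) >> S_TCP_HDR_LEN) & M_TCP_HDR_LEN)

#define S_IP_HDR_LEN     16
#define M_IP_HDR_LEN     0x3FF
#define V_IP_HDR_LEN(x)  ((x) << S_IP_HDR_LEN)
#define G_IP_HDR_LEN(x)  (((x) >> S_IP_HDR_LEN) & M_IP_HDR_LEN)

int main(void)
{
	/* Pack a 20-byte TCP header and a 20-byte IP header length into
	 * one 32-bit word, then extract the fields again. */
	uint32_t hdr_len = V_TCP_HDR_LEN(20) | V_IP_HDR_LEN(20);

	printf("tcp=%u ip=%u\n",
	       (unsigned)G_TCP_HDR_LEN(hdr_len),
	       (unsigned)G_IP_HDR_LEN(hdr_len));
	return 0;
}
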
diff --git a/drivers/infiniband/hw/cxgb4/user.h b/drivers/infiniband/hw/cxgb4/user.h
new file mode 100644
index 000000000000..ed6414abde02
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/user.h
@@ -0,0 +1,66 @@
1/*
2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#ifndef __C4IW_USER_H__
33#define __C4IW_USER_H__
34
35#define C4IW_UVERBS_ABI_VERSION 1
36
37/*
38 * Make sure that all structs defined in this file remain laid out so
39 * that they pack the same way on 32-bit and 64-bit architectures (to
40 * avoid incompatibility between 32-bit userspace and 64-bit kernels).
41 * In particular do not use pointer types -- pass pointers in __u64
42 * instead.
43 */
44struct c4iw_create_cq_resp {
45 __u64 key;
46 __u64 gts_key;
47 __u64 memsize;
48 __u32 cqid;
49 __u32 size;
50 __u32 qid_mask;
51};
52
53struct c4iw_create_qp_resp {
54 __u64 sq_key;
55 __u64 rq_key;
56 __u64 sq_db_gts_key;
57 __u64 rq_db_gts_key;
58 __u64 sq_memsize;
59 __u64 rq_memsize;
60 __u32 sqid;
61 __u32 rqid;
62 __u32 sq_size;
63 __u32 rq_size;
64 __u32 qid_mask;
65};
66#endif
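
The layout rule stated in the comment above (identical packing on 32-bit and 64-bit, no pointer members) is the usual pattern for userspace ABI structures. A minimal sketch of the convention with hypothetical names: the userspace address is widened through uintptr_t into a 64-bit integer field, and explicit padding keeps the structure size fixed; __u64/__u32 in the kernel header correspond to uint64_t/uint32_t here.

#include <stdint.h>
#include <string.h>

/* Hypothetical request structure following the rule stated in user.h. */
struct example_reg_req {
	uint64_t buf_addr;	/* userspace buffer address, never a pointer type */
	uint32_t buf_len;
	uint32_t reserved;	/* explicit padding: 16 bytes on every ABI */
};

static void example_fill_req(struct example_reg_req *req,
			     void *buf, uint32_t len)
{
	memset(req, 0, sizeof(*req));
	req->buf_addr = (uintptr_t)buf;	/* widen via uintptr_t, no pointer member */
	req->buf_len = len;
}
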
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index cc2ddd29ac57..5a219a2fdf16 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -661,6 +661,14 @@ repoll:
661 wc->opcode = IB_WC_FETCH_ADD; 661 wc->opcode = IB_WC_FETCH_ADD;
662 wc->byte_len = 8; 662 wc->byte_len = 8;
663 break; 663 break;
664 case MLX4_OPCODE_MASKED_ATOMIC_CS:
665 wc->opcode = IB_WC_MASKED_COMP_SWAP;
666 wc->byte_len = 8;
667 break;
668 case MLX4_OPCODE_MASKED_ATOMIC_FA:
669 wc->opcode = IB_WC_MASKED_FETCH_ADD;
670 wc->byte_len = 8;
671 break;
664 case MLX4_OPCODE_BIND_MW: 672 case MLX4_OPCODE_BIND_MW:
665 wc->opcode = IB_WC_BIND_MW; 673 wc->opcode = IB_WC_BIND_MW;
666 break; 674 break;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 01f2a3f93355..39051417054c 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -139,6 +139,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
139 props->local_ca_ack_delay = dev->dev->caps.local_ca_ack_delay; 139 props->local_ca_ack_delay = dev->dev->caps.local_ca_ack_delay;
140 props->atomic_cap = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ? 140 props->atomic_cap = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
141 IB_ATOMIC_HCA : IB_ATOMIC_NONE; 141 IB_ATOMIC_HCA : IB_ATOMIC_NONE;
142 props->masked_atomic_cap = IB_ATOMIC_HCA;
142 props->max_pkeys = dev->dev->caps.pkey_table_len[1]; 143 props->max_pkeys = dev->dev->caps.pkey_table_len[1];
143 props->max_mcast_grp = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms; 144 props->max_mcast_grp = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
144 props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm; 145 props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 5643f4a8ffef..6a60827b2301 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -74,17 +74,19 @@ enum {
74}; 74};
75 75
76static const __be32 mlx4_ib_opcode[] = { 76static const __be32 mlx4_ib_opcode[] = {
77 [IB_WR_SEND] = cpu_to_be32(MLX4_OPCODE_SEND), 77 [IB_WR_SEND] = cpu_to_be32(MLX4_OPCODE_SEND),
78 [IB_WR_LSO] = cpu_to_be32(MLX4_OPCODE_LSO), 78 [IB_WR_LSO] = cpu_to_be32(MLX4_OPCODE_LSO),
79 [IB_WR_SEND_WITH_IMM] = cpu_to_be32(MLX4_OPCODE_SEND_IMM), 79 [IB_WR_SEND_WITH_IMM] = cpu_to_be32(MLX4_OPCODE_SEND_IMM),
80 [IB_WR_RDMA_WRITE] = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE), 80 [IB_WR_RDMA_WRITE] = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE),
81 [IB_WR_RDMA_WRITE_WITH_IMM] = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM), 81 [IB_WR_RDMA_WRITE_WITH_IMM] = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM),
82 [IB_WR_RDMA_READ] = cpu_to_be32(MLX4_OPCODE_RDMA_READ), 82 [IB_WR_RDMA_READ] = cpu_to_be32(MLX4_OPCODE_RDMA_READ),
83 [IB_WR_ATOMIC_CMP_AND_SWP] = cpu_to_be32(MLX4_OPCODE_ATOMIC_CS), 83 [IB_WR_ATOMIC_CMP_AND_SWP] = cpu_to_be32(MLX4_OPCODE_ATOMIC_CS),
84 [IB_WR_ATOMIC_FETCH_AND_ADD] = cpu_to_be32(MLX4_OPCODE_ATOMIC_FA), 84 [IB_WR_ATOMIC_FETCH_AND_ADD] = cpu_to_be32(MLX4_OPCODE_ATOMIC_FA),
85 [IB_WR_SEND_WITH_INV] = cpu_to_be32(MLX4_OPCODE_SEND_INVAL), 85 [IB_WR_SEND_WITH_INV] = cpu_to_be32(MLX4_OPCODE_SEND_INVAL),
86 [IB_WR_LOCAL_INV] = cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL), 86 [IB_WR_LOCAL_INV] = cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL),
87 [IB_WR_FAST_REG_MR] = cpu_to_be32(MLX4_OPCODE_FMR), 87 [IB_WR_FAST_REG_MR] = cpu_to_be32(MLX4_OPCODE_FMR),
88 [IB_WR_MASKED_ATOMIC_CMP_AND_SWP] = cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_CS),
89 [IB_WR_MASKED_ATOMIC_FETCH_AND_ADD] = cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_FA),
88}; 90};
89 91
90static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp) 92static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp)
@@ -1407,6 +1409,9 @@ static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg, struct ib_send_wr *
1407 if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) { 1409 if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
1408 aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap); 1410 aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
1409 aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add); 1411 aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
1412 } else if (wr->opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) {
1413 aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
1414 aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add_mask);
1410 } else { 1415 } else {
1411 aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add); 1416 aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
1412 aseg->compare = 0; 1417 aseg->compare = 0;
@@ -1414,6 +1419,15 @@ static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg, struct ib_send_wr *
1414 1419
1415} 1420}
1416 1421
1422static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg,
1423 struct ib_send_wr *wr)
1424{
1425 aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
1426 aseg->swap_add_mask = cpu_to_be64(wr->wr.atomic.swap_mask);
1427 aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
1428 aseg->compare_mask = cpu_to_be64(wr->wr.atomic.compare_add_mask);
1429}
1430
1417static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg, 1431static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
1418 struct ib_send_wr *wr) 1432 struct ib_send_wr *wr)
1419{ 1433{
@@ -1567,6 +1581,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1567 switch (wr->opcode) { 1581 switch (wr->opcode) {
1568 case IB_WR_ATOMIC_CMP_AND_SWP: 1582 case IB_WR_ATOMIC_CMP_AND_SWP:
1569 case IB_WR_ATOMIC_FETCH_AND_ADD: 1583 case IB_WR_ATOMIC_FETCH_AND_ADD:
1584 case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
1570 set_raddr_seg(wqe, wr->wr.atomic.remote_addr, 1585 set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
1571 wr->wr.atomic.rkey); 1586 wr->wr.atomic.rkey);
1572 wqe += sizeof (struct mlx4_wqe_raddr_seg); 1587 wqe += sizeof (struct mlx4_wqe_raddr_seg);
@@ -1579,6 +1594,19 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1579 1594
1580 break; 1595 break;
1581 1596
1597 case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
1598 set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
1599 wr->wr.atomic.rkey);
1600 wqe += sizeof (struct mlx4_wqe_raddr_seg);
1601
1602 set_masked_atomic_seg(wqe, wr);
1603 wqe += sizeof (struct mlx4_wqe_masked_atomic_seg);
1604
1605 size += (sizeof (struct mlx4_wqe_raddr_seg) +
1606 sizeof (struct mlx4_wqe_masked_atomic_seg)) / 16;
1607
1608 break;
1609
1582 case IB_WR_RDMA_READ: 1610 case IB_WR_RDMA_READ:
1583 case IB_WR_RDMA_WRITE: 1611 case IB_WR_RDMA_WRITE:
1584 case IB_WR_RDMA_WRITE_WITH_IMM: 1612 case IB_WR_RDMA_WRITE_WITH_IMM:
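
The two new work-request opcodes above expose masked atomics: broadly, compare_add_mask selects which bits take part in the compare and swap_mask selects which bits of the remote quadword are replaced from swap (for masked fetch-and-add the masks define field boundaries instead). A hedged kernel-side sketch of posting a masked compare-and-swap from a ULP, using only the ib_send_wr fields visible in this patch; all other names are hypothetical, and QP/SGE setup and completion handling are the caller's responsibility.

#include <linux/string.h>
#include <rdma/ib_verbs.h>

/* Sketch only: post one masked compare-and-swap against remote_addr/rkey.
 * result_sge must describe an 8-byte buffer that receives the prior value
 * (matching the wc->byte_len = 8 completion handling above). */
static int example_post_masked_cmp_swap(struct ib_qp *qp,
					struct ib_sge *result_sge,
					u64 remote_addr, u32 rkey,
					u64 compare, u64 compare_mask,
					u64 swap, u64 swap_mask)
{
	struct ib_send_wr wr, *bad_wr;

	memset(&wr, 0, sizeof(wr));
	wr.opcode     = IB_WR_MASKED_ATOMIC_CMP_AND_SWP;
	wr.send_flags = IB_SEND_SIGNALED;
	wr.sg_list    = result_sge;
	wr.num_sge    = 1;

	wr.wr.atomic.remote_addr      = remote_addr;
	wr.wr.atomic.rkey             = rkey;
	wr.wr.atomic.compare_add      = compare;
	wr.wr.atomic.compare_add_mask = compare_mask;
	wr.wr.atomic.swap             = swap;
	wr.wr.atomic.swap_mask        = swap_mask;

	return ib_post_send(qp, &wr, &bad_wr);
}
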
diff --git a/drivers/infiniband/hw/mthca/mthca_allocator.c b/drivers/infiniband/hw/mthca/mthca_allocator.c
index c5ccc2daab60..b4e0cf4e95cd 100644
--- a/drivers/infiniband/hw/mthca/mthca_allocator.c
+++ b/drivers/infiniband/hw/mthca/mthca_allocator.c
@@ -211,7 +211,7 @@ int mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct,
211 if (!buf->direct.buf) 211 if (!buf->direct.buf)
212 return -ENOMEM; 212 return -ENOMEM;
213 213
214 pci_unmap_addr_set(&buf->direct, mapping, t); 214 dma_unmap_addr_set(&buf->direct, mapping, t);
215 215
216 memset(buf->direct.buf, 0, size); 216 memset(buf->direct.buf, 0, size);
217 217
@@ -251,7 +251,7 @@ int mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct,
251 goto err_free; 251 goto err_free;
252 252
253 dma_list[i] = t; 253 dma_list[i] = t;
254 pci_unmap_addr_set(&buf->page_list[i], mapping, t); 254 dma_unmap_addr_set(&buf->page_list[i], mapping, t);
255 255
256 clear_page(buf->page_list[i].buf); 256 clear_page(buf->page_list[i].buf);
257 } 257 }
@@ -289,12 +289,12 @@ void mthca_buf_free(struct mthca_dev *dev, int size, union mthca_buf *buf,
289 289
290 if (is_direct) 290 if (is_direct)
291 dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf, 291 dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
292 pci_unmap_addr(&buf->direct, mapping)); 292 dma_unmap_addr(&buf->direct, mapping));
293 else { 293 else {
294 for (i = 0; i < (size + PAGE_SIZE - 1) / PAGE_SIZE; ++i) 294 for (i = 0; i < (size + PAGE_SIZE - 1) / PAGE_SIZE; ++i)
295 dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, 295 dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
296 buf->page_list[i].buf, 296 buf->page_list[i].buf,
297 pci_unmap_addr(&buf->page_list[i], 297 dma_unmap_addr(&buf->page_list[i],
298 mapping)); 298 mapping));
299 kfree(buf->page_list); 299 kfree(buf->page_list);
300 } 300 }
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 9388164b6053..8e8c728aff88 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -504,7 +504,7 @@ static int mthca_create_eq(struct mthca_dev *dev,
504 goto err_out_free_pages; 504 goto err_out_free_pages;
505 505
506 dma_list[i] = t; 506 dma_list[i] = t;
507 pci_unmap_addr_set(&eq->page_list[i], mapping, t); 507 dma_unmap_addr_set(&eq->page_list[i], mapping, t);
508 508
509 clear_page(eq->page_list[i].buf); 509 clear_page(eq->page_list[i].buf);
510 } 510 }
@@ -579,7 +579,7 @@ static int mthca_create_eq(struct mthca_dev *dev,
579 if (eq->page_list[i].buf) 579 if (eq->page_list[i].buf)
580 dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, 580 dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
581 eq->page_list[i].buf, 581 eq->page_list[i].buf,
582 pci_unmap_addr(&eq->page_list[i], 582 dma_unmap_addr(&eq->page_list[i],
583 mapping)); 583 mapping));
584 584
585 mthca_free_mailbox(dev, mailbox); 585 mthca_free_mailbox(dev, mailbox);
@@ -629,7 +629,7 @@ static void mthca_free_eq(struct mthca_dev *dev,
629 for (i = 0; i < npages; ++i) 629 for (i = 0; i < npages; ++i)
630 pci_free_consistent(dev->pdev, PAGE_SIZE, 630 pci_free_consistent(dev->pdev, PAGE_SIZE,
631 eq->page_list[i].buf, 631 eq->page_list[i].buf,
632 pci_unmap_addr(&eq->page_list[i], mapping)); 632 dma_unmap_addr(&eq->page_list[i], mapping));
633 633
634 kfree(eq->page_list); 634 kfree(eq->page_list);
635 mthca_free_mailbox(dev, mailbox); 635 mthca_free_mailbox(dev, mailbox);
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h
index 90f4c4d2e983..596acc45569b 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.h
+++ b/drivers/infiniband/hw/mthca/mthca_provider.h
@@ -46,7 +46,7 @@
46 46
47struct mthca_buf_list { 47struct mthca_buf_list {
48 void *buf; 48 void *buf;
49 DECLARE_PCI_UNMAP_ADDR(mapping) 49 DEFINE_DMA_UNMAP_ADDR(mapping);
50}; 50};
51 51
52union mthca_buf { 52union mthca_buf {
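
The mthca hunks above are a mechanical rename from the PCI-specific pci_unmap_addr helpers to the generic DMA-API equivalents. mthca applies them to coherent buffers, but the canonical use is tracking a streaming mapping, where the stored handle compiles away on architectures that do not need unmap state. A minimal kernel-style sketch with hypothetical names:

#include <linux/errno.h>
#include <linux/dma-mapping.h>

struct example_rx_buf {
	void *data;			/* CPU address of the buffer */
	DEFINE_DMA_UNMAP_ADDR(mapping);	/* storage for the DMA handle */
};

static int example_map(struct device *dev, struct example_rx_buf *buf, size_t len)
{
	dma_addr_t dma = dma_map_single(dev, buf->data, len, DMA_FROM_DEVICE);

	if (dma_mapping_error(dev, dma))
		return -EIO;

	dma_unmap_addr_set(buf, mapping, dma);	/* remember it for unmap time */
	return 0;
}

static void example_unmap(struct device *dev, struct example_rx_buf *buf, size_t len)
{
	dma_unmap_single(dev, dma_unmap_addr(buf, mapping), len, DMA_FROM_DEVICE);
}
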
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index c36a3f514929..86acb7d57064 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -1297,7 +1297,7 @@ int nes_destroy_cqp(struct nes_device *nesdev)
1297/** 1297/**
1298 * nes_init_1g_phy 1298 * nes_init_1g_phy
1299 */ 1299 */
1300int nes_init_1g_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index) 1300static int nes_init_1g_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index)
1301{ 1301{
1302 u32 counter = 0; 1302 u32 counter = 0;
1303 u16 phy_data; 1303 u16 phy_data;
@@ -1351,7 +1351,7 @@ int nes_init_1g_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index)
1351/** 1351/**
1352 * nes_init_2025_phy 1352 * nes_init_2025_phy
1353 */ 1353 */
1354int nes_init_2025_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index) 1354static int nes_init_2025_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index)
1355{ 1355{
1356 u32 temp_phy_data = 0; 1356 u32 temp_phy_data = 0;
1357 u32 temp_phy_data2 = 0; 1357 u32 temp_phy_data2 = 0;
@@ -2458,7 +2458,6 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
2458 return; 2458 return;
2459 } 2459 }
2460 nesadapter->mac_sw_state[mac_number] = NES_MAC_SW_INTERRUPT; 2460 nesadapter->mac_sw_state[mac_number] = NES_MAC_SW_INTERRUPT;
2461 spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
2462 2461
2463 /* ack the MAC interrupt */ 2462 /* ack the MAC interrupt */
2464 mac_status = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (mac_index * 0x200)); 2463 mac_status = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (mac_index * 0x200));
@@ -2469,11 +2468,9 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
2469 2468
2470 if (mac_status & (NES_MAC_INT_LINK_STAT_CHG | NES_MAC_INT_XGMII_EXT)) { 2469 if (mac_status & (NES_MAC_INT_LINK_STAT_CHG | NES_MAC_INT_XGMII_EXT)) {
2471 nesdev->link_status_interrupts++; 2470 nesdev->link_status_interrupts++;
2472 if (0 == (++nesadapter->link_interrupt_count[mac_index] % ((u16)NES_MAX_LINK_INTERRUPTS))) { 2471 if (0 == (++nesadapter->link_interrupt_count[mac_index] % ((u16)NES_MAX_LINK_INTERRUPTS)))
2473 spin_lock_irqsave(&nesadapter->phy_lock, flags);
2474 nes_reset_link(nesdev, mac_index); 2472 nes_reset_link(nesdev, mac_index);
2475 spin_unlock_irqrestore(&nesadapter->phy_lock, flags); 2473
2476 }
2477 /* read the PHY interrupt status register */ 2474 /* read the PHY interrupt status register */
2478 if ((nesadapter->OneG_Mode) && 2475 if ((nesadapter->OneG_Mode) &&
2479 (nesadapter->phy_type[mac_index] != NES_PHY_TYPE_PUMA_1G)) { 2476 (nesadapter->phy_type[mac_index] != NES_PHY_TYPE_PUMA_1G)) {
@@ -2587,6 +2584,7 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
2587 break; 2584 break;
2588 } 2585 }
2589 } 2586 }
2587 spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
2590 2588
2591 if (phy_data & 0x0004) { 2589 if (phy_data & 0x0004) {
2592 if (wide_ppm_offset && 2590 if (wide_ppm_offset &&
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index b7c813f4be43..9f4cadf9f851 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -1461,11 +1461,14 @@ static int nes_netdev_get_settings(struct net_device *netdev, struct ethtool_cmd
1461 et_cmd->transceiver = XCVR_INTERNAL; 1461 et_cmd->transceiver = XCVR_INTERNAL;
1462 et_cmd->phy_address = mac_index; 1462 et_cmd->phy_address = mac_index;
1463 } else { 1463 } else {
1464 unsigned long flags;
1464 et_cmd->supported = SUPPORTED_1000baseT_Full 1465 et_cmd->supported = SUPPORTED_1000baseT_Full
1465 | SUPPORTED_Autoneg; 1466 | SUPPORTED_Autoneg;
1466 et_cmd->advertising = ADVERTISED_1000baseT_Full 1467 et_cmd->advertising = ADVERTISED_1000baseT_Full
1467 | ADVERTISED_Autoneg; 1468 | ADVERTISED_Autoneg;
1469 spin_lock_irqsave(&nesadapter->phy_lock, flags);
1468 nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data); 1470 nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
1471 spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
1469 if (phy_data & 0x1000) 1472 if (phy_data & 0x1000)
1470 et_cmd->autoneg = AUTONEG_ENABLE; 1473 et_cmd->autoneg = AUTONEG_ENABLE;
1471 else 1474 else
@@ -1503,12 +1506,15 @@ static int nes_netdev_set_settings(struct net_device *netdev, struct ethtool_cmd
1503 struct nes_vnic *nesvnic = netdev_priv(netdev); 1506 struct nes_vnic *nesvnic = netdev_priv(netdev);
1504 struct nes_device *nesdev = nesvnic->nesdev; 1507 struct nes_device *nesdev = nesvnic->nesdev;
1505 struct nes_adapter *nesadapter = nesdev->nesadapter; 1508 struct nes_adapter *nesadapter = nesdev->nesadapter;
1506 u16 phy_data;
1507 1509
1508 if ((nesadapter->OneG_Mode) && 1510 if ((nesadapter->OneG_Mode) &&
1509 (nesadapter->phy_type[nesdev->mac_index] != NES_PHY_TYPE_PUMA_1G)) { 1511 (nesadapter->phy_type[nesdev->mac_index] != NES_PHY_TYPE_PUMA_1G)) {
1510 nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[nesdev->mac_index], 1512 unsigned long flags;
1511 &phy_data); 1513 u16 phy_data;
1514 u8 phy_index = nesadapter->phy_index[nesdev->mac_index];
1515
1516 spin_lock_irqsave(&nesadapter->phy_lock, flags);
1517 nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
1512 if (et_cmd->autoneg) { 1518 if (et_cmd->autoneg) {
1513 /* Turn on Full duplex, Autoneg, and restart autonegotiation */ 1519 /* Turn on Full duplex, Autoneg, and restart autonegotiation */
1514 phy_data |= 0x1300; 1520 phy_data |= 0x1300;
@@ -1516,8 +1522,8 @@ static int nes_netdev_set_settings(struct net_device *netdev, struct ethtool_cmd
1516 /* Turn off autoneg */ 1522 /* Turn off autoneg */
1517 phy_data &= ~0x1000; 1523 phy_data &= ~0x1000;
1518 } 1524 }
1519 nes_write_1G_phy_reg(nesdev, 0, nesadapter->phy_index[nesdev->mac_index], 1525 nes_write_1G_phy_reg(nesdev, 0, phy_index, phy_data);
1520 phy_data); 1526 spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
1521 } 1527 }
1522 1528
1523 return 0; 1529 return 0;
diff --git a/drivers/infiniband/hw/nes/nes_utils.c b/drivers/infiniband/hw/nes/nes_utils.c
index 186623d86959..a9f5dd272f1a 100644
--- a/drivers/infiniband/hw/nes/nes_utils.c
+++ b/drivers/infiniband/hw/nes/nes_utils.c
@@ -381,12 +381,8 @@ static u16 nes_read16_eeprom(void __iomem *addr, u16 offset)
381 */ 381 */
382void nes_write_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u16 data) 382void nes_write_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u16 data)
383{ 383{
384 struct nes_adapter *nesadapter = nesdev->nesadapter;
385 u32 u32temp; 384 u32 u32temp;
386 u32 counter; 385 u32 counter;
387 unsigned long flags;
388
389 spin_lock_irqsave(&nesadapter->phy_lock, flags);
390 386
391 nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL, 387 nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL,
392 0x50020000 | data | ((u32)phy_reg << 18) | ((u32)phy_addr << 23)); 388 0x50020000 | data | ((u32)phy_reg << 18) | ((u32)phy_addr << 23));
@@ -402,8 +398,6 @@ void nes_write_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u1
402 if (!(u32temp & 1)) 398 if (!(u32temp & 1))
403 nes_debug(NES_DBG_PHY, "Phy is not responding. interrupt status = 0x%X.\n", 399 nes_debug(NES_DBG_PHY, "Phy is not responding. interrupt status = 0x%X.\n",
404 u32temp); 400 u32temp);
405
406 spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
407} 401}
408 402
409 403
@@ -414,14 +408,11 @@ void nes_write_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u1
414 */ 408 */
415void nes_read_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u16 *data) 409void nes_read_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u16 *data)
416{ 410{
417 struct nes_adapter *nesadapter = nesdev->nesadapter;
418 u32 u32temp; 411 u32 u32temp;
419 u32 counter; 412 u32 counter;
420 unsigned long flags;
421 413
422 /* nes_debug(NES_DBG_PHY, "phy addr = %d, mac_index = %d\n", 414 /* nes_debug(NES_DBG_PHY, "phy addr = %d, mac_index = %d\n",
423 phy_addr, nesdev->mac_index); */ 415 phy_addr, nesdev->mac_index); */
424 spin_lock_irqsave(&nesadapter->phy_lock, flags);
425 416
426 nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL, 417 nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL,
427 0x60020000 | ((u32)phy_reg << 18) | ((u32)phy_addr << 23)); 418 0x60020000 | ((u32)phy_reg << 18) | ((u32)phy_addr << 23));
@@ -441,7 +432,6 @@ void nes_read_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u16
441 } else { 432 } else {
442 *data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); 433 *data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
443 } 434 }
444 spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
445} 435}
446 436
447 437
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index e54f312e4bdc..925e1f2d1d55 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -374,7 +374,7 @@ static int alloc_fast_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
374/* 374/*
375 * nes_alloc_fast_reg_mr 375 * nes_alloc_fast_reg_mr
376 */ 376 */
377struct ib_mr *nes_alloc_fast_reg_mr(struct ib_pd *ibpd, int max_page_list_len) 377static struct ib_mr *nes_alloc_fast_reg_mr(struct ib_pd *ibpd, int max_page_list_len)
378{ 378{
379 struct nes_pd *nespd = to_nespd(ibpd); 379 struct nes_pd *nespd = to_nespd(ibpd);
380 struct nes_vnic *nesvnic = to_nesvnic(ibpd->device); 380 struct nes_vnic *nesvnic = to_nesvnic(ibpd->device);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index d10b4ec68d28..40e858492f90 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -49,6 +49,25 @@ static u32 ipoib_get_rx_csum(struct net_device *dev)
49 !test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags); 49 !test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
50} 50}
51 51
52static int ipoib_set_tso(struct net_device *dev, u32 data)
53{
54 struct ipoib_dev_priv *priv = netdev_priv(dev);
55
56 if (data) {
57 if (!test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags) &&
58 (dev->features & NETIF_F_SG) &&
59 (priv->hca_caps & IB_DEVICE_UD_TSO)) {
60 dev->features |= NETIF_F_TSO;
61 } else {
62 ipoib_warn(priv, "can't set TSO on\n");
63 return -EOPNOTSUPP;
64 }
65 } else
66 dev->features &= ~NETIF_F_TSO;
67
68 return 0;
69}
70
52static int ipoib_get_coalesce(struct net_device *dev, 71static int ipoib_get_coalesce(struct net_device *dev,
53 struct ethtool_coalesce *coal) 72 struct ethtool_coalesce *coal)
54{ 73{
@@ -131,6 +150,7 @@ static void ipoib_get_ethtool_stats(struct net_device *dev,
131static const struct ethtool_ops ipoib_ethtool_ops = { 150static const struct ethtool_ops ipoib_ethtool_ops = {
132 .get_drvinfo = ipoib_get_drvinfo, 151 .get_drvinfo = ipoib_get_drvinfo,
133 .get_rx_csum = ipoib_get_rx_csum, 152 .get_rx_csum = ipoib_get_rx_csum,
153 .set_tso = ipoib_set_tso,
134 .get_coalesce = ipoib_get_coalesce, 154 .get_coalesce = ipoib_get_coalesce,
135 .set_coalesce = ipoib_set_coalesce, 155 .set_coalesce = ipoib_set_coalesce,
136 .get_flags = ethtool_op_get_flags, 156 .get_flags = ethtool_op_get_flags,
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 93399dff0c6f..7b2fc98e2f2b 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -325,7 +325,7 @@ iscsi_iser_conn_destroy(struct iscsi_cls_conn *cls_conn)
325 */ 325 */
326 if (ib_conn) { 326 if (ib_conn) {
327 ib_conn->iser_conn = NULL; 327 ib_conn->iser_conn = NULL;
328 iser_conn_put(ib_conn); 328 iser_conn_put(ib_conn, 1); /* deref iscsi/ib conn unbinding */
329 } 329 }
330} 330}
331 331
@@ -357,11 +357,12 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
357 /* binds the iSER connection retrieved from the previously 357 /* binds the iSER connection retrieved from the previously
358 * connected ep_handle to the iSCSI layer connection. exchanges 358 * connected ep_handle to the iSCSI layer connection. exchanges
359 * connection pointers */ 359 * connection pointers */
360 iser_err("binding iscsi conn %p to iser_conn %p\n",conn,ib_conn); 360 iser_err("binding iscsi/iser conn %p %p to ib_conn %p\n",
361 conn, conn->dd_data, ib_conn);
361 iser_conn = conn->dd_data; 362 iser_conn = conn->dd_data;
362 ib_conn->iser_conn = iser_conn; 363 ib_conn->iser_conn = iser_conn;
363 iser_conn->ib_conn = ib_conn; 364 iser_conn->ib_conn = ib_conn;
364 iser_conn_get(ib_conn); 365 iser_conn_get(ib_conn); /* ref iscsi/ib conn binding */
365 return 0; 366 return 0;
366} 367}
367 368
@@ -382,7 +383,7 @@ iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
382 * There is no unbind event so the stop callback 383 * There is no unbind event so the stop callback
383 * must release the ref from the bind. 384 * must release the ref from the bind.
384 */ 385 */
385 iser_conn_put(ib_conn); 386 iser_conn_put(ib_conn, 1); /* deref iscsi/ib conn unbinding */
386 } 387 }
387 iser_conn->ib_conn = NULL; 388 iser_conn->ib_conn = NULL;
388} 389}
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 036934cdcb92..f1df01567bb6 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -232,6 +232,7 @@ struct iser_device {
232 struct ib_cq *tx_cq; 232 struct ib_cq *tx_cq;
233 struct ib_mr *mr; 233 struct ib_mr *mr;
234 struct tasklet_struct cq_tasklet; 234 struct tasklet_struct cq_tasklet;
235 struct ib_event_handler event_handler;
235 struct list_head ig_list; /* entry in ig devices list */ 236 struct list_head ig_list; /* entry in ig devices list */
236 int refcount; 237 int refcount;
237}; 238};
@@ -246,7 +247,6 @@ struct iser_conn {
246 struct rdma_cm_id *cma_id; /* CMA ID */ 247 struct rdma_cm_id *cma_id; /* CMA ID */
247 struct ib_qp *qp; /* QP */ 248 struct ib_qp *qp; /* QP */
248 struct ib_fmr_pool *fmr_pool; /* pool of IB FMRs */ 249 struct ib_fmr_pool *fmr_pool; /* pool of IB FMRs */
249 int disc_evt_flag; /* disconn event delivered */
250 wait_queue_head_t wait; /* waitq for conn/disconn */ 250 wait_queue_head_t wait; /* waitq for conn/disconn */
251 int post_recv_buf_count; /* posted rx count */ 251 int post_recv_buf_count; /* posted rx count */
252 atomic_t post_send_buf_count; /* posted tx count */ 252 atomic_t post_send_buf_count; /* posted tx count */
@@ -320,7 +320,7 @@ void iser_conn_init(struct iser_conn *ib_conn);
320 320
321void iser_conn_get(struct iser_conn *ib_conn); 321void iser_conn_get(struct iser_conn *ib_conn);
322 322
323void iser_conn_put(struct iser_conn *ib_conn); 323int iser_conn_put(struct iser_conn *ib_conn, int destroy_cma_id_allowed);
324 324
325void iser_conn_terminate(struct iser_conn *ib_conn); 325void iser_conn_terminate(struct iser_conn *ib_conn);
326 326
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index b89d76b39a13..9876865732f7 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -54,6 +54,13 @@ static void iser_qp_event_callback(struct ib_event *cause, void *context)
54 iser_err("got qp event %d\n",cause->event); 54 iser_err("got qp event %d\n",cause->event);
55} 55}
56 56
57static void iser_event_handler(struct ib_event_handler *handler,
58 struct ib_event *event)
59{
60 iser_err("async event %d on device %s port %d\n", event->event,
61 event->device->name, event->element.port_num);
62}
63
57/** 64/**
58 * iser_create_device_ib_res - creates Protection Domain (PD), Completion 65 * iser_create_device_ib_res - creates Protection Domain (PD), Completion
59 * Queue (CQ), DMA Memory Region (DMA MR) with the device associated with 66 * Queue (CQ), DMA Memory Region (DMA MR) with the device associated with
@@ -96,8 +103,15 @@ static int iser_create_device_ib_res(struct iser_device *device)
96 if (IS_ERR(device->mr)) 103 if (IS_ERR(device->mr))
97 goto dma_mr_err; 104 goto dma_mr_err;
98 105
106 INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device,
107 iser_event_handler);
108 if (ib_register_event_handler(&device->event_handler))
109 goto handler_err;
110
99 return 0; 111 return 0;
100 112
113handler_err:
114 ib_dereg_mr(device->mr);
101dma_mr_err: 115dma_mr_err:
102 tasklet_kill(&device->cq_tasklet); 116 tasklet_kill(&device->cq_tasklet);
103cq_arm_err: 117cq_arm_err:
@@ -120,7 +134,7 @@ static void iser_free_device_ib_res(struct iser_device *device)
120 BUG_ON(device->mr == NULL); 134 BUG_ON(device->mr == NULL);
121 135
122 tasklet_kill(&device->cq_tasklet); 136 tasklet_kill(&device->cq_tasklet);
123 137 (void)ib_unregister_event_handler(&device->event_handler);
124 (void)ib_dereg_mr(device->mr); 138 (void)ib_dereg_mr(device->mr);
125 (void)ib_destroy_cq(device->tx_cq); 139 (void)ib_destroy_cq(device->tx_cq);
126 (void)ib_destroy_cq(device->rx_cq); 140 (void)ib_destroy_cq(device->rx_cq);
@@ -149,10 +163,8 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
149 device = ib_conn->device; 163 device = ib_conn->device;
150 164
151 ib_conn->login_buf = kmalloc(ISER_RX_LOGIN_SIZE, GFP_KERNEL); 165 ib_conn->login_buf = kmalloc(ISER_RX_LOGIN_SIZE, GFP_KERNEL);
152 if (!ib_conn->login_buf) { 166 if (!ib_conn->login_buf)
153 goto alloc_err; 167 goto out_err;
154 ret = -ENOMEM;
155 }
156 168
157 ib_conn->login_dma = ib_dma_map_single(ib_conn->device->ib_device, 169 ib_conn->login_dma = ib_dma_map_single(ib_conn->device->ib_device,
158 (void *)ib_conn->login_buf, ISER_RX_LOGIN_SIZE, 170 (void *)ib_conn->login_buf, ISER_RX_LOGIN_SIZE,
@@ -161,10 +173,9 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
161 ib_conn->page_vec = kmalloc(sizeof(struct iser_page_vec) + 173 ib_conn->page_vec = kmalloc(sizeof(struct iser_page_vec) +
162 (sizeof(u64) * (ISCSI_ISER_SG_TABLESIZE +1)), 174 (sizeof(u64) * (ISCSI_ISER_SG_TABLESIZE +1)),
163 GFP_KERNEL); 175 GFP_KERNEL);
164 if (!ib_conn->page_vec) { 176 if (!ib_conn->page_vec)
165 ret = -ENOMEM; 177 goto out_err;
166 goto alloc_err; 178
167 }
168 ib_conn->page_vec->pages = (u64 *) (ib_conn->page_vec + 1); 179 ib_conn->page_vec->pages = (u64 *) (ib_conn->page_vec + 1);
169 180
170 params.page_shift = SHIFT_4K; 181 params.page_shift = SHIFT_4K;
@@ -184,7 +195,8 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
184 ib_conn->fmr_pool = ib_create_fmr_pool(device->pd, &params); 195 ib_conn->fmr_pool = ib_create_fmr_pool(device->pd, &params);
185 if (IS_ERR(ib_conn->fmr_pool)) { 196 if (IS_ERR(ib_conn->fmr_pool)) {
186 ret = PTR_ERR(ib_conn->fmr_pool); 197 ret = PTR_ERR(ib_conn->fmr_pool);
187 goto fmr_pool_err; 198 ib_conn->fmr_pool = NULL;
199 goto out_err;
188 } 200 }
189 201
190 memset(&init_attr, 0, sizeof init_attr); 202 memset(&init_attr, 0, sizeof init_attr);
@@ -202,7 +214,7 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
202 214
203 ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr); 215 ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr);
204 if (ret) 216 if (ret)
205 goto qp_err; 217 goto out_err;
206 218
207 ib_conn->qp = ib_conn->cma_id->qp; 219 ib_conn->qp = ib_conn->cma_id->qp;
208 iser_err("setting conn %p cma_id %p: fmr_pool %p qp %p\n", 220 iser_err("setting conn %p cma_id %p: fmr_pool %p qp %p\n",
@@ -210,12 +222,7 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
210 ib_conn->fmr_pool, ib_conn->cma_id->qp); 222 ib_conn->fmr_pool, ib_conn->cma_id->qp);
211 return ret; 223 return ret;
212 224
213qp_err: 225out_err:
214 (void)ib_destroy_fmr_pool(ib_conn->fmr_pool);
215fmr_pool_err:
216 kfree(ib_conn->page_vec);
217 kfree(ib_conn->login_buf);
218alloc_err:
219 iser_err("unable to alloc mem or create resource, err %d\n", ret); 226 iser_err("unable to alloc mem or create resource, err %d\n", ret);
220 return ret; 227 return ret;
221} 228}
@@ -224,7 +231,7 @@ alloc_err:
224 * releases the FMR pool, QP and CMA ID objects, returns 0 on success, 231 * releases the FMR pool, QP and CMA ID objects, returns 0 on success,
225 * -1 on failure 232 * -1 on failure
226 */ 233 */
227static int iser_free_ib_conn_res(struct iser_conn *ib_conn) 234static int iser_free_ib_conn_res(struct iser_conn *ib_conn, int can_destroy_id)
228{ 235{
229 BUG_ON(ib_conn == NULL); 236 BUG_ON(ib_conn == NULL);
230 237
@@ -239,7 +246,8 @@ static int iser_free_ib_conn_res(struct iser_conn *ib_conn)
239 if (ib_conn->qp != NULL) 246 if (ib_conn->qp != NULL)
240 rdma_destroy_qp(ib_conn->cma_id); 247 rdma_destroy_qp(ib_conn->cma_id);
241 248
242 if (ib_conn->cma_id != NULL) 249 /* when called from the cma handler context, the caller arranges for the cma to destroy the id */
250 if (ib_conn->cma_id != NULL && can_destroy_id)
243 rdma_destroy_id(ib_conn->cma_id); 251 rdma_destroy_id(ib_conn->cma_id);
244 252
245 ib_conn->fmr_pool = NULL; 253 ib_conn->fmr_pool = NULL;
@@ -317,7 +325,7 @@ static int iser_conn_state_comp_exch(struct iser_conn *ib_conn,
317/** 325/**
318 * Frees all conn objects and deallocs conn descriptor 326 * Frees all conn objects and deallocs conn descriptor
319 */ 327 */
320static void iser_conn_release(struct iser_conn *ib_conn) 328static void iser_conn_release(struct iser_conn *ib_conn, int can_destroy_id)
321{ 329{
322 struct iser_device *device = ib_conn->device; 330 struct iser_device *device = ib_conn->device;
323 331
@@ -327,13 +335,11 @@ static void iser_conn_release(struct iser_conn *ib_conn)
327 list_del(&ib_conn->conn_list); 335 list_del(&ib_conn->conn_list);
328 mutex_unlock(&ig.connlist_mutex); 336 mutex_unlock(&ig.connlist_mutex);
329 iser_free_rx_descriptors(ib_conn); 337 iser_free_rx_descriptors(ib_conn);
330 iser_free_ib_conn_res(ib_conn); 338 iser_free_ib_conn_res(ib_conn, can_destroy_id);
331 ib_conn->device = NULL; 339 ib_conn->device = NULL;
332 /* on EVENT_ADDR_ERROR there's no device yet for this conn */ 340 /* on EVENT_ADDR_ERROR there's no device yet for this conn */
333 if (device != NULL) 341 if (device != NULL)
334 iser_device_try_release(device); 342 iser_device_try_release(device);
335 if (ib_conn->iser_conn)
336 ib_conn->iser_conn->ib_conn = NULL;
337 iscsi_destroy_endpoint(ib_conn->ep); 343 iscsi_destroy_endpoint(ib_conn->ep);
338} 344}
339 345
@@ -342,10 +348,13 @@ void iser_conn_get(struct iser_conn *ib_conn)
342 atomic_inc(&ib_conn->refcount); 348 atomic_inc(&ib_conn->refcount);
343} 349}
344 350
345void iser_conn_put(struct iser_conn *ib_conn) 351int iser_conn_put(struct iser_conn *ib_conn, int can_destroy_id)
346{ 352{
347 if (atomic_dec_and_test(&ib_conn->refcount)) 353 if (atomic_dec_and_test(&ib_conn->refcount)) {
348 iser_conn_release(ib_conn); 354 iser_conn_release(ib_conn, can_destroy_id);
355 return 1;
356 }
357 return 0;
349} 358}
350 359
351/** 360/**
@@ -369,19 +378,20 @@ void iser_conn_terminate(struct iser_conn *ib_conn)
369 wait_event_interruptible(ib_conn->wait, 378 wait_event_interruptible(ib_conn->wait,
370 ib_conn->state == ISER_CONN_DOWN); 379 ib_conn->state == ISER_CONN_DOWN);
371 380
372 iser_conn_put(ib_conn); 381 iser_conn_put(ib_conn, 1); /* deref ib conn deallocate */
373} 382}
374 383
375static void iser_connect_error(struct rdma_cm_id *cma_id) 384static int iser_connect_error(struct rdma_cm_id *cma_id)
376{ 385{
377 struct iser_conn *ib_conn; 386 struct iser_conn *ib_conn;
378 ib_conn = (struct iser_conn *)cma_id->context; 387 ib_conn = (struct iser_conn *)cma_id->context;
379 388
380 ib_conn->state = ISER_CONN_DOWN; 389 ib_conn->state = ISER_CONN_DOWN;
381 wake_up_interruptible(&ib_conn->wait); 390 wake_up_interruptible(&ib_conn->wait);
391 return iser_conn_put(ib_conn, 0); /* deref ib conn's cma id */
382} 392}
383 393
384static void iser_addr_handler(struct rdma_cm_id *cma_id) 394static int iser_addr_handler(struct rdma_cm_id *cma_id)
385{ 395{
386 struct iser_device *device; 396 struct iser_device *device;
387 struct iser_conn *ib_conn; 397 struct iser_conn *ib_conn;
@@ -390,8 +400,7 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id)
390 device = iser_device_find_by_ib_device(cma_id); 400 device = iser_device_find_by_ib_device(cma_id);
391 if (!device) { 401 if (!device) {
392 iser_err("device lookup/creation failed\n"); 402 iser_err("device lookup/creation failed\n");
393 iser_connect_error(cma_id); 403 return iser_connect_error(cma_id);
394 return;
395 } 404 }
396 405
397 ib_conn = (struct iser_conn *)cma_id->context; 406 ib_conn = (struct iser_conn *)cma_id->context;
@@ -400,11 +409,13 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id)
400 ret = rdma_resolve_route(cma_id, 1000); 409 ret = rdma_resolve_route(cma_id, 1000);
401 if (ret) { 410 if (ret) {
402 iser_err("resolve route failed: %d\n", ret); 411 iser_err("resolve route failed: %d\n", ret);
403 iser_connect_error(cma_id); 412 return iser_connect_error(cma_id);
404 } 413 }
414
415 return 0;
405} 416}
406 417
407static void iser_route_handler(struct rdma_cm_id *cma_id) 418static int iser_route_handler(struct rdma_cm_id *cma_id)
408{ 419{
409 struct rdma_conn_param conn_param; 420 struct rdma_conn_param conn_param;
410 int ret; 421 int ret;
@@ -425,9 +436,9 @@ static void iser_route_handler(struct rdma_cm_id *cma_id)
425 goto failure; 436 goto failure;
426 } 437 }
427 438
428 return; 439 return 0;
429failure: 440failure:
430 iser_connect_error(cma_id); 441 return iser_connect_error(cma_id);
431} 442}
432 443
433static void iser_connected_handler(struct rdma_cm_id *cma_id) 444static void iser_connected_handler(struct rdma_cm_id *cma_id)
@@ -439,12 +450,12 @@ static void iser_connected_handler(struct rdma_cm_id *cma_id)
439 wake_up_interruptible(&ib_conn->wait); 450 wake_up_interruptible(&ib_conn->wait);
440} 451}
441 452
442static void iser_disconnected_handler(struct rdma_cm_id *cma_id) 453static int iser_disconnected_handler(struct rdma_cm_id *cma_id)
443{ 454{
444 struct iser_conn *ib_conn; 455 struct iser_conn *ib_conn;
456 int ret;
445 457
446 ib_conn = (struct iser_conn *)cma_id->context; 458 ib_conn = (struct iser_conn *)cma_id->context;
447 ib_conn->disc_evt_flag = 1;
448 459
449 /* getting here when the state is UP means that the conn is being * 460 /* getting here when the state is UP means that the conn is being *
450 * terminated asynchronously from the iSCSI layer's perspective. */ 461 * terminated asynchronously from the iSCSI layer's perspective. */
@@ -459,20 +470,24 @@ static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
459 ib_conn->state = ISER_CONN_DOWN; 470 ib_conn->state = ISER_CONN_DOWN;
460 wake_up_interruptible(&ib_conn->wait); 471 wake_up_interruptible(&ib_conn->wait);
461 } 472 }
473
474 ret = iser_conn_put(ib_conn, 0); /* deref ib conn's cma id */
475 return ret;
462} 476}
463 477
464static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) 478static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
465{ 479{
466 int ret = 0; 480 int ret = 0;
467 481
468 iser_err("event %d conn %p id %p\n",event->event,cma_id->context,cma_id); 482 iser_err("event %d status %d conn %p id %p\n",
483 event->event, event->status, cma_id->context, cma_id);
469 484
470 switch (event->event) { 485 switch (event->event) {
471 case RDMA_CM_EVENT_ADDR_RESOLVED: 486 case RDMA_CM_EVENT_ADDR_RESOLVED:
472 iser_addr_handler(cma_id); 487 ret = iser_addr_handler(cma_id);
473 break; 488 break;
474 case RDMA_CM_EVENT_ROUTE_RESOLVED: 489 case RDMA_CM_EVENT_ROUTE_RESOLVED:
475 iser_route_handler(cma_id); 490 ret = iser_route_handler(cma_id);
476 break; 491 break;
477 case RDMA_CM_EVENT_ESTABLISHED: 492 case RDMA_CM_EVENT_ESTABLISHED:
478 iser_connected_handler(cma_id); 493 iser_connected_handler(cma_id);
@@ -482,13 +497,12 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
482 case RDMA_CM_EVENT_CONNECT_ERROR: 497 case RDMA_CM_EVENT_CONNECT_ERROR:
483 case RDMA_CM_EVENT_UNREACHABLE: 498 case RDMA_CM_EVENT_UNREACHABLE:
484 case RDMA_CM_EVENT_REJECTED: 499 case RDMA_CM_EVENT_REJECTED:
485 iser_err("event: %d, error: %d\n", event->event, event->status); 500 ret = iser_connect_error(cma_id);
486 iser_connect_error(cma_id);
487 break; 501 break;
488 case RDMA_CM_EVENT_DISCONNECTED: 502 case RDMA_CM_EVENT_DISCONNECTED:
489 case RDMA_CM_EVENT_DEVICE_REMOVAL: 503 case RDMA_CM_EVENT_DEVICE_REMOVAL:
490 case RDMA_CM_EVENT_ADDR_CHANGE: 504 case RDMA_CM_EVENT_ADDR_CHANGE:
491 iser_disconnected_handler(cma_id); 505 ret = iser_disconnected_handler(cma_id);
492 break; 506 break;
493 default: 507 default:
494 iser_err("Unexpected RDMA CM event (%d)\n", event->event); 508 iser_err("Unexpected RDMA CM event (%d)\n", event->event);
@@ -503,7 +517,7 @@ void iser_conn_init(struct iser_conn *ib_conn)
503 init_waitqueue_head(&ib_conn->wait); 517 init_waitqueue_head(&ib_conn->wait);
504 ib_conn->post_recv_buf_count = 0; 518 ib_conn->post_recv_buf_count = 0;
505 atomic_set(&ib_conn->post_send_buf_count, 0); 519 atomic_set(&ib_conn->post_send_buf_count, 0);
506 atomic_set(&ib_conn->refcount, 1); 520 atomic_set(&ib_conn->refcount, 1); /* ref ib conn allocation */
507 INIT_LIST_HEAD(&ib_conn->conn_list); 521 INIT_LIST_HEAD(&ib_conn->conn_list);
508 spin_lock_init(&ib_conn->lock); 522 spin_lock_init(&ib_conn->lock);
509} 523}
@@ -531,6 +545,7 @@ int iser_connect(struct iser_conn *ib_conn,
531 545
532 ib_conn->state = ISER_CONN_PENDING; 546 ib_conn->state = ISER_CONN_PENDING;
533 547
548 iser_conn_get(ib_conn); /* ref ib conn's cma id */
534 ib_conn->cma_id = rdma_create_id(iser_cma_handler, 549 ib_conn->cma_id = rdma_create_id(iser_cma_handler,
535 (void *)ib_conn, 550 (void *)ib_conn,
536 RDMA_PS_TCP); 551 RDMA_PS_TCP);
@@ -568,7 +583,7 @@ id_failure:
568addr_failure: 583addr_failure:
569 ib_conn->state = ISER_CONN_DOWN; 584 ib_conn->state = ISER_CONN_DOWN;
570connect_failure: 585connect_failure:
571 iser_conn_release(ib_conn); 586 iser_conn_release(ib_conn, 1);
572 return err; 587 return err;
573} 588}
574 589
@@ -737,12 +752,10 @@ static void iser_handle_comp_error(struct iser_tx_desc *desc,
737 iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn, 752 iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn,
738 ISCSI_ERR_CONN_FAILED); 753 ISCSI_ERR_CONN_FAILED);
739 754
740 /* complete the termination process if disconnect event was delivered * 755 /* no more non completed posts to the QP, complete the
741 * note there are no more non completed posts to the QP */ 756 * termination process w.o worrying on disconnect event */
742 if (ib_conn->disc_evt_flag) { 757 ib_conn->state = ISER_CONN_DOWN;
743 ib_conn->state = ISER_CONN_DOWN; 758 wake_up_interruptible(&ib_conn->wait);
744 wake_up_interruptible(&ib_conn->wait);
745 }
746 } 759 }
747} 760}
748 761
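
The iser changes above hang together around one rdma_cm rule: an event handler must not call rdma_destroy_id() on its own id, but returning a non-zero value from the handler makes the CM core destroy the id once the handler returns. That is why iser_connect() now takes an extra connection reference for the cma_id, and why the error and disconnect paths drop it with can_destroy_id == 0 and propagate the result as the handler's return value. A hedged sketch of the pattern with hypothetical names:

#include <rdma/rdma_cm.h>

/* Hypothetical connection object standing in for iser_conn. */
struct example_conn {
	int state;
	atomic_t refcount;
};
#define EXAMPLE_CONN_DOWN 0

/* Drop a reference; a real implementation would release the connection
 * (and, only when can_destroy_id is set, its cma_id) at refcount zero. */
static int example_conn_put(struct example_conn *conn, int can_destroy_id)
{
	return atomic_dec_and_test(&conn->refcount);
}

static int example_cma_handler(struct rdma_cm_id *id, struct rdma_cm_event *ev)
{
	struct example_conn *conn = id->context;

	switch (ev->event) {
	case RDMA_CM_EVENT_ADDR_ERROR:
	case RDMA_CM_EVENT_ROUTE_ERROR:
	case RDMA_CM_EVENT_REJECTED:
		conn->state = EXAMPLE_CONN_DOWN;
		/* Returning non-zero asks the CM to destroy the id after this
		 * handler returns, so rdma_destroy_id() is not called here. */
		return example_conn_put(conn, 0 /* can_destroy_id */);
	default:
		return 0;
	}
}
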
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index 7e18bcf05a66..46239e47a260 100644
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
@@ -59,11 +59,11 @@ static unsigned int get_time_pit(void)
59 unsigned long flags; 59 unsigned long flags;
60 unsigned int count; 60 unsigned int count;
61 61
62 spin_lock_irqsave(&i8253_lock, flags); 62 raw_spin_lock_irqsave(&i8253_lock, flags);
63 outb_p(0x00, 0x43); 63 outb_p(0x00, 0x43);
64 count = inb_p(0x40); 64 count = inb_p(0x40);
65 count |= inb_p(0x40) << 8; 65 count |= inb_p(0x40) << 8;
66 spin_unlock_irqrestore(&i8253_lock, flags); 66 raw_spin_unlock_irqrestore(&i8253_lock, flags);
67 67
68 return count; 68 return count;
69} 69}
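
The substitution above (and the identical one in analog.c further below) follows the tree-wide conversion of i8253_lock to raw_spinlock_t: on PREEMPT_RT an ordinary spinlock_t can become a sleeping lock, while a raw spinlock always keeps the section non-preemptible, which the multi-step latch-and-read of the PIT requires. A minimal sketch of declaring and using such a lock, with hypothetical port numbers:

#include <linux/spinlock.h>
#include <linux/io.h>

/* Hypothetical lock for a two-step port access that must stay atomic
 * even on PREEMPT_RT, where spinlock_t may sleep. */
static DEFINE_RAW_SPINLOCK(example_hw_lock);

#define EXAMPLE_CMD_PORT	0x180	/* hypothetical command port */
#define EXAMPLE_DATA_PORT	0x181	/* hypothetical data port */

static unsigned int example_read_counter(void)
{
	unsigned long flags;
	unsigned int count;

	raw_spin_lock_irqsave(&example_hw_lock, flags);
	outb_p(0x00, EXAMPLE_CMD_PORT);		/* latch the counter */
	count = inb_p(EXAMPLE_DATA_PORT);	/* low byte */
	count |= inb_p(EXAMPLE_DATA_PORT) << 8;	/* high byte */
	raw_spin_unlock_irqrestore(&example_hw_lock, flags);

	return count;
}
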
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index c52bec4d0530..423e0e6031ab 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -929,6 +929,24 @@ static const struct input_device_id joydev_ids[] = {
929 .evbit = { BIT_MASK(EV_ABS) }, 929 .evbit = { BIT_MASK(EV_ABS) },
930 .absbit = { BIT_MASK(ABS_THROTTLE) }, 930 .absbit = { BIT_MASK(ABS_THROTTLE) },
931 }, 931 },
932 {
933 .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
934 INPUT_DEVICE_ID_MATCH_KEYBIT,
935 .evbit = { BIT_MASK(EV_KEY) },
936 .keybit = {[BIT_WORD(BTN_JOYSTICK)] = BIT_MASK(BTN_JOYSTICK) },
937 },
938 {
939 .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
940 INPUT_DEVICE_ID_MATCH_KEYBIT,
941 .evbit = { BIT_MASK(EV_KEY) },
942 .keybit = { [BIT_WORD(BTN_GAMEPAD)] = BIT_MASK(BTN_GAMEPAD) },
943 },
944 {
945 .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
946 INPUT_DEVICE_ID_MATCH_KEYBIT,
947 .evbit = { BIT_MASK(EV_KEY) },
948 .keybit = { [BIT_WORD(BTN_TRIGGER_HAPPY)] = BIT_MASK(BTN_TRIGGER_HAPPY) },
949 },
932 { } /* Terminating entry */ 950 { } /* Terminating entry */
933}; 951};
934 952
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c
index 1c0b529c06aa..4afe0a3b4884 100644
--- a/drivers/input/joystick/analog.c
+++ b/drivers/input/joystick/analog.c
@@ -146,11 +146,11 @@ static unsigned int get_time_pit(void)
146 unsigned long flags; 146 unsigned long flags;
147 unsigned int count; 147 unsigned int count;
148 148
149 spin_lock_irqsave(&i8253_lock, flags); 149 raw_spin_lock_irqsave(&i8253_lock, flags);
150 outb_p(0x00, 0x43); 150 outb_p(0x00, 0x43);
151 count = inb_p(0x40); 151 count = inb_p(0x40);
152 count |= inb_p(0x40) << 8; 152 count |= inb_p(0x40) << 8;
153 spin_unlock_irqrestore(&i8253_lock, flags); 153 raw_spin_unlock_irqrestore(&i8253_lock, flags);
154 154
155 return count; 155 return count;
156} 156}
diff --git a/drivers/input/joystick/iforce/iforce-main.c b/drivers/input/joystick/iforce/iforce-main.c
index b1edd778639c..405febd94f24 100644
--- a/drivers/input/joystick/iforce/iforce-main.c
+++ b/drivers/input/joystick/iforce/iforce-main.c
@@ -54,6 +54,9 @@ static signed short btn_avb_wheel[] =
54static signed short abs_joystick[] = 54static signed short abs_joystick[] =
55{ ABS_X, ABS_Y, ABS_THROTTLE, ABS_HAT0X, ABS_HAT0Y, -1 }; 55{ ABS_X, ABS_Y, ABS_THROTTLE, ABS_HAT0X, ABS_HAT0Y, -1 };
56 56
57static signed short abs_joystick_rudder[] =
58{ ABS_X, ABS_Y, ABS_THROTTLE, ABS_RUDDER, ABS_HAT0X, ABS_HAT0Y, -1 };
59
57static signed short abs_avb_pegasus[] = 60static signed short abs_avb_pegasus[] =
58{ ABS_X, ABS_Y, ABS_THROTTLE, ABS_RUDDER, ABS_HAT0X, ABS_HAT0Y, 61{ ABS_X, ABS_Y, ABS_THROTTLE, ABS_RUDDER, ABS_HAT0X, ABS_HAT0Y,
59 ABS_HAT1X, ABS_HAT1Y, -1 }; 62 ABS_HAT1X, ABS_HAT1Y, -1 };
@@ -76,8 +79,9 @@ static struct iforce_device iforce_device[] = {
76 { 0x061c, 0xc0a4, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce }, //? 79 { 0x061c, 0xc0a4, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce }, //?
77 { 0x061c, 0xc084, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce }, 80 { 0x061c, 0xc084, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce },
78 { 0x06f8, 0x0001, "Guillemot Race Leader Force Feedback", btn_wheel, abs_wheel, ff_iforce }, //? 81 { 0x06f8, 0x0001, "Guillemot Race Leader Force Feedback", btn_wheel, abs_wheel, ff_iforce }, //?
82 { 0x06f8, 0x0001, "Guillemot Jet Leader Force Feedback", btn_joystick, abs_joystick_rudder, ff_iforce },
79 { 0x06f8, 0x0004, "Guillemot Force Feedback Racing Wheel", btn_wheel, abs_wheel, ff_iforce }, //? 83 { 0x06f8, 0x0004, "Guillemot Force Feedback Racing Wheel", btn_wheel, abs_wheel, ff_iforce }, //?
80 { 0x06f8, 0x0004, "Gullemot Jet Leader 3D", btn_joystick, abs_joystick, ff_iforce }, //? 84 { 0x06f8, 0xa302, "Guillemot Jet Leader 3D", btn_joystick, abs_joystick, ff_iforce }, //?
81 { 0x06d6, 0x29bc, "Trust Force Feedback Race Master", btn_wheel, abs_wheel, ff_iforce }, 85 { 0x06d6, 0x29bc, "Trust Force Feedback Race Master", btn_wheel, abs_wheel, ff_iforce },
82 { 0x0000, 0x0000, "Unknown I-Force Device [%04x:%04x]", btn_joystick, abs_joystick, ff_iforce } 86 { 0x0000, 0x0000, "Unknown I-Force Device [%04x:%04x]", btn_joystick, abs_joystick, ff_iforce }
83}; 87};
diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c
index b41303d3ec54..6c96631ae5d9 100644
--- a/drivers/input/joystick/iforce/iforce-usb.c
+++ b/drivers/input/joystick/iforce/iforce-usb.c
@@ -212,6 +212,7 @@ static struct usb_device_id iforce_usb_ids [] = {
212 { USB_DEVICE(0x061c, 0xc0a4) }, /* ACT LABS Force RS */ 212 { USB_DEVICE(0x061c, 0xc0a4) }, /* ACT LABS Force RS */
213 { USB_DEVICE(0x061c, 0xc084) }, /* ACT LABS Force RS */ 213 { USB_DEVICE(0x061c, 0xc084) }, /* ACT LABS Force RS */
214 { USB_DEVICE(0x06f8, 0x0001) }, /* Guillemot Race Leader Force Feedback */ 214 { USB_DEVICE(0x06f8, 0x0001) }, /* Guillemot Race Leader Force Feedback */
215 { USB_DEVICE(0x06f8, 0x0003) }, /* Guillemot Jet Leader Force Feedback */
215 { USB_DEVICE(0x06f8, 0x0004) }, /* Guillemot Force Feedback Racing Wheel */ 216 { USB_DEVICE(0x06f8, 0x0004) }, /* Guillemot Force Feedback Racing Wheel */
216 { USB_DEVICE(0x06f8, 0xa302) }, /* Guillemot Jet Leader 3D */ 217 { USB_DEVICE(0x06f8, 0xa302) }, /* Guillemot Jet Leader 3D */
217 { } /* Terminating entry */ 218 { } /* Terminating entry */
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 64c102355f53..a8293388d019 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -143,19 +143,6 @@ config KEYBOARD_BFIN
143 To compile this driver as a module, choose M here: the 143 To compile this driver as a module, choose M here: the
144 module will be called bf54x-keys. 144 module will be called bf54x-keys.
145 145
146config KEYBOARD_CORGI
147 tristate "Corgi keyboard (deprecated)"
148 depends on PXA_SHARPSL
149 help
150 Say Y here to enable the keyboard on the Sharp Zaurus SL-C7xx
151 series of PDAs.
152
153 This driver is now deprecated, use generic GPIO based matrix
154 keyboard driver instead.
155
156 To compile this driver as a module, choose M here: the
157 module will be called corgikbd.
158
159config KEYBOARD_LKKBD 146config KEYBOARD_LKKBD
160 tristate "DECstation/VAXstation LK201/LK401 keyboard" 147 tristate "DECstation/VAXstation LK201/LK401 keyboard"
161 select SERIO 148 select SERIO
@@ -339,19 +326,6 @@ config KEYBOARD_PXA930_ROTARY
339 To compile this driver as a module, choose M here: the 326 To compile this driver as a module, choose M here: the
340 module will be called pxa930_rotary. 327 module will be called pxa930_rotary.
341 328
342config KEYBOARD_SPITZ
343 tristate "Spitz keyboard (deprecated)"
344 depends on PXA_SHARPSL
345 help
346 Say Y here to enable the keyboard on the Sharp Zaurus SL-C1000,
347 SL-C3000 and Sl-C3100 series of PDAs.
348
349 This driver is now deprecated, use generic GPIO based matrix
350 keyboard driver instead.
351
352 To compile this driver as a module, choose M here: the
353 module will be called spitzkbd.
354
355config KEYBOARD_STOWAWAY 329config KEYBOARD_STOWAWAY
356 tristate "Stowaway keyboard" 330 tristate "Stowaway keyboard"
357 select SERIO 331 select SERIO
@@ -414,28 +388,6 @@ config KEYBOARD_TWL4030
414 To compile this driver as a module, choose M here: the 388 To compile this driver as a module, choose M here: the
415 module will be called twl4030_keypad. 389 module will be called twl4030_keypad.
416 390
417config KEYBOARD_TOSA
418 tristate "Tosa keyboard (deprecated)"
419 depends on MACH_TOSA
420 help
421 Say Y here to enable the keyboard on the Sharp Zaurus SL-6000x (Tosa)
422
423 This driver is now deprecated, use generic GPIO based matrix
424 keyboard driver instead.
425
426 To compile this driver as a module, choose M here: the
427 module will be called tosakbd.
428
429config KEYBOARD_TOSA_USE_EXT_KEYCODES
430 bool "Tosa keyboard: use extended keycodes"
431 depends on KEYBOARD_TOSA
432 help
433 Say Y here to enable the tosa keyboard driver to generate extended
434 (>= 127) keycodes. Be aware, that they can't be correctly interpreted
435 by either console keyboard driver or by Kdrive keybd driver.
436
437 Say Y only if you know, what you are doing!
438
439config KEYBOARD_XTKBD 391config KEYBOARD_XTKBD
440 tristate "XT keyboard" 392 tristate "XT keyboard"
441 select SERIO 393 select SERIO
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index 706c6b5ed5f4..9a74127e4d17 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -11,7 +11,6 @@ obj-$(CONFIG_KEYBOARD_AMIGA) += amikbd.o
11obj-$(CONFIG_KEYBOARD_ATARI) += atakbd.o 11obj-$(CONFIG_KEYBOARD_ATARI) += atakbd.o
12obj-$(CONFIG_KEYBOARD_ATKBD) += atkbd.o 12obj-$(CONFIG_KEYBOARD_ATKBD) += atkbd.o
13obj-$(CONFIG_KEYBOARD_BFIN) += bf54x-keys.o 13obj-$(CONFIG_KEYBOARD_BFIN) += bf54x-keys.o
14obj-$(CONFIG_KEYBOARD_CORGI) += corgikbd.o
15obj-$(CONFIG_KEYBOARD_DAVINCI) += davinci_keyscan.o 14obj-$(CONFIG_KEYBOARD_DAVINCI) += davinci_keyscan.o
16obj-$(CONFIG_KEYBOARD_EP93XX) += ep93xx_keypad.o 15obj-$(CONFIG_KEYBOARD_EP93XX) += ep93xx_keypad.o
17obj-$(CONFIG_KEYBOARD_GPIO) += gpio_keys.o 16obj-$(CONFIG_KEYBOARD_GPIO) += gpio_keys.o
@@ -33,10 +32,8 @@ obj-$(CONFIG_KEYBOARD_PXA27x) += pxa27x_keypad.o
33obj-$(CONFIG_KEYBOARD_PXA930_ROTARY) += pxa930_rotary.o 32obj-$(CONFIG_KEYBOARD_PXA930_ROTARY) += pxa930_rotary.o
34obj-$(CONFIG_KEYBOARD_QT2160) += qt2160.o 33obj-$(CONFIG_KEYBOARD_QT2160) += qt2160.o
35obj-$(CONFIG_KEYBOARD_SH_KEYSC) += sh_keysc.o 34obj-$(CONFIG_KEYBOARD_SH_KEYSC) += sh_keysc.o
36obj-$(CONFIG_KEYBOARD_SPITZ) += spitzkbd.o
37obj-$(CONFIG_KEYBOARD_STOWAWAY) += stowaway.o 35obj-$(CONFIG_KEYBOARD_STOWAWAY) += stowaway.o
38obj-$(CONFIG_KEYBOARD_SUNKBD) += sunkbd.o 36obj-$(CONFIG_KEYBOARD_SUNKBD) += sunkbd.o
39obj-$(CONFIG_KEYBOARD_TOSA) += tosakbd.o
40obj-$(CONFIG_KEYBOARD_TWL4030) += twl4030_keypad.o 37obj-$(CONFIG_KEYBOARD_TWL4030) += twl4030_keypad.o
41obj-$(CONFIG_KEYBOARD_XTKBD) += xtkbd.o 38obj-$(CONFIG_KEYBOARD_XTKBD) += xtkbd.o
42obj-$(CONFIG_KEYBOARD_W90P910) += w90p910_keypad.o 39obj-$(CONFIG_KEYBOARD_W90P910) += w90p910_keypad.o
diff --git a/drivers/input/keyboard/corgikbd.c b/drivers/input/keyboard/corgikbd.c
deleted file mode 100644
index 634af6a8e6b3..000000000000
--- a/drivers/input/keyboard/corgikbd.c
+++ /dev/null
@@ -1,414 +0,0 @@
1/*
2 * Keyboard driver for Sharp Corgi models (SL-C7xx)
3 *
4 * Copyright (c) 2004-2005 Richard Purdie
5 *
6 * Based on xtkbd.c/locomkbd.c
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 */
13
14#include <linux/delay.h>
15#include <linux/platform_device.h>
16#include <linux/init.h>
17#include <linux/input.h>
18#include <linux/interrupt.h>
19#include <linux/jiffies.h>
20#include <linux/module.h>
21#include <linux/slab.h>
22
23#include <mach/corgi.h>
24#include <mach/pxa2xx-gpio.h>
25#include <asm/hardware/scoop.h>
26
27#define KB_ROWS 8
28#define KB_COLS 12
29#define KB_ROWMASK(r) (1 << (r))
30#define SCANCODE(r,c) ( ((r)<<4) + (c) + 1 )
31/* zero code, 124 scancodes */
32#define NR_SCANCODES ( SCANCODE(KB_ROWS-1,KB_COLS-1) +1 +1 )
33
34#define SCAN_INTERVAL (50) /* ms */
35#define HINGE_SCAN_INTERVAL (250) /* ms */
36
37#define CORGI_KEY_CALENDER KEY_F1
38#define CORGI_KEY_ADDRESS KEY_F2
39#define CORGI_KEY_FN KEY_F3
40#define CORGI_KEY_CANCEL KEY_F4
41#define CORGI_KEY_OFF KEY_SUSPEND
42#define CORGI_KEY_EXOK KEY_F5
43#define CORGI_KEY_EXCANCEL KEY_F6
44#define CORGI_KEY_EXJOGDOWN KEY_F7
45#define CORGI_KEY_EXJOGUP KEY_F8
46#define CORGI_KEY_JAP1 KEY_LEFTCTRL
47#define CORGI_KEY_JAP2 KEY_LEFTALT
48#define CORGI_KEY_MAIL KEY_F10
49#define CORGI_KEY_OK KEY_F11
50#define CORGI_KEY_MENU KEY_F12
51
52static unsigned char corgikbd_keycode[NR_SCANCODES] = {
53 0, /* 0 */
54 0, KEY_1, KEY_3, KEY_5, KEY_6, KEY_7, KEY_9, KEY_0, KEY_BACKSPACE, 0, 0, 0, 0, 0, 0, 0, /* 1-16 */
55 0, KEY_2, KEY_4, KEY_R, KEY_Y, KEY_8, KEY_I, KEY_O, KEY_P, 0, 0, 0, 0, 0, 0, 0, /* 17-32 */
56 KEY_TAB, KEY_Q, KEY_E, KEY_T, KEY_G, KEY_U, KEY_J, KEY_K, 0, 0, 0, 0, 0, 0, 0, 0, /* 33-48 */
57 CORGI_KEY_CALENDER, KEY_W, KEY_S, KEY_F, KEY_V, KEY_H, KEY_M, KEY_L, 0, KEY_RIGHTSHIFT, 0, 0, 0, 0, 0, 0, /* 49-64 */
58 CORGI_KEY_ADDRESS, KEY_A, KEY_D, KEY_C, KEY_B, KEY_N, KEY_DOT, 0, KEY_ENTER, 0, KEY_LEFTSHIFT, 0, 0, 0, 0, 0, /* 65-80 */
59 CORGI_KEY_MAIL, KEY_Z, KEY_X, KEY_MINUS, KEY_SPACE, KEY_COMMA, 0, KEY_UP, 0, 0, 0, CORGI_KEY_FN, 0, 0, 0, 0, /* 81-96 */
60 KEY_SYSRQ, CORGI_KEY_JAP1, CORGI_KEY_JAP2, CORGI_KEY_CANCEL, CORGI_KEY_OK, CORGI_KEY_MENU, KEY_LEFT, KEY_DOWN, KEY_RIGHT, 0, 0, 0, 0, 0, 0, 0, /* 97-112 */
61 CORGI_KEY_OFF, CORGI_KEY_EXOK, CORGI_KEY_EXCANCEL, CORGI_KEY_EXJOGDOWN, CORGI_KEY_EXJOGUP, 0, 0, 0, 0, 0, 0, 0, /* 113-124 */
62};
63
64
65struct corgikbd {
66 unsigned char keycode[ARRAY_SIZE(corgikbd_keycode)];
67 struct input_dev *input;
68
69 spinlock_t lock;
70 struct timer_list timer;
71 struct timer_list htimer;
72
73 unsigned int suspended;
74 unsigned long suspend_jiffies;
75};
76
77#define KB_DISCHARGE_DELAY 10
78#define KB_ACTIVATE_DELAY 10
79
80/* Helper functions for reading the keyboard matrix
81 * Note: We should really be using the generic gpio functions to alter
82 * GPDR but it requires a function call per GPIO bit which is
83 * excessive when we need to access 12 bits at once, multiple times.
84 * These functions must be called within local_irq_save()/local_irq_restore()
85 * or similar.
86 */
87static inline void corgikbd_discharge_all(void)
88{
89 /* STROBE All HiZ */
90 GPCR2 = CORGI_GPIO_ALL_STROBE_BIT;
91 GPDR2 &= ~CORGI_GPIO_ALL_STROBE_BIT;
92}
93
94static inline void corgikbd_activate_all(void)
95{
96 /* STROBE ALL -> High */
97 GPSR2 = CORGI_GPIO_ALL_STROBE_BIT;
98 GPDR2 |= CORGI_GPIO_ALL_STROBE_BIT;
99
100 udelay(KB_DISCHARGE_DELAY);
101
102 /* Clear any interrupts we may have triggered when altering the GPIO lines */
103 GEDR1 = CORGI_GPIO_HIGH_SENSE_BIT;
104 GEDR2 = CORGI_GPIO_LOW_SENSE_BIT;
105}
106
107static inline void corgikbd_activate_col(int col)
108{
109 /* STROBE col -> High, not col -> HiZ */
110 GPSR2 = CORGI_GPIO_STROBE_BIT(col);
111 GPDR2 = (GPDR2 & ~CORGI_GPIO_ALL_STROBE_BIT) | CORGI_GPIO_STROBE_BIT(col);
112}
113
114static inline void corgikbd_reset_col(int col)
115{
116 /* STROBE col -> Low */
117 GPCR2 = CORGI_GPIO_STROBE_BIT(col);
118 /* STROBE col -> out, not col -> HiZ */
119 GPDR2 = (GPDR2 & ~CORGI_GPIO_ALL_STROBE_BIT) | CORGI_GPIO_STROBE_BIT(col);
120}
121
122#define GET_ROWS_STATUS(c) (((GPLR1 & CORGI_GPIO_HIGH_SENSE_BIT) >> CORGI_GPIO_HIGH_SENSE_RSHIFT) | ((GPLR2 & CORGI_GPIO_LOW_SENSE_BIT) << CORGI_GPIO_LOW_SENSE_LSHIFT))
123
124/*
125 * The corgi keyboard only generates interrupts when a key is pressed.
126 * When a key is pressed, we enable a timer which then scans the
127 * keyboard to detect when the key is released.
128 */
129
130/* Scan the hardware keyboard and push any changes up through the input layer */
131static void corgikbd_scankeyboard(struct corgikbd *corgikbd_data)
132{
133 unsigned int row, col, rowd;
134 unsigned long flags;
135 unsigned int num_pressed;
136
137 if (corgikbd_data->suspended)
138 return;
139
140 spin_lock_irqsave(&corgikbd_data->lock, flags);
141
142 num_pressed = 0;
143 for (col = 0; col < KB_COLS; col++) {
144 /*
145 * Discharge the output driver capacitatance
146 * in the keyboard matrix. (Yes it is significant..)
147 */
148
149 corgikbd_discharge_all();
150 udelay(KB_DISCHARGE_DELAY);
151
152 corgikbd_activate_col(col);
153 udelay(KB_ACTIVATE_DELAY);
154
155 rowd = GET_ROWS_STATUS(col);
156 for (row = 0; row < KB_ROWS; row++) {
157 unsigned int scancode, pressed;
158
159 scancode = SCANCODE(row, col);
160 pressed = rowd & KB_ROWMASK(row);
161
162 input_report_key(corgikbd_data->input, corgikbd_data->keycode[scancode], pressed);
163
164 if (pressed)
165 num_pressed++;
166
167 if (pressed && (corgikbd_data->keycode[scancode] == CORGI_KEY_OFF)
168 && time_after(jiffies, corgikbd_data->suspend_jiffies + HZ)) {
169 input_event(corgikbd_data->input, EV_PWR, CORGI_KEY_OFF, 1);
170 corgikbd_data->suspend_jiffies=jiffies;
171 }
172 }
173 corgikbd_reset_col(col);
174 }
175
176 corgikbd_activate_all();
177
178 input_sync(corgikbd_data->input);
179
180 /* if any keys are pressed, enable the timer */
181 if (num_pressed)
182 mod_timer(&corgikbd_data->timer, jiffies + msecs_to_jiffies(SCAN_INTERVAL));
183
184 spin_unlock_irqrestore(&corgikbd_data->lock, flags);
185}
186
187/*
188 * corgi keyboard interrupt handler.
189 */
190static irqreturn_t corgikbd_interrupt(int irq, void *dev_id)
191{
192 struct corgikbd *corgikbd_data = dev_id;
193
194 if (!timer_pending(&corgikbd_data->timer)) {
195 /** wait chattering delay **/
196 udelay(20);
197 corgikbd_scankeyboard(corgikbd_data);
198 }
199
200 return IRQ_HANDLED;
201}
202
203/*
204 * corgi timer checking for released keys
205 */
206static void corgikbd_timer_callback(unsigned long data)
207{
208 struct corgikbd *corgikbd_data = (struct corgikbd *) data;
209 corgikbd_scankeyboard(corgikbd_data);
210}
211
212/*
213 * The hinge switches generate no interrupt so they need to be
214 * monitored by a timer.
215 *
216 * We debounce the switches and pass them to the input system.
217 *
218 * gprr == 0x00 - Keyboard with Landscape Screen
219 * 0x08 - No Keyboard with Portrait Screen
220 * 0x0c - Keyboard and Screen Closed
221 */
222
223#define READ_GPIO_BIT(x) (GPLR(x) & GPIO_bit(x))
224#define HINGE_STABLE_COUNT 2
225static int sharpsl_hinge_state;
226static int hinge_count;
227
228static void corgikbd_hinge_timer(unsigned long data)
229{
230 struct corgikbd *corgikbd_data = (struct corgikbd *) data;
231 unsigned long gprr;
232 unsigned long flags;
233
234 gprr = read_scoop_reg(&corgiscoop_device.dev, SCOOP_GPRR) & (CORGI_SCP_SWA | CORGI_SCP_SWB);
235 gprr |= (READ_GPIO_BIT(CORGI_GPIO_AK_INT) != 0);
236 if (gprr != sharpsl_hinge_state) {
237 hinge_count = 0;
238 sharpsl_hinge_state = gprr;
239 } else if (hinge_count < HINGE_STABLE_COUNT) {
240 hinge_count++;
241 if (hinge_count >= HINGE_STABLE_COUNT) {
242 spin_lock_irqsave(&corgikbd_data->lock, flags);
243
244 input_report_switch(corgikbd_data->input, SW_LID, ((sharpsl_hinge_state & CORGI_SCP_SWA) != 0));
245 input_report_switch(corgikbd_data->input, SW_TABLET_MODE, ((sharpsl_hinge_state & CORGI_SCP_SWB) != 0));
246 input_report_switch(corgikbd_data->input, SW_HEADPHONE_INSERT, (READ_GPIO_BIT(CORGI_GPIO_AK_INT) != 0));
247 input_sync(corgikbd_data->input);
248
249 spin_unlock_irqrestore(&corgikbd_data->lock, flags);
250 }
251 }
252 mod_timer(&corgikbd_data->htimer, jiffies + msecs_to_jiffies(HINGE_SCAN_INTERVAL));
253}
254
255#ifdef CONFIG_PM
256static int corgikbd_suspend(struct platform_device *dev, pm_message_t state)
257{
258 int i;
259 struct corgikbd *corgikbd = platform_get_drvdata(dev);
260
261 corgikbd->suspended = 1;
262 /* strobe 0 is the power key so this can't be made an input for
263 powersaving therefore i = 1 */
264 for (i = 1; i < CORGI_KEY_STROBE_NUM; i++)
265 pxa_gpio_mode(CORGI_GPIO_KEY_STROBE(i) | GPIO_IN);
266
267 return 0;
268}
269
270static int corgikbd_resume(struct platform_device *dev)
271{
272 int i;
273 struct corgikbd *corgikbd = platform_get_drvdata(dev);
274
275 for (i = 1; i < CORGI_KEY_STROBE_NUM; i++)
276 pxa_gpio_mode(CORGI_GPIO_KEY_STROBE(i) | GPIO_OUT | GPIO_DFLT_HIGH);
277
278 /* Upon resume, ignore the suspend key for a short while */
279 corgikbd->suspend_jiffies=jiffies;
280 corgikbd->suspended = 0;
281
282 return 0;
283}
284#else
285#define corgikbd_suspend NULL
286#define corgikbd_resume NULL
287#endif
288
289static int __devinit corgikbd_probe(struct platform_device *pdev)
290{
291 struct corgikbd *corgikbd;
292 struct input_dev *input_dev;
293 int i, err = -ENOMEM;
294
295 corgikbd = kzalloc(sizeof(struct corgikbd), GFP_KERNEL);
296 input_dev = input_allocate_device();
297 if (!corgikbd || !input_dev)
298 goto fail;
299
300 platform_set_drvdata(pdev, corgikbd);
301
302 corgikbd->input = input_dev;
303 spin_lock_init(&corgikbd->lock);
304
305 /* Init Keyboard rescan timer */
306 init_timer(&corgikbd->timer);
307 corgikbd->timer.function = corgikbd_timer_callback;
308 corgikbd->timer.data = (unsigned long) corgikbd;
309
310 /* Init Hinge Timer */
311 init_timer(&corgikbd->htimer);
312 corgikbd->htimer.function = corgikbd_hinge_timer;
313 corgikbd->htimer.data = (unsigned long) corgikbd;
314
315 corgikbd->suspend_jiffies=jiffies;
316
317 memcpy(corgikbd->keycode, corgikbd_keycode, sizeof(corgikbd->keycode));
318
319 input_dev->name = "Corgi Keyboard";
320 input_dev->phys = "corgikbd/input0";
321 input_dev->id.bustype = BUS_HOST;
322 input_dev->id.vendor = 0x0001;
323 input_dev->id.product = 0x0001;
324 input_dev->id.version = 0x0100;
325 input_dev->dev.parent = &pdev->dev;
326
327 input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP) |
328 BIT_MASK(EV_PWR) | BIT_MASK(EV_SW);
329 input_dev->keycode = corgikbd->keycode;
330 input_dev->keycodesize = sizeof(unsigned char);
331 input_dev->keycodemax = ARRAY_SIZE(corgikbd_keycode);
332
333 for (i = 0; i < ARRAY_SIZE(corgikbd_keycode); i++)
334 set_bit(corgikbd->keycode[i], input_dev->keybit);
335 clear_bit(0, input_dev->keybit);
336 set_bit(SW_LID, input_dev->swbit);
337 set_bit(SW_TABLET_MODE, input_dev->swbit);
338 set_bit(SW_HEADPHONE_INSERT, input_dev->swbit);
339
340 err = input_register_device(corgikbd->input);
341 if (err)
342 goto fail;
343
344 mod_timer(&corgikbd->htimer, jiffies + msecs_to_jiffies(HINGE_SCAN_INTERVAL));
345
346 /* Setup sense interrupts - RisingEdge Detect, sense lines as inputs */
347 for (i = 0; i < CORGI_KEY_SENSE_NUM; i++) {
348 pxa_gpio_mode(CORGI_GPIO_KEY_SENSE(i) | GPIO_IN);
349 if (request_irq(CORGI_IRQ_GPIO_KEY_SENSE(i), corgikbd_interrupt,
350 IRQF_DISABLED | IRQF_TRIGGER_RISING,
351 "corgikbd", corgikbd))
352 printk(KERN_WARNING "corgikbd: Can't get IRQ: %d!\n", i);
353 }
354
355 /* Set Strobe lines as outputs - set high */
356 for (i = 0; i < CORGI_KEY_STROBE_NUM; i++)
357 pxa_gpio_mode(CORGI_GPIO_KEY_STROBE(i) | GPIO_OUT | GPIO_DFLT_HIGH);
358
359 /* Setup the headphone jack as an input */
360 pxa_gpio_mode(CORGI_GPIO_AK_INT | GPIO_IN);
361
362 return 0;
363
364 fail: input_free_device(input_dev);
365 kfree(corgikbd);
366 return err;
367}
368
369static int __devexit corgikbd_remove(struct platform_device *pdev)
370{
371 int i;
372 struct corgikbd *corgikbd = platform_get_drvdata(pdev);
373
374 for (i = 0; i < CORGI_KEY_SENSE_NUM; i++)
375 free_irq(CORGI_IRQ_GPIO_KEY_SENSE(i), corgikbd);
376
377 del_timer_sync(&corgikbd->htimer);
378 del_timer_sync(&corgikbd->timer);
379
380 input_unregister_device(corgikbd->input);
381
382 kfree(corgikbd);
383
384 return 0;
385}
386
387static struct platform_driver corgikbd_driver = {
388 .probe = corgikbd_probe,
389 .remove = __devexit_p(corgikbd_remove),
390 .suspend = corgikbd_suspend,
391 .resume = corgikbd_resume,
392 .driver = {
393 .name = "corgi-keyboard",
394 .owner = THIS_MODULE,
395 },
396};
397
398static int __init corgikbd_init(void)
399{
400 return platform_driver_register(&corgikbd_driver);
401}
402
403static void __exit corgikbd_exit(void)
404{
405 platform_driver_unregister(&corgikbd_driver);
406}
407
408module_init(corgikbd_init);
409module_exit(corgikbd_exit);
410
411MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
412MODULE_DESCRIPTION("Corgi Keyboard Driver");
413MODULE_LICENSE("GPL v2");
414MODULE_ALIAS("platform:corgi-keyboard");
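The corgikbd driver removed above scans its key matrix by discharging the strobe lines, driving one column at a time, waiting for the lines to settle, reading the row sense bits in a single register access, and mapping each hit through SCANCODE(row, col) into the input layer. The following is a minimal, self-contained userspace sketch of that scan loop, illustrative only and not part of the patch: the GPIO register accesses are replaced by a made-up sense[] array that stands in for the sense lines.

/*
 * Standalone simulation of the scan loop used by these drivers (not
 * kernel code): each column strobe is modelled as an index into a fake
 * sense-line bitmask array instead of the PXA GPIO registers.
 */
#include <stdio.h>

#define KB_ROWS        8
#define KB_COLS        12
#define KB_ROWMASK(r)  (1 << (r))
#define SCANCODE(r, c) (((r) << 4) + (c) + 1)   /* same packing as corgikbd */

/* Fake hardware: bit r of sense[c] is set when key (r, c) is held down. */
static unsigned int sense[KB_COLS];

static void scan_keyboard(void)
{
    unsigned int row, col, rowd;

    for (col = 0; col < KB_COLS; col++) {
        /*
         * The real driver discharges all strobes, drives this column
         * high, waits KB_ACTIVATE_DELAY, then reads the row sense
         * bits in one register access; here we just index the array.
         */
        rowd = sense[col];

        for (row = 0; row < KB_ROWS; row++) {
            if (rowd & KB_ROWMASK(row))
                printf("key down: scancode 0x%02x (row %u, col %u)\n",
                       SCANCODE(row, col), row, col);
        }
    }
}

int main(void)
{
    sense[1] |= KB_ROWMASK(3);  /* pretend the key at row 3, column 1 is pressed */
    scan_keyboard();            /* prints scancode 0x32 */
    return 0;
}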
diff --git a/drivers/input/keyboard/spitzkbd.c b/drivers/input/keyboard/spitzkbd.c
deleted file mode 100644
index 13967422658c..000000000000
--- a/drivers/input/keyboard/spitzkbd.c
+++ /dev/null
@@ -1,496 +0,0 @@
1/*
2 * Keyboard driver for Sharp Spitz, Borzoi and Akita (SL-Cxx00 series)
3 *
4 * Copyright (c) 2005 Richard Purdie
5 *
6 * Based on corgikbd.c
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 */
13
14#include <linux/delay.h>
15#include <linux/platform_device.h>
16#include <linux/init.h>
17#include <linux/input.h>
18#include <linux/interrupt.h>
19#include <linux/jiffies.h>
20#include <linux/module.h>
21#include <linux/slab.h>
22
23#include <mach/spitz.h>
24#include <mach/pxa2xx-gpio.h>
25
26#define KB_ROWS 7
27#define KB_COLS 11
28#define KB_ROWMASK(r) (1 << (r))
29#define SCANCODE(r,c) (((r)<<4) + (c) + 1)
30#define NR_SCANCODES ((KB_ROWS<<4) + 1)
31
32#define SCAN_INTERVAL (50) /* ms */
33#define HINGE_SCAN_INTERVAL (150) /* ms */
34
35#define SPITZ_KEY_CALENDER KEY_F1
36#define SPITZ_KEY_ADDRESS KEY_F2
37#define SPITZ_KEY_FN KEY_F3
38#define SPITZ_KEY_CANCEL KEY_F4
39#define SPITZ_KEY_EXOK KEY_F5
40#define SPITZ_KEY_EXCANCEL KEY_F6
41#define SPITZ_KEY_EXJOGDOWN KEY_F7
42#define SPITZ_KEY_EXJOGUP KEY_F8
43#define SPITZ_KEY_JAP1 KEY_LEFTALT
44#define SPITZ_KEY_JAP2 KEY_RIGHTCTRL
45#define SPITZ_KEY_SYNC KEY_F9
46#define SPITZ_KEY_MAIL KEY_F10
47#define SPITZ_KEY_OK KEY_F11
48#define SPITZ_KEY_MENU KEY_F12
49
50static unsigned char spitzkbd_keycode[NR_SCANCODES] = {
51 0, /* 0 */
52 KEY_LEFTCTRL, KEY_1, KEY_3, KEY_5, KEY_6, KEY_7, KEY_9, KEY_0, KEY_BACKSPACE, SPITZ_KEY_EXOK, SPITZ_KEY_EXCANCEL, 0, 0, 0, 0, 0, /* 1-16 */
53 0, KEY_2, KEY_4, KEY_R, KEY_Y, KEY_8, KEY_I, KEY_O, KEY_P, SPITZ_KEY_EXJOGDOWN, SPITZ_KEY_EXJOGUP, 0, 0, 0, 0, 0, /* 17-32 */
54 KEY_TAB, KEY_Q, KEY_E, KEY_T, KEY_G, KEY_U, KEY_J, KEY_K, 0, 0, 0, 0, 0, 0, 0, 0, /* 33-48 */
55 SPITZ_KEY_ADDRESS, KEY_W, KEY_S, KEY_F, KEY_V, KEY_H, KEY_M, KEY_L, 0, KEY_RIGHTSHIFT, 0, 0, 0, 0, 0, 0, /* 49-64 */
56 SPITZ_KEY_CALENDER, KEY_A, KEY_D, KEY_C, KEY_B, KEY_N, KEY_DOT, 0, KEY_ENTER, KEY_LEFTSHIFT, 0, 0, 0, 0, 0, 0, /* 65-80 */
57 SPITZ_KEY_MAIL, KEY_Z, KEY_X, KEY_MINUS, KEY_SPACE, KEY_COMMA, 0, KEY_UP, 0, 0, SPITZ_KEY_FN, 0, 0, 0, 0, 0, /* 81-96 */
58 KEY_SYSRQ, SPITZ_KEY_JAP1, SPITZ_KEY_JAP2, SPITZ_KEY_CANCEL, SPITZ_KEY_OK, SPITZ_KEY_MENU, KEY_LEFT, KEY_DOWN, KEY_RIGHT, 0, 0, 0, 0, 0, 0, 0 /* 97-112 */
59};
60
61static int spitz_strobes[] = {
62 SPITZ_GPIO_KEY_STROBE0,
63 SPITZ_GPIO_KEY_STROBE1,
64 SPITZ_GPIO_KEY_STROBE2,
65 SPITZ_GPIO_KEY_STROBE3,
66 SPITZ_GPIO_KEY_STROBE4,
67 SPITZ_GPIO_KEY_STROBE5,
68 SPITZ_GPIO_KEY_STROBE6,
69 SPITZ_GPIO_KEY_STROBE7,
70 SPITZ_GPIO_KEY_STROBE8,
71 SPITZ_GPIO_KEY_STROBE9,
72 SPITZ_GPIO_KEY_STROBE10,
73};
74
75static int spitz_senses[] = {
76 SPITZ_GPIO_KEY_SENSE0,
77 SPITZ_GPIO_KEY_SENSE1,
78 SPITZ_GPIO_KEY_SENSE2,
79 SPITZ_GPIO_KEY_SENSE3,
80 SPITZ_GPIO_KEY_SENSE4,
81 SPITZ_GPIO_KEY_SENSE5,
82 SPITZ_GPIO_KEY_SENSE6,
83};
84
85struct spitzkbd {
86 unsigned char keycode[ARRAY_SIZE(spitzkbd_keycode)];
87 struct input_dev *input;
88 char phys[32];
89
90 spinlock_t lock;
91 struct timer_list timer;
92 struct timer_list htimer;
93
94 unsigned int suspended;
95 unsigned long suspend_jiffies;
96};
97
98#define KB_DISCHARGE_DELAY 10
99#define KB_ACTIVATE_DELAY 10
100
101/* Helper functions for reading the keyboard matrix
102 * Note: We should really be using the generic gpio functions to alter
103 * GPDR but it requires a function call per GPIO bit which is
104 * excessive when we need to access 11 bits at once, multiple times.
105 * These functions must be called within local_irq_save()/local_irq_restore()
106 * or similar.
107 */
108static inline void spitzkbd_discharge_all(void)
109{
110 /* STROBE All HiZ */
111 GPCR0 = SPITZ_GPIO_G0_STROBE_BIT;
112 GPDR0 &= ~SPITZ_GPIO_G0_STROBE_BIT;
113 GPCR1 = SPITZ_GPIO_G1_STROBE_BIT;
114 GPDR1 &= ~SPITZ_GPIO_G1_STROBE_BIT;
115 GPCR2 = SPITZ_GPIO_G2_STROBE_BIT;
116 GPDR2 &= ~SPITZ_GPIO_G2_STROBE_BIT;
117 GPCR3 = SPITZ_GPIO_G3_STROBE_BIT;
118 GPDR3 &= ~SPITZ_GPIO_G3_STROBE_BIT;
119}
120
121static inline void spitzkbd_activate_all(void)
122{
123 /* STROBE ALL -> High */
124 GPSR0 = SPITZ_GPIO_G0_STROBE_BIT;
125 GPDR0 |= SPITZ_GPIO_G0_STROBE_BIT;
126 GPSR1 = SPITZ_GPIO_G1_STROBE_BIT;
127 GPDR1 |= SPITZ_GPIO_G1_STROBE_BIT;
128 GPSR2 = SPITZ_GPIO_G2_STROBE_BIT;
129 GPDR2 |= SPITZ_GPIO_G2_STROBE_BIT;
130 GPSR3 = SPITZ_GPIO_G3_STROBE_BIT;
131 GPDR3 |= SPITZ_GPIO_G3_STROBE_BIT;
132
133 udelay(KB_DISCHARGE_DELAY);
134
135 /* Clear any interrupts we may have triggered when altering the GPIO lines */
136 GEDR0 = SPITZ_GPIO_G0_SENSE_BIT;
137 GEDR1 = SPITZ_GPIO_G1_SENSE_BIT;
138 GEDR2 = SPITZ_GPIO_G2_SENSE_BIT;
139 GEDR3 = SPITZ_GPIO_G3_SENSE_BIT;
140}
141
142static inline void spitzkbd_activate_col(int col)
143{
144 int gpio = spitz_strobes[col];
145 GPDR0 &= ~SPITZ_GPIO_G0_STROBE_BIT;
146 GPDR1 &= ~SPITZ_GPIO_G1_STROBE_BIT;
147 GPDR2 &= ~SPITZ_GPIO_G2_STROBE_BIT;
148 GPDR3 &= ~SPITZ_GPIO_G3_STROBE_BIT;
149 GPSR(gpio) = GPIO_bit(gpio);
150 GPDR(gpio) |= GPIO_bit(gpio);
151}
152
153static inline void spitzkbd_reset_col(int col)
154{
155 int gpio = spitz_strobes[col];
156 GPDR0 &= ~SPITZ_GPIO_G0_STROBE_BIT;
157 GPDR1 &= ~SPITZ_GPIO_G1_STROBE_BIT;
158 GPDR2 &= ~SPITZ_GPIO_G2_STROBE_BIT;
159 GPDR3 &= ~SPITZ_GPIO_G3_STROBE_BIT;
160 GPCR(gpio) = GPIO_bit(gpio);
161 GPDR(gpio) |= GPIO_bit(gpio);
162}
163
164static inline int spitzkbd_get_row_status(int col)
165{
166 return ((GPLR0 >> 12) & 0x01) | ((GPLR0 >> 16) & 0x02)
167 | ((GPLR2 >> 25) & 0x04) | ((GPLR1 << 1) & 0x08)
168 | ((GPLR1 >> 0) & 0x10) | ((GPLR1 >> 1) & 0x60);
169}
170
171/*
172 * The spitz keyboard only generates interrupts when a key is pressed.
173 * When a key is pressed, we enable a timer which then scans the
174 * keyboard to detect when the key is released.
175 */
176
177/* Scan the hardware keyboard and push any changes up through the input layer */
178static void spitzkbd_scankeyboard(struct spitzkbd *spitzkbd_data)
179{
180 unsigned int row, col, rowd;
181 unsigned long flags;
182 unsigned int num_pressed, pwrkey = ((GPLR(SPITZ_GPIO_ON_KEY) & GPIO_bit(SPITZ_GPIO_ON_KEY)) != 0);
183
184 if (spitzkbd_data->suspended)
185 return;
186
187 spin_lock_irqsave(&spitzkbd_data->lock, flags);
188
189 num_pressed = 0;
190 for (col = 0; col < KB_COLS; col++) {
191 /*
192 * Discharge the output driver capacitatance
193 * in the keyboard matrix. (Yes it is significant..)
194 */
195
196 spitzkbd_discharge_all();
197 udelay(KB_DISCHARGE_DELAY);
198
199 spitzkbd_activate_col(col);
200 udelay(KB_ACTIVATE_DELAY);
201
202 rowd = spitzkbd_get_row_status(col);
203 for (row = 0; row < KB_ROWS; row++) {
204 unsigned int scancode, pressed;
205
206 scancode = SCANCODE(row, col);
207 pressed = rowd & KB_ROWMASK(row);
208
209 input_report_key(spitzkbd_data->input, spitzkbd_data->keycode[scancode], pressed);
210
211 if (pressed)
212 num_pressed++;
213 }
214 spitzkbd_reset_col(col);
215 }
216
217 spitzkbd_activate_all();
218
219 input_report_key(spitzkbd_data->input, SPITZ_KEY_SYNC, (GPLR(SPITZ_GPIO_SYNC) & GPIO_bit(SPITZ_GPIO_SYNC)) != 0 );
220 input_report_key(spitzkbd_data->input, KEY_SUSPEND, pwrkey);
221
222 if (pwrkey && time_after(jiffies, spitzkbd_data->suspend_jiffies + msecs_to_jiffies(1000))) {
223 input_event(spitzkbd_data->input, EV_PWR, KEY_SUSPEND, 1);
224 spitzkbd_data->suspend_jiffies = jiffies;
225 }
226
227 input_sync(spitzkbd_data->input);
228
229 /* if any keys are pressed, enable the timer */
230 if (num_pressed)
231 mod_timer(&spitzkbd_data->timer, jiffies + msecs_to_jiffies(SCAN_INTERVAL));
232
233 spin_unlock_irqrestore(&spitzkbd_data->lock, flags);
234}
235
236/*
237 * spitz keyboard interrupt handler.
238 */
239static irqreturn_t spitzkbd_interrupt(int irq, void *dev_id)
240{
241 struct spitzkbd *spitzkbd_data = dev_id;
242
243 if (!timer_pending(&spitzkbd_data->timer)) {
244 /** wait chattering delay **/
245 udelay(20);
246 spitzkbd_scankeyboard(spitzkbd_data);
247 }
248
249 return IRQ_HANDLED;
250}
251
252/*
253 * spitz timer checking for released keys
254 */
255static void spitzkbd_timer_callback(unsigned long data)
256{
257 struct spitzkbd *spitzkbd_data = (struct spitzkbd *) data;
258
259 spitzkbd_scankeyboard(spitzkbd_data);
260}
261
262/*
263 * The hinge switches generate an interrupt.
264 * We debounce the switches and pass them to the input system.
265 */
266
267static irqreturn_t spitzkbd_hinge_isr(int irq, void *dev_id)
268{
269 struct spitzkbd *spitzkbd_data = dev_id;
270
271 if (!timer_pending(&spitzkbd_data->htimer))
272 mod_timer(&spitzkbd_data->htimer, jiffies + msecs_to_jiffies(HINGE_SCAN_INTERVAL));
273
274 return IRQ_HANDLED;
275}
276
277#define HINGE_STABLE_COUNT 2
278static int sharpsl_hinge_state;
279static int hinge_count;
280
281static void spitzkbd_hinge_timer(unsigned long data)
282{
283 struct spitzkbd *spitzkbd_data = (struct spitzkbd *) data;
284 unsigned long state;
285 unsigned long flags;
286
287 state = GPLR(SPITZ_GPIO_SWA) & (GPIO_bit(SPITZ_GPIO_SWA)|GPIO_bit(SPITZ_GPIO_SWB));
288 state |= (GPLR(SPITZ_GPIO_AK_INT) & GPIO_bit(SPITZ_GPIO_AK_INT));
289 if (state != sharpsl_hinge_state) {
290 hinge_count = 0;
291 sharpsl_hinge_state = state;
292 } else if (hinge_count < HINGE_STABLE_COUNT) {
293 hinge_count++;
294 }
295
296 if (hinge_count >= HINGE_STABLE_COUNT) {
297 spin_lock_irqsave(&spitzkbd_data->lock, flags);
298
299 input_report_switch(spitzkbd_data->input, SW_LID, ((GPLR(SPITZ_GPIO_SWA) & GPIO_bit(SPITZ_GPIO_SWA)) != 0));
300 input_report_switch(spitzkbd_data->input, SW_TABLET_MODE, ((GPLR(SPITZ_GPIO_SWB) & GPIO_bit(SPITZ_GPIO_SWB)) != 0));
301 input_report_switch(spitzkbd_data->input, SW_HEADPHONE_INSERT, ((GPLR(SPITZ_GPIO_AK_INT) & GPIO_bit(SPITZ_GPIO_AK_INT)) != 0));
302 input_sync(spitzkbd_data->input);
303
304 spin_unlock_irqrestore(&spitzkbd_data->lock, flags);
305 } else {
306 mod_timer(&spitzkbd_data->htimer, jiffies + msecs_to_jiffies(HINGE_SCAN_INTERVAL));
307 }
308}
309
310#ifdef CONFIG_PM
311static int spitzkbd_suspend(struct platform_device *dev, pm_message_t state)
312{
313 int i;
314 struct spitzkbd *spitzkbd = platform_get_drvdata(dev);
315 spitzkbd->suspended = 1;
316
317 /* Set Strobe lines as inputs - *except* strobe line 0 leave this
318 enabled so we can detect a power button press for resume */
319 for (i = 1; i < SPITZ_KEY_STROBE_NUM; i++)
320 pxa_gpio_mode(spitz_strobes[i] | GPIO_IN);
321
322 return 0;
323}
324
325static int spitzkbd_resume(struct platform_device *dev)
326{
327 int i;
328 struct spitzkbd *spitzkbd = platform_get_drvdata(dev);
329
330 for (i = 0; i < SPITZ_KEY_STROBE_NUM; i++)
331 pxa_gpio_mode(spitz_strobes[i] | GPIO_OUT | GPIO_DFLT_HIGH);
332
333 /* Upon resume, ignore the suspend key for a short while */
334 spitzkbd->suspend_jiffies = jiffies;
335 spitzkbd->suspended = 0;
336
337 return 0;
338}
339#else
340#define spitzkbd_suspend NULL
341#define spitzkbd_resume NULL
342#endif
343
344static int __devinit spitzkbd_probe(struct platform_device *dev)
345{
346 struct spitzkbd *spitzkbd;
347 struct input_dev *input_dev;
348 int i, err = -ENOMEM;
349
350 spitzkbd = kzalloc(sizeof(struct spitzkbd), GFP_KERNEL);
351 input_dev = input_allocate_device();
352 if (!spitzkbd || !input_dev)
353 goto fail;
354
355 platform_set_drvdata(dev, spitzkbd);
356 strcpy(spitzkbd->phys, "spitzkbd/input0");
357
358 spin_lock_init(&spitzkbd->lock);
359
360 /* Init Keyboard rescan timer */
361 init_timer(&spitzkbd->timer);
362 spitzkbd->timer.function = spitzkbd_timer_callback;
363 spitzkbd->timer.data = (unsigned long) spitzkbd;
364
365 /* Init Hinge Timer */
366 init_timer(&spitzkbd->htimer);
367 spitzkbd->htimer.function = spitzkbd_hinge_timer;
368 spitzkbd->htimer.data = (unsigned long) spitzkbd;
369
370 spitzkbd->suspend_jiffies = jiffies;
371
372 spitzkbd->input = input_dev;
373
374 input_dev->name = "Spitz Keyboard";
375 input_dev->phys = spitzkbd->phys;
376 input_dev->dev.parent = &dev->dev;
377
378 input_dev->id.bustype = BUS_HOST;
379 input_dev->id.vendor = 0x0001;
380 input_dev->id.product = 0x0001;
381 input_dev->id.version = 0x0100;
382
383 input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP) |
384 BIT_MASK(EV_PWR) | BIT_MASK(EV_SW);
385 input_dev->keycode = spitzkbd->keycode;
386 input_dev->keycodesize = sizeof(unsigned char);
387 input_dev->keycodemax = ARRAY_SIZE(spitzkbd_keycode);
388
389 memcpy(spitzkbd->keycode, spitzkbd_keycode, sizeof(spitzkbd->keycode));
390 for (i = 0; i < ARRAY_SIZE(spitzkbd_keycode); i++)
391 set_bit(spitzkbd->keycode[i], input_dev->keybit);
392 clear_bit(0, input_dev->keybit);
393 set_bit(KEY_SUSPEND, input_dev->keybit);
394 set_bit(SW_LID, input_dev->swbit);
395 set_bit(SW_TABLET_MODE, input_dev->swbit);
396 set_bit(SW_HEADPHONE_INSERT, input_dev->swbit);
397
398 err = input_register_device(input_dev);
399 if (err)
400 goto fail;
401
402 mod_timer(&spitzkbd->htimer, jiffies + msecs_to_jiffies(HINGE_SCAN_INTERVAL));
403
404 /* Setup sense interrupts - RisingEdge Detect, sense lines as inputs */
405 for (i = 0; i < SPITZ_KEY_SENSE_NUM; i++) {
406 pxa_gpio_mode(spitz_senses[i] | GPIO_IN);
407 if (request_irq(IRQ_GPIO(spitz_senses[i]), spitzkbd_interrupt,
408 IRQF_DISABLED|IRQF_TRIGGER_RISING,
409 "Spitzkbd Sense", spitzkbd))
410 printk(KERN_WARNING "spitzkbd: Can't get Sense IRQ: %d!\n", i);
411 }
412
413 /* Set Strobe lines as outputs - set high */
414 for (i = 0; i < SPITZ_KEY_STROBE_NUM; i++)
415 pxa_gpio_mode(spitz_strobes[i] | GPIO_OUT | GPIO_DFLT_HIGH);
416
417 pxa_gpio_mode(SPITZ_GPIO_SYNC | GPIO_IN);
418 pxa_gpio_mode(SPITZ_GPIO_ON_KEY | GPIO_IN);
419 pxa_gpio_mode(SPITZ_GPIO_SWA | GPIO_IN);
420 pxa_gpio_mode(SPITZ_GPIO_SWB | GPIO_IN);
421
422 request_irq(SPITZ_IRQ_GPIO_SYNC, spitzkbd_interrupt,
423 IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
424 "Spitzkbd Sync", spitzkbd);
425 request_irq(SPITZ_IRQ_GPIO_ON_KEY, spitzkbd_interrupt,
426 IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
427 "Spitzkbd PwrOn", spitzkbd);
428 request_irq(SPITZ_IRQ_GPIO_SWA, spitzkbd_hinge_isr,
429 IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
430 "Spitzkbd SWA", spitzkbd);
431 request_irq(SPITZ_IRQ_GPIO_SWB, spitzkbd_hinge_isr,
432 IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
433 "Spitzkbd SWB", spitzkbd);
434 request_irq(SPITZ_IRQ_GPIO_AK_INT, spitzkbd_hinge_isr,
435 IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
436 "Spitzkbd HP", spitzkbd);
437
438 return 0;
439
440 fail: input_free_device(input_dev);
441 kfree(spitzkbd);
442 return err;
443}
444
445static int __devexit spitzkbd_remove(struct platform_device *dev)
446{
447 int i;
448 struct spitzkbd *spitzkbd = platform_get_drvdata(dev);
449
450 for (i = 0; i < SPITZ_KEY_SENSE_NUM; i++)
451 free_irq(IRQ_GPIO(spitz_senses[i]), spitzkbd);
452
453 free_irq(SPITZ_IRQ_GPIO_SYNC, spitzkbd);
454 free_irq(SPITZ_IRQ_GPIO_ON_KEY, spitzkbd);
455 free_irq(SPITZ_IRQ_GPIO_SWA, spitzkbd);
456 free_irq(SPITZ_IRQ_GPIO_SWB, spitzkbd);
457 free_irq(SPITZ_IRQ_GPIO_AK_INT, spitzkbd);
458
459 del_timer_sync(&spitzkbd->htimer);
460 del_timer_sync(&spitzkbd->timer);
461
462 input_unregister_device(spitzkbd->input);
463
464 kfree(spitzkbd);
465
466 return 0;
467}
468
469static struct platform_driver spitzkbd_driver = {
470 .probe = spitzkbd_probe,
471 .remove = __devexit_p(spitzkbd_remove),
472 .suspend = spitzkbd_suspend,
473 .resume = spitzkbd_resume,
474 .driver = {
475 .name = "spitz-keyboard",
476 .owner = THIS_MODULE,
477 },
478};
479
480static int __init spitzkbd_init(void)
481{
482 return platform_driver_register(&spitzkbd_driver);
483}
484
485static void __exit spitzkbd_exit(void)
486{
487 platform_driver_unregister(&spitzkbd_driver);
488}
489
490module_init(spitzkbd_init);
491module_exit(spitzkbd_exit);
492
493MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
494MODULE_DESCRIPTION("Spitz Keyboard Driver");
495MODULE_LICENSE("GPL v2");
496MODULE_ALIAS("platform:spitz-keyboard");
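The spitzkbd (and corgikbd) hinge handling above reports SW_LID, SW_TABLET_MODE and SW_HEADPHONE_INSERT only after the raw switch reading has stayed unchanged for HINGE_STABLE_COUNT consecutive timer samples. The sketch below is a simplified, standalone distillation of that counting debounce, not the driver code itself; debounce() and the sample array are invented for illustration.

#include <stdio.h>
#include <stdbool.h>

#define HINGE_STABLE_COUNT 2    /* same threshold the driver uses */

static int stable_state = -1;   /* last state that was reported */
static int candidate = -1;      /* state currently being counted */
static int count;

/* Feed one raw sample; returns true when a new stable state should be reported. */
static bool debounce(int sample)
{
    if (sample != candidate) {
        candidate = sample;     /* reading changed: restart the count */
        count = 0;
        return false;
    }

    if (count < HINGE_STABLE_COUNT)
        count++;

    if (count >= HINGE_STABLE_COUNT && sample != stable_state) {
        stable_state = sample;  /* report once, like input_report_switch() */
        return true;
    }

    return false;
}

int main(void)
{
    int samples[] = { 1, 0, 0, 0, 0 };  /* one glitch, then a steady reading */
    int i;

    for (i = 0; i < (int)(sizeof(samples) / sizeof(samples[0])); i++)
        if (debounce(samples[i]))
            printf("stable hinge state: %d\n", samples[i]);

    return 0;
}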
diff --git a/drivers/input/keyboard/tosakbd.c b/drivers/input/keyboard/tosakbd.c
deleted file mode 100644
index 3910f269cfc8..000000000000
--- a/drivers/input/keyboard/tosakbd.c
+++ /dev/null
@@ -1,431 +0,0 @@
1/*
2 * Keyboard driver for Sharp Tosa models (SL-6000x)
3 *
4 * Copyright (c) 2005 Dirk Opfer
5 * Copyright (c) 2007 Dmitry Baryshkov
6 *
7 * Based on xtkbd.c/locomkbd.c/corgikbd.c
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 */
14
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/platform_device.h>
18#include <linux/input.h>
19#include <linux/delay.h>
20#include <linux/interrupt.h>
21#include <linux/slab.h>
22
23#include <mach/gpio.h>
24#include <mach/tosa.h>
25
26#define KB_ROWMASK(r) (1 << (r))
27#define SCANCODE(r, c) (((r)<<4) + (c) + 1)
28#define NR_SCANCODES SCANCODE(TOSA_KEY_SENSE_NUM - 1, TOSA_KEY_STROBE_NUM - 1) + 1
29
30#define SCAN_INTERVAL (HZ/10)
31
32#define KB_DISCHARGE_DELAY 10
33#define KB_ACTIVATE_DELAY 10
34
35static unsigned short tosakbd_keycode[NR_SCANCODES] = {
 36 0,
 37 0, KEY_W, 0, 0, 0, KEY_K, KEY_BACKSPACE, KEY_P,
 38 0, 0, 0, 0, 0, 0, 0, 0,
 39 KEY_Q, KEY_E, KEY_T, KEY_Y, 0, KEY_O, KEY_I, KEY_COMMA,
 40 0, 0, 0, 0, 0, 0, 0, 0,
 41 KEY_A, KEY_D, KEY_G, KEY_U, 0, KEY_L, KEY_ENTER, KEY_DOT,
 42 0, 0, 0, 0, 0, 0, 0, 0,
 43 KEY_Z, KEY_C, KEY_V, KEY_J, TOSA_KEY_ADDRESSBOOK, TOSA_KEY_CANCEL, TOSA_KEY_CENTER, TOSA_KEY_OK,
 44 KEY_LEFTSHIFT, 0, 0, 0, 0, 0, 0, 0,
 45 KEY_S, KEY_R, KEY_B, KEY_N, TOSA_KEY_CALENDAR, TOSA_KEY_HOMEPAGE, KEY_LEFTCTRL, TOSA_KEY_LIGHT,
 46 0, KEY_RIGHTSHIFT, 0, 0, 0, 0, 0, 0,
 47 KEY_TAB, KEY_SLASH, KEY_H, KEY_M, TOSA_KEY_MENU, 0, KEY_UP, 0,
 48 0, 0, TOSA_KEY_FN, 0, 0, 0, 0, 0,
 49 KEY_X, KEY_F, KEY_SPACE, KEY_APOSTROPHE, TOSA_KEY_MAIL, KEY_LEFT, KEY_DOWN, KEY_RIGHT,
 50 0, 0, 0,
 51 };
52
53struct tosakbd {
54 unsigned short keycode[ARRAY_SIZE(tosakbd_keycode)];
55 struct input_dev *input;
56 bool suspended;
57 spinlock_t lock; /* protect kbd scanning */
58 struct timer_list timer;
59};
60
61
62/* Helper functions for reading the keyboard matrix
63 * Note: We should really be using the generic gpio functions to alter
64 * GPDR but it requires a function call per GPIO bit which is
65 * excessive when we need to access 12 bits at once, multiple times.
66 * These functions must be called within local_irq_save()/local_irq_restore()
67 * or similar.
68 */
69#define GET_ROWS_STATUS(c) ((GPLR2 & TOSA_GPIO_ALL_SENSE_BIT) >> TOSA_GPIO_ALL_SENSE_RSHIFT)
70
71static inline void tosakbd_discharge_all(void)
72{
73 /* STROBE All HiZ */
74 GPCR1 = TOSA_GPIO_HIGH_STROBE_BIT;
75 GPDR1 &= ~TOSA_GPIO_HIGH_STROBE_BIT;
76 GPCR2 = TOSA_GPIO_LOW_STROBE_BIT;
77 GPDR2 &= ~TOSA_GPIO_LOW_STROBE_BIT;
78}
79
80static inline void tosakbd_activate_all(void)
81{
82 /* STROBE ALL -> High */
83 GPSR1 = TOSA_GPIO_HIGH_STROBE_BIT;
84 GPDR1 |= TOSA_GPIO_HIGH_STROBE_BIT;
85 GPSR2 = TOSA_GPIO_LOW_STROBE_BIT;
86 GPDR2 |= TOSA_GPIO_LOW_STROBE_BIT;
87
88 udelay(KB_DISCHARGE_DELAY);
89
90 /* STATE CLEAR */
91 GEDR2 |= TOSA_GPIO_ALL_SENSE_BIT;
92}
93
94static inline void tosakbd_activate_col(int col)
95{
96 if (col <= 5) {
97 /* STROBE col -> High, not col -> HiZ */
98 GPSR1 = TOSA_GPIO_STROBE_BIT(col);
99 GPDR1 = (GPDR1 & ~TOSA_GPIO_HIGH_STROBE_BIT) | TOSA_GPIO_STROBE_BIT(col);
100 } else {
101 /* STROBE col -> High, not col -> HiZ */
102 GPSR2 = TOSA_GPIO_STROBE_BIT(col);
103 GPDR2 = (GPDR2 & ~TOSA_GPIO_LOW_STROBE_BIT) | TOSA_GPIO_STROBE_BIT(col);
104 }
105}
106
107static inline void tosakbd_reset_col(int col)
108{
109 if (col <= 5) {
110 /* STROBE col -> Low */
111 GPCR1 = TOSA_GPIO_STROBE_BIT(col);
112 /* STROBE col -> out, not col -> HiZ */
113 GPDR1 = (GPDR1 & ~TOSA_GPIO_HIGH_STROBE_BIT) | TOSA_GPIO_STROBE_BIT(col);
114 } else {
115 /* STROBE col -> Low */
116 GPCR2 = TOSA_GPIO_STROBE_BIT(col);
117 /* STROBE col -> out, not col -> HiZ */
118 GPDR2 = (GPDR2 & ~TOSA_GPIO_LOW_STROBE_BIT) | TOSA_GPIO_STROBE_BIT(col);
119 }
120}
121/*
122 * The tosa keyboard only generates interrupts when a key is pressed.
123 * So when a key is pressed, we enable a timer. This timer scans the
124 * keyboard, and this is how we detect when the key is released.
125 */
126
127/* Scan the hardware keyboard and push any changes up through the input layer */
128static void tosakbd_scankeyboard(struct platform_device *dev)
129{
130 struct tosakbd *tosakbd = platform_get_drvdata(dev);
131 unsigned int row, col, rowd;
132 unsigned long flags;
133 unsigned int num_pressed = 0;
134
135 spin_lock_irqsave(&tosakbd->lock, flags);
136
137 if (tosakbd->suspended)
138 goto out;
139
140 for (col = 0; col < TOSA_KEY_STROBE_NUM; col++) {
141 /*
142 * Discharge the output driver capacitatance
143 * in the keyboard matrix. (Yes it is significant..)
144 */
145 tosakbd_discharge_all();
146 udelay(KB_DISCHARGE_DELAY);
147
148 tosakbd_activate_col(col);
149 udelay(KB_ACTIVATE_DELAY);
150
151 rowd = GET_ROWS_STATUS(col);
152
153 for (row = 0; row < TOSA_KEY_SENSE_NUM; row++) {
154 unsigned int scancode, pressed;
155 scancode = SCANCODE(row, col);
156 pressed = rowd & KB_ROWMASK(row);
157
158 if (pressed && !tosakbd->keycode[scancode])
159 dev_warn(&dev->dev,
160 "unhandled scancode: 0x%02x\n",
161 scancode);
162
163 input_report_key(tosakbd->input,
164 tosakbd->keycode[scancode],
165 pressed);
166 if (pressed)
167 num_pressed++;
168 }
169
170 tosakbd_reset_col(col);
171 }
172
173 tosakbd_activate_all();
174
175 input_sync(tosakbd->input);
176
177 /* if any keys are pressed, enable the timer */
178 if (num_pressed)
179 mod_timer(&tosakbd->timer, jiffies + SCAN_INTERVAL);
180
181 out:
182 spin_unlock_irqrestore(&tosakbd->lock, flags);
183}
184
185/*
186 * tosa keyboard interrupt handler.
187 */
188static irqreturn_t tosakbd_interrupt(int irq, void *__dev)
189{
190 struct platform_device *dev = __dev;
191 struct tosakbd *tosakbd = platform_get_drvdata(dev);
192
193 if (!timer_pending(&tosakbd->timer)) {
194 /** wait chattering delay **/
195 udelay(20);
196 tosakbd_scankeyboard(dev);
197 }
198
199 return IRQ_HANDLED;
200}
201
202/*
203 * tosa timer checking for released keys
204 */
205static void tosakbd_timer_callback(unsigned long __dev)
206{
207 struct platform_device *dev = (struct platform_device *)__dev;
208
209 tosakbd_scankeyboard(dev);
210}
211
212#ifdef CONFIG_PM
213static int tosakbd_suspend(struct platform_device *dev, pm_message_t state)
214{
215 struct tosakbd *tosakbd = platform_get_drvdata(dev);
216 unsigned long flags;
217
218 spin_lock_irqsave(&tosakbd->lock, flags);
219 tosakbd->suspended = true;
220 spin_unlock_irqrestore(&tosakbd->lock, flags);
221
222 del_timer_sync(&tosakbd->timer);
223
224 return 0;
225}
226
227static int tosakbd_resume(struct platform_device *dev)
228{
229 struct tosakbd *tosakbd = platform_get_drvdata(dev);
230
231 tosakbd->suspended = false;
232 tosakbd_scankeyboard(dev);
233
234 return 0;
235}
236#else
237#define tosakbd_suspend NULL
238#define tosakbd_resume NULL
239#endif
240
241static int __devinit tosakbd_probe(struct platform_device *pdev) {
242
243 int i;
244 struct tosakbd *tosakbd;
245 struct input_dev *input_dev;
246 int error;
247
248 tosakbd = kzalloc(sizeof(struct tosakbd), GFP_KERNEL);
249 if (!tosakbd)
250 return -ENOMEM;
251
252 input_dev = input_allocate_device();
253 if (!input_dev) {
254 kfree(tosakbd);
255 return -ENOMEM;
256 }
257
258 platform_set_drvdata(pdev, tosakbd);
259
260 spin_lock_init(&tosakbd->lock);
261
262 /* Init Keyboard rescan timer */
263 init_timer(&tosakbd->timer);
264 tosakbd->timer.function = tosakbd_timer_callback;
265 tosakbd->timer.data = (unsigned long) pdev;
266
267 tosakbd->input = input_dev;
268
269 input_set_drvdata(input_dev, tosakbd);
270 input_dev->name = "Tosa Keyboard";
271 input_dev->phys = "tosakbd/input0";
272 input_dev->dev.parent = &pdev->dev;
273
274 input_dev->id.bustype = BUS_HOST;
275 input_dev->id.vendor = 0x0001;
276 input_dev->id.product = 0x0001;
277 input_dev->id.version = 0x0100;
278
279 input_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_REP);
280 input_dev->keycode = tosakbd->keycode;
281 input_dev->keycodesize = sizeof(tosakbd->keycode[0]);
282 input_dev->keycodemax = ARRAY_SIZE(tosakbd_keycode);
283
284 memcpy(tosakbd->keycode, tosakbd_keycode, sizeof(tosakbd_keycode));
285
286 for (i = 0; i < ARRAY_SIZE(tosakbd_keycode); i++)
287 __set_bit(tosakbd->keycode[i], input_dev->keybit);
288 __clear_bit(KEY_RESERVED, input_dev->keybit);
289
290 /* Setup sense interrupts - RisingEdge Detect, sense lines as inputs */
291 for (i = 0; i < TOSA_KEY_SENSE_NUM; i++) {
292 int gpio = TOSA_GPIO_KEY_SENSE(i);
293 int irq;
294 error = gpio_request(gpio, "tosakbd");
295 if (error < 0) {
296 printk(KERN_ERR "tosakbd: failed to request GPIO %d, "
297 " error %d\n", gpio, error);
298 goto fail;
299 }
300
301 error = gpio_direction_input(TOSA_GPIO_KEY_SENSE(i));
302 if (error < 0) {
303 printk(KERN_ERR "tosakbd: failed to configure input"
304 " direction for GPIO %d, error %d\n",
305 gpio, error);
306 gpio_free(gpio);
307 goto fail;
308 }
309
310 irq = gpio_to_irq(gpio);
311 if (irq < 0) {
312 error = irq;
313 printk(KERN_ERR "gpio-keys: Unable to get irq number"
314 " for GPIO %d, error %d\n",
315 gpio, error);
316 gpio_free(gpio);
317 goto fail;
318 }
319
320 error = request_irq(irq, tosakbd_interrupt,
321 IRQF_DISABLED | IRQF_TRIGGER_RISING,
322 "tosakbd", pdev);
323
324 if (error) {
325 printk("tosakbd: Can't get IRQ: %d: error %d!\n",
326 irq, error);
327 gpio_free(gpio);
328 goto fail;
329 }
330 }
331
332 /* Set Strobe lines as outputs - set high */
333 for (i = 0; i < TOSA_KEY_STROBE_NUM; i++) {
334 int gpio = TOSA_GPIO_KEY_STROBE(i);
335 error = gpio_request(gpio, "tosakbd");
336 if (error < 0) {
337 printk(KERN_ERR "tosakbd: failed to request GPIO %d, "
338 " error %d\n", gpio, error);
339 goto fail2;
340 }
341
342 error = gpio_direction_output(gpio, 1);
343 if (error < 0) {
344 printk(KERN_ERR "tosakbd: failed to configure input"
345 " direction for GPIO %d, error %d\n",
346 gpio, error);
347 gpio_free(gpio);
348 goto fail2;
349 }
350
351 }
352
353 error = input_register_device(input_dev);
354 if (error) {
355 printk(KERN_ERR "tosakbd: Unable to register input device, "
356 "error: %d\n", error);
357 goto fail2;
358 }
359
360 printk(KERN_INFO "input: Tosa Keyboard Registered\n");
361
362 return 0;
363
364fail2:
365 while (--i >= 0)
366 gpio_free(TOSA_GPIO_KEY_STROBE(i));
367
368 i = TOSA_KEY_SENSE_NUM;
369fail:
370 while (--i >= 0) {
371 free_irq(gpio_to_irq(TOSA_GPIO_KEY_SENSE(i)), pdev);
372 gpio_free(TOSA_GPIO_KEY_SENSE(i));
373 }
374
375 platform_set_drvdata(pdev, NULL);
376 input_free_device(input_dev);
377 kfree(tosakbd);
378
379 return error;
380}
381
382static int __devexit tosakbd_remove(struct platform_device *dev)
383{
384 int i;
385 struct tosakbd *tosakbd = platform_get_drvdata(dev);
386
387 for (i = 0; i < TOSA_KEY_STROBE_NUM; i++)
388 gpio_free(TOSA_GPIO_KEY_STROBE(i));
389
390 for (i = 0; i < TOSA_KEY_SENSE_NUM; i++) {
391 free_irq(gpio_to_irq(TOSA_GPIO_KEY_SENSE(i)), dev);
392 gpio_free(TOSA_GPIO_KEY_SENSE(i));
393 }
394
395 del_timer_sync(&tosakbd->timer);
396
397 input_unregister_device(tosakbd->input);
398
399 kfree(tosakbd);
400
401 return 0;
402}
403
404static struct platform_driver tosakbd_driver = {
405 .probe = tosakbd_probe,
406 .remove = __devexit_p(tosakbd_remove),
407 .suspend = tosakbd_suspend,
408 .resume = tosakbd_resume,
409 .driver = {
410 .name = "tosa-keyboard",
411 .owner = THIS_MODULE,
412 },
413};
414
415static int __devinit tosakbd_init(void)
416{
417 return platform_driver_register(&tosakbd_driver);
418}
419
420static void __exit tosakbd_exit(void)
421{
422 platform_driver_unregister(&tosakbd_driver);
423}
424
425module_init(tosakbd_init);
426module_exit(tosakbd_exit);
427
428MODULE_AUTHOR("Dirk Opfer <Dirk@Opfer-Online.de>");
429MODULE_DESCRIPTION("Tosa Keyboard Driver");
430MODULE_LICENSE("GPL v2");
431MODULE_ALIAS("platform:tosa-keyboard");
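tosakbd_probe() above unwinds partial failures with the two-label fail/fail2 pattern: resources acquired in a loop are released in reverse with while (--i >= 0), so only what actually succeeded is undone before falling back to the earlier label. Below is a generic, runnable sketch of that shape; acquire() and release() are stand-ins for illustration, not kernel APIs.

#include <stdio.h>
#include <stdbool.h>

#define NUM_RES 4

/* Pretend resource number 2 cannot be acquired. */
static bool acquire(int i) { printf("acquire %d\n", i); return i != 2; }
static void release(int i) { printf("release %d\n", i); }

static int setup(void)
{
    int i;

    for (i = 0; i < NUM_RES; i++) {
        if (!acquire(i))
            goto fail;
    }
    return 0;

fail:
    while (--i >= 0)    /* undo only what actually succeeded, in reverse */
        release(i);
    return -1;
}

int main(void)
{
    return setup() ? 1 : 0;     /* prints acquire 0..2, then release 1, 0 */
}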
diff --git a/drivers/input/misc/ati_remote.c b/drivers/input/misc/ati_remote.c
index 614b65d78fe9..e8bbc619f6df 100644
--- a/drivers/input/misc/ati_remote.c
+++ b/drivers/input/misc/ati_remote.c
@@ -98,10 +98,12 @@
  * Module and Version Information, Module Parameters
  */
 
 #define ATI_REMOTE_VENDOR_ID        0x0bc7
-#define ATI_REMOTE_PRODUCT_ID       0x004
-#define LOLA_REMOTE_PRODUCT_ID      0x002
-#define MEDION_REMOTE_PRODUCT_ID    0x006
+#define LOLA_REMOTE_PRODUCT_ID      0x0002
+#define LOLA2_REMOTE_PRODUCT_ID     0x0003
+#define ATI_REMOTE_PRODUCT_ID       0x0004
+#define NVIDIA_REMOTE_PRODUCT_ID    0x0005
+#define MEDION_REMOTE_PRODUCT_ID    0x0006
 
 #define DRIVER_VERSION              "2.2.1"
 #define DRIVER_AUTHOR               "Torrey Hoffman <thoffman@arnor.net>"
@@ -142,8 +144,10 @@ MODULE_PARM_DESC(repeat_delay, "Delay before sending repeats, default = 500 msec
 #define err(format, arg...) printk(KERN_ERR format , ## arg)
 
 static struct usb_device_id ati_remote_table[] = {
-    { USB_DEVICE(ATI_REMOTE_VENDOR_ID, ATI_REMOTE_PRODUCT_ID) },
     { USB_DEVICE(ATI_REMOTE_VENDOR_ID, LOLA_REMOTE_PRODUCT_ID) },
+    { USB_DEVICE(ATI_REMOTE_VENDOR_ID, LOLA2_REMOTE_PRODUCT_ID) },
+    { USB_DEVICE(ATI_REMOTE_VENDOR_ID, ATI_REMOTE_PRODUCT_ID) },
+    { USB_DEVICE(ATI_REMOTE_VENDOR_ID, NVIDIA_REMOTE_PRODUCT_ID) },
     { USB_DEVICE(ATI_REMOTE_VENDOR_ID, MEDION_REMOTE_PRODUCT_ID) },
     {}  /* Terminating entry */
 };
diff --git a/drivers/input/misc/pcspkr.c b/drivers/input/misc/pcspkr.c
index ea4e1fd12651..f080dd31499b 100644
--- a/drivers/input/misc/pcspkr.c
+++ b/drivers/input/misc/pcspkr.c
@@ -30,7 +30,7 @@ MODULE_ALIAS("platform:pcspkr");
 #include <asm/i8253.h>
 #else
 #include <asm/8253pit.h>
-static DEFINE_SPINLOCK(i8253_lock);
+static DEFINE_RAW_SPINLOCK(i8253_lock);
 #endif
 
 static int pcspkr_event(struct input_dev *dev, unsigned int type, unsigned int code, int value)
@@ -50,7 +50,7 @@ static int pcspkr_event(struct input_dev *dev, unsigned int type, unsigned int c
     if (value > 20 && value < 32767)
         count = PIT_TICK_RATE / value;
 
-    spin_lock_irqsave(&i8253_lock, flags);
+    raw_spin_lock_irqsave(&i8253_lock, flags);
 
     if (count) {
         /* set command for counter 2, 2 byte write */
@@ -65,7 +65,7 @@ static int pcspkr_event(struct input_dev *dev, unsigned int type, unsigned int c
         outb(inb_p(0x61) & 0xFC, 0x61);
     }
 
-    spin_unlock_irqrestore(&i8253_lock, flags);
+    raw_spin_unlock_irqrestore(&i8253_lock, flags);
 
     return 0;
 }
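For reference, the count written to PIT counter 2 in pcspkr_event() above is simply the i8253 input clock divided by the requested beep frequency. A quick standalone illustration of that arithmetic, assuming the usual x86 PIT_TICK_RATE of 1193182 Hz:

#include <stdio.h>

#define PIT_TICK_RATE 1193182   /* i8253 input clock in Hz (typical x86 value) */

int main(void)
{
    unsigned int freq = 440;                        /* desired beep frequency */
    unsigned int count = PIT_TICK_RATE / freq;      /* divisor loaded into counter 2 */

    printf("programming PIT counter 2 with %u (~%u Hz)\n",
           count, PIT_TICK_RATE / count);            /* 2711, ~440 Hz */
    return 0;
}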
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 0d22cb9ce42e..99d58764ef03 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -64,7 +64,6 @@ static const struct alps_model_info alps_model_data[] = {
     { { 0x62, 0x02, 0x14 }, 0xcf, 0xcf,
         ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED },
     { { 0x73, 0x02, 0x50 }, 0xcf, 0xcf, ALPS_FOUR_BUTTONS },   /* Dell Vostro 1400 */
-    { { 0x73, 0x02, 0x64 }, 0xf8, 0xf8, 0 },                    /* HP Pavilion dm3 */
     { { 0x52, 0x01, 0x14 }, 0xff, 0xff,
         ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED },    /* Toshiba Tecra A11-11L */
 };
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index a138b5da79f9..112b4ee52ff2 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -25,6 +25,10 @@
         printk(KERN_DEBUG format, ##arg);    \
     } while (0)
 
+static bool force_elantech;
+module_param_named(force_elantech, force_elantech, bool, 0644);
+MODULE_PARM_DESC(force_elantech, "Force the Elantech PS/2 protocol extension to be used, 1 = enabled, 0 = disabled (default).");
+
 /*
  * Send a Synaptics style sliced query command
  */
@@ -181,14 +185,18 @@ static void elantech_report_absolute_v1(struct psmouse *psmouse)
     int fingers;
     static int old_fingers;
 
-    if (etd->fw_version_maj == 0x01) {
-        /* byte 0: D U p1 p2 1 p3 R L
-           byte 1: f 0 th tw x9 x8 y9 y8 */
+    if (etd->fw_version < 0x020000) {
+        /*
+         * byte 0: D U p1 p2 1 p3 R L
+         * byte 1: f 0 th tw x9 x8 y9 y8
+         */
         fingers = ((packet[1] & 0x80) >> 7) +
                   ((packet[1] & 0x30) >> 4);
     } else {
-        /* byte 0: n1 n0 p2 p1 1 p3 R L
-           byte 1: 0 0 0 0 x9 x8 y9 y8 */
+        /*
+         * byte 0: n1 n0 p2 p1 1 p3 R L
+         * byte 1: 0 0 0 0 x9 x8 y9 y8
+         */
         fingers = (packet[0] & 0xc0) >> 6;
     }
 
@@ -202,13 +210,15 @@ static void elantech_report_absolute_v1(struct psmouse *psmouse)
 
     input_report_key(dev, BTN_TOUCH, fingers != 0);
 
-    /* byte 2: x7 x6 x5 x4 x3 x2 x1 x0
-       byte 3: y7 y6 y5 y4 y3 y2 y1 y0 */
+    /*
+     * byte 2: x7 x6 x5 x4 x3 x2 x1 x0
+     * byte 3: y7 y6 y5 y4 y3 y2 y1 y0
+     */
     if (fingers) {
         input_report_abs(dev, ABS_X,
             ((packet[1] & 0x0c) << 6) | packet[2]);
-        input_report_abs(dev, ABS_Y, ETP_YMAX_V1 -
-            (((packet[1] & 0x03) << 8) | packet[3]));
+        input_report_abs(dev, ABS_Y,
+            ETP_YMAX_V1 - (((packet[1] & 0x03) << 8) | packet[3]));
     }
 
     input_report_key(dev, BTN_TOOL_FINGER, fingers == 1);
@@ -217,7 +227,7 @@ static void elantech_report_absolute_v1(struct psmouse *psmouse)
     input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
     input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
 
-    if ((etd->fw_version_maj == 0x01) &&
+    if (etd->fw_version < 0x020000 &&
         (etd->capabilities & ETP_CAP_HAS_ROCKER)) {
         /* rocker up */
         input_report_key(dev, BTN_FORWARD, packet[0] & 0x40);
@@ -247,34 +257,47 @@ static void elantech_report_absolute_v2(struct psmouse *psmouse)
 
     switch (fingers) {
     case 1:
-        /* byte 1: x15 x14 x13 x12 x11 x10 x9 x8
-           byte 2: x7 x6 x5 x4 x4 x2 x1 x0 */
-        input_report_abs(dev, ABS_X, (packet[1] << 8) | packet[2]);
-        /* byte 4: y15 y14 y13 y12 y11 y10 y8 y8
-           byte 5: y7 y6 y5 y4 y3 y2 y1 y0 */
-        input_report_abs(dev, ABS_Y, ETP_YMAX_V2 -
-            ((packet[4] << 8) | packet[5]));
+        /*
+         * byte 1: . . . . . x10 x9 x8
+         * byte 2: x7 x6 x5 x4 x4 x2 x1 x0
+         */
+        input_report_abs(dev, ABS_X,
+            ((packet[1] & 0x07) << 8) | packet[2]);
+        /*
+         * byte 4: . . . . . . y9 y8
+         * byte 5: y7 y6 y5 y4 y3 y2 y1 y0
+         */
+        input_report_abs(dev, ABS_Y,
+            ETP_YMAX_V2 - (((packet[4] & 0x03) << 8) | packet[5]));
         break;
 
     case 2:
-        /* The coordinate of each finger is reported separately with
-           a lower resolution for two finger touches */
-        /* byte 0: . . ay8 ax8 . . . .
-           byte 1: ax7 ax6 ax5 ax4 ax3 ax2 ax1 ax0 */
+        /*
+         * The coordinate of each finger is reported separately
+         * with a lower resolution for two finger touches:
+         * byte 0: . . ay8 ax8 . . . .
+         * byte 1: ax7 ax6 ax5 ax4 ax3 ax2 ax1 ax0
+         */
         x1 = ((packet[0] & 0x10) << 4) | packet[1];
         /* byte 2: ay7 ay6 ay5 ay4 ay3 ay2 ay1 ay0 */
         y1 = ETP_2FT_YMAX - (((packet[0] & 0x20) << 3) | packet[2]);
-        /* byte 3: . . by8 bx8 . . . .
-           byte 4: bx7 bx6 bx5 bx4 bx3 bx2 bx1 bx0 */
+        /*
+         * byte 3: . . by8 bx8 . . . .
+         * byte 4: bx7 bx6 bx5 bx4 bx3 bx2 bx1 bx0
+         */
         x2 = ((packet[3] & 0x10) << 4) | packet[4];
         /* byte 5: by7 by8 by5 by4 by3 by2 by1 by0 */
         y2 = ETP_2FT_YMAX - (((packet[3] & 0x20) << 3) | packet[5]);
-        /* For compatibility with the X Synaptics driver scale up one
-           coordinate and report as ordinary mouse movent */
+        /*
+         * For compatibility with the X Synaptics driver scale up
+         * one coordinate and report as ordinary mouse movent
+         */
         input_report_abs(dev, ABS_X, x1 << 2);
         input_report_abs(dev, ABS_Y, y1 << 2);
-        /* For compatibility with the proprietary X Elantech driver
-           report both coordinates as hat coordinates */
+        /*
+         * For compatibility with the proprietary X Elantech driver
+         * report both coordinates as hat coordinates
+         */
         input_report_abs(dev, ABS_HAT0X, x1);
         input_report_abs(dev, ABS_HAT0Y, y1);
         input_report_abs(dev, ABS_HAT1X, x2);
@@ -298,7 +321,7 @@ static int elantech_check_parity_v1(struct psmouse *psmouse)
     unsigned char p1, p2, p3;
 
     /* Parity bits are placed differently */
-    if (etd->fw_version_maj == 0x01) {
+    if (etd->fw_version < 0x020000) {
         /* byte 0: D U p1 p2 1 p3 R L */
         p1 = (packet[0] & 0x20) >> 5;
         p2 = (packet[0] & 0x10) >> 4;
@@ -434,7 +457,7 @@ static void elantech_set_input_params(struct psmouse *psmouse)
     switch (etd->hw_version) {
     case 1:
         /* Rocker button */
-        if ((etd->fw_version_maj == 0x01) &&
+        if (etd->fw_version < 0x020000 &&
             (etd->capabilities & ETP_CAP_HAS_ROCKER)) {
             __set_bit(BTN_FORWARD, dev->keybit);
             __set_bit(BTN_BACK, dev->keybit);
@@ -596,8 +619,12 @@ int elantech_detect(struct psmouse *psmouse, bool set_properties)
             param[0], param[1], param[2]);
 
     if (param[0] == 0 || param[1] != 0) {
-        pr_debug("elantech.c: Probably not a real Elantech touchpad. Aborting.\n");
-        return -1;
+        if (!force_elantech) {
+            pr_debug("elantech.c: Probably not a real Elantech touchpad. Aborting.\n");
+            return -1;
+        }
+
+        pr_debug("elantech.c: Probably not a real Elantech touchpad. Enabling anyway due to force_elantech.\n");
     }
 
     if (set_properties) {
@@ -659,14 +686,14 @@ int elantech_init(struct psmouse *psmouse)
         pr_err("elantech.c: failed to query firmware version.\n");
         goto init_fail;
     }
-    etd->fw_version_maj = param[0];
-    etd->fw_version_min = param[2];
+
+    etd->fw_version = (param[0] << 16) | (param[1] << 8) | param[2];
 
     /*
      * Assume every version greater than this is new EeePC style
      * hardware with 6 byte packets
      */
-    if (etd->fw_version_maj >= 0x02 && etd->fw_version_min >= 0x30) {
+    if (etd->fw_version >= 0x020030) {
         etd->hw_version = 2;
         /* For now show extra debug information */
         etd->debug = 1;
@@ -676,8 +703,9 @@ int elantech_init(struct psmouse *psmouse)
         etd->hw_version = 1;
         etd->paritycheck = 1;
     }
-    pr_info("elantech.c: assuming hardware version %d, firmware version %d.%d\n",
-        etd->hw_version, etd->fw_version_maj, etd->fw_version_min);
+
+    pr_info("elantech.c: assuming hardware version %d, firmware version %d.%d.%d\n",
+        etd->hw_version, param[0], param[1], param[2]);
 
     if (synaptics_send_cmd(psmouse, ETP_CAPABILITIES_QUERY, param)) {
         pr_err("elantech.c: failed to query capabilities.\n");
@@ -692,8 +720,8 @@ int elantech_init(struct psmouse *psmouse)
      * a touch action starts causing the mouse cursor or scrolled page
      * to jump. Enable a workaround.
      */
-    if (etd->fw_version_maj == 0x02 && etd->fw_version_min == 0x22) {
-        pr_info("elantech.c: firmware version 2.34 detected, "
+    if (etd->fw_version == 0x020022) {
+        pr_info("elantech.c: firmware version 2.0.34 detected, "
             "enabling jumpy cursor workaround\n");
         etd->jumpy_cursor = 1;
     }
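The elantech.c change above folds the three firmware version bytes into a single integer, fw_version = (param[0] << 16) | (param[1] << 8) | param[2], so that ranges such as "older than 2.x" or "at least 2.0.48" become plain integer comparisons against 0x020000 and 0x020030, and the jumpy-cursor quirk keys off the exact value 0x020022 (firmware 2.0.34). A small standalone sketch of that packing and the comparisons used by the patch:

#include <stdio.h>

/* Pack the three query bytes the same way the patch does. */
static unsigned int pack_fw_version(unsigned char p0, unsigned char p1, unsigned char p2)
{
    return ((unsigned int)p0 << 16) | ((unsigned int)p1 << 8) | p2;
}

int main(void)
{
    unsigned int fw = pack_fw_version(0x02, 0x00, 0x22);   /* reported as 2.0.34 */

    printf("fw_version = 0x%06x\n", fw);                    /* 0x020022 */
    printf("pre-2.x hardware?    %s\n", fw < 0x020000  ? "yes" : "no");
    printf("EeePC-style packets? %s\n", fw >= 0x020030 ? "yes" : "no");
    printf("jumpy-cursor quirk?  %s\n", fw == 0x020022 ? "yes" : "no");
    return 0;
}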
diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h
index feac5f7af966..ac57bde1bb9f 100644
--- a/drivers/input/mouse/elantech.h
+++ b/drivers/input/mouse/elantech.h
@@ -100,11 +100,10 @@ struct elantech_data {
     unsigned char reg_26;
     unsigned char debug;
     unsigned char capabilities;
-    unsigned char fw_version_maj;
-    unsigned char fw_version_min;
-    unsigned char hw_version;
     unsigned char paritycheck;
     unsigned char jumpy_cursor;
+    unsigned char hw_version;
+    unsigned int fw_version;
     unsigned char parity[256];
 };
 
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index d8c0c8d6992c..a3c97315a473 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -110,6 +110,7 @@ static struct workqueue_struct *kpsmoused_wq;
 struct psmouse_protocol {
     enum psmouse_type type;
     bool maxproto;
+    bool ignore_parity; /* Protocol should ignore parity errors from KBC */
     const char *name;
     const char *alias;
     int (*detect)(struct psmouse *, bool);
@@ -288,7 +289,9 @@ static irqreturn_t psmouse_interrupt(struct serio *serio,
     if (psmouse->state == PSMOUSE_IGNORE)
         goto out;
 
-    if (flags & (SERIO_PARITY|SERIO_TIMEOUT)) {
+    if (unlikely((flags & SERIO_TIMEOUT) ||
+                 ((flags & SERIO_PARITY) && !psmouse->ignore_parity))) {
+
         if (psmouse->state == PSMOUSE_ACTIVATED)
             printk(KERN_WARNING "psmouse.c: bad data from KBC -%s%s\n",
                    flags & SERIO_TIMEOUT ? " timeout" : "",
@@ -759,6 +762,7 @@ static const struct psmouse_protocol psmouse_protocols[] = {
         .name = "PS/2",
         .alias = "bare",
         .maxproto = true,
+        .ignore_parity = true,
         .detect = ps2bare_detect,
     },
 #ifdef CONFIG_MOUSE_PS2_LOGIPS2PP
@@ -786,6 +790,7 @@ static const struct psmouse_protocol psmouse_protocols[] = {
786 .name = "ImPS/2", 790 .name = "ImPS/2",
787 .alias = "imps", 791 .alias = "imps",
788 .maxproto = true, 792 .maxproto = true,
793 .ignore_parity = true,
789 .detect = intellimouse_detect, 794 .detect = intellimouse_detect,
790 }, 795 },
791 { 796 {
@@ -793,6 +798,7 @@ static const struct psmouse_protocol psmouse_protocols[] = {
793 .name = "ImExPS/2", 798 .name = "ImExPS/2",
794 .alias = "exps", 799 .alias = "exps",
795 .maxproto = true, 800 .maxproto = true,
801 .ignore_parity = true,
796 .detect = im_explorer_detect, 802 .detect = im_explorer_detect,
797 }, 803 },
798#ifdef CONFIG_MOUSE_PS2_SYNAPTICS 804#ifdef CONFIG_MOUSE_PS2_SYNAPTICS
@@ -1222,6 +1228,7 @@ static void psmouse_disconnect(struct serio *serio)
1222static int psmouse_switch_protocol(struct psmouse *psmouse, 1228static int psmouse_switch_protocol(struct psmouse *psmouse,
1223 const struct psmouse_protocol *proto) 1229 const struct psmouse_protocol *proto)
1224{ 1230{
1231 const struct psmouse_protocol *selected_proto;
1225 struct input_dev *input_dev = psmouse->dev; 1232 struct input_dev *input_dev = psmouse->dev;
1226 1233
1227 input_dev->dev.parent = &psmouse->ps2dev.serio->dev; 1234 input_dev->dev.parent = &psmouse->ps2dev.serio->dev;
@@ -1245,9 +1252,14 @@ static int psmouse_switch_protocol(struct psmouse *psmouse,
1245 return -1; 1252 return -1;
1246 1253
1247 psmouse->type = proto->type; 1254 psmouse->type = proto->type;
1248 } else 1255 selected_proto = proto;
1256 } else {
1249 psmouse->type = psmouse_extensions(psmouse, 1257 psmouse->type = psmouse_extensions(psmouse,
1250 psmouse_max_proto, true); 1258 psmouse_max_proto, true);
1259 selected_proto = psmouse_protocol_by_type(psmouse->type);
1260 }
1261
1262 psmouse->ignore_parity = selected_proto->ignore_parity;
1251 1263
1252 /* 1264 /*
1253 * If mouse's packet size is 3 there is no point in polling the 1265 * If mouse's packet size is 3 there is no point in polling the
@@ -1267,7 +1279,7 @@ static int psmouse_switch_protocol(struct psmouse *psmouse,
1267 psmouse->resync_time = 0; 1279 psmouse->resync_time = 0;
1268 1280
1269 snprintf(psmouse->devname, sizeof(psmouse->devname), "%s %s %s", 1281 snprintf(psmouse->devname, sizeof(psmouse->devname), "%s %s %s",
1270 psmouse_protocol_by_type(psmouse->type)->name, psmouse->vendor, psmouse->name); 1282 selected_proto->name, psmouse->vendor, psmouse->name);
1271 1283
1272 input_dev->name = psmouse->devname; 1284 input_dev->name = psmouse->devname;
1273 input_dev->phys = psmouse->phys; 1285 input_dev->phys = psmouse->phys;
@@ -1382,6 +1394,7 @@ static int psmouse_reconnect(struct serio *serio)
1382 struct psmouse *psmouse = serio_get_drvdata(serio); 1394 struct psmouse *psmouse = serio_get_drvdata(serio);
1383 struct psmouse *parent = NULL; 1395 struct psmouse *parent = NULL;
1384 struct serio_driver *drv = serio->drv; 1396 struct serio_driver *drv = serio->drv;
1397 unsigned char type;
1385 int rc = -1; 1398 int rc = -1;
1386 1399
1387 if (!drv || !psmouse) { 1400 if (!drv || !psmouse) {
@@ -1401,10 +1414,15 @@ static int psmouse_reconnect(struct serio *serio)
1401 if (psmouse->reconnect) { 1414 if (psmouse->reconnect) {
1402 if (psmouse->reconnect(psmouse)) 1415 if (psmouse->reconnect(psmouse))
1403 goto out; 1416 goto out;
1404 } else if (psmouse_probe(psmouse) < 0 || 1417 } else {
1405 psmouse->type != psmouse_extensions(psmouse, 1418 psmouse_reset(psmouse);
1406 psmouse_max_proto, false)) { 1419
1407 goto out; 1420 if (psmouse_probe(psmouse) < 0)
1421 goto out;
1422
1423 type = psmouse_extensions(psmouse, psmouse_max_proto, false);
1424 if (psmouse->type != type)
1425 goto out;
1408 } 1426 }
1409 1427
1410 /* ok, the device type (and capabilities) match the old one, 1428 /* ok, the device type (and capabilities) match the old one,
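
Two behavioural changes land in psmouse-base.c above: the interrupt handler now drops a byte on a parity error only when the selected protocol did not set ignore_parity, and psmouse_reconnect() resets the mouse before re-probing it. The parity gate reduces to one boolean expression; a self-contained sketch, with placeholder values standing in for the real SERIO_* flag bits:

/* Sketch of the new error gate; flag values are placeholders. */
#include <stdbool.h>
#include <stdio.h>

#define SKETCH_TIMEOUT 0x01
#define SKETCH_PARITY  0x02

static bool discard_byte(unsigned int flags, bool ignore_parity)
{
	return (flags & SKETCH_TIMEOUT) ||
	       ((flags & SKETCH_PARITY) && !ignore_parity);
}

int main(void)
{
	printf("%d\n", discard_byte(SKETCH_PARITY, true));	/* 0: byte kept      */
	printf("%d\n", discard_byte(SKETCH_PARITY, false));	/* 1: byte discarded */
	printf("%d\n", discard_byte(SKETCH_TIMEOUT, true));	/* 1: byte discarded */
	return 0;
}

The table entries that set ignore_parity (bare PS/2, ImPS/2, ImExPS/2) are exactly the protocols whose devices are known to report parity errors spuriously through some keyboard controllers.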
diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
index e053bdd137ff..593e910bfc7a 100644
--- a/drivers/input/mouse/psmouse.h
+++ b/drivers/input/mouse/psmouse.h
@@ -47,6 +47,7 @@ struct psmouse {
47 unsigned char pktcnt; 47 unsigned char pktcnt;
48 unsigned char pktsize; 48 unsigned char pktsize;
49 unsigned char type; 49 unsigned char type;
50 bool ignore_parity;
50 bool acks_disable_command; 51 bool acks_disable_command;
51 unsigned int model; 52 unsigned int model;
52 unsigned long last; 53 unsigned long last;
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 026df6010161..ebd7a99efeae 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -137,7 +137,8 @@ static int synaptics_capability(struct psmouse *psmouse)
137 if (synaptics_send_cmd(psmouse, SYN_QUE_CAPABILITIES, cap)) 137 if (synaptics_send_cmd(psmouse, SYN_QUE_CAPABILITIES, cap))
138 return -1; 138 return -1;
139 priv->capabilities = (cap[0] << 16) | (cap[1] << 8) | cap[2]; 139 priv->capabilities = (cap[0] << 16) | (cap[1] << 8) | cap[2];
140 priv->ext_cap = 0; 140 priv->ext_cap = priv->ext_cap_0c = 0;
141
141 if (!SYN_CAP_VALID(priv->capabilities)) 142 if (!SYN_CAP_VALID(priv->capabilities))
142 return -1; 143 return -1;
143 144
@@ -150,7 +151,7 @@ static int synaptics_capability(struct psmouse *psmouse)
150 if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 1) { 151 if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 1) {
151 if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_CAPAB, cap)) { 152 if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_CAPAB, cap)) {
152 printk(KERN_ERR "Synaptics claims to have extended capabilities," 153 printk(KERN_ERR "Synaptics claims to have extended capabilities,"
153 " but I'm not able to read them."); 154 " but I'm not able to read them.\n");
154 } else { 155 } else {
155 priv->ext_cap = (cap[0] << 16) | (cap[1] << 8) | cap[2]; 156 priv->ext_cap = (cap[0] << 16) | (cap[1] << 8) | cap[2];
156 157
@@ -162,6 +163,16 @@ static int synaptics_capability(struct psmouse *psmouse)
162 priv->ext_cap &= 0xff0fff; 163 priv->ext_cap &= 0xff0fff;
163 } 164 }
164 } 165 }
166
167 if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 4) {
168 if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_CAPAB_0C, cap)) {
169 printk(KERN_ERR "Synaptics claims to have extended capability 0x0c,"
170 " but I'm not able to read it.\n");
171 } else {
172 priv->ext_cap_0c = (cap[0] << 16) | (cap[1] << 8) | cap[2];
173 }
174 }
175
165 return 0; 176 return 0;
166} 177}
167 178
@@ -348,7 +359,15 @@ static void synaptics_parse_hw_state(unsigned char buf[], struct synaptics_data
348 hw->left = (buf[0] & 0x01) ? 1 : 0; 359 hw->left = (buf[0] & 0x01) ? 1 : 0;
349 hw->right = (buf[0] & 0x02) ? 1 : 0; 360 hw->right = (buf[0] & 0x02) ? 1 : 0;
350 361
351 if (SYN_CAP_MIDDLE_BUTTON(priv->capabilities)) { 362 if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) {
363 /*
364 * Clickpad's button is transmitted as middle button,
365 * however, since it is primary button, we will report
366 * it as BTN_LEFT.
367 */
368 hw->left = ((buf[0] ^ buf[3]) & 0x01) ? 1 : 0;
369
370 } else if (SYN_CAP_MIDDLE_BUTTON(priv->capabilities)) {
352 hw->middle = ((buf[0] ^ buf[3]) & 0x01) ? 1 : 0; 371 hw->middle = ((buf[0] ^ buf[3]) & 0x01) ? 1 : 0;
353 if (hw->w == 2) 372 if (hw->w == 2)
354 hw->scroll = (signed char)(buf[1]); 373 hw->scroll = (signed char)(buf[1]);
@@ -593,6 +612,12 @@ static void set_input_params(struct input_dev *dev, struct synaptics_data *priv)
593 612
594 dev->absres[ABS_X] = priv->x_res; 613 dev->absres[ABS_X] = priv->x_res;
595 dev->absres[ABS_Y] = priv->y_res; 614 dev->absres[ABS_Y] = priv->y_res;
615
616 if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) {
617 /* Clickpads report only left button */
618 __clear_bit(BTN_RIGHT, dev->keybit);
619 __clear_bit(BTN_MIDDLE, dev->keybit);
620 }
596} 621}
597 622
598static void synaptics_disconnect(struct psmouse *psmouse) 623static void synaptics_disconnect(struct psmouse *psmouse)
@@ -697,10 +722,10 @@ int synaptics_init(struct psmouse *psmouse)
697 722
698 priv->pkt_type = SYN_MODEL_NEWABS(priv->model_id) ? SYN_NEWABS : SYN_OLDABS; 723 priv->pkt_type = SYN_MODEL_NEWABS(priv->model_id) ? SYN_NEWABS : SYN_OLDABS;
699 724
700 printk(KERN_INFO "Synaptics Touchpad, model: %ld, fw: %ld.%ld, id: %#lx, caps: %#lx/%#lx\n", 725 printk(KERN_INFO "Synaptics Touchpad, model: %ld, fw: %ld.%ld, id: %#lx, caps: %#lx/%#lx/%#lx\n",
701 SYN_ID_MODEL(priv->identity), 726 SYN_ID_MODEL(priv->identity),
702 SYN_ID_MAJOR(priv->identity), SYN_ID_MINOR(priv->identity), 727 SYN_ID_MAJOR(priv->identity), SYN_ID_MINOR(priv->identity),
703 priv->model_id, priv->capabilities, priv->ext_cap); 728 priv->model_id, priv->capabilities, priv->ext_cap, priv->ext_cap_0c);
704 729
705 set_input_params(psmouse->dev, priv); 730 set_input_params(psmouse->dev, priv);
706 731
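
The clickpad branch added to synaptics_parse_hw_state() reuses the wire bit that normally carries the middle button, because a clickpad has only one physical button and it should surface as BTN_LEFT. A plain-C sketch of that decision, with the packet reduced to the two bytes the button logic reads (the rest of the packet layout is assumed):

/* Illustration of the button decision, not the full packet parser. */
#include <stdio.h>

struct hw_state { int left, right, middle; };

static void parse_buttons(const unsigned char buf[4], int is_clickpad,
			  struct hw_state *hw)
{
	hw->left   = (buf[0] & 0x01) ? 1 : 0;
	hw->right  = (buf[0] & 0x02) ? 1 : 0;
	hw->middle = 0;

	if (is_clickpad)
		hw->left = ((buf[0] ^ buf[3]) & 0x01) ? 1 : 0;	/* report as BTN_LEFT */
	else
		hw->middle = ((buf[0] ^ buf[3]) & 0x01) ? 1 : 0;
}

int main(void)
{
	unsigned char pkt[4] = { 0x00, 0, 0, 0x01 };	/* button bit only in buf[3] */
	struct hw_state hw;

	parse_buttons(pkt, 1, &hw);
	printf("left=%d right=%d middle=%d\n", hw.left, hw.right, hw.middle);
	return 0;
}

set_input_params() backs this up by clearing BTN_RIGHT and BTN_MIDDLE from the capability bits, so userspace never sees buttons the hardware cannot produce.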
diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h
index f0f40a331dc8..ae37c5d162a4 100644
--- a/drivers/input/mouse/synaptics.h
+++ b/drivers/input/mouse/synaptics.h
@@ -18,6 +18,7 @@
18#define SYN_QUE_SERIAL_NUMBER_SUFFIX 0x07 18#define SYN_QUE_SERIAL_NUMBER_SUFFIX 0x07
19#define SYN_QUE_RESOLUTION 0x08 19#define SYN_QUE_RESOLUTION 0x08
20#define SYN_QUE_EXT_CAPAB 0x09 20#define SYN_QUE_EXT_CAPAB 0x09
21#define SYN_QUE_EXT_CAPAB_0C 0x0c
21 22
22/* synatics modes */ 23/* synatics modes */
23#define SYN_BIT_ABSOLUTE_MODE (1 << 7) 24#define SYN_BIT_ABSOLUTE_MODE (1 << 7)
@@ -48,6 +49,8 @@
48#define SYN_CAP_VALID(c) ((((c) & 0x00ff00) >> 8) == 0x47) 49#define SYN_CAP_VALID(c) ((((c) & 0x00ff00) >> 8) == 0x47)
49#define SYN_EXT_CAP_REQUESTS(c) (((c) & 0x700000) >> 20) 50#define SYN_EXT_CAP_REQUESTS(c) (((c) & 0x700000) >> 20)
50#define SYN_CAP_MULTI_BUTTON_NO(ec) (((ec) & 0x00f000) >> 12) 51#define SYN_CAP_MULTI_BUTTON_NO(ec) (((ec) & 0x00f000) >> 12)
52#define SYN_CAP_PRODUCT_ID(ec) (((ec) & 0xff0000) >> 16)
53#define SYN_CAP_CLICKPAD(ex0c) ((ex0c) & 0x100100)
51 54
52/* synaptics modes query bits */ 55/* synaptics modes query bits */
53#define SYN_MODE_ABSOLUTE(m) ((m) & (1 << 7)) 56#define SYN_MODE_ABSOLUTE(m) ((m) & (1 << 7))
@@ -96,6 +99,7 @@ struct synaptics_data {
96 unsigned long int model_id; /* Model-ID */ 99 unsigned long int model_id; /* Model-ID */
97 unsigned long int capabilities; /* Capabilities */ 100 unsigned long int capabilities; /* Capabilities */
98 unsigned long int ext_cap; /* Extended Capabilities */ 101 unsigned long int ext_cap; /* Extended Capabilities */
102 unsigned long int ext_cap_0c; /* Ext Caps from 0x0c query */
99 unsigned long int identity; /* Identification */ 103 unsigned long int identity; /* Identification */
100 int x_res; /* X resolution in units/mm */ 104 int x_res; /* X resolution in units/mm */
101 int y_res; /* Y resolution in units/mm */ 105 int y_res; /* Y resolution in units/mm */
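
The new SYN_QUE_EXT_CAPAB_0C query is stored the same way as the other capability words: three response bytes packed into a 24-bit value, then tested with a mask. The sketch below mirrors that packing and the SYN_CAP_CLICKPAD test; reading 0x100100 as covering two alternative clickpad bit positions is an inference from the mask value, not something the patch states.

/* Packing and mask test as used for ext_cap_0c; example bytes are made up. */
#include <stdio.h>

#define SYN_CAP_CLICKPAD(ex0c) ((ex0c) & 0x100100)

int main(void)
{
	unsigned char cap[3] = { 0x10, 0x00, 0x00 };	/* example 0x0c response */
	unsigned long ext_cap_0c = (cap[0] << 16) | (cap[1] << 8) | cap[2];

	printf("ext_cap_0c = %#lx, clickpad=%s\n",
	       ext_cap_0c, SYN_CAP_CLICKPAD(ext_cap_0c) ? "yes" : "no");
	return 0;
}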
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 8a8fa4d2d6a8..6c0f1712f55b 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -99,22 +99,6 @@ config TOUCHSCREEN_BITSY
99 To compile this driver as a module, choose M here: the 99 To compile this driver as a module, choose M here: the
100 module will be called h3600_ts_input. 100 module will be called h3600_ts_input.
101 101
102config TOUCHSCREEN_CORGI
103 tristate "SharpSL (Corgi and Spitz series) touchscreen driver (DEPRECATED)"
104 depends on PXA_SHARPSL
105 select CORGI_SSP_DEPRECATED
106 help
107 Say Y here to enable the driver for the touchscreen on the
108 Sharp SL-C7xx and SL-Cxx00 series of PDAs.
109
110 If unsure, say N.
111
112 To compile this driver as a module, choose M here: the
113 module will be called corgi_ts.
114
115 NOTE: this driver is deprecated, try enable SPI and generic
116 ADS7846-based touchscreen driver.
117
118config TOUCHSCREEN_DA9034 102config TOUCHSCREEN_DA9034
119 tristate "Touchscreen support for Dialog Semiconductor DA9034" 103 tristate "Touchscreen support for Dialog Semiconductor DA9034"
120 depends on PMIC_DA903X 104 depends on PMIC_DA903X
@@ -158,8 +142,8 @@ config TOUCHSCREEN_FUJITSU
158 module will be called fujitsu-ts. 142 module will be called fujitsu-ts.
159 143
160config TOUCHSCREEN_S3C2410 144config TOUCHSCREEN_S3C2410
161 tristate "Samsung S3C2410 touchscreen input driver" 145 tristate "Samsung S3C2410/generic touchscreen input driver"
162 depends on ARCH_S3C2410 146 depends on ARCH_S3C2410 || SAMSUNG_DEV_TS
163 select S3C24XX_ADC 147 select S3C24XX_ADC
164 help 148 help
165 Say Y here if you have the s3c2410 touchscreen. 149 Say Y here if you have the s3c2410 touchscreen.
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 7fef7d5cca23..41145d074dec 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -12,7 +12,6 @@ obj-$(CONFIG_TOUCHSCREEN_AD7879) += ad7879.o
12obj-$(CONFIG_TOUCHSCREEN_ADS7846) += ads7846.o 12obj-$(CONFIG_TOUCHSCREEN_ADS7846) += ads7846.o
13obj-$(CONFIG_TOUCHSCREEN_ATMEL_TSADCC) += atmel_tsadcc.o 13obj-$(CONFIG_TOUCHSCREEN_ATMEL_TSADCC) += atmel_tsadcc.o
14obj-$(CONFIG_TOUCHSCREEN_BITSY) += h3600_ts_input.o 14obj-$(CONFIG_TOUCHSCREEN_BITSY) += h3600_ts_input.o
15obj-$(CONFIG_TOUCHSCREEN_CORGI) += corgi_ts.o
16obj-$(CONFIG_TOUCHSCREEN_DYNAPRO) += dynapro.o 15obj-$(CONFIG_TOUCHSCREEN_DYNAPRO) += dynapro.o
17obj-$(CONFIG_TOUCHSCREEN_GUNZE) += gunze.o 16obj-$(CONFIG_TOUCHSCREEN_GUNZE) += gunze.o
18obj-$(CONFIG_TOUCHSCREEN_EETI) += eeti_ts.o 17obj-$(CONFIG_TOUCHSCREEN_EETI) += eeti_ts.o
diff --git a/drivers/input/touchscreen/ad7877.c b/drivers/input/touchscreen/ad7877.c
index e019d53d1ab4..0d2d7e54b465 100644
--- a/drivers/input/touchscreen/ad7877.c
+++ b/drivers/input/touchscreen/ad7877.c
@@ -156,9 +156,14 @@ struct ser_req {
156 u16 reset; 156 u16 reset;
157 u16 ref_on; 157 u16 ref_on;
158 u16 command; 158 u16 command;
159 u16 sample;
160 struct spi_message msg; 159 struct spi_message msg;
161 struct spi_transfer xfer[6]; 160 struct spi_transfer xfer[6];
161
162 /*
163 * DMA (thus cache coherency maintenance) requires the
164 * transfer buffers to live in their own cache lines.
165 */
166 u16 sample ____cacheline_aligned;
162}; 167};
163 168
164struct ad7877 { 169struct ad7877 {
@@ -182,8 +187,6 @@ struct ad7877 {
182 u8 averaging; 187 u8 averaging;
183 u8 pen_down_acc_interval; 188 u8 pen_down_acc_interval;
184 189
185 u16 conversion_data[AD7877_NR_SENSE];
186
187 struct spi_transfer xfer[AD7877_NR_SENSE + 2]; 190 struct spi_transfer xfer[AD7877_NR_SENSE + 2];
188 struct spi_message msg; 191 struct spi_message msg;
189 192
@@ -195,6 +198,12 @@ struct ad7877 {
195 spinlock_t lock; 198 spinlock_t lock;
196 struct timer_list timer; /* P: lock */ 199 struct timer_list timer; /* P: lock */
197 unsigned pending:1; /* P: lock */ 200 unsigned pending:1; /* P: lock */
201
202 /*
203 * DMA (thus cache coherency maintenance) requires the
204 * transfer buffers to live in their own cache lines.
205 */
206 u16 conversion_data[AD7877_NR_SENSE] ____cacheline_aligned;
198}; 207};
199 208
200static int gpio3; 209static int gpio3;
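
The ad7877 changes move the SPI transfer buffers (sample and conversion_data) to the end of their structures and mark them ____cacheline_aligned, so a DMA-capable SPI controller never shares a cache line with CPU-written fields. A stand-alone illustration of the layout effect, with a plain GCC alignment attribute standing in for the kernel macro and 64 bytes assumed as the cache-line size:

/* Layout sketch only; 64 is an assumed cache-line size. */
#include <stdio.h>
#include <stddef.h>

#define SKETCH_CACHELINE 64

struct ser_req_sketch {
	unsigned short reset;
	unsigned short ref_on;
	unsigned short command;
	/* DMA buffer: starts on its own cache line, so writes to the
	 * fields above can never dirty the line the controller owns. */
	unsigned short sample __attribute__((aligned(SKETCH_CACHELINE)));
};

int main(void)
{
	printf("offsetof(sample) = %zu\n", offsetof(struct ser_req_sketch, sample));
	printf("sizeof(struct)   = %zu\n", sizeof(struct ser_req_sketch));
	return 0;
}

With the attribute in place the buffer lands at offset 64 and the structure size rounds up to a whole number of cache lines, which is what makes the placement safe for non-coherent DMA.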
diff --git a/drivers/input/touchscreen/corgi_ts.c b/drivers/input/touchscreen/corgi_ts.c
deleted file mode 100644
index 94a1919d439d..000000000000
--- a/drivers/input/touchscreen/corgi_ts.c
+++ /dev/null
@@ -1,385 +0,0 @@
1/*
2 * Touchscreen driver for Sharp SL-C7xx and SL-Cxx00 models
3 *
4 * Copyright (c) 2004-2005 Richard Purdie
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11
12
13#include <linux/delay.h>
14#include <linux/platform_device.h>
15#include <linux/init.h>
16#include <linux/input.h>
17#include <linux/interrupt.h>
18#include <linux/module.h>
19#include <linux/slab.h>
20#include <linux/irq.h>
21
22#include <mach/sharpsl.h>
23#include <mach/hardware.h>
24#include <mach/pxa2xx-gpio.h>
25
26
27#define PWR_MODE_ACTIVE 0
28#define PWR_MODE_SUSPEND 1
29
30#define X_AXIS_MAX 3830
31#define X_AXIS_MIN 150
32#define Y_AXIS_MAX 3830
33#define Y_AXIS_MIN 190
34#define PRESSURE_MIN 0
35#define PRESSURE_MAX 15000
36
37struct ts_event {
38 short pressure;
39 short x;
40 short y;
41};
42
43struct corgi_ts {
44 struct input_dev *input;
45 struct timer_list timer;
46 struct ts_event tc;
47 int pendown;
48 int power_mode;
49 int irq_gpio;
50 struct corgits_machinfo *machinfo;
51};
52
53#ifdef CONFIG_PXA25x
54#define CCNT(a) asm volatile ("mrc p14, 0, %0, C1, C0, 0" : "=r"(a))
55#define PMNC_GET(x) asm volatile ("mrc p14, 0, %0, C0, C0, 0" : "=r"(x))
56#define PMNC_SET(x) asm volatile ("mcr p14, 0, %0, C0, C0, 0" : : "r"(x))
57#endif
58#ifdef CONFIG_PXA27x
59#define CCNT(a) asm volatile ("mrc p14, 0, %0, C1, C1, 0" : "=r"(a))
60#define PMNC_GET(x) asm volatile ("mrc p14, 0, %0, C0, C1, 0" : "=r"(x))
61#define PMNC_SET(x) asm volatile ("mcr p14, 0, %0, C0, C1, 0" : : "r"(x))
62#endif
63
64/* ADS7846 Touch Screen Controller bit definitions */
65#define ADSCTRL_PD0 (1u << 0) /* PD0 */
66#define ADSCTRL_PD1 (1u << 1) /* PD1 */
67#define ADSCTRL_DFR (1u << 2) /* SER/DFR */
68#define ADSCTRL_MOD (1u << 3) /* Mode */
69#define ADSCTRL_ADR_SH 4 /* Address setting */
70#define ADSCTRL_STS (1u << 7) /* Start Bit */
71
72/* External Functions */
73extern unsigned int get_clk_frequency_khz(int info);
74
75static unsigned long calc_waittime(struct corgi_ts *corgi_ts)
76{
77 unsigned long hsync_invperiod = corgi_ts->machinfo->get_hsync_invperiod();
78
79 if (hsync_invperiod)
80 return get_clk_frequency_khz(0)*1000/hsync_invperiod;
81 else
82 return 0;
83}
84
85static int sync_receive_data_send_cmd(struct corgi_ts *corgi_ts, int doRecive, int doSend,
86 unsigned int address, unsigned long wait_time)
87{
88 unsigned long timer1 = 0, timer2, pmnc = 0;
89 int pos = 0;
90
91 if (wait_time && doSend) {
92 PMNC_GET(pmnc);
93 if (!(pmnc & 0x01))
94 PMNC_SET(0x01);
95
96 /* polling HSync */
97 corgi_ts->machinfo->wait_hsync();
98 /* get CCNT */
99 CCNT(timer1);
100 }
101
102 if (doRecive)
103 pos = corgi_ssp_ads7846_get();
104
105 if (doSend) {
106 int cmd = ADSCTRL_PD0 | ADSCTRL_PD1 | (address << ADSCTRL_ADR_SH) | ADSCTRL_STS;
107 /* dummy command */
108 corgi_ssp_ads7846_put(cmd);
109 corgi_ssp_ads7846_get();
110
111 if (wait_time) {
112 /* Wait after HSync */
113 CCNT(timer2);
114 if (timer2-timer1 > wait_time) {
115 /* too slow - timeout, try again */
116 corgi_ts->machinfo->wait_hsync();
117 /* get CCNT */
118 CCNT(timer1);
119 /* Wait after HSync */
120 CCNT(timer2);
121 }
122 while (timer2 - timer1 < wait_time)
123 CCNT(timer2);
124 }
125 corgi_ssp_ads7846_put(cmd);
126 if (wait_time && !(pmnc & 0x01))
127 PMNC_SET(pmnc);
128 }
129 return pos;
130}
131
132static int read_xydata(struct corgi_ts *corgi_ts)
133{
134 unsigned int x, y, z1, z2;
135 unsigned long flags, wait_time;
136
137 /* critical section */
138 local_irq_save(flags);
139 corgi_ssp_ads7846_lock();
140 wait_time = calc_waittime(corgi_ts);
141
142 /* Y-axis */
143 sync_receive_data_send_cmd(corgi_ts, 0, 1, 1u, wait_time);
144
145 /* Y-axis */
146 sync_receive_data_send_cmd(corgi_ts, 1, 1, 1u, wait_time);
147
148 /* X-axis */
149 y = sync_receive_data_send_cmd(corgi_ts, 1, 1, 5u, wait_time);
150
151 /* Z1 */
152 x = sync_receive_data_send_cmd(corgi_ts, 1, 1, 3u, wait_time);
153
154 /* Z2 */
155 z1 = sync_receive_data_send_cmd(corgi_ts, 1, 1, 4u, wait_time);
156 z2 = sync_receive_data_send_cmd(corgi_ts, 1, 0, 4u, wait_time);
157
158 /* Power-Down Enable */
159 corgi_ssp_ads7846_put((1u << ADSCTRL_ADR_SH) | ADSCTRL_STS);
160 corgi_ssp_ads7846_get();
161
162 corgi_ssp_ads7846_unlock();
163 local_irq_restore(flags);
164
165 if (x== 0 || y == 0 || z1 == 0 || (x * (z2 - z1) / z1) >= 15000) {
166 corgi_ts->tc.pressure = 0;
167 return 0;
168 }
169
170 corgi_ts->tc.x = x;
171 corgi_ts->tc.y = y;
172 corgi_ts->tc.pressure = (x * (z2 - z1)) / z1;
173 return 1;
174}
175
176static void new_data(struct corgi_ts *corgi_ts)
177{
178 struct input_dev *dev = corgi_ts->input;
179
180 if (corgi_ts->power_mode != PWR_MODE_ACTIVE)
181 return;
182
183 if (!corgi_ts->tc.pressure && corgi_ts->pendown == 0)
184 return;
185
186 input_report_abs(dev, ABS_X, corgi_ts->tc.x);
187 input_report_abs(dev, ABS_Y, corgi_ts->tc.y);
188 input_report_abs(dev, ABS_PRESSURE, corgi_ts->tc.pressure);
189 input_report_key(dev, BTN_TOUCH, corgi_ts->pendown);
190 input_sync(dev);
191}
192
193static void ts_interrupt_main(struct corgi_ts *corgi_ts, int isTimer)
194{
195 if ((GPLR(IRQ_TO_GPIO(corgi_ts->irq_gpio)) & GPIO_bit(IRQ_TO_GPIO(corgi_ts->irq_gpio))) == 0) {
196 /* Disable Interrupt */
197 set_irq_type(corgi_ts->irq_gpio, IRQ_TYPE_NONE);
198 if (read_xydata(corgi_ts)) {
199 corgi_ts->pendown = 1;
200 new_data(corgi_ts);
201 }
202 mod_timer(&corgi_ts->timer, jiffies + HZ / 100);
203 } else {
204 if (corgi_ts->pendown == 1 || corgi_ts->pendown == 2) {
205 mod_timer(&corgi_ts->timer, jiffies + HZ / 100);
206 corgi_ts->pendown++;
207 return;
208 }
209
210 if (corgi_ts->pendown) {
211 corgi_ts->tc.pressure = 0;
212 new_data(corgi_ts);
213 }
214
215 /* Enable Falling Edge */
216 set_irq_type(corgi_ts->irq_gpio, IRQ_TYPE_EDGE_FALLING);
217 corgi_ts->pendown = 0;
218 }
219}
220
221static void corgi_ts_timer(unsigned long data)
222{
223 struct corgi_ts *corgits_data = (struct corgi_ts *) data;
224
225 ts_interrupt_main(corgits_data, 1);
226}
227
228static irqreturn_t ts_interrupt(int irq, void *dev_id)
229{
230 struct corgi_ts *corgits_data = dev_id;
231
232 ts_interrupt_main(corgits_data, 0);
233 return IRQ_HANDLED;
234}
235
236#ifdef CONFIG_PM
237static int corgits_suspend(struct platform_device *dev, pm_message_t state)
238{
239 struct corgi_ts *corgi_ts = platform_get_drvdata(dev);
240
241 if (corgi_ts->pendown) {
242 del_timer_sync(&corgi_ts->timer);
243 corgi_ts->tc.pressure = 0;
244 new_data(corgi_ts);
245 corgi_ts->pendown = 0;
246 }
247 corgi_ts->power_mode = PWR_MODE_SUSPEND;
248
249 corgi_ssp_ads7846_putget((1u << ADSCTRL_ADR_SH) | ADSCTRL_STS);
250
251 return 0;
252}
253
254static int corgits_resume(struct platform_device *dev)
255{
256 struct corgi_ts *corgi_ts = platform_get_drvdata(dev);
257
258 corgi_ssp_ads7846_putget((4u << ADSCTRL_ADR_SH) | ADSCTRL_STS);
259 /* Enable Falling Edge */
260 set_irq_type(corgi_ts->irq_gpio, IRQ_TYPE_EDGE_FALLING);
261 corgi_ts->power_mode = PWR_MODE_ACTIVE;
262
263 return 0;
264}
265#else
266#define corgits_suspend NULL
267#define corgits_resume NULL
268#endif
269
270static int __devinit corgits_probe(struct platform_device *pdev)
271{
272 struct corgi_ts *corgi_ts;
273 struct input_dev *input_dev;
274 int err = -ENOMEM;
275
276 corgi_ts = kzalloc(sizeof(struct corgi_ts), GFP_KERNEL);
277 input_dev = input_allocate_device();
278 if (!corgi_ts || !input_dev)
279 goto fail1;
280
281 platform_set_drvdata(pdev, corgi_ts);
282
283 corgi_ts->machinfo = pdev->dev.platform_data;
284 corgi_ts->irq_gpio = platform_get_irq(pdev, 0);
285
286 if (corgi_ts->irq_gpio < 0) {
287 err = -ENODEV;
288 goto fail1;
289 }
290
291 corgi_ts->input = input_dev;
292
293 init_timer(&corgi_ts->timer);
294 corgi_ts->timer.data = (unsigned long) corgi_ts;
295 corgi_ts->timer.function = corgi_ts_timer;
296
297 input_dev->name = "Corgi Touchscreen";
298 input_dev->phys = "corgits/input0";
299 input_dev->id.bustype = BUS_HOST;
300 input_dev->id.vendor = 0x0001;
301 input_dev->id.product = 0x0002;
302 input_dev->id.version = 0x0100;
303 input_dev->dev.parent = &pdev->dev;
304
305 input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
306 input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
307 input_set_abs_params(input_dev, ABS_X, X_AXIS_MIN, X_AXIS_MAX, 0, 0);
308 input_set_abs_params(input_dev, ABS_Y, Y_AXIS_MIN, Y_AXIS_MAX, 0, 0);
309 input_set_abs_params(input_dev, ABS_PRESSURE, PRESSURE_MIN, PRESSURE_MAX, 0, 0);
310
311 pxa_gpio_mode(IRQ_TO_GPIO(corgi_ts->irq_gpio) | GPIO_IN);
312
313 /* Initiaize ADS7846 Difference Reference mode */
314 corgi_ssp_ads7846_putget((1u << ADSCTRL_ADR_SH) | ADSCTRL_STS);
315 mdelay(5);
316 corgi_ssp_ads7846_putget((3u << ADSCTRL_ADR_SH) | ADSCTRL_STS);
317 mdelay(5);
318 corgi_ssp_ads7846_putget((4u << ADSCTRL_ADR_SH) | ADSCTRL_STS);
319 mdelay(5);
320 corgi_ssp_ads7846_putget((5u << ADSCTRL_ADR_SH) | ADSCTRL_STS);
321 mdelay(5);
322
323 if (request_irq(corgi_ts->irq_gpio, ts_interrupt, IRQF_DISABLED, "ts", corgi_ts)) {
324 err = -EBUSY;
325 goto fail1;
326 }
327
328 err = input_register_device(corgi_ts->input);
329 if (err)
330 goto fail2;
331
332 corgi_ts->power_mode = PWR_MODE_ACTIVE;
333
334 /* Enable Falling Edge */
335 set_irq_type(corgi_ts->irq_gpio, IRQ_TYPE_EDGE_FALLING);
336
337 return 0;
338
339 fail2: free_irq(corgi_ts->irq_gpio, corgi_ts);
340 fail1: input_free_device(input_dev);
341 kfree(corgi_ts);
342 return err;
343}
344
345static int __devexit corgits_remove(struct platform_device *pdev)
346{
347 struct corgi_ts *corgi_ts = platform_get_drvdata(pdev);
348
349 free_irq(corgi_ts->irq_gpio, corgi_ts);
350 del_timer_sync(&corgi_ts->timer);
351 corgi_ts->machinfo->put_hsync();
352 input_unregister_device(corgi_ts->input);
353 kfree(corgi_ts);
354
355 return 0;
356}
357
358static struct platform_driver corgits_driver = {
359 .probe = corgits_probe,
360 .remove = __devexit_p(corgits_remove),
361 .suspend = corgits_suspend,
362 .resume = corgits_resume,
363 .driver = {
364 .name = "corgi-ts",
365 .owner = THIS_MODULE,
366 },
367};
368
369static int __init corgits_init(void)
370{
371 return platform_driver_register(&corgits_driver);
372}
373
374static void __exit corgits_exit(void)
375{
376 platform_driver_unregister(&corgits_driver);
377}
378
379module_init(corgits_init);
380module_exit(corgits_exit);
381
382MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
383MODULE_DESCRIPTION("Corgi TouchScreen Driver");
384MODULE_LICENSE("GPL");
385MODULE_ALIAS("platform:corgi-ts");
diff --git a/drivers/input/touchscreen/eeti_ts.c b/drivers/input/touchscreen/eeti_ts.c
index 204b8a1a601c..75f8b73010fa 100644
--- a/drivers/input/touchscreen/eeti_ts.c
+++ b/drivers/input/touchscreen/eeti_ts.c
@@ -124,14 +124,25 @@ static irqreturn_t eeti_ts_isr(int irq, void *dev_id)
124 return IRQ_HANDLED; 124 return IRQ_HANDLED;
125} 125}
126 126
127static int eeti_ts_open(struct input_dev *dev) 127static void eeti_ts_start(struct eeti_ts_priv *priv)
128{ 128{
129 struct eeti_ts_priv *priv = input_get_drvdata(dev);
130
131 enable_irq(priv->irq); 129 enable_irq(priv->irq);
132 130
133 /* Read the events once to arm the IRQ */ 131 /* Read the events once to arm the IRQ */
134 eeti_ts_read(&priv->work); 132 eeti_ts_read(&priv->work);
133}
134
135static void eeti_ts_stop(struct eeti_ts_priv *priv)
136{
137 disable_irq(priv->irq);
138 cancel_work_sync(&priv->work);
139}
140
141static int eeti_ts_open(struct input_dev *dev)
142{
143 struct eeti_ts_priv *priv = input_get_drvdata(dev);
144
145 eeti_ts_start(priv);
135 146
136 return 0; 147 return 0;
137} 148}
@@ -140,8 +151,7 @@ static void eeti_ts_close(struct input_dev *dev)
140{ 151{
141 struct eeti_ts_priv *priv = input_get_drvdata(dev); 152 struct eeti_ts_priv *priv = input_get_drvdata(dev);
142 153
143 disable_irq(priv->irq); 154 eeti_ts_stop(priv);
144 cancel_work_sync(&priv->work);
145} 155}
146 156
147static int __devinit eeti_ts_probe(struct i2c_client *client, 157static int __devinit eeti_ts_probe(struct i2c_client *client,
@@ -153,10 +163,12 @@ static int __devinit eeti_ts_probe(struct i2c_client *client,
153 unsigned int irq_flags; 163 unsigned int irq_flags;
154 int err = -ENOMEM; 164 int err = -ENOMEM;
155 165
156 /* In contrast to what's described in the datasheet, there seems 166 /*
167 * In contrast to what's described in the datasheet, there seems
157 * to be no way of probing the presence of that device using I2C 168 * to be no way of probing the presence of that device using I2C
158 * commands. So we need to blindly believe it is there, and wait 169 * commands. So we need to blindly believe it is there, and wait
159 * for interrupts to occur. */ 170 * for interrupts to occur.
171 */
160 172
161 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 173 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
162 if (!priv) { 174 if (!priv) {
@@ -212,9 +224,11 @@ static int __devinit eeti_ts_probe(struct i2c_client *client,
212 goto err2; 224 goto err2;
213 } 225 }
214 226
215 /* Disable the irq for now. It will be enabled once the input device 227 /*
216 * is opened. */ 228 * Disable the device for now. It will be enabled once the
217 disable_irq(priv->irq); 229 * input device is opened.
230 */
231 eeti_ts_stop(priv);
218 232
219 device_init_wakeup(&client->dev, 0); 233 device_init_wakeup(&client->dev, 0);
220 return 0; 234 return 0;
@@ -235,6 +249,12 @@ static int __devexit eeti_ts_remove(struct i2c_client *client)
235 struct eeti_ts_priv *priv = i2c_get_clientdata(client); 249 struct eeti_ts_priv *priv = i2c_get_clientdata(client);
236 250
237 free_irq(priv->irq, priv); 251 free_irq(priv->irq, priv);
252 /*
253 * eeti_ts_stop() leaves IRQ disabled. We need to re-enable it
254 * so that device still works if we reload the driver.
255 */
256 enable_irq(priv->irq);
257
238 input_unregister_device(priv->input); 258 input_unregister_device(priv->input);
239 i2c_set_clientdata(client, NULL); 259 i2c_set_clientdata(client, NULL);
240 kfree(priv); 260 kfree(priv);
@@ -246,6 +266,14 @@ static int __devexit eeti_ts_remove(struct i2c_client *client)
246static int eeti_ts_suspend(struct i2c_client *client, pm_message_t mesg) 266static int eeti_ts_suspend(struct i2c_client *client, pm_message_t mesg)
247{ 267{
248 struct eeti_ts_priv *priv = i2c_get_clientdata(client); 268 struct eeti_ts_priv *priv = i2c_get_clientdata(client);
269 struct input_dev *input_dev = priv->input;
270
271 mutex_lock(&input_dev->mutex);
272
273 if (input_dev->users)
274 eeti_ts_stop(priv);
275
276 mutex_unlock(&input_dev->mutex);
249 277
250 if (device_may_wakeup(&client->dev)) 278 if (device_may_wakeup(&client->dev))
251 enable_irq_wake(priv->irq); 279 enable_irq_wake(priv->irq);
@@ -256,10 +284,18 @@ static int eeti_ts_suspend(struct i2c_client *client, pm_message_t mesg)
256static int eeti_ts_resume(struct i2c_client *client) 284static int eeti_ts_resume(struct i2c_client *client)
257{ 285{
258 struct eeti_ts_priv *priv = i2c_get_clientdata(client); 286 struct eeti_ts_priv *priv = i2c_get_clientdata(client);
287 struct input_dev *input_dev = priv->input;
259 288
260 if (device_may_wakeup(&client->dev)) 289 if (device_may_wakeup(&client->dev))
261 disable_irq_wake(priv->irq); 290 disable_irq_wake(priv->irq);
262 291
292 mutex_lock(&input_dev->mutex);
293
294 if (input_dev->users)
295 eeti_ts_start(priv);
296
297 mutex_unlock(&input_dev->mutex);
298
263 return 0; 299 return 0;
264} 300}
265#else 301#else
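
The eeti_ts rework funnels open/close and suspend/resume through shared eeti_ts_start()/eeti_ts_stop() helpers, and the PM paths only touch the hardware when the input device is actually open, checked under input_dev->mutex. The shape of that pattern, reduced to portable C with a pthread mutex standing in for the input core's lock (compile with -pthread):

/* Pattern sketch only; the real code operates on struct input_dev. */
#include <pthread.h>
#include <stdio.h>

struct fake_input_dev {
	pthread_mutex_t mutex;
	int users;		/* how many opens are outstanding */
};

static void eeti_stop(void)  { printf("irq disabled, work cancelled\n"); }
static void eeti_start(void) { printf("irq enabled, events re-armed\n"); }

static void suspend_device(struct fake_input_dev *dev)
{
	pthread_mutex_lock(&dev->mutex);
	if (dev->users)
		eeti_stop();
	pthread_mutex_unlock(&dev->mutex);
}

static void resume_device(struct fake_input_dev *dev)
{
	pthread_mutex_lock(&dev->mutex);
	if (dev->users)
		eeti_start();
	pthread_mutex_unlock(&dev->mutex);
}

int main(void)
{
	struct fake_input_dev dev;

	pthread_mutex_init(&dev.mutex, NULL);
	dev.users = 1;		/* pretend the device is open */

	suspend_device(&dev);
	resume_device(&dev);

	pthread_mutex_destroy(&dev.mutex);
	return 0;
}

Skipping the stop/start when nobody holds the device open avoids re-enabling an IRQ that close() already left disabled, which is also why remove() re-enables it explicitly before freeing it.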
diff --git a/drivers/input/touchscreen/s3c2410_ts.c b/drivers/input/touchscreen/s3c2410_ts.c
index 98a7d1279486..e0b7c834111d 100644
--- a/drivers/input/touchscreen/s3c2410_ts.c
+++ b/drivers/input/touchscreen/s3c2410_ts.c
@@ -37,9 +37,7 @@
37 37
38#include <plat/adc.h> 38#include <plat/adc.h>
39#include <plat/regs-adc.h> 39#include <plat/regs-adc.h>
40 40#include <plat/ts.h>
41#include <mach/regs-gpio.h>
42#include <mach/ts.h>
43 41
44#define TSC_SLEEP (S3C2410_ADCTSC_PULL_UP_DISABLE | S3C2410_ADCTSC_XY_PST(0)) 42#define TSC_SLEEP (S3C2410_ADCTSC_PULL_UP_DISABLE | S3C2410_ADCTSC_XY_PST(0))
45 43
@@ -57,6 +55,8 @@
57 S3C2410_ADCTSC_AUTO_PST | \ 55 S3C2410_ADCTSC_AUTO_PST | \
58 S3C2410_ADCTSC_XY_PST(0)) 56 S3C2410_ADCTSC_XY_PST(0))
59 57
58#define FEAT_PEN_IRQ (1 << 0) /* HAS ADCCLRINTPNDNUP */
59
60/* Per-touchscreen data. */ 60/* Per-touchscreen data. */
61 61
62/** 62/**
@@ -71,6 +71,7 @@
71 * @irq_tc: The interrupt number for pen up/down interrupt 71 * @irq_tc: The interrupt number for pen up/down interrupt
72 * @count: The number of samples collected. 72 * @count: The number of samples collected.
73 * @shift: The log2 of the maximum count to read in one go. 73 * @shift: The log2 of the maximum count to read in one go.
74 * @features: The features supported by the TSADC MOdule.
74 */ 75 */
75struct s3c2410ts { 76struct s3c2410ts {
76 struct s3c_adc_client *client; 77 struct s3c_adc_client *client;
@@ -83,26 +84,12 @@ struct s3c2410ts {
83 int irq_tc; 84 int irq_tc;
84 int count; 85 int count;
85 int shift; 86 int shift;
87 int features;
86}; 88};
87 89
88static struct s3c2410ts ts; 90static struct s3c2410ts ts;
89 91
90/** 92/**
91 * s3c2410_ts_connect - configure gpio for s3c2410 systems
92 *
93 * Configure the GPIO for the S3C2410 system, where we have external FETs
94 * connected to the device (later systems such as the S3C2440 integrate
95 * these into the device).
96*/
97static inline void s3c2410_ts_connect(void)
98{
99 s3c2410_gpio_cfgpin(S3C2410_GPG(12), S3C2410_GPG12_XMON);
100 s3c2410_gpio_cfgpin(S3C2410_GPG(13), S3C2410_GPG13_nXPON);
101 s3c2410_gpio_cfgpin(S3C2410_GPG(14), S3C2410_GPG14_YMON);
102 s3c2410_gpio_cfgpin(S3C2410_GPG(15), S3C2410_GPG15_nYPON);
103}
104
105/**
106 * get_down - return the down state of the pen 93 * get_down - return the down state of the pen
107 * @data0: The data read from ADCDAT0 register. 94 * @data0: The data read from ADCDAT0 register.
108 * @data1: The data read from ADCDAT1 register. 95 * @data1: The data read from ADCDAT1 register.
@@ -188,6 +175,11 @@ static irqreturn_t stylus_irq(int irq, void *dev_id)
188 else 175 else
189 dev_info(ts.dev, "%s: count=%d\n", __func__, ts.count); 176 dev_info(ts.dev, "%s: count=%d\n", __func__, ts.count);
190 177
178 if (ts.features & FEAT_PEN_IRQ) {
179 /* Clear pen down/up interrupt */
180 writel(0x0, ts.io + S3C64XX_ADCCLRINTPNDNUP);
181 }
182
191 return IRQ_HANDLED; 183 return IRQ_HANDLED;
192} 184}
193 185
@@ -296,9 +288,9 @@ static int __devinit s3c2410ts_probe(struct platform_device *pdev)
296 goto err_clk; 288 goto err_clk;
297 } 289 }
298 290
299 /* Configure the touchscreen external FETs on the S3C2410 */ 291 /* inititalise the gpio */
300 if (!platform_get_device_id(pdev)->driver_data) 292 if (info->cfg_gpio)
301 s3c2410_ts_connect(); 293 info->cfg_gpio(to_platform_device(ts.dev));
302 294
303 ts.client = s3c_adc_register(pdev, s3c24xx_ts_select, 295 ts.client = s3c_adc_register(pdev, s3c24xx_ts_select,
304 s3c24xx_ts_conversion, 1); 296 s3c24xx_ts_conversion, 1);
@@ -334,6 +326,7 @@ static int __devinit s3c2410ts_probe(struct platform_device *pdev)
334 ts.input->id.version = 0x0102; 326 ts.input->id.version = 0x0102;
335 327
336 ts.shift = info->oversampling_shift; 328 ts.shift = info->oversampling_shift;
329 ts.features = platform_get_device_id(pdev)->driver_data;
337 330
338 ret = request_irq(ts.irq_tc, stylus_irq, IRQF_DISABLED, 331 ret = request_irq(ts.irq_tc, stylus_irq, IRQF_DISABLED,
339 "s3c2410_ts_pen", ts.input); 332 "s3c2410_ts_pen", ts.input);
@@ -420,15 +413,14 @@ static struct dev_pm_ops s3c_ts_pmops = {
420#endif 413#endif
421 414
422static struct platform_device_id s3cts_driver_ids[] = { 415static struct platform_device_id s3cts_driver_ids[] = {
423 { "s3c2410-ts", 0 }, 416 { "s3c64xx-ts", FEAT_PEN_IRQ },
424 { "s3c2440-ts", 1 },
425 { } 417 { }
426}; 418};
427MODULE_DEVICE_TABLE(platform, s3cts_driver_ids); 419MODULE_DEVICE_TABLE(platform, s3cts_driver_ids);
428 420
429static struct platform_driver s3c_ts_driver = { 421static struct platform_driver s3c_ts_driver = {
430 .driver = { 422 .driver = {
431 .name = "s3c24xx-ts", 423 .name = "samsung-ts",
432 .owner = THIS_MODULE, 424 .owner = THIS_MODULE,
433#ifdef CONFIG_PM 425#ifdef CONFIG_PM
434 .pm = &s3c_ts_pmops, 426 .pm = &s3c_ts_pmops,
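
s3c2410_ts.c now keys optional behaviour off the driver_data word in its platform_device_id table: the s3c64xx entry carries FEAT_PEN_IRQ, and the IRQ handler clears the pen up/down latch only when that feature bit was recorded at probe time. A small sketch of the table-plus-flag pattern; the lookup helper here is invented for the illustration, not taken from the driver:

/* Feature-flag lookup sketch; only the flag value mirrors the patch. */
#include <stdio.h>
#include <string.h>

#define FEAT_PEN_IRQ (1 << 0)

struct ts_id { const char *name; unsigned long driver_data; };

static const struct ts_id ids[] = {
	{ "s3c64xx-ts", FEAT_PEN_IRQ },
	{ NULL, 0 },
};

static unsigned long lookup_features(const char *name)
{
	for (const struct ts_id *id = ids; id->name; id++)
		if (!strcmp(id->name, name))
			return id->driver_data;
	return 0;
}

int main(void)
{
	unsigned long features = lookup_features("s3c64xx-ts");

	if (features & FEAT_PEN_IRQ)
		printf("would clear the pen up/down interrupt register\n");
	return 0;
}

Carrying the quirk in the id table keeps the fast path to a single bit test and avoids sprinkling SoC-specific #ifdefs through the interrupt handler.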
diff --git a/drivers/isdn/divert/divert_procfs.c b/drivers/isdn/divert/divert_procfs.c
index 9f49d9065791..c53e2417e7d4 100644
--- a/drivers/isdn/divert/divert_procfs.c
+++ b/drivers/isdn/divert/divert_procfs.c
@@ -20,6 +20,7 @@
20#include <linux/sched.h> 20#include <linux/sched.h>
21#include <linux/isdnif.h> 21#include <linux/isdnif.h>
22#include <net/net_namespace.h> 22#include <net/net_namespace.h>
23#include <linux/smp_lock.h>
23#include "isdn_divert.h" 24#include "isdn_divert.h"
24 25
25 26
@@ -177,9 +178,7 @@ isdn_divert_close(struct inode *ino, struct file *filep)
177/*********/ 178/*********/
178/* IOCTL */ 179/* IOCTL */
179/*********/ 180/*********/
180static int 181static int isdn_divert_ioctl_unlocked(struct file *file, uint cmd, ulong arg)
181isdn_divert_ioctl(struct inode *inode, struct file *file,
182 uint cmd, ulong arg)
183{ 182{
184 divert_ioctl dioctl; 183 divert_ioctl dioctl;
185 int i; 184 int i;
@@ -258,6 +257,17 @@ isdn_divert_ioctl(struct inode *inode, struct file *file,
258 return copy_to_user((void __user *)arg, &dioctl, sizeof(dioctl)) ? -EFAULT : 0; 257 return copy_to_user((void __user *)arg, &dioctl, sizeof(dioctl)) ? -EFAULT : 0;
259} /* isdn_divert_ioctl */ 258} /* isdn_divert_ioctl */
260 259
260static long isdn_divert_ioctl(struct file *file, uint cmd, ulong arg)
261{
262 long ret;
263
264 lock_kernel();
265 ret = isdn_divert_ioctl_unlocked(file, cmd, arg);
266 unlock_kernel();
267
268 return ret;
269}
270
261static const struct file_operations isdn_fops = 271static const struct file_operations isdn_fops =
262{ 272{
263 .owner = THIS_MODULE, 273 .owner = THIS_MODULE,
@@ -265,7 +275,7 @@ static const struct file_operations isdn_fops =
265 .read = isdn_divert_read, 275 .read = isdn_divert_read,
266 .write = isdn_divert_write, 276 .write = isdn_divert_write,
267 .poll = isdn_divert_poll, 277 .poll = isdn_divert_poll,
268 .ioctl = isdn_divert_ioctl, 278 .unlocked_ioctl = isdn_divert_ioctl,
269 .open = isdn_divert_open, 279 .open = isdn_divert_open,
270 .release = isdn_divert_close, 280 .release = isdn_divert_close,
271}; 281};
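
divert_procfs.c follows the standard .ioctl to .unlocked_ioctl conversion: the original handler body is kept and a thin wrapper now takes and drops the big kernel lock explicitly, since the VFS no longer does that for unlocked_ioctl. The shape of that wrapper, with a plain mutex standing in for lock_kernel()/unlock_kernel() (compile with -pthread):

/* Wrapper shape only; a mutex stands in for the big kernel lock. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

static long divert_ioctl_unlocked(unsigned int cmd, unsigned long arg)
{
	printf("cmd=%u arg=%lu handled\n", cmd, arg);
	return 0;
}

static long divert_ioctl(unsigned int cmd, unsigned long arg)
{
	long ret;

	pthread_mutex_lock(&big_lock);		/* was: lock_kernel()   */
	ret = divert_ioctl_unlocked(cmd, arg);
	pthread_mutex_unlock(&big_lock);	/* was: unlock_kernel() */

	return ret;
}

int main(void)
{
	return (int)divert_ioctl(1, 42);
}

Keeping the locking visible inside the driver is what later allows the explicit lock to be narrowed or removed without touching the VFS interface again.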
diff --git a/drivers/isdn/hardware/avm/avm_cs.c b/drivers/isdn/hardware/avm/avm_cs.c
index 94b796d84053..f410d0eb2fef 100644
--- a/drivers/isdn/hardware/avm/avm_cs.c
+++ b/drivers/isdn/hardware/avm/avm_cs.c
@@ -13,7 +13,6 @@
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/ptrace.h> 15#include <linux/ptrace.h>
16#include <linux/slab.h>
17#include <linux/string.h> 16#include <linux/string.h>
18#include <linux/tty.h> 17#include <linux/tty.h>
19#include <linux/serial.h> 18#include <linux/serial.h>
@@ -61,31 +60,6 @@ static void avmcs_release(struct pcmcia_device *link);
61 60
62static void avmcs_detach(struct pcmcia_device *p_dev); 61static void avmcs_detach(struct pcmcia_device *p_dev);
63 62
64/*
65 A linked list of "instances" of the skeleton device. Each actual
66 PCMCIA card corresponds to one device instance, and is described
67 by one struct pcmcia_device structure (defined in ds.h).
68
69 You may not want to use a linked list for this -- for example, the
70 memory card driver uses an array of struct pcmcia_device pointers, where minor
71 device numbers are used to derive the corresponding array index.
72*/
73
74/*
75 A driver needs to provide a dev_node_t structure for each device
76 on a card. In some cases, there is only one device per card (for
77 example, ethernet cards, modems). In other cases, there may be
78 many actual or logical devices (SCSI adapters, memory cards with
79 multiple partitions). The dev_node_t structures need to be kept
80 in a linked list starting at the 'dev' field of a struct pcmcia_device
81 structure. We allocate them in the card's private data structure,
82 because they generally can't be allocated dynamically.
83*/
84
85typedef struct local_info_t {
86 dev_node_t node;
87} local_info_t;
88
89/*====================================================================== 63/*======================================================================
90 64
91 avmcs_attach() creates an "instance" of the driver, allocating 65 avmcs_attach() creates an "instance" of the driver, allocating
@@ -100,32 +74,19 @@ typedef struct local_info_t {
100 74
101static int avmcs_probe(struct pcmcia_device *p_dev) 75static int avmcs_probe(struct pcmcia_device *p_dev)
102{ 76{
103 local_info_t *local;
104 77
105 /* The io structure describes IO port mapping */ 78 /* The io structure describes IO port mapping */
106 p_dev->io.NumPorts1 = 16; 79 p_dev->io.NumPorts1 = 16;
107 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 80 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
108 p_dev->io.NumPorts2 = 0; 81 p_dev->io.NumPorts2 = 0;
109 82
110 /* Interrupt setup */
111 p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
112
113 /* General socket configuration */ 83 /* General socket configuration */
114 p_dev->conf.Attributes = CONF_ENABLE_IRQ; 84 p_dev->conf.Attributes = CONF_ENABLE_IRQ;
115 p_dev->conf.IntType = INT_MEMORY_AND_IO; 85 p_dev->conf.IntType = INT_MEMORY_AND_IO;
116 p_dev->conf.ConfigIndex = 1; 86 p_dev->conf.ConfigIndex = 1;
117 p_dev->conf.Present = PRESENT_OPTION; 87 p_dev->conf.Present = PRESENT_OPTION;
118 88
119 /* Allocate space for private device-specific data */
120 local = kzalloc(sizeof(local_info_t), GFP_KERNEL);
121 if (!local)
122 goto err;
123 p_dev->priv = local;
124
125 return avmcs_config(p_dev); 89 return avmcs_config(p_dev);
126
127 err:
128 return -ENOMEM;
129} /* avmcs_attach */ 90} /* avmcs_attach */
130 91
131/*====================================================================== 92/*======================================================================
@@ -140,7 +101,6 @@ static int avmcs_probe(struct pcmcia_device *p_dev)
140static void avmcs_detach(struct pcmcia_device *link) 101static void avmcs_detach(struct pcmcia_device *link)
141{ 102{
142 avmcs_release(link); 103 avmcs_release(link);
143 kfree(link->priv);
144} /* avmcs_detach */ 104} /* avmcs_detach */
145 105
146/*====================================================================== 106/*======================================================================
@@ -171,14 +131,11 @@ static int avmcs_configcheck(struct pcmcia_device *p_dev,
171 131
172static int avmcs_config(struct pcmcia_device *link) 132static int avmcs_config(struct pcmcia_device *link)
173{ 133{
174 local_info_t *dev; 134 int i = -1;
175 int i;
176 char devname[128]; 135 char devname[128];
177 int cardtype; 136 int cardtype;
178 int (*addcard)(unsigned int port, unsigned irq); 137 int (*addcard)(unsigned int port, unsigned irq);
179 138
180 dev = link->priv;
181
182 devname[0] = 0; 139 devname[0] = 0;
183 if (link->prod_id[1]) 140 if (link->prod_id[1])
184 strlcpy(devname, link->prod_id[1], sizeof(devname)); 141 strlcpy(devname, link->prod_id[1], sizeof(devname));
@@ -190,11 +147,7 @@ static int avmcs_config(struct pcmcia_device *link)
190 return -ENODEV; 147 return -ENODEV;
191 148
192 do { 149 do {
193 /* 150 if (!link->irq) {
194 * allocate an interrupt line
195 */
196 i = pcmcia_request_irq(link, &link->irq);
197 if (i != 0) {
198 /* undo */ 151 /* undo */
199 pcmcia_disable_device(link); 152 pcmcia_disable_device(link);
200 break; 153 break;
@@ -211,15 +164,11 @@ static int avmcs_config(struct pcmcia_device *link)
211 164
212 } while (0); 165 } while (0);
213 166
214 /* At this point, the dev_node_t structure(s) should be
215 initialized and arranged in a linked list at link->dev. */
216
217 if (devname[0]) { 167 if (devname[0]) {
218 char *s = strrchr(devname, ' '); 168 char *s = strrchr(devname, ' ');
219 if (!s) 169 if (!s)
220 s = devname; 170 s = devname;
221 else s++; 171 else s++;
222 strcpy(dev->node.dev_name, s);
223 if (strcmp("M1", s) == 0) { 172 if (strcmp("M1", s) == 0) {
224 cardtype = AVM_CARDTYPE_M1; 173 cardtype = AVM_CARDTYPE_M1;
225 } else if (strcmp("M2", s) == 0) { 174 } else if (strcmp("M2", s) == 0) {
@@ -227,14 +176,8 @@ static int avmcs_config(struct pcmcia_device *link)
227 } else { 176 } else {
228 cardtype = AVM_CARDTYPE_B1; 177 cardtype = AVM_CARDTYPE_B1;
229 } 178 }
230 } else { 179 } else
231 strcpy(dev->node.dev_name, "b1");
232 cardtype = AVM_CARDTYPE_B1; 180 cardtype = AVM_CARDTYPE_B1;
233 }
234
235 dev->node.major = 64;
236 dev->node.minor = 0;
237 link->dev_node = &dev->node;
238 181
239 /* If any step failed, release any partially configured state */ 182 /* If any step failed, release any partially configured state */
240 if (i != 0) { 183 if (i != 0) {
@@ -249,13 +192,12 @@ static int avmcs_config(struct pcmcia_device *link)
249 default: 192 default:
250 case AVM_CARDTYPE_B1: addcard = b1pcmcia_addcard_b1; break; 193 case AVM_CARDTYPE_B1: addcard = b1pcmcia_addcard_b1; break;
251 } 194 }
252 if ((i = (*addcard)(link->io.BasePort1, link->irq.AssignedIRQ)) < 0) { 195 if ((i = (*addcard)(link->io.BasePort1, link->irq)) < 0) {
253 printk(KERN_ERR "avm_cs: failed to add AVM-%s-Controller at i/o %#x, irq %d\n", 196 dev_err(&link->dev, "avm_cs: failed to add AVM-Controller at i/o %#x, irq %d\n",
254 dev->node.dev_name, link->io.BasePort1, link->irq.AssignedIRQ); 197 link->io.BasePort1, link->irq);
255 avmcs_release(link); 198 avmcs_release(link);
256 return -ENODEV; 199 return -ENODEV;
257 } 200 }
258 dev->node.minor = i;
259 return 0; 201 return 0;
260 202
261} /* avmcs_config */ 203} /* avmcs_config */
@@ -270,7 +212,7 @@ static int avmcs_config(struct pcmcia_device *link)
270 212
271static void avmcs_release(struct pcmcia_device *link) 213static void avmcs_release(struct pcmcia_device *link)
272{ 214{
273 b1pcmcia_delcard(link->io.BasePort1, link->irq.AssignedIRQ); 215 b1pcmcia_delcard(link->io.BasePort1, link->irq);
274 pcmcia_disable_device(link); 216 pcmcia_disable_device(link);
275} /* avmcs_release */ 217} /* avmcs_release */
276 218
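
The avm_cs changes (and the avma1_cs, elsa_cs and sedlbauer_cs hunks that follow) all track the same PCMCIA core change: the IRQ is assigned by the core and published in link->irq, so drivers drop pcmcia_request_irq() and their dev_node_t bookkeeping and merely verify that an IRQ actually arrived. The recurring check, reduced to a stand-alone sketch with an invented stand-in device structure:

/* Sketch of the recurring check; struct and values are placeholders. */
#include <stdio.h>

struct fake_pcmcia_device { unsigned int irq; };

static int configure(struct fake_pcmcia_device *link)
{
	if (!link->irq)		/* was: if (pcmcia_request_irq(...) != 0) */
		return -1;	/* undo: disable the device */

	printf("adding card at irq %u\n", link->irq);
	return 0;
}

int main(void)
{
	struct fake_pcmcia_device link = { .irq = 11 };
	return configure(&link);
}

Because the core hands the number over up front, the error path shrinks to a single test and the per-card private structure that only existed to hold dev_node_t can be deleted outright.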
diff --git a/drivers/isdn/hisax/avma1_cs.c b/drivers/isdn/hisax/avma1_cs.c
index 8d1d63a02b34..a80a7617f16f 100644
--- a/drivers/isdn/hisax/avma1_cs.c
+++ b/drivers/isdn/hisax/avma1_cs.c
@@ -62,31 +62,6 @@ static void avma1cs_release(struct pcmcia_device *link);
62static void avma1cs_detach(struct pcmcia_device *p_dev) __devexit ; 62static void avma1cs_detach(struct pcmcia_device *p_dev) __devexit ;
63 63
64 64
65/*
66 A linked list of "instances" of the skeleton device. Each actual
67 PCMCIA card corresponds to one device instance, and is described
68 by one struct pcmcia_device structure (defined in ds.h).
69
70 You may not want to use a linked list for this -- for example, the
71 memory card driver uses an array of struct pcmcia_device pointers, where minor
72 device numbers are used to derive the corresponding array index.
73*/
74
75/*
76 A driver needs to provide a dev_node_t structure for each device
77 on a card. In some cases, there is only one device per card (for
78 example, ethernet cards, modems). In other cases, there may be
79 many actual or logical devices (SCSI adapters, memory cards with
80 multiple partitions). The dev_node_t structures need to be kept
81 in a linked list starting at the 'dev' field of a struct pcmcia_device
82 structure. We allocate them in the card's private data structure,
83 because they generally can't be allocated dynamically.
84*/
85
86typedef struct local_info_t {
87 dev_node_t node;
88} local_info_t;
89
90/*====================================================================== 65/*======================================================================
91 66
92 avma1cs_attach() creates an "instance" of the driver, allocating 67 avma1cs_attach() creates an "instance" of the driver, allocating
@@ -101,17 +76,8 @@ typedef struct local_info_t {
101 76
102static int __devinit avma1cs_probe(struct pcmcia_device *p_dev) 77static int __devinit avma1cs_probe(struct pcmcia_device *p_dev)
103{ 78{
104 local_info_t *local;
105
106 dev_dbg(&p_dev->dev, "avma1cs_attach()\n"); 79 dev_dbg(&p_dev->dev, "avma1cs_attach()\n");
107 80
108 /* Allocate space for private device-specific data */
109 local = kzalloc(sizeof(local_info_t), GFP_KERNEL);
110 if (!local)
111 return -ENOMEM;
112
113 p_dev->priv = local;
114
115 /* The io structure describes IO port mapping */ 81 /* The io structure describes IO port mapping */
116 p_dev->io.NumPorts1 = 16; 82 p_dev->io.NumPorts1 = 16;
117 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 83 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
@@ -119,9 +85,6 @@ static int __devinit avma1cs_probe(struct pcmcia_device *p_dev)
119 p_dev->io.Attributes2 = IO_DATA_PATH_WIDTH_16; 85 p_dev->io.Attributes2 = IO_DATA_PATH_WIDTH_16;
120 p_dev->io.IOAddrLines = 5; 86 p_dev->io.IOAddrLines = 5;
121 87
122 /* Interrupt setup */
123 p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
124
125 /* General socket configuration */ 88 /* General socket configuration */
126 p_dev->conf.Attributes = CONF_ENABLE_IRQ; 89 p_dev->conf.Attributes = CONF_ENABLE_IRQ;
127 p_dev->conf.IntType = INT_MEMORY_AND_IO; 90 p_dev->conf.IntType = INT_MEMORY_AND_IO;
@@ -176,14 +139,11 @@ static int avma1cs_configcheck(struct pcmcia_device *p_dev,
176 139
177static int __devinit avma1cs_config(struct pcmcia_device *link) 140static int __devinit avma1cs_config(struct pcmcia_device *link)
178{ 141{
179 local_info_t *dev; 142 int i = -1;
180 int i;
181 char devname[128]; 143 char devname[128];
182 IsdnCard_t icard; 144 IsdnCard_t icard;
183 int busy = 0; 145 int busy = 0;
184 146
185 dev = link->priv;
186
187 dev_dbg(&link->dev, "avma1cs_config(0x%p)\n", link); 147 dev_dbg(&link->dev, "avma1cs_config(0x%p)\n", link);
188 148
189 devname[0] = 0; 149 devname[0] = 0;
@@ -197,8 +157,7 @@ static int __devinit avma1cs_config(struct pcmcia_device *link)
197 /* 157 /*
198 * allocate an interrupt line 158 * allocate an interrupt line
199 */ 159 */
200 i = pcmcia_request_irq(link, &link->irq); 160 if (!link->irq) {
201 if (i != 0) {
202 /* undo */ 161 /* undo */
203 pcmcia_disable_device(link); 162 pcmcia_disable_device(link);
204 break; 163 break;
@@ -215,14 +174,6 @@ static int __devinit avma1cs_config(struct pcmcia_device *link)
215 174
216 } while (0); 175 } while (0);
217 176
218 /* At this point, the dev_node_t structure(s) should be
219 initialized and arranged in a linked list at link->dev. */
220
221 strcpy(dev->node.dev_name, "A1");
222 dev->node.major = 45;
223 dev->node.minor = 0;
224 link->dev_node = &dev->node;
225
226 /* If any step failed, release any partially configured state */ 177 /* If any step failed, release any partially configured state */
227 if (i != 0) { 178 if (i != 0) {
228 avma1cs_release(link); 179 avma1cs_release(link);
@@ -230,9 +181,9 @@ static int __devinit avma1cs_config(struct pcmcia_device *link)
230 } 181 }
231 182
232 printk(KERN_NOTICE "avma1_cs: checking at i/o %#x, irq %d\n", 183 printk(KERN_NOTICE "avma1_cs: checking at i/o %#x, irq %d\n",
233 link->io.BasePort1, link->irq.AssignedIRQ); 184 link->io.BasePort1, link->irq);
234 185
235 icard.para[0] = link->irq.AssignedIRQ; 186 icard.para[0] = link->irq;
236 icard.para[1] = link->io.BasePort1; 187 icard.para[1] = link->io.BasePort1;
237 icard.protocol = isdnprot; 188 icard.protocol = isdnprot;
238 icard.typ = ISDN_CTYPE_A1_PCMCIA; 189 icard.typ = ISDN_CTYPE_A1_PCMCIA;
@@ -243,7 +194,7 @@ static int __devinit avma1cs_config(struct pcmcia_device *link)
243 avma1cs_release(link); 194 avma1cs_release(link);
244 return -ENODEV; 195 return -ENODEV;
245 } 196 }
246 dev->node.minor = i; 197 link->priv = (void *) (unsigned long) i;
247 198
248 return 0; 199 return 0;
249} /* avma1cs_config */ 200} /* avma1cs_config */
@@ -258,12 +209,12 @@ static int __devinit avma1cs_config(struct pcmcia_device *link)
258 209
259static void avma1cs_release(struct pcmcia_device *link) 210static void avma1cs_release(struct pcmcia_device *link)
260{ 211{
261 local_info_t *local = link->priv; 212 unsigned long minor = (unsigned long) link->priv;
262 213
263 dev_dbg(&link->dev, "avma1cs_release(0x%p)\n", link); 214 dev_dbg(&link->dev, "avma1cs_release(0x%p)\n", link);
264 215
265 /* now unregister function with hisax */ 216 /* now unregister function with hisax */
266 HiSax_closecard(local->node.minor); 217 HiSax_closecard(minor);
267 218
268 pcmcia_disable_device(link); 219 pcmcia_disable_device(link);
269} /* avma1cs_release */ 220} /* avma1cs_release */
diff --git a/drivers/isdn/hisax/elsa_cs.c b/drivers/isdn/hisax/elsa_cs.c
index c9f2279e21f5..218927e3a4ea 100644
--- a/drivers/isdn/hisax/elsa_cs.c
+++ b/drivers/isdn/hisax/elsa_cs.c
@@ -87,24 +87,8 @@ static void elsa_cs_release(struct pcmcia_device *link);
87 87
88static void elsa_cs_detach(struct pcmcia_device *p_dev) __devexit; 88static void elsa_cs_detach(struct pcmcia_device *p_dev) __devexit;
89 89
90/*
91 A driver needs to provide a dev_node_t structure for each device
92 on a card. In some cases, there is only one device per card (for
93 example, ethernet cards, modems). In other cases, there may be
94 many actual or logical devices (SCSI adapters, memory cards with
95 multiple partitions). The dev_node_t structures need to be kept
96 in a linked list starting at the 'dev' field of a struct pcmcia_device
97 structure. We allocate them in the card's private data structure,
98 because they generally shouldn't be allocated dynamically.
99 In this case, we also provide a flag to indicate if a device is
100 "stopped" due to a power management event, or card ejection. The
101 device IO routines can use a flag like this to throttle IO to a
102 card that is not ready to accept it.
103*/
104
105typedef struct local_info_t { 90typedef struct local_info_t {
106 struct pcmcia_device *p_dev; 91 struct pcmcia_device *p_dev;
107 dev_node_t node;
108 int busy; 92 int busy;
109 int cardnr; 93 int cardnr;
110} local_info_t; 94} local_info_t;
@@ -136,10 +120,6 @@ static int __devinit elsa_cs_probe(struct pcmcia_device *link)
136 120
137 local->cardnr = -1; 121 local->cardnr = -1;
138 122
139 /* Interrupt setup */
140 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
141 link->irq.Handler = NULL;
142
143 /* 123 /*
144 General socket configuration defaults can go here. In this 124 General socket configuration defaults can go here. In this
145 client, we assume very little, and rely on the CIS for almost 125 client, we assume very little, and rely on the CIS for almost
@@ -223,28 +203,18 @@ static int __devinit elsa_cs_config(struct pcmcia_device *link)
223 if (i != 0) 203 if (i != 0)
224 goto failed; 204 goto failed;
225 205
226 i = pcmcia_request_irq(link, &link->irq); 206 if (!link->irq)
227 if (i != 0) {
228 link->irq.AssignedIRQ = 0;
229 goto failed; 207 goto failed;
230 }
231 208
232 i = pcmcia_request_configuration(link, &link->conf); 209 i = pcmcia_request_configuration(link, &link->conf);
233 if (i != 0) 210 if (i != 0)
234 goto failed; 211 goto failed;
235 212
236 /* At this point, the dev_node_t structure(s) should be
237 initialized and arranged in a linked list at link->dev. *//* */
238 sprintf(dev->node.dev_name, "elsa");
239 dev->node.major = dev->node.minor = 0x0;
240
241 link->dev_node = &dev->node;
242
243 /* Finally, report what we've done */ 213 /* Finally, report what we've done */
244 printk(KERN_INFO "%s: index 0x%02x: ", 214 dev_info(&link->dev, "index 0x%02x: ",
245 dev->node.dev_name, link->conf.ConfigIndex); 215 link->conf.ConfigIndex);
246 if (link->conf.Attributes & CONF_ENABLE_IRQ) 216 if (link->conf.Attributes & CONF_ENABLE_IRQ)
247 printk(", irq %d", link->irq.AssignedIRQ); 217 printk(", irq %d", link->irq);
248 if (link->io.NumPorts1) 218 if (link->io.NumPorts1)
249 printk(", io 0x%04x-0x%04x", link->io.BasePort1, 219 printk(", io 0x%04x-0x%04x", link->io.BasePort1,
250 link->io.BasePort1+link->io.NumPorts1-1); 220 link->io.BasePort1+link->io.NumPorts1-1);
@@ -253,7 +223,7 @@ static int __devinit elsa_cs_config(struct pcmcia_device *link)
253 link->io.BasePort2+link->io.NumPorts2-1); 223 link->io.BasePort2+link->io.NumPorts2-1);
254 printk("\n"); 224 printk("\n");
255 225
256 icard.para[0] = link->irq.AssignedIRQ; 226 icard.para[0] = link->irq;
257 icard.para[1] = link->io.BasePort1; 227 icard.para[1] = link->io.BasePort1;
258 icard.protocol = protocol; 228 icard.protocol = protocol;
259 icard.typ = ISDN_CTYPE_ELSA_PCMCIA; 229 icard.typ = ISDN_CTYPE_ELSA_PCMCIA;
diff --git a/drivers/isdn/hisax/sedlbauer_cs.c b/drivers/isdn/hisax/sedlbauer_cs.c
index 71b3ddef03bb..1f4feaab21af 100644
--- a/drivers/isdn/hisax/sedlbauer_cs.c
+++ b/drivers/isdn/hisax/sedlbauer_cs.c
@@ -87,32 +87,8 @@ static void sedlbauer_release(struct pcmcia_device *link);
87 87
88static void sedlbauer_detach(struct pcmcia_device *p_dev) __devexit; 88static void sedlbauer_detach(struct pcmcia_device *p_dev) __devexit;
89 89
90/*
91 You'll also need to prototype all the functions that will actually
92 be used to talk to your device. See 'memory_cs' for a good example
93 of a fully self-sufficient driver; the other drivers rely more or
94 less on other parts of the kernel.
95*/
96
97/*
98 A driver needs to provide a dev_node_t structure for each device
99 on a card. In some cases, there is only one device per card (for
100 example, ethernet cards, modems). In other cases, there may be
101 many actual or logical devices (SCSI adapters, memory cards with
102 multiple partitions). The dev_node_t structures need to be kept
103 in a linked list starting at the 'dev' field of a struct pcmcia_device
104 structure. We allocate them in the card's private data structure,
105 because they generally shouldn't be allocated dynamically.
106
107 In this case, we also provide a flag to indicate if a device is
108 "stopped" due to a power management event, or card ejection. The
109 device IO routines can use a flag like this to throttle IO to a
110 card that is not ready to accept it.
111*/
112
113typedef struct local_info_t { 90typedef struct local_info_t {
114 struct pcmcia_device *p_dev; 91 struct pcmcia_device *p_dev;
115 dev_node_t node;
116 int stop; 92 int stop;
117 int cardnr; 93 int cardnr;
118} local_info_t; 94} local_info_t;
@@ -143,10 +119,6 @@ static int __devinit sedlbauer_probe(struct pcmcia_device *link)
143 local->p_dev = link; 119 local->p_dev = link;
144 link->priv = local; 120 link->priv = local;
145 121
146 /* Interrupt setup */
147 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
148 link->irq.Handler = NULL;
149
150 /* 122 /*
151 General socket configuration defaults can go here. In this 123 General socket configuration defaults can go here. In this
152 client, we assume very little, and rely on the CIS for almost 124 client, we assume very little, and rely on the CIS for almost
@@ -227,9 +199,7 @@ static int sedlbauer_config_check(struct pcmcia_device *p_dev,
227 else if (dflt->vpp1.present & (1<<CISTPL_POWER_VNOM)) 199 else if (dflt->vpp1.present & (1<<CISTPL_POWER_VNOM))
228 p_dev->conf.Vpp = dflt->vpp1.param[CISTPL_POWER_VNOM]/10000; 200 p_dev->conf.Vpp = dflt->vpp1.param[CISTPL_POWER_VNOM]/10000;
229 201
230 /* Do we need to allocate an interrupt? */ 202 p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
231 if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1)
232 p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
233 203
234 /* IO window settings */ 204 /* IO window settings */
235 p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0; 205 p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
@@ -285,7 +255,6 @@ static int sedlbauer_config_check(struct pcmcia_device *p_dev,
285 255
286static int __devinit sedlbauer_config(struct pcmcia_device *link) 256static int __devinit sedlbauer_config(struct pcmcia_device *link)
287{ 257{
288 local_info_t *dev = link->priv;
289 win_req_t *req; 258 win_req_t *req;
290 int ret; 259 int ret;
291 IsdnCard_t icard; 260 IsdnCard_t icard;
@@ -313,17 +282,6 @@ static int __devinit sedlbauer_config(struct pcmcia_device *link)
313 goto failed; 282 goto failed;
314 283
315 /* 284 /*
316 Allocate an interrupt line. Note that this does not assign a
317 handler to the interrupt, unless the 'Handler' member of the
318 irq structure is initialized.
319 */
320 if (link->conf.Attributes & CONF_ENABLE_IRQ) {
321 ret = pcmcia_request_irq(link, &link->irq);
322 if (ret)
323 goto failed;
324 }
325
326 /*
327 This actually configures the PCMCIA socket -- setting up 285 This actually configures the PCMCIA socket -- setting up
328 the I/O windows and the interrupt mapping, and putting the 286 the I/O windows and the interrupt mapping, and putting the
329 card and host interface into "Memory and IO" mode. 287 card and host interface into "Memory and IO" mode.
@@ -332,21 +290,13 @@ static int __devinit sedlbauer_config(struct pcmcia_device *link)
332 if (ret) 290 if (ret)
333 goto failed; 291 goto failed;
334 292
335 /*
336 At this point, the dev_node_t structure(s) need to be
337 initialized and arranged in a linked list at link->dev.
338 */
339 sprintf(dev->node.dev_name, "sedlbauer");
340 dev->node.major = dev->node.minor = 0;
341 link->dev_node = &dev->node;
342
343 /* Finally, report what we've done */ 293 /* Finally, report what we've done */
344 printk(KERN_INFO "%s: index 0x%02x:", 294 dev_info(&link->dev, "index 0x%02x:",
345 dev->node.dev_name, link->conf.ConfigIndex); 295 link->conf.ConfigIndex);
346 if (link->conf.Vpp) 296 if (link->conf.Vpp)
347 printk(", Vpp %d.%d", link->conf.Vpp/10, link->conf.Vpp%10); 297 printk(", Vpp %d.%d", link->conf.Vpp/10, link->conf.Vpp%10);
348 if (link->conf.Attributes & CONF_ENABLE_IRQ) 298 if (link->conf.Attributes & CONF_ENABLE_IRQ)
349 printk(", irq %d", link->irq.AssignedIRQ); 299 printk(", irq %d", link->irq);
350 if (link->io.NumPorts1) 300 if (link->io.NumPorts1)
351 printk(", io 0x%04x-0x%04x", link->io.BasePort1, 301 printk(", io 0x%04x-0x%04x", link->io.BasePort1,
352 link->io.BasePort1+link->io.NumPorts1-1); 302 link->io.BasePort1+link->io.NumPorts1-1);
@@ -358,7 +308,7 @@ static int __devinit sedlbauer_config(struct pcmcia_device *link)
358 req->Base+req->Size-1); 308 req->Base+req->Size-1);
359 printk("\n"); 309 printk("\n");
360 310
361 icard.para[0] = link->irq.AssignedIRQ; 311 icard.para[0] = link->irq;
362 icard.para[1] = link->io.BasePort1; 312 icard.para[1] = link->io.BasePort1;
363 icard.protocol = protocol; 313 icard.protocol = protocol;
364 icard.typ = ISDN_CTYPE_SEDLBAUER_PCMCIA; 314 icard.typ = ISDN_CTYPE_SEDLBAUER_PCMCIA;
diff --git a/drivers/isdn/hisax/teles_cs.c b/drivers/isdn/hisax/teles_cs.c
index d010a0da8e19..5771955cc532 100644
--- a/drivers/isdn/hisax/teles_cs.c
+++ b/drivers/isdn/hisax/teles_cs.c
@@ -68,34 +68,8 @@ static void teles_cs_release(struct pcmcia_device *link);
68 68
69static void teles_detach(struct pcmcia_device *p_dev) __devexit ; 69static void teles_detach(struct pcmcia_device *p_dev) __devexit ;
70 70
71/*
72 A linked list of "instances" of the teles_cs device. Each actual
73 PCMCIA card corresponds to one device instance, and is described
74 by one struct pcmcia_device structure (defined in ds.h).
75
76 You may not want to use a linked list for this -- for example, the
77 memory card driver uses an array of struct pcmcia_device pointers, where minor
78 device numbers are used to derive the corresponding array index.
79*/
80
81/*
82 A driver needs to provide a dev_node_t structure for each device
83 on a card. In some cases, there is only one device per card (for
84 example, ethernet cards, modems). In other cases, there may be
85 many actual or logical devices (SCSI adapters, memory cards with
86 multiple partitions). The dev_node_t structures need to be kept
87 in a linked list starting at the 'dev' field of a struct pcmcia_device
88 structure. We allocate them in the card's private data structure,
89 because they generally shouldn't be allocated dynamically.
90 In this case, we also provide a flag to indicate if a device is
91 "stopped" due to a power management event, or card ejection. The
92 device IO routines can use a flag like this to throttle IO to a
93 card that is not ready to accept it.
94*/
95
96typedef struct local_info_t { 71typedef struct local_info_t {
97 struct pcmcia_device *p_dev; 72 struct pcmcia_device *p_dev;
98 dev_node_t node;
99 int busy; 73 int busy;
100 int cardnr; 74 int cardnr;
101} local_info_t; 75} local_info_t;
@@ -126,10 +100,6 @@ static int __devinit teles_probe(struct pcmcia_device *link)
126 local->p_dev = link; 100 local->p_dev = link;
127 link->priv = local; 101 link->priv = local;
128 102
129 /* Interrupt setup */
130 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
131 link->irq.Handler = NULL;
132
133 /* 103 /*
134 General socket configuration defaults can go here. In this 104 General socket configuration defaults can go here. In this
135 client, we assume very little, and rely on the CIS for almost 105 client, we assume very little, and rely on the CIS for almost
@@ -213,28 +183,18 @@ static int __devinit teles_cs_config(struct pcmcia_device *link)
213 if (i != 0) 183 if (i != 0)
214 goto cs_failed; 184 goto cs_failed;
215 185
216 i = pcmcia_request_irq(link, &link->irq); 186 if (!link->irq)
217 if (i != 0) {
218 link->irq.AssignedIRQ = 0;
219 goto cs_failed; 187 goto cs_failed;
220 }
221 188
222 i = pcmcia_request_configuration(link, &link->conf); 189 i = pcmcia_request_configuration(link, &link->conf);
223 if (i != 0) 190 if (i != 0)
224 goto cs_failed; 191 goto cs_failed;
225 192
226 /* At this point, the dev_node_t structure(s) should be
227 initialized and arranged in a linked list at link->dev. *//* */
228 sprintf(dev->node.dev_name, "teles");
229 dev->node.major = dev->node.minor = 0x0;
230
231 link->dev_node = &dev->node;
232
233 /* Finally, report what we've done */ 193 /* Finally, report what we've done */
234 printk(KERN_INFO "%s: index 0x%02x:", 194 dev_info(&link->dev, "index 0x%02x:",
235 dev->node.dev_name, link->conf.ConfigIndex); 195 link->conf.ConfigIndex);
236 if (link->conf.Attributes & CONF_ENABLE_IRQ) 196 if (link->conf.Attributes & CONF_ENABLE_IRQ)
237 printk(", irq %d", link->irq.AssignedIRQ); 197 printk(", irq %d", link->irq);
238 if (link->io.NumPorts1) 198 if (link->io.NumPorts1)
239 printk(", io 0x%04x-0x%04x", link->io.BasePort1, 199 printk(", io 0x%04x-0x%04x", link->io.BasePort1,
240 link->io.BasePort1+link->io.NumPorts1-1); 200 link->io.BasePort1+link->io.NumPorts1-1);
@@ -243,7 +203,7 @@ static int __devinit teles_cs_config(struct pcmcia_device *link)
243 link->io.BasePort2+link->io.NumPorts2-1); 203 link->io.BasePort2+link->io.NumPorts2-1);
244 printk("\n"); 204 printk("\n");
245 205
246 icard.para[0] = link->irq.AssignedIRQ; 206 icard.para[0] = link->irq;
247 icard.para[1] = link->io.BasePort1; 207 icard.para[1] = link->io.BasePort1;
248 icard.protocol = protocol; 208 icard.protocol = protocol;
249 icard.typ = ISDN_CTYPE_TELESPCMCIA; 209 icard.typ = ISDN_CTYPE_TELESPCMCIA;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 9712b2e97be4..cefd63daff31 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2109,12 +2109,18 @@ repeat:
2109 if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */ 2109 if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */
2110 /* .. if the array isn't clean, an 'even' event must also go 2110 /* .. if the array isn't clean, an 'even' event must also go
2111 * to spares. */ 2111 * to spares. */
2112 if ((mddev->events&1)==0) 2112 if ((mddev->events&1)==0) {
2113 nospares = 0; 2113 nospares = 0;
2114 sync_req = 2; /* force a second update to get the
2115 * even/odd in sync */
2116 }
2114 } else { 2117 } else {
2115 /* otherwise an 'odd' event must go to spares */ 2118 /* otherwise an 'odd' event must go to spares */
2116 if ((mddev->events&1)) 2119 if ((mddev->events&1)) {
2117 nospares = 0; 2120 nospares = 0;
2121 sync_req = 2; /* force a second update to get the
2122 * even/odd in sync */
2123 }
2118 } 2124 }
2119 } 2125 }
2120 2126
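
The md.c change closes a corner case in the even/odd event-count convention described in the comments: whenever a dirty array ends up on an even count, or a clean array on an odd one, the spares can no longer be skipped and sync_req is bumped to 2 so a second superblock write immediately restores the expected parity. The decision itself, reduced to a hedged helper (the surrounding md_update_sb() bookkeeping is omitted):

#include <linux/types.h>

/* true when the hunks above force the extra superblock update */
static bool event_parity_mismatch(u64 events, bool array_clean)
{
	if (!array_clean)
		return (events & 1) == 0;	/* dirty array, 'even' event */
	return (events & 1) == 1;		/* clean array, 'odd' event */
}
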
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 20e48401910e..15348c393b5d 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1527,7 +1527,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
1527 1527
1528 clear_bit(R5_UPTODATE, &sh->dev[i].flags); 1528 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
1529 atomic_inc(&rdev->read_errors); 1529 atomic_inc(&rdev->read_errors);
1530 if (conf->mddev->degraded) 1530 if (conf->mddev->degraded >= conf->max_degraded)
1531 printk_rl(KERN_WARNING 1531 printk_rl(KERN_WARNING
1532 "raid5:%s: read error not correctable " 1532 "raid5:%s: read error not correctable "
1533 "(sector %llu on %s).\n", 1533 "(sector %llu on %s).\n",
@@ -1650,7 +1650,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1650 int previous, int *dd_idx, 1650 int previous, int *dd_idx,
1651 struct stripe_head *sh) 1651 struct stripe_head *sh)
1652{ 1652{
1653 sector_t stripe; 1653 sector_t stripe, stripe2;
1654 sector_t chunk_number; 1654 sector_t chunk_number;
1655 unsigned int chunk_offset; 1655 unsigned int chunk_offset;
1656 int pd_idx, qd_idx; 1656 int pd_idx, qd_idx;
@@ -1677,7 +1677,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1677 */ 1677 */
1678 stripe = chunk_number; 1678 stripe = chunk_number;
1679 *dd_idx = sector_div(stripe, data_disks); 1679 *dd_idx = sector_div(stripe, data_disks);
1680 1680 stripe2 = stripe;
1681 /* 1681 /*
1682 * Select the parity disk based on the user selected algorithm. 1682 * Select the parity disk based on the user selected algorithm.
1683 */ 1683 */
@@ -1689,21 +1689,21 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1689 case 5: 1689 case 5:
1690 switch (algorithm) { 1690 switch (algorithm) {
1691 case ALGORITHM_LEFT_ASYMMETRIC: 1691 case ALGORITHM_LEFT_ASYMMETRIC:
1692 pd_idx = data_disks - stripe % raid_disks; 1692 pd_idx = data_disks - sector_div(stripe2, raid_disks);
1693 if (*dd_idx >= pd_idx) 1693 if (*dd_idx >= pd_idx)
1694 (*dd_idx)++; 1694 (*dd_idx)++;
1695 break; 1695 break;
1696 case ALGORITHM_RIGHT_ASYMMETRIC: 1696 case ALGORITHM_RIGHT_ASYMMETRIC:
1697 pd_idx = stripe % raid_disks; 1697 pd_idx = sector_div(stripe2, raid_disks);
1698 if (*dd_idx >= pd_idx) 1698 if (*dd_idx >= pd_idx)
1699 (*dd_idx)++; 1699 (*dd_idx)++;
1700 break; 1700 break;
1701 case ALGORITHM_LEFT_SYMMETRIC: 1701 case ALGORITHM_LEFT_SYMMETRIC:
1702 pd_idx = data_disks - stripe % raid_disks; 1702 pd_idx = data_disks - sector_div(stripe2, raid_disks);
1703 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; 1703 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
1704 break; 1704 break;
1705 case ALGORITHM_RIGHT_SYMMETRIC: 1705 case ALGORITHM_RIGHT_SYMMETRIC:
1706 pd_idx = stripe % raid_disks; 1706 pd_idx = sector_div(stripe2, raid_disks);
1707 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; 1707 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
1708 break; 1708 break;
1709 case ALGORITHM_PARITY_0: 1709 case ALGORITHM_PARITY_0:
@@ -1723,7 +1723,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1723 1723
1724 switch (algorithm) { 1724 switch (algorithm) {
1725 case ALGORITHM_LEFT_ASYMMETRIC: 1725 case ALGORITHM_LEFT_ASYMMETRIC:
1726 pd_idx = raid_disks - 1 - (stripe % raid_disks); 1726 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
1727 qd_idx = pd_idx + 1; 1727 qd_idx = pd_idx + 1;
1728 if (pd_idx == raid_disks-1) { 1728 if (pd_idx == raid_disks-1) {
1729 (*dd_idx)++; /* Q D D D P */ 1729 (*dd_idx)++; /* Q D D D P */
@@ -1732,7 +1732,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1732 (*dd_idx) += 2; /* D D P Q D */ 1732 (*dd_idx) += 2; /* D D P Q D */
1733 break; 1733 break;
1734 case ALGORITHM_RIGHT_ASYMMETRIC: 1734 case ALGORITHM_RIGHT_ASYMMETRIC:
1735 pd_idx = stripe % raid_disks; 1735 pd_idx = sector_div(stripe2, raid_disks);
1736 qd_idx = pd_idx + 1; 1736 qd_idx = pd_idx + 1;
1737 if (pd_idx == raid_disks-1) { 1737 if (pd_idx == raid_disks-1) {
1738 (*dd_idx)++; /* Q D D D P */ 1738 (*dd_idx)++; /* Q D D D P */
@@ -1741,12 +1741,12 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1741 (*dd_idx) += 2; /* D D P Q D */ 1741 (*dd_idx) += 2; /* D D P Q D */
1742 break; 1742 break;
1743 case ALGORITHM_LEFT_SYMMETRIC: 1743 case ALGORITHM_LEFT_SYMMETRIC:
1744 pd_idx = raid_disks - 1 - (stripe % raid_disks); 1744 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
1745 qd_idx = (pd_idx + 1) % raid_disks; 1745 qd_idx = (pd_idx + 1) % raid_disks;
1746 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; 1746 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
1747 break; 1747 break;
1748 case ALGORITHM_RIGHT_SYMMETRIC: 1748 case ALGORITHM_RIGHT_SYMMETRIC:
1749 pd_idx = stripe % raid_disks; 1749 pd_idx = sector_div(stripe2, raid_disks);
1750 qd_idx = (pd_idx + 1) % raid_disks; 1750 qd_idx = (pd_idx + 1) % raid_disks;
1751 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; 1751 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
1752 break; 1752 break;
@@ -1765,7 +1765,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1765 /* Exactly the same as RIGHT_ASYMMETRIC, but or 1765 /* Exactly the same as RIGHT_ASYMMETRIC, but or
1766 * of blocks for computing Q is different. 1766 * of blocks for computing Q is different.
1767 */ 1767 */
1768 pd_idx = stripe % raid_disks; 1768 pd_idx = sector_div(stripe2, raid_disks);
1769 qd_idx = pd_idx + 1; 1769 qd_idx = pd_idx + 1;
1770 if (pd_idx == raid_disks-1) { 1770 if (pd_idx == raid_disks-1) {
1771 (*dd_idx)++; /* Q D D D P */ 1771 (*dd_idx)++; /* Q D D D P */
@@ -1780,7 +1780,8 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1780 * D D D P Q rather than 1780 * D D D P Q rather than
1781 * Q D D D P 1781 * Q D D D P
1782 */ 1782 */
1783 pd_idx = raid_disks - 1 - ((stripe + 1) % raid_disks); 1783 stripe2 += 1;
1784 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
1784 qd_idx = pd_idx + 1; 1785 qd_idx = pd_idx + 1;
1785 if (pd_idx == raid_disks-1) { 1786 if (pd_idx == raid_disks-1) {
1786 (*dd_idx)++; /* Q D D D P */ 1787 (*dd_idx)++; /* Q D D D P */
@@ -1792,7 +1793,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1792 1793
1793 case ALGORITHM_ROTATING_N_CONTINUE: 1794 case ALGORITHM_ROTATING_N_CONTINUE:
1794 /* Same as left_symmetric but Q is before P */ 1795 /* Same as left_symmetric but Q is before P */
1795 pd_idx = raid_disks - 1 - (stripe % raid_disks); 1796 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
1796 qd_idx = (pd_idx + raid_disks - 1) % raid_disks; 1797 qd_idx = (pd_idx + raid_disks - 1) % raid_disks;
1797 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; 1798 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
1798 ddf_layout = 1; 1799 ddf_layout = 1;
@@ -1800,27 +1801,27 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1800 1801
1801 case ALGORITHM_LEFT_ASYMMETRIC_6: 1802 case ALGORITHM_LEFT_ASYMMETRIC_6:
1802 /* RAID5 left_asymmetric, with Q on last device */ 1803 /* RAID5 left_asymmetric, with Q on last device */
1803 pd_idx = data_disks - stripe % (raid_disks-1); 1804 pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
1804 if (*dd_idx >= pd_idx) 1805 if (*dd_idx >= pd_idx)
1805 (*dd_idx)++; 1806 (*dd_idx)++;
1806 qd_idx = raid_disks - 1; 1807 qd_idx = raid_disks - 1;
1807 break; 1808 break;
1808 1809
1809 case ALGORITHM_RIGHT_ASYMMETRIC_6: 1810 case ALGORITHM_RIGHT_ASYMMETRIC_6:
1810 pd_idx = stripe % (raid_disks-1); 1811 pd_idx = sector_div(stripe2, raid_disks-1);
1811 if (*dd_idx >= pd_idx) 1812 if (*dd_idx >= pd_idx)
1812 (*dd_idx)++; 1813 (*dd_idx)++;
1813 qd_idx = raid_disks - 1; 1814 qd_idx = raid_disks - 1;
1814 break; 1815 break;
1815 1816
1816 case ALGORITHM_LEFT_SYMMETRIC_6: 1817 case ALGORITHM_LEFT_SYMMETRIC_6:
1817 pd_idx = data_disks - stripe % (raid_disks-1); 1818 pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
1818 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); 1819 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
1819 qd_idx = raid_disks - 1; 1820 qd_idx = raid_disks - 1;
1820 break; 1821 break;
1821 1822
1822 case ALGORITHM_RIGHT_SYMMETRIC_6: 1823 case ALGORITHM_RIGHT_SYMMETRIC_6:
1823 pd_idx = stripe % (raid_disks-1); 1824 pd_idx = sector_div(stripe2, raid_disks-1);
1824 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); 1825 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
1825 qd_idx = raid_disks - 1; 1826 qd_idx = raid_disks - 1;
1826 break; 1827 break;
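
Every raid5.c hunk above makes the same substitution: 'stripe % raid_disks' becomes sector_div(stripe2, raid_disks). On 32-bit kernels sector_t is 64 bits wide and a plain modulo would pull in libgcc's 64-bit division helpers, which the kernel does not link against; sector_div() divides its first argument in place and hands back the remainder. Because the quotient overwrites the operand, the patch works on a scratch copy (stripe2) so the original stripe number stays usable. The idiom in isolation, for one hypothetical left-asymmetric layout rather than the raid5 code itself:

#include <linux/types.h>
#include <linux/kernel.h>

static unsigned int left_asym_parity_disk(sector_t stripe,
					  unsigned int raid_disks,
					  unsigned int data_disks)
{
	sector_t stripe2 = stripe;	/* sector_div() modifies its operand */

	/* the remainder picks the rotating parity slot; the caller's
	 * stripe value is left intact for later index calculations */
	return data_disks - sector_div(stripe2, raid_disks);
}
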
diff --git a/drivers/media/common/saa7146_fops.c b/drivers/media/common/saa7146_fops.c
index fd8e1f45be36..7364b9642d00 100644
--- a/drivers/media/common/saa7146_fops.c
+++ b/drivers/media/common/saa7146_fops.c
@@ -423,15 +423,14 @@ static void vv_callback(struct saa7146_dev *dev, unsigned long status)
423 } 423 }
424} 424}
425 425
426int saa7146_vv_devinit(struct saa7146_dev *dev)
427{
428 return v4l2_device_register(&dev->pci->dev, &dev->v4l2_dev);
429}
430EXPORT_SYMBOL_GPL(saa7146_vv_devinit);
431
432int saa7146_vv_init(struct saa7146_dev* dev, struct saa7146_ext_vv *ext_vv) 426int saa7146_vv_init(struct saa7146_dev* dev, struct saa7146_ext_vv *ext_vv)
433{ 427{
434 struct saa7146_vv *vv; 428 struct saa7146_vv *vv;
429 int err;
430
431 err = v4l2_device_register(&dev->pci->dev, &dev->v4l2_dev);
432 if (err)
433 return err;
435 434
436 vv = kzalloc(sizeof(struct saa7146_vv), GFP_KERNEL); 435 vv = kzalloc(sizeof(struct saa7146_vv), GFP_KERNEL);
437 if (vv == NULL) { 436 if (vv == NULL) {
diff --git a/drivers/media/common/saa7146_video.c b/drivers/media/common/saa7146_video.c
index 5ed75263340a..b8b2c551a1e2 100644
--- a/drivers/media/common/saa7146_video.c
+++ b/drivers/media/common/saa7146_video.c
@@ -558,9 +558,11 @@ static int vidioc_s_fbuf(struct file *file, void *fh, struct v4l2_framebuffer *f
558 /* ok, accept it */ 558 /* ok, accept it */
559 vv->ov_fb = *fb; 559 vv->ov_fb = *fb;
560 vv->ov_fmt = fmt; 560 vv->ov_fmt = fmt;
561 if (0 == vv->ov_fb.fmt.bytesperline) 561
562 vv->ov_fb.fmt.bytesperline = 562 if (vv->ov_fb.fmt.bytesperline < vv->ov_fb.fmt.width) {
563 vv->ov_fb.fmt.width * fmt->depth / 8; 563 vv->ov_fb.fmt.bytesperline = vv->ov_fb.fmt.width * fmt->depth / 8;
564 DEB_D(("setting bytesperline to %d\n", vv->ov_fb.fmt.bytesperline));
565 }
564 566
565 mutex_unlock(&dev->lock); 567 mutex_unlock(&dev->lock);
566 return 0; 568 return 0;
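
The saa7146_video.c hunk widens the overlay framebuffer sanity check: instead of only patching up a bytesperline of zero, any stride smaller than the visible width is treated as bogus and recomputed from the width and the pixel depth. The same guard as a hedged standalone helper (names illustrative):

#include <linux/types.h>

static u32 sane_bytesperline(u32 bytesperline, u32 width, u32 depth)
{
	/* a stride shorter than the width in pixels cannot hold a line
	 * at any depth of 8bpp or more; fall back to a packed line */
	if (bytesperline < width)
		return width * depth / 8;
	return bytesperline;
}
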
diff --git a/drivers/media/dvb/frontends/stv090x.c b/drivers/media/dvb/frontends/stv090x.c
index a3c07fe0e6c4..96972804f4ad 100644
--- a/drivers/media/dvb/frontends/stv090x.c
+++ b/drivers/media/dvb/frontends/stv090x.c
@@ -4470,6 +4470,10 @@ static int stv090x_setup(struct dvb_frontend *fe)
4470 if (stv090x_write_reg(state, STV090x_TSTRES0, 0x00) < 0) 4470 if (stv090x_write_reg(state, STV090x_TSTRES0, 0x00) < 0)
4471 goto err; 4471 goto err;
4472 4472
4473 /* workaround for stuck DiSEqC output */
4474 if (config->diseqc_envelope_mode)
4475 stv090x_send_diseqc_burst(fe, SEC_MINI_A);
4476
4473 return 0; 4477 return 0;
4474err: 4478err:
4475 dprintk(FE_ERROR, 1, "I/O error"); 4479 dprintk(FE_ERROR, 1, "I/O error");
diff --git a/drivers/media/dvb/ttpci/budget.c b/drivers/media/dvb/ttpci/budget.c
index 9fdf26cc6998..1500210c06cf 100644
--- a/drivers/media/dvb/ttpci/budget.c
+++ b/drivers/media/dvb/ttpci/budget.c
@@ -643,9 +643,6 @@ static void frontend_init(struct budget *budget)
643 &budget->i2c_adap, 643 &budget->i2c_adap,
644 &tt1600_isl6423_config); 644 &tt1600_isl6423_config);
645 645
646 } else {
647 dvb_frontend_detach(budget->dvb_frontend);
648 budget->dvb_frontend = NULL;
649 } 646 }
650 } 647 }
651 break; 648 break;
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index f8fc8654693d..9644cf760aaa 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -361,7 +361,7 @@ config VIDEO_SAA717X
361 361
362config VIDEO_SAA7191 362config VIDEO_SAA7191
363 tristate "Philips SAA7191 video decoder" 363 tristate "Philips SAA7191 video decoder"
364 depends on VIDEO_V4L1 && I2C 364 depends on VIDEO_V4L2 && I2C
365 ---help--- 365 ---help---
366 Support for the Philips SAA7191 video decoder. 366 Support for the Philips SAA7191 video decoder.
367 367
@@ -756,7 +756,7 @@ source "drivers/media/video/saa7134/Kconfig"
756 756
757config VIDEO_MXB 757config VIDEO_MXB
758 tristate "Siemens-Nixdorf 'Multimedia eXtension Board'" 758 tristate "Siemens-Nixdorf 'Multimedia eXtension Board'"
759 depends on PCI && VIDEO_V4L1 && I2C 759 depends on PCI && VIDEO_V4L2 && I2C
760 select VIDEO_SAA7146_VV 760 select VIDEO_SAA7146_VV
761 select VIDEO_TUNER 761 select VIDEO_TUNER
762 select VIDEO_SAA711X if VIDEO_HELPER_CHIPS_AUTO 762 select VIDEO_SAA711X if VIDEO_HELPER_CHIPS_AUTO
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index b88b6174a331..c51c386559f2 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -160,8 +160,6 @@ obj-$(CONFIG_VIDEO_MX3) += mx3_camera.o
160obj-$(CONFIG_VIDEO_PXA27x) += pxa_camera.o 160obj-$(CONFIG_VIDEO_PXA27x) += pxa_camera.o
161obj-$(CONFIG_VIDEO_SH_MOBILE_CEU) += sh_mobile_ceu_camera.o 161obj-$(CONFIG_VIDEO_SH_MOBILE_CEU) += sh_mobile_ceu_camera.o
162 162
163obj-$(CONFIG_ARCH_DAVINCI) += davinci/
164
165obj-$(CONFIG_VIDEO_AU0828) += au0828/ 163obj-$(CONFIG_VIDEO_AU0828) += au0828/
166 164
167obj-$(CONFIG_USB_VIDEO_CLASS) += uvc/ 165obj-$(CONFIG_USB_VIDEO_CLASS) += uvc/
diff --git a/drivers/media/video/davinci/vpfe_capture.c b/drivers/media/video/davinci/vpfe_capture.c
index 7cf042f9b377..398dbe71cb82 100644
--- a/drivers/media/video/davinci/vpfe_capture.c
+++ b/drivers/media/video/davinci/vpfe_capture.c
@@ -223,7 +223,6 @@ int vpfe_register_ccdc_device(struct ccdc_hw_device *dev)
223 BUG_ON(!dev->hw_ops.get_frame_format); 223 BUG_ON(!dev->hw_ops.get_frame_format);
224 BUG_ON(!dev->hw_ops.get_pixel_format); 224 BUG_ON(!dev->hw_ops.get_pixel_format);
225 BUG_ON(!dev->hw_ops.set_pixel_format); 225 BUG_ON(!dev->hw_ops.set_pixel_format);
226 BUG_ON(!dev->hw_ops.set_params);
227 BUG_ON(!dev->hw_ops.set_image_window); 226 BUG_ON(!dev->hw_ops.set_image_window);
228 BUG_ON(!dev->hw_ops.get_image_window); 227 BUG_ON(!dev->hw_ops.get_image_window);
229 BUG_ON(!dev->hw_ops.get_line_length); 228 BUG_ON(!dev->hw_ops.get_line_length);
@@ -1689,11 +1688,12 @@ static long vpfe_param_handler(struct file *file, void *priv,
1689 struct vpfe_device *vpfe_dev = video_drvdata(file); 1688 struct vpfe_device *vpfe_dev = video_drvdata(file);
1690 int ret = 0; 1689 int ret = 0;
1691 1690
1692 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_param_handler\n"); 1691 v4l2_dbg(2, debug, &vpfe_dev->v4l2_dev, "vpfe_param_handler\n");
1693 1692
1694 if (vpfe_dev->started) { 1693 if (vpfe_dev->started) {
1695 /* only allowed if streaming is not started */ 1694 /* only allowed if streaming is not started */
1696 v4l2_err(&vpfe_dev->v4l2_dev, "device already started\n"); 1695 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
1696 "device already started\n");
1697 return -EBUSY; 1697 return -EBUSY;
1698 } 1698 }
1699 1699
@@ -1705,16 +1705,23 @@ static long vpfe_param_handler(struct file *file, void *priv,
1705 case VPFE_CMD_S_CCDC_RAW_PARAMS: 1705 case VPFE_CMD_S_CCDC_RAW_PARAMS:
1706 v4l2_warn(&vpfe_dev->v4l2_dev, 1706 v4l2_warn(&vpfe_dev->v4l2_dev,
1707 "VPFE_CMD_S_CCDC_RAW_PARAMS: experimental ioctl\n"); 1707 "VPFE_CMD_S_CCDC_RAW_PARAMS: experimental ioctl\n");
1708 ret = ccdc_dev->hw_ops.set_params(param); 1708 if (ccdc_dev->hw_ops.set_params) {
1709 if (ret) { 1709 ret = ccdc_dev->hw_ops.set_params(param);
1710 v4l2_err(&vpfe_dev->v4l2_dev, 1710 if (ret) {
1711 "Error in setting parameters in CCDC\n"); 1711 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
1712 goto unlock_out; 1712 "Error setting parameters in CCDC\n");
1713 } 1713 goto unlock_out;
1714 if (vpfe_get_ccdc_image_format(vpfe_dev, &vpfe_dev->fmt) < 0) { 1714 }
1715 v4l2_err(&vpfe_dev->v4l2_dev, 1715 if (vpfe_get_ccdc_image_format(vpfe_dev,
1716 "Invalid image format at CCDC\n"); 1716 &vpfe_dev->fmt) < 0) {
1717 goto unlock_out; 1717 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
1718 "Invalid image format at CCDC\n");
1719 goto unlock_out;
1720 }
1721 } else {
1722 ret = -EINVAL;
1723 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
1724 "VPFE_CMD_S_CCDC_RAW_PARAMS not supported\n");
1718 } 1725 }
1719 break; 1726 break;
1720 default: 1727 default:
@@ -1830,7 +1837,7 @@ static __init int vpfe_probe(struct platform_device *pdev)
1830 if (NULL == ccdc_cfg) { 1837 if (NULL == ccdc_cfg) {
1831 v4l2_err(pdev->dev.driver, 1838 v4l2_err(pdev->dev.driver,
1832 "Memory allocation failed for ccdc_cfg\n"); 1839 "Memory allocation failed for ccdc_cfg\n");
1833 goto probe_free_dev_mem; 1840 goto probe_free_lock;
1834 } 1841 }
1835 1842
1836 strncpy(ccdc_cfg->name, vpfe_cfg->ccdc, 32); 1843 strncpy(ccdc_cfg->name, vpfe_cfg->ccdc, 32);
@@ -1982,8 +1989,9 @@ probe_out_video_release:
1982probe_out_release_irq: 1989probe_out_release_irq:
1983 free_irq(vpfe_dev->ccdc_irq0, vpfe_dev); 1990 free_irq(vpfe_dev->ccdc_irq0, vpfe_dev);
1984probe_free_ccdc_cfg_mem: 1991probe_free_ccdc_cfg_mem:
1985 mutex_unlock(&ccdc_lock);
1986 kfree(ccdc_cfg); 1992 kfree(ccdc_cfg);
1993probe_free_lock:
1994 mutex_unlock(&ccdc_lock);
1987probe_free_dev_mem: 1995probe_free_dev_mem:
1988 kfree(vpfe_dev); 1996 kfree(vpfe_dev);
1989 return ret; 1997 return ret;
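
The vpfe_capture.c changes turn hw_ops.set_params into an optional callback: the BUG_ON() at CCDC registration is dropped and the VPFE_CMD_S_CCDC_RAW_PARAMS path now tests the pointer before calling it, returning -EINVAL when the backend does not implement raw parameters (the probe error path is also reordered so ccdc_lock is released on its own label). The optional-hook pattern in miniature, with an illustrative ops structure rather than the real ccdc_hw_ops:

#include <linux/errno.h>

struct raw_params_ops {
	int (*set_params)(void *param);		/* optional */
};

static int apply_raw_params(const struct raw_params_ops *ops, void *param)
{
	if (!ops->set_params)
		return -EINVAL;		/* backend has no raw-parameter support */

	return ops->set_params(param);
}
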
diff --git a/drivers/media/video/gspca/sn9c20x.c b/drivers/media/video/gspca/sn9c20x.c
index 38a6e15e096b..3dee3e5844b6 100644
--- a/drivers/media/video/gspca/sn9c20x.c
+++ b/drivers/media/video/gspca/sn9c20x.c
@@ -1427,7 +1427,7 @@ static int input_kthread(void *data)
1427 struct gspca_dev *gspca_dev = (struct gspca_dev *)data; 1427 struct gspca_dev *gspca_dev = (struct gspca_dev *)data;
1428 struct sd *sd = (struct sd *) gspca_dev; 1428 struct sd *sd = (struct sd *) gspca_dev;
1429 1429
1430 DECLARE_WAIT_QUEUE_HEAD(wait); 1430 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait);
1431 set_freezable(); 1431 set_freezable();
1432 for (;;) { 1432 for (;;) {
1433 if (kthread_should_stop()) 1433 if (kthread_should_stop())
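
The sn9c20x.c one-liner switches the polling thread's wait queue to DECLARE_WAIT_QUEUE_HEAD_ONSTACK(). A wait queue head declared on the stack contains a spinlock, and the _ONSTACK variant gives lockdep a runtime-registered class for it instead of the static key the ordinary initialiser assumes, silencing false lock-validator reports. A minimal kernel-thread usage sketch (the device polling body is elided):

#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/jiffies.h>

static int poll_thread(void *data)
{
	/* on-stack wait queue heads must use the _ONSTACK initialiser */
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait);

	set_freezable();
	while (!kthread_should_stop()) {
		/* ... poll the device here ... */
		wait_event_interruptible_timeout(wait, kthread_should_stop(),
						 msecs_to_jiffies(100));
	}
	return 0;
}
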
diff --git a/drivers/media/video/gspca/spca508.c b/drivers/media/video/gspca/spca508.c
index 15b2eef8a3f6..edf0fe157501 100644
--- a/drivers/media/video/gspca/spca508.c
+++ b/drivers/media/video/gspca/spca508.c
@@ -1513,7 +1513,6 @@ static const struct sd_desc sd_desc = {
1513static const __devinitdata struct usb_device_id device_table[] = { 1513static const __devinitdata struct usb_device_id device_table[] = {
1514 {USB_DEVICE(0x0130, 0x0130), .driver_info = HamaUSBSightcam}, 1514 {USB_DEVICE(0x0130, 0x0130), .driver_info = HamaUSBSightcam},
1515 {USB_DEVICE(0x041e, 0x4018), .driver_info = CreativeVista}, 1515 {USB_DEVICE(0x041e, 0x4018), .driver_info = CreativeVista},
1516 {USB_DEVICE(0x0461, 0x0815), .driver_info = MicroInnovationIC200},
1517 {USB_DEVICE(0x0733, 0x0110), .driver_info = ViewQuestVQ110}, 1516 {USB_DEVICE(0x0733, 0x0110), .driver_info = ViewQuestVQ110},
1518 {USB_DEVICE(0x0af9, 0x0010), .driver_info = HamaUSBSightcam}, 1517 {USB_DEVICE(0x0af9, 0x0010), .driver_info = HamaUSBSightcam},
1519 {USB_DEVICE(0x0af9, 0x0011), .driver_info = HamaUSBSightcam2}, 1518 {USB_DEVICE(0x0af9, 0x0011), .driver_info = HamaUSBSightcam2},
diff --git a/drivers/media/video/gspca/spca561.c b/drivers/media/video/gspca/spca561.c
index dc7f2b0fbc79..b9c80e2103b9 100644
--- a/drivers/media/video/gspca/spca561.c
+++ b/drivers/media/video/gspca/spca561.c
@@ -1053,6 +1053,7 @@ static const __devinitdata struct usb_device_id device_table[] = {
1053 {USB_DEVICE(0x041e, 0x401a), .driver_info = Rev072A}, 1053 {USB_DEVICE(0x041e, 0x401a), .driver_info = Rev072A},
1054 {USB_DEVICE(0x041e, 0x403b), .driver_info = Rev012A}, 1054 {USB_DEVICE(0x041e, 0x403b), .driver_info = Rev012A},
1055 {USB_DEVICE(0x0458, 0x7004), .driver_info = Rev072A}, 1055 {USB_DEVICE(0x0458, 0x7004), .driver_info = Rev072A},
1056 {USB_DEVICE(0x0461, 0x0815), .driver_info = Rev072A},
1056 {USB_DEVICE(0x046d, 0x0928), .driver_info = Rev012A}, 1057 {USB_DEVICE(0x046d, 0x0928), .driver_info = Rev012A},
1057 {USB_DEVICE(0x046d, 0x0929), .driver_info = Rev012A}, 1058 {USB_DEVICE(0x046d, 0x0929), .driver_info = Rev012A},
1058 {USB_DEVICE(0x046d, 0x092a), .driver_info = Rev012A}, 1059 {USB_DEVICE(0x046d, 0x092a), .driver_info = Rev012A},
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx.c b/drivers/media/video/gspca/stv06xx/stv06xx.c
index af73da34c83f..14f179a19485 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx.c
+++ b/drivers/media/video/gspca/stv06xx/stv06xx.c
@@ -524,8 +524,6 @@ static const __devinitdata struct usb_device_id device_table[] = {
524 {USB_DEVICE(0x046D, 0x08F5), .driver_info = BRIDGE_ST6422 }, 524 {USB_DEVICE(0x046D, 0x08F5), .driver_info = BRIDGE_ST6422 },
525 /* QuickCam Messenger (new) */ 525 /* QuickCam Messenger (new) */
526 {USB_DEVICE(0x046D, 0x08F6), .driver_info = BRIDGE_ST6422 }, 526 {USB_DEVICE(0x046D, 0x08F6), .driver_info = BRIDGE_ST6422 },
527 /* QuickCam Messenger (new) */
528 {USB_DEVICE(0x046D, 0x08DA), .driver_info = BRIDGE_ST6422 },
529 {} 527 {}
530}; 528};
531MODULE_DEVICE_TABLE(usb, device_table); 529MODULE_DEVICE_TABLE(usb, device_table);
diff --git a/drivers/media/video/hexium_gemini.c b/drivers/media/video/hexium_gemini.c
index e620a3a92f25..ad2c232baa6d 100644
--- a/drivers/media/video/hexium_gemini.c
+++ b/drivers/media/video/hexium_gemini.c
@@ -356,9 +356,6 @@ static int hexium_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_d
356 356
357 DEB_EE((".\n")); 357 DEB_EE((".\n"));
358 358
359 ret = saa7146_vv_devinit(dev);
360 if (ret)
361 return ret;
362 hexium = kzalloc(sizeof(struct hexium), GFP_KERNEL); 359 hexium = kzalloc(sizeof(struct hexium), GFP_KERNEL);
363 if (NULL == hexium) { 360 if (NULL == hexium) {
364 printk("hexium_gemini: not enough kernel memory in hexium_attach().\n"); 361 printk("hexium_gemini: not enough kernel memory in hexium_attach().\n");
diff --git a/drivers/media/video/hexium_orion.c b/drivers/media/video/hexium_orion.c
index fe596a1c12a8..938a1f8f880a 100644
--- a/drivers/media/video/hexium_orion.c
+++ b/drivers/media/video/hexium_orion.c
@@ -216,10 +216,6 @@ static int hexium_probe(struct saa7146_dev *dev)
216 return -EFAULT; 216 return -EFAULT;
217 } 217 }
218 218
219 err = saa7146_vv_devinit(dev);
220 if (err)
221 return err;
222
223 hexium = kzalloc(sizeof(struct hexium), GFP_KERNEL); 219 hexium = kzalloc(sizeof(struct hexium), GFP_KERNEL);
224 if (NULL == hexium) { 220 if (NULL == hexium) {
225 printk("hexium_orion: hexium_probe: not enough kernel memory.\n"); 221 printk("hexium_orion: hexium_probe: not enough kernel memory.\n");
diff --git a/drivers/media/video/mx1_camera.c b/drivers/media/video/mx1_camera.c
index 3c8ebfcb742e..34a66019190e 100644
--- a/drivers/media/video/mx1_camera.c
+++ b/drivers/media/video/mx1_camera.c
@@ -49,8 +49,6 @@
49/* 49/*
50 * CSI registers 50 * CSI registers
51 */ 51 */
52#define DMA_CCR(x) (0x8c + ((x) << 6)) /* Control Registers */
53#define DMA_DIMR 0x08 /* Interrupt mask Register */
54#define CSICR1 0x00 /* CSI Control Register 1 */ 52#define CSICR1 0x00 /* CSI Control Register 1 */
55#define CSISR 0x08 /* CSI Status Register */ 53#define CSISR 0x08 /* CSI Status Register */
56#define CSIRXR 0x10 /* CSI RxFIFO Register */ 54#define CSIRXR 0x10 /* CSI RxFIFO Register */
@@ -784,7 +782,7 @@ static int __init mx1_camera_probe(struct platform_device *pdev)
784 pcdev); 782 pcdev);
785 783
786 imx_dma_config_channel(pcdev->dma_chan, IMX_DMA_TYPE_FIFO, 784 imx_dma_config_channel(pcdev->dma_chan, IMX_DMA_TYPE_FIFO,
787 IMX_DMA_MEMSIZE_32, DMA_REQ_CSI_R, 0); 785 IMX_DMA_MEMSIZE_32, MX1_DMA_REQ_CSI_R, 0);
788 /* burst length : 16 words = 64 bytes */ 786 /* burst length : 16 words = 64 bytes */
789 imx_dma_config_burstlen(pcdev->dma_chan, 0); 787 imx_dma_config_burstlen(pcdev->dma_chan, 0);
790 788
@@ -798,8 +796,8 @@ static int __init mx1_camera_probe(struct platform_device *pdev)
798 set_fiq_handler(&mx1_camera_sof_fiq_start, &mx1_camera_sof_fiq_end - 796 set_fiq_handler(&mx1_camera_sof_fiq_start, &mx1_camera_sof_fiq_end -
799 &mx1_camera_sof_fiq_start); 797 &mx1_camera_sof_fiq_start);
800 798
801 regs.ARM_r8 = DMA_BASE + DMA_DIMR; 799 regs.ARM_r8 = (long)MX1_DMA_DIMR;
802 regs.ARM_r9 = DMA_BASE + DMA_CCR(pcdev->dma_chan); 800 regs.ARM_r9 = (long)MX1_DMA_CCR(pcdev->dma_chan);
803 regs.ARM_r10 = (long)pcdev->base + CSICR1; 801 regs.ARM_r10 = (long)pcdev->base + CSICR1;
804 regs.ARM_fp = (long)pcdev->base + CSISR; 802 regs.ARM_fp = (long)pcdev->base + CSISR;
805 regs.ARM_sp = 1 << pcdev->dma_chan; 803 regs.ARM_sp = 1 << pcdev->dma_chan;
diff --git a/drivers/media/video/mxb.c b/drivers/media/video/mxb.c
index 9f01f14e4aa2..ef0c8178f255 100644
--- a/drivers/media/video/mxb.c
+++ b/drivers/media/video/mxb.c
@@ -169,11 +169,7 @@ static struct saa7146_extension extension;
169static int mxb_probe(struct saa7146_dev *dev) 169static int mxb_probe(struct saa7146_dev *dev)
170{ 170{
171 struct mxb *mxb = NULL; 171 struct mxb *mxb = NULL;
172 int err;
173 172
174 err = saa7146_vv_devinit(dev);
175 if (err)
176 return err;
177 mxb = kzalloc(sizeof(struct mxb), GFP_KERNEL); 173 mxb = kzalloc(sizeof(struct mxb), GFP_KERNEL);
178 if (mxb == NULL) { 174 if (mxb == NULL) {
179 DEB_D(("not enough kernel memory.\n")); 175 DEB_D(("not enough kernel memory.\n"));
@@ -699,14 +695,17 @@ static struct saa7146_ext_vv vv_data;
699/* this function only gets called when the probing was successful */ 695/* this function only gets called when the probing was successful */
700static int mxb_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_data *info) 696static int mxb_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_data *info)
701{ 697{
702 struct mxb *mxb = (struct mxb *)dev->ext_priv; 698 struct mxb *mxb;
703 699
704 DEB_EE(("dev:%p\n", dev)); 700 DEB_EE(("dev:%p\n", dev));
705 701
706 /* checking for i2c-devices can be omitted here, because we
707 already did this in "mxb_vl42_probe" */
708
709 saa7146_vv_init(dev, &vv_data); 702 saa7146_vv_init(dev, &vv_data);
703 if (mxb_probe(dev)) {
704 saa7146_vv_release(dev);
705 return -1;
706 }
707 mxb = (struct mxb *)dev->ext_priv;
708
710 vv_data.ops.vidioc_queryctrl = vidioc_queryctrl; 709 vv_data.ops.vidioc_queryctrl = vidioc_queryctrl;
711 vv_data.ops.vidioc_g_ctrl = vidioc_g_ctrl; 710 vv_data.ops.vidioc_g_ctrl = vidioc_g_ctrl;
712 vv_data.ops.vidioc_s_ctrl = vidioc_s_ctrl; 711 vv_data.ops.vidioc_s_ctrl = vidioc_s_ctrl;
@@ -726,6 +725,7 @@ static int mxb_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_data
726 vv_data.ops.vidioc_default = vidioc_default; 725 vv_data.ops.vidioc_default = vidioc_default;
727 if (saa7146_register_device(&mxb->video_dev, dev, "mxb", VFL_TYPE_GRABBER)) { 726 if (saa7146_register_device(&mxb->video_dev, dev, "mxb", VFL_TYPE_GRABBER)) {
728 ERR(("cannot register capture v4l2 device. skipping.\n")); 727 ERR(("cannot register capture v4l2 device. skipping.\n"));
728 saa7146_vv_release(dev);
729 return -1; 729 return -1;
730 } 730 }
731 731
@@ -846,7 +846,6 @@ static struct saa7146_extension extension = {
846 .pci_tbl = &pci_tbl[0], 846 .pci_tbl = &pci_tbl[0],
847 .module = THIS_MODULE, 847 .module = THIS_MODULE,
848 848
849 .probe = mxb_probe,
850 .attach = mxb_attach, 849 .attach = mxb_attach,
851 .detach = mxb_detach, 850 .detach = mxb_detach,
852 851
diff --git a/drivers/media/video/omap24xxcam.c b/drivers/media/video/omap24xxcam.c
index b189fe63394b..ce76d952e161 100644
--- a/drivers/media/video/omap24xxcam.c
+++ b/drivers/media/video/omap24xxcam.c
@@ -1405,7 +1405,7 @@ static int omap24xxcam_mmap_buffers(struct file *file,
1405 } 1405 }
1406 1406
1407 size = 0; 1407 size = 0;
1408 for (i = first; i <= last; i++) { 1408 for (i = first; i <= last && i < VIDEO_MAX_FRAME; i++) {
1409 struct videobuf_dmabuf *dma = videobuf_to_dma(vbq->bufs[i]); 1409 struct videobuf_dmabuf *dma = videobuf_to_dma(vbq->bufs[i]);
1410 1410
1411 for (j = 0; j < dma->sglen; j++) { 1411 for (j = 0; j < dma->sglen; j++) {
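
The omap24xxcam.c mmap fix bounds the buffer walk by VIDEO_MAX_FRAME, the size of the vbq->bufs[] array, so a 'last' index derived from a user-supplied offset can no longer run past the end of the array. The guard in generic form, with a hypothetical fixed-size table:

#include <linux/types.h>

#define NBUFS 32	/* stands in for VIDEO_MAX_FRAME */

static size_t sum_buffer_sizes(const size_t sizes[NBUFS],
			       unsigned int first, unsigned int last)
{
	size_t total = 0;
	unsigned int i;

	/* clamp the walk to the array that actually backs the request */
	for (i = first; i <= last && i < NBUFS; i++)
		total += sizes[i];
	return total;
}
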
diff --git a/drivers/media/video/pxa_camera.c b/drivers/media/video/pxa_camera.c
index 5ecc30daef2d..04bf5c11308d 100644
--- a/drivers/media/video/pxa_camera.c
+++ b/drivers/media/video/pxa_camera.c
@@ -609,12 +609,9 @@ static void pxa_dma_add_tail_buf(struct pxa_camera_dev *pcdev,
609 */ 609 */
610static void pxa_camera_start_capture(struct pxa_camera_dev *pcdev) 610static void pxa_camera_start_capture(struct pxa_camera_dev *pcdev)
611{ 611{
612 unsigned long cicr0, cifr; 612 unsigned long cicr0;
613 613
614 dev_dbg(pcdev->soc_host.v4l2_dev.dev, "%s\n", __func__); 614 dev_dbg(pcdev->soc_host.v4l2_dev.dev, "%s\n", __func__);
615 /* Reset the FIFOs */
616 cifr = __raw_readl(pcdev->base + CIFR) | CIFR_RESET_F;
617 __raw_writel(cifr, pcdev->base + CIFR);
618 /* Enable End-Of-Frame Interrupt */ 615 /* Enable End-Of-Frame Interrupt */
619 cicr0 = __raw_readl(pcdev->base + CICR0) | CICR0_ENB; 616 cicr0 = __raw_readl(pcdev->base + CICR0) | CICR0_ENB;
620 cicr0 &= ~CICR0_EOFM; 617 cicr0 &= ~CICR0_EOFM;
@@ -935,7 +932,7 @@ static void pxa_camera_deactivate(struct pxa_camera_dev *pcdev)
935static irqreturn_t pxa_camera_irq(int irq, void *data) 932static irqreturn_t pxa_camera_irq(int irq, void *data)
936{ 933{
937 struct pxa_camera_dev *pcdev = data; 934 struct pxa_camera_dev *pcdev = data;
938 unsigned long status, cicr0; 935 unsigned long status, cifr, cicr0;
939 struct pxa_buffer *buf; 936 struct pxa_buffer *buf;
940 struct videobuf_buffer *vb; 937 struct videobuf_buffer *vb;
941 938
@@ -949,6 +946,10 @@ static irqreturn_t pxa_camera_irq(int irq, void *data)
949 __raw_writel(status, pcdev->base + CISR); 946 __raw_writel(status, pcdev->base + CISR);
950 947
951 if (status & CISR_EOF) { 948 if (status & CISR_EOF) {
949 /* Reset the FIFOs */
950 cifr = __raw_readl(pcdev->base + CIFR) | CIFR_RESET_F;
951 __raw_writel(cifr, pcdev->base + CIFR);
952
952 pcdev->active = list_first_entry(&pcdev->capture, 953 pcdev->active = list_first_entry(&pcdev->capture,
953 struct pxa_buffer, vb.queue); 954 struct pxa_buffer, vb.queue);
954 vb = &pcdev->active->vb; 955 vb = &pcdev->active->vb;
diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c
index 6e16b3979326..1ad980f8e770 100644
--- a/drivers/media/video/sh_mobile_ceu_camera.c
+++ b/drivers/media/video/sh_mobile_ceu_camera.c
@@ -1633,7 +1633,7 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
1633 height = pix->height; 1633 height = pix->height;
1634 1634
1635 pix->bytesperline = soc_mbus_bytes_per_line(width, xlate->host_fmt); 1635 pix->bytesperline = soc_mbus_bytes_per_line(width, xlate->host_fmt);
1636 if (pix->bytesperline < 0) 1636 if ((int)pix->bytesperline < 0)
1637 return pix->bytesperline; 1637 return pix->bytesperline;
1638 pix->sizeimage = height * pix->bytesperline; 1638 pix->sizeimage = height * pix->bytesperline;
1639 1639
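
The sh_mobile_ceu_camera.c one-liner is a signed/unsigned trap: pix->bytesperline is unsigned, so a negative error code returned by soc_mbus_bytes_per_line() wraps around and 'bytesperline < 0' can never be true. Casting to int for the comparison restores the error check. The pitfall distilled, with a hypothetical stride helper standing in for the soc_mbus call:

#include <linux/types.h>
#include <linux/errno.h>

static int compute_stride(u32 width)
{
	return width ? (int)(width * 2) : -EINVAL;	/* hypothetical 16bpp */
}

static int store_stride(u32 *bytesperline, u32 width)
{
	*bytesperline = compute_stride(width);

	/* '*bytesperline < 0' would always be false for an unsigned type;
	 * cast back to int before deciding whether the helper failed */
	if ((int)*bytesperline < 0)
		return (int)*bytesperline;

	return 0;
}
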
diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c
index a3d5728b6449..f2ab025ad97a 100644
--- a/drivers/mfd/wm831x-core.c
+++ b/drivers/mfd/wm831x-core.c
@@ -349,6 +349,9 @@ int wm831x_auxadc_read(struct wm831x *wm831x, enum wm831x_auxadc input)
349 goto disable; 349 goto disable;
350 } 350 }
351 351
352 /* If an interrupt arrived late clean up after it */
353 try_wait_for_completion(&wm831x->auxadc_done);
354
352 /* Ignore the result to allow us to soldier on without IRQ hookup */ 355 /* Ignore the result to allow us to soldier on without IRQ hookup */
353 wait_for_completion_timeout(&wm831x->auxadc_done, msecs_to_jiffies(5)); 356 wait_for_completion_timeout(&wm831x->auxadc_done, msecs_to_jiffies(5));
354 357
diff --git a/drivers/mfd/wm8350-core.c b/drivers/mfd/wm8350-core.c
index e400a3bed063..b5807484b4c9 100644
--- a/drivers/mfd/wm8350-core.c
+++ b/drivers/mfd/wm8350-core.c
@@ -363,6 +363,10 @@ int wm8350_read_auxadc(struct wm8350 *wm8350, int channel, int scale, int vref)
363 reg |= 1 << channel | WM8350_AUXADC_POLL; 363 reg |= 1 << channel | WM8350_AUXADC_POLL;
364 wm8350_reg_write(wm8350, WM8350_DIGITISER_CONTROL_1, reg); 364 wm8350_reg_write(wm8350, WM8350_DIGITISER_CONTROL_1, reg);
365 365
366 /* If a late IRQ left the completion signalled then consume
367 * the completion. */
368 try_wait_for_completion(&wm8350->auxadc_done);
369
366 /* We ignore the result of the completion and just check for a 370 /* We ignore the result of the completion and just check for a
367 * conversion result, allowing us to soldier on if the IRQ 371 * conversion result, allowing us to soldier on if the IRQ
368 * infrastructure is not set up for the chip. */ 372 * infrastructure is not set up for the chip. */
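
The wm831x-core.c and wm8350-core.c hunks add the same guard before waiting on the AUXADC completion: try_wait_for_completion() consumes, without blocking, a completion that a late interrupt from an earlier conversion may have left signalled, so the following wait_for_completion_timeout() cannot return immediately with stale state. The pattern reduced to a hedged sketch (the conversion start/read callbacks are illustrative):

#include <linux/completion.h>
#include <linux/jiffies.h>

static int read_adc_sketch(struct completion *conv_done,
			   int (*start_conversion)(void),
			   int (*read_result)(void))
{
	int ret;

	ret = start_conversion();
	if (ret)
		return ret;

	/* drain a completion left over from a conversion whose IRQ
	 * arrived after the previous caller had already timed out */
	try_wait_for_completion(conv_done);

	/* the result register is checked afterwards anyway, so a timeout
	 * is tolerated on systems without the ADC interrupt wired up */
	wait_for_completion_timeout(conv_done, msecs_to_jiffies(5));

	return read_result();
}
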
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 2191c8d896a0..0d0d625fece2 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -311,6 +311,22 @@ config TI_DAC7512
311 This driver can also be built as a module. If so, the module 311 This driver can also be built as a module. If so, the module
312 will be calles ti_dac7512. 312 will be calles ti_dac7512.
313 313
314config VMWARE_BALLOON
315 tristate "VMware Balloon Driver"
316 depends on X86
317 help
318 This is VMware physical memory management driver which acts
319 like a "balloon" that can be inflated to reclaim physical pages
320 by reserving them in the guest and invalidating them in the
321 monitor, freeing up the underlying machine pages so they can
322 be allocated to other guests. The balloon can also be deflated
323 to allow the guest to use more physical memory.
324
325 If unsure, say N.
326
327 To compile this driver as a module, choose M here: the
328 module will be called vmware_balloon.
329
314source "drivers/misc/c2port/Kconfig" 330source "drivers/misc/c2port/Kconfig"
315source "drivers/misc/eeprom/Kconfig" 331source "drivers/misc/eeprom/Kconfig"
316source "drivers/misc/cb710/Kconfig" 332source "drivers/misc/cb710/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 27c484355414..7b6f7eefdf8d 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -29,3 +29,4 @@ obj-$(CONFIG_C2PORT) += c2port/
29obj-$(CONFIG_IWMC3200TOP) += iwmc3200top/ 29obj-$(CONFIG_IWMC3200TOP) += iwmc3200top/
30obj-y += eeprom/ 30obj-y += eeprom/
31obj-y += cb710/ 31obj-y += cb710/
32obj-$(CONFIG_VMWARE_BALLOON) += vmware_balloon.o
diff --git a/drivers/misc/vmware_balloon.c b/drivers/misc/vmware_balloon.c
new file mode 100644
index 000000000000..db9cd0240c6f
--- /dev/null
+++ b/drivers/misc/vmware_balloon.c
@@ -0,0 +1,832 @@
1/*
2 * VMware Balloon driver.
3 *
4 * Copyright (C) 2000-2010, VMware, Inc. All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; version 2 of the License and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more
14 * details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Maintained by: Dmitry Torokhov <dtor@vmware.com>
21 */
22
23/*
 24 * This is the VMware physical memory management driver for Linux. The driver
25 * acts like a "balloon" that can be inflated to reclaim physical pages by
26 * reserving them in the guest and invalidating them in the monitor,
27 * freeing up the underlying machine pages so they can be allocated to
28 * other guests. The balloon can also be deflated to allow the guest to
29 * use more physical memory. Higher level policies can control the sizes
30 * of balloons in VMs in order to manage physical memory resources.
31 */
32
33//#define DEBUG
34#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
35
36#include <linux/types.h>
37#include <linux/kernel.h>
38#include <linux/mm.h>
39#include <linux/sched.h>
40#include <linux/module.h>
41#include <linux/workqueue.h>
42#include <linux/debugfs.h>
43#include <linux/seq_file.h>
44#include <asm/hypervisor.h>
45
46MODULE_AUTHOR("VMware, Inc.");
47MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
48MODULE_VERSION("1.2.1.0-K");
49MODULE_ALIAS("dmi:*:svnVMware*:*");
50MODULE_ALIAS("vmware_vmmemctl");
51MODULE_LICENSE("GPL");
52
53/*
 54 * Various constants controlling the rate of inflating/deflating the balloon,
55 * measured in pages.
56 */
57
58/*
59 * Rate of allocating memory when there is no memory pressure
60 * (driver performs non-sleeping allocations).
61 */
62#define VMW_BALLOON_NOSLEEP_ALLOC_MAX 16384U
63
64/*
 65 * Rates of memory allocation when the guest experiences memory pressure
66 * (driver performs sleeping allocations).
67 */
68#define VMW_BALLOON_RATE_ALLOC_MIN 512U
69#define VMW_BALLOON_RATE_ALLOC_MAX 2048U
70#define VMW_BALLOON_RATE_ALLOC_INC 16U
71
72/*
73 * Rates for releasing pages while deflating balloon.
74 */
75#define VMW_BALLOON_RATE_FREE_MIN 512U
76#define VMW_BALLOON_RATE_FREE_MAX 16384U
77#define VMW_BALLOON_RATE_FREE_INC 16U
78
79/*
80 * When guest is under memory pressure, use a reduced page allocation
81 * rate for next several cycles.
82 */
83#define VMW_BALLOON_SLOW_CYCLES 4
84
85/*
86 * Use __GFP_HIGHMEM to allow pages from HIGHMEM zone. We don't
87 * allow wait (__GFP_WAIT) for NOSLEEP page allocations. Use
88 * __GFP_NOWARN, to suppress page allocation failure warnings.
89 */
90#define VMW_PAGE_ALLOC_NOSLEEP (__GFP_HIGHMEM|__GFP_NOWARN)
91
92/*
93 * Use GFP_HIGHUSER when executing in a separate kernel thread
94 * context and allocation can sleep. This is less stressful to
95 * the guest memory system, since it allows the thread to block
96 * while memory is reclaimed, and won't take pages from emergency
97 * low-memory pools.
98 */
99#define VMW_PAGE_ALLOC_CANSLEEP (GFP_HIGHUSER)
100
101/* Maximum number of page allocations without yielding processor */
102#define VMW_BALLOON_YIELD_THRESHOLD 1024
103
104
105/*
106 * Hypervisor communication port definitions.
107 */
108#define VMW_BALLOON_HV_PORT 0x5670
109#define VMW_BALLOON_HV_MAGIC 0x456c6d6f
110#define VMW_BALLOON_PROTOCOL_VERSION 2
111#define VMW_BALLOON_GUEST_ID 1 /* Linux */
112
113#define VMW_BALLOON_CMD_START 0
114#define VMW_BALLOON_CMD_GET_TARGET 1
115#define VMW_BALLOON_CMD_LOCK 2
116#define VMW_BALLOON_CMD_UNLOCK 3
117#define VMW_BALLOON_CMD_GUEST_ID 4
118
119/* error codes */
120#define VMW_BALLOON_SUCCESS 0
121#define VMW_BALLOON_FAILURE -1
122#define VMW_BALLOON_ERROR_CMD_INVALID 1
123#define VMW_BALLOON_ERROR_PPN_INVALID 2
124#define VMW_BALLOON_ERROR_PPN_LOCKED 3
125#define VMW_BALLOON_ERROR_PPN_UNLOCKED 4
126#define VMW_BALLOON_ERROR_PPN_PINNED 5
127#define VMW_BALLOON_ERROR_PPN_NOTNEEDED 6
128#define VMW_BALLOON_ERROR_RESET 7
129#define VMW_BALLOON_ERROR_BUSY 8
130
131#define VMWARE_BALLOON_CMD(cmd, data, result) \
132({ \
133 unsigned long __stat, __dummy1, __dummy2; \
134 __asm__ __volatile__ ("inl (%%dx)" : \
135 "=a"(__stat), \
136 "=c"(__dummy1), \
137 "=d"(__dummy2), \
138 "=b"(result) : \
139 "0"(VMW_BALLOON_HV_MAGIC), \
140 "1"(VMW_BALLOON_CMD_##cmd), \
141 "2"(VMW_BALLOON_HV_PORT), \
142 "3"(data) : \
143 "memory"); \
144 result &= -1UL; \
145 __stat & -1UL; \
146})
147
148#ifdef CONFIG_DEBUG_FS
149struct vmballoon_stats {
150 unsigned int timer;
151
 152	/* allocation statistics */
153 unsigned int alloc;
154 unsigned int alloc_fail;
155 unsigned int sleep_alloc;
156 unsigned int sleep_alloc_fail;
157 unsigned int refused_alloc;
158 unsigned int refused_free;
159 unsigned int free;
160
161 /* monitor operations */
162 unsigned int lock;
163 unsigned int lock_fail;
164 unsigned int unlock;
165 unsigned int unlock_fail;
166 unsigned int target;
167 unsigned int target_fail;
168 unsigned int start;
169 unsigned int start_fail;
170 unsigned int guest_type;
171 unsigned int guest_type_fail;
172};
173
174#define STATS_INC(stat) (stat)++
175#else
176#define STATS_INC(stat)
177#endif
178
179struct vmballoon {
180
181 /* list of reserved physical pages */
182 struct list_head pages;
183
184 /* transient list of non-balloonable pages */
185 struct list_head refused_pages;
186
187 /* balloon size in pages */
188 unsigned int size;
189 unsigned int target;
190
191 /* reset flag */
192 bool reset_required;
193
194 /* adjustment rates (pages per second) */
195 unsigned int rate_alloc;
196 unsigned int rate_free;
197
198 /* slowdown page allocations for next few cycles */
199 unsigned int slow_allocation_cycles;
200
201#ifdef CONFIG_DEBUG_FS
202 /* statistics */
203 struct vmballoon_stats stats;
204
205 /* debugfs file exporting statistics */
206 struct dentry *dbg_entry;
207#endif
208
209 struct sysinfo sysinfo;
210
211 struct delayed_work dwork;
212};
213
214static struct vmballoon balloon;
215static struct workqueue_struct *vmballoon_wq;
216
217/*
218 * Send "start" command to the host, communicating supported version
219 * of the protocol.
220 */
221static bool vmballoon_send_start(struct vmballoon *b)
222{
223 unsigned long status, dummy;
224
225 STATS_INC(b->stats.start);
226
227 status = VMWARE_BALLOON_CMD(START, VMW_BALLOON_PROTOCOL_VERSION, dummy);
228 if (status == VMW_BALLOON_SUCCESS)
229 return true;
230
231 pr_debug("%s - failed, hv returns %ld\n", __func__, status);
232 STATS_INC(b->stats.start_fail);
233 return false;
234}
235
236static bool vmballoon_check_status(struct vmballoon *b, unsigned long status)
237{
238 switch (status) {
239 case VMW_BALLOON_SUCCESS:
240 return true;
241
242 case VMW_BALLOON_ERROR_RESET:
243 b->reset_required = true;
244 /* fall through */
245
246 default:
247 return false;
248 }
249}
250
251/*
252 * Communicate guest type to the host so that it can adjust ballooning
253 * algorithm to the one most appropriate for the guest. This command
254 * is normally issued after sending "start" command and is part of
255 * standard reset sequence.
256 */
257static bool vmballoon_send_guest_id(struct vmballoon *b)
258{
259 unsigned long status, dummy;
260
261 status = VMWARE_BALLOON_CMD(GUEST_ID, VMW_BALLOON_GUEST_ID, dummy);
262
263 STATS_INC(b->stats.guest_type);
264
265 if (vmballoon_check_status(b, status))
266 return true;
267
268 pr_debug("%s - failed, hv returns %ld\n", __func__, status);
269 STATS_INC(b->stats.guest_type_fail);
270 return false;
271}
272
273/*
274 * Retrieve desired balloon size from the host.
275 */
276static bool vmballoon_send_get_target(struct vmballoon *b, u32 *new_target)
277{
278 unsigned long status;
279 unsigned long target;
280 unsigned long limit;
281 u32 limit32;
282
283 /*
284 * si_meminfo() is cheap. Moreover, we want to provide dynamic
285 * max balloon size later. So let us call si_meminfo() every
286 * iteration.
287 */
288 si_meminfo(&b->sysinfo);
289 limit = b->sysinfo.totalram;
290
291 /* Ensure limit fits in 32-bits */
292 limit32 = (u32)limit;
293 if (limit != limit32)
294 return false;
295
296 /* update stats */
297 STATS_INC(b->stats.target);
298
299 status = VMWARE_BALLOON_CMD(GET_TARGET, limit, target);
300 if (vmballoon_check_status(b, status)) {
301 *new_target = target;
302 return true;
303 }
304
305 pr_debug("%s - failed, hv returns %ld\n", __func__, status);
306 STATS_INC(b->stats.target_fail);
307 return false;
308}
309
310/*
311 * Notify the host about allocated page so that host can use it without
312 * fear that guest will need it. Host may reject some pages, we need to
313 * check the return value and maybe submit a different page.
314 */
315static bool vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn)
316{
317 unsigned long status, dummy;
318 u32 pfn32;
319
320 pfn32 = (u32)pfn;
321 if (pfn32 != pfn)
322 return false;
323
324 STATS_INC(b->stats.lock);
325
326 status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy);
327 if (vmballoon_check_status(b, status))
328 return true;
329
330 pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
331 STATS_INC(b->stats.lock_fail);
332 return false;
333}
334
335/*
336 * Notify the host that guest intends to release given page back into
337 * the pool of available (to the guest) pages.
338 */
339static bool vmballoon_send_unlock_page(struct vmballoon *b, unsigned long pfn)
340{
341 unsigned long status, dummy;
342 u32 pfn32;
343
344 pfn32 = (u32)pfn;
345 if (pfn32 != pfn)
346 return false;
347
348 STATS_INC(b->stats.unlock);
349
350 status = VMWARE_BALLOON_CMD(UNLOCK, pfn, dummy);
351 if (vmballoon_check_status(b, status))
352 return true;
353
354 pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
355 STATS_INC(b->stats.unlock_fail);
356 return false;
357}
358
359/*
360 * Quickly release all pages allocated for the balloon. This function is
361 * called when host decides to "reset" balloon for one reason or another.
362 * Unlike normal "deflate" we do not (shall not) notify host of the pages
363 * being released.
364 */
365static void vmballoon_pop(struct vmballoon *b)
366{
367 struct page *page, *next;
368 unsigned int count = 0;
369
370 list_for_each_entry_safe(page, next, &b->pages, lru) {
371 list_del(&page->lru);
372 __free_page(page);
373 STATS_INC(b->stats.free);
374 b->size--;
375
376 if (++count >= b->rate_free) {
377 count = 0;
378 cond_resched();
379 }
380 }
381}
382
383/*
384 * Perform standard reset sequence by popping the balloon (in case it
385 * is not empty) and then restarting protocol. This operation normally
386 * happens when host responds with VMW_BALLOON_ERROR_RESET to a command.
387 */
388static void vmballoon_reset(struct vmballoon *b)
389{
390 /* free all pages, skipping monitor unlock */
391 vmballoon_pop(b);
392
393 if (vmballoon_send_start(b)) {
394 b->reset_required = false;
395 if (!vmballoon_send_guest_id(b))
396 pr_err("failed to send guest ID to the host\n");
397 }
398}
399
400/*
401 * Allocate (or reserve) a page for the balloon and notify the host. If host
402 * refuses the page put it on "refuse" list and allocate another one until host
403 * is satisfied. "Refused" pages are released at the end of inflation cycle
404 * (when we allocate b->rate_alloc pages).
405 */
406static int vmballoon_reserve_page(struct vmballoon *b, bool can_sleep)
407{
408 struct page *page;
409 gfp_t flags;
410 bool locked = false;
411
412 do {
413 if (!can_sleep)
414 STATS_INC(b->stats.alloc);
415 else
416 STATS_INC(b->stats.sleep_alloc);
417
418 flags = can_sleep ? VMW_PAGE_ALLOC_CANSLEEP : VMW_PAGE_ALLOC_NOSLEEP;
419 page = alloc_page(flags);
420 if (!page) {
421 if (!can_sleep)
422 STATS_INC(b->stats.alloc_fail);
423 else
424 STATS_INC(b->stats.sleep_alloc_fail);
425 return -ENOMEM;
426 }
427
428 /* inform monitor */
429 locked = vmballoon_send_lock_page(b, page_to_pfn(page));
430 if (!locked) {
431 if (b->reset_required) {
432 __free_page(page);
433 return -EIO;
434 }
435
436 /* place on list of non-balloonable pages, retry allocation */
437 list_add(&page->lru, &b->refused_pages);
438 STATS_INC(b->stats.refused_alloc);
439 }
440 } while (!locked);
441
442 /* track allocated page */
443 list_add(&page->lru, &b->pages);
444
445 /* update balloon size */
446 b->size++;
447
448 return 0;
449}
450
451/*
452 * Release the page allocated for the balloon. Note that we first notify
453 * the host so it can make sure the page will be available for the guest
454 * to use, if needed.
455 */
456static int vmballoon_release_page(struct vmballoon *b, struct page *page)
457{
458 if (!vmballoon_send_unlock_page(b, page_to_pfn(page)))
459 return -EIO;
460
461 list_del(&page->lru);
462
463 /* deallocate page */
464 __free_page(page);
465 STATS_INC(b->stats.free);
466
467 /* update balloon size */
468 b->size--;
469
470 return 0;
471}
472
473/*
474 * Release pages that were allocated while attempting to inflate the
475 * balloon but were refused by the host for one reason or another.
476 */
477static void vmballoon_release_refused_pages(struct vmballoon *b)
478{
479 struct page *page, *next;
480
481 list_for_each_entry_safe(page, next, &b->refused_pages, lru) {
482 list_del(&page->lru);
483 __free_page(page);
484 STATS_INC(b->stats.refused_free);
485 }
486}
487
488/*
489 * Inflate the balloon towards its target size. Note that we try to limit
490 * the rate of allocation to make sure we are not choking the rest of the
491 * system.
492 */
493static void vmballoon_inflate(struct vmballoon *b)
494{
495 unsigned int goal;
496 unsigned int rate;
497 unsigned int i;
498 unsigned int allocations = 0;
499 int error = 0;
500 bool alloc_can_sleep = false;
501
502 pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);
503
504 /*
505 * First try NOSLEEP page allocations to inflate the balloon.
506 *
507 * If we do not throttle nosleep allocations, we can drain all
508 * free pages in the guest quickly (if the balloon target is high).
509 * As a side effect, draining free pages helps to inform (force)
510 * the guest to start swapping if the balloon target is not met yet,
511 * which is a desired behavior. However, the balloon driver can consume
512 * all available CPU cycles if too many pages are allocated in a
513 * second. Therefore, we throttle nosleep allocations even when
514 * the guest is not under memory pressure. OTOH, if we have already
515 * predicted that the guest is under memory pressure, then we
516 * slow down page allocations considerably.
517 */
518
519 goal = b->target - b->size;
520 /*
521 * Start with the no-sleep allocation rate, which may be higher
522 * than the sleeping allocation rate.
523 */
524 rate = b->slow_allocation_cycles ?
525 b->rate_alloc : VMW_BALLOON_NOSLEEP_ALLOC_MAX;
526
527 pr_debug("%s - goal: %d, no-sleep rate: %d, sleep rate: %d\n",
528 __func__, goal, rate, b->rate_alloc);
529
530 for (i = 0; i < goal; i++) {
531
532 error = vmballoon_reserve_page(b, alloc_can_sleep);
533 if (error) {
534 if (error != -ENOMEM) {
535 /*
536 * Not a page allocation failure, stop this
537 * cycle. Maybe we'll get a new target from
538 * the host soon.
539 */
540 break;
541 }
542
543 if (alloc_can_sleep) {
544 /*
545 * CANSLEEP page allocation failed, so the guest
546 * is under severe memory pressure. Quickly
547 * decrease the allocation rate.
548 */
549 b->rate_alloc = max(b->rate_alloc / 2,
550 VMW_BALLOON_RATE_ALLOC_MIN);
551 break;
552 }
553
554 /*
555 * NOSLEEP page allocation failed, so the guest is
556 * under memory pressure. Let us slow down page
557 * allocations for the next few cycles so that the guest
558 * gets out of memory pressure. Also, if we have already
559 * allocated b->rate_alloc pages, let's pause;
560 * otherwise switch to sleeping allocations.
561 */
562 b->slow_allocation_cycles = VMW_BALLOON_SLOW_CYCLES;
563
564 if (i >= b->rate_alloc)
565 break;
566
567 alloc_can_sleep = true;
568 /* Lower rate for sleeping allocations. */
569 rate = b->rate_alloc;
570 }
571
572 if (++allocations > VMW_BALLOON_YIELD_THRESHOLD) {
573 cond_resched();
574 allocations = 0;
575 }
576
577 if (i >= rate) {
578 /* We allocated enough pages, let's take a break. */
579 break;
580 }
581 }
582
583 /*
584 * We reached our goal without failures, so try increasing
585 * the allocation rate.
586 */
587 if (error == 0 && i >= b->rate_alloc) {
588 unsigned int mult = i / b->rate_alloc;
589
590 b->rate_alloc =
591 min(b->rate_alloc + mult * VMW_BALLOON_RATE_ALLOC_INC,
592 VMW_BALLOON_RATE_ALLOC_MAX);
593 }
594
595 vmballoon_release_refused_pages(b);
596}
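
To make the rate adaptation above easier to follow, here is a small stand-alone sketch (not part of the driver) of how b->rate_alloc moves between its bounds: it is halved when a CANSLEEP allocation fails, and it grows by one increment per b->rate_alloc pages allocated without error. The EX_* constants are assumed values for illustration; the real VMW_BALLOON_RATE_ALLOC_* values are defined earlier in this file.

#define EX_RATE_ALLOC_MIN	16
#define EX_RATE_ALLOC_MAX	2048
#define EX_RATE_ALLOC_INC	16

static unsigned int ex_rate_on_failure(unsigned int rate_alloc)
{
	/* CANSLEEP allocation failed: back off quickly, but not below the floor. */
	rate_alloc /= 2;
	return rate_alloc < EX_RATE_ALLOC_MIN ? EX_RATE_ALLOC_MIN : rate_alloc;
}

static unsigned int ex_rate_on_success(unsigned int rate_alloc, unsigned int allocated)
{
	/* Grow by one EX_RATE_ALLOC_INC per rate_alloc pages allocated without error. */
	unsigned int mult = allocated / rate_alloc;
	unsigned int next = rate_alloc + mult * EX_RATE_ALLOC_INC;

	return next > EX_RATE_ALLOC_MAX ? EX_RATE_ALLOC_MAX : next;
}

For example, starting from a rate of 512, a CANSLEEP failure drops it to 256, while a clean cycle that allocated 1024 pages raises it to 512 + 2 * 16 = 544.
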
597
598/*
599 * Decrease the size of the balloon, allowing the guest to use more memory.
600 */
601static void vmballoon_deflate(struct vmballoon *b)
602{
603 struct page *page, *next;
604 unsigned int i = 0;
605 unsigned int goal;
606 int error;
607
608 pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);
609
610 /* limit deallocation rate */
611 goal = min(b->size - b->target, b->rate_free);
612
613 pr_debug("%s - goal: %d, rate: %d\n", __func__, goal, b->rate_free);
614
615 /* free pages to reach target */
616 list_for_each_entry_safe(page, next, &b->pages, lru) {
617 error = vmballoon_release_page(b, page);
618 if (error) {
619 /* quickly decrease rate in case of error */
620 b->rate_free = max(b->rate_free / 2,
621 VMW_BALLOON_RATE_FREE_MIN);
622 return;
623 }
624
625 if (++i >= goal)
626 break;
627 }
628
629 /* slowly increase rate if there were no errors */
630 b->rate_free = min(b->rate_free + VMW_BALLOON_RATE_FREE_INC,
631 VMW_BALLOON_RATE_FREE_MAX);
632}
633
634/*
635 * Balloon work function: reset the protocol if needed, get the new target
636 * size and adjust the balloon accordingly. Repeat roughly every second.
637 */
638static void vmballoon_work(struct work_struct *work)
639{
640 struct delayed_work *dwork = to_delayed_work(work);
641 struct vmballoon *b = container_of(dwork, struct vmballoon, dwork);
642 unsigned int target;
643
644 STATS_INC(b->stats.timer);
645
646 if (b->reset_required)
647 vmballoon_reset(b);
648
649 if (b->slow_allocation_cycles > 0)
650 b->slow_allocation_cycles--;
651
652 if (vmballoon_send_get_target(b, &target)) {
653 /* update target, adjust size */
654 b->target = target;
655
656 if (b->size < target)
657 vmballoon_inflate(b);
658 else if (b->size > target)
659 vmballoon_deflate(b);
660 }
661
662 queue_delayed_work(vmballoon_wq, dwork, round_jiffies_relative(HZ));
663}
664
665/*
666 * DEBUGFS Interface
667 */
668#ifdef CONFIG_DEBUG_FS
669
670static int vmballoon_debug_show(struct seq_file *f, void *offset)
671{
672 struct vmballoon *b = f->private;
673 struct vmballoon_stats *stats = &b->stats;
674
675 /* format size info */
676 seq_printf(f,
677 "target: %8d pages\n"
678 "current: %8d pages\n",
679 b->target, b->size);
680
681 /* format rate info */
682 seq_printf(f,
683 "rateNoSleepAlloc: %8d pages/sec\n"
684 "rateSleepAlloc: %8d pages/sec\n"
685 "rateFree: %8d pages/sec\n",
686 VMW_BALLOON_NOSLEEP_ALLOC_MAX,
687 b->rate_alloc, b->rate_free);
688
689 seq_printf(f,
690 "\n"
691 "timer: %8u\n"
692 "start: %8u (%4u failed)\n"
693 "guestType: %8u (%4u failed)\n"
694 "lock: %8u (%4u failed)\n"
695 "unlock: %8u (%4u failed)\n"
696 "target: %8u (%4u failed)\n"
697 "primNoSleepAlloc: %8u (%4u failed)\n"
698 "primCanSleepAlloc: %8u (%4u failed)\n"
699 "primFree: %8u\n"
700 "errAlloc: %8u\n"
701 "errFree: %8u\n",
702 stats->timer,
703 stats->start, stats->start_fail,
704 stats->guest_type, stats->guest_type_fail,
705 stats->lock, stats->lock_fail,
706 stats->unlock, stats->unlock_fail,
707 stats->target, stats->target_fail,
708 stats->alloc, stats->alloc_fail,
709 stats->sleep_alloc, stats->sleep_alloc_fail,
710 stats->free,
711 stats->refused_alloc, stats->refused_free);
712
713 return 0;
714}
715
716static int vmballoon_debug_open(struct inode *inode, struct file *file)
717{
718 return single_open(file, vmballoon_debug_show, inode->i_private);
719}
720
721static const struct file_operations vmballoon_debug_fops = {
722 .owner = THIS_MODULE,
723 .open = vmballoon_debug_open,
724 .read = seq_read,
725 .llseek = seq_lseek,
726 .release = single_release,
727};
728
729static int __init vmballoon_debugfs_init(struct vmballoon *b)
730{
731 int error;
732
733 b->dbg_entry = debugfs_create_file("vmmemctl", S_IRUGO, NULL, b,
734 &vmballoon_debug_fops);
735 if (IS_ERR(b->dbg_entry)) {
736 error = PTR_ERR(b->dbg_entry);
737 pr_err("failed to create debugfs entry, error: %d\n", error);
738 return error;
739 }
740
741 return 0;
742}
743
744static void __exit vmballoon_debugfs_exit(struct vmballoon *b)
745{
746 debugfs_remove(b->dbg_entry);
747}
748
749#else
750
751static inline int vmballoon_debugfs_init(struct vmballoon *b)
752{
753 return 0;
754}
755
756static inline void vmballoon_debugfs_exit(struct vmballoon *b)
757{
758}
759
760#endif /* CONFIG_DEBUG_FS */
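
For reference, the statistics formatted above are exposed through a single debugfs file named "vmmemctl" created at the debugfs root. A userspace program can read it like any regular file; a minimal sketch, assuming debugfs is mounted at the conventional /sys/kernel/debug path:

#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/kernel/debug/vmmemctl", "r");

	if (!f) {
		perror("vmmemctl");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* pass the stats through unchanged */
	fclose(f);
	return 0;
}
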
761
762static int __init vmballoon_init(void)
763{
764 int error;
765
766 /*
767 * Check if we are running on VMware's hypervisor and bail out
768 * if we are not.
769 */
770 if (x86_hyper != &x86_hyper_vmware)
771 return -ENODEV;
772
773 vmballoon_wq = create_freezeable_workqueue("vmmemctl");
774 if (!vmballoon_wq) {
775 pr_err("failed to create workqueue\n");
776 return -ENOMEM;
777 }
778
779 INIT_LIST_HEAD(&balloon.pages);
780 INIT_LIST_HEAD(&balloon.refused_pages);
781
782 /* initialize rates */
783 balloon.rate_alloc = VMW_BALLOON_RATE_ALLOC_MAX;
784 balloon.rate_free = VMW_BALLOON_RATE_FREE_MAX;
785
786 INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work);
787
788 /*
789 * Start balloon.
790 */
791 if (!vmballoon_send_start(&balloon)) {
792 pr_err("failed to send start command to the host\n");
793 error = -EIO;
794 goto fail;
795 }
796
797 if (!vmballoon_send_guest_id(&balloon)) {
798 pr_err("failed to send guest ID to the host\n");
799 error = -EIO;
800 goto fail;
801 }
802
803 error = vmballoon_debugfs_init(&balloon);
804 if (error)
805 goto fail;
806
807 queue_delayed_work(vmballoon_wq, &balloon.dwork, 0);
808
809 return 0;
810
811fail:
812 destroy_workqueue(vmballoon_wq);
813 return error;
814}
815module_init(vmballoon_init);
816
817static void __exit vmballoon_exit(void)
818{
819 cancel_delayed_work_sync(&balloon.dwork);
820 destroy_workqueue(vmballoon_wq);
821
822 vmballoon_debugfs_exit(&balloon);
823
824 /*
825 * Deallocate all reserved memory, and reset the connection with the monitor.
826 * Reset the connection before deallocating memory to avoid the potential for
827 * additional spurious resets from the guest touching deallocated pages.
828 */
829 vmballoon_send_start(&balloon);
830 vmballoon_pop(&balloon);
831}
832module_exit(vmballoon_exit);
diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c
index a6dd7da37357..336d9f553f3e 100644
--- a/drivers/mmc/host/at91_mci.c
+++ b/drivers/mmc/host/at91_mci.c
@@ -314,8 +314,8 @@ static void at91_mci_post_dma_read(struct at91mci_host *host)
314 dmabuf = (unsigned *)tmpv; 314 dmabuf = (unsigned *)tmpv;
315 } 315 }
316 316
317 flush_kernel_dcache_page(sg_page(sg));
317 kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ); 318 kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ);
318 dmac_flush_range((void *)sgbuffer, ((void *)sgbuffer) + amount);
319 data->bytes_xfered += amount; 319 data->bytes_xfered += amount;
320 if (size == 0) 320 if (size == 0)
321 break; 321 break;
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 88be37d9e9a5..fb279f4ed8b3 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -266,7 +266,7 @@ static int atmci_req_show(struct seq_file *s, void *v)
266 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n", 266 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
267 cmd->opcode, cmd->arg, cmd->flags, 267 cmd->opcode, cmd->arg, cmd->flags,
268 cmd->resp[0], cmd->resp[1], cmd->resp[2], 268 cmd->resp[0], cmd->resp[1], cmd->resp[2],
269 cmd->resp[2], cmd->error); 269 cmd->resp[3], cmd->error);
270 if (data) 270 if (data)
271 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n", 271 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
272 data->bytes_xfered, data->blocks, 272 data->bytes_xfered, data->blocks,
@@ -276,7 +276,7 @@ static int atmci_req_show(struct seq_file *s, void *v)
276 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n", 276 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
277 stop->opcode, stop->arg, stop->flags, 277 stop->opcode, stop->arg, stop->flags,
278 stop->resp[0], stop->resp[1], stop->resp[2], 278 stop->resp[0], stop->resp[1], stop->resp[2],
279 stop->resp[2], stop->error); 279 stop->resp[3], stop->error);
280 } 280 }
281 281
282 spin_unlock_bh(&slot->host->lock); 282 spin_unlock_bh(&slot->host->lock);
@@ -569,9 +569,10 @@ static void atmci_dma_cleanup(struct atmel_mci *host)
569{ 569{
570 struct mmc_data *data = host->data; 570 struct mmc_data *data = host->data;
571 571
572 dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len, 572 if (data)
573 ((data->flags & MMC_DATA_WRITE) 573 dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len,
574 ? DMA_TO_DEVICE : DMA_FROM_DEVICE)); 574 ((data->flags & MMC_DATA_WRITE)
575 ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
575} 576}
576 577
577static void atmci_stop_dma(struct atmel_mci *host) 578static void atmci_stop_dma(struct atmel_mci *host)
@@ -1099,8 +1100,8 @@ static void atmci_command_complete(struct atmel_mci *host,
1099 "command error: status=0x%08x\n", status); 1100 "command error: status=0x%08x\n", status);
1100 1101
1101 if (cmd->data) { 1102 if (cmd->data) {
1102 host->data = NULL;
1103 atmci_stop_dma(host); 1103 atmci_stop_dma(host);
1104 host->data = NULL;
1104 mci_writel(host, IDR, MCI_NOTBUSY 1105 mci_writel(host, IDR, MCI_NOTBUSY
1105 | MCI_TXRDY | MCI_RXRDY 1106 | MCI_TXRDY | MCI_RXRDY
1106 | ATMCI_DATA_ERROR_FLAGS); 1107 | ATMCI_DATA_ERROR_FLAGS);
@@ -1293,6 +1294,7 @@ static void atmci_tasklet_func(unsigned long priv)
1293 } else { 1294 } else {
1294 data->bytes_xfered = data->blocks * data->blksz; 1295 data->bytes_xfered = data->blocks * data->blksz;
1295 data->error = 0; 1296 data->error = 0;
1297 mci_writel(host, IDR, ATMCI_DATA_ERROR_FLAGS);
1296 } 1298 }
1297 1299
1298 if (!data->stop) { 1300 if (!data->stop) {
@@ -1751,13 +1753,13 @@ static int __init atmci_probe(struct platform_device *pdev)
1751 ret = -ENODEV; 1753 ret = -ENODEV;
1752 if (pdata->slot[0].bus_width) { 1754 if (pdata->slot[0].bus_width) {
1753 ret = atmci_init_slot(host, &pdata->slot[0], 1755 ret = atmci_init_slot(host, &pdata->slot[0],
1754 MCI_SDCSEL_SLOT_A, 0); 1756 0, MCI_SDCSEL_SLOT_A);
1755 if (!ret) 1757 if (!ret)
1756 nr_slots++; 1758 nr_slots++;
1757 } 1759 }
1758 if (pdata->slot[1].bus_width) { 1760 if (pdata->slot[1].bus_width) {
1759 ret = atmci_init_slot(host, &pdata->slot[1], 1761 ret = atmci_init_slot(host, &pdata->slot[1],
1760 MCI_SDCSEL_SLOT_B, 1); 1762 1, MCI_SDCSEL_SLOT_B);
1761 if (!ret) 1763 if (!ret)
1762 nr_slots++; 1764 nr_slots++;
1763 } 1765 }
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 84c103a7ee13..ff115d920888 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -55,14 +55,16 @@ static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
55 host->cclk = host->mclk / (2 * (clk + 1)); 55 host->cclk = host->mclk / (2 * (clk + 1));
56 } 56 }
57 if (host->hw_designer == AMBA_VENDOR_ST) 57 if (host->hw_designer == AMBA_VENDOR_ST)
58 clk |= MCI_FCEN; /* Bug fix in ST IP block */ 58 clk |= MCI_ST_FCEN; /* Bug fix in ST IP block */
59 clk |= MCI_CLK_ENABLE; 59 clk |= MCI_CLK_ENABLE;
60 /* This hasn't proven to be worthwhile */ 60 /* This hasn't proven to be worthwhile */
61 /* clk |= MCI_CLK_PWRSAVE; */ 61 /* clk |= MCI_CLK_PWRSAVE; */
62 } 62 }
63 63
64 if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) 64 if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
65 clk |= MCI_WIDE_BUS; 65 clk |= MCI_4BIT_BUS;
66 if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
67 clk |= MCI_ST_8BIT_BUS;
66 68
67 writel(clk, host->base + MMCICLOCK); 69 writel(clk, host->base + MMCICLOCK);
68} 70}
@@ -629,7 +631,18 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
629 631
630 mmc->ops = &mmci_ops; 632 mmc->ops = &mmci_ops;
631 mmc->f_min = (host->mclk + 511) / 512; 633 mmc->f_min = (host->mclk + 511) / 512;
632 mmc->f_max = min(host->mclk, fmax); 634 /*
635 * If the platform data supplies a maximum operating
636 * frequency, this takes precedence. Else, we fall back
637 * to using the module parameter, which has a (low)
638 * default value in case it is not specified. Either
639 * value must not exceed the clock rate into the block,
640 * of course.
641 */
642 if (plat->f_max)
643 mmc->f_max = min(host->mclk, plat->f_max);
644 else
645 mmc->f_max = min(host->mclk, fmax);
633 dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max); 646 dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);
634 647
635#ifdef CONFIG_REGULATOR 648#ifdef CONFIG_REGULATOR
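
The comment added in the hunk above spells out a simple precedence rule for the card clock ceiling: a platform-supplied f_max wins over the module parameter, and either value is clamped to the clock feeding the block. A stand-alone sketch of that selection (the helper name is illustrative, not part of the driver):

static unsigned int ex_pick_f_max(unsigned int mclk, unsigned int plat_f_max,
				  unsigned int param_fmax)
{
	/* Platform data takes precedence; fall back to the module parameter. */
	unsigned int wanted = plat_f_max ? plat_f_max : param_fmax;

	/* Neither value may exceed the clock rate into the block. */
	return wanted < mclk ? wanted : mclk;
}
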
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 1ceb9a90f59b..d77062e5e3af 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -25,9 +25,11 @@
25#define MCI_CLK_ENABLE (1 << 8) 25#define MCI_CLK_ENABLE (1 << 8)
26#define MCI_CLK_PWRSAVE (1 << 9) 26#define MCI_CLK_PWRSAVE (1 << 9)
27#define MCI_CLK_BYPASS (1 << 10) 27#define MCI_CLK_BYPASS (1 << 10)
28#define MCI_WIDE_BUS (1 << 11) 28#define MCI_4BIT_BUS (1 << 11)
29/* 8bit wide buses supported in ST Micro versions */
30#define MCI_ST_8BIT_BUS (1 << 12)
29/* HW flow control on the ST Micro version */ 31/* HW flow control on the ST Micro version */
30#define MCI_FCEN (1 << 13) 32#define MCI_ST_FCEN (1 << 13)
31 33
32#define MMCIARGUMENT 0x008 34#define MMCIARGUMENT 0x008
33#define MMCICOMMAND 0x00c 35#define MMCICOMMAND 0x00c
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 0ed48959b590..e4f00e70a749 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -544,7 +544,7 @@ static irqreturn_t pxamci_detect_irq(int irq, void *devid)
544{ 544{
545 struct pxamci_host *host = mmc_priv(devid); 545 struct pxamci_host *host = mmc_priv(devid);
546 546
547 mmc_detect_change(devid, host->pdata->detect_delay); 547 mmc_detect_change(devid, msecs_to_jiffies(host->pdata->detect_delay_ms));
548 return IRQ_HANDLED; 548 return IRQ_HANDLED;
549} 549}
550 550
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index 82d1e4de475b..4521b1ecce45 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -4,7 +4,7 @@
4 4
5# Core functionality. 5# Core functionality.
6obj-$(CONFIG_MTD) += mtd.o 6obj-$(CONFIG_MTD) += mtd.o
7mtd-y := mtdcore.o mtdsuper.o mtdbdi.o 7mtd-y := mtdcore.o mtdsuper.o
8mtd-$(CONFIG_MTD_PARTITIONS) += mtdpart.o 8mtd-$(CONFIG_MTD_PARTITIONS) += mtdpart.o
9 9
10obj-$(CONFIG_MTD_CONCAT) += mtdconcat.o 10obj-$(CONFIG_MTD_CONCAT) += mtdconcat.o
diff --git a/drivers/mtd/internal.h b/drivers/mtd/internal.h
deleted file mode 100644
index c658fe7216b5..000000000000
--- a/drivers/mtd/internal.h
+++ /dev/null
@@ -1,17 +0,0 @@
1/* Internal MTD definitions
2 *
3 * Copyright © 2006 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12/*
13 * mtdbdi.c
14 */
15extern struct backing_dev_info mtd_bdi_unmappable;
16extern struct backing_dev_info mtd_bdi_ro_mappable;
17extern struct backing_dev_info mtd_bdi_rw_mappable;
diff --git a/drivers/mtd/maps/pcmciamtd.c b/drivers/mtd/maps/pcmciamtd.c
index 689d6a79ffc0..87b2b8ff331e 100644
--- a/drivers/mtd/maps/pcmciamtd.c
+++ b/drivers/mtd/maps/pcmciamtd.c
@@ -52,7 +52,6 @@ static const int debug = 0;
52 52
53struct pcmciamtd_dev { 53struct pcmciamtd_dev {
54 struct pcmcia_device *p_dev; 54 struct pcmcia_device *p_dev;
55 dev_node_t node; /* device node */
56 caddr_t win_base; /* ioremapped address of PCMCIA window */ 55 caddr_t win_base; /* ioremapped address of PCMCIA window */
57 unsigned int win_size; /* size of window */ 56 unsigned int win_size; /* size of window */
58 unsigned int offset; /* offset into card the window currently points at */ 57 unsigned int offset; /* offset into card the window currently points at */
@@ -647,9 +646,7 @@ static int pcmciamtd_config(struct pcmcia_device *link)
647 pcmciamtd_release(link); 646 pcmciamtd_release(link);
648 return -ENODEV; 647 return -ENODEV;
649 } 648 }
650 snprintf(dev->node.dev_name, sizeof(dev->node.dev_name), "mtd%d", mtd->index);
651 info("mtd%d: %s", mtd->index, mtd->name); 649 info("mtd%d: %s", mtd->index, mtd->name);
652 link->dev_node = &dev->node;
653 return 0; 650 return 0;
654 651
655 failed: 652 failed:
diff --git a/drivers/mtd/mtdbdi.c b/drivers/mtd/mtdbdi.c
deleted file mode 100644
index 5ca5aed0b225..000000000000
--- a/drivers/mtd/mtdbdi.c
+++ /dev/null
@@ -1,43 +0,0 @@
1/* MTD backing device capabilities
2 *
3 * Copyright © 2006 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/backing-dev.h>
13#include <linux/mtd/mtd.h>
14#include "internal.h"
15
16/*
17 * backing device capabilities for non-mappable devices (such as NAND flash)
18 * - permits private mappings, copies are taken of the data
19 */
20struct backing_dev_info mtd_bdi_unmappable = {
21 .capabilities = BDI_CAP_MAP_COPY,
22};
23
24/*
25 * backing device capabilities for R/O mappable devices (such as ROM)
26 * - permits private mappings, copies are taken of the data
27 * - permits non-writable shared mappings
28 */
29struct backing_dev_info mtd_bdi_ro_mappable = {
30 .capabilities = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT |
31 BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP),
32};
33
34/*
35 * backing device capabilities for writable mappable devices (such as RAM)
36 * - permits private mappings, copies are taken of the data
37 * - permits non-writable shared mappings
38 */
39struct backing_dev_info mtd_bdi_rw_mappable = {
40 .capabilities = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT |
41 BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP |
42 BDI_CAP_WRITE_MAP),
43};
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 5b38b17d2229..b177e750efc3 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -2,6 +2,9 @@
2 * Core registration and callback routines for MTD 2 * Core registration and callback routines for MTD
3 * drivers and users. 3 * drivers and users.
4 * 4 *
5 * bdi bits are:
6 * Copyright © 2006 Red Hat, Inc. All Rights Reserved.
7 * Written by David Howells (dhowells@redhat.com)
5 */ 8 */
6 9
7#include <linux/module.h> 10#include <linux/module.h>
@@ -16,11 +19,39 @@
16#include <linux/init.h> 19#include <linux/init.h>
17#include <linux/mtd/compatmac.h> 20#include <linux/mtd/compatmac.h>
18#include <linux/proc_fs.h> 21#include <linux/proc_fs.h>
22#include <linux/backing-dev.h>
19 23
20#include <linux/mtd/mtd.h> 24#include <linux/mtd/mtd.h>
21#include "internal.h"
22 25
23#include "mtdcore.h" 26#include "mtdcore.h"
27/*
28 * backing device capabilities for non-mappable devices (such as NAND flash)
29 * - permits private mappings, copies are taken of the data
30 */
31struct backing_dev_info mtd_bdi_unmappable = {
32 .capabilities = BDI_CAP_MAP_COPY,
33};
34
35/*
36 * backing device capabilities for R/O mappable devices (such as ROM)
37 * - permits private mappings, copies are taken of the data
38 * - permits non-writable shared mappings
39 */
40struct backing_dev_info mtd_bdi_ro_mappable = {
41 .capabilities = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT |
42 BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP),
43};
44
45/*
46 * backing device capabilities for writable mappable devices (such as RAM)
47 * - permits private mappings, copies are taken of the data
48 * - permits non-writable shared mappings
49 */
50struct backing_dev_info mtd_bdi_rw_mappable = {
51 .capabilities = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT |
52 BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP |
53 BDI_CAP_WRITE_MAP),
54};
24 55
25static int mtd_cls_suspend(struct device *dev, pm_message_t state); 56static int mtd_cls_suspend(struct device *dev, pm_message_t state);
26static int mtd_cls_resume(struct device *dev); 57static int mtd_cls_resume(struct device *dev);
@@ -628,20 +659,55 @@ done:
628/*====================================================================*/ 659/*====================================================================*/
629/* Init code */ 660/* Init code */
630 661
662static int __init mtd_bdi_init(struct backing_dev_info *bdi, const char *name)
663{
664 int ret;
665
666 ret = bdi_init(bdi);
667 if (!ret)
668 ret = bdi_register(bdi, NULL, name);
669
670 if (ret)
671 bdi_destroy(bdi);
672
673 return ret;
674}
675
631static int __init init_mtd(void) 676static int __init init_mtd(void)
632{ 677{
633 int ret; 678 int ret;
679
634 ret = class_register(&mtd_class); 680 ret = class_register(&mtd_class);
681 if (ret)
682 goto err_reg;
683
684 ret = mtd_bdi_init(&mtd_bdi_unmappable, "mtd-unmap");
685 if (ret)
686 goto err_bdi1;
687
688 ret = mtd_bdi_init(&mtd_bdi_ro_mappable, "mtd-romap");
689 if (ret)
690 goto err_bdi2;
691
692 ret = mtd_bdi_init(&mtd_bdi_rw_mappable, "mtd-rwmap");
693 if (ret)
694 goto err_bdi3;
635 695
636 if (ret) {
637 pr_err("Error registering mtd class: %d\n", ret);
638 return ret;
639 }
640#ifdef CONFIG_PROC_FS 696#ifdef CONFIG_PROC_FS
641 if ((proc_mtd = create_proc_entry( "mtd", 0, NULL ))) 697 if ((proc_mtd = create_proc_entry( "mtd", 0, NULL )))
642 proc_mtd->read_proc = mtd_read_proc; 698 proc_mtd->read_proc = mtd_read_proc;
643#endif /* CONFIG_PROC_FS */ 699#endif /* CONFIG_PROC_FS */
644 return 0; 700 return 0;
701
702err_bdi3:
703 bdi_destroy(&mtd_bdi_ro_mappable);
704err_bdi2:
705 bdi_destroy(&mtd_bdi_unmappable);
706err_bdi1:
707 class_unregister(&mtd_class);
708err_reg:
709 pr_err("Error registering mtd class or bdi: %d\n", ret);
710 return ret;
645} 711}
646 712
647static void __exit cleanup_mtd(void) 713static void __exit cleanup_mtd(void)
@@ -651,6 +717,9 @@ static void __exit cleanup_mtd(void)
651 remove_proc_entry( "mtd", NULL); 717 remove_proc_entry( "mtd", NULL);
652#endif /* CONFIG_PROC_FS */ 718#endif /* CONFIG_PROC_FS */
653 class_unregister(&mtd_class); 719 class_unregister(&mtd_class);
720 bdi_destroy(&mtd_bdi_unmappable);
721 bdi_destroy(&mtd_bdi_ro_mappable);
722 bdi_destroy(&mtd_bdi_rw_mappable);
654} 723}
655 724
656module_init(init_mtd); 725module_init(init_mtd);
diff --git a/drivers/mtd/mtdsuper.c b/drivers/mtd/mtdsuper.c
index af8b42e0a55b..7c003191fca4 100644
--- a/drivers/mtd/mtdsuper.c
+++ b/drivers/mtd/mtdsuper.c
@@ -13,6 +13,7 @@
13#include <linux/mtd/super.h> 13#include <linux/mtd/super.h>
14#include <linux/namei.h> 14#include <linux/namei.h>
15#include <linux/ctype.h> 15#include <linux/ctype.h>
16#include <linux/slab.h>
16 17
17/* 18/*
18 * compare superblocks to see if they're equivalent 19 * compare superblocks to see if they're equivalent
@@ -44,6 +45,7 @@ static int get_sb_mtd_set(struct super_block *sb, void *_mtd)
44 45
45 sb->s_mtd = mtd; 46 sb->s_mtd = mtd;
46 sb->s_dev = MKDEV(MTD_BLOCK_MAJOR, mtd->index); 47 sb->s_dev = MKDEV(MTD_BLOCK_MAJOR, mtd->index);
48 sb->s_bdi = mtd->backing_dev_info;
47 return 0; 49 return 0;
48} 50}
49 51
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index f59c07427af3..d60fc5719fef 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -60,7 +60,13 @@ static void orion_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
60 } 60 }
61 buf64 = (uint64_t *)buf; 61 buf64 = (uint64_t *)buf;
62 while (i < len/8) { 62 while (i < len/8) {
63 uint64_t x; 63 /*
64 * Since GCC has no proper constraint (PR 43518),
65 * force the x variable into the r2/r3 register pair, as the
66 * ldrd instruction requires the first register to be even.
67 */
68 register uint64_t x asm ("r2");
69
64 asm volatile ("ldrd\t%0, [%1]" : "=&r" (x) : "r" (io_base)); 70 asm volatile ("ldrd\t%0, [%1]" : "=&r" (x) : "r" (io_base));
65 buf64[i++] = x; 71 buf64[i++] = x;
66 } 72 }
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index a03d291de854..f0d23de32967 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -1944,7 +1944,7 @@ static int rtl8139_rx(struct net_device *dev, struct rtl8139_private *tp,
1944 netif_dbg(tp, rx_status, dev, "%s() status %04x, size %04x, cur %04x\n", 1944 netif_dbg(tp, rx_status, dev, "%s() status %04x, size %04x, cur %04x\n",
1945 __func__, rx_status, rx_size, cur_rx); 1945 __func__, rx_status, rx_size, cur_rx);
1946#if RTL8139_DEBUG > 2 1946#if RTL8139_DEBUG > 2
1947 print_dump_hex(KERN_DEBUG, "Frame contents: ", 1947 print_hex_dump(KERN_DEBUG, "Frame contents: ",
1948 DUMP_PREFIX_OFFSET, 16, 1, 1948 DUMP_PREFIX_OFFSET, 16, 1,
1949 &rx_ring[ring_offset], 70, true); 1949 &rx_ring[ring_offset], 70, true);
1950#endif 1950#endif
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index a583b50d9de8..12b280afdd51 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -273,6 +273,7 @@ obj-$(CONFIG_USB_RTL8150) += usb/
273obj-$(CONFIG_USB_HSO) += usb/ 273obj-$(CONFIG_USB_HSO) += usb/
274obj-$(CONFIG_USB_USBNET) += usb/ 274obj-$(CONFIG_USB_USBNET) += usb/
275obj-$(CONFIG_USB_ZD1201) += usb/ 275obj-$(CONFIG_USB_ZD1201) += usb/
276obj-$(CONFIG_USB_IPHETH) += usb/
276 277
277obj-y += wireless/ 278obj-y += wireless/
278obj-$(CONFIG_NET_TULIP) += tulip/ 279obj-$(CONFIG_NET_TULIP) += tulip/
diff --git a/drivers/net/a2065.c b/drivers/net/a2065.c
index ed5e9742be2c..a8f0512bad38 100644
--- a/drivers/net/a2065.c
+++ b/drivers/net/a2065.c
@@ -674,6 +674,7 @@ static struct zorro_device_id a2065_zorro_tbl[] __devinitdata = {
674 { ZORRO_PROD_AMERISTAR_A2065 }, 674 { ZORRO_PROD_AMERISTAR_A2065 },
675 { 0 } 675 { 0 }
676}; 676};
677MODULE_DEVICE_TABLE(zorro, a2065_zorro_tbl);
677 678
678static struct zorro_driver a2065_driver = { 679static struct zorro_driver a2065_driver = {
679 .name = "a2065", 680 .name = "a2065",
diff --git a/drivers/net/ariadne.c b/drivers/net/ariadne.c
index fa1a2354f5f9..4b30a46486e2 100644
--- a/drivers/net/ariadne.c
+++ b/drivers/net/ariadne.c
@@ -145,6 +145,7 @@ static struct zorro_device_id ariadne_zorro_tbl[] __devinitdata = {
145 { ZORRO_PROD_VILLAGE_TRONIC_ARIADNE }, 145 { ZORRO_PROD_VILLAGE_TRONIC_ARIADNE },
146 { 0 } 146 { 0 }
147}; 147};
148MODULE_DEVICE_TABLE(zorro, ariadne_zorro_tbl);
148 149
149static struct zorro_driver ariadne_driver = { 150static struct zorro_driver ariadne_driver = {
150 .name = "ariadne", 151 .name = "ariadne",
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index 6995169d285a..cd17d09f385c 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -311,11 +311,6 @@ err:
311 processed++; 311 processed++;
312 } 312 }
313 313
314 if (processed) {
315 wrw(ep, REG_RXDENQ, processed);
316 wrw(ep, REG_RXSTSENQ, processed);
317 }
318
319 return processed; 314 return processed;
320} 315}
321 316
@@ -350,6 +345,11 @@ poll_some_more:
350 goto poll_some_more; 345 goto poll_some_more;
351 } 346 }
352 347
348 if (rx) {
349 wrw(ep, REG_RXDENQ, rx);
350 wrw(ep, REG_RXSTSENQ, rx);
351 }
352
353 return rx; 353 return rx;
354} 354}
355 355
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index a257babd1bb4..ac90a3828f69 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -58,8 +58,8 @@
58#include "bnx2_fw.h" 58#include "bnx2_fw.h"
59 59
60#define DRV_MODULE_NAME "bnx2" 60#define DRV_MODULE_NAME "bnx2"
61#define DRV_MODULE_VERSION "2.0.8" 61#define DRV_MODULE_VERSION "2.0.9"
62#define DRV_MODULE_RELDATE "Feb 15, 2010" 62#define DRV_MODULE_RELDATE "April 27, 2010"
63#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-5.0.0.j6.fw" 63#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-5.0.0.j6.fw"
64#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-5.0.0.j3.fw" 64#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
65#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-5.0.0.j9.fw" 65#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-5.0.0.j9.fw"
@@ -651,9 +651,10 @@ bnx2_napi_enable(struct bnx2 *bp)
651} 651}
652 652
653static void 653static void
654bnx2_netif_stop(struct bnx2 *bp) 654bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
655{ 655{
656 bnx2_cnic_stop(bp); 656 if (stop_cnic)
657 bnx2_cnic_stop(bp);
657 if (netif_running(bp->dev)) { 658 if (netif_running(bp->dev)) {
658 int i; 659 int i;
659 660
@@ -671,14 +672,15 @@ bnx2_netif_stop(struct bnx2 *bp)
671} 672}
672 673
673static void 674static void
674bnx2_netif_start(struct bnx2 *bp) 675bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
675{ 676{
676 if (atomic_dec_and_test(&bp->intr_sem)) { 677 if (atomic_dec_and_test(&bp->intr_sem)) {
677 if (netif_running(bp->dev)) { 678 if (netif_running(bp->dev)) {
678 netif_tx_wake_all_queues(bp->dev); 679 netif_tx_wake_all_queues(bp->dev);
679 bnx2_napi_enable(bp); 680 bnx2_napi_enable(bp);
680 bnx2_enable_int(bp); 681 bnx2_enable_int(bp);
681 bnx2_cnic_start(bp); 682 if (start_cnic)
683 bnx2_cnic_start(bp);
682 } 684 }
683 } 685 }
684} 686}
@@ -4759,8 +4761,12 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4759 rc = bnx2_alloc_bad_rbuf(bp); 4761 rc = bnx2_alloc_bad_rbuf(bp);
4760 } 4762 }
4761 4763
4762 if (bp->flags & BNX2_FLAG_USING_MSIX) 4764 if (bp->flags & BNX2_FLAG_USING_MSIX) {
4763 bnx2_setup_msix_tbl(bp); 4765 bnx2_setup_msix_tbl(bp);
4766 /* Prevent MSIX table reads and writes from timing out */
4767 REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
4768 BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
4769 }
4764 4770
4765 return rc; 4771 return rc;
4766} 4772}
@@ -6273,12 +6279,12 @@ bnx2_reset_task(struct work_struct *work)
6273 return; 6279 return;
6274 } 6280 }
6275 6281
6276 bnx2_netif_stop(bp); 6282 bnx2_netif_stop(bp, true);
6277 6283
6278 bnx2_init_nic(bp, 1); 6284 bnx2_init_nic(bp, 1);
6279 6285
6280 atomic_set(&bp->intr_sem, 1); 6286 atomic_set(&bp->intr_sem, 1);
6281 bnx2_netif_start(bp); 6287 bnx2_netif_start(bp, true);
6282 rtnl_unlock(); 6288 rtnl_unlock();
6283} 6289}
6284 6290
@@ -6320,7 +6326,7 @@ bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
6320 struct bnx2 *bp = netdev_priv(dev); 6326 struct bnx2 *bp = netdev_priv(dev);
6321 6327
6322 if (netif_running(dev)) 6328 if (netif_running(dev))
6323 bnx2_netif_stop(bp); 6329 bnx2_netif_stop(bp, false);
6324 6330
6325 bp->vlgrp = vlgrp; 6331 bp->vlgrp = vlgrp;
6326 6332
@@ -6331,7 +6337,7 @@ bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
6331 if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN) 6337 if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
6332 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1); 6338 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
6333 6339
6334 bnx2_netif_start(bp); 6340 bnx2_netif_start(bp, false);
6335} 6341}
6336#endif 6342#endif
6337 6343
@@ -7051,9 +7057,9 @@ bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7051 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS; 7057 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7052 7058
7053 if (netif_running(bp->dev)) { 7059 if (netif_running(bp->dev)) {
7054 bnx2_netif_stop(bp); 7060 bnx2_netif_stop(bp, true);
7055 bnx2_init_nic(bp, 0); 7061 bnx2_init_nic(bp, 0);
7056 bnx2_netif_start(bp); 7062 bnx2_netif_start(bp, true);
7057 } 7063 }
7058 7064
7059 return 0; 7065 return 0;
@@ -7083,7 +7089,7 @@ bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
7083 /* Reset will erase chipset stats; save them */ 7089 /* Reset will erase chipset stats; save them */
7084 bnx2_save_stats(bp); 7090 bnx2_save_stats(bp);
7085 7091
7086 bnx2_netif_stop(bp); 7092 bnx2_netif_stop(bp, true);
7087 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET); 7093 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
7088 bnx2_free_skbs(bp); 7094 bnx2_free_skbs(bp);
7089 bnx2_free_mem(bp); 7095 bnx2_free_mem(bp);
@@ -7111,7 +7117,7 @@ bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
7111 bnx2_setup_cnic_irq_info(bp); 7117 bnx2_setup_cnic_irq_info(bp);
7112 mutex_unlock(&bp->cnic_lock); 7118 mutex_unlock(&bp->cnic_lock);
7113#endif 7119#endif
7114 bnx2_netif_start(bp); 7120 bnx2_netif_start(bp, true);
7115 } 7121 }
7116 return 0; 7122 return 0;
7117} 7123}
@@ -7364,7 +7370,7 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
7364 if (etest->flags & ETH_TEST_FL_OFFLINE) { 7370 if (etest->flags & ETH_TEST_FL_OFFLINE) {
7365 int i; 7371 int i;
7366 7372
7367 bnx2_netif_stop(bp); 7373 bnx2_netif_stop(bp, true);
7368 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG); 7374 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
7369 bnx2_free_skbs(bp); 7375 bnx2_free_skbs(bp);
7370 7376
@@ -7383,7 +7389,7 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
7383 bnx2_shutdown_chip(bp); 7389 bnx2_shutdown_chip(bp);
7384 else { 7390 else {
7385 bnx2_init_nic(bp, 1); 7391 bnx2_init_nic(bp, 1);
7386 bnx2_netif_start(bp); 7392 bnx2_netif_start(bp, true);
7387 } 7393 }
7388 7394
7389 /* wait for link up */ 7395 /* wait for link up */
@@ -8377,7 +8383,7 @@ bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
8377 return 0; 8383 return 0;
8378 8384
8379 flush_scheduled_work(); 8385 flush_scheduled_work();
8380 bnx2_netif_stop(bp); 8386 bnx2_netif_stop(bp, true);
8381 netif_device_detach(dev); 8387 netif_device_detach(dev);
8382 del_timer_sync(&bp->timer); 8388 del_timer_sync(&bp->timer);
8383 bnx2_shutdown_chip(bp); 8389 bnx2_shutdown_chip(bp);
@@ -8399,7 +8405,7 @@ bnx2_resume(struct pci_dev *pdev)
8399 bnx2_set_power_state(bp, PCI_D0); 8405 bnx2_set_power_state(bp, PCI_D0);
8400 netif_device_attach(dev); 8406 netif_device_attach(dev);
8401 bnx2_init_nic(bp, 1); 8407 bnx2_init_nic(bp, 1);
8402 bnx2_netif_start(bp); 8408 bnx2_netif_start(bp, true);
8403 return 0; 8409 return 0;
8404} 8410}
8405 8411
@@ -8426,7 +8432,7 @@ static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8426 } 8432 }
8427 8433
8428 if (netif_running(dev)) { 8434 if (netif_running(dev)) {
8429 bnx2_netif_stop(bp); 8435 bnx2_netif_stop(bp, true);
8430 del_timer_sync(&bp->timer); 8436 del_timer_sync(&bp->timer);
8431 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET); 8437 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8432 } 8438 }
@@ -8483,7 +8489,7 @@ static void bnx2_io_resume(struct pci_dev *pdev)
8483 8489
8484 rtnl_lock(); 8490 rtnl_lock();
8485 if (netif_running(dev)) 8491 if (netif_running(dev))
8486 bnx2_netif_start(bp); 8492 bnx2_netif_start(bp, true);
8487 8493
8488 netif_device_attach(dev); 8494 netif_device_attach(dev);
8489 rtnl_unlock(); 8495 rtnl_unlock();
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 33451092b8e8..d800b598ae3d 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -1006,7 +1006,7 @@ static int ems_usb_probe(struct usb_interface *intf,
1006 1006
1007 netdev = alloc_candev(sizeof(struct ems_usb), MAX_TX_URBS); 1007 netdev = alloc_candev(sizeof(struct ems_usb), MAX_TX_URBS);
1008 if (!netdev) { 1008 if (!netdev) {
1009 dev_err(netdev->dev.parent, "Couldn't alloc candev\n"); 1009 dev_err(&intf->dev, "ems_usb: Couldn't alloc candev\n");
1010 return -ENOMEM; 1010 return -ENOMEM;
1011 } 1011 }
1012 1012
@@ -1036,20 +1036,20 @@ static int ems_usb_probe(struct usb_interface *intf,
1036 1036
1037 dev->intr_urb = usb_alloc_urb(0, GFP_KERNEL); 1037 dev->intr_urb = usb_alloc_urb(0, GFP_KERNEL);
1038 if (!dev->intr_urb) { 1038 if (!dev->intr_urb) {
1039 dev_err(netdev->dev.parent, "Couldn't alloc intr URB\n"); 1039 dev_err(&intf->dev, "Couldn't alloc intr URB\n");
1040 goto cleanup_candev; 1040 goto cleanup_candev;
1041 } 1041 }
1042 1042
1043 dev->intr_in_buffer = kzalloc(INTR_IN_BUFFER_SIZE, GFP_KERNEL); 1043 dev->intr_in_buffer = kzalloc(INTR_IN_BUFFER_SIZE, GFP_KERNEL);
1044 if (!dev->intr_in_buffer) { 1044 if (!dev->intr_in_buffer) {
1045 dev_err(netdev->dev.parent, "Couldn't alloc Intr buffer\n"); 1045 dev_err(&intf->dev, "Couldn't alloc Intr buffer\n");
1046 goto cleanup_intr_urb; 1046 goto cleanup_intr_urb;
1047 } 1047 }
1048 1048
1049 dev->tx_msg_buffer = kzalloc(CPC_HEADER_SIZE + 1049 dev->tx_msg_buffer = kzalloc(CPC_HEADER_SIZE +
1050 sizeof(struct ems_cpc_msg), GFP_KERNEL); 1050 sizeof(struct ems_cpc_msg), GFP_KERNEL);
1051 if (!dev->tx_msg_buffer) { 1051 if (!dev->tx_msg_buffer) {
1052 dev_err(netdev->dev.parent, "Couldn't alloc Tx buffer\n"); 1052 dev_err(&intf->dev, "Couldn't alloc Tx buffer\n");
1053 goto cleanup_intr_in_buffer; 1053 goto cleanup_intr_in_buffer;
1054 } 1054 }
1055 1055
diff --git a/drivers/net/cxgb3/ael1002.c b/drivers/net/cxgb3/ael1002.c
index 5248f9e0b2f4..35cd36729155 100644
--- a/drivers/net/cxgb3/ael1002.c
+++ b/drivers/net/cxgb3/ael1002.c
@@ -934,7 +934,7 @@ static struct cphy_ops xaui_direct_ops = {
934int t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter, 934int t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter,
935 int phy_addr, const struct mdio_ops *mdio_ops) 935 int phy_addr, const struct mdio_ops *mdio_ops)
936{ 936{
937 cphy_init(phy, adapter, MDIO_PRTAD_NONE, &xaui_direct_ops, mdio_ops, 937 cphy_init(phy, adapter, phy_addr, &xaui_direct_ops, mdio_ops,
938 SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_TP, 938 SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_TP,
939 "10GBASE-CX4"); 939 "10GBASE-CX4");
940 return 0; 940 return 0;
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index aced6c5e635c..e3f1b8566495 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -439,7 +439,7 @@ static void free_irq_resources(struct adapter *adapter)
439static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt, 439static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
440 unsigned long n) 440 unsigned long n)
441{ 441{
442 int attempts = 5; 442 int attempts = 10;
443 443
444 while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) { 444 while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
445 if (!--attempts) 445 if (!--attempts)
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index b997e578e58f..791080303db1 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -166,6 +166,7 @@
166#include <linux/ethtool.h> 166#include <linux/ethtool.h>
167#include <linux/string.h> 167#include <linux/string.h>
168#include <linux/firmware.h> 168#include <linux/firmware.h>
169#include <linux/rtnetlink.h>
169#include <asm/unaligned.h> 170#include <asm/unaligned.h>
170 171
171 172
@@ -2265,8 +2266,13 @@ static void e100_tx_timeout_task(struct work_struct *work)
2265 2266
2266 DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n", 2267 DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n",
2267 ioread8(&nic->csr->scb.status)); 2268 ioread8(&nic->csr->scb.status));
2268 e100_down(netdev_priv(netdev)); 2269
2269 e100_up(netdev_priv(netdev)); 2270 rtnl_lock();
2271 if (netif_running(netdev)) {
2272 e100_down(netdev_priv(netdev));
2273 e100_up(netdev_priv(netdev));
2274 }
2275 rtnl_unlock();
2270} 2276}
2271 2277
2272static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode) 2278static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index 712ccc66ba25..90155552ea09 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -336,7 +336,6 @@ static s32 e1000_get_variants_82571(struct e1000_adapter *adapter)
336 struct e1000_hw *hw = &adapter->hw; 336 struct e1000_hw *hw = &adapter->hw;
337 static int global_quad_port_a; /* global port a indication */ 337 static int global_quad_port_a; /* global port a indication */
338 struct pci_dev *pdev = adapter->pdev; 338 struct pci_dev *pdev = adapter->pdev;
339 u16 eeprom_data = 0;
340 int is_port_b = er32(STATUS) & E1000_STATUS_FUNC_1; 339 int is_port_b = er32(STATUS) & E1000_STATUS_FUNC_1;
341 s32 rc; 340 s32 rc;
342 341
@@ -387,16 +386,15 @@ static s32 e1000_get_variants_82571(struct e1000_adapter *adapter)
387 if (pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD) 386 if (pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD)
388 adapter->flags &= ~FLAG_HAS_WOL; 387 adapter->flags &= ~FLAG_HAS_WOL;
389 break; 388 break;
390
391 case e1000_82573: 389 case e1000_82573:
390 case e1000_82574:
391 case e1000_82583:
392 /* Disable ASPM L0s due to hardware errata */
393 e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L0S);
394
392 if (pdev->device == E1000_DEV_ID_82573L) { 395 if (pdev->device == E1000_DEV_ID_82573L) {
393 if (e1000_read_nvm(&adapter->hw, NVM_INIT_3GIO_3, 1, 396 adapter->flags |= FLAG_HAS_JUMBO_FRAMES;
394 &eeprom_data) < 0) 397 adapter->max_hw_frame_size = DEFAULT_JUMBO;
395 break;
396 if (!(eeprom_data & NVM_WORD1A_ASPM_MASK)) {
397 adapter->flags |= FLAG_HAS_JUMBO_FRAMES;
398 adapter->max_hw_frame_size = DEFAULT_JUMBO;
399 }
400 } 398 }
401 break; 399 break;
402 default: 400 default:
@@ -1792,6 +1790,7 @@ struct e1000_info e1000_82571_info = {
1792 | FLAG_RESET_OVERWRITES_LAA /* errata */ 1790 | FLAG_RESET_OVERWRITES_LAA /* errata */
1793 | FLAG_TARC_SPEED_MODE_BIT /* errata */ 1791 | FLAG_TARC_SPEED_MODE_BIT /* errata */
1794 | FLAG_APME_CHECK_PORT_B, 1792 | FLAG_APME_CHECK_PORT_B,
1793 .flags2 = FLAG2_DISABLE_ASPM_L1, /* errata 13 */
1795 .pba = 38, 1794 .pba = 38,
1796 .max_hw_frame_size = DEFAULT_JUMBO, 1795 .max_hw_frame_size = DEFAULT_JUMBO,
1797 .get_variants = e1000_get_variants_82571, 1796 .get_variants = e1000_get_variants_82571,
@@ -1809,6 +1808,7 @@ struct e1000_info e1000_82572_info = {
1809 | FLAG_RX_CSUM_ENABLED 1808 | FLAG_RX_CSUM_ENABLED
1810 | FLAG_HAS_CTRLEXT_ON_LOAD 1809 | FLAG_HAS_CTRLEXT_ON_LOAD
1811 | FLAG_TARC_SPEED_MODE_BIT, /* errata */ 1810 | FLAG_TARC_SPEED_MODE_BIT, /* errata */
1811 .flags2 = FLAG2_DISABLE_ASPM_L1, /* errata 13 */
1812 .pba = 38, 1812 .pba = 38,
1813 .max_hw_frame_size = DEFAULT_JUMBO, 1813 .max_hw_frame_size = DEFAULT_JUMBO,
1814 .get_variants = e1000_get_variants_82571, 1814 .get_variants = e1000_get_variants_82571,
@@ -1820,13 +1820,11 @@ struct e1000_info e1000_82572_info = {
1820struct e1000_info e1000_82573_info = { 1820struct e1000_info e1000_82573_info = {
1821 .mac = e1000_82573, 1821 .mac = e1000_82573,
1822 .flags = FLAG_HAS_HW_VLAN_FILTER 1822 .flags = FLAG_HAS_HW_VLAN_FILTER
1823 | FLAG_HAS_JUMBO_FRAMES
1824 | FLAG_HAS_WOL 1823 | FLAG_HAS_WOL
1825 | FLAG_APME_IN_CTRL3 1824 | FLAG_APME_IN_CTRL3
1826 | FLAG_RX_CSUM_ENABLED 1825 | FLAG_RX_CSUM_ENABLED
1827 | FLAG_HAS_SMART_POWER_DOWN 1826 | FLAG_HAS_SMART_POWER_DOWN
1828 | FLAG_HAS_AMT 1827 | FLAG_HAS_AMT
1829 | FLAG_HAS_ERT
1830 | FLAG_HAS_SWSM_ON_LOAD, 1828 | FLAG_HAS_SWSM_ON_LOAD,
1831 .pba = 20, 1829 .pba = 20,
1832 .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN, 1830 .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 118bdf483593..ee32b9b27a9f 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -37,6 +37,7 @@
37#include <linux/io.h> 37#include <linux/io.h>
38#include <linux/netdevice.h> 38#include <linux/netdevice.h>
39#include <linux/pci.h> 39#include <linux/pci.h>
40#include <linux/pci-aspm.h>
40 41
41#include "hw.h" 42#include "hw.h"
42 43
@@ -374,7 +375,7 @@ struct e1000_adapter {
374struct e1000_info { 375struct e1000_info {
375 enum e1000_mac_type mac; 376 enum e1000_mac_type mac;
376 unsigned int flags; 377 unsigned int flags;
377 unsigned int flags2; 378 unsigned int flags2;
378 u32 pba; 379 u32 pba;
379 u32 max_hw_frame_size; 380 u32 max_hw_frame_size;
380 s32 (*get_variants)(struct e1000_adapter *); 381 s32 (*get_variants)(struct e1000_adapter *);
@@ -421,6 +422,7 @@ struct e1000_info {
421#define FLAG2_CRC_STRIPPING (1 << 0) 422#define FLAG2_CRC_STRIPPING (1 << 0)
422#define FLAG2_HAS_PHY_WAKEUP (1 << 1) 423#define FLAG2_HAS_PHY_WAKEUP (1 << 1)
423#define FLAG2_IS_DISCARDING (1 << 2) 424#define FLAG2_IS_DISCARDING (1 << 2)
425#define FLAG2_DISABLE_ASPM_L1 (1 << 3)
424 426
425#define E1000_RX_DESC_PS(R, i) \ 427#define E1000_RX_DESC_PS(R, i) \
426 (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) 428 (&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
@@ -461,6 +463,7 @@ extern void e1000e_update_stats(struct e1000_adapter *adapter);
461extern bool e1000e_has_link(struct e1000_adapter *adapter); 463extern bool e1000e_has_link(struct e1000_adapter *adapter);
462extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter); 464extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
463extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter); 465extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
466extern void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);
464 467
465extern unsigned int copybreak; 468extern unsigned int copybreak;
466 469
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 73d43c53015a..d5d55c6a373f 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -2524,12 +2524,12 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
2524 * excessive C-state transition latencies result in 2524 * excessive C-state transition latencies result in
2525 * dropped transactions. 2525 * dropped transactions.
2526 */ 2526 */
2527 pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, 2527 pm_qos_update_request(
2528 adapter->netdev->name, 55); 2528 adapter->netdev->pm_qos_req, 55);
2529 } else { 2529 } else {
2530 pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, 2530 pm_qos_update_request(
2531 adapter->netdev->name, 2531 adapter->netdev->pm_qos_req,
2532 PM_QOS_DEFAULT_VALUE); 2532 PM_QOS_DEFAULT_VALUE);
2533 } 2533 }
2534 } 2534 }
2535 2535
@@ -2824,8 +2824,8 @@ int e1000e_up(struct e1000_adapter *adapter)
2824 2824
2825 /* DMA latency requirement to workaround early-receive/jumbo issue */ 2825 /* DMA latency requirement to workaround early-receive/jumbo issue */
2826 if (adapter->flags & FLAG_HAS_ERT) 2826 if (adapter->flags & FLAG_HAS_ERT)
2827 pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, 2827 adapter->netdev->pm_qos_req =
2828 adapter->netdev->name, 2828 pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY,
2829 PM_QOS_DEFAULT_VALUE); 2829 PM_QOS_DEFAULT_VALUE);
2830 2830
2831 /* hardware has been reset, we need to reload some things */ 2831 /* hardware has been reset, we need to reload some things */
@@ -2887,9 +2887,11 @@ void e1000e_down(struct e1000_adapter *adapter)
2887 e1000_clean_tx_ring(adapter); 2887 e1000_clean_tx_ring(adapter);
2888 e1000_clean_rx_ring(adapter); 2888 e1000_clean_rx_ring(adapter);
2889 2889
2890 if (adapter->flags & FLAG_HAS_ERT) 2890 if (adapter->flags & FLAG_HAS_ERT) {
2891 pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, 2891 pm_qos_remove_request(
2892 adapter->netdev->name); 2892 adapter->netdev->pm_qos_req);
2893 adapter->netdev->pm_qos_req = NULL;
2894 }
2893 2895
2894 /* 2896 /*
2895 * TODO: for power management, we could drop the link and 2897 * TODO: for power management, we could drop the link and
@@ -4283,6 +4285,14 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
4283 return -EINVAL; 4285 return -EINVAL;
4284 } 4286 }
4285 4287
4288 /* 82573 Errata 17 */
4289 if (((adapter->hw.mac.type == e1000_82573) ||
4290 (adapter->hw.mac.type == e1000_82574)) &&
4291 (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN)) {
4292 adapter->flags2 |= FLAG2_DISABLE_ASPM_L1;
4293 e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L1);
4294 }
4295
4286 while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) 4296 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
4287 msleep(1); 4297 msleep(1);
4288 /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */ 4298 /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
@@ -4605,29 +4615,42 @@ static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
4605 } 4615 }
4606} 4616}
4607 4617
4608static void e1000e_disable_l1aspm(struct pci_dev *pdev) 4618#ifdef CONFIG_PCIEASPM
4619static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
4620{
4621 pci_disable_link_state(pdev, state);
4622}
4623#else
4624static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
4609{ 4625{
4610 int pos; 4626 int pos;
4611 u16 val; 4627 u16 reg16;
4612 4628
4613 /* 4629 /*
4614 * 82573 workaround - disable L1 ASPM on mobile chipsets 4630 * Both device and parent should have the same ASPM setting.
4615 * 4631 * Disable ASPM in downstream component first and then upstream.
4616 * L1 ASPM on various mobile (ich7) chipsets do not behave properly
4617 * resulting in lost data or garbage information on the pci-e link
4618 * level. This could result in (false) bad EEPROM checksum errors,
4619 * long ping times (up to 2s) or even a system freeze/hang.
4620 *
4621 * Unfortunately this feature saves about 1W power consumption when
4622 * active.
4623 */ 4632 */
4624 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); 4633 pos = pci_pcie_cap(pdev);
4625 pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &val); 4634 pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
4626 if (val & 0x2) { 4635 reg16 &= ~state;
4627 dev_warn(&pdev->dev, "Disabling L1 ASPM\n"); 4636 pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
4628 val &= ~0x2; 4637
4629 pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, val); 4638 if (!pdev->bus->self)
4630 } 4639 return;
4640
4641 pos = pci_pcie_cap(pdev->bus->self);
4642 pci_read_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, &reg16);
4643 reg16 &= ~state;
4644 pci_write_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, reg16);
4645}
4646#endif
4647void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
4648{
4649 dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
4650 (state & PCIE_LINK_STATE_L0S) ? "L0s" : "",
4651 (state & PCIE_LINK_STATE_L1) ? "L1" : "");
4652
4653 __e1000e_disable_aspm(pdev, state);
4631} 4654}
4632 4655
4633#ifdef CONFIG_PM 4656#ifdef CONFIG_PM
@@ -4653,7 +4676,8 @@ static int e1000_resume(struct pci_dev *pdev)
4653 pci_set_power_state(pdev, PCI_D0); 4676 pci_set_power_state(pdev, PCI_D0);
4654 pci_restore_state(pdev); 4677 pci_restore_state(pdev);
4655 pci_save_state(pdev); 4678 pci_save_state(pdev);
4656 e1000e_disable_l1aspm(pdev); 4679 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
4680 e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);
4657 4681
4658 err = pci_enable_device_mem(pdev); 4682 err = pci_enable_device_mem(pdev);
4659 if (err) { 4683 if (err) {
@@ -4795,7 +4819,8 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
4795 int err; 4819 int err;
4796 pci_ers_result_t result; 4820 pci_ers_result_t result;
4797 4821
4798 e1000e_disable_l1aspm(pdev); 4822 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
4823 e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);
4799 err = pci_enable_device_mem(pdev); 4824 err = pci_enable_device_mem(pdev);
4800 if (err) { 4825 if (err) {
4801 dev_err(&pdev->dev, 4826 dev_err(&pdev->dev,
@@ -4889,13 +4914,6 @@ static void e1000_eeprom_checks(struct e1000_adapter *adapter)
4889 dev_warn(&adapter->pdev->dev, 4914 dev_warn(&adapter->pdev->dev,
4890 "Warning: detected DSPD enabled in EEPROM\n"); 4915 "Warning: detected DSPD enabled in EEPROM\n");
4891 } 4916 }
4892
4893 ret_val = e1000_read_nvm(hw, NVM_INIT_3GIO_3, 1, &buf);
4894 if (!ret_val && (le16_to_cpu(buf) & (3 << 2))) {
4895 /* ASPM enable */
4896 dev_warn(&adapter->pdev->dev,
4897 "Warning: detected ASPM enabled in EEPROM\n");
4898 }
4899} 4917}
4900 4918
4901static const struct net_device_ops e1000e_netdev_ops = { 4919static const struct net_device_ops e1000e_netdev_ops = {
@@ -4944,7 +4962,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
4944 u16 eeprom_data = 0; 4962 u16 eeprom_data = 0;
4945 u16 eeprom_apme_mask = E1000_EEPROM_APME; 4963 u16 eeprom_apme_mask = E1000_EEPROM_APME;
4946 4964
4947 e1000e_disable_l1aspm(pdev); 4965 if (ei->flags2 & FLAG2_DISABLE_ASPM_L1)
4966 e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);
4948 4967
4949 err = pci_enable_device_mem(pdev); 4968 err = pci_enable_device_mem(pdev);
4950 if (err) 4969 if (err)
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 9f98c1c4a344..9b4e8f797a7a 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -1653,7 +1653,7 @@ fec_set_mac_address(struct net_device *dev, void *p)
1653 (dev->dev_addr[1] << 16) | (dev->dev_addr[0] << 24), 1653 (dev->dev_addr[1] << 16) | (dev->dev_addr[0] << 24),
1654 fep->hwp + FEC_ADDR_LOW); 1654 fep->hwp + FEC_ADDR_LOW);
1655 writel((dev->dev_addr[5] << 16) | (dev->dev_addr[4] << 24), 1655 writel((dev->dev_addr[5] << 16) | (dev->dev_addr[4] << 24),
1656 fep + FEC_ADDR_HIGH); 1656 fep->hwp + FEC_ADDR_HIGH);
1657 return 0; 1657 return 0;
1658} 1658}
1659 1659
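The fec.c hunk is a one-token bug fix: the high half of the MAC address was written relative to the private struct pointer (fep) instead of the ioremapped register base (fep->hwp), so the store never reached FEC_ADDR_HIGH. Keeping all register writes anchored to a single __iomem base avoids this class of bug; a hedged sketch, with placeholder offsets rather than the real FEC register map:

#include <linux/io.h>
#include <linux/types.h>

#define EX_ADDR_LOW	0x0e4	/* placeholder offsets for illustration */
#define EX_ADDR_HIGH	0x0e8

/* Write a 6-byte MAC into two 32-bit registers through one mapped base. */
static void example_set_hwaddr(void __iomem *base, const u8 *mac)
{
	writel((mac[3] << 0) | (mac[2] << 8) | (mac[1] << 16) | (mac[0] << 24),
	       base + EX_ADDR_LOW);
	writel((mac[5] << 16) | (mac[4] << 24), base + EX_ADDR_HIGH);
}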
diff --git a/drivers/net/fsl_pq_mdio.c b/drivers/net/fsl_pq_mdio.c
index d5160edf2fcf..3acac5f930c8 100644
--- a/drivers/net/fsl_pq_mdio.c
+++ b/drivers/net/fsl_pq_mdio.c
@@ -205,8 +205,6 @@ static int fsl_pq_mdio_find_free(struct mii_bus *new_bus)
205static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct device_node *np) 205static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct device_node *np)
206{ 206{
207 struct gfar __iomem *enet_regs; 207 struct gfar __iomem *enet_regs;
208 u32 __iomem *ioremap_tbipa;
209 u64 addr, size;
210 208
211 /* 209 /*
212 * This is mildly evil, but so is our hardware for doing this. 210 * This is mildly evil, but so is our hardware for doing this.
@@ -220,9 +218,7 @@ static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct devi
220 return &enet_regs->tbipa; 218 return &enet_regs->tbipa;
221 } else if (of_device_is_compatible(np, "fsl,etsec2-mdio") || 219 } else if (of_device_is_compatible(np, "fsl,etsec2-mdio") ||
222 of_device_is_compatible(np, "fsl,etsec2-tbi")) { 220 of_device_is_compatible(np, "fsl,etsec2-tbi")) {
223 addr = of_translate_address(np, of_get_address(np, 1, &size, NULL)); 221 return of_iomap(np, 1);
224 ioremap_tbipa = ioremap(addr, size);
225 return ioremap_tbipa;
226 } else 222 } else
227 return NULL; 223 return NULL;
228} 224}
@@ -279,6 +275,7 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
279 u32 __iomem *tbipa; 275 u32 __iomem *tbipa;
280 struct mii_bus *new_bus; 276 struct mii_bus *new_bus;
281 int tbiaddr = -1; 277 int tbiaddr = -1;
278 const u32 *addrp;
282 u64 addr = 0, size = 0; 279 u64 addr = 0, size = 0;
283 int err = 0; 280 int err = 0;
284 281
@@ -297,8 +294,19 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
297 new_bus->priv = priv; 294 new_bus->priv = priv;
298 fsl_pq_mdio_bus_name(new_bus->id, np); 295 fsl_pq_mdio_bus_name(new_bus->id, np);
299 296
297 addrp = of_get_address(np, 0, &size, NULL);
298 if (!addrp) {
299 err = -EINVAL;
300 goto err_free_bus;
301 }
302
300 /* Set the PHY base address */ 303 /* Set the PHY base address */
301 addr = of_translate_address(np, of_get_address(np, 0, &size, NULL)); 304 addr = of_translate_address(np, addrp);
305 if (addr == OF_BAD_ADDR) {
306 err = -EINVAL;
307 goto err_free_bus;
308 }
309
302 map = ioremap(addr, size); 310 map = ioremap(addr, size);
303 if (!map) { 311 if (!map) {
304 err = -ENOMEM; 312 err = -ENOMEM;
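Two independent cleanups in fsl_pq_mdio.c: the etsec2 TBI case now uses of_iomap(), which rolls of_get_address(), of_translate_address() and ioremap() plus their error handling into one call, and the probe path now checks both of_get_address() and of_translate_address() (OF_BAD_ADDR) before ioremapping. A condensed sketch of both patterns; the function names are placeholders, and the header location for the of_* helpers varies by tree (<linux/of_address.h> here, <asm/prom.h> on older powerpc trees).

#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/io.h>

/* Variant 1: let of_iomap() do lookup + translate + map in one step. */
static void __iomem *example_map_simple(struct device_node *np)
{
	return of_iomap(np, 0);		/* NULL on any failure */
}

/* Variant 2: open-coded, for when the physical address is needed too.
 * Mirrors the error checks added in the probe hunk above. */
static void __iomem *example_map_checked(struct device_node *np, u64 *out_addr)
{
	const u32 *addrp;
	u64 addr, size;

	addrp = of_get_address(np, 0, &size, NULL);
	if (!addrp)
		return NULL;

	addr = of_translate_address(np, addrp);
	if (addr == OF_BAD_ADDR)
		return NULL;

	*out_addr = addr;
	return ioremap(addr, size);
}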
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 080d1cea5b26..5d3763fb3472 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -549,12 +549,8 @@ static int gfar_parse_group(struct device_node *np,
549 struct gfar_private *priv, const char *model) 549 struct gfar_private *priv, const char *model)
550{ 550{
551 u32 *queue_mask; 551 u32 *queue_mask;
552 u64 addr, size;
553
554 addr = of_translate_address(np,
555 of_get_address(np, 0, &size, NULL));
556 priv->gfargrp[priv->num_grps].regs = ioremap(addr, size);
557 552
553 priv->gfargrp[priv->num_grps].regs = of_iomap(np, 0);
558 if (!priv->gfargrp[priv->num_grps].regs) 554 if (!priv->gfargrp[priv->num_grps].regs)
559 return -ENOMEM; 555 return -ENOMEM;
560 556
@@ -1515,9 +1511,9 @@ static void gfar_halt_nodisable(struct net_device *dev)
1515 tempval |= (DMACTRL_GRS | DMACTRL_GTS); 1511 tempval |= (DMACTRL_GRS | DMACTRL_GTS);
1516 gfar_write(&regs->dmactrl, tempval); 1512 gfar_write(&regs->dmactrl, tempval);
1517 1513
1518 while (!(gfar_read(&regs->ievent) & 1514 spin_event_timeout(((gfar_read(&regs->ievent) &
1519 (IEVENT_GRSC | IEVENT_GTSC))) 1515 (IEVENT_GRSC | IEVENT_GTSC)) ==
1520 cpu_relax(); 1516 (IEVENT_GRSC | IEVENT_GTSC)), -1, 0);
1521 } 1517 }
1522} 1518}
1523 1519
@@ -1653,6 +1649,7 @@ static void free_skb_resources(struct gfar_private *priv)
1653 sizeof(struct rxbd8) * priv->total_rx_ring_size, 1649 sizeof(struct rxbd8) * priv->total_rx_ring_size,
1654 priv->tx_queue[0]->tx_bd_base, 1650 priv->tx_queue[0]->tx_bd_base,
1655 priv->tx_queue[0]->tx_bd_dma_base); 1651 priv->tx_queue[0]->tx_bd_dma_base);
1652 skb_queue_purge(&priv->rx_recycle);
1656} 1653}
1657 1654
1658void gfar_start(struct net_device *dev) 1655void gfar_start(struct net_device *dev)
@@ -2092,7 +2089,6 @@ static int gfar_close(struct net_device *dev)
2092 2089
2093 disable_napi(priv); 2090 disable_napi(priv);
2094 2091
2095 skb_queue_purge(&priv->rx_recycle);
2096 cancel_work_sync(&priv->reset_task); 2092 cancel_work_sync(&priv->reset_task);
2097 stop_gfar(dev); 2093 stop_gfar(dev);
2098 2094
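Three things change in gianfar.c: register mapping goes through of_iomap() (same pattern as fsl_pq_mdio above), the unbounded cpu_relax() poll for IEVENT_GRSC|IEVENT_GTSC becomes a bounded spin_event_timeout(), and skb_queue_purge(&priv->rx_recycle) moves from gfar_close() into free_skb_resources() so the recycle list is also drained on stop/restart paths. spin_event_timeout() is a powerpc helper; a hedged sketch of a bounded register poll in that style, with placeholder register and bit names:

/* powerpc-only sketch: spin_event_timeout() (asm/delay.h) polls a
 * condition with an upper bound instead of spinning forever.  It
 * evaluates to the final condition value, so zero here means the bits
 * never came up before the timeout.  EX_GRSC/EX_GTSC are placeholder
 * values, not the real gianfar IEVENT bits.
 */
#include <linux/types.h>
#include <asm/delay.h>
#include <asm/io.h>

#define EX_GRSC	0x00000100
#define EX_GTSC	0x00000002

static bool example_wait_graceful_stop(u32 __iomem *ievent)
{
	u32 done;

	/* a timeout of -1 means "the maximum", as in the hunk above */
	done = spin_event_timeout(
		(in_be32(ievent) & (EX_GRSC | EX_GTSC)) ==
					(EX_GRSC | EX_GTSC), -1, 0);
	return done != 0;
}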
diff --git a/drivers/net/hydra.c b/drivers/net/hydra.c
index 24724b4ad709..07d8e5b634f3 100644
--- a/drivers/net/hydra.c
+++ b/drivers/net/hydra.c
@@ -71,6 +71,7 @@ static struct zorro_device_id hydra_zorro_tbl[] __devinitdata = {
71 { ZORRO_PROD_HYDRA_SYSTEMS_AMIGANET }, 71 { ZORRO_PROD_HYDRA_SYSTEMS_AMIGANET },
72 { 0 } 72 { 0 }
73}; 73};
74MODULE_DEVICE_TABLE(zorro, hydra_zorro_tbl);
74 75
75static struct zorro_driver hydra_driver = { 76static struct zorro_driver hydra_driver = {
76 .name = "hydra", 77 .name = "hydra",
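The hydra hunk only adds MODULE_DEVICE_TABLE(zorro, ...), which exports the Zorro ID table to userspace so udev/modprobe can autoload the module when a matching board turns up. The idiom, as a minimal sketch (the table name is illustrative; the ID entry is the one from the hunk):

#include <linux/module.h>
#include <linux/zorro.h>

static struct zorro_device_id example_zorro_ids[] __devinitdata = {
	{ ZORRO_PROD_HYDRA_SYSTEMS_AMIGANET },
	{ 0 }
};
/* Emits the alias information modprobe uses for hotplug autoloading. */
MODULE_DEVICE_TABLE(zorro, example_zorro_ids);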
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index 1b1edad1eb5e..f16e981812a9 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -48,6 +48,7 @@
48#define DRV_VERSION "1.0.0-k0" 48#define DRV_VERSION "1.0.0-k0"
49char igbvf_driver_name[] = "igbvf"; 49char igbvf_driver_name[] = "igbvf";
50const char igbvf_driver_version[] = DRV_VERSION; 50const char igbvf_driver_version[] = DRV_VERSION;
51struct pm_qos_request_list *igbvf_driver_pm_qos_req;
51static const char igbvf_driver_string[] = 52static const char igbvf_driver_string[] =
52 "Intel(R) Virtual Function Network Driver"; 53 "Intel(R) Virtual Function Network Driver";
53static const char igbvf_copyright[] = "Copyright (c) 2009 Intel Corporation."; 54static const char igbvf_copyright[] = "Copyright (c) 2009 Intel Corporation.";
@@ -2899,7 +2900,7 @@ static int __init igbvf_init_module(void)
2899 printk(KERN_INFO "%s\n", igbvf_copyright); 2900 printk(KERN_INFO "%s\n", igbvf_copyright);
2900 2901
2901 ret = pci_register_driver(&igbvf_driver); 2902 ret = pci_register_driver(&igbvf_driver);
2902 pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, igbvf_driver_name, 2903 igbvf_driver_pm_qos_req = pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY,
2903 PM_QOS_DEFAULT_VALUE); 2904 PM_QOS_DEFAULT_VALUE);
2904 2905
2905 return ret; 2906 return ret;
@@ -2915,7 +2916,8 @@ module_init(igbvf_init_module);
2915static void __exit igbvf_exit_module(void) 2916static void __exit igbvf_exit_module(void)
2916{ 2917{
2917 pci_unregister_driver(&igbvf_driver); 2918 pci_unregister_driver(&igbvf_driver);
2918 pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, igbvf_driver_name); 2919 pm_qos_remove_request(igbvf_driver_pm_qos_req);
2920 igbvf_driver_pm_qos_req = NULL;
2919} 2921}
2920module_exit(igbvf_exit_module); 2922module_exit(igbvf_exit_module);
2921 2923
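igbvf moves from the string-keyed PM QoS interface (pm_qos_add_requirement/pm_qos_remove_requirement) to the handle-based one: pm_qos_add_request() returns a struct pm_qos_request_list pointer that the caller stores and later hands to pm_qos_remove_request(). A sketch of the pattern as used in this series; the handle variable is illustrative, and the exact signature is tied to this era of the tree (later kernels changed the interface again).

#include <linux/module.h>
#include <linux/pm_qos_params.h>

static struct pm_qos_request_list *example_pm_qos_req;

static int __init example_init(void)
{
	/* "No specific latency requirement" until the driver knows better. */
	example_pm_qos_req = pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY,
						PM_QOS_DEFAULT_VALUE);
	return 0;
}

static void __exit example_exit(void)
{
	pm_qos_remove_request(example_pm_qos_req);
	example_pm_qos_req = NULL;
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");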
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index b405a00817c6..12fc0e7ba2ca 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -39,6 +39,8 @@
39#define IXGBE_82599_MC_TBL_SIZE 128 39#define IXGBE_82599_MC_TBL_SIZE 128
40#define IXGBE_82599_VFT_TBL_SIZE 128 40#define IXGBE_82599_VFT_TBL_SIZE 128
41 41
42void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
43void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
42void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); 44void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
43s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, 45s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
44 ixgbe_link_speed speed, 46 ixgbe_link_speed speed,
@@ -69,8 +71,14 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
69 if (hw->phy.multispeed_fiber) { 71 if (hw->phy.multispeed_fiber) {
70 /* Set up dual speed SFP+ support */ 72 /* Set up dual speed SFP+ support */
71 mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber; 73 mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
74 mac->ops.disable_tx_laser =
75 &ixgbe_disable_tx_laser_multispeed_fiber;
76 mac->ops.enable_tx_laser =
77 &ixgbe_enable_tx_laser_multispeed_fiber;
72 mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber; 78 mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;
73 } else { 79 } else {
80 mac->ops.disable_tx_laser = NULL;
81 mac->ops.enable_tx_laser = NULL;
74 mac->ops.flap_tx_laser = NULL; 82 mac->ops.flap_tx_laser = NULL;
75 if ((mac->ops.get_media_type(hw) == 83 if ((mac->ops.get_media_type(hw) ==
76 ixgbe_media_type_backplane) && 84 ixgbe_media_type_backplane) &&
@@ -415,6 +423,44 @@ s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
415 return status; 423 return status;
416} 424}
417 425
426 /**
427 * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
428 * @hw: pointer to hardware structure
429 *
430 * The base drivers may require better control over SFP+ module
431 * PHY states. This includes selectively shutting down the Tx
432 * laser on the PHY, effectively halting physical link.
433 **/
434void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
435{
436 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
437
438 /* Disable tx laser; allow 100us to go dark per spec */
439 esdp_reg |= IXGBE_ESDP_SDP3;
440 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
441 IXGBE_WRITE_FLUSH(hw);
442 udelay(100);
443}
444
445/**
446 * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
447 * @hw: pointer to hardware structure
448 *
449 * The base drivers may require better control over SFP+ module
450 * PHY states. This includes selectively turning on the Tx
451 * laser on the PHY, effectively starting physical link.
452 **/
453void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
454{
455 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
456
457 /* Enable tx laser; allow 100ms to light up */
458 esdp_reg &= ~IXGBE_ESDP_SDP3;
459 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
460 IXGBE_WRITE_FLUSH(hw);
461 msleep(100);
462}
463
418/** 464/**
419 * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser 465 * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
420 * @hw: pointer to hardware structure 466 * @hw: pointer to hardware structure
@@ -429,23 +475,11 @@ s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
429 **/ 475 **/
430void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) 476void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
431{ 477{
432 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
433
434 hw_dbg(hw, "ixgbe_flap_tx_laser_multispeed_fiber\n"); 478 hw_dbg(hw, "ixgbe_flap_tx_laser_multispeed_fiber\n");
435 479
436 if (hw->mac.autotry_restart) { 480 if (hw->mac.autotry_restart) {
437 /* Disable tx laser; allow 100us to go dark per spec */ 481 ixgbe_disable_tx_laser_multispeed_fiber(hw);
438 esdp_reg |= IXGBE_ESDP_SDP3; 482 ixgbe_enable_tx_laser_multispeed_fiber(hw);
439 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
440 IXGBE_WRITE_FLUSH(hw);
441 udelay(100);
442
443 /* Enable tx laser; allow 100ms to light up */
444 esdp_reg &= ~IXGBE_ESDP_SDP3;
445 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
446 IXGBE_WRITE_FLUSH(hw);
447 msleep(100);
448
449 hw->mac.autotry_restart = false; 483 hw->mac.autotry_restart = false;
450 } 484 }
451} 485}
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 8f677cb86290..6c00ee493a3b 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -2982,6 +2982,10 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
2982 else 2982 else
2983 ixgbe_configure_msi_and_legacy(adapter); 2983 ixgbe_configure_msi_and_legacy(adapter);
2984 2984
2985 /* enable the optics */
2986 if (hw->phy.multispeed_fiber)
2987 hw->mac.ops.enable_tx_laser(hw);
2988
2985 clear_bit(__IXGBE_DOWN, &adapter->state); 2989 clear_bit(__IXGBE_DOWN, &adapter->state);
2986 ixgbe_napi_enable_all(adapter); 2990 ixgbe_napi_enable_all(adapter);
2987 2991
@@ -3243,6 +3247,10 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
3243 /* signal that we are down to the interrupt handler */ 3247 /* signal that we are down to the interrupt handler */
3244 set_bit(__IXGBE_DOWN, &adapter->state); 3248 set_bit(__IXGBE_DOWN, &adapter->state);
3245 3249
3250 /* power down the optics */
3251 if (hw->phy.multispeed_fiber)
3252 hw->mac.ops.disable_tx_laser(hw);
3253
3246 /* disable receive for all VFs and wait one second */ 3254 /* disable receive for all VFs and wait one second */
3247 if (adapter->num_vfs) { 3255 if (adapter->num_vfs) {
3248 /* ping all the active vfs to let them know we are going down */ 3256 /* ping all the active vfs to let them know we are going down */
@@ -6253,6 +6261,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6253 goto err_eeprom; 6261 goto err_eeprom;
6254 } 6262 }
6255 6263
6264 /* power down the optics */
6265 if (hw->phy.multispeed_fiber)
6266 hw->mac.ops.disable_tx_laser(hw);
6267
6256 init_timer(&adapter->watchdog_timer); 6268 init_timer(&adapter->watchdog_timer);
6257 adapter->watchdog_timer.function = &ixgbe_watchdog; 6269 adapter->watchdog_timer.function = &ixgbe_watchdog;
6258 adapter->watchdog_timer.data = (unsigned long)adapter; 6270 adapter->watchdog_timer.data = (unsigned long)adapter;
@@ -6400,16 +6412,6 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
6400 del_timer_sync(&adapter->sfp_timer); 6412 del_timer_sync(&adapter->sfp_timer);
6401 cancel_work_sync(&adapter->watchdog_task); 6413 cancel_work_sync(&adapter->watchdog_task);
6402 cancel_work_sync(&adapter->sfp_task); 6414 cancel_work_sync(&adapter->sfp_task);
6403 if (adapter->hw.phy.multispeed_fiber) {
6404 struct ixgbe_hw *hw = &adapter->hw;
6405 /*
6406 * Restart clause 37 autoneg, disable and re-enable
6407 * the tx laser, to clear & alert the link partner
6408 * that it needs to restart autotry
6409 */
6410 hw->mac.autotry_restart = true;
6411 hw->mac.ops.flap_tx_laser(hw);
6412 }
6413 cancel_work_sync(&adapter->multispeed_fiber_task); 6415 cancel_work_sync(&adapter->multispeed_fiber_task);
6414 cancel_work_sync(&adapter->sfp_config_module_task); 6416 cancel_work_sync(&adapter->sfp_config_module_task);
6415 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || 6417 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 4ec6dc1a5b75..534affcc38ca 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -2398,6 +2398,8 @@ struct ixgbe_mac_operations {
2398 s32 (*enable_rx_dma)(struct ixgbe_hw *, u32); 2398 s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
2399 2399
2400 /* Link */ 2400 /* Link */
2401 void (*disable_tx_laser)(struct ixgbe_hw *);
2402 void (*enable_tx_laser)(struct ixgbe_hw *);
2401 void (*flap_tx_laser)(struct ixgbe_hw *); 2403 void (*flap_tx_laser)(struct ixgbe_hw *);
2402 s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool); 2404 s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool);
2403 s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool); 2405 s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
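Across ixgbe_82599.c, ixgbe_main.c and ixgbe_type.h the Tx-laser control becomes two first-class mac ops (disable_tx_laser/enable_tx_laser) that toggle the SDP3 pin in ESDP, with roughly 100 us to go dark and 100 ms to light up; flap is rewritten as disable-then-enable, and the core driver now powers the optics down in probe and ixgbe_down() and back up in ixgbe_up_complete(), instead of flapping the laser at remove time. Because the ops are only populated for multispeed-fiber parts and are explicitly NULLed otherwise, callers guard the indirect call; a hedged sketch of that pattern (the driver itself keys off hw->phy.multispeed_fiber alone, the extra NULL tests here are belt and braces for illustration):

#include "ixgbe_type.h"		/* driver-internal header, per the hunks */

static void example_set_optics(struct ixgbe_hw *hw, bool on)
{
	if (!hw->phy.multispeed_fiber)
		return;

	if (on && hw->mac.ops.enable_tx_laser)
		hw->mac.ops.enable_tx_laser(hw);	/* ~100 ms to light up */
	else if (!on && hw->mac.ops.disable_tx_laser)
		hw->mac.ops.disable_tx_laser(hw);	/* ~100 us to go dark */
}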
diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c
index 13cc1ca261d9..9e9f9b349766 100644
--- a/drivers/net/ks8851.c
+++ b/drivers/net/ks8851.c
@@ -722,12 +722,14 @@ static void ks8851_tx_work(struct work_struct *work)
722 txb = skb_dequeue(&ks->txq); 722 txb = skb_dequeue(&ks->txq);
723 last = skb_queue_empty(&ks->txq); 723 last = skb_queue_empty(&ks->txq);
724 724
725 ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA); 725 if (txb != NULL) {
726 ks8851_wrpkt(ks, txb, last); 726 ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA);
727 ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr); 727 ks8851_wrpkt(ks, txb, last);
728 ks8851_wrreg16(ks, KS_TXQCR, TXQCR_METFE); 728 ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
729 ks8851_wrreg16(ks, KS_TXQCR, TXQCR_METFE);
729 730
730 ks8851_done_tx(ks, txb); 731 ks8851_done_tx(ks, txb);
732 }
731 } 733 }
732 734
733 mutex_unlock(&ks->lock); 735 mutex_unlock(&ks->lock);
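The ks8851 fix guards the TX path against skb_dequeue() returning NULL: the work item can run with an emptied txq, and the old code would pass a NULL skb straight into the FIFO write and completion helpers. The general shape of a workqueue TX drainer with that check, as a hedged sketch (struct and handler names are placeholders):

#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>

/* Placeholder context; the real driver hangs this off its private data. */
struct example_priv {
	struct mutex lock;
	struct sk_buff_head txq;
	struct work_struct tx_work;
};

static void example_write_pkt(struct example_priv *p, struct sk_buff *skb,
			      bool last)
{
	/* device-specific FIFO write would go here */
	dev_kfree_skb(skb);
}

static void example_tx_work(struct work_struct *work)
{
	struct example_priv *p = container_of(work, struct example_priv,
					      tx_work);
	struct sk_buff *skb;
	bool last;

	mutex_lock(&p->lock);
	while (!skb_queue_empty(&p->txq)) {
		skb = skb_dequeue(&p->txq);
		last = skb_queue_empty(&p->txq);
		if (!skb)	/* queue drained under us: nothing to send */
			break;
		example_write_pkt(p, skb, last);
	}
	mutex_unlock(&p->lock);
}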
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c
index 3d1d3a7b7ed3..30b7cf70fbe6 100644
--- a/drivers/net/pcmcia/3c574_cs.c
+++ b/drivers/net/pcmcia/3c574_cs.c
@@ -93,7 +93,6 @@ earlier 3Com products.
93#include <pcmcia/cisreg.h> 93#include <pcmcia/cisreg.h>
94#include <pcmcia/ciscode.h> 94#include <pcmcia/ciscode.h>
95#include <pcmcia/ds.h> 95#include <pcmcia/ds.h>
96#include <pcmcia/mem_op.h>
97 96
98#include <asm/uaccess.h> 97#include <asm/uaccess.h>
99#include <asm/io.h> 98#include <asm/io.h>
@@ -200,7 +199,6 @@ enum Window4 { /* Window 4: Xcvr/media bits. */
200 199
201struct el3_private { 200struct el3_private {
202 struct pcmcia_device *p_dev; 201 struct pcmcia_device *p_dev;
203 dev_node_t node;
204 u16 advertising, partner; /* NWay media advertisement */ 202 u16 advertising, partner; /* NWay media advertisement */
205 unsigned char phys; /* MII device address */ 203 unsigned char phys; /* MII device address */
206 unsigned int autoselect:1, default_media:3; /* Read from the EEPROM/Wn3_Config. */ 204 unsigned int autoselect:1, default_media:3; /* Read from the EEPROM/Wn3_Config. */
@@ -283,8 +281,6 @@ static int tc574_probe(struct pcmcia_device *link)
283 spin_lock_init(&lp->window_lock); 281 spin_lock_init(&lp->window_lock);
284 link->io.NumPorts1 = 32; 282 link->io.NumPorts1 = 32;
285 link->io.Attributes1 = IO_DATA_PATH_WIDTH_16; 283 link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
286 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
287 link->irq.Handler = &el3_interrupt;
288 link->conf.Attributes = CONF_ENABLE_IRQ; 284 link->conf.Attributes = CONF_ENABLE_IRQ;
289 link->conf.IntType = INT_MEMORY_AND_IO; 285 link->conf.IntType = INT_MEMORY_AND_IO;
290 link->conf.ConfigIndex = 1; 286 link->conf.ConfigIndex = 1;
@@ -311,8 +307,7 @@ static void tc574_detach(struct pcmcia_device *link)
311 307
312 dev_dbg(&link->dev, "3c574_detach()\n"); 308 dev_dbg(&link->dev, "3c574_detach()\n");
313 309
314 if (link->dev_node) 310 unregister_netdev(dev);
315 unregister_netdev(dev);
316 311
317 tc574_release(link); 312 tc574_release(link);
318 313
@@ -353,7 +348,7 @@ static int tc574_config(struct pcmcia_device *link)
353 if (i != 0) 348 if (i != 0)
354 goto failed; 349 goto failed;
355 350
356 ret = pcmcia_request_irq(link, &link->irq); 351 ret = pcmcia_request_irq(link, el3_interrupt);
357 if (ret) 352 if (ret)
358 goto failed; 353 goto failed;
359 354
@@ -361,7 +356,7 @@ static int tc574_config(struct pcmcia_device *link)
361 if (ret) 356 if (ret)
362 goto failed; 357 goto failed;
363 358
364 dev->irq = link->irq.AssignedIRQ; 359 dev->irq = link->irq;
365 dev->base_addr = link->io.BasePort1; 360 dev->base_addr = link->io.BasePort1;
366 361
367 ioaddr = dev->base_addr; 362 ioaddr = dev->base_addr;
@@ -446,17 +441,13 @@ static int tc574_config(struct pcmcia_device *link)
446 } 441 }
447 } 442 }
448 443
449 link->dev_node = &lp->node;
450 SET_NETDEV_DEV(dev, &link->dev); 444 SET_NETDEV_DEV(dev, &link->dev);
451 445
452 if (register_netdev(dev) != 0) { 446 if (register_netdev(dev) != 0) {
453 printk(KERN_NOTICE "3c574_cs: register_netdev() failed\n"); 447 printk(KERN_NOTICE "3c574_cs: register_netdev() failed\n");
454 link->dev_node = NULL;
455 goto failed; 448 goto failed;
456 } 449 }
457 450
458 strcpy(lp->node.dev_name, dev->name);
459
460 printk(KERN_INFO "%s: %s at io %#3lx, irq %d, " 451 printk(KERN_INFO "%s: %s at io %#3lx, irq %d, "
461 "hw_addr %pM.\n", 452 "hw_addr %pM.\n",
462 dev->name, cardname, dev->base_addr, dev->irq, 453 dev->name, cardname, dev->base_addr, dev->irq,
@@ -781,8 +772,13 @@ static netdev_tx_t el3_start_xmit(struct sk_buff *skb,
781 inw(ioaddr + EL3_STATUS)); 772 inw(ioaddr + EL3_STATUS));
782 773
783 spin_lock_irqsave(&lp->window_lock, flags); 774 spin_lock_irqsave(&lp->window_lock, flags);
775
776 dev->stats.tx_bytes += skb->len;
777
778 /* Put out the doubleword header... */
784 outw(skb->len, ioaddr + TX_FIFO); 779 outw(skb->len, ioaddr + TX_FIFO);
785 outw(0, ioaddr + TX_FIFO); 780 outw(0, ioaddr + TX_FIFO);
781 /* ... and the packet rounded to a doubleword. */
786 outsl(ioaddr + TX_FIFO, skb->data, (skb->len+3)>>2); 782 outsl(ioaddr + TX_FIFO, skb->data, (skb->len+3)>>2);
787 783
788 dev->trans_start = jiffies; 784 dev->trans_start = jiffies;
@@ -1021,8 +1017,6 @@ static void update_stats(struct net_device *dev)
1021 /* BadSSD */ inb(ioaddr + 12); 1017 /* BadSSD */ inb(ioaddr + 12);
1022 up = inb(ioaddr + 13); 1018 up = inb(ioaddr + 13);
1023 1019
1024 dev->stats.tx_bytes += tx + ((up & 0xf0) << 12);
1025
1026 EL3WINDOW(1); 1020 EL3WINDOW(1);
1027} 1021}
1028 1022
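3c574_cs is the template for the rest of the PCMCIA conversions in this series: dev_node_t and the link->irq.Attributes/Handler setup go away, the handler is passed directly to pcmcia_request_irq(link, handler), the assigned line is read from plain link->irq, and detach can always call unregister_netdev() because there is no dev_node bookkeeping left to test. A condensed sketch of the new-style config path (struct and handler names are placeholders; I/O window setup and error unwinding are elided):

#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>

static irqreturn_t example_interrupt(int irq, void *dev_id)
{
	return IRQ_HANDLED;	/* real handler work elided */
}

static int example_config(struct pcmcia_device *link, struct net_device *dev)
{
	int ret;

	ret = pcmcia_request_irq(link, example_interrupt);
	if (ret)
		return ret;

	ret = pcmcia_request_configuration(link, &link->conf);
	if (ret)
		return ret;

	dev->irq = link->irq;		/* was link->irq.AssignedIRQ */
	SET_NETDEV_DEV(dev, &link->dev);
	return register_netdev(dev);	/* no dev_node or dev_name copy needed */
}

The same conversion repeats below with small variations: axnet_cs and pcnet_cs skip the explicit request and only verify that the core assigned link->irq, while ibmtr_cs and nmclan_cs use pcmcia_request_exclusive_irq() because their hardware cannot share the line.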
diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c
index 091e0b00043e..5ab589d3b385 100644
--- a/drivers/net/pcmcia/3c589_cs.c
+++ b/drivers/net/pcmcia/3c589_cs.c
@@ -106,7 +106,6 @@ enum RxFilter {
106 106
107struct el3_private { 107struct el3_private {
108 struct pcmcia_device *p_dev; 108 struct pcmcia_device *p_dev;
109 dev_node_t node;
110 /* For transceiver monitoring */ 109 /* For transceiver monitoring */
111 struct timer_list media; 110 struct timer_list media;
112 u16 media_status; 111 u16 media_status;
@@ -194,8 +193,7 @@ static int tc589_probe(struct pcmcia_device *link)
194 spin_lock_init(&lp->lock); 193 spin_lock_init(&lp->lock);
195 link->io.NumPorts1 = 16; 194 link->io.NumPorts1 = 16;
196 link->io.Attributes1 = IO_DATA_PATH_WIDTH_16; 195 link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
197 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; 196
198 link->irq.Handler = &el3_interrupt;
199 link->conf.Attributes = CONF_ENABLE_IRQ; 197 link->conf.Attributes = CONF_ENABLE_IRQ;
200 link->conf.IntType = INT_MEMORY_AND_IO; 198 link->conf.IntType = INT_MEMORY_AND_IO;
201 link->conf.ConfigIndex = 1; 199 link->conf.ConfigIndex = 1;
@@ -223,8 +221,7 @@ static void tc589_detach(struct pcmcia_device *link)
223 221
224 dev_dbg(&link->dev, "3c589_detach\n"); 222 dev_dbg(&link->dev, "3c589_detach\n");
225 223
226 if (link->dev_node) 224 unregister_netdev(dev);
227 unregister_netdev(dev);
228 225
229 tc589_release(link); 226 tc589_release(link);
230 227
@@ -242,7 +239,6 @@ static void tc589_detach(struct pcmcia_device *link)
242static int tc589_config(struct pcmcia_device *link) 239static int tc589_config(struct pcmcia_device *link)
243{ 240{
244 struct net_device *dev = link->priv; 241 struct net_device *dev = link->priv;
245 struct el3_private *lp = netdev_priv(dev);
246 __be16 *phys_addr; 242 __be16 *phys_addr;
247 int ret, i, j, multi = 0, fifo; 243 int ret, i, j, multi = 0, fifo;
248 unsigned int ioaddr; 244 unsigned int ioaddr;
@@ -271,7 +267,7 @@ static int tc589_config(struct pcmcia_device *link)
271 if (i != 0) 267 if (i != 0)
272 goto failed; 268 goto failed;
273 269
274 ret = pcmcia_request_irq(link, &link->irq); 270 ret = pcmcia_request_irq(link, el3_interrupt);
275 if (ret) 271 if (ret)
276 goto failed; 272 goto failed;
277 273
@@ -279,7 +275,7 @@ static int tc589_config(struct pcmcia_device *link)
279 if (ret) 275 if (ret)
280 goto failed; 276 goto failed;
281 277
282 dev->irq = link->irq.AssignedIRQ; 278 dev->irq = link->irq;
283 dev->base_addr = link->io.BasePort1; 279 dev->base_addr = link->io.BasePort1;
284 ioaddr = dev->base_addr; 280 ioaddr = dev->base_addr;
285 EL3WINDOW(0); 281 EL3WINDOW(0);
@@ -313,17 +309,13 @@ static int tc589_config(struct pcmcia_device *link)
313 else 309 else
314 printk(KERN_ERR "3c589_cs: invalid if_port requested\n"); 310 printk(KERN_ERR "3c589_cs: invalid if_port requested\n");
315 311
316 link->dev_node = &lp->node;
317 SET_NETDEV_DEV(dev, &link->dev); 312 SET_NETDEV_DEV(dev, &link->dev);
318 313
319 if (register_netdev(dev) != 0) { 314 if (register_netdev(dev) != 0) {
320 printk(KERN_ERR "3c589_cs: register_netdev() failed\n"); 315 printk(KERN_ERR "3c589_cs: register_netdev() failed\n");
321 link->dev_node = NULL;
322 goto failed; 316 goto failed;
323 } 317 }
324 318
325 strcpy(lp->node.dev_name, dev->name);
326
327 printk(KERN_INFO "%s: 3Com 3c%s, io %#3lx, irq %d, " 319 printk(KERN_INFO "%s: 3Com 3c%s, io %#3lx, irq %d, "
328 "hw_addr %pM\n", 320 "hw_addr %pM\n",
329 dev->name, (multi ? "562" : "589"), dev->base_addr, dev->irq, 321 dev->name, (multi ? "562" : "589"), dev->base_addr, dev->irq,
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index 9f3d593f14ed..59f6fa3c9ddc 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -113,7 +113,6 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id);
113 113
114typedef struct axnet_dev_t { 114typedef struct axnet_dev_t {
115 struct pcmcia_device *p_dev; 115 struct pcmcia_device *p_dev;
116 dev_node_t node;
117 caddr_t base; 116 caddr_t base;
118 struct timer_list watchdog; 117 struct timer_list watchdog;
119 int stale, fast_poll; 118 int stale, fast_poll;
@@ -168,7 +167,6 @@ static int axnet_probe(struct pcmcia_device *link)
168 info = PRIV(dev); 167 info = PRIV(dev);
169 info->p_dev = link; 168 info->p_dev = link;
170 link->priv = dev; 169 link->priv = dev;
171 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
172 link->conf.Attributes = CONF_ENABLE_IRQ; 170 link->conf.Attributes = CONF_ENABLE_IRQ;
173 link->conf.IntType = INT_MEMORY_AND_IO; 171 link->conf.IntType = INT_MEMORY_AND_IO;
174 172
@@ -195,8 +193,7 @@ static void axnet_detach(struct pcmcia_device *link)
195 193
196 dev_dbg(&link->dev, "axnet_detach(0x%p)\n", link); 194 dev_dbg(&link->dev, "axnet_detach(0x%p)\n", link);
197 195
198 if (link->dev_node) 196 unregister_netdev(dev);
199 unregister_netdev(dev);
200 197
201 axnet_release(link); 198 axnet_release(link);
202 199
@@ -265,12 +262,9 @@ static int try_io_port(struct pcmcia_device *link)
265 int j, ret; 262 int j, ret;
266 if (link->io.NumPorts1 == 32) { 263 if (link->io.NumPorts1 == 32) {
267 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; 264 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
268 if (link->io.NumPorts2 > 0) { 265 /* for master/slave multifunction cards */
269 /* for master/slave multifunction cards */ 266 if (link->io.NumPorts2 > 0)
270 link->io.Attributes2 = IO_DATA_PATH_WIDTH_8; 267 link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
271 link->irq.Attributes =
272 IRQ_TYPE_DYNAMIC_SHARING;
273 }
274 } else { 268 } else {
275 /* This should be two 16-port windows */ 269 /* This should be two 16-port windows */
276 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 270 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
@@ -336,8 +330,7 @@ static int axnet_config(struct pcmcia_device *link)
336 if (ret != 0) 330 if (ret != 0)
337 goto failed; 331 goto failed;
338 332
339 ret = pcmcia_request_irq(link, &link->irq); 333 if (!link->irq)
340 if (ret)
341 goto failed; 334 goto failed;
342 335
343 if (link->io.NumPorts2 == 8) { 336 if (link->io.NumPorts2 == 8) {
@@ -349,7 +342,7 @@ static int axnet_config(struct pcmcia_device *link)
349 if (ret) 342 if (ret)
350 goto failed; 343 goto failed;
351 344
352 dev->irq = link->irq.AssignedIRQ; 345 dev->irq = link->irq;
353 dev->base_addr = link->io.BasePort1; 346 dev->base_addr = link->io.BasePort1;
354 347
355 if (!get_prom(link)) { 348 if (!get_prom(link)) {
@@ -397,17 +390,13 @@ static int axnet_config(struct pcmcia_device *link)
397 } 390 }
398 391
399 info->phy_id = (i < 32) ? i : -1; 392 info->phy_id = (i < 32) ? i : -1;
400 link->dev_node = &info->node;
401 SET_NETDEV_DEV(dev, &link->dev); 393 SET_NETDEV_DEV(dev, &link->dev);
402 394
403 if (register_netdev(dev) != 0) { 395 if (register_netdev(dev) != 0) {
404 printk(KERN_NOTICE "axnet_cs: register_netdev() failed\n"); 396 printk(KERN_NOTICE "axnet_cs: register_netdev() failed\n");
405 link->dev_node = NULL;
406 goto failed; 397 goto failed;
407 } 398 }
408 399
409 strcpy(info->node.dev_name, dev->name);
410
411 printk(KERN_INFO "%s: Asix AX88%d90: io %#3lx, irq %d, " 400 printk(KERN_INFO "%s: Asix AX88%d90: io %#3lx, irq %d, "
412 "hw_addr %pM\n", 401 "hw_addr %pM\n",
413 dev->name, ((info->flags & IS_AX88790) ? 7 : 1), 402 dev->name, ((info->flags & IS_AX88790) ? 7 : 1),
diff --git a/drivers/net/pcmcia/com20020_cs.c b/drivers/net/pcmcia/com20020_cs.c
index 21d9c9d815d1..5643f94541bc 100644
--- a/drivers/net/pcmcia/com20020_cs.c
+++ b/drivers/net/pcmcia/com20020_cs.c
@@ -122,7 +122,6 @@ static void com20020_detach(struct pcmcia_device *p_dev);
122 122
123typedef struct com20020_dev_t { 123typedef struct com20020_dev_t {
124 struct net_device *dev; 124 struct net_device *dev;
125 dev_node_t node;
126} com20020_dev_t; 125} com20020_dev_t;
127 126
128/*====================================================================== 127/*======================================================================
@@ -163,7 +162,6 @@ static int com20020_probe(struct pcmcia_device *p_dev)
163 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 162 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
164 p_dev->io.NumPorts1 = 16; 163 p_dev->io.NumPorts1 = 16;
165 p_dev->io.IOAddrLines = 16; 164 p_dev->io.IOAddrLines = 16;
166 p_dev->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
167 p_dev->conf.Attributes = CONF_ENABLE_IRQ; 165 p_dev->conf.Attributes = CONF_ENABLE_IRQ;
168 p_dev->conf.IntType = INT_MEMORY_AND_IO; 166 p_dev->conf.IntType = INT_MEMORY_AND_IO;
169 167
@@ -196,18 +194,16 @@ static void com20020_detach(struct pcmcia_device *link)
196 194
197 dev_dbg(&link->dev, "com20020_detach\n"); 195 dev_dbg(&link->dev, "com20020_detach\n");
198 196
199 if (link->dev_node) { 197 dev_dbg(&link->dev, "unregister...\n");
200 dev_dbg(&link->dev, "unregister...\n");
201 198
202 unregister_netdev(dev); 199 unregister_netdev(dev);
203 200
204 /* 201 /*
205 * this is necessary because we register our IRQ separately 202 * this is necessary because we register our IRQ separately
206 * from card services. 203 * from card services.
207 */ 204 */
208 if (dev->irq) 205 if (dev->irq)
209 free_irq(dev->irq, dev); 206 free_irq(dev->irq, dev);
210 }
211 207
212 com20020_release(link); 208 com20020_release(link);
213 209
@@ -275,15 +271,14 @@ static int com20020_config(struct pcmcia_device *link)
275 dev_dbg(&link->dev, "got ioaddr %Xh\n", ioaddr); 271 dev_dbg(&link->dev, "got ioaddr %Xh\n", ioaddr);
276 272
277 dev_dbg(&link->dev, "request IRQ %d\n", 273 dev_dbg(&link->dev, "request IRQ %d\n",
278 link->irq.AssignedIRQ); 274 link->irq);
279 i = pcmcia_request_irq(link, &link->irq); 275 if (!link->irq)
280 if (i != 0)
281 { 276 {
282 dev_dbg(&link->dev, "requestIRQ failed totally!\n"); 277 dev_dbg(&link->dev, "requestIRQ failed totally!\n");
283 goto failed; 278 goto failed;
284 } 279 }
285 280
286 dev->irq = link->irq.AssignedIRQ; 281 dev->irq = link->irq;
287 282
288 ret = pcmcia_request_configuration(link, &link->conf); 283 ret = pcmcia_request_configuration(link, &link->conf);
289 if (ret) 284 if (ret)
@@ -299,7 +294,6 @@ static int com20020_config(struct pcmcia_device *link)
299 lp->card_name = "PCMCIA COM20020"; 294 lp->card_name = "PCMCIA COM20020";
300 lp->card_flags = ARC_CAN_10MBIT; /* pretend all of them can 10Mbit */ 295 lp->card_flags = ARC_CAN_10MBIT; /* pretend all of them can 10Mbit */
301 296
302 link->dev_node = &info->node;
303 SET_NETDEV_DEV(dev, &link->dev); 297 SET_NETDEV_DEV(dev, &link->dev);
304 298
305 i = com20020_found(dev, 0); /* calls register_netdev */ 299 i = com20020_found(dev, 0); /* calls register_netdev */
@@ -307,12 +301,9 @@ static int com20020_config(struct pcmcia_device *link)
307 if (i != 0) { 301 if (i != 0) {
308 dev_printk(KERN_NOTICE, &link->dev, 302 dev_printk(KERN_NOTICE, &link->dev,
309 "com20020_cs: com20020_found() failed\n"); 303 "com20020_cs: com20020_found() failed\n");
310 link->dev_node = NULL;
311 goto failed; 304 goto failed;
312 } 305 }
313 306
314 strcpy(info->node.dev_name, dev->name);
315
316 dev_dbg(&link->dev,KERN_INFO "%s: port %#3lx, irq %d\n", 307 dev_dbg(&link->dev,KERN_INFO "%s: port %#3lx, irq %d\n",
317 dev->name, dev->base_addr, dev->irq); 308 dev->name, dev->base_addr, dev->irq);
318 return 0; 309 return 0;
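com20020_cs differs from the drivers above in that the arcnet core requests the interrupt itself (inside com20020_found()), so the PCMCIA side only verifies that the core assigned link->irq and later releases the line with free_irq() in detach, as the retained comment in the hunk notes. A sketch of that split-ownership pattern (function names are placeholders):

#include <pcmcia/cs.h>
#include <pcmcia/ds.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>

/* Config: rely on the IRQ line the PCMCIA core already picked. */
static int example_config(struct pcmcia_device *link, struct net_device *dev)
{
	if (!link->irq)			/* core could not assign a line */
		return -ENODEV;

	dev->irq = link->irq;
	/* ...later, a subsystem core calls request_irq(dev->irq, ...) */
	return 0;
}

/* Detach: the line was requested outside card services, so release it
 * here before tearing the device down. */
static void example_detach(struct pcmcia_device *link, struct net_device *dev)
{
	unregister_netdev(dev);
	if (dev->irq)
		free_irq(dev->irq, dev);
}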
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index b9dc80b9d04a..6580d78397d1 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -110,7 +110,6 @@ typedef enum { MBH10302, MBH10304, TDK, CONTEC, LA501, UNGERMANN,
110*/ 110*/
111typedef struct local_info_t { 111typedef struct local_info_t {
112 struct pcmcia_device *p_dev; 112 struct pcmcia_device *p_dev;
113 dev_node_t node;
114 long open_time; 113 long open_time;
115 uint tx_started:1; 114 uint tx_started:1;
116 uint tx_queue; 115 uint tx_queue;
@@ -254,10 +253,6 @@ static int fmvj18x_probe(struct pcmcia_device *link)
254 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; 253 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
255 link->io.IOAddrLines = 5; 254 link->io.IOAddrLines = 5;
256 255
257 /* Interrupt setup */
258 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
259 link->irq.Handler = fjn_interrupt;
260
261 /* General socket configuration */ 256 /* General socket configuration */
262 link->conf.Attributes = CONF_ENABLE_IRQ; 257 link->conf.Attributes = CONF_ENABLE_IRQ;
263 link->conf.IntType = INT_MEMORY_AND_IO; 258 link->conf.IntType = INT_MEMORY_AND_IO;
@@ -278,8 +273,7 @@ static void fmvj18x_detach(struct pcmcia_device *link)
278 273
279 dev_dbg(&link->dev, "fmvj18x_detach\n"); 274 dev_dbg(&link->dev, "fmvj18x_detach\n");
280 275
281 if (link->dev_node) 276 unregister_netdev(dev);
282 unregister_netdev(dev);
283 277
284 fmvj18x_release(link); 278 fmvj18x_release(link);
285 279
@@ -425,8 +419,6 @@ static int fmvj18x_config(struct pcmcia_device *link)
425 } 419 }
426 420
427 if (link->io.NumPorts2 != 0) { 421 if (link->io.NumPorts2 != 0) {
428 link->irq.Attributes =
429 IRQ_TYPE_DYNAMIC_SHARING;
430 ret = mfc_try_io_port(link); 422 ret = mfc_try_io_port(link);
431 if (ret != 0) goto failed; 423 if (ret != 0) goto failed;
432 } else if (cardtype == UNGERMANN) { 424 } else if (cardtype == UNGERMANN) {
@@ -437,14 +429,14 @@ static int fmvj18x_config(struct pcmcia_device *link)
437 if (ret) 429 if (ret)
438 goto failed; 430 goto failed;
439 } 431 }
440 ret = pcmcia_request_irq(link, &link->irq); 432 ret = pcmcia_request_irq(link, fjn_interrupt);
441 if (ret) 433 if (ret)
442 goto failed; 434 goto failed;
443 ret = pcmcia_request_configuration(link, &link->conf); 435 ret = pcmcia_request_configuration(link, &link->conf);
444 if (ret) 436 if (ret)
445 goto failed; 437 goto failed;
446 438
447 dev->irq = link->irq.AssignedIRQ; 439 dev->irq = link->irq;
448 dev->base_addr = link->io.BasePort1; 440 dev->base_addr = link->io.BasePort1;
449 441
450 if (link->io.BasePort2 != 0) { 442 if (link->io.BasePort2 != 0) {
@@ -529,17 +521,13 @@ static int fmvj18x_config(struct pcmcia_device *link)
529 } 521 }
530 522
531 lp->cardtype = cardtype; 523 lp->cardtype = cardtype;
532 link->dev_node = &lp->node;
533 SET_NETDEV_DEV(dev, &link->dev); 524 SET_NETDEV_DEV(dev, &link->dev);
534 525
535 if (register_netdev(dev) != 0) { 526 if (register_netdev(dev) != 0) {
536 printk(KERN_NOTICE "fmvj18x_cs: register_netdev() failed\n"); 527 printk(KERN_NOTICE "fmvj18x_cs: register_netdev() failed\n");
537 link->dev_node = NULL;
538 goto failed; 528 goto failed;
539 } 529 }
540 530
541 strcpy(lp->node.dev_name, dev->name);
542
543 /* print current configuration */ 531 /* print current configuration */
544 printk(KERN_INFO "%s: %s, sram %s, port %#3lx, irq %d, " 532 printk(KERN_INFO "%s: %s, sram %s, port %#3lx, irq %d, "
545 "hw_addr %pM\n", 533 "hw_addr %pM\n",
diff --git a/drivers/net/pcmcia/ibmtr_cs.c b/drivers/net/pcmcia/ibmtr_cs.c
index 37f4a6fdc3ef..2e42d80f8cae 100644
--- a/drivers/net/pcmcia/ibmtr_cs.c
+++ b/drivers/net/pcmcia/ibmtr_cs.c
@@ -104,7 +104,6 @@ static void ibmtr_detach(struct pcmcia_device *p_dev);
104typedef struct ibmtr_dev_t { 104typedef struct ibmtr_dev_t {
105 struct pcmcia_device *p_dev; 105 struct pcmcia_device *p_dev;
106 struct net_device *dev; 106 struct net_device *dev;
107 dev_node_t node;
108 window_handle_t sram_win_handle; 107 window_handle_t sram_win_handle;
109 struct tok_info *ti; 108 struct tok_info *ti;
110} ibmtr_dev_t; 109} ibmtr_dev_t;
@@ -156,8 +155,6 @@ static int __devinit ibmtr_attach(struct pcmcia_device *link)
156 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 155 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
157 link->io.NumPorts1 = 4; 156 link->io.NumPorts1 = 4;
158 link->io.IOAddrLines = 16; 157 link->io.IOAddrLines = 16;
159 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
160 link->irq.Handler = ibmtr_interrupt;
161 link->conf.Attributes = CONF_ENABLE_IRQ; 158 link->conf.Attributes = CONF_ENABLE_IRQ;
162 link->conf.IntType = INT_MEMORY_AND_IO; 159 link->conf.IntType = INT_MEMORY_AND_IO;
163 link->conf.Present = PRESENT_OPTION; 160 link->conf.Present = PRESENT_OPTION;
@@ -192,8 +189,7 @@ static void ibmtr_detach(struct pcmcia_device *link)
192 */ 189 */
193 ti->sram_phys |= 1; 190 ti->sram_phys |= 1;
194 191
195 if (link->dev_node) 192 unregister_netdev(dev);
196 unregister_netdev(dev);
197 193
198 del_timer_sync(&(ti->tr_timer)); 194 del_timer_sync(&(ti->tr_timer));
199 195
@@ -238,11 +234,11 @@ static int __devinit ibmtr_config(struct pcmcia_device *link)
238 } 234 }
239 dev->base_addr = link->io.BasePort1; 235 dev->base_addr = link->io.BasePort1;
240 236
241 ret = pcmcia_request_irq(link, &link->irq); 237 ret = pcmcia_request_exclusive_irq(link, ibmtr_interrupt);
242 if (ret) 238 if (ret)
243 goto failed; 239 goto failed;
244 dev->irq = link->irq.AssignedIRQ; 240 dev->irq = link->irq;
245 ti->irq = link->irq.AssignedIRQ; 241 ti->irq = link->irq;
246 ti->global_int_enable=GLOBAL_INT_ENABLE+((dev->irq==9) ? 2 : dev->irq); 242 ti->global_int_enable=GLOBAL_INT_ENABLE+((dev->irq==9) ? 2 : dev->irq);
247 243
248 /* Allocate the MMIO memory window */ 244 /* Allocate the MMIO memory window */
@@ -291,18 +287,14 @@ static int __devinit ibmtr_config(struct pcmcia_device *link)
291 Adapters Technical Reference" SC30-3585 for this info. */ 287 Adapters Technical Reference" SC30-3585 for this info. */
292 ibmtr_hw_setup(dev, mmiobase); 288 ibmtr_hw_setup(dev, mmiobase);
293 289
294 link->dev_node = &info->node;
295 SET_NETDEV_DEV(dev, &link->dev); 290 SET_NETDEV_DEV(dev, &link->dev);
296 291
297 i = ibmtr_probe_card(dev); 292 i = ibmtr_probe_card(dev);
298 if (i != 0) { 293 if (i != 0) {
299 printk(KERN_NOTICE "ibmtr_cs: register_netdev() failed\n"); 294 printk(KERN_NOTICE "ibmtr_cs: register_netdev() failed\n");
300 link->dev_node = NULL;
301 goto failed; 295 goto failed;
302 } 296 }
303 297
304 strcpy(info->node.dev_name, dev->name);
305
306 printk(KERN_INFO 298 printk(KERN_INFO
307 "%s: port %#3lx, irq %d, mmio %#5lx, sram %#5lx, hwaddr=%pM\n", 299 "%s: port %#3lx, irq %d, mmio %#5lx, sram %#5lx, hwaddr=%pM\n",
308 dev->name, dev->base_addr, dev->irq, 300 dev->name, dev->base_addr, dev->irq,
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c
index c717b143f11a..d8a3b3cf246e 100644
--- a/drivers/net/pcmcia/nmclan_cs.c
+++ b/drivers/net/pcmcia/nmclan_cs.c
@@ -363,7 +363,6 @@ typedef struct _mace_statistics {
363 363
364typedef struct _mace_private { 364typedef struct _mace_private {
365 struct pcmcia_device *p_dev; 365 struct pcmcia_device *p_dev;
366 dev_node_t node;
367 struct net_device_stats linux_stats; /* Linux statistics counters */ 366 struct net_device_stats linux_stats; /* Linux statistics counters */
368 mace_statistics mace_stats; /* MACE chip statistics counters */ 367 mace_statistics mace_stats; /* MACE chip statistics counters */
369 368
@@ -463,8 +462,6 @@ static int nmclan_probe(struct pcmcia_device *link)
463 link->io.NumPorts1 = 32; 462 link->io.NumPorts1 = 32;
464 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; 463 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
465 link->io.IOAddrLines = 5; 464 link->io.IOAddrLines = 5;
466 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
467 link->irq.Handler = mace_interrupt;
468 link->conf.Attributes = CONF_ENABLE_IRQ; 465 link->conf.Attributes = CONF_ENABLE_IRQ;
469 link->conf.IntType = INT_MEMORY_AND_IO; 466 link->conf.IntType = INT_MEMORY_AND_IO;
470 link->conf.ConfigIndex = 1; 467 link->conf.ConfigIndex = 1;
@@ -493,8 +490,7 @@ static void nmclan_detach(struct pcmcia_device *link)
493 490
494 dev_dbg(&link->dev, "nmclan_detach\n"); 491 dev_dbg(&link->dev, "nmclan_detach\n");
495 492
496 if (link->dev_node) 493 unregister_netdev(dev);
497 unregister_netdev(dev);
498 494
499 nmclan_release(link); 495 nmclan_release(link);
500 496
@@ -652,14 +648,14 @@ static int nmclan_config(struct pcmcia_device *link)
652 ret = pcmcia_request_io(link, &link->io); 648 ret = pcmcia_request_io(link, &link->io);
653 if (ret) 649 if (ret)
654 goto failed; 650 goto failed;
655 ret = pcmcia_request_irq(link, &link->irq); 651 ret = pcmcia_request_exclusive_irq(link, mace_interrupt);
656 if (ret) 652 if (ret)
657 goto failed; 653 goto failed;
658 ret = pcmcia_request_configuration(link, &link->conf); 654 ret = pcmcia_request_configuration(link, &link->conf);
659 if (ret) 655 if (ret)
660 goto failed; 656 goto failed;
661 657
662 dev->irq = link->irq.AssignedIRQ; 658 dev->irq = link->irq;
663 dev->base_addr = link->io.BasePort1; 659 dev->base_addr = link->io.BasePort1;
664 660
665 ioaddr = dev->base_addr; 661 ioaddr = dev->base_addr;
@@ -698,18 +694,14 @@ static int nmclan_config(struct pcmcia_device *link)
698 else 694 else
699 printk(KERN_NOTICE "nmclan_cs: invalid if_port requested\n"); 695 printk(KERN_NOTICE "nmclan_cs: invalid if_port requested\n");
700 696
701 link->dev_node = &lp->node;
702 SET_NETDEV_DEV(dev, &link->dev); 697 SET_NETDEV_DEV(dev, &link->dev);
703 698
704 i = register_netdev(dev); 699 i = register_netdev(dev);
705 if (i != 0) { 700 if (i != 0) {
706 printk(KERN_NOTICE "nmclan_cs: register_netdev() failed\n"); 701 printk(KERN_NOTICE "nmclan_cs: register_netdev() failed\n");
707 link->dev_node = NULL;
708 goto failed; 702 goto failed;
709 } 703 }
710 704
711 strcpy(lp->node.dev_name, dev->name);
712
713 printk(KERN_INFO "%s: nmclan: port %#3lx, irq %d, %s port," 705 printk(KERN_INFO "%s: nmclan: port %#3lx, irq %d, %s port,"
714 " hw_addr %pM\n", 706 " hw_addr %pM\n",
715 dev->name, dev->base_addr, dev->irq, if_names[dev->if_port], 707 dev->name, dev->base_addr, dev->irq, if_names[dev->if_port],
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 4c0368de1815..6f77a768ba88 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -208,7 +208,6 @@ static hw_info_t dl10022_info = { 0, 0, 0, 0, IS_DL10022|HAS_MII };
208 208
209typedef struct pcnet_dev_t { 209typedef struct pcnet_dev_t {
210 struct pcmcia_device *p_dev; 210 struct pcmcia_device *p_dev;
211 dev_node_t node;
212 u_int flags; 211 u_int flags;
213 void __iomem *base; 212 void __iomem *base;
214 struct timer_list watchdog; 213 struct timer_list watchdog;
@@ -264,7 +263,6 @@ static int pcnet_probe(struct pcmcia_device *link)
264 info->p_dev = link; 263 info->p_dev = link;
265 link->priv = dev; 264 link->priv = dev;
266 265
267 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
268 link->conf.Attributes = CONF_ENABLE_IRQ; 266 link->conf.Attributes = CONF_ENABLE_IRQ;
269 link->conf.IntType = INT_MEMORY_AND_IO; 267 link->conf.IntType = INT_MEMORY_AND_IO;
270 268
@@ -288,8 +286,7 @@ static void pcnet_detach(struct pcmcia_device *link)
288 286
289 dev_dbg(&link->dev, "pcnet_detach\n"); 287 dev_dbg(&link->dev, "pcnet_detach\n");
290 288
291 if (link->dev_node) 289 unregister_netdev(dev);
292 unregister_netdev(dev);
293 290
294 pcnet_release(link); 291 pcnet_release(link);
295 292
@@ -488,8 +485,6 @@ static int try_io_port(struct pcmcia_device *link)
488 if (link->io.NumPorts2 > 0) { 485 if (link->io.NumPorts2 > 0) {
489 /* for master/slave multifunction cards */ 486 /* for master/slave multifunction cards */
490 link->io.Attributes2 = IO_DATA_PATH_WIDTH_8; 487 link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
491 link->irq.Attributes =
492 IRQ_TYPE_DYNAMIC_SHARING;
493 } 488 }
494 } else { 489 } else {
495 /* This should be two 16-port windows */ 490 /* This should be two 16-port windows */
@@ -559,8 +554,7 @@ static int pcnet_config(struct pcmcia_device *link)
559 if (ret) 554 if (ret)
560 goto failed; 555 goto failed;
561 556
562 ret = pcmcia_request_irq(link, &link->irq); 557 if (!link->irq)
563 if (ret)
564 goto failed; 558 goto failed;
565 559
566 if (link->io.NumPorts2 == 8) { 560 if (link->io.NumPorts2 == 8) {
@@ -574,7 +568,7 @@ static int pcnet_config(struct pcmcia_device *link)
574 ret = pcmcia_request_configuration(link, &link->conf); 568 ret = pcmcia_request_configuration(link, &link->conf);
575 if (ret) 569 if (ret)
576 goto failed; 570 goto failed;
577 dev->irq = link->irq.AssignedIRQ; 571 dev->irq = link->irq;
578 dev->base_addr = link->io.BasePort1; 572 dev->base_addr = link->io.BasePort1;
579 if (info->flags & HAS_MISC_REG) { 573 if (info->flags & HAS_MISC_REG) {
580 if ((if_port == 1) || (if_port == 2)) 574 if ((if_port == 1) || (if_port == 2))
@@ -643,17 +637,13 @@ static int pcnet_config(struct pcmcia_device *link)
643 if (info->flags & (IS_DL10019|IS_DL10022)) 637 if (info->flags & (IS_DL10019|IS_DL10022))
644 mii_phy_probe(dev); 638 mii_phy_probe(dev);
645 639
646 link->dev_node = &info->node;
647 SET_NETDEV_DEV(dev, &link->dev); 640 SET_NETDEV_DEV(dev, &link->dev);
648 641
649 if (register_netdev(dev) != 0) { 642 if (register_netdev(dev) != 0) {
650 printk(KERN_NOTICE "pcnet_cs: register_netdev() failed\n"); 643 printk(KERN_NOTICE "pcnet_cs: register_netdev() failed\n");
651 link->dev_node = NULL;
652 goto failed; 644 goto failed;
653 } 645 }
654 646
655 strcpy(info->node.dev_name, dev->name);
656
657 if (info->flags & (IS_DL10019|IS_DL10022)) { 647 if (info->flags & (IS_DL10019|IS_DL10022)) {
658 u_char id = inb(dev->base_addr + 0x1a); 648 u_char id = inb(dev->base_addr + 0x1a);
659 printk(KERN_INFO "%s: NE2000 (DL100%d rev %02x): ", 649 printk(KERN_INFO "%s: NE2000 (DL100%d rev %02x): ",
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index fd9d6e34fda4..59796e7d09c4 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -103,7 +103,6 @@ struct smc_private {
103 u_short manfid; 103 u_short manfid;
104 u_short cardid; 104 u_short cardid;
105 105
106 dev_node_t node;
107 struct sk_buff *saved_skb; 106 struct sk_buff *saved_skb;
108 int packets_waiting; 107 int packets_waiting;
109 void __iomem *base; 108 void __iomem *base;
@@ -323,14 +322,11 @@ static int smc91c92_probe(struct pcmcia_device *link)
323 return -ENOMEM; 322 return -ENOMEM;
324 smc = netdev_priv(dev); 323 smc = netdev_priv(dev);
325 smc->p_dev = link; 324 smc->p_dev = link;
326 link->priv = dev;
327 325
328 spin_lock_init(&smc->lock); 326 spin_lock_init(&smc->lock);
329 link->io.NumPorts1 = 16; 327 link->io.NumPorts1 = 16;
330 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; 328 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
331 link->io.IOAddrLines = 4; 329 link->io.IOAddrLines = 4;
332 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
333 link->irq.Handler = &smc_interrupt;
334 link->conf.Attributes = CONF_ENABLE_IRQ; 330 link->conf.Attributes = CONF_ENABLE_IRQ;
335 link->conf.IntType = INT_MEMORY_AND_IO; 331 link->conf.IntType = INT_MEMORY_AND_IO;
336 332
@@ -363,8 +359,7 @@ static void smc91c92_detach(struct pcmcia_device *link)
363 359
364 dev_dbg(&link->dev, "smc91c92_detach\n"); 360 dev_dbg(&link->dev, "smc91c92_detach\n");
365 361
366 if (link->dev_node) 362 unregister_netdev(dev);
367 unregister_netdev(dev);
368 363
369 smc91c92_release(link); 364 smc91c92_release(link);
370 365
@@ -453,7 +448,6 @@ static int mhz_mfc_config(struct pcmcia_device *link)
453 448
454 link->conf.Attributes |= CONF_ENABLE_SPKR; 449 link->conf.Attributes |= CONF_ENABLE_SPKR;
455 link->conf.Status = CCSR_AUDIO_ENA; 450 link->conf.Status = CCSR_AUDIO_ENA;
456 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
457 link->io.IOAddrLines = 16; 451 link->io.IOAddrLines = 16;
458 link->io.Attributes2 = IO_DATA_PATH_WIDTH_8; 452 link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
459 link->io.NumPorts2 = 8; 453 link->io.NumPorts2 = 8;
@@ -652,7 +646,6 @@ static int osi_config(struct pcmcia_device *link)
652 646
653 link->conf.Attributes |= CONF_ENABLE_SPKR; 647 link->conf.Attributes |= CONF_ENABLE_SPKR;
654 link->conf.Status = CCSR_AUDIO_ENA; 648 link->conf.Status = CCSR_AUDIO_ENA;
655 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
656 link->io.NumPorts1 = 64; 649 link->io.NumPorts1 = 64;
657 link->io.Attributes2 = IO_DATA_PATH_WIDTH_8; 650 link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
658 link->io.NumPorts2 = 8; 651 link->io.NumPorts2 = 8;
@@ -877,7 +870,7 @@ static int smc91c92_config(struct pcmcia_device *link)
877 if (i) 870 if (i)
878 goto config_failed; 871 goto config_failed;
879 872
880 i = pcmcia_request_irq(link, &link->irq); 873 i = pcmcia_request_irq(link, smc_interrupt);
881 if (i) 874 if (i)
882 goto config_failed; 875 goto config_failed;
883 i = pcmcia_request_configuration(link, &link->conf); 876 i = pcmcia_request_configuration(link, &link->conf);
@@ -887,7 +880,7 @@ static int smc91c92_config(struct pcmcia_device *link)
887 if (smc->manfid == MANFID_MOTOROLA) 880 if (smc->manfid == MANFID_MOTOROLA)
888 mot_config(link); 881 mot_config(link);
889 882
890 dev->irq = link->irq.AssignedIRQ; 883 dev->irq = link->irq;
891 884
892 if ((if_port >= 0) && (if_port <= 2)) 885 if ((if_port >= 0) && (if_port <= 2))
893 dev->if_port = if_port; 886 dev->if_port = if_port;
@@ -960,17 +953,13 @@ static int smc91c92_config(struct pcmcia_device *link)
960 SMC_SELECT_BANK(0); 953 SMC_SELECT_BANK(0);
961 } 954 }
962 955
963 link->dev_node = &smc->node;
964 SET_NETDEV_DEV(dev, &link->dev); 956 SET_NETDEV_DEV(dev, &link->dev);
965 957
966 if (register_netdev(dev) != 0) { 958 if (register_netdev(dev) != 0) {
967 printk(KERN_ERR "smc91c92_cs: register_netdev() failed\n"); 959 printk(KERN_ERR "smc91c92_cs: register_netdev() failed\n");
968 link->dev_node = NULL;
969 goto config_undo; 960 goto config_undo;
970 } 961 }
971 962
972 strcpy(smc->node.dev_name, dev->name);
973
974 printk(KERN_INFO "%s: smc91c%s rev %d: io %#3lx, irq %d, " 963 printk(KERN_INFO "%s: smc91c%s rev %d: io %#3lx, irq %d, "
975 "hw_addr %pM\n", 964 "hw_addr %pM\n",
976 dev->name, name, (rev & 0x0f), dev->base_addr, dev->irq, 965 dev->name, name, (rev & 0x0f), dev->base_addr, dev->irq,
@@ -1804,23 +1793,30 @@ static void media_check(u_long arg)
1804 SMC_SELECT_BANK(1); 1793 SMC_SELECT_BANK(1);
1805 media |= (inw(ioaddr + CONFIG) & CFG_AUI_SELECT) ? 2 : 1; 1794 media |= (inw(ioaddr + CONFIG) & CFG_AUI_SELECT) ? 2 : 1;
1806 1795
1796 SMC_SELECT_BANK(saved_bank);
1797 spin_unlock_irqrestore(&smc->lock, flags);
1798
1807 /* Check for pending interrupt with watchdog flag set: with 1799 /* Check for pending interrupt with watchdog flag set: with
1808 this, we can limp along even if the interrupt is blocked */ 1800 this, we can limp along even if the interrupt is blocked */
1809 if (smc->watchdog++ && ((i>>8) & i)) { 1801 if (smc->watchdog++ && ((i>>8) & i)) {
1810 if (!smc->fast_poll) 1802 if (!smc->fast_poll)
1811 printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name); 1803 printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name);
1804 local_irq_save(flags);
1812 smc_interrupt(dev->irq, dev); 1805 smc_interrupt(dev->irq, dev);
1806 local_irq_restore(flags);
1813 smc->fast_poll = HZ; 1807 smc->fast_poll = HZ;
1814 } 1808 }
1815 if (smc->fast_poll) { 1809 if (smc->fast_poll) {
1816 smc->fast_poll--; 1810 smc->fast_poll--;
1817 smc->media.expires = jiffies + HZ/100; 1811 smc->media.expires = jiffies + HZ/100;
1818 add_timer(&smc->media); 1812 add_timer(&smc->media);
1819 SMC_SELECT_BANK(saved_bank);
1820 spin_unlock_irqrestore(&smc->lock, flags);
1821 return; 1813 return;
1822 } 1814 }
1823 1815
1816 spin_lock_irqsave(&smc->lock, flags);
1817
1818 saved_bank = inw(ioaddr + BANK_SELECT);
1819
1824 if (smc->cfg & CFG_MII_SELECT) { 1820 if (smc->cfg & CFG_MII_SELECT) {
1825 if (smc->mii_if.phy_id < 0) 1821 if (smc->mii_if.phy_id < 0)
1826 goto reschedule; 1822 goto reschedule;
@@ -1978,15 +1974,16 @@ static int smc_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1978 unsigned int ioaddr = dev->base_addr; 1974 unsigned int ioaddr = dev->base_addr;
1979 u16 saved_bank = inw(ioaddr + BANK_SELECT); 1975 u16 saved_bank = inw(ioaddr + BANK_SELECT);
1980 int ret; 1976 int ret;
1977 unsigned long flags;
1981 1978
1982 spin_lock_irq(&smc->lock); 1979 spin_lock_irqsave(&smc->lock, flags);
1983 SMC_SELECT_BANK(3); 1980 SMC_SELECT_BANK(3);
1984 if (smc->cfg & CFG_MII_SELECT) 1981 if (smc->cfg & CFG_MII_SELECT)
1985 ret = mii_ethtool_gset(&smc->mii_if, ecmd); 1982 ret = mii_ethtool_gset(&smc->mii_if, ecmd);
1986 else 1983 else
1987 ret = smc_netdev_get_ecmd(dev, ecmd); 1984 ret = smc_netdev_get_ecmd(dev, ecmd);
1988 SMC_SELECT_BANK(saved_bank); 1985 SMC_SELECT_BANK(saved_bank);
1989 spin_unlock_irq(&smc->lock); 1986 spin_unlock_irqrestore(&smc->lock, flags);
1990 return ret; 1987 return ret;
1991} 1988}
1992 1989
@@ -1996,15 +1993,16 @@ static int smc_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1996 unsigned int ioaddr = dev->base_addr; 1993 unsigned int ioaddr = dev->base_addr;
1997 u16 saved_bank = inw(ioaddr + BANK_SELECT); 1994 u16 saved_bank = inw(ioaddr + BANK_SELECT);
1998 int ret; 1995 int ret;
1996 unsigned long flags;
1999 1997
2000 spin_lock_irq(&smc->lock); 1998 spin_lock_irqsave(&smc->lock, flags);
2001 SMC_SELECT_BANK(3); 1999 SMC_SELECT_BANK(3);
2002 if (smc->cfg & CFG_MII_SELECT) 2000 if (smc->cfg & CFG_MII_SELECT)
2003 ret = mii_ethtool_sset(&smc->mii_if, ecmd); 2001 ret = mii_ethtool_sset(&smc->mii_if, ecmd);
2004 else 2002 else
2005 ret = smc_netdev_set_ecmd(dev, ecmd); 2003 ret = smc_netdev_set_ecmd(dev, ecmd);
2006 SMC_SELECT_BANK(saved_bank); 2004 SMC_SELECT_BANK(saved_bank);
2007 spin_unlock_irq(&smc->lock); 2005 spin_unlock_irqrestore(&smc->lock, flags);
2008 return ret; 2006 return ret;
2009} 2007}
2010 2008
@@ -2014,12 +2012,13 @@ static u32 smc_get_link(struct net_device *dev)
2014 unsigned int ioaddr = dev->base_addr; 2012 unsigned int ioaddr = dev->base_addr;
2015 u16 saved_bank = inw(ioaddr + BANK_SELECT); 2013 u16 saved_bank = inw(ioaddr + BANK_SELECT);
2016 u32 ret; 2014 u32 ret;
2015 unsigned long flags;
2017 2016
2018 spin_lock_irq(&smc->lock); 2017 spin_lock_irqsave(&smc->lock, flags);
2019 SMC_SELECT_BANK(3); 2018 SMC_SELECT_BANK(3);
2020 ret = smc_link_ok(dev); 2019 ret = smc_link_ok(dev);
2021 SMC_SELECT_BANK(saved_bank); 2020 SMC_SELECT_BANK(saved_bank);
2022 spin_unlock_irq(&smc->lock); 2021 spin_unlock_irqrestore(&smc->lock, flags);
2023 return ret; 2022 return ret;
2024} 2023}
2025 2024
@@ -2056,16 +2055,17 @@ static int smc_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
2056 int rc = 0; 2055 int rc = 0;
2057 u16 saved_bank; 2056 u16 saved_bank;
2058 unsigned int ioaddr = dev->base_addr; 2057 unsigned int ioaddr = dev->base_addr;
2058 unsigned long flags;
2059 2059
2060 if (!netif_running(dev)) 2060 if (!netif_running(dev))
2061 return -EINVAL; 2061 return -EINVAL;
2062 2062
2063 spin_lock_irq(&smc->lock); 2063 spin_lock_irqsave(&smc->lock, flags);
2064 saved_bank = inw(ioaddr + BANK_SELECT); 2064 saved_bank = inw(ioaddr + BANK_SELECT);
2065 SMC_SELECT_BANK(3); 2065 SMC_SELECT_BANK(3);
2066 rc = generic_mii_ioctl(&smc->mii_if, mii, cmd, NULL); 2066 rc = generic_mii_ioctl(&smc->mii_if, mii, cmd, NULL);
2067 SMC_SELECT_BANK(saved_bank); 2067 SMC_SELECT_BANK(saved_bank);
2068 spin_unlock_irq(&smc->lock); 2068 spin_unlock_irqrestore(&smc->lock, flags);
2069 return rc; 2069 return rc;
2070} 2070}
2071 2071
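The smc91c92 hunks are about lock and IRQ discipline rather than the PCMCIA API: the ethtool and ioctl paths switch from spin_lock_irq()/spin_unlock_irq() to the irqsave variants so they no longer force interrupts back on when called with them already disabled, and media_check() now drops the lock and restores the saved bank before poking smc_interrupt() by hand, taking only local_irq_save() around that call. The irqsave idiom, reduced to a sketch (the locked section is a stand-in):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

/* Safe whether the caller runs with interrupts enabled or disabled:
 * the previous interrupt state is captured in 'flags' and restored,
 * instead of being unconditionally re-enabled by spin_unlock_irq(). */
static int example_locked_op(void)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&example_lock, flags);
	ret = 0;			/* device register access elided */
	spin_unlock_irqrestore(&example_lock, flags);

	return ret;
}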
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c
index 4d1802e457be..5e6b62ba8887 100644
--- a/drivers/net/pcmcia/xirc2ps_cs.c
+++ b/drivers/net/pcmcia/xirc2ps_cs.c
@@ -297,31 +297,9 @@ static void xirc2ps_detach(struct pcmcia_device *p_dev);
297 297
298static irqreturn_t xirc2ps_interrupt(int irq, void *dev_id); 298static irqreturn_t xirc2ps_interrupt(int irq, void *dev_id);
299 299
300/****************
301 * A linked list of "instances" of the device. Each actual
302 * PCMCIA card corresponds to one device instance, and is described
303 * by one struct pcmcia_device structure (defined in ds.h).
304 *
305 * You may not want to use a linked list for this -- for example, the
306 * memory card driver uses an array of struct pcmcia_device pointers, where minor
307 * device numbers are used to derive the corresponding array index.
308 */
309
310/****************
311 * A driver needs to provide a dev_node_t structure for each device
312 * on a card. In some cases, there is only one device per card (for
313 * example, ethernet cards, modems). In other cases, there may be
314 * many actual or logical devices (SCSI adapters, memory cards with
315 * multiple partitions). The dev_node_t structures need to be kept
316 * in a linked list starting at the 'dev' field of a struct pcmcia_device
317 * structure. We allocate them in the card's private data structure,
318 * because they generally can't be allocated dynamically.
319 */
320
321typedef struct local_info_t { 300typedef struct local_info_t {
322 struct net_device *dev; 301 struct net_device *dev;
323 struct pcmcia_device *p_dev; 302 struct pcmcia_device *p_dev;
324 dev_node_t node;
325 303
326 int card_type; 304 int card_type;
327 int probe_port; 305 int probe_port;
@@ -555,7 +533,6 @@ xirc2ps_probe(struct pcmcia_device *link)
555 link->conf.Attributes = CONF_ENABLE_IRQ; 533 link->conf.Attributes = CONF_ENABLE_IRQ;
556 link->conf.IntType = INT_MEMORY_AND_IO; 534 link->conf.IntType = INT_MEMORY_AND_IO;
557 link->conf.ConfigIndex = 1; 535 link->conf.ConfigIndex = 1;
558 link->irq.Handler = xirc2ps_interrupt;
559 536
560 /* Fill in card specific entries */ 537 /* Fill in card specific entries */
561 dev->netdev_ops = &netdev_ops; 538 dev->netdev_ops = &netdev_ops;
@@ -580,8 +557,7 @@ xirc2ps_detach(struct pcmcia_device *link)
580 557
581 dev_dbg(&link->dev, "detach\n"); 558 dev_dbg(&link->dev, "detach\n");
582 559
583 if (link->dev_node) 560 unregister_netdev(dev);
584 unregister_netdev(dev);
585 561
586 xirc2ps_release(link); 562 xirc2ps_release(link);
587 563
@@ -841,7 +817,6 @@ xirc2ps_config(struct pcmcia_device * link)
841 link->conf.Attributes |= CONF_ENABLE_SPKR; 817 link->conf.Attributes |= CONF_ENABLE_SPKR;
842 link->conf.Status |= CCSR_AUDIO_ENA; 818 link->conf.Status |= CCSR_AUDIO_ENA;
843 } 819 }
844 link->irq.Attributes |= IRQ_TYPE_DYNAMIC_SHARING;
845 link->io.NumPorts2 = 8; 820 link->io.NumPorts2 = 8;
846 link->io.Attributes2 = IO_DATA_PATH_WIDTH_8; 821 link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
847 if (local->dingo) { 822 if (local->dingo) {
@@ -866,7 +841,6 @@ xirc2ps_config(struct pcmcia_device * link)
866 } 841 }
867 printk(KNOT_XIRC "no ports available\n"); 842 printk(KNOT_XIRC "no ports available\n");
868 } else { 843 } else {
869 link->irq.Attributes |= IRQ_TYPE_DYNAMIC_SHARING;
870 link->io.NumPorts1 = 16; 844 link->io.NumPorts1 = 16;
871 for (ioaddr = 0x300; ioaddr < 0x400; ioaddr += 0x10) { 845 for (ioaddr = 0x300; ioaddr < 0x400; ioaddr += 0x10) {
872 link->io.BasePort1 = ioaddr; 846 link->io.BasePort1 = ioaddr;
@@ -885,7 +859,7 @@ xirc2ps_config(struct pcmcia_device * link)
885 * Now allocate an interrupt line. Note that this does not 859 * Now allocate an interrupt line. Note that this does not
886 * actually assign a handler to the interrupt. 860 * actually assign a handler to the interrupt.
887 */ 861 */
888 if ((err=pcmcia_request_irq(link, &link->irq))) 862 if ((err=pcmcia_request_irq(link, xirc2ps_interrupt)))
889 goto config_error; 863 goto config_error;
890 864
891 /**************** 865 /****************
@@ -982,23 +956,19 @@ xirc2ps_config(struct pcmcia_device * link)
982 printk(KNOT_XIRC "invalid if_port requested\n"); 956 printk(KNOT_XIRC "invalid if_port requested\n");
983 957
984 /* we can now register the device with the net subsystem */ 958 /* we can now register the device with the net subsystem */
985 dev->irq = link->irq.AssignedIRQ; 959 dev->irq = link->irq;
986 dev->base_addr = link->io.BasePort1; 960 dev->base_addr = link->io.BasePort1;
987 961
988 if (local->dingo) 962 if (local->dingo)
989 do_reset(dev, 1); /* a kludge to make the cem56 work */ 963 do_reset(dev, 1); /* a kludge to make the cem56 work */
990 964
991 link->dev_node = &local->node;
992 SET_NETDEV_DEV(dev, &link->dev); 965 SET_NETDEV_DEV(dev, &link->dev);
993 966
994 if ((err=register_netdev(dev))) { 967 if ((err=register_netdev(dev))) {
995 printk(KNOT_XIRC "register_netdev() failed\n"); 968 printk(KNOT_XIRC "register_netdev() failed\n");
996 link->dev_node = NULL;
997 goto config_error; 969 goto config_error;
998 } 970 }
999 971
1000 strcpy(local->node.dev_name, dev->name);
1001
1002 /* give some infos about the hardware */ 972 /* give some infos about the hardware */
1003 printk(KERN_INFO "%s: %s: port %#3lx, irq %d, hwaddr %pM\n", 973 printk(KERN_INFO "%s: %s: port %#3lx, irq %d, hwaddr %pM\n",
1004 dev->name, local->manf_str,(u_long)dev->base_addr, (int)dev->irq, 974 dev->name, local->manf_str,(u_long)dev->base_addr, (int)dev->irq,
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index fc5938ba3d78..a527e37728cd 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -88,6 +88,11 @@ config LSI_ET1011C_PHY
88 ---help--- 88 ---help---
89 Supports the LSI ET1011C PHY. 89 Supports the LSI ET1011C PHY.
90 90
91config MICREL_PHY
92 tristate "Driver for Micrel PHYs"
93 ---help---
94 Supports the KSZ9021, VSC8201, KS8001 PHYs.
95
91config FIXED_PHY 96config FIXED_PHY
92 bool "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs" 97 bool "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs"
93 depends on PHYLIB=y 98 depends on PHYLIB=y
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 1342585af381..13bebab65d02 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -20,4 +20,5 @@ obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o
20obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o 20obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o
21obj-$(CONFIG_NATIONAL_PHY) += national.o 21obj-$(CONFIG_NATIONAL_PHY) += national.o
22obj-$(CONFIG_STE10XP) += ste10Xp.o 22obj-$(CONFIG_STE10XP) += ste10Xp.o
23obj-$(CONFIG_MICREL_PHY) += micrel.o
23obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o 24obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o
diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/phy/mdio-octeon.c
index a872aea4ed74..f443d43edd80 100644
--- a/drivers/net/phy/mdio-octeon.c
+++ b/drivers/net/phy/mdio-octeon.c
@@ -88,6 +88,7 @@ static int octeon_mdiobus_write(struct mii_bus *bus, int phy_id,
88static int __init octeon_mdiobus_probe(struct platform_device *pdev) 88static int __init octeon_mdiobus_probe(struct platform_device *pdev)
89{ 89{
90 struct octeon_mdiobus *bus; 90 struct octeon_mdiobus *bus;
91 union cvmx_smix_en smi_en;
91 int i; 92 int i;
92 int err = -ENOENT; 93 int err = -ENOENT;
93 94
@@ -103,6 +104,10 @@ static int __init octeon_mdiobus_probe(struct platform_device *pdev)
103 if (!bus->mii_bus) 104 if (!bus->mii_bus)
104 goto err; 105 goto err;
105 106
107 smi_en.u64 = 0;
108 smi_en.s.en = 1;
109 cvmx_write_csr(CVMX_SMIX_EN(bus->unit), smi_en.u64);
110
106 /* 111 /*
107 * Standard Octeon evaluation boards don't support phy 112 * Standard Octeon evaluation boards don't support phy
108 * interrupts, we need to poll. 113 * interrupts, we need to poll.
@@ -133,17 +138,22 @@ err_register:
133 138
134err: 139err:
135 devm_kfree(&pdev->dev, bus); 140 devm_kfree(&pdev->dev, bus);
141 smi_en.u64 = 0;
142 cvmx_write_csr(CVMX_SMIX_EN(bus->unit), smi_en.u64);
136 return err; 143 return err;
137} 144}
138 145
139static int __exit octeon_mdiobus_remove(struct platform_device *pdev) 146static int __exit octeon_mdiobus_remove(struct platform_device *pdev)
140{ 147{
141 struct octeon_mdiobus *bus; 148 struct octeon_mdiobus *bus;
149 union cvmx_smix_en smi_en;
142 150
143 bus = dev_get_drvdata(&pdev->dev); 151 bus = dev_get_drvdata(&pdev->dev);
144 152
145 mdiobus_unregister(bus->mii_bus); 153 mdiobus_unregister(bus->mii_bus);
146 mdiobus_free(bus->mii_bus); 154 mdiobus_free(bus->mii_bus);
155 smi_en.u64 = 0;
156 cvmx_write_csr(CVMX_SMIX_EN(bus->unit), smi_en.u64);
147 return 0; 157 return 0;
148} 158}
149 159
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
new file mode 100644
index 000000000000..e67691dca4ab
--- /dev/null
+++ b/drivers/net/phy/micrel.c
@@ -0,0 +1,105 @@
1/*
2 * drivers/net/phy/micrel.c
3 *
4 * Driver for Micrel PHYs
5 *
6 * Author: David J. Choi
7 *
8 * Copyright (c) 2010 Micrel, Inc.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 *
15 * Support : ksz9021 , vsc8201, ks8001
16 */
17
18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/phy.h>
21
22#define PHY_ID_KSZ9021 0x00221611
23#define PHY_ID_VSC8201 0x000FC413
24#define PHY_ID_KS8001 0x0022161A
25
26
27static int kszphy_config_init(struct phy_device *phydev)
28{
29 return 0;
30}
31
32
33static struct phy_driver ks8001_driver = {
34 .phy_id = PHY_ID_KS8001,
35 .name = "Micrel KS8001",
36 .phy_id_mask = 0x00fffff0,
37 .features = PHY_BASIC_FEATURES,
38 .flags = PHY_POLL,
39 .config_init = kszphy_config_init,
40 .config_aneg = genphy_config_aneg,
41 .read_status = genphy_read_status,
42 .driver = { .owner = THIS_MODULE,},
43};
44
45static struct phy_driver vsc8201_driver = {
46 .phy_id = PHY_ID_VSC8201,
47 .name = "Micrel VSC8201",
48 .phy_id_mask = 0x00fffff0,
49 .features = PHY_BASIC_FEATURES,
50 .flags = PHY_POLL,
51 .config_init = kszphy_config_init,
52 .config_aneg = genphy_config_aneg,
53 .read_status = genphy_read_status,
54 .driver = { .owner = THIS_MODULE,},
55};
56
57static struct phy_driver ksz9021_driver = {
58 .phy_id = PHY_ID_KSZ9021,
59 .phy_id_mask = 0x000fff10,
60 .name = "Micrel KSZ9021 Gigabit PHY",
61 .features = PHY_GBIT_FEATURES | SUPPORTED_Pause,
62 .flags = PHY_POLL,
63 .config_init = kszphy_config_init,
64 .config_aneg = genphy_config_aneg,
65 .read_status = genphy_read_status,
66 .driver = { .owner = THIS_MODULE, },
67};
68
69static int __init ksphy_init(void)
70{
71 int ret;
72
73 ret = phy_driver_register(&ks8001_driver);
74 if (ret)
75 goto err1;
76 ret = phy_driver_register(&vsc8201_driver);
77 if (ret)
78 goto err2;
79
80 ret = phy_driver_register(&ksz9021_driver);
81 if (ret)
82 goto err3;
83 return 0;
84
85err3:
86 phy_driver_unregister(&vsc8201_driver);
87err2:
88 phy_driver_unregister(&ks8001_driver);
89err1:
90 return ret;
91}
92
93static void __exit ksphy_exit(void)
94{
95 phy_driver_unregister(&ks8001_driver);
96 phy_driver_unregister(&vsc8201_driver);
97 phy_driver_unregister(&ksz9021_driver);
98}
99
100module_init(ksphy_init);
101module_exit(ksphy_exit);
102
103MODULE_DESCRIPTION("Micrel PHY driver");
104MODULE_AUTHOR("David J. Choi");
105MODULE_LICENSE("GPL");
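
Each entry in the new micrel.c above pairs a .phy_id with a .phy_id_mask. As a rough illustration (a self-contained user-space sketch, not phylib code), this is the masked comparison that lets a single entry cover every silicon revision of a part; the helper name and the sample revision value are assumptions.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper mirroring the masked match the phylib core performs. */
static int phy_id_matches(uint32_t read_id, uint32_t drv_id, uint32_t mask)
{
	return (read_id & mask) == (drv_id & mask);
}

int main(void)
{
	/* 0x0022161A is PHY_ID_KS8001 above; 0x00fffff0 masks out the low
	 * nibble, so a device reporting a different revision still matches. */
	printf("%d\n", phy_id_matches(0x0022161Bu, 0x0022161Au, 0x00fffff0u));
	return 0;
}
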
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 6e281bc825e5..8518a2e58e53 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -405,6 +405,7 @@ static ssize_t ppp_read(struct file *file, char __user *buf,
405 DECLARE_WAITQUEUE(wait, current); 405 DECLARE_WAITQUEUE(wait, current);
406 ssize_t ret; 406 ssize_t ret;
407 struct sk_buff *skb = NULL; 407 struct sk_buff *skb = NULL;
408 struct iovec iov;
408 409
409 ret = count; 410 ret = count;
410 411
@@ -448,7 +449,9 @@ static ssize_t ppp_read(struct file *file, char __user *buf,
448 if (skb->len > count) 449 if (skb->len > count)
449 goto outf; 450 goto outf;
450 ret = -EFAULT; 451 ret = -EFAULT;
451 if (copy_to_user(buf, skb->data, skb->len)) 452 iov.iov_base = buf;
453 iov.iov_len = count;
454 if (skb_copy_datagram_iovec(skb, 0, &iov, skb->len))
452 goto outf; 455 goto outf;
453 ret = skb->len; 456 ret = skb->len;
454 457
@@ -1567,13 +1570,22 @@ ppp_input(struct ppp_channel *chan, struct sk_buff *skb)
1567 struct channel *pch = chan->ppp; 1570 struct channel *pch = chan->ppp;
1568 int proto; 1571 int proto;
1569 1572
1570 if (!pch || skb->len == 0) { 1573 if (!pch) {
1571 kfree_skb(skb); 1574 kfree_skb(skb);
1572 return; 1575 return;
1573 } 1576 }
1574 1577
1575 proto = PPP_PROTO(skb);
1576 read_lock_bh(&pch->upl); 1578 read_lock_bh(&pch->upl);
1579 if (!pskb_may_pull(skb, 2)) {
1580 kfree_skb(skb);
1581 if (pch->ppp) {
1582 ++pch->ppp->dev->stats.rx_length_errors;
1583 ppp_receive_error(pch->ppp);
1584 }
1585 goto done;
1586 }
1587
1588 proto = PPP_PROTO(skb);
1577 if (!pch->ppp || proto >= 0xc000 || proto == PPP_CCPFRAG) { 1589 if (!pch->ppp || proto >= 0xc000 || proto == PPP_CCPFRAG) {
1578 /* put it on the channel queue */ 1590 /* put it on the channel queue */
1579 skb_queue_tail(&pch->file.rq, skb); 1591 skb_queue_tail(&pch->file.rq, skb);
@@ -1585,6 +1597,8 @@ ppp_input(struct ppp_channel *chan, struct sk_buff *skb)
1585 } else { 1597 } else {
1586 ppp_do_recv(pch->ppp, skb, pch); 1598 ppp_do_recv(pch->ppp, skb, pch);
1587 } 1599 }
1600
1601done:
1588 read_unlock_bh(&pch->upl); 1602 read_unlock_bh(&pch->upl);
1589} 1603}
1590 1604
@@ -1617,7 +1631,8 @@ ppp_input_error(struct ppp_channel *chan, int code)
1617static void 1631static void
1618ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch) 1632ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
1619{ 1633{
1620 if (pskb_may_pull(skb, 2)) { 1634 /* note: a 0-length skb is used as an error indication */
1635 if (skb->len > 0) {
1621#ifdef CONFIG_PPP_MULTILINK 1636#ifdef CONFIG_PPP_MULTILINK
1622 /* XXX do channel-level decompression here */ 1637 /* XXX do channel-level decompression here */
1623 if (PPP_PROTO(skb) == PPP_MP) 1638 if (PPP_PROTO(skb) == PPP_MP)
@@ -1625,15 +1640,10 @@ ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
1625 else 1640 else
1626#endif /* CONFIG_PPP_MULTILINK */ 1641#endif /* CONFIG_PPP_MULTILINK */
1627 ppp_receive_nonmp_frame(ppp, skb); 1642 ppp_receive_nonmp_frame(ppp, skb);
1628 return; 1643 } else {
1644 kfree_skb(skb);
1645 ppp_receive_error(ppp);
1629 } 1646 }
1630
1631 if (skb->len > 0)
1632 /* note: a 0-length skb is used as an error indication */
1633 ++ppp->dev->stats.rx_length_errors;
1634
1635 kfree_skb(skb);
1636 ppp_receive_error(ppp);
1637} 1647}
1638 1648
1639static void 1649static void
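
The ppp_input() change above moves protocol parsing behind a pskb_may_pull() check. A minimal sketch, using a hypothetical helper that is not part of ppp_generic.c, of why the guard must come before PPP_PROTO() is evaluated:

#include <linux/errno.h>
#include <linux/skbuff.h>

/* PPP_PROTO() reads the first two bytes of skb->data, so the frame must be
 * known to hold at least two linear bytes before the field is trusted. */
static int example_ppp_proto(struct sk_buff *skb, u16 *proto)
{
	if (!pskb_may_pull(skb, 2))
		return -EINVAL;		/* runt frame: caller drops it */

	*proto = (skb->data[0] << 8) | skb->data[1];
	return 0;
}
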
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index dbb1f5a1824c..dd8106ff35aa 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -1042,14 +1042,14 @@ static void rtl8169_vlan_rx_register(struct net_device *dev,
1042} 1042}
1043 1043
1044static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc, 1044static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc,
1045 struct sk_buff *skb) 1045 struct sk_buff *skb, int polling)
1046{ 1046{
1047 u32 opts2 = le32_to_cpu(desc->opts2); 1047 u32 opts2 = le32_to_cpu(desc->opts2);
1048 struct vlan_group *vlgrp = tp->vlgrp; 1048 struct vlan_group *vlgrp = tp->vlgrp;
1049 int ret; 1049 int ret;
1050 1050
1051 if (vlgrp && (opts2 & RxVlanTag)) { 1051 if (vlgrp && (opts2 & RxVlanTag)) {
1052 vlan_hwaccel_receive_skb(skb, vlgrp, swab16(opts2 & 0xffff)); 1052 __vlan_hwaccel_rx(skb, vlgrp, swab16(opts2 & 0xffff), polling);
1053 ret = 0; 1053 ret = 0;
1054 } else 1054 } else
1055 ret = -1; 1055 ret = -1;
@@ -1066,7 +1066,7 @@ static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
1066} 1066}
1067 1067
1068static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc, 1068static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc,
1069 struct sk_buff *skb) 1069 struct sk_buff *skb, int polling)
1070{ 1070{
1071 return -1; 1071 return -1;
1072} 1072}
@@ -2759,6 +2759,7 @@ static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
2759{ 2759{
2760 iounmap(ioaddr); 2760 iounmap(ioaddr);
2761 pci_release_regions(pdev); 2761 pci_release_regions(pdev);
2762 pci_clear_mwi(pdev);
2762 pci_disable_device(pdev); 2763 pci_disable_device(pdev);
2763 free_netdev(dev); 2764 free_netdev(dev);
2764} 2765}
@@ -2825,8 +2826,13 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
2825 spin_lock_irq(&tp->lock); 2826 spin_lock_irq(&tp->lock);
2826 2827
2827 RTL_W8(Cfg9346, Cfg9346_Unlock); 2828 RTL_W8(Cfg9346, Cfg9346_Unlock);
2829
2828 RTL_W32(MAC4, high); 2830 RTL_W32(MAC4, high);
2831 RTL_R32(MAC4);
2832
2829 RTL_W32(MAC0, low); 2833 RTL_W32(MAC0, low);
2834 RTL_R32(MAC0);
2835
2830 RTL_W8(Cfg9346, Cfg9346_Lock); 2836 RTL_W8(Cfg9346, Cfg9346_Lock);
2831 2837
2832 spin_unlock_irq(&tp->lock); 2838 spin_unlock_irq(&tp->lock);
@@ -3014,9 +3020,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3014 goto err_out_free_dev_1; 3020 goto err_out_free_dev_1;
3015 } 3021 }
3016 3022
3017 rc = pci_set_mwi(pdev); 3023 if (pci_set_mwi(pdev) < 0)
3018 if (rc < 0) 3024 netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n");
3019 goto err_out_disable_2;
3020 3025
3021 /* make sure PCI base addr 1 is MMIO */ 3026 /* make sure PCI base addr 1 is MMIO */
3022 if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) { 3027 if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
@@ -3024,7 +3029,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3024 "region #%d not an MMIO resource, aborting\n", 3029 "region #%d not an MMIO resource, aborting\n",
3025 region); 3030 region);
3026 rc = -ENODEV; 3031 rc = -ENODEV;
3027 goto err_out_mwi_3; 3032 goto err_out_mwi_2;
3028 } 3033 }
3029 3034
3030 /* check for weird/broken PCI region reporting */ 3035 /* check for weird/broken PCI region reporting */
@@ -3032,13 +3037,13 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3032 netif_err(tp, probe, dev, 3037 netif_err(tp, probe, dev,
3033 "Invalid PCI region size(s), aborting\n"); 3038 "Invalid PCI region size(s), aborting\n");
3034 rc = -ENODEV; 3039 rc = -ENODEV;
3035 goto err_out_mwi_3; 3040 goto err_out_mwi_2;
3036 } 3041 }
3037 3042
3038 rc = pci_request_regions(pdev, MODULENAME); 3043 rc = pci_request_regions(pdev, MODULENAME);
3039 if (rc < 0) { 3044 if (rc < 0) {
3040 netif_err(tp, probe, dev, "could not request regions\n"); 3045 netif_err(tp, probe, dev, "could not request regions\n");
3041 goto err_out_mwi_3; 3046 goto err_out_mwi_2;
3042 } 3047 }
3043 3048
3044 tp->cp_cmd = PCIMulRW | RxChkSum; 3049 tp->cp_cmd = PCIMulRW | RxChkSum;
@@ -3051,7 +3056,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3051 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 3056 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3052 if (rc < 0) { 3057 if (rc < 0) {
3053 netif_err(tp, probe, dev, "DMA configuration failed\n"); 3058 netif_err(tp, probe, dev, "DMA configuration failed\n");
3054 goto err_out_free_res_4; 3059 goto err_out_free_res_3;
3055 } 3060 }
3056 } 3061 }
3057 3062
@@ -3060,7 +3065,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3060 if (!ioaddr) { 3065 if (!ioaddr) {
3061 netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n"); 3066 netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
3062 rc = -EIO; 3067 rc = -EIO;
3063 goto err_out_free_res_4; 3068 goto err_out_free_res_3;
3064 } 3069 }
3065 3070
3066 tp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP); 3071 tp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
@@ -3102,7 +3107,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3102 if (i == ARRAY_SIZE(rtl_chip_info)) { 3107 if (i == ARRAY_SIZE(rtl_chip_info)) {
3103 dev_err(&pdev->dev, 3108 dev_err(&pdev->dev,
3104 "driver bug, MAC version not found in rtl_chip_info\n"); 3109 "driver bug, MAC version not found in rtl_chip_info\n");
3105 goto err_out_msi_5; 3110 goto err_out_msi_4;
3106 } 3111 }
3107 tp->chipset = i; 3112 tp->chipset = i;
3108 3113
@@ -3167,7 +3172,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3167 3172
3168 rc = register_netdev(dev); 3173 rc = register_netdev(dev);
3169 if (rc < 0) 3174 if (rc < 0)
3170 goto err_out_msi_5; 3175 goto err_out_msi_4;
3171 3176
3172 pci_set_drvdata(pdev, dev); 3177 pci_set_drvdata(pdev, dev);
3173 3178
@@ -3190,14 +3195,13 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3190out: 3195out:
3191 return rc; 3196 return rc;
3192 3197
3193err_out_msi_5: 3198err_out_msi_4:
3194 rtl_disable_msi(pdev, tp); 3199 rtl_disable_msi(pdev, tp);
3195 iounmap(ioaddr); 3200 iounmap(ioaddr);
3196err_out_free_res_4: 3201err_out_free_res_3:
3197 pci_release_regions(pdev); 3202 pci_release_regions(pdev);
3198err_out_mwi_3: 3203err_out_mwi_2:
3199 pci_clear_mwi(pdev); 3204 pci_clear_mwi(pdev);
3200err_out_disable_2:
3201 pci_disable_device(pdev); 3205 pci_disable_device(pdev);
3202err_out_free_dev_1: 3206err_out_free_dev_1:
3203 free_netdev(dev); 3207 free_netdev(dev);
@@ -4441,12 +4445,20 @@ out:
4441 return done; 4445 return done;
4442} 4446}
4443 4447
4448/*
4449 * Warning : rtl8169_rx_interrupt() might be called :
4450 * 1) from NAPI (softirq) context
4451 * (polling = 1 : we should call netif_receive_skb())
4452 * 2) from process context (rtl8169_reset_task())
4453 * (polling = 0 : we must call netif_rx() instead)
4454 */
4444static int rtl8169_rx_interrupt(struct net_device *dev, 4455static int rtl8169_rx_interrupt(struct net_device *dev,
4445 struct rtl8169_private *tp, 4456 struct rtl8169_private *tp,
4446 void __iomem *ioaddr, u32 budget) 4457 void __iomem *ioaddr, u32 budget)
4447{ 4458{
4448 unsigned int cur_rx, rx_left; 4459 unsigned int cur_rx, rx_left;
4449 unsigned int delta, count; 4460 unsigned int delta, count;
4461 int polling = (budget != ~(u32)0) ? 1 : 0;
4450 4462
4451 cur_rx = tp->cur_rx; 4463 cur_rx = tp->cur_rx;
4452 rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx; 4464 rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
@@ -4508,8 +4520,12 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
4508 skb_put(skb, pkt_size); 4520 skb_put(skb, pkt_size);
4509 skb->protocol = eth_type_trans(skb, dev); 4521 skb->protocol = eth_type_trans(skb, dev);
4510 4522
4511 if (rtl8169_rx_vlan_skb(tp, desc, skb) < 0) 4523 if (rtl8169_rx_vlan_skb(tp, desc, skb, polling) < 0) {
4512 netif_receive_skb(skb); 4524 if (likely(polling))
4525 netif_receive_skb(skb);
4526 else
4527 netif_rx(skb);
4528 }
4513 4529
4514 dev->stats.rx_bytes += pkt_size; 4530 dev->stats.rx_bytes += pkt_size;
4515 dev->stats.rx_packets++; 4531 dev->stats.rx_packets++;
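
The polling argument threaded through rtl8169_rx_vlan_skb() and rtl8169_rx_interrupt() above selects the delivery call by calling context. A small sketch (the wrapper name is made up) of that dispatch:

#include <linux/netdevice.h>

/* NAPI poll runs in softirq context and may use netif_receive_skb();
 * the reset task runs in process context and must use netif_rx() instead. */
static void example_deliver(struct sk_buff *skb, int polling)
{
	if (polling)
		netif_receive_skb(skb);
	else
		netif_rx(skb);
}
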
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c
index 9944e5d662c0..04efc0c1bda9 100644
--- a/drivers/net/sb1250-mac.c
+++ b/drivers/net/sb1250-mac.c
@@ -2353,17 +2353,36 @@ static int sbmac_init(struct platform_device *pldev, long long base)
2353 2353
2354 sc->mii_bus = mdiobus_alloc(); 2354 sc->mii_bus = mdiobus_alloc();
2355 if (sc->mii_bus == NULL) { 2355 if (sc->mii_bus == NULL) {
2356 sbmac_uninitctx(sc); 2356 err = -ENOMEM;
2357 return -ENOMEM; 2357 goto uninit_ctx;
2358 } 2358 }
2359 2359
2360 sc->mii_bus->name = sbmac_mdio_string;
2361 snprintf(sc->mii_bus->id, MII_BUS_ID_SIZE, "%x", idx);
2362 sc->mii_bus->priv = sc;
2363 sc->mii_bus->read = sbmac_mii_read;
2364 sc->mii_bus->write = sbmac_mii_write;
2365 sc->mii_bus->irq = sc->phy_irq;
2366 for (i = 0; i < PHY_MAX_ADDR; ++i)
2367 sc->mii_bus->irq[i] = SBMAC_PHY_INT;
2368
2369 sc->mii_bus->parent = &pldev->dev;
2370 /*
2371 * Probe PHY address
2372 */
2373 err = mdiobus_register(sc->mii_bus);
2374 if (err) {
2375 printk(KERN_ERR "%s: unable to register MDIO bus\n",
2376 dev->name);
2377 goto free_mdio;
2378 }
2379 dev_set_drvdata(&pldev->dev, sc->mii_bus);
2380
2360 err = register_netdev(dev); 2381 err = register_netdev(dev);
2361 if (err) { 2382 if (err) {
2362 printk(KERN_ERR "%s.%d: unable to register netdev\n", 2383 printk(KERN_ERR "%s.%d: unable to register netdev\n",
2363 sbmac_string, idx); 2384 sbmac_string, idx);
2364 mdiobus_free(sc->mii_bus); 2385 goto unreg_mdio;
2365 sbmac_uninitctx(sc);
2366 return err;
2367 } 2386 }
2368 2387
2369 pr_info("%s.%d: registered as %s\n", sbmac_string, idx, dev->name); 2388 pr_info("%s.%d: registered as %s\n", sbmac_string, idx, dev->name);
@@ -2379,19 +2398,15 @@ static int sbmac_init(struct platform_device *pldev, long long base)
2379 pr_info("%s: SiByte Ethernet at 0x%08Lx, address: %pM\n", 2398 pr_info("%s: SiByte Ethernet at 0x%08Lx, address: %pM\n",
2380 dev->name, base, eaddr); 2399 dev->name, base, eaddr);
2381 2400
2382 sc->mii_bus->name = sbmac_mdio_string;
2383 snprintf(sc->mii_bus->id, MII_BUS_ID_SIZE, "%x", idx);
2384 sc->mii_bus->priv = sc;
2385 sc->mii_bus->read = sbmac_mii_read;
2386 sc->mii_bus->write = sbmac_mii_write;
2387 sc->mii_bus->irq = sc->phy_irq;
2388 for (i = 0; i < PHY_MAX_ADDR; ++i)
2389 sc->mii_bus->irq[i] = SBMAC_PHY_INT;
2390
2391 sc->mii_bus->parent = &pldev->dev;
2392 dev_set_drvdata(&pldev->dev, sc->mii_bus);
2393
2394 return 0; 2401 return 0;
2402unreg_mdio:
2403 mdiobus_unregister(sc->mii_bus);
2404 dev_set_drvdata(&pldev->dev, NULL);
2405free_mdio:
2406 mdiobus_free(sc->mii_bus);
2407uninit_ctx:
2408 sbmac_uninitctx(sc);
2409 return err;
2395} 2410}
2396 2411
2397 2412
@@ -2417,16 +2432,6 @@ static int sbmac_open(struct net_device *dev)
2417 goto out_err; 2432 goto out_err;
2418 } 2433 }
2419 2434
2420 /*
2421 * Probe PHY address
2422 */
2423 err = mdiobus_register(sc->mii_bus);
2424 if (err) {
2425 printk(KERN_ERR "%s: unable to register MDIO bus\n",
2426 dev->name);
2427 goto out_unirq;
2428 }
2429
2430 sc->sbm_speed = sbmac_speed_none; 2435 sc->sbm_speed = sbmac_speed_none;
2431 sc->sbm_duplex = sbmac_duplex_none; 2436 sc->sbm_duplex = sbmac_duplex_none;
2432 sc->sbm_fc = sbmac_fc_none; 2437 sc->sbm_fc = sbmac_fc_none;
@@ -2457,11 +2462,7 @@ static int sbmac_open(struct net_device *dev)
2457 return 0; 2462 return 0;
2458 2463
2459out_unregister: 2464out_unregister:
2460 mdiobus_unregister(sc->mii_bus);
2461
2462out_unirq:
2463 free_irq(dev->irq, dev); 2465 free_irq(dev->irq, dev);
2464
2465out_err: 2466out_err:
2466 return err; 2467 return err;
2467} 2468}
@@ -2650,9 +2651,6 @@ static int sbmac_close(struct net_device *dev)
2650 2651
2651 phy_disconnect(sc->phy_dev); 2652 phy_disconnect(sc->phy_dev);
2652 sc->phy_dev = NULL; 2653 sc->phy_dev = NULL;
2653
2654 mdiobus_unregister(sc->mii_bus);
2655
2656 free_irq(dev->irq, dev); 2654 free_irq(dev->irq, dev);
2657 2655
2658 sbdma_emptyring(&(sc->sbm_txdma)); 2656 sbdma_emptyring(&(sc->sbm_txdma));
@@ -2760,6 +2758,7 @@ static int __exit sbmac_remove(struct platform_device *pldev)
2760 2758
2761 unregister_netdev(dev); 2759 unregister_netdev(dev);
2762 sbmac_uninitctx(sc); 2760 sbmac_uninitctx(sc);
2761 mdiobus_unregister(sc->mii_bus);
2763 mdiobus_free(sc->mii_bus); 2762 mdiobus_free(sc->mii_bus);
2764 iounmap(sc->sbm_base); 2763 iounmap(sc->sbm_base);
2765 free_netdev(dev); 2764 free_netdev(dev);
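
The sbmac_init() rework above registers the MDIO bus during probe and unwinds failures through a chain of labels. A generic sketch of that goto-unwind ordering, with made-up stub functions standing in for the real setup steps:

#include <linux/errno.h>

/* Illustrative stubs only. */
static int acquire_a(void) { return 0; }
static int acquire_b(void) { return 0; }
static int acquire_c(void) { return -ENOMEM; }
static void release_a(void) { }
static void release_b(void) { }

/* Each label undoes exactly the steps that completed before the failure,
 * in reverse order of acquisition. */
static int example_init(void)
{
	int err;

	err = acquire_a();
	if (err)
		goto out;
	err = acquire_b();
	if (err)
		goto undo_a;
	err = acquire_c();
	if (err)
		goto undo_b;
	return 0;

undo_b:
	release_b();
undo_a:
	release_a();
out:
	return err;
}
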
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 6486657c47b8..649a264d6a81 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -1861,6 +1861,7 @@ out:
1861 } 1861 }
1862 1862
1863 if (disabled) { 1863 if (disabled) {
1864 dev_close(efx->net_dev);
1864 EFX_ERR(efx, "has been disabled\n"); 1865 EFX_ERR(efx, "has been disabled\n");
1865 efx->state = STATE_DISABLED; 1866 efx->state = STATE_DISABLED;
1866 } else { 1867 } else {
@@ -1884,8 +1885,7 @@ static void efx_reset_work(struct work_struct *data)
1884 } 1885 }
1885 1886
1886 rtnl_lock(); 1887 rtnl_lock();
1887 if (efx_reset(efx, efx->reset_pending)) 1888 (void)efx_reset(efx, efx->reset_pending);
1888 dev_close(efx->net_dev);
1889 rtnl_unlock(); 1889 rtnl_unlock();
1890} 1890}
1891 1891
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index d294d66fd600..08278e7302b3 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -1320,7 +1320,9 @@ static int falcon_probe_nvconfig(struct efx_nic *efx)
1320 1320
1321 EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad); 1321 EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad);
1322 1322
1323 falcon_probe_board(efx, board_rev); 1323 rc = falcon_probe_board(efx, board_rev);
1324 if (rc)
1325 goto fail2;
1324 1326
1325 kfree(nvconfig); 1327 kfree(nvconfig);
1326 return 0; 1328 return 0;
diff --git a/drivers/net/sfc/falcon_boards.c b/drivers/net/sfc/falcon_boards.c
index 5712fddd72f2..c7a933a3292e 100644
--- a/drivers/net/sfc/falcon_boards.c
+++ b/drivers/net/sfc/falcon_boards.c
@@ -728,15 +728,7 @@ static const struct falcon_board_type board_types[] = {
728 }, 728 },
729}; 729};
730 730
731static const struct falcon_board_type falcon_dummy_board = { 731int falcon_probe_board(struct efx_nic *efx, u16 revision_info)
732 .init = efx_port_dummy_op_int,
733 .init_phy = efx_port_dummy_op_void,
734 .fini = efx_port_dummy_op_void,
735 .set_id_led = efx_port_dummy_op_set_id_led,
736 .monitor = efx_port_dummy_op_int,
737};
738
739void falcon_probe_board(struct efx_nic *efx, u16 revision_info)
740{ 732{
741 struct falcon_board *board = falcon_board(efx); 733 struct falcon_board *board = falcon_board(efx);
742 u8 type_id = FALCON_BOARD_TYPE(revision_info); 734 u8 type_id = FALCON_BOARD_TYPE(revision_info);
@@ -754,8 +746,9 @@ void falcon_probe_board(struct efx_nic *efx, u16 revision_info)
754 (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC) 746 (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC)
755 ? board->type->ref_model : board->type->gen_type, 747 ? board->type->ref_model : board->type->gen_type,
756 'A' + board->major, board->minor); 748 'A' + board->major, board->minor);
749 return 0;
757 } else { 750 } else {
758 EFX_ERR(efx, "unknown board type %d\n", type_id); 751 EFX_ERR(efx, "unknown board type %d\n", type_id);
759 board->type = &falcon_dummy_board; 752 return -ENODEV;
760 } 753 }
761} 754}
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h
index 9351c0331a47..3166bafdfbef 100644
--- a/drivers/net/sfc/nic.h
+++ b/drivers/net/sfc/nic.h
@@ -156,7 +156,7 @@ extern struct efx_nic_type siena_a0_nic_type;
156 ************************************************************************** 156 **************************************************************************
157 */ 157 */
158 158
159extern void falcon_probe_board(struct efx_nic *efx, u16 revision_info); 159extern int falcon_probe_board(struct efx_nic *efx, u16 revision_info);
160 160
161/* TX data path */ 161/* TX data path */
162extern int efx_nic_probe_tx(struct efx_tx_queue *tx_queue); 162extern int efx_nic_probe_tx(struct efx_tx_queue *tx_queue);
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
index 38dcc42c4f79..e0c46f59d1f8 100644
--- a/drivers/net/sfc/siena.c
+++ b/drivers/net/sfc/siena.c
@@ -456,8 +456,17 @@ static int siena_try_update_nic_stats(struct efx_nic *efx)
456 456
457static void siena_update_nic_stats(struct efx_nic *efx) 457static void siena_update_nic_stats(struct efx_nic *efx)
458{ 458{
459 while (siena_try_update_nic_stats(efx) == -EAGAIN) 459 int retry;
460 cpu_relax(); 460
 461	/* If we're unlucky enough to read statistics during the DMA, wait
462 * up to 10ms for it to finish (typically takes <500us) */
463 for (retry = 0; retry < 100; ++retry) {
464 if (siena_try_update_nic_stats(efx) == 0)
465 return;
466 udelay(100);
467 }
468
469 /* Use the old values instead */
461} 470}
462 471
463static void siena_start_nic_stats(struct efx_nic *efx) 472static void siena_start_nic_stats(struct efx_nic *efx)
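
The siena_update_nic_stats() hunk above replaces an unbounded cpu_relax() spin with a bounded retry. A generic sketch of that pattern, assuming a caller-supplied try_once() callback rather than the driver's own function:

#include <linux/delay.h>
#include <linux/errno.h>

/* Illustrative bounded poll: roughly 100 x 100us = 10ms worst case, after
 * which the caller keeps its previously read values instead of spinning. */
static int example_poll(int (*try_once)(void *arg), void *arg)
{
	int retry;

	for (retry = 0; retry < 100; ++retry) {
		if (try_once(arg) == 0)
			return 0;
		udelay(100);
	}
	return -ETIMEDOUT;
}
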
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 22cf1c446de3..ecc41cffb470 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -8633,6 +8633,7 @@ static int tg3_test_msi(struct tg3 *tp)
8633 pci_disable_msi(tp->pdev); 8633 pci_disable_msi(tp->pdev);
8634 8634
8635 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI; 8635 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8636 tp->napi[0].irq_vec = tp->pdev->irq;
8636 8637
8637 err = tg3_request_irq(tp, 0); 8638 err = tg3_request_irq(tp, 0);
8638 if (err) 8639 if (err)
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index ba56ce4382d9..d7b7018a1de1 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -385,4 +385,25 @@ config USB_CDC_PHONET
385 cellular modem, as found on most Nokia handsets with the 385 cellular modem, as found on most Nokia handsets with the
386 "PC suite" USB profile. 386 "PC suite" USB profile.
387 387
388config USB_IPHETH
389 tristate "Apple iPhone USB Ethernet driver"
390 default n
391 ---help---
392 Module used to share Internet connection (tethering) from your
393 iPhone (Original, 3G and 3GS) to your system.
394 Note that you need userspace libraries and programs that are needed
395 to pair your device with your system and that understand the iPhone
396 protocol.
397
398 For more information: http://giagio.com/wiki/moin.cgi/iPhoneEthernetDriver
399
400config USB_SIERRA_NET
401 tristate "USB-to-WWAN Driver for Sierra Wireless modems"
402 depends on USB_USBNET
403 help
404 Choose this option if you have a Sierra Wireless USB-to-WWAN device.
405
406 To compile this driver as a module, choose M here: the
407 module will be called sierra_net.
408
388endmenu 409endmenu
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index 82ea62955b56..b13a279663ba 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -23,4 +23,6 @@ obj-$(CONFIG_USB_NET_MCS7830) += mcs7830.o
23obj-$(CONFIG_USB_USBNET) += usbnet.o 23obj-$(CONFIG_USB_USBNET) += usbnet.o
24obj-$(CONFIG_USB_NET_INT51X1) += int51x1.o 24obj-$(CONFIG_USB_NET_INT51X1) += int51x1.o
25obj-$(CONFIG_USB_CDC_PHONET) += cdc-phonet.o 25obj-$(CONFIG_USB_CDC_PHONET) += cdc-phonet.o
26obj-$(CONFIG_USB_IPHETH) += ipheth.o
27obj-$(CONFIG_USB_SIERRA_NET) += sierra_net.o
26 28
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index c8cdb7f30adc..3547cf13d219 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -431,6 +431,7 @@ static const struct driver_info mbm_info = {
431 .bind = cdc_bind, 431 .bind = cdc_bind,
432 .unbind = usbnet_cdc_unbind, 432 .unbind = usbnet_cdc_unbind,
433 .status = cdc_status, 433 .status = cdc_status,
434 .manage_power = cdc_manage_power,
434}; 435};
435 436
436/*-------------------------------------------------------------------------*/ 437/*-------------------------------------------------------------------------*/
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 04b281002a76..5dfed9297b22 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -240,7 +240,7 @@ static int dm_write_shared_word(struct usbnet *dev, int phy, u8 reg, __le16 valu
240 goto out; 240 goto out;
241 241
242 dm_write_reg(dev, DM_SHARED_ADDR, phy ? (reg | 0x40) : reg); 242 dm_write_reg(dev, DM_SHARED_ADDR, phy ? (reg | 0x40) : reg);
243 dm_write_reg(dev, DM_SHARED_CTRL, phy ? 0x1c : 0x14); 243 dm_write_reg(dev, DM_SHARED_CTRL, phy ? 0x1a : 0x12);
244 244
245 for (i = 0; i < DM_TIMEOUT; i++) { 245 for (i = 0; i < DM_TIMEOUT; i++) {
246 u8 tmp; 246 u8 tmp;
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
new file mode 100644
index 000000000000..418825d26f90
--- /dev/null
+++ b/drivers/net/usb/ipheth.c
@@ -0,0 +1,569 @@
1/*
2 * ipheth.c - Apple iPhone USB Ethernet driver
3 *
4 * Copyright (c) 2009 Diego Giagio <diego@giagio.com>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of GIAGIO.COM nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * Alternatively, provided that this notice is retained in full, this
20 * software may be distributed under the terms of the GNU General
21 * Public License ("GPL") version 2, in which case the provisions of the
22 * GPL apply INSTEAD OF those given above.
23 *
24 * The provided data structures and external interfaces from this code
25 * are not restricted to be used by modules with a GPL compatible license.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
32 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
33 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
34 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
35 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
36 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
38 * DAMAGE.
39 *
40 *
41 * Attention: iPhone device must be paired, otherwise it won't respond to our
42 * driver. For more info: http://giagio.com/wiki/moin.cgi/iPhoneEthernetDriver
43 *
44 */
45
46#include <linux/kernel.h>
47#include <linux/errno.h>
48#include <linux/init.h>
49#include <linux/slab.h>
50#include <linux/module.h>
51#include <linux/netdevice.h>
52#include <linux/etherdevice.h>
53#include <linux/ethtool.h>
54#include <linux/usb.h>
55#include <linux/workqueue.h>
56
57#define USB_VENDOR_APPLE 0x05ac
58#define USB_PRODUCT_IPHONE 0x1290
59#define USB_PRODUCT_IPHONE_3G 0x1292
60#define USB_PRODUCT_IPHONE_3GS 0x1294
61
62#define IPHETH_USBINTF_CLASS 255
63#define IPHETH_USBINTF_SUBCLASS 253
64#define IPHETH_USBINTF_PROTO 1
65
66#define IPHETH_BUF_SIZE 1516
67#define IPHETH_TX_TIMEOUT (5 * HZ)
68
69#define IPHETH_INTFNUM 2
70#define IPHETH_ALT_INTFNUM 1
71
72#define IPHETH_CTRL_ENDP 0x00
73#define IPHETH_CTRL_BUF_SIZE 0x40
74#define IPHETH_CTRL_TIMEOUT (5 * HZ)
75
76#define IPHETH_CMD_GET_MACADDR 0x00
77#define IPHETH_CMD_CARRIER_CHECK 0x45
78
79#define IPHETH_CARRIER_CHECK_TIMEOUT round_jiffies_relative(1 * HZ)
80#define IPHETH_CARRIER_ON 0x04
81
82static struct usb_device_id ipheth_table[] = {
83 { USB_DEVICE_AND_INTERFACE_INFO(
84 USB_VENDOR_APPLE, USB_PRODUCT_IPHONE,
85 IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
86 IPHETH_USBINTF_PROTO) },
87 { USB_DEVICE_AND_INTERFACE_INFO(
88 USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_3G,
89 IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
90 IPHETH_USBINTF_PROTO) },
91 { USB_DEVICE_AND_INTERFACE_INFO(
92 USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_3GS,
93 IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
94 IPHETH_USBINTF_PROTO) },
95 { }
96};
97MODULE_DEVICE_TABLE(usb, ipheth_table);
98
99struct ipheth_device {
100 struct usb_device *udev;
101 struct usb_interface *intf;
102 struct net_device *net;
103 struct sk_buff *tx_skb;
104 struct urb *tx_urb;
105 struct urb *rx_urb;
106 unsigned char *tx_buf;
107 unsigned char *rx_buf;
108 unsigned char *ctrl_buf;
109 u8 bulk_in;
110 u8 bulk_out;
111 struct delayed_work carrier_work;
112};
113
114static int ipheth_rx_submit(struct ipheth_device *dev, gfp_t mem_flags);
115
116static int ipheth_alloc_urbs(struct ipheth_device *iphone)
117{
118 struct urb *tx_urb = NULL;
119 struct urb *rx_urb = NULL;
120 u8 *tx_buf = NULL;
121 u8 *rx_buf = NULL;
122
123 tx_urb = usb_alloc_urb(0, GFP_KERNEL);
124 if (tx_urb == NULL)
125 goto error_nomem;
126
127 rx_urb = usb_alloc_urb(0, GFP_KERNEL);
128 if (rx_urb == NULL)
129 goto free_tx_urb;
130
131 tx_buf = usb_buffer_alloc(iphone->udev,
132 IPHETH_BUF_SIZE,
133 GFP_KERNEL,
134 &tx_urb->transfer_dma);
135 if (tx_buf == NULL)
136 goto free_rx_urb;
137
138 rx_buf = usb_buffer_alloc(iphone->udev,
139 IPHETH_BUF_SIZE,
140 GFP_KERNEL,
141 &rx_urb->transfer_dma);
142 if (rx_buf == NULL)
143 goto free_tx_buf;
144
145
146 iphone->tx_urb = tx_urb;
147 iphone->rx_urb = rx_urb;
148 iphone->tx_buf = tx_buf;
149 iphone->rx_buf = rx_buf;
150 return 0;
151
152free_tx_buf:
153 usb_buffer_free(iphone->udev, IPHETH_BUF_SIZE, tx_buf,
154 tx_urb->transfer_dma);
155free_rx_urb:
156 usb_free_urb(rx_urb);
157free_tx_urb:
158 usb_free_urb(tx_urb);
159error_nomem:
160 return -ENOMEM;
161}
162
163static void ipheth_free_urbs(struct ipheth_device *iphone)
164{
165 usb_buffer_free(iphone->udev, IPHETH_BUF_SIZE, iphone->rx_buf,
166 iphone->rx_urb->transfer_dma);
167 usb_buffer_free(iphone->udev, IPHETH_BUF_SIZE, iphone->tx_buf,
168 iphone->tx_urb->transfer_dma);
169 usb_free_urb(iphone->rx_urb);
170 usb_free_urb(iphone->tx_urb);
171}
172
173static void ipheth_kill_urbs(struct ipheth_device *dev)
174{
175 usb_kill_urb(dev->tx_urb);
176 usb_kill_urb(dev->rx_urb);
177}
178
179static void ipheth_rcvbulk_callback(struct urb *urb)
180{
181 struct ipheth_device *dev;
182 struct sk_buff *skb;
183 int status;
184 char *buf;
185 int len;
186
187 dev = urb->context;
188 if (dev == NULL)
189 return;
190
191 status = urb->status;
192 switch (status) {
193 case -ENOENT:
194 case -ECONNRESET:
195 case -ESHUTDOWN:
196 return;
197 case 0:
198 break;
199 default:
200 err("%s: urb status: %d", __func__, urb->status);
201 return;
202 }
203
204 len = urb->actual_length;
205 buf = urb->transfer_buffer;
206
207 skb = dev_alloc_skb(NET_IP_ALIGN + len);
208 if (!skb) {
209 err("%s: dev_alloc_skb: -ENOMEM", __func__);
210 dev->net->stats.rx_dropped++;
211 return;
212 }
213
214 skb_reserve(skb, NET_IP_ALIGN);
215 memcpy(skb_put(skb, len), buf + NET_IP_ALIGN, len - NET_IP_ALIGN);
216 skb->dev = dev->net;
217 skb->protocol = eth_type_trans(skb, dev->net);
218
219 dev->net->stats.rx_packets++;
220 dev->net->stats.rx_bytes += len;
221
222 netif_rx(skb);
223 ipheth_rx_submit(dev, GFP_ATOMIC);
224}
225
226static void ipheth_sndbulk_callback(struct urb *urb)
227{
228 struct ipheth_device *dev;
229
230 dev = urb->context;
231 if (dev == NULL)
232 return;
233
234 if (urb->status != 0 &&
235 urb->status != -ENOENT &&
236 urb->status != -ECONNRESET &&
237 urb->status != -ESHUTDOWN)
238 err("%s: urb status: %d", __func__, urb->status);
239
240 dev_kfree_skb_irq(dev->tx_skb);
241 netif_wake_queue(dev->net);
242}
243
244static int ipheth_carrier_set(struct ipheth_device *dev)
245{
246 struct usb_device *udev = dev->udev;
247 int retval;
248
249 retval = usb_control_msg(udev,
250 usb_rcvctrlpipe(udev, IPHETH_CTRL_ENDP),
251 IPHETH_CMD_CARRIER_CHECK, /* request */
252 0xc0, /* request type */
253 0x00, /* value */
254 0x02, /* index */
255 dev->ctrl_buf, IPHETH_CTRL_BUF_SIZE,
256 IPHETH_CTRL_TIMEOUT);
257 if (retval < 0) {
258 err("%s: usb_control_msg: %d", __func__, retval);
259 return retval;
260 }
261
262 if (dev->ctrl_buf[0] == IPHETH_CARRIER_ON)
263 netif_carrier_on(dev->net);
264 else
265 netif_carrier_off(dev->net);
266
267 return 0;
268}
269
270static void ipheth_carrier_check_work(struct work_struct *work)
271{
272 struct ipheth_device *dev = container_of(work, struct ipheth_device,
273 carrier_work.work);
274
275 ipheth_carrier_set(dev);
276 schedule_delayed_work(&dev->carrier_work, IPHETH_CARRIER_CHECK_TIMEOUT);
277}
278
279static int ipheth_get_macaddr(struct ipheth_device *dev)
280{
281 struct usb_device *udev = dev->udev;
282 struct net_device *net = dev->net;
283 int retval;
284
285 retval = usb_control_msg(udev,
286 usb_rcvctrlpipe(udev, IPHETH_CTRL_ENDP),
287 IPHETH_CMD_GET_MACADDR, /* request */
288 0xc0, /* request type */
289 0x00, /* value */
290 0x02, /* index */
291 dev->ctrl_buf,
292 IPHETH_CTRL_BUF_SIZE,
293 IPHETH_CTRL_TIMEOUT);
294 if (retval < 0) {
295 err("%s: usb_control_msg: %d", __func__, retval);
296 } else if (retval < ETH_ALEN) {
297 err("%s: usb_control_msg: short packet: %d bytes",
298 __func__, retval);
299 retval = -EINVAL;
300 } else {
301 memcpy(net->dev_addr, dev->ctrl_buf, ETH_ALEN);
302 retval = 0;
303 }
304
305 return retval;
306}
307
308static int ipheth_rx_submit(struct ipheth_device *dev, gfp_t mem_flags)
309{
310 struct usb_device *udev = dev->udev;
311 int retval;
312
313 usb_fill_bulk_urb(dev->rx_urb, udev,
314 usb_rcvbulkpipe(udev, dev->bulk_in),
315 dev->rx_buf, IPHETH_BUF_SIZE,
316 ipheth_rcvbulk_callback,
317 dev);
318 dev->rx_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
319
320 retval = usb_submit_urb(dev->rx_urb, mem_flags);
321 if (retval)
322 err("%s: usb_submit_urb: %d", __func__, retval);
323 return retval;
324}
325
326static int ipheth_open(struct net_device *net)
327{
328 struct ipheth_device *dev = netdev_priv(net);
329 struct usb_device *udev = dev->udev;
330 int retval = 0;
331
332 usb_set_interface(udev, IPHETH_INTFNUM, IPHETH_ALT_INTFNUM);
333
334 retval = ipheth_carrier_set(dev);
335 if (retval)
336 return retval;
337
338 retval = ipheth_rx_submit(dev, GFP_KERNEL);
339 if (retval)
340 return retval;
341
342 schedule_delayed_work(&dev->carrier_work, IPHETH_CARRIER_CHECK_TIMEOUT);
343 netif_start_queue(net);
344 return retval;
345}
346
347static int ipheth_close(struct net_device *net)
348{
349 struct ipheth_device *dev = netdev_priv(net);
350
351 cancel_delayed_work_sync(&dev->carrier_work);
352 netif_stop_queue(net);
353 return 0;
354}
355
356static int ipheth_tx(struct sk_buff *skb, struct net_device *net)
357{
358 struct ipheth_device *dev = netdev_priv(net);
359 struct usb_device *udev = dev->udev;
360 int retval;
361
362 /* Paranoid */
363 if (skb->len > IPHETH_BUF_SIZE) {
364 WARN(1, "%s: skb too large: %d bytes", __func__, skb->len);
365 dev->net->stats.tx_dropped++;
366 dev_kfree_skb_irq(skb);
367 return NETDEV_TX_OK;
368 }
369
370 memcpy(dev->tx_buf, skb->data, skb->len);
371 if (skb->len < IPHETH_BUF_SIZE)
372 memset(dev->tx_buf + skb->len, 0, IPHETH_BUF_SIZE - skb->len);
373
374 usb_fill_bulk_urb(dev->tx_urb, udev,
375 usb_sndbulkpipe(udev, dev->bulk_out),
376 dev->tx_buf, IPHETH_BUF_SIZE,
377 ipheth_sndbulk_callback,
378 dev);
379 dev->tx_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
380
381 retval = usb_submit_urb(dev->tx_urb, GFP_ATOMIC);
382 if (retval) {
383 err("%s: usb_submit_urb: %d", __func__, retval);
384 dev->net->stats.tx_errors++;
385 dev_kfree_skb_irq(skb);
386 } else {
387 dev->tx_skb = skb;
388
389 dev->net->stats.tx_packets++;
390 dev->net->stats.tx_bytes += skb->len;
391 netif_stop_queue(net);
392 }
393
394 return NETDEV_TX_OK;
395}
396
397static void ipheth_tx_timeout(struct net_device *net)
398{
399 struct ipheth_device *dev = netdev_priv(net);
400
401 err("%s: TX timeout", __func__);
402 dev->net->stats.tx_errors++;
403 usb_unlink_urb(dev->tx_urb);
404}
405
406static struct net_device_stats *ipheth_stats(struct net_device *net)
407{
408 struct ipheth_device *dev = netdev_priv(net);
409 return &dev->net->stats;
410}
411
412static u32 ipheth_ethtool_op_get_link(struct net_device *net)
413{
414 struct ipheth_device *dev = netdev_priv(net);
415 return netif_carrier_ok(dev->net);
416}
417
418static struct ethtool_ops ops = {
419 .get_link = ipheth_ethtool_op_get_link
420};
421
422static const struct net_device_ops ipheth_netdev_ops = {
423 .ndo_open = &ipheth_open,
424 .ndo_stop = &ipheth_close,
425 .ndo_start_xmit = &ipheth_tx,
426 .ndo_tx_timeout = &ipheth_tx_timeout,
427 .ndo_get_stats = &ipheth_stats,
428};
429
430static struct device_type ipheth_type = {
431 .name = "wwan",
432};
433
434static int ipheth_probe(struct usb_interface *intf,
435 const struct usb_device_id *id)
436{
437 struct usb_device *udev = interface_to_usbdev(intf);
438 struct usb_host_interface *hintf;
439 struct usb_endpoint_descriptor *endp;
440 struct ipheth_device *dev;
441 struct net_device *netdev;
442 int i;
443 int retval;
444
445 netdev = alloc_etherdev(sizeof(struct ipheth_device));
446 if (!netdev)
447 return -ENOMEM;
448
449 netdev->netdev_ops = &ipheth_netdev_ops;
450 netdev->watchdog_timeo = IPHETH_TX_TIMEOUT;
451 strcpy(netdev->name, "wwan%d");
452
453 dev = netdev_priv(netdev);
454 dev->udev = udev;
455 dev->net = netdev;
456 dev->intf = intf;
457
458 /* Set up endpoints */
459 hintf = usb_altnum_to_altsetting(intf, IPHETH_ALT_INTFNUM);
460 if (hintf == NULL) {
461 retval = -ENODEV;
462 err("Unable to find alternate settings interface");
463 goto err_endpoints;
464 }
465
466 for (i = 0; i < hintf->desc.bNumEndpoints; i++) {
467 endp = &hintf->endpoint[i].desc;
468 if (usb_endpoint_is_bulk_in(endp))
469 dev->bulk_in = endp->bEndpointAddress;
470 else if (usb_endpoint_is_bulk_out(endp))
471 dev->bulk_out = endp->bEndpointAddress;
472 }
473 if (!(dev->bulk_in && dev->bulk_out)) {
474 retval = -ENODEV;
475 err("Unable to find endpoints");
476 goto err_endpoints;
477 }
478
479 dev->ctrl_buf = kmalloc(IPHETH_CTRL_BUF_SIZE, GFP_KERNEL);
480 if (dev->ctrl_buf == NULL) {
481 retval = -ENOMEM;
482 goto err_alloc_ctrl_buf;
483 }
484
485 retval = ipheth_get_macaddr(dev);
486 if (retval)
487 goto err_get_macaddr;
488
489 INIT_DELAYED_WORK(&dev->carrier_work, ipheth_carrier_check_work);
490
491 retval = ipheth_alloc_urbs(dev);
492 if (retval) {
493 err("error allocating urbs: %d", retval);
494 goto err_alloc_urbs;
495 }
496
497 usb_set_intfdata(intf, dev);
498
499 SET_NETDEV_DEV(netdev, &intf->dev);
500 SET_ETHTOOL_OPS(netdev, &ops);
501 SET_NETDEV_DEVTYPE(netdev, &ipheth_type);
502
503 retval = register_netdev(netdev);
504 if (retval) {
505 err("error registering netdev: %d", retval);
506 retval = -EIO;
507 goto err_register_netdev;
508 }
509
510 dev_info(&intf->dev, "Apple iPhone USB Ethernet device attached\n");
511 return 0;
512
513err_register_netdev:
514 ipheth_free_urbs(dev);
515err_alloc_urbs:
516err_get_macaddr:
517err_alloc_ctrl_buf:
518 kfree(dev->ctrl_buf);
519err_endpoints:
520 free_netdev(netdev);
521 return retval;
522}
523
524static void ipheth_disconnect(struct usb_interface *intf)
525{
526 struct ipheth_device *dev;
527
528 dev = usb_get_intfdata(intf);
529 if (dev != NULL) {
530 unregister_netdev(dev->net);
531 ipheth_kill_urbs(dev);
532 ipheth_free_urbs(dev);
533 kfree(dev->ctrl_buf);
534 free_netdev(dev->net);
535 }
536 usb_set_intfdata(intf, NULL);
537 dev_info(&intf->dev, "Apple iPhone USB Ethernet now disconnected\n");
538}
539
540static struct usb_driver ipheth_driver = {
541 .name = "ipheth",
542 .probe = ipheth_probe,
543 .disconnect = ipheth_disconnect,
544 .id_table = ipheth_table,
545};
546
547static int __init ipheth_init(void)
548{
549 int retval;
550
551 retval = usb_register(&ipheth_driver);
552 if (retval) {
553 err("usb_register failed: %d", retval);
554 return retval;
555 }
556 return 0;
557}
558
559static void __exit ipheth_exit(void)
560{
561 usb_deregister(&ipheth_driver);
562}
563
564module_init(ipheth_init);
565module_exit(ipheth_exit);
566
567MODULE_AUTHOR("Diego Giagio <diego@giagio.com>");
568MODULE_DESCRIPTION("Apple iPhone USB Ethernet driver");
569MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index 52671ea043a7..c4c334d9770f 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -145,6 +145,7 @@ static struct usb_device_id usb_klsi_table[] = {
145 { USB_DEVICE(0x0707, 0x0100) }, /* SMC 2202USB */ 145 { USB_DEVICE(0x0707, 0x0100) }, /* SMC 2202USB */
146 { USB_DEVICE(0x07aa, 0x0001) }, /* Correga K.K. */ 146 { USB_DEVICE(0x07aa, 0x0001) }, /* Correga K.K. */
147 { USB_DEVICE(0x07b8, 0x4000) }, /* D-Link DU-E10 */ 147 { USB_DEVICE(0x07b8, 0x4000) }, /* D-Link DU-E10 */
148 { USB_DEVICE(0x07c9, 0xb010) }, /* Allied Telesyn AT-USB10 USB Ethernet Adapter */
148 { USB_DEVICE(0x0846, 0x1001) }, /* NetGear EA-101 */ 149 { USB_DEVICE(0x0846, 0x1001) }, /* NetGear EA-101 */
149 { USB_DEVICE(0x0846, 0x1002) }, /* NetGear EA-101 */ 150 { USB_DEVICE(0x0846, 0x1002) }, /* NetGear EA-101 */
150 { USB_DEVICE(0x085a, 0x0008) }, /* PortGear Ethernet Adapter */ 151 { USB_DEVICE(0x085a, 0x0008) }, /* PortGear Ethernet Adapter */
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
new file mode 100644
index 000000000000..f1942d69a0d5
--- /dev/null
+++ b/drivers/net/usb/sierra_net.c
@@ -0,0 +1,1004 @@
1/*
2 * USB-to-WWAN Driver for Sierra Wireless modems
3 *
4 * Copyright (C) 2008, 2009, 2010 Paxton Smith, Matthew Safar, Rory Filer
5 * <linux@sierrawireless.com>
6 *
7 * Portions of this based on the cdc_ether driver by David Brownell (2003-2005)
8 * and Ole Andre Vadla Ravnas (ActiveSync) (2006).
9 *
10 * IMPORTANT DISCLAIMER: This driver is not commercially supported by
11 * Sierra Wireless. Use at your own risk.
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 */
27
28#define DRIVER_VERSION "v.2.0"
29#define DRIVER_AUTHOR "Paxton Smith, Matthew Safar, Rory Filer"
30#define DRIVER_DESC "USB-to-WWAN Driver for Sierra Wireless modems"
31static const char driver_name[] = "sierra_net";
32
33/* if defined debug messages enabled */
34/*#define DEBUG*/
35
36#include <linux/module.h>
37#include <linux/etherdevice.h>
38#include <linux/ethtool.h>
39#include <linux/mii.h>
40#include <linux/sched.h>
41#include <linux/timer.h>
42#include <linux/usb.h>
43#include <linux/usb/cdc.h>
44#include <net/ip.h>
45#include <net/udp.h>
46#include <asm/unaligned.h>
47#include <linux/usb/usbnet.h>
48
49#define SWI_USB_REQUEST_GET_FW_ATTR 0x06
50#define SWI_GET_FW_ATTR_MASK 0x08
51
52/* atomic counter partially included in MAC address to make sure 2 devices
53 * do not end up with the same MAC - concept breaks in case of > 255 ifaces
54 */
55static atomic_t iface_counter = ATOMIC_INIT(0);
56
57/*
58 * SYNC Timer Delay definition used to set the expiry time
59 */
60#define SIERRA_NET_SYNCDELAY (2*HZ)
61
62/* Max. MTU supported. The modem buffers are limited to 1500 */
63#define SIERRA_NET_MAX_SUPPORTED_MTU 1500
64
65/* The SIERRA_NET_USBCTL_BUF_LEN defines a buffer size allocated for control
66 * message reception ... and thus the max. received packet.
67 * (May be the cause for parse_hip returning -EINVAL)
68 */
69#define SIERRA_NET_USBCTL_BUF_LEN 1024
70
71/* list of interface numbers - used for constructing interface lists */
72struct sierra_net_iface_info {
73 const u32 infolen; /* number of interface numbers on list */
74 const u8 *ifaceinfo; /* pointer to the array holding the numbers */
75};
76
77struct sierra_net_info_data {
78 u16 rx_urb_size;
79 struct sierra_net_iface_info whitelist;
80};
81
82/* Private data structure */
83struct sierra_net_data {
84
85 u8 ethr_hdr_tmpl[ETH_HLEN]; /* ethernet header template for rx'd pkts */
86
87 u16 link_up; /* air link up or down */
88 u8 tx_hdr_template[4]; /* part of HIP hdr for tx'd packets */
89
90 u8 sync_msg[4]; /* SYNC message */
91 u8 shdwn_msg[4]; /* Shutdown message */
92
93 /* Backpointer to the container */
94 struct usbnet *usbnet;
95
96 u8 ifnum; /* interface number */
97
98/* Bit masks, must be a power of 2 */
99#define SIERRA_NET_EVENT_RESP_AVAIL 0x01
100#define SIERRA_NET_TIMER_EXPIRY 0x02
101 unsigned long kevent_flags;
102 struct work_struct sierra_net_kevent;
103 struct timer_list sync_timer; /* For retrying SYNC sequence */
104};
105
106struct param {
107 int is_present;
108 union {
109 void *ptr;
110 u32 dword;
111 u16 word;
112 u8 byte;
113 };
114};
115
116/* HIP message type */
117#define SIERRA_NET_HIP_EXTENDEDID 0x7F
118#define SIERRA_NET_HIP_HSYNC_ID 0x60 /* Modem -> host */
119#define SIERRA_NET_HIP_RESTART_ID 0x62 /* Modem -> host */
120#define SIERRA_NET_HIP_MSYNC_ID 0x20 /* Host -> modem */
121#define SIERRA_NET_HIP_SHUTD_ID 0x26 /* Host -> modem */
122
123#define SIERRA_NET_HIP_EXT_IP_IN_ID 0x0202
124#define SIERRA_NET_HIP_EXT_IP_OUT_ID 0x0002
125
126/* 3G UMTS Link Sense Indication definitions */
127#define SIERRA_NET_HIP_LSI_UMTSID 0x78
128
129/* Reverse Channel Grant Indication HIP message */
130#define SIERRA_NET_HIP_RCGI 0x64
131
132/* LSI Protocol types */
133#define SIERRA_NET_PROTOCOL_UMTS 0x01
134/* LSI Coverage */
135#define SIERRA_NET_COVERAGE_NONE 0x00
136#define SIERRA_NET_COVERAGE_NOPACKET 0x01
137
138/* LSI Session */
139#define SIERRA_NET_SESSION_IDLE 0x00
140/* LSI Link types */
141#define SIERRA_NET_AS_LINK_TYPE_IPv4 0x00
142
143struct lsi_umts {
144 u8 protocol;
145 u8 unused1;
146 __be16 length;
147 /* eventually use a union for the rest - assume umts for now */
148 u8 coverage;
149 u8 unused2[41];
150 u8 session_state;
151 u8 unused3[33];
152 u8 link_type;
153 u8 pdp_addr_len; /* NW-supplied PDP address len */
 154	u8 pdp_addr[16]; /* NW-supplied PDP address (bigendian) */
155 u8 unused4[23];
156 u8 dns1_addr_len; /* NW-supplied 1st DNS address len (bigendian) */
157 u8 dns1_addr[16]; /* NW-supplied 1st DNS address */
158 u8 dns2_addr_len; /* NW-supplied 2nd DNS address len */
159 u8 dns2_addr[16]; /* NW-supplied 2nd DNS address (bigendian)*/
160 u8 wins1_addr_len; /* NW-supplied 1st Wins address len */
161 u8 wins1_addr[16]; /* NW-supplied 1st Wins address (bigendian)*/
162 u8 wins2_addr_len; /* NW-supplied 2nd Wins address len */
163 u8 wins2_addr[16]; /* NW-supplied 2nd Wins address (bigendian) */
164 u8 unused5[4];
165 u8 gw_addr_len; /* NW-supplied GW address len */
166 u8 gw_addr[16]; /* NW-supplied GW address (bigendian) */
167 u8 reserved[8];
168} __attribute__ ((packed));
169
170#define SIERRA_NET_LSI_COMMON_LEN 4
171#define SIERRA_NET_LSI_UMTS_LEN (sizeof(struct lsi_umts))
172#define SIERRA_NET_LSI_UMTS_STATUS_LEN \
173 (SIERRA_NET_LSI_UMTS_LEN - SIERRA_NET_LSI_COMMON_LEN)
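/* Note: the length field inside struct lsi_umts covers only the UMTS
 * status portion, i.e. everything after the 4-byte common header
 * (protocol, unused1, length) - which is what sierra_net_parse_lsi()
 * checks against SIERRA_NET_LSI_UMTS_STATUS_LEN.
 */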
174
175/* Forward definitions */
176static void sierra_sync_timer(unsigned long syncdata);
177static int sierra_net_change_mtu(struct net_device *net, int new_mtu);
178
179/* Our own net device operations structure */
180static const struct net_device_ops sierra_net_device_ops = {
181 .ndo_open = usbnet_open,
182 .ndo_stop = usbnet_stop,
183 .ndo_start_xmit = usbnet_start_xmit,
184 .ndo_tx_timeout = usbnet_tx_timeout,
185 .ndo_change_mtu = sierra_net_change_mtu,
186 .ndo_set_mac_address = eth_mac_addr,
187 .ndo_validate_addr = eth_validate_addr,
188};
189
190/* get private data associated with passed in usbnet device */
191static inline struct sierra_net_data *sierra_net_get_private(struct usbnet *dev)
192{
193 return (struct sierra_net_data *)dev->data[0];
194}
195
196/* set private data associated with passed in usbnet device */
197static inline void sierra_net_set_private(struct usbnet *dev,
198 struct sierra_net_data *priv)
199{
200 dev->data[0] = (unsigned long)priv;
201}
202
203/* is packet IPv4 */
204static inline int is_ip(struct sk_buff *skb)
205{
206 return (skb->protocol == cpu_to_be16(ETH_P_IP));
207}
208
209/*
210 * check passed in packet and make sure that:
211 * - it is linear (no scatter/gather)
212 * - it is ethernet (mac_header properly set)
213 */
214static int check_ethip_packet(struct sk_buff *skb, struct usbnet *dev)
215{
216 skb_reset_mac_header(skb); /* ethernet header */
217
218 if (skb_is_nonlinear(skb)) {
219 netdev_err(dev->net, "Non-linear buffer - dropping\n");
220 return 0;
221 }
222
223 if (!pskb_may_pull(skb, ETH_HLEN))
224 return 0;
225 skb->protocol = eth_hdr(skb)->h_proto;
226
227 return 1;
228}
229
230static const u8 *save16bit(struct param *p, const u8 *datap)
231{
232 p->is_present = 1;
233 p->word = get_unaligned_be16(datap);
234 return datap + sizeof(p->word);
235}
236
237static const u8 *save8bit(struct param *p, const u8 *datap)
238{
239 p->is_present = 1;
240 p->byte = *datap;
241 return datap + sizeof(p->byte);
242}
243
244/*----------------------------------------------------------------------------*
245 * BEGIN HIP *
246 *----------------------------------------------------------------------------*/
247/* HIP header */
248#define SIERRA_NET_HIP_HDR_LEN 4
249/* Extended HIP header */
250#define SIERRA_NET_HIP_EXT_HDR_LEN 6
251
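/* HIP wire format, as inferred from parse_hip()/build_hip() below
 * (a sketch based on this driver's parsing code, not on a published spec):
 *
 *   bytes 0-1: payload length, big endian (11 bits, or 14 bits when the
 *              extended header is present)
 *   byte  2:   message id (7 bits); the top bit flags one byte of padding
 *   byte  3:   message-specific byte
 *   bytes 4-5: extended message id, big endian (10 bits), present only
 *              when msgid == SIERRA_NET_HIP_EXTENDEDID
 */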
252struct hip_hdr {
253 int hdrlen;
254 struct param payload_len;
255 struct param msgid;
256 struct param msgspecific;
257 struct param extmsgid;
258};
259
260static int parse_hip(const u8 *buf, const u32 buflen, struct hip_hdr *hh)
261{
262 const u8 *curp = buf;
263 int padded;
264
265 if (buflen < SIERRA_NET_HIP_HDR_LEN)
266 return -EPROTO;
267
268 curp = save16bit(&hh->payload_len, curp);
269 curp = save8bit(&hh->msgid, curp);
270 curp = save8bit(&hh->msgspecific, curp);
271
272 padded = hh->msgid.byte & 0x80;
273 hh->msgid.byte &= 0x7F; /* 7 bits */
274
275 hh->extmsgid.is_present = (hh->msgid.byte == SIERRA_NET_HIP_EXTENDEDID);
276 if (hh->extmsgid.is_present) {
277 if (buflen < SIERRA_NET_HIP_EXT_HDR_LEN)
278 return -EPROTO;
279
280 hh->payload_len.word &= 0x3FFF; /* 14 bits */
281
282 curp = save16bit(&hh->extmsgid, curp);
283 hh->extmsgid.word &= 0x03FF; /* 10 bits */
284
285 hh->hdrlen = SIERRA_NET_HIP_EXT_HDR_LEN;
286 } else {
287 hh->payload_len.word &= 0x07FF; /* 11 bits */
288 hh->hdrlen = SIERRA_NET_HIP_HDR_LEN;
289 }
290
291 if (padded) {
292 hh->hdrlen++;
293 hh->payload_len.word--;
294 }
295
296 /* if real packet shorter than the claimed length */
297 if (buflen < (hh->hdrlen + hh->payload_len.word))
298 return -EINVAL;
299
300 return 0;
301}
302
303static void build_hip(u8 *buf, const u16 payloadlen,
304 struct sierra_net_data *priv)
305{
306 /* This does not implement the full HIP header functionality; we
307 * currently build only one kind of header, so keeping it simple is faster.
308 */
309 put_unaligned_be16(payloadlen, buf);
310 memcpy(buf+2, priv->tx_hdr_template, sizeof(priv->tx_hdr_template));
311}
312/*----------------------------------------------------------------------------*
313 * END HIP *
314 *----------------------------------------------------------------------------*/
315
316static int sierra_net_send_cmd(struct usbnet *dev,
317 u8 *cmd, int cmdlen, const char * cmd_name)
318{
319 struct sierra_net_data *priv = sierra_net_get_private(dev);
320 int status;
321
322 status = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
323 USB_CDC_SEND_ENCAPSULATED_COMMAND,
324 USB_DIR_OUT|USB_TYPE_CLASS|USB_RECIP_INTERFACE, 0,
325 priv->ifnum, cmd, cmdlen, USB_CTRL_SET_TIMEOUT);
326
327 if (status != cmdlen && status != -ENODEV)
328 netdev_err(dev->net, "Submit %s failed %d\n", cmd_name, status);
329
330 return status;
331}
332
333static int sierra_net_send_sync(struct usbnet *dev)
334{
335 int status;
336 struct sierra_net_data *priv = sierra_net_get_private(dev);
337
338 dev_dbg(&dev->udev->dev, "%s", __func__);
339
340 status = sierra_net_send_cmd(dev, priv->sync_msg,
341 sizeof(priv->sync_msg), "SYNC");
342
343 return status;
344}
345
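/* Fill in bytes 2-5 of the outgoing HIP header (see build_hip()): a fixed
 * message byte (0x3F), the link context index reported by the modem in the
 * LSI indication, and the big-endian EXT_IP_OUT extended message id.
 */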
346static void sierra_net_set_ctx_index(struct sierra_net_data *priv, u8 ctx_ix)
347{
348 dev_dbg(&(priv->usbnet->udev->dev), "%s %d", __func__, ctx_ix);
349 priv->tx_hdr_template[0] = 0x3F;
350 priv->tx_hdr_template[1] = ctx_ix;
351 *((u16 *)&priv->tx_hdr_template[2]) =
352 cpu_to_be16(SIERRA_NET_HIP_EXT_IP_OUT_ID);
353}
354
355static inline int sierra_net_is_valid_addrlen(u8 len)
356{
357 return (len == sizeof(struct in_addr));
358}
359
360static int sierra_net_parse_lsi(struct usbnet *dev, char *data, int datalen)
361{
362 struct lsi_umts *lsi = (struct lsi_umts *)data;
363
364 if (datalen < sizeof(struct lsi_umts)) {
365 netdev_err(dev->net, "%s: Data length %d, exp %Zu\n",
366 __func__, datalen,
367 sizeof(struct lsi_umts));
368 return -1;
369 }
370
371 if (lsi->length != cpu_to_be16(SIERRA_NET_LSI_UMTS_STATUS_LEN)) {
372 netdev_err(dev->net, "%s: LSI_UMTS_STATUS_LEN %d, exp %u\n",
373 __func__, be16_to_cpu(lsi->length),
374 (u32)SIERRA_NET_LSI_UMTS_STATUS_LEN);
375 return -1;
376 }
377
378 /* Validate the protocol - only support UMTS for now */
379 if (lsi->protocol != SIERRA_NET_PROTOCOL_UMTS) {
380 netdev_err(dev->net, "Protocol unsupported, 0x%02x\n",
381 lsi->protocol);
382 return -1;
383 }
384
385 /* Validate the link type */
386 if (lsi->link_type != SIERRA_NET_AS_LINK_TYPE_IPv4) {
387 netdev_err(dev->net, "Link type unsupported: 0x%02x\n",
388 lsi->link_type);
389 return -1;
390 }
391
392 /* Validate the coverage */
393 if (lsi->coverage == SIERRA_NET_COVERAGE_NONE
394 || lsi->coverage == SIERRA_NET_COVERAGE_NOPACKET) {
395 netdev_err(dev->net, "No coverage, 0x%02x\n", lsi->coverage);
396 return 0;
397 }
398
399 /* Validate the session state */
400 if (lsi->session_state == SIERRA_NET_SESSION_IDLE) {
401 netdev_err(dev->net, "Session idle, 0x%02x\n",
402 lsi->session_state);
403 return 0;
404 }
405
406 /* Set link_sense true */
407 return 1;
408}
409
410static void sierra_net_handle_lsi(struct usbnet *dev, char *data,
411 struct hip_hdr *hh)
412{
413 struct sierra_net_data *priv = sierra_net_get_private(dev);
414 int link_up;
415
416 link_up = sierra_net_parse_lsi(dev, data + hh->hdrlen,
417 hh->payload_len.word);
418 if (link_up < 0) {
419 netdev_err(dev->net, "Invalid LSI\n");
420 return;
421 }
422 if (link_up) {
423 sierra_net_set_ctx_index(priv, hh->msgspecific.byte);
424 priv->link_up = 1;
425 netif_carrier_on(dev->net);
426 } else {
427 priv->link_up = 0;
428 netif_carrier_off(dev->net);
429 }
430}
431
432static void sierra_net_dosync(struct usbnet *dev)
433{
434 int status;
435 struct sierra_net_data *priv = sierra_net_get_private(dev);
436
437 dev_dbg(&dev->udev->dev, "%s", __func__);
438
439 /* tell modem we are ready */
440 status = sierra_net_send_sync(dev);
441 if (status < 0)
442 netdev_err(dev->net,
443 "Send SYNC failed, status %d\n", status);
444 status = sierra_net_send_sync(dev);
445 if (status < 0)
446 netdev_err(dev->net,
447 "Send SYNC failed, status %d\n", status);
448
449 /* Now, start a timer and make sure we get the Restart Indication */
450 priv->sync_timer.function = sierra_sync_timer;
451 priv->sync_timer.data = (unsigned long) dev;
452 priv->sync_timer.expires = jiffies + SIERRA_NET_SYNCDELAY;
453 add_timer(&priv->sync_timer);
454}
455
456static void sierra_net_kevent(struct work_struct *work)
457{
458 struct sierra_net_data *priv =
459 container_of(work, struct sierra_net_data, sierra_net_kevent);
460 struct usbnet *dev = priv->usbnet;
461 int len;
462 int err;
463 u8 *buf;
464 u8 ifnum;
465
466 if (test_bit(SIERRA_NET_EVENT_RESP_AVAIL, &priv->kevent_flags)) {
467 clear_bit(SIERRA_NET_EVENT_RESP_AVAIL, &priv->kevent_flags);
468
469 /* Query the modem for the LSI message */
470 buf = kzalloc(SIERRA_NET_USBCTL_BUF_LEN, GFP_KERNEL);
471 if (!buf) {
472 netdev_err(dev->net,
473 "failed to allocate buf for LS msg\n");
474 return;
475 }
476 ifnum = priv->ifnum;
477 len = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
478 USB_CDC_GET_ENCAPSULATED_RESPONSE,
479 USB_DIR_IN|USB_TYPE_CLASS|USB_RECIP_INTERFACE,
480 0, ifnum, buf, SIERRA_NET_USBCTL_BUF_LEN,
481 USB_CTRL_SET_TIMEOUT);
482
483 if (len < 0) {
484 netdev_err(dev->net,
485 "usb_control_msg failed, status %d\n", len);
486 } else {
487 struct hip_hdr hh;
488
489 dev_dbg(&dev->udev->dev, "%s: Received status message,"
490 " %04x bytes", __func__, len);
491
492 err = parse_hip(buf, len, &hh);
493 if (err) {
494 netdev_err(dev->net, "%s: Bad packet,"
495 " parse result %d\n", __func__, err);
496 kfree(buf);
497 return;
498 }
499
500 /* Validate packet length */
501 if (len != hh.hdrlen + hh.payload_len.word) {
502 netdev_err(dev->net, "%s: Bad packet, received"
503 " %d, expected %d\n", __func__, len,
504 hh.hdrlen + hh.payload_len.word);
505 kfree(buf);
506 return;
507 }
508
509 /* Switch on received message types */
510 switch (hh.msgid.byte) {
511 case SIERRA_NET_HIP_LSI_UMTSID:
512 dev_dbg(&dev->udev->dev, "LSI for ctx:%d",
513 hh.msgspecific.byte);
514 sierra_net_handle_lsi(dev, buf, &hh);
515 break;
516 case SIERRA_NET_HIP_RESTART_ID:
517 dev_dbg(&dev->udev->dev, "Restart reported: %d,"
518 " stopping sync timer",
519 hh.msgspecific.byte);
520 /* Got sync resp - stop timer & clear mask */
521 del_timer_sync(&priv->sync_timer);
522 clear_bit(SIERRA_NET_TIMER_EXPIRY,
523 &priv->kevent_flags);
524 break;
525 case SIERRA_NET_HIP_HSYNC_ID:
526 dev_dbg(&dev->udev->dev, "SYNC received");
527 err = sierra_net_send_sync(dev);
528 if (err < 0)
529 netdev_err(dev->net,
530 "Send SYNC failed %d\n", err);
531 break;
532 case SIERRA_NET_HIP_EXTENDEDID:
533 netdev_err(dev->net, "Unrecognized HIP msg, "
534 "extmsgid 0x%04x\n", hh.extmsgid.word);
535 break;
536 case SIERRA_NET_HIP_RCGI:
537 /* Ignored */
538 break;
539 default:
540 netdev_err(dev->net, "Unrecognized HIP msg, "
541 "msgid 0x%02x\n", hh.msgid.byte);
542 break;
543 }
544 }
545 kfree(buf);
546 }
547 /* The sync timer bit might be set */
548 if (test_bit(SIERRA_NET_TIMER_EXPIRY, &priv->kevent_flags)) {
549 clear_bit(SIERRA_NET_TIMER_EXPIRY, &priv->kevent_flags);
550 dev_dbg(&dev->udev->dev, "Deferred sync timer expiry");
551 sierra_net_dosync(priv->usbnet);
552 }
553
554 if (priv->kevent_flags)
555 dev_dbg(&dev->udev->dev, "sierra_net_kevent done, "
556 "kevent_flags = 0x%lx", priv->kevent_flags);
557}
558
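/* The status URB and sync-timer callbacks run in atomic context, so the
 * blocking usb_control_msg() work in sierra_net_kevent() is deferred to
 * the shared workqueue through this helper.
 */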
559static void sierra_net_defer_kevent(struct usbnet *dev, int work)
560{
561 struct sierra_net_data *priv = sierra_net_get_private(dev);
562
563 set_bit(work, &priv->kevent_flags);
564 schedule_work(&priv->sierra_net_kevent);
565}
566
567/*
568 * Sync Retransmit Timer Handler. On expiry, kick the work queue
569 */
570static void sierra_sync_timer(unsigned long syncdata)
571{
572 struct usbnet *dev = (struct usbnet *)syncdata;
573
574 dev_dbg(&dev->udev->dev, "%s", __func__);
575 /* Kick the shared work queue */
576 sierra_net_defer_kevent(dev, SIERRA_NET_TIMER_EXPIRY);
577}
578
579static void sierra_net_status(struct usbnet *dev, struct urb *urb)
580{
581 struct usb_cdc_notification *event;
582
583 dev_dbg(&dev->udev->dev, "%s", __func__);
584
585 if (urb->actual_length < sizeof *event)
586 return;
587
588 /* Add cases to handle other standard notifications. */
589 event = urb->transfer_buffer;
590 switch (event->bNotificationType) {
591 case USB_CDC_NOTIFY_NETWORK_CONNECTION:
592 case USB_CDC_NOTIFY_SPEED_CHANGE:
593 /* USB 305 sends those */
594 break;
595 case USB_CDC_NOTIFY_RESPONSE_AVAILABLE:
596 sierra_net_defer_kevent(dev, SIERRA_NET_EVENT_RESP_AVAIL);
597 break;
598 default:
599 netdev_err(dev->net, ": unexpected notification %02x!\n",
600 event->bNotificationType);
601 break;
602 }
603}
604
605static void sierra_net_get_drvinfo(struct net_device *net,
606 struct ethtool_drvinfo *info)
607{
608 /* Inherit standard device info */
609 usbnet_get_drvinfo(net, info);
610 strncpy(info->driver, driver_name, sizeof info->driver);
611 strncpy(info->version, DRIVER_VERSION, sizeof info->version);
612}
613
614static u32 sierra_net_get_link(struct net_device *net)
615{
616 struct usbnet *dev = netdev_priv(net);
617 /* Report link is down whenever the interface is down */
618 return sierra_net_get_private(dev)->link_up && netif_running(net);
619}
620
621static struct ethtool_ops sierra_net_ethtool_ops = {
622 .get_drvinfo = sierra_net_get_drvinfo,
623 .get_link = sierra_net_get_link,
624 .get_msglevel = usbnet_get_msglevel,
625 .set_msglevel = usbnet_set_msglevel,
626 .get_settings = usbnet_get_settings,
627 .set_settings = usbnet_set_settings,
628 .nway_reset = usbnet_nway_reset,
629};
630
631/* MTU cannot be more than 1500 bytes, enforce it. */
632static int sierra_net_change_mtu(struct net_device *net, int new_mtu)
633{
634 if (new_mtu > SIERRA_NET_MAX_SUPPORTED_MTU)
635 return -EINVAL;
636
637 return usbnet_change_mtu(net, new_mtu);
638}
639
640static int is_whitelisted(const u8 ifnum,
641 const struct sierra_net_iface_info *whitelist)
642{
643 if (whitelist) {
644 const u8 *list = whitelist->ifaceinfo;
645 int i;
646
647 for (i = 0; i < whitelist->infolen; i++) {
648 if (list[i] == ifnum)
649 return 1;
650 }
651 }
652 return 0;
653}
654
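/* Read the 16-bit firmware attribute word via a vendor-specific control
 * request.  The result lands in a kmalloc'd bounce buffer rather than a
 * stack variable, since USB transfer buffers must be DMA-able.
 */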
655static int sierra_net_get_fw_attr(struct usbnet *dev, u16 *datap)
656{
657 int result = 0;
658 u16 *attrdata;
659
660 attrdata = kmalloc(sizeof(*attrdata), GFP_KERNEL);
661 if (!attrdata)
662 return -ENOMEM;
663
664 result = usb_control_msg(
665 dev->udev,
666 usb_rcvctrlpipe(dev->udev, 0),
667 /* _u8 vendor specific request */
668 SWI_USB_REQUEST_GET_FW_ATTR,
669 USB_DIR_IN | USB_TYPE_VENDOR, /* __u8 request type */
670 0x0000, /* __u16 value not used */
671 0x0000, /* __u16 index not used */
672 attrdata, /* char *data */
673 sizeof(*attrdata), /* __u16 size */
674 USB_CTRL_SET_TIMEOUT); /* int timeout */
675
676 if (result < 0) {
677 kfree(attrdata);
678 return -EIO;
679 }
680
681 *datap = *attrdata;
682
683 kfree(attrdata);
684 return result;
685}
686
687/*
688 * Collects the bulk in/out endpoints and the status endpoint.
689 */
690static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
691{
692 u8 ifacenum;
693 u8 numendpoints;
694 u16 fwattr = 0;
695 int status;
696 struct ethhdr *eth;
697 struct sierra_net_data *priv;
698 static const u8 sync_tmplate[sizeof(priv->sync_msg)] = {
699 0x00, 0x00, SIERRA_NET_HIP_MSYNC_ID, 0x00};
700 static const u8 shdwn_tmplate[sizeof(priv->shdwn_msg)] = {
701 0x00, 0x00, SIERRA_NET_HIP_SHUTD_ID, 0x00};
702
703 struct sierra_net_info_data *data =
704 (struct sierra_net_info_data *)dev->driver_info->data;
705
706 dev_dbg(&dev->udev->dev, "%s", __func__);
707
708 ifacenum = intf->cur_altsetting->desc.bInterfaceNumber;
709 /* We only accept certain interfaces */
710 if (!is_whitelisted(ifacenum, &data->whitelist)) {
711 dev_dbg(&dev->udev->dev, "Ignoring interface: %d", ifacenum);
712 return -ENODEV;
713 }
714 numendpoints = intf->cur_altsetting->desc.bNumEndpoints;
715 /* We have three endpoints, bulk in and out, and a status */
716 if (numendpoints != 3) {
717 dev_err(&dev->udev->dev, "Expected 3 endpoints, found: %d",
718 numendpoints);
719 return -ENODEV;
720 }
721 /* Status endpoint set in usbnet_get_endpoints() */
722 dev->status = NULL;
723 status = usbnet_get_endpoints(dev, intf);
724 if (status < 0) {
725 dev_err(&dev->udev->dev, "Error in usbnet_get_endpoints (%d)",
726 status);
727 return -ENODEV;
728 }
729 /* Initialize sierra private data */
730 priv = kzalloc(sizeof *priv, GFP_KERNEL);
731 if (!priv) {
732 dev_err(&dev->udev->dev, "No memory");
733 return -ENOMEM;
734 }
735
736 priv->usbnet = dev;
737 priv->ifnum = ifacenum;
738 dev->net->netdev_ops = &sierra_net_device_ops;
739
740 /* change MAC addr to include ifacenum, and to be unique */
741 dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
742 dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
743
744 /* we will have to manufacture ethernet headers, prepare template */
745 eth = (struct ethhdr *)priv->ethr_hdr_tmpl;
746 memcpy(&eth->h_dest, dev->net->dev_addr, ETH_ALEN);
747 eth->h_proto = cpu_to_be16(ETH_P_IP);
748
749 /* prepare shutdown message template */
750 memcpy(priv->shdwn_msg, shdwn_tmplate, sizeof(priv->shdwn_msg));
751 /* set context index initially to 0 - prepares tx hdr template */
752 sierra_net_set_ctx_index(priv, 0);
753
754 /* decrease the rx_urb_size to 4k when not running at high speed (USB 1.1) */
755 dev->rx_urb_size = data->rx_urb_size;
756 if (dev->udev->speed != USB_SPEED_HIGH)
757 dev->rx_urb_size = min_t(size_t, 4096, data->rx_urb_size);
758
759 dev->net->hard_header_len += SIERRA_NET_HIP_EXT_HDR_LEN;
760 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
761
762 /* Set up the netdev */
763 dev->net->flags |= IFF_NOARP;
764 dev->net->ethtool_ops = &sierra_net_ethtool_ops;
765 netif_carrier_off(dev->net);
766
767 sierra_net_set_private(dev, priv);
768
769 priv->kevent_flags = 0;
770
771 /* Use the shared workqueue */
772 INIT_WORK(&priv->sierra_net_kevent, sierra_net_kevent);
773
774 /* Only need to do this once */
775 init_timer(&priv->sync_timer);
776
777 /* verify fw attributes */
778 status = sierra_net_get_fw_attr(dev, &fwattr);
779 dev_dbg(&dev->udev->dev, "Fw attr: %x\n", fwattr);
780
781 /* test whether firmware supports DHCP */
782 if (!(status == sizeof(fwattr) && (fwattr & SWI_GET_FW_ATTR_MASK))) {
783 /* found incompatible firmware version */
784 dev_err(&dev->udev->dev, "Incompatible driver and firmware"
785 " versions\n");
786 kfree(priv);
787 return -ENODEV;
788 }
789 /* prepare sync message from template */
790 memcpy(priv->sync_msg, sync_tmplate, sizeof(priv->sync_msg));
791
792 /* initiate the sync sequence */
793 sierra_net_dosync(dev);
794
795 return 0;
796}
797
798static void sierra_net_unbind(struct usbnet *dev, struct usb_interface *intf)
799{
800 int status;
801 struct sierra_net_data *priv = sierra_net_get_private(dev);
802
803 dev_dbg(&dev->udev->dev, "%s", __func__);
804
805 /* Kill the timer then flush the work queue */
806 del_timer_sync(&priv->sync_timer);
807
808 flush_scheduled_work();
809
810 /* tell modem we are going away */
811 status = sierra_net_send_cmd(dev, priv->shdwn_msg,
812 sizeof(priv->shdwn_msg), "Shutdown");
813 if (status < 0)
814 netdev_err(dev->net,
815 "usb_control_msg failed, status %d\n", status);
816
817 sierra_net_set_private(dev, NULL);
818
819 kfree(priv);
820}
821
822static struct sk_buff *sierra_net_skb_clone(struct usbnet *dev,
823 struct sk_buff *skb, int len)
824{
825 struct sk_buff *new_skb;
826
827 /* clone skb */
828 new_skb = skb_clone(skb, GFP_ATOMIC);
829
830 /* remove len bytes from original */
831 skb_pull(skb, len);
832
833 /* trim next packet to its length */
834 if (new_skb) {
835 skb_trim(new_skb, len);
836 } else {
837 if (netif_msg_rx_err(dev))
838 netdev_err(dev->net, "failed to get skb\n");
839 dev->net->stats.rx_dropped++;
840 }
841
842 return new_skb;
843}
844
845/* ---------------------------- Receive data path ----------------------*/
846static int sierra_net_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
847{
848 int err;
849 struct hip_hdr hh;
850 struct sk_buff *new_skb;
851
852 dev_dbg(&dev->udev->dev, "%s", __func__);
853
854 /* could contain multiple packets */
855 while (likely(skb->len)) {
856 err = parse_hip(skb->data, skb->len, &hh);
857 if (err) {
858 if (netif_msg_rx_err(dev))
859 netdev_err(dev->net, "Invalid HIP header %d\n",
860 err);
861 /* dev->net->stats.rx_errors incremented by caller */
862 dev->net->stats.rx_length_errors++;
863 return 0;
864 }
865
866 /* Validate Extended HIP header */
867 if (!hh.extmsgid.is_present
868 || hh.extmsgid.word != SIERRA_NET_HIP_EXT_IP_IN_ID) {
869 if (netif_msg_rx_err(dev))
870 netdev_err(dev->net, "HIP/ETH: Invalid pkt\n");
871
872 dev->net->stats.rx_frame_errors++;
873 /* dev->net->stats.rx_errors incremented by caller */
874 return 0;
875 }
876
877 skb_pull(skb, hh.hdrlen);
878
879 /* We are going to accept this packet, prepare it */
880 memcpy(skb->data, sierra_net_get_private(dev)->ethr_hdr_tmpl,
881 ETH_HLEN);
882
883 /* Last packet in batch handled by usbnet */
884 if (hh.payload_len.word == skb->len)
885 return 1;
886
887 new_skb = sierra_net_skb_clone(dev, skb, hh.payload_len.word);
888 if (new_skb)
889 usbnet_skb_return(dev, new_skb);
890
891 } /* while */
892
893 return 0;
894}
895
896/* ---------------------------- Transmit data path ----------------------*/
897static struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
898 gfp_t flags)
899{
900 struct sierra_net_data *priv = sierra_net_get_private(dev);
901 u16 len;
902 bool need_tail;
903
904 dev_dbg(&dev->udev->dev, "%s", __func__);
905 if (priv->link_up && check_ethip_packet(skb, dev) && is_ip(skb)) {
906 /* enough head room as is? */
907 if (SIERRA_NET_HIP_EXT_HDR_LEN <= skb_headroom(skb)) {
908 /* Save the Eth/IP length and set up HIP hdr */
909 len = skb->len;
910 skb_push(skb, SIERRA_NET_HIP_EXT_HDR_LEN);
911 /* Handle ZLP issue */
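 /* A transfer whose length is an exact multiple of the bulk-out max
  * packet size would need a terminating zero-length packet; the pad
  * byte appended below avoids that case (the exact "ZLP issue" is not
  * documented here).
  */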
912 need_tail = ((len + SIERRA_NET_HIP_EXT_HDR_LEN)
913 % dev->maxpacket == 0);
914 if (need_tail) {
915 if (unlikely(skb_tailroom(skb) == 0)) {
916 netdev_err(dev->net, "tx_fixup: "
917 "no room for packet\n");
918 dev_kfree_skb_any(skb);
919 return NULL;
920 } else {
921 skb->data[skb->len] = 0;
922 __skb_put(skb, 1);
923 len = len + 1;
924 }
925 }
926 build_hip(skb->data, len, priv);
927 return skb;
928 } else {
929 /*
930 * Not enough headroom for the HIP header - compensate in the future if necessary
931 */
932 netdev_err(dev->net, "tx_fixup: no room for HIP\n");
933 } /* headroom */
934 }
935
936 if (!priv->link_up)
937 dev->net->stats.tx_carrier_errors++;
938
939 /* tx_dropped incremented by usbnet */
940
941 /* filter the packet out, release it */
942 dev_kfree_skb_any(skb);
943 return NULL;
944}
945
946static const u8 sierra_net_ifnum_list[] = { 7, 10, 11 };
947static const struct sierra_net_info_data sierra_net_info_data_68A3 = {
948 .rx_urb_size = 8 * 1024,
949 .whitelist = {
950 .infolen = ARRAY_SIZE(sierra_net_ifnum_list),
951 .ifaceinfo = sierra_net_ifnum_list
952 }
953};
954
955static const struct driver_info sierra_net_info_68A3 = {
956 .description = "Sierra Wireless USB-to-WWAN Modem",
957 .flags = FLAG_WWAN | FLAG_SEND_ZLP,
958 .bind = sierra_net_bind,
959 .unbind = sierra_net_unbind,
960 .status = sierra_net_status,
961 .rx_fixup = sierra_net_rx_fixup,
962 .tx_fixup = sierra_net_tx_fixup,
963 .data = (unsigned long)&sierra_net_info_data_68A3,
964};
965
966static const struct usb_device_id products[] = {
967 {USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless USB-to-WWAN modem */
968 .driver_info = (unsigned long) &sierra_net_info_68A3},
969
970 {}, /* last item */
971};
972MODULE_DEVICE_TABLE(usb, products);
973
974/* We are based on usbnet, so let it handle the USB driver specifics */
975static struct usb_driver sierra_net_driver = {
976 .name = "sierra_net",
977 .id_table = products,
978 .probe = usbnet_probe,
979 .disconnect = usbnet_disconnect,
980 .suspend = usbnet_suspend,
981 .resume = usbnet_resume,
982 .no_dynamic_id = 1,
983};
984
985static int __init sierra_net_init(void)
986{
987 BUILD_BUG_ON(FIELD_SIZEOF(struct usbnet, data)
988 < sizeof(struct cdc_state));
989
990 return usb_register(&sierra_net_driver);
991}
992
993static void __exit sierra_net_exit(void)
994{
995 usb_deregister(&sierra_net_driver);
996}
997
998module_exit(sierra_net_exit);
999module_init(sierra_net_init);
1000
1001MODULE_AUTHOR(DRIVER_AUTHOR);
1002MODULE_DESCRIPTION(DRIVER_DESC);
1003MODULE_VERSION(DRIVER_VERSION);
1004MODULE_LICENSE("GPL");
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index f9f0730b53d5..5ec542dd5b50 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -187,7 +187,6 @@ tx_drop:
187 return NETDEV_TX_OK; 187 return NETDEV_TX_OK;
188 188
189rx_drop: 189rx_drop:
190 kfree_skb(skb);
191 rcv_stats->rx_dropped++; 190 rcv_stats->rx_dropped++;
192 return NETDEV_TX_OK; 191 return NETDEV_TX_OK;
193} 192}
diff --git a/drivers/net/wireless/airo_cs.c b/drivers/net/wireless/airo_cs.c
index f6036fb42319..33bdc6a84e81 100644
--- a/drivers/net/wireless/airo_cs.c
+++ b/drivers/net/wireless/airo_cs.c
@@ -75,42 +75,7 @@ static void airo_release(struct pcmcia_device *link);
75 75
76static void airo_detach(struct pcmcia_device *p_dev); 76static void airo_detach(struct pcmcia_device *p_dev);
77 77
78/*
79 You'll also need to prototype all the functions that will actually
80 be used to talk to your device. See 'pcmem_cs' for a good example
81 of a fully self-sufficient driver; the other drivers rely more or
82 less on other parts of the kernel.
83*/
84
85/*
86 A linked list of "instances" of the aironet device. Each actual
87 PCMCIA card corresponds to one device instance, and is described
88 by one struct pcmcia_device structure (defined in ds.h).
89
90 You may not want to use a linked list for this -- for example, the
91 memory card driver uses an array of struct pcmcia_device pointers,
92 where minor device numbers are used to derive the corresponding
93 array index.
94*/
95
96/*
97 A driver needs to provide a dev_node_t structure for each device
98 on a card. In some cases, there is only one device per card (for
99 example, ethernet cards, modems). In other cases, there may be
100 many actual or logical devices (SCSI adapters, memory cards with
101 multiple partitions). The dev_node_t structures need to be kept
102 in a linked list starting at the 'dev' field of a struct pcmcia_device
103 structure. We allocate them in the card's private data structure,
104 because they generally shouldn't be allocated dynamically.
105
106 In this case, we also provide a flag to indicate if a device is
107 "stopped" due to a power management event, or card ejection. The
108 device IO routines can use a flag like this to throttle IO to a
109 card that is not ready to accept it.
110*/
111
112typedef struct local_info_t { 78typedef struct local_info_t {
113 dev_node_t node;
114 struct net_device *eth_dev; 79 struct net_device *eth_dev;
115} local_info_t; 80} local_info_t;
116 81
@@ -132,10 +97,6 @@ static int airo_probe(struct pcmcia_device *p_dev)
132 97
133 dev_dbg(&p_dev->dev, "airo_attach()\n"); 98 dev_dbg(&p_dev->dev, "airo_attach()\n");
134 99
135 /* Interrupt setup */
136 p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
137 p_dev->irq.Handler = NULL;
138
139 /* 100 /*
140 General socket configuration defaults can go here. In this 101 General socket configuration defaults can go here. In this
141 client, we assume very little, and rely on the CIS for almost 102 client, we assume very little, and rely on the CIS for almost
@@ -212,9 +173,7 @@ static int airo_cs_config_check(struct pcmcia_device *p_dev,
212 else if (dflt->vpp1.present & (1<<CISTPL_POWER_VNOM)) 173 else if (dflt->vpp1.present & (1<<CISTPL_POWER_VNOM))
213 p_dev->conf.Vpp = dflt->vpp1.param[CISTPL_POWER_VNOM]/10000; 174 p_dev->conf.Vpp = dflt->vpp1.param[CISTPL_POWER_VNOM]/10000;
214 175
215 /* Do we need to allocate an interrupt? */ 176 p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
216 if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1)
217 p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
218 177
219 /* IO window settings */ 178 /* IO window settings */
220 p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0; 179 p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
@@ -300,16 +259,8 @@ static int airo_config(struct pcmcia_device *link)
300 if (ret) 259 if (ret)
301 goto failed; 260 goto failed;
302 261
303 /* 262 if (!link->irq)
304 Allocate an interrupt line. Note that this does not assign a 263 goto failed;
305 handler to the interrupt, unless the 'Handler' member of the
306 irq structure is initialized.
307 */
308 if (link->conf.Attributes & CONF_ENABLE_IRQ) {
309 ret = pcmcia_request_irq(link, &link->irq);
310 if (ret)
311 goto failed;
312 }
313 264
314 /* 265 /*
315 This actually configures the PCMCIA socket -- setting up 266 This actually configures the PCMCIA socket -- setting up
@@ -320,26 +271,17 @@ static int airo_config(struct pcmcia_device *link)
320 if (ret) 271 if (ret)
321 goto failed; 272 goto failed;
322 ((local_info_t *)link->priv)->eth_dev = 273 ((local_info_t *)link->priv)->eth_dev =
323 init_airo_card(link->irq.AssignedIRQ, 274 init_airo_card(link->irq,
324 link->io.BasePort1, 1, &link->dev); 275 link->io.BasePort1, 1, &link->dev);
325 if (!((local_info_t *)link->priv)->eth_dev) 276 if (!((local_info_t *)link->priv)->eth_dev)
326 goto failed; 277 goto failed;
327 278
328 /*
329 At this point, the dev_node_t structure(s) need to be
330 initialized and arranged in a linked list at link->dev_node.
331 */
332 strcpy(dev->node.dev_name, ((local_info_t *)link->priv)->eth_dev->name);
333 dev->node.major = dev->node.minor = 0;
334 link->dev_node = &dev->node;
335
336 /* Finally, report what we've done */ 279 /* Finally, report what we've done */
337 printk(KERN_INFO "%s: index 0x%02x: ", 280 dev_info(&link->dev, "index 0x%02x: ",
338 dev->node.dev_name, link->conf.ConfigIndex); 281 link->conf.ConfigIndex);
339 if (link->conf.Vpp) 282 if (link->conf.Vpp)
340 printk(", Vpp %d.%d", link->conf.Vpp/10, link->conf.Vpp%10); 283 printk(", Vpp %d.%d", link->conf.Vpp/10, link->conf.Vpp%10);
341 if (link->conf.Attributes & CONF_ENABLE_IRQ) 284 printk(", irq %d", link->irq);
342 printk(", irq %d", link->irq.AssignedIRQ);
343 if (link->io.NumPorts1) 285 if (link->io.NumPorts1)
344 printk(", io 0x%04x-0x%04x", link->io.BasePort1, 286 printk(", io 0x%04x-0x%04x", link->io.BasePort1,
345 link->io.BasePort1+link->io.NumPorts1-1); 287 link->io.BasePort1+link->io.NumPorts1-1);
diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c
index 99a6da464bd3..e1c2fcaa8bed 100644
--- a/drivers/net/wireless/ath/ar9170/usb.c
+++ b/drivers/net/wireless/ath/ar9170/usb.c
@@ -727,12 +727,16 @@ static void ar9170_usb_firmware_failed(struct ar9170_usb *aru)
727{ 727{
728 struct device *parent = aru->udev->dev.parent; 728 struct device *parent = aru->udev->dev.parent;
729 729
730 complete(&aru->firmware_loading_complete);
731
730 /* unbind anything failed */ 732 /* unbind anything failed */
731 if (parent) 733 if (parent)
732 down(&parent->sem); 734 down(&parent->sem);
733 device_release_driver(&aru->udev->dev); 735 device_release_driver(&aru->udev->dev);
734 if (parent) 736 if (parent)
735 up(&parent->sem); 737 up(&parent->sem);
738
739 usb_put_dev(aru->udev);
736} 740}
737 741
738static void ar9170_usb_firmware_finish(const struct firmware *fw, void *context) 742static void ar9170_usb_firmware_finish(const struct firmware *fw, void *context)
@@ -761,6 +765,8 @@ static void ar9170_usb_firmware_finish(const struct firmware *fw, void *context)
761 if (err) 765 if (err)
762 goto err_unrx; 766 goto err_unrx;
763 767
768 complete(&aru->firmware_loading_complete);
769 usb_put_dev(aru->udev);
764 return; 770 return;
765 771
766 err_unrx: 772 err_unrx:
@@ -858,6 +864,7 @@ static int ar9170_usb_probe(struct usb_interface *intf,
858 init_usb_anchor(&aru->tx_pending); 864 init_usb_anchor(&aru->tx_pending);
859 init_usb_anchor(&aru->tx_submitted); 865 init_usb_anchor(&aru->tx_submitted);
860 init_completion(&aru->cmd_wait); 866 init_completion(&aru->cmd_wait);
867 init_completion(&aru->firmware_loading_complete);
861 spin_lock_init(&aru->tx_urb_lock); 868 spin_lock_init(&aru->tx_urb_lock);
862 869
863 aru->tx_pending_urbs = 0; 870 aru->tx_pending_urbs = 0;
@@ -877,6 +884,7 @@ static int ar9170_usb_probe(struct usb_interface *intf,
877 if (err) 884 if (err)
878 goto err_freehw; 885 goto err_freehw;
879 886
887 usb_get_dev(aru->udev);
880 return request_firmware_nowait(THIS_MODULE, 1, "ar9170.fw", 888 return request_firmware_nowait(THIS_MODULE, 1, "ar9170.fw",
881 &aru->udev->dev, GFP_KERNEL, aru, 889 &aru->udev->dev, GFP_KERNEL, aru,
882 ar9170_usb_firmware_step2); 890 ar9170_usb_firmware_step2);
@@ -896,6 +904,9 @@ static void ar9170_usb_disconnect(struct usb_interface *intf)
896 return; 904 return;
897 905
898 aru->common.state = AR9170_IDLE; 906 aru->common.state = AR9170_IDLE;
907
908 wait_for_completion(&aru->firmware_loading_complete);
909
899 ar9170_unregister(&aru->common); 910 ar9170_unregister(&aru->common);
900 ar9170_usb_cancel_urbs(aru); 911 ar9170_usb_cancel_urbs(aru);
901 912
diff --git a/drivers/net/wireless/ath/ar9170/usb.h b/drivers/net/wireless/ath/ar9170/usb.h
index a2ce3b169ceb..919b06046eb3 100644
--- a/drivers/net/wireless/ath/ar9170/usb.h
+++ b/drivers/net/wireless/ath/ar9170/usb.h
@@ -71,6 +71,7 @@ struct ar9170_usb {
71 unsigned int tx_pending_urbs; 71 unsigned int tx_pending_urbs;
72 72
73 struct completion cmd_wait; 73 struct completion cmd_wait;
74 struct completion firmware_loading_complete;
74 int readlen; 75 int readlen;
75 u8 *readbuf; 76 u8 *readbuf;
76 77
diff --git a/drivers/net/wireless/atmel_cs.c b/drivers/net/wireless/atmel_cs.c
index 32407911842f..c2746fc7f2be 100644
--- a/drivers/net/wireless/atmel_cs.c
+++ b/drivers/net/wireless/atmel_cs.c
@@ -85,41 +85,7 @@ static void atmel_release(struct pcmcia_device *link);
85 85
86static void atmel_detach(struct pcmcia_device *p_dev); 86static void atmel_detach(struct pcmcia_device *p_dev);
87 87
88/*
89 You'll also need to prototype all the functions that will actually
90 be used to talk to your device. See 'pcmem_cs' for a good example
91 of a fully self-sufficient driver; the other drivers rely more or
92 less on other parts of the kernel.
93*/
94
95/*
96 A linked list of "instances" of the atmelnet device. Each actual
97 PCMCIA card corresponds to one device instance, and is described
98 by one struct pcmcia_device structure (defined in ds.h).
99
100 You may not want to use a linked list for this -- for example, the
101 memory card driver uses an array of struct pcmcia_device pointers, where minor
102 device numbers are used to derive the corresponding array index.
103*/
104
105/*
106 A driver needs to provide a dev_node_t structure for each device
107 on a card. In some cases, there is only one device per card (for
108 example, ethernet cards, modems). In other cases, there may be
109 many actual or logical devices (SCSI adapters, memory cards with
110 multiple partitions). The dev_node_t structures need to be kept
111 in a linked list starting at the 'dev' field of a struct pcmcia_device
112 structure. We allocate them in the card's private data structure,
113 because they generally shouldn't be allocated dynamically.
114
115 In this case, we also provide a flag to indicate if a device is
116 "stopped" due to a power management event, or card ejection. The
117 device IO routines can use a flag like this to throttle IO to a
118 card that is not ready to accept it.
119*/
120
121typedef struct local_info_t { 88typedef struct local_info_t {
122 dev_node_t node;
123 struct net_device *eth_dev; 89 struct net_device *eth_dev;
124} local_info_t; 90} local_info_t;
125 91
@@ -141,10 +107,6 @@ static int atmel_probe(struct pcmcia_device *p_dev)
141 107
142 dev_dbg(&p_dev->dev, "atmel_attach()\n"); 108 dev_dbg(&p_dev->dev, "atmel_attach()\n");
143 109
144 /* Interrupt setup */
145 p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
146 p_dev->irq.Handler = NULL;
147
148 /* 110 /*
149 General socket configuration defaults can go here. In this 111 General socket configuration defaults can go here. In this
150 client, we assume very little, and rely on the CIS for almost 112 client, we assume very little, and rely on the CIS for almost
@@ -226,9 +188,7 @@ static int atmel_config_check(struct pcmcia_device *p_dev,
226 else if (dflt->vpp1.present & (1<<CISTPL_POWER_VNOM)) 188 else if (dflt->vpp1.present & (1<<CISTPL_POWER_VNOM))
227 p_dev->conf.Vpp = dflt->vpp1.param[CISTPL_POWER_VNOM]/10000; 189 p_dev->conf.Vpp = dflt->vpp1.param[CISTPL_POWER_VNOM]/10000;
228 190
229 /* Do we need to allocate an interrupt? */ 191 p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
230 if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1)
231 p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
232 192
233 /* IO window settings */ 193 /* IO window settings */
234 p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0; 194 p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
@@ -278,15 +238,9 @@ static int atmel_config(struct pcmcia_device *link)
278 if (pcmcia_loop_config(link, atmel_config_check, NULL)) 238 if (pcmcia_loop_config(link, atmel_config_check, NULL))
279 goto failed; 239 goto failed;
280 240
281 /* 241 if (!link->irq) {
282 Allocate an interrupt line. Note that this does not assign a 242 dev_err(&link->dev, "atmel: cannot assign IRQ: check that CONFIG_ISA is set in kernel config.");
283 handler to the interrupt, unless the 'Handler' member of the 243 goto failed;
284 irq structure is initialized.
285 */
286 if (link->conf.Attributes & CONF_ENABLE_IRQ) {
287 ret = pcmcia_request_irq(link, &link->irq);
288 if (ret)
289 goto failed;
290 } 244 }
291 245
292 /* 246 /*
@@ -298,14 +252,8 @@ static int atmel_config(struct pcmcia_device *link)
298 if (ret) 252 if (ret)
299 goto failed; 253 goto failed;
300 254
301 if (link->irq.AssignedIRQ == 0) {
302 printk(KERN_ALERT
303 "atmel: cannot assign IRQ: check that CONFIG_ISA is set in kernel config.");
304 goto failed;
305 }
306
307 ((local_info_t*)link->priv)->eth_dev = 255 ((local_info_t*)link->priv)->eth_dev =
308 init_atmel_card(link->irq.AssignedIRQ, 256 init_atmel_card(link->irq,
309 link->io.BasePort1, 257 link->io.BasePort1,
310 did ? did->driver_info : ATMEL_FW_TYPE_NONE, 258 did ? did->driver_info : ATMEL_FW_TYPE_NONE,
311 &link->dev, 259 &link->dev,
@@ -315,14 +263,6 @@ static int atmel_config(struct pcmcia_device *link)
315 goto failed; 263 goto failed;
316 264
317 265
318 /*
319 At this point, the dev_node_t structure(s) need to be
320 initialized and arranged in a linked list at link->dev_node.
321 */
322 strcpy(dev->node.dev_name, ((local_info_t*)link->priv)->eth_dev->name );
323 dev->node.major = dev->node.minor = 0;
324 link->dev_node = &dev->node;
325
326 return 0; 266 return 0;
327 267
328 failed: 268 failed:
diff --git a/drivers/net/wireless/b43/pcmcia.c b/drivers/net/wireless/b43/pcmcia.c
index 609e7051e018..0e99b634267c 100644
--- a/drivers/net/wireless/b43/pcmcia.c
+++ b/drivers/net/wireless/b43/pcmcia.c
@@ -98,10 +98,7 @@ static int __devinit b43_pcmcia_probe(struct pcmcia_device *dev)
98 if (res != 0) 98 if (res != 0)
99 goto err_disable; 99 goto err_disable;
100 100
101 dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; 101 if (!dev->irq)
102 dev->irq.Handler = NULL; /* The handler is registered later. */
103 res = pcmcia_request_irq(dev, &dev->irq);
104 if (res != 0)
105 goto err_disable; 102 goto err_disable;
106 103
107 res = pcmcia_request_configuration(dev, &dev->conf); 104 res = pcmcia_request_configuration(dev, &dev->conf);
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index a36501dbbe02..db72461c486b 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -39,7 +39,6 @@ MODULE_PARM_DESC(ignore_cis_vcc, "Ignore broken CIS VCC entry");
39 39
40/* struct local_info::hw_priv */ 40/* struct local_info::hw_priv */
41struct hostap_cs_priv { 41struct hostap_cs_priv {
42 dev_node_t node;
43 struct pcmcia_device *link; 42 struct pcmcia_device *link;
44 int sandisk_connectplus; 43 int sandisk_connectplus;
45}; 44};
@@ -556,15 +555,7 @@ static int prism2_config_check(struct pcmcia_device *p_dev,
556 p_dev->conf.Vpp = dflt->vpp1.param[CISTPL_POWER_VNOM] / 10000; 555 p_dev->conf.Vpp = dflt->vpp1.param[CISTPL_POWER_VNOM] / 10000;
557 556
558 /* Do we need to allocate an interrupt? */ 557 /* Do we need to allocate an interrupt? */
559 if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1) 558 p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
560 p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
561 else if (!(p_dev->conf.Attributes & CONF_ENABLE_IRQ)) {
562 /* At least Compaq WL200 does not have IRQInfo1 set,
563 * but it does not work without interrupts.. */
564 printk(KERN_WARNING "Config has no IRQ info, but trying to "
565 "enable IRQ anyway..\n");
566 p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
567 }
568 559
569 /* IO window settings */ 560 /* IO window settings */
570 PDEBUG(DEBUG_EXTRA, "IO window settings: cfg->io.nwin=%d " 561 PDEBUG(DEBUG_EXTRA, "IO window settings: cfg->io.nwin=%d "
@@ -633,21 +624,10 @@ static int prism2_config(struct pcmcia_device *link)
633 local = iface->local; 624 local = iface->local;
634 local->hw_priv = hw_priv; 625 local->hw_priv = hw_priv;
635 hw_priv->link = link; 626 hw_priv->link = link;
636 strcpy(hw_priv->node.dev_name, dev->name);
637 link->dev_node = &hw_priv->node;
638 627
639 /* 628 ret = pcmcia_request_irq(link, prism2_interrupt);
640 * Allocate an interrupt line. Note that this does not assign a 629 if (ret)
641 * handler to the interrupt, unless the 'Handler' member of the 630 goto failed;
642 * irq structure is initialized.
643 */
644 if (link->conf.Attributes & CONF_ENABLE_IRQ) {
645 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
646 link->irq.Handler = prism2_interrupt;
647 ret = pcmcia_request_irq(link, &link->irq);
648 if (ret)
649 goto failed;
650 }
651 631
652 /* 632 /*
653 * This actually configures the PCMCIA socket -- setting up 633 * This actually configures the PCMCIA socket -- setting up
@@ -658,7 +638,7 @@ static int prism2_config(struct pcmcia_device *link)
658 if (ret) 638 if (ret)
659 goto failed; 639 goto failed;
660 640
661 dev->irq = link->irq.AssignedIRQ; 641 dev->irq = link->irq;
662 dev->base_addr = link->io.BasePort1; 642 dev->base_addr = link->io.BasePort1;
663 643
664 /* Finally, report what we've done */ 644 /* Finally, report what we've done */
@@ -668,7 +648,7 @@ static int prism2_config(struct pcmcia_device *link)
668 printk(", Vpp %d.%d", link->conf.Vpp / 10, 648 printk(", Vpp %d.%d", link->conf.Vpp / 10,
669 link->conf.Vpp % 10); 649 link->conf.Vpp % 10);
670 if (link->conf.Attributes & CONF_ENABLE_IRQ) 650 if (link->conf.Attributes & CONF_ENABLE_IRQ)
671 printk(", irq %d", link->irq.AssignedIRQ); 651 printk(", irq %d", link->irq);
672 if (link->io.NumPorts1) 652 if (link->io.NumPorts1)
673 printk(", io 0x%04x-0x%04x", link->io.BasePort1, 653 printk(", io 0x%04x-0x%04x", link->io.BasePort1,
674 link->io.BasePort1+link->io.NumPorts1-1); 654 link->io.BasePort1+link->io.NumPorts1-1);
@@ -682,11 +662,9 @@ static int prism2_config(struct pcmcia_device *link)
682 sandisk_enable_wireless(dev); 662 sandisk_enable_wireless(dev);
683 663
684 ret = prism2_hw_config(dev, 1); 664 ret = prism2_hw_config(dev, 1);
685 if (!ret) { 665 if (!ret)
686 ret = hostap_hw_ready(dev); 666 ret = hostap_hw_ready(dev);
687 if (ret == 0 && local->ddev) 667
688 strcpy(hw_priv->node.dev_name, local->ddev->name);
689 }
690 return ret; 668 return ret;
691 669
692 failed: 670 failed:
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 9b72c45a7748..2b05fe5e994c 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -174,6 +174,8 @@ that only one external action is invoked at a time.
174#define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2100 Network Driver" 174#define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2100 Network Driver"
175#define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation" 175#define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation"
176 176
177struct pm_qos_request_list *ipw2100_pm_qos_req;
178
177/* Debugging stuff */ 179/* Debugging stuff */
178#ifdef CONFIG_IPW2100_DEBUG 180#ifdef CONFIG_IPW2100_DEBUG
179#define IPW2100_RX_DEBUG /* Reception debugging */ 181#define IPW2100_RX_DEBUG /* Reception debugging */
@@ -1739,7 +1741,7 @@ static int ipw2100_up(struct ipw2100_priv *priv, int deferred)
1739 /* the ipw2100 hardware really doesn't want power management delays 1741 /* the ipw2100 hardware really doesn't want power management delays
1740 * longer than 175usec 1742 * longer than 175usec
1741 */ 1743 */
1742 pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, "ipw2100", 175); 1744 pm_qos_update_request(ipw2100_pm_qos_req, 175);
1743 1745
1744 /* If the interrupt is enabled, turn it off... */ 1746 /* If the interrupt is enabled, turn it off... */
1745 spin_lock_irqsave(&priv->low_lock, flags); 1747 spin_lock_irqsave(&priv->low_lock, flags);
@@ -1887,8 +1889,7 @@ static void ipw2100_down(struct ipw2100_priv *priv)
1887 ipw2100_disable_interrupts(priv); 1889 ipw2100_disable_interrupts(priv);
1888 spin_unlock_irqrestore(&priv->low_lock, flags); 1890 spin_unlock_irqrestore(&priv->low_lock, flags);
1889 1891
1890 pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, "ipw2100", 1892 pm_qos_update_request(ipw2100_pm_qos_req, PM_QOS_DEFAULT_VALUE);
1891 PM_QOS_DEFAULT_VALUE);
1892 1893
1893 /* We have to signal any supplicant if we are disassociating */ 1894 /* We have to signal any supplicant if we are disassociating */
1894 if (associated) 1895 if (associated)
@@ -6669,7 +6670,7 @@ static int __init ipw2100_init(void)
6669 if (ret) 6670 if (ret)
6670 goto out; 6671 goto out;
6671 6672
6672 pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, "ipw2100", 6673 ipw2100_pm_qos_req = pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY,
6673 PM_QOS_DEFAULT_VALUE); 6674 PM_QOS_DEFAULT_VALUE);
6674#ifdef CONFIG_IPW2100_DEBUG 6675#ifdef CONFIG_IPW2100_DEBUG
6675 ipw2100_debug_level = debug; 6676 ipw2100_debug_level = debug;
@@ -6692,7 +6693,7 @@ static void __exit ipw2100_exit(void)
6692 &driver_attr_debug_level); 6693 &driver_attr_debug_level);
6693#endif 6694#endif
6694 pci_unregister_driver(&ipw2100_pci_driver); 6695 pci_unregister_driver(&ipw2100_pci_driver);
6695 pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, "ipw2100"); 6696 pm_qos_remove_request(ipw2100_pm_qos_req);
6696} 6697}
6697 6698
6698module_init(ipw2100_init); 6699module_init(ipw2100_init);
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index c4844adff92a..92b3e64fc14d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -259,7 +259,7 @@ static struct iwl_lib_ops iwl6000_lib = {
259 EEPROM_5000_REG_BAND_3_CHANNELS, 259 EEPROM_5000_REG_BAND_3_CHANNELS,
260 EEPROM_5000_REG_BAND_4_CHANNELS, 260 EEPROM_5000_REG_BAND_4_CHANNELS,
261 EEPROM_5000_REG_BAND_5_CHANNELS, 261 EEPROM_5000_REG_BAND_5_CHANNELS,
262 EEPROM_5000_REG_BAND_24_HT40_CHANNELS, 262 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
263 EEPROM_5000_REG_BAND_52_HT40_CHANNELS 263 EEPROM_5000_REG_BAND_52_HT40_CHANNELS
264 }, 264 },
265 .verify_signature = iwlcore_eeprom_verify_signature, 265 .verify_signature = iwlcore_eeprom_verify_signature,
@@ -323,7 +323,7 @@ static struct iwl_lib_ops iwl6050_lib = {
323 EEPROM_5000_REG_BAND_3_CHANNELS, 323 EEPROM_5000_REG_BAND_3_CHANNELS,
324 EEPROM_5000_REG_BAND_4_CHANNELS, 324 EEPROM_5000_REG_BAND_4_CHANNELS,
325 EEPROM_5000_REG_BAND_5_CHANNELS, 325 EEPROM_5000_REG_BAND_5_CHANNELS,
326 EEPROM_5000_REG_BAND_24_HT40_CHANNELS, 326 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
327 EEPROM_5000_REG_BAND_52_HT40_CHANNELS 327 EEPROM_5000_REG_BAND_52_HT40_CHANNELS
328 }, 328 },
329 .verify_signature = iwlcore_eeprom_verify_signature, 329 .verify_signature = iwlcore_eeprom_verify_signature,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 8b8e3e1cbb44..bdff56583e11 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -3331,6 +3331,7 @@ static void iwl_cancel_deferred_work(struct iwl_priv *priv)
3331 3331
3332 cancel_delayed_work_sync(&priv->init_alive_start); 3332 cancel_delayed_work_sync(&priv->init_alive_start);
3333 cancel_delayed_work(&priv->scan_check); 3333 cancel_delayed_work(&priv->scan_check);
3334 cancel_work_sync(&priv->start_internal_scan);
3334 cancel_delayed_work(&priv->alive_start); 3335 cancel_delayed_work(&priv->alive_start);
3335 cancel_work_sync(&priv->beacon_update); 3336 cancel_work_sync(&priv->beacon_update);
3336 del_timer_sync(&priv->statistics_periodic); 3337 del_timer_sync(&priv->statistics_periodic);
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index 6383d9f8c9b3..f4e59ae07f8e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -2621,7 +2621,9 @@ struct iwl_ssid_ie {
2621#define PROBE_OPTION_MAX_3945 4 2621#define PROBE_OPTION_MAX_3945 4
2622#define PROBE_OPTION_MAX 20 2622#define PROBE_OPTION_MAX 20
2623#define TX_CMD_LIFE_TIME_INFINITE cpu_to_le32(0xFFFFFFFF) 2623#define TX_CMD_LIFE_TIME_INFINITE cpu_to_le32(0xFFFFFFFF)
2624#define IWL_GOOD_CRC_TH cpu_to_le16(1) 2624#define IWL_GOOD_CRC_TH_DISABLED 0
2625#define IWL_GOOD_CRC_TH_DEFAULT cpu_to_le16(1)
2626#define IWL_GOOD_CRC_TH_NEVER cpu_to_le16(0xffff)
2625#define IWL_MAX_SCAN_SIZE 1024 2627#define IWL_MAX_SCAN_SIZE 1024
2626#define IWL_MAX_CMD_SIZE 4096 2628#define IWL_MAX_CMD_SIZE 4096
2627#define IWL_MAX_PROBE_REQUEST 200 2629#define IWL_MAX_PROBE_REQUEST 200
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 3352f7086632..049b652bcb5e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -3358,7 +3358,6 @@ static void iwl_force_rf_reset(struct iwl_priv *priv)
3358 */ 3358 */
3359 IWL_DEBUG_INFO(priv, "perform radio reset.\n"); 3359 IWL_DEBUG_INFO(priv, "perform radio reset.\n");
3360 iwl_internal_short_hw_scan(priv); 3360 iwl_internal_short_hw_scan(priv);
3361 return;
3362} 3361}
3363 3362
3364 3363
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 732590f5fe30..36940a9ec6b9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -506,7 +506,7 @@ void iwl_init_scan_params(struct iwl_priv *priv);
506int iwl_scan_cancel(struct iwl_priv *priv); 506int iwl_scan_cancel(struct iwl_priv *priv);
507int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms); 507int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
508int iwl_mac_hw_scan(struct ieee80211_hw *hw, struct cfg80211_scan_request *req); 508int iwl_mac_hw_scan(struct ieee80211_hw *hw, struct cfg80211_scan_request *req);
509int iwl_internal_short_hw_scan(struct iwl_priv *priv); 509void iwl_internal_short_hw_scan(struct iwl_priv *priv);
510int iwl_force_reset(struct iwl_priv *priv, int mode); 510int iwl_force_reset(struct iwl_priv *priv, int mode);
511u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame, 511u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
512 const u8 *ie, int ie_len, int left); 512 const u8 *ie, int ie_len, int left);
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 6054c5fba0c1..ef1720a852e9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -1296,6 +1296,7 @@ struct iwl_priv {
1296 struct work_struct tt_work; 1296 struct work_struct tt_work;
1297 struct work_struct ct_enter; 1297 struct work_struct ct_enter;
1298 struct work_struct ct_exit; 1298 struct work_struct ct_exit;
1299 struct work_struct start_internal_scan;
1299 1300
1300 struct tasklet_struct irq_tasklet; 1301 struct tasklet_struct irq_tasklet;
1301 1302
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index 4e1ba824dc50..8171c701e4e1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -203,6 +203,10 @@ struct iwl_eeprom_enhanced_txpwr {
203#define EEPROM_5000_REG_BAND_52_HT40_CHANNELS ((0x92)\ 203#define EEPROM_5000_REG_BAND_52_HT40_CHANNELS ((0x92)\
204 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 22 bytes */ 204 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 22 bytes */
205 205
206/* 6000 regulatory - indirect access */
207#define EEPROM_6000_REG_BAND_24_HT40_CHANNELS ((0x80)\
208 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 14 bytes */
209
206/* 6000 and up regulatory tx power - indirect access */ 210/* 6000 and up regulatory tx power - indirect access */
207/* max. elements per section */ 211/* max. elements per section */
208#define EEPROM_MAX_TXPOWER_SECTION_ELEMENTS (8) 212#define EEPROM_MAX_TXPOWER_SECTION_ELEMENTS (8)
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index 9ab0e412bf10..741e65ec8301 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -470,6 +470,8 @@ EXPORT_SYMBOL(iwl_init_scan_params);
470 470
471static int iwl_scan_initiate(struct iwl_priv *priv) 471static int iwl_scan_initiate(struct iwl_priv *priv)
472{ 472{
473 WARN_ON(!mutex_is_locked(&priv->mutex));
474
473 IWL_DEBUG_INFO(priv, "Starting scan...\n"); 475 IWL_DEBUG_INFO(priv, "Starting scan...\n");
474 set_bit(STATUS_SCANNING, &priv->status); 476 set_bit(STATUS_SCANNING, &priv->status);
475 priv->is_internal_short_scan = false; 477 priv->is_internal_short_scan = false;
@@ -547,24 +549,31 @@ EXPORT_SYMBOL(iwl_mac_hw_scan);
547 * internal short scan, this function should only been called while associated. 549 * internal short scan, this function should only been called while associated.
548 * It will reset and tune the radio to prevent possible RF related problem 550 * It will reset and tune the radio to prevent possible RF related problem
549 */ 551 */
550int iwl_internal_short_hw_scan(struct iwl_priv *priv) 552void iwl_internal_short_hw_scan(struct iwl_priv *priv)
551{ 553{
552 int ret = 0; 554 queue_work(priv->workqueue, &priv->start_internal_scan);
555}
556
557static void iwl_bg_start_internal_scan(struct work_struct *work)
558{
559 struct iwl_priv *priv =
560 container_of(work, struct iwl_priv, start_internal_scan);
561
562 mutex_lock(&priv->mutex);
553 563
554 if (!iwl_is_ready_rf(priv)) { 564 if (!iwl_is_ready_rf(priv)) {
555 ret = -EIO;
556 IWL_DEBUG_SCAN(priv, "not ready or exit pending\n"); 565 IWL_DEBUG_SCAN(priv, "not ready or exit pending\n");
557 goto out; 566 goto unlock;
558 } 567 }
568
559 if (test_bit(STATUS_SCANNING, &priv->status)) { 569 if (test_bit(STATUS_SCANNING, &priv->status)) {
560 IWL_DEBUG_SCAN(priv, "Scan already in progress.\n"); 570 IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
561 ret = -EAGAIN; 571 goto unlock;
562 goto out;
563 } 572 }
573
564 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { 574 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
565 IWL_DEBUG_SCAN(priv, "Scan request while abort pending\n"); 575 IWL_DEBUG_SCAN(priv, "Scan request while abort pending\n");
566 ret = -EAGAIN; 576 goto unlock;
567 goto out;
568 } 577 }
569 578
570 priv->scan_bands = 0; 579 priv->scan_bands = 0;
@@ -577,9 +586,8 @@ int iwl_internal_short_hw_scan(struct iwl_priv *priv)
577 set_bit(STATUS_SCANNING, &priv->status); 586 set_bit(STATUS_SCANNING, &priv->status);
578 priv->is_internal_short_scan = true; 587 priv->is_internal_short_scan = true;
579 queue_work(priv->workqueue, &priv->request_scan); 588 queue_work(priv->workqueue, &priv->request_scan);
580 589 unlock:
581out: 590 mutex_unlock(&priv->mutex);
582 return ret;
583} 591}
584EXPORT_SYMBOL(iwl_internal_short_hw_scan); 592EXPORT_SYMBOL(iwl_internal_short_hw_scan);
585 593
@@ -805,16 +813,29 @@ static void iwl_bg_request_scan(struct work_struct *data)
805 rate = IWL_RATE_1M_PLCP; 813 rate = IWL_RATE_1M_PLCP;
806 rate_flags = RATE_MCS_CCK_MSK; 814 rate_flags = RATE_MCS_CCK_MSK;
807 } 815 }
808 scan->good_CRC_th = 0; 816 scan->good_CRC_th = IWL_GOOD_CRC_TH_DISABLED;
809 } else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ)) { 817 } else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ)) {
810 band = IEEE80211_BAND_5GHZ; 818 band = IEEE80211_BAND_5GHZ;
811 rate = IWL_RATE_6M_PLCP; 819 rate = IWL_RATE_6M_PLCP;
812 /* 820 /*
813 * If active scaning is requested but a certain channel 821 * If active scanning is requested but a certain channel is
814 * is marked passive, we can do active scanning if we 822 * marked passive, we can do active scanning if we detect
815 * detect transmissions. 823 * transmissions.
824 *
825 * There is an issue with some firmware versions that triggers
826 * a sysassert on a "good CRC threshold" of zero (== disabled),
827 * on a radar channel even though this means that we should NOT
828 * send probes.
829 *
830 * The "good CRC threshold" is the number of frames that we
831 * need to receive during our dwell time on a channel before
832 * sending out probes -- setting this to a huge value will
833 * mean we never reach it, but at the same time work around
834 * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER
835 * here instead of IWL_GOOD_CRC_TH_DISABLED.
816 */ 836 */
817 scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH : 0; 837 scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
838 IWL_GOOD_CRC_TH_NEVER;
818 839
819 /* Force use of chains B and C (0x6) for scan Rx for 4965 840 /* Force use of chains B and C (0x6) for scan Rx for 4965
820 * Avoid A (0x1) because of its off-channel reception on A-band. 841 * Avoid A (0x1) because of its off-channel reception on A-band.
@@ -965,6 +986,7 @@ void iwl_setup_scan_deferred_work(struct iwl_priv *priv)
965 INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed); 986 INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed);
966 INIT_WORK(&priv->request_scan, iwl_bg_request_scan); 987 INIT_WORK(&priv->request_scan, iwl_bg_request_scan);
967 INIT_WORK(&priv->abort_scan, iwl_bg_abort_scan); 988 INIT_WORK(&priv->abort_scan, iwl_bg_abort_scan);
989 INIT_WORK(&priv->start_internal_scan, iwl_bg_start_internal_scan);
968 INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check); 990 INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check);
969} 991}
970EXPORT_SYMBOL(iwl_setup_scan_deferred_work); 992EXPORT_SYMBOL(iwl_setup_scan_deferred_work);
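
The hunks above convert the internal short scan from a direct call into deferred work: iwl_internal_short_hw_scan() now only queues start_internal_scan, and the new iwl_bg_start_internal_scan() worker takes priv->mutex in process context before doing the readiness and STATUS_SCANNING checks (the added WARN_ON in iwl_scan_initiate() documents that the mutex must be held there). A minimal sketch of the same defer-to-workqueue-then-lock pattern, with hypothetical names (my_dev, start_work, my_bg_start) rather than the driver's own:

#include <linux/workqueue.h>
#include <linux/mutex.h>

struct my_dev {
	struct mutex lock;
	struct workqueue_struct *wq;
	struct work_struct start_work;
};

/* Cheap trigger: just defer the real work to process context. */
static void my_trigger(struct my_dev *dev)
{
	queue_work(dev->wq, &dev->start_work);
}

/* Runs from the workqueue, so taking the mutex here is safe. */
static void my_bg_start(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, start_work);

	mutex_lock(&dev->lock);
	/* ... checks and state changes that require dev->lock ... */
	mutex_unlock(&dev->lock);
}

static void my_setup(struct my_dev *dev)
{
	INIT_WORK(&dev->start_work, my_bg_start);
}

The trigger can then be called from paths where sleeping on the mutex would be awkward, while every state check stays serialized under the lock.
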
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index b55e4f39a9e1..b74a56c48d26 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -2967,7 +2967,8 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
2967 * is marked passive, we can do active scanning if we 2967 * is marked passive, we can do active scanning if we
2968 * detect transmissions. 2968 * detect transmissions.
2969 */ 2969 */
2970 scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH : 0; 2970 scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
2971 IWL_GOOD_CRC_TH_DISABLED;
2971 band = IEEE80211_BAND_5GHZ; 2972 band = IEEE80211_BAND_5GHZ;
2972 } else { 2973 } else {
2973 IWL_WARN(priv, "Invalid scan band count\n"); 2974 IWL_WARN(priv, "Invalid scan band count\n");
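
Both the agn and 3945 scan paths stop writing a literal 0 into good_CRC_th and use named constants instead, so the "disabled" and "effectively never" cases stay distinguishable. As a hedged illustration only -- the values below are assumptions, not shown in this diff -- the constants and the 5 GHz selection could look like:

/* Assumed definitions; the real values live in the iwlwifi command headers. */
#define IWL_GOOD_CRC_TH_DISABLED	0
#define IWL_GOOD_CRC_TH_DEFAULT		cpu_to_le16(1)
#define IWL_GOOD_CRC_TH_NEVER		cpu_to_le16(0xffff)

/* 5 GHz: pick a threshold that is never zero, working around the
 * firmware sysassert described in the comment above. */
static __le16 good_crc_threshold(bool is_active)
{
	return is_active ? IWL_GOOD_CRC_TH_DEFAULT : IWL_GOOD_CRC_TH_NEVER;
}
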
diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/libertas/if_cs.c
index 6d55439a7b97..08e4e3908003 100644
--- a/drivers/net/wireless/libertas/if_cs.c
+++ b/drivers/net/wireless/libertas/if_cs.c
@@ -777,7 +777,7 @@ static void if_cs_release(struct pcmcia_device *p_dev)
777 777
778 lbs_deb_enter(LBS_DEB_CS); 778 lbs_deb_enter(LBS_DEB_CS);
779 779
780 free_irq(p_dev->irq.AssignedIRQ, card); 780 free_irq(p_dev->irq, card);
781 pcmcia_disable_device(p_dev); 781 pcmcia_disable_device(p_dev);
782 if (card->iobase) 782 if (card->iobase)
783 ioport_unmap(card->iobase); 783 ioport_unmap(card->iobase);
@@ -807,8 +807,7 @@ static int if_cs_ioprobe(struct pcmcia_device *p_dev,
807 p_dev->io.NumPorts1 = cfg->io.win[0].len; 807 p_dev->io.NumPorts1 = cfg->io.win[0].len;
808 808
809 /* Do we need to allocate an interrupt? */ 809 /* Do we need to allocate an interrupt? */
810 if (cfg->irq.IRQInfo1) 810 p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
811 p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
812 811
813 /* IO window settings */ 812 /* IO window settings */
814 if (cfg->io.nwin != 1) { 813 if (cfg->io.nwin != 1) {
@@ -837,9 +836,6 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
837 card->p_dev = p_dev; 836 card->p_dev = p_dev;
838 p_dev->priv = card; 837 p_dev->priv = card;
839 838
840 p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
841 p_dev->irq.Handler = NULL;
842
843 p_dev->conf.Attributes = 0; 839 p_dev->conf.Attributes = 0;
844 p_dev->conf.IntType = INT_MEMORY_AND_IO; 840 p_dev->conf.IntType = INT_MEMORY_AND_IO;
845 841
@@ -854,13 +850,8 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
854 * a handler to the interrupt, unless the 'Handler' member of 850 * a handler to the interrupt, unless the 'Handler' member of
855 * the irq structure is initialized. 851 * the irq structure is initialized.
856 */ 852 */
857 if (p_dev->conf.Attributes & CONF_ENABLE_IRQ) { 853 if (!p_dev->irq)
858 ret = pcmcia_request_irq(p_dev, &p_dev->irq); 854 goto out1;
859 if (ret) {
860 lbs_pr_err("error in pcmcia_request_irq\n");
861 goto out1;
862 }
863 }
864 855
865 /* Initialize io access */ 856 /* Initialize io access */
866 card->iobase = ioport_map(p_dev->io.BasePort1, p_dev->io.NumPorts1); 857 card->iobase = ioport_map(p_dev->io.BasePort1, p_dev->io.NumPorts1);
@@ -883,7 +874,7 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
883 874
884 /* Finally, report what we've done */ 875 /* Finally, report what we've done */
885 lbs_deb_cs("irq %d, io 0x%04x-0x%04x\n", 876 lbs_deb_cs("irq %d, io 0x%04x-0x%04x\n",
886 p_dev->irq.AssignedIRQ, p_dev->io.BasePort1, 877 p_dev->irq, p_dev->io.BasePort1,
887 p_dev->io.BasePort1 + p_dev->io.NumPorts1 - 1); 878 p_dev->io.BasePort1 + p_dev->io.NumPorts1 - 1);
888 879
889 /* 880 /*
@@ -940,7 +931,7 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
940 priv->fw_ready = 1; 931 priv->fw_ready = 1;
941 932
942 /* Now actually get the IRQ */ 933 /* Now actually get the IRQ */
943 ret = request_irq(p_dev->irq.AssignedIRQ, if_cs_interrupt, 934 ret = request_irq(p_dev->irq, if_cs_interrupt,
944 IRQF_SHARED, DRV_NAME, card); 935 IRQF_SHARED, DRV_NAME, card);
945 if (ret) { 936 if (ret) {
946 lbs_pr_err("error in request_irq\n"); 937 lbs_pr_err("error in request_irq\n");
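
The libertas hunks show the driver-side half of the PCMCIA IRQ rework in this series: the irq_req_t member (irq.Attributes, irq.Handler, irq.AssignedIRQ) is gone, p_dev->irq is now a plain interrupt number set up by the core, and a driver that installs its own handler simply checks the number and calls request_irq()/free_irq() itself. A sketch under those assumptions, with my_cs/my_isr as placeholder names rather than libertas symbols:

static irqreturn_t my_isr(int irq, void *dev_id);

static int my_cs_enable(struct pcmcia_device *p_dev, void *priv)
{
	/* The PCMCIA core has already picked the interrupt line. */
	if (!p_dev->irq)
		return -ENODEV;

	return request_irq(p_dev->irq, my_isr, IRQF_SHARED, "my_cs", priv);
}

static void my_cs_release(struct pcmcia_device *p_dev, void *priv)
{
	free_irq(p_dev->irq, priv);
	pcmcia_disable_device(p_dev);
}
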
diff --git a/drivers/net/wireless/orinoco/orinoco_cs.c b/drivers/net/wireless/orinoco/orinoco_cs.c
index 1d4ada188eda..03056ab73032 100644
--- a/drivers/net/wireless/orinoco/orinoco_cs.c
+++ b/drivers/net/wireless/orinoco/orinoco_cs.c
@@ -50,7 +50,6 @@ MODULE_PARM_DESC(ignore_cis_vcc, "Allow voltage mismatch between card and socket
50 * struct orinoco_private */ 50 * struct orinoco_private */
51struct orinoco_pccard { 51struct orinoco_pccard {
52 struct pcmcia_device *p_dev; 52 struct pcmcia_device *p_dev;
53 dev_node_t node;
54 53
55 /* Used to handle hard reset */ 54 /* Used to handle hard reset */
56 /* yuck, we need this hack to work around the insanity of the 55 /* yuck, we need this hack to work around the insanity of the
@@ -119,10 +118,6 @@ orinoco_cs_probe(struct pcmcia_device *link)
119 card->p_dev = link; 118 card->p_dev = link;
120 link->priv = priv; 119 link->priv = priv;
121 120
122 /* Interrupt setup */
123 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
124 link->irq.Handler = orinoco_interrupt;
125
126 /* General socket configuration defaults can go here. In this 121 /* General socket configuration defaults can go here. In this
127 * client, we assume very little, and rely on the CIS for 122 * client, we assume very little, and rely on the CIS for
128 * almost everything. In most clients, many details (i.e., 123 * almost everything. In most clients, many details (i.e.,
@@ -144,8 +139,7 @@ static void orinoco_cs_detach(struct pcmcia_device *link)
144{ 139{
145 struct orinoco_private *priv = link->priv; 140 struct orinoco_private *priv = link->priv;
146 141
147 if (link->dev_node) 142 orinoco_if_del(priv);
148 orinoco_if_del(priv);
149 143
150 orinoco_cs_release(link); 144 orinoco_cs_release(link);
151 145
@@ -230,7 +224,6 @@ static int
230orinoco_cs_config(struct pcmcia_device *link) 224orinoco_cs_config(struct pcmcia_device *link)
231{ 225{
232 struct orinoco_private *priv = link->priv; 226 struct orinoco_private *priv = link->priv;
233 struct orinoco_pccard *card = priv->card;
234 hermes_t *hw = &priv->hw; 227 hermes_t *hw = &priv->hw;
235 int ret; 228 int ret;
236 void __iomem *mem; 229 void __iomem *mem;
@@ -258,12 +251,7 @@ orinoco_cs_config(struct pcmcia_device *link)
258 goto failed; 251 goto failed;
259 } 252 }
260 253
261 /* 254 ret = pcmcia_request_irq(link, orinoco_interrupt);
262 * Allocate an interrupt line. Note that this does not assign
263 * a handler to the interrupt, unless the 'Handler' member of
264 * the irq structure is initialized.
265 */
266 ret = pcmcia_request_irq(link, &link->irq);
267 if (ret) 255 if (ret)
268 goto failed; 256 goto failed;
269 257
@@ -285,9 +273,6 @@ orinoco_cs_config(struct pcmcia_device *link)
285 if (ret) 273 if (ret)
286 goto failed; 274 goto failed;
287 275
288 /* Ok, we have the configuration, prepare to register the netdev */
289 card->node.major = card->node.minor = 0;
290
291 /* Initialise the main driver */ 276 /* Initialise the main driver */
292 if (orinoco_init(priv) != 0) { 277 if (orinoco_init(priv) != 0) {
293 printk(KERN_ERR PFX "orinoco_init() failed\n"); 278 printk(KERN_ERR PFX "orinoco_init() failed\n");
@@ -296,17 +281,11 @@ orinoco_cs_config(struct pcmcia_device *link)
296 281
297 /* Register an interface with the stack */ 282 /* Register an interface with the stack */
298 if (orinoco_if_add(priv, link->io.BasePort1, 283 if (orinoco_if_add(priv, link->io.BasePort1,
299 link->irq.AssignedIRQ) != 0) { 284 link->irq) != 0) {
300 printk(KERN_ERR PFX "orinoco_if_add() failed\n"); 285 printk(KERN_ERR PFX "orinoco_if_add() failed\n");
301 goto failed; 286 goto failed;
302 } 287 }
303 288
304 /* At this point, the dev_node_t structure(s) needs to be
305 * initialized and arranged in a linked list at link->dev_node. */
306 strcpy(card->node.dev_name, priv->ndev->name);
307 link->dev_node = &card->node; /* link->dev_node being non-NULL is also
308 * used to indicate that the
309 * net_device has been registered */
310 return 0; 289 return 0;
311 290
312 failed: 291 failed:
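
orinoco_cs shows the other variant of the same rework: drivers that want the core to install the handler now pass it straight to pcmcia_request_irq(), and the dev_node_t bookkeeping (dev_name copy, link->dev_node) disappears because the core no longer needs it. A condensed sketch of the new config flow; my_cs_config, my_isr and my_if_add are placeholders, not orinoco symbols:

static irqreturn_t my_isr(int irq, void *dev_id);
static int my_if_add(void *priv, unsigned int iobase, unsigned int irq);

static int my_cs_config(struct pcmcia_device *link, void *priv)
{
	int ret;

	/* New-style call: the core requests the line and installs my_isr. */
	ret = pcmcia_request_irq(link, my_isr);
	if (ret)
		return ret;

	ret = pcmcia_request_configuration(link, &link->conf);
	if (ret)
		return ret;

	/* link->irq is the assigned number; no dev_node_t to register. */
	return my_if_add(priv, link->io.BasePort1, link->irq);
}
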
diff --git a/drivers/net/wireless/orinoco/spectrum_cs.c b/drivers/net/wireless/orinoco/spectrum_cs.c
index 59bda240fdc2..41b9ce425855 100644
--- a/drivers/net/wireless/orinoco/spectrum_cs.c
+++ b/drivers/net/wireless/orinoco/spectrum_cs.c
@@ -57,7 +57,6 @@ MODULE_PARM_DESC(ignore_cis_vcc, "Allow voltage mismatch between card and socket
57 * struct orinoco_private */ 57 * struct orinoco_private */
58struct orinoco_pccard { 58struct orinoco_pccard {
59 struct pcmcia_device *p_dev; 59 struct pcmcia_device *p_dev;
60 dev_node_t node;
61}; 60};
62 61
63/********************************************************************/ 62/********************************************************************/
@@ -193,10 +192,6 @@ spectrum_cs_probe(struct pcmcia_device *link)
193 card->p_dev = link; 192 card->p_dev = link;
194 link->priv = priv; 193 link->priv = priv;
195 194
196 /* Interrupt setup */
197 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
198 link->irq.Handler = orinoco_interrupt;
199
200 /* General socket configuration defaults can go here. In this 195 /* General socket configuration defaults can go here. In this
201 * client, we assume very little, and rely on the CIS for 196 * client, we assume very little, and rely on the CIS for
202 * almost everything. In most clients, many details (i.e., 197 * almost everything. In most clients, many details (i.e.,
@@ -218,8 +213,7 @@ static void spectrum_cs_detach(struct pcmcia_device *link)
218{ 213{
219 struct orinoco_private *priv = link->priv; 214 struct orinoco_private *priv = link->priv;
220 215
221 if (link->dev_node) 216 orinoco_if_del(priv);
222 orinoco_if_del(priv);
223 217
224 spectrum_cs_release(link); 218 spectrum_cs_release(link);
225 219
@@ -304,7 +298,6 @@ static int
304spectrum_cs_config(struct pcmcia_device *link) 298spectrum_cs_config(struct pcmcia_device *link)
305{ 299{
306 struct orinoco_private *priv = link->priv; 300 struct orinoco_private *priv = link->priv;
307 struct orinoco_pccard *card = priv->card;
308 hermes_t *hw = &priv->hw; 301 hermes_t *hw = &priv->hw;
309 int ret; 302 int ret;
310 void __iomem *mem; 303 void __iomem *mem;
@@ -332,12 +325,7 @@ spectrum_cs_config(struct pcmcia_device *link)
332 goto failed; 325 goto failed;
333 } 326 }
334 327
335 /* 328 ret = pcmcia_request_irq(link, orinoco_interrupt);
336 * Allocate an interrupt line. Note that this does not assign
337 * a handler to the interrupt, unless the 'Handler' member of
338 * the irq structure is initialized.
339 */
340 ret = pcmcia_request_irq(link, &link->irq);
341 if (ret) 329 if (ret)
342 goto failed; 330 goto failed;
343 331
@@ -359,9 +347,6 @@ spectrum_cs_config(struct pcmcia_device *link)
359 if (ret) 347 if (ret)
360 goto failed; 348 goto failed;
361 349
362 /* Ok, we have the configuration, prepare to register the netdev */
363 card->node.major = card->node.minor = 0;
364
365 /* Reset card */ 350 /* Reset card */
366 if (spectrum_cs_hard_reset(priv) != 0) 351 if (spectrum_cs_hard_reset(priv) != 0)
367 goto failed; 352 goto failed;
@@ -374,17 +359,11 @@ spectrum_cs_config(struct pcmcia_device *link)
374 359
375 /* Register an interface with the stack */ 360 /* Register an interface with the stack */
376 if (orinoco_if_add(priv, link->io.BasePort1, 361 if (orinoco_if_add(priv, link->io.BasePort1,
377 link->irq.AssignedIRQ) != 0) { 362 link->irq) != 0) {
378 printk(KERN_ERR PFX "orinoco_if_add() failed\n"); 363 printk(KERN_ERR PFX "orinoco_if_add() failed\n");
379 goto failed; 364 goto failed;
380 } 365 }
381 366
382 /* At this point, the dev_node_t structure(s) needs to be
383 * initialized and arranged in a linked list at link->dev_node. */
384 strcpy(card->node.dev_name, priv->ndev->name);
385 link->dev_node = &card->node; /* link->dev_node being non-NULL is also
386 * used to indicate that the
387 * net_device has been registered */
388 return 0; 367 return 0;
389 368
390 failed: 369 failed:
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index 269fda362836..c24067f1a0cb 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -246,7 +246,7 @@ static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index,
246 u32 idx, i; 246 u32 idx, i;
247 247
248 i = (*index) % ring_limit; 248 i = (*index) % ring_limit;
249 (*index) = idx = le32_to_cpu(ring_control->device_idx[1]); 249 (*index) = idx = le32_to_cpu(ring_control->device_idx[ring_index]);
250 idx %= ring_limit; 250 idx %= ring_limit;
251 251
252 while (i != idx) { 252 while (i != idx) {
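
The p54pci one-liner fixes a copy-and-paste bug: the TX-ring bookkeeping helper always read the device index of ring 1, regardless of which ring it was asked to check. The general shape of the corrected helper, with hypothetical names:

/* Walk one ring: consume entries from *index up to the device's index. */
static void check_ring(u32 *index, int ring_index,
		       const __le32 *device_idx, u32 ring_limit)
{
	u32 i = *index % ring_limit;
	u32 idx;

	/* Use the index that belongs to this ring, not a fixed slot. */
	*index = idx = le32_to_cpu(device_idx[ring_index]);
	idx %= ring_limit;

	while (i != idx) {
		/* ... reclaim entry i ... */
		i = (i + 1) % ring_limit;
	}
}
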
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 11865ea21875..f7d2a34ca531 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -51,7 +51,6 @@
51#include <pcmcia/cistpl.h> 51#include <pcmcia/cistpl.h>
52#include <pcmcia/cisreg.h> 52#include <pcmcia/cisreg.h>
53#include <pcmcia/ds.h> 53#include <pcmcia/ds.h>
54#include <pcmcia/mem_op.h>
55 54
56#include <linux/wireless.h> 55#include <linux/wireless.h>
57#include <net/iw_handler.h> 56#include <net/iw_handler.h>
@@ -321,10 +320,6 @@ static int ray_probe(struct pcmcia_device *p_dev)
321 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 320 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
322 p_dev->io.IOAddrLines = 5; 321 p_dev->io.IOAddrLines = 5;
323 322
324 /* Interrupt setup. For PCMCIA, driver takes what's given */
325 p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
326 p_dev->irq.Handler = &ray_interrupt;
327
328 /* General socket configuration */ 323 /* General socket configuration */
329 p_dev->conf.Attributes = CONF_ENABLE_IRQ; 324 p_dev->conf.Attributes = CONF_ENABLE_IRQ;
330 p_dev->conf.IntType = INT_MEMORY_AND_IO; 325 p_dev->conf.IntType = INT_MEMORY_AND_IO;
@@ -383,8 +378,7 @@ static void ray_detach(struct pcmcia_device *link)
383 del_timer(&local->timer); 378 del_timer(&local->timer);
384 379
385 if (link->priv) { 380 if (link->priv) {
386 if (link->dev_node) 381 unregister_netdev(dev);
387 unregister_netdev(dev);
388 free_netdev(dev); 382 free_netdev(dev);
389 } 383 }
390 dev_dbg(&link->dev, "ray_cs ray_detach ending\n"); 384 dev_dbg(&link->dev, "ray_cs ray_detach ending\n");
@@ -417,10 +411,10 @@ static int ray_config(struct pcmcia_device *link)
417 /* Now allocate an interrupt line. Note that this does not 411 /* Now allocate an interrupt line. Note that this does not
418 actually assign a handler to the interrupt. 412 actually assign a handler to the interrupt.
419 */ 413 */
420 ret = pcmcia_request_irq(link, &link->irq); 414 ret = pcmcia_request_irq(link, ray_interrupt);
421 if (ret) 415 if (ret)
422 goto failed; 416 goto failed;
423 dev->irq = link->irq.AssignedIRQ; 417 dev->irq = link->irq;
424 418
425 /* This actually configures the PCMCIA socket -- setting up 419 /* This actually configures the PCMCIA socket -- setting up
426 the I/O windows and the interrupt mapping. 420 the I/O windows and the interrupt mapping.
@@ -493,9 +487,6 @@ static int ray_config(struct pcmcia_device *link)
493 return i; 487 return i;
494 } 488 }
495 489
496 strcpy(local->node.dev_name, dev->name);
497 link->dev_node = &local->node;
498
499 printk(KERN_INFO "%s: RayLink, irq %d, hw_addr %pM\n", 490 printk(KERN_INFO "%s: RayLink, irq %d, hw_addr %pM\n",
500 dev->name, dev->irq, dev->dev_addr); 491 dev->name, dev->irq, dev->dev_addr);
501 492
diff --git a/drivers/net/wireless/ray_cs.h b/drivers/net/wireless/ray_cs.h
index 1e23b7f4cca7..9f01ddb19748 100644
--- a/drivers/net/wireless/ray_cs.h
+++ b/drivers/net/wireless/ray_cs.h
@@ -25,7 +25,6 @@ struct beacon_rx {
25typedef struct ray_dev_t { 25typedef struct ray_dev_t {
26 int card_status; 26 int card_status;
27 int authentication_state; 27 int authentication_state;
28 dev_node_t node;
29 window_handle_t amem_handle; /* handle to window for attribute memory */ 28 window_handle_t amem_handle; /* handle to window for attribute memory */
30 window_handle_t rmem_handle; /* handle to window for rx buffer on card */ 29 window_handle_t rmem_handle; /* handle to window for rx buffer on card */
31 void __iomem *sram; /* pointer to beginning of shared RAM */ 30 void __iomem *sram; /* pointer to beginning of shared RAM */
diff --git a/drivers/net/wireless/wl3501.h b/drivers/net/wireless/wl3501.h
index 8bce1a550a22..8816e371fd0e 100644
--- a/drivers/net/wireless/wl3501.h
+++ b/drivers/net/wireless/wl3501.h
@@ -610,7 +610,6 @@ struct wl3501_card {
610 struct iw_statistics wstats; 610 struct iw_statistics wstats;
611 struct iw_spy_data spy_data; 611 struct iw_spy_data spy_data;
612 struct iw_public_data wireless_data; 612 struct iw_public_data wireless_data;
613 struct dev_node_t node;
614 struct pcmcia_device *p_dev; 613 struct pcmcia_device *p_dev;
615}; 614};
616#endif 615#endif
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index 7b9621de239f..5e5d24c1ce2b 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -1451,6 +1451,8 @@ static void wl3501_detach(struct pcmcia_device *link)
1451 netif_device_detach(dev); 1451 netif_device_detach(dev);
1452 wl3501_release(link); 1452 wl3501_release(link);
1453 1453
1454 unregister_netdev(dev);
1455
1454 if (link->priv) 1456 if (link->priv)
1455 free_netdev(link->priv); 1457 free_netdev(link->priv);
1456 1458
@@ -1897,10 +1899,6 @@ static int wl3501_probe(struct pcmcia_device *p_dev)
1897 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 1899 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
1898 p_dev->io.IOAddrLines = 5; 1900 p_dev->io.IOAddrLines = 5;
1899 1901
1900 /* Interrupt setup */
1901 p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
1902 p_dev->irq.Handler = wl3501_interrupt;
1903
1904 /* General socket configuration */ 1902 /* General socket configuration */
1905 p_dev->conf.Attributes = CONF_ENABLE_IRQ; 1903 p_dev->conf.Attributes = CONF_ENABLE_IRQ;
1906 p_dev->conf.IntType = INT_MEMORY_AND_IO; 1904 p_dev->conf.IntType = INT_MEMORY_AND_IO;
@@ -1961,7 +1959,7 @@ static int wl3501_config(struct pcmcia_device *link)
1961 /* Now allocate an interrupt line. Note that this does not actually 1959 /* Now allocate an interrupt line. Note that this does not actually
1962 * assign a handler to the interrupt. */ 1960 * assign a handler to the interrupt. */
1963 1961
1964 ret = pcmcia_request_irq(link, &link->irq); 1962 ret = pcmcia_request_irq(link, wl3501_interrupt);
1965 if (ret) 1963 if (ret)
1966 goto failed; 1964 goto failed;
1967 1965
@@ -1972,7 +1970,7 @@ static int wl3501_config(struct pcmcia_device *link)
1972 if (ret) 1970 if (ret)
1973 goto failed; 1971 goto failed;
1974 1972
1975 dev->irq = link->irq.AssignedIRQ; 1973 dev->irq = link->irq;
1976 dev->base_addr = link->io.BasePort1; 1974 dev->base_addr = link->io.BasePort1;
1977 SET_NETDEV_DEV(dev, &link->dev); 1975 SET_NETDEV_DEV(dev, &link->dev);
1978 if (register_netdev(dev)) { 1976 if (register_netdev(dev)) {
@@ -1981,20 +1979,15 @@ static int wl3501_config(struct pcmcia_device *link)
1981 } 1979 }
1982 1980
1983 this = netdev_priv(dev); 1981 this = netdev_priv(dev);
1984 /*
1985 * At this point, the dev_node_t structure(s) should be initialized and
1986 * arranged in a linked list at link->dev_node.
1987 */
1988 link->dev_node = &this->node;
1989 1982
1990 this->base_addr = dev->base_addr; 1983 this->base_addr = dev->base_addr;
1991 1984
1992 if (!wl3501_get_flash_mac_addr(this)) { 1985 if (!wl3501_get_flash_mac_addr(this)) {
1993 printk(KERN_WARNING "%s: Cant read MAC addr in flash ROM?\n", 1986 printk(KERN_WARNING "%s: Cant read MAC addr in flash ROM?\n",
1994 dev->name); 1987 dev->name);
1988 unregister_netdev(dev);
1995 goto failed; 1989 goto failed;
1996 } 1990 }
1997 strcpy(this->node.dev_name, dev->name);
1998 1991
1999 for (i = 0; i < 6; i++) 1992 for (i = 0; i < 6; i++)
2000 dev->dev_addr[i] = ((char *)&this->mac_addr)[i]; 1993 dev->dev_addr[i] = ((char *)&this->mac_addr)[i];
@@ -2038,12 +2031,6 @@ failed:
2038 */ 2031 */
2039static void wl3501_release(struct pcmcia_device *link) 2032static void wl3501_release(struct pcmcia_device *link)
2040{ 2033{
2041 struct net_device *dev = link->priv;
2042
2043 /* Unlink the device chain */
2044 if (link->dev_node)
2045 unregister_netdev(dev);
2046
2047 pcmcia_disable_device(link); 2034 pcmcia_disable_device(link);
2048} 2035}
2049 2036
diff --git a/drivers/net/zorro8390.c b/drivers/net/zorro8390.c
index 81c753a617ab..9548cbb5012a 100644
--- a/drivers/net/zorro8390.c
+++ b/drivers/net/zorro8390.c
@@ -102,6 +102,7 @@ static struct zorro_device_id zorro8390_zorro_tbl[] __devinitdata = {
102 { ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF, }, 102 { ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF, },
103 { 0 } 103 { 0 }
104}; 104};
105MODULE_DEVICE_TABLE(zorro, zorro8390_zorro_tbl);
105 106
106static struct zorro_driver zorro8390_driver = { 107static struct zorro_driver zorro8390_driver = {
107 .name = "zorro8390", 108 .name = "zorro8390",
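
Adding MODULE_DEVICE_TABLE(zorro, ...) exports the ID table into the module alias information, so udev/modprobe can autoload zorro8390 when a matching Zorro board is detected; without it the table is only consulted once the module is already loaded. The idiom, as used above:

static struct zorro_device_id my_zorro_tbl[] __devinitdata = {
	{ ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF, },
	{ 0 }
};
MODULE_DEVICE_TABLE(zorro, my_zorro_tbl);	/* emits MODULE_ALIAS entries */
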
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 18ecae4a4375..b4748337223b 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -69,7 +69,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
69 } 69 }
70 70
71 phy = get_phy_device(mdio, be32_to_cpup(addr)); 71 phy = get_phy_device(mdio, be32_to_cpup(addr));
72 if (!phy) { 72 if (!phy || IS_ERR(phy)) {
73 dev_err(&mdio->dev, "error probing PHY at address %i\n", 73 dev_err(&mdio->dev, "error probing PHY at address %i\n",
74 *addr); 74 *addr);
75 continue; 75 continue;
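
get_phy_device() can report failure either as NULL or as an ERR_PTR() value, so testing only for NULL lets encoded error pointers slip through; the hunk widens the check to cover both. A small helper showing the usual way to fold the two cases into an errno (names are illustrative):

#include <linux/err.h>

/* Returns 0 if phy is usable, -ENODEV on NULL, or the encoded error. */
static int check_phy(struct phy_device *phy)
{
	if (!phy)
		return -ENODEV;
	if (IS_ERR(phy))
		return PTR_ERR(phy);	/* decode the ERR_PTR() value */
	return 0;
}
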
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index 166b67ea622f..219f79e2210a 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -30,23 +30,7 @@
30 30
31#define OP_BUFFER_FLAGS 0 31#define OP_BUFFER_FLAGS 0
32 32
33/* 33static struct ring_buffer *op_ring_buffer;
34 * Read and write access is using spin locking. Thus, writing to the
35 * buffer by NMI handler (x86) could occur also during critical
36 * sections when reading the buffer. To avoid this, there are 2
37 * buffers for independent read and write access. Read access is in
38 * process context only, write access only in the NMI handler. If the
39 * read buffer runs empty, both buffers are swapped atomically. There
40 * is potentially a small window during swapping where the buffers are
41 * disabled and samples could be lost.
42 *
43 * Using 2 buffers is a little bit overhead, but the solution is clear
44 * and does not require changes in the ring buffer implementation. It
45 * can be changed to a single buffer solution when the ring buffer
46 * access is implemented as non-locking atomic code.
47 */
48static struct ring_buffer *op_ring_buffer_read;
49static struct ring_buffer *op_ring_buffer_write;
50DEFINE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer); 34DEFINE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer);
51 35
52static void wq_sync_buffer(struct work_struct *work); 36static void wq_sync_buffer(struct work_struct *work);
@@ -68,12 +52,9 @@ void oprofile_cpu_buffer_inc_smpl_lost(void)
68 52
69void free_cpu_buffers(void) 53void free_cpu_buffers(void)
70{ 54{
71 if (op_ring_buffer_read) 55 if (op_ring_buffer)
72 ring_buffer_free(op_ring_buffer_read); 56 ring_buffer_free(op_ring_buffer);
73 op_ring_buffer_read = NULL; 57 op_ring_buffer = NULL;
74 if (op_ring_buffer_write)
75 ring_buffer_free(op_ring_buffer_write);
76 op_ring_buffer_write = NULL;
77} 58}
78 59
79#define RB_EVENT_HDR_SIZE 4 60#define RB_EVENT_HDR_SIZE 4
@@ -86,11 +67,8 @@ int alloc_cpu_buffers(void)
86 unsigned long byte_size = buffer_size * (sizeof(struct op_sample) + 67 unsigned long byte_size = buffer_size * (sizeof(struct op_sample) +
87 RB_EVENT_HDR_SIZE); 68 RB_EVENT_HDR_SIZE);
88 69
89 op_ring_buffer_read = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS); 70 op_ring_buffer = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
90 if (!op_ring_buffer_read) 71 if (!op_ring_buffer)
91 goto fail;
92 op_ring_buffer_write = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
93 if (!op_ring_buffer_write)
94 goto fail; 72 goto fail;
95 73
96 for_each_possible_cpu(i) { 74 for_each_possible_cpu(i) {
@@ -162,16 +140,11 @@ struct op_sample
162*op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size) 140*op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size)
163{ 141{
164 entry->event = ring_buffer_lock_reserve 142 entry->event = ring_buffer_lock_reserve
165 (op_ring_buffer_write, sizeof(struct op_sample) + 143 (op_ring_buffer, sizeof(struct op_sample) +
166 size * sizeof(entry->sample->data[0])); 144 size * sizeof(entry->sample->data[0]));
167 if (entry->event) 145 if (!entry->event)
168 entry->sample = ring_buffer_event_data(entry->event);
169 else
170 entry->sample = NULL;
171
172 if (!entry->sample)
173 return NULL; 146 return NULL;
174 147 entry->sample = ring_buffer_event_data(entry->event);
175 entry->size = size; 148 entry->size = size;
176 entry->data = entry->sample->data; 149 entry->data = entry->sample->data;
177 150
@@ -180,25 +153,16 @@ struct op_sample
180 153
181int op_cpu_buffer_write_commit(struct op_entry *entry) 154int op_cpu_buffer_write_commit(struct op_entry *entry)
182{ 155{
183 return ring_buffer_unlock_commit(op_ring_buffer_write, entry->event); 156 return ring_buffer_unlock_commit(op_ring_buffer, entry->event);
184} 157}
185 158
186struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu) 159struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu)
187{ 160{
188 struct ring_buffer_event *e; 161 struct ring_buffer_event *e;
189 e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL); 162 e = ring_buffer_consume(op_ring_buffer, cpu, NULL, NULL);
190 if (e) 163 if (!e)
191 goto event;
192 if (ring_buffer_swap_cpu(op_ring_buffer_read,
193 op_ring_buffer_write,
194 cpu))
195 return NULL; 164 return NULL;
196 e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
197 if (e)
198 goto event;
199 return NULL;
200 165
201event:
202 entry->event = e; 166 entry->event = e;
203 entry->sample = ring_buffer_event_data(e); 167 entry->sample = ring_buffer_event_data(e);
204 entry->size = (ring_buffer_event_length(e) - sizeof(struct op_sample)) 168 entry->size = (ring_buffer_event_length(e) - sizeof(struct op_sample))
@@ -209,8 +173,7 @@ event:
209 173
210unsigned long op_cpu_buffer_entries(int cpu) 174unsigned long op_cpu_buffer_entries(int cpu)
211{ 175{
212 return ring_buffer_entries_cpu(op_ring_buffer_read, cpu) 176 return ring_buffer_entries_cpu(op_ring_buffer, cpu);
213 + ring_buffer_entries_cpu(op_ring_buffer_write, cpu);
214} 177}
215 178
216static int 179static int
@@ -356,8 +319,16 @@ void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
356 319
357void oprofile_add_sample(struct pt_regs * const regs, unsigned long event) 320void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
358{ 321{
359 int is_kernel = !user_mode(regs); 322 int is_kernel;
360 unsigned long pc = profile_pc(regs); 323 unsigned long pc;
324
325 if (likely(regs)) {
326 is_kernel = !user_mode(regs);
327 pc = profile_pc(regs);
328 } else {
329 is_kernel = 0; /* This value will not be used */
330 pc = ESCAPE_CODE; /* as this causes an early return. */
331 }
361 332
362 __oprofile_add_ext_sample(pc, regs, event, is_kernel); 333 __oprofile_add_ext_sample(pc, regs, event, is_kernel);
363} 334}
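
With the lockless ring buffer, oprofile no longer needs the paired read/write buffers and the swap-on-empty dance: a single op_ring_buffer is reserved on the write side and consumed on the read side, and oprofile_add_sample() now tolerates a NULL pt_regs by returning early via the ESCAPE_CODE path. A condensed sketch of the single-buffer write and read paths used above (helper names are mine, not oprofile's):

/* Write side: reserve, fill, commit. */
static int write_sample(struct ring_buffer *rb, const void *data, size_t len)
{
	struct ring_buffer_event *ev;

	ev = ring_buffer_lock_reserve(rb, len);
	if (!ev)
		return -ENOMEM;	/* buffer full or disabled */
	memcpy(ring_buffer_event_data(ev), data, len);
	return ring_buffer_unlock_commit(rb, ev);
}

/* Read side: consume one event for a given CPU, if any. */
static void *read_sample(struct ring_buffer *rb, int cpu)
{
	struct ring_buffer_event *ev = ring_buffer_consume(rb, cpu, NULL, NULL);

	return ev ? ring_buffer_event_data(ev) : NULL;
}
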
diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
index dc8a0428260d..b336cd9ee7a1 100644
--- a/drivers/oprofile/oprof.c
+++ b/drivers/oprofile/oprof.c
@@ -253,22 +253,26 @@ static int __init oprofile_init(void)
253 int err; 253 int err;
254 254
255 err = oprofile_arch_init(&oprofile_ops); 255 err = oprofile_arch_init(&oprofile_ops);
256
257 if (err < 0 || timer) { 256 if (err < 0 || timer) {
258 printk(KERN_INFO "oprofile: using timer interrupt.\n"); 257 printk(KERN_INFO "oprofile: using timer interrupt.\n");
259 oprofile_timer_init(&oprofile_ops); 258 err = oprofile_timer_init(&oprofile_ops);
259 if (err)
260 goto out_arch;
260 } 261 }
261
262 err = oprofilefs_register(); 262 err = oprofilefs_register();
263 if (err) 263 if (err)
264 oprofile_arch_exit(); 264 goto out_arch;
265 return 0;
265 266
267out_arch:
268 oprofile_arch_exit();
266 return err; 269 return err;
267} 270}
268 271
269 272
270static void __exit oprofile_exit(void) 273static void __exit oprofile_exit(void)
271{ 274{
275 oprofile_timer_exit();
272 oprofilefs_unregister(); 276 oprofilefs_unregister();
273 oprofile_arch_exit(); 277 oprofile_arch_exit();
274} 278}
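
oprofile_init() now funnels every failure through one out_arch label, so oprofile_arch_exit() runs whether the timer fallback or the filesystem registration fails, and oprofile_exit() gains the matching oprofile_timer_exit(). The generic shape of that unwind pattern (all names hypothetical):

static int __init my_init(void)
{
	int err;

	err = arch_init();
	if (err < 0 || force_timer) {
		err = timer_init();	/* fall back to the timer mode */
		if (err)
			goto out_arch;
	}

	err = fs_register();
	if (err)
		goto out_arch;
	return 0;

out_arch:
	arch_exit();	/* single unwind point for all failure paths */
	return err;
}
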
diff --git a/drivers/oprofile/oprof.h b/drivers/oprofile/oprof.h
index cb92f5c98c1a..47e12cb4ee8b 100644
--- a/drivers/oprofile/oprof.h
+++ b/drivers/oprofile/oprof.h
@@ -34,7 +34,8 @@ struct super_block;
34struct dentry; 34struct dentry;
35 35
36void oprofile_create_files(struct super_block *sb, struct dentry *root); 36void oprofile_create_files(struct super_block *sb, struct dentry *root);
37void oprofile_timer_init(struct oprofile_operations *ops); 37int oprofile_timer_init(struct oprofile_operations *ops);
38void oprofile_timer_exit(void);
38 39
39int oprofile_set_backtrace(unsigned long depth); 40int oprofile_set_backtrace(unsigned long depth);
40int oprofile_set_timeout(unsigned long time); 41int oprofile_set_timeout(unsigned long time);
diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
index 333f915568c7..dc0ae4d14dff 100644
--- a/drivers/oprofile/timer_int.c
+++ b/drivers/oprofile/timer_int.c
@@ -13,34 +13,94 @@
13#include <linux/oprofile.h> 13#include <linux/oprofile.h>
14#include <linux/profile.h> 14#include <linux/profile.h>
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/cpu.h>
17#include <linux/hrtimer.h>
18#include <asm/irq_regs.h>
16#include <asm/ptrace.h> 19#include <asm/ptrace.h>
17 20
18#include "oprof.h" 21#include "oprof.h"
19 22
20static int timer_notify(struct pt_regs *regs) 23static DEFINE_PER_CPU(struct hrtimer, oprofile_hrtimer);
24
25static enum hrtimer_restart oprofile_hrtimer_notify(struct hrtimer *hrtimer)
26{
27 oprofile_add_sample(get_irq_regs(), 0);
28 hrtimer_forward_now(hrtimer, ns_to_ktime(TICK_NSEC));
29 return HRTIMER_RESTART;
30}
31
32static void __oprofile_hrtimer_start(void *unused)
33{
34 struct hrtimer *hrtimer = &__get_cpu_var(oprofile_hrtimer);
35
36 hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
37 hrtimer->function = oprofile_hrtimer_notify;
38
39 hrtimer_start(hrtimer, ns_to_ktime(TICK_NSEC),
40 HRTIMER_MODE_REL_PINNED);
41}
42
43static int oprofile_hrtimer_start(void)
21{ 44{
22 oprofile_add_sample(regs, 0); 45 on_each_cpu(__oprofile_hrtimer_start, NULL, 1);
23 return 0; 46 return 0;
24} 47}
25 48
26static int timer_start(void) 49static void __oprofile_hrtimer_stop(int cpu)
27{ 50{
28 return register_timer_hook(timer_notify); 51 struct hrtimer *hrtimer = &per_cpu(oprofile_hrtimer, cpu);
52
53 hrtimer_cancel(hrtimer);
29} 54}
30 55
56static void oprofile_hrtimer_stop(void)
57{
58 int cpu;
59
60 for_each_online_cpu(cpu)
61 __oprofile_hrtimer_stop(cpu);
62}
31 63
32static void timer_stop(void) 64static int __cpuinit oprofile_cpu_notify(struct notifier_block *self,
65 unsigned long action, void *hcpu)
33{ 66{
34 unregister_timer_hook(timer_notify); 67 long cpu = (long) hcpu;
68
69 switch (action) {
70 case CPU_ONLINE:
71 case CPU_ONLINE_FROZEN:
72 smp_call_function_single(cpu, __oprofile_hrtimer_start,
73 NULL, 1);
74 break;
75 case CPU_DEAD:
76 case CPU_DEAD_FROZEN:
77 __oprofile_hrtimer_stop(cpu);
78 break;
79 }
80 return NOTIFY_OK;
35} 81}
36 82
83static struct notifier_block __refdata oprofile_cpu_notifier = {
84 .notifier_call = oprofile_cpu_notify,
85};
37 86
38void __init oprofile_timer_init(struct oprofile_operations *ops) 87int __init oprofile_timer_init(struct oprofile_operations *ops)
39{ 88{
89 int rc;
90
91 rc = register_hotcpu_notifier(&oprofile_cpu_notifier);
92 if (rc)
93 return rc;
40 ops->create_files = NULL; 94 ops->create_files = NULL;
41 ops->setup = NULL; 95 ops->setup = NULL;
42 ops->shutdown = NULL; 96 ops->shutdown = NULL;
43 ops->start = timer_start; 97 ops->start = oprofile_hrtimer_start;
44 ops->stop = timer_stop; 98 ops->stop = oprofile_hrtimer_stop;
45 ops->cpu_type = "timer"; 99 ops->cpu_type = "timer";
100 return 0;
101}
102
103void __exit oprofile_timer_exit(void)
104{
105 unregister_hotcpu_notifier(&oprofile_cpu_notifier);
46} 106}
diff --git a/drivers/parport/parport_cs.c b/drivers/parport/parport_cs.c
index 7dd370fa3439..fd8cfe95f0a3 100644
--- a/drivers/parport/parport_cs.c
+++ b/drivers/parport/parport_cs.c
@@ -75,7 +75,6 @@ INT_MODULE_PARM(epp_mode, 1);
75typedef struct parport_info_t { 75typedef struct parport_info_t {
76 struct pcmcia_device *p_dev; 76 struct pcmcia_device *p_dev;
77 int ndev; 77 int ndev;
78 dev_node_t node;
79 struct parport *port; 78 struct parport *port;
80} parport_info_t; 79} parport_info_t;
81 80
@@ -105,7 +104,6 @@ static int parport_probe(struct pcmcia_device *link)
105 104
106 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 105 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
107 link->io.Attributes2 = IO_DATA_PATH_WIDTH_8; 106 link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
108 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
109 link->conf.Attributes = CONF_ENABLE_IRQ; 107 link->conf.Attributes = CONF_ENABLE_IRQ;
110 link->conf.IntType = INT_MEMORY_AND_IO; 108 link->conf.IntType = INT_MEMORY_AND_IO;
111 109
@@ -174,20 +172,19 @@ static int parport_config(struct pcmcia_device *link)
174 if (ret) 172 if (ret)
175 goto failed; 173 goto failed;
176 174
177 ret = pcmcia_request_irq(link, &link->irq); 175 if (!link->irq)
178 if (ret)
179 goto failed; 176 goto failed;
180 ret = pcmcia_request_configuration(link, &link->conf); 177 ret = pcmcia_request_configuration(link, &link->conf);
181 if (ret) 178 if (ret)
182 goto failed; 179 goto failed;
183 180
184 p = parport_pc_probe_port(link->io.BasePort1, link->io.BasePort2, 181 p = parport_pc_probe_port(link->io.BasePort1, link->io.BasePort2,
185 link->irq.AssignedIRQ, PARPORT_DMA_NONE, 182 link->irq, PARPORT_DMA_NONE,
186 &link->dev, IRQF_SHARED); 183 &link->dev, IRQF_SHARED);
187 if (p == NULL) { 184 if (p == NULL) {
188 printk(KERN_NOTICE "parport_cs: parport_pc_probe_port() at " 185 printk(KERN_NOTICE "parport_cs: parport_pc_probe_port() at "
189 "0x%3x, irq %u failed\n", link->io.BasePort1, 186 "0x%3x, irq %u failed\n", link->io.BasePort1,
190 link->irq.AssignedIRQ); 187 link->irq);
191 goto failed; 188 goto failed;
192 } 189 }
193 190
@@ -195,11 +192,7 @@ static int parport_config(struct pcmcia_device *link)
195 if (epp_mode) 192 if (epp_mode)
196 p->modes |= PARPORT_MODE_TRISTATE | PARPORT_MODE_EPP; 193 p->modes |= PARPORT_MODE_TRISTATE | PARPORT_MODE_EPP;
197 info->ndev = 1; 194 info->ndev = 1;
198 info->node.major = LP_MAJOR;
199 info->node.minor = p->number;
200 info->port = p; 195 info->port = p;
201 strcpy(info->node.dev_name, p->name);
202 link->dev_node = &info->node;
203 196
204 return 0; 197 return 0;
205 198
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
index 4e3e0382c16e..083034710fa6 100644
--- a/drivers/pci/hotplug/rpadlpar_core.c
+++ b/drivers/pci/hotplug/rpadlpar_core.c
@@ -20,6 +20,7 @@
20#include <linux/init.h> 20#include <linux/init.h>
21#include <linux/pci.h> 21#include <linux/pci.h>
22#include <linux/string.h> 22#include <linux/string.h>
23#include <linux/vmalloc.h>
23 24
24#include <asm/pci-bridge.h> 25#include <asm/pci-bridge.h>
25#include <linux/mutex.h> 26#include <linux/mutex.h>
@@ -430,6 +431,8 @@ int dlpar_remove_slot(char *drc_name)
430 rc = dlpar_remove_pci_slot(drc_name, dn); 431 rc = dlpar_remove_pci_slot(drc_name, dn);
431 break; 432 break;
432 } 433 }
434 vm_unmap_aliases();
435
433 printk(KERN_INFO "%s: slot %s removed\n", DLPAR_MODULE_NAME, drc_name); 436 printk(KERN_INFO "%s: slot %s removed\n", DLPAR_MODULE_NAME, drc_name);
434exit: 437exit:
435 mutex_unlock(&rpadlpar_mutex); 438 mutex_unlock(&rpadlpar_mutex);
diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
index 719702240780..ef7411c660b9 100644
--- a/drivers/pci/hotplug/rpaphp_core.c
+++ b/drivers/pci/hotplug/rpaphp_core.c
@@ -29,6 +29,7 @@
29#include <linux/pci_hotplug.h> 29#include <linux/pci_hotplug.h>
30#include <linux/smp.h> 30#include <linux/smp.h>
31#include <linux/init.h> 31#include <linux/init.h>
32#include <linux/vmalloc.h>
32#include <asm/eeh.h> /* for eeh_add_device() */ 33#include <asm/eeh.h> /* for eeh_add_device() */
33#include <asm/rtas.h> /* rtas_call */ 34#include <asm/rtas.h> /* rtas_call */
34#include <asm/pci-bridge.h> /* for pci_controller */ 35#include <asm/pci-bridge.h> /* for pci_controller */
@@ -418,6 +419,8 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
418 return -EINVAL; 419 return -EINVAL;
419 420
420 pcibios_remove_pci_devices(slot->bus); 421 pcibios_remove_pci_devices(slot->bus);
422 vm_unmap_aliases();
423
421 slot->state = NOT_CONFIGURED; 424 slot->state = NOT_CONFIGURED;
422 return 0; 425 return 0;
423} 426}
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 417312528ddf..371dc564e2e4 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -3626,14 +3626,15 @@ static void intel_iommu_detach_device(struct iommu_domain *domain,
3626 domain_remove_one_dev_info(dmar_domain, pdev); 3626 domain_remove_one_dev_info(dmar_domain, pdev);
3627} 3627}
3628 3628
3629static int intel_iommu_map_range(struct iommu_domain *domain, 3629static int intel_iommu_map(struct iommu_domain *domain,
3630 unsigned long iova, phys_addr_t hpa, 3630 unsigned long iova, phys_addr_t hpa,
3631 size_t size, int iommu_prot) 3631 int gfp_order, int iommu_prot)
3632{ 3632{
3633 struct dmar_domain *dmar_domain = domain->priv; 3633 struct dmar_domain *dmar_domain = domain->priv;
3634 u64 max_addr; 3634 u64 max_addr;
3635 int addr_width; 3635 int addr_width;
3636 int prot = 0; 3636 int prot = 0;
3637 size_t size;
3637 int ret; 3638 int ret;
3638 3639
3639 if (iommu_prot & IOMMU_READ) 3640 if (iommu_prot & IOMMU_READ)
@@ -3643,6 +3644,7 @@ static int intel_iommu_map_range(struct iommu_domain *domain,
3643 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping) 3644 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
3644 prot |= DMA_PTE_SNP; 3645 prot |= DMA_PTE_SNP;
3645 3646
3647 size = PAGE_SIZE << gfp_order;
3646 max_addr = iova + size; 3648 max_addr = iova + size;
3647 if (dmar_domain->max_addr < max_addr) { 3649 if (dmar_domain->max_addr < max_addr) {
3648 int min_agaw; 3650 int min_agaw;
@@ -3669,19 +3671,19 @@ static int intel_iommu_map_range(struct iommu_domain *domain,
3669 return ret; 3671 return ret;
3670} 3672}
3671 3673
3672static void intel_iommu_unmap_range(struct iommu_domain *domain, 3674static int intel_iommu_unmap(struct iommu_domain *domain,
3673 unsigned long iova, size_t size) 3675 unsigned long iova, int gfp_order)
3674{ 3676{
3675 struct dmar_domain *dmar_domain = domain->priv; 3677 struct dmar_domain *dmar_domain = domain->priv;
3676 3678 size_t size = PAGE_SIZE << gfp_order;
3677 if (!size)
3678 return;
3679 3679
3680 dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT, 3680 dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
3681 (iova + size - 1) >> VTD_PAGE_SHIFT); 3681 (iova + size - 1) >> VTD_PAGE_SHIFT);
3682 3682
3683 if (dmar_domain->max_addr == iova + size) 3683 if (dmar_domain->max_addr == iova + size)
3684 dmar_domain->max_addr = iova; 3684 dmar_domain->max_addr = iova;
3685
3686 return gfp_order;
3685} 3687}
3686 3688
3687static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, 3689static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -3714,8 +3716,8 @@ static struct iommu_ops intel_iommu_ops = {
3714 .domain_destroy = intel_iommu_domain_destroy, 3716 .domain_destroy = intel_iommu_domain_destroy,
3715 .attach_dev = intel_iommu_attach_device, 3717 .attach_dev = intel_iommu_attach_device,
3716 .detach_dev = intel_iommu_detach_device, 3718 .detach_dev = intel_iommu_detach_device,
3717 .map = intel_iommu_map_range, 3719 .map = intel_iommu_map,
3718 .unmap = intel_iommu_unmap_range, 3720 .unmap = intel_iommu_unmap,
3719 .iova_to_phys = intel_iommu_iova_to_phys, 3721 .iova_to_phys = intel_iommu_iova_to_phys,
3720 .domain_has_cap = intel_iommu_domain_has_cap, 3722 .domain_has_cap = intel_iommu_domain_has_cap,
3721}; 3723};
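
The VT-d driver is adapted to the IOMMU API change from byte sizes to page orders: the map and unmap callbacks receive gfp_order, the driver computes size = PAGE_SIZE << gfp_order, and unmap returns the order it actually cleared. Assuming the generic iommu_map()/iommu_unmap() wrappers of this kernel take the same order argument, a caller would look roughly like:

/* Map 'size' bytes; size must be a power-of-two multiple of PAGE_SIZE. */
static int map_region(struct iommu_domain *dom, unsigned long iova,
		      phys_addr_t paddr, size_t size, int prot)
{
	int order = get_order(size);	/* size == PAGE_SIZE << order */

	return iommu_map(dom, iova, paddr, order, prot);
}

static void unmap_region(struct iommu_domain *dom, unsigned long iova,
			 size_t size)
{
	int order = iommu_unmap(dom, iova, get_order(size));

	/* The IOMMU driver reports how much it really unmapped. */
	WARN_ON(order < 0);
}
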
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 9a3d3309f896..1df7c508814e 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -679,7 +679,7 @@ static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
679 */ 679 */
680int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state) 680int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
681{ 681{
682 return state > PCI_D0 ? 682 return state >= PCI_D0 ?
683 pci_platform_power_transition(dev, state) : -EINVAL; 683 pci_platform_power_transition(dev, state) : -EINVAL;
684} 684}
685EXPORT_SYMBOL_GPL(__pci_complete_power_transition); 685EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
@@ -716,10 +716,6 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
716 */ 716 */
717 return 0; 717 return 0;
718 718
719 /* Check if we're already there */
720 if (dev->current_state == state)
721 return 0;
722
723 __pci_start_power_transition(dev, state); 719 __pci_start_power_transition(dev, state);
724 720
725 /* This device is quirked not to be put into D3, so 721 /* This device is quirked not to be put into D3, so
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index aa495ad9bbd4..7a711ee314b7 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -244,11 +244,17 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
244 244
245 /* Assert Secondary Bus Reset */ 245 /* Assert Secondary Bus Reset */
246 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &p2p_ctrl); 246 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &p2p_ctrl);
247 p2p_ctrl |= PCI_CB_BRIDGE_CTL_CB_RESET; 247 p2p_ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
248 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl); 248 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl);
249 249
250 /*
251 * We should send the hot reset message for 2 ms to allow it time to
252 * propagate to all downstream ports.
253 */
254 msleep(2);
255
250 /* De-assert Secondary Bus Reset */ 256 /* De-assert Secondary Bus Reset */
251 p2p_ctrl &= ~PCI_CB_BRIDGE_CTL_CB_RESET; 257 p2p_ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
252 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl); 258 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl);
253 259
254 /* 260 /*
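
The AER root-port reset switches from the CardBus-specific PCI_CB_BRIDGE_CTL_CB_RESET bit to the generic PCI_BRIDGE_CTL_BUS_RESET and keeps the reset asserted for 2 ms so the hot reset can propagate to all downstream ports. Pulled out of the handler, the toggle amounts to:

static void assert_secondary_bus_reset(struct pci_dev *bridge)
{
	u16 ctrl;

	pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, &ctrl);
	ctrl |= PCI_BRIDGE_CTL_BUS_RESET;	/* assert */
	pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, ctrl);

	msleep(2);	/* let the hot reset propagate downstream */

	ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;	/* de-assert */
	pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, ctrl);
}
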
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 882bd8d29fe3..c82548afcd5c 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -174,19 +174,14 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
174 pci_read_config_dword(dev, pos, &sz); 174 pci_read_config_dword(dev, pos, &sz);
175 pci_write_config_dword(dev, pos, l); 175 pci_write_config_dword(dev, pos, l);
176 176
177 if (!sz)
178 goto fail; /* BAR not implemented */
179
180 /* 177 /*
181 * All bits set in sz means the device isn't working properly. 178 * All bits set in sz means the device isn't working properly.
182 * If it's a memory BAR or a ROM, bit 0 must be clear; if it's 179 * If the BAR isn't implemented, all bits must be 0. If it's a
183 * an io BAR, bit 1 must be clear. 180 * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
181 * 1 must be clear.
184 */ 182 */
185 if (sz == 0xffffffff) { 183 if (!sz || sz == 0xffffffff)
186 dev_err(&dev->dev, "reg %x: invalid size %#x; broken device?\n",
187 pos, sz);
188 goto fail; 184 goto fail;
189 }
190 185
191 /* 186 /*
192 * I don't know how l can have all bits set. Copied from old code. 187 * I don't know how l can have all bits set. Copied from old code.
@@ -249,17 +244,13 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
249 pos, res); 244 pos, res);
250 } 245 }
251 } else { 246 } else {
252 u32 size = pci_size(l, sz, mask); 247 sz = pci_size(l, sz, mask);
253 248
254 if (!size) { 249 if (!sz)
255 dev_err(&dev->dev, "reg %x: invalid size "
256 "(l %#x sz %#x mask %#x); broken device?",
257 pos, l, sz, mask);
258 goto fail; 250 goto fail;
259 }
260 251
261 res->start = l; 252 res->start = l;
262 res->end = l + size; 253 res->end = l + sz;
263 254
264 dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", pos, res); 255 dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", pos, res);
265 } 256 }
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 4fe36d2e1049..19b111383f62 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -838,65 +838,11 @@ static void pci_bus_dump_resources(struct pci_bus *bus)
838 } 838 }
839} 839}
840 840
841static int __init pci_bus_get_depth(struct pci_bus *bus)
842{
843 int depth = 0;
844 struct pci_dev *dev;
845
846 list_for_each_entry(dev, &bus->devices, bus_list) {
847 int ret;
848 struct pci_bus *b = dev->subordinate;
849 if (!b)
850 continue;
851
852 ret = pci_bus_get_depth(b);
853 if (ret + 1 > depth)
854 depth = ret + 1;
855 }
856
857 return depth;
858}
859static int __init pci_get_max_depth(void)
860{
861 int depth = 0;
862 struct pci_bus *bus;
863
864 list_for_each_entry(bus, &pci_root_buses, node) {
865 int ret;
866
867 ret = pci_bus_get_depth(bus);
868 if (ret > depth)
869 depth = ret;
870 }
871
872 return depth;
873}
874
875/*
876 * first try will not touch pci bridge res
877 * second and later try will clear small leaf bridge res
878 * will stop till to the max deepth if can not find good one
879 */
880void __init 841void __init
881pci_assign_unassigned_resources(void) 842pci_assign_unassigned_resources(void)
882{ 843{
883 struct pci_bus *bus; 844 struct pci_bus *bus;
884 int tried_times = 0;
885 enum release_type rel_type = leaf_only;
886 struct resource_list_x head, *list;
887 unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
888 IORESOURCE_PREFETCH;
889 unsigned long failed_type;
890 int max_depth = pci_get_max_depth();
891 int pci_try_num;
892 845
893 head.next = NULL;
894
895 pci_try_num = max_depth + 1;
896 printk(KERN_DEBUG "PCI: max bus depth: %d pci_try_num: %d\n",
897 max_depth, pci_try_num);
898
899again:
900 /* Depth first, calculate sizes and alignments of all 846 /* Depth first, calculate sizes and alignments of all
901 subordinate buses. */ 847 subordinate buses. */
902 list_for_each_entry(bus, &pci_root_buses, node) { 848 list_for_each_entry(bus, &pci_root_buses, node) {
@@ -904,65 +850,9 @@ again:
904 } 850 }
905 /* Depth last, allocate resources and update the hardware. */ 851 /* Depth last, allocate resources and update the hardware. */
906 list_for_each_entry(bus, &pci_root_buses, node) { 852 list_for_each_entry(bus, &pci_root_buses, node) {
907 __pci_bus_assign_resources(bus, &head); 853 pci_bus_assign_resources(bus);
908 }
909 tried_times++;
910
911 /* any device complain? */
912 if (!head.next)
913 goto enable_and_dump;
914 failed_type = 0;
915 for (list = head.next; list;) {
916 failed_type |= list->flags;
917 list = list->next;
918 }
919 /*
920 * io port are tight, don't try extra
921 * or if reach the limit, don't want to try more
922 */
923 failed_type &= type_mask;
924 if ((failed_type == IORESOURCE_IO) || (tried_times >= pci_try_num)) {
925 free_failed_list(&head);
926 goto enable_and_dump;
927 }
928
929 printk(KERN_DEBUG "PCI: No. %d try to assign unassigned res\n",
930 tried_times + 1);
931
932 /* third times and later will not check if it is leaf */
933 if ((tried_times + 1) > 2)
934 rel_type = whole_subtree;
935
936 /*
937 * Try to release leaf bridge's resources that doesn't fit resource of
938 * child device under that bridge
939 */
940 for (list = head.next; list;) {
941 bus = list->dev->bus;
942 pci_bus_release_bridge_resources(bus, list->flags & type_mask,
943 rel_type);
944 list = list->next;
945 }
946 /* restore size and flags */
947 for (list = head.next; list;) {
948 struct resource *res = list->res;
949
950 res->start = list->start;
951 res->end = list->end;
952 res->flags = list->flags;
953 if (list->dev->subordinate)
954 res->flags = 0;
955
956 list = list->next;
957 }
958 free_failed_list(&head);
959
960 goto again;
961
962enable_and_dump:
963 /* Depth last, update the hardware. */
964 list_for_each_entry(bus, &pci_root_buses, node)
965 pci_enable_bridges(bus); 854 pci_enable_bridges(bus);
855 }
966 856
967 /* dump the resource on buses */ 857 /* dump the resource on buses */
968 list_for_each_entry(bus, &pci_root_buses, node) { 858 list_for_each_entry(bus, &pci_root_buses, node) {
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index d189e4743e69..d0f5ad306078 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -49,26 +49,6 @@ config PCMCIA_LOAD_CIS
49 49
50 If unsure, say Y. 50 If unsure, say Y.
51 51
52config PCMCIA_IOCTL
53 bool "PCMCIA control ioctl (obsolete)"
54 depends on PCMCIA && ARM && !SMP && !PREEMPT
55 default y
56 help
57 If you say Y here, the deprecated ioctl interface to the PCMCIA
58 subsystem will be built. It is needed by the deprecated pcmcia-cs
59 tools (cardmgr, cardctl) to function properly.
60
61 You should use the new pcmciautils package instead (see
62 <file:Documentation/Changes> for location and details).
63
64 This config option will most likely be removed from kernel 2.6.35,
65 the associated code from kernel 2.6.36.
66
67 As the PCMCIA ioctl is not locking safe, it depends on !SMP and
68 !PREEMPT.
69
70 If unsure, say N.
71
72config CARDBUS 52config CARDBUS
73 bool "32-bit CardBus support" 53 bool "32-bit CardBus support"
74 depends on PCI 54 depends on PCI
@@ -234,7 +214,8 @@ config PCMCIA_PXA2XX
234 depends on ARM && ARCH_PXA && PCMCIA 214 depends on ARM && ARCH_PXA && PCMCIA
235 depends on (ARCH_LUBBOCK || MACH_MAINSTONE || PXA_SHARPSL \ 215 depends on (ARCH_LUBBOCK || MACH_MAINSTONE || PXA_SHARPSL \
236 || MACH_ARMCORE || ARCH_PXA_PALM || TRIZEPS_PCMCIA \ 216 || MACH_ARMCORE || ARCH_PXA_PALM || TRIZEPS_PCMCIA \
237 || ARCOM_PCMCIA || ARCH_PXA_ESERIES || MACH_STARGATE2) 217 || ARCOM_PCMCIA || ARCH_PXA_ESERIES || MACH_STARGATE2 \
218 || MACH_VPAC270)
238 select PCMCIA_SOC_COMMON 219 select PCMCIA_SOC_COMMON
239 help 220 help
240 Say Y here to include support for the PXA2xx PCMCIA controller 221 Say Y here to include support for the PXA2xx PCMCIA controller
@@ -317,7 +298,7 @@ config ELECTRA_CF
317 PA Semi Electra eval board. 298 PA Semi Electra eval board.
318 299
319config PCCARD_NONSTATIC 300config PCCARD_NONSTATIC
320 tristate 301 bool
321 302
322config PCCARD_IODYN 303config PCCARD_IODYN
323 bool 304 bool
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index 381b031d9d75..d006e8beab9c 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -2,15 +2,18 @@
2# Makefile for the kernel pcmcia subsystem (c/o David Hinds) 2# Makefile for the kernel pcmcia subsystem (c/o David Hinds)
3# 3#
4 4
5pcmcia_core-y += cs.o rsrc_mgr.o socket_sysfs.o 5pcmcia_core-y += cs.o socket_sysfs.o
6pcmcia_core-$(CONFIG_CARDBUS) += cardbus.o 6pcmcia_core-$(CONFIG_CARDBUS) += cardbus.o
7obj-$(CONFIG_PCCARD) += pcmcia_core.o 7obj-$(CONFIG_PCCARD) += pcmcia_core.o
8 8
9pcmcia-y += ds.o pcmcia_resource.o cistpl.o 9pcmcia-y += ds.o pcmcia_resource.o cistpl.o pcmcia_cis.o
10pcmcia-$(CONFIG_PCMCIA_IOCTL) += pcmcia_ioctl.o 10pcmcia-$(CONFIG_PCMCIA_IOCTL) += pcmcia_ioctl.o
11obj-$(CONFIG_PCMCIA) += pcmcia.o 11obj-$(CONFIG_PCMCIA) += pcmcia.o
12 12
13obj-$(CONFIG_PCCARD_NONSTATIC) += rsrc_nonstatic.o 13pcmcia_rsrc-y += rsrc_mgr.o
14pcmcia_rsrc-$(CONFIG_PCCARD_NONSTATIC) += rsrc_nonstatic.o
15pcmcia_rsrc-$(CONFIG_PCCARD_IODYN) += rsrc_iodyn.o
16obj-$(CONFIG_PCCARD) += pcmcia_rsrc.o
14 17
15 18
16# socket drivers 19# socket drivers
@@ -66,6 +69,7 @@ pxa2xx-obj-$(CONFIG_MACH_PALMTC) += pxa2xx_palmtc.o
66pxa2xx-obj-$(CONFIG_MACH_PALMLD) += pxa2xx_palmld.o 69pxa2xx-obj-$(CONFIG_MACH_PALMLD) += pxa2xx_palmld.o
67pxa2xx-obj-$(CONFIG_MACH_E740) += pxa2xx_e740.o 70pxa2xx-obj-$(CONFIG_MACH_E740) += pxa2xx_e740.o
68pxa2xx-obj-$(CONFIG_MACH_STARGATE2) += pxa2xx_stargate2.o 71pxa2xx-obj-$(CONFIG_MACH_STARGATE2) += pxa2xx_stargate2.o
72pxa2xx-obj-$(CONFIG_MACH_VPAC270) += pxa2xx_vpac270.o
69 73
70obj-$(CONFIG_PCMCIA_PXA2XX) += pxa2xx_base.o $(pxa2xx-obj-y) 74obj-$(CONFIG_PCMCIA_PXA2XX) += pxa2xx_base.o $(pxa2xx-obj-y)
71 75
diff --git a/drivers/pcmcia/bfin_cf_pcmcia.c b/drivers/pcmcia/bfin_cf_pcmcia.c
index 9e84d039de41..eae9cbe37a3e 100644
--- a/drivers/pcmcia/bfin_cf_pcmcia.c
+++ b/drivers/pcmcia/bfin_cf_pcmcia.c
@@ -113,7 +113,7 @@ static int bfin_cf_get_status(struct pcmcia_socket *s, u_int *sp)
113 113
114 if (bfin_cf_present(cf->cd_pfx)) { 114 if (bfin_cf_present(cf->cd_pfx)) {
115 *sp = SS_READY | SS_DETECT | SS_POWERON | SS_3VCARD; 115 *sp = SS_READY | SS_DETECT | SS_POWERON | SS_3VCARD;
116 s->irq.AssignedIRQ = 0; 116 s->pcmcia_irq = 0;
117 s->pci_irq = cf->irq; 117 s->pci_irq = cf->irq;
118 118
119 } else 119 } else
diff --git a/drivers/pcmcia/cardbus.c b/drivers/pcmcia/cardbus.c
index e6ab2a47d8cb..9a58862f1401 100644
--- a/drivers/pcmcia/cardbus.c
+++ b/drivers/pcmcia/cardbus.c
@@ -94,7 +94,6 @@ int __ref cb_alloc(struct pcmcia_socket *s)
94 pci_enable_bridges(bus); 94 pci_enable_bridges(bus);
95 pci_bus_add_devices(bus); 95 pci_bus_add_devices(bus);
96 96
97 s->irq.AssignedIRQ = s->pci_irq;
98 return 0; 97 return 0;
99} 98}
100 99
diff --git a/drivers/pcmcia/cistpl.c b/drivers/pcmcia/cistpl.c
index 854959cada3a..60d428be0b07 100644
--- a/drivers/pcmcia/cistpl.c
+++ b/drivers/pcmcia/cistpl.c
@@ -129,6 +129,8 @@ static void __iomem *set_cis_map(struct pcmcia_socket *s,
129 129
130/** 130/**
131 * pcmcia_read_cis_mem() - low-level function to read CIS memory 131 * pcmcia_read_cis_mem() - low-level function to read CIS memory
132 *
133 * must be called with ops_mutex held
132 */ 134 */
133int pcmcia_read_cis_mem(struct pcmcia_socket *s, int attr, u_int addr, 135int pcmcia_read_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
134 u_int len, void *ptr) 136 u_int len, void *ptr)
@@ -138,7 +140,6 @@ int pcmcia_read_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
138 140
139 dev_dbg(&s->dev, "pcmcia_read_cis_mem(%d, %#x, %u)\n", attr, addr, len); 141 dev_dbg(&s->dev, "pcmcia_read_cis_mem(%d, %#x, %u)\n", attr, addr, len);
140 142
141 mutex_lock(&s->ops_mutex);
142 if (attr & IS_INDIRECT) { 143 if (attr & IS_INDIRECT) {
143 /* Indirect accesses use a bunch of special registers at fixed 144 /* Indirect accesses use a bunch of special registers at fixed
144 locations in common memory */ 145 locations in common memory */
@@ -153,7 +154,6 @@ int pcmcia_read_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
153 if (!sys) { 154 if (!sys) {
154 dev_dbg(&s->dev, "could not map memory\n"); 155 dev_dbg(&s->dev, "could not map memory\n");
155 memset(ptr, 0xff, len); 156 memset(ptr, 0xff, len);
156 mutex_unlock(&s->ops_mutex);
157 return -1; 157 return -1;
158 } 158 }
159 159
@@ -184,7 +184,6 @@ int pcmcia_read_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
184 if (!sys) { 184 if (!sys) {
185 dev_dbg(&s->dev, "could not map memory\n"); 185 dev_dbg(&s->dev, "could not map memory\n");
186 memset(ptr, 0xff, len); 186 memset(ptr, 0xff, len);
187 mutex_unlock(&s->ops_mutex);
188 return -1; 187 return -1;
189 } 188 }
190 end = sys + s->map_size; 189 end = sys + s->map_size;
@@ -198,7 +197,6 @@ int pcmcia_read_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
198 addr = 0; 197 addr = 0;
199 } 198 }
200 } 199 }
201 mutex_unlock(&s->ops_mutex);
202 dev_dbg(&s->dev, " %#2.2x %#2.2x %#2.2x %#2.2x ...\n", 200 dev_dbg(&s->dev, " %#2.2x %#2.2x %#2.2x %#2.2x ...\n",
203 *(u_char *)(ptr+0), *(u_char *)(ptr+1), 201 *(u_char *)(ptr+0), *(u_char *)(ptr+1),
204 *(u_char *)(ptr+2), *(u_char *)(ptr+3)); 202 *(u_char *)(ptr+2), *(u_char *)(ptr+3));
@@ -209,7 +207,8 @@ int pcmcia_read_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
209/** 207/**
210 * pcmcia_write_cis_mem() - low-level function to write CIS memory 208 * pcmcia_write_cis_mem() - low-level function to write CIS memory
211 * 209 *
212 * Probably only useful for writing one-byte registers. 210 * Probably only useful for writing one-byte registers. Must be called
211 * with ops_mutex held.
213 */ 212 */
214void pcmcia_write_cis_mem(struct pcmcia_socket *s, int attr, u_int addr, 213void pcmcia_write_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
215 u_int len, void *ptr) 214 u_int len, void *ptr)
@@ -220,7 +219,6 @@ void pcmcia_write_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
220 dev_dbg(&s->dev, 219 dev_dbg(&s->dev,
221 "pcmcia_write_cis_mem(%d, %#x, %u)\n", attr, addr, len); 220 "pcmcia_write_cis_mem(%d, %#x, %u)\n", attr, addr, len);
222 221
223 mutex_lock(&s->ops_mutex);
224 if (attr & IS_INDIRECT) { 222 if (attr & IS_INDIRECT) {
225 /* Indirect accesses use a bunch of special registers at fixed 223 /* Indirect accesses use a bunch of special registers at fixed
226 locations in common memory */ 224 locations in common memory */
@@ -234,7 +232,6 @@ void pcmcia_write_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
234 ((cis_width) ? MAP_16BIT : 0)); 232 ((cis_width) ? MAP_16BIT : 0));
235 if (!sys) { 233 if (!sys) {
236 dev_dbg(&s->dev, "could not map memory\n"); 234 dev_dbg(&s->dev, "could not map memory\n");
237 mutex_unlock(&s->ops_mutex);
238 return; /* FIXME: Error */ 235 return; /* FIXME: Error */
239 } 236 }
240 237
@@ -260,7 +257,6 @@ void pcmcia_write_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
260 sys = set_cis_map(s, card_offset, flags); 257 sys = set_cis_map(s, card_offset, flags);
261 if (!sys) { 258 if (!sys) {
262 dev_dbg(&s->dev, "could not map memory\n"); 259 dev_dbg(&s->dev, "could not map memory\n");
263 mutex_unlock(&s->ops_mutex);
264 return; /* FIXME: error */ 260 return; /* FIXME: error */
265 } 261 }
266 262
@@ -275,7 +271,6 @@ void pcmcia_write_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
275 addr = 0; 271 addr = 0;
276 } 272 }
277 } 273 }
278 mutex_unlock(&s->ops_mutex);
279} 274}
280 275
281 276
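With the locking moved out of the low-level accessors, callers are now expected to hold ops_mutex around pcmcia_read_cis_mem()/pcmcia_write_cis_mem(), as the pcmcia_ioctl.c hunks further down do. A minimal sketch of the calling pattern (config_base stands in for the function's configuration register base):

        u_char reg;

        mutex_lock(&s->ops_mutex);
        pcmcia_read_cis_mem(s, 1, (config_base + CISREG_PRR) >> 1, 1, &reg);
        mutex_unlock(&s->ops_mutex);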
@@ -314,7 +309,6 @@ static int read_cis_cache(struct pcmcia_socket *s, int attr, u_int addr,
314 return 0; 309 return 0;
315 } 310 }
316 } 311 }
317 mutex_unlock(&s->ops_mutex);
318 312
319 ret = pcmcia_read_cis_mem(s, attr, addr, len, ptr); 313 ret = pcmcia_read_cis_mem(s, attr, addr, len, ptr);
320 314
@@ -326,11 +320,11 @@ static int read_cis_cache(struct pcmcia_socket *s, int attr, u_int addr,
326 cis->len = len; 320 cis->len = len;
327 cis->attr = attr; 321 cis->attr = attr;
328 memcpy(cis->cache, ptr, len); 322 memcpy(cis->cache, ptr, len);
329 mutex_lock(&s->ops_mutex);
330 list_add(&cis->node, &s->cis_cache); 323 list_add(&cis->node, &s->cis_cache);
331 mutex_unlock(&s->ops_mutex);
332 } 324 }
333 } 325 }
326 mutex_unlock(&s->ops_mutex);
327
334 return ret; 328 return ret;
335} 329}
336 330
@@ -386,6 +380,7 @@ int verify_cis_cache(struct pcmcia_socket *s)
386 "no memory for verifying CIS\n"); 380 "no memory for verifying CIS\n");
387 return -ENOMEM; 381 return -ENOMEM;
388 } 382 }
383 mutex_lock(&s->ops_mutex);
389 list_for_each_entry(cis, &s->cis_cache, node) { 384 list_for_each_entry(cis, &s->cis_cache, node) {
390 int len = cis->len; 385 int len = cis->len;
391 386
@@ -395,10 +390,12 @@ int verify_cis_cache(struct pcmcia_socket *s)
395 ret = pcmcia_read_cis_mem(s, cis->attr, cis->addr, len, buf); 390 ret = pcmcia_read_cis_mem(s, cis->attr, cis->addr, len, buf);
396 if (ret || memcmp(buf, cis->cache, len) != 0) { 391 if (ret || memcmp(buf, cis->cache, len) != 0) {
397 kfree(buf); 392 kfree(buf);
393 mutex_unlock(&s->ops_mutex);
398 return -1; 394 return -1;
399 } 395 }
400 } 396 }
401 kfree(buf); 397 kfree(buf);
398 mutex_unlock(&s->ops_mutex);
402 return 0; 399 return 0;
403} 400}
404 401
@@ -1362,106 +1359,6 @@ EXPORT_SYMBOL(pcmcia_parse_tuple);
1362 1359
1363 1360
1364/** 1361/**
1365 * pccard_read_tuple() - internal CIS tuple access
1366 * @s: the struct pcmcia_socket where the card is inserted
1367 * @function: the device function we loop for
1368 * @code: which CIS code shall we look for?
1369 * @parse: buffer where the tuple shall be parsed (or NULL, if no parse)
1370 *
1371 * pccard_read_tuple() reads out one tuple and attempts to parse it
1372 */
1373int pccard_read_tuple(struct pcmcia_socket *s, unsigned int function,
1374 cisdata_t code, void *parse)
1375{
1376 tuple_t tuple;
1377 cisdata_t *buf;
1378 int ret;
1379
1380 buf = kmalloc(256, GFP_KERNEL);
1381 if (buf == NULL) {
1382 dev_printk(KERN_WARNING, &s->dev, "no memory to read tuple\n");
1383 return -ENOMEM;
1384 }
1385 tuple.DesiredTuple = code;
1386 tuple.Attributes = 0;
1387 if (function == BIND_FN_ALL)
1388 tuple.Attributes = TUPLE_RETURN_COMMON;
1389 ret = pccard_get_first_tuple(s, function, &tuple);
1390 if (ret != 0)
1391 goto done;
1392 tuple.TupleData = buf;
1393 tuple.TupleOffset = 0;
1394 tuple.TupleDataMax = 255;
1395 ret = pccard_get_tuple_data(s, &tuple);
1396 if (ret != 0)
1397 goto done;
1398 ret = pcmcia_parse_tuple(&tuple, parse);
1399done:
1400 kfree(buf);
1401 return ret;
1402}
1403
1404
1405/**
1406 * pccard_loop_tuple() - loop over tuples in the CIS
1407 * @s: the struct pcmcia_socket where the card is inserted
1408 * @function: the device function we loop for
1409 * @code: which CIS code shall we look for?
1410 * @parse: buffer where the tuple shall be parsed (or NULL, if no parse)
1411 * @priv_data: private data to be passed to the loop_tuple function.
1412 * @loop_tuple: function to call for each CIS entry of type @function. IT
1413 * gets passed the raw tuple, the paresed tuple (if @parse is
1414 * set) and @priv_data.
1415 *
1416 * pccard_loop_tuple() loops over all CIS entries of type @function, and
1417 * calls the @loop_tuple function for each entry. If the call to @loop_tuple
1418 * returns 0, the loop exits. Returns 0 on success or errorcode otherwise.
1419 */
1420int pccard_loop_tuple(struct pcmcia_socket *s, unsigned int function,
1421 cisdata_t code, cisparse_t *parse, void *priv_data,
1422 int (*loop_tuple) (tuple_t *tuple,
1423 cisparse_t *parse,
1424 void *priv_data))
1425{
1426 tuple_t tuple;
1427 cisdata_t *buf;
1428 int ret;
1429
1430 buf = kzalloc(256, GFP_KERNEL);
1431 if (buf == NULL) {
1432 dev_printk(KERN_WARNING, &s->dev, "no memory to read tuple\n");
1433 return -ENOMEM;
1434 }
1435
1436 tuple.TupleData = buf;
1437 tuple.TupleDataMax = 255;
1438 tuple.TupleOffset = 0;
1439 tuple.DesiredTuple = code;
1440 tuple.Attributes = 0;
1441
1442 ret = pccard_get_first_tuple(s, function, &tuple);
1443 while (!ret) {
1444 if (pccard_get_tuple_data(s, &tuple))
1445 goto next_entry;
1446
1447 if (parse)
1448 if (pcmcia_parse_tuple(&tuple, parse))
1449 goto next_entry;
1450
1451 ret = loop_tuple(&tuple, parse, priv_data);
1452 if (!ret)
1453 break;
1454
1455next_entry:
1456 ret = pccard_get_next_tuple(s, function, &tuple);
1457 }
1458
1459 kfree(buf);
1460 return ret;
1461}
1462
1463
1464/**
1465 * pccard_validate_cis() - check whether card has a sensible CIS 1362 * pccard_validate_cis() - check whether card has a sensible CIS
1466 * @s: the struct pcmcia_socket we are to check 1363 * @s: the struct pcmcia_socket we are to check
1467 * @info: returns the number of tuples in the (valid) CIS, or 0 1364 * @info: returns the number of tuples in the (valid) CIS, or 0
diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c
index 75ed866e6953..976d80706eae 100644
--- a/drivers/pcmcia/cs.c
+++ b/drivers/pcmcia/cs.c
@@ -337,7 +337,6 @@ static void socket_shutdown(struct pcmcia_socket *s)
337 s->socket = dead_socket; 337 s->socket = dead_socket;
338 s->ops->init(s); 338 s->ops->init(s);
339 s->ops->set_socket(s, &s->socket); 339 s->ops->set_socket(s, &s->socket);
340 s->irq.AssignedIRQ = s->irq.Config = 0;
341 s->lock_count = 0; 340 s->lock_count = 0;
342 kfree(s->fake_cis); 341 kfree(s->fake_cis);
343 s->fake_cis = NULL; 342 s->fake_cis = NULL;
@@ -671,20 +670,22 @@ static int pccardd(void *__skt)
671 socket_remove(skt); 670 socket_remove(skt);
672 if (sysfs_events & PCMCIA_UEVENT_INSERT) 671 if (sysfs_events & PCMCIA_UEVENT_INSERT)
673 socket_insert(skt); 672 socket_insert(skt);
674 if ((sysfs_events & PCMCIA_UEVENT_RESUME) &&
675 !(skt->state & SOCKET_CARDBUS)) {
676 ret = socket_resume(skt);
677 if (!ret && skt->callback)
678 skt->callback->resume(skt);
679 }
680 if ((sysfs_events & PCMCIA_UEVENT_SUSPEND) && 673 if ((sysfs_events & PCMCIA_UEVENT_SUSPEND) &&
681 !(skt->state & SOCKET_CARDBUS)) { 674 !(skt->state & SOCKET_CARDBUS)) {
682 if (skt->callback) 675 if (skt->callback)
683 ret = skt->callback->suspend(skt); 676 ret = skt->callback->suspend(skt);
684 else 677 else
685 ret = 0; 678 ret = 0;
686 if (!ret) 679 if (!ret) {
687 socket_suspend(skt); 680 socket_suspend(skt);
681 msleep(100);
682 }
683 }
684 if ((sysfs_events & PCMCIA_UEVENT_RESUME) &&
685 !(skt->state & SOCKET_CARDBUS)) {
686 ret = socket_resume(skt);
687 if (!ret && skt->callback)
688 skt->callback->resume(skt);
688 } 689 }
689 if ((sysfs_events & PCMCIA_UEVENT_REQUERY) && 690 if ((sysfs_events & PCMCIA_UEVENT_REQUERY) &&
690 !(skt->state & SOCKET_CARDBUS)) { 691 !(skt->state & SOCKET_CARDBUS)) {
diff --git a/drivers/pcmcia/cs_internal.h b/drivers/pcmcia/cs_internal.h
index f95864c2191e..4126a75445ea 100644
--- a/drivers/pcmcia/cs_internal.h
+++ b/drivers/pcmcia/cs_internal.h
@@ -52,13 +52,11 @@ struct cis_cache_entry {
52 52
53struct pccard_resource_ops { 53struct pccard_resource_ops {
54 int (*validate_mem) (struct pcmcia_socket *s); 54 int (*validate_mem) (struct pcmcia_socket *s);
55 int (*adjust_io_region) (struct resource *res, 55 int (*find_io) (struct pcmcia_socket *s,
56 unsigned long r_start, 56 unsigned int attr,
57 unsigned long r_end, 57 unsigned int *base,
58 struct pcmcia_socket *s); 58 unsigned int num,
59 struct resource* (*find_io) (unsigned long base, int num, 59 unsigned int align);
60 unsigned long align,
61 struct pcmcia_socket *s);
62 struct resource* (*find_mem) (unsigned long base, unsigned long num, 60 struct resource* (*find_mem) (unsigned long base, unsigned long num,
63 unsigned long align, int low, 61 unsigned long align, int low,
64 struct pcmcia_socket *s); 62 struct pcmcia_socket *s);
@@ -89,6 +87,14 @@ struct pccard_resource_ops {
89 87
90 88
91/* 89/*
90 * Stuff internal to module "pcmcia_rsrc":
91 */
92extern int static_init(struct pcmcia_socket *s);
93extern struct resource *pcmcia_make_resource(unsigned long start,
94 unsigned long end,
95 int flags, const char *name);
96
97/*
92 * Stuff internal to module "pcmcia_core": 98 * Stuff internal to module "pcmcia_core":
93 */ 99 */
94 100
@@ -149,6 +155,8 @@ extern struct resource *pcmcia_find_mem_region(u_long base,
149 int low, 155 int low,
150 struct pcmcia_socket *s); 156 struct pcmcia_socket *s);
151 157
158void pcmcia_cleanup_irq(struct pcmcia_socket *s);
159int pcmcia_setup_irq(struct pcmcia_device *p_dev);
152 160
153/* cistpl.c */ 161/* cistpl.c */
154extern struct bin_attribute pccard_cis_attr; 162extern struct bin_attribute pccard_cis_attr;
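The find_io() hook now takes the socket, the window attributes and an in/out base instead of returning a struct resource. A hedged sketch of the shape a resource backend could take under the new prototype; the static-map branch mirrors the code removed from alloc_io_space() below, the rest is placeholder:

static int example_find_io(struct pcmcia_socket *s, unsigned int attr,
                           unsigned int *base, unsigned int num,
                           unsigned int align)
{
        /* static-map sockets only apply the fixed I/O offset */
        if ((s->features & SS_CAP_STATIC_MAP) && s->io_offset) {
                *base = s->io_offset | (*base & 0x0fff);
                return 0;
        }

        /* a dynamic backend would search for and claim a free I/O window
         * of size num, aligned to align, honouring attr */
        return -EINVAL;
}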
diff --git a/drivers/pcmcia/db1xxx_ss.c b/drivers/pcmcia/db1xxx_ss.c
index 2d48196a48cd..0f4cc3f00028 100644
--- a/drivers/pcmcia/db1xxx_ss.c
+++ b/drivers/pcmcia/db1xxx_ss.c
@@ -146,7 +146,6 @@ static irqreturn_t db1200_pcmcia_cdirq(int irq, void *data)
146static int db1x_pcmcia_setup_irqs(struct db1x_pcmcia_sock *sock) 146static int db1x_pcmcia_setup_irqs(struct db1x_pcmcia_sock *sock)
147{ 147{
148 int ret; 148 int ret;
149 unsigned long flags;
150 149
151 if (sock->stschg_irq != -1) { 150 if (sock->stschg_irq != -1) {
152 ret = request_irq(sock->stschg_irq, db1000_pcmcia_stschgirq, 151 ret = request_irq(sock->stschg_irq, db1000_pcmcia_stschgirq,
@@ -162,30 +161,23 @@ static int db1x_pcmcia_setup_irqs(struct db1x_pcmcia_sock *sock)
162 * active one disabled. 161 * active one disabled.
163 */ 162 */
164 if (sock->board_type == BOARD_TYPE_DB1200) { 163 if (sock->board_type == BOARD_TYPE_DB1200) {
165 local_irq_save(flags);
166
167 ret = request_irq(sock->insert_irq, db1200_pcmcia_cdirq, 164 ret = request_irq(sock->insert_irq, db1200_pcmcia_cdirq,
168 IRQF_DISABLED, "pcmcia_insert", sock); 165 IRQF_DISABLED, "pcmcia_insert", sock);
169 if (ret) { 166 if (ret)
170 local_irq_restore(flags);
171 goto out1; 167 goto out1;
172 }
173 168
174 ret = request_irq(sock->eject_irq, db1200_pcmcia_cdirq, 169 ret = request_irq(sock->eject_irq, db1200_pcmcia_cdirq,
175 IRQF_DISABLED, "pcmcia_eject", sock); 170 IRQF_DISABLED, "pcmcia_eject", sock);
176 if (ret) { 171 if (ret) {
177 free_irq(sock->insert_irq, sock); 172 free_irq(sock->insert_irq, sock);
178 local_irq_restore(flags);
179 goto out1; 173 goto out1;
180 } 174 }
181 175
182 /* disable the currently active one */ 176 /* enable the currently silent one */
183 if (db1200_card_inserted(sock)) 177 if (db1200_card_inserted(sock))
184 disable_irq_nosync(sock->insert_irq); 178 enable_irq(sock->eject_irq);
185 else 179 else
186 disable_irq_nosync(sock->eject_irq); 180 enable_irq(sock->insert_irq);
187
188 local_irq_restore(flags);
189 } else { 181 } else {
190 /* all other (older) Db1x00 boards use a GPIO to show 182 /* all other (older) Db1x00 boards use a GPIO to show
191 * card detection status: use both-edge triggers. 183 * card detection status: use both-edge triggers.
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index 4014cf8e4a26..7ef7adee5e4f 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -335,7 +335,6 @@ static void pcmcia_card_remove(struct pcmcia_socket *s, struct pcmcia_device *le
335 335
336 mutex_lock(&s->ops_mutex); 336 mutex_lock(&s->ops_mutex);
337 list_del(&p_dev->socket_device_list); 337 list_del(&p_dev->socket_device_list);
338 p_dev->_removed = 1;
339 mutex_unlock(&s->ops_mutex); 338 mutex_unlock(&s->ops_mutex);
340 339
341 dev_dbg(&p_dev->dev, "unregistering device\n"); 340 dev_dbg(&p_dev->dev, "unregistering device\n");
@@ -372,8 +371,6 @@ static int pcmcia_device_remove(struct device *dev)
372 if (p_drv->remove) 371 if (p_drv->remove)
373 p_drv->remove(p_dev); 372 p_drv->remove(p_dev);
374 373
375 p_dev->dev_node = NULL;
376
377 /* check for proper unloading */ 374 /* check for proper unloading */
378 if (p_dev->_irq || p_dev->_io || p_dev->_locked) 375 if (p_dev->_irq || p_dev->_io || p_dev->_locked)
379 dev_printk(KERN_INFO, dev, 376 dev_printk(KERN_INFO, dev,
@@ -480,15 +477,6 @@ static int pcmcia_device_query(struct pcmcia_device *p_dev)
480} 477}
481 478
482 479
483/* device_add_lock is needed to avoid double registration by cardmgr and kernel.
484 * Serializes pcmcia_device_add; will most likely be removed in future.
485 *
486 * While it has the caveat that adding new PCMCIA devices inside(!) device_register()
487 * won't work, this doesn't matter much at the moment: the driver core doesn't
488 * support it either.
489 */
490static DEFINE_MUTEX(device_add_lock);
491
492struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s, unsigned int function) 480struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s, unsigned int function)
493{ 481{
494 struct pcmcia_device *p_dev, *tmp_dev; 482 struct pcmcia_device *p_dev, *tmp_dev;
@@ -498,8 +486,6 @@ struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s, unsigned int fu
498 if (!s) 486 if (!s)
499 return NULL; 487 return NULL;
500 488
501 mutex_lock(&device_add_lock);
502
503 pr_debug("adding device to %d, function %d\n", s->sock, function); 489 pr_debug("adding device to %d, function %d\n", s->sock, function);
504 490
505 p_dev = kzalloc(sizeof(struct pcmcia_device), GFP_KERNEL); 491 p_dev = kzalloc(sizeof(struct pcmcia_device), GFP_KERNEL);
@@ -539,8 +525,8 @@ struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s, unsigned int fu
539 525
540 /* 526 /*
541 * p_dev->function_config must be the same for all card functions. 527 * p_dev->function_config must be the same for all card functions.
542 * Note that this is serialized by the device_add_lock, so that 528 * Note that this is serialized by ops_mutex, so that only one
543 * only one such struct will be created. 529 * such struct will be created.
544 */ 530 */
545 list_for_each_entry(tmp_dev, &s->devices_list, socket_device_list) 531 list_for_each_entry(tmp_dev, &s->devices_list, socket_device_list)
546 if (p_dev->func == tmp_dev->func) { 532 if (p_dev->func == tmp_dev->func) {
@@ -553,28 +539,31 @@ struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s, unsigned int fu
553 /* Add to the list in pcmcia_bus_socket */ 539 /* Add to the list in pcmcia_bus_socket */
554 list_add(&p_dev->socket_device_list, &s->devices_list); 540 list_add(&p_dev->socket_device_list, &s->devices_list);
555 541
556 mutex_unlock(&s->ops_mutex); 542 if (pcmcia_setup_irq(p_dev))
543 dev_warn(&p_dev->dev,
544 "IRQ setup failed -- device might not work\n");
557 545
558 if (!p_dev->function_config) { 546 if (!p_dev->function_config) {
559 dev_dbg(&p_dev->dev, "creating config_t\n"); 547 dev_dbg(&p_dev->dev, "creating config_t\n");
560 p_dev->function_config = kzalloc(sizeof(struct config_t), 548 p_dev->function_config = kzalloc(sizeof(struct config_t),
561 GFP_KERNEL); 549 GFP_KERNEL);
562 if (!p_dev->function_config) 550 if (!p_dev->function_config) {
551 mutex_unlock(&s->ops_mutex);
563 goto err_unreg; 552 goto err_unreg;
553 }
564 kref_init(&p_dev->function_config->ref); 554 kref_init(&p_dev->function_config->ref);
565 } 555 }
556 mutex_unlock(&s->ops_mutex);
566 557
567 dev_printk(KERN_NOTICE, &p_dev->dev, 558 dev_printk(KERN_NOTICE, &p_dev->dev,
568 "pcmcia: registering new device %s\n", 559 "pcmcia: registering new device %s (IRQ: %d)\n",
569 p_dev->devname); 560 p_dev->devname, p_dev->irq);
570 561
571 pcmcia_device_query(p_dev); 562 pcmcia_device_query(p_dev);
572 563
573 if (device_register(&p_dev->dev)) 564 if (device_register(&p_dev->dev))
574 goto err_unreg; 565 goto err_unreg;
575 566
576 mutex_unlock(&device_add_lock);
577
578 return p_dev; 567 return p_dev;
579 568
580 err_unreg: 569 err_unreg:
@@ -592,7 +581,6 @@ struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s, unsigned int fu
592 kfree(p_dev->devname); 581 kfree(p_dev->devname);
593 kfree(p_dev); 582 kfree(p_dev);
594 err_put: 583 err_put:
595 mutex_unlock(&device_add_lock);
596 pcmcia_put_socket(s); 584 pcmcia_put_socket(s);
597 585
598 return NULL; 586 return NULL;
@@ -654,14 +642,7 @@ static int pcmcia_requery_callback(struct device *dev, void * _data)
654 642
655static void pcmcia_requery(struct pcmcia_socket *s) 643static void pcmcia_requery(struct pcmcia_socket *s)
656{ 644{
657 int present, has_pfc; 645 int has_pfc;
658
659 mutex_lock(&s->ops_mutex);
660 present = s->pcmcia_state.present;
661 mutex_unlock(&s->ops_mutex);
662
663 if (!present)
664 return;
665 646
666 if (s->functions == 0) { 647 if (s->functions == 0) {
667 pcmcia_card_add(s); 648 pcmcia_card_add(s);
@@ -828,11 +809,12 @@ static inline int pcmcia_devmatch(struct pcmcia_device *dev,
828 } 809 }
829 810
830 if (did->match_flags & PCMCIA_DEV_ID_MATCH_DEVICE_NO) { 811 if (did->match_flags & PCMCIA_DEV_ID_MATCH_DEVICE_NO) {
831 if (dev->device_no != did->device_no) 812 dev_dbg(&dev->dev, "this is a pseudo-multi-function device\n");
832 return 0;
833 mutex_lock(&dev->socket->ops_mutex); 813 mutex_lock(&dev->socket->ops_mutex);
834 dev->socket->pcmcia_state.has_pfc = 1; 814 dev->socket->pcmcia_state.has_pfc = 1;
835 mutex_unlock(&dev->socket->ops_mutex); 815 mutex_unlock(&dev->socket->ops_mutex);
816 if (dev->device_no != did->device_no)
817 return 0;
836 } 818 }
837 819
838 if (did->match_flags & PCMCIA_DEV_ID_MATCH_FUNC_ID) { 820 if (did->match_flags & PCMCIA_DEV_ID_MATCH_FUNC_ID) {
@@ -843,7 +825,7 @@ static inline int pcmcia_devmatch(struct pcmcia_device *dev,
843 825
844 /* if this is a pseudo-multi-function device, 826 /* if this is a pseudo-multi-function device,
845 * we need explicit matches */ 827 * we need explicit matches */
846 if (did->match_flags & PCMCIA_DEV_ID_MATCH_DEVICE_NO) 828 if (dev->socket->pcmcia_state.has_pfc)
847 return 0; 829 return 0;
848 if (dev->device_no) 830 if (dev->device_no)
849 return 0; 831 return 0;
@@ -1260,20 +1242,19 @@ static int ds_event(struct pcmcia_socket *skt, event_t event, int priority)
1260 1242
1261 switch (event) { 1243 switch (event) {
1262 case CS_EVENT_CARD_REMOVAL: 1244 case CS_EVENT_CARD_REMOVAL:
1263 mutex_lock(&s->ops_mutex); 1245 atomic_set(&skt->present, 0);
1264 s->pcmcia_state.present = 0;
1265 mutex_unlock(&s->ops_mutex);
1266 pcmcia_card_remove(skt, NULL); 1246 pcmcia_card_remove(skt, NULL);
1267 handle_event(skt, event); 1247 handle_event(skt, event);
1268 mutex_lock(&s->ops_mutex); 1248 mutex_lock(&s->ops_mutex);
1269 destroy_cis_cache(s); 1249 destroy_cis_cache(s);
1250 pcmcia_cleanup_irq(s);
1270 mutex_unlock(&s->ops_mutex); 1251 mutex_unlock(&s->ops_mutex);
1271 break; 1252 break;
1272 1253
1273 case CS_EVENT_CARD_INSERTION: 1254 case CS_EVENT_CARD_INSERTION:
1255 atomic_set(&skt->present, 1);
1274 mutex_lock(&s->ops_mutex); 1256 mutex_lock(&s->ops_mutex);
1275 s->pcmcia_state.has_pfc = 0; 1257 s->pcmcia_state.has_pfc = 0;
1276 s->pcmcia_state.present = 1;
1277 destroy_cis_cache(s); /* to be on the safe side... */ 1258 destroy_cis_cache(s); /* to be on the safe side... */
1278 mutex_unlock(&s->ops_mutex); 1259 mutex_unlock(&s->ops_mutex);
1279 pcmcia_card_add(skt); 1260 pcmcia_card_add(skt);
@@ -1292,6 +1273,7 @@ static int ds_event(struct pcmcia_socket *skt, event_t event, int priority)
1292 destroy_cis_cache(skt); 1273 destroy_cis_cache(skt);
1293 kfree(skt->fake_cis); 1274 kfree(skt->fake_cis);
1294 skt->fake_cis = NULL; 1275 skt->fake_cis = NULL;
1276 s->functions = 0;
1295 mutex_unlock(&s->ops_mutex); 1277 mutex_unlock(&s->ops_mutex);
1296 /* now, add the new card */ 1278 /* now, add the new card */
1297 ds_event(skt, CS_EVENT_CARD_INSERTION, 1279 ds_event(skt, CS_EVENT_CARD_INSERTION,
@@ -1313,7 +1295,13 @@ static int ds_event(struct pcmcia_socket *skt, event_t event, int priority)
1313 return 0; 1295 return 0;
1314} /* ds_event */ 1296} /* ds_event */
1315 1297
1316 1298/*
1299 * NOTE: This is racy. There's no guarantee the card will still be
1300 * physically present, even if the call to this function returns
1301 * non-NULL. Furthermore, the device driver most likely is unbound
1302 * almost immediately, so the timeframe where pcmcia_dev_present
1303 * returns NULL is probably very small.
1304 */
1317struct pcmcia_device *pcmcia_dev_present(struct pcmcia_device *_p_dev) 1305struct pcmcia_device *pcmcia_dev_present(struct pcmcia_device *_p_dev)
1318{ 1306{
1319 struct pcmcia_device *p_dev; 1307 struct pcmcia_device *p_dev;
@@ -1323,22 +1311,9 @@ struct pcmcia_device *pcmcia_dev_present(struct pcmcia_device *_p_dev)
1323 if (!p_dev) 1311 if (!p_dev)
1324 return NULL; 1312 return NULL;
1325 1313
1326 mutex_lock(&p_dev->socket->ops_mutex); 1314 if (atomic_read(&p_dev->socket->present) != 0)
1327 if (!p_dev->socket->pcmcia_state.present) 1315 ret = p_dev;
1328 goto out;
1329
1330 if (p_dev->socket->pcmcia_state.dead)
1331 goto out;
1332
1333 if (p_dev->_removed)
1334 goto out;
1335
1336 if (p_dev->suspended)
1337 goto out;
1338 1316
1339 ret = p_dev;
1340 out:
1341 mutex_unlock(&p_dev->socket->ops_mutex);
1342 pcmcia_put_dev(p_dev); 1317 pcmcia_put_dev(p_dev);
1343 return ret; 1318 return ret;
1344} 1319}
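pcmcia_dev_present() therefore remains a best-effort check, and drivers keep using it the same way as before. A short sketch, with link being the driver's struct pcmcia_device pointer:

        if (!pcmcia_dev_present(link))
                return;         /* card was pulled; do not touch the hardware */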
@@ -1388,6 +1363,8 @@ static int __devinit pcmcia_bus_add_socket(struct device *dev,
1388 return ret; 1363 return ret;
1389 } 1364 }
1390 1365
1366 atomic_set(&socket->present, 0);
1367
1391 return 0; 1368 return 0;
1392} 1369}
1393 1370
@@ -1399,10 +1376,6 @@ static void pcmcia_bus_remove_socket(struct device *dev,
1399 if (!socket) 1376 if (!socket)
1400 return; 1377 return;
1401 1378
1402 mutex_lock(&socket->ops_mutex);
1403 socket->pcmcia_state.dead = 1;
1404 mutex_unlock(&socket->ops_mutex);
1405
1406 pccard_register_pcmcia(socket, NULL); 1379 pccard_register_pcmcia(socket, NULL);
1407 1380
1408 /* unregister any unbound devices */ 1381 /* unregister any unbound devices */
diff --git a/drivers/pcmcia/omap_cf.c b/drivers/pcmcia/omap_cf.c
index a7cfc7964c7c..0ad06a3bd562 100644
--- a/drivers/pcmcia/omap_cf.c
+++ b/drivers/pcmcia/omap_cf.c
@@ -117,7 +117,7 @@ static int omap_cf_get_status(struct pcmcia_socket *s, u_int *sp)
117 117
118 *sp = SS_READY | SS_DETECT | SS_POWERON | SS_3VCARD; 118 *sp = SS_READY | SS_DETECT | SS_POWERON | SS_3VCARD;
119 cf = container_of(s, struct omap_cf_socket, socket); 119 cf = container_of(s, struct omap_cf_socket, socket);
120 s->irq.AssignedIRQ = 0; 120 s->pcmcia_irq = 0;
121 s->pci_irq = cf->irq; 121 s->pci_irq = cf->irq;
122 } else 122 } else
123 *sp = 0; 123 *sp = 0;
diff --git a/drivers/pcmcia/pcmcia_cis.c b/drivers/pcmcia/pcmcia_cis.c
new file mode 100644
index 000000000000..4a65eaf96b0a
--- /dev/null
+++ b/drivers/pcmcia/pcmcia_cis.c
@@ -0,0 +1,356 @@
1/*
2 * PCMCIA high-level CIS access functions
3 *
4 * The initial developer of the original code is David A. Hinds
5 * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
6 * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
7 *
8 * Copyright (C) 1999 David A. Hinds
9 * Copyright (C) 2004-2009 Dominik Brodowski
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 *
15 */
16
17#include <linux/slab.h>
18#include <linux/module.h>
19#include <linux/kernel.h>
20#include <linux/netdevice.h>
21
22#include <pcmcia/cs_types.h>
23#include <pcmcia/cisreg.h>
24#include <pcmcia/cistpl.h>
25#include <pcmcia/ss.h>
26#include <pcmcia/cs.h>
27#include <pcmcia/ds.h>
28#include "cs_internal.h"
29
30
31/**
32 * pccard_read_tuple() - internal CIS tuple access
33 * @s: the struct pcmcia_socket where the card is inserted
34 * @function: the device function we loop for
35 * @code: which CIS code shall we look for?
36 * @parse: buffer where the tuple shall be parsed (or NULL, if no parse)
37 *
38 * pccard_read_tuple() reads out one tuple and attempts to parse it
39 */
40int pccard_read_tuple(struct pcmcia_socket *s, unsigned int function,
41 cisdata_t code, void *parse)
42{
43 tuple_t tuple;
44 cisdata_t *buf;
45 int ret;
46
47 buf = kmalloc(256, GFP_KERNEL);
48 if (buf == NULL) {
49 dev_printk(KERN_WARNING, &s->dev, "no memory to read tuple\n");
50 return -ENOMEM;
51 }
52 tuple.DesiredTuple = code;
53 tuple.Attributes = 0;
54 if (function == BIND_FN_ALL)
55 tuple.Attributes = TUPLE_RETURN_COMMON;
56 ret = pccard_get_first_tuple(s, function, &tuple);
57 if (ret != 0)
58 goto done;
59 tuple.TupleData = buf;
60 tuple.TupleOffset = 0;
61 tuple.TupleDataMax = 255;
62 ret = pccard_get_tuple_data(s, &tuple);
63 if (ret != 0)
64 goto done;
65 ret = pcmcia_parse_tuple(&tuple, parse);
66done:
67 kfree(buf);
68 return ret;
69}
70
71
72/**
73 * pccard_loop_tuple() - loop over tuples in the CIS
74 * @s: the struct pcmcia_socket where the card is inserted
75 * @function: the device function we loop for
76 * @code: which CIS code shall we look for?
77 * @parse: buffer where the tuple shall be parsed (or NULL, if no parse)
78 * @priv_data: private data to be passed to the loop_tuple function.
 79 * @loop_tuple: function to call for each CIS entry of type @function. It
 80 * gets passed the raw tuple, the parsed tuple (if @parse is
81 * set) and @priv_data.
82 *
83 * pccard_loop_tuple() loops over all CIS entries of type @function, and
84 * calls the @loop_tuple function for each entry. If the call to @loop_tuple
85 * returns 0, the loop exits. Returns 0 on success or errorcode otherwise.
86 */
87int pccard_loop_tuple(struct pcmcia_socket *s, unsigned int function,
88 cisdata_t code, cisparse_t *parse, void *priv_data,
89 int (*loop_tuple) (tuple_t *tuple,
90 cisparse_t *parse,
91 void *priv_data))
92{
93 tuple_t tuple;
94 cisdata_t *buf;
95 int ret;
96
97 buf = kzalloc(256, GFP_KERNEL);
98 if (buf == NULL) {
99 dev_printk(KERN_WARNING, &s->dev, "no memory to read tuple\n");
100 return -ENOMEM;
101 }
102
103 tuple.TupleData = buf;
104 tuple.TupleDataMax = 255;
105 tuple.TupleOffset = 0;
106 tuple.DesiredTuple = code;
107 tuple.Attributes = 0;
108
109 ret = pccard_get_first_tuple(s, function, &tuple);
110 while (!ret) {
111 if (pccard_get_tuple_data(s, &tuple))
112 goto next_entry;
113
114 if (parse)
115 if (pcmcia_parse_tuple(&tuple, parse))
116 goto next_entry;
117
118 ret = loop_tuple(&tuple, parse, priv_data);
119 if (!ret)
120 break;
121
122next_entry:
123 ret = pccard_get_next_tuple(s, function, &tuple);
124 }
125
126 kfree(buf);
127 return ret;
128}
129
130struct pcmcia_cfg_mem {
131 struct pcmcia_device *p_dev;
132 void *priv_data;
133 int (*conf_check) (struct pcmcia_device *p_dev,
134 cistpl_cftable_entry_t *cfg,
135 cistpl_cftable_entry_t *dflt,
136 unsigned int vcc,
137 void *priv_data);
138 cisparse_t parse;
139 cistpl_cftable_entry_t dflt;
140};
141
142/**
143 * pcmcia_do_loop_config() - internal helper for pcmcia_loop_config()
144 *
145 * pcmcia_do_loop_config() is the internal callback for the call from
146 * pcmcia_loop_config() to pccard_loop_tuple(). Data is transferred
147 * by a struct pcmcia_cfg_mem.
148 */
149static int pcmcia_do_loop_config(tuple_t *tuple, cisparse_t *parse, void *priv)
150{
151 cistpl_cftable_entry_t *cfg = &parse->cftable_entry;
152 struct pcmcia_cfg_mem *cfg_mem = priv;
153
154 /* default values */
155 cfg_mem->p_dev->conf.ConfigIndex = cfg->index;
156 if (cfg->flags & CISTPL_CFTABLE_DEFAULT)
157 cfg_mem->dflt = *cfg;
158
159 return cfg_mem->conf_check(cfg_mem->p_dev, cfg, &cfg_mem->dflt,
160 cfg_mem->p_dev->socket->socket.Vcc,
161 cfg_mem->priv_data);
162}
163
164/**
165 * pcmcia_loop_config() - loop over configuration options
166 * @p_dev: the struct pcmcia_device which we need to loop for.
167 * @conf_check: function to call for each configuration option.
168 * It gets passed the struct pcmcia_device, the CIS data
169 * describing the configuration option, and private data
170 * being passed to pcmcia_loop_config()
171 * @priv_data: private data to be passed to the conf_check function.
172 *
173 * pcmcia_loop_config() loops over all configuration options, and calls
174 * the driver-specific conf_check() for each one, checking whether
175 * it is a valid one. Returns 0 on success or errorcode otherwise.
176 */
177int pcmcia_loop_config(struct pcmcia_device *p_dev,
178 int (*conf_check) (struct pcmcia_device *p_dev,
179 cistpl_cftable_entry_t *cfg,
180 cistpl_cftable_entry_t *dflt,
181 unsigned int vcc,
182 void *priv_data),
183 void *priv_data)
184{
185 struct pcmcia_cfg_mem *cfg_mem;
186 int ret;
187
188 cfg_mem = kzalloc(sizeof(struct pcmcia_cfg_mem), GFP_KERNEL);
189 if (cfg_mem == NULL)
190 return -ENOMEM;
191
192 cfg_mem->p_dev = p_dev;
193 cfg_mem->conf_check = conf_check;
194 cfg_mem->priv_data = priv_data;
195
196 ret = pccard_loop_tuple(p_dev->socket, p_dev->func,
197 CISTPL_CFTABLE_ENTRY, &cfg_mem->parse,
198 cfg_mem, pcmcia_do_loop_config);
199
200 kfree(cfg_mem);
201 return ret;
202}
203EXPORT_SYMBOL(pcmcia_loop_config);
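A hedged example of how a driver probe path would use pcmcia_loop_config(); the callback name and the acceptance criterion are illustrative only:

static int example_config_check(struct pcmcia_device *p_dev,
                                cistpl_cftable_entry_t *cfg,
                                cistpl_cftable_entry_t *dflt,
                                unsigned int vcc, void *priv_data)
{
        /* accept the first entry (or applicable default) with an I/O window */
        if (cfg->io.nwin || dflt->io.nwin)
                return 0;       /* 0 ends the loop: option accepted */
        return -ENODEV;         /* non-zero: try the next entry */
}

        /* in the driver's config routine: */
        if (pcmcia_loop_config(p_dev, example_config_check, NULL))
                goto failed;    /* no usable configuration option found */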
204
205
206struct pcmcia_loop_mem {
207 struct pcmcia_device *p_dev;
208 void *priv_data;
209 int (*loop_tuple) (struct pcmcia_device *p_dev,
210 tuple_t *tuple,
211 void *priv_data);
212};
213
214/**
 215 * pcmcia_do_loop_tuple() - internal helper for pcmcia_loop_tuple()
216 *
217 * pcmcia_do_loop_tuple() is the internal callback for the call from
218 * pcmcia_loop_tuple() to pccard_loop_tuple(). Data is transferred
 219 * by a struct pcmcia_loop_mem.
220 */
221static int pcmcia_do_loop_tuple(tuple_t *tuple, cisparse_t *parse, void *priv)
222{
223 struct pcmcia_loop_mem *loop = priv;
224
225 return loop->loop_tuple(loop->p_dev, tuple, loop->priv_data);
226};
227
228/**
229 * pcmcia_loop_tuple() - loop over tuples in the CIS
230 * @p_dev: the struct pcmcia_device which we need to loop for.
231 * @code: which CIS code shall we look for?
232 * @priv_data: private data to be passed to the loop_tuple function.
 233 * @loop_tuple: function to call for each CIS entry of type @function. It
234 * gets passed the raw tuple and @priv_data.
235 *
236 * pcmcia_loop_tuple() loops over all CIS entries of type @function, and
237 * calls the @loop_tuple function for each entry. If the call to @loop_tuple
238 * returns 0, the loop exits. Returns 0 on success or errorcode otherwise.
239 */
240int pcmcia_loop_tuple(struct pcmcia_device *p_dev, cisdata_t code,
241 int (*loop_tuple) (struct pcmcia_device *p_dev,
242 tuple_t *tuple,
243 void *priv_data),
244 void *priv_data)
245{
246 struct pcmcia_loop_mem loop = {
247 .p_dev = p_dev,
248 .loop_tuple = loop_tuple,
249 .priv_data = priv_data};
250
251 return pccard_loop_tuple(p_dev->socket, p_dev->func, code, NULL,
252 &loop, pcmcia_do_loop_tuple);
253}
254EXPORT_SYMBOL(pcmcia_loop_tuple);
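A minimal sketch of a driver walking raw tuples with pcmcia_loop_tuple(); the callback name is hypothetical:

static int example_show_tuple(struct pcmcia_device *p_dev, tuple_t *tuple,
                              void *priv_data)
{
        dev_dbg(&p_dev->dev, "CISTPL_FUNCE tuple with %d bytes of data\n",
                tuple->TupleDataLen);
        return -ENODEV;         /* non-zero: continue with the next tuple */
}

        pcmcia_loop_tuple(p_dev, CISTPL_FUNCE, example_show_tuple, NULL);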
255
256
257struct pcmcia_loop_get {
258 size_t len;
259 cisdata_t **buf;
260};
261
262/**
263 * pcmcia_do_get_tuple() - internal helper for pcmcia_get_tuple()
264 *
265 * pcmcia_do_get_tuple() is the internal callback for the call from
266 * pcmcia_get_tuple() to pcmcia_loop_tuple(). As we're only interested in
267 * the first tuple, return 0 unconditionally. Create a memory buffer large
268 * enough to hold the content of the tuple, and fill it with the tuple data.
 269 * The caller is responsible for freeing the buffer.
270 */
271static int pcmcia_do_get_tuple(struct pcmcia_device *p_dev, tuple_t *tuple,
272 void *priv)
273{
274 struct pcmcia_loop_get *get = priv;
275
276 *get->buf = kzalloc(tuple->TupleDataLen, GFP_KERNEL);
277 if (*get->buf) {
278 get->len = tuple->TupleDataLen;
279 memcpy(*get->buf, tuple->TupleData, tuple->TupleDataLen);
280 } else
281 dev_dbg(&p_dev->dev, "do_get_tuple: out of memory\n");
282 return 0;
283}
284
285/**
286 * pcmcia_get_tuple() - get first tuple from CIS
287 * @p_dev: the struct pcmcia_device which we need to loop for.
288 * @code: which CIS code shall we look for?
289 * @buf: pointer to store the buffer to.
290 *
291 * pcmcia_get_tuple() gets the content of the first CIS entry of type @code.
 292 * It returns the buffer length (or zero). The caller is responsible for
 293 * freeing the buffer returned in @buf.
294 */
295size_t pcmcia_get_tuple(struct pcmcia_device *p_dev, cisdata_t code,
296 unsigned char **buf)
297{
298 struct pcmcia_loop_get get = {
299 .len = 0,
300 .buf = buf,
301 };
302
303 *get.buf = NULL;
304 pcmcia_loop_tuple(p_dev, code, pcmcia_do_get_tuple, &get);
305
306 return get.len;
307}
308EXPORT_SYMBOL(pcmcia_get_tuple);
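pcmcia_get_tuple() hands back a freshly allocated buffer which the caller must free; a minimal usage sketch (the tuple code is arbitrary):

        unsigned char *buf;
        size_t len;

        len = pcmcia_get_tuple(p_dev, CISTPL_VERS_1, &buf);
        if (len)
                dev_info(&p_dev->dev, "%zu bytes of CISTPL_VERS_1 data\n", len);
        kfree(buf);             /* buf is NULL if no tuple was found */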
309
310
311/**
312 * pcmcia_do_get_mac() - internal helper for pcmcia_get_mac_from_cis()
313 *
314 * pcmcia_do_get_mac() is the internal callback for the call from
315 * pcmcia_get_mac_from_cis() to pcmcia_loop_tuple(). We check whether the
316 * tuple contains a proper LAN_NODE_ID of length 6, and copy the data
317 * to struct net_device->dev_addr[i].
318 */
319static int pcmcia_do_get_mac(struct pcmcia_device *p_dev, tuple_t *tuple,
320 void *priv)
321{
322 struct net_device *dev = priv;
323 int i;
324
325 if (tuple->TupleData[0] != CISTPL_FUNCE_LAN_NODE_ID)
326 return -EINVAL;
327 if (tuple->TupleDataLen < ETH_ALEN + 2) {
328 dev_warn(&p_dev->dev, "Invalid CIS tuple length for "
329 "LAN_NODE_ID\n");
330 return -EINVAL;
331 }
332
333 if (tuple->TupleData[1] != ETH_ALEN) {
334 dev_warn(&p_dev->dev, "Invalid header for LAN_NODE_ID\n");
335 return -EINVAL;
336 }
337 for (i = 0; i < 6; i++)
338 dev->dev_addr[i] = tuple->TupleData[i+2];
339 return 0;
340}
341
342/**
343 * pcmcia_get_mac_from_cis() - read out MAC address from CISTPL_FUNCE
344 * @p_dev: the struct pcmcia_device for which we want the address.
345 * @dev: a properly prepared struct net_device to store the info to.
346 *
347 * pcmcia_get_mac_from_cis() reads out the hardware MAC address from
348 * CISTPL_FUNCE and stores it into struct net_device *dev->dev_addr which
349 * must be set up properly by the driver (see examples!).
350 */
351int pcmcia_get_mac_from_cis(struct pcmcia_device *p_dev, struct net_device *dev)
352{
353 return pcmcia_loop_tuple(p_dev, CISTPL_FUNCE, pcmcia_do_get_mac, dev);
354}
355EXPORT_SYMBOL(pcmcia_get_mac_from_cis);
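A hedged sketch of a PCMCIA network driver using the helper during probe; dev must already have been allocated (e.g. via alloc_etherdev()):

        if (pcmcia_get_mac_from_cis(p_dev, dev))
                dev_warn(&p_dev->dev, "no LAN_NODE_ID tuple in CIS\n");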
356
diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c
index 104e73d5d86c..ef0c5f133691 100644
--- a/drivers/pcmcia/pcmcia_ioctl.c
+++ b/drivers/pcmcia/pcmcia_ioctl.c
@@ -301,7 +301,9 @@ static int pccard_get_status(struct pcmcia_socket *s,
301 (c->IntType & (INT_MEMORY_AND_IO | INT_ZOOMED_VIDEO))) { 301 (c->IntType & (INT_MEMORY_AND_IO | INT_ZOOMED_VIDEO))) {
302 u_char reg; 302 u_char reg;
303 if (c->CardValues & PRESENT_PIN_REPLACE) { 303 if (c->CardValues & PRESENT_PIN_REPLACE) {
304 mutex_lock(&s->ops_mutex);
304 pcmcia_read_cis_mem(s, 1, (c->ConfigBase+CISREG_PRR)>>1, 1, &reg); 305 pcmcia_read_cis_mem(s, 1, (c->ConfigBase+CISREG_PRR)>>1, 1, &reg);
306 mutex_unlock(&s->ops_mutex);
305 status->CardState |= 307 status->CardState |=
306 (reg & PRR_WP_STATUS) ? CS_EVENT_WRITE_PROTECT : 0; 308 (reg & PRR_WP_STATUS) ? CS_EVENT_WRITE_PROTECT : 0;
307 status->CardState |= 309 status->CardState |=
@@ -315,7 +317,9 @@ static int pccard_get_status(struct pcmcia_socket *s,
315 status->CardState |= CS_EVENT_READY_CHANGE; 317 status->CardState |= CS_EVENT_READY_CHANGE;
316 } 318 }
317 if (c->CardValues & PRESENT_EXT_STATUS) { 319 if (c->CardValues & PRESENT_EXT_STATUS) {
320 mutex_lock(&s->ops_mutex);
318 pcmcia_read_cis_mem(s, 1, (c->ConfigBase+CISREG_ESR)>>1, 1, &reg); 321 pcmcia_read_cis_mem(s, 1, (c->ConfigBase+CISREG_ESR)>>1, 1, &reg);
322 mutex_unlock(&s->ops_mutex);
319 status->CardState |= 323 status->CardState |=
320 (reg & ESR_REQ_ATTN) ? CS_EVENT_REQUEST_ATTENTION : 0; 324 (reg & ESR_REQ_ATTN) ? CS_EVENT_REQUEST_ATTENTION : 0;
321 } 325 }
@@ -351,7 +355,7 @@ static int pccard_get_configuration_info(struct pcmcia_socket *s,
351 if (s->state & SOCKET_CARDBUS_CONFIG) { 355 if (s->state & SOCKET_CARDBUS_CONFIG) {
352 config->Attributes = CONF_VALID_CLIENT; 356 config->Attributes = CONF_VALID_CLIENT;
353 config->IntType = INT_CARDBUS; 357 config->IntType = INT_CARDBUS;
354 config->AssignedIRQ = s->irq.AssignedIRQ; 358 config->AssignedIRQ = s->pcmcia_irq;
355 if (config->AssignedIRQ) 359 if (config->AssignedIRQ)
356 config->Attributes |= CONF_ENABLE_IRQ; 360 config->Attributes |= CONF_ENABLE_IRQ;
357 if (s->io[0].res) { 361 if (s->io[0].res) {
@@ -391,7 +395,7 @@ static int pccard_get_configuration_info(struct pcmcia_socket *s,
391 config->ExtStatus = c->ExtStatus; 395 config->ExtStatus = c->ExtStatus;
392 config->Present = config->CardValues = c->CardValues; 396 config->Present = config->CardValues = c->CardValues;
393 config->IRQAttributes = c->irq.Attributes; 397 config->IRQAttributes = c->irq.Attributes;
394 config->AssignedIRQ = s->irq.AssignedIRQ; 398 config->AssignedIRQ = s->pcmcia_irq;
395 config->BasePort1 = c->io.BasePort1; 399 config->BasePort1 = c->io.BasePort1;
396 config->NumPorts1 = c->io.NumPorts1; 400 config->NumPorts1 = c->io.NumPorts1;
397 config->Attributes1 = c->io.Attributes1; 401 config->Attributes1 = c->io.Attributes1;
@@ -571,7 +575,6 @@ static struct pci_bus *pcmcia_lookup_bus(struct pcmcia_socket *s)
571 575
572static int get_device_info(struct pcmcia_socket *s, bind_info_t *bind_info, int first) 576static int get_device_info(struct pcmcia_socket *s, bind_info_t *bind_info, int first)
573{ 577{
574 dev_node_t *node;
575 struct pcmcia_device *p_dev; 578 struct pcmcia_device *p_dev;
576 struct pcmcia_driver *p_drv; 579 struct pcmcia_driver *p_drv;
577 int ret = 0; 580 int ret = 0;
@@ -633,21 +636,13 @@ static int get_device_info(struct pcmcia_socket *s, bind_info_t *bind_info, int
633 goto err_put; 636 goto err_put;
634 } 637 }
635 638
636 if (first) 639 if (!first) {
637 node = p_dev->dev_node;
638 else
639 for (node = p_dev->dev_node; node; node = node->next)
640 if (node == bind_info->next)
641 break;
642 if (!node) {
643 ret = -ENODEV; 640 ret = -ENODEV;
644 goto err_put; 641 goto err_put;
645 } 642 }
646 643
647 strlcpy(bind_info->name, node->dev_name, DEV_NAME_LEN); 644 strlcpy(bind_info->name, dev_name(&p_dev->dev), DEV_NAME_LEN);
648 bind_info->major = node->major; 645 bind_info->next = NULL;
649 bind_info->minor = node->minor;
650 bind_info->next = node->next;
651 646
652 err_put: 647 err_put:
653 pcmcia_put_dev(p_dev); 648 pcmcia_put_dev(p_dev);
@@ -711,7 +706,7 @@ static int ds_open(struct inode *inode, struct file *file)
711 warning_printed = 1; 706 warning_printed = 1;
712 } 707 }
713 708
714 if (s->pcmcia_state.present) 709 if (atomic_read(&s->present))
715 queue_event(user, CS_EVENT_CARD_INSERTION); 710 queue_event(user, CS_EVENT_CARD_INSERTION);
716out: 711out:
717 unlock_kernel(); 712 unlock_kernel();
@@ -770,9 +765,6 @@ static ssize_t ds_read(struct file *file, char __user *buf,
770 return -EIO; 765 return -EIO;
771 766
772 s = user->socket; 767 s = user->socket;
773 if (s->pcmcia_state.dead)
774 return -EIO;
775
776 ret = wait_event_interruptible(s->queue, !queue_empty(user)); 768 ret = wait_event_interruptible(s->queue, !queue_empty(user));
777 if (ret == 0) 769 if (ret == 0)
778 ret = put_user(get_queued_event(user), (int __user *)buf) ? -EFAULT : 4; 770 ret = put_user(get_queued_event(user), (int __user *)buf) ? -EFAULT : 4;
@@ -838,8 +830,6 @@ static int ds_ioctl(struct inode *inode, struct file *file,
838 return -EIO; 830 return -EIO;
839 831
840 s = user->socket; 832 s = user->socket;
841 if (s->pcmcia_state.dead)
842 return -EIO;
843 833
844 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT; 834 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
845 if (size > sizeof(ds_ioctl_arg_t)) 835 if (size > sizeof(ds_ioctl_arg_t))
diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c
index 7c3d03bb4f30..29f91fac1dff 100644
--- a/drivers/pcmcia/pcmcia_resource.c
+++ b/drivers/pcmcia/pcmcia_resource.c
@@ -23,6 +23,8 @@
23#include <linux/netdevice.h> 23#include <linux/netdevice.h>
24#include <linux/slab.h> 24#include <linux/slab.h>
25 25
26#include <asm/irq.h>
27
26#include <pcmcia/cs_types.h> 28#include <pcmcia/cs_types.h>
27#include <pcmcia/ss.h> 29#include <pcmcia/ss.h>
28#include <pcmcia/cs.h> 30#include <pcmcia/cs.h>
@@ -38,29 +40,6 @@ static int io_speed;
38module_param(io_speed, int, 0444); 40module_param(io_speed, int, 0444);
39 41
40 42
41#ifdef CONFIG_PCMCIA_PROBE
42#include <asm/irq.h>
43/* mask of IRQs already reserved by other cards, we should avoid using them */
44static u8 pcmcia_used_irq[NR_IRQS];
45#endif
46
47static int pcmcia_adjust_io_region(struct resource *res, unsigned long start,
48 unsigned long end, struct pcmcia_socket *s)
49{
50 if (s->resource_ops->adjust_io_region)
51 return s->resource_ops->adjust_io_region(res, start, end, s);
52 return -ENOMEM;
53}
54
55static struct resource *pcmcia_find_io_region(unsigned long base, int num,
56 unsigned long align,
57 struct pcmcia_socket *s)
58{
59 if (s->resource_ops->find_io)
60 return s->resource_ops->find_io(base, num, align, s);
61 return NULL;
62}
63
64int pcmcia_validate_mem(struct pcmcia_socket *s) 43int pcmcia_validate_mem(struct pcmcia_socket *s)
65{ 44{
66 if (s->resource_ops->validate_mem) 45 if (s->resource_ops->validate_mem)
@@ -86,8 +65,7 @@ struct resource *pcmcia_find_mem_region(u_long base, u_long num, u_long align,
86static int alloc_io_space(struct pcmcia_socket *s, u_int attr, 65static int alloc_io_space(struct pcmcia_socket *s, u_int attr,
87 unsigned int *base, unsigned int num, u_int lines) 66 unsigned int *base, unsigned int num, u_int lines)
88{ 67{
89 int i; 68 unsigned int align;
90 unsigned int try, align;
91 69
92 align = (*base) ? (lines ? 1<<lines : 0) : 1; 70 align = (*base) ? (lines ? 1<<lines : 0) : 1;
93 if (align && (align < num)) { 71 if (align && (align < num)) {
@@ -104,50 +82,8 @@ static int alloc_io_space(struct pcmcia_socket *s, u_int attr,
104 *base, align); 82 *base, align);
105 align = 0; 83 align = 0;
106 } 84 }
107 if ((s->features & SS_CAP_STATIC_MAP) && s->io_offset) { 85
108 *base = s->io_offset | (*base & 0x0fff); 86 return s->resource_ops->find_io(s, attr, base, num, align);
109 return 0;
110 }
111 /* Check for an already-allocated window that must conflict with
112 * what was asked for. It is a hack because it does not catch all
113 * potential conflicts, just the most obvious ones.
114 */
115 for (i = 0; i < MAX_IO_WIN; i++)
116 if ((s->io[i].res) && *base &&
117 ((s->io[i].res->start & (align-1)) == *base))
118 return 1;
119 for (i = 0; i < MAX_IO_WIN; i++) {
120 if (!s->io[i].res) {
121 s->io[i].res = pcmcia_find_io_region(*base, num, align, s);
122 if (s->io[i].res) {
123 *base = s->io[i].res->start;
124 s->io[i].res->flags = (s->io[i].res->flags & ~IORESOURCE_BITS) | (attr & IORESOURCE_BITS);
125 s->io[i].InUse = num;
126 break;
127 } else
128 return 1;
129 } else if ((s->io[i].res->flags & IORESOURCE_BITS) != (attr & IORESOURCE_BITS))
130 continue;
131 /* Try to extend top of window */
132 try = s->io[i].res->end + 1;
133 if ((*base == 0) || (*base == try))
134 if (pcmcia_adjust_io_region(s->io[i].res, s->io[i].res->start,
135 s->io[i].res->end + num, s) == 0) {
136 *base = try;
137 s->io[i].InUse += num;
138 break;
139 }
140 /* Try to extend bottom of window */
141 try = s->io[i].res->start - num;
142 if ((*base == 0) || (*base == try))
143 if (pcmcia_adjust_io_region(s->io[i].res, s->io[i].res->start - num,
144 s->io[i].res->end, s) == 0) {
145 *base = try;
146 s->io[i].InUse += num;
147 break;
148 }
149 }
150 return (i == MAX_IO_WIN);
151} /* alloc_io_space */ 87} /* alloc_io_space */
152 88
153 89
@@ -187,6 +123,7 @@ int pcmcia_access_configuration_register(struct pcmcia_device *p_dev,
187 config_t *c; 123 config_t *c;
188 int addr; 124 int addr;
189 u_char val; 125 u_char val;
126 int ret = 0;
190 127
191 if (!p_dev || !p_dev->function_config) 128 if (!p_dev || !p_dev->function_config)
192 return -EINVAL; 129 return -EINVAL;
@@ -203,11 +140,10 @@ int pcmcia_access_configuration_register(struct pcmcia_device *p_dev,
203 } 140 }
204 141
205 addr = (c->ConfigBase + reg->Offset) >> 1; 142 addr = (c->ConfigBase + reg->Offset) >> 1;
206 mutex_unlock(&s->ops_mutex);
207 143
208 switch (reg->Action) { 144 switch (reg->Action) {
209 case CS_READ: 145 case CS_READ:
210 pcmcia_read_cis_mem(s, 1, addr, 1, &val); 146 ret = pcmcia_read_cis_mem(s, 1, addr, 1, &val);
211 reg->Value = val; 147 reg->Value = val;
212 break; 148 break;
213 case CS_WRITE: 149 case CS_WRITE:
@@ -216,10 +152,11 @@ int pcmcia_access_configuration_register(struct pcmcia_device *p_dev,
216 break; 152 break;
217 default: 153 default:
218 dev_dbg(&s->dev, "Invalid conf register request\n"); 154 dev_dbg(&s->dev, "Invalid conf register request\n");
219 return -EINVAL; 155 ret = -EINVAL;
220 break; 156 break;
221 } 157 }
222 return 0; 158 mutex_unlock(&s->ops_mutex);
159 return ret;
223} /* pcmcia_access_configuration_register */ 160} /* pcmcia_access_configuration_register */
224EXPORT_SYMBOL(pcmcia_access_configuration_register); 161EXPORT_SYMBOL(pcmcia_access_configuration_register);
225 162
@@ -275,19 +212,9 @@ int pcmcia_modify_configuration(struct pcmcia_device *p_dev,
275 goto unlock; 212 goto unlock;
276 } 213 }
277 214
278 if (mod->Attributes & CONF_IRQ_CHANGE_VALID) { 215 if (mod->Attributes & (CONF_IRQ_CHANGE_VALID | CONF_VCC_CHANGE_VALID)) {
279 if (mod->Attributes & CONF_ENABLE_IRQ) { 216 dev_dbg(&s->dev,
280 c->Attributes |= CONF_ENABLE_IRQ; 217 "changing Vcc or IRQ is not allowed at this time\n");
281 s->socket.io_irq = s->irq.AssignedIRQ;
282 } else {
283 c->Attributes &= ~CONF_ENABLE_IRQ;
284 s->socket.io_irq = 0;
285 }
286 s->ops->set_socket(s, &s->socket);
287 }
288
289 if (mod->Attributes & CONF_VCC_CHANGE_VALID) {
290 dev_dbg(&s->dev, "changing Vcc is not allowed at this time\n");
291 ret = -EINVAL; 218 ret = -EINVAL;
292 goto unlock; 219 goto unlock;
293 } 220 }
@@ -422,52 +349,6 @@ out:
422} /* pcmcia_release_io */ 349} /* pcmcia_release_io */
423 350
424 351
425static int pcmcia_release_irq(struct pcmcia_device *p_dev, irq_req_t *req)
426{
427 struct pcmcia_socket *s = p_dev->socket;
428 config_t *c;
429 int ret = -EINVAL;
430
431 mutex_lock(&s->ops_mutex);
432
433 c = p_dev->function_config;
434
435 if (!p_dev->_irq)
436 goto out;
437
438 p_dev->_irq = 0;
439
440 if (c->state & CONFIG_LOCKED)
441 goto out;
442
443 if (c->irq.Attributes != req->Attributes) {
444 dev_dbg(&s->dev, "IRQ attributes must match assigned ones\n");
445 goto out;
446 }
447 if (s->irq.AssignedIRQ != req->AssignedIRQ) {
448 dev_dbg(&s->dev, "IRQ must match assigned one\n");
449 goto out;
450 }
451 if (--s->irq.Config == 0) {
452 c->state &= ~CONFIG_IRQ_REQ;
453 s->irq.AssignedIRQ = 0;
454 }
455
456 if (req->Handler)
457 free_irq(req->AssignedIRQ, p_dev->priv);
458
459#ifdef CONFIG_PCMCIA_PROBE
460 pcmcia_used_irq[req->AssignedIRQ]--;
461#endif
462 ret = 0;
463
464out:
465 mutex_unlock(&s->ops_mutex);
466
467 return ret;
468} /* pcmcia_release_irq */
469
470
471int pcmcia_release_window(struct pcmcia_device *p_dev, window_handle_t wh) 352int pcmcia_release_window(struct pcmcia_device *p_dev, window_handle_t wh)
472{ 353{
473 struct pcmcia_socket *s = p_dev->socket; 354 struct pcmcia_socket *s = p_dev->socket;
@@ -551,12 +432,11 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev,
551 if (req->Attributes & CONF_ENABLE_SPKR) 432 if (req->Attributes & CONF_ENABLE_SPKR)
552 s->socket.flags |= SS_SPKR_ENA; 433 s->socket.flags |= SS_SPKR_ENA;
553 if (req->Attributes & CONF_ENABLE_IRQ) 434 if (req->Attributes & CONF_ENABLE_IRQ)
554 s->socket.io_irq = s->irq.AssignedIRQ; 435 s->socket.io_irq = s->pcmcia_irq;
555 else 436 else
556 s->socket.io_irq = 0; 437 s->socket.io_irq = 0;
557 s->ops->set_socket(s, &s->socket); 438 s->ops->set_socket(s, &s->socket);
558 s->lock_count++; 439 s->lock_count++;
559 mutex_unlock(&s->ops_mutex);
560 440
561 /* Set up CIS configuration registers */ 441 /* Set up CIS configuration registers */
562 base = c->ConfigBase = req->ConfigBase; 442 base = c->ConfigBase = req->ConfigBase;
@@ -574,9 +454,9 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev,
574 if (req->Present & PRESENT_IOBASE_0) 454 if (req->Present & PRESENT_IOBASE_0)
575 c->Option |= COR_ADDR_DECODE; 455 c->Option |= COR_ADDR_DECODE;
576 } 456 }
577 if (c->state & CONFIG_IRQ_REQ) 457 if ((req->Attributes & CONF_ENABLE_IRQ) &&
578 if (!(c->irq.Attributes & IRQ_FORCED_PULSE)) 458 !(req->Attributes & CONF_ENABLE_PULSE_IRQ))
579 c->Option |= COR_LEVEL_REQ; 459 c->Option |= COR_LEVEL_REQ;
580 pcmcia_write_cis_mem(s, 1, (base + CISREG_COR)>>1, 1, &c->Option); 460 pcmcia_write_cis_mem(s, 1, (base + CISREG_COR)>>1, 1, &c->Option);
581 mdelay(40); 461 mdelay(40);
582 } 462 }
@@ -605,7 +485,6 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev,
605 485
606 /* Configure I/O windows */ 486 /* Configure I/O windows */
607 if (c->state & CONFIG_IO_REQ) { 487 if (c->state & CONFIG_IO_REQ) {
608 mutex_lock(&s->ops_mutex);
609 iomap.speed = io_speed; 488 iomap.speed = io_speed;
610 for (i = 0; i < MAX_IO_WIN; i++) 489 for (i = 0; i < MAX_IO_WIN; i++)
611 if (s->io[i].res) { 490 if (s->io[i].res) {
@@ -624,11 +503,11 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev,
624 s->ops->set_io_map(s, &iomap); 503 s->ops->set_io_map(s, &iomap);
625 s->io[i].Config++; 504 s->io[i].Config++;
626 } 505 }
627 mutex_unlock(&s->ops_mutex);
628 } 506 }
629 507
630 c->state |= CONFIG_LOCKED; 508 c->state |= CONFIG_LOCKED;
631 p_dev->_locked = 1; 509 p_dev->_locked = 1;
510 mutex_unlock(&s->ops_mutex);
632 return 0; 511 return 0;
633} /* pcmcia_request_configuration */ 512} /* pcmcia_request_configuration */
634EXPORT_SYMBOL(pcmcia_request_configuration); 513EXPORT_SYMBOL(pcmcia_request_configuration);
@@ -706,137 +585,176 @@ out:
706EXPORT_SYMBOL(pcmcia_request_io); 585EXPORT_SYMBOL(pcmcia_request_io);
707 586
708 587
709/** pcmcia_request_irq 588/**
 589 * pcmcia_request_irq() - attempt to request an IRQ for a PCMCIA device
710 * 590 *
711 * Request_irq() reserves an irq for this client. 591 * pcmcia_request_irq() is a wrapper around request_irq which will allow
592 * the PCMCIA core to clean up the registration in pcmcia_disable_device().
593 * Drivers are free to use request_irq() directly, but then they need to
 594 * call free_irq themselves, too. Also, only IRQF_SHARED capable IRQ
595 * handlers are allowed.
596 */
597int __must_check pcmcia_request_irq(struct pcmcia_device *p_dev,
598 irq_handler_t handler)
599{
600 int ret;
601
602 if (!p_dev->irq)
603 return -EINVAL;
604
605 ret = request_irq(p_dev->irq, handler, IRQF_SHARED,
606 p_dev->devname, p_dev->priv);
607 if (!ret)
608 p_dev->_irq = 1;
609
610 return ret;
611}
612EXPORT_SYMBOL(pcmcia_request_irq);
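The wrapper registers a shared handler on p_dev->irq with p_dev->priv as dev_id and lets pcmcia_disable_device() release it again. A minimal driver-side sketch (handler name hypothetical):

static irqreturn_t example_interrupt(int irq, void *dev_id)
{
        /* dev_id is p_dev->priv; a shared handler must check whether its
         * device actually raised the interrupt and return IRQ_NONE if not */
        return IRQ_HANDLED;
}

        /* in the driver's config routine: */
        ret = pcmcia_request_irq(p_dev, example_interrupt);
        if (ret)
                goto failed;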
613
614
615/**
616 * pcmcia_request_exclusive_irq() - attempt to request an exclusive IRQ first
712 * 617 *
713 * Also, since Linux only reserves irq's when they are actually 618 * pcmcia_request_exclusive_irq() is a wrapper around request_irq which
714 * hooked, we don't guarantee that an irq will still be available 619 * attempts first to request an exclusive IRQ. If it fails, it also accepts
715 * when the configuration is locked. Now that I think about it, 620 * a shared IRQ, but prints out a warning. PCMCIA drivers should allow for
716 * there might be a way to fix this using a dummy handler. 621 * IRQ sharing and either use request_irq directly (then they need to call
622 * free_irq themselves, too), or the pcmcia_request_irq() function.
717 */ 623 */
624int __must_check
625__pcmcia_request_exclusive_irq(struct pcmcia_device *p_dev,
626 irq_handler_t handler)
627{
628 int ret;
629
630 if (!p_dev->irq)
631 return -EINVAL;
632
633 ret = request_irq(p_dev->irq, handler, 0, p_dev->devname, p_dev->priv);
634 if (ret) {
635 ret = pcmcia_request_irq(p_dev, handler);
636 dev_printk(KERN_WARNING, &p_dev->dev, "pcmcia: "
637 "request for exclusive IRQ could not be fulfilled.\n");
638 dev_printk(KERN_WARNING, &p_dev->dev, "pcmcia: the driver "
 639 "needs updating to support shared IRQ lines.\n");
640 }
641 if (ret)
642 dev_printk(KERN_INFO, &p_dev->dev, "request_irq() failed\n");
643 else
644 p_dev->_irq = 1;
645
646 return ret;
647} /* pcmcia_request_exclusive_irq */
648EXPORT_SYMBOL(__pcmcia_request_exclusive_irq);
649
718 650
719#ifdef CONFIG_PCMCIA_PROBE 651#ifdef CONFIG_PCMCIA_PROBE
652
653/* mask of IRQs already reserved by other cards, we should avoid using them */
654static u8 pcmcia_used_irq[NR_IRQS];
655
720static irqreturn_t test_action(int cpl, void *dev_id) 656static irqreturn_t test_action(int cpl, void *dev_id)
721{ 657{
722 return IRQ_NONE; 658 return IRQ_NONE;
723} 659}
724#endif
725 660
726int pcmcia_request_irq(struct pcmcia_device *p_dev, irq_req_t *req) 661/**
662 * pcmcia_setup_isa_irq() - determine whether an ISA IRQ can be used
663 * @p_dev - the associated PCMCIA device
664 *
665 * locking note: must be called with ops_mutex locked.
666 */
667static int pcmcia_setup_isa_irq(struct pcmcia_device *p_dev, int type)
727{ 668{
728 struct pcmcia_socket *s = p_dev->socket; 669 struct pcmcia_socket *s = p_dev->socket;
729 config_t *c; 670 unsigned int try, irq;
730 int ret = -EINVAL, irq = 0; 671 u32 mask = s->irq_mask;
731 int type; 672 int ret = -ENODEV;
732 673
733 mutex_lock(&s->ops_mutex); 674 for (try = 0; try < 64; try++) {
675 irq = try % 32;
734 676
735 if (!(s->state & SOCKET_PRESENT)) { 677 /* marked as available by driver, not blocked by userspace? */
736 dev_dbg(&s->dev, "No card present\n"); 678 if (!((mask >> irq) & 1))
737 goto out; 679 continue;
738 } 680
739 c = p_dev->function_config; 681 /* avoid an IRQ which is already used by another PCMCIA card */
740 if (c->state & CONFIG_LOCKED) { 682 if ((try < 32) && pcmcia_used_irq[irq])
741 dev_dbg(&s->dev, "Configuration is locked\n"); 683 continue;
742 goto out; 684
743 } 685 /* register the correct driver, if possible, to check whether
744 if (c->state & CONFIG_IRQ_REQ) { 686 * registering a dummy handle works, i.e. if the IRQ isn't
745 dev_dbg(&s->dev, "IRQ already configured\n"); 687 * marked as used by the kernel resource management core */
746 goto out; 688 ret = request_irq(irq, test_action, type, p_dev->devname,
689 p_dev);
690 if (!ret) {
691 free_irq(irq, p_dev);
692 p_dev->irq = s->pcmcia_irq = irq;
693 pcmcia_used_irq[irq]++;
694 break;
695 }
747 } 696 }
748 697
749 /* Decide what type of interrupt we are registering */ 698 return ret;
750 type = 0; 699}
751 if (s->functions > 1) /* All of this ought to be handled higher up */
752 type = IRQF_SHARED;
753 else if (req->Attributes & IRQ_TYPE_DYNAMIC_SHARING)
754 type = IRQF_SHARED;
755 else
756 printk(KERN_WARNING "pcmcia: Driver needs updating to support IRQ sharing.\n");
757 700
758 /* If the interrupt is already assigned, it must be the same */ 701void pcmcia_cleanup_irq(struct pcmcia_socket *s)
759 if (s->irq.AssignedIRQ != 0) 702{
760 irq = s->irq.AssignedIRQ; 703 pcmcia_used_irq[s->pcmcia_irq]--;
704 s->pcmcia_irq = 0;
705}
761 706
762#ifdef CONFIG_PCMCIA_PROBE 707#else /* CONFIG_PCMCIA_PROBE */
763 if (!irq) {
764 int try;
765 u32 mask = s->irq_mask;
766 void *data = p_dev; /* something unique to this device */
767 708
768 for (try = 0; try < 64; try++) { 709static int pcmcia_setup_isa_irq(struct pcmcia_device *p_dev, int type)
769 irq = try % 32; 710{
711 return -EINVAL;
712}
770 713
771 /* marked as available by driver, and not blocked by userspace? */ 714void pcmcia_cleanup_irq(struct pcmcia_socket *s)
772 if (!((mask >> irq) & 1)) 715{
773 continue; 716 s->pcmcia_irq = 0;
717 return;
718}
774 719
775 /* avoid an IRQ which is already used by a PCMCIA card */ 720#endif /* CONFIG_PCMCIA_PROBE */
776 if ((try < 32) && pcmcia_used_irq[irq])
777 continue;
778 721
779 /* register the correct driver, if possible, to check whether
780 * registering a dummy handler works, i.e. if the IRQ isn't
781 * marked as used by the kernel resource management core */
782 ret = request_irq(irq,
783 (req->Handler) ? req->Handler : test_action,
784 type,
785 p_dev->devname,
786 (req->Handler) ? p_dev->priv : data);
787 if (!ret) {
788 if (!req->Handler)
789 free_irq(irq, data);
790 break;
791 }
792 }
793 }
794#endif
795 /* only assign PCI irq if no IRQ already assigned */
796 if (ret && !s->irq.AssignedIRQ) {
797 if (!s->pci_irq) {
798 dev_printk(KERN_INFO, &s->dev, "no IRQ found\n");
799 goto out;
800 }
801 type = IRQF_SHARED;
802 irq = s->pci_irq;
803 }
804 722
805 if (ret && req->Handler) { 723/**
806 ret = request_irq(irq, req->Handler, type, 724 * pcmcia_setup_irq() - determine IRQ to be used for device
807 p_dev->devname, p_dev->priv); 725 * @p_dev: the associated PCMCIA device
808 if (ret) { 726 *
809 dev_printk(KERN_INFO, &s->dev, 727 * locking note: must be called with ops_mutex locked.
810 "request_irq() failed\n"); 728 */
811 goto out; 729int pcmcia_setup_irq(struct pcmcia_device *p_dev)
812 } 730{
813 } 731 struct pcmcia_socket *s = p_dev->socket;
814 732
815 /* Make sure the fact the request type was overridden is passed back */ 733 if (p_dev->irq)
816 if (type == IRQF_SHARED && !(req->Attributes & IRQ_TYPE_DYNAMIC_SHARING)) { 734 return 0;
817 req->Attributes |= IRQ_TYPE_DYNAMIC_SHARING; 735
818 dev_printk(KERN_WARNING, &p_dev->dev, "pcmcia: " 736 /* already assigned? */
819 "request for exclusive IRQ could not be fulfilled.\n"); 737 if (s->pcmcia_irq) {
820 dev_printk(KERN_WARNING, &p_dev->dev, "pcmcia: the driver " 738 p_dev->irq = s->pcmcia_irq;
821 "needs updating to supported shared IRQ lines.\n"); 739 return 0;
822 } 740 }
823 c->irq.Attributes = req->Attributes;
824 s->irq.AssignedIRQ = req->AssignedIRQ = irq;
825 s->irq.Config++;
826 741
827 c->state |= CONFIG_IRQ_REQ; 742 /* prefer an exclusive ISA irq */
828 p_dev->_irq = 1; 743 if (!pcmcia_setup_isa_irq(p_dev, 0))
744 return 0;
829 745
830#ifdef CONFIG_PCMCIA_PROBE 746 /* but accept a shared ISA irq */
831 pcmcia_used_irq[irq]++; 747 if (!pcmcia_setup_isa_irq(p_dev, IRQF_SHARED))
832#endif 748 return 0;
833 749
834 ret = 0; 750 /* but use the PCI irq otherwise */
835out: 751 if (s->pci_irq) {
836 mutex_unlock(&s->ops_mutex); 752 p_dev->irq = s->pcmcia_irq = s->pci_irq;
837 return ret; 753 return 0;
838} /* pcmcia_request_irq */ 754 }
839EXPORT_SYMBOL(pcmcia_request_irq); 755
756 return -EINVAL;
757}
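To make the selection order above concrete (a hypothetical socket, not taken from this patch): if the socket's irq_mask permits only IRQs 3, 5 and 11 and IRQ 5 is free, the first pcmcia_setup_isa_irq() call registers the dummy test_action handler on IRQ 5 without IRQF_SHARED, immediately frees it again and records p_dev->irq = s->pcmcia_irq = 5. If every permitted line is already occupied, the second call retries with IRQF_SHARED, which only succeeds where the existing handler was itself registered as shareable; failing that, a socket behind a CardBus bridge falls back to s->pci_irq. The cached s->pcmcia_irq lets further functions of the same card reuse whatever was found.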
840 758
841 759
842/** pcmcia_request_window 760/** pcmcia_request_window
@@ -939,237 +857,9 @@ void pcmcia_disable_device(struct pcmcia_device *p_dev)
939{ 857{
940 pcmcia_release_configuration(p_dev); 858 pcmcia_release_configuration(p_dev);
941 pcmcia_release_io(p_dev, &p_dev->io); 859 pcmcia_release_io(p_dev, &p_dev->io);
942 pcmcia_release_irq(p_dev, &p_dev->irq); 860 if (p_dev->_irq)
861 free_irq(p_dev->irq, p_dev->priv);
943 if (p_dev->win) 862 if (p_dev->win)
944 pcmcia_release_window(p_dev, p_dev->win); 863 pcmcia_release_window(p_dev, p_dev->win);
945} 864}
946EXPORT_SYMBOL(pcmcia_disable_device); 865EXPORT_SYMBOL(pcmcia_disable_device);
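A matching teardown sketch (hypothetical; example_release is a made-up name): with the hunk above, the IRQ obtained through pcmcia_request_irq() is dropped here as well, so a driver's release path no longer needs its own free_irq().

static void example_release(struct pcmcia_device *p_dev)
{
	/* releases the configuration, the I/O and memory windows and, if
	 * p_dev->_irq is set, the handler registered via pcmcia_request_irq() */
	pcmcia_disable_device(p_dev);
}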
947
948
949struct pcmcia_cfg_mem {
950 struct pcmcia_device *p_dev;
951 void *priv_data;
952 int (*conf_check) (struct pcmcia_device *p_dev,
953 cistpl_cftable_entry_t *cfg,
954 cistpl_cftable_entry_t *dflt,
955 unsigned int vcc,
956 void *priv_data);
957 cisparse_t parse;
958 cistpl_cftable_entry_t dflt;
959};
960
961/**
962 * pcmcia_do_loop_config() - internal helper for pcmcia_loop_config()
963 *
964 * pcmcia_do_loop_config() is the internal callback for the call from
965 * pcmcia_loop_config() to pccard_loop_tuple(). Data is transferred
966 * by a struct pcmcia_cfg_mem.
967 */
968static int pcmcia_do_loop_config(tuple_t *tuple, cisparse_t *parse, void *priv)
969{
970 cistpl_cftable_entry_t *cfg = &parse->cftable_entry;
971 struct pcmcia_cfg_mem *cfg_mem = priv;
972
973 /* default values */
974 cfg_mem->p_dev->conf.ConfigIndex = cfg->index;
975 if (cfg->flags & CISTPL_CFTABLE_DEFAULT)
976 cfg_mem->dflt = *cfg;
977
978 return cfg_mem->conf_check(cfg_mem->p_dev, cfg, &cfg_mem->dflt,
979 cfg_mem->p_dev->socket->socket.Vcc,
980 cfg_mem->priv_data);
981}
982
983/**
984 * pcmcia_loop_config() - loop over configuration options
985 * @p_dev: the struct pcmcia_device which we need to loop for.
986 * @conf_check: function to call for each configuration option.
987 * It gets passed the struct pcmcia_device, the CIS data
988 * describing the configuration option, and private data
989 * being passed to pcmcia_loop_config()
990 * @priv_data: private data to be passed to the conf_check function.
991 *
992 * pcmcia_loop_config() loops over all configuration options, and calls
993 * the driver-specific conf_check() for each one, checking whether
994 * it is a valid one. Returns 0 on success or an error code otherwise.
995 */
996int pcmcia_loop_config(struct pcmcia_device *p_dev,
997 int (*conf_check) (struct pcmcia_device *p_dev,
998 cistpl_cftable_entry_t *cfg,
999 cistpl_cftable_entry_t *dflt,
1000 unsigned int vcc,
1001 void *priv_data),
1002 void *priv_data)
1003{
1004 struct pcmcia_cfg_mem *cfg_mem;
1005 int ret;
1006
1007 cfg_mem = kzalloc(sizeof(struct pcmcia_cfg_mem), GFP_KERNEL);
1008 if (cfg_mem == NULL)
1009 return -ENOMEM;
1010
1011 cfg_mem->p_dev = p_dev;
1012 cfg_mem->conf_check = conf_check;
1013 cfg_mem->priv_data = priv_data;
1014
1015 ret = pccard_loop_tuple(p_dev->socket, p_dev->func,
1016 CISTPL_CFTABLE_ENTRY, &cfg_mem->parse,
1017 cfg_mem, pcmcia_do_loop_config);
1018
1019 kfree(cfg_mem);
1020 return ret;
1021}
1022EXPORT_SYMBOL(pcmcia_loop_config);
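A hedged sketch of the calling convention documented above (all names and the acceptance rule are invented; real drivers inspect the Vcc, I/O and memory requirements in cfg and dflt before agreeing). Returning 0 from the callback stops the loop, any other value moves on to the next CISTPL_CFTABLE_ENTRY.

static int example_conf_check(struct pcmcia_device *p_dev,
			      cistpl_cftable_entry_t *cfg,
			      cistpl_cftable_entry_t *dflt,
			      unsigned int vcc,
			      void *priv_data)
{
	/* accept the first entry that describes at least one I/O window */
	if (cfg->io.nwin > 0 || dflt->io.nwin > 0)
		return 0;
	return -ENODEV;		/* reject this entry, keep looping */
}

static int example_pick_config(struct pcmcia_device *p_dev)
{
	/* p_dev->conf.ConfigIndex is already set to cfg->index on each call */
	return pcmcia_loop_config(p_dev, example_conf_check, NULL);
}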
1023
1024
1025struct pcmcia_loop_mem {
1026 struct pcmcia_device *p_dev;
1027 void *priv_data;
1028 int (*loop_tuple) (struct pcmcia_device *p_dev,
1029 tuple_t *tuple,
1030 void *priv_data);
1031};
1032
1033/**
1034 * pcmcia_do_loop_tuple() - internal helper for pcmcia_loop_tuple()
1035 *
1036 * pcmcia_do_loop_tuple() is the internal callback for the call from
1037 * pcmcia_loop_tuple() to pccard_loop_tuple(). Data is transferred
1038 * by a struct pcmcia_loop_mem.
1039 */
1040static int pcmcia_do_loop_tuple(tuple_t *tuple, cisparse_t *parse, void *priv)
1041{
1042 struct pcmcia_loop_mem *loop = priv;
1043
1044 return loop->loop_tuple(loop->p_dev, tuple, loop->priv_data);
1045};
1046
1047/**
1048 * pcmcia_loop_tuple() - loop over tuples in the CIS
1049 * @p_dev: the struct pcmcia_device which we need to loop for.
1050 * @code: which CIS code shall we look for?
1051 * @priv_data: private data to be passed to the loop_tuple function.
1052 * @loop_tuple: function to call for each CIS entry of type @code. It
1053 * gets passed the raw tuple and @priv_data.
1054 *
1055 * pcmcia_loop_tuple() loops over all CIS entries of type @code, and
1056 * calls the @loop_tuple function for each entry. If the call to @loop_tuple
1057 * returns 0, the loop exits. Returns 0 on success or an error code otherwise.
1058 */
1059int pcmcia_loop_tuple(struct pcmcia_device *p_dev, cisdata_t code,
1060 int (*loop_tuple) (struct pcmcia_device *p_dev,
1061 tuple_t *tuple,
1062 void *priv_data),
1063 void *priv_data)
1064{
1065 struct pcmcia_loop_mem loop = {
1066 .p_dev = p_dev,
1067 .loop_tuple = loop_tuple,
1068 .priv_data = priv_data};
1069
1070 return pccard_loop_tuple(p_dev->socket, p_dev->func, code, NULL,
1071 &loop, pcmcia_do_loop_tuple);
1072}
1073EXPORT_SYMBOL(pcmcia_loop_tuple);
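A small illustration of the raw tuple walk (hypothetical; CISTPL_VERS_1 is only an example code and example_count_tuple a made-up name). Because the loop stops as soon as the callback returns 0, a callback that wants to visit every matching tuple keeps returning a non-zero value.

static int example_count_tuple(struct pcmcia_device *p_dev, tuple_t *tuple,
			       void *priv_data)
{
	unsigned int *count = priv_data;

	(*count)++;
	return -ENODATA;	/* non-zero: continue with the next tuple */
}

static unsigned int example_count(struct pcmcia_device *p_dev)
{
	unsigned int count = 0;

	pcmcia_loop_tuple(p_dev, CISTPL_VERS_1, example_count_tuple, &count);
	return count;
}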
1074
1075
1076struct pcmcia_loop_get {
1077 size_t len;
1078 cisdata_t **buf;
1079};
1080
1081/**
1082 * pcmcia_do_get_tuple() - internal helper for pcmcia_get_tuple()
1083 *
1084 * pcmcia_do_get_tuple() is the internal callback for the call from
1085 * pcmcia_get_tuple() to pcmcia_loop_tuple(). As we're only interested in
1086 * the first tuple, return 0 unconditionally. Create a memory buffer large
1087 * enough to hold the content of the tuple, and fill it with the tuple data.
1088 * The caller is responsible for freeing the buffer.
1089 */
1090static int pcmcia_do_get_tuple(struct pcmcia_device *p_dev, tuple_t *tuple,
1091 void *priv)
1092{
1093 struct pcmcia_loop_get *get = priv;
1094
1095 *get->buf = kzalloc(tuple->TupleDataLen, GFP_KERNEL);
1096 if (*get->buf) {
1097 get->len = tuple->TupleDataLen;
1098 memcpy(*get->buf, tuple->TupleData, tuple->TupleDataLen);
1099 } else
1100 dev_dbg(&p_dev->dev, "do_get_tuple: out of memory\n");
1101 return 0;
1102}
1103
1104/**
1105 * pcmcia_get_tuple() - get first tuple from CIS
1106 * @p_dev: the struct pcmcia_device which we need to loop for.
1107 * @code: which CIS code shall we look for?
1108 * @buf: pointer to store the buffer to.
1109 *
1110 * pcmcia_get_tuple() gets the content of the first CIS entry of type @code.
1111 * It returns the buffer length (or zero). The caller is responsible for freeing
1112 * the buffer passed in @buf.
1113 */
1114size_t pcmcia_get_tuple(struct pcmcia_device *p_dev, cisdata_t code,
1115 unsigned char **buf)
1116{
1117 struct pcmcia_loop_get get = {
1118 .len = 0,
1119 .buf = buf,
1120 };
1121
1122 *get.buf = NULL;
1123 pcmcia_loop_tuple(p_dev, code, pcmcia_do_get_tuple, &get);
1124
1125 return get.len;
1126}
1127EXPORT_SYMBOL(pcmcia_get_tuple);
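Usage sketch for pcmcia_get_tuple() (hypothetical fragment assuming <linux/slab.h> is included; the tuple code is only an example): the helper allocates the buffer, so the caller must kfree() it, and a length of 0 with a NULL buffer simply means the tuple was absent or the allocation failed.

static void example_dump_first_tuple(struct pcmcia_device *p_dev)
{
	unsigned char *buf = NULL;
	size_t len;

	len = pcmcia_get_tuple(p_dev, CISTPL_VERS_1, &buf);
	if (len)
		dev_info(&p_dev->dev, "first tuple carries %zu bytes\n", len);

	kfree(buf);	/* kfree(NULL) is a no-op, so no extra check needed */
}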
1128
1129
1130/**
1131 * pcmcia_do_get_mac() - internal helper for pcmcia_get_mac_from_cis()
1132 *
1133 * pcmcia_do_get_mac() is the internal callback for the call from
1134 * pcmcia_get_mac_from_cis() to pcmcia_loop_tuple(). We check whether the
1135 * tuple contains a proper LAN_NODE_ID of length 6, and copy the data
1136 * to struct net_device->dev_addr[i].
1137 */
1138static int pcmcia_do_get_mac(struct pcmcia_device *p_dev, tuple_t *tuple,
1139 void *priv)
1140{
1141 struct net_device *dev = priv;
1142 int i;
1143
1144 if (tuple->TupleData[0] != CISTPL_FUNCE_LAN_NODE_ID)
1145 return -EINVAL;
1146 if (tuple->TupleDataLen < ETH_ALEN + 2) {
1147 dev_warn(&p_dev->dev, "Invalid CIS tuple length for "
1148 "LAN_NODE_ID\n");
1149 return -EINVAL;
1150 }
1151
1152 if (tuple->TupleData[1] != ETH_ALEN) {
1153 dev_warn(&p_dev->dev, "Invalid header for LAN_NODE_ID\n");
1154 return -EINVAL;
1155 }
1156 for (i = 0; i < 6; i++)
1157 dev->dev_addr[i] = tuple->TupleData[i+2];
1158 return 0;
1159}
1160
1161/**
1162 * pcmcia_get_mac_from_cis() - read out MAC address from CISTPL_FUNCE
1163 * @p_dev: the struct pcmcia_device for which we want the address.
1164 * @dev: a properly prepared struct net_device in which to store the info.
1165 *
1166 * pcmcia_get_mac_from_cis() reads out the hardware MAC address from
1167 * CISTPL_FUNCE and stores it into struct net_device *dev->dev_addr which
1168 * must be set up properly by the driver (see examples!).
1169 */
1170int pcmcia_get_mac_from_cis(struct pcmcia_device *p_dev, struct net_device *dev)
1171{
1172 return pcmcia_loop_tuple(p_dev, CISTPL_FUNCE, pcmcia_do_get_mac, dev);
1173}
1174EXPORT_SYMBOL(pcmcia_get_mac_from_cis);
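And a hedged probe fragment for the MAC helper (example_net_setup is a made-up name and <linux/netdevice.h> is assumed; the real users are the PCMCIA network drivers): the struct net_device must already be allocated, e.g. with alloc_etherdev(), before its dev_addr is filled from the CIS.

static int example_net_setup(struct pcmcia_device *p_dev,
			     struct net_device *dev)
{
	if (pcmcia_get_mac_from_cis(p_dev, dev))
		return -ENODEV;	/* no usable LAN_NODE_ID tuple in the CIS */
	return 0;
}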
1175
diff --git a/drivers/pcmcia/pxa2xx_vpac270.c b/drivers/pcmcia/pxa2xx_vpac270.c
new file mode 100644
index 000000000000..55627eccee8e
--- /dev/null
+++ b/drivers/pcmcia/pxa2xx_vpac270.c
@@ -0,0 +1,229 @@
1/*
2 * linux/drivers/pcmcia/pxa2xx_vpac270.c
3 *
4 * Driver for Voipac PXA270 PCMCIA and CF sockets
5 *
6 * Copyright (C) 2010
7 * Marek Vasut <marek.vasut@gmail.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 */
14
15#include <linux/module.h>
16#include <linux/platform_device.h>
17
18#include <asm/mach-types.h>
19
20#include <mach/gpio.h>
21#include <mach/vpac270.h>
22
23#include "soc_common.h"
24
25static struct pcmcia_irqs cd_irqs[] = {
26 {
27 .sock = 0,
28 .irq = IRQ_GPIO(GPIO84_VPAC270_PCMCIA_CD),
29 .str = "PCMCIA CD"
30 },
31 {
32 .sock = 1,
33 .irq = IRQ_GPIO(GPIO17_VPAC270_CF_CD),
34 .str = "CF CD"
35 },
36};
37
38static int vpac270_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
39{
40 int ret;
41
42 if (skt->nr == 0) {
43 ret = gpio_request(GPIO84_VPAC270_PCMCIA_CD, "PCMCIA CD");
44 if (ret)
45 goto err1;
46 ret = gpio_direction_input(GPIO84_VPAC270_PCMCIA_CD);
47 if (ret)
48 goto err2;
49
50 ret = gpio_request(GPIO35_VPAC270_PCMCIA_RDY, "PCMCIA RDY");
51 if (ret)
52 goto err2;
53 ret = gpio_direction_input(GPIO35_VPAC270_PCMCIA_RDY);
54 if (ret)
55 goto err3;
56
57 ret = gpio_request(GPIO107_VPAC270_PCMCIA_PPEN, "PCMCIA PPEN");
58 if (ret)
59 goto err3;
60 ret = gpio_direction_output(GPIO107_VPAC270_PCMCIA_PPEN, 0);
61 if (ret)
62 goto err4;
63
64 ret = gpio_request(GPIO11_VPAC270_PCMCIA_RESET, "PCMCIA RESET");
65 if (ret)
66 goto err4;
67 ret = gpio_direction_output(GPIO11_VPAC270_PCMCIA_RESET, 0);
68 if (ret)
69 goto err5;
70
71 skt->socket.pci_irq = gpio_to_irq(GPIO35_VPAC270_PCMCIA_RDY);
72
73 return soc_pcmcia_request_irqs(skt, &cd_irqs[0], 1);
74
75err5:
76 gpio_free(GPIO11_VPAC270_PCMCIA_RESET);
77err4:
78 gpio_free(GPIO107_VPAC270_PCMCIA_PPEN);
79err3:
80 gpio_free(GPIO35_VPAC270_PCMCIA_RDY);
81err2:
82 gpio_free(GPIO84_VPAC270_PCMCIA_CD);
83err1:
84 return ret;
85
86 } else {
87 ret = gpio_request(GPIO17_VPAC270_CF_CD, "CF CD");
88 if (ret)
89 goto err6;
90 ret = gpio_direction_input(GPIO17_VPAC270_CF_CD);
91 if (ret)
92 goto err7;
93
94 ret = gpio_request(GPIO12_VPAC270_CF_RDY, "CF RDY");
95 if (ret)
96 goto err7;
97 ret = gpio_direction_input(GPIO12_VPAC270_CF_RDY);
98 if (ret)
99 goto err8;
100
101 ret = gpio_request(GPIO16_VPAC270_CF_RESET, "CF RESET");
102 if (ret)
103 goto err8;
104 ret = gpio_direction_output(GPIO16_VPAC270_CF_RESET, 0);
105 if (ret)
106 goto err9;
107
108 skt->socket.pci_irq = gpio_to_irq(GPIO12_VPAC270_CF_RDY);
109
110 return soc_pcmcia_request_irqs(skt, &cd_irqs[1], 1);
111
112err9:
113 gpio_free(GPIO16_VPAC270_CF_RESET);
114err8:
115 gpio_free(GPIO12_VPAC270_CF_RDY);
116err7:
117 gpio_free(GPIO17_VPAC270_CF_CD);
118err6:
119 return ret;
120
121 }
122}
123
124static void vpac270_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
125{
126 gpio_free(GPIO11_VPAC270_PCMCIA_RESET);
127 gpio_free(GPIO107_VPAC270_PCMCIA_PPEN);
128 gpio_free(GPIO35_VPAC270_PCMCIA_RDY);
129 gpio_free(GPIO84_VPAC270_PCMCIA_CD);
130 gpio_free(GPIO16_VPAC270_CF_RESET);
131 gpio_free(GPIO12_VPAC270_CF_RDY);
132 gpio_free(GPIO17_VPAC270_CF_CD);
133}
134
135static void vpac270_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
136 struct pcmcia_state *state)
137{
138 if (skt->nr == 0) {
139 state->detect = !gpio_get_value(GPIO84_VPAC270_PCMCIA_CD);
140 state->ready = !!gpio_get_value(GPIO35_VPAC270_PCMCIA_RDY);
141 } else {
142 state->detect = !gpio_get_value(GPIO17_VPAC270_CF_CD);
143 state->ready = !!gpio_get_value(GPIO12_VPAC270_CF_RDY);
144 }
145 state->bvd1 = 1;
146 state->bvd2 = 1;
147 state->wrprot = 0;
148 state->vs_3v = 1;
149 state->vs_Xv = 0;
150}
151
152static int
153vpac270_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
154 const socket_state_t *state)
155{
156 if (skt->nr == 0) {
157 gpio_set_value(GPIO11_VPAC270_PCMCIA_RESET,
158 (state->flags & SS_RESET));
159 gpio_set_value(GPIO107_VPAC270_PCMCIA_PPEN,
160 !(state->Vcc == 33 || state->Vcc == 50));
161 } else {
162 gpio_set_value(GPIO16_VPAC270_CF_RESET,
163 (state->flags & SS_RESET));
164 }
165
166 return 0;
167}
168
169static void vpac270_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
170{
171}
172
173static void vpac270_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
174{
175}
176
177static struct pcmcia_low_level vpac270_pcmcia_ops = {
178 .owner = THIS_MODULE,
179
180 .first = 0,
181 .nr = 2,
182
183 .hw_init = vpac270_pcmcia_hw_init,
184 .hw_shutdown = vpac270_pcmcia_hw_shutdown,
185
186 .socket_state = vpac270_pcmcia_socket_state,
187 .configure_socket = vpac270_pcmcia_configure_socket,
188
189 .socket_init = vpac270_pcmcia_socket_init,
190 .socket_suspend = vpac270_pcmcia_socket_suspend,
191};
192
193static struct platform_device *vpac270_pcmcia_device;
194
195static int __init vpac270_pcmcia_init(void)
196{
197 int ret;
198
199 if (!machine_is_vpac270())
200 return -ENODEV;
201
202 vpac270_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
203 if (!vpac270_pcmcia_device)
204 return -ENOMEM;
205
206 ret = platform_device_add_data(vpac270_pcmcia_device,
207 &vpac270_pcmcia_ops, sizeof(vpac270_pcmcia_ops));
208
209 if (!ret)
210 ret = platform_device_add(vpac270_pcmcia_device);
211
212 if (ret)
213 platform_device_put(vpac270_pcmcia_device);
214
215 return ret;
216}
217
218static void __exit vpac270_pcmcia_exit(void)
219{
220 platform_device_unregister(vpac270_pcmcia_device);
221}
222
223module_init(vpac270_pcmcia_init);
224module_exit(vpac270_pcmcia_exit);
225
226MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
227MODULE_DESCRIPTION("PCMCIA support for Voipac PXA270");
228MODULE_ALIAS("platform:pxa2xx-pcmcia");
229MODULE_LICENSE("GPL");
diff --git a/drivers/pcmcia/rsrc_iodyn.c b/drivers/pcmcia/rsrc_iodyn.c
new file mode 100644
index 000000000000..d0bf35021065
--- /dev/null
+++ b/drivers/pcmcia/rsrc_iodyn.c
@@ -0,0 +1,172 @@
1/*
2 * rsrc_iodyn.c -- Resource management routines for MEM-static sockets.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * The initial developer of the original code is David A. Hinds
9 * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
10 * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
11 *
12 * (C) 1999 David A. Hinds
13 */
14
15#include <linux/slab.h>
16#include <linux/module.h>
17#include <linux/kernel.h>
18
19#include <pcmcia/cs_types.h>
20#include <pcmcia/ss.h>
21#include <pcmcia/cs.h>
22#include <pcmcia/cistpl.h>
23#include "cs_internal.h"
24
25
26struct pcmcia_align_data {
27 unsigned long mask;
28 unsigned long offset;
29};
30
31static resource_size_t pcmcia_align(void *align_data,
32 const struct resource *res,
33 resource_size_t size, resource_size_t align)
34{
35 struct pcmcia_align_data *data = align_data;
36 resource_size_t start;
37
38 start = (res->start & ~data->mask) + data->offset;
39 if (start < res->start)
40 start += data->mask + 1;
41
42#ifdef CONFIG_X86
43 if (res->flags & IORESOURCE_IO) {
44 if (start & 0x300)
45 start = (start + 0x3ff) & ~0x3ff;
46 }
47#endif
48
49#ifdef CONFIG_M68K
50 if (res->flags & IORESOURCE_IO) {
51 if ((res->start + size - 1) >= 1024)
52 start = res->end;
53 }
54#endif
55
56 return start;
57}
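Worked example with made-up numbers: a request with base 0x300 and align 0x100 gives mask 0xff and offset 0; if the allocator proposes a range starting at 0x3e8, the code first rounds down to 0x300, sees that this lies below res->start and steps up to 0x400, and the x86-only clause leaves 0x400 untouched because none of the 0x300 bits are set (a candidate such as 0x500 would be pushed on to 0x800).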
58
59
60static struct resource *__iodyn_find_io_region(struct pcmcia_socket *s,
61 unsigned long base, int num,
62 unsigned long align)
63{
64 struct resource *res = pcmcia_make_resource(0, num, IORESOURCE_IO,
65 dev_name(&s->dev));
66 struct pcmcia_align_data data;
67 unsigned long min = base;
68 int ret;
69
70 data.mask = align - 1;
71 data.offset = base & data.mask;
72
73#ifdef CONFIG_PCI
74 if (s->cb_dev) {
75 ret = pci_bus_alloc_resource(s->cb_dev->bus, res, num, 1,
76 min, 0, pcmcia_align, &data);
77 } else
78#endif
79 ret = allocate_resource(&ioport_resource, res, num, min, ~0UL,
80 1, pcmcia_align, &data);
81
82 if (ret != 0) {
83 kfree(res);
84 res = NULL;
85 }
86 return res;
87}
88
89static int iodyn_find_io(struct pcmcia_socket *s, unsigned int attr,
90 unsigned int *base, unsigned int num,
91 unsigned int align)
92{
93 int i, ret = 0;
94
95 /* Check for an already-allocated window that must conflict with
96 * what was asked for. It is a hack because it does not catch all
97 * potential conflicts, just the most obvious ones.
98 */
99 for (i = 0; i < MAX_IO_WIN; i++) {
100 if (!s->io[i].res)
101 continue;
102
103 if (!*base)
104 continue;
105
106 if ((s->io[i].res->start & (align-1)) == *base)
107 return -EBUSY;
108 }
109
110 for (i = 0; i < MAX_IO_WIN; i++) {
111 struct resource *res = s->io[i].res;
112 unsigned int try;
113
114 if (res && (res->flags & IORESOURCE_BITS) !=
115 (attr & IORESOURCE_BITS))
116 continue;
117
118 if (!res) {
119 if (align == 0)
120 align = 0x10000;
121
122 res = s->io[i].res = __iodyn_find_io_region(s, *base,
123 num, align);
124 if (!res)
125 return -EINVAL;
126
127 *base = res->start;
128 s->io[i].res->flags =
129 ((res->flags & ~IORESOURCE_BITS) |
130 (attr & IORESOURCE_BITS));
131 s->io[i].InUse = num;
132 return 0;
133 }
134
135 /* Try to extend top of window */
136 try = res->end + 1;
137 if ((*base == 0) || (*base == try)) {
138 if (adjust_resource(s->io[i].res, res->start,
139 res->end - res->start + num + 1))
140 continue;
141 *base = try;
142 s->io[i].InUse += num;
143 return 0;
144 }
145
146 /* Try to extend bottom of window */
147 try = res->start - num;
148 if ((*base == 0) || (*base == try)) {
149 if (adjust_resource(s->io[i].res,
150 res->start - num,
151 res->end - res->start + num + 1))
152 continue;
153 *base = try;
154 s->io[i].InUse += num;
155 return 0;
156 }
157 }
158
159 return -EINVAL;
160}
161
162
163struct pccard_resource_ops pccard_iodyn_ops = {
164 .validate_mem = NULL,
165 .find_io = iodyn_find_io,
166 .find_mem = NULL,
167 .add_io = NULL,
168 .add_mem = NULL,
169 .init = static_init,
170 .exit = NULL,
171};
172EXPORT_SYMBOL(pccard_iodyn_ops);
diff --git a/drivers/pcmcia/rsrc_mgr.c b/drivers/pcmcia/rsrc_mgr.c
index ffa5f3cae57b..142efac3c387 100644
--- a/drivers/pcmcia/rsrc_mgr.c
+++ b/drivers/pcmcia/rsrc_mgr.c
@@ -22,7 +22,7 @@
22#include <pcmcia/cistpl.h> 22#include <pcmcia/cistpl.h>
23#include "cs_internal.h" 23#include "cs_internal.h"
24 24
25static int static_init(struct pcmcia_socket *s) 25int static_init(struct pcmcia_socket *s)
26{ 26{
27 /* the good thing about SS_CAP_STATIC_MAP sockets is 27 /* the good thing about SS_CAP_STATIC_MAP sockets is
28 * that they don't need a resource database */ 28 * that they don't need a resource database */
@@ -32,118 +32,44 @@ static int static_init(struct pcmcia_socket *s)
32 return 0; 32 return 0;
33} 33}
34 34
35 35struct resource *pcmcia_make_resource(unsigned long start, unsigned long end,
36struct pccard_resource_ops pccard_static_ops = { 36 int flags, const char *name)
37 .validate_mem = NULL,
38 .adjust_io_region = NULL,
39 .find_io = NULL,
40 .find_mem = NULL,
41 .add_io = NULL,
42 .add_mem = NULL,
43 .init = static_init,
44 .exit = NULL,
45};
46EXPORT_SYMBOL(pccard_static_ops);
47
48
49#ifdef CONFIG_PCCARD_IODYN
50
51static struct resource *
52make_resource(unsigned long b, unsigned long n, int flags, char *name)
53{ 37{
54 struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL); 38 struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL);
55 39
56 if (res) { 40 if (res) {
57 res->name = name; 41 res->name = name;
58 res->start = b; 42 res->start = start;
59 res->end = b + n - 1; 43 res->end = start + end - 1;
60 res->flags = flags; 44 res->flags = flags;
61 } 45 }
62 return res; 46 return res;
63} 47}
64 48
65struct pcmcia_align_data { 49static int static_find_io(struct pcmcia_socket *s, unsigned int attr,
66 unsigned long mask; 50 unsigned int *base, unsigned int num,
67 unsigned long offset; 51 unsigned int align)
68};
69
70static resource_size_t pcmcia_align(void *align_data,
71 const struct resource *res,
72 resource_size_t size, resource_size_t align)
73{ 52{
74 struct pcmcia_align_data *data = align_data; 53 if (!s->io_offset)
75 resource_size_t start; 54 return -EINVAL;
55 *base = s->io_offset | (*base & 0x0fff);
76 56
77 start = (res->start & ~data->mask) + data->offset; 57 return 0;
78 if (start < res->start)
79 start += data->mask + 1;
80
81#ifdef CONFIG_X86
82 if (res->flags & IORESOURCE_IO) {
83 if (start & 0x300)
84 start = (start + 0x3ff) & ~0x3ff;
85 }
86#endif
87
88#ifdef CONFIG_M68K
89 if (res->flags & IORESOURCE_IO) {
90 if ((res->start + size - 1) >= 1024)
91 start = res->end;
92 }
93#endif
94
95 return start;
96}
97
98
99static int iodyn_adjust_io_region(struct resource *res, unsigned long r_start,
100 unsigned long r_end, struct pcmcia_socket *s)
101{
102 return adjust_resource(res, r_start, r_end - r_start + 1);
103} 58}
104 59
105 60
106static struct resource *iodyn_find_io_region(unsigned long base, int num, 61struct pccard_resource_ops pccard_static_ops = {
107 unsigned long align, struct pcmcia_socket *s)
108{
109 struct resource *res = make_resource(0, num, IORESOURCE_IO,
110 dev_name(&s->dev));
111 struct pcmcia_align_data data;
112 unsigned long min = base;
113 int ret;
114
115 if (align == 0)
116 align = 0x10000;
117
118 data.mask = align - 1;
119 data.offset = base & data.mask;
120
121#ifdef CONFIG_PCI
122 if (s->cb_dev) {
123 ret = pci_bus_alloc_resource(s->cb_dev->bus, res, num, 1,
124 min, 0, pcmcia_align, &data);
125 } else
126#endif
127 ret = allocate_resource(&ioport_resource, res, num, min, ~0UL,
128 1, pcmcia_align, &data);
129
130 if (ret != 0) {
131 kfree(res);
132 res = NULL;
133 }
134 return res;
135}
136
137struct pccard_resource_ops pccard_iodyn_ops = {
138 .validate_mem = NULL, 62 .validate_mem = NULL,
139 .adjust_io_region = iodyn_adjust_io_region, 63 .find_io = static_find_io,
140 .find_io = iodyn_find_io_region,
141 .find_mem = NULL, 64 .find_mem = NULL,
142 .add_io = NULL, 65 .add_io = NULL,
143 .add_mem = NULL, 66 .add_mem = NULL,
144 .init = static_init, 67 .init = static_init,
145 .exit = NULL, 68 .exit = NULL,
146}; 69};
147EXPORT_SYMBOL(pccard_iodyn_ops); 70EXPORT_SYMBOL(pccard_static_ops);
71
148 72
149#endif /* CONFIG_PCCARD_IODYN */ 73MODULE_AUTHOR("David A. Hinds, Dominik Brodowski");
74MODULE_LICENSE("GPL");
75MODULE_ALIAS("rsrc_nonstatic");
diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c
index a6eb7b59ba9f..dcd1a4ad3d63 100644
--- a/drivers/pcmcia/rsrc_nonstatic.c
+++ b/drivers/pcmcia/rsrc_nonstatic.c
@@ -34,8 +34,10 @@
34#include <pcmcia/cistpl.h> 34#include <pcmcia/cistpl.h>
35#include "cs_internal.h" 35#include "cs_internal.h"
36 36
37/* moved to rsrc_mgr.c
37MODULE_AUTHOR("David A. Hinds, Dominik Brodowski"); 38MODULE_AUTHOR("David A. Hinds, Dominik Brodowski");
38MODULE_LICENSE("GPL"); 39MODULE_LICENSE("GPL");
40*/
39 41
40/* Parameters that can be set with 'insmod' */ 42/* Parameters that can be set with 'insmod' */
41 43
@@ -70,27 +72,13 @@ struct socket_data {
70======================================================================*/ 72======================================================================*/
71 73
72static struct resource * 74static struct resource *
73make_resource(resource_size_t b, resource_size_t n, int flags, const char *name)
74{
75 struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL);
76
77 if (res) {
78 res->name = name;
79 res->start = b;
80 res->end = b + n - 1;
81 res->flags = flags;
82 }
83 return res;
84}
85
86static struct resource *
87claim_region(struct pcmcia_socket *s, resource_size_t base, 75claim_region(struct pcmcia_socket *s, resource_size_t base,
88 resource_size_t size, int type, char *name) 76 resource_size_t size, int type, char *name)
89{ 77{
90 struct resource *res, *parent; 78 struct resource *res, *parent;
91 79
92 parent = type & IORESOURCE_MEM ? &iomem_resource : &ioport_resource; 80 parent = type & IORESOURCE_MEM ? &iomem_resource : &ioport_resource;
93 res = make_resource(base, size, type | IORESOURCE_BUSY, name); 81 res = pcmcia_make_resource(base, size, type | IORESOURCE_BUSY, name);
94 82
95 if (res) { 83 if (res) {
96#ifdef CONFIG_PCI 84#ifdef CONFIG_PCI
@@ -661,8 +649,9 @@ pcmcia_align(void *align_data, const struct resource *res,
661 * Adjust an existing IO region allocation, but making sure that we don't 649 * Adjust an existing IO region allocation, but making sure that we don't
662 * encroach outside the resources which the user supplied. 650 * encroach outside the resources which the user supplied.
663 */ 651 */
664static int nonstatic_adjust_io_region(struct resource *res, unsigned long r_start, 652static int __nonstatic_adjust_io_region(struct pcmcia_socket *s,
665 unsigned long r_end, struct pcmcia_socket *s) 653 unsigned long r_start,
654 unsigned long r_end)
666{ 655{
667 struct resource_map *m; 656 struct resource_map *m;
668 struct socket_data *s_data = s->resource_data; 657 struct socket_data *s_data = s->resource_data;
@@ -675,8 +664,7 @@ static int nonstatic_adjust_io_region(struct resource *res, unsigned long r_star
675 if (start > r_start || r_end > end) 664 if (start > r_start || r_end > end)
676 continue; 665 continue;
677 666
678 ret = adjust_resource(res, r_start, r_end - r_start + 1); 667 ret = 0;
679 break;
680 } 668 }
681 669
682 return ret; 670 return ret;
@@ -695,18 +683,17 @@ static int nonstatic_adjust_io_region(struct resource *res, unsigned long r_star
695 683
696======================================================================*/ 684======================================================================*/
697 685
698static struct resource *nonstatic_find_io_region(unsigned long base, int num, 686static struct resource *__nonstatic_find_io_region(struct pcmcia_socket *s,
699 unsigned long align, struct pcmcia_socket *s) 687 unsigned long base, int num,
688 unsigned long align)
700{ 689{
701 struct resource *res = make_resource(0, num, IORESOURCE_IO, dev_name(&s->dev)); 690 struct resource *res = pcmcia_make_resource(0, num, IORESOURCE_IO,
691 dev_name(&s->dev));
702 struct socket_data *s_data = s->resource_data; 692 struct socket_data *s_data = s->resource_data;
703 struct pcmcia_align_data data; 693 struct pcmcia_align_data data;
704 unsigned long min = base; 694 unsigned long min = base;
705 int ret; 695 int ret;
706 696
707 if (align == 0)
708 align = 0x10000;
709
710 data.mask = align - 1; 697 data.mask = align - 1;
711 data.offset = base & data.mask; 698 data.offset = base & data.mask;
712 data.map = &s_data->io_db; 699 data.map = &s_data->io_db;
@@ -727,10 +714,97 @@ static struct resource *nonstatic_find_io_region(unsigned long base, int num,
727 return res; 714 return res;
728} 715}
729 716
717static int nonstatic_find_io(struct pcmcia_socket *s, unsigned int attr,
718 unsigned int *base, unsigned int num,
719 unsigned int align)
720{
721 int i, ret = 0;
722
723 /* Check for an already-allocated window that must conflict with
724 * what was asked for. It is a hack because it does not catch all
725 * potential conflicts, just the most obvious ones.
726 */
727 for (i = 0; i < MAX_IO_WIN; i++) {
728 if (!s->io[i].res)
729 continue;
730
731 if (!*base)
732 continue;
733
734 if ((s->io[i].res->start & (align-1)) == *base)
735 return -EBUSY;
736 }
737
738 for (i = 0; i < MAX_IO_WIN; i++) {
739 struct resource *res = s->io[i].res;
740 unsigned int try;
741
742 if (res && (res->flags & IORESOURCE_BITS) !=
743 (attr & IORESOURCE_BITS))
744 continue;
745
746 if (!res) {
747 if (align == 0)
748 align = 0x10000;
749
750 res = s->io[i].res = __nonstatic_find_io_region(s,
751 *base, num,
752 align);
753 if (!res)
754 return -EINVAL;
755
756 *base = res->start;
757 s->io[i].res->flags =
758 ((res->flags & ~IORESOURCE_BITS) |
759 (attr & IORESOURCE_BITS));
760 s->io[i].InUse = num;
761 return 0;
762 }
763
764 /* Try to extend top of window */
765 try = res->end + 1;
766 if ((*base == 0) || (*base == try)) {
767 ret = __nonstatic_adjust_io_region(s, res->start,
768 res->end + num);
769 if (!ret) {
770 ret = adjust_resource(s->io[i].res, res->start,
771 res->end - res->start + num + 1);
772 if (ret)
773 continue;
774 *base = try;
775 s->io[i].InUse += num;
776 return 0;
777 }
778 }
779
780 /* Try to extend bottom of window */
781 try = res->start - num;
782 if ((*base == 0) || (*base == try)) {
783 ret = __nonstatic_adjust_io_region(s,
784 res->start - num,
785 res->end);
786 if (!ret) {
787 ret = adjust_resource(s->io[i].res,
788 res->start - num,
789 res->end - res->start + num + 1);
790 if (ret)
791 continue;
792 *base = try;
793 s->io[i].InUse += num;
794 return 0;
795 }
796 }
797 }
798
799 return -EINVAL;
800}
801
802
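Worked example with made-up numbers: if window 0 already spans I/O ports 0x300-0x30f and a later request on the same socket asks for 16 more ports with *base of 0 or 0x310, the "extend top" branch first has __nonstatic_adjust_io_region() confirm that 0x300-0x31f still falls inside a single entry of the socket's io_db, then grows the resource with adjust_resource() to 0x300-0x31f and reports 0x310 as the new base; the "extend bottom" branch handles the mirror case of *base == 0x2f0 in the same way.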
730static struct resource *nonstatic_find_mem_region(u_long base, u_long num, 803static struct resource *nonstatic_find_mem_region(u_long base, u_long num,
731 u_long align, int low, struct pcmcia_socket *s) 804 u_long align, int low, struct pcmcia_socket *s)
732{ 805{
733 struct resource *res = make_resource(0, num, IORESOURCE_MEM, dev_name(&s->dev)); 806 struct resource *res = pcmcia_make_resource(0, num, IORESOURCE_MEM,
807 dev_name(&s->dev));
734 struct socket_data *s_data = s->resource_data; 808 struct socket_data *s_data = s->resource_data;
735 struct pcmcia_align_data data; 809 struct pcmcia_align_data data;
736 unsigned long min, max; 810 unsigned long min, max;
@@ -861,23 +935,42 @@ static int nonstatic_autoadd_resources(struct pcmcia_socket *s)
861 return -ENODEV; 935 return -ENODEV;
862 936
863#if defined(CONFIG_X86) 937#if defined(CONFIG_X86)
864 /* If this is the root bus, the risk of hitting 938 /* If this is the root bus, the risk of hitting some strange
865 * some strange system devices which aren't protected 939 * system devices is too high: If a driver isn't loaded, the
866 * by either ACPI resource tables or properly requested 940 * resources are not claimed; even if a driver is loaded, it
867 * resources is too big. Therefore, don't do auto-adding 941 * may not request all resources, or may even request the wrong ones. We
868 * of resources at the moment. 942 * can neither trust the rest of the kernel nor ACPI/PNP and
943 * CRS parsing to get it right. Therefore, use several
944 * safeguards:
945 *
946 * - Do not auto-add resources if the CardBus bridge is on
947 * the PCI root bus
948 *
949 * - Avoid any I/O ports < 0x100.
950 *
951 * - On PCI-PCI bridges, only use resources which are set up
952 * exclusively for the secondary PCI bus: the risk of hitting
953 * system devices is quite low, as they usually aren't
954 * connected to the secondary PCI bus.
869 */ 955 */
870 if (s->cb_dev->bus->number == 0) 956 if (s->cb_dev->bus->number == 0)
871 return -EINVAL; 957 return -EINVAL;
872#endif
873 958
959 for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
960 res = s->cb_dev->bus->resource[i];
961#else
874 pci_bus_for_each_resource(s->cb_dev->bus, res, i) { 962 pci_bus_for_each_resource(s->cb_dev->bus, res, i) {
963#endif
875 if (!res) 964 if (!res)
876 continue; 965 continue;
877 966
878 if (res->flags & IORESOURCE_IO) { 967 if (res->flags & IORESOURCE_IO) {
968 /* safeguard against the root resource, where the
969 * risk of hitting any other device would be too
970 * high */
879 if (res == &ioport_resource) 971 if (res == &ioport_resource)
880 continue; 972 continue;
973
881 dev_printk(KERN_INFO, &s->cb_dev->dev, 974 dev_printk(KERN_INFO, &s->cb_dev->dev,
882 "pcmcia: parent PCI bridge window: %pR\n", 975 "pcmcia: parent PCI bridge window: %pR\n",
883 res); 976 res);
@@ -887,8 +980,12 @@ static int nonstatic_autoadd_resources(struct pcmcia_socket *s)
887 } 980 }
888 981
889 if (res->flags & IORESOURCE_MEM) { 982 if (res->flags & IORESOURCE_MEM) {
983 /* safeguard against the root resource, where the
984 * risk of hitting any other device would be too
985 * high */
890 if (res == &iomem_resource) 986 if (res == &iomem_resource)
891 continue; 987 continue;
988
892 dev_printk(KERN_INFO, &s->cb_dev->dev, 989 dev_printk(KERN_INFO, &s->cb_dev->dev,
893 "pcmcia: parent PCI bridge window: %pR\n", 990 "pcmcia: parent PCI bridge window: %pR\n",
894 res); 991 res);
@@ -956,8 +1053,7 @@ static void nonstatic_release_resource_db(struct pcmcia_socket *s)
956 1053
957struct pccard_resource_ops pccard_nonstatic_ops = { 1054struct pccard_resource_ops pccard_nonstatic_ops = {
958 .validate_mem = pcmcia_nonstatic_validate_mem, 1055 .validate_mem = pcmcia_nonstatic_validate_mem,
959 .adjust_io_region = nonstatic_adjust_io_region, 1056 .find_io = nonstatic_find_io,
960 .find_io = nonstatic_find_io_region,
961 .find_mem = nonstatic_find_mem_region, 1057 .find_mem = nonstatic_find_mem_region,
962 .add_io = adjust_io, 1058 .add_io = adjust_io,
963 .add_mem = adjust_memory, 1059 .add_mem = adjust_memory,
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index 83ace277426c..424e576f3acb 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -1303,13 +1303,6 @@ static int yenta_dev_suspend_noirq(struct device *dev)
1303 pci_read_config_dword(pdev, 17*4, &socket->saved_state[1]); 1303 pci_read_config_dword(pdev, 17*4, &socket->saved_state[1]);
1304 pci_disable_device(pdev); 1304 pci_disable_device(pdev);
1305 1305
1306 /*
1307 * Some laptops (IBM T22) do not like us putting the Cardbus
1308 * bridge into D3. At a guess, some other laptop will
1309 * probably require this, so leave it commented out for now.
1310 */
1311 /* pci_set_power_state(dev, 3); */
1312
1313 return 0; 1306 return 0;
1314} 1307}
1315 1308
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 7bec4588c268..6c3320d75055 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -390,6 +390,7 @@ config EEEPC_WMI
390 depends on ACPI_WMI 390 depends on ACPI_WMI
391 depends on INPUT 391 depends on INPUT
392 depends on EXPERIMENTAL 392 depends on EXPERIMENTAL
393 select INPUT_SPARSEKMAP
393 ---help--- 394 ---help---
394 Say Y here if you want to support WMI-based hotkeys on Eee PC laptops. 395 Say Y here if you want to support WMI-based hotkeys on Eee PC laptops.
395 396
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index 52262b012abb..efe8f6388906 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -79,15 +79,15 @@ static uint wapf = 1;
79module_param(wapf, uint, 0644); 79module_param(wapf, uint, 0644);
80MODULE_PARM_DESC(wapf, "WAPF value"); 80MODULE_PARM_DESC(wapf, "WAPF value");
81 81
82static uint wlan_status = 1; 82static int wlan_status = 1;
83static uint bluetooth_status = 1; 83static int bluetooth_status = 1;
84 84
85module_param(wlan_status, uint, 0644); 85module_param(wlan_status, int, 0644);
86MODULE_PARM_DESC(wlan_status, "Set the wireless status on boot " 86MODULE_PARM_DESC(wlan_status, "Set the wireless status on boot "
87 "(0 = disabled, 1 = enabled, -1 = don't do anything). " 87 "(0 = disabled, 1 = enabled, -1 = don't do anything). "
88 "default is 1"); 88 "default is 1");
89 89
90module_param(bluetooth_status, uint, 0644); 90module_param(bluetooth_status, int, 0644);
91MODULE_PARM_DESC(bluetooth_status, "Set the wireless status on boot " 91MODULE_PARM_DESC(bluetooth_status, "Set the wireless status on boot "
92 "(0 = disabled, 1 = enabled, -1 = don't do anything). " 92 "(0 = disabled, 1 = enabled, -1 = don't do anything). "
93 "default is 1"); 93 "default is 1");
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
index 6ba6c30e5bb6..66f53c3c35e8 100644
--- a/drivers/platform/x86/dell-wmi.c
+++ b/drivers/platform/x86/dell-wmi.c
@@ -217,6 +217,7 @@ static void dell_wmi_notify(u32 value, void *context)
217 if (dell_new_hk_type && (buffer_entry[1] != 0x10)) { 217 if (dell_new_hk_type && (buffer_entry[1] != 0x10)) {
218 printk(KERN_INFO "dell-wmi: Received unknown WMI event" 218 printk(KERN_INFO "dell-wmi: Received unknown WMI event"
219 " (0x%x)\n", buffer_entry[1]); 219 " (0x%x)\n", buffer_entry[1]);
220 kfree(obj);
220 return; 221 return;
221 } 222 }
222 223
@@ -234,7 +235,7 @@ static void dell_wmi_notify(u32 value, void *context)
234 key->keycode == KEY_BRIGHTNESSDOWN) && acpi_video) { 235 key->keycode == KEY_BRIGHTNESSDOWN) && acpi_video) {
235 /* Don't report brightness notifications that will also 236 /* Don't report brightness notifications that will also
236 * come via ACPI */ 237 * come via ACPI */
237 return; 238 ;
238 } else { 239 } else {
239 input_report_key(dell_wmi_input_dev, key->keycode, 1); 240 input_report_key(dell_wmi_input_dev, key->keycode, 1);
240 input_sync(dell_wmi_input_dev); 241 input_sync(dell_wmi_input_dev);
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index 54a015785ca8..0306174ba875 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -169,7 +169,6 @@ struct eeepc_laptop {
169 struct backlight_device *backlight_device; 169 struct backlight_device *backlight_device;
170 170
171 struct input_dev *inputdev; 171 struct input_dev *inputdev;
172 struct key_entry *keymap;
173 172
174 struct rfkill *wlan_rfkill; 173 struct rfkill *wlan_rfkill;
175 struct rfkill *bluetooth_rfkill; 174 struct rfkill *bluetooth_rfkill;
@@ -1204,8 +1203,8 @@ static int eeepc_input_init(struct eeepc_laptop *eeepc)
1204static void eeepc_input_exit(struct eeepc_laptop *eeepc) 1203static void eeepc_input_exit(struct eeepc_laptop *eeepc)
1205{ 1204{
1206 if (eeepc->inputdev) { 1205 if (eeepc->inputdev) {
1206 sparse_keymap_free(eeepc->inputdev);
1207 input_unregister_device(eeepc->inputdev); 1207 input_unregister_device(eeepc->inputdev);
1208 kfree(eeepc->keymap);
1209 } 1208 }
1210} 1209}
1211 1210
diff --git a/drivers/platform/x86/eeepc-wmi.c b/drivers/platform/x86/eeepc-wmi.c
index 9f8822658fd7..b227eb469f49 100644
--- a/drivers/platform/x86/eeepc-wmi.c
+++ b/drivers/platform/x86/eeepc-wmi.c
@@ -23,6 +23,8 @@
23 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 23 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 */ 24 */
25 25
26#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
27
26#include <linux/kernel.h> 28#include <linux/kernel.h>
27#include <linux/module.h> 29#include <linux/module.h>
28#include <linux/init.h> 30#include <linux/init.h>
@@ -30,22 +32,34 @@
30#include <linux/slab.h> 32#include <linux/slab.h>
31#include <linux/input.h> 33#include <linux/input.h>
32#include <linux/input/sparse-keymap.h> 34#include <linux/input/sparse-keymap.h>
35#include <linux/fb.h>
36#include <linux/backlight.h>
37#include <linux/platform_device.h>
33#include <acpi/acpi_bus.h> 38#include <acpi/acpi_bus.h>
34#include <acpi/acpi_drivers.h> 39#include <acpi/acpi_drivers.h>
35 40
41#define EEEPC_WMI_FILE "eeepc-wmi"
42
36MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>"); 43MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
37MODULE_DESCRIPTION("Eee PC WMI Hotkey Driver"); 44MODULE_DESCRIPTION("Eee PC WMI Hotkey Driver");
38MODULE_LICENSE("GPL"); 45MODULE_LICENSE("GPL");
39 46
40#define EEEPC_WMI_EVENT_GUID "ABBC0F72-8EA1-11D1-00A0-C90629100000" 47#define EEEPC_WMI_EVENT_GUID "ABBC0F72-8EA1-11D1-00A0-C90629100000"
48#define EEEPC_WMI_MGMT_GUID "97845ED0-4E6D-11DE-8A39-0800200C9A66"
41 49
42MODULE_ALIAS("wmi:"EEEPC_WMI_EVENT_GUID); 50MODULE_ALIAS("wmi:"EEEPC_WMI_EVENT_GUID);
51MODULE_ALIAS("wmi:"EEEPC_WMI_MGMT_GUID);
43 52
44#define NOTIFY_BRNUP_MIN 0x11 53#define NOTIFY_BRNUP_MIN 0x11
45#define NOTIFY_BRNUP_MAX 0x1f 54#define NOTIFY_BRNUP_MAX 0x1f
46#define NOTIFY_BRNDOWN_MIN 0x20 55#define NOTIFY_BRNDOWN_MIN 0x20
47#define NOTIFY_BRNDOWN_MAX 0x2e 56#define NOTIFY_BRNDOWN_MAX 0x2e
48 57
58#define EEEPC_WMI_METHODID_DEVS 0x53564544
59#define EEEPC_WMI_METHODID_DSTS 0x53544344
60
61#define EEEPC_WMI_DEVID_BACKLIGHT 0x00050012
62
49static const struct key_entry eeepc_wmi_keymap[] = { 63static const struct key_entry eeepc_wmi_keymap[] = {
50 /* Sleep already handled via generic ACPI code */ 64 /* Sleep already handled via generic ACPI code */
51 { KE_KEY, 0x5d, { KEY_WLAN } }, 65 { KE_KEY, 0x5d, { KEY_WLAN } },
@@ -58,18 +72,198 @@ static const struct key_entry eeepc_wmi_keymap[] = {
58 { KE_END, 0}, 72 { KE_END, 0},
59}; 73};
60 74
61static struct input_dev *eeepc_wmi_input_dev; 75struct bios_args {
76 u32 dev_id;
77 u32 ctrl_param;
78};
79
80struct eeepc_wmi {
81 struct input_dev *inputdev;
82 struct backlight_device *backlight_device;
83};
84
85static struct platform_device *platform_device;
86
87static int eeepc_wmi_input_init(struct eeepc_wmi *eeepc)
88{
89 int err;
90
91 eeepc->inputdev = input_allocate_device();
92 if (!eeepc->inputdev)
93 return -ENOMEM;
94
95 eeepc->inputdev->name = "Eee PC WMI hotkeys";
96 eeepc->inputdev->phys = EEEPC_WMI_FILE "/input0";
97 eeepc->inputdev->id.bustype = BUS_HOST;
98 eeepc->inputdev->dev.parent = &platform_device->dev;
99
100 err = sparse_keymap_setup(eeepc->inputdev, eeepc_wmi_keymap, NULL);
101 if (err)
102 goto err_free_dev;
103
104 err = input_register_device(eeepc->inputdev);
105 if (err)
106 goto err_free_keymap;
107
108 return 0;
109
110err_free_keymap:
111 sparse_keymap_free(eeepc->inputdev);
112err_free_dev:
113 input_free_device(eeepc->inputdev);
114 return err;
115}
116
117static void eeepc_wmi_input_exit(struct eeepc_wmi *eeepc)
118{
119 if (eeepc->inputdev) {
120 sparse_keymap_free(eeepc->inputdev);
121 input_unregister_device(eeepc->inputdev);
122 }
123
124 eeepc->inputdev = NULL;
125}
126
127static acpi_status eeepc_wmi_get_devstate(u32 dev_id, u32 *ctrl_param)
128{
129 struct acpi_buffer input = { (acpi_size)sizeof(u32), &dev_id };
130 struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
131 union acpi_object *obj;
132 acpi_status status;
133 u32 tmp;
134
135 status = wmi_evaluate_method(EEEPC_WMI_MGMT_GUID,
136 1, EEEPC_WMI_METHODID_DSTS, &input, &output);
137
138 if (ACPI_FAILURE(status))
139 return status;
140
141 obj = (union acpi_object *)output.pointer;
142 if (obj && obj->type == ACPI_TYPE_INTEGER)
143 tmp = (u32)obj->integer.value;
144 else
145 tmp = 0;
146
147 if (ctrl_param)
148 *ctrl_param = tmp;
149
150 kfree(obj);
151
152 return status;
153
154}
155
156static acpi_status eeepc_wmi_set_devstate(u32 dev_id, u32 ctrl_param)
157{
158 struct bios_args args = {
159 .dev_id = dev_id,
160 .ctrl_param = ctrl_param,
161 };
162 struct acpi_buffer input = { (acpi_size)sizeof(args), &args };
163 acpi_status status;
164
165 status = wmi_evaluate_method(EEEPC_WMI_MGMT_GUID,
166 1, EEEPC_WMI_METHODID_DEVS, &input, NULL);
167
168 return status;
169}
170
171static int read_brightness(struct backlight_device *bd)
172{
173 static u32 ctrl_param;
174 acpi_status status;
175
176 status = eeepc_wmi_get_devstate(EEEPC_WMI_DEVID_BACKLIGHT, &ctrl_param);
177
178 if (ACPI_FAILURE(status))
179 return -1;
180 else
181 return ctrl_param & 0xFF;
182}
183
184static int update_bl_status(struct backlight_device *bd)
185{
186
187 static u32 ctrl_param;
188 acpi_status status;
189
190 ctrl_param = bd->props.brightness;
191
192 status = eeepc_wmi_set_devstate(EEEPC_WMI_DEVID_BACKLIGHT, ctrl_param);
193
194 if (ACPI_FAILURE(status))
195 return -1;
196 else
197 return 0;
198}
199
200static const struct backlight_ops eeepc_wmi_bl_ops = {
201 .get_brightness = read_brightness,
202 .update_status = update_bl_status,
203};
204
205static int eeepc_wmi_backlight_notify(struct eeepc_wmi *eeepc, int code)
206{
207 struct backlight_device *bd = eeepc->backlight_device;
208 int old = bd->props.brightness;
209 int new;
210
211 if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX)
212 new = code - NOTIFY_BRNUP_MIN + 1;
213 else if (code >= NOTIFY_BRNDOWN_MIN && code <= NOTIFY_BRNDOWN_MAX)
214 new = code - NOTIFY_BRNDOWN_MIN;
215
216 bd->props.brightness = new;
217 backlight_update_status(bd);
218 backlight_force_update(bd, BACKLIGHT_UPDATE_HOTKEY);
219
220 return old;
221}
222
223static int eeepc_wmi_backlight_init(struct eeepc_wmi *eeepc)
224{
225 struct backlight_device *bd;
226 struct backlight_properties props;
227
228 memset(&props, 0, sizeof(struct backlight_properties));
229 props.max_brightness = 15;
230 bd = backlight_device_register(EEEPC_WMI_FILE,
231 &platform_device->dev, eeepc,
232 &eeepc_wmi_bl_ops, &props);
233 if (IS_ERR(bd)) {
234 pr_err("Could not register backlight device\n");
235 return PTR_ERR(bd);
236 }
237
238 eeepc->backlight_device = bd;
239
240 bd->props.brightness = read_brightness(bd);
241 bd->props.power = FB_BLANK_UNBLANK;
242 backlight_update_status(bd);
243
244 return 0;
245}
246
247static void eeepc_wmi_backlight_exit(struct eeepc_wmi *eeepc)
248{
249 if (eeepc->backlight_device)
250 backlight_device_unregister(eeepc->backlight_device);
251
252 eeepc->backlight_device = NULL;
253}
62 254
63static void eeepc_wmi_notify(u32 value, void *context) 255static void eeepc_wmi_notify(u32 value, void *context)
64{ 256{
257 struct eeepc_wmi *eeepc = context;
65 struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL }; 258 struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
66 union acpi_object *obj; 259 union acpi_object *obj;
67 acpi_status status; 260 acpi_status status;
68 int code; 261 int code;
262 int orig_code;
69 263
70 status = wmi_get_event_data(value, &response); 264 status = wmi_get_event_data(value, &response);
71 if (status != AE_OK) { 265 if (status != AE_OK) {
72 pr_err("EEEPC WMI: bad event status 0x%x\n", status); 266 pr_err("bad event status 0x%x\n", status);
73 return; 267 return;
74 } 268 }
75 269
@@ -77,81 +271,142 @@ static void eeepc_wmi_notify(u32 value, void *context)
77 271
78 if (obj && obj->type == ACPI_TYPE_INTEGER) { 272 if (obj && obj->type == ACPI_TYPE_INTEGER) {
79 code = obj->integer.value; 273 code = obj->integer.value;
274 orig_code = code;
80 275
81 if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX) 276 if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX)
82 code = NOTIFY_BRNUP_MIN; 277 code = NOTIFY_BRNUP_MIN;
83 else if (code >= NOTIFY_BRNDOWN_MIN && code <= NOTIFY_BRNDOWN_MAX) 278 else if (code >= NOTIFY_BRNDOWN_MIN &&
279 code <= NOTIFY_BRNDOWN_MAX)
84 code = NOTIFY_BRNDOWN_MIN; 280 code = NOTIFY_BRNDOWN_MIN;
85 281
86 if (!sparse_keymap_report_event(eeepc_wmi_input_dev, 282 if (code == NOTIFY_BRNUP_MIN || code == NOTIFY_BRNDOWN_MIN) {
283 if (!acpi_video_backlight_support())
284 eeepc_wmi_backlight_notify(eeepc, orig_code);
285 }
286
287 if (!sparse_keymap_report_event(eeepc->inputdev,
87 code, 1, true)) 288 code, 1, true))
88 pr_info("EEEPC WMI: Unknown key %x pressed\n", code); 289 pr_info("Unknown key %x pressed\n", code);
89 } 290 }
90 291
91 kfree(obj); 292 kfree(obj);
92} 293}
93 294
94static int eeepc_wmi_input_setup(void) 295static int __devinit eeepc_wmi_platform_probe(struct platform_device *device)
95{ 296{
297 struct eeepc_wmi *eeepc;
96 int err; 298 int err;
299 acpi_status status;
97 300
98 eeepc_wmi_input_dev = input_allocate_device(); 301 eeepc = platform_get_drvdata(device);
99 if (!eeepc_wmi_input_dev)
100 return -ENOMEM;
101
102 eeepc_wmi_input_dev->name = "Eee PC WMI hotkeys";
103 eeepc_wmi_input_dev->phys = "wmi/input0";
104 eeepc_wmi_input_dev->id.bustype = BUS_HOST;
105 302
106 err = sparse_keymap_setup(eeepc_wmi_input_dev, eeepc_wmi_keymap, NULL); 303 err = eeepc_wmi_input_init(eeepc);
107 if (err) 304 if (err)
108 goto err_free_dev; 305 goto error_input;
109 306
110 err = input_register_device(eeepc_wmi_input_dev); 307 if (!acpi_video_backlight_support()) {
111 if (err) 308 err = eeepc_wmi_backlight_init(eeepc);
112 goto err_free_keymap; 309 if (err)
310 goto error_backlight;
311 } else
312 pr_info("Backlight controlled by ACPI video driver\n");
313
314 status = wmi_install_notify_handler(EEEPC_WMI_EVENT_GUID,
315 eeepc_wmi_notify, eeepc);
316 if (ACPI_FAILURE(status)) {
317 pr_err("Unable to register notify handler - %d\n",
318 status);
319 err = -ENODEV;
320 goto error_wmi;
321 }
113 322
114 return 0; 323 return 0;
115 324
116err_free_keymap: 325error_wmi:
117 sparse_keymap_free(eeepc_wmi_input_dev); 326 eeepc_wmi_backlight_exit(eeepc);
118err_free_dev: 327error_backlight:
119 input_free_device(eeepc_wmi_input_dev); 328 eeepc_wmi_input_exit(eeepc);
329error_input:
120 return err; 330 return err;
121} 331}
122 332
333static int __devexit eeepc_wmi_platform_remove(struct platform_device *device)
334{
335 struct eeepc_wmi *eeepc;
336
337 eeepc = platform_get_drvdata(device);
338 wmi_remove_notify_handler(EEEPC_WMI_EVENT_GUID);
339 eeepc_wmi_backlight_exit(eeepc);
340 eeepc_wmi_input_exit(eeepc);
341
342 return 0;
343}
344
345static struct platform_driver platform_driver = {
346 .driver = {
347 .name = EEEPC_WMI_FILE,
348 .owner = THIS_MODULE,
349 },
350 .probe = eeepc_wmi_platform_probe,
351 .remove = __devexit_p(eeepc_wmi_platform_remove),
352};
353
123static int __init eeepc_wmi_init(void) 354static int __init eeepc_wmi_init(void)
124{ 355{
356 struct eeepc_wmi *eeepc;
125 int err; 357 int err;
126 acpi_status status;
127 358
128 if (!wmi_has_guid(EEEPC_WMI_EVENT_GUID)) { 359 if (!wmi_has_guid(EEEPC_WMI_EVENT_GUID) ||
129 pr_warning("EEEPC WMI: No known WMI GUID found\n"); 360 !wmi_has_guid(EEEPC_WMI_MGMT_GUID)) {
361 pr_warning("No known WMI GUID found\n");
130 return -ENODEV; 362 return -ENODEV;
131 } 363 }
132 364
133 err = eeepc_wmi_input_setup(); 365 eeepc = kzalloc(sizeof(struct eeepc_wmi), GFP_KERNEL);
134 if (err) 366 if (!eeepc)
135 return err; 367 return -ENOMEM;
136 368
137 status = wmi_install_notify_handler(EEEPC_WMI_EVENT_GUID, 369 platform_device = platform_device_alloc(EEEPC_WMI_FILE, -1);
138 eeepc_wmi_notify, NULL); 370 if (!platform_device) {
139 if (ACPI_FAILURE(status)) { 371 pr_warning("Unable to allocate platform device\n");
140 sparse_keymap_free(eeepc_wmi_input_dev); 372 err = -ENOMEM;
141 input_unregister_device(eeepc_wmi_input_dev); 373 goto fail_platform;
142 pr_err("EEEPC WMI: Unable to register notify handler - %d\n", 374 }
143 status); 375
144 return -ENODEV; 376 err = platform_device_add(platform_device);
377 if (err) {
378 pr_warning("Unable to add platform device\n");
379 goto put_dev;
380 }
381
382 platform_set_drvdata(platform_device, eeepc);
383
384 err = platform_driver_register(&platform_driver);
385 if (err) {
386 pr_warning("Unable to register platform driver\n");
387 goto del_dev;
145 } 388 }
146 389
147 return 0; 390 return 0;
391
392del_dev:
393 platform_device_del(platform_device);
394put_dev:
395 platform_device_put(platform_device);
396fail_platform:
397 kfree(eeepc);
398
399 return err;
148} 400}
149 401
150static void __exit eeepc_wmi_exit(void) 402static void __exit eeepc_wmi_exit(void)
151{ 403{
152 wmi_remove_notify_handler(EEEPC_WMI_EVENT_GUID); 404 struct eeepc_wmi *eeepc;
153 sparse_keymap_free(eeepc_wmi_input_dev); 405
154 input_unregister_device(eeepc_wmi_input_dev); 406 eeepc = platform_get_drvdata(platform_device);
407 platform_driver_unregister(&platform_driver);
408 platform_device_unregister(platform_device);
409 kfree(eeepc);
155} 410}
156 411
157module_init(eeepc_wmi_init); 412module_init(eeepc_wmi_init);
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index 35bb44af49b3..100e4d9372f1 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -274,26 +274,6 @@ static void pnpacpi_parse_allocated_busresource(struct pnp_dev *dev,
274 pnp_add_bus_resource(dev, start, end); 274 pnp_add_bus_resource(dev, start, end);
275} 275}
276 276
277static u64 addr_space_length(struct pnp_dev *dev, u64 min, u64 max, u64 len)
278{
279 u64 max_len;
280
281 max_len = max - min + 1;
282 if (len <= max_len)
283 return len;
284
285 /*
286 * Per 6.4.3.5, _LEN cannot exceed _MAX - _MIN + 1, but some BIOSes
287 * don't do this correctly, e.g.,
288 * https://bugzilla.kernel.org/show_bug.cgi?id=15480
289 */
290 dev_info(&dev->dev,
291 "resource length %#llx doesn't fit in %#llx-%#llx, trimming\n",
292 (unsigned long long) len, (unsigned long long) min,
293 (unsigned long long) max);
294 return max_len;
295}
296
297static void pnpacpi_parse_allocated_address_space(struct pnp_dev *dev, 277static void pnpacpi_parse_allocated_address_space(struct pnp_dev *dev,
298 struct acpi_resource *res) 278 struct acpi_resource *res)
299{ 279{
@@ -309,7 +289,8 @@ static void pnpacpi_parse_allocated_address_space(struct pnp_dev *dev,
309 return; 289 return;
310 } 290 }
311 291
312 len = addr_space_length(dev, p->minimum, p->maximum, p->address_length); 292 /* Windows apparently computes length rather than using _LEN */
293 len = p->maximum - p->minimum + 1;
313 window = (p->producer_consumer == ACPI_PRODUCER) ? 1 : 0; 294 window = (p->producer_consumer == ACPI_PRODUCER) ? 1 : 0;
314 295
315 if (p->resource_type == ACPI_MEMORY_RANGE) 296 if (p->resource_type == ACPI_MEMORY_RANGE)
@@ -330,7 +311,8 @@ static void pnpacpi_parse_allocated_ext_address_space(struct pnp_dev *dev,
330 int window; 311 int window;
331 u64 len; 312 u64 len;
332 313
333 len = addr_space_length(dev, p->minimum, p->maximum, p->address_length); 314 /* Windows apparently computes length rather than using _LEN */
315 len = p->maximum - p->minimum + 1;
334 window = (p->producer_consumer == ACPI_PRODUCER) ? 1 : 0; 316 window = (p->producer_consumer == ACPI_PRODUCER) ? 1 : 0;
335 317
336 if (p->resource_type == ACPI_MEMORY_RANGE) 318 if (p->resource_type == ACPI_MEMORY_RANGE)
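
The rsparser.c hunk drops the _LEN clamping helper entirely and instead derives the window length from _MIN and _MAX, matching what Windows is said to do. A small sketch of the two computations, assuming the same u64 fields shown in the hunk:

#include <linux/types.h>

/* Old behaviour: trust _LEN but trim it to the _MIN.._MAX window. */
static u64 demo_old_len(u64 min, u64 max, u64 len)
{
	u64 max_len = max - min + 1;

	return (len <= max_len) ? len : max_len;
}

/* New behaviour: ignore _LEN and compute the length from the window. */
static u64 demo_new_len(u64 min, u64 max)
{
	return max - min + 1;
}

The difference only shows when _LEN is smaller than the _MIN.._MAX span; the new code then uses the full span.
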
diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
index 2e54e6a23c72..e3446ab8b563 100644
--- a/drivers/pnp/resource.c
+++ b/drivers/pnp/resource.c
@@ -211,6 +211,8 @@ int pnp_check_port(struct pnp_dev *dev, struct resource *res)
211 if (tres->flags & IORESOURCE_IO) { 211 if (tres->flags & IORESOURCE_IO) {
212 if (cannot_compare(tres->flags)) 212 if (cannot_compare(tres->flags))
213 continue; 213 continue;
214 if (tres->flags & IORESOURCE_WINDOW)
215 continue;
214 tport = &tres->start; 216 tport = &tres->start;
215 tend = &tres->end; 217 tend = &tres->end;
216 if (ranged_conflict(port, end, tport, tend)) 218 if (ranged_conflict(port, end, tport, tend))
@@ -271,6 +273,8 @@ int pnp_check_mem(struct pnp_dev *dev, struct resource *res)
271 if (tres->flags & IORESOURCE_MEM) { 273 if (tres->flags & IORESOURCE_MEM) {
272 if (cannot_compare(tres->flags)) 274 if (cannot_compare(tres->flags))
273 continue; 275 continue;
276 if (tres->flags & IORESOURCE_WINDOW)
277 continue;
274 taddr = &tres->start; 278 taddr = &tres->start;
275 tend = &tres->end; 279 tend = &tres->end;
276 if (ranged_conflict(addr, end, taddr, tend)) 280 if (ranged_conflict(addr, end, taddr, tend))
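
The two resource.c hunks make the port and memory conflict checks skip any resource flagged IORESOURCE_WINDOW, so bridge windows no longer count as conflicting allocations. A short sketch of the added filter:

#include <linux/ioport.h>
#include <linux/types.h>

/* Bridge windows describe ranges forwarded to children; they are not
 * consumers themselves, so they must not be treated as conflicting. */
static bool demo_skip_window(const struct resource *r)
{
	return (r->flags & IORESOURCE_WINDOW) != 0;
}
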
diff --git a/drivers/regulator/max8925-regulator.c b/drivers/regulator/max8925-regulator.c
index b6218f11c957..552cad85ae5a 100644
--- a/drivers/regulator/max8925-regulator.c
+++ b/drivers/regulator/max8925-regulator.c
@@ -109,7 +109,7 @@ static int max8925_is_enabled(struct regulator_dev *rdev)
109 struct max8925_regulator_info *info = rdev_get_drvdata(rdev); 109 struct max8925_regulator_info *info = rdev_get_drvdata(rdev);
110 int ret; 110 int ret;
111 111
112 ret = max8925_reg_read(info->i2c, info->vol_reg); 112 ret = max8925_reg_read(info->i2c, info->enable_reg);
113 if (ret < 0) 113 if (ret < 0)
114 return ret; 114 return ret;
115 115
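
The max8925 fix is a one-liner: is_enabled() must read the enable register rather than the voltage register. Below is a hedged sketch of how such a check typically evaluates the result; demo_reg_read() and the bit position are assumptions for the example, not the driver's real accessor.

static int demo_reg_read(int reg)
{
	return 0x01;		/* pretend the enable bit (bit 0) is set */
}

static int demo_is_enabled(int enable_reg, int enable_bit)
{
	int ret = demo_reg_read(enable_reg);

	if (ret < 0)
		return ret;	/* propagate bus error */
	return !!(ret & (1 << enable_bit));
}
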
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 6a1303759432..50ac047cd136 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -620,6 +620,16 @@ config RTC_DRV_NUC900
620 620
621comment "on-CPU RTC drivers" 621comment "on-CPU RTC drivers"
622 622
623config RTC_DRV_DAVINCI
624 tristate "TI DaVinci RTC"
625 depends on ARCH_DAVINCI_DM365
626 help
627 If you say yes here you get support for the RTC on the
628 DaVinci platforms (DM365).
629
630 This driver can also be built as a module. If so, the module
631 will be called rtc-davinci.
632
623config RTC_DRV_OMAP 633config RTC_DRV_OMAP
624 tristate "TI OMAP1" 634 tristate "TI OMAP1"
625 depends on ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730 || ARCH_DAVINCI_DA8XX 635 depends on ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730 || ARCH_DAVINCI_DA8XX
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 44ef194a9573..245311a1348f 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_RTC_DRV_BQ32K) += rtc-bq32k.o
27obj-$(CONFIG_RTC_DRV_BQ4802) += rtc-bq4802.o 27obj-$(CONFIG_RTC_DRV_BQ4802) += rtc-bq4802.o
28obj-$(CONFIG_RTC_DRV_CMOS) += rtc-cmos.o 28obj-$(CONFIG_RTC_DRV_CMOS) += rtc-cmos.o
29obj-$(CONFIG_RTC_DRV_COH901331) += rtc-coh901331.o 29obj-$(CONFIG_RTC_DRV_COH901331) += rtc-coh901331.o
30obj-$(CONFIG_RTC_DRV_DAVINCI) += rtc-davinci.o
30obj-$(CONFIG_RTC_DRV_DM355EVM) += rtc-dm355evm.o 31obj-$(CONFIG_RTC_DRV_DM355EVM) += rtc-dm355evm.o
31obj-$(CONFIG_RTC_DRV_DS1216) += rtc-ds1216.o 32obj-$(CONFIG_RTC_DRV_DS1216) += rtc-ds1216.o
32obj-$(CONFIG_RTC_DRV_DS1286) += rtc-ds1286.o 33obj-$(CONFIG_RTC_DRV_DS1286) += rtc-ds1286.o
diff --git a/drivers/rtc/rtc-davinci.c b/drivers/rtc/rtc-davinci.c
new file mode 100644
index 000000000000..92a8f6cacda9
--- /dev/null
+++ b/drivers/rtc/rtc-davinci.c
@@ -0,0 +1,673 @@
1/*
2 * DaVinci Power Management and Real Time Clock Driver for TI platforms
3 *
4 * Copyright (C) 2009 Texas Instruments, Inc
5 *
6 * Author: Miguel Aguilar <miguel.aguilar@ridgerun.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22#include <linux/kernel.h>
23#include <linux/init.h>
24#include <linux/module.h>
25#include <linux/ioport.h>
26#include <linux/delay.h>
27#include <linux/spinlock.h>
28#include <linux/rtc.h>
29#include <linux/bcd.h>
30#include <linux/platform_device.h>
31#include <linux/io.h>
32
33/*
 34 * The DaVinci RTC is a simple RTC with the following counters:
35 * Sec: 0 - 59 : BCD count
36 * Min: 0 - 59 : BCD count
37 * Hour: 0 - 23 : BCD count
38 * Day: 0 - 0x7FFF(32767) : Binary count ( Over 89 years )
39 */
40
41/* PRTC interface registers */
42#define DAVINCI_PRTCIF_PID 0x00
43#define PRTCIF_CTLR 0x04
44#define PRTCIF_LDATA 0x08
45#define PRTCIF_UDATA 0x0C
46#define PRTCIF_INTEN 0x10
47#define PRTCIF_INTFLG 0x14
48
49/* PRTCIF_CTLR bit fields */
50#define PRTCIF_CTLR_BUSY BIT(31)
51#define PRTCIF_CTLR_SIZE BIT(25)
52#define PRTCIF_CTLR_DIR BIT(24)
53#define PRTCIF_CTLR_BENU_MSB BIT(23)
54#define PRTCIF_CTLR_BENU_3RD_BYTE BIT(22)
55#define PRTCIF_CTLR_BENU_2ND_BYTE BIT(21)
56#define PRTCIF_CTLR_BENU_LSB BIT(20)
57#define PRTCIF_CTLR_BENU_MASK (0x00F00000)
58#define PRTCIF_CTLR_BENL_MSB BIT(19)
59#define PRTCIF_CTLR_BENL_3RD_BYTE BIT(18)
60#define PRTCIF_CTLR_BENL_2ND_BYTE BIT(17)
61#define PRTCIF_CTLR_BENL_LSB BIT(16)
62#define PRTCIF_CTLR_BENL_MASK (0x000F0000)
63
64/* PRTCIF_INTEN bit fields */
65#define PRTCIF_INTEN_RTCSS BIT(1)
66#define PRTCIF_INTEN_RTCIF BIT(0)
67#define PRTCIF_INTEN_MASK (PRTCIF_INTEN_RTCSS \
68 | PRTCIF_INTEN_RTCIF)
69
70/* PRTCIF_INTFLG bit fields */
71#define PRTCIF_INTFLG_RTCSS BIT(1)
72#define PRTCIF_INTFLG_RTCIF BIT(0)
73#define PRTCIF_INTFLG_MASK (PRTCIF_INTFLG_RTCSS \
74 | PRTCIF_INTFLG_RTCIF)
75
76/* PRTC subsystem registers */
77#define PRTCSS_RTC_INTC_EXTENA1 (0x0C)
78#define PRTCSS_RTC_CTRL (0x10)
79#define PRTCSS_RTC_WDT (0x11)
80#define PRTCSS_RTC_TMR0 (0x12)
81#define PRTCSS_RTC_TMR1 (0x13)
82#define PRTCSS_RTC_CCTRL (0x14)
83#define PRTCSS_RTC_SEC (0x15)
84#define PRTCSS_RTC_MIN (0x16)
85#define PRTCSS_RTC_HOUR (0x17)
86#define PRTCSS_RTC_DAY0 (0x18)
87#define PRTCSS_RTC_DAY1 (0x19)
88#define PRTCSS_RTC_AMIN (0x1A)
89#define PRTCSS_RTC_AHOUR (0x1B)
90#define PRTCSS_RTC_ADAY0 (0x1C)
91#define PRTCSS_RTC_ADAY1 (0x1D)
92#define PRTCSS_RTC_CLKC_CNT (0x20)
93
94/* PRTCSS_RTC_INTC_EXTENA1 */
95#define PRTCSS_RTC_INTC_EXTENA1_MASK (0x07)
96
97/* PRTCSS_RTC_CTRL bit fields */
98#define PRTCSS_RTC_CTRL_WDTBUS BIT(7)
99#define PRTCSS_RTC_CTRL_WEN BIT(6)
100#define PRTCSS_RTC_CTRL_WDRT BIT(5)
101#define PRTCSS_RTC_CTRL_WDTFLG BIT(4)
102#define PRTCSS_RTC_CTRL_TE BIT(3)
103#define PRTCSS_RTC_CTRL_TIEN BIT(2)
104#define PRTCSS_RTC_CTRL_TMRFLG BIT(1)
105#define PRTCSS_RTC_CTRL_TMMD BIT(0)
106
107/* PRTCSS_RTC_CCTRL bit fields */
108#define PRTCSS_RTC_CCTRL_CALBUSY BIT(7)
109#define PRTCSS_RTC_CCTRL_DAEN BIT(5)
110#define PRTCSS_RTC_CCTRL_HAEN BIT(4)
111#define PRTCSS_RTC_CCTRL_MAEN BIT(3)
112#define PRTCSS_RTC_CCTRL_ALMFLG BIT(2)
113#define PRTCSS_RTC_CCTRL_AIEN BIT(1)
114#define PRTCSS_RTC_CCTRL_CAEN BIT(0)
115
116static DEFINE_SPINLOCK(davinci_rtc_lock);
117
118struct davinci_rtc {
119 struct rtc_device *rtc;
120 void __iomem *base;
121 resource_size_t pbase;
122 size_t base_size;
123 int irq;
124};
125
126static inline void rtcif_write(struct davinci_rtc *davinci_rtc,
127 u32 val, u32 addr)
128{
129 writel(val, davinci_rtc->base + addr);
130}
131
132static inline u32 rtcif_read(struct davinci_rtc *davinci_rtc, u32 addr)
133{
134 return readl(davinci_rtc->base + addr);
135}
136
137static inline void rtcif_wait(struct davinci_rtc *davinci_rtc)
138{
139 while (rtcif_read(davinci_rtc, PRTCIF_CTLR) & PRTCIF_CTLR_BUSY)
140 cpu_relax();
141}
142
143static inline void rtcss_write(struct davinci_rtc *davinci_rtc,
144 unsigned long val, u8 addr)
145{
146 rtcif_wait(davinci_rtc);
147
148 rtcif_write(davinci_rtc, PRTCIF_CTLR_BENL_LSB | addr, PRTCIF_CTLR);
149 rtcif_write(davinci_rtc, val, PRTCIF_LDATA);
150
151 rtcif_wait(davinci_rtc);
152}
153
154static inline u8 rtcss_read(struct davinci_rtc *davinci_rtc, u8 addr)
155{
156 rtcif_wait(davinci_rtc);
157
158 rtcif_write(davinci_rtc, PRTCIF_CTLR_DIR | PRTCIF_CTLR_BENL_LSB | addr,
159 PRTCIF_CTLR);
160
161 rtcif_wait(davinci_rtc);
162
163 return rtcif_read(davinci_rtc, PRTCIF_LDATA);
164}
165
166static inline void davinci_rtcss_calendar_wait(struct davinci_rtc *davinci_rtc)
167{
168 while (rtcss_read(davinci_rtc, PRTCSS_RTC_CCTRL) &
169 PRTCSS_RTC_CCTRL_CALBUSY)
170 cpu_relax();
171}
172
173static irqreturn_t davinci_rtc_interrupt(int irq, void *class_dev)
174{
175 struct davinci_rtc *davinci_rtc = class_dev;
176 unsigned long events = 0;
177 u32 irq_flg;
178 u8 alm_irq, tmr_irq;
179 u8 rtc_ctrl, rtc_cctrl;
180 int ret = IRQ_NONE;
181
182 irq_flg = rtcif_read(davinci_rtc, PRTCIF_INTFLG) &
183 PRTCIF_INTFLG_RTCSS;
184
185 alm_irq = rtcss_read(davinci_rtc, PRTCSS_RTC_CCTRL) &
186 PRTCSS_RTC_CCTRL_ALMFLG;
187
188 tmr_irq = rtcss_read(davinci_rtc, PRTCSS_RTC_CTRL) &
189 PRTCSS_RTC_CTRL_TMRFLG;
190
191 if (irq_flg) {
192 if (alm_irq) {
193 events |= RTC_IRQF | RTC_AF;
194 rtc_cctrl = rtcss_read(davinci_rtc, PRTCSS_RTC_CCTRL);
195 rtc_cctrl |= PRTCSS_RTC_CCTRL_ALMFLG;
196 rtcss_write(davinci_rtc, rtc_cctrl, PRTCSS_RTC_CCTRL);
197 } else if (tmr_irq) {
198 events |= RTC_IRQF | RTC_PF;
199 rtc_ctrl = rtcss_read(davinci_rtc, PRTCSS_RTC_CTRL);
200 rtc_ctrl |= PRTCSS_RTC_CTRL_TMRFLG;
201 rtcss_write(davinci_rtc, rtc_ctrl, PRTCSS_RTC_CTRL);
202 }
203
204 rtcif_write(davinci_rtc, PRTCIF_INTFLG_RTCSS,
205 PRTCIF_INTFLG);
206 rtc_update_irq(davinci_rtc->rtc, 1, events);
207
208 ret = IRQ_HANDLED;
209 }
210
211 return ret;
212}
213
214static int
215davinci_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
216{
217 struct davinci_rtc *davinci_rtc = dev_get_drvdata(dev);
218 u8 rtc_ctrl;
219 unsigned long flags;
220 int ret = 0;
221
222 spin_lock_irqsave(&davinci_rtc_lock, flags);
223
224 rtc_ctrl = rtcss_read(davinci_rtc, PRTCSS_RTC_CTRL);
225
226 switch (cmd) {
227 case RTC_WIE_ON:
228 rtc_ctrl |= PRTCSS_RTC_CTRL_WEN | PRTCSS_RTC_CTRL_WDTFLG;
229 break;
230 case RTC_WIE_OFF:
231 rtc_ctrl &= ~PRTCSS_RTC_CTRL_WEN;
232 break;
233 case RTC_UIE_OFF:
234 case RTC_UIE_ON:
235 ret = -ENOTTY;
236 break;
237 default:
238 ret = -ENOIOCTLCMD;
239 }
240
241 rtcss_write(davinci_rtc, rtc_ctrl, PRTCSS_RTC_CTRL);
242
243 spin_unlock_irqrestore(&davinci_rtc_lock, flags);
244
245 return ret;
246}
247
248static int convertfromdays(u16 days, struct rtc_time *tm)
249{
250 int tmp_days, year, mon;
251
252 for (year = 2000;; year++) {
253 tmp_days = rtc_year_days(1, 12, year);
254 if (days >= tmp_days)
255 days -= tmp_days;
256 else {
257 for (mon = 0;; mon++) {
258 tmp_days = rtc_month_days(mon, year);
259 if (days >= tmp_days) {
260 days -= tmp_days;
261 } else {
262 tm->tm_year = year - 1900;
263 tm->tm_mon = mon;
264 tm->tm_mday = days + 1;
265 break;
266 }
267 }
268 break;
269 }
270 }
271 return 0;
272}
273
274static int convert2days(u16 *days, struct rtc_time *tm)
275{
276 int i;
277 *days = 0;
278
279 /* epoch == 1900 */
280 if (tm->tm_year < 100 || tm->tm_year > 199)
281 return -EINVAL;
282
283 for (i = 2000; i < 1900 + tm->tm_year; i++)
284 *days += rtc_year_days(1, 12, i);
285
286 *days += rtc_year_days(tm->tm_mday, tm->tm_mon, 1900 + tm->tm_year);
287
288 return 0;
289}
290
291static int davinci_rtc_read_time(struct device *dev, struct rtc_time *tm)
292{
293 struct davinci_rtc *davinci_rtc = dev_get_drvdata(dev);
294 u16 days = 0;
295 u8 day0, day1;
296 unsigned long flags;
297
298 spin_lock_irqsave(&davinci_rtc_lock, flags);
299
300 davinci_rtcss_calendar_wait(davinci_rtc);
301 tm->tm_sec = bcd2bin(rtcss_read(davinci_rtc, PRTCSS_RTC_SEC));
302
303 davinci_rtcss_calendar_wait(davinci_rtc);
304 tm->tm_min = bcd2bin(rtcss_read(davinci_rtc, PRTCSS_RTC_MIN));
305
306 davinci_rtcss_calendar_wait(davinci_rtc);
307 tm->tm_hour = bcd2bin(rtcss_read(davinci_rtc, PRTCSS_RTC_HOUR));
308
309 davinci_rtcss_calendar_wait(davinci_rtc);
310 day0 = rtcss_read(davinci_rtc, PRTCSS_RTC_DAY0);
311
312 davinci_rtcss_calendar_wait(davinci_rtc);
313 day1 = rtcss_read(davinci_rtc, PRTCSS_RTC_DAY1);
314
315 spin_unlock_irqrestore(&davinci_rtc_lock, flags);
316
317 days |= day1;
318 days <<= 8;
319 days |= day0;
320
321 if (convertfromdays(days, tm) < 0)
322 return -EINVAL;
323
324 return 0;
325}
326
327static int davinci_rtc_set_time(struct device *dev, struct rtc_time *tm)
328{
329 struct davinci_rtc *davinci_rtc = dev_get_drvdata(dev);
330 u16 days;
331 u8 rtc_cctrl;
332 unsigned long flags;
333
334 if (convert2days(&days, tm) < 0)
335 return -EINVAL;
336
337 spin_lock_irqsave(&davinci_rtc_lock, flags);
338
339 davinci_rtcss_calendar_wait(davinci_rtc);
340 rtcss_write(davinci_rtc, bin2bcd(tm->tm_sec), PRTCSS_RTC_SEC);
341
342 davinci_rtcss_calendar_wait(davinci_rtc);
343 rtcss_write(davinci_rtc, bin2bcd(tm->tm_min), PRTCSS_RTC_MIN);
344
345 davinci_rtcss_calendar_wait(davinci_rtc);
346 rtcss_write(davinci_rtc, bin2bcd(tm->tm_hour), PRTCSS_RTC_HOUR);
347
348 davinci_rtcss_calendar_wait(davinci_rtc);
349 rtcss_write(davinci_rtc, days & 0xFF, PRTCSS_RTC_DAY0);
350
351 davinci_rtcss_calendar_wait(davinci_rtc);
352 rtcss_write(davinci_rtc, (days & 0xFF00) >> 8, PRTCSS_RTC_DAY1);
353
354 rtc_cctrl = rtcss_read(davinci_rtc, PRTCSS_RTC_CCTRL);
355 rtc_cctrl |= PRTCSS_RTC_CCTRL_CAEN;
356 rtcss_write(davinci_rtc, rtc_cctrl, PRTCSS_RTC_CCTRL);
357
358 spin_unlock_irqrestore(&davinci_rtc_lock, flags);
359
360 return 0;
361}
362
363static int davinci_rtc_alarm_irq_enable(struct device *dev,
364 unsigned int enabled)
365{
366 struct davinci_rtc *davinci_rtc = dev_get_drvdata(dev);
367 unsigned long flags;
368 u8 rtc_cctrl = rtcss_read(davinci_rtc, PRTCSS_RTC_CCTRL);
369
370 spin_lock_irqsave(&davinci_rtc_lock, flags);
371
372 if (enabled)
373 rtc_cctrl |= PRTCSS_RTC_CCTRL_DAEN |
374 PRTCSS_RTC_CCTRL_HAEN |
375 PRTCSS_RTC_CCTRL_MAEN |
376 PRTCSS_RTC_CCTRL_ALMFLG |
377 PRTCSS_RTC_CCTRL_AIEN;
378 else
379 rtc_cctrl &= ~PRTCSS_RTC_CCTRL_AIEN;
380
381 davinci_rtcss_calendar_wait(davinci_rtc);
382 rtcss_write(davinci_rtc, rtc_cctrl, PRTCSS_RTC_CCTRL);
383
384 spin_unlock_irqrestore(&davinci_rtc_lock, flags);
385
386 return 0;
387}
388
389static int davinci_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
390{
391 struct davinci_rtc *davinci_rtc = dev_get_drvdata(dev);
392 u16 days = 0;
393 u8 day0, day1;
394 unsigned long flags;
395
396 spin_lock_irqsave(&davinci_rtc_lock, flags);
397
398 davinci_rtcss_calendar_wait(davinci_rtc);
399 alm->time.tm_min = bcd2bin(rtcss_read(davinci_rtc, PRTCSS_RTC_AMIN));
400
401 davinci_rtcss_calendar_wait(davinci_rtc);
402 alm->time.tm_hour = bcd2bin(rtcss_read(davinci_rtc, PRTCSS_RTC_AHOUR));
403
404 davinci_rtcss_calendar_wait(davinci_rtc);
405 day0 = rtcss_read(davinci_rtc, PRTCSS_RTC_ADAY0);
406
407 davinci_rtcss_calendar_wait(davinci_rtc);
408 day1 = rtcss_read(davinci_rtc, PRTCSS_RTC_ADAY1);
409
410 spin_unlock_irqrestore(&davinci_rtc_lock, flags);
411 days |= day1;
412 days <<= 8;
413 days |= day0;
414
415 if (convertfromdays(days, &alm->time) < 0)
416 return -EINVAL;
417
418 alm->pending = !!(rtcss_read(davinci_rtc,
419 PRTCSS_RTC_CCTRL) &
420 PRTCSS_RTC_CCTRL_AIEN);
421 alm->enabled = alm->pending && device_may_wakeup(dev);
422
423 return 0;
424}
425
426static int davinci_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
427{
428 struct davinci_rtc *davinci_rtc = dev_get_drvdata(dev);
429 unsigned long flags;
430 u16 days;
431
432 if (alm->time.tm_mday <= 0 && alm->time.tm_mon < 0
433 && alm->time.tm_year < 0) {
434 struct rtc_time tm;
435 unsigned long now, then;
436
437 davinci_rtc_read_time(dev, &tm);
438 rtc_tm_to_time(&tm, &now);
439
440 alm->time.tm_mday = tm.tm_mday;
441 alm->time.tm_mon = tm.tm_mon;
442 alm->time.tm_year = tm.tm_year;
443 rtc_tm_to_time(&alm->time, &then);
444
445 if (then < now) {
446 rtc_time_to_tm(now + 24 * 60 * 60, &tm);
447 alm->time.tm_mday = tm.tm_mday;
448 alm->time.tm_mon = tm.tm_mon;
449 alm->time.tm_year = tm.tm_year;
450 }
451 }
452
453 if (convert2days(&days, &alm->time) < 0)
454 return -EINVAL;
455
456 spin_lock_irqsave(&davinci_rtc_lock, flags);
457
458 davinci_rtcss_calendar_wait(davinci_rtc);
459 rtcss_write(davinci_rtc, bin2bcd(alm->time.tm_min), PRTCSS_RTC_AMIN);
460
461 davinci_rtcss_calendar_wait(davinci_rtc);
462 rtcss_write(davinci_rtc, bin2bcd(alm->time.tm_hour), PRTCSS_RTC_AHOUR);
463
464 davinci_rtcss_calendar_wait(davinci_rtc);
465 rtcss_write(davinci_rtc, days & 0xFF, PRTCSS_RTC_ADAY0);
466
467 davinci_rtcss_calendar_wait(davinci_rtc);
468 rtcss_write(davinci_rtc, (days & 0xFF00) >> 8, PRTCSS_RTC_ADAY1);
469
470 spin_unlock_irqrestore(&davinci_rtc_lock, flags);
471
472 return 0;
473}
474
475static int davinci_rtc_irq_set_state(struct device *dev, int enabled)
476{
477 struct davinci_rtc *davinci_rtc = dev_get_drvdata(dev);
478 unsigned long flags;
479 u8 rtc_ctrl;
480
481 spin_lock_irqsave(&davinci_rtc_lock, flags);
482
483 rtc_ctrl = rtcss_read(davinci_rtc, PRTCSS_RTC_CTRL);
484
485 if (enabled) {
486 while (rtcss_read(davinci_rtc, PRTCSS_RTC_CTRL)
487 & PRTCSS_RTC_CTRL_WDTBUS)
488 cpu_relax();
489
490 rtc_ctrl |= PRTCSS_RTC_CTRL_TE;
491 rtcss_write(davinci_rtc, rtc_ctrl, PRTCSS_RTC_CTRL);
492
493 rtcss_write(davinci_rtc, 0x0, PRTCSS_RTC_CLKC_CNT);
494
495 rtc_ctrl |= PRTCSS_RTC_CTRL_TIEN |
496 PRTCSS_RTC_CTRL_TMMD |
497 PRTCSS_RTC_CTRL_TMRFLG;
498 } else
499 rtc_ctrl &= ~PRTCSS_RTC_CTRL_TIEN;
500
501 rtcss_write(davinci_rtc, rtc_ctrl, PRTCSS_RTC_CTRL);
502
503 spin_unlock_irqrestore(&davinci_rtc_lock, flags);
504
505 return 0;
506}
507
508static int davinci_rtc_irq_set_freq(struct device *dev, int freq)
509{
510 struct davinci_rtc *davinci_rtc = dev_get_drvdata(dev);
511 unsigned long flags;
512 u16 tmr_counter = (0x8000 >> (ffs(freq) - 1));
513
514 spin_lock_irqsave(&davinci_rtc_lock, flags);
515
516 rtcss_write(davinci_rtc, tmr_counter & 0xFF, PRTCSS_RTC_TMR0);
517 rtcss_write(davinci_rtc, (tmr_counter & 0xFF00) >> 8, PRTCSS_RTC_TMR1);
518
519 spin_unlock_irqrestore(&davinci_rtc_lock, flags);
520
521 return 0;
522}
523
524static struct rtc_class_ops davinci_rtc_ops = {
525 .ioctl = davinci_rtc_ioctl,
526 .read_time = davinci_rtc_read_time,
527 .set_time = davinci_rtc_set_time,
528 .alarm_irq_enable = davinci_rtc_alarm_irq_enable,
529 .read_alarm = davinci_rtc_read_alarm,
530 .set_alarm = davinci_rtc_set_alarm,
531 .irq_set_state = davinci_rtc_irq_set_state,
532 .irq_set_freq = davinci_rtc_irq_set_freq,
533};
534
535static int __init davinci_rtc_probe(struct platform_device *pdev)
536{
537 struct device *dev = &pdev->dev;
538 struct davinci_rtc *davinci_rtc;
539 struct resource *res, *mem;
540 int ret = 0;
541
542 davinci_rtc = kzalloc(sizeof(struct davinci_rtc), GFP_KERNEL);
543 if (!davinci_rtc) {
544 dev_dbg(dev, "could not allocate memory for private data\n");
545 return -ENOMEM;
546 }
547
548 davinci_rtc->irq = platform_get_irq(pdev, 0);
549 if (davinci_rtc->irq < 0) {
550 dev_err(dev, "no RTC irq\n");
551 ret = davinci_rtc->irq;
552 goto fail1;
553 }
554
555 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
556 if (!res) {
557 dev_err(dev, "no mem resource\n");
558 ret = -EINVAL;
559 goto fail1;
560 }
561
562 davinci_rtc->pbase = res->start;
563 davinci_rtc->base_size = resource_size(res);
564
565 mem = request_mem_region(davinci_rtc->pbase, davinci_rtc->base_size,
566 pdev->name);
567 if (!mem) {
568 dev_err(dev, "RTC registers at %08x are not free\n",
569 davinci_rtc->pbase);
570 ret = -EBUSY;
571 goto fail1;
572 }
573
574 davinci_rtc->base = ioremap(davinci_rtc->pbase, davinci_rtc->base_size);
575 if (!davinci_rtc->base) {
576 dev_err(dev, "unable to ioremap MEM resource\n");
577 ret = -ENOMEM;
578 goto fail2;
579 }
580
581 davinci_rtc->rtc = rtc_device_register(pdev->name, &pdev->dev,
582 &davinci_rtc_ops, THIS_MODULE);
583 if (IS_ERR(davinci_rtc->rtc)) {
584 dev_err(dev, "unable to register RTC device, err %ld\n",
585 PTR_ERR(davinci_rtc->rtc));
586 goto fail3;
587 }
588
589 rtcif_write(davinci_rtc, PRTCIF_INTFLG_RTCSS, PRTCIF_INTFLG);
590 rtcif_write(davinci_rtc, 0, PRTCIF_INTEN);
591 rtcss_write(davinci_rtc, 0, PRTCSS_RTC_INTC_EXTENA1);
592
593 rtcss_write(davinci_rtc, 0, PRTCSS_RTC_CTRL);
594 rtcss_write(davinci_rtc, 0, PRTCSS_RTC_CCTRL);
595
596 ret = request_irq(davinci_rtc->irq, davinci_rtc_interrupt,
597 IRQF_DISABLED, "davinci_rtc", davinci_rtc);
598 if (ret < 0) {
599 dev_err(dev, "unable to register davinci RTC interrupt\n");
600 goto fail4;
601 }
602
603 /* Enable interrupts */
604 rtcif_write(davinci_rtc, PRTCIF_INTEN_RTCSS, PRTCIF_INTEN);
605 rtcss_write(davinci_rtc, PRTCSS_RTC_INTC_EXTENA1_MASK,
606 PRTCSS_RTC_INTC_EXTENA1);
607
608 rtcss_write(davinci_rtc, PRTCSS_RTC_CCTRL_CAEN, PRTCSS_RTC_CCTRL);
609
610 platform_set_drvdata(pdev, davinci_rtc);
611
612 device_init_wakeup(&pdev->dev, 0);
613
614 return 0;
615
616fail4:
617 rtc_device_unregister(davinci_rtc->rtc);
618fail3:
619 iounmap(davinci_rtc->base);
620fail2:
621 release_mem_region(davinci_rtc->pbase, davinci_rtc->base_size);
622fail1:
623 kfree(davinci_rtc);
624
625 return ret;
626}
627
628static int __devexit davinci_rtc_remove(struct platform_device *pdev)
629{
630 struct davinci_rtc *davinci_rtc = platform_get_drvdata(pdev);
631
632 device_init_wakeup(&pdev->dev, 0);
633
634 rtcif_write(davinci_rtc, 0, PRTCIF_INTEN);
635
636 free_irq(davinci_rtc->irq, davinci_rtc);
637
638 rtc_device_unregister(davinci_rtc->rtc);
639
640 iounmap(davinci_rtc->base);
641 release_mem_region(davinci_rtc->pbase, davinci_rtc->base_size);
642
643 platform_set_drvdata(pdev, NULL);
644
645 kfree(davinci_rtc);
646
647 return 0;
648}
649
650static struct platform_driver davinci_rtc_driver = {
651 .probe = davinci_rtc_probe,
652 .remove = __devexit_p(davinci_rtc_remove),
653 .driver = {
654 .name = "rtc_davinci",
655 .owner = THIS_MODULE,
656 },
657};
658
659static int __init rtc_init(void)
660{
661 return platform_driver_probe(&davinci_rtc_driver, davinci_rtc_probe);
662}
663module_init(rtc_init);
664
665static void __exit rtc_exit(void)
666{
667 platform_driver_unregister(&davinci_rtc_driver);
668}
669module_exit(rtc_exit);
670
671MODULE_AUTHOR("Miguel Aguilar <miguel.aguilar@ridgerun.com>");
672MODULE_DESCRIPTION("Texas Instruments DaVinci PRTC Driver");
673MODULE_LICENSE("GPL");
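
In the new rtc-davinci driver the DAY0/DAY1 registers hold a 16-bit count of days with day 0 taken as 1 January 2000, and convertfromdays()/convert2days() translate between that counter and struct rtc_time. The standalone sketch below re-derives the year/day-of-year split from such a counter using plain leap-year arithmetic; it is a simplified illustration, not the driver's code.

#include <stdio.h>

static int is_leap(int year)
{
	return (year % 4 == 0 && year % 100 != 0) || (year % 400 == 0);
}

static int days_in_year(int year)
{
	return is_leap(year) ? 366 : 365;
}

/* days since 2000-01-01 -> year and 0-based day-of-year */
static void days_to_date(unsigned int days, int *year, int *yday)
{
	int y = 2000;

	while (days >= (unsigned int)days_in_year(y)) {
		days -= days_in_year(y);
		y++;
	}
	*year = y;
	*yday = (int)days;
}

int main(void)
{
	int year, yday;

	days_to_date(9999, &year, &yday);	/* arbitrary example counter value */
	printf("9999 days -> year %d, day-of-year %d\n", year, yday);
	return 0;
}

convert2days() goes the other way, summing rtc_year_days() for each full year since 2000 and adding the day-of-year of the target date.
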
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index acf222f91f5a..0e86247d791e 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -37,6 +37,9 @@
37 */ 37 */
38#define DASD_CHANQ_MAX_SIZE 4 38#define DASD_CHANQ_MAX_SIZE 4
39 39
40#define DASD_SLEEPON_START_TAG (void *) 1
41#define DASD_SLEEPON_END_TAG (void *) 2
42
40/* 43/*
41 * SECTION: exported variables of dasd.c 44 * SECTION: exported variables of dasd.c
42 */ 45 */
@@ -62,6 +65,7 @@ static void dasd_device_tasklet(struct dasd_device *);
62static void dasd_block_tasklet(struct dasd_block *); 65static void dasd_block_tasklet(struct dasd_block *);
63static void do_kick_device(struct work_struct *); 66static void do_kick_device(struct work_struct *);
64static void do_restore_device(struct work_struct *); 67static void do_restore_device(struct work_struct *);
68static void do_reload_device(struct work_struct *);
65static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *); 69static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
66static void dasd_device_timeout(unsigned long); 70static void dasd_device_timeout(unsigned long);
67static void dasd_block_timeout(unsigned long); 71static void dasd_block_timeout(unsigned long);
@@ -112,6 +116,7 @@ struct dasd_device *dasd_alloc_device(void)
112 device->timer.data = (unsigned long) device; 116 device->timer.data = (unsigned long) device;
113 INIT_WORK(&device->kick_work, do_kick_device); 117 INIT_WORK(&device->kick_work, do_kick_device);
114 INIT_WORK(&device->restore_device, do_restore_device); 118 INIT_WORK(&device->restore_device, do_restore_device);
119 INIT_WORK(&device->reload_device, do_reload_device);
115 device->state = DASD_STATE_NEW; 120 device->state = DASD_STATE_NEW;
116 device->target = DASD_STATE_NEW; 121 device->target = DASD_STATE_NEW;
117 mutex_init(&device->state_mutex); 122 mutex_init(&device->state_mutex);
@@ -518,6 +523,26 @@ void dasd_kick_device(struct dasd_device *device)
518} 523}
519 524
520/* 525/*
 526 * dasd_reload_device will schedule a call to do_reload_device to the kernel
527 * event daemon.
528 */
529static void do_reload_device(struct work_struct *work)
530{
531 struct dasd_device *device = container_of(work, struct dasd_device,
532 reload_device);
533 device->discipline->reload(device);
534 dasd_put_device(device);
535}
536
537void dasd_reload_device(struct dasd_device *device)
538{
539 dasd_get_device(device);
540 /* queue call to dasd_reload_device to the kernel event daemon. */
541 schedule_work(&device->reload_device);
542}
543EXPORT_SYMBOL(dasd_reload_device);
544
545/*
 521 * dasd_restore_device will schedule a call to do_restore_device to the kernel 546 * dasd_restore_device will schedule a call to do_restore_device to the kernel
522 * event daemon. 547 * event daemon.
523 */ 548 */
@@ -1472,7 +1497,10 @@ void dasd_add_request_tail(struct dasd_ccw_req *cqr)
1472 */ 1497 */
1473static void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data) 1498static void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
1474{ 1499{
1475 wake_up((wait_queue_head_t *) data); 1500 spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev));
1501 cqr->callback_data = DASD_SLEEPON_END_TAG;
1502 spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev));
1503 wake_up(&generic_waitq);
1476} 1504}
1477 1505
1478static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr) 1506static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
@@ -1482,10 +1510,7 @@ static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
1482 1510
1483 device = cqr->startdev; 1511 device = cqr->startdev;
1484 spin_lock_irq(get_ccwdev_lock(device->cdev)); 1512 spin_lock_irq(get_ccwdev_lock(device->cdev));
1485 rc = ((cqr->status == DASD_CQR_DONE || 1513 rc = (cqr->callback_data == DASD_SLEEPON_END_TAG);
1486 cqr->status == DASD_CQR_NEED_ERP ||
1487 cqr->status == DASD_CQR_TERMINATED) &&
1488 list_empty(&cqr->devlist));
1489 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 1514 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1490 return rc; 1515 return rc;
1491} 1516}
@@ -1573,7 +1598,7 @@ static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
1573 wait_event(generic_waitq, !(device->stopped)); 1598 wait_event(generic_waitq, !(device->stopped));
1574 1599
1575 cqr->callback = dasd_wakeup_cb; 1600 cqr->callback = dasd_wakeup_cb;
1576 cqr->callback_data = (void *) &generic_waitq; 1601 cqr->callback_data = DASD_SLEEPON_START_TAG;
1577 dasd_add_request_tail(cqr); 1602 dasd_add_request_tail(cqr);
1578 if (interruptible) { 1603 if (interruptible) {
1579 rc = wait_event_interruptible( 1604 rc = wait_event_interruptible(
@@ -1652,7 +1677,7 @@ int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
1652 } 1677 }
1653 1678
1654 cqr->callback = dasd_wakeup_cb; 1679 cqr->callback = dasd_wakeup_cb;
1655 cqr->callback_data = (void *) &generic_waitq; 1680 cqr->callback_data = DASD_SLEEPON_START_TAG;
1656 cqr->status = DASD_CQR_QUEUED; 1681 cqr->status = DASD_CQR_QUEUED;
1657 list_add(&cqr->devlist, &device->ccw_queue); 1682 list_add(&cqr->devlist, &device->ccw_queue);
1658 1683
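
The dasd.c hunk replaces the old "wait until the request status looks final" test with an explicit tag protocol: requests are queued with callback_data set to DASD_SLEEPON_START_TAG, the completion callback switches it to DASD_SLEEPON_END_TAG under the ccw device lock, and _wait_for_wakeup() only checks for the end tag. A generic sketch of that pattern, not the dasd code itself:

#include <linux/spinlock.h>
#include <linux/wait.h>

#define DEMO_START_TAG	((void *) 1)
#define DEMO_END_TAG	((void *) 2)

struct demo_req {
	void *callback_data;
};

static DEFINE_SPINLOCK(demo_lock);
static DECLARE_WAIT_QUEUE_HEAD(demo_waitq);

static void demo_complete(struct demo_req *req)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	req->callback_data = DEMO_END_TAG;	/* mark done under the lock */
	spin_unlock_irqrestore(&demo_lock, flags);
	wake_up(&demo_waitq);
}

static int demo_is_done(struct demo_req *req)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&demo_lock, flags);
	rc = (req->callback_data == DEMO_END_TAG);
	spin_unlock_irqrestore(&demo_lock, flags);
	return rc;
}

/* waiter side: wait_event(demo_waitq, demo_is_done(req)); */

Only the completion callback ever writes the end tag, so a stray wakeup cannot be mistaken for completion.
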
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index 6632649dd6aa..85bfd8794856 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -1418,9 +1418,29 @@ static struct dasd_ccw_req *dasd_3990_erp_inspect_alias(
1418 struct dasd_ccw_req *erp) 1418 struct dasd_ccw_req *erp)
1419{ 1419{
1420 struct dasd_ccw_req *cqr = erp->refers; 1420 struct dasd_ccw_req *cqr = erp->refers;
1421 char *sense;
1421 1422
1422 if (cqr->block && 1423 if (cqr->block &&
1423 (cqr->block->base != cqr->startdev)) { 1424 (cqr->block->base != cqr->startdev)) {
1425
1426 sense = dasd_get_sense(&erp->refers->irb);
1427 /*
 1428 * dynamic PAV may have changed the base/alias mapping
1429 */
1430 if (!test_bit(DASD_FLAG_OFFLINE, &cqr->startdev->flags) && sense
1431 && (sense[0] == 0x10) && (sense[7] == 0x0F)
1432 && (sense[8] == 0x67)) {
1433 /*
1434 * remove device from alias handling to prevent new
1435 * requests from being scheduled on the
1436 * wrong alias device
1437 */
1438 dasd_alias_remove_device(cqr->startdev);
1439
1440 /* schedule worker to reload device */
1441 dasd_reload_device(cqr->startdev);
1442 }
1443
1424 if (cqr->startdev->features & DASD_FEATURE_ERPLOG) { 1444 if (cqr->startdev->features & DASD_FEATURE_ERPLOG) {
1425 DBF_DEV_EVENT(DBF_ERR, cqr->startdev, 1445 DBF_DEV_EVENT(DBF_ERR, cqr->startdev,
1426 "ERP on alias device for request %p," 1446 "ERP on alias device for request %p,"
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 8c4814258e93..4155805dcdff 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -190,20 +190,21 @@ int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
190 struct alias_server *server, *newserver; 190 struct alias_server *server, *newserver;
191 struct alias_lcu *lcu, *newlcu; 191 struct alias_lcu *lcu, *newlcu;
192 int is_lcu_known; 192 int is_lcu_known;
193 struct dasd_uid *uid; 193 struct dasd_uid uid;
194 194
195 private = (struct dasd_eckd_private *) device->private; 195 private = (struct dasd_eckd_private *) device->private;
196 uid = &private->uid; 196
197 device->discipline->get_uid(device, &uid);
197 spin_lock_irqsave(&aliastree.lock, flags); 198 spin_lock_irqsave(&aliastree.lock, flags);
198 is_lcu_known = 1; 199 is_lcu_known = 1;
199 server = _find_server(uid); 200 server = _find_server(&uid);
200 if (!server) { 201 if (!server) {
201 spin_unlock_irqrestore(&aliastree.lock, flags); 202 spin_unlock_irqrestore(&aliastree.lock, flags);
202 newserver = _allocate_server(uid); 203 newserver = _allocate_server(&uid);
203 if (IS_ERR(newserver)) 204 if (IS_ERR(newserver))
204 return PTR_ERR(newserver); 205 return PTR_ERR(newserver);
205 spin_lock_irqsave(&aliastree.lock, flags); 206 spin_lock_irqsave(&aliastree.lock, flags);
206 server = _find_server(uid); 207 server = _find_server(&uid);
207 if (!server) { 208 if (!server) {
208 list_add(&newserver->server, &aliastree.serverlist); 209 list_add(&newserver->server, &aliastree.serverlist);
209 server = newserver; 210 server = newserver;
@@ -214,14 +215,14 @@ int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
214 } 215 }
215 } 216 }
216 217
217 lcu = _find_lcu(server, uid); 218 lcu = _find_lcu(server, &uid);
218 if (!lcu) { 219 if (!lcu) {
219 spin_unlock_irqrestore(&aliastree.lock, flags); 220 spin_unlock_irqrestore(&aliastree.lock, flags);
220 newlcu = _allocate_lcu(uid); 221 newlcu = _allocate_lcu(&uid);
221 if (IS_ERR(newlcu)) 222 if (IS_ERR(newlcu))
222 return PTR_ERR(newlcu); 223 return PTR_ERR(newlcu);
223 spin_lock_irqsave(&aliastree.lock, flags); 224 spin_lock_irqsave(&aliastree.lock, flags);
224 lcu = _find_lcu(server, uid); 225 lcu = _find_lcu(server, &uid);
225 if (!lcu) { 226 if (!lcu) {
226 list_add(&newlcu->lcu, &server->lculist); 227 list_add(&newlcu->lcu, &server->lculist);
227 lcu = newlcu; 228 lcu = newlcu;
@@ -256,20 +257,20 @@ void dasd_alias_lcu_setup_complete(struct dasd_device *device)
256 unsigned long flags; 257 unsigned long flags;
257 struct alias_server *server; 258 struct alias_server *server;
258 struct alias_lcu *lcu; 259 struct alias_lcu *lcu;
259 struct dasd_uid *uid; 260 struct dasd_uid uid;
260 261
261 private = (struct dasd_eckd_private *) device->private; 262 private = (struct dasd_eckd_private *) device->private;
262 uid = &private->uid; 263 device->discipline->get_uid(device, &uid);
263 lcu = NULL; 264 lcu = NULL;
264 spin_lock_irqsave(&aliastree.lock, flags); 265 spin_lock_irqsave(&aliastree.lock, flags);
265 server = _find_server(uid); 266 server = _find_server(&uid);
266 if (server) 267 if (server)
267 lcu = _find_lcu(server, uid); 268 lcu = _find_lcu(server, &uid);
268 spin_unlock_irqrestore(&aliastree.lock, flags); 269 spin_unlock_irqrestore(&aliastree.lock, flags);
269 if (!lcu) { 270 if (!lcu) {
270 DBF_EVENT_DEVID(DBF_ERR, device->cdev, 271 DBF_EVENT_DEVID(DBF_ERR, device->cdev,
271 "could not find lcu for %04x %02x", 272 "could not find lcu for %04x %02x",
272 uid->ssid, uid->real_unit_addr); 273 uid.ssid, uid.real_unit_addr);
273 WARN_ON(1); 274 WARN_ON(1);
274 return; 275 return;
275 } 276 }
@@ -282,20 +283,20 @@ void dasd_alias_wait_for_lcu_setup(struct dasd_device *device)
282 unsigned long flags; 283 unsigned long flags;
283 struct alias_server *server; 284 struct alias_server *server;
284 struct alias_lcu *lcu; 285 struct alias_lcu *lcu;
285 struct dasd_uid *uid; 286 struct dasd_uid uid;
286 287
287 private = (struct dasd_eckd_private *) device->private; 288 private = (struct dasd_eckd_private *) device->private;
288 uid = &private->uid; 289 device->discipline->get_uid(device, &uid);
289 lcu = NULL; 290 lcu = NULL;
290 spin_lock_irqsave(&aliastree.lock, flags); 291 spin_lock_irqsave(&aliastree.lock, flags);
291 server = _find_server(uid); 292 server = _find_server(&uid);
292 if (server) 293 if (server)
293 lcu = _find_lcu(server, uid); 294 lcu = _find_lcu(server, &uid);
294 spin_unlock_irqrestore(&aliastree.lock, flags); 295 spin_unlock_irqrestore(&aliastree.lock, flags);
295 if (!lcu) { 296 if (!lcu) {
296 DBF_EVENT_DEVID(DBF_ERR, device->cdev, 297 DBF_EVENT_DEVID(DBF_ERR, device->cdev,
297 "could not find lcu for %04x %02x", 298 "could not find lcu for %04x %02x",
298 uid->ssid, uid->real_unit_addr); 299 uid.ssid, uid.real_unit_addr);
299 WARN_ON(1); 300 WARN_ON(1);
300 return; 301 return;
301 } 302 }
@@ -314,9 +315,11 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
314 struct alias_lcu *lcu; 315 struct alias_lcu *lcu;
315 struct alias_server *server; 316 struct alias_server *server;
316 int was_pending; 317 int was_pending;
318 struct dasd_uid uid;
317 319
318 private = (struct dasd_eckd_private *) device->private; 320 private = (struct dasd_eckd_private *) device->private;
319 lcu = private->lcu; 321 lcu = private->lcu;
322 device->discipline->get_uid(device, &uid);
320 spin_lock_irqsave(&lcu->lock, flags); 323 spin_lock_irqsave(&lcu->lock, flags);
321 list_del_init(&device->alias_list); 324 list_del_init(&device->alias_list);
322 /* make sure that the workers don't use this device */ 325 /* make sure that the workers don't use this device */
@@ -353,7 +356,7 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
353 _schedule_lcu_update(lcu, NULL); 356 _schedule_lcu_update(lcu, NULL);
354 spin_unlock(&lcu->lock); 357 spin_unlock(&lcu->lock);
355 } 358 }
356 server = _find_server(&private->uid); 359 server = _find_server(&uid);
357 if (server && list_empty(&server->lculist)) { 360 if (server && list_empty(&server->lculist)) {
358 list_del(&server->server); 361 list_del(&server->server);
359 _free_server(server); 362 _free_server(server);
@@ -366,19 +369,30 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
366 * in the lcu is up to date and will update the device uid before 369 * in the lcu is up to date and will update the device uid before
367 * adding it to a pav group. 370 * adding it to a pav group.
368 */ 371 */
372
369static int _add_device_to_lcu(struct alias_lcu *lcu, 373static int _add_device_to_lcu(struct alias_lcu *lcu,
370 struct dasd_device *device) 374 struct dasd_device *device,
375 struct dasd_device *pos)
371{ 376{
372 377
373 struct dasd_eckd_private *private; 378 struct dasd_eckd_private *private;
374 struct alias_pav_group *group; 379 struct alias_pav_group *group;
375 struct dasd_uid *uid; 380 struct dasd_uid uid;
381 unsigned long flags;
376 382
377 private = (struct dasd_eckd_private *) device->private; 383 private = (struct dasd_eckd_private *) device->private;
378 uid = &private->uid; 384
379 uid->type = lcu->uac->unit[uid->real_unit_addr].ua_type; 385 /* only lock if not already locked */
380 uid->base_unit_addr = lcu->uac->unit[uid->real_unit_addr].base_ua; 386 if (device != pos)
381 dasd_set_uid(device->cdev, &private->uid); 387 spin_lock_irqsave_nested(get_ccwdev_lock(device->cdev), flags,
388 CDEV_NESTED_SECOND);
389 private->uid.type = lcu->uac->unit[private->uid.real_unit_addr].ua_type;
390 private->uid.base_unit_addr =
391 lcu->uac->unit[private->uid.real_unit_addr].base_ua;
392 uid = private->uid;
393
394 if (device != pos)
395 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
382 396
383 /* if we have no PAV anyway, we don't need to bother with PAV groups */ 397 /* if we have no PAV anyway, we don't need to bother with PAV groups */
384 if (lcu->pav == NO_PAV) { 398 if (lcu->pav == NO_PAV) {
@@ -386,25 +400,25 @@ static int _add_device_to_lcu(struct alias_lcu *lcu,
386 return 0; 400 return 0;
387 } 401 }
388 402
389 group = _find_group(lcu, uid); 403 group = _find_group(lcu, &uid);
390 if (!group) { 404 if (!group) {
391 group = kzalloc(sizeof(*group), GFP_ATOMIC); 405 group = kzalloc(sizeof(*group), GFP_ATOMIC);
392 if (!group) 406 if (!group)
393 return -ENOMEM; 407 return -ENOMEM;
394 memcpy(group->uid.vendor, uid->vendor, sizeof(uid->vendor)); 408 memcpy(group->uid.vendor, uid.vendor, sizeof(uid.vendor));
395 memcpy(group->uid.serial, uid->serial, sizeof(uid->serial)); 409 memcpy(group->uid.serial, uid.serial, sizeof(uid.serial));
396 group->uid.ssid = uid->ssid; 410 group->uid.ssid = uid.ssid;
397 if (uid->type == UA_BASE_DEVICE) 411 if (uid.type == UA_BASE_DEVICE)
398 group->uid.base_unit_addr = uid->real_unit_addr; 412 group->uid.base_unit_addr = uid.real_unit_addr;
399 else 413 else
400 group->uid.base_unit_addr = uid->base_unit_addr; 414 group->uid.base_unit_addr = uid.base_unit_addr;
401 memcpy(group->uid.vduit, uid->vduit, sizeof(uid->vduit)); 415 memcpy(group->uid.vduit, uid.vduit, sizeof(uid.vduit));
402 INIT_LIST_HEAD(&group->group); 416 INIT_LIST_HEAD(&group->group);
403 INIT_LIST_HEAD(&group->baselist); 417 INIT_LIST_HEAD(&group->baselist);
404 INIT_LIST_HEAD(&group->aliaslist); 418 INIT_LIST_HEAD(&group->aliaslist);
405 list_add(&group->group, &lcu->grouplist); 419 list_add(&group->group, &lcu->grouplist);
406 } 420 }
407 if (uid->type == UA_BASE_DEVICE) 421 if (uid.type == UA_BASE_DEVICE)
408 list_move(&device->alias_list, &group->baselist); 422 list_move(&device->alias_list, &group->baselist);
409 else 423 else
410 list_move(&device->alias_list, &group->aliaslist); 424 list_move(&device->alias_list, &group->aliaslist);
@@ -525,7 +539,10 @@ static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
525 if (rc) 539 if (rc)
526 return rc; 540 return rc;
527 541
528 spin_lock_irqsave(&lcu->lock, flags); 542 /* need to take cdev lock before lcu lock */
543 spin_lock_irqsave_nested(get_ccwdev_lock(refdev->cdev), flags,
544 CDEV_NESTED_FIRST);
545 spin_lock(&lcu->lock);
529 lcu->pav = NO_PAV; 546 lcu->pav = NO_PAV;
530 for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) { 547 for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) {
531 switch (lcu->uac->unit[i].ua_type) { 548 switch (lcu->uac->unit[i].ua_type) {
@@ -542,9 +559,10 @@ static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
542 559
543 list_for_each_entry_safe(device, tempdev, &lcu->active_devices, 560 list_for_each_entry_safe(device, tempdev, &lcu->active_devices,
544 alias_list) { 561 alias_list) {
545 _add_device_to_lcu(lcu, device); 562 _add_device_to_lcu(lcu, device, refdev);
546 } 563 }
547 spin_unlock_irqrestore(&lcu->lock, flags); 564 spin_unlock(&lcu->lock);
565 spin_unlock_irqrestore(get_ccwdev_lock(refdev->cdev), flags);
548 return 0; 566 return 0;
549} 567}
550 568
@@ -628,9 +646,12 @@ int dasd_alias_add_device(struct dasd_device *device)
628 private = (struct dasd_eckd_private *) device->private; 646 private = (struct dasd_eckd_private *) device->private;
629 lcu = private->lcu; 647 lcu = private->lcu;
630 rc = 0; 648 rc = 0;
631 spin_lock_irqsave(&lcu->lock, flags); 649
650 /* need to take cdev lock before lcu lock */
651 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
652 spin_lock(&lcu->lock);
632 if (!(lcu->flags & UPDATE_PENDING)) { 653 if (!(lcu->flags & UPDATE_PENDING)) {
633 rc = _add_device_to_lcu(lcu, device); 654 rc = _add_device_to_lcu(lcu, device, device);
634 if (rc) 655 if (rc)
635 lcu->flags |= UPDATE_PENDING; 656 lcu->flags |= UPDATE_PENDING;
636 } 657 }
@@ -638,10 +659,19 @@ int dasd_alias_add_device(struct dasd_device *device)
638 list_move(&device->alias_list, &lcu->active_devices); 659 list_move(&device->alias_list, &lcu->active_devices);
639 _schedule_lcu_update(lcu, device); 660 _schedule_lcu_update(lcu, device);
640 } 661 }
641 spin_unlock_irqrestore(&lcu->lock, flags); 662 spin_unlock(&lcu->lock);
663 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
642 return rc; 664 return rc;
643} 665}
644 666
667int dasd_alias_update_add_device(struct dasd_device *device)
668{
669 struct dasd_eckd_private *private;
670 private = (struct dasd_eckd_private *) device->private;
671 private->lcu->flags |= UPDATE_PENDING;
672 return dasd_alias_add_device(device);
673}
674
645int dasd_alias_remove_device(struct dasd_device *device) 675int dasd_alias_remove_device(struct dasd_device *device)
646{ 676{
647 struct dasd_eckd_private *private; 677 struct dasd_eckd_private *private;
@@ -740,19 +770,30 @@ static void _restart_all_base_devices_on_lcu(struct alias_lcu *lcu)
740 struct alias_pav_group *pavgroup; 770 struct alias_pav_group *pavgroup;
741 struct dasd_device *device; 771 struct dasd_device *device;
742 struct dasd_eckd_private *private; 772 struct dasd_eckd_private *private;
773 unsigned long flags;
743 774
744 /* active and inactive list can contain alias as well as base devices */ 775 /* active and inactive list can contain alias as well as base devices */
745 list_for_each_entry(device, &lcu->active_devices, alias_list) { 776 list_for_each_entry(device, &lcu->active_devices, alias_list) {
746 private = (struct dasd_eckd_private *) device->private; 777 private = (struct dasd_eckd_private *) device->private;
747 if (private->uid.type != UA_BASE_DEVICE) 778 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
779 if (private->uid.type != UA_BASE_DEVICE) {
780 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
781 flags);
748 continue; 782 continue;
783 }
784 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
749 dasd_schedule_block_bh(device->block); 785 dasd_schedule_block_bh(device->block);
750 dasd_schedule_device_bh(device); 786 dasd_schedule_device_bh(device);
751 } 787 }
752 list_for_each_entry(device, &lcu->inactive_devices, alias_list) { 788 list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
753 private = (struct dasd_eckd_private *) device->private; 789 private = (struct dasd_eckd_private *) device->private;
754 if (private->uid.type != UA_BASE_DEVICE) 790 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
791 if (private->uid.type != UA_BASE_DEVICE) {
792 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
793 flags);
755 continue; 794 continue;
795 }
796 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
756 dasd_schedule_block_bh(device->block); 797 dasd_schedule_block_bh(device->block);
757 dasd_schedule_device_bh(device); 798 dasd_schedule_device_bh(device);
758 } 799 }
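
Throughout the dasd_alias.c hunk the code stops caching a pointer into the devmap UID and instead copies the UID by value via discipline->get_uid() while holding the ccw device lock, so later comparisons work on a stable snapshot. A hedged sketch of such a copy-out accessor, mirroring dasd_eckd_get_uid() further down but with placeholder types:

#include <linux/errno.h>
#include <linux/spinlock.h>

struct demo_uid {
	char vendor[4];
	char serial[15];
	unsigned short ssid;
};

struct demo_private {
	spinlock_t lock;
	struct demo_uid uid;
};

static int demo_get_uid(struct demo_private *priv, struct demo_uid *uid)
{
	unsigned long flags;

	if (!priv)
		return -EINVAL;
	spin_lock_irqsave(&priv->lock, flags);
	*uid = priv->uid;	/* struct copy: snapshot, no dangling pointer */
	spin_unlock_irqrestore(&priv->lock, flags);
	return 0;
}
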
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index eff9c812c5c2..34d51dd4c539 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -49,7 +49,6 @@ struct dasd_devmap {
49 unsigned int devindex; 49 unsigned int devindex;
50 unsigned short features; 50 unsigned short features;
51 struct dasd_device *device; 51 struct dasd_device *device;
52 struct dasd_uid uid;
53}; 52};
54 53
55/* 54/*
@@ -936,42 +935,46 @@ dasd_device_status_show(struct device *dev, struct device_attribute *attr,
936 935
937static DEVICE_ATTR(status, 0444, dasd_device_status_show, NULL); 936static DEVICE_ATTR(status, 0444, dasd_device_status_show, NULL);
938 937
939static ssize_t 938static ssize_t dasd_alias_show(struct device *dev,
940dasd_alias_show(struct device *dev, struct device_attribute *attr, char *buf) 939 struct device_attribute *attr, char *buf)
941{ 940{
942 struct dasd_devmap *devmap; 941 struct dasd_device *device;
943 int alias; 942 struct dasd_uid uid;
944 943
945 devmap = dasd_find_busid(dev_name(dev)); 944 device = dasd_device_from_cdev(to_ccwdev(dev));
946 spin_lock(&dasd_devmap_lock); 945 if (IS_ERR(device))
947 if (IS_ERR(devmap) || strlen(devmap->uid.vendor) == 0) {
948 spin_unlock(&dasd_devmap_lock);
949 return sprintf(buf, "0\n"); 946 return sprintf(buf, "0\n");
947
948 if (device->discipline && device->discipline->get_uid &&
949 !device->discipline->get_uid(device, &uid)) {
950 if (uid.type == UA_BASE_PAV_ALIAS ||
951 uid.type == UA_HYPER_PAV_ALIAS)
952 return sprintf(buf, "1\n");
950 } 953 }
951 if (devmap->uid.type == UA_BASE_PAV_ALIAS || 954 dasd_put_device(device);
952 devmap->uid.type == UA_HYPER_PAV_ALIAS) 955
953 alias = 1; 956 return sprintf(buf, "0\n");
954 else
955 alias = 0;
956 spin_unlock(&dasd_devmap_lock);
957 return sprintf(buf, alias ? "1\n" : "0\n");
958} 957}
959 958
960static DEVICE_ATTR(alias, 0444, dasd_alias_show, NULL); 959static DEVICE_ATTR(alias, 0444, dasd_alias_show, NULL);
961 960
962static ssize_t 961static ssize_t dasd_vendor_show(struct device *dev,
963dasd_vendor_show(struct device *dev, struct device_attribute *attr, char *buf) 962 struct device_attribute *attr, char *buf)
964{ 963{
965 struct dasd_devmap *devmap; 964 struct dasd_device *device;
965 struct dasd_uid uid;
966 char *vendor; 966 char *vendor;
967 967
968 devmap = dasd_find_busid(dev_name(dev)); 968 device = dasd_device_from_cdev(to_ccwdev(dev));
969 spin_lock(&dasd_devmap_lock); 969 vendor = "";
970 if (!IS_ERR(devmap) && strlen(devmap->uid.vendor) > 0) 970 if (IS_ERR(device))
971 vendor = devmap->uid.vendor; 971 return snprintf(buf, PAGE_SIZE, "%s\n", vendor);
972 else 972
973 vendor = ""; 973 if (device->discipline && device->discipline->get_uid &&
974 spin_unlock(&dasd_devmap_lock); 974 !device->discipline->get_uid(device, &uid))
975 vendor = uid.vendor;
976
977 dasd_put_device(device);
975 978
976 return snprintf(buf, PAGE_SIZE, "%s\n", vendor); 979 return snprintf(buf, PAGE_SIZE, "%s\n", vendor);
977} 980}
@@ -985,48 +988,51 @@ static DEVICE_ATTR(vendor, 0444, dasd_vendor_show, NULL);
985static ssize_t 988static ssize_t
986dasd_uid_show(struct device *dev, struct device_attribute *attr, char *buf) 989dasd_uid_show(struct device *dev, struct device_attribute *attr, char *buf)
987{ 990{
988 struct dasd_devmap *devmap; 991 struct dasd_device *device;
992 struct dasd_uid uid;
989 char uid_string[UID_STRLEN]; 993 char uid_string[UID_STRLEN];
990 char ua_string[3]; 994 char ua_string[3];
991 struct dasd_uid *uid;
992 995
993 devmap = dasd_find_busid(dev_name(dev)); 996 device = dasd_device_from_cdev(to_ccwdev(dev));
994 spin_lock(&dasd_devmap_lock); 997 uid_string[0] = 0;
995 if (IS_ERR(devmap) || strlen(devmap->uid.vendor) == 0) { 998 if (IS_ERR(device))
996 spin_unlock(&dasd_devmap_lock); 999 return snprintf(buf, PAGE_SIZE, "%s\n", uid_string);
997 return sprintf(buf, "\n"); 1000
998 } 1001 if (device->discipline && device->discipline->get_uid &&
999 uid = &devmap->uid; 1002 !device->discipline->get_uid(device, &uid)) {
1000 switch (uid->type) { 1003 switch (uid.type) {
1001 case UA_BASE_DEVICE: 1004 case UA_BASE_DEVICE:
1002 sprintf(ua_string, "%02x", uid->real_unit_addr); 1005 snprintf(ua_string, sizeof(ua_string), "%02x",
1003 break; 1006 uid.real_unit_addr);
1004 case UA_BASE_PAV_ALIAS: 1007 break;
1005 sprintf(ua_string, "%02x", uid->base_unit_addr); 1008 case UA_BASE_PAV_ALIAS:
1006 break; 1009 snprintf(ua_string, sizeof(ua_string), "%02x",
1007 case UA_HYPER_PAV_ALIAS: 1010 uid.base_unit_addr);
1008 sprintf(ua_string, "xx"); 1011 break;
1009 break; 1012 case UA_HYPER_PAV_ALIAS:
1010 default: 1013 snprintf(ua_string, sizeof(ua_string), "xx");
1011 /* should not happen, treat like base device */ 1014 break;
1012 sprintf(ua_string, "%02x", uid->real_unit_addr); 1015 default:
1013 break; 1016 /* should not happen, treat like base device */
1017 snprintf(ua_string, sizeof(ua_string), "%02x",
1018 uid.real_unit_addr);
1019 break;
1020 }
1021
1022 if (strlen(uid.vduit) > 0)
1023 snprintf(uid_string, sizeof(uid_string),
1024 "%s.%s.%04x.%s.%s",
1025 uid.vendor, uid.serial, uid.ssid, ua_string,
1026 uid.vduit);
1027 else
1028 snprintf(uid_string, sizeof(uid_string),
1029 "%s.%s.%04x.%s",
1030 uid.vendor, uid.serial, uid.ssid, ua_string);
1014 } 1031 }
1015 if (strlen(uid->vduit) > 0) 1032 dasd_put_device(device);
1016 snprintf(uid_string, sizeof(uid_string), 1033
1017 "%s.%s.%04x.%s.%s",
1018 uid->vendor, uid->serial,
1019 uid->ssid, ua_string,
1020 uid->vduit);
1021 else
1022 snprintf(uid_string, sizeof(uid_string),
1023 "%s.%s.%04x.%s",
1024 uid->vendor, uid->serial,
1025 uid->ssid, ua_string);
1026 spin_unlock(&dasd_devmap_lock);
1027 return snprintf(buf, PAGE_SIZE, "%s\n", uid_string); 1034 return snprintf(buf, PAGE_SIZE, "%s\n", uid_string);
1028} 1035}
1029
1030static DEVICE_ATTR(uid, 0444, dasd_uid_show, NULL); 1036static DEVICE_ATTR(uid, 0444, dasd_uid_show, NULL);
1031 1037
1032/* 1038/*
@@ -1094,50 +1100,6 @@ static struct attribute_group dasd_attr_group = {
1094}; 1100};
1095 1101
1096/* 1102/*
1097 * Return copy of the device unique identifier.
1098 */
1099int
1100dasd_get_uid(struct ccw_device *cdev, struct dasd_uid *uid)
1101{
1102 struct dasd_devmap *devmap;
1103
1104 devmap = dasd_find_busid(dev_name(&cdev->dev));
1105 if (IS_ERR(devmap))
1106 return PTR_ERR(devmap);
1107 spin_lock(&dasd_devmap_lock);
1108 *uid = devmap->uid;
1109 spin_unlock(&dasd_devmap_lock);
1110 return 0;
1111}
1112EXPORT_SYMBOL_GPL(dasd_get_uid);
1113
1114/*
1115 * Register the given device unique identifier into devmap struct.
1116 * In addition check if the related storage server subsystem ID is already
1117 * contained in the dasd_server_ssid_list. If subsystem ID is not contained,
1118 * create new entry.
1119 * Return 0 if server was already in serverlist,
1120 * 1 if the server was added successful
1121 * <0 in case of error.
1122 */
1123int
1124dasd_set_uid(struct ccw_device *cdev, struct dasd_uid *uid)
1125{
1126 struct dasd_devmap *devmap;
1127
1128 devmap = dasd_find_busid(dev_name(&cdev->dev));
1129 if (IS_ERR(devmap))
1130 return PTR_ERR(devmap);
1131
1132 spin_lock(&dasd_devmap_lock);
1133 devmap->uid = *uid;
1134 spin_unlock(&dasd_devmap_lock);
1135
1136 return 0;
1137}
1138EXPORT_SYMBOL_GPL(dasd_set_uid);
1139
1140/*
1141 * Return value of the specified feature. 1103 * Return value of the specified feature.
1142 */ 1104 */
1143int 1105int
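
The devmap sysfs handlers now fetch a UID snapshot through get_uid() and build the uid string with bounded snprintf() calls instead of sprintf() into fixed buffers. A minimal standalone sketch of that composition; the buffer length and field set are assumptions for the example:

#include <stdio.h>

#define DEMO_UID_STRLEN 64

static void demo_format_uid(char *buf, const char *vendor, const char *serial,
			    unsigned int ssid, const char *ua, const char *vduit)
{
	if (vduit && vduit[0])
		snprintf(buf, DEMO_UID_STRLEN, "%s.%s.%04x.%s.%s",
			 vendor, serial, ssid, ua, vduit);
	else
		snprintf(buf, DEMO_UID_STRLEN, "%s.%s.%04x.%s",
			 vendor, serial, ssid, ua);
}
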
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 0cb233116855..5b1cd8d6e971 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -692,18 +692,20 @@ dasd_eckd_cdl_reclen(int recid)
692/* 692/*
693 * Generate device unique id that specifies the physical device. 693 * Generate device unique id that specifies the physical device.
694 */ 694 */
695static int dasd_eckd_generate_uid(struct dasd_device *device, 695static int dasd_eckd_generate_uid(struct dasd_device *device)
696 struct dasd_uid *uid)
697{ 696{
698 struct dasd_eckd_private *private; 697 struct dasd_eckd_private *private;
698 struct dasd_uid *uid;
699 int count; 699 int count;
700 unsigned long flags;
700 701
701 private = (struct dasd_eckd_private *) device->private; 702 private = (struct dasd_eckd_private *) device->private;
702 if (!private) 703 if (!private)
703 return -ENODEV; 704 return -ENODEV;
704 if (!private->ned || !private->gneq) 705 if (!private->ned || !private->gneq)
705 return -ENODEV; 706 return -ENODEV;
706 707 uid = &private->uid;
708 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
707 memset(uid, 0, sizeof(struct dasd_uid)); 709 memset(uid, 0, sizeof(struct dasd_uid));
708 memcpy(uid->vendor, private->ned->HDA_manufacturer, 710 memcpy(uid->vendor, private->ned->HDA_manufacturer,
709 sizeof(uid->vendor) - 1); 711 sizeof(uid->vendor) - 1);
@@ -726,9 +728,25 @@ static int dasd_eckd_generate_uid(struct dasd_device *device,
726 private->vdsneq->uit[count]); 728 private->vdsneq->uit[count]);
727 } 729 }
728 } 730 }
731 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
729 return 0; 732 return 0;
730} 733}
731 734
735static int dasd_eckd_get_uid(struct dasd_device *device, struct dasd_uid *uid)
736{
737 struct dasd_eckd_private *private;
738 unsigned long flags;
739
740 if (device->private) {
741 private = (struct dasd_eckd_private *)device->private;
742 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
743 *uid = private->uid;
744 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
745 return 0;
746 }
747 return -EINVAL;
748}
749
732static struct dasd_ccw_req *dasd_eckd_build_rcd_lpm(struct dasd_device *device, 750static struct dasd_ccw_req *dasd_eckd_build_rcd_lpm(struct dasd_device *device,
733 void *rcd_buffer, 751 void *rcd_buffer,
734 struct ciw *ciw, __u8 lpm) 752 struct ciw *ciw, __u8 lpm)
@@ -1088,6 +1106,7 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
1088{ 1106{
1089 struct dasd_eckd_private *private; 1107 struct dasd_eckd_private *private;
1090 struct dasd_block *block; 1108 struct dasd_block *block;
1109 struct dasd_uid temp_uid;
1091 int is_known, rc; 1110 int is_known, rc;
1092 int readonly; 1111 int readonly;
1093 1112
@@ -1124,13 +1143,13 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
1124 if (rc) 1143 if (rc)
1125 goto out_err1; 1144 goto out_err1;
1126 1145
1127 /* Generate device unique id and register in devmap */ 1146 /* Generate device unique id */
1128 rc = dasd_eckd_generate_uid(device, &private->uid); 1147 rc = dasd_eckd_generate_uid(device);
1129 if (rc) 1148 if (rc)
1130 goto out_err1; 1149 goto out_err1;
1131 dasd_set_uid(device->cdev, &private->uid);
1132 1150
1133 if (private->uid.type == UA_BASE_DEVICE) { 1151 dasd_eckd_get_uid(device, &temp_uid);
1152 if (temp_uid.type == UA_BASE_DEVICE) {
1134 block = dasd_alloc_block(); 1153 block = dasd_alloc_block();
1135 if (IS_ERR(block)) { 1154 if (IS_ERR(block)) {
1136 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", 1155 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
@@ -1451,6 +1470,7 @@ static int dasd_eckd_ready_to_online(struct dasd_device *device)
1451 1470
1452static int dasd_eckd_online_to_ready(struct dasd_device *device) 1471static int dasd_eckd_online_to_ready(struct dasd_device *device)
1453{ 1472{
1473 cancel_work_sync(&device->reload_device);
1454 return dasd_alias_remove_device(device); 1474 return dasd_alias_remove_device(device);
1455}; 1475};
1456 1476
@@ -1709,10 +1729,27 @@ static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device,
1709{ 1729{
1710 char mask; 1730 char mask;
1711 char *sense = NULL; 1731 char *sense = NULL;
1732 struct dasd_eckd_private *private;
1712 1733
1734 private = (struct dasd_eckd_private *) device->private;
1713 /* first of all check for state change pending interrupt */ 1735 /* first of all check for state change pending interrupt */
1714 mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP; 1736 mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
1715 if ((scsw_dstat(&irb->scsw) & mask) == mask) { 1737 if ((scsw_dstat(&irb->scsw) & mask) == mask) {
 1738	/* for alias only and not in offline processing */
 1739	if (!device->block && private->lcu &&
 1740	    !test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
 1741		/*
 1742		 * the state change could be caused by an alias
 1743		 * reassignment; remove the device from alias handling
 1744		 * to prevent new requests from being scheduled on
 1745		 * the wrong alias device
 1746		 */
1747 dasd_alias_remove_device(device);
1748
1749 /* schedule worker to reload device */
1750 dasd_reload_device(device);
1751 }
1752
1716 dasd_generic_handle_state_change(device); 1753 dasd_generic_handle_state_change(device);
1717 return; 1754 return;
1718 } 1755 }
@@ -3259,7 +3296,7 @@ static void dasd_eckd_dump_sense(struct dasd_device *device,
3259 dasd_eckd_dump_sense_ccw(device, req, irb); 3296 dasd_eckd_dump_sense_ccw(device, req, irb);
3260} 3297}
3261 3298
3262int dasd_eckd_pm_freeze(struct dasd_device *device) 3299static int dasd_eckd_pm_freeze(struct dasd_device *device)
3263{ 3300{
3264 /* 3301 /*
3265 * the device should be disconnected from our LCU structure 3302 * the device should be disconnected from our LCU structure
@@ -3272,7 +3309,7 @@ int dasd_eckd_pm_freeze(struct dasd_device *device)
3272 return 0; 3309 return 0;
3273} 3310}
3274 3311
3275int dasd_eckd_restore_device(struct dasd_device *device) 3312static int dasd_eckd_restore_device(struct dasd_device *device)
3276{ 3313{
3277 struct dasd_eckd_private *private; 3314 struct dasd_eckd_private *private;
3278 struct dasd_eckd_characteristics temp_rdc_data; 3315 struct dasd_eckd_characteristics temp_rdc_data;
@@ -3287,15 +3324,16 @@ int dasd_eckd_restore_device(struct dasd_device *device)
3287 if (rc) 3324 if (rc)
3288 goto out_err; 3325 goto out_err;
3289 3326
3290 /* Generate device unique id and register in devmap */ 3327 dasd_eckd_get_uid(device, &temp_uid);
3291 rc = dasd_eckd_generate_uid(device, &private->uid); 3328 /* Generate device unique id */
3292 dasd_get_uid(device->cdev, &temp_uid); 3329 rc = dasd_eckd_generate_uid(device);
3330 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
3293 if (memcmp(&private->uid, &temp_uid, sizeof(struct dasd_uid)) != 0) 3331 if (memcmp(&private->uid, &temp_uid, sizeof(struct dasd_uid)) != 0)
3294 dev_err(&device->cdev->dev, "The UID of the DASD has " 3332 dev_err(&device->cdev->dev, "The UID of the DASD has "
3295 "changed\n"); 3333 "changed\n");
3334 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
3296 if (rc) 3335 if (rc)
3297 goto out_err; 3336 goto out_err;
3298 dasd_set_uid(device->cdev, &private->uid);
3299 3337
3300 /* register lcu with alias handling, enable PAV if this is a new lcu */ 3338 /* register lcu with alias handling, enable PAV if this is a new lcu */
3301 is_known = dasd_alias_make_device_known_to_lcu(device); 3339 is_known = dasd_alias_make_device_known_to_lcu(device);
@@ -3336,6 +3374,56 @@ out_err:
3336 return -1; 3374 return -1;
3337} 3375}
3338 3376
3377static int dasd_eckd_reload_device(struct dasd_device *device)
3378{
3379 struct dasd_eckd_private *private;
3380 int rc, old_base;
3381 char print_uid[60];
3382 struct dasd_uid uid;
3383 unsigned long flags;
3384
3385 private = (struct dasd_eckd_private *) device->private;
3386
3387 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
3388 old_base = private->uid.base_unit_addr;
3389 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
3390
3391 /* Read Configuration Data */
3392 rc = dasd_eckd_read_conf(device);
3393 if (rc)
3394 goto out_err;
3395
3396 rc = dasd_eckd_generate_uid(device);
3397 if (rc)
3398 goto out_err;
3399 /*
3400 * update unit address configuration and
3401 * add device to alias management
3402 */
3403 dasd_alias_update_add_device(device);
3404
3405 dasd_eckd_get_uid(device, &uid);
3406
3407 if (old_base != uid.base_unit_addr) {
3408 if (strlen(uid.vduit) > 0)
3409 snprintf(print_uid, sizeof(print_uid),
3410 "%s.%s.%04x.%02x.%s", uid.vendor, uid.serial,
3411 uid.ssid, uid.base_unit_addr, uid.vduit);
3412 else
3413 snprintf(print_uid, sizeof(print_uid),
3414 "%s.%s.%04x.%02x", uid.vendor, uid.serial,
3415 uid.ssid, uid.base_unit_addr);
3416
3417 dev_info(&device->cdev->dev,
3418 "An Alias device was reassigned to a new base device "
3419 "with UID: %s\n", print_uid);
3420 }
3421 return 0;
3422
3423out_err:
3424 return -1;
3425}
3426
3339static struct ccw_driver dasd_eckd_driver = { 3427static struct ccw_driver dasd_eckd_driver = {
3340 .name = "dasd-eckd", 3428 .name = "dasd-eckd",
3341 .owner = THIS_MODULE, 3429 .owner = THIS_MODULE,
@@ -3389,6 +3477,8 @@ static struct dasd_discipline dasd_eckd_discipline = {
3389 .ioctl = dasd_eckd_ioctl, 3477 .ioctl = dasd_eckd_ioctl,
3390 .freeze = dasd_eckd_pm_freeze, 3478 .freeze = dasd_eckd_pm_freeze,
3391 .restore = dasd_eckd_restore_device, 3479 .restore = dasd_eckd_restore_device,
3480 .reload = dasd_eckd_reload_device,
3481 .get_uid = dasd_eckd_get_uid,
3392}; 3482};
3393 3483
3394static int __init 3484static int __init
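
The dasd_eckd changes above queue work on a new reload_device item and call dasd_reload_device(), both declared in dasd_int.h further down. The core-side wiring lives in dasd.c, which is outside this excerpt; the following is only a minimal sketch of the shape those declarations imply (device reference counting omitted, INIT_WORK(&device->reload_device, do_reload_device) at allocation time assumed), not the actual implementation.

#include <linux/workqueue.h>
#include "dasd_int.h"	/* struct dasd_device, struct dasd_discipline */

/* Worker body: funnel the queued reload into the discipline callback. */
static void do_reload_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  reload_device);

	if (device->discipline && device->discipline->reload)
		device->discipline->reload(device);
}

/* Called from interrupt context (see the unsolicited-interrupt hunk above),
 * so the actual reload is deferred to process context. */
void dasd_reload_device(struct dasd_device *device)
{
	schedule_work(&device->reload_device);
}
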
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index 864d53c04201..dd6385a5af14 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -426,7 +426,6 @@ struct alias_pav_group {
426 struct dasd_device *next; 426 struct dasd_device *next;
427}; 427};
428 428
429
430struct dasd_eckd_private { 429struct dasd_eckd_private {
431 struct dasd_eckd_characteristics rdc_data; 430 struct dasd_eckd_characteristics rdc_data;
432 u8 *conf_data; 431 u8 *conf_data;
@@ -463,4 +462,5 @@ void dasd_alias_handle_summary_unit_check(struct dasd_device *, struct irb *);
463void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *); 462void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *);
464void dasd_alias_lcu_setup_complete(struct dasd_device *); 463void dasd_alias_lcu_setup_complete(struct dasd_device *);
465void dasd_alias_wait_for_lcu_setup(struct dasd_device *); 464void dasd_alias_wait_for_lcu_setup(struct dasd_device *);
465int dasd_alias_update_add_device(struct dasd_device *);
466#endif /* DASD_ECKD_H */ 466#endif /* DASD_ECKD_H */
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index a91d4a97d4f2..32fac186ba3f 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -81,6 +81,10 @@ struct dasd_block;
81#define DASD_SIM_MSG_TO_OP 0x03 81#define DASD_SIM_MSG_TO_OP 0x03
82#define DASD_SIM_LOG 0x0C 82#define DASD_SIM_LOG 0x0C
83 83
84/* lock class for nested cdev lock */
85#define CDEV_NESTED_FIRST 1
86#define CDEV_NESTED_SECOND 2
87
84/* 88/*
85 * SECTION: MACROs for klogd and s390 debug feature (dbf) 89 * SECTION: MACROs for klogd and s390 debug feature (dbf)
86 */ 90 */
@@ -229,6 +233,24 @@ struct dasd_ccw_req {
229typedef struct dasd_ccw_req *(*dasd_erp_fn_t) (struct dasd_ccw_req *); 233typedef struct dasd_ccw_req *(*dasd_erp_fn_t) (struct dasd_ccw_req *);
230 234
231/* 235/*
236 * Unique identifier for dasd device.
237 */
238#define UA_NOT_CONFIGURED 0x00
239#define UA_BASE_DEVICE 0x01
240#define UA_BASE_PAV_ALIAS 0x02
241#define UA_HYPER_PAV_ALIAS 0x03
242
243struct dasd_uid {
244 __u8 type;
245 char vendor[4];
246 char serial[15];
247 __u16 ssid;
248 __u8 real_unit_addr;
249 __u8 base_unit_addr;
250 char vduit[33];
251};
252
253/*
232 * the struct dasd_discipline is 254 * the struct dasd_discipline is
233 * sth like a table of virtual functions, if you think of dasd_eckd 255 * sth like a table of virtual functions, if you think of dasd_eckd
234 * inheriting dasd... 256 * inheriting dasd...
@@ -312,28 +334,15 @@ struct dasd_discipline {
312 /* suspend/resume functions */ 334 /* suspend/resume functions */
313 int (*freeze) (struct dasd_device *); 335 int (*freeze) (struct dasd_device *);
314 int (*restore) (struct dasd_device *); 336 int (*restore) (struct dasd_device *);
315};
316 337
317extern struct dasd_discipline *dasd_diag_discipline_pointer; 338 /* reload device after state change */
318 339 int (*reload) (struct dasd_device *);
319/*
320 * Unique identifier for dasd device.
321 */
322#define UA_NOT_CONFIGURED 0x00
323#define UA_BASE_DEVICE 0x01
324#define UA_BASE_PAV_ALIAS 0x02
325#define UA_HYPER_PAV_ALIAS 0x03
326 340
327struct dasd_uid { 341 int (*get_uid) (struct dasd_device *, struct dasd_uid *);
328 __u8 type;
329 char vendor[4];
330 char serial[15];
331 __u16 ssid;
332 __u8 real_unit_addr;
333 __u8 base_unit_addr;
334 char vduit[33];
335}; 342};
336 343
344extern struct dasd_discipline *dasd_diag_discipline_pointer;
345
337/* 346/*
338 * Notification numbers for extended error reporting notifications: 347 * Notification numbers for extended error reporting notifications:
339 * The DASD_EER_DISABLE notification is sent before a dasd_device (and it's 348 * The DASD_EER_DISABLE notification is sent before a dasd_device (and it's
@@ -386,6 +395,7 @@ struct dasd_device {
386 struct tasklet_struct tasklet; 395 struct tasklet_struct tasklet;
387 struct work_struct kick_work; 396 struct work_struct kick_work;
388 struct work_struct restore_device; 397 struct work_struct restore_device;
398 struct work_struct reload_device;
389 struct timer_list timer; 399 struct timer_list timer;
390 400
391 debug_info_t *debug_area; 401 debug_info_t *debug_area;
@@ -582,6 +592,7 @@ void dasd_enable_device(struct dasd_device *);
582void dasd_set_target_state(struct dasd_device *, int); 592void dasd_set_target_state(struct dasd_device *, int);
583void dasd_kick_device(struct dasd_device *); 593void dasd_kick_device(struct dasd_device *);
584void dasd_restore_device(struct dasd_device *); 594void dasd_restore_device(struct dasd_device *);
595void dasd_reload_device(struct dasd_device *);
585 596
586void dasd_add_request_head(struct dasd_ccw_req *); 597void dasd_add_request_head(struct dasd_ccw_req *);
587void dasd_add_request_tail(struct dasd_ccw_req *); 598void dasd_add_request_tail(struct dasd_ccw_req *);
@@ -629,8 +640,6 @@ void dasd_devmap_exit(void);
629struct dasd_device *dasd_create_device(struct ccw_device *); 640struct dasd_device *dasd_create_device(struct ccw_device *);
630void dasd_delete_device(struct dasd_device *); 641void dasd_delete_device(struct dasd_device *);
631 642
632int dasd_get_uid(struct ccw_device *, struct dasd_uid *);
633int dasd_set_uid(struct ccw_device *, struct dasd_uid *);
634int dasd_get_feature(struct ccw_device *, int); 643int dasd_get_feature(struct ccw_device *, int);
635int dasd_set_feature(struct ccw_device *, int, int); 644int dasd_set_feature(struct ccw_device *, int, int);
636 645
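
With dasd_get_uid()/dasd_set_uid() removed from the devmap layer, consumers now reach the UID through the new discipline->get_uid() callback. A hypothetical sysfs-style reader, using only the struct dasd_uid fields defined above (illustration only, not code from this commit):

#include <linux/kernel.h>
#include "dasd_int.h"

static ssize_t example_uid_show(struct dasd_device *device, char *buf)
{
	struct dasd_uid uid;

	/* Fall back to an empty line if the discipline cannot supply a UID. */
	if (!device->discipline || !device->discipline->get_uid ||
	    device->discipline->get_uid(device, &uid))
		return snprintf(buf, PAGE_SIZE, "\n");

	return snprintf(buf, PAGE_SIZE, "%s.%s.%04x.%02x\n",
			uid.vendor, uid.serial, uid.ssid, uid.real_unit_addr);
}
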
diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig
index 4e34d3686c23..40834f18754c 100644
--- a/drivers/s390/char/Kconfig
+++ b/drivers/s390/char/Kconfig
@@ -148,13 +148,12 @@ config VMLOGRDR
148 This driver depends on the IUCV support driver. 148 This driver depends on the IUCV support driver.
149 149
150config VMCP 150config VMCP
151 tristate "Support for the z/VM CP interface (VM only)" 151 bool "Support for the z/VM CP interface"
152 depends on S390 152 depends on S390
153 help 153 help
154 Select this option if you want to be able to interact with the control 154 Select this option if you want to be able to interact with the control
155 program on z/VM 155 program on z/VM
156 156
157
158config MONREADER 157config MONREADER
159 tristate "API for reading z/VM monitor service records" 158 tristate "API for reading z/VM monitor service records"
160 depends on IUCV 159 depends on IUCV
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 0eabcca3c92d..857dfcb7b359 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -484,6 +484,7 @@ fs3270_open(struct inode *inode, struct file *filp)
484 raw3270_del_view(&fp->view); 484 raw3270_del_view(&fp->view);
485 goto out; 485 goto out;
486 } 486 }
487 nonseekable_open(inode, filp);
487 filp->private_data = fp; 488 filp->private_data = fp;
488out: 489out:
489 mutex_unlock(&fs3270_mutex); 490 mutex_unlock(&fs3270_mutex);
diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c
index cb6bffe7141a..18d9a497863b 100644
--- a/drivers/s390/char/keyboard.c
+++ b/drivers/s390/char/keyboard.c
@@ -49,7 +49,7 @@ static unsigned char ret_diacr[NR_DEAD] = {
49struct kbd_data * 49struct kbd_data *
50kbd_alloc(void) { 50kbd_alloc(void) {
51 struct kbd_data *kbd; 51 struct kbd_data *kbd;
52 int i, len; 52 int i;
53 53
54 kbd = kzalloc(sizeof(struct kbd_data), GFP_KERNEL); 54 kbd = kzalloc(sizeof(struct kbd_data), GFP_KERNEL);
55 if (!kbd) 55 if (!kbd)
@@ -59,12 +59,11 @@ kbd_alloc(void) {
59 goto out_kbd; 59 goto out_kbd;
60 for (i = 0; i < ARRAY_SIZE(key_maps); i++) { 60 for (i = 0; i < ARRAY_SIZE(key_maps); i++) {
61 if (key_maps[i]) { 61 if (key_maps[i]) {
62 kbd->key_maps[i] = 62 kbd->key_maps[i] = kmemdup(key_maps[i],
63 kmalloc(sizeof(u_short)*NR_KEYS, GFP_KERNEL); 63 sizeof(u_short) * NR_KEYS,
64 GFP_KERNEL);
64 if (!kbd->key_maps[i]) 65 if (!kbd->key_maps[i])
65 goto out_maps; 66 goto out_maps;
66 memcpy(kbd->key_maps[i], key_maps[i],
67 sizeof(u_short)*NR_KEYS);
68 } 67 }
69 } 68 }
70 kbd->func_table = kzalloc(sizeof(func_table), GFP_KERNEL); 69 kbd->func_table = kzalloc(sizeof(func_table), GFP_KERNEL);
@@ -72,23 +71,21 @@ kbd_alloc(void) {
72 goto out_maps; 71 goto out_maps;
73 for (i = 0; i < ARRAY_SIZE(func_table); i++) { 72 for (i = 0; i < ARRAY_SIZE(func_table); i++) {
74 if (func_table[i]) { 73 if (func_table[i]) {
75 len = strlen(func_table[i]) + 1; 74 kbd->func_table[i] = kstrdup(func_table[i],
76 kbd->func_table[i] = kmalloc(len, GFP_KERNEL); 75 GFP_KERNEL);
77 if (!kbd->func_table[i]) 76 if (!kbd->func_table[i])
78 goto out_func; 77 goto out_func;
79 memcpy(kbd->func_table[i], func_table[i], len);
80 } 78 }
81 } 79 }
82 kbd->fn_handler = 80 kbd->fn_handler =
83 kzalloc(sizeof(fn_handler_fn *) * NR_FN_HANDLER, GFP_KERNEL); 81 kzalloc(sizeof(fn_handler_fn *) * NR_FN_HANDLER, GFP_KERNEL);
84 if (!kbd->fn_handler) 82 if (!kbd->fn_handler)
85 goto out_func; 83 goto out_func;
86 kbd->accent_table = 84 kbd->accent_table = kmemdup(accent_table,
87 kmalloc(sizeof(struct kbdiacruc)*MAX_DIACR, GFP_KERNEL); 85 sizeof(struct kbdiacruc) * MAX_DIACR,
86 GFP_KERNEL);
88 if (!kbd->accent_table) 87 if (!kbd->accent_table)
89 goto out_fn_handler; 88 goto out_fn_handler;
90 memcpy(kbd->accent_table, accent_table,
91 sizeof(struct kbdiacruc)*MAX_DIACR);
92 kbd->accent_table_size = accent_table_size; 89 kbd->accent_table_size = accent_table_size;
93 return kbd; 90 return kbd;
94 91
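
The keyboard.c hunks above replace open-coded allocate-and-copy sequences with kmemdup() and kstrdup(). The two forms are equivalent; a minimal sketch of the pattern being collapsed:

#include <linux/slab.h>
#include <linux/string.h>

/* Before: allocate, check, copy by hand. */
static void *dup_open_coded(const void *src, size_t len)
{
	void *p = kmalloc(len, GFP_KERNEL);

	if (p)
		memcpy(p, src, len);
	return p;
}

/* After: one call; kstrdup() plays the same role for NUL-terminated strings. */
static void *dup_with_helper(const void *src, size_t len)
{
	return kmemdup(src, len, GFP_KERNEL);
}
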
diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c
index 5bb59d36a6d4..04e532eec032 100644
--- a/drivers/s390/char/vmcp.c
+++ b/drivers/s390/char/vmcp.c
@@ -1,24 +1,20 @@
1/* 1/*
2 * Copyright IBM Corp. 2004,2007 2 * Copyright IBM Corp. 2004,2010
3 * Interface implementation for communication with the z/VM control program 3 * Interface implementation for communication with the z/VM control program
4 * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
5 * 4 *
5 * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
6 * 6 *
7 * z/VMs CP offers the possibility to issue commands via the diagnose code 8 7 * z/VMs CP offers the possibility to issue commands via the diagnose code 8
8 * this driver implements a character device that issues these commands and 8 * this driver implements a character device that issues these commands and
9 * returns the answer of CP. 9 * returns the answer of CP.
10 10 *
11 * The idea of this driver is based on cpint from Neale Ferguson and #CP in CMS 11 * The idea of this driver is based on cpint from Neale Ferguson and #CP in CMS
12 */ 12 */
13 13
14#define KMSG_COMPONENT "vmcp"
15#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
16
17#include <linux/fs.h> 14#include <linux/fs.h>
18#include <linux/init.h> 15#include <linux/init.h>
19#include <linux/kernel.h> 16#include <linux/kernel.h>
20#include <linux/miscdevice.h> 17#include <linux/miscdevice.h>
21#include <linux/module.h>
22#include <linux/slab.h> 18#include <linux/slab.h>
23#include <asm/compat.h> 19#include <asm/compat.h>
24#include <asm/cpcmd.h> 20#include <asm/cpcmd.h>
@@ -26,10 +22,6 @@
26#include <asm/uaccess.h> 22#include <asm/uaccess.h>
27#include "vmcp.h" 23#include "vmcp.h"
28 24
29MODULE_LICENSE("GPL");
30MODULE_AUTHOR("Christian Borntraeger <borntraeger@de.ibm.com>");
31MODULE_DESCRIPTION("z/VM CP interface");
32
33static debug_info_t *vmcp_debug; 25static debug_info_t *vmcp_debug;
34 26
35static int vmcp_open(struct inode *inode, struct file *file) 27static int vmcp_open(struct inode *inode, struct file *file)
@@ -197,11 +189,8 @@ static int __init vmcp_init(void)
197{ 189{
198 int ret; 190 int ret;
199 191
200 if (!MACHINE_IS_VM) { 192 if (!MACHINE_IS_VM)
201 pr_warning("The z/VM CP interface device driver cannot be " 193 return 0;
202 "loaded without z/VM\n");
203 return -ENODEV;
204 }
205 194
206 vmcp_debug = debug_register("vmcp", 1, 1, 240); 195 vmcp_debug = debug_register("vmcp", 1, 1, 240);
207 if (!vmcp_debug) 196 if (!vmcp_debug)
@@ -214,19 +203,8 @@ static int __init vmcp_init(void)
214 } 203 }
215 204
216 ret = misc_register(&vmcp_dev); 205 ret = misc_register(&vmcp_dev);
217 if (ret) { 206 if (ret)
218 debug_unregister(vmcp_debug); 207 debug_unregister(vmcp_debug);
219 return ret; 208 return ret;
220 }
221
222 return 0;
223}
224
225static void __exit vmcp_exit(void)
226{
227 misc_deregister(&vmcp_dev);
228 debug_unregister(vmcp_debug);
229} 209}
230 210device_initcall(vmcp_init);
231module_init(vmcp_init);
232module_exit(vmcp_exit);
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 7217966f7d31..f5ea3384a4b9 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -445,7 +445,7 @@ static int zcore_memmap_open(struct inode *inode, struct file *filp)
445 } 445 }
446 kfree(chunk_array); 446 kfree(chunk_array);
447 filp->private_data = buf; 447 filp->private_data = buf;
448 return 0; 448 return nonseekable_open(inode, filp);
449} 449}
450 450
451static int zcore_memmap_release(struct inode *inode, struct file *filp) 451static int zcore_memmap_release(struct inode *inode, struct file *filp)
@@ -473,7 +473,7 @@ static ssize_t zcore_reipl_write(struct file *filp, const char __user *buf,
473 473
474static int zcore_reipl_open(struct inode *inode, struct file *filp) 474static int zcore_reipl_open(struct inode *inode, struct file *filp)
475{ 475{
476 return 0; 476 return nonseekable_open(inode, filp);
477} 477}
478 478
479static int zcore_reipl_release(struct inode *inode, struct file *filp) 479static int zcore_reipl_release(struct inode *inode, struct file *filp)
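
Several of the s390 character devices in this series (fs3270 and zcore above, the cio/zcrypt/zfcp nodes below) now call nonseekable_open() so that lseek(), pread() and pwrite() fail cleanly on devices without a meaningful file position. A generic open() using the helper (hypothetical example, not taken from this commit):

#include <linux/fs.h>

static int example_open(struct inode *inode, struct file *filp)
{
	/* per-open state would be set up here */
	return nonseekable_open(inode, filp);	/* clears FMODE_LSEEK/FMODE_PREAD/FMODE_PWRITE */
}
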
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index 3b6f4adc5094..a83877c664a6 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -803,6 +803,7 @@ static long chsc_ioctl(struct file *filp, unsigned int cmd,
803 803
804static const struct file_operations chsc_fops = { 804static const struct file_operations chsc_fops = {
805 .owner = THIS_MODULE, 805 .owner = THIS_MODULE,
806 .open = nonseekable_open,
806 .unlocked_ioctl = chsc_ioctl, 807 .unlocked_ioctl = chsc_ioctl,
807 .compat_ioctl = chsc_ioctl, 808 .compat_ioctl = chsc_ioctl,
808}; 809};
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 5feea1a371e1..f4e6cf3aceb8 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -616,7 +616,8 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
616 struct pt_regs *old_regs; 616 struct pt_regs *old_regs;
617 617
618 old_regs = set_irq_regs(regs); 618 old_regs = set_irq_regs(regs);
619 s390_idle_check(); 619 s390_idle_check(regs, S390_lowcore.int_clock,
620 S390_lowcore.async_enter_timer);
620 irq_enter(); 621 irq_enter();
621 __get_cpu_var(s390_idle).nohz_delay = 1; 622 __get_cpu_var(s390_idle).nohz_delay = 1;
622 if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator) 623 if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 511649115bd7..ac94ac751459 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -648,6 +648,8 @@ static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
648static void __init 648static void __init
649css_generate_pgid(struct channel_subsystem *css, u32 tod_high) 649css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
650{ 650{
651 struct cpuid cpu_id;
652
651 if (css_general_characteristics.mcss) { 653 if (css_general_characteristics.mcss) {
652 css->global_pgid.pgid_high.ext_cssid.version = 0x80; 654 css->global_pgid.pgid_high.ext_cssid.version = 0x80;
653 css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid; 655 css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
@@ -658,8 +660,9 @@ css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
658 css->global_pgid.pgid_high.cpu_addr = 0; 660 css->global_pgid.pgid_high.cpu_addr = 0;
659#endif 661#endif
660 } 662 }
661 css->global_pgid.cpu_id = S390_lowcore.cpu_id.ident; 663 get_cpu_id(&cpu_id);
662 css->global_pgid.cpu_model = S390_lowcore.cpu_id.machine; 664 css->global_pgid.cpu_id = cpu_id.ident;
665 css->global_pgid.cpu_model = cpu_id.machine;
663 css->global_pgid.tod_high = tod_high; 666 css->global_pgid.tod_high = tod_high;
664 667
665} 668}
@@ -1062,6 +1065,7 @@ static ssize_t cio_settle_write(struct file *file, const char __user *buf,
1062} 1065}
1063 1066
1064static const struct file_operations cio_settle_proc_fops = { 1067static const struct file_operations cio_settle_proc_fops = {
1068 .open = nonseekable_open,
1065 .write = cio_settle_write, 1069 .write = cio_settle_write,
1066}; 1070};
1067 1071
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 48aa0647432b..f0037eefd44e 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -13,8 +13,8 @@
13#include <asm/debug.h> 13#include <asm/debug.h>
14#include "chsc.h" 14#include "chsc.h"
15 15
16#define QDIO_BUSY_BIT_PATIENCE 100 /* 100 microseconds */ 16#define QDIO_BUSY_BIT_PATIENCE (100 << 12) /* 100 microseconds */
17#define QDIO_INPUT_THRESHOLD 500 /* 500 microseconds */ 17#define QDIO_INPUT_THRESHOLD (500 << 12) /* 500 microseconds */
18 18
19/* 19/*
20 * if an asynchronous HiperSockets queue runs full, the 10 seconds timer wait 20 * if an asynchronous HiperSockets queue runs full, the 10 seconds timer wait
@@ -296,10 +296,8 @@ struct qdio_q {
296 struct qdio_irq *irq_ptr; 296 struct qdio_irq *irq_ptr;
297 struct sl *sl; 297 struct sl *sl;
298 /* 298 /*
299 * Warning: Leave this member at the end so it won't be cleared in 299 * A page is allocated under this pointer and used for slib and sl.
300 * qdio_fill_qs. A page is allocated under this pointer and used for 300 * slib is 2048 bytes big and sl points to offset PAGE_SIZE / 2.
301 * slib and sl. slib is 2048 bytes big and sl points to offset
302 * PAGE_SIZE / 2.
303 */ 301 */
304 struct slib *slib; 302 struct slib *slib;
305} __attribute__ ((aligned(256))); 303} __attribute__ ((aligned(256)));
@@ -372,11 +370,6 @@ static inline int multicast_outbound(struct qdio_q *q)
372 (q->nr == q->irq_ptr->nr_output_qs - 1); 370 (q->nr == q->irq_ptr->nr_output_qs - 1);
373} 371}
374 372
375static inline unsigned long long get_usecs(void)
376{
377 return monotonic_clock() >> 12;
378}
379
380#define pci_out_supported(q) \ 373#define pci_out_supported(q) \
381 (q->irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED) 374 (q->irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)
382#define is_qebsm(q) (q->irq_ptr->sch_token != 0) 375#define is_qebsm(q) (q->irq_ptr->sch_token != 0)
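
The two constants above gain a "<< 12" because the get_usecs() helper removed here shifted monotonic_clock() down by 12 bits, while the qdio_main.c hunks below compare raw get_clock() values instead: in the s390 TOD clock format one microsecond corresponds to 1 << 12 clock units, so thresholds are now expressed directly in TOD units. A sketch of the resulting comparison under that assumption (USEC_TO_TOD is a local helper for the illustration):

#include <asm/timex.h>	/* get_clock() on s390 kernels of this vintage */

#define USEC_TO_TOD(us)	((unsigned long long)(us) << 12)

/* Busy-bit wait: give up once 100 microseconds (in TOD units) have passed. */
static inline int busy_bit_expired(unsigned long long start_tod)
{
	return (get_clock() - start_tod) >= USEC_TO_TOD(100);
}
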
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 88be7b9ea6e1..00520f9a7a8e 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -336,10 +336,10 @@ again:
336 WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2); 336 WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2);
337 337
338 if (!start_time) { 338 if (!start_time) {
339 start_time = get_usecs(); 339 start_time = get_clock();
340 goto again; 340 goto again;
341 } 341 }
342 if ((get_usecs() - start_time) < QDIO_BUSY_BIT_PATIENCE) 342 if ((get_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE)
343 goto again; 343 goto again;
344 } 344 }
345 return cc; 345 return cc;
@@ -536,7 +536,7 @@ static int qdio_inbound_q_moved(struct qdio_q *q)
536 if ((bufnr != q->last_move) || q->qdio_error) { 536 if ((bufnr != q->last_move) || q->qdio_error) {
537 q->last_move = bufnr; 537 q->last_move = bufnr;
538 if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR) 538 if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
539 q->u.in.timestamp = get_usecs(); 539 q->u.in.timestamp = get_clock();
540 return 1; 540 return 1;
541 } else 541 } else
542 return 0; 542 return 0;
@@ -567,7 +567,7 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
567 * At this point we know, that inbound first_to_check 567 * At this point we know, that inbound first_to_check
568 * has (probably) not moved (see qdio_inbound_processing). 568 * has (probably) not moved (see qdio_inbound_processing).
569 */ 569 */
570 if (get_usecs() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) { 570 if (get_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
571 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x", 571 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
572 q->first_to_check); 572 q->first_to_check);
573 return 1; 573 return 1;
@@ -606,7 +606,7 @@ static void qdio_kick_handler(struct qdio_q *q)
606static void __qdio_inbound_processing(struct qdio_q *q) 606static void __qdio_inbound_processing(struct qdio_q *q)
607{ 607{
608 qperf_inc(q, tasklet_inbound); 608 qperf_inc(q, tasklet_inbound);
609again: 609
610 if (!qdio_inbound_q_moved(q)) 610 if (!qdio_inbound_q_moved(q))
611 return; 611 return;
612 612
@@ -615,7 +615,10 @@ again:
615 if (!qdio_inbound_q_done(q)) { 615 if (!qdio_inbound_q_done(q)) {
616 /* means poll time is not yet over */ 616 /* means poll time is not yet over */
617 qperf_inc(q, tasklet_inbound_resched); 617 qperf_inc(q, tasklet_inbound_resched);
618 goto again; 618 if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
619 tasklet_schedule(&q->tasklet);
620 return;
621 }
619 } 622 }
620 623
621 qdio_stop_polling(q); 624 qdio_stop_polling(q);
@@ -625,7 +628,8 @@ again:
625 */ 628 */
626 if (!qdio_inbound_q_done(q)) { 629 if (!qdio_inbound_q_done(q)) {
627 qperf_inc(q, tasklet_inbound_resched2); 630 qperf_inc(q, tasklet_inbound_resched2);
628 goto again; 631 if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
632 tasklet_schedule(&q->tasklet);
629 } 633 }
630} 634}
631 635
@@ -955,6 +959,9 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
955 return; 959 return;
956 } 960 }
957 961
962 if (irq_ptr->perf_stat_enabled)
963 irq_ptr->perf_stat.qdio_int++;
964
958 if (IS_ERR(irb)) { 965 if (IS_ERR(irb)) {
959 switch (PTR_ERR(irb)) { 966 switch (PTR_ERR(irb)) {
960 case -EIO: 967 case -EIO:
@@ -1016,30 +1023,6 @@ int qdio_get_ssqd_desc(struct ccw_device *cdev,
1016} 1023}
1017EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc); 1024EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
1018 1025
1019/**
1020 * qdio_cleanup - shutdown queues and free data structures
1021 * @cdev: associated ccw device
1022 * @how: use halt or clear to shutdown
1023 *
1024 * This function calls qdio_shutdown() for @cdev with method @how.
1025 * and qdio_free(). The qdio_free() return value is ignored since
1026 * !irq_ptr is already checked.
1027 */
1028int qdio_cleanup(struct ccw_device *cdev, int how)
1029{
1030 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1031 int rc;
1032
1033 if (!irq_ptr)
1034 return -ENODEV;
1035
1036 rc = qdio_shutdown(cdev, how);
1037
1038 qdio_free(cdev);
1039 return rc;
1040}
1041EXPORT_SYMBOL_GPL(qdio_cleanup);
1042
1043static void qdio_shutdown_queues(struct ccw_device *cdev) 1026static void qdio_shutdown_queues(struct ccw_device *cdev)
1044{ 1027{
1045 struct qdio_irq *irq_ptr = cdev->private->qdio_data; 1028 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
@@ -1157,28 +1140,6 @@ int qdio_free(struct ccw_device *cdev)
1157EXPORT_SYMBOL_GPL(qdio_free); 1140EXPORT_SYMBOL_GPL(qdio_free);
1158 1141
1159/** 1142/**
1160 * qdio_initialize - allocate and establish queues for a qdio subchannel
1161 * @init_data: initialization data
1162 *
1163 * This function first allocates queues via qdio_allocate() and on success
1164 * establishes them via qdio_establish().
1165 */
1166int qdio_initialize(struct qdio_initialize *init_data)
1167{
1168 int rc;
1169
1170 rc = qdio_allocate(init_data);
1171 if (rc)
1172 return rc;
1173
1174 rc = qdio_establish(init_data);
1175 if (rc)
1176 qdio_free(init_data->cdev);
1177 return rc;
1178}
1179EXPORT_SYMBOL_GPL(qdio_initialize);
1180
1181/**
1182 * qdio_allocate - allocate qdio queues and associated data 1143 * qdio_allocate - allocate qdio queues and associated data
1183 * @init_data: initialization data 1144 * @init_data: initialization data
1184 */ 1145 */
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 7f4a75465140..6326b67c45d2 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -106,10 +106,12 @@ int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs, int nr_output_qs
106static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr, 106static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
107 qdio_handler_t *handler, int i) 107 qdio_handler_t *handler, int i)
108{ 108{
109 /* must be cleared by every qdio_establish */ 109 struct slib *slib = q->slib;
110 memset(q, 0, ((char *)&q->slib) - ((char *)q));
111 memset(q->slib, 0, PAGE_SIZE);
112 110
111 /* queue must be cleared for qdio_establish */
112 memset(q, 0, sizeof(*q));
113 memset(slib, 0, PAGE_SIZE);
114 q->slib = slib;
113 q->irq_ptr = irq_ptr; 115 q->irq_ptr = irq_ptr;
114 q->mask = 1 << (31 - i); 116 q->mask = 1 << (31 - i);
115 q->nr = i; 117 q->nr = i;
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index ce5f8910ff83..8daf1b99f153 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -95,7 +95,7 @@ void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
95 for_each_input_queue(irq_ptr, q, i) 95 for_each_input_queue(irq_ptr, q, i)
96 list_add_rcu(&q->entry, &tiq_list); 96 list_add_rcu(&q->entry, &tiq_list);
97 mutex_unlock(&tiq_list_lock); 97 mutex_unlock(&tiq_list_lock);
98 xchg(irq_ptr->dsci, 1); 98 xchg(irq_ptr->dsci, 1 << 7);
99} 99}
100 100
101void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr) 101void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
@@ -173,7 +173,7 @@ static void tiqdio_thinint_handler(void *ind, void *drv_data)
173 173
174 /* prevent racing */ 174 /* prevent racing */
175 if (*tiqdio_alsi) 175 if (*tiqdio_alsi)
176 xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 1); 176 xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 1 << 7);
177 } 177 }
178} 178}
179 179
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 304caf549973..41e0aaefafd5 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -302,7 +302,7 @@ static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
302static int zcrypt_open(struct inode *inode, struct file *filp) 302static int zcrypt_open(struct inode *inode, struct file *filp)
303{ 303{
304 atomic_inc(&zcrypt_open_count); 304 atomic_inc(&zcrypt_open_count);
305 return 0; 305 return nonseekable_open(inode, filp);
306} 306}
307 307
308/** 308/**
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 3ba738b2e271..28f71349fdec 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -1292,13 +1292,14 @@ int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
1292 QETH_QDIO_CLEANING)) { 1292 QETH_QDIO_CLEANING)) {
1293 case QETH_QDIO_ESTABLISHED: 1293 case QETH_QDIO_ESTABLISHED:
1294 if (card->info.type == QETH_CARD_TYPE_IQD) 1294 if (card->info.type == QETH_CARD_TYPE_IQD)
1295 rc = qdio_cleanup(CARD_DDEV(card), 1295 rc = qdio_shutdown(CARD_DDEV(card),
1296 QDIO_FLAG_CLEANUP_USING_HALT); 1296 QDIO_FLAG_CLEANUP_USING_HALT);
1297 else 1297 else
1298 rc = qdio_cleanup(CARD_DDEV(card), 1298 rc = qdio_shutdown(CARD_DDEV(card),
1299 QDIO_FLAG_CLEANUP_USING_CLEAR); 1299 QDIO_FLAG_CLEANUP_USING_CLEAR);
1300 if (rc) 1300 if (rc)
1301 QETH_DBF_TEXT_(TRACE, 3, "1err%d", rc); 1301 QETH_DBF_TEXT_(TRACE, 3, "1err%d", rc);
1302 qdio_free(CARD_DDEV(card));
1302 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED); 1303 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
1303 break; 1304 break;
1304 case QETH_QDIO_CLEANING: 1305 case QETH_QDIO_CLEANING:
@@ -3810,10 +3811,18 @@ static int qeth_qdio_establish(struct qeth_card *card)
3810 3811
3811 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED, 3812 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
3812 QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) { 3813 QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
3813 rc = qdio_initialize(&init_data); 3814 rc = qdio_allocate(&init_data);
3814 if (rc) 3815 if (rc) {
3816 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
3817 goto out;
3818 }
3819 rc = qdio_establish(&init_data);
3820 if (rc) {
3815 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED); 3821 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
3822 qdio_free(CARD_DDEV(card));
3823 }
3816 } 3824 }
3825out:
3817 kfree(out_sbal_ptrs); 3826 kfree(out_sbal_ptrs);
3818 kfree(in_sbal_ptrs); 3827 kfree(in_sbal_ptrs);
3819 kfree(qib_param_field); 3828 kfree(qib_param_field);
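
qdio_initialize() and qdio_cleanup() were removed from qdio_main.c above, so callers such as qeth now pair the primitives themselves. The replacement pattern is exactly what the removed wrappers used to do:

#include <asm/qdio.h>

/* Bring-up: allocate, then establish; free again if establish fails. */
static int example_qdio_bringup(struct qdio_initialize *init_data)
{
	int rc;

	rc = qdio_allocate(init_data);
	if (rc)
		return rc;
	rc = qdio_establish(init_data);
	if (rc)
		qdio_free(init_data->cdev);
	return rc;
}

/* Tear-down: shutdown (halt or clear), then free. */
static int example_qdio_teardown(struct ccw_device *cdev, int how)
{
	int rc = qdio_shutdown(cdev, how);

	qdio_free(cdev);
	return rc;
}
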
diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c
index 25d9e0ae9c57..1a2db0a35737 100644
--- a/drivers/s390/scsi/zfcp_cfdc.c
+++ b/drivers/s390/scsi/zfcp_cfdc.c
@@ -254,6 +254,7 @@ static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command,
254} 254}
255 255
256static const struct file_operations zfcp_cfdc_fops = { 256static const struct file_operations zfcp_cfdc_fops = {
257 .open = nonseekable_open,
257 .unlocked_ioctl = zfcp_cfdc_dev_ioctl, 258 .unlocked_ioctl = zfcp_cfdc_dev_ioctl,
258#ifdef CONFIG_COMPAT 259#ifdef CONFIG_COMPAT
259 .compat_ioctl = zfcp_cfdc_dev_ioctl 260 .compat_ioctl = zfcp_cfdc_dev_ioctl
diff --git a/drivers/sbus/char/flash.c b/drivers/sbus/char/flash.c
index 19f255b97c86..d3b62eb0fba7 100644
--- a/drivers/sbus/char/flash.c
+++ b/drivers/sbus/char/flash.c
@@ -105,9 +105,9 @@ static ssize_t
105flash_read(struct file * file, char __user * buf, 105flash_read(struct file * file, char __user * buf,
106 size_t count, loff_t *ppos) 106 size_t count, loff_t *ppos)
107{ 107{
108 unsigned long p = file->f_pos; 108 loff_t p = *ppos;
109 int i; 109 int i;
110 110
111 if (count > flash.read_size - p) 111 if (count > flash.read_size - p)
112 count = flash.read_size - p; 112 count = flash.read_size - p;
113 113
@@ -118,7 +118,7 @@ flash_read(struct file * file, char __user * buf,
118 buf++; 118 buf++;
119 } 119 }
120 120
121 file->f_pos += count; 121 *ppos += count;
122 return count; 122 return count;
123} 123}
124 124
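
The flash_read() fix above switches from file->f_pos to the loff_t *ppos argument, which is the offset the VFS actually passes to ->read() and avoids truncating the 64-bit position into an unsigned long. A generic read() following the same pattern; EXAMPLE_SIZE and example_data are placeholders for this sketch:

#include <linux/fs.h>
#include <linux/uaccess.h>

#define EXAMPLE_SIZE 4096
static char example_data[EXAMPLE_SIZE];	/* placeholder backing store */

static ssize_t example_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	loff_t p = *ppos;

	if (p >= EXAMPLE_SIZE)
		return 0;
	if (count > EXAMPLE_SIZE - p)
		count = EXAMPLE_SIZE - p;
	if (copy_to_user(buf, example_data + p, count))
		return -EFAULT;
	*ppos += count;		/* advance the caller's offset, not file->f_pos */
	return count;
}
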
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 9201afe65609..7f87979da22d 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -4724,6 +4724,10 @@ static ushort AscInitMicroCodeVar(ASC_DVC_VAR *asc_dvc)
4724 BUG_ON((unsigned long)asc_dvc->overrun_buf & 7); 4724 BUG_ON((unsigned long)asc_dvc->overrun_buf & 7);
4725 asc_dvc->overrun_dma = dma_map_single(board->dev, asc_dvc->overrun_buf, 4725 asc_dvc->overrun_dma = dma_map_single(board->dev, asc_dvc->overrun_buf,
4726 ASC_OVERRUN_BSIZE, DMA_FROM_DEVICE); 4726 ASC_OVERRUN_BSIZE, DMA_FROM_DEVICE);
4727 if (dma_mapping_error(board->dev, asc_dvc->overrun_dma)) {
4728 warn_code = -ENOMEM;
4729 goto err_dma_map;
4730 }
4727 phy_addr = cpu_to_le32(asc_dvc->overrun_dma); 4731 phy_addr = cpu_to_le32(asc_dvc->overrun_dma);
4728 AscMemDWordCopyPtrToLram(iop_base, ASCV_OVERRUN_PADDR_D, 4732 AscMemDWordCopyPtrToLram(iop_base, ASCV_OVERRUN_PADDR_D,
4729 (uchar *)&phy_addr, 1); 4733 (uchar *)&phy_addr, 1);
@@ -4739,14 +4743,23 @@ static ushort AscInitMicroCodeVar(ASC_DVC_VAR *asc_dvc)
4739 AscSetPCAddr(iop_base, ASC_MCODE_START_ADDR); 4743 AscSetPCAddr(iop_base, ASC_MCODE_START_ADDR);
4740 if (AscGetPCAddr(iop_base) != ASC_MCODE_START_ADDR) { 4744 if (AscGetPCAddr(iop_base) != ASC_MCODE_START_ADDR) {
4741 asc_dvc->err_code |= ASC_IERR_SET_PC_ADDR; 4745 asc_dvc->err_code |= ASC_IERR_SET_PC_ADDR;
4742 return warn_code; 4746 warn_code = UW_ERR;
4747 goto err_mcode_start;
4743 } 4748 }
4744 if (AscStartChip(iop_base) != 1) { 4749 if (AscStartChip(iop_base) != 1) {
4745 asc_dvc->err_code |= ASC_IERR_START_STOP_CHIP; 4750 asc_dvc->err_code |= ASC_IERR_START_STOP_CHIP;
4746 return warn_code; 4751 warn_code = UW_ERR;
4752 goto err_mcode_start;
4747 } 4753 }
4748 4754
4749 return warn_code; 4755 return warn_code;
4756
4757err_mcode_start:
4758 dma_unmap_single(board->dev, asc_dvc->overrun_dma,
4759 ASC_OVERRUN_BSIZE, DMA_FROM_DEVICE);
4760err_dma_map:
4761 asc_dvc->overrun_dma = 0;
4762 return warn_code;
4750} 4763}
4751 4764
4752static ushort AscInitAsc1000Driver(ASC_DVC_VAR *asc_dvc) 4765static ushort AscInitAsc1000Driver(ASC_DVC_VAR *asc_dvc)
@@ -4802,6 +4815,8 @@ static ushort AscInitAsc1000Driver(ASC_DVC_VAR *asc_dvc)
4802 } 4815 }
4803 release_firmware(fw); 4816 release_firmware(fw);
4804 warn_code |= AscInitMicroCodeVar(asc_dvc); 4817 warn_code |= AscInitMicroCodeVar(asc_dvc);
4818 if (!asc_dvc->overrun_dma)
4819 return warn_code;
4805 asc_dvc->init_state |= ASC_INIT_STATE_END_LOAD_MC; 4820 asc_dvc->init_state |= ASC_INIT_STATE_END_LOAD_MC;
4806 AscEnableInterrupt(iop_base); 4821 AscEnableInterrupt(iop_base);
4807 return warn_code; 4822 return warn_code;
@@ -7978,9 +7993,10 @@ static int advansys_reset(struct scsi_cmnd *scp)
7978 status = AscInitAsc1000Driver(asc_dvc); 7993 status = AscInitAsc1000Driver(asc_dvc);
7979 7994
7980 /* Refer to ASC_IERR_* definitions for meaning of 'err_code'. */ 7995 /* Refer to ASC_IERR_* definitions for meaning of 'err_code'. */
7981 if (asc_dvc->err_code) { 7996 if (asc_dvc->err_code || !asc_dvc->overrun_dma) {
7982 scmd_printk(KERN_INFO, scp, "SCSI bus reset error: " 7997 scmd_printk(KERN_INFO, scp, "SCSI bus reset error: "
7983 "0x%x\n", asc_dvc->err_code); 7998 "0x%x, status: 0x%x\n", asc_dvc->err_code,
7999 status);
7984 ret = FAILED; 8000 ret = FAILED;
7985 } else if (status) { 8001 } else if (status) {
7986 scmd_printk(KERN_INFO, scp, "SCSI bus reset warning: " 8002 scmd_printk(KERN_INFO, scp, "SCSI bus reset warning: "
@@ -12311,7 +12327,7 @@ static int __devinit advansys_board_found(struct Scsi_Host *shost,
12311 asc_dvc_varp->overrun_buf = kzalloc(ASC_OVERRUN_BSIZE, GFP_KERNEL); 12327 asc_dvc_varp->overrun_buf = kzalloc(ASC_OVERRUN_BSIZE, GFP_KERNEL);
12312 if (!asc_dvc_varp->overrun_buf) { 12328 if (!asc_dvc_varp->overrun_buf) {
12313 ret = -ENOMEM; 12329 ret = -ENOMEM;
12314 goto err_free_wide_mem; 12330 goto err_free_irq;
12315 } 12331 }
12316 warn_code = AscInitAsc1000Driver(asc_dvc_varp); 12332 warn_code = AscInitAsc1000Driver(asc_dvc_varp);
12317 12333
@@ -12320,30 +12336,36 @@ static int __devinit advansys_board_found(struct Scsi_Host *shost,
12320 "warn 0x%x, error 0x%x\n", 12336 "warn 0x%x, error 0x%x\n",
12321 asc_dvc_varp->init_state, warn_code, 12337 asc_dvc_varp->init_state, warn_code,
12322 asc_dvc_varp->err_code); 12338 asc_dvc_varp->err_code);
12323 if (asc_dvc_varp->err_code) { 12339 if (!asc_dvc_varp->overrun_dma) {
12324 ret = -ENODEV; 12340 ret = -ENODEV;
12325 kfree(asc_dvc_varp->overrun_buf); 12341 goto err_free_mem;
12326 } 12342 }
12327 } 12343 }
12328 } else { 12344 } else {
12329 if (advansys_wide_init_chip(shost)) 12345 if (advansys_wide_init_chip(shost)) {
12330 ret = -ENODEV; 12346 ret = -ENODEV;
12347 goto err_free_mem;
12348 }
12331 } 12349 }
12332 12350
12333 if (ret)
12334 goto err_free_wide_mem;
12335
12336 ASC_DBG_PRT_SCSI_HOST(2, shost); 12351 ASC_DBG_PRT_SCSI_HOST(2, shost);
12337 12352
12338 ret = scsi_add_host(shost, boardp->dev); 12353 ret = scsi_add_host(shost, boardp->dev);
12339 if (ret) 12354 if (ret)
12340 goto err_free_wide_mem; 12355 goto err_free_mem;
12341 12356
12342 scsi_scan_host(shost); 12357 scsi_scan_host(shost);
12343 return 0; 12358 return 0;
12344 12359
12345 err_free_wide_mem: 12360 err_free_mem:
12346 advansys_wide_free_mem(boardp); 12361 if (ASC_NARROW_BOARD(boardp)) {
12362 if (asc_dvc_varp->overrun_dma)
12363 dma_unmap_single(boardp->dev, asc_dvc_varp->overrun_dma,
12364 ASC_OVERRUN_BSIZE, DMA_FROM_DEVICE);
12365 kfree(asc_dvc_varp->overrun_buf);
12366 } else
12367 advansys_wide_free_mem(boardp);
12368 err_free_irq:
12347 free_irq(boardp->irq, shost); 12369 free_irq(boardp->irq, shost);
12348 err_free_dma: 12370 err_free_dma:
12349#ifdef CONFIG_ISA 12371#ifdef CONFIG_ISA
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 6d5ae4474bb3..633e09036357 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -471,12 +471,12 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
471 471
472 WARN_ON(hdrlength >= 256); 472 WARN_ON(hdrlength >= 256);
473 hdr->hlength = hdrlength & 0xFF; 473 hdr->hlength = hdrlength & 0xFF;
474 hdr->cmdsn = task->cmdsn = cpu_to_be32(session->cmdsn);
474 475
475 if (session->tt->init_task && session->tt->init_task(task)) 476 if (session->tt->init_task && session->tt->init_task(task))
476 return -EIO; 477 return -EIO;
477 478
478 task->state = ISCSI_TASK_RUNNING; 479 task->state = ISCSI_TASK_RUNNING;
479 hdr->cmdsn = task->cmdsn = cpu_to_be32(session->cmdsn);
480 session->cmdsn++; 480 session->cmdsn++;
481 481
482 conn->scsicmd_pdus_cnt++; 482 conn->scsicmd_pdus_cnt++;
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index b00efd19aadb..88f744672576 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -395,11 +395,15 @@ int sas_ata_init_host_and_port(struct domain_device *found_dev,
395void sas_ata_task_abort(struct sas_task *task) 395void sas_ata_task_abort(struct sas_task *task)
396{ 396{
397 struct ata_queued_cmd *qc = task->uldd_task; 397 struct ata_queued_cmd *qc = task->uldd_task;
398 struct request_queue *q = qc->scsicmd->device->request_queue;
398 struct completion *waiting; 399 struct completion *waiting;
400 unsigned long flags;
399 401
400 /* Bounce SCSI-initiated commands to the SCSI EH */ 402 /* Bounce SCSI-initiated commands to the SCSI EH */
401 if (qc->scsicmd) { 403 if (qc->scsicmd) {
404 spin_lock_irqsave(q->queue_lock, flags);
402 blk_abort_request(qc->scsicmd->request); 405 blk_abort_request(qc->scsicmd->request);
406 spin_unlock_irqrestore(q->queue_lock, flags);
403 scsi_schedule_eh(qc->scsicmd->device->host); 407 scsi_schedule_eh(qc->scsicmd->device->host);
404 return; 408 return;
405 } 409 }
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 2660e1b4569a..822835055cef 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -1030,6 +1030,8 @@ int __sas_task_abort(struct sas_task *task)
1030void sas_task_abort(struct sas_task *task) 1030void sas_task_abort(struct sas_task *task)
1031{ 1031{
1032 struct scsi_cmnd *sc = task->uldd_task; 1032 struct scsi_cmnd *sc = task->uldd_task;
1033 struct request_queue *q = sc->device->request_queue;
1034 unsigned long flags;
1033 1035
1034 /* Escape for libsas internal commands */ 1036 /* Escape for libsas internal commands */
1035 if (!sc) { 1037 if (!sc) {
@@ -1044,7 +1046,9 @@ void sas_task_abort(struct sas_task *task)
1044 return; 1046 return;
1045 } 1047 }
1046 1048
1049 spin_lock_irqsave(q->queue_lock, flags);
1047 blk_abort_request(sc->request); 1050 blk_abort_request(sc->request);
1051 spin_unlock_irqrestore(q->queue_lock, flags);
1048 scsi_schedule_eh(sc->device->host); 1052 scsi_schedule_eh(sc->device->host);
1049} 1053}
1050 1054
diff --git a/drivers/scsi/pcmcia/aha152x_stub.c b/drivers/scsi/pcmcia/aha152x_stub.c
index 528733b4a392..9d70aef99227 100644
--- a/drivers/scsi/pcmcia/aha152x_stub.c
+++ b/drivers/scsi/pcmcia/aha152x_stub.c
@@ -80,7 +80,6 @@ MODULE_LICENSE("Dual MPL/GPL");
80 80
81typedef struct scsi_info_t { 81typedef struct scsi_info_t {
82 struct pcmcia_device *p_dev; 82 struct pcmcia_device *p_dev;
83 dev_node_t node;
84 struct Scsi_Host *host; 83 struct Scsi_Host *host;
85} scsi_info_t; 84} scsi_info_t;
86 85
@@ -105,7 +104,6 @@ static int aha152x_probe(struct pcmcia_device *link)
105 link->io.NumPorts1 = 0x20; 104 link->io.NumPorts1 = 0x20;
106 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; 105 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
107 link->io.IOAddrLines = 10; 106 link->io.IOAddrLines = 10;
108 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
109 link->conf.Attributes = CONF_ENABLE_IRQ; 107 link->conf.Attributes = CONF_ENABLE_IRQ;
110 link->conf.IntType = INT_MEMORY_AND_IO; 108 link->conf.IntType = INT_MEMORY_AND_IO;
111 link->conf.Present = PRESENT_OPTION; 109 link->conf.Present = PRESENT_OPTION;
@@ -160,8 +158,7 @@ static int aha152x_config_cs(struct pcmcia_device *link)
160 if (ret) 158 if (ret)
161 goto failed; 159 goto failed;
162 160
163 ret = pcmcia_request_irq(link, &link->irq); 161 if (!link->irq)
164 if (ret)
165 goto failed; 162 goto failed;
166 163
167 ret = pcmcia_request_configuration(link, &link->conf); 164 ret = pcmcia_request_configuration(link, &link->conf);
@@ -172,7 +169,7 @@ static int aha152x_config_cs(struct pcmcia_device *link)
172 memset(&s, 0, sizeof(s)); 169 memset(&s, 0, sizeof(s));
173 s.conf = "PCMCIA setup"; 170 s.conf = "PCMCIA setup";
174 s.io_port = link->io.BasePort1; 171 s.io_port = link->io.BasePort1;
175 s.irq = link->irq.AssignedIRQ; 172 s.irq = link->irq;
176 s.scsiid = host_id; 173 s.scsiid = host_id;
177 s.reconnect = reconnect; 174 s.reconnect = reconnect;
178 s.parity = parity; 175 s.parity = parity;
@@ -187,8 +184,6 @@ static int aha152x_config_cs(struct pcmcia_device *link)
187 goto failed; 184 goto failed;
188 } 185 }
189 186
190 sprintf(info->node.dev_name, "scsi%d", host->host_no);
191 link->dev_node = &info->node;
192 info->host = host; 187 info->host = host;
193 188
194 return 0; 189 return 0;
diff --git a/drivers/scsi/pcmcia/fdomain_stub.c b/drivers/scsi/pcmcia/fdomain_stub.c
index 914040684079..21b141151dfc 100644
--- a/drivers/scsi/pcmcia/fdomain_stub.c
+++ b/drivers/scsi/pcmcia/fdomain_stub.c
@@ -63,7 +63,6 @@ MODULE_LICENSE("Dual MPL/GPL");
63 63
64typedef struct scsi_info_t { 64typedef struct scsi_info_t {
65 struct pcmcia_device *p_dev; 65 struct pcmcia_device *p_dev;
66 dev_node_t node;
67 struct Scsi_Host *host; 66 struct Scsi_Host *host;
68} scsi_info_t; 67} scsi_info_t;
69 68
@@ -88,7 +87,6 @@ static int fdomain_probe(struct pcmcia_device *link)
88 link->io.NumPorts1 = 0x10; 87 link->io.NumPorts1 = 0x10;
89 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; 88 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
90 link->io.IOAddrLines = 10; 89 link->io.IOAddrLines = 10;
91 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
92 link->conf.Attributes = CONF_ENABLE_IRQ; 90 link->conf.Attributes = CONF_ENABLE_IRQ;
93 link->conf.IntType = INT_MEMORY_AND_IO; 91 link->conf.IntType = INT_MEMORY_AND_IO;
94 link->conf.Present = PRESENT_OPTION; 92 link->conf.Present = PRESENT_OPTION;
@@ -133,8 +131,7 @@ static int fdomain_config(struct pcmcia_device *link)
133 if (ret) 131 if (ret)
134 goto failed; 132 goto failed;
135 133
136 ret = pcmcia_request_irq(link, &link->irq); 134 if (!link->irq)
137 if (ret)
138 goto failed; 135 goto failed;
139 ret = pcmcia_request_configuration(link, &link->conf); 136 ret = pcmcia_request_configuration(link, &link->conf);
140 if (ret) 137 if (ret)
@@ -144,7 +141,7 @@ static int fdomain_config(struct pcmcia_device *link)
144 release_region(link->io.BasePort1, link->io.NumPorts1); 141 release_region(link->io.BasePort1, link->io.NumPorts1);
145 142
146 /* Set configuration options for the fdomain driver */ 143 /* Set configuration options for the fdomain driver */
147 sprintf(str, "%d,%d", link->io.BasePort1, link->irq.AssignedIRQ); 144 sprintf(str, "%d,%d", link->io.BasePort1, link->irq);
148 fdomain_setup(str); 145 fdomain_setup(str);
149 146
150 host = __fdomain_16x0_detect(&fdomain_driver_template); 147 host = __fdomain_16x0_detect(&fdomain_driver_template);
@@ -157,8 +154,6 @@ static int fdomain_config(struct pcmcia_device *link)
157 goto failed; 154 goto failed;
158 scsi_scan_host(host); 155 scsi_scan_host(host);
159 156
160 sprintf(info->node.dev_name, "scsi%d", host->host_no);
161 link->dev_node = &info->node;
162 info->host = host; 157 info->host = host;
163 158
164 return 0; 159 return 0;
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index 021246454872..0f0e112c3f8e 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -1563,13 +1563,6 @@ static int nsp_cs_probe(struct pcmcia_device *link)
1563 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; 1563 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
1564 link->io.IOAddrLines = 10; /* not used */ 1564 link->io.IOAddrLines = 10; /* not used */
1565 1565
1566 /* Interrupt setup */
1567 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
1568
1569 /* Interrupt handler */
1570 link->irq.Handler = &nspintr;
1571 link->irq.Attributes |= IRQF_SHARED;
1572
1573 /* General socket configuration */ 1566 /* General socket configuration */
1574 link->conf.Attributes = CONF_ENABLE_IRQ; 1567 link->conf.Attributes = CONF_ENABLE_IRQ;
1575 link->conf.IntType = INT_MEMORY_AND_IO; 1568 link->conf.IntType = INT_MEMORY_AND_IO;
@@ -1646,8 +1639,7 @@ static int nsp_cs_config_check(struct pcmcia_device *p_dev,
1646 } 1639 }
1647 1640
1648 /* Do we need to allocate an interrupt? */ 1641 /* Do we need to allocate an interrupt? */
1649 if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1) 1642 p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
1650 p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
1651 1643
1652 /* IO window settings */ 1644 /* IO window settings */
1653 p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0; 1645 p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
@@ -1720,10 +1712,8 @@ static int nsp_cs_config(struct pcmcia_device *link)
1720 if (ret) 1712 if (ret)
1721 goto cs_failed; 1713 goto cs_failed;
1722 1714
1723 if (link->conf.Attributes & CONF_ENABLE_IRQ) { 1715 if (pcmcia_request_irq(link, nspintr))
1724 if (pcmcia_request_irq(link, &link->irq)) 1716 goto cs_failed;
1725 goto cs_failed;
1726 }
1727 1717
1728 ret = pcmcia_request_configuration(link, &link->conf); 1718 ret = pcmcia_request_configuration(link, &link->conf);
1729 if (ret) 1719 if (ret)
@@ -1741,7 +1731,7 @@ static int nsp_cs_config(struct pcmcia_device *link)
1741 /* Set port and IRQ */ 1731 /* Set port and IRQ */
1742 data->BaseAddress = link->io.BasePort1; 1732 data->BaseAddress = link->io.BasePort1;
1743 data->NumAddress = link->io.NumPorts1; 1733 data->NumAddress = link->io.NumPorts1;
1744 data->IrqNumber = link->irq.AssignedIRQ; 1734 data->IrqNumber = link->irq;
1745 1735
1746 nsp_dbg(NSP_DEBUG_INIT, "I/O[0x%x+0x%x] IRQ %d", 1736 nsp_dbg(NSP_DEBUG_INIT, "I/O[0x%x+0x%x] IRQ %d",
1747 data->BaseAddress, data->NumAddress, data->IrqNumber); 1737 data->BaseAddress, data->NumAddress, data->IrqNumber);
@@ -1764,8 +1754,6 @@ static int nsp_cs_config(struct pcmcia_device *link)
1764 1754
1765 scsi_scan_host(host); 1755 scsi_scan_host(host);
1766 1756
1767 snprintf(info->node.dev_name, sizeof(info->node.dev_name), "scsi%d", host->host_no);
1768 link->dev_node = &info->node;
1769 info->host = host; 1757 info->host = host;
1770 1758
1771 /* Finally, report what we've done */ 1759 /* Finally, report what we've done */
@@ -1775,7 +1763,7 @@ static int nsp_cs_config(struct pcmcia_device *link)
1775 printk(", Vpp %d.%d", link->conf.Vpp/10, link->conf.Vpp%10); 1763 printk(", Vpp %d.%d", link->conf.Vpp/10, link->conf.Vpp%10);
1776 } 1764 }
1777 if (link->conf.Attributes & CONF_ENABLE_IRQ) { 1765 if (link->conf.Attributes & CONF_ENABLE_IRQ) {
1778 printk(", irq %d", link->irq.AssignedIRQ); 1766 printk(", irq %d", link->irq);
1779 } 1767 }
1780 if (link->io.NumPorts1) { 1768 if (link->io.NumPorts1) {
1781 printk(", io 0x%04x-0x%04x", link->io.BasePort1, 1769 printk(", io 0x%04x-0x%04x", link->io.BasePort1,
@@ -1823,7 +1811,6 @@ static void nsp_cs_release(struct pcmcia_device *link)
1823 if (info->host != NULL) { 1811 if (info->host != NULL) {
1824 scsi_remove_host(info->host); 1812 scsi_remove_host(info->host);
1825 } 1813 }
1826 link->dev_node = NULL;
1827 1814
1828 if (link->win) { 1815 if (link->win) {
1829 if (data != NULL) { 1816 if (data != NULL) {
diff --git a/drivers/scsi/pcmcia/nsp_cs.h b/drivers/scsi/pcmcia/nsp_cs.h
index 8c61a4fe1db9..d68c9f267c5e 100644
--- a/drivers/scsi/pcmcia/nsp_cs.h
+++ b/drivers/scsi/pcmcia/nsp_cs.h
@@ -224,7 +224,6 @@
224typedef struct scsi_info_t { 224typedef struct scsi_info_t {
225 struct pcmcia_device *p_dev; 225 struct pcmcia_device *p_dev;
226 struct Scsi_Host *host; 226 struct Scsi_Host *host;
227 dev_node_t node;
228 int stop; 227 int stop;
229} scsi_info_t; 228} scsi_info_t;
230 229
diff --git a/drivers/scsi/pcmcia/qlogic_stub.c b/drivers/scsi/pcmcia/qlogic_stub.c
index f85f094870b4..f0fc6baed9fc 100644
--- a/drivers/scsi/pcmcia/qlogic_stub.c
+++ b/drivers/scsi/pcmcia/qlogic_stub.c
@@ -82,7 +82,6 @@ static struct scsi_host_template qlogicfas_driver_template = {
82 82
83typedef struct scsi_info_t { 83typedef struct scsi_info_t {
84 struct pcmcia_device *p_dev; 84 struct pcmcia_device *p_dev;
85 dev_node_t node;
86 struct Scsi_Host *host; 85 struct Scsi_Host *host;
87 unsigned short manf_id; 86 unsigned short manf_id;
88} scsi_info_t; 87} scsi_info_t;
@@ -161,7 +160,6 @@ static int qlogic_probe(struct pcmcia_device *link)
161 link->io.NumPorts1 = 16; 160 link->io.NumPorts1 = 16;
162 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; 161 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
163 link->io.IOAddrLines = 10; 162 link->io.IOAddrLines = 10;
164 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
165 link->conf.Attributes = CONF_ENABLE_IRQ; 163 link->conf.Attributes = CONF_ENABLE_IRQ;
166 link->conf.IntType = INT_MEMORY_AND_IO; 164 link->conf.IntType = INT_MEMORY_AND_IO;
167 link->conf.Present = PRESENT_OPTION; 165 link->conf.Present = PRESENT_OPTION;
@@ -209,8 +207,7 @@ static int qlogic_config(struct pcmcia_device * link)
209 if (ret) 207 if (ret)
210 goto failed; 208 goto failed;
211 209
212 ret = pcmcia_request_irq(link, &link->irq); 210 if (!link->irq)
213 if (ret)
214 goto failed; 211 goto failed;
215 212
216 ret = pcmcia_request_configuration(link, &link->conf); 213 ret = pcmcia_request_configuration(link, &link->conf);
@@ -227,18 +224,16 @@ static int qlogic_config(struct pcmcia_device * link)
227 /* The KXL-810AN has a bigger IO port window */ 224 /* The KXL-810AN has a bigger IO port window */
228 if (link->io.NumPorts1 == 32) 225 if (link->io.NumPorts1 == 32)
229 host = qlogic_detect(&qlogicfas_driver_template, link, 226 host = qlogic_detect(&qlogicfas_driver_template, link,
230 link->io.BasePort1 + 16, link->irq.AssignedIRQ); 227 link->io.BasePort1 + 16, link->irq);
231 else 228 else
232 host = qlogic_detect(&qlogicfas_driver_template, link, 229 host = qlogic_detect(&qlogicfas_driver_template, link,
233 link->io.BasePort1, link->irq.AssignedIRQ); 230 link->io.BasePort1, link->irq);
234 231
235 if (!host) { 232 if (!host) {
236 printk(KERN_INFO "%s: no SCSI devices found\n", qlogic_name); 233 printk(KERN_INFO "%s: no SCSI devices found\n", qlogic_name);
237 goto failed; 234 goto failed;
238 } 235 }
239 236
240 sprintf(info->node.dev_name, "scsi%d", host->host_no);
241 link->dev_node = &info->node;
242 info->host = host; 237 info->host = host;
243 238
244 return 0; 239 return 0;
@@ -258,7 +253,7 @@ static void qlogic_release(struct pcmcia_device *link)
258 253
259 scsi_remove_host(info->host); 254 scsi_remove_host(info->host);
260 255
261 free_irq(link->irq.AssignedIRQ, info->host); 256 free_irq(link->irq, info->host);
262 pcmcia_disable_device(link); 257 pcmcia_disable_device(link);
263 258
264 scsi_host_put(info->host); 259 scsi_host_put(info->host);
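
Note on the qlogic conversion above: the hunks drop the explicit pcmcia_request_irq()/IRQ_TYPE_EXCLUSIVE setup and the dev_node bookkeeping, and instead consume the IRQ the PCMCIA core now pre-assigns in link->irq. A minimal sketch of the resulting config-time pattern, reusing qlogic_detect() and scsi_info_t from the diff; the includes, the failure cleanup and the error codes are assumptions for illustration, not the exact driver code:

/* assumes the usual <pcmcia/cs.h>/<pcmcia/ds.h> includes and the driver's
 * own declarations (qlogic_detect, qlogicfas_driver_template, scsi_info_t) */
static int example_qlogic_config(struct pcmcia_device *link)
{
	struct scsi_info_t *info = link->priv;
	struct Scsi_Host *host;
	int ret;

	if (!link->irq)			/* the core could not assign an IRQ */
		return -ENODEV;

	ret = pcmcia_request_configuration(link, &link->conf);
	if (ret)
		return ret;

	/* link->irq is a plain integer now; pass it straight through */
	host = qlogic_detect(&qlogicfas_driver_template, link,
			     link->io.BasePort1, link->irq);
	if (!host) {
		pcmcia_disable_device(link);
		return -ENODEV;
	}

	info->host = host;
	return 0;
}
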
diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c
index e7564d8f0cbf..a51164171179 100644
--- a/drivers/scsi/pcmcia/sym53c500_cs.c
+++ b/drivers/scsi/pcmcia/sym53c500_cs.c
@@ -191,7 +191,6 @@
191 191
192struct scsi_info_t { 192struct scsi_info_t {
193 struct pcmcia_device *p_dev; 193 struct pcmcia_device *p_dev;
194 dev_node_t node;
195 struct Scsi_Host *host; 194 struct Scsi_Host *host;
196 unsigned short manf_id; 195 unsigned short manf_id;
197}; 196};
@@ -719,8 +718,7 @@ SYM53C500_config(struct pcmcia_device *link)
719 if (ret) 718 if (ret)
720 goto failed; 719 goto failed;
721 720
722 ret = pcmcia_request_irq(link, &link->irq); 721 if (!link->irq)
723 if (ret)
724 goto failed; 722 goto failed;
725 723
726 ret = pcmcia_request_configuration(link, &link->conf); 724 ret = pcmcia_request_configuration(link, &link->conf);
@@ -752,7 +750,7 @@ SYM53C500_config(struct pcmcia_device *link)
752 * 0x320, 0x330, 0x340, 0x350 750 * 0x320, 0x330, 0x340, 0x350
753 */ 751 */
754 port_base = link->io.BasePort1; 752 port_base = link->io.BasePort1;
755 irq_level = link->irq.AssignedIRQ; 753 irq_level = link->irq;
756 754
757 DEB(printk("SYM53C500: port_base=0x%x, irq=%d, fast_pio=%d\n", 755 DEB(printk("SYM53C500: port_base=0x%x, irq=%d, fast_pio=%d\n",
758 port_base, irq_level, USE_FAST_PIO);) 756 port_base, irq_level, USE_FAST_PIO);)
@@ -793,8 +791,6 @@ SYM53C500_config(struct pcmcia_device *link)
793 */ 791 */
794 data->fast_pio = USE_FAST_PIO; 792 data->fast_pio = USE_FAST_PIO;
795 793
796 sprintf(info->node.dev_name, "scsi%d", host->host_no);
797 link->dev_node = &info->node;
798 info->host = host; 794 info->host = host;
799 795
800 if (scsi_add_host(host, NULL)) 796 if (scsi_add_host(host, NULL))
@@ -866,7 +862,6 @@ SYM53C500_probe(struct pcmcia_device *link)
866 link->io.NumPorts1 = 16; 862 link->io.NumPorts1 = 16;
867 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; 863 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
868 link->io.IOAddrLines = 10; 864 link->io.IOAddrLines = 10;
869 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
870 link->conf.Attributes = CONF_ENABLE_IRQ; 865 link->conf.Attributes = CONF_ENABLE_IRQ;
871 link->conf.IntType = INT_MEMORY_AND_IO; 866 link->conf.IntType = INT_MEMORY_AND_IO;
872 867
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 09d6d4b76f39..caeb7d10ae04 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -467,7 +467,7 @@ int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha,
467 if (conn_err_detail) 467 if (conn_err_detail)
468 *conn_err_detail = mbox_sts[5]; 468 *conn_err_detail = mbox_sts[5];
469 if (tcp_source_port_num) 469 if (tcp_source_port_num)
470 *tcp_source_port_num = (uint16_t) mbox_sts[6] >> 16; 470 *tcp_source_port_num = (uint16_t) (mbox_sts[6] >> 16);
471 if (connection_id) 471 if (connection_id)
472 *connection_id = (uint16_t) mbox_sts[6] & 0x00FF; 472 *connection_id = (uint16_t) mbox_sts[6] & 0x00FF;
473 status = QLA_SUCCESS; 473 status = QLA_SUCCESS;
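
Note on the qla4xxx change above: it is an operator-precedence fix. A cast binds more tightly than a shift, so the old "(uint16_t) mbox_sts[6] >> 16" truncated the mailbox word to 16 bits first and then shifted the result away, always yielding 0 for the TCP source port. A self-contained illustration with a made-up mailbox value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mbox = 0x1234abcd;	/* pretend the port lives in the upper 16 bits */
	uint16_t wrong = (uint16_t) mbox >> 16;	  /* cast first: 0xabcd >> 16 == 0 */
	uint16_t right = (uint16_t) (mbox >> 16); /* shift first: 0x1234 */

	printf("wrong=0x%04x right=0x%04x\n", wrong, right);
	return 0;
}
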
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 3e10c306de94..3a5bfd10b2cb 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -957,7 +957,8 @@ static int resp_start_stop(struct scsi_cmnd * scp,
957static sector_t get_sdebug_capacity(void) 957static sector_t get_sdebug_capacity(void)
958{ 958{
959 if (scsi_debug_virtual_gb > 0) 959 if (scsi_debug_virtual_gb > 0)
960 return 2048 * 1024 * (sector_t)scsi_debug_virtual_gb; 960 return (sector_t)scsi_debug_virtual_gb *
961 (1073741824 / scsi_debug_sector_size);
961 else 962 else
962 return sdebug_store_sectors; 963 return sdebug_store_sectors;
963} 964}
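
Note on the scsi_debug change above: the old "2048 * 1024" constant is the number of 512-byte sectors in 1 GiB, so the virtual capacity was wrong for any other emulated sector size. The new expression derives sectors-per-GiB from scsi_debug_sector_size (1073741824 == 2^30 bytes). A quick arithmetic check, with the module parameters replaced by plain variables:

#include <stdio.h>

int main(void)
{
	unsigned long long virtual_gb = 4;
	unsigned int sizes[] = { 512, 4096 };

	for (int i = 0; i < 2; i++) {
		unsigned long long sectors =
			virtual_gb * (1073741824ULL / sizes[i]);
		printf("%4u-byte sectors: %llu sectors = %llu GiB\n",
		       sizes[i], sectors, sectors * sizes[i] >> 30);
	}
	return 0;
}
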
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index d45c69ca5737..7ad53fa42766 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -302,7 +302,20 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
302 if (scmd->device->allow_restart && 302 if (scmd->device->allow_restart &&
303 (sshdr.asc == 0x04) && (sshdr.ascq == 0x02)) 303 (sshdr.asc == 0x04) && (sshdr.ascq == 0x02))
304 return FAILED; 304 return FAILED;
305 return SUCCESS; 305
306 if (blk_barrier_rq(scmd->request))
307 /*
308 * barrier requests should always retry on UA
309 * otherwise block will get a spurious error
310 */
311 return NEEDS_RETRY;
312 else
313 /*
314 * for normal (non barrier) commands, pass the
315 * UA upwards for a determination in the
316 * completion functions
317 */
318 return SUCCESS;
306 319
307 /* these three are not supported */ 320 /* these three are not supported */
308 case COPY_ABORTED: 321 case COPY_ABORTED:
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 8b827f37b03e..de6c60320f6f 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1040,6 +1040,7 @@ static void sd_prepare_flush(struct request_queue *q, struct request *rq)
1040{ 1040{
1041 rq->cmd_type = REQ_TYPE_BLOCK_PC; 1041 rq->cmd_type = REQ_TYPE_BLOCK_PC;
1042 rq->timeout = SD_TIMEOUT; 1042 rq->timeout = SD_TIMEOUT;
1043 rq->retries = SD_MAX_RETRIES;
1043 rq->cmd[0] = SYNCHRONIZE_CACHE; 1044 rq->cmd[0] = SYNCHRONIZE_CACHE;
1044 rq->cmd_len = 10; 1045 rq->cmd_len = 10;
1045} 1046}
diff --git a/drivers/scsi/zorro7xx.c b/drivers/scsi/zorro7xx.c
index 105449c15fa9..e17764d71476 100644
--- a/drivers/scsi/zorro7xx.c
+++ b/drivers/scsi/zorro7xx.c
@@ -69,6 +69,7 @@ static struct zorro_device_id zorro7xx_zorro_tbl[] __devinitdata = {
69 }, 69 },
70 { 0 } 70 { 0 }
71}; 71};
72MODULE_DEVICE_TABLE(zorro, zorro7xx_zorro_tbl);
72 73
73static int __devinit zorro7xx_init_one(struct zorro_dev *z, 74static int __devinit zorro7xx_init_one(struct zorro_dev *z,
74 const struct zorro_device_id *ent) 75 const struct zorro_device_id *ent)
diff --git a/drivers/serial/8250_pnp.c b/drivers/serial/8250_pnp.c
index 24485cc62ff8..4822cb50cd0f 100644
--- a/drivers/serial/8250_pnp.c
+++ b/drivers/serial/8250_pnp.c
@@ -348,6 +348,8 @@ static const struct pnp_device_id pnp_dev_table[] = {
348 { "FUJ02E6", 0 }, 348 { "FUJ02E6", 0 },
349 /* Fujitsu Wacom 2FGT Tablet PC device */ 349 /* Fujitsu Wacom 2FGT Tablet PC device */
350 { "FUJ02E7", 0 }, 350 { "FUJ02E7", 0 },
351 /* Fujitsu Wacom 1FGT Tablet PC device */
352 { "FUJ02E9", 0 },
351 /* 353 /*
352 * LG C1 EXPRESS DUAL (C1-PB11A3) touch screen (actually a FUJ02E6 in 354 * LG C1 EXPRESS DUAL (C1-PB11A3) touch screen (actually a FUJ02E6 in
353 * disguise) 355 * disguise)
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index f55c49475a8c..302836a80693 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -518,12 +518,13 @@ config SERIAL_S3C2412
518 Serial port support for the Samsung S3C2412 and S3C2413 SoC 518 Serial port support for the Samsung S3C2412 and S3C2413 SoC
519 519
520config SERIAL_S3C2440 520config SERIAL_S3C2440
521 tristate "Samsung S3C2440/S3C2442 Serial port support" 521 tristate "Samsung S3C2440/S3C2442/S3C2416 Serial port support"
522 depends on SERIAL_SAMSUNG && (CPU_S3C2440 || CPU_S3C2442) 522 depends on SERIAL_SAMSUNG && (CPU_S3C2440 || CPU_S3C2442 || CPU_S3C2416)
523 default y if CPU_S3C2440 523 default y if CPU_S3C2440
524 default y if CPU_S3C2442 524 default y if CPU_S3C2442
525 select SERIAL_SAMSUNG_UARTS_4 if CPU_S3C2416
525 help 526 help
526 Serial port support for the Samsung S3C2440 and S3C2442 SoC 527 Serial port support for the Samsung S3C2440, S3C2416 and S3C2442 SoC
527 528
528config SERIAL_S3C24A0 529config SERIAL_S3C24A0
529 tristate "Samsung S3C24A0 Serial port support" 530 tristate "Samsung S3C24A0 Serial port support"
@@ -533,21 +534,13 @@ config SERIAL_S3C24A0
533 Serial port support for the Samsung S3C24A0 SoC 534 Serial port support for the Samsung S3C24A0 SoC
534 535
535config SERIAL_S3C6400 536config SERIAL_S3C6400
536 tristate "Samsung S3C6400/S3C6410/S5P6440 Seria port support" 537 tristate "Samsung S3C6400/S3C6410/S5P6440/S5PC100 Serial port support"
537 depends on SERIAL_SAMSUNG && (CPU_S3C6400 || CPU_S3C6410 || CPU_S5P6440) 538 depends on SERIAL_SAMSUNG && (CPU_S3C6400 || CPU_S3C6410 || CPU_S5P6440 || CPU_S5PC100)
538 select SERIAL_SAMSUNG_UARTS_4 539 select SERIAL_SAMSUNG_UARTS_4
539 default y 540 default y
540 help 541 help
541 Serial port support for the Samsung S3C6400, S3C6410 and S5P6440 542 Serial port support for the Samsung S3C6400, S3C6410, S5P6440
542 SoCs 543 and S5PC100 SoCs
543
544config SERIAL_S5PC100
545 tristate "Samsung S5PC100 Serial port support"
546 depends on SERIAL_SAMSUNG && CPU_S5PC100
547 select SERIAL_SAMSUNG_UARTS_4
548 default y
549 help
550 Serial port support for the Samsung S5PC100 SoCs
551 544
552config SERIAL_S5PV210 545config SERIAL_S5PV210
553 tristate "Samsung S5PV210 Serial port support" 546 tristate "Samsung S5PV210 Serial port support"
diff --git a/drivers/serial/Makefile b/drivers/serial/Makefile
index 6aa4723b74ee..328f107346c4 100644
--- a/drivers/serial/Makefile
+++ b/drivers/serial/Makefile
@@ -44,7 +44,6 @@ obj-$(CONFIG_SERIAL_S3C2412) += s3c2412.o
44obj-$(CONFIG_SERIAL_S3C2440) += s3c2440.o 44obj-$(CONFIG_SERIAL_S3C2440) += s3c2440.o
45obj-$(CONFIG_SERIAL_S3C24A0) += s3c24a0.o 45obj-$(CONFIG_SERIAL_S3C24A0) += s3c24a0.o
46obj-$(CONFIG_SERIAL_S3C6400) += s3c6400.o 46obj-$(CONFIG_SERIAL_S3C6400) += s3c6400.o
47obj-$(CONFIG_SERIAL_S5PC100) += s3c6400.o
48obj-$(CONFIG_SERIAL_S5PV210) += s5pv210.o 47obj-$(CONFIG_SERIAL_S5PV210) += s5pv210.o
49obj-$(CONFIG_SERIAL_MAX3100) += max3100.o 48obj-$(CONFIG_SERIAL_MAX3100) += max3100.o
50obj-$(CONFIG_SERIAL_IP22_ZILOG) += ip22zilog.o 49obj-$(CONFIG_SERIAL_IP22_ZILOG) += ip22zilog.o
diff --git a/drivers/serial/atmel_serial.c b/drivers/serial/atmel_serial.c
index 2c9bf9b68327..eed3c2d8dd1c 100644
--- a/drivers/serial/atmel_serial.c
+++ b/drivers/serial/atmel_serial.c
@@ -38,6 +38,7 @@
38#include <linux/dma-mapping.h> 38#include <linux/dma-mapping.h>
39#include <linux/atmel_pdc.h> 39#include <linux/atmel_pdc.h>
40#include <linux/atmel_serial.h> 40#include <linux/atmel_serial.h>
41#include <linux/uaccess.h>
41 42
42#include <asm/io.h> 43#include <asm/io.h>
43 44
@@ -59,6 +60,9 @@
59 60
60#include <linux/serial_core.h> 61#include <linux/serial_core.h>
61 62
63static void atmel_start_rx(struct uart_port *port);
64static void atmel_stop_rx(struct uart_port *port);
65
62#ifdef CONFIG_SERIAL_ATMEL_TTYAT 66#ifdef CONFIG_SERIAL_ATMEL_TTYAT
63 67
64/* Use device name ttyAT, major 204 and minor 154-169. This is necessary if we 68/* Use device name ttyAT, major 204 and minor 154-169. This is necessary if we
@@ -93,6 +97,7 @@
93#define UART_GET_BRGR(port) __raw_readl((port)->membase + ATMEL_US_BRGR) 97#define UART_GET_BRGR(port) __raw_readl((port)->membase + ATMEL_US_BRGR)
94#define UART_PUT_BRGR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_BRGR) 98#define UART_PUT_BRGR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_BRGR)
95#define UART_PUT_RTOR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_RTOR) 99#define UART_PUT_RTOR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_RTOR)
100#define UART_PUT_TTGR(port, v) __raw_writel(v, (port)->membase + ATMEL_US_TTGR)
96 101
97 /* PDC registers */ 102 /* PDC registers */
98#define UART_PUT_PTCR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_PTCR) 103#define UART_PUT_PTCR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_PTCR)
@@ -147,6 +152,9 @@ struct atmel_uart_port {
147 unsigned int irq_status_prev; 152 unsigned int irq_status_prev;
148 153
149 struct circ_buf rx_ring; 154 struct circ_buf rx_ring;
155
156 struct serial_rs485 rs485; /* rs485 settings */
157 unsigned int tx_done_mask;
150}; 158};
151 159
152static struct atmel_uart_port atmel_ports[ATMEL_MAX_UART]; 160static struct atmel_uart_port atmel_ports[ATMEL_MAX_UART];
@@ -187,6 +195,46 @@ static bool atmel_use_dma_tx(struct uart_port *port)
187} 195}
188#endif 196#endif
189 197
198/* Enable or disable the rs485 support */
199void atmel_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
200{
201 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
202 unsigned int mode;
203
204 spin_lock(&port->lock);
205
206 /* Disable interrupts */
207 UART_PUT_IDR(port, atmel_port->tx_done_mask);
208
209 mode = UART_GET_MR(port);
210
211 /* Resetting serial mode to RS232 (0x0) */
212 mode &= ~ATMEL_US_USMODE;
213
214 atmel_port->rs485 = *rs485conf;
215
216 if (rs485conf->flags & SER_RS485_ENABLED) {
217 dev_dbg(port->dev, "Setting UART to RS485\n");
218 atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
219 UART_PUT_TTGR(port, rs485conf->delay_rts_before_send);
220 mode |= ATMEL_US_USMODE_RS485;
221 } else {
222 dev_dbg(port->dev, "Setting UART to RS232\n");
223 if (atmel_use_dma_tx(port))
224 atmel_port->tx_done_mask = ATMEL_US_ENDTX |
225 ATMEL_US_TXBUFE;
226 else
227 atmel_port->tx_done_mask = ATMEL_US_TXRDY;
228 }
229 UART_PUT_MR(port, mode);
230
231 /* Enable interrupts */
232 UART_PUT_IER(port, atmel_port->tx_done_mask);
233
234 spin_unlock(&port->lock);
235
236}
237
190/* 238/*
191 * Return TIOCSER_TEMT when transmitter FIFO and Shift register is empty. 239 * Return TIOCSER_TEMT when transmitter FIFO and Shift register is empty.
192 */ 240 */
@@ -202,6 +250,7 @@ static void atmel_set_mctrl(struct uart_port *port, u_int mctrl)
202{ 250{
203 unsigned int control = 0; 251 unsigned int control = 0;
204 unsigned int mode; 252 unsigned int mode;
253 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
205 254
206#ifdef CONFIG_ARCH_AT91RM9200 255#ifdef CONFIG_ARCH_AT91RM9200
207 if (cpu_is_at91rm9200()) { 256 if (cpu_is_at91rm9200()) {
@@ -236,6 +285,17 @@ static void atmel_set_mctrl(struct uart_port *port, u_int mctrl)
236 mode |= ATMEL_US_CHMODE_LOC_LOOP; 285 mode |= ATMEL_US_CHMODE_LOC_LOOP;
237 else 286 else
238 mode |= ATMEL_US_CHMODE_NORMAL; 287 mode |= ATMEL_US_CHMODE_NORMAL;
288
289 /* Resetting serial mode to RS232 (0x0) */
290 mode &= ~ATMEL_US_USMODE;
291
292 if (atmel_port->rs485.flags & SER_RS485_ENABLED) {
293 dev_dbg(port->dev, "Setting UART to RS485\n");
294 UART_PUT_TTGR(port, atmel_port->rs485.delay_rts_before_send);
295 mode |= ATMEL_US_USMODE_RS485;
296 } else {
297 dev_dbg(port->dev, "Setting UART to RS232\n");
298 }
239 UART_PUT_MR(port, mode); 299 UART_PUT_MR(port, mode);
240} 300}
241 301
@@ -268,12 +328,17 @@ static u_int atmel_get_mctrl(struct uart_port *port)
268 */ 328 */
269static void atmel_stop_tx(struct uart_port *port) 329static void atmel_stop_tx(struct uart_port *port)
270{ 330{
331 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
332
271 if (atmel_use_dma_tx(port)) { 333 if (atmel_use_dma_tx(port)) {
272 /* disable PDC transmit */ 334 /* disable PDC transmit */
273 UART_PUT_PTCR(port, ATMEL_PDC_TXTDIS); 335 UART_PUT_PTCR(port, ATMEL_PDC_TXTDIS);
274 UART_PUT_IDR(port, ATMEL_US_ENDTX | ATMEL_US_TXBUFE); 336 }
275 } else 337 /* Disable interrupts */
276 UART_PUT_IDR(port, ATMEL_US_TXRDY); 338 UART_PUT_IDR(port, atmel_port->tx_done_mask);
339
340 if (atmel_port->rs485.flags & SER_RS485_ENABLED)
341 atmel_start_rx(port);
277} 342}
278 343
279/* 344/*
@@ -281,17 +346,39 @@ static void atmel_stop_tx(struct uart_port *port)
281 */ 346 */
282static void atmel_start_tx(struct uart_port *port) 347static void atmel_start_tx(struct uart_port *port)
283{ 348{
349 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
350
284 if (atmel_use_dma_tx(port)) { 351 if (atmel_use_dma_tx(port)) {
285 if (UART_GET_PTSR(port) & ATMEL_PDC_TXTEN) 352 if (UART_GET_PTSR(port) & ATMEL_PDC_TXTEN)
286 /* The transmitter is already running. Yes, we 353 /* The transmitter is already running. Yes, we
287 really need this.*/ 354 really need this.*/
288 return; 355 return;
289 356
290 UART_PUT_IER(port, ATMEL_US_ENDTX | ATMEL_US_TXBUFE); 357 if (atmel_port->rs485.flags & SER_RS485_ENABLED)
358 atmel_stop_rx(port);
359
291 /* re-enable PDC transmit */ 360 /* re-enable PDC transmit */
292 UART_PUT_PTCR(port, ATMEL_PDC_TXTEN); 361 UART_PUT_PTCR(port, ATMEL_PDC_TXTEN);
293 } else 362 }
294 UART_PUT_IER(port, ATMEL_US_TXRDY); 363 /* Enable interrupts */
364 UART_PUT_IER(port, atmel_port->tx_done_mask);
365}
366
367/*
368 * start receiving - port is in process of being opened.
369 */
370static void atmel_start_rx(struct uart_port *port)
371{
372 UART_PUT_CR(port, ATMEL_US_RSTSTA); /* reset status and receiver */
373
374 if (atmel_use_dma_rx(port)) {
375 /* enable PDC controller */
376 UART_PUT_IER(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
377 port->read_status_mask);
378 UART_PUT_PTCR(port, ATMEL_PDC_RXTEN);
379 } else {
380 UART_PUT_IER(port, ATMEL_US_RXRDY);
381 }
295} 382}
296 383
297/* 384/*
@@ -302,9 +389,11 @@ static void atmel_stop_rx(struct uart_port *port)
302 if (atmel_use_dma_rx(port)) { 389 if (atmel_use_dma_rx(port)) {
303 /* disable PDC receive */ 390 /* disable PDC receive */
304 UART_PUT_PTCR(port, ATMEL_PDC_RXTDIS); 391 UART_PUT_PTCR(port, ATMEL_PDC_RXTDIS);
305 UART_PUT_IDR(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT); 392 UART_PUT_IDR(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
306 } else 393 port->read_status_mask);
394 } else {
307 UART_PUT_IDR(port, ATMEL_US_RXRDY); 395 UART_PUT_IDR(port, ATMEL_US_RXRDY);
396 }
308} 397}
309 398
310/* 399/*
@@ -428,8 +517,9 @@ static void atmel_rx_chars(struct uart_port *port)
428static void atmel_tx_chars(struct uart_port *port) 517static void atmel_tx_chars(struct uart_port *port)
429{ 518{
430 struct circ_buf *xmit = &port->state->xmit; 519 struct circ_buf *xmit = &port->state->xmit;
520 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
431 521
432 if (port->x_char && UART_GET_CSR(port) & ATMEL_US_TXRDY) { 522 if (port->x_char && UART_GET_CSR(port) & atmel_port->tx_done_mask) {
433 UART_PUT_CHAR(port, port->x_char); 523 UART_PUT_CHAR(port, port->x_char);
434 port->icount.tx++; 524 port->icount.tx++;
435 port->x_char = 0; 525 port->x_char = 0;
@@ -437,7 +527,7 @@ static void atmel_tx_chars(struct uart_port *port)
437 if (uart_circ_empty(xmit) || uart_tx_stopped(port)) 527 if (uart_circ_empty(xmit) || uart_tx_stopped(port))
438 return; 528 return;
439 529
440 while (UART_GET_CSR(port) & ATMEL_US_TXRDY) { 530 while (UART_GET_CSR(port) & atmel_port->tx_done_mask) {
441 UART_PUT_CHAR(port, xmit->buf[xmit->tail]); 531 UART_PUT_CHAR(port, xmit->buf[xmit->tail]);
442 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); 532 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
443 port->icount.tx++; 533 port->icount.tx++;
@@ -449,7 +539,8 @@ static void atmel_tx_chars(struct uart_port *port)
449 uart_write_wakeup(port); 539 uart_write_wakeup(port);
450 540
451 if (!uart_circ_empty(xmit)) 541 if (!uart_circ_empty(xmit))
452 UART_PUT_IER(port, ATMEL_US_TXRDY); 542 /* Enable interrupts */
543 UART_PUT_IER(port, atmel_port->tx_done_mask);
453} 544}
454 545
455/* 546/*
@@ -501,18 +592,10 @@ atmel_handle_transmit(struct uart_port *port, unsigned int pending)
501{ 592{
502 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 593 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
503 594
504 if (atmel_use_dma_tx(port)) { 595 if (pending & atmel_port->tx_done_mask) {
505 /* PDC transmit */ 596 /* Either PDC or interrupt transmission */
506 if (pending & (ATMEL_US_ENDTX | ATMEL_US_TXBUFE)) { 597 UART_PUT_IDR(port, atmel_port->tx_done_mask);
507 UART_PUT_IDR(port, ATMEL_US_ENDTX | ATMEL_US_TXBUFE); 598 tasklet_schedule(&atmel_port->tasklet);
508 tasklet_schedule(&atmel_port->tasklet);
509 }
510 } else {
511 /* Interrupt transmit */
512 if (pending & ATMEL_US_TXRDY) {
513 UART_PUT_IDR(port, ATMEL_US_TXRDY);
514 tasklet_schedule(&atmel_port->tasklet);
515 }
516 } 599 }
517} 600}
518 601
@@ -590,9 +673,15 @@ static void atmel_tx_dma(struct uart_port *port)
590 673
591 UART_PUT_TPR(port, pdc->dma_addr + xmit->tail); 674 UART_PUT_TPR(port, pdc->dma_addr + xmit->tail);
592 UART_PUT_TCR(port, count); 675 UART_PUT_TCR(port, count);
593 /* re-enable PDC transmit and interrupts */ 676 /* re-enable PDC transmit */
594 UART_PUT_PTCR(port, ATMEL_PDC_TXTEN); 677 UART_PUT_PTCR(port, ATMEL_PDC_TXTEN);
595 UART_PUT_IER(port, ATMEL_US_ENDTX | ATMEL_US_TXBUFE); 678 /* Enable interrupts */
679 UART_PUT_IER(port, atmel_port->tx_done_mask);
680 } else {
681 if (atmel_port->rs485.flags & SER_RS485_ENABLED) {
682 /* DMA done, stop TX, start RX for RS485 */
683 atmel_start_rx(port);
684 }
596 } 685 }
597 686
598 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 687 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
@@ -1017,6 +1106,7 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
1017{ 1106{
1018 unsigned long flags; 1107 unsigned long flags;
1019 unsigned int mode, imr, quot, baud; 1108 unsigned int mode, imr, quot, baud;
1109 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1020 1110
1021 /* Get current mode register */ 1111 /* Get current mode register */
1022 mode = UART_GET_MR(port) & ~(ATMEL_US_USCLKS | ATMEL_US_CHRL 1112 mode = UART_GET_MR(port) & ~(ATMEL_US_USCLKS | ATMEL_US_CHRL
@@ -1115,6 +1205,17 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
1115 /* disable receiver and transmitter */ 1205 /* disable receiver and transmitter */
1116 UART_PUT_CR(port, ATMEL_US_TXDIS | ATMEL_US_RXDIS); 1206 UART_PUT_CR(port, ATMEL_US_TXDIS | ATMEL_US_RXDIS);
1117 1207
1208 /* Resetting serial mode to RS232 (0x0) */
1209 mode &= ~ATMEL_US_USMODE;
1210
1211 if (atmel_port->rs485.flags & SER_RS485_ENABLED) {
1212 dev_dbg(port->dev, "Setting UART to RS485\n");
1213 UART_PUT_TTGR(port, atmel_port->rs485.delay_rts_before_send);
1214 mode |= ATMEL_US_USMODE_RS485;
1215 } else {
1216 dev_dbg(port->dev, "Setting UART to RS232\n");
1217 }
1218
1118 /* set the parity, stop bits and data size */ 1219 /* set the parity, stop bits and data size */
1119 UART_PUT_MR(port, mode); 1220 UART_PUT_MR(port, mode);
1120 1221
@@ -1231,6 +1332,35 @@ static void atmel_poll_put_char(struct uart_port *port, unsigned char ch)
1231} 1332}
1232#endif 1333#endif
1233 1334
1335static int
1336atmel_ioctl(struct uart_port *port, unsigned int cmd, unsigned long arg)
1337{
1338 struct serial_rs485 rs485conf;
1339
1340 switch (cmd) {
1341 case TIOCSRS485:
1342 if (copy_from_user(&rs485conf, (struct serial_rs485 *) arg,
1343 sizeof(rs485conf)))
1344 return -EFAULT;
1345
1346 atmel_config_rs485(port, &rs485conf);
1347 break;
1348
1349 case TIOCGRS485:
1350 if (copy_to_user((struct serial_rs485 *) arg,
1351 &(to_atmel_uart_port(port)->rs485),
1352 sizeof(rs485conf)))
1353 return -EFAULT;
1354 break;
1355
1356 default:
1357 return -ENOIOCTLCMD;
1358 }
1359 return 0;
1360}
1361
1362
1363
1234static struct uart_ops atmel_pops = { 1364static struct uart_ops atmel_pops = {
1235 .tx_empty = atmel_tx_empty, 1365 .tx_empty = atmel_tx_empty,
1236 .set_mctrl = atmel_set_mctrl, 1366 .set_mctrl = atmel_set_mctrl,
@@ -1250,6 +1380,7 @@ static struct uart_ops atmel_pops = {
1250 .config_port = atmel_config_port, 1380 .config_port = atmel_config_port,
1251 .verify_port = atmel_verify_port, 1381 .verify_port = atmel_verify_port,
1252 .pm = atmel_serial_pm, 1382 .pm = atmel_serial_pm,
1383 .ioctl = atmel_ioctl,
1253#ifdef CONFIG_CONSOLE_POLL 1384#ifdef CONFIG_CONSOLE_POLL
1254 .poll_get_char = atmel_poll_get_char, 1385 .poll_get_char = atmel_poll_get_char,
1255 .poll_put_char = atmel_poll_put_char, 1386 .poll_put_char = atmel_poll_put_char,
@@ -1265,13 +1396,12 @@ static void __devinit atmel_init_port(struct atmel_uart_port *atmel_port,
1265 struct uart_port *port = &atmel_port->uart; 1396 struct uart_port *port = &atmel_port->uart;
1266 struct atmel_uart_data *data = pdev->dev.platform_data; 1397 struct atmel_uart_data *data = pdev->dev.platform_data;
1267 1398
1268 port->iotype = UPIO_MEM; 1399 port->iotype = UPIO_MEM;
1269 port->flags = UPF_BOOT_AUTOCONF; 1400 port->flags = UPF_BOOT_AUTOCONF;
1270 port->ops = &atmel_pops; 1401 port->ops = &atmel_pops;
1271 port->fifosize = 1; 1402 port->fifosize = 1;
1272 port->line = pdev->id; 1403 port->line = pdev->id;
1273 port->dev = &pdev->dev; 1404 port->dev = &pdev->dev;
1274
1275 port->mapbase = pdev->resource[0].start; 1405 port->mapbase = pdev->resource[0].start;
1276 port->irq = pdev->resource[1].start; 1406 port->irq = pdev->resource[1].start;
1277 1407
@@ -1299,8 +1429,16 @@ static void __devinit atmel_init_port(struct atmel_uart_port *atmel_port,
1299 1429
1300 atmel_port->use_dma_rx = data->use_dma_rx; 1430 atmel_port->use_dma_rx = data->use_dma_rx;
1301 atmel_port->use_dma_tx = data->use_dma_tx; 1431 atmel_port->use_dma_tx = data->use_dma_tx;
1302 if (atmel_use_dma_tx(port)) 1432 atmel_port->rs485 = data->rs485;
1433 /* Use TXEMPTY for interrupt when rs485 else TXRDY or ENDTX|TXBUFE */
1434 if (atmel_port->rs485.flags & SER_RS485_ENABLED)
1435 atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
1436 else if (atmel_use_dma_tx(port)) {
1303 port->fifosize = PDC_BUFFER_SIZE; 1437 port->fifosize = PDC_BUFFER_SIZE;
1438 atmel_port->tx_done_mask = ATMEL_US_ENDTX | ATMEL_US_TXBUFE;
1439 } else {
1440 atmel_port->tx_done_mask = ATMEL_US_TXRDY;
1441 }
1304} 1442}
1305 1443
1306/* 1444/*
@@ -1334,6 +1472,7 @@ static void atmel_console_putchar(struct uart_port *port, int ch)
1334static void atmel_console_write(struct console *co, const char *s, u_int count) 1472static void atmel_console_write(struct console *co, const char *s, u_int count)
1335{ 1473{
1336 struct uart_port *port = &atmel_ports[co->index].uart; 1474 struct uart_port *port = &atmel_ports[co->index].uart;
1475 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1337 unsigned int status, imr; 1476 unsigned int status, imr;
1338 unsigned int pdc_tx; 1477 unsigned int pdc_tx;
1339 1478
@@ -1341,7 +1480,7 @@ static void atmel_console_write(struct console *co, const char *s, u_int count)
1341 * First, save IMR and then disable interrupts 1480 * First, save IMR and then disable interrupts
1342 */ 1481 */
1343 imr = UART_GET_IMR(port); 1482 imr = UART_GET_IMR(port);
1344 UART_PUT_IDR(port, ATMEL_US_RXRDY | ATMEL_US_TXRDY); 1483 UART_PUT_IDR(port, ATMEL_US_RXRDY | atmel_port->tx_done_mask);
1345 1484
1346 /* Store PDC transmit status and disable it */ 1485 /* Store PDC transmit status and disable it */
1347 pdc_tx = UART_GET_PTSR(port) & ATMEL_PDC_TXTEN; 1486 pdc_tx = UART_GET_PTSR(port) & ATMEL_PDC_TXTEN;
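
Note on the atmel_serial RS485 support above: the new atmel_ioctl() handler is driven from userspace through the standard struct serial_rs485 and the TIOCSRS485/TIOCGRS485 ioctls. A hedged sketch of how an application could switch a port into RS485 mode; the device node and the delay value are invented for illustration, and it assumes kernel headers recent enough to provide the ioctl numbers and the <linux/serial.h> structure:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/serial.h>

int main(void)
{
	struct serial_rs485 rs485;
	int fd = open("/dev/ttyS1", O_RDWR | O_NOCTTY);	/* hypothetical port */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&rs485, 0, sizeof(rs485));
	rs485.flags = SER_RS485_ENABLED;
	rs485.delay_rts_before_send = 1;	/* the driver writes this into TTGR */

	if (ioctl(fd, TIOCSRS485, &rs485) < 0)	/* lands in atmel_ioctl() above */
		perror("TIOCSRS485");

	if (ioctl(fd, TIOCGRS485, &rs485) == 0)	/* read the settings back */
		printf("RS485 %sabled\n",
		       (rs485.flags & SER_RS485_ENABLED) ? "en" : "dis");
	return 0;
}
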
diff --git a/drivers/serial/imx.c b/drivers/serial/imx.c
index 4315b23590bd..eacb588a9345 100644
--- a/drivers/serial/imx.c
+++ b/drivers/serial/imx.c
@@ -120,7 +120,8 @@
120#define MX2_UCR3_RXDMUXSEL (1<<2) /* RXD Muxed Input Select, on mx2/mx3 */ 120#define MX2_UCR3_RXDMUXSEL (1<<2) /* RXD Muxed Input Select, on mx2/mx3 */
121#define UCR3_INVT (1<<1) /* Inverted Infrared transmission */ 121#define UCR3_INVT (1<<1) /* Inverted Infrared transmission */
122#define UCR3_BPEN (1<<0) /* Preset registers enable */ 122#define UCR3_BPEN (1<<0) /* Preset registers enable */
123#define UCR4_CTSTL_32 (32<<10) /* CTS trigger level (32 chars) */ 123#define UCR4_CTSTL_SHF 10 /* CTS trigger level shift */
124#define UCR4_CTSTL_MASK 0x3F /* CTS trigger is 6 bits wide */
124#define UCR4_INVR (1<<9) /* Inverted infrared reception */ 125#define UCR4_INVR (1<<9) /* Inverted infrared reception */
125#define UCR4_ENIRI (1<<8) /* Serial infrared interrupt enable */ 126#define UCR4_ENIRI (1<<8) /* Serial infrared interrupt enable */
126#define UCR4_WKEN (1<<7) /* Wake interrupt enable */ 127#define UCR4_WKEN (1<<7) /* Wake interrupt enable */
@@ -591,6 +592,9 @@ static int imx_setup_ufcr(struct imx_port *sport, unsigned int mode)
591 return 0; 592 return 0;
592} 593}
593 594
595/* half the RX buffer size */
596#define CTSTL 16
597
594static int imx_startup(struct uart_port *port) 598static int imx_startup(struct uart_port *port)
595{ 599{
596 struct imx_port *sport = (struct imx_port *)port; 600 struct imx_port *sport = (struct imx_port *)port;
@@ -607,6 +611,10 @@ static int imx_startup(struct uart_port *port)
607 if (USE_IRDA(sport)) 611 if (USE_IRDA(sport))
608 temp |= UCR4_IRSC; 612 temp |= UCR4_IRSC;
609 613
614 /* set the trigger level for CTS */
615 temp &= ~(UCR4_CTSTL_MASK<< UCR4_CTSTL_SHF);
616 temp |= CTSTL<< UCR4_CTSTL_SHF;
617
610 writel(temp & ~UCR4_DREN, sport->port.membase + UCR4); 618 writel(temp & ~UCR4_DREN, sport->port.membase + UCR4);
611 619
612 if (USE_IRDA(sport)) { 620 if (USE_IRDA(sport)) {
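
Note on the imx change above: replacing the fixed UCR4_CTSTL_32 value with a shift/mask pair lets the driver program any CTS trigger level (CTSTL is 16, half the RX FIFO). The hunk uses the usual read-modify-write idiom for a multi-bit register field; a standalone sketch with the same mask and shift, where the register is just a variable:

#include <stdio.h>

#define UCR4_CTSTL_SHF	10	/* CTS trigger level shift */
#define UCR4_CTSTL_MASK	0x3F	/* the field is 6 bits wide */
#define CTSTL		16	/* half the 32-byte RX FIFO */

int main(void)
{
	unsigned int ucr4 = 0xffffffffu;	/* pretend every bit was set */

	ucr4 &= ~(UCR4_CTSTL_MASK << UCR4_CTSTL_SHF);	/* clear the old level */
	ucr4 |= CTSTL << UCR4_CTSTL_SHF;		/* program the new one */

	printf("CTSTL field is now %u\n",
	       (ucr4 >> UCR4_CTSTL_SHF) & UCR4_CTSTL_MASK);
	return 0;
}
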
diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c
index 3119fddaedb5..02469c31bf0b 100644
--- a/drivers/serial/mpc52xx_uart.c
+++ b/drivers/serial/mpc52xx_uart.c
@@ -29,39 +29,6 @@
29 * kind, whether express or implied. 29 * kind, whether express or implied.
30 */ 30 */
31 31
32/* Platform device Usage :
33 *
34 * Since PSCs can have multiple function, the correct driver for each one
35 * is selected by calling mpc52xx_match_psc_function(...). The function
36 * handled by this driver is "uart".
37 *
38 * The driver init all necessary registers to place the PSC in uart mode without
39 * DCD. However, the pin multiplexing aren't changed and should be set either
40 * by the bootloader or in the platform init code.
41 *
42 * The idx field must be equal to the PSC index (e.g. 0 for PSC1, 1 for PSC2,
43 * and so on). So the PSC1 is mapped to /dev/ttyPSC0, PSC2 to /dev/ttyPSC1 and
44 * so on. But be warned, it's an ABSOLUTE REQUIREMENT ! This is needed mainly
45 * fpr the console code : without this 1:1 mapping, at early boot time, when we
46 * are parsing the kernel args console=ttyPSC?, we wouldn't know which PSC it
47 * will be mapped to.
48 */
49
50/* OF Platform device Usage :
51 *
52 * This driver is only used for PSCs configured in uart mode. The device
53 * tree will have a node for each PSC with "mpc52xx-psc-uart" in the compatible
54 * list.
55 *
56 * By default, PSC devices are enumerated in the order they are found. However
57 * a particular PSC number can be forces by adding 'device_no = <port#>'
58 * to the device node.
59 *
60 * The driver init all necessary registers to place the PSC in uart mode without
61 * DCD. However, the pin multiplexing aren't changed and should be set either
62 * by the bootloader or in the platform init code.
63 */
64
65#undef DEBUG 32#undef DEBUG
66 33
67#include <linux/device.h> 34#include <linux/device.h>
@@ -1500,7 +1467,7 @@ mpc52xx_uart_init(void)
1500 /* 1467 /*
1501 * Map the PSC FIFO Controller and init if on MPC512x. 1468 * Map the PSC FIFO Controller and init if on MPC512x.
1502 */ 1469 */
1503 if (psc_ops->fifoc_init) { 1470 if (psc_ops && psc_ops->fifoc_init) {
1504 ret = psc_ops->fifoc_init(); 1471 ret = psc_ops->fifoc_init();
1505 if (ret) 1472 if (ret)
1506 return ret; 1473 return ret;
diff --git a/drivers/serial/pmac_zilog.c b/drivers/serial/pmac_zilog.c
index 4eaa043ca2a8..700e10833bf9 100644
--- a/drivers/serial/pmac_zilog.c
+++ b/drivers/serial/pmac_zilog.c
@@ -752,8 +752,10 @@ static void pmz_break_ctl(struct uart_port *port, int break_state)
752 uap->curregs[R5] = new_reg; 752 uap->curregs[R5] = new_reg;
753 753
754 /* NOTE: Not subject to 'transmitter active' rule. */ 754 /* NOTE: Not subject to 'transmitter active' rule. */
755 if (ZS_IS_ASLEEP(uap)) 755 if (ZS_IS_ASLEEP(uap)) {
756 spin_unlock_irqrestore(&port->lock, flags);
756 return; 757 return;
758 }
757 write_zsreg(uap, R5, uap->curregs[R5]); 759 write_zsreg(uap, R5, uap->curregs[R5]);
758 } 760 }
759 761
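
Note on the pmac_zilog fix above: the unlock added before the ZS_IS_ASLEEP() return implies the port spinlock is taken earlier in pmz_break_ctl(), so the old early return leaked the lock. The general shape of the fix, as a standalone kernel-style sketch (the lock, flag and function names are invented for illustration):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

static void example_break_ctl(int asleep)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);

	/* ... update the cached register image ... */

	if (asleep) {
		/* every early return must drop the lock,
		 * otherwise the next caller deadlocks */
		spin_unlock_irqrestore(&example_lock, flags);
		return;
	}

	/* ... write the register to hardware ... */

	spin_unlock_irqrestore(&example_lock, flags);
}
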
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c
index 8cfa5b12ea7a..dadd686c9801 100644
--- a/drivers/serial/serial_cs.c
+++ b/drivers/serial/serial_cs.c
@@ -89,7 +89,6 @@ struct serial_info {
89 int manfid; 89 int manfid;
90 int prodid; 90 int prodid;
91 int c950ctrl; 91 int c950ctrl;
92 dev_node_t node[4];
93 int line[4]; 92 int line[4];
94 const struct serial_quirk *quirk; 93 const struct serial_quirk *quirk;
95}; 94};
@@ -289,8 +288,6 @@ static void serial_remove(struct pcmcia_device *link)
289 for (i = 0; i < info->ndev; i++) 288 for (i = 0; i < info->ndev; i++)
290 serial8250_unregister_port(info->line[i]); 289 serial8250_unregister_port(info->line[i]);
291 290
292 info->p_dev->dev_node = NULL;
293
294 if (!info->slave) 291 if (!info->slave)
295 pcmcia_disable_device(link); 292 pcmcia_disable_device(link);
296} 293}
@@ -343,7 +340,6 @@ static int serial_probe(struct pcmcia_device *link)
343 340
344 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 341 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
345 link->io.NumPorts1 = 8; 342 link->io.NumPorts1 = 8;
346 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
347 link->conf.Attributes = CONF_ENABLE_IRQ; 343 link->conf.Attributes = CONF_ENABLE_IRQ;
348 if (do_sound) { 344 if (do_sound) {
349 link->conf.Attributes |= CONF_ENABLE_SPKR; 345 link->conf.Attributes |= CONF_ENABLE_SPKR;
@@ -411,11 +407,6 @@ static int setup_serial(struct pcmcia_device *handle, struct serial_info * info,
411 } 407 }
412 408
413 info->line[info->ndev] = line; 409 info->line[info->ndev] = line;
414 sprintf(info->node[info->ndev].dev_name, "ttyS%d", line);
415 info->node[info->ndev].major = TTY_MAJOR;
416 info->node[info->ndev].minor = 0x40 + line;
417 if (info->ndev > 0)
418 info->node[info->ndev - 1].next = &info->node[info->ndev];
419 info->ndev++; 410 info->ndev++;
420 411
421 return 0; 412 return 0;
@@ -486,7 +477,7 @@ static int simple_config(struct pcmcia_device *link)
486 } 477 }
487 if (info->slave) { 478 if (info->slave) {
488 return setup_serial(link, info, port, 479 return setup_serial(link, info, port,
489 link->irq.AssignedIRQ); 480 link->irq);
490 } 481 }
491 } 482 }
492 483
@@ -507,10 +498,6 @@ static int simple_config(struct pcmcia_device *link)
507 return -1; 498 return -1;
508 499
509found_port: 500found_port:
510 i = pcmcia_request_irq(link, &link->irq);
511 if (i != 0)
512 link->irq.AssignedIRQ = 0;
513
514 if (info->multi && (info->manfid == MANFID_3COM)) 501 if (info->multi && (info->manfid == MANFID_3COM))
515 link->conf.ConfigIndex &= ~(0x08); 502 link->conf.ConfigIndex &= ~(0x08);
516 503
@@ -523,7 +510,7 @@ found_port:
523 i = pcmcia_request_configuration(link, &link->conf); 510 i = pcmcia_request_configuration(link, &link->conf);
524 if (i != 0) 511 if (i != 0)
525 return -1; 512 return -1;
526 return setup_serial(link, info, link->io.BasePort1, link->irq.AssignedIRQ); 513 return setup_serial(link, info, link->io.BasePort1, link->irq);
527} 514}
528 515
529static int multi_config_check(struct pcmcia_device *p_dev, 516static int multi_config_check(struct pcmcia_device *p_dev,
@@ -586,13 +573,9 @@ static int multi_config(struct pcmcia_device *link)
586 } 573 }
587 } 574 }
588 575
589 i = pcmcia_request_irq(link, &link->irq); 576 if (!link->irq)
590 if (i != 0) { 577 dev_warn(&link->dev,
591 /* FIXME: comment does not fit, error handling does not fit */ 578 "serial_cs: no usable IRQ found, continuing...\n");
592 printk(KERN_NOTICE
593 "serial_cs: no usable port range found, giving up\n");
594 link->irq.AssignedIRQ = 0;
595 }
596 579
597 /* 580 /*
598 * Apply any configuration quirks. 581 * Apply any configuration quirks.
@@ -615,11 +598,11 @@ static int multi_config(struct pcmcia_device *link)
615 if (link->conf.ConfigIndex == 1 || 598 if (link->conf.ConfigIndex == 1 ||
616 link->conf.ConfigIndex == 3) { 599 link->conf.ConfigIndex == 3) {
617 err = setup_serial(link, info, base2, 600 err = setup_serial(link, info, base2,
618 link->irq.AssignedIRQ); 601 link->irq);
619 base2 = link->io.BasePort1; 602 base2 = link->io.BasePort1;
620 } else { 603 } else {
621 err = setup_serial(link, info, link->io.BasePort1, 604 err = setup_serial(link, info, link->io.BasePort1,
622 link->irq.AssignedIRQ); 605 link->irq);
623 } 606 }
624 info->c950ctrl = base2; 607 info->c950ctrl = base2;
625 608
@@ -633,10 +616,10 @@ static int multi_config(struct pcmcia_device *link)
633 return 0; 616 return 0;
634 } 617 }
635 618
636 setup_serial(link, info, link->io.BasePort1, link->irq.AssignedIRQ); 619 setup_serial(link, info, link->io.BasePort1, link->irq);
637 for (i = 0; i < info->multi - 1; i++) 620 for (i = 0; i < info->multi - 1; i++)
638 setup_serial(link, info, base2 + (8 * i), 621 setup_serial(link, info, base2 + (8 * i),
639 link->irq.AssignedIRQ); 622 link->irq);
640 return 0; 623 return 0;
641} 624}
642 625
@@ -720,7 +703,6 @@ static int serial_config(struct pcmcia_device * link)
720 if (info->quirk->post(link)) 703 if (info->quirk->post(link))
721 goto failed; 704 goto failed;
722 705
723 link->dev_node = &info->node[0];
724 return 0; 706 return 0;
725 707
726failed: 708failed:
diff --git a/drivers/serial/serial_ks8695.c b/drivers/serial/serial_ks8695.c
index 2e71bbc04dac..b1962025b1aa 100644
--- a/drivers/serial/serial_ks8695.c
+++ b/drivers/serial/serial_ks8695.c
@@ -650,6 +650,7 @@ static struct console ks8695_console = {
650 650
651static int __init ks8695_console_init(void) 651static int __init ks8695_console_init(void)
652{ 652{
653 add_preferred_console(SERIAL_KS8695_DEVNAME, 0, NULL);
653 register_console(&ks8695_console); 654 register_console(&ks8695_console);
654 return 0; 655 return 0;
655} 656}
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c
index 8eb094c1f61b..8d993c4cceac 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/serial/sh-sci.c
@@ -83,16 +83,16 @@ struct sci_port {
83 83
84 /* Interface clock */ 84 /* Interface clock */
85 struct clk *iclk; 85 struct clk *iclk;
86 /* Data clock */ 86 /* Function clock */
87 struct clk *dclk; 87 struct clk *fclk;
88 88
89 struct list_head node; 89 struct list_head node;
90 struct dma_chan *chan_tx; 90 struct dma_chan *chan_tx;
91 struct dma_chan *chan_rx; 91 struct dma_chan *chan_rx;
92#ifdef CONFIG_SERIAL_SH_SCI_DMA 92#ifdef CONFIG_SERIAL_SH_SCI_DMA
93 struct device *dma_dev; 93 struct device *dma_dev;
94 enum sh_dmae_slave_chan_id slave_tx; 94 unsigned int slave_tx;
95 enum sh_dmae_slave_chan_id slave_rx; 95 unsigned int slave_rx;
96 struct dma_async_tx_descriptor *desc_tx; 96 struct dma_async_tx_descriptor *desc_tx;
97 struct dma_async_tx_descriptor *desc_rx[2]; 97 struct dma_async_tx_descriptor *desc_rx[2];
98 dma_cookie_t cookie_tx; 98 dma_cookie_t cookie_tx;
@@ -107,6 +107,7 @@ struct sci_port {
107 struct work_struct work_tx; 107 struct work_struct work_tx;
108 struct work_struct work_rx; 108 struct work_struct work_rx;
109 struct timer_list rx_timer; 109 struct timer_list rx_timer;
110 unsigned int rx_timeout;
110#endif 111#endif
111}; 112};
112 113
@@ -674,22 +675,22 @@ static irqreturn_t sci_rx_interrupt(int irq, void *ptr)
674 struct sci_port *s = to_sci_port(port); 675 struct sci_port *s = to_sci_port(port);
675 676
676 if (s->chan_rx) { 677 if (s->chan_rx) {
677 unsigned long tout;
678 u16 scr = sci_in(port, SCSCR); 678 u16 scr = sci_in(port, SCSCR);
679 u16 ssr = sci_in(port, SCxSR); 679 u16 ssr = sci_in(port, SCxSR);
680 680
681 /* Disable future Rx interrupts */ 681 /* Disable future Rx interrupts */
682 sci_out(port, SCSCR, scr & ~SCI_CTRL_FLAGS_RIE); 682 if (port->type == PORT_SCIFA) {
683 disable_irq_nosync(irq);
684 scr |= 0x4000;
685 } else {
686 scr &= ~SCI_CTRL_FLAGS_RIE;
687 }
688 sci_out(port, SCSCR, scr);
683 /* Clear current interrupt */ 689 /* Clear current interrupt */
684 sci_out(port, SCxSR, ssr & ~(1 | SCxSR_RDxF(port))); 690 sci_out(port, SCxSR, ssr & ~(1 | SCxSR_RDxF(port)));
685 /* Calculate delay for 1.5 DMA buffers */ 691 dev_dbg(port->dev, "Rx IRQ %lu: setup t-out in %u jiffies\n",
686 tout = (port->timeout - HZ / 50) * s->buf_len_rx * 3 / 692 jiffies, s->rx_timeout);
687 port->fifosize / 2; 693 mod_timer(&s->rx_timer, jiffies + s->rx_timeout);
688 dev_dbg(port->dev, "Rx IRQ: setup timeout in %lu ms\n",
689 tout * 1000 / HZ);
690 if (tout < 2)
691 tout = 2;
692 mod_timer(&s->rx_timer, jiffies + tout);
693 694
694 return IRQ_HANDLED; 695 return IRQ_HANDLED;
695 } 696 }
@@ -799,7 +800,7 @@ static int sci_notifier(struct notifier_block *self,
799 (phase == CPUFREQ_RESUMECHANGE)) { 800 (phase == CPUFREQ_RESUMECHANGE)) {
800 spin_lock_irqsave(&priv->lock, flags); 801 spin_lock_irqsave(&priv->lock, flags);
801 list_for_each_entry(sci_port, &priv->ports, node) 802 list_for_each_entry(sci_port, &priv->ports, node)
802 sci_port->port.uartclk = clk_get_rate(sci_port->dclk); 803 sci_port->port.uartclk = clk_get_rate(sci_port->iclk);
803 spin_unlock_irqrestore(&priv->lock, flags); 804 spin_unlock_irqrestore(&priv->lock, flags);
804 } 805 }
805 806
@@ -810,21 +811,17 @@ static void sci_clk_enable(struct uart_port *port)
810{ 811{
811 struct sci_port *sci_port = to_sci_port(port); 812 struct sci_port *sci_port = to_sci_port(port);
812 813
813 clk_enable(sci_port->dclk); 814 clk_enable(sci_port->iclk);
814 sci_port->port.uartclk = clk_get_rate(sci_port->dclk); 815 sci_port->port.uartclk = clk_get_rate(sci_port->iclk);
815 816 clk_enable(sci_port->fclk);
816 if (sci_port->iclk)
817 clk_enable(sci_port->iclk);
818} 817}
819 818
820static void sci_clk_disable(struct uart_port *port) 819static void sci_clk_disable(struct uart_port *port)
821{ 820{
822 struct sci_port *sci_port = to_sci_port(port); 821 struct sci_port *sci_port = to_sci_port(port);
823 822
824 if (sci_port->iclk) 823 clk_disable(sci_port->fclk);
825 clk_disable(sci_port->iclk); 824 clk_disable(sci_port->iclk);
826
827 clk_disable(sci_port->dclk);
828} 825}
829 826
830static int sci_request_irq(struct sci_port *port) 827static int sci_request_irq(struct sci_port *port)
@@ -913,22 +910,26 @@ static void sci_dma_tx_complete(void *arg)
913 910
914 spin_lock_irqsave(&port->lock, flags); 911 spin_lock_irqsave(&port->lock, flags);
915 912
916 xmit->tail += s->sg_tx.length; 913 xmit->tail += sg_dma_len(&s->sg_tx);
917 xmit->tail &= UART_XMIT_SIZE - 1; 914 xmit->tail &= UART_XMIT_SIZE - 1;
918 915
919 port->icount.tx += s->sg_tx.length; 916 port->icount.tx += sg_dma_len(&s->sg_tx);
920 917
921 async_tx_ack(s->desc_tx); 918 async_tx_ack(s->desc_tx);
922 s->cookie_tx = -EINVAL; 919 s->cookie_tx = -EINVAL;
923 s->desc_tx = NULL; 920 s->desc_tx = NULL;
924 921
925 spin_unlock_irqrestore(&port->lock, flags);
926
927 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 922 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
928 uart_write_wakeup(port); 923 uart_write_wakeup(port);
929 924
930 if (uart_circ_chars_pending(xmit)) 925 if (!uart_circ_empty(xmit)) {
931 schedule_work(&s->work_tx); 926 schedule_work(&s->work_tx);
927 } else if (port->type == PORT_SCIFA) {
928 u16 ctrl = sci_in(port, SCSCR);
929 sci_out(port, SCSCR, ctrl & ~SCI_CTRL_FLAGS_TIE);
930 }
931
932 spin_unlock_irqrestore(&port->lock, flags);
932} 933}
933 934
934/* Locking: called with port lock held */ 935/* Locking: called with port lock held */
@@ -972,13 +973,13 @@ static void sci_dma_rx_complete(void *arg)
972 unsigned long flags; 973 unsigned long flags;
973 int count; 974 int count;
974 975
975 dev_dbg(port->dev, "%s(%d)\n", __func__, port->line); 976 dev_dbg(port->dev, "%s(%d) active #%d\n", __func__, port->line, s->active_rx);
976 977
977 spin_lock_irqsave(&port->lock, flags); 978 spin_lock_irqsave(&port->lock, flags);
978 979
979 count = sci_dma_rx_push(s, tty, s->buf_len_rx); 980 count = sci_dma_rx_push(s, tty, s->buf_len_rx);
980 981
981 mod_timer(&s->rx_timer, jiffies + msecs_to_jiffies(5)); 982 mod_timer(&s->rx_timer, jiffies + s->rx_timeout);
982 983
983 spin_unlock_irqrestore(&port->lock, flags); 984 spin_unlock_irqrestore(&port->lock, flags);
984 985
@@ -1050,6 +1051,8 @@ static void sci_submit_rx(struct sci_port *s)
1050 sci_rx_dma_release(s, true); 1051 sci_rx_dma_release(s, true);
1051 return; 1052 return;
1052 } 1053 }
1054 dev_dbg(s->port.dev, "%s(): cookie %d to #%d\n", __func__,
1055 s->cookie_rx[i], i);
1053 } 1056 }
1054 1057
1055 s->active_rx = s->cookie_rx[0]; 1058 s->active_rx = s->cookie_rx[0];
@@ -1107,10 +1110,10 @@ static void work_fn_rx(struct work_struct *work)
1107 return; 1110 return;
1108 } 1111 }
1109 1112
1110 dev_dbg(port->dev, "%s: cookie %d #%d\n", __func__,
1111 s->cookie_rx[new], new);
1112
1113 s->active_rx = s->cookie_rx[!new]; 1113 s->active_rx = s->cookie_rx[!new];
1114
1115 dev_dbg(port->dev, "%s: cookie %d #%d, new active #%d\n", __func__,
1116 s->cookie_rx[new], new, s->active_rx);
1114} 1117}
1115 1118
1116static void work_fn_tx(struct work_struct *work) 1119static void work_fn_tx(struct work_struct *work)
@@ -1131,14 +1134,13 @@ static void work_fn_tx(struct work_struct *work)
1131 */ 1134 */
1132 spin_lock_irq(&port->lock); 1135 spin_lock_irq(&port->lock);
1133 sg->offset = xmit->tail & (UART_XMIT_SIZE - 1); 1136 sg->offset = xmit->tail & (UART_XMIT_SIZE - 1);
1134 sg->dma_address = (sg_dma_address(sg) & ~(UART_XMIT_SIZE - 1)) + 1137 sg_dma_address(sg) = (sg_dma_address(sg) & ~(UART_XMIT_SIZE - 1)) +
1135 sg->offset; 1138 sg->offset;
1136 sg->length = min((int)CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE), 1139 sg_dma_len(sg) = min((int)CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE),
1137 CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE)); 1140 CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE));
1138 sg->dma_length = sg->length;
1139 spin_unlock_irq(&port->lock); 1141 spin_unlock_irq(&port->lock);
1140 1142
1141 BUG_ON(!sg->length); 1143 BUG_ON(!sg_dma_len(sg));
1142 1144
1143 desc = chan->device->device_prep_slave_sg(chan, 1145 desc = chan->device->device_prep_slave_sg(chan,
1144 sg, s->sg_len_tx, DMA_TO_DEVICE, 1146 sg, s->sg_len_tx, DMA_TO_DEVICE,
@@ -1173,23 +1175,28 @@ static void work_fn_tx(struct work_struct *work)
1173 1175
1174static void sci_start_tx(struct uart_port *port) 1176static void sci_start_tx(struct uart_port *port)
1175{ 1177{
1178 struct sci_port *s = to_sci_port(port);
1176 unsigned short ctrl; 1179 unsigned short ctrl;
1177 1180
1178#ifdef CONFIG_SERIAL_SH_SCI_DMA 1181#ifdef CONFIG_SERIAL_SH_SCI_DMA
1179 struct sci_port *s = to_sci_port(port); 1182 if (port->type == PORT_SCIFA) {
1180 1183 u16 new, scr = sci_in(port, SCSCR);
1181 if (s->chan_tx) { 1184 if (s->chan_tx)
1182 if (!uart_circ_empty(&s->port.state->xmit) && s->cookie_tx < 0) 1185 new = scr | 0x8000;
1183 schedule_work(&s->work_tx); 1186 else
1184 1187 new = scr & ~0x8000;
1185 return; 1188 if (new != scr)
1189 sci_out(port, SCSCR, new);
1186 } 1190 }
1191 if (s->chan_tx && !uart_circ_empty(&s->port.state->xmit) &&
1192 s->cookie_tx < 0)
1193 schedule_work(&s->work_tx);
1187#endif 1194#endif
1188 1195 if (!s->chan_tx || port->type == PORT_SCIFA) {
1189 /* Set TIE (Transmit Interrupt Enable) bit in SCSCR */ 1196 /* Set TIE (Transmit Interrupt Enable) bit in SCSCR */
1190 ctrl = sci_in(port, SCSCR); 1197 ctrl = sci_in(port, SCSCR);
1191 ctrl |= SCI_CTRL_FLAGS_TIE; 1198 sci_out(port, SCSCR, ctrl | SCI_CTRL_FLAGS_TIE);
1192 sci_out(port, SCSCR, ctrl); 1199 }
1193} 1200}
1194 1201
1195static void sci_stop_tx(struct uart_port *port) 1202static void sci_stop_tx(struct uart_port *port)
@@ -1198,6 +1205,8 @@ static void sci_stop_tx(struct uart_port *port)
1198 1205
1199 /* Clear TIE (Transmit Interrupt Enable) bit in SCSCR */ 1206 /* Clear TIE (Transmit Interrupt Enable) bit in SCSCR */
1200 ctrl = sci_in(port, SCSCR); 1207 ctrl = sci_in(port, SCSCR);
1208 if (port->type == PORT_SCIFA)
1209 ctrl &= ~0x8000;
1201 ctrl &= ~SCI_CTRL_FLAGS_TIE; 1210 ctrl &= ~SCI_CTRL_FLAGS_TIE;
1202 sci_out(port, SCSCR, ctrl); 1211 sci_out(port, SCSCR, ctrl);
1203} 1212}
@@ -1208,6 +1217,8 @@ static void sci_start_rx(struct uart_port *port)
1208 1217
1209 /* Set RIE (Receive Interrupt Enable) bit in SCSCR */ 1218 /* Set RIE (Receive Interrupt Enable) bit in SCSCR */
1210 ctrl |= sci_in(port, SCSCR); 1219 ctrl |= sci_in(port, SCSCR);
1220 if (port->type == PORT_SCIFA)
1221 ctrl &= ~0x4000;
1211 sci_out(port, SCSCR, ctrl); 1222 sci_out(port, SCSCR, ctrl);
1212} 1223}
1213 1224
@@ -1217,6 +1228,8 @@ static void sci_stop_rx(struct uart_port *port)
1217 1228
1218 /* Clear RIE (Receive Interrupt Enable) bit in SCSCR */ 1229 /* Clear RIE (Receive Interrupt Enable) bit in SCSCR */
1219 ctrl = sci_in(port, SCSCR); 1230 ctrl = sci_in(port, SCSCR);
1231 if (port->type == PORT_SCIFA)
1232 ctrl &= ~0x4000;
1220 ctrl &= ~(SCI_CTRL_FLAGS_RIE | SCI_CTRL_FLAGS_REIE); 1233 ctrl &= ~(SCI_CTRL_FLAGS_RIE | SCI_CTRL_FLAGS_REIE);
1221 sci_out(port, SCSCR, ctrl); 1234 sci_out(port, SCSCR, ctrl);
1222} 1235}
@@ -1251,8 +1264,12 @@ static void rx_timer_fn(unsigned long arg)
1251{ 1264{
1252 struct sci_port *s = (struct sci_port *)arg; 1265 struct sci_port *s = (struct sci_port *)arg;
1253 struct uart_port *port = &s->port; 1266 struct uart_port *port = &s->port;
1254
1255 u16 scr = sci_in(port, SCSCR); 1267 u16 scr = sci_in(port, SCSCR);
1268
1269 if (port->type == PORT_SCIFA) {
1270 scr &= ~0x4000;
1271 enable_irq(s->irqs[1]);
1272 }
1256 sci_out(port, SCSCR, scr | SCI_CTRL_FLAGS_RIE); 1273 sci_out(port, SCSCR, scr | SCI_CTRL_FLAGS_RIE);
1257 dev_dbg(port->dev, "DMA Rx timed out\n"); 1274 dev_dbg(port->dev, "DMA Rx timed out\n");
1258 schedule_work(&s->work_rx); 1275 schedule_work(&s->work_rx);
@@ -1339,8 +1356,7 @@ static void sci_request_dma(struct uart_port *port)
1339 sg_init_table(sg, 1); 1356 sg_init_table(sg, 1);
1340 sg_set_page(sg, virt_to_page(buf[i]), s->buf_len_rx, 1357 sg_set_page(sg, virt_to_page(buf[i]), s->buf_len_rx,
1341 (int)buf[i] & ~PAGE_MASK); 1358 (int)buf[i] & ~PAGE_MASK);
1342 sg->dma_address = dma[i]; 1359 sg_dma_address(sg) = dma[i];
1343 sg->dma_length = sg->length;
1344 } 1360 }
1345 1361
1346 INIT_WORK(&s->work_rx, work_fn_rx); 1362 INIT_WORK(&s->work_rx, work_fn_rx);
@@ -1403,8 +1419,12 @@ static void sci_shutdown(struct uart_port *port)
1403static void sci_set_termios(struct uart_port *port, struct ktermios *termios, 1419static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
1404 struct ktermios *old) 1420 struct ktermios *old)
1405{ 1421{
1422#ifdef CONFIG_SERIAL_SH_SCI_DMA
1423 struct sci_port *s = to_sci_port(port);
1424#endif
1406 unsigned int status, baud, smr_val, max_baud; 1425 unsigned int status, baud, smr_val, max_baud;
1407 int t = -1; 1426 int t = -1;
1427 u16 scfcr = 0;
1408 1428
1409 /* 1429 /*
1410 * earlyprintk comes here early on with port->uartclk set to zero. 1430 * earlyprintk comes here early on with port->uartclk set to zero.
@@ -1427,7 +1447,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
1427 sci_out(port, SCSCR, 0x00); /* TE=0, RE=0, CKE1=0 */ 1447 sci_out(port, SCSCR, 0x00); /* TE=0, RE=0, CKE1=0 */
1428 1448
1429 if (port->type != PORT_SCI) 1449 if (port->type != PORT_SCI)
1430 sci_out(port, SCFCR, SCFCR_RFRST | SCFCR_TFRST); 1450 sci_out(port, SCFCR, scfcr | SCFCR_RFRST | SCFCR_TFRST);
1431 1451
1432 smr_val = sci_in(port, SCSMR) & 3; 1452 smr_val = sci_in(port, SCSMR) & 3;
1433 if ((termios->c_cflag & CSIZE) == CS7) 1453 if ((termios->c_cflag & CSIZE) == CS7)
@@ -1458,10 +1478,32 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
1458 } 1478 }
1459 1479
1460 sci_init_pins(port, termios->c_cflag); 1480 sci_init_pins(port, termios->c_cflag);
1461 sci_out(port, SCFCR, (termios->c_cflag & CRTSCTS) ? SCFCR_MCE : 0); 1481 sci_out(port, SCFCR, scfcr | ((termios->c_cflag & CRTSCTS) ? SCFCR_MCE : 0));
1462 1482
1463 sci_out(port, SCSCR, SCSCR_INIT(port)); 1483 sci_out(port, SCSCR, SCSCR_INIT(port));
1464 1484
1485#ifdef CONFIG_SERIAL_SH_SCI_DMA
1486 /*
1487 * Calculate delay for 1.5 DMA buffers: see
1488 * drivers/serial/serial_core.c::uart_update_timeout(). With 10 bits
1489 * (CS8), 250Hz, 115200 baud and 64 bytes FIFO, the above function
1490 * calculates 1 jiffie for the data plus 5 jiffies for the "slop(e)."
1491 * Then below we calculate 3 jiffies (12ms) for 1.5 DMA buffers (3 FIFO
1492 * sizes), but it has been found out experimentally, that this is not
1493 * enough: the driver too often needlessly runs on a DMA timeout. 20ms
1494 * as a minimum seem to work perfectly.
1495 */
1496 if (s->chan_rx) {
1497 s->rx_timeout = (port->timeout - HZ / 50) * s->buf_len_rx * 3 /
1498 port->fifosize / 2;
1499 dev_dbg(port->dev,
1500 "DMA Rx t-out %ums, tty t-out %u jiffies\n",
1501 s->rx_timeout * 1000 / HZ, port->timeout);
1502 if (s->rx_timeout < msecs_to_jiffies(20))
1503 s->rx_timeout = msecs_to_jiffies(20);
1504 }
1505#endif
1506
1465 if ((termios->c_cflag & CREAD) != 0) 1507 if ((termios->c_cflag & CREAD) != 0)
1466 sci_start_rx(port); 1508 sci_start_rx(port);
1467} 1509}
@@ -1553,10 +1595,10 @@ static struct uart_ops sci_uart_ops = {
1553#endif 1595#endif
1554}; 1596};
1555 1597
1556static void __devinit sci_init_single(struct platform_device *dev, 1598static int __devinit sci_init_single(struct platform_device *dev,
1557 struct sci_port *sci_port, 1599 struct sci_port *sci_port,
1558 unsigned int index, 1600 unsigned int index,
1559 struct plat_sci_port *p) 1601 struct plat_sci_port *p)
1560{ 1602{
1561 struct uart_port *port = &sci_port->port; 1603 struct uart_port *port = &sci_port->port;
1562 1604
@@ -1577,8 +1619,23 @@ static void __devinit sci_init_single(struct platform_device *dev,
1577 } 1619 }
1578 1620
1579 if (dev) { 1621 if (dev) {
1580 sci_port->iclk = p->clk ? clk_get(&dev->dev, p->clk) : NULL; 1622 sci_port->iclk = clk_get(&dev->dev, "sci_ick");
1581 sci_port->dclk = clk_get(&dev->dev, "peripheral_clk"); 1623 if (IS_ERR(sci_port->iclk)) {
1624 sci_port->iclk = clk_get(&dev->dev, "peripheral_clk");
1625 if (IS_ERR(sci_port->iclk)) {
1626 dev_err(&dev->dev, "can't get iclk\n");
1627 return PTR_ERR(sci_port->iclk);
1628 }
1629 }
1630
1631 /*
1632 * The function clock is optional, ignore it if we can't
1633 * find it.
1634 */
1635 sci_port->fclk = clk_get(&dev->dev, "sci_fck");
1636 if (IS_ERR(sci_port->fclk))
1637 sci_port->fclk = NULL;
1638
1582 sci_port->enable = sci_clk_enable; 1639 sci_port->enable = sci_clk_enable;
1583 sci_port->disable = sci_clk_disable; 1640 sci_port->disable = sci_clk_disable;
1584 port->dev = &dev->dev; 1641 port->dev = &dev->dev;
@@ -1605,6 +1662,7 @@ static void __devinit sci_init_single(struct platform_device *dev,
1605#endif 1662#endif
1606 1663
1607 memcpy(&sci_port->irqs, &p->irqs, sizeof(p->irqs)); 1664 memcpy(&sci_port->irqs, &p->irqs, sizeof(p->irqs));
1665 return 0;
1608} 1666}
1609 1667
1610#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE 1668#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
@@ -1754,8 +1812,11 @@ static int sci_remove(struct platform_device *dev)
1754 cpufreq_unregister_notifier(&priv->clk_nb, CPUFREQ_TRANSITION_NOTIFIER); 1812 cpufreq_unregister_notifier(&priv->clk_nb, CPUFREQ_TRANSITION_NOTIFIER);
1755 1813
1756 spin_lock_irqsave(&priv->lock, flags); 1814 spin_lock_irqsave(&priv->lock, flags);
1757 list_for_each_entry(p, &priv->ports, node) 1815 list_for_each_entry(p, &priv->ports, node) {
1758 uart_remove_one_port(&sci_uart_driver, &p->port); 1816 uart_remove_one_port(&sci_uart_driver, &p->port);
1817 clk_put(p->iclk);
1818 clk_put(p->fclk);
1819 }
1759 spin_unlock_irqrestore(&priv->lock, flags); 1820 spin_unlock_irqrestore(&priv->lock, flags);
1760 1821
1761 kfree(priv); 1822 kfree(priv);
@@ -1781,7 +1842,9 @@ static int __devinit sci_probe_single(struct platform_device *dev,
1781 return 0; 1842 return 0;
1782 } 1843 }
1783 1844
1784 sci_init_single(dev, sciport, index, p); 1845 ret = sci_init_single(dev, sciport, index, p);
1846 if (ret)
1847 return ret;
1785 1848
1786 ret = uart_add_one_port(&sci_uart_driver, &sciport->port); 1849 ret = uart_add_one_port(&sci_uart_driver, &sciport->port);
1787 if (ret) 1850 if (ret)
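
Note on the sh-sci DMA timeout rework above: the Rx timeout is now computed once in sci_set_termios() instead of on every Rx interrupt, and the long comment in that hunk works through the numbers. A rough check of that arithmetic with the values the comment quotes (HZ=250, 64-byte FIFO, roughly 6 jiffies of port timeout); port_timeout and buf_len_rx below are assumptions standing in for the driver's runtime values:

#include <stdio.h>

#define HZ 250

int main(void)
{
	unsigned int port_timeout = 6;	/* ~1 jiffy of data + 5 jiffies of slop */
	unsigned int buf_len_rx   = 128;/* assumed: two 64-byte FIFOs per DMA buffer */
	unsigned int fifosize     = 64;

	unsigned int rx_timeout =
		(port_timeout - HZ / 50) * buf_len_rx * 3 / fifosize / 2;

	printf("raw timeout:     %u jiffies (%u ms)\n",
	       rx_timeout, rx_timeout * 1000 / HZ);

	/* the driver clamps to a 20 ms floor (msecs_to_jiffies(20)) */
	if (rx_timeout < 20 * HZ / 1000)
		rx_timeout = 20 * HZ / 1000;

	printf("clamped timeout: %u jiffies (%u ms)\n",
	       rx_timeout, rx_timeout * 1000 / HZ);
	return 0;
}
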
diff --git a/drivers/sh/Kconfig b/drivers/sh/Kconfig
new file mode 100644
index 000000000000..a54de0b9b3df
--- /dev/null
+++ b/drivers/sh/Kconfig
@@ -0,0 +1,24 @@
1config INTC_USERIMASK
2 bool "Userspace interrupt masking support"
3 depends on ARCH_SHMOBILE || (SUPERH && CPU_SH4A)
4 help
5 This enables support for hardware-assisted userspace hardirq
6 masking.
7
8 SH-4A and newer interrupt blocks all support a special shadowed
9 page with all non-masking registers obscured when mapped in to
10 userspace. This is primarily for use by userspace device
11 drivers that are using special priority levels.
12
13 If in doubt, say N.
14
15config INTC_BALANCING
16 bool "Hardware IRQ balancing support"
17 depends on SMP && SUPERH && CPU_SUBTYPE_SH7786
18 help
19 This enables support for IRQ auto-distribution mode on SH-X3
20 SMP parts. All of the balancing and CPU wakeup decisions are
21 taken care of automatically by hardware for distributed
22 vectors.
23
24 If in doubt, say N.
diff --git a/drivers/sh/Makefile b/drivers/sh/Makefile
index 4956bf1f2134..78bb5127abd0 100644
--- a/drivers/sh/Makefile
+++ b/drivers/sh/Makefile
@@ -4,4 +4,6 @@
4obj-$(CONFIG_SUPERHYWAY) += superhyway/ 4obj-$(CONFIG_SUPERHYWAY) += superhyway/
5obj-$(CONFIG_MAPLE) += maple/ 5obj-$(CONFIG_MAPLE) += maple/
6obj-$(CONFIG_GENERIC_GPIO) += pfc.o 6obj-$(CONFIG_GENERIC_GPIO) += pfc.o
7obj-$(CONFIG_SUPERH) += clk.o
8obj-$(CONFIG_SH_CLK_CPG) += clk-cpg.o
7obj-y += intc.o 9obj-y += intc.o
diff --git a/drivers/sh/clk-cpg.c b/drivers/sh/clk-cpg.c
new file mode 100644
index 000000000000..f5c80ba9ab1c
--- /dev/null
+++ b/drivers/sh/clk-cpg.c
@@ -0,0 +1,298 @@
1#include <linux/clk.h>
2#include <linux/compiler.h>
3#include <linux/slab.h>
4#include <linux/io.h>
5#include <linux/sh_clk.h>
6
7static int sh_clk_mstp32_enable(struct clk *clk)
8{
9 __raw_writel(__raw_readl(clk->enable_reg) & ~(1 << clk->enable_bit),
10 clk->enable_reg);
11 return 0;
12}
13
14static void sh_clk_mstp32_disable(struct clk *clk)
15{
16 __raw_writel(__raw_readl(clk->enable_reg) | (1 << clk->enable_bit),
17 clk->enable_reg);
18}
19
20static struct clk_ops sh_clk_mstp32_clk_ops = {
21 .enable = sh_clk_mstp32_enable,
22 .disable = sh_clk_mstp32_disable,
23 .recalc = followparent_recalc,
24};
25
26int __init sh_clk_mstp32_register(struct clk *clks, int nr)
27{
28 struct clk *clkp;
29 int ret = 0;
30 int k;
31
32 for (k = 0; !ret && (k < nr); k++) {
33 clkp = clks + k;
34 clkp->ops = &sh_clk_mstp32_clk_ops;
35 ret |= clk_register(clkp);
36 }
37
38 return ret;
39}
40
41static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
42{
43 return clk_rate_table_round(clk, clk->freq_table, rate);
44}
45
46static int sh_clk_div6_divisors[64] = {
47 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
48 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
49 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
50 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
51};
52
53static struct clk_div_mult_table sh_clk_div6_table = {
54 .divisors = sh_clk_div6_divisors,
55 .nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
56};
57
58static unsigned long sh_clk_div6_recalc(struct clk *clk)
59{
60 struct clk_div_mult_table *table = &sh_clk_div6_table;
61 unsigned int idx;
62
63 clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
64 table, NULL);
65
66 idx = __raw_readl(clk->enable_reg) & 0x003f;
67
68 return clk->freq_table[idx].frequency;
69}
70
71static int sh_clk_div6_set_rate(struct clk *clk,
72 unsigned long rate, int algo_id)
73{
74 unsigned long value;
75 int idx;
76
77 idx = clk_rate_table_find(clk, clk->freq_table, rate);
78 if (idx < 0)
79 return idx;
80
81 value = __raw_readl(clk->enable_reg);
82 value &= ~0x3f;
83 value |= idx;
84 __raw_writel(value, clk->enable_reg);
85 return 0;
86}
87
88static int sh_clk_div6_enable(struct clk *clk)
89{
90 unsigned long value;
91 int ret;
92
93 ret = sh_clk_div6_set_rate(clk, clk->rate, 0);
94 if (ret == 0) {
95 value = __raw_readl(clk->enable_reg);
96 value &= ~0x100; /* clear stop bit to enable clock */
97 __raw_writel(value, clk->enable_reg);
98 }
99 return ret;
100}
101
102static void sh_clk_div6_disable(struct clk *clk)
103{
104 unsigned long value;
105
106 value = __raw_readl(clk->enable_reg);
107 value |= 0x100; /* stop clock */
108 value |= 0x3f; /* VDIV bits must be non-zero, overwrite divider */
109 __raw_writel(value, clk->enable_reg);
110}
111
112static struct clk_ops sh_clk_div6_clk_ops = {
113 .recalc = sh_clk_div6_recalc,
114 .round_rate = sh_clk_div_round_rate,
115 .set_rate = sh_clk_div6_set_rate,
116 .enable = sh_clk_div6_enable,
117 .disable = sh_clk_div6_disable,
118};
119
120int __init sh_clk_div6_register(struct clk *clks, int nr)
121{
122 struct clk *clkp;
123 void *freq_table;
124 int nr_divs = sh_clk_div6_table.nr_divisors;
125 int freq_table_size = sizeof(struct cpufreq_frequency_table);
126 int ret = 0;
127 int k;
128
129 freq_table_size *= (nr_divs + 1);
130 freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
131 if (!freq_table) {
132 pr_err("sh_clk_div6_register: unable to alloc memory\n");
133 return -ENOMEM;
134 }
135
136 for (k = 0; !ret && (k < nr); k++) {
137 clkp = clks + k;
138
139 clkp->ops = &sh_clk_div6_clk_ops;
140 clkp->id = -1;
141 clkp->freq_table = freq_table + (k * freq_table_size);
142 clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;
143
144 ret = clk_register(clkp);
145 }
146
147 return ret;
148}
149
150static unsigned long sh_clk_div4_recalc(struct clk *clk)
151{
152 struct clk_div4_table *d4t = clk->priv;
153 struct clk_div_mult_table *table = d4t->div_mult_table;
154 unsigned int idx;
155
156 clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
157 table, &clk->arch_flags);
158
159 idx = (__raw_readl(clk->enable_reg) >> clk->enable_bit) & 0x000f;
160
161 return clk->freq_table[idx].frequency;
162}
163
164static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
165{
166 struct clk_div4_table *d4t = clk->priv;
167 struct clk_div_mult_table *table = d4t->div_mult_table;
168 u32 value;
169 int ret;
170
171 /* we really need a better way to determine parent index, but for
172 * now assume internal parent comes with CLK_ENABLE_ON_INIT set,
173 * no CLK_ENABLE_ON_INIT means external clock...
174 */
175
176 if (parent->flags & CLK_ENABLE_ON_INIT)
177 value = __raw_readl(clk->enable_reg) & ~(1 << 7);
178 else
179 value = __raw_readl(clk->enable_reg) | (1 << 7);
180
181 ret = clk_reparent(clk, parent);
182 if (ret < 0)
183 return ret;
184
185 __raw_writel(value, clk->enable_reg);
186
187 /* Rebuild the frequency table */
188 clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
189 table, &clk->arch_flags);
190
191 return 0;
192}
193
194static int sh_clk_div4_set_rate(struct clk *clk, unsigned long rate, int algo_id)
195{
196 struct clk_div4_table *d4t = clk->priv;
197 unsigned long value;
198 int idx = clk_rate_table_find(clk, clk->freq_table, rate);
199 if (idx < 0)
200 return idx;
201
202 value = __raw_readl(clk->enable_reg);
203 value &= ~(0xf << clk->enable_bit);
204 value |= (idx << clk->enable_bit);
205 __raw_writel(value, clk->enable_reg);
206
207 if (d4t->kick)
208 d4t->kick(clk);
209
210 return 0;
211}
212
213static int sh_clk_div4_enable(struct clk *clk)
214{
215 __raw_writel(__raw_readl(clk->enable_reg) & ~(1 << 8), clk->enable_reg);
216 return 0;
217}
218
219static void sh_clk_div4_disable(struct clk *clk)
220{
221 __raw_writel(__raw_readl(clk->enable_reg) | (1 << 8), clk->enable_reg);
222}
223
224static struct clk_ops sh_clk_div4_clk_ops = {
225 .recalc = sh_clk_div4_recalc,
226 .set_rate = sh_clk_div4_set_rate,
227 .round_rate = sh_clk_div_round_rate,
228};
229
230static struct clk_ops sh_clk_div4_enable_clk_ops = {
231 .recalc = sh_clk_div4_recalc,
232 .set_rate = sh_clk_div4_set_rate,
233 .round_rate = sh_clk_div_round_rate,
234 .enable = sh_clk_div4_enable,
235 .disable = sh_clk_div4_disable,
236};
237
238static struct clk_ops sh_clk_div4_reparent_clk_ops = {
239 .recalc = sh_clk_div4_recalc,
240 .set_rate = sh_clk_div4_set_rate,
241 .round_rate = sh_clk_div_round_rate,
242 .enable = sh_clk_div4_enable,
243 .disable = sh_clk_div4_disable,
244 .set_parent = sh_clk_div4_set_parent,
245};
246
247static int __init sh_clk_div4_register_ops(struct clk *clks, int nr,
248 struct clk_div4_table *table, struct clk_ops *ops)
249{
250 struct clk *clkp;
251 void *freq_table;
252 int nr_divs = table->div_mult_table->nr_divisors;
253 int freq_table_size = sizeof(struct cpufreq_frequency_table);
254 int ret = 0;
255 int k;
256
257 freq_table_size *= (nr_divs + 1);
258 freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
259 if (!freq_table) {
260 pr_err("sh_clk_div4_register: unable to alloc memory\n");
261 return -ENOMEM;
262 }
263
264 for (k = 0; !ret && (k < nr); k++) {
265 clkp = clks + k;
266
267 clkp->ops = ops;
268 clkp->id = -1;
269 clkp->priv = table;
270
271 clkp->freq_table = freq_table + (k * freq_table_size);
272 clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;
273
274 ret = clk_register(clkp);
275 }
276
277 return ret;
278}
279
280int __init sh_clk_div4_register(struct clk *clks, int nr,
281 struct clk_div4_table *table)
282{
283 return sh_clk_div4_register_ops(clks, nr, table, &sh_clk_div4_clk_ops);
284}
285
286int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
287 struct clk_div4_table *table)
288{
289 return sh_clk_div4_register_ops(clks, nr, table,
290 &sh_clk_div4_enable_clk_ops);
291}
292
293int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
294 struct clk_div4_table *table)
295{
296 return sh_clk_div4_register_ops(clks, nr, table,
297 &sh_clk_div4_reparent_clk_ops);
298}
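clk-cpg.c provides registration helpers for the common CPG clock types: MSTP32 gate clocks plus DIV6 and DIV4 dividers. A minimal sketch of a CPU clock table using the MSTP32 helper; the register address and bit number are invented, and the struct clk field names (.parent, .enable_reg, .enable_bit) are assumed to match <linux/sh_clk.h>:

/*
 * Sketch only: a real table lives in the SoC's clock setup code; MSTPCR0
 * and bit 24 are hypothetical values.
 */
#include <linux/kernel.h>
#include <linux/sh_clk.h>

#define MSTPCR0	((void __iomem *)0xffc80030)	/* hypothetical standby register */

static struct clk example_mstp_clks[] = {
	[0] = {
		.parent		= NULL,		/* normally a peripheral clock */
		.enable_reg	= MSTPCR0,	/* bit cleared to run, set to stop */
		.enable_bit	= 24,
	},
};

static int __init example_mstp_setup(void)
{
	/* attaches sh_clk_mstp32_clk_ops and registers each entry */
	return sh_clk_mstp32_register(example_mstp_clks,
				      ARRAY_SIZE(example_mstp_clks));
}

sh_clk_div6_register() and sh_clk_div4_register() are used the same way, with the divider tables supplying the per-clock frequency entries.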
diff --git a/drivers/sh/clk.c b/drivers/sh/clk.c
new file mode 100644
index 000000000000..5d84adac9ec4
--- /dev/null
+++ b/drivers/sh/clk.c
@@ -0,0 +1,545 @@
1/*
2 * drivers/sh/clk.c - SuperH clock framework
3 *
4 * Copyright (C) 2005 - 2009 Paul Mundt
5 *
6 * This clock framework is derived from the OMAP version by:
7 *
8 * Copyright (C) 2004 - 2008 Nokia Corporation
9 * Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
10 *
11 * Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
12 *
13 * This file is subject to the terms and conditions of the GNU General Public
14 * License. See the file "COPYING" in the main directory of this archive
15 * for more details.
16 */
17#include <linux/kernel.h>
18#include <linux/init.h>
19#include <linux/module.h>
20#include <linux/mutex.h>
21#include <linux/list.h>
22#include <linux/kobject.h>
23#include <linux/sysdev.h>
24#include <linux/seq_file.h>
25#include <linux/err.h>
26#include <linux/platform_device.h>
27#include <linux/debugfs.h>
28#include <linux/cpufreq.h>
29#include <linux/clk.h>
30#include <linux/sh_clk.h>
31
32static LIST_HEAD(clock_list);
33static DEFINE_SPINLOCK(clock_lock);
34static DEFINE_MUTEX(clock_list_sem);
35
36void clk_rate_table_build(struct clk *clk,
37 struct cpufreq_frequency_table *freq_table,
38 int nr_freqs,
39 struct clk_div_mult_table *src_table,
40 unsigned long *bitmap)
41{
42 unsigned long mult, div;
43 unsigned long freq;
44 int i;
45
46 for (i = 0; i < nr_freqs; i++) {
47 div = 1;
48 mult = 1;
49
50 if (src_table->divisors && i < src_table->nr_divisors)
51 div = src_table->divisors[i];
52
53 if (src_table->multipliers && i < src_table->nr_multipliers)
54 mult = src_table->multipliers[i];
55
56 if (!div || !mult || (bitmap && !test_bit(i, bitmap)))
57 freq = CPUFREQ_ENTRY_INVALID;
58 else
59 freq = clk->parent->rate * mult / div;
60
61 freq_table[i].index = i;
62 freq_table[i].frequency = freq;
63 }
64
65 /* Termination entry */
66 freq_table[i].index = i;
67 freq_table[i].frequency = CPUFREQ_TABLE_END;
68}
69
70long clk_rate_table_round(struct clk *clk,
71 struct cpufreq_frequency_table *freq_table,
72 unsigned long rate)
73{
74 unsigned long rate_error, rate_error_prev = ~0UL;
75 unsigned long rate_best_fit = rate;
76 unsigned long highest, lowest;
77 int i;
78
79 highest = lowest = 0;
80
81 for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
82 unsigned long freq = freq_table[i].frequency;
83
84 if (freq == CPUFREQ_ENTRY_INVALID)
85 continue;
86
87 if (freq > highest)
88 highest = freq;
89 if (freq < lowest)
90 lowest = freq;
91
92 rate_error = abs(freq - rate);
93 if (rate_error < rate_error_prev) {
94 rate_best_fit = freq;
95 rate_error_prev = rate_error;
96 }
97
98 if (rate_error == 0)
99 break;
100 }
101
102 if (rate >= highest)
103 rate_best_fit = highest;
104 if (rate <= lowest)
105 rate_best_fit = lowest;
106
107 return rate_best_fit;
108}
109
110int clk_rate_table_find(struct clk *clk,
111 struct cpufreq_frequency_table *freq_table,
112 unsigned long rate)
113{
114 int i;
115
116 for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
117 unsigned long freq = freq_table[i].frequency;
118
119 if (freq == CPUFREQ_ENTRY_INVALID)
120 continue;
121
122 if (freq == rate)
123 return i;
124 }
125
126 return -ENOENT;
127}
128
129/* Used for clocks that always have the same value as the parent clock */
130unsigned long followparent_recalc(struct clk *clk)
131{
132 return clk->parent ? clk->parent->rate : 0;
133}
134
135int clk_reparent(struct clk *child, struct clk *parent)
136{
137 list_del_init(&child->sibling);
138 if (parent)
139 list_add(&child->sibling, &parent->children);
140 child->parent = parent;
141
142 /* now do the debugfs renaming to reattach the child
143 to the proper parent */
144
145 return 0;
146}
147
148/* Propagate rate to children */
149void propagate_rate(struct clk *tclk)
150{
151 struct clk *clkp;
152
153 list_for_each_entry(clkp, &tclk->children, sibling) {
154 if (clkp->ops && clkp->ops->recalc)
155 clkp->rate = clkp->ops->recalc(clkp);
156
157 propagate_rate(clkp);
158 }
159}
160
161static void __clk_disable(struct clk *clk)
162{
163 if (WARN(!clk->usecount, "Trying to disable clock %s with 0 usecount\n",
164 clk->name))
165 return;
166
167 if (!(--clk->usecount)) {
168 if (likely(clk->ops && clk->ops->disable))
169 clk->ops->disable(clk);
170 if (likely(clk->parent))
171 __clk_disable(clk->parent);
172 }
173}
174
175void clk_disable(struct clk *clk)
176{
177 unsigned long flags;
178
179 if (!clk)
180 return;
181
182 spin_lock_irqsave(&clock_lock, flags);
183 __clk_disable(clk);
184 spin_unlock_irqrestore(&clock_lock, flags);
185}
186EXPORT_SYMBOL_GPL(clk_disable);
187
188static int __clk_enable(struct clk *clk)
189{
190 int ret = 0;
191
192 if (clk->usecount++ == 0) {
193 if (clk->parent) {
194 ret = __clk_enable(clk->parent);
195 if (unlikely(ret))
196 goto err;
197 }
198
199 if (clk->ops && clk->ops->enable) {
200 ret = clk->ops->enable(clk);
201 if (ret) {
202 if (clk->parent)
203 __clk_disable(clk->parent);
204 goto err;
205 }
206 }
207 }
208
209 return ret;
210err:
211 clk->usecount--;
212 return ret;
213}
214
215int clk_enable(struct clk *clk)
216{
217 unsigned long flags;
218 int ret;
219
220 if (!clk)
221 return -EINVAL;
222
223 spin_lock_irqsave(&clock_lock, flags);
224 ret = __clk_enable(clk);
225 spin_unlock_irqrestore(&clock_lock, flags);
226
227 return ret;
228}
229EXPORT_SYMBOL_GPL(clk_enable);
230
231static LIST_HEAD(root_clks);
232
233/**
234 * recalculate_root_clocks - recalculate and propagate all root clocks
235 *
236 * Recalculates all root clocks (clocks with no parent), which, if the
237 * clock's .recalc is set correctly, should also propagate their rates.
238 * Called at init.
239 */
240void recalculate_root_clocks(void)
241{
242 struct clk *clkp;
243
244 list_for_each_entry(clkp, &root_clks, sibling) {
245 if (clkp->ops && clkp->ops->recalc)
246 clkp->rate = clkp->ops->recalc(clkp);
247 propagate_rate(clkp);
248 }
249}
250
251int clk_register(struct clk *clk)
252{
253 if (clk == NULL || IS_ERR(clk))
254 return -EINVAL;
255
256 /*
257 * trap out already registered clocks
258 */
259 if (clk->node.next || clk->node.prev)
260 return 0;
261
262 mutex_lock(&clock_list_sem);
263
264 INIT_LIST_HEAD(&clk->children);
265 clk->usecount = 0;
266
267 if (clk->parent)
268 list_add(&clk->sibling, &clk->parent->children);
269 else
270 list_add(&clk->sibling, &root_clks);
271
272 list_add(&clk->node, &clock_list);
273 if (clk->ops && clk->ops->init)
274 clk->ops->init(clk);
275 mutex_unlock(&clock_list_sem);
276
277 return 0;
278}
279EXPORT_SYMBOL_GPL(clk_register);
280
281void clk_unregister(struct clk *clk)
282{
283 mutex_lock(&clock_list_sem);
284 list_del(&clk->sibling);
285 list_del(&clk->node);
286 mutex_unlock(&clock_list_sem);
287}
288EXPORT_SYMBOL_GPL(clk_unregister);
289
290void clk_enable_init_clocks(void)
291{
292 struct clk *clkp;
293
294 list_for_each_entry(clkp, &clock_list, node)
295 if (clkp->flags & CLK_ENABLE_ON_INIT)
296 clk_enable(clkp);
297}
298
299unsigned long clk_get_rate(struct clk *clk)
300{
301 return clk->rate;
302}
303EXPORT_SYMBOL_GPL(clk_get_rate);
304
305int clk_set_rate(struct clk *clk, unsigned long rate)
306{
307 return clk_set_rate_ex(clk, rate, 0);
308}
309EXPORT_SYMBOL_GPL(clk_set_rate);
310
311int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id)
312{
313 int ret = -EOPNOTSUPP;
314 unsigned long flags;
315
316 spin_lock_irqsave(&clock_lock, flags);
317
318 if (likely(clk->ops && clk->ops->set_rate)) {
319 ret = clk->ops->set_rate(clk, rate, algo_id);
320 if (ret != 0)
321 goto out_unlock;
322 } else {
323 clk->rate = rate;
324 ret = 0;
325 }
326
327 if (clk->ops && clk->ops->recalc)
328 clk->rate = clk->ops->recalc(clk);
329
330 propagate_rate(clk);
331
332out_unlock:
333 spin_unlock_irqrestore(&clock_lock, flags);
334
335 return ret;
336}
337EXPORT_SYMBOL_GPL(clk_set_rate_ex);
338
339int clk_set_parent(struct clk *clk, struct clk *parent)
340{
341 unsigned long flags;
342 int ret = -EINVAL;
343
344 if (!parent || !clk)
345 return ret;
346 if (clk->parent == parent)
347 return 0;
348
349 spin_lock_irqsave(&clock_lock, flags);
350 if (clk->usecount == 0) {
351 if (clk->ops->set_parent)
352 ret = clk->ops->set_parent(clk, parent);
353 else
354 ret = clk_reparent(clk, parent);
355
356 if (ret == 0) {
357 pr_debug("clock: set parent of %s to %s (new rate %ld)\n",
358 clk->name, clk->parent->name, clk->rate);
359 if (clk->ops->recalc)
360 clk->rate = clk->ops->recalc(clk);
361 propagate_rate(clk);
362 }
363 } else
364 ret = -EBUSY;
365 spin_unlock_irqrestore(&clock_lock, flags);
366
367 return ret;
368}
369EXPORT_SYMBOL_GPL(clk_set_parent);
370
371struct clk *clk_get_parent(struct clk *clk)
372{
373 return clk->parent;
374}
375EXPORT_SYMBOL_GPL(clk_get_parent);
376
377long clk_round_rate(struct clk *clk, unsigned long rate)
378{
379 if (likely(clk->ops && clk->ops->round_rate)) {
380 unsigned long flags, rounded;
381
382 spin_lock_irqsave(&clock_lock, flags);
383 rounded = clk->ops->round_rate(clk, rate);
384 spin_unlock_irqrestore(&clock_lock, flags);
385
386 return rounded;
387 }
388
389 return clk_get_rate(clk);
390}
391EXPORT_SYMBOL_GPL(clk_round_rate);
392
393#ifdef CONFIG_PM
394static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state)
395{
396 static pm_message_t prev_state;
397 struct clk *clkp;
398
399 switch (state.event) {
400 case PM_EVENT_ON:
401 /* Resuming from hibernation */
402 if (prev_state.event != PM_EVENT_FREEZE)
403 break;
404
405 list_for_each_entry(clkp, &clock_list, node) {
406 if (likely(clkp->ops)) {
407 unsigned long rate = clkp->rate;
408
409 if (likely(clkp->ops->set_parent))
410 clkp->ops->set_parent(clkp,
411 clkp->parent);
412 if (likely(clkp->ops->set_rate))
413 clkp->ops->set_rate(clkp,
414 rate, NO_CHANGE);
415 else if (likely(clkp->ops->recalc))
416 clkp->rate = clkp->ops->recalc(clkp);
417 }
418 }
419 break;
420 case PM_EVENT_FREEZE:
421 break;
422 case PM_EVENT_SUSPEND:
423 break;
424 }
425
426 prev_state = state;
427 return 0;
428}
429
430static int clks_sysdev_resume(struct sys_device *dev)
431{
432 return clks_sysdev_suspend(dev, PMSG_ON);
433}
434
435static struct sysdev_class clks_sysdev_class = {
436 .name = "clks",
437};
438
439static struct sysdev_driver clks_sysdev_driver = {
440 .suspend = clks_sysdev_suspend,
441 .resume = clks_sysdev_resume,
442};
443
444static struct sys_device clks_sysdev_dev = {
445 .cls = &clks_sysdev_class,
446};
447
448static int __init clk_sysdev_init(void)
449{
450 sysdev_class_register(&clks_sysdev_class);
451 sysdev_driver_register(&clks_sysdev_class, &clks_sysdev_driver);
452 sysdev_register(&clks_sysdev_dev);
453
454 return 0;
455}
456subsys_initcall(clk_sysdev_init);
457#endif
458
459/*
460 * debugfs support to trace clock tree hierarchy and attributes
461 */
462static struct dentry *clk_debugfs_root;
463
464static int clk_debugfs_register_one(struct clk *c)
465{
466 int err;
467 struct dentry *d, *child, *child_tmp;
468 struct clk *pa = c->parent;
469 char s[255];
470 char *p = s;
471
472 p += sprintf(p, "%s", c->name);
473 if (c->id >= 0)
474 sprintf(p, ":%d", c->id);
475 d = debugfs_create_dir(s, pa ? pa->dentry : clk_debugfs_root);
476 if (!d)
477 return -ENOMEM;
478 c->dentry = d;
479
480 d = debugfs_create_u8("usecount", S_IRUGO, c->dentry, (u8 *)&c->usecount);
481 if (!d) {
482 err = -ENOMEM;
483 goto err_out;
484 }
485 d = debugfs_create_u32("rate", S_IRUGO, c->dentry, (u32 *)&c->rate);
486 if (!d) {
487 err = -ENOMEM;
488 goto err_out;
489 }
490 d = debugfs_create_x32("flags", S_IRUGO, c->dentry, (u32 *)&c->flags);
491 if (!d) {
492 err = -ENOMEM;
493 goto err_out;
494 }
495 return 0;
496
497err_out:
498 d = c->dentry;
499 list_for_each_entry_safe(child, child_tmp, &d->d_subdirs, d_u.d_child)
500 debugfs_remove(child);
501 debugfs_remove(c->dentry);
502 return err;
503}
504
505static int clk_debugfs_register(struct clk *c)
506{
507 int err;
508 struct clk *pa = c->parent;
509
510 if (pa && !pa->dentry) {
511 err = clk_debugfs_register(pa);
512 if (err)
513 return err;
514 }
515
516 if (!c->dentry && c->name) {
517 err = clk_debugfs_register_one(c);
518 if (err)
519 return err;
520 }
521 return 0;
522}
523
524static int __init clk_debugfs_init(void)
525{
526 struct clk *c;
527 struct dentry *d;
528 int err;
529
530 d = debugfs_create_dir("clock", NULL);
531 if (!d)
532 return -ENOMEM;
533 clk_debugfs_root = d;
534
535 list_for_each_entry(c, &clock_list, node) {
536 err = clk_debugfs_register(c);
537 if (err)
538 goto err_out;
539 }
540 return 0;
541err_out:
542 debugfs_remove_recursive(clk_debugfs_root);
543 return err;
544}
545late_initcall(clk_debugfs_init);
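clk.c carries the per-clock ops table, the usecount-based enable/disable paths, parent/child rate propagation, and the sysdev/debugfs plumbing. A minimal sketch of a clock that simply follows its parent, using only interfaces defined in the file above; the clock itself is illustrative, with no real hardware behind it:

/*
 * Sketch only: "example_clk" mirrors its parent's rate via
 * followparent_recalc(); on real hardware .parent would be set.
 */
#include <linux/init.h>
#include <linux/sh_clk.h>

static struct clk_ops example_clk_ops = {
	.recalc	= followparent_recalc,	/* rate always follows the parent */
};

static struct clk example_clk = {
	.name	= "example_clk",
	.ops	= &example_clk_ops,
	/* .parent would point at a real parent clock */
};

static int __init example_clk_setup(void)
{
	int ret = clk_register(&example_clk);

	if (!ret)
		propagate_rate(&example_clk);	/* push the rate to children */
	return ret;
}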
diff --git a/drivers/sh/intc.c b/drivers/sh/intc.c
index 94ad6bd86a00..c585574b9aed 100644
--- a/drivers/sh/intc.c
+++ b/drivers/sh/intc.c
@@ -28,6 +28,7 @@
28#include <linux/topology.h> 28#include <linux/topology.h>
29#include <linux/bitmap.h> 29#include <linux/bitmap.h>
30#include <linux/cpumask.h> 30#include <linux/cpumask.h>
31#include <asm/sizes.h>
31 32
32#define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \ 33#define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \
33 ((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \ 34 ((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \
@@ -45,6 +46,12 @@ struct intc_handle_int {
45 unsigned long handle; 46 unsigned long handle;
46}; 47};
47 48
49struct intc_window {
50 phys_addr_t phys;
51 void __iomem *virt;
52 unsigned long size;
53};
54
48struct intc_desc_int { 55struct intc_desc_int {
49 struct list_head list; 56 struct list_head list;
50 struct sys_device sysdev; 57 struct sys_device sysdev;
@@ -58,6 +65,8 @@ struct intc_desc_int {
58 unsigned int nr_prio; 65 unsigned int nr_prio;
59 struct intc_handle_int *sense; 66 struct intc_handle_int *sense;
60 unsigned int nr_sense; 67 unsigned int nr_sense;
68 struct intc_window *window;
69 unsigned int nr_windows;
61 struct irq_chip chip; 70 struct irq_chip chip;
62}; 71};
63 72
@@ -87,8 +96,12 @@ static DEFINE_SPINLOCK(vector_lock);
87#define SMP_NR(d, x) 1 96#define SMP_NR(d, x) 1
88#endif 97#endif
89 98
90static unsigned int intc_prio_level[NR_IRQS]; /* for now */ 99static unsigned int intc_prio_level[NR_IRQS]; /* for now */
100static unsigned int default_prio_level = 2; /* 2 - 16 */
91static unsigned long ack_handle[NR_IRQS]; 101static unsigned long ack_handle[NR_IRQS];
102#ifdef CONFIG_INTC_BALANCING
103static unsigned long dist_handle[NR_IRQS];
104#endif
92 105
93static inline struct intc_desc_int *get_intc_desc(unsigned int irq) 106static inline struct intc_desc_int *get_intc_desc(unsigned int irq)
94{ 107{
@@ -96,6 +109,47 @@ static inline struct intc_desc_int *get_intc_desc(unsigned int irq)
96 return container_of(chip, struct intc_desc_int, chip); 109 return container_of(chip, struct intc_desc_int, chip);
97} 110}
98 111
112static unsigned long intc_phys_to_virt(struct intc_desc_int *d,
113 unsigned long address)
114{
115 struct intc_window *window;
116 int k;
117
118 /* scan through physical windows and convert address */
119 for (k = 0; k < d->nr_windows; k++) {
120 window = d->window + k;
121
122 if (address < window->phys)
123 continue;
124
125 if (address >= (window->phys + window->size))
126 continue;
127
128 address -= window->phys;
129 address += (unsigned long)window->virt;
130
131 return address;
132 }
133
134 /* no windows defined, register must be 1:1 mapped virt:phys */
135 return address;
136}
137
138static unsigned int intc_get_reg(struct intc_desc_int *d, unsigned long address)
139{
140 unsigned int k;
141
142 address = intc_phys_to_virt(d, address);
143
144 for (k = 0; k < d->nr_reg; k++) {
145 if (d->reg[k] == address)
146 return k;
147 }
148
149 BUG();
150 return 0;
151}
152
99static inline unsigned int set_field(unsigned int value, 153static inline unsigned int set_field(unsigned int value,
100 unsigned int field_value, 154 unsigned int field_value,
101 unsigned int handle) 155 unsigned int handle)
@@ -229,6 +283,85 @@ static void (*intc_disable_fns[])(unsigned long addr,
229 [MODE_PCLR_REG] = intc_mode_field, 283 [MODE_PCLR_REG] = intc_mode_field,
230}; 284};
231 285
286#ifdef CONFIG_INTC_BALANCING
287static inline void intc_balancing_enable(unsigned int irq)
288{
289 struct intc_desc_int *d = get_intc_desc(irq);
290 unsigned long handle = dist_handle[irq];
291 unsigned long addr;
292
293 if (irq_balancing_disabled(irq) || !handle)
294 return;
295
296 addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
297 intc_reg_fns[_INTC_FN(handle)](addr, handle, 1);
298}
299
300static inline void intc_balancing_disable(unsigned int irq)
301{
302 struct intc_desc_int *d = get_intc_desc(irq);
303 unsigned long handle = dist_handle[irq];
304 unsigned long addr;
305
306 if (irq_balancing_disabled(irq) || !handle)
307 return;
308
309 addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
310 intc_reg_fns[_INTC_FN(handle)](addr, handle, 0);
311}
312
313static unsigned int intc_dist_data(struct intc_desc *desc,
314 struct intc_desc_int *d,
315 intc_enum enum_id)
316{
317 struct intc_mask_reg *mr = desc->hw.mask_regs;
318 unsigned int i, j, fn, mode;
319 unsigned long reg_e, reg_d;
320
321 for (i = 0; mr && enum_id && i < desc->hw.nr_mask_regs; i++) {
322 mr = desc->hw.mask_regs + i;
323
324 /*
325 * Skip this entry if there's no auto-distribution
326 * register associated with it.
327 */
328 if (!mr->dist_reg)
329 continue;
330
331 for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
332 if (mr->enum_ids[j] != enum_id)
333 continue;
334
335 fn = REG_FN_MODIFY_BASE;
336 mode = MODE_ENABLE_REG;
337 reg_e = mr->dist_reg;
338 reg_d = mr->dist_reg;
339
340 fn += (mr->reg_width >> 3) - 1;
341 return _INTC_MK(fn, mode,
342 intc_get_reg(d, reg_e),
343 intc_get_reg(d, reg_d),
344 1,
345 (mr->reg_width - 1) - j);
346 }
347 }
348
349 /*
350 * It's possible we've gotten here with no distribution options
351 * available for the IRQ in question, so we just skip over those.
352 */
353 return 0;
354}
355#else
356static inline void intc_balancing_enable(unsigned int irq)
357{
358}
359
360static inline void intc_balancing_disable(unsigned int irq)
361{
362}
363#endif
364
232static inline void _intc_enable(unsigned int irq, unsigned long handle) 365static inline void _intc_enable(unsigned int irq, unsigned long handle)
233{ 366{
234 struct intc_desc_int *d = get_intc_desc(irq); 367 struct intc_desc_int *d = get_intc_desc(irq);
@@ -244,6 +377,8 @@ static inline void _intc_enable(unsigned int irq, unsigned long handle)
244 intc_enable_fns[_INTC_MODE(handle)](addr, handle, intc_reg_fns\ 377 intc_enable_fns[_INTC_MODE(handle)](addr, handle, intc_reg_fns\
245 [_INTC_FN(handle)], irq); 378 [_INTC_FN(handle)], irq);
246 } 379 }
380
381 intc_balancing_enable(irq);
247} 382}
248 383
249static void intc_enable(unsigned int irq) 384static void intc_enable(unsigned int irq)
@@ -254,10 +389,12 @@ static void intc_enable(unsigned int irq)
254static void intc_disable(unsigned int irq) 389static void intc_disable(unsigned int irq)
255{ 390{
256 struct intc_desc_int *d = get_intc_desc(irq); 391 struct intc_desc_int *d = get_intc_desc(irq);
257 unsigned long handle = (unsigned long) get_irq_chip_data(irq); 392 unsigned long handle = (unsigned long)get_irq_chip_data(irq);
258 unsigned long addr; 393 unsigned long addr;
259 unsigned int cpu; 394 unsigned int cpu;
260 395
396 intc_balancing_disable(irq);
397
261 for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) { 398 for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
262#ifdef CONFIG_SMP 399#ifdef CONFIG_SMP
263 if (!cpumask_test_cpu(cpu, irq_to_desc(irq)->affinity)) 400 if (!cpumask_test_cpu(cpu, irq_to_desc(irq)->affinity))
@@ -336,8 +473,7 @@ static void intc_mask_ack(unsigned int irq)
336 473
337 intc_disable(irq); 474 intc_disable(irq);
338 475
339 /* read register and write zero only to the assocaited bit */ 476 /* read register and write zero only to the associated bit */
340
341 if (handle) { 477 if (handle) {
342 addr = INTC_REG(d, _INTC_ADDR_D(handle), 0); 478 addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
343 switch (_INTC_FN(handle)) { 479 switch (_INTC_FN(handle)) {
@@ -366,7 +502,8 @@ static struct intc_handle_int *intc_find_irq(struct intc_handle_int *hp,
366{ 502{
367 int i; 503 int i;
368 504
369 /* this doesn't scale well, but... 505 /*
506 * this doesn't scale well, but...
370 * 507 *
371 * this function should only be used for certain uncommon 508 * this function should only be used for certain uncommon
372 * operations such as intc_set_priority() and intc_set_sense() 509 * operations such as intc_set_priority() and intc_set_sense()
@@ -377,7 +514,6 @@ static struct intc_handle_int *intc_find_irq(struct intc_handle_int *hp,
377 * memory footprint down is to make sure the array is sorted 514 * memory footprint down is to make sure the array is sorted
378 * and then perform a bisect to lookup the irq. 515 * and then perform a bisect to lookup the irq.
379 */ 516 */
380
381 for (i = 0; i < nr_hp; i++) { 517 for (i = 0; i < nr_hp; i++) {
382 if ((hp + i)->irq != irq) 518 if ((hp + i)->irq != irq)
383 continue; 519 continue;
@@ -408,7 +544,6 @@ int intc_set_priority(unsigned int irq, unsigned int prio)
408 * primary masking method is using intc_prio_level[irq] 544 * primary masking method is using intc_prio_level[irq]
409 * priority level will be set during next enable() 545 * priority level will be set during next enable()
410 */ 546 */
411
412 if (_INTC_FN(ihp->handle) != REG_FN_ERR) 547 if (_INTC_FN(ihp->handle) != REG_FN_ERR)
413 _intc_enable(irq, ihp->handle); 548 _intc_enable(irq, ihp->handle);
414 } 549 }
@@ -447,20 +582,6 @@ static int intc_set_sense(unsigned int irq, unsigned int type)
447 return 0; 582 return 0;
448} 583}
449 584
450static unsigned int __init intc_get_reg(struct intc_desc_int *d,
451 unsigned long address)
452{
453 unsigned int k;
454
455 for (k = 0; k < d->nr_reg; k++) {
456 if (d->reg[k] == address)
457 return k;
458 }
459
460 BUG();
461 return 0;
462}
463
464static intc_enum __init intc_grp_id(struct intc_desc *desc, 585static intc_enum __init intc_grp_id(struct intc_desc *desc,
465 intc_enum enum_id) 586 intc_enum enum_id)
466{ 587{
@@ -718,13 +839,14 @@ static void __init intc_register_irq(struct intc_desc *desc,
718 */ 839 */
719 set_bit(irq, intc_irq_map); 840 set_bit(irq, intc_irq_map);
720 841
721 /* Prefer single interrupt source bitmap over other combinations: 842 /*
843 * Prefer single interrupt source bitmap over other combinations:
844 *
722 * 1. bitmap, single interrupt source 845 * 1. bitmap, single interrupt source
723 * 2. priority, single interrupt source 846 * 2. priority, single interrupt source
724 * 3. bitmap, multiple interrupt sources (groups) 847 * 3. bitmap, multiple interrupt sources (groups)
725 * 4. priority, multiple interrupt sources (groups) 848 * 4. priority, multiple interrupt sources (groups)
726 */ 849 */
727
728 data[0] = intc_mask_data(desc, d, enum_id, 0); 850 data[0] = intc_mask_data(desc, d, enum_id, 0);
729 data[1] = intc_prio_data(desc, d, enum_id, 0); 851 data[1] = intc_prio_data(desc, d, enum_id, 0);
730 852
@@ -749,10 +871,11 @@ static void __init intc_register_irq(struct intc_desc *desc,
749 handle_level_irq, "level"); 871 handle_level_irq, "level");
750 set_irq_chip_data(irq, (void *)data[primary]); 872 set_irq_chip_data(irq, (void *)data[primary]);
751 873
752 /* set priority level 874 /*
875 * set priority level
753 * - this needs to be at least 2 for 5-bit priorities on 7780 876 * - this needs to be at least 2 for 5-bit priorities on 7780
754 */ 877 */
755 intc_prio_level[irq] = 2; 878 intc_prio_level[irq] = default_prio_level;
756 879
757 /* enable secondary masking method if present */ 880 /* enable secondary masking method if present */
758 if (data[!primary]) 881 if (data[!primary])
@@ -769,7 +892,6 @@ static void __init intc_register_irq(struct intc_desc *desc,
769 * only secondary priority should access registers, so 892 * only secondary priority should access registers, so
770 * set _INTC_FN(h) = REG_FN_ERR for intc_set_priority() 893 * set _INTC_FN(h) = REG_FN_ERR for intc_set_priority()
771 */ 894 */
772
773 hp->handle &= ~_INTC_MK(0x0f, 0, 0, 0, 0, 0); 895 hp->handle &= ~_INTC_MK(0x0f, 0, 0, 0, 0, 0);
774 hp->handle |= _INTC_MK(REG_FN_ERR, 0, 0, 0, 0, 0); 896 hp->handle |= _INTC_MK(REG_FN_ERR, 0, 0, 0, 0, 0);
775 } 897 }
@@ -790,6 +912,11 @@ static void __init intc_register_irq(struct intc_desc *desc,
790 if (desc->hw.ack_regs) 912 if (desc->hw.ack_regs)
791 ack_handle[irq] = intc_ack_data(desc, d, enum_id); 913 ack_handle[irq] = intc_ack_data(desc, d, enum_id);
792 914
915#ifdef CONFIG_INTC_BALANCING
916 if (desc->hw.mask_regs)
917 dist_handle[irq] = intc_dist_data(desc, d, enum_id);
918#endif
919
793#ifdef CONFIG_ARM 920#ifdef CONFIG_ARM
794 set_irq_flags(irq, IRQF_VALID); /* Enable IRQ on ARM systems */ 921 set_irq_flags(irq, IRQF_VALID); /* Enable IRQ on ARM systems */
795#endif 922#endif
@@ -801,6 +928,8 @@ static unsigned int __init save_reg(struct intc_desc_int *d,
801 unsigned int smp) 928 unsigned int smp)
802{ 929{
803 if (value) { 930 if (value) {
931 value = intc_phys_to_virt(d, value);
932
804 d->reg[cnt] = value; 933 d->reg[cnt] = value;
805#ifdef CONFIG_SMP 934#ifdef CONFIG_SMP
806 d->smp[cnt] = smp; 935 d->smp[cnt] = smp;
@@ -816,25 +945,59 @@ static void intc_redirect_irq(unsigned int irq, struct irq_desc *desc)
816 generic_handle_irq((unsigned int)get_irq_data(irq)); 945 generic_handle_irq((unsigned int)get_irq_data(irq));
817} 946}
818 947
819void __init register_intc_controller(struct intc_desc *desc) 948int __init register_intc_controller(struct intc_desc *desc)
820{ 949{
821 unsigned int i, k, smp; 950 unsigned int i, k, smp;
822 struct intc_hw_desc *hw = &desc->hw; 951 struct intc_hw_desc *hw = &desc->hw;
823 struct intc_desc_int *d; 952 struct intc_desc_int *d;
953 struct resource *res;
954
955 pr_info("intc: Registered controller '%s' with %u IRQs\n",
956 desc->name, hw->nr_vectors);
824 957
825 d = kzalloc(sizeof(*d), GFP_NOWAIT); 958 d = kzalloc(sizeof(*d), GFP_NOWAIT);
959 if (!d)
960 goto err0;
826 961
827 INIT_LIST_HEAD(&d->list); 962 INIT_LIST_HEAD(&d->list);
828 list_add(&d->list, &intc_list); 963 list_add(&d->list, &intc_list);
829 964
965 if (desc->num_resources) {
966 d->nr_windows = desc->num_resources;
967 d->window = kzalloc(d->nr_windows * sizeof(*d->window),
968 GFP_NOWAIT);
969 if (!d->window)
970 goto err1;
971
972 for (k = 0; k < d->nr_windows; k++) {
973 res = desc->resource + k;
974 WARN_ON(resource_type(res) != IORESOURCE_MEM);
975 d->window[k].phys = res->start;
976 d->window[k].size = resource_size(res);
977 d->window[k].virt = ioremap_nocache(res->start,
978 resource_size(res));
979 if (!d->window[k].virt)
980 goto err2;
981 }
982 }
983
830 d->nr_reg = hw->mask_regs ? hw->nr_mask_regs * 2 : 0; 984 d->nr_reg = hw->mask_regs ? hw->nr_mask_regs * 2 : 0;
985#ifdef CONFIG_INTC_BALANCING
986 if (d->nr_reg)
987 d->nr_reg += hw->nr_mask_regs;
988#endif
831 d->nr_reg += hw->prio_regs ? hw->nr_prio_regs * 2 : 0; 989 d->nr_reg += hw->prio_regs ? hw->nr_prio_regs * 2 : 0;
832 d->nr_reg += hw->sense_regs ? hw->nr_sense_regs : 0; 990 d->nr_reg += hw->sense_regs ? hw->nr_sense_regs : 0;
833 d->nr_reg += hw->ack_regs ? hw->nr_ack_regs : 0; 991 d->nr_reg += hw->ack_regs ? hw->nr_ack_regs : 0;
834 992
835 d->reg = kzalloc(d->nr_reg * sizeof(*d->reg), GFP_NOWAIT); 993 d->reg = kzalloc(d->nr_reg * sizeof(*d->reg), GFP_NOWAIT);
994 if (!d->reg)
995 goto err2;
996
836#ifdef CONFIG_SMP 997#ifdef CONFIG_SMP
837 d->smp = kzalloc(d->nr_reg * sizeof(*d->smp), GFP_NOWAIT); 998 d->smp = kzalloc(d->nr_reg * sizeof(*d->smp), GFP_NOWAIT);
999 if (!d->smp)
1000 goto err3;
838#endif 1001#endif
839 k = 0; 1002 k = 0;
840 1003
@@ -843,12 +1006,17 @@ void __init register_intc_controller(struct intc_desc *desc)
843 smp = IS_SMP(hw->mask_regs[i]); 1006 smp = IS_SMP(hw->mask_regs[i]);
844 k += save_reg(d, k, hw->mask_regs[i].set_reg, smp); 1007 k += save_reg(d, k, hw->mask_regs[i].set_reg, smp);
845 k += save_reg(d, k, hw->mask_regs[i].clr_reg, smp); 1008 k += save_reg(d, k, hw->mask_regs[i].clr_reg, smp);
1009#ifdef CONFIG_INTC_BALANCING
1010 k += save_reg(d, k, hw->mask_regs[i].dist_reg, 0);
1011#endif
846 } 1012 }
847 } 1013 }
848 1014
849 if (hw->prio_regs) { 1015 if (hw->prio_regs) {
850 d->prio = kzalloc(hw->nr_vectors * sizeof(*d->prio), 1016 d->prio = kzalloc(hw->nr_vectors * sizeof(*d->prio),
851 GFP_NOWAIT); 1017 GFP_NOWAIT);
1018 if (!d->prio)
1019 goto err4;
852 1020
853 for (i = 0; i < hw->nr_prio_regs; i++) { 1021 for (i = 0; i < hw->nr_prio_regs; i++) {
854 smp = IS_SMP(hw->prio_regs[i]); 1022 smp = IS_SMP(hw->prio_regs[i]);
@@ -860,6 +1028,8 @@ void __init register_intc_controller(struct intc_desc *desc)
860 if (hw->sense_regs) { 1028 if (hw->sense_regs) {
861 d->sense = kzalloc(hw->nr_vectors * sizeof(*d->sense), 1029 d->sense = kzalloc(hw->nr_vectors * sizeof(*d->sense),
862 GFP_NOWAIT); 1030 GFP_NOWAIT);
1031 if (!d->sense)
1032 goto err5;
863 1033
864 for (i = 0; i < hw->nr_sense_regs; i++) 1034 for (i = 0; i < hw->nr_sense_regs; i++)
865 k += save_reg(d, k, hw->sense_regs[i].reg, 0); 1035 k += save_reg(d, k, hw->sense_regs[i].reg, 0);
@@ -906,7 +1076,7 @@ void __init register_intc_controller(struct intc_desc *desc)
906 1076
907 irq_desc = irq_to_desc_alloc_node(irq, numa_node_id()); 1077 irq_desc = irq_to_desc_alloc_node(irq, numa_node_id());
908 if (unlikely(!irq_desc)) { 1078 if (unlikely(!irq_desc)) {
909 pr_info("can't get irq_desc for %d\n", irq); 1079 pr_err("can't get irq_desc for %d\n", irq);
910 continue; 1080 continue;
911 } 1081 }
912 1082
@@ -926,7 +1096,7 @@ void __init register_intc_controller(struct intc_desc *desc)
926 */ 1096 */
927 irq_desc = irq_to_desc_alloc_node(irq2, numa_node_id()); 1097 irq_desc = irq_to_desc_alloc_node(irq2, numa_node_id());
928 if (unlikely(!irq_desc)) { 1098 if (unlikely(!irq_desc)) {
929 pr_info("can't get irq_desc for %d\n", irq2); 1099 pr_err("can't get irq_desc for %d\n", irq2);
930 continue; 1100 continue;
931 } 1101 }
932 1102
@@ -942,8 +1112,100 @@ void __init register_intc_controller(struct intc_desc *desc)
942 /* enable bits matching force_enable after registering irqs */ 1112 /* enable bits matching force_enable after registering irqs */
943 if (desc->force_enable) 1113 if (desc->force_enable)
944 intc_enable_disable_enum(desc, d, desc->force_enable, 1); 1114 intc_enable_disable_enum(desc, d, desc->force_enable, 1);
1115
1116 return 0;
1117err5:
1118 kfree(d->prio);
1119err4:
1120#ifdef CONFIG_SMP
1121 kfree(d->smp);
1122err3:
1123#endif
1124 kfree(d->reg);
1125err2:
1126 for (k = 0; k < d->nr_windows; k++)
1127 if (d->window[k].virt)
1128 iounmap(d->window[k].virt);
1129
1130 kfree(d->window);
1131err1:
1132 kfree(d);
1133err0:
1134 pr_err("unable to allocate INTC memory\n");
1135
1136 return -ENOMEM;
1137}
1138
1139#ifdef CONFIG_INTC_USERIMASK
1140static void __iomem *uimask;
1141
1142int register_intc_userimask(unsigned long addr)
1143{
1144 if (unlikely(uimask))
1145 return -EBUSY;
1146
1147 uimask = ioremap_nocache(addr, SZ_4K);
1148 if (unlikely(!uimask))
1149 return -ENOMEM;
1150
1151 pr_info("intc: userimask support registered for levels 0 -> %d\n",
1152 default_prio_level - 1);
1153
1154 return 0;
1155}
1156
1157static ssize_t
1158show_intc_userimask(struct sysdev_class *cls,
1159 struct sysdev_class_attribute *attr, char *buf)
1160{
1161 return sprintf(buf, "%d\n", (__raw_readl(uimask) >> 4) & 0xf);
1162}
1163
1164static ssize_t
1165store_intc_userimask(struct sysdev_class *cls,
1166 struct sysdev_class_attribute *attr,
1167 const char *buf, size_t count)
1168{
1169 unsigned long level;
1170
1171 level = simple_strtoul(buf, NULL, 10);
1172
1173 /*
1174 * Minimal acceptable IRQ levels are in the 2 - 16 range, but
1175 * these are chomped so as to not interfere with normal IRQs.
1176 *
1177 * Level 1 is a special case on some CPUs in that it's not
1178 * directly settable, but given that USERIMASK cuts off below a
1179 * certain level, we don't care about this limitation here.
1180 * Level 0 on the other hand equates to user masking disabled.
1181 *
1182 * We use default_prio_level as a cut off so that only special
1183 * case opt-in IRQs can be mangled.
1184 */
1185 if (level >= default_prio_level)
1186 return -EINVAL;
1187
1188 __raw_writel(0xa5 << 24 | level << 4, uimask);
1189
1190 return count;
945} 1191}
946 1192
1193static SYSDEV_CLASS_ATTR(userimask, S_IRUSR | S_IWUSR,
1194 show_intc_userimask, store_intc_userimask);
1195#endif
1196
1197static ssize_t
1198show_intc_name(struct sys_device *dev, struct sysdev_attribute *attr, char *buf)
1199{
1200 struct intc_desc_int *d;
1201
1202 d = container_of(dev, struct intc_desc_int, sysdev);
1203
1204 return sprintf(buf, "%s\n", d->chip.name);
1205}
1206
1207static SYSDEV_ATTR(name, S_IRUGO, show_intc_name, NULL);
1208
947static int intc_suspend(struct sys_device *dev, pm_message_t state) 1209static int intc_suspend(struct sys_device *dev, pm_message_t state)
948{ 1210{
949 struct intc_desc_int *d; 1211 struct intc_desc_int *d;
@@ -1003,19 +1265,28 @@ static int __init register_intc_sysdevs(void)
1003 int id = 0; 1265 int id = 0;
1004 1266
1005 error = sysdev_class_register(&intc_sysdev_class); 1267 error = sysdev_class_register(&intc_sysdev_class);
1268#ifdef CONFIG_INTC_USERIMASK
1269 if (!error && uimask)
1270 error = sysdev_class_create_file(&intc_sysdev_class,
1271 &attr_userimask);
1272#endif
1006 if (!error) { 1273 if (!error) {
1007 list_for_each_entry(d, &intc_list, list) { 1274 list_for_each_entry(d, &intc_list, list) {
1008 d->sysdev.id = id; 1275 d->sysdev.id = id;
1009 d->sysdev.cls = &intc_sysdev_class; 1276 d->sysdev.cls = &intc_sysdev_class;
1010 error = sysdev_register(&d->sysdev); 1277 error = sysdev_register(&d->sysdev);
1278 if (error == 0)
1279 error = sysdev_create_file(&d->sysdev,
1280 &attr_name);
1011 if (error) 1281 if (error)
1012 break; 1282 break;
1283
1013 id++; 1284 id++;
1014 } 1285 }
1015 } 1286 }
1016 1287
1017 if (error) 1288 if (error)
1018 pr_warning("intc: sysdev registration error\n"); 1289 pr_err("intc: sysdev registration error\n");
1019 1290
1020 return error; 1291 return error;
1021} 1292}
@@ -1048,7 +1319,7 @@ unsigned int create_irq_nr(unsigned int irq_want, int node)
1048 1319
1049 desc = irq_to_desc_alloc_node(new, node); 1320 desc = irq_to_desc_alloc_node(new, node);
1050 if (unlikely(!desc)) { 1321 if (unlikely(!desc)) {
1051 pr_info("can't get irq_desc for %d\n", new); 1322 pr_err("can't get irq_desc for %d\n", new);
1052 goto out_unlock; 1323 goto out_unlock;
1053 } 1324 }
1054 1325
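register_intc_controller() now returns an error code and can translate register addresses through ioremapped resource windows. A minimal sketch of a caller supplying one IORESOURCE_MEM window and checking the result; the addresses and window size are invented, the .name/.resource/.num_resources/.hw fields mirror those referenced above, and the full struct intc_desc definition is assumed to come from the intc header:

/*
 * Sketch only: 0xe6900000 and the 0x200 window are hypothetical.
 */
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/sh_intc.h>

static struct resource example_intc_resources[] = {
	[0] = {
		.start	= 0xe6900000,
		.end	= 0xe6900000 + 0x200 - 1,
		.flags	= IORESOURCE_MEM,
	},
};

static struct intc_desc example_intc_desc __initdata = {
	.name		= "example-intc",
	.resource	= example_intc_resources,
	.num_resources	= ARRAY_SIZE(example_intc_resources),
	/* .hw would carry the usual vector/mask/prio/sense tables */
};

void __init example_init_irq(void)
{
	if (register_intc_controller(&example_intc_desc))
		pr_err("example-intc: registration failed\n");
}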
diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/omap2_mcspi.c
index d8356af118a8..e0de0d0eedea 100644
--- a/drivers/spi/omap2_mcspi.c
+++ b/drivers/spi/omap2_mcspi.c
@@ -204,6 +204,7 @@ static inline void mcspi_write_chconf0(const struct spi_device *spi, u32 val)
204 204
205 cs->chconf0 = val; 205 cs->chconf0 = val;
206 mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, val); 206 mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, val);
207 mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0);
207} 208}
208 209
209static void omap2_mcspi_set_dma_req(const struct spi_device *spi, 210static void omap2_mcspi_set_dma_req(const struct spi_device *spi,
@@ -532,7 +533,7 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
532 goto out; 533 goto out;
533 } 534 }
534#ifdef VERBOSE 535#ifdef VERBOSE
535 dev_dbg(&spi->dev, "write-%d %04x\n", 536 dev_dbg(&spi->dev, "write-%d %08x\n",
536 word_len, *tx); 537 word_len, *tx);
537#endif 538#endif
538 __raw_writel(*tx++, tx_reg); 539 __raw_writel(*tx++, tx_reg);
@@ -550,7 +551,7 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
550 mcspi_write_chconf0(spi, l); 551 mcspi_write_chconf0(spi, l);
551 *rx++ = __raw_readl(rx_reg); 552 *rx++ = __raw_readl(rx_reg);
552#ifdef VERBOSE 553#ifdef VERBOSE
553 dev_dbg(&spi->dev, "read-%d %04x\n", 554 dev_dbg(&spi->dev, "read-%d %08x\n",
554 word_len, *(rx - 1)); 555 word_len, *(rx - 1));
555#endif 556#endif
556 } 557 }
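The added mcspi_read_cs_reg() call is a read-back that flushes the posted CHCONF0 write before the caller continues (the VERBOSE prints also switch to %08x so full 32-bit words are shown). A minimal, generic sketch of the read-back idiom; "regs" stands in for a real memory-mapped register rather than an McSPI address:

/*
 * Sketch only: illustrates write-then-read-back on an MMIO register.
 */
#include <linux/io.h>
#include <linux/types.h>

static inline void example_write_flush(void __iomem *regs, u32 val)
{
	writel(val, regs);	/* the write may still be posted in a buffer */
	(void)readl(regs);	/* reading it back forces the write to land */
}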
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c
index 36828358a4d8..e76b1afafe07 100644
--- a/drivers/spi/pxa2xx_spi.c
+++ b/drivers/spi/pxa2xx_spi.c
@@ -36,8 +36,7 @@
36#include <asm/delay.h> 36#include <asm/delay.h>
37 37
38#include <mach/dma.h> 38#include <mach/dma.h>
39#include <mach/regs-ssp.h> 39#include <plat/ssp.h>
40#include <mach/ssp.h>
41#include <mach/pxa2xx_spi.h> 40#include <mach/pxa2xx_spi.h>
42 41
43MODULE_AUTHOR("Stephen Street"); 42MODULE_AUTHOR("Stephen Street");
@@ -1318,14 +1317,14 @@ static int setup(struct spi_device *spi)
1318 /* NOTE: PXA25x_SSP _could_ use external clocking ... */ 1317 /* NOTE: PXA25x_SSP _could_ use external clocking ... */
1319 if (drv_data->ssp_type != PXA25x_SSP) 1318 if (drv_data->ssp_type != PXA25x_SSP)
1320 dev_dbg(&spi->dev, "%ld Hz actual, %s\n", 1319 dev_dbg(&spi->dev, "%ld Hz actual, %s\n",
1321 clk_get_rate(ssp->clk) 1320 clk_get_rate(ssp->clk)
1322 / (1 + ((chip->cr0 & SSCR0_SCR) >> 8)), 1321 / (1 + ((chip->cr0 & SSCR0_SCR(0xfff)) >> 8)),
1323 chip->enable_dma ? "DMA" : "PIO"); 1322 chip->enable_dma ? "DMA" : "PIO");
1324 else 1323 else
1325 dev_dbg(&spi->dev, "%ld Hz actual, %s\n", 1324 dev_dbg(&spi->dev, "%ld Hz actual, %s\n",
1326 clk_get_rate(ssp->clk) / 2 1325 clk_get_rate(ssp->clk) / 2
1327 / (1 + ((chip->cr0 & SSCR0_SCR) >> 8)), 1326 / (1 + ((chip->cr0 & SSCR0_SCR(0x0ff)) >> 8)),
1328 chip->enable_dma ? "DMA" : "PIO"); 1327 chip->enable_dma ? "DMA" : "PIO");
1329 1328
1330 if (spi->bits_per_word <= 8) { 1329 if (spi->bits_per_word <= 8) {
1331 chip->n_bytes = 1; 1330 chip->n_bytes = 1;
@@ -1466,7 +1465,7 @@ static int __init pxa2xx_spi_probe(struct platform_device *pdev)
1466 1465
1467 platform_info = dev->platform_data; 1466 platform_info = dev->platform_data;
1468 1467
1469 ssp = ssp_request(pdev->id, pdev->name); 1468 ssp = pxa_ssp_request(pdev->id, pdev->name);
1470 if (ssp == NULL) { 1469 if (ssp == NULL) {
1471 dev_err(&pdev->dev, "failed to request SSP%d\n", pdev->id); 1470 dev_err(&pdev->dev, "failed to request SSP%d\n", pdev->id);
1472 return -ENODEV; 1471 return -ENODEV;
@@ -1476,7 +1475,7 @@ static int __init pxa2xx_spi_probe(struct platform_device *pdev)
1476 master = spi_alloc_master(dev, sizeof(struct driver_data) + 16); 1475 master = spi_alloc_master(dev, sizeof(struct driver_data) + 16);
1477 if (!master) { 1476 if (!master) {
1478 dev_err(&pdev->dev, "cannot alloc spi_master\n"); 1477 dev_err(&pdev->dev, "cannot alloc spi_master\n");
1479 ssp_free(ssp); 1478 pxa_ssp_free(ssp);
1480 return -ENOMEM; 1479 return -ENOMEM;
1481 } 1480 }
1482 drv_data = spi_master_get_devdata(master); 1481 drv_data = spi_master_get_devdata(master);
@@ -1558,7 +1557,7 @@ static int __init pxa2xx_spi_probe(struct platform_device *pdev)
1558 write_SSCR1(SSCR1_RxTresh(RX_THRESH_DFLT) | 1557 write_SSCR1(SSCR1_RxTresh(RX_THRESH_DFLT) |
1559 SSCR1_TxTresh(TX_THRESH_DFLT), 1558 SSCR1_TxTresh(TX_THRESH_DFLT),
1560 drv_data->ioaddr); 1559 drv_data->ioaddr);
1561 write_SSCR0(SSCR0_SerClkDiv(2) 1560 write_SSCR0(SSCR0_SCR(2)
1562 | SSCR0_Motorola 1561 | SSCR0_Motorola
1563 | SSCR0_DataSize(8), 1562 | SSCR0_DataSize(8),
1564 drv_data->ioaddr); 1563 drv_data->ioaddr);
@@ -1605,7 +1604,7 @@ out_error_irq_alloc:
1605 1604
1606out_error_master_alloc: 1605out_error_master_alloc:
1607 spi_master_put(master); 1606 spi_master_put(master);
1608 ssp_free(ssp); 1607 pxa_ssp_free(ssp);
1609 return status; 1608 return status;
1610} 1609}
1611 1610
@@ -1649,7 +1648,7 @@ static int pxa2xx_spi_remove(struct platform_device *pdev)
1649 free_irq(ssp->irq, drv_data); 1648 free_irq(ssp->irq, drv_data);
1650 1649
1651 /* Release SSP */ 1650 /* Release SSP */
1652 ssp_free(ssp); 1651 pxa_ssp_free(ssp);
1653 1652
1654 /* Disconnect from the SPI framework */ 1653 /* Disconnect from the SPI framework */
1655 spi_unregister_master(drv_data->master); 1654 spi_unregister_master(drv_data->master);
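Besides the move to <plat/ssp.h> and the pxa_ssp_request()/pxa_ssp_free() renames, SSCR0_SCR is now a macro taking the field value, so it doubles as a mask when passed 0xfff (or 0x0ff on PXA25x). A sketch of the bit-rate calculation mirrored from the dev_dbg() calls above, assuming SSCR0_SCR() places the serial clock rate field at bits 8 and up:

/*
 * Sketch only: assumes the SSCR0_SCR() definition from the relocated
 * <plat/ssp.h>; the mask width matches the non-PXA25x case above.
 */
#include <linux/types.h>
#include <plat/ssp.h>

static unsigned long example_ssp_bitrate(unsigned long ssp_clk, u32 cr0)
{
	/* bit rate = SSP clock / (1 + SCR), as printed by the driver */
	return ssp_clk / (1 + ((cr0 & SSCR0_SCR(0xfff)) >> 8));
}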
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 9ffb0fdbd6fe..b3a1f9259b62 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -41,7 +41,7 @@ static void spidev_release(struct device *dev)
41 spi->master->cleanup(spi); 41 spi->master->cleanup(spi);
42 42
43 spi_master_put(spi->master); 43 spi_master_put(spi->master);
44 kfree(dev); 44 kfree(spi);
45} 45}
46 46
47static ssize_t 47static ssize_t
@@ -257,6 +257,7 @@ int spi_add_device(struct spi_device *spi)
257{ 257{
258 static DEFINE_MUTEX(spi_add_lock); 258 static DEFINE_MUTEX(spi_add_lock);
259 struct device *dev = spi->master->dev.parent; 259 struct device *dev = spi->master->dev.parent;
260 struct device *d;
260 int status; 261 int status;
261 262
262 /* Chipselects are numbered 0..max; validate. */ 263 /* Chipselects are numbered 0..max; validate. */
@@ -278,10 +279,11 @@ int spi_add_device(struct spi_device *spi)
278 */ 279 */
279 mutex_lock(&spi_add_lock); 280 mutex_lock(&spi_add_lock);
280 281
281 if (bus_find_device_by_name(&spi_bus_type, NULL, dev_name(&spi->dev)) 282 d = bus_find_device_by_name(&spi_bus_type, NULL, dev_name(&spi->dev));
282 != NULL) { 283 if (d != NULL) {
283 dev_err(dev, "chipselect %d already in use\n", 284 dev_err(dev, "chipselect %d already in use\n",
284 spi->chip_select); 285 spi->chip_select);
286 put_device(d);
285 status = -EBUSY; 287 status = -EBUSY;
286 goto done; 288 goto done;
287 } 289 }
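Two lifetime fixes in spi.c: spidev_release() must free the spi_device it allocated (kfree(spi), not the bare struct device pointer), and bus_find_device_by_name() returns a referenced device that needs put_device() even on the busy-chipselect error path. A minimal sketch of that lookup/put pattern, with a made-up helper name:

/*
 * Sketch only: "example_name_in_use" is illustrative, not a driver-core API.
 */
#include <linux/device.h>

static bool example_name_in_use(struct bus_type *bus, const char *name)
{
	struct device *d = bus_find_device_by_name(bus, NULL, name);

	if (!d)
		return false;

	put_device(d);	/* drop the reference taken by the lookup */
	return true;
}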
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index 80ff7d9e60de..bc9bdb277bec 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -490,7 +490,7 @@ static int ssb_devices_register(struct ssb_bus *bus)
490 break; 490 break;
491 case SSB_BUSTYPE_PCMCIA: 491 case SSB_BUSTYPE_PCMCIA:
492#ifdef CONFIG_SSB_PCMCIAHOST 492#ifdef CONFIG_SSB_PCMCIAHOST
493 sdev->irq = bus->host_pcmcia->irq.AssignedIRQ; 493 sdev->irq = bus->host_pcmcia->irq;
494 dev->parent = &bus->host_pcmcia->dev; 494 dev->parent = &bus->host_pcmcia->dev;
495#endif 495#endif
496 break; 496 break;
diff --git a/drivers/staging/comedi/drivers/cb_das16_cs.c b/drivers/staging/comedi/drivers/cb_das16_cs.c
index 5632991760af..30b522c0bf2c 100644
--- a/drivers/staging/comedi/drivers/cb_das16_cs.c
+++ b/drivers/staging/comedi/drivers/cb_das16_cs.c
@@ -180,12 +180,12 @@ static int das16cs_attach(struct comedi_device *dev,
180 } 180 }
181 printk("\n"); 181 printk("\n");
182 182
183 ret = request_irq(link->irq.AssignedIRQ, das16cs_interrupt, 183 ret = request_irq(link->irq, das16cs_interrupt,
184 IRQF_SHARED, "cb_das16_cs", dev); 184 IRQF_SHARED, "cb_das16_cs", dev);
185 if (ret < 0) { 185 if (ret < 0) {
186 return ret; 186 return ret;
187 } 187 }
188 dev->irq = link->irq.AssignedIRQ; 188 dev->irq = link->irq;
189 printk("irq=%u ", dev->irq); 189 printk("irq=%u ", dev->irq);
190 190
191 dev->board_ptr = das16cs_probe(dev, link); 191 dev->board_ptr = das16cs_probe(dev, link);
@@ -671,7 +671,6 @@ static dev_info_t dev_info = "cb_das16_cs";
671 671
672struct local_info_t { 672struct local_info_t {
673 struct pcmcia_device *link; 673 struct pcmcia_device *link;
674 dev_node_t node;
675 int stop; 674 int stop;
676 struct bus_operations *bus; 675 struct bus_operations *bus;
677}; 676};
@@ -702,10 +701,6 @@ static int das16cs_pcmcia_attach(struct pcmcia_device *link)
702 link->priv = local; 701 link->priv = local;
703 702
704 /* Initialize the pcmcia_device structure */ 703 /* Initialize the pcmcia_device structure */
705 /* Interrupt setup */
706 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
707 link->irq.Handler = NULL;
708
709 link->conf.Attributes = 0; 704 link->conf.Attributes = 0;
710 link->conf.IntType = INT_MEMORY_AND_IO; 705 link->conf.IntType = INT_MEMORY_AND_IO;
711 706
@@ -720,10 +715,8 @@ static void das16cs_pcmcia_detach(struct pcmcia_device *link)
720{ 715{
721 dev_dbg(&link->dev, "das16cs_pcmcia_detach\n"); 716 dev_dbg(&link->dev, "das16cs_pcmcia_detach\n");
722 717
723 if (link->dev_node) { 718 ((struct local_info_t *)link->priv)->stop = 1;
724 ((struct local_info_t *)link->priv)->stop = 1; 719 das16cs_pcmcia_release(link);
725 das16cs_pcmcia_release(link);
726 }
727 /* This points to the parent struct local_info_t struct */ 720 /* This points to the parent struct local_info_t struct */
728 if (link->priv) 721 if (link->priv)
729 kfree(link->priv); 722 kfree(link->priv);
@@ -740,8 +733,7 @@ static int das16cs_pcmcia_config_loop(struct pcmcia_device *p_dev,
740 return -EINVAL; 733 return -EINVAL;
741 734
742 /* Do we need to allocate an interrupt? */ 735 /* Do we need to allocate an interrupt? */
743 if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1) 736 p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
744 p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
745 737
746 /* IO window settings */ 738 /* IO window settings */
747 p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0; 739 p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
@@ -769,7 +761,6 @@ static int das16cs_pcmcia_config_loop(struct pcmcia_device *p_dev,
769 761
770static void das16cs_pcmcia_config(struct pcmcia_device *link) 762static void das16cs_pcmcia_config(struct pcmcia_device *link)
771{ 763{
772 struct local_info_t *dev = link->priv;
773 int ret; 764 int ret;
774 765
775 dev_dbg(&link->dev, "das16cs_pcmcia_config\n"); 766 dev_dbg(&link->dev, "das16cs_pcmcia_config\n");
@@ -780,16 +771,9 @@ static void das16cs_pcmcia_config(struct pcmcia_device *link)
780 goto failed; 771 goto failed;
781 } 772 }
782 773
783 /* 774 if (!link->irq)
784 Allocate an interrupt line. Note that this does not assign a 775 goto failed;
785 handler to the interrupt, unless the 'Handler' member of the 776
786 irq structure is initialized.
787 */
788 if (link->conf.Attributes & CONF_ENABLE_IRQ) {
789 ret = pcmcia_request_irq(link, &link->irq);
790 if (ret)
791 goto failed;
792 }
793 /* 777 /*
794 This actually configures the PCMCIA socket -- setting up 778 This actually configures the PCMCIA socket -- setting up
795 the I/O windows and the interrupt mapping, and putting the 779 the I/O windows and the interrupt mapping, and putting the
@@ -799,19 +783,10 @@ static void das16cs_pcmcia_config(struct pcmcia_device *link)
799 if (ret) 783 if (ret)
800 goto failed; 784 goto failed;
801 785
802 /*
803 At this point, the dev_node_t structure(s) need to be
804 initialized and arranged in a linked list at link->dev.
805 */
806 sprintf(dev->node.dev_name, "cb_das16_cs");
807 dev->node.major = dev->node.minor = 0;
808 link->dev_node = &dev->node;
809
810 /* Finally, report what we've done */ 786 /* Finally, report what we've done */
811 printk(KERN_INFO "%s: index 0x%02x", 787 dev_info(&link->dev, "index 0x%02x", link->conf.ConfigIndex);
812 dev->node.dev_name, link->conf.ConfigIndex);
813 if (link->conf.Attributes & CONF_ENABLE_IRQ) 788 if (link->conf.Attributes & CONF_ENABLE_IRQ)
814 printk(", irq %u", link->irq.AssignedIRQ); 789 printk(", irq %u", link->irq);
815 if (link->io.NumPorts1) 790 if (link->io.NumPorts1)
816 printk(", io 0x%04x-0x%04x", link->io.BasePort1, 791 printk(", io 0x%04x-0x%04x", link->io.BasePort1,
817 link->io.BasePort1 + link->io.NumPorts1 - 1); 792 link->io.BasePort1 + link->io.NumPorts1 - 1);
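With the updated PCMCIA core, drivers drop dev_node_t and pcmcia_request_irq(): setting CONF_ENABLE_IRQ in the config loop is enough, the core fills in link->irq, and the driver only checks it before calling request_irq(). A condensed sketch of the resulting IRQ setup; the handler and device name are placeholders:

/*
 * Sketch only: "example_isr" and "example_cs" stand in for the driver's
 * real interrupt handler and name; error handling is condensed.
 */
#include <linux/errno.h>
#include <linux/interrupt.h>

static int example_attach_irq(unsigned int irq, irq_handler_t example_isr,
			      void *priv)
{
	if (!irq)	/* link->irq: the PCMCIA core assigns it during config */
		return -EINVAL;

	return request_irq(irq, example_isr, IRQF_SHARED, "example_cs", priv);
}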
diff --git a/drivers/staging/comedi/drivers/das08_cs.c b/drivers/staging/comedi/drivers/das08_cs.c
index 9164ce158dcd..896d25bc85b5 100644
--- a/drivers/staging/comedi/drivers/das08_cs.c
+++ b/drivers/staging/comedi/drivers/das08_cs.c
@@ -142,7 +142,6 @@ static const dev_info_t dev_info = "pcm-das08";
142 142
143struct local_info_t { 143struct local_info_t {
144 struct pcmcia_device *link; 144 struct pcmcia_device *link;
145 dev_node_t node;
146 int stop; 145 int stop;
147 struct bus_operations *bus; 146 struct bus_operations *bus;
148}; 147};
@@ -172,10 +171,6 @@ static int das08_pcmcia_attach(struct pcmcia_device *link)
172 local->link = link; 171 local->link = link;
173 link->priv = local; 172 link->priv = local;
174 173
175 /* Interrupt setup */
176 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
177 link->irq.Handler = NULL;
178
179 /* 174 /*
180 General socket configuration defaults can go here. In this 175 General socket configuration defaults can go here. In this
181 client, we assume very little, and rely on the CIS for almost 176 client, we assume very little, and rely on the CIS for almost
@@ -207,10 +202,8 @@ static void das08_pcmcia_detach(struct pcmcia_device *link)
207 202
208 dev_dbg(&link->dev, "das08_pcmcia_detach\n"); 203 dev_dbg(&link->dev, "das08_pcmcia_detach\n");
209 204
210 if (link->dev_node) { 205 ((struct local_info_t *)link->priv)->stop = 1;
211 ((struct local_info_t *)link->priv)->stop = 1; 206 das08_pcmcia_release(link);
212 das08_pcmcia_release(link);
213 }
214 207
215 /* This points to the parent struct local_info_t struct */ 208 /* This points to the parent struct local_info_t struct */
216 if (link->priv) 209 if (link->priv)
@@ -229,8 +222,7 @@ static int das08_pcmcia_config_loop(struct pcmcia_device *p_dev,
229 return -ENODEV; 222 return -ENODEV;
230 223
231 /* Do we need to allocate an interrupt? */ 224 /* Do we need to allocate an interrupt? */
232 if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1) 225 p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
233 p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
234 226
235 /* IO window settings */ 227 /* IO window settings */
236 p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0; 228 p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
@@ -266,7 +258,6 @@ static int das08_pcmcia_config_loop(struct pcmcia_device *p_dev,
266 258
267static void das08_pcmcia_config(struct pcmcia_device *link) 259static void das08_pcmcia_config(struct pcmcia_device *link)
268{ 260{
269 struct local_info_t *dev = link->priv;
270 int ret; 261 int ret;
271 262
272 dev_dbg(&link->dev, "das08_pcmcia_config\n"); 263 dev_dbg(&link->dev, "das08_pcmcia_config\n");
@@ -277,11 +268,8 @@ static void das08_pcmcia_config(struct pcmcia_device *link)
277 goto failed; 268 goto failed;
278 } 269 }
279 270
280 if (link->conf.Attributes & CONF_ENABLE_IRQ) { 271 if (!link->irq)
281 ret = pcmcia_request_irq(link, &link->irq); 272 goto failed;
282 if (ret)
283 goto failed;
284 }
285 273
286 /* 274 /*
287 This actually configures the PCMCIA socket -- setting up 275 This actually configures the PCMCIA socket -- setting up
@@ -292,19 +280,10 @@ static void das08_pcmcia_config(struct pcmcia_device *link)
292 if (ret) 280 if (ret)
293 goto failed; 281 goto failed;
294 282
295 /*
296 At this point, the dev_node_t structure(s) need to be
297 initialized and arranged in a linked list at link->dev.
298 */
299 sprintf(dev->node.dev_name, "pcm-das08");
300 dev->node.major = dev->node.minor = 0;
301 link->dev_node = &dev->node;
302
303 /* Finally, report what we've done */ 283 /* Finally, report what we've done */
304 printk(KERN_INFO "%s: index 0x%02x", 284 dev_info(&link->dev, "index 0x%02x", link->conf.ConfigIndex);
305 dev->node.dev_name, link->conf.ConfigIndex);
306 if (link->conf.Attributes & CONF_ENABLE_IRQ) 285 if (link->conf.Attributes & CONF_ENABLE_IRQ)
307 printk(", irq %u", link->irq.AssignedIRQ); 286 printk(", irq %u", link->irq);
308 if (link->io.NumPorts1) 287 if (link->io.NumPorts1)
309 printk(", io 0x%04x-0x%04x", link->io.BasePort1, 288 printk(", io 0x%04x-0x%04x", link->io.BasePort1,
310 link->io.BasePort1 + link->io.NumPorts1 - 1); 289 link->io.BasePort1 + link->io.NumPorts1 - 1);
diff --git a/drivers/staging/comedi/drivers/ni_daq_700.c b/drivers/staging/comedi/drivers/ni_daq_700.c
index 7ea64538e055..06dd44ff1a95 100644
--- a/drivers/staging/comedi/drivers/ni_daq_700.c
+++ b/drivers/staging/comedi/drivers/ni_daq_700.c
@@ -380,7 +380,7 @@ static int dio700_attach(struct comedi_device *dev, struct comedi_devconfig *it)
380 return -EIO; 380 return -EIO;
381 iobase = link->io.BasePort1; 381 iobase = link->io.BasePort1;
382#ifdef incomplete 382#ifdef incomplete
383 irq = link->irq.AssignedIRQ; 383 irq = link->irq;
384#endif 384#endif
385 break; 385 break;
386 default: 386 default:
@@ -470,7 +470,6 @@ static const dev_info_t dev_info = "ni_daq_700";
470 470
471struct local_info_t { 471struct local_info_t {
472 struct pcmcia_device *link; 472 struct pcmcia_device *link;
473 dev_node_t node;
474 int stop; 473 int stop;
475 struct bus_operations *bus; 474 struct bus_operations *bus;
476}; 475};
@@ -502,10 +501,6 @@ static int dio700_cs_attach(struct pcmcia_device *link)
502 local->link = link; 501 local->link = link;
503 link->priv = local; 502 link->priv = local;
504 503
505 /* Interrupt setup */
506 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
507 link->irq.Handler = NULL;
508
509 /* 504 /*
510 General socket configuration defaults can go here. In this 505 General socket configuration defaults can go here. In this
511 client, we assume very little, and rely on the CIS for almost 506 client, we assume very little, and rely on the CIS for almost
@@ -539,10 +534,8 @@ static void dio700_cs_detach(struct pcmcia_device *link)
539 534
540 dev_dbg(&link->dev, "dio700_cs_detach\n"); 535 dev_dbg(&link->dev, "dio700_cs_detach\n");
541 536
542 if (link->dev_node) { 537 ((struct local_info_t *)link->priv)->stop = 1;
543 ((struct local_info_t *)link->priv)->stop = 1; 538 dio700_release(link);
544 dio700_release(link);
545 }
546 539
547 /* This points to the parent struct local_info_t struct */ 540 /* This points to the parent struct local_info_t struct */
548 if (link->priv) 541 if (link->priv)
@@ -577,8 +570,7 @@ static int dio700_pcmcia_config_loop(struct pcmcia_device *p_dev,
577 } 570 }
578 571
579 /* Do we need to allocate an interrupt? */ 572 /* Do we need to allocate an interrupt? */
580 if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1) 573 p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
581 p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
582 574
583 /* IO window settings */ 575 /* IO window settings */
584 p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0; 576 p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
@@ -625,7 +617,6 @@ static int dio700_pcmcia_config_loop(struct pcmcia_device *p_dev,
625 617
626static void dio700_config(struct pcmcia_device *link) 618static void dio700_config(struct pcmcia_device *link)
627{ 619{
628 struct local_info_t *dev = link->priv;
629 win_req_t req; 620 win_req_t req;
630 int ret; 621 int ret;
631 622
@@ -639,16 +630,8 @@ static void dio700_config(struct pcmcia_device *link)
639 goto failed; 630 goto failed;
640 } 631 }
641 632
642 /* 633 if (!link->irq)
643 Allocate an interrupt line. Note that this does not assign a 634 goto failed;
644 handler to the interrupt, unless the 'Handler' member of the
645 irq structure is initialized.
646 */
647 if (link->conf.Attributes & CONF_ENABLE_IRQ) {
648 ret = pcmcia_request_irq(link, &link->irq);
649 if (ret)
650 goto failed;
651 }
652 635
653 /* 636 /*
654 This actually configures the PCMCIA socket -- setting up 637 This actually configures the PCMCIA socket -- setting up
@@ -659,19 +642,10 @@ static void dio700_config(struct pcmcia_device *link)
659 if (ret != 0) 642 if (ret != 0)
660 goto failed; 643 goto failed;
661 644
662 /*
663 At this point, the dev_node_t structure(s) need to be
664 initialized and arranged in a linked list at link->dev.
665 */
666 sprintf(dev->node.dev_name, "ni_daq_700");
667 dev->node.major = dev->node.minor = 0;
668 link->dev_node = &dev->node;
669
670 /* Finally, report what we've done */ 645 /* Finally, report what we've done */
671 printk(KERN_INFO "%s: index 0x%02x", 646 dev_info(&link->dev, "index 0x%02x", link->conf.ConfigIndex);
672 dev->node.dev_name, link->conf.ConfigIndex);
673 if (link->conf.Attributes & CONF_ENABLE_IRQ) 647 if (link->conf.Attributes & CONF_ENABLE_IRQ)
674 printk(", irq %d", link->irq.AssignedIRQ); 648 printk(", irq %d", link->irq);
675 if (link->io.NumPorts1) 649 if (link->io.NumPorts1)
676 printk(", io 0x%04x-0x%04x", link->io.BasePort1, 650 printk(", io 0x%04x-0x%04x", link->io.BasePort1,
677 link->io.BasePort1 + link->io.NumPorts1 - 1); 651 link->io.BasePort1 + link->io.NumPorts1 - 1);
diff --git a/drivers/staging/comedi/drivers/ni_daq_dio24.c b/drivers/staging/comedi/drivers/ni_daq_dio24.c
index ddc312b5d20d..7bfe08b01fe9 100644
--- a/drivers/staging/comedi/drivers/ni_daq_dio24.c
+++ b/drivers/staging/comedi/drivers/ni_daq_dio24.c
@@ -131,7 +131,7 @@ static int dio24_attach(struct comedi_device *dev, struct comedi_devconfig *it)
131 return -EIO; 131 return -EIO;
132 iobase = link->io.BasePort1; 132 iobase = link->io.BasePort1;
133#ifdef incomplete 133#ifdef incomplete
134 irq = link->irq.AssignedIRQ; 134 irq = link->irq;
135#endif 135#endif
136 break; 136 break;
137 default: 137 default:
@@ -221,7 +221,6 @@ static const dev_info_t dev_info = "ni_daq_dio24";
221 221
222struct local_info_t { 222struct local_info_t {
223 struct pcmcia_device *link; 223 struct pcmcia_device *link;
224 dev_node_t node;
225 int stop; 224 int stop;
226 struct bus_operations *bus; 225 struct bus_operations *bus;
227}; 226};
@@ -253,10 +252,6 @@ static int dio24_cs_attach(struct pcmcia_device *link)
253 local->link = link; 252 local->link = link;
254 link->priv = local; 253 link->priv = local;
255 254
256 /* Interrupt setup */
257 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
258 link->irq.Handler = NULL;
259
260 /* 255 /*
261 General socket configuration defaults can go here. In this 256 General socket configuration defaults can go here. In this
262 client, we assume very little, and rely on the CIS for almost 257 client, we assume very little, and rely on the CIS for almost
@@ -290,10 +285,8 @@ static void dio24_cs_detach(struct pcmcia_device *link)
290 285
291 dev_dbg(&link->dev, "dio24_cs_detach\n"); 286 dev_dbg(&link->dev, "dio24_cs_detach\n");
292 287
293 if (link->dev_node) { 288 ((struct local_info_t *)link->priv)->stop = 1;
294 ((struct local_info_t *)link->priv)->stop = 1; 289 dio24_release(link);
295 dio24_release(link);
296 }
297 290
298 /* This points to the parent local_info_t struct */ 291 /* This points to the parent local_info_t struct */
299 if (link->priv) 292 if (link->priv)
@@ -328,8 +321,7 @@ static int dio24_pcmcia_config_loop(struct pcmcia_device *p_dev,
328 } 321 }
329 322
330 /* Do we need to allocate an interrupt? */ 323 /* Do we need to allocate an interrupt? */
331 if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1) 324 p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
332 p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
333 325
334 /* IO window settings */ 326 /* IO window settings */
335 p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0; 327 p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
@@ -376,7 +368,6 @@ static int dio24_pcmcia_config_loop(struct pcmcia_device *p_dev,
376 368
377static void dio24_config(struct pcmcia_device *link) 369static void dio24_config(struct pcmcia_device *link)
378{ 370{
379 struct local_info_t *dev = link->priv;
380 int ret; 371 int ret;
381 win_req_t req; 372 win_req_t req;
382 373
@@ -390,16 +381,8 @@ static void dio24_config(struct pcmcia_device *link)
390 goto failed; 381 goto failed;
391 } 382 }
392 383
393 /* 384 if (!link->irq)
394 Allocate an interrupt line. Note that this does not assign a 385 goto failed;
395 handler to the interrupt, unless the 'Handler' member of the
396 irq structure is initialized.
397 */
398 if (link->conf.Attributes & CONF_ENABLE_IRQ) {
399 ret = pcmcia_request_irq(link, &link->irq);
400 if (ret)
401 goto failed;
402 }
403 386
404 /* 387 /*
405 This actually configures the PCMCIA socket -- setting up 388 This actually configures the PCMCIA socket -- setting up
@@ -410,19 +393,10 @@ static void dio24_config(struct pcmcia_device *link)
410 if (ret) 393 if (ret)
411 goto failed; 394 goto failed;
412 395
413 /*
414 At this point, the dev_node_t structure(s) need to be
415 initialized and arranged in a linked list at link->dev.
416 */
417 sprintf(dev->node.dev_name, "ni_daq_dio24");
418 dev->node.major = dev->node.minor = 0;
419 link->dev_node = &dev->node;
420
421 /* Finally, report what we've done */ 396 /* Finally, report what we've done */
422 printk(KERN_INFO "%s: index 0x%02x", 397 dev_info(&link->dev, "index 0x%02x", link->conf.ConfigIndex);
423 dev->node.dev_name, link->conf.ConfigIndex);
424 if (link->conf.Attributes & CONF_ENABLE_IRQ) 398 if (link->conf.Attributes & CONF_ENABLE_IRQ)
425 printk(", irq %d", link->irq.AssignedIRQ); 399 printk(", irq %d", link->irq);
426 if (link->io.NumPorts1) 400 if (link->io.NumPorts1)
427 printk(", io 0x%04x-0x%04x", link->io.BasePort1, 401 printk(", io 0x%04x-0x%04x", link->io.BasePort1,
428 link->io.BasePort1 + link->io.NumPorts1 - 1); 402 link->io.BasePort1 + link->io.NumPorts1 - 1);
diff --git a/drivers/staging/comedi/drivers/ni_labpc_cs.c b/drivers/staging/comedi/drivers/ni_labpc_cs.c
index 8ad1055a5cc1..fd8d3e9520a0 100644
--- a/drivers/staging/comedi/drivers/ni_labpc_cs.c
+++ b/drivers/staging/comedi/drivers/ni_labpc_cs.c
@@ -144,7 +144,7 @@ static int labpc_attach(struct comedi_device *dev, struct comedi_devconfig *it)
144 if (!link) 144 if (!link)
145 return -EIO; 145 return -EIO;
146 iobase = link->io.BasePort1; 146 iobase = link->io.BasePort1;
147 irq = link->irq.AssignedIRQ; 147 irq = link->irq;
148 break; 148 break;
149 default: 149 default:
150 printk("bug! couldn't determine board type\n"); 150 printk("bug! couldn't determine board type\n");
@@ -199,7 +199,6 @@ static const dev_info_t dev_info = "daqcard-1200";
199 199
200struct local_info_t { 200struct local_info_t {
201 struct pcmcia_device *link; 201 struct pcmcia_device *link;
202 dev_node_t node;
203 int stop; 202 int stop;
204 struct bus_operations *bus; 203 struct bus_operations *bus;
205}; 204};
@@ -229,10 +228,6 @@ static int labpc_cs_attach(struct pcmcia_device *link)
229 local->link = link; 228 local->link = link;
230 link->priv = local; 229 link->priv = local;
231 230
232 /* Interrupt setup */
233 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_FORCED_PULSE;
234 link->irq.Handler = NULL;
235
236 /* 231 /*
237 General socket configuration defaults can go here. In this 232 General socket configuration defaults can go here. In this
238 client, we assume very little, and rely on the CIS for almost 233 client, we assume very little, and rely on the CIS for almost
@@ -269,10 +264,8 @@ static void labpc_cs_detach(struct pcmcia_device *link)
269 the release() function is called, that will trigger a proper 264 the release() function is called, that will trigger a proper
270 detach(). 265 detach().
271 */ 266 */
272 if (link->dev_node) { 267 ((struct local_info_t *)link->priv)->stop = 1;
273 ((struct local_info_t *)link->priv)->stop = 1; 268 labpc_release(link);
274 labpc_release(link);
275 }
276 269
277 /* This points to the parent local_info_t struct (may be null) */ 270 /* This points to the parent local_info_t struct (may be null) */
278 kfree(link->priv); 271 kfree(link->priv);
@@ -306,8 +299,7 @@ static int labpc_pcmcia_config_loop(struct pcmcia_device *p_dev,
306 } 299 }
307 300
308 /* Do we need to allocate an interrupt? */ 301 /* Do we need to allocate an interrupt? */
309 if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1) 302 p_dev->conf.Attributes |= CONF_ENABLE_IRQ | CONF_ENABLE_PULSE_IRQ;
310 p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
311 303
312 /* IO window settings */ 304 /* IO window settings */
313 p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0; 305 p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
@@ -355,7 +347,6 @@ static int labpc_pcmcia_config_loop(struct pcmcia_device *p_dev,
355 347
356static void labpc_config(struct pcmcia_device *link) 348static void labpc_config(struct pcmcia_device *link)
357{ 349{
358 struct local_info_t *dev = link->priv;
359 int ret; 350 int ret;
360 win_req_t req; 351 win_req_t req;
361 352
@@ -367,16 +358,8 @@ static void labpc_config(struct pcmcia_device *link)
367 goto failed; 358 goto failed;
368 } 359 }
369 360
370 /* 361 if (!link->irq)
371 Allocate an interrupt line. Note that this does not assign a 362 goto failed;
372 handler to the interrupt, unless the 'Handler' member of the
373 irq structure is initialized.
374 */
375 if (link->conf.Attributes & CONF_ENABLE_IRQ) {
376 ret = pcmcia_request_irq(link, &link->irq);
377 if (ret)
378 goto failed;
379 }
380 363
381 /* 364 /*
382 This actually configures the PCMCIA socket -- setting up 365 This actually configures the PCMCIA socket -- setting up
@@ -387,19 +370,10 @@ static void labpc_config(struct pcmcia_device *link)
387 if (ret) 370 if (ret)
388 goto failed; 371 goto failed;
389 372
390 /*
391 At this point, the dev_node_t structure(s) need to be
392 initialized and arranged in a linked list at link->dev.
393 */
394 sprintf(dev->node.dev_name, "daqcard-1200");
395 dev->node.major = dev->node.minor = 0;
396 link->dev_node = &dev->node;
397
398 /* Finally, report what we've done */ 373 /* Finally, report what we've done */
399 printk(KERN_INFO "%s: index 0x%02x", 374 dev_info(&link->dev, "index 0x%02x", link->conf.ConfigIndex);
400 dev->node.dev_name, link->conf.ConfigIndex);
401 if (link->conf.Attributes & CONF_ENABLE_IRQ) 375 if (link->conf.Attributes & CONF_ENABLE_IRQ)
402 printk(", irq %d", link->irq.AssignedIRQ); 376 printk(", irq %d", link->irq);
403 if (link->io.NumPorts1) 377 if (link->io.NumPorts1)
404 printk(", io 0x%04x-0x%04x", link->io.BasePort1, 378 printk(", io 0x%04x-0x%04x", link->io.BasePort1,
405 link->io.BasePort1 + link->io.NumPorts1 - 1); 379 link->io.BasePort1 + link->io.NumPorts1 - 1);
diff --git a/drivers/staging/comedi/drivers/ni_mio_cs.c b/drivers/staging/comedi/drivers/ni_mio_cs.c
index dc4849a40c97..1e8aebae8ae8 100644
--- a/drivers/staging/comedi/drivers/ni_mio_cs.c
+++ b/drivers/staging/comedi/drivers/ni_mio_cs.c
@@ -262,17 +262,11 @@ static void cs_detach(struct pcmcia_device *);
262 262
263static struct pcmcia_device *cur_dev = NULL; 263static struct pcmcia_device *cur_dev = NULL;
264static const dev_info_t dev_info = "ni_mio_cs"; 264static const dev_info_t dev_info = "ni_mio_cs";
265static dev_node_t dev_node = {
266 "ni_mio_cs",
267 COMEDI_MAJOR, 0,
268 NULL
269};
270 265
271static int cs_attach(struct pcmcia_device *link) 266static int cs_attach(struct pcmcia_device *link)
272{ 267{
273 link->io.Attributes1 = IO_DATA_PATH_WIDTH_16; 268 link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
274 link->io.NumPorts1 = 16; 269 link->io.NumPorts1 = 16;
275 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
276 link->conf.Attributes = CONF_ENABLE_IRQ; 270 link->conf.Attributes = CONF_ENABLE_IRQ;
277 link->conf.IntType = INT_MEMORY_AND_IO; 271 link->conf.IntType = INT_MEMORY_AND_IO;
278 272
@@ -292,8 +286,7 @@ static void cs_detach(struct pcmcia_device *link)
292{ 286{
293 DPRINTK("cs_detach(link=%p)\n", link); 287 DPRINTK("cs_detach(link=%p)\n", link);
294 288
295 if (link->dev_node) 289 cs_release(link);
296 cs_release(link);
297} 290}
298 291
299static int mio_cs_suspend(struct pcmcia_device *link) 292static int mio_cs_suspend(struct pcmcia_device *link)
@@ -344,14 +337,10 @@ static void mio_cs_config(struct pcmcia_device *link)
344 return; 337 return;
345 } 338 }
346 339
347 ret = pcmcia_request_irq(link, &link->irq); 340 if (!link->irq)
348 if (ret) { 341 dev_info(&link->dev, "no IRQ available\n");
349 printk("pcmcia_request_irq() returned error: %i\n", ret);
350 }
351 342
352 ret = pcmcia_request_configuration(link, &link->conf); 343 ret = pcmcia_request_configuration(link, &link->conf);
353
354 link->dev_node = &dev_node;
355} 344}
356 345
357static int mio_cs_attach(struct comedi_device *dev, struct comedi_devconfig *it) 346static int mio_cs_attach(struct comedi_device *dev, struct comedi_devconfig *it)
@@ -369,7 +358,7 @@ static int mio_cs_attach(struct comedi_device *dev, struct comedi_devconfig *it)
369 dev->driver = &driver_ni_mio_cs; 358 dev->driver = &driver_ni_mio_cs;
370 dev->iobase = link->io.BasePort1; 359 dev->iobase = link->io.BasePort1;
371 360
372 irq = link->irq.AssignedIRQ; 361 irq = link->irq;
373 362
374 printk("comedi%d: %s: DAQCard: io 0x%04lx, irq %u, ", 363 printk("comedi%d: %s: DAQCard: io 0x%04lx, irq %u, ",
375 dev->minor, dev->driver->driver_name, dev->iobase, irq); 364 dev->minor, dev->driver->driver_name, dev->iobase, irq);
diff --git a/drivers/staging/comedi/drivers/quatech_daqp_cs.c b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
index 3325f24448b5..1786db2f3378 100644
--- a/drivers/staging/comedi/drivers/quatech_daqp_cs.c
+++ b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
@@ -60,7 +60,6 @@ Devices: [Quatech] DAQP-208 (daqp), DAQP-308
60 60
61struct local_info_t { 61struct local_info_t {
62 struct pcmcia_device *link; 62 struct pcmcia_device *link;
63 dev_node_t node;
64 int stop; 63 int stop;
65 int table_index; 64 int table_index;
66 char board_name[32]; 65 char board_name[32];
@@ -1040,10 +1039,6 @@ static int daqp_cs_attach(struct pcmcia_device *link)
1040 local->link = link; 1039 local->link = link;
1041 link->priv = local; 1040 link->priv = local;
1042 1041
1043 /* Interrupt setup */
1044 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
1045 link->irq.Handler = daqp_interrupt;
1046
1047 /* 1042 /*
1048 General socket configuration defaults can go here. In this 1043 General socket configuration defaults can go here. In this
1049 client, we assume very little, and rely on the CIS for almost 1044 client, we assume very little, and rely on the CIS for almost
@@ -1074,10 +1069,8 @@ static void daqp_cs_detach(struct pcmcia_device *link)
1074 1069
1075 dev_dbg(&link->dev, "daqp_cs_detach\n"); 1070 dev_dbg(&link->dev, "daqp_cs_detach\n");
1076 1071
1077 if (link->dev_node) { 1072 dev->stop = 1;
1078 dev->stop = 1; 1073 daqp_cs_release(link);
1079 daqp_cs_release(link);
1080 }
1081 1074
1082 /* Unlink device structure, and free it */ 1075 /* Unlink device structure, and free it */
1083 dev_table[dev->table_index] = NULL; 1076 dev_table[dev->table_index] = NULL;
@@ -1105,8 +1098,7 @@ static int daqp_pcmcia_config_loop(struct pcmcia_device *p_dev,
1105 return -ENODEV; 1098 return -ENODEV;
1106 1099
1107 /* Do we need to allocate an interrupt? */ 1100 /* Do we need to allocate an interrupt? */
1108 if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1) 1101 p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
1109 p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
1110 1102
1111 /* IO window settings */ 1103 /* IO window settings */
1112 p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0; 1104 p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
@@ -1133,7 +1125,6 @@ static int daqp_pcmcia_config_loop(struct pcmcia_device *p_dev,
1133 1125
1134static void daqp_cs_config(struct pcmcia_device *link) 1126static void daqp_cs_config(struct pcmcia_device *link)
1135{ 1127{
1136 struct local_info_t *dev = link->priv;
1137 int ret; 1128 int ret;
1138 1129
1139 dev_dbg(&link->dev, "daqp_cs_config\n"); 1130 dev_dbg(&link->dev, "daqp_cs_config\n");
@@ -1144,16 +1135,9 @@ static void daqp_cs_config(struct pcmcia_device *link)
1144 goto failed; 1135 goto failed;
1145 } 1136 }
1146 1137
1147 /* 1138 ret = pcmcia_request_irq(link, daqp_interrupt);
1148 Allocate an interrupt line. Note that this does not assign a 1139 if (ret)
1149 handler to the interrupt, unless the 'Handler' member of the 1140 goto failed;
1150 irq structure is initialized.
1151 */
1152 if (link->conf.Attributes & CONF_ENABLE_IRQ) {
1153 ret = pcmcia_request_irq(link, &link->irq);
1154 if (ret)
1155 goto failed;
1156 }
1157 1141
1158 /* 1142 /*
1159 This actually configures the PCMCIA socket -- setting up 1143 This actually configures the PCMCIA socket -- setting up
@@ -1164,23 +1148,10 @@ static void daqp_cs_config(struct pcmcia_device *link)
1164 if (ret) 1148 if (ret)
1165 goto failed; 1149 goto failed;
1166 1150
1167 /*
1168 At this point, the dev_node_t structure(s) need to be
1169 initialized and arranged in a linked list at link->dev.
1170 */
1171 /* Comedi's PCMCIA script uses this device name (extracted
1172 * from /var/lib/pcmcia/stab) to pass to comedi_config
1173 */
1174 /* sprintf(dev->node.dev_name, "daqp%d", dev->table_index); */
1175 sprintf(dev->node.dev_name, "quatech_daqp_cs");
1176 dev->node.major = dev->node.minor = 0;
1177 link->dev_node = &dev->node;
1178
1179 /* Finally, report what we've done */ 1151 /* Finally, report what we've done */
1180 printk(KERN_INFO "%s: index 0x%02x", 1152 dev_info(&link->dev, "index 0x%02x", link->conf.ConfigIndex);
1181 dev->node.dev_name, link->conf.ConfigIndex);
1182 if (link->conf.Attributes & CONF_ENABLE_IRQ) 1153 if (link->conf.Attributes & CONF_ENABLE_IRQ)
1183 printk(", irq %u", link->irq.AssignedIRQ); 1154 printk(", irq %u", link->irq);
1184 if (link->io.NumPorts1) 1155 if (link->io.NumPorts1)
1185 printk(", io 0x%04x-0x%04x", link->io.BasePort1, 1156 printk(", io 0x%04x-0x%04x", link->io.BasePort1,
1186 link->io.BasePort1 + link->io.NumPorts1 - 1); 1157 link->io.BasePort1 + link->io.NumPorts1 - 1);
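
The comedi hunks above all make the same conversion to the reworked PCMCIA IRQ interface: the per-driver dev_node_t bookkeeping is dropped, the core assigns the interrupt and exposes it as a plain unsigned int in link->irq, and drivers that keep their own handler (quatech_daqp_cs just above) pass it straight to pcmcia_request_irq(). A minimal, hedged sketch of a config routine after the conversion — the example_* names are hypothetical and stand in for the per-driver functions:

    #include <pcmcia/cistpl.h>
    #include <pcmcia/ds.h>

    /* hypothetical CIS-walking callback, like the *_config_loop functions above */
    static int example_config_loop(struct pcmcia_device *p_dev,
                                   cistpl_cftable_entry_t *cfg,
                                   cistpl_cftable_entry_t *dflt,
                                   unsigned int vcc, void *priv_data);

    static void example_cs_config(struct pcmcia_device *link)
    {
            int ret;

            ret = pcmcia_loop_config(link, example_config_loop, NULL);
            if (ret)
                    goto failed;

            /* the core has already assigned the IRQ; handler-less drivers just check it */
            if (!link->irq)
                    goto failed;

            ret = pcmcia_request_configuration(link, &link->conf);
            if (ret)
                    goto failed;

            dev_info(&link->dev, "index 0x%02x, irq %u\n",
                     link->conf.ConfigIndex, link->irq);
            return;
    failed:
            pcmcia_disable_device(link);
    }

Drivers that do want their own interrupt handler call pcmcia_request_irq(link, handler) at the point where they previously filled in link->irq.Handler, as the quatech_daqp_cs hunk shows.
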
diff --git a/drivers/staging/dt3155/dt3155_drv.c b/drivers/staging/dt3155/dt3155_drv.c
index e2c44ec6fc45..7ac2c6d8e9a3 100644
--- a/drivers/staging/dt3155/dt3155_drv.c
+++ b/drivers/staging/dt3155/dt3155_drv.c
@@ -464,9 +464,9 @@ static void dt3155_init_isr(int minor)
464 /* 50/60 Hz should be set before this point but let's make sure it is */ 464 /* 50/60 Hz should be set before this point but let's make sure it is */
465 /* right anyway */ 465 /* right anyway */
466 466
467 ReadI2C(dt3155_lbase[ minor ], CONFIG, &i2c_csr2.reg); 467 ReadI2C(dt3155_lbase[ minor ], CSR2, &i2c_csr2.reg);
468 i2c_csr2.fld.HZ50 = FORMAT50HZ; 468 i2c_csr2.fld.HZ50 = FORMAT50HZ;
469 WriteI2C(dt3155_lbase[ minor ], CONFIG, i2c_config.reg); 469 WriteI2C(dt3155_lbase[ minor ], CSR2, i2c_csr2.reg);
470 470
471 /* enable busmaster chip, clear flags */ 471 /* enable busmaster chip, clear flags */
472 472
diff --git a/drivers/staging/hv/Hv.c b/drivers/staging/hv/Hv.c
index 5d53889fb4a4..3a1112d29aeb 100644
--- a/drivers/staging/hv/Hv.c
+++ b/drivers/staging/hv/Hv.c
@@ -306,9 +306,9 @@ void HvCleanup(void)
306 DPRINT_ENTER(VMBUS); 306 DPRINT_ENTER(VMBUS);
307 307
308 if (gHvContext.SignalEventBuffer) { 308 if (gHvContext.SignalEventBuffer) {
309 kfree(gHvContext.SignalEventBuffer);
309 gHvContext.SignalEventBuffer = NULL; 310 gHvContext.SignalEventBuffer = NULL;
310 gHvContext.SignalEventParam = NULL; 311 gHvContext.SignalEventParam = NULL;
311 kfree(gHvContext.SignalEventBuffer);
312 } 312 }
313 313
314 if (gHvContext.HypercallPage) { 314 if (gHvContext.HypercallPage) {
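
The HvCleanup() change above is an ordering fix: the old code cleared gHvContext.SignalEventBuffer before calling kfree(), so kfree() was always handed NULL and the allocation leaked. The underlying idiom — release the memory first, then clear the stale pointer — in a plain user-space analogue (struct ctx and ctx_cleanup are made-up names):

    #include <stdlib.h>

    struct ctx {
            void *signal_buf;
    };

    static void ctx_cleanup(struct ctx *c)
    {
            if (c->signal_buf) {
                    free(c->signal_buf);   /* free the real pointer first */
                    c->signal_buf = NULL;  /* then clear it so later code cannot reuse it */
            }
    }
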
diff --git a/drivers/staging/hv/RndisFilter.c b/drivers/staging/hv/RndisFilter.c
index cd2930de2176..6704f64c93f0 100644
--- a/drivers/staging/hv/RndisFilter.c
+++ b/drivers/staging/hv/RndisFilter.c
@@ -751,6 +751,7 @@ static int RndisFilterOpenDevice(struct rndis_device *Device)
751 751
752 ret = RndisFilterSetPacketFilter(Device, 752 ret = RndisFilterSetPacketFilter(Device,
753 NDIS_PACKET_TYPE_BROADCAST | 753 NDIS_PACKET_TYPE_BROADCAST |
754 NDIS_PACKET_TYPE_ALL_MULTICAST |
754 NDIS_PACKET_TYPE_DIRECTED); 755 NDIS_PACKET_TYPE_DIRECTED);
755 if (ret == 0) 756 if (ret == 0)
756 Device->State = RNDIS_DEV_DATAINITIALIZED; 757 Device->State = RNDIS_DEV_DATAINITIALIZED;
diff --git a/drivers/staging/hv/netvsc_drv.c b/drivers/staging/hv/netvsc_drv.c
index 2ccb6b93fe47..ab27d9a4446d 100644
--- a/drivers/staging/hv/netvsc_drv.c
+++ b/drivers/staging/hv/netvsc_drv.c
@@ -403,8 +403,7 @@ static int netvsc_probe(struct device *device)
403 if (!net_drv_obj->Base.OnDeviceAdd) 403 if (!net_drv_obj->Base.OnDeviceAdd)
404 return -1; 404 return -1;
405 405
406 net = alloc_netdev(sizeof(struct net_device_context), "seth%d", 406 net = alloc_etherdev(sizeof(struct net_device_context));
407 ether_setup);
408 if (!net) 407 if (!net)
409 return -1; 408 return -1;
410 409
diff --git a/drivers/staging/iio/accel/lis3l02dq_core.c b/drivers/staging/iio/accel/lis3l02dq_core.c
index ea76902797bb..82e43588e8a5 100644
--- a/drivers/staging/iio/accel/lis3l02dq_core.c
+++ b/drivers/staging/iio/accel/lis3l02dq_core.c
@@ -618,7 +618,7 @@ static int lis3l02dq_thresh_handler_th(struct iio_dev *dev_info,
618static void lis3l02dq_thresh_handler_bh_no_check(struct work_struct *work_s) 618static void lis3l02dq_thresh_handler_bh_no_check(struct work_struct *work_s)
619{ 619{
620 struct iio_work_cont *wc 620 struct iio_work_cont *wc
621 = container_of(work_s, struct iio_work_cont, ws_nocheck); 621 = container_of(work_s, struct iio_work_cont, ws);
622 struct lis3l02dq_state *st = wc->st; 622 struct lis3l02dq_state *st = wc->st;
623 u8 t; 623 u8 t;
624 624
diff --git a/drivers/staging/iio/accel/lis3l02dq_ring.c b/drivers/staging/iio/accel/lis3l02dq_ring.c
index 93712430e579..a4d97ea0df3d 100644
--- a/drivers/staging/iio/accel/lis3l02dq_ring.c
+++ b/drivers/staging/iio/accel/lis3l02dq_ring.c
@@ -493,6 +493,9 @@ int lis3l02dq_probe_trigger(struct iio_dev *indio_dev)
493 struct lis3l02dq_state *state = indio_dev->dev_data; 493 struct lis3l02dq_state *state = indio_dev->dev_data;
494 494
495 state->trig = iio_allocate_trigger(); 495 state->trig = iio_allocate_trigger();
496 if (!state->trig)
497 return -ENOMEM;
498
496 state->trig->name = kmalloc(IIO_TRIGGER_NAME_LENGTH, GFP_KERNEL); 499 state->trig->name = kmalloc(IIO_TRIGGER_NAME_LENGTH, GFP_KERNEL);
497 if (!state->trig->name) { 500 if (!state->trig->name) {
498 ret = -ENOMEM; 501 ret = -ENOMEM;
diff --git a/drivers/staging/iio/adc/max1363_core.c b/drivers/staging/iio/adc/max1363_core.c
index 790d1cc9cdc3..773f1d1d9c6e 100644
--- a/drivers/staging/iio/adc/max1363_core.c
+++ b/drivers/staging/iio/adc/max1363_core.c
@@ -557,6 +557,7 @@ error_put_reg:
557 if (!IS_ERR(st->reg)) 557 if (!IS_ERR(st->reg))
558 regulator_put(st->reg); 558 regulator_put(st->reg);
559error_free_st: 559error_free_st:
560 i2c_set_clientdata(client, NULL);
560 kfree(st); 561 kfree(st);
561 562
562error_ret: 563error_ret:
@@ -574,6 +575,7 @@ static int max1363_remove(struct i2c_client *client)
574 regulator_disable(st->reg); 575 regulator_disable(st->reg);
575 regulator_put(st->reg); 576 regulator_put(st->reg);
576 } 577 }
578 i2c_set_clientdata(client, NULL);
577 kfree(st); 579 kfree(st);
578 580
579 return 0; 581 return 0;
diff --git a/drivers/staging/iio/industrialio-core.c b/drivers/staging/iio/industrialio-core.c
index 37f58f66e491..1d77082c8531 100644
--- a/drivers/staging/iio/industrialio-core.c
+++ b/drivers/staging/iio/industrialio-core.c
@@ -537,6 +537,7 @@ static void iio_device_unregister_sysfs(struct iio_dev *dev_info)
537 sysfs_remove_group(&dev_info->dev.kobj, dev_info->attrs); 537 sysfs_remove_group(&dev_info->dev.kobj, dev_info->attrs);
538} 538}
539 539
540/* Return a negative errno on failure */
540int iio_get_new_idr_val(struct idr *this_idr) 541int iio_get_new_idr_val(struct idr *this_idr)
541{ 542{
542 int ret; 543 int ret;
@@ -660,7 +661,7 @@ static int iio_device_register_eventset(struct iio_dev *dev_info)
660 for (i = 0; i < dev_info->num_interrupt_lines; i++) { 661 for (i = 0; i < dev_info->num_interrupt_lines; i++) {
661 dev_info->event_interfaces[i].owner = dev_info->driver_module; 662 dev_info->event_interfaces[i].owner = dev_info->driver_module;
662 ret = iio_get_new_idr_val(&iio_event_idr); 663 ret = iio_get_new_idr_val(&iio_event_idr);
663 if (ret) 664 if (ret < 0)
664 goto error_free_setup_ev_ints; 665 goto error_free_setup_ev_ints;
665 else 666 else
666 dev_info->event_interfaces[i].id = ret; 667 dev_info->event_interfaces[i].id = ret;
diff --git a/drivers/staging/iio/light/tsl2563.c b/drivers/staging/iio/light/tsl2563.c
index 1ba4aa392f6e..8770a00e3652 100644
--- a/drivers/staging/iio/light/tsl2563.c
+++ b/drivers/staging/iio/light/tsl2563.c
@@ -682,6 +682,7 @@ static int __devinit tsl2563_probe(struct i2c_client *client,
682fail2: 682fail2:
683 iio_device_unregister(chip->indio_dev); 683 iio_device_unregister(chip->indio_dev);
684fail1: 684fail1:
685 i2c_set_clientdata(client, NULL);
685 kfree(chip); 686 kfree(chip);
686 return err; 687 return err;
687} 688}
@@ -692,6 +693,7 @@ static int tsl2563_remove(struct i2c_client *client)
692 693
693 iio_device_unregister(chip->indio_dev); 694 iio_device_unregister(chip->indio_dev);
694 695
696 i2c_set_clientdata(client, NULL);
695 kfree(chip); 697 kfree(chip);
696 return 0; 698 return 0;
697} 699}
diff --git a/drivers/staging/iio/ring_sw.c b/drivers/staging/iio/ring_sw.c
index b104c3d9c35e..cf22c091668c 100644
--- a/drivers/staging/iio/ring_sw.c
+++ b/drivers/staging/iio/ring_sw.c
@@ -293,7 +293,7 @@ again:
293 return -EAGAIN; 293 return -EAGAIN;
294 memcpy(data, last_written_p_copy, ring->buf.bpd); 294 memcpy(data, last_written_p_copy, ring->buf.bpd);
295 295
296 if (unlikely(ring->last_written_p >= last_written_p_copy)) 296 if (unlikely(ring->last_written_p != last_written_p_copy))
297 goto again; 297 goto again;
298 298
299 iio_unmark_sw_rb_in_use(&ring->buf); 299 iio_unmark_sw_rb_in_use(&ring->buf);
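
The ring_sw.c change tightens the revalidation step of a lock-free "read the most recent sample" path: after copying from a snapshot of the last-written pointer, the copy is only trustworthy if that pointer is still exactly the snapshotted value, so the retry test must be != rather than >= (the writer can wrap around the buffer and come back to a lower address). A user-space sketch of the snapshot / copy / revalidate pattern, with made-up names (struct ring, read_last):

    #include <stdatomic.h>
    #include <string.h>

    struct ring {
            _Atomic(unsigned char *) last_written_p;
            size_t bpd;     /* bytes per datum */
    };

    /* returns 0 on success, -1 if the writer moved and the caller should retry */
    static int read_last(struct ring *r, unsigned char *data)
    {
            unsigned char *snap = atomic_load(&r->last_written_p);

            if (!snap)
                    return -1;
            memcpy(data, snap, r->bpd);
            /* the copy is valid only if the writer pointer is still exactly the snapshot */
            if (atomic_load(&r->last_written_p) != snap)
                    return -1;
            return 0;
    }
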
diff --git a/drivers/staging/netwave/netwave_cs.c b/drivers/staging/netwave/netwave_cs.c
index 3875a722d12b..f1ee2cbc8407 100644
--- a/drivers/staging/netwave/netwave_cs.c
+++ b/drivers/staging/netwave/netwave_cs.c
@@ -61,7 +61,6 @@
61#include <pcmcia/cistpl.h> 61#include <pcmcia/cistpl.h>
62#include <pcmcia/cisreg.h> 62#include <pcmcia/cisreg.h>
63#include <pcmcia/ds.h> 63#include <pcmcia/ds.h>
64#include <pcmcia/mem_op.h>
65 64
66#include <asm/system.h> 65#include <asm/system.h>
67#include <asm/io.h> 66#include <asm/io.h>
@@ -382,10 +381,6 @@ static int netwave_probe(struct pcmcia_device *link)
382 link->io.Attributes2 = IO_DATA_PATH_WIDTH_16; */ 381 link->io.Attributes2 = IO_DATA_PATH_WIDTH_16; */
383 link->io.IOAddrLines = 5; 382 link->io.IOAddrLines = 5;
384 383
385 /* Interrupt setup */
386 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
387 link->irq.Handler = &netwave_interrupt;
388
389 /* General socket configuration */ 384 /* General socket configuration */
390 link->conf.Attributes = CONF_ENABLE_IRQ; 385 link->conf.Attributes = CONF_ENABLE_IRQ;
391 link->conf.IntType = INT_MEMORY_AND_IO; 386 link->conf.IntType = INT_MEMORY_AND_IO;
@@ -732,7 +727,7 @@ static int netwave_pcmcia_config(struct pcmcia_device *link) {
732 * Now allocate an interrupt line. Note that this does not 727 * Now allocate an interrupt line. Note that this does not
733 * actually assign a handler to the interrupt. 728 * actually assign a handler to the interrupt.
734 */ 729 */
735 ret = pcmcia_request_irq(link, &link->irq); 730 ret = pcmcia_request_irq(link, netwave_interrupt);
736 if (ret) 731 if (ret)
737 goto failed; 732 goto failed;
738 733
@@ -767,7 +762,7 @@ static int netwave_pcmcia_config(struct pcmcia_device *link) {
767 ramBase = ioremap(req.Base, 0x8000); 762 ramBase = ioremap(req.Base, 0x8000);
768 priv->ramBase = ramBase; 763 priv->ramBase = ramBase;
769 764
770 dev->irq = link->irq.AssignedIRQ; 765 dev->irq = link->irq;
771 dev->base_addr = link->io.BasePort1; 766 dev->base_addr = link->io.BasePort1;
772 SET_NETDEV_DEV(dev, &link->dev); 767 SET_NETDEV_DEV(dev, &link->dev);
773 768
diff --git a/drivers/staging/octeon/cvmx-helper-board.c b/drivers/staging/octeon/cvmx-helper-board.c
index 3085e38a6f99..00a555b83354 100644
--- a/drivers/staging/octeon/cvmx-helper-board.c
+++ b/drivers/staging/octeon/cvmx-helper-board.c
@@ -153,6 +153,14 @@ int cvmx_helper_board_get_mii_address(int ipd_port)
153 * through switch. 153 * through switch.
154 */ 154 */
155 return -1; 155 return -1;
156
157 case CVMX_BOARD_TYPE_CUST_WSX16:
158 if (ipd_port >= 0 && ipd_port <= 3)
159 return ipd_port;
160 else if (ipd_port >= 16 && ipd_port <= 19)
161 return ipd_port - 16 + 4;
162 else
163 return -1;
156 } 164 }
157 165
158 /* Some unknown board. Somebody forgot to update this function... */ 166 /* Some unknown board. Somebody forgot to update this function... */
diff --git a/drivers/staging/rt2860/usb_main_dev.c b/drivers/staging/rt2860/usb_main_dev.c
index 1873a79bb033..740db0c1ac01 100644
--- a/drivers/staging/rt2860/usb_main_dev.c
+++ b/drivers/staging/rt2860/usb_main_dev.c
@@ -63,6 +63,7 @@ struct usb_device_id rtusb_usb_id[] = {
63 {USB_DEVICE(0x07D1, 0x3C11)}, /* D-Link */ 63 {USB_DEVICE(0x07D1, 0x3C11)}, /* D-Link */
64 {USB_DEVICE(0x14B2, 0x3C07)}, /* AL */ 64 {USB_DEVICE(0x14B2, 0x3C07)}, /* AL */
65 {USB_DEVICE(0x050D, 0x8053)}, /* Belkin */ 65 {USB_DEVICE(0x050D, 0x8053)}, /* Belkin */
66 {USB_DEVICE(0x050D, 0x825B)}, /* Belkin */
66 {USB_DEVICE(0x14B2, 0x3C23)}, /* Airlink */ 67 {USB_DEVICE(0x14B2, 0x3C23)}, /* Airlink */
67 {USB_DEVICE(0x14B2, 0x3C27)}, /* Airlink */ 68 {USB_DEVICE(0x14B2, 0x3C27)}, /* Airlink */
68 {USB_DEVICE(0x07AA, 0x002F)}, /* Corega */ 69 {USB_DEVICE(0x07AA, 0x002F)}, /* Corega */
diff --git a/drivers/staging/rtl8192su/r8192U_core.c b/drivers/staging/rtl8192su/r8192U_core.c
index e16256fe595a..04d9b85f3d4c 100644
--- a/drivers/staging/rtl8192su/r8192U_core.c
+++ b/drivers/staging/rtl8192su/r8192U_core.c
@@ -113,14 +113,17 @@ u32 rt_global_debug_component = \
113 113
114static const struct usb_device_id rtl8192_usb_id_tbl[] = { 114static const struct usb_device_id rtl8192_usb_id_tbl[] = {
115 /* Realtek */ 115 /* Realtek */
116 {USB_DEVICE(0x0bda, 0x8171)},
116 {USB_DEVICE(0x0bda, 0x8192)}, 117 {USB_DEVICE(0x0bda, 0x8192)},
117 {USB_DEVICE(0x0bda, 0x8709)}, 118 {USB_DEVICE(0x0bda, 0x8709)},
118 /* Corega */ 119 /* Corega */
119 {USB_DEVICE(0x07aa, 0x0043)}, 120 {USB_DEVICE(0x07aa, 0x0043)},
120 /* Belkin */ 121 /* Belkin */
121 {USB_DEVICE(0x050d, 0x805E)}, 122 {USB_DEVICE(0x050d, 0x805E)},
123 {USB_DEVICE(0x050d, 0x815F)}, /* Belkin F5D8053 v6 */
122 /* Sitecom */ 124 /* Sitecom */
123 {USB_DEVICE(0x0df6, 0x0031)}, 125 {USB_DEVICE(0x0df6, 0x0031)},
126 {USB_DEVICE(0x0df6, 0x004b)}, /* WL-349 */
124 /* EnGenius */ 127 /* EnGenius */
125 {USB_DEVICE(0x1740, 0x9201)}, 128 {USB_DEVICE(0x1740, 0x9201)},
126 /* Dlink */ 129 /* Dlink */
diff --git a/drivers/staging/usbip/usbip_event.c b/drivers/staging/usbip/usbip_event.c
index 6da1021e8a65..a2566f1075d5 100644
--- a/drivers/staging/usbip/usbip_event.c
+++ b/drivers/staging/usbip/usbip_event.c
@@ -117,6 +117,9 @@ void usbip_stop_eh(struct usbip_device *ud)
117{ 117{
118 struct usbip_task *eh = &ud->eh; 118 struct usbip_task *eh = &ud->eh;
119 119
120 if (eh->thread == current)
121 return; /* do not wait for myself */
122
120 wait_for_completion(&eh->thread_done); 123 wait_for_completion(&eh->thread_done);
121 usbip_dbg_eh("usbip_eh has finished\n"); 124 usbip_dbg_eh("usbip_eh has finished\n");
122} 125}
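
The usbip_event.c guard keeps a thread from blocking on its own completion: if the event handler itself ends up in usbip_stop_eh(), wait_for_completion() would never return. The same "never wait for yourself" rule in a user-space pthread analogue (struct worker and worker_stop are made-up names):

    #include <pthread.h>

    struct worker {
            pthread_t thread;
    };

    static void worker_stop(struct worker *w)
    {
            /* joining yourself deadlocks, just like waiting on your own completion */
            if (pthread_equal(w->thread, pthread_self()))
                    return;
            pthread_join(w->thread, NULL);
    }
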
diff --git a/drivers/staging/vme/bridges/vme_tsi148.c b/drivers/staging/vme/bridges/vme_tsi148.c
index 68f24425977f..783051f59f19 100644
--- a/drivers/staging/vme/bridges/vme_tsi148.c
+++ b/drivers/staging/vme/bridges/vme_tsi148.c
@@ -2455,9 +2455,10 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2455 dev_info(&pdev->dev, "VME Write and flush and error check is %s\n", 2455 dev_info(&pdev->dev, "VME Write and flush and error check is %s\n",
2456 err_chk ? "enabled" : "disabled"); 2456 err_chk ? "enabled" : "disabled");
2457 2457
2458 if (tsi148_crcsr_init(tsi148_bridge, pdev)) 2458 if (tsi148_crcsr_init(tsi148_bridge, pdev)) {
2459 dev_err(&pdev->dev, "CR/CSR configuration failed.\n"); 2459 dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
2460 goto err_crcsr; 2460 goto err_crcsr;
2461 }
2461 2462
2462 retval = vme_register_bridge(tsi148_bridge); 2463 retval = vme_register_bridge(tsi148_bridge);
2463 if (retval != 0) { 2464 if (retval != 0) {
diff --git a/drivers/staging/wavelan/wavelan_cs.c b/drivers/staging/wavelan/wavelan_cs.c
index 04f691d127b4..37fa85517a58 100644
--- a/drivers/staging/wavelan/wavelan_cs.c
+++ b/drivers/staging/wavelan/wavelan_cs.c
@@ -3850,12 +3850,8 @@ wv_pcmcia_config(struct pcmcia_device * link)
3850 if (i != 0) 3850 if (i != 0)
3851 break; 3851 break;
3852 3852
3853 /* 3853 i = pcmcia_request_irq(link, wavelan_interrupt);
3854 * Now allocate an interrupt line. Note that this does not 3854 if (i != 0)
3855 * actually assign a handler to the interrupt.
3856 */
3857 i = pcmcia_request_irq(link, &link->irq);
3858 if (i != 0)
3859 break; 3855 break;
3860 3856
3861 /* 3857 /*
@@ -3890,7 +3886,7 @@ wv_pcmcia_config(struct pcmcia_device * link)
3890 break; 3886 break;
3891 3887
3892 /* Feed device with this info... */ 3888 /* Feed device with this info... */
3893 dev->irq = link->irq.AssignedIRQ; 3889 dev->irq = link->irq;
3894 dev->base_addr = link->io.BasePort1; 3890 dev->base_addr = link->io.BasePort1;
3895 netif_start_queue(dev); 3891 netif_start_queue(dev);
3896 3892
@@ -4437,10 +4433,6 @@ wavelan_probe(struct pcmcia_device *p_dev)
4437 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 4433 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
4438 p_dev->io.IOAddrLines = 3; 4434 p_dev->io.IOAddrLines = 3;
4439 4435
4440 /* Interrupt setup */
4441 p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
4442 p_dev->irq.Handler = wavelan_interrupt;
4443
4444 /* General socket configuration */ 4436 /* General socket configuration */
4445 p_dev->conf.Attributes = CONF_ENABLE_IRQ; 4437 p_dev->conf.Attributes = CONF_ENABLE_IRQ;
4446 p_dev->conf.IntType = INT_MEMORY_AND_IO; 4438 p_dev->conf.IntType = INT_MEMORY_AND_IO;
@@ -4487,7 +4479,6 @@ wavelan_probe(struct pcmcia_device *p_dev)
4487 4479
4488 ret = wv_hw_config(dev); 4480 ret = wv_hw_config(dev);
4489 if (ret) { 4481 if (ret) {
4490 dev->irq = 0;
4491 pcmcia_disable_device(p_dev); 4482 pcmcia_disable_device(p_dev);
4492 return ret; 4483 return ret;
4493 } 4484 }
diff --git a/drivers/staging/wlags49_h2/wl_cs.c b/drivers/staging/wlags49_h2/wl_cs.c
index 9da42e66085e..c9d99d88b786 100644
--- a/drivers/staging/wlags49_h2/wl_cs.c
+++ b/drivers/staging/wlags49_h2/wl_cs.c
@@ -156,15 +156,12 @@ static int wl_adapter_attach(struct pcmcia_device *link)
156 link->io.NumPorts1 = HCF_NUM_IO_PORTS; 156 link->io.NumPorts1 = HCF_NUM_IO_PORTS;
157 link->io.Attributes1 = IO_DATA_PATH_WIDTH_16; 157 link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
158 link->io.IOAddrLines = 6; 158 link->io.IOAddrLines = 6;
159 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT;
160 link->irq.IRQInfo1 = IRQ_INFO2_VALID | IRQ_LEVEL_ID;
161 link->irq.Handler = &wl_isr;
162 link->conf.Attributes = CONF_ENABLE_IRQ; 159 link->conf.Attributes = CONF_ENABLE_IRQ;
163 link->conf.IntType = INT_MEMORY_AND_IO; 160 link->conf.IntType = INT_MEMORY_AND_IO;
164 link->conf.ConfigIndex = 5; 161 link->conf.ConfigIndex = 5;
165 link->conf.Present = PRESENT_OPTION; 162 link->conf.Present = PRESENT_OPTION;
166 163
167 link->priv = link->irq.Instance = dev; 164 link->priv = dev;
168 lp = wl_priv(dev); 165 lp = wl_priv(dev);
169 lp->link = link; 166 lp->link = link;
170 167
@@ -318,11 +315,11 @@ void wl_adapter_insert( struct pcmcia_device *link )
318 link->conf.Attributes |= CONF_ENABLE_IRQ; 315 link->conf.Attributes |= CONF_ENABLE_IRQ;
319 316
320 CS_CHECK(RequestIO, pcmcia_request_io(link, &link->io)); 317 CS_CHECK(RequestIO, pcmcia_request_io(link, &link->io));
321 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); 318 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, wl_isr));
322 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); 319 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
323 320
324 321
325 dev->irq = link->irq.AssignedIRQ; 322 dev->irq = link->irq;
326 dev->base_addr = link->io.BasePort1; 323 dev->base_addr = link->io.BasePort1;
327 324
328 SET_NETDEV_DEV(dev, &handle_to_dev(link)); 325 SET_NETDEV_DEV(dev, &handle_to_dev(link));
diff --git a/drivers/telephony/ixj_pcmcia.c b/drivers/telephony/ixj_pcmcia.c
index d442fd35620a..99cb2246ac72 100644
--- a/drivers/telephony/ixj_pcmcia.c
+++ b/drivers/telephony/ixj_pcmcia.c
@@ -22,7 +22,6 @@
22 22
23typedef struct ixj_info_t { 23typedef struct ixj_info_t {
24 int ndev; 24 int ndev;
25 dev_node_t node;
26 struct ixj *port; 25 struct ixj *port;
27} ixj_info_t; 26} ixj_info_t;
28 27
@@ -155,8 +154,6 @@ static int ixj_config(struct pcmcia_device * link)
155 j = ixj_pcmcia_probe(link->io.BasePort1, link->io.BasePort1 + 0x10); 154 j = ixj_pcmcia_probe(link->io.BasePort1, link->io.BasePort1 + 0x10);
156 155
157 info->ndev = 1; 156 info->ndev = 1;
158 info->node.major = PHONE_MAJOR;
159 link->dev_node = &info->node;
160 ixj_get_serial(link, j); 157 ixj_get_serial(link, j);
161 return 0; 158 return 0;
162 159
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index be6331e2c276..5e1a253b08a0 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1542,6 +1542,9 @@ static const struct usb_device_id acm_ids[] = {
1542 { USB_DEVICE(0x1bbb, 0x0003), /* Alcatel OT-I650 */ 1542 { USB_DEVICE(0x1bbb, 0x0003), /* Alcatel OT-I650 */
1543 .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */ 1543 .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
1544 }, 1544 },
1545 { USB_DEVICE(0x1576, 0x03b1), /* Maretron USB100 */
1546 .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
1547 },
1545 1548
1546 /* Nokia S60 phones expose two ACM channels. The first is 1549 /* Nokia S60 phones expose two ACM channels. The first is
1547 * a modem and is picked up by the standard AT-command 1550 * a modem and is picked up by the standard AT-command
diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig
index 97a819c23ef3..7e594449600e 100644
--- a/drivers/usb/core/Kconfig
+++ b/drivers/usb/core/Kconfig
@@ -109,7 +109,7 @@ config USB_SUSPEND
109config USB_OTG 109config USB_OTG
110 bool 110 bool
111 depends on USB && EXPERIMENTAL 111 depends on USB && EXPERIMENTAL
112 select USB_SUSPEND 112 depends on USB_SUSPEND
113 default n 113 default n
114 114
115 115
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 6a3b5cae3a6e..2f3dc4cdf79b 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -301,7 +301,7 @@ static int usb_probe_interface(struct device *dev)
301 301
302 intf->condition = USB_INTERFACE_BINDING; 302 intf->condition = USB_INTERFACE_BINDING;
303 303
304 /* Bound interfaces are initially active. They are 304 /* Probed interfaces are initially active. They are
305 * runtime-PM-enabled only if the driver has autosuspend support. 305 * runtime-PM-enabled only if the driver has autosuspend support.
306 * They are sensitive to their children's power states. 306 * They are sensitive to their children's power states.
307 */ 307 */
@@ -437,11 +437,11 @@ int usb_driver_claim_interface(struct usb_driver *driver,
437 437
438 iface->condition = USB_INTERFACE_BOUND; 438 iface->condition = USB_INTERFACE_BOUND;
439 439
440 /* Bound interfaces are initially active. They are 440 /* Claimed interfaces are initially inactive (suspended). They are
441 * runtime-PM-enabled only if the driver has autosuspend support. 441 * runtime-PM-enabled only if the driver has autosuspend support.
442 * They are sensitive to their children's power states. 442 * They are sensitive to their children's power states.
443 */ 443 */
444 pm_runtime_set_active(dev); 444 pm_runtime_set_suspended(dev);
445 pm_suspend_ignore_children(dev, false); 445 pm_suspend_ignore_children(dev, false);
446 if (driver->supports_autosuspend) 446 if (driver->supports_autosuspend)
447 pm_runtime_enable(dev); 447 pm_runtime_enable(dev);
@@ -1170,7 +1170,7 @@ done:
1170static int usb_suspend_both(struct usb_device *udev, pm_message_t msg) 1170static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
1171{ 1171{
1172 int status = 0; 1172 int status = 0;
1173 int i = 0; 1173 int i = 0, n = 0;
1174 struct usb_interface *intf; 1174 struct usb_interface *intf;
1175 1175
1176 if (udev->state == USB_STATE_NOTATTACHED || 1176 if (udev->state == USB_STATE_NOTATTACHED ||
@@ -1179,7 +1179,8 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
1179 1179
1180 /* Suspend all the interfaces and then udev itself */ 1180 /* Suspend all the interfaces and then udev itself */
1181 if (udev->actconfig) { 1181 if (udev->actconfig) {
1182 for (; i < udev->actconfig->desc.bNumInterfaces; i++) { 1182 n = udev->actconfig->desc.bNumInterfaces;
1183 for (i = n - 1; i >= 0; --i) {
1183 intf = udev->actconfig->interface[i]; 1184 intf = udev->actconfig->interface[i];
1184 status = usb_suspend_interface(udev, intf, msg); 1185 status = usb_suspend_interface(udev, intf, msg);
1185 if (status != 0) 1186 if (status != 0)
@@ -1192,7 +1193,7 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
1192 /* If the suspend failed, resume interfaces that did get suspended */ 1193 /* If the suspend failed, resume interfaces that did get suspended */
1193 if (status != 0) { 1194 if (status != 0) {
1194 msg.event ^= (PM_EVENT_SUSPEND | PM_EVENT_RESUME); 1195 msg.event ^= (PM_EVENT_SUSPEND | PM_EVENT_RESUME);
1195 while (--i >= 0) { 1196 while (++i < n) {
1196 intf = udev->actconfig->interface[i]; 1197 intf = udev->actconfig->interface[i];
1197 usb_resume_interface(udev, intf, msg, 0); 1198 usb_resume_interface(udev, intf, msg, 0);
1198 } 1199 }
@@ -1263,13 +1264,47 @@ static int usb_resume_both(struct usb_device *udev, pm_message_t msg)
1263 return status; 1264 return status;
1264} 1265}
1265 1266
1267static void choose_wakeup(struct usb_device *udev, pm_message_t msg)
1268{
1269 int w, i;
1270 struct usb_interface *intf;
1271
1272 /* Remote wakeup is needed only when we actually go to sleep.
1273 * For things like FREEZE and QUIESCE, if the device is already
1274 * autosuspended then its current wakeup setting is okay.
1275 */
1276 if (msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_QUIESCE) {
1277 if (udev->state != USB_STATE_SUSPENDED)
1278 udev->do_remote_wakeup = 0;
1279 return;
1280 }
1281
1282 /* If remote wakeup is permitted, see whether any interface drivers
1283 * actually want it.
1284 */
1285 w = 0;
1286 if (device_may_wakeup(&udev->dev) && udev->actconfig) {
1287 for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
1288 intf = udev->actconfig->interface[i];
1289 w |= intf->needs_remote_wakeup;
1290 }
1291 }
1292
1293 /* If the device is autosuspended with the wrong wakeup setting,
1294 * autoresume now so the setting can be changed.
1295 */
1296 if (udev->state == USB_STATE_SUSPENDED && w != udev->do_remote_wakeup)
1297 pm_runtime_resume(&udev->dev);
1298 udev->do_remote_wakeup = w;
1299}
1300
1266/* The device lock is held by the PM core */ 1301/* The device lock is held by the PM core */
1267int usb_suspend(struct device *dev, pm_message_t msg) 1302int usb_suspend(struct device *dev, pm_message_t msg)
1268{ 1303{
1269 struct usb_device *udev = to_usb_device(dev); 1304 struct usb_device *udev = to_usb_device(dev);
1270 1305
1271 do_unbind_rebind(udev, DO_UNBIND); 1306 do_unbind_rebind(udev, DO_UNBIND);
1272 udev->do_remote_wakeup = device_may_wakeup(&udev->dev); 1307 choose_wakeup(udev, msg);
1273 return usb_suspend_both(udev, msg); 1308 return usb_suspend_both(udev, msg);
1274} 1309}
1275 1310
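
The usb_suspend_both() rework walks the interfaces from the last one down to the first and, when a suspend fails partway, resumes only the interfaces that were actually suspended by walking forward from the failure point (the while (++i < n) loop). The unwind idiom in isolation, as a sketch with hypothetical suspend_one()/resume_one() helpers:

    struct item;
    int suspend_one(struct item *it);
    void resume_one(struct item *it);

    /* suspend items n-1..0; on failure at index i, resume i+1..n-1 (already suspended) */
    static int suspend_all(struct item **items, int n)
    {
            int i, status = 0;

            for (i = n - 1; i >= 0; --i) {
                    status = suspend_one(items[i]);
                    if (status != 0)
                            break;
            }
            if (status != 0) {
                    while (++i < n)
                            resume_one(items[i]);
            }
            return status;
    }
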
diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
index bdf87a8414a1..2c95153c0f24 100644
--- a/drivers/usb/core/generic.c
+++ b/drivers/usb/core/generic.c
@@ -120,7 +120,7 @@ int usb_choose_configuration(struct usb_device *udev)
120 * than a vendor-specific driver. */ 120 * than a vendor-specific driver. */
121 else if (udev->descriptor.bDeviceClass != 121 else if (udev->descriptor.bDeviceClass !=
122 USB_CLASS_VENDOR_SPEC && 122 USB_CLASS_VENDOR_SPEC &&
123 (!desc || desc->bInterfaceClass != 123 (desc && desc->bInterfaceClass !=
124 USB_CLASS_VENDOR_SPEC)) { 124 USB_CLASS_VENDOR_SPEC)) {
125 best = c; 125 best = c;
126 break; 126 break;
diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c
index 97b40ce133f0..111a01a747fc 100644
--- a/drivers/usb/core/inode.c
+++ b/drivers/usb/core/inode.c
@@ -380,6 +380,7 @@ static int usbfs_rmdir(struct inode *dir, struct dentry *dentry)
380 mutex_lock(&inode->i_mutex); 380 mutex_lock(&inode->i_mutex);
381 dentry_unhash(dentry); 381 dentry_unhash(dentry);
382 if (usbfs_empty(dentry)) { 382 if (usbfs_empty(dentry)) {
383 dont_mount(dentry);
383 drop_nlink(dentry->d_inode); 384 drop_nlink(dentry->d_inode);
384 drop_nlink(dentry->d_inode); 385 drop_nlink(dentry->d_inode);
385 dput(dentry); 386 dput(dentry);
@@ -515,13 +516,13 @@ static int fs_create_by_name (const char *name, mode_t mode,
515 *dentry = NULL; 516 *dentry = NULL;
516 mutex_lock(&parent->d_inode->i_mutex); 517 mutex_lock(&parent->d_inode->i_mutex);
517 *dentry = lookup_one_len(name, parent, strlen(name)); 518 *dentry = lookup_one_len(name, parent, strlen(name));
518 if (!IS_ERR(dentry)) { 519 if (!IS_ERR(*dentry)) {
519 if ((mode & S_IFMT) == S_IFDIR) 520 if ((mode & S_IFMT) == S_IFDIR)
520 error = usbfs_mkdir (parent->d_inode, *dentry, mode); 521 error = usbfs_mkdir (parent->d_inode, *dentry, mode);
521 else 522 else
522 error = usbfs_create (parent->d_inode, *dentry, mode); 523 error = usbfs_create (parent->d_inode, *dentry, mode);
523 } else 524 } else
524 error = PTR_ERR(dentry); 525 error = PTR_ERR(*dentry);
525 mutex_unlock(&parent->d_inode->i_mutex); 526 mutex_unlock(&parent->d_inode->i_mutex);
526 527
527 return error; 528 return error;
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 1297e9b16a51..0561430f2ede 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -718,7 +718,7 @@ int __usb_get_extra_descriptor(char *buffer, unsigned size,
718EXPORT_SYMBOL_GPL(__usb_get_extra_descriptor); 718EXPORT_SYMBOL_GPL(__usb_get_extra_descriptor);
719 719
720/** 720/**
721 * usb_buffer_alloc - allocate dma-consistent buffer for URB_NO_xxx_DMA_MAP 721 * usb_alloc_coherent - allocate dma-consistent buffer for URB_NO_xxx_DMA_MAP
722 * @dev: device the buffer will be used with 722 * @dev: device the buffer will be used with
723 * @size: requested buffer size 723 * @size: requested buffer size
724 * @mem_flags: affect whether allocation may block 724 * @mem_flags: affect whether allocation may block
@@ -737,30 +737,30 @@ EXPORT_SYMBOL_GPL(__usb_get_extra_descriptor);
737 * architectures where CPU caches are not DMA-coherent. On systems without 737 * architectures where CPU caches are not DMA-coherent. On systems without
738 * bus-snooping caches, these buffers are uncached. 738 * bus-snooping caches, these buffers are uncached.
739 * 739 *
740 * When the buffer is no longer used, free it with usb_buffer_free(). 740 * When the buffer is no longer used, free it with usb_free_coherent().
741 */ 741 */
742void *usb_buffer_alloc(struct usb_device *dev, size_t size, gfp_t mem_flags, 742void *usb_alloc_coherent(struct usb_device *dev, size_t size, gfp_t mem_flags,
743 dma_addr_t *dma) 743 dma_addr_t *dma)
744{ 744{
745 if (!dev || !dev->bus) 745 if (!dev || !dev->bus)
746 return NULL; 746 return NULL;
747 return hcd_buffer_alloc(dev->bus, size, mem_flags, dma); 747 return hcd_buffer_alloc(dev->bus, size, mem_flags, dma);
748} 748}
749EXPORT_SYMBOL_GPL(usb_buffer_alloc); 749EXPORT_SYMBOL_GPL(usb_alloc_coherent);
750 750
751/** 751/**
752 * usb_buffer_free - free memory allocated with usb_buffer_alloc() 752 * usb_free_coherent - free memory allocated with usb_alloc_coherent()
753 * @dev: device the buffer was used with 753 * @dev: device the buffer was used with
754 * @size: requested buffer size 754 * @size: requested buffer size
755 * @addr: CPU address of buffer 755 * @addr: CPU address of buffer
756 * @dma: DMA address of buffer 756 * @dma: DMA address of buffer
757 * 757 *
758 * This reclaims an I/O buffer, letting it be reused. The memory must have 758 * This reclaims an I/O buffer, letting it be reused. The memory must have
759 * been allocated using usb_buffer_alloc(), and the parameters must match 759 * been allocated using usb_alloc_coherent(), and the parameters must match
760 * those provided in that allocation request. 760 * those provided in that allocation request.
761 */ 761 */
762void usb_buffer_free(struct usb_device *dev, size_t size, void *addr, 762void usb_free_coherent(struct usb_device *dev, size_t size, void *addr,
763 dma_addr_t dma) 763 dma_addr_t dma)
764{ 764{
765 if (!dev || !dev->bus) 765 if (!dev || !dev->bus)
766 return; 766 return;
@@ -768,7 +768,7 @@ void usb_buffer_free(struct usb_device *dev, size_t size, void *addr,
768 return; 768 return;
769 hcd_buffer_free(dev->bus, size, addr, dma); 769 hcd_buffer_free(dev->bus, size, addr, dma);
770} 770}
771EXPORT_SYMBOL_GPL(usb_buffer_free); 771EXPORT_SYMBOL_GPL(usb_free_coherent);
772 772
773/** 773/**
774 * usb_buffer_map - create DMA mapping(s) for an urb 774 * usb_buffer_map - create DMA mapping(s) for an urb
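
The usb.c hunk renames usb_buffer_alloc()/usb_buffer_free() to usb_alloc_coherent()/usb_free_coherent() without changing the arguments or semantics, so callers only need the mechanical rename. A hypothetical caller after the rename (udev and len stand in for the caller's device and buffer size):

    dma_addr_t dma;
    void *buf;

    buf = usb_alloc_coherent(udev, len, GFP_KERNEL, &dma);
    if (!buf)
            return -ENOMEM;
    /* ... use buf with a URB that sets URB_NO_TRANSFER_DMA_MAP and transfer_dma = dma ... */
    usb_free_coherent(udev, len, buf, dma);
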
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index df1bae9b048e..eaa79c8a9b8c 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -366,6 +366,13 @@ rescan:
366 if (is_done) 366 if (is_done)
367 done(ep, req, 0); 367 done(ep, req, 0);
368 else if (ep->is_pingpong) { 368 else if (ep->is_pingpong) {
369 /*
370 * One dummy read to delay the code because of a HW glitch:
371 * CSR returns bad RXCOUNT when read too soon after updating
372 * RX_DATA_BK flags.
373 */
374 csr = __raw_readl(creg);
375
369 bufferspace -= count; 376 bufferspace -= count;
370 buf += count; 377 buf += count;
371 goto rescan; 378 goto rescan;
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
index 124a8ccfdcda..1f73b485732d 100644
--- a/drivers/usb/gadget/s3c-hsotg.c
+++ b/drivers/usb/gadget/s3c-hsotg.c
@@ -2145,6 +2145,7 @@ static int s3c_hsotg_ep_enable(struct usb_ep *ep,
2145 u32 epctrl; 2145 u32 epctrl;
2146 u32 mps; 2146 u32 mps;
2147 int dir_in; 2147 int dir_in;
2148 int ret = 0;
2148 2149
2149 dev_dbg(hsotg->dev, 2150 dev_dbg(hsotg->dev,
2150 "%s: ep %s: a 0x%02x, attr 0x%02x, mps 0x%04x, intr %d\n", 2151 "%s: ep %s: a 0x%02x, attr 0x%02x, mps 0x%04x, intr %d\n",
@@ -2196,7 +2197,8 @@ static int s3c_hsotg_ep_enable(struct usb_ep *ep,
2196 switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) { 2197 switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
2197 case USB_ENDPOINT_XFER_ISOC: 2198 case USB_ENDPOINT_XFER_ISOC:
2198 dev_err(hsotg->dev, "no current ISOC support\n"); 2199 dev_err(hsotg->dev, "no current ISOC support\n");
2199 return -EINVAL; 2200 ret = -EINVAL;
2201 goto out;
2200 2202
2201 case USB_ENDPOINT_XFER_BULK: 2203 case USB_ENDPOINT_XFER_BULK:
2202 epctrl |= S3C_DxEPCTL_EPType_Bulk; 2204 epctrl |= S3C_DxEPCTL_EPType_Bulk;
@@ -2235,8 +2237,9 @@ static int s3c_hsotg_ep_enable(struct usb_ep *ep,
2235 /* enable the endpoint interrupt */ 2237 /* enable the endpoint interrupt */
2236 s3c_hsotg_ctrl_epint(hsotg, index, dir_in, 1); 2238 s3c_hsotg_ctrl_epint(hsotg, index, dir_in, 1);
2237 2239
2240out:
2238 spin_unlock_irqrestore(&hs_ep->lock, flags); 2241 spin_unlock_irqrestore(&hs_ep->lock, flags);
2239 return 0; 2242 return ret;
2240} 2243}
2241 2244
2242static int s3c_hsotg_ep_disable(struct usb_ep *ep) 2245static int s3c_hsotg_ep_disable(struct usb_ep *ep)
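The s3c-hsotg change exists because the function takes hs_ep->lock with spin_lock_irqsave() before the switch; the old direct return -EINVAL in the ISOC case would have left that lock held. Routing every exit through a single out label keeps the lock/unlock pairing obvious. A minimal sketch of the pattern, with hypothetical names standing in for the driver's types:

#include <linux/errno.h>
#include <linux/spinlock.h>

#define EXAMPLE_XFER_UNSUPPORTED	1	/* hypothetical transfer type */

struct example_ep {				/* stand-in for s3c_hsotg_ep */
	spinlock_t lock;
};

static int example_ep_enable(struct example_ep *ep, unsigned int type)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&ep->lock, flags);

	if (type == EXAMPLE_XFER_UNSUPPORTED) {
		ret = -EINVAL;
		goto out;	/* never return with the lock still held */
	}

	/* ... endpoint programming would go here ... */
out:
	spin_unlock_irqrestore(&ep->lock, flags);
	return ret;
}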
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 207e7a85aeb0..13ead00aecd5 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -543,6 +543,7 @@ static int ehci_init(struct usb_hcd *hcd)
543 */ 543 */
544 ehci->periodic_size = DEFAULT_I_TDPS; 544 ehci->periodic_size = DEFAULT_I_TDPS;
545 INIT_LIST_HEAD(&ehci->cached_itd_list); 545 INIT_LIST_HEAD(&ehci->cached_itd_list);
546 INIT_LIST_HEAD(&ehci->cached_sitd_list);
546 if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0) 547 if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0)
547 return retval; 548 return retval;
548 549
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 19372673bf09..c7178bcde67a 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -801,7 +801,7 @@ static int ehci_hub_control (
801 * this bit; seems too long to spin routinely... 801 * this bit; seems too long to spin routinely...
802 */ 802 */
803 retval = handshake(ehci, status_reg, 803 retval = handshake(ehci, status_reg,
804 PORT_RESET, 0, 750); 804 PORT_RESET, 0, 1000);
805 if (retval != 0) { 805 if (retval != 0) {
806 ehci_err (ehci, "port %d reset error %d\n", 806 ehci_err (ehci, "port %d reset error %d\n",
807 wIndex + 1, retval); 807 wIndex + 1, retval);
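handshake() is EHCI's poll-until-done helper; the hunk only widens the PORT_RESET wait it is given from 750 to 1000, in the microsecond units that helper counts in, so slower hardware has time to clear the bit. A rough sketch of that kind of bounded poll loop (a generic illustration, not the driver's implementation):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>

/* Poll @reg until (value & mask) == done, or @usec microseconds pass. */
static int example_handshake(void __iomem *reg, u32 mask, u32 done, int usec)
{
	u32 val;

	do {
		val = readl(reg);
		if ((val & mask) == done)
			return 0;
		udelay(1);
	} while (--usec > 0);

	return -ETIMEDOUT;
}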
diff --git a/drivers/usb/host/ehci-mem.c b/drivers/usb/host/ehci-mem.c
index aeda96e0af67..1f3f01eacaf0 100644
--- a/drivers/usb/host/ehci-mem.c
+++ b/drivers/usb/host/ehci-mem.c
@@ -136,7 +136,7 @@ static inline void qh_put (struct ehci_qh *qh)
136 136
137static void ehci_mem_cleanup (struct ehci_hcd *ehci) 137static void ehci_mem_cleanup (struct ehci_hcd *ehci)
138{ 138{
139 free_cached_itd_list(ehci); 139 free_cached_lists(ehci);
140 if (ehci->async) 140 if (ehci->async)
141 qh_put (ehci->async); 141 qh_put (ehci->async);
142 ehci->async = NULL; 142 ehci->async = NULL;
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index bed6de342ecc..0cd6c7795d90 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -629,11 +629,13 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
629 } 629 }
630 snprintf(supply, sizeof(supply), "hsusb%d", i); 630 snprintf(supply, sizeof(supply), "hsusb%d", i);
631 omap->regulator[i] = regulator_get(omap->dev, supply); 631 omap->regulator[i] = regulator_get(omap->dev, supply);
632 if (IS_ERR(omap->regulator[i])) 632 if (IS_ERR(omap->regulator[i])) {
633 omap->regulator[i] = NULL;
633 dev_dbg(&pdev->dev, 634 dev_dbg(&pdev->dev,
634 "failed to get ehci port%d regulator\n", i); 635 "failed to get ehci port%d regulator\n", i);
635 else 636 } else {
636 regulator_enable(omap->regulator[i]); 637 regulator_enable(omap->regulator[i]);
638 }
637 } 639 }
638 640
639 ret = omap_start_ehc(omap, hcd); 641 ret = omap_start_ehc(omap, hcd);
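regulator_get() reports failure with an ERR_PTR() value rather than NULL, so the hunk above stores NULL on failure; later suspend/remove code can then test the pointer directly instead of calling IS_ERR() everywhere. A hedged sketch of that convention (function and supply names are illustrative):

#include <linux/err.h>
#include <linux/regulator/consumer.h>

/* Optional supply: carry on without it, but normalise failure to NULL. */
static struct regulator *example_get_optional(struct device *dev,
					      const char *supply)
{
	struct regulator *reg = regulator_get(dev, supply);

	if (IS_ERR(reg))
		return NULL;		/* callers test for NULL, not IS_ERR */

	if (regulator_enable(reg)) {
		regulator_put(reg);
		return NULL;
	}
	return reg;
}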
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index a0aaaaff2560..805ec633a652 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -510,7 +510,7 @@ static int disable_periodic (struct ehci_hcd *ehci)
510 ehci_writel(ehci, cmd, &ehci->regs->command); 510 ehci_writel(ehci, cmd, &ehci->regs->command);
511 /* posted write ... */ 511 /* posted write ... */
512 512
513 free_cached_itd_list(ehci); 513 free_cached_lists(ehci);
514 514
515 ehci->next_uframe = -1; 515 ehci->next_uframe = -1;
516 return 0; 516 return 0;
@@ -2139,13 +2139,27 @@ sitd_complete (
 		(stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
 	}
 	iso_stream_put (ehci, stream);
-	/* OK to recycle this SITD now that its completion callback ran. */
+
 done:
 	sitd->urb = NULL;
-	sitd->stream = NULL;
-	list_move(&sitd->sitd_list, &stream->free_list);
-	iso_stream_put(ehci, stream);
-
+	if (ehci->clock_frame != sitd->frame) {
+		/* OK to recycle this SITD now. */
+		sitd->stream = NULL;
+		list_move(&sitd->sitd_list, &stream->free_list);
+		iso_stream_put(ehci, stream);
+	} else {
+		/* HW might remember this SITD, so we can't recycle it yet.
+		 * Move it to a safe place until a new frame starts.
+		 */
+		list_move(&sitd->sitd_list, &ehci->cached_sitd_list);
+		if (stream->refcount == 2) {
+			/* If iso_stream_put() were called here, stream
+			 * would be freed. Instead, just prevent reuse.
+			 */
+			stream->ep->hcpriv = NULL;
+			stream->ep = NULL;
+		}
+	}
 	return retval;
 }
 
@@ -2211,9 +2225,10 @@ done:
2211 2225
2212/*-------------------------------------------------------------------------*/ 2226/*-------------------------------------------------------------------------*/
2213 2227
2214static void free_cached_itd_list(struct ehci_hcd *ehci) 2228static void free_cached_lists(struct ehci_hcd *ehci)
2215{ 2229{
2216 struct ehci_itd *itd, *n; 2230 struct ehci_itd *itd, *n;
2231 struct ehci_sitd *sitd, *sn;
2217 2232
2218 list_for_each_entry_safe(itd, n, &ehci->cached_itd_list, itd_list) { 2233 list_for_each_entry_safe(itd, n, &ehci->cached_itd_list, itd_list) {
2219 struct ehci_iso_stream *stream = itd->stream; 2234 struct ehci_iso_stream *stream = itd->stream;
@@ -2221,6 +2236,13 @@ static void free_cached_itd_list(struct ehci_hcd *ehci)
2221 list_move(&itd->itd_list, &stream->free_list); 2236 list_move(&itd->itd_list, &stream->free_list);
2222 iso_stream_put(ehci, stream); 2237 iso_stream_put(ehci, stream);
2223 } 2238 }
2239
2240 list_for_each_entry_safe(sitd, sn, &ehci->cached_sitd_list, sitd_list) {
2241 struct ehci_iso_stream *stream = sitd->stream;
2242 sitd->stream = NULL;
2243 list_move(&sitd->sitd_list, &stream->free_list);
2244 iso_stream_put(ehci, stream);
2245 }
2224} 2246}
2225 2247
2226/*-------------------------------------------------------------------------*/ 2248/*-------------------------------------------------------------------------*/
@@ -2247,7 +2269,7 @@ scan_periodic (struct ehci_hcd *ehci)
2247 clock_frame = -1; 2269 clock_frame = -1;
2248 } 2270 }
2249 if (ehci->clock_frame != clock_frame) { 2271 if (ehci->clock_frame != clock_frame) {
2250 free_cached_itd_list(ehci); 2272 free_cached_lists(ehci);
2251 ehci->clock_frame = clock_frame; 2273 ehci->clock_frame = clock_frame;
2252 } 2274 }
2253 clock %= mod; 2275 clock %= mod;
@@ -2414,7 +2436,7 @@ restart:
2414 clock = now; 2436 clock = now;
2415 clock_frame = clock >> 3; 2437 clock_frame = clock >> 3;
2416 if (ehci->clock_frame != clock_frame) { 2438 if (ehci->clock_frame != clock_frame) {
2417 free_cached_itd_list(ehci); 2439 free_cached_lists(ehci);
2418 ehci->clock_frame = clock_frame; 2440 ehci->clock_frame = clock_frame;
2419 } 2441 }
2420 } else { 2442 } else {
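The scheduling changes above give siTDs the treatment iTDs already had: a descriptor completed while ehci->clock_frame is still the frame it was queued in is parked on cached_sitd_list, and free_cached_lists() returns it to its stream's free list only after the frame counter moves on, since the controller may still reference it. A generic sketch of that park-then-recycle idiom (the structures are simplified placeholders, not the EHCI types):

#include <linux/list.h>

struct example_desc {			/* placeholder for ehci_itd/ehci_sitd */
	struct list_head node;
	struct list_head *free_list;	/* owning stream's free list */
};

/* Completion in the still-active frame: park it, don't recycle yet. */
static void example_park(struct list_head *cached, struct example_desc *d)
{
	list_move(&d->node, cached);
}

/* Frame counter has advanced: everything parked is now safe to reuse. */
static void example_recycle_all(struct list_head *cached)
{
	struct example_desc *d, *n;

	list_for_each_entry_safe(d, n, cached, node)
		list_move(&d->node, d->free_list);
}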
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index b1dce96dd621..556c0b48f3ab 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -87,8 +87,9 @@ struct ehci_hcd { /* one per controller */
87 int next_uframe; /* scan periodic, start here */ 87 int next_uframe; /* scan periodic, start here */
88 unsigned periodic_sched; /* periodic activity count */ 88 unsigned periodic_sched; /* periodic activity count */
89 89
90 /* list of itds completed while clock_frame was still active */ 90 /* list of itds & sitds completed while clock_frame was still active */
91 struct list_head cached_itd_list; 91 struct list_head cached_itd_list;
92 struct list_head cached_sitd_list;
92 unsigned clock_frame; 93 unsigned clock_frame;
93 94
94 /* per root hub port */ 95 /* per root hub port */
@@ -195,7 +196,7 @@ timer_action_done (struct ehci_hcd *ehci, enum ehci_timer_action action)
195 clear_bit (action, &ehci->actions); 196 clear_bit (action, &ehci->actions);
196} 197}
197 198
198static void free_cached_itd_list(struct ehci_hcd *ehci); 199static void free_cached_lists(struct ehci_hcd *ehci);
199 200
200/*-------------------------------------------------------------------------*/ 201/*-------------------------------------------------------------------------*/
201 202
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index 68b83ab70719..944291e10f97 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -331,6 +331,8 @@ ohci_hcd_at91_drv_suspend(struct platform_device *pdev, pm_message_t mesg)
331 */ 331 */
332 if (at91_suspend_entering_slow_clock()) { 332 if (at91_suspend_entering_slow_clock()) {
333 ohci_usb_reset (ohci); 333 ohci_usb_reset (ohci);
334 /* flush the writes */
335 (void) ohci_readl (ohci, &ohci->regs->control);
334 at91_stop_clock(); 336 at91_stop_clock();
335 } 337 }
336 338
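The added ohci_readl() matters because MMIO writes can be posted: reading a register on the same device forces the preceding reset write out to the controller before the clock is gated. A minimal sketch of the read-back-to-flush idiom (the register is illustrative):

#include <linux/io.h>

static void example_reset_then_gate(void __iomem *ctrl_reg)
{
	writel(0, ctrl_reg);		/* request the reset ...          */
	(void)readl(ctrl_reg);		/* ... and flush the posted write */
	/* only now is it safe to stop the device clock */
}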
diff --git a/drivers/usb/host/ohci-da8xx.c b/drivers/usb/host/ohci-da8xx.c
index 4aa08d36d077..d22fb4d577b7 100644
--- a/drivers/usb/host/ohci-da8xx.c
+++ b/drivers/usb/host/ohci-da8xx.c
@@ -23,7 +23,7 @@
23#error "This file is DA8xx bus glue. Define CONFIG_ARCH_DAVINCI_DA8XX." 23#error "This file is DA8xx bus glue. Define CONFIG_ARCH_DAVINCI_DA8XX."
24#endif 24#endif
25 25
26#define CFGCHIP2 DA8XX_SYSCFG_VIRT(DA8XX_CFGCHIP2_REG) 26#define CFGCHIP2 DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP2_REG)
27 27
28static struct clk *usb11_clk; 28static struct clk *usb11_clk;
29static struct clk *usb20_clk; 29static struct clk *usb20_clk;
diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c
index 32bbce9718f0..65cac8cc8921 100644
--- a/drivers/usb/host/ohci-hub.c
+++ b/drivers/usb/host/ohci-hub.c
@@ -697,7 +697,7 @@ static int ohci_hub_control (
697 u16 wLength 697 u16 wLength
698) { 698) {
699 struct ohci_hcd *ohci = hcd_to_ohci (hcd); 699 struct ohci_hcd *ohci = hcd_to_ohci (hcd);
700 int ports = hcd_to_bus (hcd)->root_hub->maxchild; 700 int ports = ohci->num_ports;
701 u32 temp; 701 u32 temp;
702 int retval = 0; 702 int retval = 0;
703 703
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index 50f57f468836..e62b30b3e429 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -660,13 +660,13 @@ static struct ehci_qh *oxu_qh_alloc(struct oxu_hcd *oxu)
 		if (qh->dummy == NULL) {
 			oxu_dbg(oxu, "no dummy td\n");
 			oxu->qh_used[i] = 0;
-
-			return NULL;
+			qh = NULL;
+			goto unlock;
 		}
 
 		oxu->qh_used[i] = 1;
 	}
-
+unlock:
 	spin_unlock(&oxu->mem_lock);
 
 	return qh;
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index e11cc3aa4b82..3b867a8af7b2 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -720,10 +720,10 @@ retry:
 		/* port status seems weird until after reset, so
 		 * force the reset and make khubd clean up later.
 		 */
-		if (sl811->stat_insrmv & 1)
-			sl811->port1 |= 1 << USB_PORT_FEAT_CONNECTION;
-		else
-			sl811->port1 &= ~(1 << USB_PORT_FEAT_CONNECTION);
+		if (irqstat & SL11H_INTMASK_RD)
+			sl811->port1 &= ~(1 << USB_PORT_FEAT_CONNECTION);
+		else
+			sl811->port1 |= 1 << USB_PORT_FEAT_CONNECTION;
 
 		sl811->port1 |= 1 << USB_PORT_FEAT_C_CONNECTION;
 
diff --git a/drivers/usb/host/sl811_cs.c b/drivers/usb/host/sl811_cs.c
index 39d253e841f6..58cb73c8420a 100644
--- a/drivers/usb/host/sl811_cs.c
+++ b/drivers/usb/host/sl811_cs.c
@@ -47,7 +47,6 @@ static const char driver_name[DEV_NAME_LEN] = "sl811_cs";
47 47
48typedef struct local_info_t { 48typedef struct local_info_t {
49 struct pcmcia_device *p_dev; 49 struct pcmcia_device *p_dev;
50 dev_node_t node;
51} local_info_t; 50} local_info_t;
52 51
53static void sl811_cs_release(struct pcmcia_device * link); 52static void sl811_cs_release(struct pcmcia_device * link);
@@ -163,8 +162,7 @@ static int sl811_cs_config_check(struct pcmcia_device *p_dev,
163 dflt->vpp1.param[CISTPL_POWER_VNOM]/10000; 162 dflt->vpp1.param[CISTPL_POWER_VNOM]/10000;
164 163
165 /* we need an interrupt */ 164 /* we need an interrupt */
166 if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1) 165 p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
167 p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
168 166
169 /* IO window settings */ 167 /* IO window settings */
170 p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0; 168 p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
@@ -186,7 +184,6 @@ static int sl811_cs_config_check(struct pcmcia_device *p_dev,
186static int sl811_cs_config(struct pcmcia_device *link) 184static int sl811_cs_config(struct pcmcia_device *link)
187{ 185{
188 struct device *parent = &link->dev; 186 struct device *parent = &link->dev;
189 local_info_t *dev = link->priv;
190 int ret; 187 int ret;
191 188
192 dev_dbg(&link->dev, "sl811_cs_config\n"); 189 dev_dbg(&link->dev, "sl811_cs_config\n");
@@ -197,31 +194,24 @@ static int sl811_cs_config(struct pcmcia_device *link)
 	/* require an IRQ and two registers */
 	if (!link->io.NumPorts1 || link->io.NumPorts1 < 2)
 		goto failed;
-	if (link->conf.Attributes & CONF_ENABLE_IRQ) {
-		ret = pcmcia_request_irq(link, &link->irq);
-		if (ret)
-			goto failed;
-	} else
+
+	if (!link->irq)
 		goto failed;
 
 	ret = pcmcia_request_configuration(link, &link->conf);
 	if (ret)
 		goto failed;
 
-	sprintf(dev->node.dev_name, driver_name);
-	dev->node.major = dev->node.minor = 0;
-	link->dev_node = &dev->node;
-
-	printk(KERN_INFO "%s: index 0x%02x: ",
-		dev->node.dev_name, link->conf.ConfigIndex);
+	dev_info(&link->dev, "index 0x%02x: ",
+		link->conf.ConfigIndex);
 	if (link->conf.Vpp)
 		printk(", Vpp %d.%d", link->conf.Vpp/10, link->conf.Vpp%10);
-	printk(", irq %d", link->irq.AssignedIRQ);
+	printk(", irq %d", link->irq);
 	printk(", io 0x%04x-0x%04x", link->io.BasePort1,
 			link->io.BasePort1+link->io.NumPorts1-1);
 	printk("\n");
 
-	if (sl811_hc_init(parent, link->io.BasePort1, link->irq.AssignedIRQ)
+	if (sl811_hc_init(parent, link->io.BasePort1, link->irq)
 			< 0) {
 failed:
 		printk(KERN_WARNING "sl811_cs_config failed\n");
@@ -241,10 +231,6 @@ static int sl811_cs_probe(struct pcmcia_device *link)
241 local->p_dev = link; 231 local->p_dev = link;
242 link->priv = local; 232 link->priv = local;
243 233
244 /* Initialize */
245 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
246 link->irq.Handler = NULL;
247
248 link->conf.Attributes = 0; 234 link->conf.Attributes = 0;
249 link->conf.IntType = INT_MEMORY_AND_IO; 235 link->conf.IntType = INT_MEMORY_AND_IO;
250 236
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index c09539bad1ee..d64f5724bfc4 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -582,6 +582,19 @@ static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
582 return EP_INTERVAL(interval); 582 return EP_INTERVAL(interval);
583} 583}
584 584
585/* The "Mult" field in the endpoint context is only set for SuperSpeed devices.
586 * High speed endpoint descriptors can define "the number of additional
587 * transaction opportunities per microframe", but that goes in the Max Burst
588 * endpoint context field.
589 */
590static inline u32 xhci_get_endpoint_mult(struct usb_device *udev,
591 struct usb_host_endpoint *ep)
592{
593 if (udev->speed != USB_SPEED_SUPER || !ep->ss_ep_comp)
594 return 0;
595 return ep->ss_ep_comp->desc.bmAttributes;
596}
597
585static inline u32 xhci_get_endpoint_type(struct usb_device *udev, 598static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
586 struct usb_host_endpoint *ep) 599 struct usb_host_endpoint *ep)
587{ 600{
@@ -612,6 +625,36 @@ static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
612 return type; 625 return type;
613} 626}
614 627
628/* Return the maximum endpoint service interval time (ESIT) payload.
629 * Basically, this is the maxpacket size, multiplied by the burst size
630 * and mult size.
631 */
632static inline u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
633 struct usb_device *udev,
634 struct usb_host_endpoint *ep)
635{
636 int max_burst;
637 int max_packet;
638
639 /* Only applies for interrupt or isochronous endpoints */
640 if (usb_endpoint_xfer_control(&ep->desc) ||
641 usb_endpoint_xfer_bulk(&ep->desc))
642 return 0;
643
644 if (udev->speed == USB_SPEED_SUPER) {
645 if (ep->ss_ep_comp)
646 return ep->ss_ep_comp->desc.wBytesPerInterval;
647 xhci_warn(xhci, "WARN no SS endpoint companion descriptor.\n");
648 /* Assume no bursts, no multiple opportunities to send. */
649 return ep->desc.wMaxPacketSize;
650 }
651
652 max_packet = ep->desc.wMaxPacketSize & 0x3ff;
653 max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11;
654 /* A 0 in max burst means 1 transfer per ESIT */
655 return max_packet * (max_burst + 1);
656}
657
615int xhci_endpoint_init(struct xhci_hcd *xhci, 658int xhci_endpoint_init(struct xhci_hcd *xhci,
616 struct xhci_virt_device *virt_dev, 659 struct xhci_virt_device *virt_dev,
617 struct usb_device *udev, 660 struct usb_device *udev,
@@ -623,6 +666,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
623 struct xhci_ring *ep_ring; 666 struct xhci_ring *ep_ring;
624 unsigned int max_packet; 667 unsigned int max_packet;
625 unsigned int max_burst; 668 unsigned int max_burst;
669 u32 max_esit_payload;
626 670
627 ep_index = xhci_get_endpoint_index(&ep->desc); 671 ep_index = xhci_get_endpoint_index(&ep->desc);
628 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); 672 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
@@ -644,6 +688,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
644 ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state; 688 ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state;
645 689
646 ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep); 690 ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep);
691 ep_ctx->ep_info |= EP_MULT(xhci_get_endpoint_mult(udev, ep));
647 692
648 /* FIXME dig Mult and streams info out of ep companion desc */ 693 /* FIXME dig Mult and streams info out of ep companion desc */
649 694
@@ -689,6 +734,26 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
689 default: 734 default:
690 BUG(); 735 BUG();
691 } 736 }
737 max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep);
738 ep_ctx->tx_info = MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload);
739
740 /*
741 * XXX no idea how to calculate the average TRB buffer length for bulk
742 * endpoints, as the driver gives us no clue how big each scatter gather
743 * list entry (or buffer) is going to be.
744 *
745 * For isochronous and interrupt endpoints, we set it to the max
746 * available, until we have new API in the USB core to allow drivers to
747 * declare how much bandwidth they actually need.
748 *
749 * Normally, it would be calculated by taking the total of the buffer
750 * lengths in the TD and then dividing by the number of TRBs in a TD,
751 * including link TRBs, No-op TRBs, and Event data TRBs. Since we don't
752 * use Event Data TRBs, and we don't chain in a link TRB on short
753 * transfers, we're basically dividing by 1.
754 */
755 ep_ctx->tx_info |= AVG_TRB_LENGTH_FOR_EP(max_esit_payload);
756
692 /* FIXME Debug endpoint context */ 757 /* FIXME Debug endpoint context */
693 return 0; 758 return 0;
694} 759}
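In the high-speed branch of xhci_get_max_esit_payload(), wMaxPacketSize is decoded as the packet size in the low bits and the "additional transactions per microframe" count in bits 12:11, and the ESIT payload is their product. A worked example with a hypothetical descriptor value:

#include <linux/types.h>

/* wMaxPacketSize = 0x1200 (hypothetical): 512-byte packets plus 2 extra
 * transaction opportunities per microframe encoded in bits 12:11.
 */
static u32 example_hs_esit_payload(void)
{
	u16 wmax       = 0x1200;
	u32 max_packet = wmax & 0x3ff;			/* 512 */
	u32 max_burst  = (wmax & 0x1800) >> 11;		/* 2   */

	return max_packet * (max_burst + 1);		/* 3 * 512 = 1536 bytes */
}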
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index e5eb09b2f38e..ea389e9a4931 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -609,6 +609,10 @@ struct xhci_ep_ctx {
609#define MAX_PACKET_MASK (0xffff << 16) 609#define MAX_PACKET_MASK (0xffff << 16)
610#define MAX_PACKET_DECODED(p) (((p) >> 16) & 0xffff) 610#define MAX_PACKET_DECODED(p) (((p) >> 16) & 0xffff)
611 611
612/* tx_info bitmasks */
613#define AVG_TRB_LENGTH_FOR_EP(p) ((p) & 0xffff)
614#define MAX_ESIT_PAYLOAD_FOR_EP(p) (((p) & 0xffff) << 16)
615
612 616
613/** 617/**
614 * struct xhci_input_control_context 618 * struct xhci_input_control_context
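The two new masks describe how the endpoint context's tx_info word is laid out: average TRB length in bits 15:0 and max ESIT payload in bits 31:16, which is how xhci_endpoint_init() composes the field above (it currently uses the ESIT payload for both halves on periodic endpoints). A small illustration, reusing the macro definitions from this hunk:

#include <linux/types.h>

#define AVG_TRB_LENGTH_FOR_EP(p)	((p) & 0xffff)
#define MAX_ESIT_PAYLOAD_FOR_EP(p)	(((p) & 0xffff) << 16)

/* e.g. a 1536-byte ESIT payload yields tx_info = 0x06000600 */
static u32 example_pack_tx_info(u32 esit_payload)
{
	return MAX_ESIT_PAYLOAD_FOR_EP(esit_payload) |
	       AVG_TRB_LENGTH_FOR_EP(esit_payload);
}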
diff --git a/drivers/usb/misc/usbsevseg.c b/drivers/usb/misc/usbsevseg.c
index a9555cb901a1..de8ef945b536 100644
--- a/drivers/usb/misc/usbsevseg.c
+++ b/drivers/usb/misc/usbsevseg.c
@@ -49,6 +49,7 @@ struct usb_sevsegdev {
49 u16 textlength; 49 u16 textlength;
50 50
51 u8 shadow_power; /* for PM */ 51 u8 shadow_power; /* for PM */
52 u8 has_interface_pm;
52}; 53};
53 54
54/* sysfs_streq can't replace this completely 55/* sysfs_streq can't replace this completely
@@ -68,12 +69,16 @@ static void update_display_powered(struct usb_sevsegdev *mydev)
68{ 69{
69 int rc; 70 int rc;
70 71
71 if (!mydev->shadow_power && mydev->powered) { 72 if (mydev->powered && !mydev->has_interface_pm) {
72 rc = usb_autopm_get_interface(mydev->intf); 73 rc = usb_autopm_get_interface(mydev->intf);
73 if (rc < 0) 74 if (rc < 0)
74 return; 75 return;
76 mydev->has_interface_pm = 1;
75 } 77 }
76 78
79 if (mydev->shadow_power != 1)
80 return;
81
77 rc = usb_control_msg(mydev->udev, 82 rc = usb_control_msg(mydev->udev,
78 usb_sndctrlpipe(mydev->udev, 0), 83 usb_sndctrlpipe(mydev->udev, 0),
79 0x12, 84 0x12,
@@ -86,8 +91,10 @@ static void update_display_powered(struct usb_sevsegdev *mydev)
86 if (rc < 0) 91 if (rc < 0)
87 dev_dbg(&mydev->udev->dev, "power retval = %d\n", rc); 92 dev_dbg(&mydev->udev->dev, "power retval = %d\n", rc);
88 93
89 if (mydev->shadow_power && !mydev->powered) 94 if (!mydev->powered && mydev->has_interface_pm) {
90 usb_autopm_put_interface(mydev->intf); 95 usb_autopm_put_interface(mydev->intf);
96 mydev->has_interface_pm = 0;
97 }
91} 98}
92 99
93static void update_display_mode(struct usb_sevsegdev *mydev) 100static void update_display_mode(struct usb_sevsegdev *mydev)
@@ -351,6 +358,10 @@ static int sevseg_probe(struct usb_interface *interface,
351 mydev->intf = interface; 358 mydev->intf = interface;
352 usb_set_intfdata(interface, mydev); 359 usb_set_intfdata(interface, mydev);
353 360
361 /* PM */
362 mydev->shadow_power = 1; /* currently active */
363 mydev->has_interface_pm = 0; /* have not issued autopm_get */
364
354 /*set defaults */ 365 /*set defaults */
355 mydev->textmode = 0x02; /* ascii mode */ 366 mydev->textmode = 0x02; /* ascii mode */
356 mydev->mode_msb = 0x06; /* 6 characters */ 367 mydev->mode_msb = 0x06; /* 6 characters */
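The new has_interface_pm flag is there because usb_autopm_get_interface() and usb_autopm_put_interface() must stay strictly balanced; keying the put off shadow_power, as the old code did, could drop a reference it never took or keep one forever. A hedged sketch of the bookkeeping (the structure is a simplified stand-in for usb_sevsegdev):

#include <linux/usb.h>

struct example_dev {
	struct usb_interface *intf;
	unsigned int powered:1;
	unsigned int has_interface_pm:1;	/* do we hold a pm reference? */
};

static void example_sync_pm(struct example_dev *d)
{
	if (d->powered && !d->has_interface_pm) {
		if (usb_autopm_get_interface(d->intf) < 0)
			return;
		d->has_interface_pm = 1;	/* exactly one 'get' held */
	} else if (!d->powered && d->has_interface_pm) {
		usb_autopm_put_interface(d->intf);
		d->has_interface_pm = 0;	/* matching 'put' issued */
	}
}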
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index b4c783c284ba..07fe490b44d8 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -42,7 +42,7 @@ config USB_MUSB_SOC
42 default y if (BF52x && !BF522 && !BF523) 42 default y if (BF52x && !BF522 && !BF523)
43 43
44comment "DaVinci 35x and 644x USB support" 44comment "DaVinci 35x and 644x USB support"
45 depends on USB_MUSB_HDRC && ARCH_DAVINCI 45 depends on USB_MUSB_HDRC && ARCH_DAVINCI_DMx
46 46
47comment "OMAP 243x high speed USB support" 47comment "OMAP 243x high speed USB support"
48 depends on USB_MUSB_HDRC && ARCH_OMAP2430 48 depends on USB_MUSB_HDRC && ARCH_OMAP2430
diff --git a/drivers/usb/musb/Makefile b/drivers/usb/musb/Makefile
index 85710ccc1887..3a485dabebbb 100644
--- a/drivers/usb/musb/Makefile
+++ b/drivers/usb/musb/Makefile
@@ -6,7 +6,7 @@ musb_hdrc-objs := musb_core.o
6 6
7obj-$(CONFIG_USB_MUSB_HDRC) += musb_hdrc.o 7obj-$(CONFIG_USB_MUSB_HDRC) += musb_hdrc.o
8 8
9ifeq ($(CONFIG_ARCH_DAVINCI),y) 9ifeq ($(CONFIG_ARCH_DAVINCI_DMx),y)
10 musb_hdrc-objs += davinci.o 10 musb_hdrc-objs += davinci.o
11endif 11endif
12 12
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index 719a22d664ef..ec8d324237f6 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -172,13 +172,7 @@ static irqreturn_t blackfin_interrupt(int irq, void *__hci)
 
 	spin_unlock_irqrestore(&musb->lock, flags);
 
-	/* REVISIT we sometimes get spurious IRQs on g_ep0
-	 * not clear why... fall in BF54x too.
-	 */
-	if (retval != IRQ_HANDLED)
-		DBG(5, "spurious?\n");
-
-	return IRQ_HANDLED;
+	return retval;
 }
 
 static void musb_conn_timer_handler(unsigned long _musb)
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
index 29bce5c0fd10..ce2e16fee0df 100644
--- a/drivers/usb/musb/davinci.c
+++ b/drivers/usb/musb/davinci.c
@@ -444,6 +444,8 @@ int __init musb_platform_init(struct musb *musb)
444 return 0; 444 return 0;
445 445
446fail: 446fail:
447 clk_disable(musb->clock);
448
447 usb_nop_xceiv_unregister(); 449 usb_nop_xceiv_unregister();
448 return -ENODEV; 450 return -ENODEV;
449} 451}
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 0e8b8ab1d168..705cc4ad8737 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -965,10 +965,8 @@ static void musb_shutdown(struct platform_device *pdev)
965 spin_lock_irqsave(&musb->lock, flags); 965 spin_lock_irqsave(&musb->lock, flags);
966 musb_platform_disable(musb); 966 musb_platform_disable(musb);
967 musb_generic_disable(musb); 967 musb_generic_disable(musb);
968 if (musb->clock) { 968 if (musb->clock)
969 clk_put(musb->clock); 969 clk_put(musb->clock);
970 musb->clock = NULL;
971 }
972 spin_unlock_irqrestore(&musb->lock, flags); 970 spin_unlock_irqrestore(&musb->lock, flags);
973 971
974 /* FIXME power down */ 972 /* FIXME power down */
@@ -1853,15 +1851,6 @@ static void musb_free(struct musb *musb)
1853 put_device(musb->xceiv->dev); 1851 put_device(musb->xceiv->dev);
1854#endif 1852#endif
1855 1853
1856 musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
1857 musb_platform_exit(musb);
1858 musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
1859
1860 if (musb->clock) {
1861 clk_disable(musb->clock);
1862 clk_put(musb->clock);
1863 }
1864
1865#ifdef CONFIG_USB_MUSB_HDRC_HCD 1854#ifdef CONFIG_USB_MUSB_HDRC_HCD
1866 usb_put_hcd(musb_to_hcd(musb)); 1855 usb_put_hcd(musb_to_hcd(musb));
1867#else 1856#else
@@ -1889,8 +1878,10 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
1889 */ 1878 */
1890 if (!plat) { 1879 if (!plat) {
1891 dev_dbg(dev, "no platform_data?\n"); 1880 dev_dbg(dev, "no platform_data?\n");
1892 return -ENODEV; 1881 status = -ENODEV;
1882 goto fail0;
1893 } 1883 }
1884
1894 switch (plat->mode) { 1885 switch (plat->mode) {
1895 case MUSB_HOST: 1886 case MUSB_HOST:
1896#ifdef CONFIG_USB_MUSB_HDRC_HCD 1887#ifdef CONFIG_USB_MUSB_HDRC_HCD
@@ -1912,13 +1903,16 @@ bad_config:
1912#endif 1903#endif
1913 default: 1904 default:
1914 dev_err(dev, "incompatible Kconfig role setting\n"); 1905 dev_err(dev, "incompatible Kconfig role setting\n");
1915 return -EINVAL; 1906 status = -EINVAL;
1907 goto fail0;
1916 } 1908 }
1917 1909
1918 /* allocate */ 1910 /* allocate */
1919 musb = allocate_instance(dev, plat->config, ctrl); 1911 musb = allocate_instance(dev, plat->config, ctrl);
1920 if (!musb) 1912 if (!musb) {
1921 return -ENOMEM; 1913 status = -ENOMEM;
1914 goto fail0;
1915 }
1922 1916
1923 spin_lock_init(&musb->lock); 1917 spin_lock_init(&musb->lock);
1924 musb->board_mode = plat->mode; 1918 musb->board_mode = plat->mode;
@@ -1936,7 +1930,7 @@ bad_config:
1936 if (IS_ERR(musb->clock)) { 1930 if (IS_ERR(musb->clock)) {
1937 status = PTR_ERR(musb->clock); 1931 status = PTR_ERR(musb->clock);
1938 musb->clock = NULL; 1932 musb->clock = NULL;
1939 goto fail; 1933 goto fail1;
1940 } 1934 }
1941 } 1935 }
1942 1936
@@ -1955,12 +1949,12 @@ bad_config:
1955 */ 1949 */
1956 musb->isr = generic_interrupt; 1950 musb->isr = generic_interrupt;
1957 status = musb_platform_init(musb); 1951 status = musb_platform_init(musb);
1958
1959 if (status < 0) 1952 if (status < 0)
1960 goto fail; 1953 goto fail2;
1954
1961 if (!musb->isr) { 1955 if (!musb->isr) {
1962 status = -ENODEV; 1956 status = -ENODEV;
1963 goto fail2; 1957 goto fail3;
1964 } 1958 }
1965 1959
1966#ifndef CONFIG_MUSB_PIO_ONLY 1960#ifndef CONFIG_MUSB_PIO_ONLY
@@ -1986,7 +1980,7 @@ bad_config:
1986 ? MUSB_CONTROLLER_MHDRC 1980 ? MUSB_CONTROLLER_MHDRC
1987 : MUSB_CONTROLLER_HDRC, musb); 1981 : MUSB_CONTROLLER_HDRC, musb);
1988 if (status < 0) 1982 if (status < 0)
1989 goto fail2; 1983 goto fail3;
1990 1984
1991#ifdef CONFIG_USB_MUSB_OTG 1985#ifdef CONFIG_USB_MUSB_OTG
1992 setup_timer(&musb->otg_timer, musb_otg_timer_func, (unsigned long) musb); 1986 setup_timer(&musb->otg_timer, musb_otg_timer_func, (unsigned long) musb);
@@ -1999,7 +1993,7 @@ bad_config:
1999 if (request_irq(nIrq, musb->isr, 0, dev_name(dev), musb)) { 1993 if (request_irq(nIrq, musb->isr, 0, dev_name(dev), musb)) {
2000 dev_err(dev, "request_irq %d failed!\n", nIrq); 1994 dev_err(dev, "request_irq %d failed!\n", nIrq);
2001 status = -ENODEV; 1995 status = -ENODEV;
2002 goto fail2; 1996 goto fail3;
2003 } 1997 }
2004 musb->nIrq = nIrq; 1998 musb->nIrq = nIrq;
2005/* FIXME this handles wakeup irqs wrong */ 1999/* FIXME this handles wakeup irqs wrong */
@@ -2039,8 +2033,6 @@ bad_config:
2039 musb->xceiv->state = OTG_STATE_A_IDLE; 2033 musb->xceiv->state = OTG_STATE_A_IDLE;
2040 2034
2041 status = usb_add_hcd(musb_to_hcd(musb), -1, 0); 2035 status = usb_add_hcd(musb_to_hcd(musb), -1, 0);
2042 if (status)
2043 goto fail;
2044 2036
2045 DBG(1, "%s mode, status %d, devctl %02x %c\n", 2037 DBG(1, "%s mode, status %d, devctl %02x %c\n",
2046 "HOST", status, 2038 "HOST", status,
@@ -2055,8 +2047,6 @@ bad_config:
2055 musb->xceiv->state = OTG_STATE_B_IDLE; 2047 musb->xceiv->state = OTG_STATE_B_IDLE;
2056 2048
2057 status = musb_gadget_setup(musb); 2049 status = musb_gadget_setup(musb);
2058 if (status)
2059 goto fail;
2060 2050
2061 DBG(1, "%s mode, status %d, dev%02x\n", 2051 DBG(1, "%s mode, status %d, dev%02x\n",
2062 is_otg_enabled(musb) ? "OTG" : "PERIPHERAL", 2052 is_otg_enabled(musb) ? "OTG" : "PERIPHERAL",
@@ -2064,12 +2054,14 @@ bad_config:
2064 musb_readb(musb->mregs, MUSB_DEVCTL)); 2054 musb_readb(musb->mregs, MUSB_DEVCTL));
2065 2055
2066 } 2056 }
2057 if (status < 0)
2058 goto fail3;
2067 2059
2068#ifdef CONFIG_SYSFS 2060#ifdef CONFIG_SYSFS
2069 status = sysfs_create_group(&musb->controller->kobj, &musb_attr_group); 2061 status = sysfs_create_group(&musb->controller->kobj, &musb_attr_group);
2070#endif
2071 if (status) 2062 if (status)
2072 goto fail2; 2063 goto fail4;
2064#endif
2073 2065
2074 dev_info(dev, "USB %s mode controller at %p using %s, IRQ %d\n", 2066 dev_info(dev, "USB %s mode controller at %p using %s, IRQ %d\n",
2075 ({char *s; 2067 ({char *s;
@@ -2085,17 +2077,29 @@ bad_config:
 
 	return 0;
 
-fail2:
+fail4:
+	if (!is_otg_enabled(musb) && is_host_enabled(musb))
+		usb_remove_hcd(musb_to_hcd(musb));
+	else
+		musb_gadget_cleanup(musb);
+
+fail3:
+	if (musb->irq_wake)
+		device_init_wakeup(dev, 0);
 	musb_platform_exit(musb);
-fail:
-	dev_err(musb->controller,
-		"musb_init_controller failed with status %d\n", status);
 
+fail2:
 	if (musb->clock)
 		clk_put(musb->clock);
-	device_init_wakeup(dev, 0);
+
+fail1:
+	dev_err(musb->controller,
+		"musb_init_controller failed with status %d\n", status);
+
 	musb_free(musb);
 
+fail0:
+
 	return status;
 
 }
@@ -2132,7 +2136,6 @@ static int __init musb_probe(struct platform_device *pdev)
2132 /* clobbered by use_dma=n */ 2136 /* clobbered by use_dma=n */
2133 orig_dma_mask = dev->dma_mask; 2137 orig_dma_mask = dev->dma_mask;
2134#endif 2138#endif
2135
2136 status = musb_init_controller(dev, irq, base); 2139 status = musb_init_controller(dev, irq, base);
2137 if (status < 0) 2140 if (status < 0)
2138 iounmap(base); 2141 iounmap(base);
@@ -2155,6 +2158,10 @@ static int __exit musb_remove(struct platform_device *pdev)
2155 if (musb->board_mode == MUSB_HOST) 2158 if (musb->board_mode == MUSB_HOST)
2156 usb_remove_hcd(musb_to_hcd(musb)); 2159 usb_remove_hcd(musb_to_hcd(musb));
2157#endif 2160#endif
2161 musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
2162 musb_platform_exit(musb);
2163 musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
2164
2158 musb_free(musb); 2165 musb_free(musb);
2159 iounmap(ctrl_base); 2166 iounmap(ctrl_base);
2160 device_init_wakeup(&pdev->dev, 0); 2167 device_init_wakeup(&pdev->dev, 0);
@@ -2176,6 +2183,7 @@ void musb_save_context(struct musb *musb)
2176 if (is_host_enabled(musb)) { 2183 if (is_host_enabled(musb)) {
2177 musb_context.frame = musb_readw(musb_base, MUSB_FRAME); 2184 musb_context.frame = musb_readw(musb_base, MUSB_FRAME);
2178 musb_context.testmode = musb_readb(musb_base, MUSB_TESTMODE); 2185 musb_context.testmode = musb_readb(musb_base, MUSB_TESTMODE);
2186 musb_context.busctl = musb_read_ulpi_buscontrol(musb->mregs);
2179 } 2187 }
2180 musb_context.power = musb_readb(musb_base, MUSB_POWER); 2188 musb_context.power = musb_readb(musb_base, MUSB_POWER);
2181 musb_context.intrtxe = musb_readw(musb_base, MUSB_INTRTXE); 2189 musb_context.intrtxe = musb_readw(musb_base, MUSB_INTRTXE);
@@ -2247,6 +2255,7 @@ void musb_restore_context(struct musb *musb)
2247 if (is_host_enabled(musb)) { 2255 if (is_host_enabled(musb)) {
2248 musb_writew(musb_base, MUSB_FRAME, musb_context.frame); 2256 musb_writew(musb_base, MUSB_FRAME, musb_context.frame);
2249 musb_writeb(musb_base, MUSB_TESTMODE, musb_context.testmode); 2257 musb_writeb(musb_base, MUSB_TESTMODE, musb_context.testmode);
2258 musb_write_ulpi_buscontrol(musb->mregs, musb_context.busctl);
2250 } 2259 }
2251 musb_writeb(musb_base, MUSB_POWER, musb_context.power); 2260 musb_writeb(musb_base, MUSB_POWER, musb_context.power);
2252 musb_writew(musb_base, MUSB_INTRTXE, musb_context.intrtxe); 2261 musb_writew(musb_base, MUSB_INTRTXE, musb_context.intrtxe);
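The relabelled error path in musb_init_controller() (fail0 through fail4) follows the usual kernel unwind convention: each later failure jumps to the label that releases the most recently acquired resource and then falls through the earlier labels in reverse order of acquisition. A compact sketch of the shape, with hypothetical setup steps standing in for the clock, platform and HCD/gadget stages:

static int example_setup_a(void)  { return 0; }	/* e.g. clk_get()            */
static int example_setup_b(void)  { return 0; }	/* e.g. musb_platform_init() */
static int example_setup_c(void)  { return 0; }	/* e.g. usb_add_hcd()        */
static void example_teardown_b(void) { }
static void example_teardown_a(void) { }

static int example_init(void)
{
	int status;

	status = example_setup_a();
	if (status)
		goto fail0;
	status = example_setup_b();
	if (status)
		goto fail1;
	status = example_setup_c();
	if (status)
		goto fail2;

	return 0;

fail2:
	example_teardown_b();
fail1:
	example_teardown_a();
fail0:
	return status;
}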
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index cd9f4a9a06c6..ac17b004909b 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -478,7 +478,7 @@ struct musb_context_registers {
478 u16 frame; 478 u16 frame;
479 u8 index, testmode; 479 u8 index, testmode;
480 480
481 u8 devctl, misc; 481 u8 devctl, busctl, misc;
482 482
483 struct musb_csr_regs index_regs[MUSB_C_NUM_EPS]; 483 struct musb_csr_regs index_regs[MUSB_C_NUM_EPS];
484}; 484};
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index dec896e888db..877d20b1dff9 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -2042,6 +2042,7 @@ static int musb_urb_enqueue(
2042 * odd, rare, error prone, but legal. 2042 * odd, rare, error prone, but legal.
2043 */ 2043 */
2044 kfree(qh); 2044 kfree(qh);
2045 qh = NULL;
2045 ret = 0; 2046 ret = 0;
2046 } else 2047 } else
2047 ret = musb_schedule(musb, qh, 2048 ret = musb_schedule(musb, qh,
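Clearing qh right after kfree() keeps the shared completion path further down in musb_urb_enqueue(), which still looks at qh, from acting on a stale pointer. The general rule in miniature (hypothetical structure):

#include <linux/slab.h>

struct example_qh { int epnum; };	/* stand-in for the host's qh */

static void example_drop(struct example_qh **pqh)
{
	kfree(*pqh);
	*pqh = NULL;	/* later 'if (qh)' tests now see a harmless NULL */
}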
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index 490cdf15ccb6..82592633502f 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -331,8 +331,5 @@ int musb_platform_exit(struct musb *musb)
331 331
332 musb_platform_suspend(musb); 332 musb_platform_suspend(musb);
333 333
334 clk_put(musb->clock);
335 musb->clock = NULL;
336
337 return 0; 334 return 0;
338} 335}
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index ab776a8d98ca..60d3938cafcf 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -29,6 +29,19 @@ static void tusb_source_power(struct musb *musb, int is_on);
29#define TUSB_REV_MAJOR(reg_val) ((reg_val >> 4) & 0xf) 29#define TUSB_REV_MAJOR(reg_val) ((reg_val >> 4) & 0xf)
30#define TUSB_REV_MINOR(reg_val) (reg_val & 0xf) 30#define TUSB_REV_MINOR(reg_val) (reg_val & 0xf)
31 31
32#ifdef CONFIG_PM
33/* REVISIT: These should be only needed if somebody implements off idle */
34void musb_platform_save_context(struct musb *musb,
35 struct musb_context_registers *musb_context)
36{
37}
38
39void musb_platform_restore_context(struct musb *musb,
40 struct musb_context_registers *musb_context)
41{
42}
43#endif
44
32/* 45/*
33 * Checks the revision. We need to use the DMA register as 3.0 does not 46 * Checks the revision. We need to use the DMA register as 3.0 does not
34 * have correct versions for TUSB_PRCM_REV or TUSB_INT_CTRL_REV. 47 * have correct versions for TUSB_PRCM_REV or TUSB_INT_CTRL_REV.
diff --git a/drivers/usb/musb/tusb6010_omap.c b/drivers/usb/musb/tusb6010_omap.c
index 5afa070d7dc9..c061a88f2b0f 100644
--- a/drivers/usb/musb/tusb6010_omap.c
+++ b/drivers/usb/musb/tusb6010_omap.c
@@ -39,7 +39,7 @@ struct tusb_omap_dma_ch {
39 39
40 struct tusb_omap_dma *tusb_dma; 40 struct tusb_omap_dma *tusb_dma;
41 41
42 void __iomem *dma_addr; 42 dma_addr_t dma_addr;
43 43
44 u32 len; 44 u32 len;
45 u16 packet_sz; 45 u16 packet_sz;
@@ -126,6 +126,7 @@ static void tusb_omap_dma_cb(int lch, u16 ch_status, void *data)
126 struct tusb_omap_dma_ch *chdat = to_chdat(channel); 126 struct tusb_omap_dma_ch *chdat = to_chdat(channel);
127 struct tusb_omap_dma *tusb_dma = chdat->tusb_dma; 127 struct tusb_omap_dma *tusb_dma = chdat->tusb_dma;
128 struct musb *musb = chdat->musb; 128 struct musb *musb = chdat->musb;
129 struct device *dev = musb->controller;
129 struct musb_hw_ep *hw_ep = chdat->hw_ep; 130 struct musb_hw_ep *hw_ep = chdat->hw_ep;
130 void __iomem *ep_conf = hw_ep->conf; 131 void __iomem *ep_conf = hw_ep->conf;
131 void __iomem *mbase = musb->mregs; 132 void __iomem *mbase = musb->mregs;
@@ -173,13 +174,15 @@ static void tusb_omap_dma_cb(int lch, u16 ch_status, void *data)
173 DBG(3, "Using PIO for remaining %lu bytes\n", pio); 174 DBG(3, "Using PIO for remaining %lu bytes\n", pio);
174 buf = phys_to_virt((u32)chdat->dma_addr) + chdat->transfer_len; 175 buf = phys_to_virt((u32)chdat->dma_addr) + chdat->transfer_len;
175 if (chdat->tx) { 176 if (chdat->tx) {
176 dma_cache_maint(phys_to_virt((u32)chdat->dma_addr), 177 dma_unmap_single(dev, chdat->dma_addr,
177 chdat->transfer_len, DMA_TO_DEVICE); 178 chdat->transfer_len,
179 DMA_TO_DEVICE);
178 musb_write_fifo(hw_ep, pio, buf); 180 musb_write_fifo(hw_ep, pio, buf);
179 } else { 181 } else {
182 dma_unmap_single(dev, chdat->dma_addr,
183 chdat->transfer_len,
184 DMA_FROM_DEVICE);
180 musb_read_fifo(hw_ep, pio, buf); 185 musb_read_fifo(hw_ep, pio, buf);
181 dma_cache_maint(phys_to_virt((u32)chdat->dma_addr),
182 chdat->transfer_len, DMA_FROM_DEVICE);
183 } 186 }
184 channel->actual_len += pio; 187 channel->actual_len += pio;
185 } 188 }
@@ -224,6 +227,7 @@ static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz,
224 struct tusb_omap_dma_ch *chdat = to_chdat(channel); 227 struct tusb_omap_dma_ch *chdat = to_chdat(channel);
225 struct tusb_omap_dma *tusb_dma = chdat->tusb_dma; 228 struct tusb_omap_dma *tusb_dma = chdat->tusb_dma;
226 struct musb *musb = chdat->musb; 229 struct musb *musb = chdat->musb;
230 struct device *dev = musb->controller;
227 struct musb_hw_ep *hw_ep = chdat->hw_ep; 231 struct musb_hw_ep *hw_ep = chdat->hw_ep;
228 void __iomem *mbase = musb->mregs; 232 void __iomem *mbase = musb->mregs;
229 void __iomem *ep_conf = hw_ep->conf; 233 void __iomem *ep_conf = hw_ep->conf;
@@ -299,14 +303,16 @@ static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz,
299 chdat->packet_sz = packet_sz; 303 chdat->packet_sz = packet_sz;
300 chdat->len = len; 304 chdat->len = len;
301 channel->actual_len = 0; 305 channel->actual_len = 0;
302 chdat->dma_addr = (void __iomem *)dma_addr; 306 chdat->dma_addr = dma_addr;
303 channel->status = MUSB_DMA_STATUS_BUSY; 307 channel->status = MUSB_DMA_STATUS_BUSY;
304 308
305 /* Since we're recycling dma areas, we need to clean or invalidate */ 309 /* Since we're recycling dma areas, we need to clean or invalidate */
306 if (chdat->tx) 310 if (chdat->tx)
307 dma_cache_maint(phys_to_virt(dma_addr), len, DMA_TO_DEVICE); 311 dma_map_single(dev, phys_to_virt(dma_addr), len,
312 DMA_TO_DEVICE);
308 else 313 else
309 dma_cache_maint(phys_to_virt(dma_addr), len, DMA_FROM_DEVICE); 314 dma_map_single(dev, phys_to_virt(dma_addr), len,
315 DMA_FROM_DEVICE);
310 316
311 /* Use 16-bit transfer if dma_addr is not 32-bit aligned */ 317 /* Use 16-bit transfer if dma_addr is not 32-bit aligned */
312 if ((dma_addr & 0x3) == 0) { 318 if ((dma_addr & 0x3) == 0) {
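dma_cache_maint() was an ARM-private cache hook; the replacement uses the generic streaming-DMA API, where each transfer is bracketed by dma_map_single()/dma_unmap_single() with a matching direction. A minimal sketch of the transmit direction (the device pointer and buffer are illustrative):

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int example_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... hand 'dma' to the DMA engine and wait for completion ... */

	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
	return 0;
}

The receive side uses the same pair with DMA_FROM_DEVICE, and the unmap has to happen before the CPU touches the buffer, which is why the callback above unmaps before musb_read_fifo().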
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index ca9d866672aa..84d0edad8e4f 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -305,6 +305,11 @@ static int option_resume(struct usb_serial *serial);
305#define ZTE_PRODUCT_CDMA_TECH 0xfffe 305#define ZTE_PRODUCT_CDMA_TECH 0xfffe
306#define ZTE_PRODUCT_AC8710 0xfff1 306#define ZTE_PRODUCT_AC8710 0xfff1
307#define ZTE_PRODUCT_AC2726 0xfff5 307#define ZTE_PRODUCT_AC2726 0xfff5
308#define ZTE_PRODUCT_AC8710T 0xffff
309
310/* ZTE PRODUCTS -- alternate vendor ID */
311#define ZTE_VENDOR_ID2 0x1d6b
312#define ZTE_PRODUCT_MF_330 0x0002
308 313
309#define BENQ_VENDOR_ID 0x04a5 314#define BENQ_VENDOR_ID 0x04a5
310#define BENQ_PRODUCT_H10 0x4068 315#define BENQ_PRODUCT_H10 0x4068
@@ -373,6 +378,8 @@ static int option_resume(struct usb_serial *serial);
373#define HAIER_VENDOR_ID 0x201e 378#define HAIER_VENDOR_ID 0x201e
374#define HAIER_PRODUCT_CE100 0x2009 379#define HAIER_PRODUCT_CE100 0x2009
375 380
381#define CINTERION_VENDOR_ID 0x0681
382
376/* some devices interfaces need special handling due to a number of reasons */ 383/* some devices interfaces need special handling due to a number of reasons */
377enum option_blacklist_reason { 384enum option_blacklist_reason {
378 OPTION_BLACKLIST_NONE = 0, 385 OPTION_BLACKLIST_NONE = 0,
@@ -679,6 +686,8 @@ static const struct usb_device_id option_ids[] = {
679 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) }, 686 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) },
680 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) }, 687 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) },
681 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) }, 688 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
689 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710T, 0xff, 0xff, 0xff) },
690 { USB_DEVICE(ZTE_VENDOR_ID2, ZTE_PRODUCT_MF_330) },
682 { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) }, 691 { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
683 { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) }, 692 { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) },
684 { USB_DEVICE(ALINK_VENDOR_ID, DLINK_PRODUCT_DWM_652_U5) }, /* Yes, ALINK_VENDOR_ID */ 693 { USB_DEVICE(ALINK_VENDOR_ID, DLINK_PRODUCT_DWM_652_U5) }, /* Yes, ALINK_VENDOR_ID */
@@ -716,6 +725,7 @@ static const struct usb_device_id option_ids[] = {
716 { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1011)}, 725 { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1011)},
717 { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1012)}, 726 { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1012)},
718 727
728 { USB_DEVICE(CINTERION_VENDOR_ID, 0x0047) },
719 { } /* Terminating entry */ 729 { } /* Terminating entry */
720}; 730};
721MODULE_DEVICE_TABLE(usb, option_ids); 731MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 73d5f346d3e0..c28b1607eacc 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -59,6 +59,7 @@ static const struct usb_device_id id_table[] = {
59 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ALDIGA) }, 59 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ALDIGA) },
60 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MMX) }, 60 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MMX) },
61 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GPRS) }, 61 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GPRS) },
62 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_HCR331) },
62 { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) }, 63 { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
63 { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) }, 64 { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
64 { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) }, 65 { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) },
@@ -97,6 +98,7 @@ static const struct usb_device_id id_table[] = {
97 { USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) }, 98 { USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) },
98 { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) }, 99 { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) },
99 { USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) }, 100 { USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) },
101 { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) },
100 { } /* Terminating entry */ 102 { } /* Terminating entry */
101}; 103};
102 104
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index d640dc951568..23c09b38b9ec 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -20,6 +20,7 @@
20#define PL2303_PRODUCT_ID_ALDIGA 0x0611 20#define PL2303_PRODUCT_ID_ALDIGA 0x0611
21#define PL2303_PRODUCT_ID_MMX 0x0612 21#define PL2303_PRODUCT_ID_MMX 0x0612
22#define PL2303_PRODUCT_ID_GPRS 0x0609 22#define PL2303_PRODUCT_ID_GPRS 0x0609
23#define PL2303_PRODUCT_ID_HCR331 0x331a
23 24
24#define ATEN_VENDOR_ID 0x0557 25#define ATEN_VENDOR_ID 0x0557
25#define ATEN_VENDOR_ID2 0x0547 26#define ATEN_VENDOR_ID2 0x0547
@@ -134,3 +135,7 @@
134/* Sanwa KB-USB2 multimeter cable (ID: 11ad:0001) */ 135/* Sanwa KB-USB2 multimeter cable (ID: 11ad:0001) */
135#define SANWA_VENDOR_ID 0x11ad 136#define SANWA_VENDOR_ID 0x11ad
136#define SANWA_PRODUCT_ID 0x0001 137#define SANWA_PRODUCT_ID 0x0001
138
139/* ADLINK ND-6530 RS232,RS485 and RS422 adapter */
140#define ADLINK_VENDOR_ID 0x0b63
141#define ADLINK_ND6530_PRODUCT_ID 0x6530
diff --git a/drivers/usb/serial/qcaux.c b/drivers/usb/serial/qcaux.c
index 0b9362061713..7e3bea23600b 100644
--- a/drivers/usb/serial/qcaux.c
+++ b/drivers/usb/serial/qcaux.c
@@ -42,6 +42,14 @@
42#define CMOTECH_PRODUCT_CDU550 0x5553 42#define CMOTECH_PRODUCT_CDU550 0x5553
43#define CMOTECH_PRODUCT_CDX650 0x6512 43#define CMOTECH_PRODUCT_CDX650 0x6512
44 44
45/* LG devices */
46#define LG_VENDOR_ID 0x1004
47#define LG_PRODUCT_VX4400_6000 0x6000 /* VX4400/VX6000/Rumor */
48
49/* Sanyo devices */
50#define SANYO_VENDOR_ID 0x0474
51#define SANYO_PRODUCT_KATANA_LX 0x0754 /* SCP-3800 (Katana LX) */
52
45static struct usb_device_id id_table[] = { 53static struct usb_device_id id_table[] = {
46 { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_PC5740, 0xff, 0x00, 0x00) }, 54 { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_PC5740, 0xff, 0x00, 0x00) },
47 { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_PC5750, 0xff, 0x00, 0x00) }, 55 { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_PC5750, 0xff, 0x00, 0x00) },
@@ -51,6 +59,8 @@ static struct usb_device_id id_table[] = {
51 { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_UM175_ALLTEL, 0xff, 0x00, 0x00) }, 59 { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_UM175_ALLTEL, 0xff, 0x00, 0x00) },
52 { USB_DEVICE_AND_INTERFACE_INFO(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU550, 0xff, 0xff, 0x00) }, 60 { USB_DEVICE_AND_INTERFACE_INFO(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU550, 0xff, 0xff, 0x00) },
53 { USB_DEVICE_AND_INTERFACE_INFO(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDX650, 0xff, 0xff, 0x00) }, 61 { USB_DEVICE_AND_INTERFACE_INFO(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDX650, 0xff, 0xff, 0x00) },
62 { USB_DEVICE_AND_INTERFACE_INFO(LG_VENDOR_ID, LG_PRODUCT_VX4400_6000, 0xff, 0xff, 0x00) },
63 { USB_DEVICE_AND_INTERFACE_INFO(SANYO_VENDOR_ID, SANYO_PRODUCT_KATANA_LX, 0xff, 0xff, 0x00) },
54 { }, 64 { },
55}; 65};
56MODULE_DEVICE_TABLE(usb, id_table); 66MODULE_DEVICE_TABLE(usb, id_table);
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 9202f94505e6..ef0bdb08d788 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -230,6 +230,7 @@ static const struct sierra_iface_info direct_ip_interface_blacklist = {
230static const struct usb_device_id id_table[] = { 230static const struct usb_device_id id_table[] = {
231 { USB_DEVICE(0x0F3D, 0x0112) }, /* Airprime/Sierra PC 5220 */ 231 { USB_DEVICE(0x0F3D, 0x0112) }, /* Airprime/Sierra PC 5220 */
232 { USB_DEVICE(0x03F0, 0x1B1D) }, /* HP ev2200 a.k.a MC5720 */ 232 { USB_DEVICE(0x03F0, 0x1B1D) }, /* HP ev2200 a.k.a MC5720 */
233 { USB_DEVICE(0x03F0, 0x211D) }, /* HP ev2210 a.k.a MC5725 */
233 { USB_DEVICE(0x03F0, 0x1E1D) }, /* HP hs2300 a.k.a MC8775 */ 234 { USB_DEVICE(0x03F0, 0x1E1D) }, /* HP hs2300 a.k.a MC8775 */
234 235
235 { USB_DEVICE(0x1199, 0x0017) }, /* Sierra Wireless EM5625 */ 236 { USB_DEVICE(0x1199, 0x0017) }, /* Sierra Wireless EM5625 */
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 0afe5c71c17e..e1bfda33f5b9 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -172,7 +172,7 @@ static unsigned int product_5052_count;
172/* the array dimension is the number of default entries plus */ 172/* the array dimension is the number of default entries plus */
173/* TI_EXTRA_VID_PID_COUNT user defined entries plus 1 terminating */ 173/* TI_EXTRA_VID_PID_COUNT user defined entries plus 1 terminating */
174/* null entry */ 174/* null entry */
175static struct usb_device_id ti_id_table_3410[10+TI_EXTRA_VID_PID_COUNT+1] = { 175static struct usb_device_id ti_id_table_3410[13+TI_EXTRA_VID_PID_COUNT+1] = {
176 { USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) }, 176 { USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) },
177 { USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) }, 177 { USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) },
178 { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) }, 178 { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) },
@@ -180,6 +180,9 @@ static struct usb_device_id ti_id_table_3410[10+TI_EXTRA_VID_PID_COUNT+1] = {
180 { USB_DEVICE(MTS_VENDOR_ID, MTS_CDMA_PRODUCT_ID) }, 180 { USB_DEVICE(MTS_VENDOR_ID, MTS_CDMA_PRODUCT_ID) },
181 { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_PRODUCT_ID) }, 181 { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_PRODUCT_ID) },
182 { USB_DEVICE(MTS_VENDOR_ID, MTS_EDGE_PRODUCT_ID) }, 182 { USB_DEVICE(MTS_VENDOR_ID, MTS_EDGE_PRODUCT_ID) },
183 { USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234MU_PRODUCT_ID) },
184 { USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234ZBA_PRODUCT_ID) },
185 { USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234ZBAOLD_PRODUCT_ID) },
183 { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) }, 186 { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) },
184 { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) }, 187 { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) },
185 { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) }, 188 { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) },
@@ -192,7 +195,7 @@ static struct usb_device_id ti_id_table_5052[5+TI_EXTRA_VID_PID_COUNT+1] = {
192 { USB_DEVICE(TI_VENDOR_ID, TI_5052_FIRMWARE_PRODUCT_ID) }, 195 { USB_DEVICE(TI_VENDOR_ID, TI_5052_FIRMWARE_PRODUCT_ID) },
193}; 196};
194 197
195static struct usb_device_id ti_id_table_combined[14+2*TI_EXTRA_VID_PID_COUNT+1] = { 198static struct usb_device_id ti_id_table_combined[17+2*TI_EXTRA_VID_PID_COUNT+1] = {
196 { USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) }, 199 { USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) },
197 { USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) }, 200 { USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) },
198 { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) }, 201 { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) },
@@ -200,6 +203,9 @@ static struct usb_device_id ti_id_table_combined[14+2*TI_EXTRA_VID_PID_COUNT+1]
200 { USB_DEVICE(MTS_VENDOR_ID, MTS_CDMA_PRODUCT_ID) }, 203 { USB_DEVICE(MTS_VENDOR_ID, MTS_CDMA_PRODUCT_ID) },
201 { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_PRODUCT_ID) }, 204 { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_PRODUCT_ID) },
202 { USB_DEVICE(MTS_VENDOR_ID, MTS_EDGE_PRODUCT_ID) }, 205 { USB_DEVICE(MTS_VENDOR_ID, MTS_EDGE_PRODUCT_ID) },
206 { USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234MU_PRODUCT_ID) },
207 { USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234ZBA_PRODUCT_ID) },
208 { USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234ZBAOLD_PRODUCT_ID) },
203 { USB_DEVICE(TI_VENDOR_ID, TI_5052_BOOT_PRODUCT_ID) }, 209 { USB_DEVICE(TI_VENDOR_ID, TI_5052_BOOT_PRODUCT_ID) },
204 { USB_DEVICE(TI_VENDOR_ID, TI_5152_BOOT_PRODUCT_ID) }, 210 { USB_DEVICE(TI_VENDOR_ID, TI_5152_BOOT_PRODUCT_ID) },
205 { USB_DEVICE(TI_VENDOR_ID, TI_5052_EEPROM_PRODUCT_ID) }, 211 { USB_DEVICE(TI_VENDOR_ID, TI_5052_EEPROM_PRODUCT_ID) },
@@ -287,6 +293,8 @@ MODULE_FIRMWARE("ti_5052.fw");
287MODULE_FIRMWARE("mts_cdma.fw"); 293MODULE_FIRMWARE("mts_cdma.fw");
288MODULE_FIRMWARE("mts_gsm.fw"); 294MODULE_FIRMWARE("mts_gsm.fw");
289MODULE_FIRMWARE("mts_edge.fw"); 295MODULE_FIRMWARE("mts_edge.fw");
296MODULE_FIRMWARE("mts_mt9234mu.fw");
297MODULE_FIRMWARE("mts_mt9234zba.fw");
290 298
291module_param(debug, bool, S_IRUGO | S_IWUSR); 299module_param(debug, bool, S_IRUGO | S_IWUSR);
292MODULE_PARM_DESC(debug, "Enable debugging, 0=no, 1=yes"); 300MODULE_PARM_DESC(debug, "Enable debugging, 0=no, 1=yes");
@@ -1687,6 +1695,7 @@ static int ti_download_firmware(struct ti_device *tdev)
1687 const struct firmware *fw_p; 1695 const struct firmware *fw_p;
1688 char buf[32]; 1696 char buf[32];
1689 1697
1698 dbg("%s\n", __func__);
1690 /* try ID specific firmware first, then try generic firmware */ 1699 /* try ID specific firmware first, then try generic firmware */
1691 sprintf(buf, "ti_usb-v%04x-p%04x.fw", dev->descriptor.idVendor, 1700 sprintf(buf, "ti_usb-v%04x-p%04x.fw", dev->descriptor.idVendor,
1692 dev->descriptor.idProduct); 1701 dev->descriptor.idProduct);
@@ -1703,7 +1712,15 @@ static int ti_download_firmware(struct ti_device *tdev)
1703 case MTS_EDGE_PRODUCT_ID: 1712 case MTS_EDGE_PRODUCT_ID:
1704 strcpy(buf, "mts_edge.fw"); 1713 strcpy(buf, "mts_edge.fw");
1705 break; 1714 break;
1706 } 1715 case MTS_MT9234MU_PRODUCT_ID:
1716 strcpy(buf, "mts_mt9234mu.fw");
1717 break;
1718 case MTS_MT9234ZBA_PRODUCT_ID:
1719 strcpy(buf, "mts_mt9234zba.fw");
1720 break;
1721 case MTS_MT9234ZBAOLD_PRODUCT_ID:
1722 strcpy(buf, "mts_mt9234zba.fw");
1723 break; }
1707 } 1724 }
1708 if (buf[0] == '\0') { 1725 if (buf[0] == '\0') {
1709 if (tdev->td_is_3410) 1726 if (tdev->td_is_3410)
@@ -1718,7 +1735,7 @@ static int ti_download_firmware(struct ti_device *tdev)
1718 return -ENOENT; 1735 return -ENOENT;
1719 } 1736 }
1720 if (fw_p->size > TI_FIRMWARE_BUF_SIZE) { 1737 if (fw_p->size > TI_FIRMWARE_BUF_SIZE) {
1721 dev_err(&dev->dev, "%s - firmware too large\n", __func__); 1738 dev_err(&dev->dev, "%s - firmware too large %zu\n", __func__, fw_p->size);
1722 return -ENOENT; 1739 return -ENOENT;
1723 } 1740 }
1724 1741
@@ -1730,6 +1747,7 @@ static int ti_download_firmware(struct ti_device *tdev)
1730 status = ti_do_download(dev, pipe, buffer, fw_p->size); 1747 status = ti_do_download(dev, pipe, buffer, fw_p->size);
1731 kfree(buffer); 1748 kfree(buffer);
1732 } else { 1749 } else {
1750 dbg("%s ENOMEM\n", __func__);
1733 status = -ENOMEM; 1751 status = -ENOMEM;
1734 } 1752 }
1735 release_firmware(fw_p); 1753 release_firmware(fw_p);
diff --git a/drivers/usb/serial/ti_usb_3410_5052.h b/drivers/usb/serial/ti_usb_3410_5052.h
index f323c6025858..2aac1953993b 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.h
+++ b/drivers/usb/serial/ti_usb_3410_5052.h
@@ -45,6 +45,9 @@
45#define MTS_CDMA_PRODUCT_ID 0xF110 45#define MTS_CDMA_PRODUCT_ID 0xF110
46#define MTS_GSM_PRODUCT_ID 0xF111 46#define MTS_GSM_PRODUCT_ID 0xF111
47#define MTS_EDGE_PRODUCT_ID 0xF112 47#define MTS_EDGE_PRODUCT_ID 0xF112
48#define MTS_MT9234MU_PRODUCT_ID 0xF114
49#define MTS_MT9234ZBA_PRODUCT_ID 0xF115
50#define MTS_MT9234ZBAOLD_PRODUCT_ID 0x0319
48 51
49/* Commands */ 52/* Commands */
50#define TI_GET_VERSION 0x01 53#define TI_GET_VERSION 0x01
diff --git a/drivers/usb/wusbcore/devconnect.c b/drivers/usb/wusbcore/devconnect.c
index 46e79d349498..7ec24e46b34b 100644
--- a/drivers/usb/wusbcore/devconnect.c
+++ b/drivers/usb/wusbcore/devconnect.c
@@ -438,7 +438,7 @@ static void __wusbhc_keep_alive(struct wusbhc *wusbhc)
438 old_keep_alives = ie->hdr.bLength - sizeof(ie->hdr); 438 old_keep_alives = ie->hdr.bLength - sizeof(ie->hdr);
439 keep_alives = 0; 439 keep_alives = 0;
440 for (cnt = 0; 440 for (cnt = 0;
441 keep_alives <= WUIE_ELT_MAX && cnt < wusbhc->ports_max; 441 keep_alives < WUIE_ELT_MAX && cnt < wusbhc->ports_max;
442 cnt++) { 442 cnt++) {
443 unsigned tt = msecs_to_jiffies(wusbhc->trust_timeout); 443 unsigned tt = msecs_to_jiffies(wusbhc->trust_timeout);
444 444
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index e69d238c5af0..49fa953aaf6e 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -1035,7 +1035,12 @@ int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
1035/* This actually signals the guest, using eventfd. */ 1035/* This actually signals the guest, using eventfd. */
1036void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq) 1036void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
1037{ 1037{
1038 __u16 flags = 0; 1038 __u16 flags;
1039 /* Flush out used index updates. This is paired
1040 * with the barrier that the Guest executes when enabling
1041 * interrupts. */
1042 smp_mb();
1043
1039 if (get_user(flags, &vq->avail->flags)) { 1044 if (get_user(flags, &vq->avail->flags)) {
1040 vq_err(vq, "Failed to get flags"); 1045 vq_err(vq, "Failed to get flags");
1041 return; 1046 return;
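[Editor's note, illustrative only] The comment added above vhost_signal() describes a classic store/fence/load pairing: the host publishes its used-index update, executes smp_mb(), and only then reads the guest's avail->flags to decide whether to send an interrupt, while the guest enables interrupts and then re-checks the used ring behind its own barrier. The sketch below models that pairing in userspace with C11 atomics; the names (used_idx, avail_flags, NO_NOTIFY) are hypothetical stand-ins, not the real vhost/virtio fields.

/*
 * Minimal sketch of the barrier pairing described in the vhost_signal()
 * comment. Illustrative names only; not the kernel's data structures.
 */
#include <stdatomic.h>
#include <stdio.h>

#define NO_NOTIFY 0x1              /* "guest does not want an interrupt" */

static atomic_ushort used_idx;     /* written by host, read by guest */
static atomic_ushort avail_flags;  /* written by guest, read by host */

/* Host side: publish a used entry, then decide whether to signal. */
static int host_publish_and_check(unsigned short new_idx)
{
	atomic_store_explicit(&used_idx, new_idx, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);   /* the smp_mb() added by the patch */
	unsigned short flags =
		atomic_load_explicit(&avail_flags, memory_order_relaxed);
	return !(flags & NO_NOTIFY);                 /* 1 = send the interrupt */
}

/* Guest side: enable interrupts, then re-check for work it may have missed. */
static int guest_enable_and_recheck(unsigned short last_seen)
{
	atomic_store_explicit(&avail_flags, 0, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);   /* the guest's paired barrier */
	return atomic_load_explicit(&used_idx, memory_order_relaxed) != last_seen;
}

int main(void)
{
	/* Single-threaded smoke test of the two halves of the protocol. */
	atomic_store(&avail_flags, NO_NOTIFY);
	printf("signal while masked:     %d\n", host_publish_and_check(1));
	printf("guest sees pending work: %d\n", guest_enable_and_recheck(0));
	printf("signal after unmask:     %d\n", host_publish_and_check(2));
	return 0;
}

With both barriers in place, whichever order the two sides run in, at least one of them observes the other's write, so a used-ring update cannot be published without either an interrupt being sent or the guest noticing the work on its re-check.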
diff --git a/drivers/video/amifb.c b/drivers/video/amifb.c
index dca48df98444..e5d6b56d4447 100644
--- a/drivers/video/amifb.c
+++ b/drivers/video/amifb.c
@@ -50,8 +50,9 @@
50#include <linux/fb.h> 50#include <linux/fb.h>
51#include <linux/init.h> 51#include <linux/init.h>
52#include <linux/ioport.h> 52#include <linux/ioport.h>
53 53#include <linux/platform_device.h>
54#include <linux/uaccess.h> 54#include <linux/uaccess.h>
55
55#include <asm/system.h> 56#include <asm/system.h>
56#include <asm/irq.h> 57#include <asm/irq.h>
57#include <asm/amigahw.h> 58#include <asm/amigahw.h>
@@ -1135,7 +1136,7 @@ static int amifb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg
1135 * Interface to the low level console driver 1136 * Interface to the low level console driver
1136 */ 1137 */
1137 1138
1138static void amifb_deinit(void); 1139static void amifb_deinit(struct platform_device *pdev);
1139 1140
1140 /* 1141 /*
1141 * Internal routines 1142 * Internal routines
@@ -2246,7 +2247,7 @@ static inline void chipfree(void)
2246 * Initialisation 2247 * Initialisation
2247 */ 2248 */
2248 2249
2249static int __init amifb_init(void) 2250static int __init amifb_probe(struct platform_device *pdev)
2250{ 2251{
2251 int tag, i, err = 0; 2252 int tag, i, err = 0;
2252 u_long chipptr; 2253 u_long chipptr;
@@ -2261,16 +2262,6 @@ static int __init amifb_init(void)
2261 } 2262 }
2262 amifb_setup(option); 2263 amifb_setup(option);
2263#endif 2264#endif
2264 if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(AMI_VIDEO))
2265 return -ENODEV;
2266
2267 /*
2268 * We request all registers starting from bplpt[0]
2269 */
2270 if (!request_mem_region(CUSTOM_PHYSADDR+0xe0, 0x120,
2271 "amifb [Denise/Lisa]"))
2272 return -EBUSY;
2273
2274 custom.dmacon = DMAF_ALL | DMAF_MASTER; 2265 custom.dmacon = DMAF_ALL | DMAF_MASTER;
2275 2266
2276 switch (amiga_chipset) { 2267 switch (amiga_chipset) {
@@ -2377,6 +2368,7 @@ default_chipset:
2377 fb_info.fbops = &amifb_ops; 2368 fb_info.fbops = &amifb_ops;
2378 fb_info.par = &currentpar; 2369 fb_info.par = &currentpar;
2379 fb_info.flags = FBINFO_DEFAULT; 2370 fb_info.flags = FBINFO_DEFAULT;
2371 fb_info.device = &pdev->dev;
2380 2372
2381 if (!fb_find_mode(&fb_info.var, &fb_info, mode_option, ami_modedb, 2373 if (!fb_find_mode(&fb_info.var, &fb_info, mode_option, ami_modedb,
2382 NUM_TOTAL_MODES, &ami_modedb[defmode], 4)) { 2374 NUM_TOTAL_MODES, &ami_modedb[defmode], 4)) {
@@ -2451,18 +2443,18 @@ default_chipset:
2451 return 0; 2443 return 0;
2452 2444
2453amifb_error: 2445amifb_error:
2454 amifb_deinit(); 2446 amifb_deinit(pdev);
2455 return err; 2447 return err;
2456} 2448}
2457 2449
2458static void amifb_deinit(void) 2450static void amifb_deinit(struct platform_device *pdev)
2459{ 2451{
2460 if (fb_info.cmap.len) 2452 if (fb_info.cmap.len)
2461 fb_dealloc_cmap(&fb_info.cmap); 2453 fb_dealloc_cmap(&fb_info.cmap);
2454 fb_dealloc_cmap(&fb_info.cmap);
2462 chipfree(); 2455 chipfree();
2463 if (videomemory) 2456 if (videomemory)
2464 iounmap((void*)videomemory); 2457 iounmap((void*)videomemory);
2465 release_mem_region(CUSTOM_PHYSADDR+0xe0, 0x120);
2466 custom.dmacon = DMAF_ALL | DMAF_MASTER; 2458 custom.dmacon = DMAF_ALL | DMAF_MASTER;
2467} 2459}
2468 2460
@@ -3794,14 +3786,35 @@ static void ami_rebuild_copper(void)
3794 } 3786 }
3795} 3787}
3796 3788
3797static void __exit amifb_exit(void) 3789static int __exit amifb_remove(struct platform_device *pdev)
3798{ 3790{
3799 unregister_framebuffer(&fb_info); 3791 unregister_framebuffer(&fb_info);
3800 amifb_deinit(); 3792 amifb_deinit(pdev);
3801 amifb_video_off(); 3793 amifb_video_off();
3794 return 0;
3795}
3796
3797static struct platform_driver amifb_driver = {
3798 .remove = __exit_p(amifb_remove),
3799 .driver = {
3800 .name = "amiga-video",
3801 .owner = THIS_MODULE,
3802 },
3803};
3804
3805static int __init amifb_init(void)
3806{
3807 return platform_driver_probe(&amifb_driver, amifb_probe);
3802} 3808}
3803 3809
3804module_init(amifb_init); 3810module_init(amifb_init);
3811
3812static void __exit amifb_exit(void)
3813{
3814 platform_driver_unregister(&amifb_driver);
3815}
3816
3805module_exit(amifb_exit); 3817module_exit(amifb_exit);
3806 3818
3807MODULE_LICENSE("GPL"); 3819MODULE_LICENSE("GPL");
3820MODULE_ALIAS("platform:amiga-video");
diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
index 44e49c28b2a7..c2ec3dcd4e91 100644
--- a/drivers/video/bfin-t350mcqb-fb.c
+++ b/drivers/video/bfin-t350mcqb-fb.c
@@ -488,9 +488,9 @@ static int __devinit bfin_t350mcqb_probe(struct platform_device *pdev)
488 fbinfo->fbops = &bfin_t350mcqb_fb_ops; 488 fbinfo->fbops = &bfin_t350mcqb_fb_ops;
489 fbinfo->flags = FBINFO_FLAG_DEFAULT; 489 fbinfo->flags = FBINFO_FLAG_DEFAULT;
490 490
491 info->fb_buffer = 491 info->fb_buffer = dma_alloc_coherent(NULL, fbinfo->fix.smem_len +
492 dma_alloc_coherent(NULL, fbinfo->fix.smem_len, &info->dma_handle, 492 ACTIVE_VIDEO_MEM_OFFSET,
493 GFP_KERNEL); 493 &info->dma_handle, GFP_KERNEL);
494 494
495 if (NULL == info->fb_buffer) { 495 if (NULL == info->fb_buffer) {
496 printk(KERN_ERR DRIVER_NAME 496 printk(KERN_ERR DRIVER_NAME
@@ -568,8 +568,8 @@ out7:
568out6: 568out6:
569 fb_dealloc_cmap(&fbinfo->cmap); 569 fb_dealloc_cmap(&fbinfo->cmap);
570out4: 570out4:
571 dma_free_coherent(NULL, fbinfo->fix.smem_len, info->fb_buffer, 571 dma_free_coherent(NULL, fbinfo->fix.smem_len + ACTIVE_VIDEO_MEM_OFFSET,
572 info->dma_handle); 572 info->fb_buffer, info->dma_handle);
573out3: 573out3:
574 framebuffer_release(fbinfo); 574 framebuffer_release(fbinfo);
575out2: 575out2:
@@ -592,8 +592,9 @@ static int __devexit bfin_t350mcqb_remove(struct platform_device *pdev)
592 free_irq(info->irq, info); 592 free_irq(info->irq, info);
593 593
594 if (info->fb_buffer != NULL) 594 if (info->fb_buffer != NULL)
595 dma_free_coherent(NULL, fbinfo->fix.smem_len, info->fb_buffer, 595 dma_free_coherent(NULL, fbinfo->fix.smem_len +
596 info->dma_handle); 596 ACTIVE_VIDEO_MEM_OFFSET, info->fb_buffer,
597 info->dma_handle);
597 598
598 fb_dealloc_cmap(&fbinfo->cmap); 599 fb_dealloc_cmap(&fbinfo->cmap);
599 600
diff --git a/drivers/video/cirrusfb.c b/drivers/video/cirrusfb.c
index 8d8dfda2f868..6df7c54db0a3 100644
--- a/drivers/video/cirrusfb.c
+++ b/drivers/video/cirrusfb.c
@@ -299,6 +299,7 @@ static const struct zorro_device_id cirrusfb_zorro_table[] = {
299 }, 299 },
300 { 0 } 300 { 0 }
301}; 301};
302MODULE_DEVICE_TABLE(zorro, cirrusfb_zorro_table);
302 303
303static const struct { 304static const struct {
304 zorro_id id2; 305 zorro_id id2;
diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c
index 581d2dbf675a..ecf405562f5c 100644
--- a/drivers/video/efifb.c
+++ b/drivers/video/efifb.c
@@ -49,6 +49,7 @@ enum {
49 M_MBP_2, /* MacBook Pro 2nd gen */ 49 M_MBP_2, /* MacBook Pro 2nd gen */
50 M_MBP_SR, /* MacBook Pro (Santa Rosa) */ 50 M_MBP_SR, /* MacBook Pro (Santa Rosa) */
51 M_MBP_4, /* MacBook Pro, 4th gen */ 51 M_MBP_4, /* MacBook Pro, 4th gen */
52 M_MBP_5_1, /* MacBook Pro, 5,1th gen */
52 M_UNKNOWN /* placeholder */ 53 M_UNKNOWN /* placeholder */
53}; 54};
54 55
@@ -70,6 +71,7 @@ static struct efifb_dmi_info {
70 [M_MBP_2] = { "mbp2", 0, 0, 0, 0 }, /* placeholder */ 71 [M_MBP_2] = { "mbp2", 0, 0, 0, 0 }, /* placeholder */
71 [M_MBP_SR] = { "mbp3", 0x80030000, 2048 * 4, 1440, 900 }, 72 [M_MBP_SR] = { "mbp3", 0x80030000, 2048 * 4, 1440, 900 },
72 [M_MBP_4] = { "mbp4", 0xc0060000, 2048 * 4, 1920, 1200 }, 73 [M_MBP_4] = { "mbp4", 0xc0060000, 2048 * 4, 1920, 1200 },
74 [M_MBP_5_1] = { "mbp51", 0xc0010000, 2048 * 4, 1440, 900 },
73 [M_UNKNOWN] = { NULL, 0, 0, 0, 0 } 75 [M_UNKNOWN] = { NULL, 0, 0, 0, 0 }
74}; 76};
75 77
@@ -106,6 +108,7 @@ static struct dmi_system_id __initdata dmi_system_table[] = {
106 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro3,1", M_MBP_SR), 108 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro3,1", M_MBP_SR),
107 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro3,1", M_MBP_SR), 109 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro3,1", M_MBP_SR),
108 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro4,1", M_MBP_4), 110 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro4,1", M_MBP_4),
111 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,1", M_MBP_5_1),
109 {}, 112 {},
110}; 113};
111 114
diff --git a/drivers/video/fm2fb.c b/drivers/video/fm2fb.c
index 6c91c61cdb63..1b0feb8e7244 100644
--- a/drivers/video/fm2fb.c
+++ b/drivers/video/fm2fb.c
@@ -219,6 +219,7 @@ static struct zorro_device_id fm2fb_devices[] __devinitdata = {
219 { ZORRO_PROD_HELFRICH_RAINBOW_II }, 219 { ZORRO_PROD_HELFRICH_RAINBOW_II },
220 { 0 } 220 { 0 }
221}; 221};
222MODULE_DEVICE_TABLE(zorro, fm2fb_devices);
222 223
223static struct zorro_driver fm2fb_driver = { 224static struct zorro_driver fm2fb_driver = {
224 .name = "fm2fb", 225 .name = "fm2fb",
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index e14bd0749129..e8c769944812 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -695,6 +695,7 @@ static int sh_mobile_lcdc_setup_clocks(struct platform_device *pdev,
695 * 1) Enable Runtime PM 695 * 1) Enable Runtime PM
696 * 2) Force Runtime PM Resume since hardware is accessed from probe() 696 * 2) Force Runtime PM Resume since hardware is accessed from probe()
697 */ 697 */
698 priv->dev = &pdev->dev;
698 pm_runtime_enable(priv->dev); 699 pm_runtime_enable(priv->dev);
699 pm_runtime_resume(priv->dev); 700 pm_runtime_resume(priv->dev);
700 return 0; 701 return 0;
@@ -957,25 +958,24 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev)
957 958
958 if (!pdev->dev.platform_data) { 959 if (!pdev->dev.platform_data) {
959 dev_err(&pdev->dev, "no platform data defined\n"); 960 dev_err(&pdev->dev, "no platform data defined\n");
960 error = -EINVAL; 961 return -EINVAL;
961 goto err0;
962 } 962 }
963 963
964 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 964 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
965 i = platform_get_irq(pdev, 0); 965 i = platform_get_irq(pdev, 0);
966 if (!res || i < 0) { 966 if (!res || i < 0) {
967 dev_err(&pdev->dev, "cannot get platform resources\n"); 967 dev_err(&pdev->dev, "cannot get platform resources\n");
968 error = -ENOENT; 968 return -ENOENT;
969 goto err0;
970 } 969 }
971 970
972 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 971 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
973 if (!priv) { 972 if (!priv) {
974 dev_err(&pdev->dev, "cannot allocate device data\n"); 973 dev_err(&pdev->dev, "cannot allocate device data\n");
975 error = -ENOMEM; 974 return -ENOMEM;
976 goto err0;
977 } 975 }
978 976
977 platform_set_drvdata(pdev, priv);
978
979 error = request_irq(i, sh_mobile_lcdc_irq, IRQF_DISABLED, 979 error = request_irq(i, sh_mobile_lcdc_irq, IRQF_DISABLED,
980 dev_name(&pdev->dev), priv); 980 dev_name(&pdev->dev), priv);
981 if (error) { 981 if (error) {
@@ -984,8 +984,6 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev)
984 } 984 }
985 985
986 priv->irq = i; 986 priv->irq = i;
987 priv->dev = &pdev->dev;
988 platform_set_drvdata(pdev, priv);
989 pdata = pdev->dev.platform_data; 987 pdata = pdev->dev.platform_data;
990 988
991 j = 0; 989 j = 0;
@@ -1099,9 +1097,9 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev)
1099 info = ch->info; 1097 info = ch->info;
1100 1098
1101 if (info->fbdefio) { 1099 if (info->fbdefio) {
1102 priv->ch->sglist = vmalloc(sizeof(struct scatterlist) * 1100 ch->sglist = vmalloc(sizeof(struct scatterlist) *
1103 info->fix.smem_len >> PAGE_SHIFT); 1101 info->fix.smem_len >> PAGE_SHIFT);
1104 if (!priv->ch->sglist) { 1102 if (!ch->sglist) {
1105 dev_err(&pdev->dev, "cannot allocate sglist\n"); 1103 dev_err(&pdev->dev, "cannot allocate sglist\n");
1106 goto err1; 1104 goto err1;
1107 } 1105 }
@@ -1126,9 +1124,9 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev)
1126 } 1124 }
1127 1125
1128 return 0; 1126 return 0;
1129 err1: 1127err1:
1130 sh_mobile_lcdc_remove(pdev); 1128 sh_mobile_lcdc_remove(pdev);
1131 err0: 1129
1132 return error; 1130 return error;
1133} 1131}
1134 1132
@@ -1139,7 +1137,7 @@ static int sh_mobile_lcdc_remove(struct platform_device *pdev)
1139 int i; 1137 int i;
1140 1138
1141 for (i = 0; i < ARRAY_SIZE(priv->ch); i++) 1139 for (i = 0; i < ARRAY_SIZE(priv->ch); i++)
1142 if (priv->ch[i].info->dev) 1140 if (priv->ch[i].info && priv->ch[i].info->dev)
1143 unregister_framebuffer(priv->ch[i].info); 1141 unregister_framebuffer(priv->ch[i].info);
1144 1142
1145 sh_mobile_lcdc_stop(priv); 1143 sh_mobile_lcdc_stop(priv);
@@ -1162,7 +1160,8 @@ static int sh_mobile_lcdc_remove(struct platform_device *pdev)
1162 if (priv->dot_clk) 1160 if (priv->dot_clk)
1163 clk_put(priv->dot_clk); 1161 clk_put(priv->dot_clk);
1164 1162
1165 pm_runtime_disable(priv->dev); 1163 if (priv->dev)
1164 pm_runtime_disable(priv->dev);
1166 1165
1167 if (priv->base) 1166 if (priv->base)
1168 iounmap(priv->base); 1167 iounmap(priv->base);
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c
index ef36fca2eed4..3a7e9ff8a746 100644
--- a/drivers/w1/masters/omap_hdq.c
+++ b/drivers/w1/masters/omap_hdq.c
@@ -16,6 +16,7 @@
16#include <linux/err.h> 16#include <linux/err.h>
17#include <linux/clk.h> 17#include <linux/clk.h>
18#include <linux/io.h> 18#include <linux/io.h>
19#include <linux/sched.h>
19 20
20#include <asm/irq.h> 21#include <asm/irq.h>
21#include <mach/hardware.h> 22#include <mach/hardware.h>
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
index 1ed3d554e372..17726a05a0a6 100644
--- a/drivers/w1/slaves/w1_therm.c
+++ b/drivers/w1/slaves/w1_therm.c
@@ -115,9 +115,8 @@ static struct w1_therm_family_converter w1_therm_families[] = {
115 115
116static inline int w1_DS18B20_convert_temp(u8 rom[9]) 116static inline int w1_DS18B20_convert_temp(u8 rom[9])
117{ 117{
118 int t = ((s16)rom[1] << 8) | rom[0]; 118 s16 t = le16_to_cpup((__le16 *)rom);
119 t = t*1000/16; 119 return t*1000/16;
120 return t;
121} 120}
122 121
123static inline int w1_DS18S20_convert_temp(u8 rom[9]) 122static inline int w1_DS18S20_convert_temp(u8 rom[9])
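[Editor's note, illustrative only] The DS18B20 hunk above replaces the hand-rolled byte assembly with le16_to_cpup(), so the little-endian scratchpad value is interpreted as a signed 16-bit quantity (1/16 °C units) before scaling by 1000/16. A small userspace sketch of the equivalent arithmetic follows, using the sample readings from the DS18B20 datasheet; this is not the driver's code path, just the conversion it performs.

/*
 * Same fixed-point conversion the hunk performs with le16_to_cpup(),
 * done by hand on two sample DS18B20 scratchpad readings.
 */
#include <stdio.h>
#include <stdint.h>

/* rom[0] is the temperature LSB, rom[1] the MSB (little-endian on the wire). */
static int ds18b20_millidegrees(const uint8_t rom[2])
{
	int16_t raw = (int16_t)(rom[0] | (rom[1] << 8));  /* signed 1/16 °C units */
	return raw * 1000 / 16;
}

int main(void)
{
	const uint8_t plus_25_0625[2] = { 0x91, 0x01 };   /* +25.0625 °C */
	const uint8_t minus_10_125[2] = { 0x5e, 0xff };   /* -10.125 °C  */

	printf("%d m°C\n", ds18b20_millidegrees(plus_25_0625));  /* prints 25062  */
	printf("%d m°C\n", ds18b20_millidegrees(minus_10_125));  /* prints -10125 */
	return 0;
}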
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 0bf5020d0d32..b87ba23442d2 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -175,7 +175,7 @@ config SA1100_WATCHDOG
175 175
176config MPCORE_WATCHDOG 176config MPCORE_WATCHDOG
177 tristate "MPcore watchdog" 177 tristate "MPcore watchdog"
178 depends on ARM_MPCORE_PLATFORM && LOCAL_TIMERS 178 depends on HAVE_ARM_TWD
179 help 179 help
180 Watchdog timer embedded into the MPcore system. 180 Watchdog timer embedded into the MPcore system.
181 181
diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c
index 500d38342e1e..801ead191499 100644
--- a/drivers/watchdog/booke_wdt.c
+++ b/drivers/watchdog/booke_wdt.c
@@ -121,7 +121,7 @@ static ssize_t booke_wdt_write(struct file *file, const char __user *buf,
121 return count; 121 return count;
122} 122}
123 123
124static const struct watchdog_info ident = { 124static struct watchdog_info ident = {
125 .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, 125 .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
126 .identity = "PowerPC Book-E Watchdog", 126 .identity = "PowerPC Book-E Watchdog",
127}; 127};
diff --git a/drivers/watchdog/ep93xx_wdt.c b/drivers/watchdog/ep93xx_wdt.c
index 88ed54e50f74..59359c9a5e01 100644
--- a/drivers/watchdog/ep93xx_wdt.c
+++ b/drivers/watchdog/ep93xx_wdt.c
@@ -244,7 +244,7 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started");
244module_param(timeout, int, 0); 244module_param(timeout, int, 0);
245MODULE_PARM_DESC(timeout, 245MODULE_PARM_DESC(timeout,
246 "Watchdog timeout in seconds. (1<=timeout<=3600, default=" 246 "Watchdog timeout in seconds. (1<=timeout<=3600, default="
247 __MODULE_STRING(WATCHDOG_TIMEOUT) ")"); 247 __MODULE_STRING(WDT_TIMEOUT) ")");
248 248
249MODULE_AUTHOR("Ray Lehtiniemi <rayl@mail.com>," 249MODULE_AUTHOR("Ray Lehtiniemi <rayl@mail.com>,"
250 "Alessandro Zummo <a.zummo@towertech.it>"); 250 "Alessandro Zummo <a.zummo@towertech.it>");
diff --git a/drivers/watchdog/mpcore_wdt.c b/drivers/watchdog/mpcore_wdt.c
index 016c6a791cab..b8ec7aca3c8e 100644
--- a/drivers/watchdog/mpcore_wdt.c
+++ b/drivers/watchdog/mpcore_wdt.c
@@ -31,8 +31,9 @@
31#include <linux/platform_device.h> 31#include <linux/platform_device.h>
32#include <linux/uaccess.h> 32#include <linux/uaccess.h>
33#include <linux/slab.h> 33#include <linux/slab.h>
34#include <linux/io.h>
34 35
35#include <asm/hardware/arm_twd.h> 36#include <asm/smp_twd.h>
36 37
37struct mpcore_wdt { 38struct mpcore_wdt {
38 unsigned long timer_alive; 39 unsigned long timer_alive;
@@ -44,7 +45,7 @@ struct mpcore_wdt {
44}; 45};
45 46
46static struct platform_device *mpcore_wdt_dev; 47static struct platform_device *mpcore_wdt_dev;
47extern unsigned int mpcore_timer_rate; 48static DEFINE_SPINLOCK(wdt_lock);
48 49
49#define TIMER_MARGIN 60 50#define TIMER_MARGIN 60
50static int mpcore_margin = TIMER_MARGIN; 51static int mpcore_margin = TIMER_MARGIN;
@@ -94,13 +95,15 @@ static irqreturn_t mpcore_wdt_fire(int irq, void *arg)
94 */ 95 */
95static void mpcore_wdt_keepalive(struct mpcore_wdt *wdt) 96static void mpcore_wdt_keepalive(struct mpcore_wdt *wdt)
96{ 97{
97 unsigned int count; 98 unsigned long count;
98 99
100 spin_lock(&wdt_lock);
99 /* Assume prescale is set to 256 */ 101 /* Assume prescale is set to 256 */
100 count = (mpcore_timer_rate / 256) * mpcore_margin; 102 count = __raw_readl(wdt->base + TWD_WDOG_COUNTER);
103 count = (0xFFFFFFFFU - count) * (HZ / 5);
104 count = (count / 256) * mpcore_margin;
101 105
102 /* Reload the counter */ 106 /* Reload the counter */
103 spin_lock(&wdt_lock);
104 writel(count + wdt->perturb, wdt->base + TWD_WDOG_LOAD); 107 writel(count + wdt->perturb, wdt->base + TWD_WDOG_LOAD);
105 wdt->perturb = wdt->perturb ? 0 : 1; 108 wdt->perturb = wdt->perturb ? 0 : 1;
106 spin_unlock(&wdt_lock); 109 spin_unlock(&wdt_lock);
@@ -119,7 +122,6 @@ static void mpcore_wdt_start(struct mpcore_wdt *wdt)
119{ 122{
120 dev_printk(KERN_INFO, wdt->dev, "enabling watchdog.\n"); 123 dev_printk(KERN_INFO, wdt->dev, "enabling watchdog.\n");
121 124
122 spin_lock(&wdt_lock);
123 /* This loads the count register but does NOT start the count yet */ 125 /* This loads the count register but does NOT start the count yet */
124 mpcore_wdt_keepalive(wdt); 126 mpcore_wdt_keepalive(wdt);
125 127
@@ -130,7 +132,6 @@ static void mpcore_wdt_start(struct mpcore_wdt *wdt)
130 /* Enable watchdog - prescale=256, watchdog mode=1, enable=1 */ 132 /* Enable watchdog - prescale=256, watchdog mode=1, enable=1 */
131 writel(0x0000FF09, wdt->base + TWD_WDOG_CONTROL); 133 writel(0x0000FF09, wdt->base + TWD_WDOG_CONTROL);
132 } 134 }
133 spin_unlock(&wdt_lock);
134} 135}
135 136
136static int mpcore_wdt_set_heartbeat(int t) 137static int mpcore_wdt_set_heartbeat(int t)
@@ -360,7 +361,7 @@ static int __devinit mpcore_wdt_probe(struct platform_device *dev)
360 mpcore_wdt_miscdev.parent = &dev->dev; 361 mpcore_wdt_miscdev.parent = &dev->dev;
361 ret = misc_register(&mpcore_wdt_miscdev); 362 ret = misc_register(&mpcore_wdt_miscdev);
362 if (ret) { 363 if (ret) {
363 dev_printk(KERN_ERR, _dev, 364 dev_printk(KERN_ERR, wdt->dev,
364 "cannot register miscdev on minor=%d (err=%d)\n", 365 "cannot register miscdev on minor=%d (err=%d)\n",
365 WATCHDOG_MINOR, ret); 366 WATCHDOG_MINOR, ret);
366 goto err_misc; 367 goto err_misc;
@@ -369,13 +370,13 @@ static int __devinit mpcore_wdt_probe(struct platform_device *dev)
369 ret = request_irq(wdt->irq, mpcore_wdt_fire, IRQF_DISABLED, 370 ret = request_irq(wdt->irq, mpcore_wdt_fire, IRQF_DISABLED,
370 "mpcore_wdt", wdt); 371 "mpcore_wdt", wdt);
371 if (ret) { 372 if (ret) {
372 dev_printk(KERN_ERR, _dev, 373 dev_printk(KERN_ERR, wdt->dev,
373 "cannot register IRQ%d for watchdog\n", wdt->irq); 374 "cannot register IRQ%d for watchdog\n", wdt->irq);
374 goto err_irq; 375 goto err_irq;
375 } 376 }
376 377
377 mpcore_wdt_stop(wdt); 378 mpcore_wdt_stop(wdt);
378 platform_set_drvdata(&dev->dev, wdt); 379 platform_set_drvdata(dev, wdt);
379 mpcore_wdt_dev = dev; 380 mpcore_wdt_dev = dev;
380 381
381 return 0; 382 return 0;
diff --git a/drivers/watchdog/sb_wdog.c b/drivers/watchdog/sb_wdog.c
index c8eadd478175..88c83aa57303 100644
--- a/drivers/watchdog/sb_wdog.c
+++ b/drivers/watchdog/sb_wdog.c
@@ -67,8 +67,8 @@ static DEFINE_SPINLOCK(sbwd_lock);
67void sbwdog_set(char __iomem *wdog, unsigned long t) 67void sbwdog_set(char __iomem *wdog, unsigned long t)
68{ 68{
69 spin_lock(&sbwd_lock); 69 spin_lock(&sbwd_lock);
70 __raw_writeb(0, wdog - 0x10); 70 __raw_writeb(0, wdog);
71 __raw_writeq(t & 0x7fffffUL, wdog); 71 __raw_writeq(t & 0x7fffffUL, wdog - 0x10);
72 spin_unlock(&sbwd_lock); 72 spin_unlock(&sbwd_lock);
73} 73}
74 74
diff --git a/drivers/watchdog/sbc_fitpc2_wdt.c b/drivers/watchdog/sbc_fitpc2_wdt.c
index 8d44c9b6fb5b..c7d67e9a7465 100644
--- a/drivers/watchdog/sbc_fitpc2_wdt.c
+++ b/drivers/watchdog/sbc_fitpc2_wdt.c
@@ -30,7 +30,7 @@
30static int nowayout = WATCHDOG_NOWAYOUT; 30static int nowayout = WATCHDOG_NOWAYOUT;
31static unsigned int margin = 60; /* (secs) Default is 1 minute */ 31static unsigned int margin = 60; /* (secs) Default is 1 minute */
32static unsigned long wdt_status; 32static unsigned long wdt_status;
33static DEFINE_SPINLOCK(wdt_lock); 33static DEFINE_MUTEX(wdt_lock);
34 34
35#define WDT_IN_USE 0 35#define WDT_IN_USE 0
36#define WDT_OK_TO_CLOSE 1 36#define WDT_OK_TO_CLOSE 1
@@ -45,26 +45,26 @@ static DEFINE_SPINLOCK(wdt_lock);
45 45
46static void wdt_send_data(unsigned char command, unsigned char data) 46static void wdt_send_data(unsigned char command, unsigned char data)
47{ 47{
48 outb(command, COMMAND_PORT);
49 msleep(100);
50 outb(data, DATA_PORT); 48 outb(data, DATA_PORT);
51 msleep(200); 49 msleep(200);
50 outb(command, COMMAND_PORT);
51 msleep(100);
52} 52}
53 53
54static void wdt_enable(void) 54static void wdt_enable(void)
55{ 55{
56 spin_lock(&wdt_lock); 56 mutex_lock(&wdt_lock);
57 wdt_send_data(IFACE_ON_COMMAND, 1); 57 wdt_send_data(IFACE_ON_COMMAND, 1);
58 wdt_send_data(REBOOT_COMMAND, margin); 58 wdt_send_data(REBOOT_COMMAND, margin);
59 spin_unlock(&wdt_lock); 59 mutex_unlock(&wdt_lock);
60} 60}
61 61
62static void wdt_disable(void) 62static void wdt_disable(void)
63{ 63{
64 spin_lock(&wdt_lock); 64 mutex_lock(&wdt_lock);
65 wdt_send_data(IFACE_ON_COMMAND, 0); 65 wdt_send_data(IFACE_ON_COMMAND, 0);
66 wdt_send_data(REBOOT_COMMAND, 0); 66 wdt_send_data(REBOOT_COMMAND, 0);
67 spin_unlock(&wdt_lock); 67 mutex_unlock(&wdt_lock);
68} 68}
69 69
70static int fitpc2_wdt_open(struct inode *inode, struct file *file) 70static int fitpc2_wdt_open(struct inode *inode, struct file *file)
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index 2ac4440e7b08..8943b8ccee1a 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -80,12 +80,6 @@ static void do_suspend(void)
80 80
81 shutting_down = SHUTDOWN_SUSPEND; 81 shutting_down = SHUTDOWN_SUSPEND;
82 82
83 err = stop_machine_create();
84 if (err) {
85 printk(KERN_ERR "xen suspend: failed to setup stop_machine %d\n", err);
86 goto out;
87 }
88
89#ifdef CONFIG_PREEMPT 83#ifdef CONFIG_PREEMPT
90 /* If the kernel is preemptible, we need to freeze all the processes 84 /* If the kernel is preemptible, we need to freeze all the processes
91 to prevent them from being in the middle of a pagetable update 85 to prevent them from being in the middle of a pagetable update
@@ -93,7 +87,7 @@ static void do_suspend(void)
93 err = freeze_processes(); 87 err = freeze_processes();
94 if (err) { 88 if (err) {
95 printk(KERN_ERR "xen suspend: freeze failed %d\n", err); 89 printk(KERN_ERR "xen suspend: freeze failed %d\n", err);
96 goto out_destroy_sm; 90 goto out;
97 } 91 }
98#endif 92#endif
99 93
@@ -136,12 +130,8 @@ out_resume:
136out_thaw: 130out_thaw:
137#ifdef CONFIG_PREEMPT 131#ifdef CONFIG_PREEMPT
138 thaw_processes(); 132 thaw_processes();
139
140out_destroy_sm:
141#endif
142 stop_machine_destroy();
143
144out: 133out:
134#endif
145 shutting_down = SHUTDOWN_INVALID; 135 shutting_down = SHUTDOWN_INVALID;
146} 136}
147#endif /* CONFIG_PM_SLEEP */ 137#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/zorro/proc.c b/drivers/zorro/proc.c
index d47c47fc048f..3c7046d79654 100644
--- a/drivers/zorro/proc.c
+++ b/drivers/zorro/proc.c
@@ -97,7 +97,7 @@ static void zorro_seq_stop(struct seq_file *m, void *v)
97 97
98static int zorro_seq_show(struct seq_file *m, void *v) 98static int zorro_seq_show(struct seq_file *m, void *v)
99{ 99{
100 u_int slot = *(loff_t *)v; 100 unsigned int slot = *(loff_t *)v;
101 struct zorro_dev *z = &zorro_autocon[slot]; 101 struct zorro_dev *z = &zorro_autocon[slot];
102 102
103 seq_printf(m, "%02x\t%08x\t%08lx\t%08lx\t%02x\n", slot, z->id, 103 seq_printf(m, "%02x\t%08x\t%08lx\t%08lx\t%02x\n", slot, z->id,
@@ -129,7 +129,7 @@ static const struct file_operations zorro_devices_proc_fops = {
129 129
130static struct proc_dir_entry *proc_bus_zorro_dir; 130static struct proc_dir_entry *proc_bus_zorro_dir;
131 131
132static int __init zorro_proc_attach_device(u_int slot) 132static int __init zorro_proc_attach_device(unsigned int slot)
133{ 133{
134 struct proc_dir_entry *entry; 134 struct proc_dir_entry *entry;
135 char name[4]; 135 char name[4];
@@ -146,7 +146,7 @@ static int __init zorro_proc_attach_device(u_int slot)
146 146
147static int __init zorro_proc_init(void) 147static int __init zorro_proc_init(void)
148{ 148{
149 u_int slot; 149 unsigned int slot;
150 150
151 if (MACH_IS_AMIGA && AMIGAHW_PRESENT(ZORRO)) { 151 if (MACH_IS_AMIGA && AMIGAHW_PRESENT(ZORRO)) {
152 proc_bus_zorro_dir = proc_mkdir("bus/zorro", NULL); 152 proc_bus_zorro_dir = proc_mkdir("bus/zorro", NULL);
diff --git a/drivers/zorro/zorro-driver.c b/drivers/zorro/zorro-driver.c
index 53180a37cc9a..7ee2b6e71786 100644
--- a/drivers/zorro/zorro-driver.c
+++ b/drivers/zorro/zorro-driver.c
@@ -137,10 +137,34 @@ static int zorro_bus_match(struct device *dev, struct device_driver *drv)
137 return 0; 137 return 0;
138} 138}
139 139
140static int zorro_uevent(struct device *dev, struct kobj_uevent_env *env)
141{
142#ifdef CONFIG_HOTPLUG
143 struct zorro_dev *z;
144
145 if (!dev)
146 return -ENODEV;
147
148 z = to_zorro_dev(dev);
149 if (!z)
150 return -ENODEV;
151
152 if (add_uevent_var(env, "ZORRO_ID=%08X", z->id) ||
153 add_uevent_var(env, "ZORRO_SLOT_NAME=%s", dev_name(dev)) ||
154 add_uevent_var(env, "ZORRO_SLOT_ADDR=%04X", z->slotaddr) ||
155 add_uevent_var(env, "MODALIAS=" ZORRO_DEVICE_MODALIAS_FMT, z->id))
156 return -ENOMEM;
157
158 return 0;
159#else /* !CONFIG_HOTPLUG */
160 return -ENODEV;
161#endif /* !CONFIG_HOTPLUG */
162}
140 163
141struct bus_type zorro_bus_type = { 164struct bus_type zorro_bus_type = {
142 .name = "zorro", 165 .name = "zorro",
143 .match = zorro_bus_match, 166 .match = zorro_bus_match,
167 .uevent = zorro_uevent,
144 .probe = zorro_device_probe, 168 .probe = zorro_device_probe,
145 .remove = zorro_device_remove, 169 .remove = zorro_device_remove,
146}; 170};
diff --git a/drivers/zorro/zorro-sysfs.c b/drivers/zorro/zorro-sysfs.c
index 1d2a772ea14c..eb924e0a64ce 100644
--- a/drivers/zorro/zorro-sysfs.c
+++ b/drivers/zorro/zorro-sysfs.c
@@ -77,6 +77,16 @@ static struct bin_attribute zorro_config_attr = {
77 .read = zorro_read_config, 77 .read = zorro_read_config,
78}; 78};
79 79
80static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
81 char *buf)
82{
83 struct zorro_dev *z = to_zorro_dev(dev);
84
85 return sprintf(buf, ZORRO_DEVICE_MODALIAS_FMT "\n", z->id);
86}
87
88static DEVICE_ATTR(modalias, S_IRUGO, modalias_show, NULL);
89
80int zorro_create_sysfs_dev_files(struct zorro_dev *z) 90int zorro_create_sysfs_dev_files(struct zorro_dev *z)
81{ 91{
82 struct device *dev = &z->dev; 92 struct device *dev = &z->dev;
@@ -89,6 +99,7 @@ int zorro_create_sysfs_dev_files(struct zorro_dev *z)
89 (error = device_create_file(dev, &dev_attr_slotaddr)) || 99 (error = device_create_file(dev, &dev_attr_slotaddr)) ||
90 (error = device_create_file(dev, &dev_attr_slotsize)) || 100 (error = device_create_file(dev, &dev_attr_slotsize)) ||
91 (error = device_create_file(dev, &dev_attr_resource)) || 101 (error = device_create_file(dev, &dev_attr_resource)) ||
102 (error = device_create_file(dev, &dev_attr_modalias)) ||
92 (error = sysfs_create_bin_file(&dev->kobj, &zorro_config_attr))) 103 (error = sysfs_create_bin_file(&dev->kobj, &zorro_config_attr)))
93 return error; 104 return error;
94 105
diff --git a/drivers/zorro/zorro.c b/drivers/zorro/zorro.c
index d45fb34e2d23..6455f3a244c5 100644
--- a/drivers/zorro/zorro.c
+++ b/drivers/zorro/zorro.c
@@ -15,6 +15,8 @@
15#include <linux/zorro.h> 15#include <linux/zorro.h>
16#include <linux/bitops.h> 16#include <linux/bitops.h>
17#include <linux/string.h> 17#include <linux/string.h>
18#include <linux/platform_device.h>
19#include <linux/slab.h>
18 20
19#include <asm/setup.h> 21#include <asm/setup.h>
20#include <asm/amigahw.h> 22#include <asm/amigahw.h>
@@ -26,24 +28,17 @@
26 * Zorro Expansion Devices 28 * Zorro Expansion Devices
27 */ 29 */
28 30
29u_int zorro_num_autocon = 0; 31unsigned int zorro_num_autocon;
30struct zorro_dev zorro_autocon[ZORRO_NUM_AUTO]; 32struct zorro_dev zorro_autocon[ZORRO_NUM_AUTO];
31 33
32 34
33 /* 35 /*
34 * Single Zorro bus 36 * Zorro bus
35 */ 37 */
36 38
37struct zorro_bus zorro_bus = {\ 39struct zorro_bus {
38 .resources = { 40 struct list_head devices; /* list of devices on this bus */
39 /* Zorro II regions (on Zorro II/III) */ 41 struct device dev;
40 { .name = "Zorro II exp", .start = 0x00e80000, .end = 0x00efffff },
41 { .name = "Zorro II mem", .start = 0x00200000, .end = 0x009fffff },
42 /* Zorro III regions (on Zorro III only) */
43 { .name = "Zorro III exp", .start = 0xff000000, .end = 0xffffffff },
44 { .name = "Zorro III cfg", .start = 0x40000000, .end = 0x7fffffff }
45 },
46 .name = "Zorro bus"
47}; 42};
48 43
49 44
@@ -53,18 +48,19 @@ struct zorro_bus zorro_bus = {\
53 48
54struct zorro_dev *zorro_find_device(zorro_id id, struct zorro_dev *from) 49struct zorro_dev *zorro_find_device(zorro_id id, struct zorro_dev *from)
55{ 50{
56 struct zorro_dev *z; 51 struct zorro_dev *z;
57 52
58 if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(ZORRO)) 53 if (!zorro_num_autocon)
59 return NULL; 54 return NULL;
60 55
61 for (z = from ? from+1 : &zorro_autocon[0]; 56 for (z = from ? from+1 : &zorro_autocon[0];
62 z < zorro_autocon+zorro_num_autocon; 57 z < zorro_autocon+zorro_num_autocon;
63 z++) 58 z++)
64 if (id == ZORRO_WILDCARD || id == z->id) 59 if (id == ZORRO_WILDCARD || id == z->id)
65 return z; 60 return z;
66 return NULL; 61 return NULL;
67} 62}
63EXPORT_SYMBOL(zorro_find_device);
68 64
69 65
70 /* 66 /*
@@ -83,121 +79,138 @@ struct zorro_dev *zorro_find_device(zorro_id id, struct zorro_dev *from)
83 */ 79 */
84 80
85DECLARE_BITMAP(zorro_unused_z2ram, 128); 81DECLARE_BITMAP(zorro_unused_z2ram, 128);
82EXPORT_SYMBOL(zorro_unused_z2ram);
86 83
87 84
88static void __init mark_region(unsigned long start, unsigned long end, 85static void __init mark_region(unsigned long start, unsigned long end,
89 int flag) 86 int flag)
90{ 87{
91 if (flag)
92 start += Z2RAM_CHUNKMASK;
93 else
94 end += Z2RAM_CHUNKMASK;
95 start &= ~Z2RAM_CHUNKMASK;
96 end &= ~Z2RAM_CHUNKMASK;
97
98 if (end <= Z2RAM_START || start >= Z2RAM_END)
99 return;
100 start = start < Z2RAM_START ? 0x00000000 : start-Z2RAM_START;
101 end = end > Z2RAM_END ? Z2RAM_SIZE : end-Z2RAM_START;
102 while (start < end) {
103 u32 chunk = start>>Z2RAM_CHUNKSHIFT;
104 if (flag) 88 if (flag)
105 set_bit(chunk, zorro_unused_z2ram); 89 start += Z2RAM_CHUNKMASK;
106 else 90 else
107 clear_bit(chunk, zorro_unused_z2ram); 91 end += Z2RAM_CHUNKMASK;
108 start += Z2RAM_CHUNKSIZE; 92 start &= ~Z2RAM_CHUNKMASK;
109 } 93 end &= ~Z2RAM_CHUNKMASK;
94
95 if (end <= Z2RAM_START || start >= Z2RAM_END)
96 return;
97 start = start < Z2RAM_START ? 0x00000000 : start-Z2RAM_START;
98 end = end > Z2RAM_END ? Z2RAM_SIZE : end-Z2RAM_START;
99 while (start < end) {
100 u32 chunk = start>>Z2RAM_CHUNKSHIFT;
101 if (flag)
102 set_bit(chunk, zorro_unused_z2ram);
103 else
104 clear_bit(chunk, zorro_unused_z2ram);
105 start += Z2RAM_CHUNKSIZE;
106 }
110} 107}
111 108
112 109
113static struct resource __init *zorro_find_parent_resource(struct zorro_dev *z) 110static struct resource __init *zorro_find_parent_resource(
111 struct platform_device *bridge, struct zorro_dev *z)
114{ 112{
115 int i; 113 int i;
116 114
117 for (i = 0; i < zorro_bus.num_resources; i++) 115 for (i = 0; i < bridge->num_resources; i++) {
118 if (zorro_resource_start(z) >= zorro_bus.resources[i].start && 116 struct resource *r = &bridge->resource[i];
119 zorro_resource_end(z) <= zorro_bus.resources[i].end) 117 if (zorro_resource_start(z) >= r->start &&
120 return &zorro_bus.resources[i]; 118 zorro_resource_end(z) <= r->end)
121 return &iomem_resource; 119 return r;
120 }
121 return &iomem_resource;
122} 122}
123 123
124 124
125 /*
126 * Initialization
127 */
128 125
129static int __init zorro_init(void) 126static int __init amiga_zorro_probe(struct platform_device *pdev)
130{ 127{
131 struct zorro_dev *z; 128 struct zorro_bus *bus;
132 unsigned int i; 129 struct zorro_dev *z;
133 int error; 130 struct resource *r;
134 131 unsigned int i;
135 if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(ZORRO)) 132 int error;
136 return 0; 133
137 134 /* Initialize the Zorro bus */
138 pr_info("Zorro: Probing AutoConfig expansion devices: %d device%s\n", 135 bus = kzalloc(sizeof(*bus), GFP_KERNEL);
139 zorro_num_autocon, zorro_num_autocon == 1 ? "" : "s"); 136 if (!bus)
140 137 return -ENOMEM;
141 /* Initialize the Zorro bus */ 138
142 INIT_LIST_HEAD(&zorro_bus.devices); 139 INIT_LIST_HEAD(&bus->devices);
143 dev_set_name(&zorro_bus.dev, "zorro"); 140 bus->dev.parent = &pdev->dev;
144 error = device_register(&zorro_bus.dev); 141 dev_set_name(&bus->dev, "zorro");
145 if (error) { 142 error = device_register(&bus->dev);
146 pr_err("Zorro: Error registering zorro_bus\n");
147 return error;
148 }
149
150 /* Request the resources */
151 zorro_bus.num_resources = AMIGAHW_PRESENT(ZORRO3) ? 4 : 2;
152 for (i = 0; i < zorro_bus.num_resources; i++)
153 request_resource(&iomem_resource, &zorro_bus.resources[i]);
154
155 /* Register all devices */
156 for (i = 0; i < zorro_num_autocon; i++) {
157 z = &zorro_autocon[i];
158 z->id = (z->rom.er_Manufacturer<<16) | (z->rom.er_Product<<8);
159 if (z->id == ZORRO_PROD_GVP_EPC_BASE) {
160 /* GVP quirk */
161 unsigned long magic = zorro_resource_start(z)+0x8000;
162 z->id |= *(u16 *)ZTWO_VADDR(magic) & GVP_PRODMASK;
163 }
164 sprintf(z->name, "Zorro device %08x", z->id);
165 zorro_name_device(z);
166 z->resource.name = z->name;
167 if (request_resource(zorro_find_parent_resource(z), &z->resource))
168 pr_err("Zorro: Address space collision on device %s %pR\n",
169 z->name, &z->resource);
170 dev_set_name(&z->dev, "%02x", i);
171 z->dev.parent = &zorro_bus.dev;
172 z->dev.bus = &zorro_bus_type;
173 error = device_register(&z->dev);
174 if (error) { 143 if (error) {
175 pr_err("Zorro: Error registering device %s\n", z->name); 144 pr_err("Zorro: Error registering zorro_bus\n");
176 continue; 145 kfree(bus);
146 return error;
177 } 147 }
178 error = zorro_create_sysfs_dev_files(z); 148 platform_set_drvdata(pdev, bus);
179 if (error) 149
180 dev_err(&z->dev, "Error creating sysfs files\n"); 150 /* Register all devices */
181 } 151 pr_info("Zorro: Probing AutoConfig expansion devices: %u device%s\n",
182 152 zorro_num_autocon, zorro_num_autocon == 1 ? "" : "s");
183 /* Mark all available Zorro II memory */ 153
184 zorro_for_each_dev(z) { 154 for (i = 0; i < zorro_num_autocon; i++) {
185 if (z->rom.er_Type & ERTF_MEMLIST) 155 z = &zorro_autocon[i];
186 mark_region(zorro_resource_start(z), zorro_resource_end(z)+1, 1); 156 z->id = (z->rom.er_Manufacturer<<16) | (z->rom.er_Product<<8);
187 } 157 if (z->id == ZORRO_PROD_GVP_EPC_BASE) {
188 158 /* GVP quirk */
189 /* Unmark all used Zorro II memory */ 159 unsigned long magic = zorro_resource_start(z)+0x8000;
190 for (i = 0; i < m68k_num_memory; i++) 160 z->id |= *(u16 *)ZTWO_VADDR(magic) & GVP_PRODMASK;
191 if (m68k_memory[i].addr < 16*1024*1024) 161 }
192 mark_region(m68k_memory[i].addr, 162 sprintf(z->name, "Zorro device %08x", z->id);
193 m68k_memory[i].addr+m68k_memory[i].size, 0); 163 zorro_name_device(z);
194 164 z->resource.name = z->name;
195 return 0; 165 r = zorro_find_parent_resource(pdev, z);
166 error = request_resource(r, &z->resource);
167 if (error)
168 dev_err(&bus->dev,
169 "Address space collision on device %s %pR\n",
170 z->name, &z->resource);
171 dev_set_name(&z->dev, "%02x", i);
172 z->dev.parent = &bus->dev;
173 z->dev.bus = &zorro_bus_type;
174 error = device_register(&z->dev);
175 if (error) {
176 dev_err(&bus->dev, "Error registering device %s\n",
177 z->name);
178 continue;
179 }
180 error = zorro_create_sysfs_dev_files(z);
181 if (error)
182 dev_err(&z->dev, "Error creating sysfs files\n");
183 }
184
185 /* Mark all available Zorro II memory */
186 zorro_for_each_dev(z) {
187 if (z->rom.er_Type & ERTF_MEMLIST)
188 mark_region(zorro_resource_start(z),
189 zorro_resource_end(z)+1, 1);
190 }
191
192 /* Unmark all used Zorro II memory */
193 for (i = 0; i < m68k_num_memory; i++)
194 if (m68k_memory[i].addr < 16*1024*1024)
195 mark_region(m68k_memory[i].addr,
196 m68k_memory[i].addr+m68k_memory[i].size,
197 0);
198
199 return 0;
196} 200}
197 201
198subsys_initcall(zorro_init); 202static struct platform_driver amiga_zorro_driver = {
203 .driver = {
204 .name = "amiga-zorro",
205 .owner = THIS_MODULE,
206 },
207};
199 208
200EXPORT_SYMBOL(zorro_find_device); 209static int __init amiga_zorro_init(void)
201EXPORT_SYMBOL(zorro_unused_z2ram); 210{
211 return platform_driver_probe(&amiga_zorro_driver, amiga_zorro_probe);
212}
213
214module_init(amiga_zorro_init);
202 215
203MODULE_LICENSE("GPL"); 216MODULE_LICENSE("GPL");